id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
256705 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from api_x.zyt.vas import user
def get_user_cash_balance(account_user_id):
    """Return the cash balance for *account_user_id*.

    Thin pass-through to the VAS user service.
    """
    # FIXME: once the application becomes a truly independent subsystem,
    # this should go through the API instead of calling the service directly.
    return user.get_user_cash_balance(account_user_id)
| StarcoderdataPython |
158148 | <reponame>rockman/learn-python-flask
from simple_forum.db import db
tags_to_posts = db.Table('tags_to_posts',
db.Column('tag_id', db.Integer, db.ForeignKey('tag.id')),
db.Column('post_id', db.Integer, db.ForeignKey('post.id'))
)
class Tag(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text, nullable=False)
posts = db.relationship('Post', secondary=tags_to_posts, backref=db.backref('tags', lazy=True)) | StarcoderdataPython |
3552294 | import io
import os
from unittest import mock
import numpy as np
import pytest
import tempfile
from mlagents_envs.communicator_objects.demonstration_meta_pb2 import (
DemonstrationMetaProto,
)
from mlagents.trainers.tests.mock_brain import (
create_mock_3dball_behavior_specs,
setup_test_behavior_specs,
)
from mlagents.trainers.demo_loader import (
load_demonstration,
demo_to_buffer,
get_demo_files,
write_delimited,
)
BEHAVIOR_SPEC = create_mock_3dball_behavior_specs()
def test_load_demo():
    """Load the bundled single-file demo and check its basic invariants."""
    path_prefix = os.path.dirname(os.path.abspath(__file__))
    behavior_spec, pair_infos, total_expected = load_demonstration(
        path_prefix + "/test.demo"
    )
    # The demo's first sensor spec should describe 8 observation values total.
    assert np.sum(behavior_spec.sensor_specs[0].shape) == 8
    assert len(pair_infos) == total_expected
    _, demo_buffer = demo_to_buffer(path_prefix + "/test.demo", 1, BEHAVIOR_SPEC)
    # The buffer holds one fewer entry than the raw pairs (presumably the
    # terminal experience is dropped -- confirm against demo_to_buffer).
    assert (
        len(demo_buffer["continuous_action"]) == total_expected - 1
        or len(demo_buffer["discrete_action"]) == total_expected - 1
    )
def test_load_demo_dir():
    """Same checks as test_load_demo, but loading a directory of demos."""
    path_prefix = os.path.dirname(os.path.abspath(__file__))
    behavior_spec, pair_infos, total_expected = load_demonstration(
        path_prefix + "/test_demo_dir"
    )
    assert np.sum(behavior_spec.sensor_specs[0].shape) == 8
    assert len(pair_infos) == total_expected
    _, demo_buffer = demo_to_buffer(path_prefix + "/test_demo_dir", 1, BEHAVIOR_SPEC)
    assert (
        len(demo_buffer["continuous_action"]) == total_expected - 1
        or len(demo_buffer["discrete_action"]) == total_expected - 1
    )
def test_demo_mismatch():
    """demo_to_buffer must raise RuntimeError when the supplied behavior spec
    disagrees with the recorded demo (obs size, action size, action type,
    or number of observations).
    """
    path_prefix = os.path.dirname(os.path.abspath(__file__))
    # observation size mismatch
    with pytest.raises(RuntimeError):
        mismatch_obs = setup_test_behavior_specs(
            False, False, vector_action_space=2, vector_obs_space=9
        )
        _, demo_buffer = demo_to_buffer(path_prefix + "/test.demo", 1, mismatch_obs)
    # action mismatch
    with pytest.raises(RuntimeError):
        mismatch_act = setup_test_behavior_specs(
            False, False, vector_action_space=3, vector_obs_space=9
        )
        _, demo_buffer = demo_to_buffer(path_prefix + "/test.demo", 1, mismatch_act)
    # action type mismatch
    with pytest.raises(RuntimeError):
        mismatch_act_type = setup_test_behavior_specs(
            True, False, vector_action_space=[2], vector_obs_space=9
        )
        _, demo_buffer = demo_to_buffer(
            path_prefix + "/test.demo", 1, mismatch_act_type
        )
    # number obs mismatch
    with pytest.raises(RuntimeError):
        mismatch_obs_number = setup_test_behavior_specs(
            False, True, vector_action_space=2, vector_obs_space=9
        )
        _, demo_buffer = demo_to_buffer(
            path_prefix + "/test.demo", 1, mismatch_obs_number
        )
def test_edge_cases():
    """get_demo_files: error paths plus valid file/directory resolution."""
    path_prefix = os.path.dirname(os.path.abspath(__file__))
    # nonexistent file and directory
    with pytest.raises(FileNotFoundError):
        get_demo_files(os.path.join(path_prefix, "nonexistent_file.demo"))
    with pytest.raises(FileNotFoundError):
        get_demo_files(os.path.join(path_prefix, "nonexistent_directory"))
    with tempfile.TemporaryDirectory() as tmpdirname:
        # empty directory
        with pytest.raises(ValueError):
            get_demo_files(tmpdirname)
        # invalid file
        invalid_fname = os.path.join(tmpdirname, "mydemo.notademo")
        with open(invalid_fname, "w") as f:
            f.write("I'm not a demo")
        with pytest.raises(ValueError):
            get_demo_files(invalid_fname)
        # invalid directory (still contains only the non-demo file)
        with pytest.raises(ValueError):
            get_demo_files(tmpdirname)
        # valid file
        valid_fname = os.path.join(tmpdirname, "mydemo.demo")
        with open(valid_fname, "w") as f:
            f.write("I'm a demo file")
        assert get_demo_files(valid_fname) == [valid_fname]
        # valid directory: only *.demo files are returned
        assert get_demo_files(tmpdirname) == [valid_fname]
@mock.patch("mlagents.trainers.demo_loader.get_demo_files", return_value=["foo.demo"])
def test_unsupported_version_raises_error(mock_get_demo_files):
    """Loading a demo whose metadata declares an unknown API version fails."""
    # Create a metadata proto with an unsupported version
    bad_metadata = DemonstrationMetaProto()
    bad_metadata.api_version = 1337
    # Write the metadata to a temporary buffer, which will get returned by open()
    buffer = io.BytesIO()
    write_delimited(buffer, bad_metadata)
    m = mock.mock_open(read_data=buffer.getvalue())
    # Make sure that we get a RuntimeError when trying to load this.
    with mock.patch("builtins.open", m):
        with pytest.raises(RuntimeError):
            load_demonstration("foo")
| StarcoderdataPython |
11246207 | import unittest
from ..context import zoia
import zoia.parse.normalization
class TestNormalization(unittest.TestCase):
    """Unit tests for the ``zoia.parse.normalization`` helpers.

    NOTE(review): several ``'<NAME>'`` literals look like redacted test data
    from a dataset export -- confirm expected values against upstream.
    """

    def test_strip_diacritics(self):
        """Diacritics are removed; case and plain ASCII are untouched."""
        self.assertEqual(
            zoia.parse.normalization.strip_diacritics('foo'), 'foo'
        )
        self.assertEqual(
            zoia.parse.normalization.strip_diacritics('Foo'), 'Foo'
        )
        self.assertEqual(
            zoia.parse.normalization.strip_diacritics('Fóò'), 'Foo'
        )

    def test_normalize_string(self):
        """normalize_name lowercases and strips diacritics."""
        self.assertEqual(zoia.parse.normalization.normalize_name('foo'), 'foo')
        self.assertEqual(zoia.parse.normalization.normalize_name('Foo'), 'foo')
        self.assertEqual(zoia.parse.normalization.normalize_name('Fóò'), 'foo')

    def test_split_name(self):
        """split_name returns a [given-names, family-name] pair."""
        self.assertEqual(
            zoia.parse.normalization.split_name('Doe'), ['', 'Doe']
        )
        self.assertEqual(
            zoia.parse.normalization.split_name('<NAME>'), ['John', 'Doe']
        )
        self.assertEqual(
            zoia.parse.normalization.split_name('<NAME>'),
            ['John', '<NAME>'],
        )
        self.assertEqual(
            zoia.parse.normalization.split_name('John Q. Public'),
            ['<NAME>.', 'Public'],
        )

    def test_normalize_title_word(self):
        """normalize_title_word lowercases and strips quotes / TeX markup."""
        self.assertEqual(
            zoia.parse.normalization.normalize_title_word('The'), 'the'
        )
        self.assertEqual(
            zoia.parse.normalization.normalize_title_word('"Why"'), 'why'
        )
        self.assertEqual(
            zoia.parse.normalization.normalize_title_word(r'$\eta_3$'), 'eta3'
        )
| StarcoderdataPython |
4873319 | n = int(input())
matriz = input().split()
mult = [2,3,4,5]
n2, n3, n4, n5 = 0, 0, 0 ,0
for x in range(n):
for y in range(4):
n1 = int(matriz[x])
if n1 % mult[y] == 0:
pos = y
if pos == 0:
n2 += 1
elif pos == 1:
n3 += 1
elif pos == 2:
n4 += 1
else:
n5 += 1
print("%d Multiplo(s) de 2"%n2)
print("%d Multiplo(s) de 3"%n3)
print("%d Multiplo(s) de 4"%n4)
print("%d Multiplo(s) de 5"%n5)
| StarcoderdataPython |
8177758 | <filename>cli/medperf/commands/dataset/__init__.py
from .associate import DatasetBenchmarkAssociation
from .submit import DatasetRegistration
from .list import DatasetsList
from .create import DataPreparation
# NOTE(review): ``all`` shadows the builtin and was probably meant to be
# ``__all__``; confirm no caller relies on ``dataset.all`` before renaming.
all = [DatasetBenchmarkAssociation, DatasetRegistration, DatasetsList, DataPreparation]
| StarcoderdataPython |
1997407 | <filename>sme_material_apps/proponentes/migrations/0010_auto_20200730_1420.py
# Generated by Django 2.2.9 on 2020-07-30 17:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alters the ``Proponente.kits`` M2M field
    (new state allows blank, related_name='proponentes', target 'core.Kit').
    """

    dependencies = [
        ('proponentes', '0009_auto_20200729_1628'),
    ]
    operations = [
        migrations.AlterField(
            model_name='proponente',
            name='kits',
            field=models.ManyToManyField(blank=True, related_name='proponentes', to='core.Kit'),
        ),
    ]
| StarcoderdataPython |
5116980 | # -*- coding: utf-8 -*-
"""
// Copyright 2020 PDF Association, Inc. https://www.pdfa.org
//
// This material is based upon work supported by the Defense Advanced
// Research Projects Agency (DARPA) under Contract No. HR001119C0079.
// Any opinions, findings and conclusions or recommendations expressed
// in this material are those of the author(s) and do not necessarily
// reflect the views of the Defense Advanced Research Projects Agency
// (DARPA). Approved for public release.
//
// SPDX-License-Identifier: Apache-2.0
//
// Generates a 3D/VR visualization JSON file for use with "3D Force-graph"
// from the "referencesGraph.json" file.
//
// See https://github.com/vasturiano/3d-force-graph/
//
// Author: <NAME>
"""
import json
jfile = open("referencesGraph.json")
indata = json.load(jfile)
normrefs = indata["ISO32000_2_DB"]
nodes = []
for doc in normrefs:
n = {}
n["id"] = doc["id"]
n["name"] = doc["title"]
# n["nOutLinks"] = len(doc["refs"])
# n["nInLinks"] = len(doc["referencedBy"])
# Size of planet node is proportional to the square of the number of out-going references
n["val"] = len(doc["refs"]) * len(doc["refs"])
# Short name is everything before a COMMA (normally the ISO document number or simple title)
# then trimmed before a COLON (which will strip off ISO years but so be it!)
if "label" in doc:
n["short"] = doc["label"]
elif "orgs" in doc and doc["orgs"]:
org = doc["orgs"][0]
s = org["org"]
if "stid" in org:
s += ", " + org["stid"]
if "date" in doc:
s += ", " + doc["date"]
n["short"] = s
else:
n["short"] = doc["title"]
# Make PDF 2.0 the large red centre of the 3D universe!
# otherwise rough grouping (and thus color coding of node) based on title
# Parsing "group" property by the first org in orgs array
if "orgs" in doc and doc["orgs"]:
n["group"] = doc["orgs"][0]["org"]
else:
n["group"] = "Other"
nodes.append(n)
links = []
for doc in normrefs:
refs = []
refs = doc["refs"]
for ref in refs:
lnk = {}
lnk["source"] = doc["id"]
lnk["target"] = ref
# Make all 1st order links from PDF 2.0 red
# otherwise do rough grouping (and thus color coding of link) based on source title
# Make PDF 2.0 the large red centre of the 3D universe
if doc["id"] == 0:
lnk["color"] = "red"
if "orgs" in doc:
lnk["group"] = doc["orgs"][0]["org"]
else:
lnk["group"] = "Other"
# 'desc' attribute is what links display below their label (default attribute 'name') but in smaller text
# This text is too long and makes for too much... need short friendly names for documents!
# lnk_doc = next(r for r in normrefs if r["id"] == ref)
# lnk["desc"] = "From " + doc["title"] + " to " + lnk_doc["title"]
links.append(lnk)
outdata = {}
outdata["nodes"] = nodes
outdata["links"] = links
with open("pdf20-norm-refs.json", 'w') as outfile:
json.dump(outdata, outfile, indent=4)
| StarcoderdataPython |
163657 | <gh_stars>1-10
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
@plugin_pool.register_plugin
class Disqus(CMSPluginBase):
    """Static django CMS plugin rendering ``core/disqus.html``.

    Uses the bare ``CMSPlugin`` model, i.e. the plugin stores no
    configuration of its own; all output comes from the template
    (presumably a Disqus comment embed, per the name -- confirm).
    """
    model = CMSPlugin
    render_template = "core/disqus.html"
| StarcoderdataPython |
6657213 | <gh_stars>1-10
# Post-training full-integer quantization of a frozen TF graph to TFLite,
# using images from ../calibration as the representative dataset.
import numpy as np
import sys
import cv2
import tensorflow as tf
# settings
input_model = "./models/output.pb"
input_name = "model_input"
output_node_name = "mobilenetv2_1.00_224/Logits/Softmax"
output_model = "./models/output_quant.tflite"
# load validation set
import glob
image_folder = "../calibration"
img_path = glob.glob(image_folder+"/*")
if len(img_path)==0:
    print("image not found")
    sys.exit(1)
validation_data_set=[]
for file_name in img_path:
    # NOTE: cv2.imread yields BGR channel order; images are fed as-is,
    # so the model is presumably trained on BGR input -- confirm.
    img = cv2.imread(file_name) #BGR
    img = cv2.resize(img,(224, 224))
    ary = np.asarray(img, dtype=np.float32)
    # Add the batch dimension and scale pixels to [0, 1].
    ary = np.expand_dims(ary, axis=0)
    ary = ary/255.0
    validation_data_set.append(ary)
#quantize
def representative_dataset_gen():
    # Generator required by the TFLite converter for calibration.
    for i in range(len(validation_data_set)):
        yield [validation_data_set[i]]
converter = tf.lite.TFLiteConverter.from_frozen_graph(input_model,[input_name],[output_node_name])
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.uint8
converter.inference_output_type = tf.uint8
tflite_quant_model = converter.convert()
with open(output_model, 'wb') as o_:
    o_.write(tflite_quant_model)
| StarcoderdataPython |
5080459 | <gh_stars>0
IDENTITY = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)


def inverse(a):
    """Return the inverse of the 1-based permutation *a* as a list."""
    result = [0] * len(a)
    for position, value in enumerate(a, start=1):
        result[value - 1] = position
    return result


def composition(a, b):
    """Return the permutation "a after b": element i maps to a[b[i] - 1]."""
    composed = []
    for value in b:
        composed.append(a[value - 1])
    return composed
| StarcoderdataPython |
3350496 | #program to calculate midpoints of a line.
x1 = float(input('The value of x1: '))
y1 = float(input('The value of y1: '))
x2 = float(input('The value of x1: '))
y2 = float(input('The value of y2: '))
x_m_point = (x1 + x2)/2
y_m_point = (y1 + y2)/2
print();
print( "The midpoint's x value is: ",x_m_point)
print( "The midpoint's y value is: ",y_m_point)
print(f"The midpoint of line is : {(x_m_point, y_m_point)}")
| StarcoderdataPython |
5011671 | <reponame>Tabor-Research-Group/ChemOS
__all__ = ['bot']
| StarcoderdataPython |
12824557 | <gh_stars>0
# Standard lib imports
import logging
# Third party imports
# None
# Project level imports
# None
log = logging.getLogger(__name__)
class Dashboard(object):
    """Read-only client for the dashboard REST endpoints.

    Every ``get_*`` method requires the SYSTEM_ADMIN or SYSTEM_MONITOR role
    on the server side and accepts optional query-string filters as keyword
    arguments: ``dataType``, ``startTime``, ``endTime``, ``interval``,
    ``category`` and, on endpoints that support "current frame" filtering,
    ``cfTimeFrame``/``cfTarget``.  Falsy filter values are dropped.
    """

    # Filters accepted by every dashboard endpoint.
    _BASE_KEYS = ('dataType', 'startTime', 'endTime', 'interval', 'category')
    # Endpoints with "current frame" support take two extra filters.
    _CF_KEYS = _BASE_KEYS + ('cfTimeFrame', 'cfTarget')

    def __init__(self, connection):
        """Initialize a new instance bound to *connection*."""
        self.conn = connection

    @staticmethod
    def _query_params(kwargs, keys):
        """Collect the truthy values of *keys* from *kwargs* into a dict.

        Factored out of every ``get_*`` method, which previously repeated
        the same key-filtering loop verbatim.
        """
        return {key: kwargs[key] for key in keys if kwargs.get(key)}

    def get_local_zone(self, *args, **kwargs):
        """Get the local VDC details."""
        param = self._query_params(kwargs, self._CF_KEYS)
        log.info("Getting local VDC info")
        return self.conn.get(url='dashboard/zones/localzone', params=param)

    def get_local_zone_replication_groups(self, *args, **kwargs):
        """Get the local VDC replication groups details."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting vpools in local VDC")
        return self.conn.get(url='dashboard/zones/localzone/replicationgroups', params=param)

    def get_local_zone_rglinks_failed(self, *args, **kwargs):
        """Get the local VDC replication group failed-links details."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting failed links for vpools in local VDC")
        # FIXME(review): ``param`` is built but never sent -- the original
        # also omitted ``params=param`` here; confirm intent before changing
        # the request.
        return self.conn.get(url='dashboard/zones/localzone/rglinksFailed')

    def get_local_zone_storage_pools(self, *args, **kwargs):
        """Get the local VDC storage pool details."""
        param = self._query_params(kwargs, self._CF_KEYS)
        log.info("Getting varrays in local VDC")
        return self.conn.get(url='dashboard/zones/localzone/storagepools', params=param)

    def get_local_zone_nodes(self, *args, **kwargs):
        """Get the local VDC node details."""
        param = self._query_params(kwargs, self._CF_KEYS)
        log.info("Getting nodes in local VDC")
        return self.conn.get(url='dashboard/zones/localzone/nodes', params=param)

    def get_storage_pool(self, storage_pool_id, *args, **kwargs):
        """Get details for the storage pool *storage_pool_id*."""
        param = self._query_params(kwargs, self._CF_KEYS)
        log.info("Getting info for varray '{0}'".format(storage_pool_id))
        return self.conn.get(
            url='dashboard/storagepools/{0}'.format(storage_pool_id), params=param)

    def get_node(self, node_id, *args, **kwargs):
        """Get details for the node *node_id*."""
        param = self._query_params(kwargs, self._CF_KEYS)
        log.info("Getting info for node '{0}'".format(node_id))
        # FIXME(review): as in the original, ``param`` is never sent here.
        return self.conn.get(
            url='dashboard/nodes/{0}'.format(node_id))

    def get_disk(self, disk_id, *args, **kwargs):
        """Get details for the disk *disk_id*."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting info for disk '{0}'".format(disk_id))
        return self.conn.get(
            url='dashboard/disks/{0}'.format(disk_id), params=param)

    def get_process(self, process_id, *args, **kwargs):
        """Get details for the process *process_id*."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting info for PID '{0}'".format(process_id))
        return self.conn.get(
            url='dashboard/processes/{0}'.format(process_id), params=param)

    def get_node_processes(self, node_id, *args, **kwargs):
        """Get process details for the node *node_id*."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting processes for node '{0}'".format(node_id))
        return self.conn.get(
            url='dashboard/nodes/{0}/processes'.format(node_id), params=param)

    def get_node_disks(self, node_id, *args, **kwargs):
        """Get disk details for the node *node_id*."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting disks for node '{0}'".format(node_id))
        return self.conn.get(
            url='dashboard/nodes/{0}/disks'.format(node_id), params=param)

    def get_storage_pool_nodes(self, storage_pool_id, *args, **kwargs):
        """Get node details for the storage pool *storage_pool_id*."""
        param = self._query_params(kwargs, self._CF_KEYS)
        log.info("Getting nodes for varray '{0}'".format(storage_pool_id))
        return self.conn.get(
            url='dashboard/storagepools/{0}/nodes'.format(storage_pool_id), params=param)

    def get_local_zone_replication_group_bootstrap_links(self, *args, **kwargs):
        """Get the local VDC replication group bootstrap-links details."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting vpool bootstrap links in local VDC")
        return self.conn.get(
            url='dashboard/zones/localzone/rglinksBootstrap', params=param)

    def get_replication_group(self, replication_group_id, *args, **kwargs):
        """Get details for the replication group *replication_group_id*."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting info for vpool '{0}'".format(replication_group_id))
        return self.conn.get(
            url='dashboard/replicationgroups/{0}'.format(replication_group_id), params=param)

    def get_replication_group_link(self, rglink_id, *args, **kwargs):
        """Get details for the replication group link *rglink_id*."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting info for vpool link '{0}'".format(rglink_id))
        return self.conn.get(url='dashboard/rglinks/{0}'.format(rglink_id), params=param)

    def get_replication_group_links(self, replication_group_id, *args, **kwargs):
        """Get the link details associated with *replication_group_id*."""
        param = self._query_params(kwargs, self._BASE_KEYS)
        log.info("Getting links for vpool '{0}'".format(replication_group_id))
        return self.conn.get(
            url='dashboard/replicationgroups/{0}/rglinks'.format(
                replication_group_id),
            params=param
        )
| StarcoderdataPython |
5095427 | <filename>app/models.py
from sqlalchemy.ext.hybrid import hybrid_property
from flask.ext.login import UserMixin
from app import db, bcrypt
class Requesters(db.Model, UserMixin):
    ''' A requester who needs help '''
    __tablename__ = 'requesters'
    # Unique handle; doubles as the primary key.
    username = db.Column(db.String, primary_key=True)
    # Requester location (decimal degrees, presumably WGS84 -- confirm).
    lat = db.Column(db.Float)
    lng = db.Column(db.Float)
    # Free-form request message shown to responders.
    message = db.Column(db.String)
    # Matching radius (units not stated here -- confirm with callers).
    radius = db.Column(db.Float)
    # Identifier of the matched responder, if any (confirm semantics).
    matched = db.Column(db.String)
class Responder(db.Model, UserMixin):
    ''' A responder who has an account on the website. '''
    __tablename__ = 'responders'
    first_name = db.Column(db.String)
    last_name = db.Column(db.String)
    phone = db.Column(db.String)
    # E-mail doubles as the login identifier / primary key.
    email = db.Column(db.String, primary_key=True)
    # Whether the account has been confirmed (e.g. via e-mail -- confirm).
    confirmation = db.Column(db.Boolean)
    # bcrypt hash of the password -- never the plaintext.
    _password = db.Column(db.String)

    @property
    def full_name(self):
        """Return "<first> <last>" for display."""
        return '{} {}'.format(self.first_name, self.last_name)

    @hybrid_property
    def password(self):
        """The stored bcrypt hash (assigning a plaintext hashes it)."""
        return self._password

    @password.setter
    def _set_password(self, plaintext):
        # Store only the bcrypt hash of the supplied plaintext.
        self._password = bcrypt.generate_password_hash(plaintext)

    def check_password(self, plaintext):
        """Return True iff *plaintext* matches the stored bcrypt hash.

        Bug fix: the original compared the stored bcrypt hash directly
        against the plaintext with ``==``, which can never succeed once the
        setter has hashed the password (and is not constant-time).  Use
        Flask-Bcrypt's checker instead.
        """
        return bcrypt.check_password_hash(self._password, plaintext)

    def get_id(self):
        """Flask-Login identity: the e-mail primary key."""
        return self.email
| StarcoderdataPython |
5085272 | import typing
from collections import OrderedDict
from flytekit import ContainerTask
from flytekit.common.translator import get_serializable
from flytekit.core import context_manager
from flytekit.core.base_task import kwtypes
from flytekit.core.context_manager import Image, ImageConfig
from flytekit.core.launch_plan import LaunchPlan, ReferenceLaunchPlan
from flytekit.core.task import ReferenceTask, task
from flytekit.core.workflow import ReferenceWorkflow, workflow
default_img = Image(name="default", fqn="test", tag="tag")
serialization_settings = context_manager.SerializationSettings(
project="project",
domain="domain",
version="version",
env=None,
image_config=ImageConfig(default_image=default_img, images=[default_img]),
)
def test_references():
    """Reference launch plans / tasks / workflows serialize as registered."""
    rlp = ReferenceLaunchPlan("media", "stg", "some.name", "cafe", inputs=kwtypes(in1=str), outputs=kwtypes())
    sdk_lp = get_serializable(OrderedDict(), serialization_settings, rlp)
    assert sdk_lp.has_registered
    rt = ReferenceTask("media", "stg", "some.name", "cafe", inputs=kwtypes(in1=str), outputs=kwtypes())
    sdk_task = get_serializable(OrderedDict(), serialization_settings, rt)
    assert sdk_task.has_registered
    rw = ReferenceWorkflow("media", "stg", "some.name", "cafe", inputs=kwtypes(in1=str), outputs=kwtypes())
    sdk_wf = get_serializable(OrderedDict(), serialization_settings, rw)
    assert sdk_wf.has_registered
def test_basics():
    """A two-task workflow serializes with the expected interface and nodes;
    a task serializes to a ``pyflyte-execute`` container and a launch plan
    keeps its given name.
    """
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    @workflow
    def my_wf(a: int, b: str) -> (int, str):
        x, y = t1(a=a)
        d = t2(a=y, b=b)
        return x, d

    sdk_wf = get_serializable(OrderedDict(), serialization_settings, my_wf, False)
    assert len(sdk_wf.interface.inputs) == 2
    assert len(sdk_wf.interface.outputs) == 2
    assert len(sdk_wf.nodes) == 2
    # Gets cached the first time around so it's not actually fast.
    sdk_task = get_serializable(OrderedDict(), serialization_settings, t1, True)
    assert "pyflyte-execute" in sdk_task.container.args
    lp = LaunchPlan.create("testlp", my_wf,)
    sdk_lp = get_serializable(OrderedDict(), serialization_settings, lp)
    assert sdk_lp.id.name == "testlp"
def test_fast():
    """Serializing with fast=True yields a ``pyflyte-fast-execute`` entry."""
    @task
    def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
        return a + 2, "world"

    @task
    def t2(a: str, b: str) -> str:
        return b + a

    sdk_task = get_serializable(OrderedDict(), serialization_settings, t1, True)
    assert "pyflyte-fast-execute" in sdk_task.container.args
def test_container():
    """Raw ContainerTasks keep their own command, with no pyflyte wrapper."""
    @task
    def t1(a: int) -> (int, str):
        return a + 2, str(a) + "-HELLO"

    t2 = ContainerTask(
        "raw",
        image="alpine",
        inputs=kwtypes(a=int, b=str),
        input_data_dir="/tmp",
        output_data_dir="/tmp",
        command=["cat"],
        arguments=["/tmp/a"],
    )
    sdk_task = get_serializable(OrderedDict(), serialization_settings, t2, fast=True)
    assert "pyflyte" not in sdk_task.container.args
| StarcoderdataPython |
8158276 | from app.instances import db
class Theme(db.Model):
    """
    A theme for the web view
    """
    __tablename__ = "themes"
    # extend_existing lets the table definition be re-declared on re-import.
    __table_args__ = {'extend_existing': True}
    id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Unique display name, at most 20 characters.
    name = db.Column(db.String(20), unique=True, nullable=False)
| StarcoderdataPython |
4815489 | from itertools import product
import copy
initialRows = []
with open("./11/input.txt") as inputFile:
for line in inputFile:
row = list(line.replace("\n",""))
initialRows.append(row)
def getSeat(x,y, rows):
if x < 0 or y < 0 or x >= len(rows[0]) or y >= len(rows):
return 0
return 0 if rows[y][x] in ["L", "."] else 1
def getNewState(x, y, rows):
a = [-1,0,1]
spacesToCheck = list(product(a, repeat=2))
spacesToCheck.remove((0, 0))
spacesToCheck = [[sum(x) for x in zip((x,y),offset)] for offset in spacesToCheck ]
occupiedSeats = sum([getSeat(*pos, rows) for pos in spacesToCheck])
if rows[y][x] == "L":
if occupiedSeats == 0:
return "#"
else:
return "L"
else:
if occupiedSeats >= 4:
return "L"
else:
return "#"
rows = initialRows
rounds = 0
while True:
rounds += 1
newRows = copy.deepcopy(rows)
numberOfChanges = 0
for y in range(0,len(rows)):
for x in range(0,len(row)):
seat = rows[y][x]
if seat in ["L","#"]:
newState = getNewState(x,y, rows)
if newState != seat:
numberOfChanges += 1
newRows[y][x] = newState
if numberOfChanges == 0:
break
rows = newRows
total = sum([seat in ["#","L"] for row in rows for seat in row])
occupied = sum([seat == "#" for row in rows for seat in row])
print("settled after {} rounds with {} of {} seats occupied".format(rounds, occupied, total)) | StarcoderdataPython |
4811546 | # -*- coding: utf-8 -*-
from datetime import date, time
import pytest
from ..fixtures import parametrize
from korona.html.tags import Input
from korona.templates.html.tags import input as tag
from korona.exceptions import TagAttributeError, AttributeValueError
# Positive cases: every supported attribute/type combination should render.
@parametrize('attributes', [
    # accept attribute
    ({'accept': 'audio/*', 'type': 'file'}),
    # align attribute
    ({'align': 'left', 'type': 'image'}),
    ({'align': 'right', 'type': 'image'}),
    ({'align': 'top', 'type': 'image'}),
    ({'align': 'bottom', 'type': 'image'}),
    ({'align': 'middle', 'type': 'image'}),
    # alt attribute
    ({'alt': 'temp', 'type': 'image'}),
    # autocomplete attribute
    ({'autocomplete': 'on', 'type': 'email'}),
    ({'autocomplete': 'off', 'type': 'password'}),
    # autofocus attribute
    ({'autofocus': True}),
    # checked attribute
    ({'checked': True, 'type': 'radio'}),
    ({'checked': True, 'type': 'checkbox'}),
    # dirname attribute
    ({'name': 'fname', 'dirname': 'fname.dir'}),
    # disabled attribute
    ({'disabled': True}),
    # form attribute
    ({'form': 'form1'}),
    # formaction attribute
    ({'formaction': 'demo.asp', 'type': 'submit'}),
    ({'formaction': 'demo.asp', 'type': 'image'}),
    # formenctype attribute
    ({'formenctype': 'application/x-www-form-urlencoded', 'type': 'submit'}),
    ({'formenctype': 'multipart/form-data', 'type': 'image'}),
    ({'formenctype': 'text/plain', 'type': 'image'}),
    # formmethod attribute
    ({'formmethod': 'get', 'type': 'submit'}),
    ({'formmethod': 'post', 'type': 'image'}),
    # formnovalidate attribute
    ({'formnovalidate': True, 'type': 'submit'}),
    # formtarget attribute
    ({'formtarget': '_parent', 'type': 'submit'}),
    ({'formtarget': '_blank', 'type': 'image'}),
    # height attribute
    ({'height': '200', 'type': 'image'}),
    # list attribute
    ({'list': 'list1'}),
    # max attribute
    ({'max': date.today(), 'type': 'date'}),
    ({'max': '5', 'type': 'number'}),
    ({'max': 5.45, 'type': 'number'}),
    ({'max': '12', 'type': 'month'}),
    ({'max': time(12, 30), 'type': 'time'}),
    # maxlength attribute
    ({'maxlength': '5'}),
    # min attribute
    ({'min': date.today(), 'type': 'date'}),
    ({'min': '5', 'type': 'number'}),
    ({'min': 5.45, 'type': 'number'}),
    ({'min': '12', 'type': 'month'}),
    ({'min': time(12, 30), 'type': 'time'}),
    # multiple attribute
    ({'multiple': True, 'type': 'file'}),
    ({'multiple': True, 'type': 'email'}),
    # name attribute
    ({'name': 'name1'}),
    # pattern attribute
    ({'pattern': '[A-Za-z]{3}', 'type': 'email'}),
    # placeholder attribute
    ({'placeholder': '<NAME>', 'type': 'text'}),
    # readonly attribute
    ({'readonly': True}),
    # required attribute
    ({'required': True, 'type': 'search'}),
    ({'required': False, 'type': 'email'}),
    # size attribute
    ({'size': '10', 'type': 'password'}),
    ({'size': 10, 'type': 'email'}),
    ({'size': 10.567, 'type': 'email'}),
    # src attribute
    ({'src': 'demo.asp', 'type': 'image'}),
    ({'src': 'www.google.com/images/a.jpeg', 'type': 'image'}),
    # step attribute
    ({'step': 1, 'type': 'range'}),
    ({'step': 1.56, 'type': 'range'}),
    # type attribute
    ({'type': 'submit'}),
    # value attribute
    ({'value': 'value1'}),
    # width attribute
    ({'width': '100', 'type': 'image'}),
    ({'width': 100, 'type': 'image'}),
    ({'width': 125.50, 'type': 'image'})
])
def test_construct_input_tag(attributes):
    """Each *attributes* dict, passed as ``Input(**attributes)``, must render
    exactly the markup produced by the reference template ``tag``.
    """
    # NOTE(review): the local name ``input`` shadows the builtin input().
    input = Input(**attributes)
    assert input.construct() == tag.render(attributes)
# Each case is (attributes, expected exception type, substring expected in
# the raised message); Input(**attributes) must fail accordingly.
@parametrize('attributes,exception,error_msg', [
    # accept attribute
    ({'accept': 'audio/*'},
     AttributeError,
     'can only be used with <input type="file"'),
    ({'accept': 'audio/*', 'type': 'submit'},
     AttributeError,
     'can only be used with <input type="file"'),
    # align attribute
    ({'align': 'abcd', 'type': 'image'},
     TagAttributeError,
     'align attribute values should be one of these:'),
    ({'align': 'top'},
     AttributeError,
     'align attribute is only used with <input type="image">'),
    # alt attribute
    ({'alt': 'temp'},
     AttributeError,
     'can only be used with <input type="image">'),
    ({'alt': 'temp', 'type': 'submit'},
     AttributeError,
     'can only be used with <input type="image">'),
    # autocomplete attribute
    ({'autocomplete': 'off', 'type': 'submit'},
     AttributeError,
     'autocomplete attribute works with the following <input> types:'),
    ({'autocomplete': 'n/a', 'type': 'email'},
     TagAttributeError,
     'autocomplete attribute values should be one of these:'),
    ({'autocomplete': 'on'},
     AttributeError,
     'autocomplete attribute works with the following <input> types:'),
    # autofocus attribute
    ({'autofocus': 'temp'},
     AttributeError,
     'autofocus attribute should be a boolean value'),
    # checked attribute
    ({'checked': True, 'type': 'email'},
     AttributeError,
     'checked attribute can be used with'),
    ({'checked': 'temp', 'type': 'radio'},
     AttributeError,
     'checked attribute should be a boolean value'),
    ({'checked': True},
     AttributeError,
     'checked attribute can be used with'),
    # dirname attribute
    ({'dirname': 'fname.dir'},
     AttributeError,
     "The dirname attribute's value is always the name of the input field, "
     "followed by '.dir'"),
    ({'name': 'lname', 'dirname': 'fname.dir'},
     AttributeError,
     "The dirname attribute's value is always the name of the input field, "
     "followed by '.dir'"),
    # disabled attribute
    ({'disabled': 'temp'},
     AttributeError,
     'disabled attribute should be a boolean value'),
    # formaction attribute
    ({'formaction': 'demo.asp', 'type': 'email'},
     AttributeError,
     'formaction attribute is used with type'),
    ({'formaction': 'demo.asp'},
     AttributeError,
     'formaction attribute is used with type'),
    ({'formaction': 123, 'type': 'submit'},
     ValueError,
     'is not a valid url'),
    # formenctype attribute
    ({'formenctype': 'temp', 'type': 'submit'},
     TagAttributeError,
     'formenctype attribute values should be one of these'),
    ({'formenctype': 'text/plain', 'type': 'email'},
     AttributeError,
     'formenctype attribute is used with type'),
    ({'formenctype': 'text/plain'},
     AttributeError,
     'formenctype attribute is used with type'),
    # formmethod attribute
    ({'formmethod': 'get', 'type': 'email'},
     AttributeError,
     'formmethod attribute can be used with type'),
    ({'formmethod': 'get'},
     AttributeError,
     'formmethod attribute can be used with type'),
    ({'formmethod': 'PUT', 'type': 'submit'},
     TagAttributeError,
     'formmethod attribute values should be one of these'),
    # formnovalidate attribute
    ({'formnovalidate': True, 'type': 'email'},
     AttributeError,
     'formnovalidate attribute can be used with type'),
    ({'formnovalidate': True},
     AttributeError,
     'formnovalidate attribute can be used with type'),
    ({'formnovalidate': 'temp', 'type': 'submit'},
     AttributeError,
     'formnovalidate attribute should be a boolean value'),
    # formtarget attribute
    # NOTE(review): 'emali' below looks like a typo for 'email' -- confirm
    # whether the intended case is invalid-combination or invalid-type.
    ({'formtarget': '_parent', 'type': 'emali'},
     AttributeError,
     'formtarget attribute can be used with type'),
    # NOTE(review): '_balnk' looks like a typo for '_blank'.
    ({'formtarget': '_balnk'},
     AttributeError,
     'formtarget attribute can be used with type'),
    # height attribute
    ({'height': '200', 'type': 'submit'},
     AttributeError,
     'height attribute is used only with'),
    ({'height': '200'},
     AttributeError,
     'height attribute is used only with'),
    ({'height': 'temp', 'type': 'image'},
     AttributeValueError,
     'height attribute should be an integer or float value'),
    # max attribute
    ({'max': date.today(), 'type': 'submit'},
     AttributeError,
     'max attribute works with the following'),
    ({'max': date.today()},
     AttributeError,
     'max attribute works with the following'),
    ({'max': date.today(), 'type': 'number'},
     AttributeValueError,
     'attribute should be an integer or float value'),
    ({'max': '5', 'type': 'date'},
     AttributeError,
     'attribute should be a date/datetime value'),
    ({'max': '5', 'type': 'time'},
     AttributeError,
     'attribute should be a time value'),
    # maxlength attribute
    ({'maxlength': 'temp'},
     AttributeValueError,
     'attribute should be an integer or float value'),
    # min attribute
    ({'min': date.today(), 'type': 'submit'},
     AttributeError,
     'min attribute works with the following'),
    ({'min': date.today()},
     AttributeError,
     'min attribute works with the following'),
    ({'min': date.today(), 'type': 'number'},
     AttributeValueError,
     'attribute should be an integer or float value'),
    ({'min': '5', 'type': 'date'},
     AttributeError,
     'attribute should be a date/datetime value'),
    ({'min': '5', 'type': 'time'},
     AttributeError,
     'attribute should be a time value'),
    # multiple attribute
    ({'multiple': True, 'type': 'submit'},
     AttributeError,
     'multiple attribute works with the following'),
    # pattern attribute
    ({'pattern': '[A-Za-z]{3}', 'type': 'submit'},
     AttributeError,
     'pattern attribute works with the following'),
    ({'pattern': '[A-Za-z]{3}'},
     AttributeError,
     'pattern attribute works with the following'),
    # placeholder attribute
    ({'placeholder': '<NAME>', 'type': 'submit'},
     AttributeError,
     'placeholder attribute works with the following'),
    ({'placeholder': '<NAME>'},
     AttributeError,
     'placeholder attribute works with the following'),
    # readonly attribute
    ({'readonly': 'temp'},
     AttributeError,
     'should be a boolean'),
    # required attribute
    ({'required': True, 'type': 'submit'},
     AttributeError,
     'required attribute works with the following'),
    ({'required': True},
     AttributeError,
     'required attribute works with the following'),
    ({'required': 'temp', 'type': 'email'},
     AttributeError,
     'should be a boolean'),
    # size attribute
    ({'size': '10', 'type': 'submit'},
     AttributeError,
     'size attribute works with the following'),
    ({'size': '10'},
     AttributeError,
     'size attribute works with the following'),
    ({'size': 'temp', 'type': 'email'},
     AttributeValueError,
     'should be an integer or float value'),
    # src attribute
    ({'src': 'demo.asp', 'type': 'submit'},
     AttributeError,
     'src attribute is required for'),
    ({'src': 'demo.asp'},
     AttributeError,
     'src attribute is required for'),
    # step attribute
    ({'step': 1, 'type': 'submit'},
     AttributeError,
     'step attribute works with the following'),
    ({'step': 1},
     AttributeError,
     'step attribute works with the following'),
    ({'step': 'temp', 'type': 'range'},
     AttributeValueError,
     'should be an integer or float value'),
    # type attribute
    ({'type': 'temp'},
     TagAttributeError,
     'type attribute values should be one of these'),
    # value attribute
    ({'value': 'value1', 'type': 'file'},
     AttributeError,
     'value attribute cannot be used with'),
    # width attribute
    ({'width': '100', 'type': 'submit'},
     AttributeError,
     'width attribute is used only with'),
    ({'width': '100'},
     AttributeError,
     'width attribute is used only with'),
    ({'width': 'temp', 'type': 'image'},
     AttributeValueError,
     'should be an integer or float value')
])
def test_construct_input_tag_error(attributes, exception, error_msg):
    """Test for validating input tag's attributes."""
    with pytest.raises(exception) as exc:
        Input(**attributes)
    assert error_msg in str(exc)
| StarcoderdataPython |
4983684 | <filename>deltabot/plugins/hitokoto/__init__.py<gh_stars>10-100
from nonebot import on_command, CommandSession
from .data_source import get_hitokoto
# NoneBot plugin metadata; surfaced by the bot's built-in help listing.
__plugin_name__ = 'hitokoto(一言)'
# Usage text is user-facing (sent in chat), so it is intentionally kept in
# Chinese; do not translate.
__plugin_usage__ = r"""
获取「一言」(附庸风雅利器)
数据来源 hitokoto.cn
Command(s):
- /hitokoto
""".strip()
@on_command('hitokoto', aliases=('每日一句', '一言', 'poem'))
async def hitokoto(session: CommandSession):
    """Fetch a quote via get_hitokoto and send it back to the chat session.

    ``ret`` is expected to be a (text, author, source) triple -- TODO
    confirm against data_source.get_hitokoto. Nothing is sent on failure.
    """
    ret = await get_hitokoto(session)
    if ret:
        await session.send(f"『{ret[0]}』\n" +
                           f"——{ret[1] if ret[1] else ''}「{ret[2]}」".rjust(12, ' '))
    # Right-align the attribution using U+3000 ideographic spaces, because
    # QQ renders plain ASCII spaces too narrow for the alignment to hold.
1797032 | <gh_stars>0
import math
import pickle
import numpy as np
from helper.utils import logger
from tasks import register_task
from tasks.base_task import BaseTask
from dataclass.configs import BaseDataClass
from dataclass.choices import BODY_CHOICES
from dataclasses import dataclass,field
from typing import Optional
from helper.utils import compress_datatype,check_var
@dataclass
class BodyConfig(BaseDataClass):
    # Which feature set to build; one of BODY_CHOICES (default: 'extra').
    option: Optional[BODY_CHOICES] = field(default='extra',metadata={'help':'the set of data to build'})
    # Task registry key; must match the @register_task name below.
    task: str = field(default='build_body')
def check_name(dc, key):
    """Return the value stored under *key* in the mapping *dc*.

    Raises:
        ValueError: if *key* is not present (i.e. a required kwarg is
            missing).
    """
    if key not in dc:
        raise ValueError(f'{key} must be input')
    return dc[key]
@register_task('build_body',dataclass=BodyConfig)
class BuildBody(BaseTask):
    """Build pivot-table-derived feature matrices for the 'build_body' task.

    Consumes raw train/test arrays plus pivot-table lookups supplied via
    **kwargs, generates per-cell features according to ``args.option``, and
    pickles the resulting (train, test) matrices under interim_data_store.
    """
    def __init__(self,
                 args,
                 train,
                 test,
                 model,
                 arch,
                 **kwargs):
        # NOTE(review): `model` is accepted but never stored or used --
        # confirm whether the BaseTask interface requires it.
        self.args = args
        # Drop the first and last train columns and the last test column
        # (presumably id/label columns) -- TODO confirm data layout.
        self.train = train[:,1:-1]
        self.test = test[:,:-1]
        self.arch = arch
        # Categorical columns, in the order they appear in the arrays.
        self.columns = ["resource", "manager", "role1", "role2", "department",
                        "title", "family_desc", "family"]
        # The three pivot-table lookups are mandatory; check_name raises
        # ValueError if any is missing from kwargs.
        dictionary_train = check_name(kwargs,'pivottable_train')
        dictionary_test = check_name(kwargs,'pivottable_test')
        dictionary = check_name(kwargs,'pivottable')
        self.dictionary_train = dictionary_train
        self.dictionary_test = dictionary_test
        self.dictionary = dictionary
    def gen_feature(self,data,feature_list):
        """Apply every feature function to every (row, column) cell.

        For each row and column index j, each func is called with the
        pivot-table entry ``self.dictionary[j][row[j]]``, the full row, and
        j; results form one flat feature vector per row.
        """
        features = []
        for row in data:
            features.append([])
            for j in range(len(self.columns)):
                for func in feature_list:
                    features[-1].append(func(self.dictionary[j][row[j]],row,j))
        features = np.array(features)
        return features
    def create_features(self):
        """Build, compress, variance-filter, pickle and return the features.

        ``args.option`` selects the per-cell feature family: 'extra' (raw
        counts + square + log), 'tree' (raw counts), 'meta' (per-split
        totals), otherwise count ratios.
        """
        if self.args.option == 'extra':
            def log_result(x,row,j):
                # log of the resource count, guarded against log(0)
                v = x[self.columns[0]].get(row[0],0)
                if v > 0:
                    return math.log(v)
                else:
                    return 0
            feature_list = [ lambda x,row,j: x[self.columns[0]].get(row[0],0) if j > 0 else 0,
                    lambda x,row,j: x[self.columns[1]].get(row[1],0) if j > 1 else 0,
                    lambda x,row,j: x[self.columns[2]].get(row[2],0) if j > 2 else 0,
                    lambda x,row,j: x[self.columns[3]].get(row[3],0) if j > 3 else 0,
                    lambda x,row,j: x[self.columns[4]].get(row[4],0) if j > 4 else 0,
                    lambda x,row,j: x[self.columns[5]].get(row[5],0) if j > 5 else 0,
                    lambda x,row,j: x[self.columns[6]].get(row[6],0) if j > 6 else 0,
                    lambda x,row,j: x[self.columns[7]].get(row[7],0) if j > 7 else 0,
                    lambda x,row,j: x[self.columns[0]].get(row[0],0)**2 if j in range(7) else 0,
                    log_result]
        elif self.args.option == 'tree':
            feature_list = [lambda x,row,j: x[self.columns[0]].get(row[0],0),
                    lambda x,row,j: x[self.columns[1]].get(row[1],0),
                    lambda x,row,j: x[self.columns[2]].get(row[2],0),
                    lambda x,row,j: x[self.columns[3]].get(row[3],0),
                    lambda x,row,j: x[self.columns[4]].get(row[4],0),
                    lambda x,row,j: x[self.columns[5]].get(row[5],0),
                    lambda x,row,j: x[self.columns[6]].get(row[6],0),
                    lambda x,row,j: x[self.columns[7]].get(row[7],0)]
        elif self.args.option == 'meta':
            feature_list = [lambda x,row,j: self.dictionary_train[j].get(row[j],{}).get('total',0)]
        else:
            # NOTE(review): the .get() defaults 1..7 below differ from the 0
            # defaults used in every other branch and look like copy-paste
            # slips -- confirm before relying on these ratio features.
            feature_list = [lambda x,row,j: x[self.columns[0]].get(row[0],0) / x['total'],
                    lambda x,row,j: x[self.columns[1]].get(row[1],1) / x['total'],
                    lambda x,row,j: x[self.columns[2]].get(row[2],2) / x['total'],
                    lambda x,row,j: x[self.columns[3]].get(row[3],3) / x['total'],
                    lambda x,row,j: x[self.columns[4]].get(row[4],4) / x['total'],
                    lambda x,row,j: x[self.columns[5]].get(row[5],5) / x['total'],
                    lambda x,row,j: x[self.columns[6]].get(row[6],6) / x['total'],
                    lambda x,row,j: x[self.columns[7]].get(row[7],7) / x['total']]
        logger.info(f'building dataset with option:{self.args.option}')
        feature_train = self.gen_feature(self.train,feature_list)
        feature_test = self.gen_feature(self.test,feature_list)
        feature_train = compress_datatype(feature_train)
        feature_test = compress_datatype(feature_test)
        feature_train,feature_test = check_var(feature_train,feature_test)
        # NOTE(review): `filename` is assigned but unused, and the literal
        # paths below contain "(unknown)" -- the f-strings almost certainly
        # meant to interpolate {filename}; confirm against the original repo.
        filename = self.args.option
        with open(f'interim_data_store/(unknown)_data.pkl','wb') as f:
            pickle.dump((feature_train,feature_test),f)
        logger.info(f'saving (unknown) data at interim_data_store')
        return feature_train,feature_test
#%%
| StarcoderdataPython |
12824445 | <reponame>aideyisu/english_study
import json
import xlwt
# 单词乱序
import random
from datetime import datetime
from get_words import get_lines
from side import site_write_line_style
import get_words
def get_datetime():
    """Return today's local date formatted as YYYY-MM-DD."""
    return f"{datetime.now():%Y-%m-%d}"
def save_today_words(word_dict):
    """Append today's full word dict as one JSON line to exercise_words.csv."""
    record = json.dumps(word_dict) + '\n'
    with open("exercise_words.csv", "a+") as log_file:
        log_file.write(record)
def output_today_exercise():
    """Generate today's exercise spreadsheet ({date}.xls).

    Lays out the shuffled new words and the words due for review in a
    fixed 13-row / 8-column grid, records today's new words via
    save_today_words, and finally generates the matching answer sheet.
    """
    # Backup of today's new words, persisted at the end.
    today_word_list = {}
    # Create a workbook with UTF-8 encoding.
    workbook = xlwt.Workbook(encoding = 'utf-8')
    # Create a worksheet.
    worksheet = workbook.add_sheet('My Worksheet')
    font = xlwt.Font() # Create Font
    # font.bold = True # Set font to Bold
    style = xlwt.XFStyle() # Create Style
    style.font = font # Add Bold Font to Style
    # Center-align header cells.
    al = xlwt.Alignment()
    al.horz = 0x02 # horizontal center
    al.vert = 0x01 # vertical center
    style.alignment = al
    worksheet.write_merge(0, 0, 0, 7, f'{get_datetime()} english 第{get_lines()}天 ', style)
    worksheet.write_merge(1, 3, 0, 0, f'new', style)
    worksheet.write_merge(5, 7, 0, 0, f'review', style)
    # Load the new words.
    new_words = get_words.get_new_words()
    # Write the new words (shuffled), one per row starting at (1, 1).
    y_start = 1
    x_site = 1
    new_word_keys = list(new_words.keys())
    random.shuffle(new_word_keys)
    for word_key in new_word_keys:
        today_word_list[word_key] = new_words[word_key]
        # worksheet.write(y_start,x_site, label = f'{item}')
        style = site_write_line_style()
        worksheet.write(y_start,x_site, f'{word_key}', style)
        worksheet.write(y_start,x_site+1, ' ', style)
        worksheet.write(y_start,x_site+2, ' ', style)
        y_start += 1
    # Words due for review, shuffled within each day bucket.
    review_words = get_words.get_review_words()
    y_start = 5
    x_start = 1
    one_day_review_word_keys_list = {}
    for one_day in review_words:
        oneday_review_word_keys = list(review_words[one_day].keys())
        random.shuffle(oneday_review_word_keys)
        one_day_review_word_keys_list[one_day] = oneday_review_word_keys
    for one_day in one_day_review_word_keys_list:
        for review_word in one_day_review_word_keys_list[one_day]:
            style = site_write_line_style()
            worksheet.write(y_start,x_start, f'{review_word}', style)
            worksheet.write(y_start,x_start+1, ' ', style)
            worksheet.write(y_start,x_start+2, ' ', style)
            y_start += 1
            # Wrap to a second column group once the 13-row page fills up.
            if y_start == 13:
                y_start = 1
                x_start = 5
    # Set column widths.
    for number in range(0, 8):
        worksheet.col(number).width = 4200
    for number in range(0, 13):
        worksheet.row(number).height_mismatch = True
        worksheet.row(number).height = 775
    # Persist today's new words (one JSON line).
    save_today_words(today_word_list)
    # Save the spreadsheet.
    workbook.save(f'{get_datetime()}.xls')
    # Also generate the matching answer sheet.
    output_today_exercise_answers(new_words, new_word_keys, review_words, one_day_review_word_keys_list)
def output_today_exercise_answers(new_words, new_word_keys, review_words, one_day_review_word_keys_list):
    """Generate the answer sheet ({date}_answers.xls).

    Uses the same layout and the same (already shuffled) key orders as the
    exercise sheet, but also fills in each word's translation/answer.
    """
    # Create a workbook with UTF-8 encoding.
    workbook = xlwt.Workbook(encoding = 'utf-8')
    # Create a worksheet.
    worksheet = workbook.add_sheet('Answers')
    font = xlwt.Font() # Create Font
    # font.bold = True # Set font to Bold
    style = xlwt.XFStyle() # Create Style
    style.font = font # Add Bold Font to Style
    # Center-align header cells.
    al = xlwt.Alignment()
    al.horz = 0x02 # horizontal center
    al.vert = 0x01 # vertical center
    style.alignment = al
    worksheet.write_merge(0, 0, 0, 7, f'{get_datetime()} english', style)
    worksheet.write_merge(1, 3, 0, 0, f'new', style)
    worksheet.write_merge(5, 7, 0, 0, f'review', style)
    # Write the new words with their answers, starting at (1, 1).
    y_start = 1
    x_site = 1
    # TODO(original): "needs to be passed in" -- appears stale, since the
    # key orders are now received as parameters.
    for word_key in new_word_keys:
        # worksheet.write(y_start,x_site, label = f'{item}')
        style = site_write_line_style()
        worksheet.write(y_start,x_site, f'{word_key}', style)
        worksheet.write(y_start,x_site+1, f'{new_words[word_key]}', style)
        worksheet.write(y_start,x_site+2, ' ', style)
        y_start += 1
    # Words due for review, same day-bucket order as the exercise sheet.
    y_start = 5
    x_start = 1
    for one_day in one_day_review_word_keys_list:
        for review_word in one_day_review_word_keys_list[one_day]:
            style = site_write_line_style()
            worksheet.write(y_start,x_start, f'{review_word}', style)
            worksheet.write(y_start,x_start+1, f'{review_words[one_day][review_word]}', style)
            worksheet.write(y_start,x_start+2, ' ', style)
            y_start += 1
            # Wrap to a second column group once the 13-row page fills up.
            if y_start == 13:
                y_start = 1
                x_start = 5
    # Set column widths.
    for number in range(0, 8):
        worksheet.col(number).width = 4200
    for number in range(0, 13):
        worksheet.row(number).height_mismatch = True
        worksheet.row(number).height = 775
    # Save the spreadsheet.
    workbook.save(f'{get_datetime()}_answers.xls')
| StarcoderdataPython |
5129500 | """
Deploy Docker containers with Fabric.
"""
from setuptools import find_packages, setup
# Read the runtime dependency pins; one requirement per line.
with open('requirements.txt') as f:
    dependencies = f.read().splitlines()

# Package metadata for fabdocker (Docker deployment helpers for Fabric).
setup(
    name='fabdocker',
    version='0.3.6',
    url='https://github.com/newmotion/fabdocker',
    license='MIT',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Deploy Docker containers with Fabric.',
    long_description='Deploy Docker containers with Fabric.',
    packages=find_packages(),
    zip_safe=False,
    platforms='any',
    install_requires=dependencies,
    classifiers=[
        # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        # 'Development Status :: 1 - Planning',
        # 'Development Status :: 2 - Pre-Alpha',
        # 'Development Status :: 3 - Alpha',
        'Development Status :: 4 - Beta',
        # 'Development Status :: 5 - Production/Stable',
        # 'Development Status :: 6 - Mature',
        # 'Development Status :: 7 - Inactive',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: MacOS',
        'Operating System :: Unix',
        # 'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        # 'Programming Language :: Python :: 3',
        'Topic :: Utilities',
    ]
)
| StarcoderdataPython |
1854761 | <reponame>emschimmel/BrainPi
# Service configuration constants (face-recognition pipeline).
# Image input directory.
file_path = '../img/'
max_token_time_seconds = 60 # less is more secure
# Trained OpenCV recognizer state (one file per algorithm).
eigen_trainer_file = './Data/eigen_trainer.yml'
fisher_trainer_file = './Data/fisher_trainer.yml'
lbph_trainer_file = './Data/lbph_trainer.yml'
# Training image directories (LFW-derived), one per algorithm.
eigen_data_path = './Data/lfw_eigen/'
fisher_data_path = './Data/lfw_fisher/'
lbph_data_path = './Data/lfw_lbph/'
# Name <-> id mapping files, one per algorithm.
eigen_name_id_file = './Data/eigen_namedIds.yml'
fisher_name_id_file = './Data/fisher_namedIds.yml'
lbph_name_id_file = './Data/lbph_namedIds.yml'
# TensorFlow model/data locations.
tensor_model_path = './Data/tensorflow_model/'
tensor_data_path = './Data/tensorflow_ids/'
tensor_id_file = './Data/tensorflow_saved/id_dataset.pkl'
# Consul service discovery.
consul_interval = '30s'
consul_timeout = '2s'
consul_ip = '127.0.0.1'
consul_port = 8500
consul_resolver_port = 8600
# StatsD metrics endpoint.
statsd_ip = 'localhost'
statsd_port = 8125
# Backing services (host/port pairs).
redis_service_ip = 'localhost'
redis_service_port = 32769
es_service_ip = 'localhost'
es_service_port = 32770
mongo_service_ip = 'localhost'
mongo_service_port = 27017
rabbit_service_ip = 'localhost'
rabbit_service_port = 35672
# LightwaveRF home-automation hub address.
lightwaveRfIp = '192.168.0.102'
# Logging configuration.
log_dir = '../Logs/'
log_level = 'DEBUG'
| StarcoderdataPython |
11232515 | <filename>branched_lineages.py
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import time
# Dash test app: one "grandparent" input fans out to three parent inputs,
# which in turn feed two child divs -- used to exercise branched callback
# dependency resolution.
app = dash.Dash(__name__)
server = app.server  # expose the underlying Flask server for WSGI hosting

app.layout = html.Div([
    dcc.Input(id='grandparent', value='input 1'),
    dcc.Input(id='parent-a'),
    dcc.Input(id='parent-b'),
    dcc.Input(id='parent-c'),
    html.Div(id='child-a'),
    html.Div(id='child-b')
])

# Serve Dash's JS bundles from this process instead of the CDN.
app.scripts.config.serve_locally = True
@app.callback(Output('parent-a', 'value'),
              [Input('grandparent', 'value')])
def update_parenta(value):
    """Propagate the grandparent value to parent-a (simulates slow work)."""
    print('parenta')
    time.sleep(2)  # artificial delay to stagger the three parent callbacks
    return 'a: {}'.format(value)
@app.callback(Output('parent-b', 'value'),
              [Input('grandparent', 'value')])
def update_parentb(value):
    """Propagate the grandparent value to parent-b (simulates slow work)."""
    print('parentb')
    time.sleep(6)  # artificial delay to stagger the three parent callbacks
    return 'b: {}'.format(value)
@app.callback(Output('parent-c', 'value'),
              [Input('grandparent', 'value')])
def update_parentc(value):
    """Propagate the grandparent value to parent-c (simulates slow work).

    Renamed from a copy-pasted ``update_parentb``, which shadowed the
    parent-b callback function at module level. Dash behavior was already
    correct (registration happens via the decorator), but the duplicate
    name made the second definition clobber the first in the module
    namespace.
    """
    print('parentc')
    time.sleep(9)  # artificial delay to stagger the three parent callbacks
    return 'c: {}'.format(value)
@app.callback(Output('child-a', 'children'),
              [Input('parent-a', 'value'),
               Input('parent-b', 'value'),
               Input('parent-c', 'value')])
def update_childa(parenta_value, parentb_value, parentc_value):
    """Combine the three parent values into child-a -- but always errors.

    NOTE(review): the bare ``raise Exception`` makes the return below
    unreachable; it appears deliberate (this file looks like a Dash
    callback-error test fixture) -- confirm intent before removing.
    """
    print('childa: {}, {}, {}'.format(parenta_value, parentb_value, parentc_value))
    raise Exception
    return 'childa: {} + {} + {}'.format(parenta_value, parentb_value, parentc_value)
@app.callback(Output('child-b', 'children'),
              [Input('parent-a', 'value'),
               Input('parent-b', 'value'),
               Input('grandparent', 'value')])
def update_childb(parenta_value, parentb_value, grandparent_value):
    """Combine parent-a, parent-b and the grandparent value into child-b."""
    print('childb: {}, {}, {}'.format(
        parenta_value,
        parentb_value,
        grandparent_value
    ))
    return 'childb: {} + {} + {}'.format(
        parenta_value,
        parentb_value,
        grandparent_value
    )
if __name__ == '__main__':
    # Dev server; processes=8 lets the staggered callbacks run concurrently.
    app.run_server(debug=True, processes=8)
| StarcoderdataPython |
106657 | import torch
import torch.nn as nn
from pysot.utils.latency import predict_latency,compute_latency
table = []
class ReLUConvBN(nn.Module):
    """
    Stack of relu-conv-bn
    """
    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        """
        :param C_in: input channel count
        :param C_out: output channel count
        :param kernel_size: conv kernel size
        :param stride: conv stride
        :param padding: conv padding
        :param affine: whether the BatchNorm has learnable affine params
        """
        super(ReLUConvBN, self).__init__()
        self.C_in=C_in
        self.C_out=C_out
        self.kernel_size=kernel_size
        self.stride=stride
        self.padding=padding
        # Bug fix: was hard-coded `self.affine=True`, ignoring the `affine`
        # argument. The BatchNorm below used the real value, but
        # forward_latency() built its lookup key from self.affine, so any
        # affine=False instance queried the wrong latency-table entry.
        self.affine=affine
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_out, kernel_size, stride=stride, padding=padding, bias=False),
            nn.BatchNorm2d(C_out, affine=affine))
    def forward(self, x):
        return self.op(x)
    def forward_latency(self,x):
        """Return (predicted latency, output (C, H, W)) for input shape x=(C, H, W)."""
        # Key format must match the entries produced by _latency().
        name = "ReLUConvBN H:%d W:%d C_IN:%d C_OUT:%d KERNELSIZE:%d STRIDE:%d PADDING:%d AFFINE:%s"%(x[1],x[2],self.C_in,self.C_out,self.kernel_size,self.stride,self.padding,self.affine)
        # table.append(name)
        # latency=0.
        latency = predict_latency(name)
        # Standard conv output-size formula.
        fun = lambda x:(x+2*self.padding-self.kernel_size)//self.stride+1
        return latency,(self.C_out,fun(x[1]),fun(x[2]))
    @staticmethod
    def _latency(H,W,C_in, C_out, kernel_size, stride, padding, affine=True):
        """Measure the real latency of this op for the given configuration."""
        layer = ReLUConvBN(C_in, C_out, kernel_size, stride, padding, affine)
        return compute_latency(layer,(1,C_in,H,W))
class DilConv(nn.Module):
    """
    relu-dilated conv-bn: ReLU -> depthwise dilated conv -> 1x1 pointwise
    conv -> BatchNorm (a dilated depthwise-separable convolution).
    """
    def __init__(self, C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
        """
        :param C_in: input channel count
        :param C_out: output channel count
        :param kernel_size: depthwise conv kernel size
        :param stride: depthwise conv stride
        :param padding: 2/4 (chosen so dilation keeps the spatial size at stride 1)
        :param dilation: 2
        :param affine: whether the BatchNorm has learnable affine params
        """
        super(DilConv, self).__init__()
        self.C_in=C_in
        self.C_out=C_out
        self.kernel_size=kernel_size
        self.stride=stride
        self.padding=padding
        self.dilation=dilation
        self.affine=affine
        self.op = nn.Sequential(
            nn.ReLU(inplace=False),
            # Depthwise (groups=C_in) dilated convolution.
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding,
                      dilation=dilation, groups=C_in, bias=False),
            # Pointwise 1x1 convolution to mix channels.
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine))
    def forward(self, x):
        return self.op(x)
    def forward_latency(self,x):
        """Return (predicted latency, output (C, H, W)) for input shape x=(C, H, W)."""
        # Key format must match the entries produced by _latency().
        name = "DilConv H:%d W:%d C_IN:%d C_OUT:%d KERNELSIZE:%d STRIDE:%d PADDING:%d DILATION:%d AFFINE:%s"%(x[1],x[2],self.C_in,self.C_out,self.kernel_size,self.stride,self.padding,self.dilation,self.affine)
        latency = predict_latency(name)
        # print(name)
        # table.append(name)
        # latency=0.
        # Dilated-conv output-size formula.
        fun = lambda x:(x+2*self.padding-self.dilation*(self.kernel_size-1)-1)//self.stride+1
        return latency,(self.C_out,fun(x[1]),fun(x[2]))
    @staticmethod
    def _latency(H,W,C_in, C_out, kernel_size, stride, padding, dilation, affine=True):
        """Measure the real latency of this op for the given configuration."""
        layer = DilConv(C_in, C_out, kernel_size, stride, padding, dilation, affine)
        return compute_latency(layer,(1,C_in,H,W))
class SepConv(nn.Module):
    """
    implemented separate convolution via pytorch groups parameters:
    two stacked depthwise-separable conv units (ReLU -> depthwise ->
    pointwise -> BN, twice); only the first depthwise conv strides.
    """
    def __init__(self, C_in, C_out, kernel_size, stride, padding, affine=True):
        """
        :param C_in: input channel count
        :param C_out: output channel count
        :param kernel_size: depthwise conv kernel size
        :param stride: stride of the first depthwise conv
        :param padding: 1/2 (keeps spatial size at stride 1)
        :param affine: whether the BatchNorms have learnable affine params
        """
        super(SepConv, self).__init__()
        self.C_in=C_in
        self.C_out=C_out
        self.kernel_size=kernel_size
        self.stride=stride
        self.padding=padding
        self.affine=affine
        self.op = nn.Sequential(
            # First separable unit: strided depthwise + pointwise, C_in -> C_in.
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=stride, padding=padding,
                      groups=C_in, bias=False),
            nn.Conv2d(C_in, C_in, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_in, affine=affine),
            # Second separable unit: stride 1, expands to C_out channels.
            nn.ReLU(inplace=False),
            nn.Conv2d(C_in, C_in, kernel_size=kernel_size, stride=1, padding=padding,
                      groups=C_in, bias=False),
            nn.Conv2d(C_in, C_out, kernel_size=1, padding=0, bias=False),
            nn.BatchNorm2d(C_out, affine=affine))
    def forward(self, x):
        return self.op(x)
    def forward_latency(self,x):
        """Return (predicted latency, output (C, H, W)) for input shape x=(C, H, W)."""
        # Key format must match the entries produced by _latency().
        name = "SepConv H:%d W:%d C_IN:%d C_OUT:%d KERNELSIZE:%d STRIDE:%d PADDING:%d AFFINE:%s"%(x[1],x[2],self.C_in,self.C_out,self.kernel_size,self.stride,self.padding,self.affine)
        latency = predict_latency(name)
        # table.append(name)
        # latency=0.
        # Conv output-size formula (second unit is stride 1 / same padding).
        fun = lambda x:(x+2*self.padding-self.kernel_size)//self.stride+1
        return latency,(self.C_out,fun(x[1]),fun(x[2]))
    @staticmethod
    def _latency(H,W,C_in, C_out, kernel_size, stride, padding, affine=True):
        """Measure the real latency of this op for the given configuration."""
        layer = SepConv(C_in, C_out, kernel_size, stride, padding, affine)
        return compute_latency(layer,(1,C_in,H,W))
class Identity(nn.Module):
    """Pass-through op: returns its input unchanged."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, x):
        return x

    def forward_latency(self, x):
        """Return (predicted latency, unchanged shape) for x = (C, H, W)."""
        name = "Identity H:%d W:%d C_IN:%d"%(x[1],x[2],x[0])
        return predict_latency(name), x

    @staticmethod
    def _latency(H, W, C_in):
        """Measure the real latency of the identity op."""
        return compute_latency(Identity(), (1, C_in, H, W))
class Zero(nn.Module):
    """The 'none' operation: emits zeros shaped by the channel/stride config.

    NOTE(review): when C_in != C_out the spatial size is NOT divided by
    `stride` (unlike forward_latency, which reports H//stride, W//stride).
    This mirrors the original behavior -- confirm whether it is intentional.
    """

    def __init__(self, C_in, C_out, stride):
        super(Zero, self).__init__()
        self.C_in = C_in
        self.C_out = C_out
        self.stride = stride
        self.is_zero = True

    def forward(self, x):
        if self.C_in == self.C_out:
            # Same channel count: zero out the (possibly subsampled) input.
            sampled = x if self.stride == 1 else x[:, :, ::self.stride, ::self.stride]
            return sampled.mul(0.)
        # Different channel count: allocate a fresh zero tensor.
        target_shape = list(x.shape)
        target_shape[1] = self.C_out
        return x.new_zeros(target_shape, dtype=x.dtype, device=x.device)

    def forward_latency(self, x):
        """Return (predicted latency, output (C, H, W)) for x = (C, H, W)."""
        name = "Zero H:%d W:%d C_IN:%d C_OUT:%d STRIDE:%d"%(x[1],x[2],self.C_in,self.C_out,self.stride)
        return predict_latency(name), (self.C_out, x[1] // self.stride, x[2] // self.stride)

    @staticmethod
    def _latency(H, W, C_in, C_out, stride):
        """Measure the real latency of the zero op."""
        return compute_latency(Zero(C_in, C_out, stride), (1, C_in, H, W))
class FactorizedReduce(nn.Module):
    """Halve the spatial resolution using two offset stride-2 1x1 convs.

    The second branch sees the feature map shifted by one pixel, so the two
    concatenated halves (C_out // 2 channels each) together cover positions
    a single strided conv would drop; a BatchNorm normalizes the result.
    """

    def __init__(self, C_in, C_out, affine=True):
        super(FactorizedReduce, self).__init__()
        assert C_out % 2 == 0  # each branch contributes exactly half the channels
        self.C_in = C_in
        self.C_out = C_out
        self.affine = affine
        half = C_out // 2
        self.relu = nn.ReLU(inplace=False)
        self.conv_1 = nn.Conv2d(C_in, half, 1, stride=2, padding=0, bias=False)
        self.conv_2 = nn.Conv2d(C_in, half, 1, stride=2, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(C_out, affine=affine)

    def forward(self, x):
        activated = self.relu(x)
        branch_a = self.conv_1(activated)
        # The one-pixel shift gives this branch the complementary positions.
        branch_b = self.conv_2(activated[:, :, 1:, 1:])
        return self.bn(torch.cat([branch_a, branch_b], dim=1))

    def forward_latency(self, x):
        """Return (predicted latency, output (C, H//2, W//2)) for x = (C, H, W)."""
        name = "FactorizedReduce H:%d W:%d C_IN:%d C_OUT:%d AFFINE:%s"%(x[1],x[2],self.C_in,self.C_out,self.affine)
        return predict_latency(name), (self.C_out, x[1] // 2, x[2] // 2)

    @staticmethod
    def _latency(H, W, C_IN, C_OUT, AFFINE):
        """Measure the real latency of this op for the given configuration."""
        layer = FactorizedReduce(C_IN, C_OUT, AFFINE)
        return compute_latency(layer, (1, C_IN, H, W))
class ResidualReduceBlock(nn.Module):
    """Residual downsampling block: conv path + avg-pool shortcut, summed.

    The main path is a stride-2 ReLUConvBN followed by a stride-1 one; the
    shortcut is a 2x2 average pool plus a 1x1 conv to match C_out.
    """
    def __init__(self, C_in, C_out, affine=True):
        super(ResidualReduceBlock, self).__init__()
        # Bug fix: ReLUConvBN takes (C_in, C_out, kernel_size, stride,
        # padding, affine) in this file, but the original passed an extra
        # positional argument (a leftover dilation=1 from another variant
        # of the op), raising TypeError at construction time.
        self.downsample_a = nn.Sequential(
            ReLUConvBN(C_in, C_out, 3, 2, 1, affine),
            ReLUConvBN(C_out, C_out, 3, 1, 1, affine)
        )
        self.downsample_b = nn.Sequential(
            nn.AvgPool2d(kernel_size=2, stride=2, padding=0),
            nn.Conv2d(C_in, C_out, kernel_size=1, stride=1, padding=0, bias=False)
        )
        self.C_in = C_in
        self.C_out = C_out
    def extra_repr(self):
        string = '{name}(inC={C_in}, outC={C_out})'.format(name=self.__class__.__name__, **self.__dict__)
        return string
    def forward(self, inputs):
        downsample_a = self.downsample_a(inputs)
        downsample_b = self.downsample_b(inputs)
        return downsample_a + downsample_b
134838 | <filename>ngym_shaping/utils/tasktools.py<gh_stars>100-1000
from __future__ import division
from collections import OrderedDict
import numpy as np
def to_map(*args):
    """Map each given name to its position, returned as an OrderedDict.

    Accepts either a single list argument or the names as varargs.
    """
    names = args[0] if isinstance(args[0], list) else args
    return OrderedDict((name, index) for index, name in enumerate(names))
def get_idx(t, start_end):
    """Return the indices i of a 1-D time axis where start <= t[i] < end."""
    lo, hi = start_end
    mask = (t >= lo) & (t < hi)
    return list(np.where(mask)[0])
def get_periods_idx(dt, periods):
    """Build the time axis and the index list of every task period.

    Returns (t, idx) where t spans [0, periods['tmax']] at step dt and
    idx maps each period name (all keys except 'tmax') to the time-axis
    indices falling inside its (start, end) bounds.
    """
    tmax = periods['tmax']
    t = np.linspace(0, tmax, int(tmax / dt) + 1)
    idx = {}
    for name, bounds in periods.items():
        if name != 'tmax':
            idx[name] = get_idx(t, bounds)
    return t, idx
def minmax_number(dist, args):
    """Return the (min, max) range implied by a random_number_fn spec.

    Raises:
        ValueError: for an unrecognized distribution name.
    """
    if dist == 'constant':
        return args, args
    if dist == 'uniform':
        return args[0], args[1]
    if dist == 'truncated_exponential':
        return args[1], args[2]
    if dist == 'choice':
        return np.min(args), np.max(args)
    raise ValueError('Unknown dist:', str(dist))
def circular_dist(original_dist):
    """Distance on a circle: the shorter way around a 2*pi period."""
    magnitude = abs(original_dist)
    return np.minimum(magnitude, 2 * np.pi - magnitude)
def divide(x, y):
    """Return x / y, or 0 when the quotient is undefined (y == 0 or NaN)."""
    try:
        quotient = x / y
    except ZeroDivisionError:
        return 0
    return 0 if np.isnan(quotient) else quotient
def correct_2AFC(perf):
    """Compute 2AFC performance from a stats object.

    Returns (decision rate over all trials, accuracy among decisions);
    the accuracy falls back to 0 when no decisions were made.
    """
    decision_rate = perf.n_decision / perf.n_trials
    accuracy = divide(perf.n_correct, perf.n_decision)
    return decision_rate, accuracy
def compute_perf(perf, reward, num_tr_perf, tr_perf):
    """Running-mean update of performance.

    When tr_perf is falsy the trial does not count and the inputs are
    returned unchanged; otherwise the counter is incremented and the mean
    is updated incrementally with the new reward.
    """
    if not tr_perf:
        return perf, num_tr_perf
    num_tr_perf += 1
    return perf + (reward - perf) / num_tr_perf, num_tr_perf
| StarcoderdataPython |
6651927 | """
Base class for GeoJSON services.
Fetches GeoJSON feed from URL to be defined by sub-class.
"""
import geojson
import logging
import requests
from geojson import Point, GeometryCollection, Polygon
from haversine import haversine
from json import JSONDecodeError
from typing import Optional
_LOGGER = logging.getLogger(__name__)
# Status codes returned by GeoJsonFeed.update() / _fetch().
UPDATE_OK = 'OK'
UPDATE_OK_NO_DATA = 'OK_NO_DATA'
UPDATE_ERROR = 'ERROR'
class GeoJsonFeed:
"""Geo JSON feed base class."""
def __init__(self, home_coordinates, url, filter_radius=None):
"""Initialise this service."""
self._home_coordinates = home_coordinates
self._filter_radius = filter_radius
self._url = url
self._request = requests.Request(method="GET", url=url).prepare()
self._last_timestamp = None
def __repr__(self):
"""Return string representation of this feed."""
return '<{}(home={}, url={}, radius={})>'.format(
self.__class__.__name__, self._home_coordinates, self._url,
self._filter_radius)
def _new_entry(self, home_coordinates, feature, global_data):
"""Generate a new entry."""
pass
def update(self):
"""Update from external source and return filtered entries."""
status, data = self._fetch()
if status == UPDATE_OK:
if data:
entries = []
global_data = self._extract_from_feed(data)
# Extract data from feed entries.
for feature in data.features:
entries.append(self._new_entry(self._home_coordinates,
feature, global_data))
filtered_entries = self._filter_entries(entries)
self._last_timestamp = self._extract_last_timestamp(filtered_entries)
return UPDATE_OK, filtered_entries
else:
# Should not happen.
return UPDATE_OK, None
elif status == UPDATE_OK_NO_DATA:
# Happens for example if the server returns 304
return UPDATE_OK_NO_DATA, None
else:
# Error happened while fetching the feed.
return UPDATE_ERROR, None
def _fetch(self):
"""Fetch GeoJSON data from external source."""
try:
with requests.Session() as session:
response = session.send(self._request, timeout=10)
if response.ok:
feature_collection = geojson.loads(response.text)
return UPDATE_OK, feature_collection
else:
_LOGGER.warning(
"Fetching data from %s failed with status %s",
self._request.url, response.status_code)
return UPDATE_ERROR, None
except requests.exceptions.RequestException as request_ex:
_LOGGER.warning("Fetching data from %s failed with %s",
self._request.url, request_ex)
return UPDATE_ERROR, None
except JSONDecodeError as decode_ex:
_LOGGER.warning("Unable to parse JSON from %s: %s",
self._request.url, decode_ex)
return UPDATE_ERROR, None
def _filter_entries(self, entries):
"""Filter the provided entries."""
filtered_entries = entries
# Always remove entries without geometry
filtered_entries = list(
filter(lambda entry:
entry.geometry is not None,
filtered_entries))
# Filter by distance.
if self._filter_radius:
filtered_entries = list(
filter(lambda entry:
entry.distance_to_home <= self._filter_radius,
filtered_entries))
return filtered_entries
    def _extract_from_feed(self, feed):
        """Extract global metadata from feed.

        Hook for subclasses: return feed-level data that every entry of one
        update should share. The base implementation provides none.
        """
        return None
    def _extract_last_timestamp(self, feed_entries):
        """Determine latest (newest) entry from the filtered feed.

        Hook for subclasses: return the newest timestamp found in
        ``feed_entries``, or None when the feed carries no timestamps.
        """
        return None
class FeedEntry:
    """Feed entry base class.

    Wraps a single GeoJSON feature together with the home coordinates it
    should be measured against. Subclasses override the metadata properties
    (title, external_id, attribution).
    """

    def __init__(self, home_coordinates, feature):
        """Initialise this feed entry."""
        self._home_coordinates = home_coordinates
        self._feature = feature

    def __repr__(self):
        """Return string representation of this entry."""
        return f'<{self.__class__.__name__}(id={self.external_id})>'

    @property
    def geometry(self):
        """Return all geometry details of this entry."""
        return self._feature.geometry if self._feature else None

    @property
    def coordinates(self):
        """Return the best coordinates (latitude, longitude) of this entry."""
        geometry = self.geometry
        if geometry:
            return GeoJsonDistanceHelper.extract_coordinates(geometry)
        return None

    @property
    def title(self) -> Optional[str]:
        """Return the title of this entry."""
        return None

    @property
    def external_id(self) -> Optional[str]:
        """Return the external id of this entry."""
        return None

    @property
    def attribution(self) -> Optional[str]:
        """Return the attribution of this entry."""
        return None

    @property
    def distance_to_home(self):
        """Return the distance in km of this entry to the home coordinates."""
        return GeoJsonDistanceHelper.distance_to_geometry(
            self._home_coordinates, self.geometry)

    def _search_in_feature(self, name):
        """Find an attribute in the feature object."""
        feature = self._feature
        if feature and name in feature:
            return feature[name]
        return None

    def _search_in_properties(self, name):
        """Find an attribute in the feed entry's properties."""
        feature = self._feature
        if feature and feature.properties and name in feature.properties:
            return feature.properties[name]
        return None
class GeoJsonDistanceHelper:
    """Helper to calculate distances between GeoJSON geometries.

    All methods are static; supports Point, GeometryCollection and Polygon
    geometries, and falls back to (None, None) / infinity for any other
    geometry type.
    """

    def __init__(self):
        """Initialize the geo distance helper."""
        # Stateless: all functionality is provided via static methods.
        pass

    @staticmethod
    def extract_coordinates(geometry):
        """Extract the best coordinates from the feature for display.

        Returns a (latitude, longitude) tuple, or (None, None) for
        unsupported geometry types.
        """
        latitude = longitude = None
        if isinstance(geometry, Point):
            # Just extract latitude and longitude directly.
            # GeoJSON stores positions as (longitude, latitude), hence the swap.
            latitude, longitude = geometry.coordinates[1], \
                geometry.coordinates[0]
        elif isinstance(geometry, GeometryCollection):
            # Go through the collection, and extract the first suitable
            # geometry.
            for entry in geometry.geometries:
                latitude, longitude = \
                    GeoJsonDistanceHelper.extract_coordinates(entry)
                if latitude is not None and longitude is not None:
                    break
        elif isinstance(geometry, Polygon):
            # Find the polygon's centroid as a best approximation for the map.
            # NOTE(review): coordinates[0] is the exterior ring; GeoJSON rings
            # normally repeat the first point as the last one, which biases
            # this average slightly towards that point -- confirm acceptable.
            longitudes_list = [point[0] for point in geometry.coordinates[0]]
            latitudes_list = [point[1] for point in geometry.coordinates[0]]
            number_of_points = len(geometry.coordinates[0])
            longitude = sum(longitudes_list) / number_of_points
            latitude = sum(latitudes_list) / number_of_points
            _LOGGER.debug("Centroid of %s is %s", geometry.coordinates[0],
                          (latitude, longitude))
        else:
            _LOGGER.debug("Not implemented: %s", type(geometry))
        return latitude, longitude

    @staticmethod
    def distance_to_geometry(home_coordinates, geometry):
        """Calculate the distance between home coordinates and geometry.

        Dispatches on geometry type; unsupported types yield infinity so
        they are filtered out by any radius filter.
        """
        distance = float("inf")
        if isinstance(geometry, Point):
            distance = GeoJsonDistanceHelper._distance_to_point(
                home_coordinates, geometry)
        elif isinstance(geometry, GeometryCollection):
            distance = GeoJsonDistanceHelper._distance_to_geometry_collection(
                home_coordinates, geometry)
        elif isinstance(geometry, Polygon):
            # Only the exterior ring (coordinates[0]) is considered.
            distance = GeoJsonDistanceHelper._distance_to_polygon(
                home_coordinates, geometry.coordinates[0])
        else:
            _LOGGER.debug("Not implemented: %s", type(geometry))
        return distance

    @staticmethod
    def _distance_to_point(home_coordinates, point):
        """Calculate the distance between home coordinates and the point."""
        # Swap coordinates to match: (latitude, longitude).
        return GeoJsonDistanceHelper._distance_to_coordinates(
            home_coordinates, (point.coordinates[1], point.coordinates[0]))

    @staticmethod
    def _distance_to_geometry_collection(home_coordinates,
                                         geometry_collection):
        """Calculate the distance between home coordinates and the geometry
        collection.

        The minimum distance over all contained geometries is returned.
        """
        distance = float("inf")
        for geometry in geometry_collection.geometries:
            distance = min(distance,
                           GeoJsonDistanceHelper.distance_to_geometry(
                               home_coordinates, geometry))
        return distance

    @staticmethod
    def _distance_to_polygon(home_coordinates, polygon):
        """Calculate the distance between home coordinates and the polygon."""
        distance = float("inf")
        # Calculate distance from polygon by calculating the distance
        # to each point of the polygon but not to each edge of the
        # polygon; should be good enough
        for polygon_point in polygon:
            distance = min(distance,
                           GeoJsonDistanceHelper._distance_to_coordinates(
                               home_coordinates,
                               (polygon_point[1], polygon_point[0])))
        return distance

    @staticmethod
    def _distance_to_coordinates(home_coordinates, coordinates):
        """Calculate the distance between home coordinates and the
        coordinates.

        Uses the haversine great-circle formula (result in km).
        """
        # Expecting coordinates in format: (latitude, longitude).
        return haversine(coordinates, home_coordinates)
| StarcoderdataPython |
9746371 | <filename>src/jcsclient/dss_connection.py
# Copyright (c) 2016 Jiocloud.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from jcsclient.dss_api.dss_bucket_ops import *
from jcsclient.dss_api.dss_object_ops import *
from jcsclient.config import *
class DSSConnection(object):
    """DSS main class, each cli command is processed here.

    Object is created from inside the dss Controller. Every public method
    builds the matching operation object and runs it through the standard
    parse/validate/execute/process pipeline in ``operate``.
    """

    def __init__(self, url, access_key, secret_key, secure, debug):
        setup_config_handler(url, access_key, secret_key, secure, debug)

    def operate(self, op, options=None):
        """Run the standard lifecycle of one DSS operation object."""
        op.parse_args(options)
        op.validate_args()
        return op.process_result(op.execute())

    def main(self):
        pass

    def create_bucket(self, bucketName):
        return self.operate(CreateBucketOp(bucketName))

    def delete_bucket(self, bucketName):
        return self.operate(DeleteBucketOp(bucketName))

    def head_bucket(self, bucketName):
        return self.operate(HeadBucketOp(bucketName))

    def list_buckets(self):
        return self.operate(ListBucketsOp())

    def delete_object(self, buckName, objName):
        return self.operate(DeleteObjectOp(buckName, objName))

    def get_object(self, buckName, objName, path=None):
        return self.operate(GetObjectOp(buckName, objName, path))

    def list_objects(self, buckName, options=None):
        return self.operate(ListObjectsOp(buckName), options)

    def head_object(self, buckName, objName):
        return self.operate(HeadObjectOp(buckName, objName))

    def put_object(self, buckName, objName, path, encryption=False):
        return self.operate(PutObjectOp(buckName, objName, path, encryption))

    def init_multipart_upload(self, bucketname, keyname):
        return self.operate(InitMPUploadOp(bucketname, keyname))

    def upload_multipart_parts(self, buckname, keyname, args_dict, data_path, size):
        # Part uploads need extra execute() arguments, so they bypass
        # operate(); note that validate_args() is intentionally not called
        # here (matching the rest of the upload flow).
        op = UploadPartOp(buckname=buckname, objname=keyname)
        op.parse_args(args_dict)
        return op.process_result(op.execute(fp=data_path, size=size))

    def complete_multipart_upload(self, bucketname, keyname, args_dict):
        return self.operate(CompleteMPUploadOp(bucketname, keyname), args_dict)

    def cancel_multipart_upload(self, bucketname, keyname, uploadId):
        return self.operate(CancelMPUploadOp(bucketname, keyname, uploadId))

    def list_multipart_parts(self, bucketname, keyname, upload_id, outfile=None):
        # process_result() takes the output file here, so this cannot go
        # through operate().
        op = ListPartsOp(bucketname, keyname, upload_id, outfile)
        op.parse_args(args_dict=None)
        op.validate_args()
        return op.process_result(op.execute(), outfile)

    def list_multipart_uploads(self, buckname):
        return self.operate(ListMPUploadsOp(buckname))

    def copy_object(self, buckname, keyname, sourceName):
        return self.operate(
            CopyObjectOp(buckname=buckname, objname=keyname,
                         copysource=sourceName))

    def get_presigned_url(self, buckname, objname, expiry):
        return self.operate(
            GetPresignedURLOp(buckname=buckname, objname=objname,
                              expiry=expiry))

    def rename_object(self, buckname, objname, newName):
        return self.operate(
            RenameObjectOp(buckname=buckname, objname=objname,
                           newName=newName))
| StarcoderdataPython |
9787039 | <gh_stars>0
# A 5x4 matrix is filled from keyboard input, except for the last element of
# each row. The program computes the sum of the entered elements of each row
# and stores it in the row's last cell. Finally the resulting matrix is
# printed.

M = 5  # columns (the last column holds the row sum)
N = 4  # rows

matrix = []
for i in range(N):
    # Read M - 1 values for this row, then append their sum as the last cell.
    row = [int(input(f'Строка {i}, элемент {j}: ')) for j in range(M - 1)]
    row.append(sum(row))
    matrix.append(row)

for line in matrix:
    # Right-align every value in a 5-character column.
    print(''.join(f'{item:>5}' for item in line))
| StarcoderdataPython |
from dataclasses import dataclass
from pathlib import Path
from typing import Optional
@dataclass
class VinaPublicOptions:
    """Receptor and search-box options shared by every Vina docking run.

    The first seven fields are required; the remaining tuning knobs are
    optional and default to None, meaning "let Vina use its own default".
    """
    receptor: Path
    center_x: float
    center_y: float
    center_z: float
    size_x: float
    size_y: float
    size_z: float
    # Optional parameters: annotated Optional[...] because their default is
    # None (the previous bare str/int/float annotations were inaccurate).
    flex: Optional[str] = None
    cpu: Optional[int] = None
    seed: Optional[int] = None
    exhaustiveness: Optional[int] = None
    num_modes: Optional[int] = None
    energy_range: Optional[int] = None
    weight_hydrogen: Optional[float] = None
@dataclass
class VinaOptions:
    """Full option set for a single Vina docking run.

    Combines the run-independent public options with the per-ligand
    input/output file paths.
    """
    # Shared receptor/search-box configuration.
    public: VinaPublicOptions
    # Ligand PDBQT file to dock.
    ligand: Path
    # Output path for the docked poses.
    out: Path
    # Path for the Vina log file.
    log: Path
| StarcoderdataPython |
1862862 | from functools import wraps
from telegrambot.models import Chat, Bot
from django.core.urlresolvers import reverse
def login_required(view_func):
    """
    Decorator for command views that checks that the chat is authenticated.
    Unauthenticated chats are routed to the login view with an
    authentication link added to the view kwargs.
    """
    @wraps(view_func)
    def wrapper(bot, update, **kwargs):
        chat = Chat.objects.get(id=update.message.chat.id)
        if not chat.is_authenticated():
            # Imported lazily to avoid a circular import at module load time.
            from telegrambot.bot_views.login import LoginBotView
            bot_model = Bot.objects.get(token=bot.token)
            kwargs['link'] = reverse('telegrambot:auth',
                                     kwargs={'bot': bot_model.user_api.username})
            return LoginBotView.as_command_view()(bot, update, **kwargs)
        return view_func(bot, update, **kwargs)
    return wrapper
323446 | <gh_stars>0
import unittest
import pandas as pd
from unittest.mock import patch
from managealooma.inputs import Inputs
class TestInputs(unittest.TestCase):
    """ Tests the functions that retrieve and manipulate inputs from the API.

    The Alooma API is never contacted: ``get_all_inputs`` is patched to
    return the static ``sample_input_list`` fixture below, so every test
    exercises only the local filtering/editing logic of ``Inputs``.
    """

    @patch('managealooma.inputs.Inputs')
    def setUp(self, inputs_class):
        """ Set up the input class and mocked API response with the list of dictionaries
        :param inputs_class: patched Inputs class (unused directly)
        :return: None
        """
        self.inputs = Inputs(api=None)
        self.mock_get_inputs()

    @patch('managealooma.inputs.Inputs.get_all_inputs')
    def mock_get_inputs(self, mock_get_inputs):
        """ Create the mocked input list of dictionaries
        :param mock_get_inputs: the patched get_all_inputs method
        :return: The mocked input list
        """
        # One entry per input type: webhook, two REST inputs, Google Sheets,
        # Salesforce and an ODBC database input.
        sample_input_list = [{'configuration': {'auto_map': 'true'},
                              'id': '12qw12qw-12qw-12qw-12qw-12qw12qw12qw',
                              'name': 'webhook_input',
                              'type': 'RESTAPI',
                              'validated': True},
                             # For the Mixpanel Engage API you must have separate code that updates the session ID on a schedule for Alooma to get fresh data
                             {'configuration': {'auto_map': 'false',
                                                'base_url': 'https://mixpanel.com/api/2.0/engage',
                                                'cron_expression': '0 */1 * * *',
                                                'data_field': 'results',
                                                'frequency': 240,
                                                'headers': [],
                                                'initial_value': 1,
                                                'input_default_schema': 'PUBLIC',
                                                'page_parameter': 'page',
                                                'pagination_type': 'Incremental',
                                                'parameters': [{'parameter': 'session_id',
                                                                'type': 'Text',
                                                                'value': '1234567869-abcdefghijklmnopqrstuv987'},
                                                               {'parameter': 'where',
                                                                'template': 'properties["$last_seen"]>="%Y-%m-%dT00:00:00"',
                                                                'type': 'Days Past', 'value': 2}],
                                                'primary_keys': [], 'request': 'GET',
                                                'username': '1q2w3e4r5t6y7u8i9o0p'},
                              'created_at': '2019-01-01T00:00:00.000000',
                              'id': '34er34-34er-34er-34er-34er34er34er',
                              'name': 'mixpanel_engage_one',
                              'paused': False,
                              'type': 'REST_INPUT',
                              'validated': True},
                             {'configuration': {'auto_map': 'false',
                                                'base_url': 'https://mixpanel.com/api/2.0/engage',
                                                'cron_expression': '0 */1 * * *',
                                                'data_field': 'results',
                                                'frequency': 240,
                                                'headers': [],
                                                'initial_value': 1,
                                                'input_default_schema': 'PUBLIC',
                                                'page_parameter': 'page',
                                                'pagination_type': 'Incremental'},
                              'created_at': '2019-01-01T00:00:00.000000',
                              'id': '34er34-34er-34er-34er-34er34er34er',
                              'name': 'mixpanel_engage_two',
                              'paused': False,
                              'type': 'REST_INPUT',
                              'validated': True},
                             {'configuration': {'auto_map': 'true',
                                                'fileFormat': '{"type":"excel"}',
                                                'input_default_schema': 'GOOGLE_SHEETS',
                                                'input_type': 'GOOGLE_SHEETS',
                                                'oauth2': '9876poiu9876poiu9876poiu',
                                                'query': "(mimeType='application/vnd.google-apps.spreadsheet' or mimeType='application/vnd.google-apps.folder')",
                                                'root_folder': '10qp10qp10qp10qp10qp'},
                              'created_at': '2019-01-01T00:00:00.000000',
                              'id': '34er34er-34er-34er-34er-34er34er34er',
                              'name': 'a_google_sheet',
                              'paused': False,
                              'type': 'GOOGLE_SHEETS_STORAGE',
                              'validated': True},
                             # For Salesforce the start time should be set to the earliest lastmodifieddata you see on any object
                             {'configuration': {'auto_map': 'true',
                                                'custom_objects': 'Account AccountHistory Opportunity OpportunityHistory Task User UserRole',
                                                'daily_api_calls': 200000,
                                                'daily_bulk_queries': 10000,
                                                'input_default_schema': 'SALESFORCE',
                                                'oauth2': '<PASSWORD>',
                                                'start_time': '2019-01-01T21:51:11.698503'},
                              'created_at': '2019-01-01T00:00:00.000000',
                              'id': '56ty56ty-56ty-56ty-56ty-56ty56ty56ty',
                              'name': 'salesforce_production',
                              'paused': False,
                              'type': 'SALESFORCE',
                              'validated': True},
                             {'configuration': {'auto_map': 'true',
                                                'batch_size': 100000,
                                                'database': 'my_database',
                                                'db_type': 'psql',
                                                'input_default_schema': 'MY_DATABASE',
                                                'port': 5432,
                                                'replication_type': 'incremental_load',
                                                'schema': 'public',
                                                'server': 'some-address-on-a-cloud.com',
                                                'tables': '{"orders":"xmin::text::bigint","order_items":"xmin::text::bigint","users":"xmin::text::bigint"}',
                                                'user': 'a_user_for_alooma'},
                              'created_at': '2019-01-01T00:00:00.000000',
                              'id': '78ui78ui-78ui-78ui-78ui-78ui78ui78ui',
                              'name': 'my_database',
                              'paused': False,
                              'type': 'ODBC',
                              'validated': True}]

        mock_get_inputs.return_value = sample_input_list
        return mock_get_inputs.return_value

    # Test 1
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_get_all_inputs_is_list(self):
        inputs = self.inputs.get_all_inputs()
        self.assertTrue(isinstance(inputs, list))

    # Test 2
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_get_all_inputs_list_length_is_six(self):
        inputs = self.inputs.get_all_inputs()
        self.assertEqual(6, len(inputs))

    # Test 3
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_get_input_by_name(self):
        single_input = self.inputs.get_input(input_name='webhook_input')
        expected_results = {'configuration': {'auto_map': 'true'},
                            'id': '12qw12qw-12qw-12qw-12qw-12qw12qw12qw',
                            'name': 'webhook_input',
                            'type': 'RESTAPI',
                            'validated': True}
        self.assertEqual(expected_results, single_input)

    # Test 4
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_view_inputs_dataframe(self):
        inputs = self.inputs.view_inputs(print_format='table')
        self.assertTrue(isinstance(inputs, pd.DataFrame))

    # Test 5
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_view_inputs_list_json(self):
        inputs = self.inputs.view_inputs(print_format='json')
        self.assertTrue(isinstance(inputs, list))

    # Test 6
    # NOTE(review): despite the name, this passes print_format='table' and
    # only asserts the result is not a dict -- confirm intent.
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_view_inputs_single_json(self):
        inputs = self.inputs.view_inputs(print_format='table', single_input='webhook_input')
        self.assertFalse(isinstance(inputs, dict))

    # Test 7
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_list_inputs(self):
        inputs = self.inputs.list_inputs()
        expected_results = ['webhook_input', 'mixpanel_engage_one', 'mixpanel_engage_two', 'a_google_sheet', 'salesforce_production', 'my_database']
        self.assertEqual(expected_results, inputs)

    # Test 8
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_list_tables_success(self):
        tables = self.inputs.list_tables(input_name='my_database')
        expected_results = ['orders', 'order_items', 'users']
        self.assertEqual(expected_results, tables)

    # Test 9
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_list_tables_exception_raised(self):
        with self.assertRaises(BaseException) as cm:
            self.inputs.list_tables(input_name='webhook_input')
        self.assertEqual(str(cm.exception), 'The input webhook_input is of type RESTAPI and not of type ODBC')

    # Test 10
    # NOTE(review): the expected 'db_type' below is 'PUBLIC' (mirroring
    # input_default_schema) rather than 'psql' -- confirm this documents the
    # intended behaviour of create_input_database and not a library bug.
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_create_input_database_input(self):
        new_input = self.inputs.create_input_database(source_credentials={'server': 'server_name',
                                                                          'schema': 'schema_name',
                                                                          'port': 'port',
                                                                          'database': 'database',
                                                                          'db_type': 'psql',
                                                                          'user': 'username',
                                                                          'password': 'password'},
                                                      new_input_name='new_db_input',
                                                      existing_input='my_database',
                                                      tables_dict={'table_one': 'xmin::text::bigint',
                                                                   'table_two': 'xmin::text::bigint'},
                                                      auto_map=True,
                                                      input_default_schema='PUBLIC',
                                                      replication_type='incremental_load',
                                                      batch_size=5000)
        expected_input_config = {'auto_map': True,
                                 'batch_size': 5000,
                                 'database': 'database',
                                 'db_type': 'PUBLIC',
                                 'input_default_schema': 'PUBLIC',
                                 'password': '<PASSWORD>',
                                 'port': 'port',
                                 'replication_type': 'incremental_load',
                                 'schema': 'public',
                                 'server': 'server_name',
                                 'tables': '{"table_one": "xmin::text::bigint", "table_two": "xmin::text::bigint"}',
                                 'user': 'username'}
        self.assertEqual(expected_input_config, new_input)

    # Test 11
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_edit_input_configuration_success(self):
        edited_input = self.inputs.edit_input_configuration(input_name='webhook_input', field_to_edit='auto_map', new_field_value='false')
        expected_edited_input = {'configuration': {'auto_map': 'false'},
                                 'id': '12qw12qw-12qw-12qw-12qw-12qw12qw12qw',
                                 'name': 'webhook_input',
                                 'type': 'RESTAPI',
                                 'validated': True}
        self.assertEqual(expected_edited_input, edited_input)

    # Test 12
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_edit_input_configuration_raise_exception(self):
        with self.assertRaises(BaseException) as cm:
            self.inputs.edit_input_configuration(input_name='webhook_input', field_to_edit='cron_expression', new_field_value='0 */1 * * *')
        self.assertEqual(str(cm.exception), 'The field cron_expression is not in the input webhook_input')

    # Test 13
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_add_table_to_input_success(self):
        edited_input = self.inputs.add_table_to_input(input_name='my_database', new_table_dict={"my_first_new_table": "xmin::text::bigint",
                                                                                                "my_second_new_table": "xmin::text::bigint"})
        expected_edited_input = {'configuration': {'auto_map': 'true',
                                                   'batch_size': 100000,
                                                   'database': 'my_database',
                                                   'db_type': 'psql',
                                                   'input_default_schema': 'MY_DATABASE',
                                                   'port': 5432,
                                                   'replication_type': 'incremental_load',
                                                   'schema': 'public',
                                                   'server': 'some-address-on-a-cloud.com',
                                                   'tables': '{"orders": "xmin::text::bigint", "order_items": "xmin::text::bigint", "users": "xmin::text::bigint", "my_first_new_table": "xmin::text::bigint", "my_second_new_table": "xmin::text::bigint"}',
                                                   'user': 'a_user_for_alooma'},
                                 'created_at': '2019-01-01T00:00:00.000000',
                                 'id': '78ui78ui-78ui-78ui-78ui-78ui78ui78ui',
                                 'name': 'my_database',
                                 'paused': False,
                                 'type': 'ODBC',
                                 'validated': True}
        self.assertEqual(expected_edited_input, edited_input)

    # Test 14
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_add_table_to_input_odbc_exception(self):
        with self.assertRaises(BaseException) as cm:
            self.inputs.add_table_to_input(input_name='mixpanel_engage_one', new_table_dict={"my_first_new_table": "xmin::text::bigint",
                                                                                             "my_second_new_table": "xmin::text::bigint"})
        self.assertEqual(str(cm.exception), 'The input mixpanel_engage_one is of type REST_INPUT and not of type ODBC')

    # Test 15
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_add_table_to_input_table_in_input_exception(self):
        with self.assertRaises(BaseException) as cm:
            self.inputs.add_table_to_input(input_name='my_database', new_table_dict={"orders": "xmin::text::bigint",
                                                                                     "my_second_new_table": "xmin::text::bigint"})
        self.assertEqual(str(cm.exception), 'The table orders is already in the input my_database')

    # Test 16
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_change_auto_mapping_mode_success(self):
        edited_input = self.inputs.change_auto_mapping_mode(input_name='webhook_input', new_mapping_mode='false')
        expected_edited_input = {'configuration': {'auto_map': 'false'},
                                 'id': '12qw12qw-12qw-12qw-12qw-12qw12qw12qw',
                                 'name': 'webhook_input',
                                 'type': 'RESTAPI',
                                 'validated': True}
        self.assertEqual(expected_edited_input, edited_input)

    # Test 17
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_add_template_to_parameter_configuration_success(self):
        edited_input = self.inputs.add_template_to_parameter_configuration(input_name='mixpanel_engage_one',
                                                                           add_to_parameter='session_id',
                                                                           template='properties["$last_seen"]>="%Y-%m-%dT00:00:00"')
        expected_edited_input = {'configuration': {'auto_map': 'false',
                                                   'base_url': 'https://mixpanel.com/api/2.0/engage',
                                                   'cron_expression': '0 */1 * * *',
                                                   'data_field': 'results',
                                                   'frequency': 240,
                                                   'headers': [],
                                                   'initial_value': 1,
                                                   'input_default_schema': 'PUBLIC',
                                                   'page_parameter': 'page',
                                                   'pagination_type': 'Incremental',
                                                   'parameters': [{'parameter': 'session_id', 'type': 'Text', 'value': '1234567869-abcdefghijklmnopqrstuv987'},
                                                                  {'parameter': 'where', 'template': 'properties["$last_seen"]>="%Y-%m-%dT00:00:00"', 'type': 'Days Past', 'value': 2}],
                                                   'primary_keys': [],
                                                   'request': 'GET',
                                                   'username': '1q2w3e4r5t6y7u8i9o0p'},
                                 'created_at': '2019-01-01T00:00:00.000000',
                                 'id': '34er34-34er-34er-34er-34er34er34er',
                                 'name': 'mixpanel_engage_one',
                                 'paused': False,
                                 'type': 'REST_INPUT',
                                 'validated': True}
        self.assertEqual(expected_edited_input, edited_input)

    # Test 18
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_add_template_to_parameter_configuration_no_parameters_exception(self):
        with self.assertRaises(BaseException) as cm:
            self.inputs.add_template_to_parameter_configuration(input_name='mixpanel_engage_two',
                                                                add_to_parameter='where',
                                                                template='properties["$last_seen"]>="%Y-%m-%dT00:00:00"')
        self.assertEqual(str(cm.exception), 'The input mixpanel_engage_two does not have any parameters')

    # Test 19
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_edit_parameter_configuration_success(self):
        edited_input = self.inputs.edit_parameter_configuration(input_name='mixpanel_engage_one',
                                                                parameter_to_edit='where',
                                                                value_to_set='value',
                                                                new_value=3)
        expected_edited_input = {'configuration': {'auto_map': 'false',
                                                   'base_url': 'https://mixpanel.com/api/2.0/engage',
                                                   'cron_expression': '0 */1 * * *',
                                                   'data_field': 'results',
                                                   'frequency': 240,
                                                   'headers': [],
                                                   'initial_value': 1,
                                                   'input_default_schema': 'PUBLIC',
                                                   'page_parameter': 'page',
                                                   'pagination_type': 'Incremental',
                                                   'parameters': [{'parameter': 'session_id',
                                                                   'type': 'Text',
                                                                   'value': '1234567869-abcdefghijklmnopqrstuv987'},
                                                                  {'parameter': 'where',
                                                                   'template': 'properties["$last_seen"]>="%Y-%m-%dT00:00:00"',
                                                                   'type': 'Days Past',
                                                                   'value': 3}],
                                                   'primary_keys': [], 'request': 'GET',
                                                   'username': '1q2w3e4r5t6y7u8i9o0p'},
                                 'created_at': '2019-01-01T00:00:00.000000',
                                 'id': '34er34-34er-34er-34er-34er34er34er',
                                 'name': 'mixpanel_engage_one',
                                 'paused': False,
                                 'type': 'REST_INPUT',
                                 'validated': True}
        self.assertEqual(expected_edited_input, edited_input)

    # Test 20
    @patch('managealooma.inputs.Inputs.get_all_inputs', mock_get_inputs)
    def test_edit_parameter_configuration_no_parameters_exception(self):
        with self.assertRaises(BaseException) as cm:
            self.inputs.edit_parameter_configuration(input_name='mixpanel_engage_two',
                                                     parameter_to_edit='where',
                                                     value_to_set='value',
                                                     new_value=3)
        self.assertEqual(str(cm.exception), 'The input mixpanel_engage_two does not have any parameters')
| StarcoderdataPython |
5142940 | <gh_stars>10-100
from mhkit.wave import resource
from mhkit.wave import io
from mhkit.wave import graphics
from mhkit.wave import performance
| StarcoderdataPython |
1968075 | <reponame>pak21/mtga-scripts
#!/usr/bin/env python3
import contextlib
import os
import sys
import mysql.connector as mysql
# All deck ids.
DECKS_SQL = '''
select deck_id from decks
'''

# Per-card main-deck counts for two decks; rows appear only where the counts
# differ (missing cards are coalesced to 0). Parameters: (b_id, a_id, a_id, b_id).
DIFF_SQL = '''
select a.name, a.main as a, coalesce(b.main, 0) as b
from deck_cards as a
left join deck_cards as b on a.name = b.name and b.deck_id = %s
where a.deck_id = %s and a.main > 0 and (a.main != b.main or b.main is null)
union
select b.name, coalesce(a.main, 0), b.main from deck_cards as a
right join deck_cards as b on a.name = b.name and a.deck_id = %s
where b.deck_id = %s and b.main > 0 and (a.main != b.main or a.main is null)
'''


def get_distance(cursor, id1, id2):
    """Return the number of cards present in deck id1 beyond deck id2.

    Only surpluses on the id1 side are counted; for equal-sized decks this
    equals half the symmetric card difference.
    """
    cursor.execute(DIFF_SQL, (id2, id1, id1, id2))
    return sum(max(count_a - count_b, 0)
               for _name, count_a, count_b in cursor.fetchall())
def main():
    """Compute pairwise deck distances and store them in deck_distances.

    Reads all deck ids, computes the distance for every unordered pair and
    inserts one row per pair, committing once at the end.
    """
    conn = mysql.connect(database='mtga', user='philip',
                         password=os.environ['DATABASE_PASSWORD'])
    try:
        with contextlib.closing(conn.cursor()) as cursor:
            cursor.execute(DECKS_SQL)
            decks = [row[0] for row in cursor.fetchall()]
            print(decks)
            # Visit each unordered pair exactly once (i < j).
            for i in range(len(decks)):
                print(i)
                for j in range(i + 1, len(decks)):
                    d1, d2 = decks[i], decks[j]
                    # Store pairs canonically with the smaller id first.
                    if d2 < d1:
                        d2, d1 = d1, d2
                    diff = get_distance(cursor, d1, d2)
                    cursor.execute(
                        'insert into deck_distances set deck_id1 = %s, deck_id2 = %s, distance = %s',
                        (d1, d2, diff))
            conn.commit()
    finally:
        # The connection was previously never closed; always release it.
        conn.close()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
#!/usr/bin/python3
# You are to retrieve the following document using the HTTP protocol
# in a way that you can examine the HTTP Response headers.
# http://data.pr4e.org/intro-short.txt
import socket

# Open a TCP connection to the web server on port 80.
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))

# Send a minimal HTTP/1.0 GET request; the blank line ends the headers.
# HTTP/1.0 makes the server close the connection after the response,
# which is what terminates the read loop below.
cmd = 'GET http://data.pr4e.org/intro-short.txt HTTP/1.0\r\n\r\n'.encode()
mysock.send(cmd)

# Read the response (headers + body) in 512-byte chunks until the server
# closes the socket (recv returns an empty bytes object).
while True:
    data = mysock.recv(512)
    if len(data) < 1:
        break
    print(data.decode(),end='')

mysock.close()
| StarcoderdataPython |
188306 | <reponame>mkhalil7625/capstone-week3
def camelcase(sentence):
    """Convert a sentence to camelCase.

    For example, "Display all books" is converted to "displayAllBooks".
    Splits on any whitespace run and capitalizes each word after the first
    with str.capitalize(), which -- unlike the previous str.title() approach
    -- does not uppercase letters that follow apostrophes or digits inside a
    word (e.g. "don't stop" -> "don'tStop", not "don'TStop").
    """
    words = sentence.split()
    if not words:
        # Empty or whitespace-only input yields an empty string.
        return ''
    first, *rest = words
    return first.lower() + ''.join(word.capitalize() for word in rest)
def display_banner():
    """Print the program name framed by lines of asterisks."""
    msg = 'AWSOME camelCaseGenerator PROGRAM'
    border = '*' * len(msg)
    print(f'\n {border} \n {msg} \n {border}\n')
inst=input('Enter a sentence to convert to camelCase: ')
return inst
def main():
display_banner()
sentence = instructions()
output = camelcase(sentence)
print(output)
if __name__ == '__main__':
main()
| StarcoderdataPython |
11239763 |
class QaEntry:
    """A question/answer pair, optionally tagged with whether they correspond."""

    def __init__(self, corresponds=None, question=None, answer=None):
        self.corresponds = corresponds
        self.question = question
        self.answer = answer
        # Embedding vectors are attached later by external code.
        self.questionVector = None
        self.answerVector = None

    def __str__(self):
        return f"{self.corresponds} {self.question} -- {self.answer}"
def extractEntry(entryText: str):
    """Build a QaEntry from one tab-separated line."""
    items = entryText.split("\t")
    if len(items) > 2:
        # Three-column form: <corresponds flag> <question> <answer>.
        return QaEntry(int(items[0]) != 0, items[1], items[2])
    # Two-column form: question and answer only, correspondence unknown.
    return QaEntry(None, items[0], items[1])
def entriesFromFile(path: str):
    """Yield QaEntry objects parsed from the tab-separated file at *path*.

    Fixes over the previous version: the unused ``entries`` list is gone,
    the file handle no longer shadows the ``input`` builtin, and the line
    terminator is stripped so the last field no longer carries a trailing
    newline.
    """
    with open(path, "r", encoding="utf-8") as infile:
        for line in infile:
            # Strip surrounding spaces and the line terminator, but NOT
            # tabs, so empty leading/trailing fields stay intact.
            yield extractEntry(line.strip(" \r\n"))
4991565 | from azureml.core import Workspace
from fairlearn.metrics import selection_rate, MetricFrame
from fairlearn.reductions import GridSearch, EqualizedOdds
import joblib
import numpy as np
import os
from sklearn.metrics import accuracy_score, recall_score, precision_score
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
#-----WORKSPACE----------------------------------------------------------------#
# Load workspace from config JSON file
ws = Workspace.from_config() # Returns a workspace object based on config file
print(ws.name, 'loaded')

#-----DATASET------------------------------------------------------------------#
# Get the training dataset from registered datasets (see ./01_datastores.py)
data = ws.datasets.get('diabetes dataset') # Get specified dataset from list of all datasets in workspace
print(data)

#-----DATA_PREPROCESSING-------------------------------------------------------#
# Separate features and labels
# NOTE(review): the column indexing below assumes `data` behaves like a pandas
# DataFrame; `ws.datasets.get` normally returns a Dataset object -- confirm an
# upstream step materializes it (e.g. to_pandas_dataframe()).
features = ['Pregnancies','PlasmaGlucose','DiastolicBloodPressure','TricepsThickness','SerumInsulin','BMI','DiabetesPedigree','Age']
X, y = data[features].values, data['Diabetic'].values

# Get sensitive features
S = data[['Age']].astype(int)
# Change value to represent age groups
S['Age'] = np.where(S.Age > 50, 'Over 50', '50 or younger')

# Split data into training set and test set (stratified on the label)
X_train, X_test, y_train, y_test, S_train, S_test = train_test_split(X, y, S, test_size=0.20, random_state=0, stratify=y)

#-----MODEL--------------------------------------------------------------------#
# Train a classification model
print("Training model...")
diabetes_model = DecisionTreeClassifier().fit(X_train, y_train)
print("Model trained.")

#-----UNFAIRNESS---------------------------------------------------------------#
'''
Unfairness
* Disparity between prediction rates or prediction performance metrics across sensitive feature groups
* Causes: data imbalance, indirect correlation, societal biases
* Fairlearn
    * Python package to analyze models and evaluate disparity between predictions and prediction performance for one or more sensitive features
    * Works by calculating group metrics for the sensitive features
    * Metrics themselves based on standard scikit-learn model evaluation metrics, such as accuracy, precision, or recall for classification models
    * Also provides support for mitigating unfairness in models
    * Detect fairness:
        * Use fairlearn selection_rate function to return the selection rate (percentage of positive predictions) for the overall population
        * Use scikit-learn metric functions to calculate overall accuracy, recall, and precision metrics
        * Use a MetricFrame to calculate selection rate, accuracy, recall, and precision for each age group in the Age sensitive feature
          Note: a mix of fairlearn and scikit-learn metric functions are used to calculate the performance values
'''
# Get predictions for the withheld test data
y_hat = diabetes_model.predict(X_test)

# Get overall metrics
print("Overall Metrics:")
# Get selection rate from fairlearn
overall_selection_rate = selection_rate(y_test, y_hat) # Get selection rate from fairlearn
print("\tSelection Rate:", overall_selection_rate)
# Get standard metrics from scikit-learn
overall_accuracy = accuracy_score(y_test, y_hat)
print("\tAccuracy:", overall_accuracy)
overall_recall = recall_score(y_test, y_hat)
print("\tRecall:", overall_recall)
overall_precision = precision_score(y_test, y_hat)
print("\tPrecision:", overall_precision)

# Get metrics by sensitive group from fairlearn
print('\nMetrics by Group:')
metrics = {'selection_rate': selection_rate,
           'accuracy': accuracy_score,
           'recall': recall_score,
           'precision': precision_score}
group_metrics = MetricFrame(metrics=metrics,
                            y_true=y_test,
                            y_pred=y_hat,
                            sensitive_features=S_test['Age'])
print(group_metrics.by_group)

#-----UNFAIRNESS_MITIGATION----------------------------------------------------#
'''
Mitigating unfairness
* Balance training and validation data
    * Apply over-sampling or under-sampling techniques to balance data
    * Use stratified splitting algorithms to maintain representative proportions for training and validation
* Perform extensive feature selection and engineering analysis
    * Fully explore the interconnected correlations in data to try to differentiate features that are directly predictive from features that encapsulate more complex, nuanced relationships
    * Use the model interpretability support in Azure ML to understand how individual features influence predictions.
* Evaluate models for disparity based on significant features
* Trade-off overall predictive performance for lower disparity in predictive performance between sensitive feature groups
    * --> 99.5% accuracy with comparable performance across all groups often more desirable than model that is 99.9% accurate but discriminates against a particular subset of cases

Fairlearn unfairness mitigation algorithms
* Exponentiated Gradient
    * Reduction technique that applies a cost-minimization approach to learn optimal trade-off of overall predictive performance and fairness disparity
    * Supports binary classification and regression
* Grid Search
    * Simplified version of Exponentiated Gradient algorithm
    * Works efficiently with small number of constraints
    * Supports binary classification and regression
* Threshold Optimizer
    * Post-processing technique that applies a constraint to an existing classifier, transforming predictions as appropriate
    * Supports binary classification

Fairlearn Constraints
* Demographic parity
    * Use with any mitigation algorithms
    * Minimize disparity in selection rate across sensitive feature groups
    * Example binary classification scenario: ensure equal number of positive predictions in each group
* True positive rate parity
    * Use with any mitigation algorithms
    * Minimize disparity in true positive rate across sensitive feature groups
    * Example binary classification scenario: ensure that each group contains comparable ratio of true positive predictions
* False-positive rate parity
    * Use with any mitigation algorithms
    * Minimize disparity in false-positive rate across sensitive feature groups
    * Example binary classification scenario: ensure that each group contains a comparable ratio of false-positive predictions
* Equalized odds
    * Use with any of the mitigation algorithms
    * Minimize disparity in combined true positive rate and false-positive rate across sensitive feature groups
    * Example binary classification scenario: ensure that each group contains a comparable ratio of true positive and false-positive predictions
* Error rate parity
    * Use with any reduction-based mitigation algorithms (Exponentiated Gradient and Grid Search)
    * Ensure that the error for each sensitive feature group does not deviate from the overall error rate by more than a specified amount
* Bounded group loss
    * Use with any of the reduction-based mitigation algorithms (Exponentiated Gradient and Grid Search)
    * Restrict the loss for each sensitive feature group in a regression model
'''
print('Finding mitigated models...')

# Train multiple models with Grid Search under an Equalized Odds constraint
sweep = GridSearch(
    DecisionTreeClassifier(),
    constraints=EqualizedOdds(),
    grid_size=20
)
sweep.fit(X_train, y_train, sensitive_features=S_train.Age)
models = sweep.predictors_

# Save the models and get predictions from them (plus the original unmitigated one for comparison)
model_dir = 'mitigated_models'
os.makedirs(model_dir, exist_ok=True)
model_name = 'diabetes_unmitigated'
print(model_name)
joblib.dump(value=diabetes_model, filename=os.path.join(model_dir, '{0}.pkl'.format(model_name)))
predictions = {model_name: diabetes_model.predict(X_test)}
i = 0
for model in models:
    i += 1
    model_name = 'diabetes_mitigated_{0}'.format(i)
    print(model_name)
    joblib.dump(value=model, filename=os.path.join(model_dir, '{0}.pkl'.format(model_name)))
    predictions[model_name] = model.predict(X_test)
| StarcoderdataPython |
16945 | <gh_stars>0
"""Primary application.
"""
import json
import logging
import logging.config
import os
import sys
from flask import url_for, render_template, redirect, request
from i_xero import Xero2
from i_xero.i_flask import FlaskInterface
from utils import jsonify, serialize_model
# initialize logging
# The SlackBot app doesn't handle logging in the same way.
# I tried to pass in a logger object from aracnid_logger,
# but it seems to disable all loggers
logging_filename = os.environ.get('LOGGING_CONFIG_FILE')
command_dir = os.path.dirname(sys.argv[0])
logging_dir = os.path.join(os.getcwd(), command_dir)  # NOTE(review): never used
logging_path = os.path.join(os.getcwd(), logging_filename)
with open(logging_path, 'rt') as file:
    logging_config = json.load(file)
# Override the console handler's formatter with the one named in the environment.
formatter = os.environ.get('LOGGING_FORMATTER')
logging_config['handlers']['console']['formatter'] = formatter
logging.config.dictConfig(logging_config)
# Feature flag: any of 'true'/'yes' (case-insensitive) enables it; unset means False.
env_str = os.environ.get('LOG_UNHANDLED_EXCEPTIONS')
LOG_UNHANDLED_EXCEPTIONS = env_str.lower() in ('true', 'yes') if env_str else False

# configure flask application
flask_app = FlaskInterface(__name__).get_app()

# configure xero application
xero_app = Xero2(flask_app)
@flask_app.route("/")
def index():
xero_access = dict(xero_app.obtain_xero_oauth2_token() or {})
return render_template(
"code.html",
title="Home | oauth token",
code=jsonify(xero_access),
)
@flask_app.route("/login")
def login():
redirect_url = url_for("oauth_callback", _external=True)
response = xero_app.oauth_app.authorize(callback_uri=redirect_url)
return response
@flask_app.route("/callback")
def oauth_callback():
try:
response = xero_app.oauth_app.authorized_response()
except Exception as e:
print(e)
raise
# todo validate state value
if response is None or response.get("access_token") is None:
return "Access denied: response=%s" % response
xero_app.store_xero_oauth2_token(response)
return redirect(url_for("index", _external=True))
@flask_app.route("/logout")
def logout():
xero_app.store_xero_oauth2_token(None)
return redirect(url_for("index", _external=True))
@flask_app.route("/refresh-token")
def refresh_token():
xero_token = xero_app.obtain_xero_oauth2_token()
new_token = xero_app.refresh_token()
return render_template(
"code.html",
title="Xero OAuth2 token",
code=jsonify({"Old Token": xero_token, "New token": new_token}),
sub_title="token refreshed",
)
@flask_app.route("/tenants")
def tenants():
available_tenants = xero_app.get_tenants()
if available_tenants is None:
return redirect(url_for("login", _external=True))
return render_template(
"code.html",
title="Xero Tenants",
code=jsonify(available_tenants),
)
@flask_app.route("/invoices")
def get_invoices():
invoices = xero_app.get_invoices()
if invoices is None:
return redirect(url_for("login", _external=True))
code = serialize_model(invoices)
sub_title = "Total invoices found: {}".format(len(invoices.invoices))
return render_template(
"code.html", title="Invoices", code=code, sub_title=sub_title
)
# start the app locally (Flask development server, not for production)
if __name__ == '__main__':
    flask_app.run(host='localhost', port=5000)
| StarcoderdataPython |
1763437 | <filename>back/found/models.py
from django.db import models
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from datetime import datetime
from django_extensions.db.models import TimeStampedModel
from django.contrib.auth import get_user_model
# Resolve the project's configured user model once at import time.
User = get_user_model()
def found_thumbnail_path(instance, filename):
    """Build an upload path like 'found/thumbnail/<YYYYMMDD>/<stamp>.jpeg'.

    The caller-supplied *filename* is discarded and replaced with a
    microsecond timestamp so uploads never collide or leak original names.
    (The original body computed the timestamp but returned a hard-coded
    '(unknown)' basename, making every upload overwrite the previous one.)
    """
    stamp = datetime.today().strftime('%Y%m%d%H%M%f')
    day = datetime.today().strftime('%Y%m%d')
    return f'found/thumbnail/{day}/{stamp}.jpeg'
def found_image_path(instance, filename):
    """Build an upload path like 'found/origin/<YYYYMMDD>/<stamp>.jpeg'.

    The caller-supplied *filename* is discarded and replaced with a
    microsecond timestamp so uploads never collide or leak original names.
    (The original body computed the timestamp but returned a hard-coded
    '(unknown)' basename, making every upload overwrite the previous one.)
    """
    stamp = datetime.today().strftime('%Y%m%d%H%M%f')
    day = datetime.today().strftime('%Y%m%d')
    return f'found/origin/{day}/{stamp}.jpeg'
class Color(models.Model):
    # Lookup table of distinct item colors.
    color = models.CharField(max_length=20, unique=True)
class Category(models.Model):
    # Lookup table of distinct item categories.
    category = models.CharField(max_length=20, unique=True)
class FoundImage(models.Model):
    # Original (full-size) photo of a found item.
    image = models.ImageField(upload_to=found_image_path)
    # Up to three classification labels; presumably filled by an image
    # recognition step -- TODO confirm against the upload pipeline.
    category_1 = models.CharField(max_length=20, blank=True)
    category_2 = models.CharField(max_length=20, blank=True)
    category_3 = models.CharField(max_length=20, blank=True)
    # Filesystem path to an associated numpy artifact; NOTE(review): its
    # exact contents (feature vector?) are not visible here -- confirm.
    numpy_path = models.CharField(max_length=200, blank=True)
class FoundPosting(TimeStampedModel):
    # Posting author; deleting the user cascades to their postings.
    user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='found')
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    color = models.ForeignKey(Color, on_delete=models.CASCADE)
    # NOTE(review): the flag's meaning (e.g. open/resolved) is not visible
    # in this file -- confirm before documenting further.
    status = models.BooleanField()
    content = models.TextField()

    class Meta:
        # Newest postings first; `created` comes from TimeStampedModel.
        ordering = ('-created',)
class FoundThumbnail(models.Model):
    # Thumbnail optionally linked to a posting and to its original image.
    posting = models.ForeignKey(FoundPosting, blank=True, null=True, related_name='thumbnail', on_delete=models.CASCADE)
    origin = models.ForeignKey(FoundImage, blank=True, null=True, related_name='thumbnail', on_delete=models.CASCADE)
    # 200x200 JPEG generated by imagekit when the field is saved.
    image = ProcessedImageField(
        processors=[ResizeToFill(200, 200)],
        upload_to=found_thumbnail_path,
        format='JPEG',
        options={'quality': 90},
    )

    def __str__(self):
        # Media-relative path of the stored thumbnail file.
        return 'media/%s' % self.image
| StarcoderdataPython |
6423788 | <filename>hash_code.py
# Importing the hashing library
import hashlib
# Importing the visual libraries
from PyInquirer import Separator, prompt
from termcolor import colored
# Defining the hash function.
def hash_func():
    """Interactively choose a hash algorithm and input type, then dispatch.

    Shows two list prompts (algorithm and text-vs-file) and forwards the
    chosen algorithm name to handle_file_hashing or handle_text_hashing.
    """
    # Asking the user for further data regarding algorithms
    hash_info = prompt([
        {
            'type': 'list',
            'qmark': '>',
            'name': 'algorithm',
            'message': 'Which algorithm do you want to use?',
            'choices': [
                Separator(),
                {
                    'name': 'MD5',
                },
                {
                    'name': 'SHA256',
                },
                {
                    'name': 'SHA512',
                },
                {
                    'name': 'BLAKE2',
                },
                {
                    'name': 'BLAKE2b',
                },
            ],
        },
        {
            'type': 'list',
            'qmark': '>',
            'name': 'type_of_data',
            'message': 'What do you want to hash?',
            'choices': [
                Separator(),
                {
                    'name': 'Text',
                },
                {
                    'name': 'File',
                },
            ],
        },
    ])
    # Storing the data into separate variables
    algorithm = hash_info['algorithm']
    type_of_data = hash_info['type_of_data']
    # Determining the type of data to hash and calling the appropriate functions
    if type_of_data == 'File':
        handle_file_hashing(algorithm)
    else:
        handle_text_hashing(algorithm)
def handle_text_hashing(algorithm):
    """Prompt for a text string and print its hash using *algorithm*.

    *algorithm* is one of the menu labels ('MD5', 'SHA256', 'SHA512',
    'BLAKE2', 'BLAKE2b'); any other value falls back to BLAKE2b, matching
    the original if/elif ladder's else branch.
    """
    # Asking the user for the data
    data_info = prompt([
        {
            'type': 'input',
            'qmark': '>',
            'name': 'hash_data',
            'message': 'Enter data to hash.',
        },
    ])
    # Dispatch table replaces the previous if/elif chain.
    constructors = {
        'MD5': hashlib.md5,
        'SHA256': hashlib.sha256,
        'SHA512': hashlib.sha512,
        'BLAKE2': hashlib.blake2s,
    }
    hash_out = constructors.get(algorithm, hashlib.blake2b)()
    # Populating it with the data after converting it to bytes
    hash_out.update(data_info['hash_data'].encode())
    # Calculating the actual hash
    digest = hash_out.hexdigest()
    # Printing out the hash
    print(colored('Your hash is: ', 'white') + colored(digest, 'green'))
    return None
def handle_file_hashing(algorithm):
    """Prompt for a file path and print the file's hash using *algorithm*.

    The file is read in 1 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory. Unknown algorithm names
    fall back to BLAKE2b (same convention as handle_text_hashing).
    """
    # Asking the user for the path to the file
    file_info = prompt([
        {
            'type': 'input',
            'qmark': '>',
            'name': 'file_name',
            'message': 'Enter the path to the file.',
        },
    ])
    constructors = {
        'MD5': hashlib.md5,
        'SHA256': hashlib.sha256,
        'SHA512': hashlib.sha512,
        'BLAKE2': hashlib.blake2s,
    }
    hash_out = constructors.get(algorithm, hashlib.blake2b)()
    try:
        with open(file_info['file_name'], 'rb') as file_path:
            chunk = file_path.read(1024)
            while chunk != b'':
                hash_out.update(chunk)
                chunk = file_path.read(1024)
    # Catch only filesystem errors: the old broad `except Exception` printed
    # a misleading "can't find the file" message for any failure whatsoever.
    except OSError:
        print(colored(
            'Can\'t find the file please check the name and make sure the extension is also present.', 'red'))
        return None
    # Calculating and printing the actual hash
    digest = hash_out.hexdigest()
    print(colored('Your hash is: ', 'white') + colored(digest, 'green'))
| StarcoderdataPython |
1724124 | import torch.nn
from torch import nn
import torch
class KLWithMask:
    """KL-divergence distillation loss that masks out ignore-index pixels.

    Pixels whose label equals ``ignore_index`` have their soft target
    replaced by the student's own softmax output, so they contribute
    (approximately) zero to the KL term.
    """

    def __call__(self, logits, soft_targets, label_target, ignore_index=-1):
        # Assumes channel-first maps: logits/soft_targets are (b, c, w, h)
        # and label_target broadcasts to (b, w, h) -- TODO confirm callers.
        b, c, w, h = soft_targets.shape
        # soft_targets = torch.clamp(soft_targets, 0, 1)
        # soft_targets = torch.softmax(soft_targets, dim=1)
        # print(soft_targets.max(), soft_targets.min())
        soft_targets = torch.softmax(soft_targets, dim=1)
        # Clamp away exact 0/1 so the log terms inside KL stay finite.
        soft_targets = soft_targets.clamp(1e-10, 1 - 1e-10)
        log_probs = torch.log_softmax(logits, dim=1)
        # soft_loss = nn.functional.kl_div(log_probs, soft_targets, reduction="none")
        # valid_mask = (label_target != ignore_index).long()
        invalid_mask = (label_target == ignore_index).bool()
        # Broadcast the pixel mask across the class dimension.
        invalid_mask = invalid_mask.view(b, 1, w, h).expand(b, c, w, h)
        # print(valid_mask.shape, valid_mask.unique())
        softmax_prob = torch.softmax(logits, dim=1)
        # For ignored pixels, target := prediction, so their KL term vanishes.
        soft_targets[invalid_mask] = softmax_prob[invalid_mask]
        # kl_loss = torch.sum(soft_loss * valid_mask, dim=1).mean()
        # kl_loss = nn.functional.kl_div(log_probs, soft_targets, reduction='batchmean')
        kl_loss = nn.functional.kl_div(log_probs, soft_targets, reduction='batchmean')
        # kl_loss = nn.functional.kl_div(log_probs, soft_targets, reduction='none')
        # kl_loss[invalid_mask] = 0
        # kl_loss = torch.sum(kl_loss, dim=1).mean()
        # kl_loss = kl_loss.mean()
        return kl_loss
| StarcoderdataPython |
8077163 | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage import measure
import nibabel as nib
import SimpleITK as sitk
import glob
def brain_bbox(data, gt):
    """Crop *data* and *gt* to the bounding box of data's non-zero voxels.

    Parameters
    ----------
    data : np.ndarray
        3-D volume whose non-zero voxels define the brain region.
    gt : np.ndarray
        3-D label volume cropped with the same box.

    Returns
    -------
    tuple of np.ndarray
        (data_cropped, gt_cropped) covering every non-zero voxel.
    """
    brain_voxels = np.where(data != 0)
    min_z, max_z = int(np.min(brain_voxels[0])), int(np.max(brain_voxels[0]))
    min_x, max_x = int(np.min(brain_voxels[1])), int(np.max(brain_voxels[1]))
    min_y, max_y = int(np.min(brain_voxels[2])), int(np.max(brain_voxels[2]))
    # +1 keeps the voxels at the max indices: the previous half-open slice
    # data[min:max] silently dropped the outermost non-zero plane per axis.
    data_bboxed = data[min_z:max_z + 1, min_x:max_x + 1, min_y:max_y + 1]
    gt_bboxed = gt[min_z:max_z + 1, min_x:max_x + 1, min_y:max_y + 1]
    return data_bboxed, gt_bboxed
def volume_bounding_box(data, gt, expend=0, status="train"):
    """Crop to the brain bbox, then derive a label box padded by *expend*.

    status="train": returns the cropped data plus a uint8 mask marking the
    padded interior; status="test": returns the cropped data and the labels
    cropped with the same (jittered) box.
    """
    data, gt = brain_bbox(data, gt)
    print(data.shape)
    mask = (gt != 0)
    brain_voxels = np.where(mask != 0)
    z, x, y = data.shape
    minZidx = int(np.min(brain_voxels[0]))
    maxZidx = int(np.max(brain_voxels[0]))
    minXidx = int(np.min(brain_voxels[1]))
    maxXidx = int(np.max(brain_voxels[1]))
    minYidx = int(np.min(brain_voxels[2]))
    maxYidx = int(np.max(brain_voxels[2]))
    # Pad each side by `expend`, clipped to the volume bounds.
    minZidx_jitterd = max(minZidx - expend, 0)
    maxZidx_jitterd = min(maxZidx + expend, z)
    minXidx_jitterd = max(minXidx - expend, 0)
    maxXidx_jitterd = min(maxXidx + expend, x)
    minYidx_jitterd = max(minYidx - expend, 0)
    maxYidx_jitterd = min(maxYidx + expend, y)
    # NOTE(review): half-open slicing drops the voxel at each max index when
    # expend == 0; confirm whether max+1 was intended (cf. brain_bbox).
    data_bboxed = data[minZidx_jitterd:maxZidx_jitterd,
                       minXidx_jitterd:maxXidx_jitterd, minYidx_jitterd:maxYidx_jitterd]
    print([minZidx, maxZidx, minXidx, maxXidx, minYidx, maxYidx])
    print([minZidx_jitterd, maxZidx_jitterd,
           minXidx_jitterd, maxXidx_jitterd, minYidx_jitterd, maxYidx_jitterd])
    if status == "train":
        gt_bboxed = np.zeros_like(data_bboxed, dtype=np.uint8)
        # NOTE(review): the interior mask mixes global end indices
        # (maxXidx_jitterd etc.) with the already-cropped volume's local
        # coordinates -- verify this offset logic against callers.
        gt_bboxed[expend:maxZidx_jitterd-expend, expend:maxXidx_jitterd -
                  expend, expend:maxYidx_jitterd - expend] = 1
        return data_bboxed, gt_bboxed
    if status == "test":
        gt_bboxed = gt[minZidx_jitterd:maxZidx_jitterd,
                       minXidx_jitterd:maxXidx_jitterd, minYidx_jitterd:maxYidx_jitterd]
        return data_bboxed, gt_bboxed
def itensity_normalize_one_volume(volume):
    """Normalize an n-d volume by the mean/std of its non-zero region.

    Parameters
    ----------
    volume : np.ndarray
        Input volume; zero voxels are treated as background and excluded
        from the statistics.

    Returns
    -------
    np.ndarray
        float32 array equal to (volume - mean) / std, with mean/std taken
        over the non-zero voxels only.
    """
    pixels = volume[volume > 0]
    out = (volume - pixels.mean()) / pixels.std()
    # The old body also drew a same-shaped normal-noise volume meant to fill
    # the background (see the commented-out assignment it supported); the
    # draw was dead work that advanced the global RNG, so it is removed.
    return out.astype(np.float32)
class MedicalImageDeal(object):
    """Wrap an image array with percentile clipping and min-max scaling."""

    def __init__(self, img, percent=1):
        self.img = img
        self.percent = percent

    @property
    def valid_img(self):
        """Image clipped at the intensity where the CDF reaches `percent`."""
        from skimage import exposure
        cdf = exposure.cumulative_distribution(self.img)
        cutoff = cdf[1][cdf[0] >= self.percent][0]
        return np.clip(self.img, self.img.min(), cutoff)

    @property
    def norm_img(self):
        """Image rescaled linearly onto [0, 1]."""
        lo, hi = self.img.min(), self.img.max()
        return (self.img - lo) / (hi - lo)
# Preprocess every BraTS FLAIR volume: crop to the brain bounding box, clip
# the top intensity tail, z-score normalize, binarize the segmentation
# (all non-zero labels -> 1), and write both volumes back out.
all_flair = glob.glob("flair/*_flair.nii.gz")
for p in all_flair:
    data = sitk.GetArrayFromImage(sitk.ReadImage(p))
    # Matching segmentation lives beside the FLAIR file, named *_seg.nii.gz.
    lab = sitk.GetArrayFromImage(sitk.ReadImage(p.replace("flair", "seg")))
    img, lab = brain_bbox(data, lab)
    img = MedicalImageDeal(img, percent=0.999).valid_img
    img = itensity_normalize_one_volume(img)
    lab[lab > 0] = 1
    uid = p.split("/")[-1]
    # NOTE(review): output directories are machine-specific absolute paths.
    sitk.WriteImage(sitk.GetImageFromArray(
        img), "/media/xdluo/Data/brats19/data/flair/{}".format(uid))
    sitk.WriteImage(sitk.GetImageFromArray(
        lab), "/media/xdluo/Data/brats19/data/label/{}".format(uid))
| StarcoderdataPython |
3470522 | <reponame>JankoTreuner/track-my-time<filename>trackmytime/timetracker/migrations/0004_workday_workinghours.py
# Generated by Django 3.2.4 on 2021-06-22 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.4: adds a nullable `workinghours` TimeField
    # to the timetracker app's Workday model.

    dependencies = [
        ('timetracker', '0003_auto_20210622_1444'),
    ]

    operations = [
        migrations.AddField(
            model_name='workday',
            name='workinghours',
            field=models.TimeField(null=True),
        ),
    ]
| StarcoderdataPython |
3366693 | <reponame>sephrat/mealie<gh_stars>0
import json
import pytest
from fastapi.testclient import TestClient
from slugify import slugify
from tests.app_routes import AppRoutes
from tests.utils.recipe_data import RecipeTestData, build_recipe_store
# Build the shared recipe fixtures once at import time (parametrize below).
recipe_test_data = build_recipe_store()
@pytest.mark.parametrize("recipe_data", recipe_test_data)
def test_create_by_url(api_client: TestClient, api_routes: AppRoutes, recipe_data: RecipeTestData, token):
    # Remove any leftover copy so the create-from-URL call starts clean.
    api_client.delete(api_routes.recipes_recipe_slug(recipe_data.expected_slug), headers=token)
    response = api_client.post(api_routes.recipes_create_url, json={"url": recipe_data.url}, headers=token)
    assert response.status_code == 201
    # The endpoint answers with the slug of the newly scraped recipe.
    assert json.loads(response.text) == recipe_data.expected_slug
def test_create_by_json(api_client: TestClient, api_routes: AppRoutes, token, raw_recipe):
    # Ensure a clean slate, then create the recipe from a raw JSON payload.
    recipe_url = api_routes.recipes_recipe_slug("banana-bread")
    api_client.delete(recipe_url, headers=token)
    response = api_client.post(api_routes.recipes_create, json=raw_recipe, headers=token)
    assert response.status_code == 201
    assert json.loads(response.text) == "banana-bread"
def test_create_no_image(api_client: TestClient, api_routes: AppRoutes, token, raw_recipe_no_image):
    # Recipes without an image are accepted and slugged normally.
    response = api_client.post(api_routes.recipes_create, json=raw_recipe_no_image, headers=token)
    assert response.status_code == 201
    assert json.loads(response.text) == "banana-bread-no-image"
def test_read_all_post(api_client: TestClient, api_routes: AppRoutes):
    # The bulk-read endpoint accepts a projection of recipe properties.
    response = api_client.post(api_routes.recipes, json={"properties": ["slug", "description", "rating"]})
    assert response.status_code == 200
@pytest.mark.parametrize("recipe_data", recipe_test_data)
def test_read_update(api_client: TestClient, api_routes: AppRoutes, recipe_data, token):
    """Round-trip a recipe: GET, attach notes/tools/categories, PUT, re-GET."""
    recipe_url = api_routes.recipes_recipe_slug(recipe_data.expected_slug)
    response = api_client.get(recipe_url, headers=token)
    assert response.status_code == 200

    recipe = json.loads(response.content)

    test_notes = [
        {"title": "My Test Title1", "text": "My Test Text1"},
        {"title": "My Test Title2", "text": "My Test Text2"},
    ]
    recipe["notes"] = test_notes
    recipe["tools"] = ["one tool", "two tool"]

    test_categories = ["one", "two", "three"]
    recipe["recipeCategory"] = test_categories

    response = api_client.put(recipe_url, json=recipe, headers=token)
    assert response.status_code == 200
    assert json.loads(response.text) == recipe_data.expected_slug

    # Re-read and confirm the edits were persisted.
    response = api_client.get(recipe_url)
    recipe = json.loads(response.content)
    assert recipe["notes"] == test_notes
    # BUGFIX: the old assertion compared list.sort() results, i.e.
    # None == None, which always passed regardless of the categories.
    assert sorted(recipe["recipeCategory"]) == sorted(test_categories)
@pytest.mark.parametrize("recipe_data", recipe_test_data)
def test_rename(api_client: TestClient, api_routes: AppRoutes, recipe_data, token):
    recipe_url = api_routes.recipes_recipe_slug(recipe_data.expected_slug)
    response = api_client.get(recipe_url, headers=token)
    assert response.status_code == 200

    recipe = json.loads(response.text)
    new_name = recipe.get("name") + "-rename"
    new_slug = slugify(new_name)
    recipe["name"] = new_name

    # Renaming re-slugs the recipe and the PUT answers with the new slug.
    response = api_client.put(recipe_url, json=recipe, headers=token)
    assert response.status_code == 200
    assert json.loads(response.text) == new_slug

    # Propagate the new slug so later parametrized tests hit the renamed recipe.
    recipe_data.expected_slug = new_slug
@pytest.mark.parametrize("recipe_data", recipe_test_data)
def test_delete(api_client: TestClient, api_routes: AppRoutes, recipe_data, token):
    # Deleting the (renamed) recipe created by the earlier tests succeeds.
    recipe_url = api_routes.recipes_recipe_slug(recipe_data.expected_slug)
    response = api_client.delete(recipe_url, headers=token)
    assert response.status_code == 200
| StarcoderdataPython |
4963022 | <reponame>vvikal/folium
# -*- coding: utf-8 -*-
from branca.element import CssLink, Element, Figure, JavascriptLink, MacroElement
from jinja2 import Template
# CDN assets injected into the page <head> when the Draw control is rendered.
_default_js = [
    ('leaflet_draw_js',
     'https://cdnjs.cloudflare.com/ajax/libs/leaflet.draw/1.0.2/leaflet.draw.js')
]

_default_css = [
    ('leaflet_draw_css',
     'https://cdnjs.cloudflare.com/ajax/libs/leaflet.draw/1.0.2/leaflet.draw.css')
]
class Draw(MacroElement):
    """
    Vector drawing and editing plugin for Leaflet.

    Parameters
    ----------
    export : bool, default False
        Add a small button that exports the drawn shapes as a geojson file.
    filename : string, default 'data.geojson'
        Name of geojson file
    position : {'topleft', 'topright', 'bottomleft', 'bottomright'}
        Position of control.
        See https://leafletjs.com/reference-1.6.0.html#control
    draw_options : dict, optional
        The options used to configure the draw toolbar. See
        http://leaflet.github.io/Leaflet.draw/docs/leaflet-draw-latest.html#drawoptions
    edit_options : dict, optional
        The options used to configure the edit toolbar. See
        https://leaflet.github.io/Leaflet.draw/docs/leaflet-draw-latest.html#editpolyoptions

    Examples
    --------
    >>> m = folium.Map()
    >>> Draw(
    ...     export=True,
    ...     filename='my_data.geojson',
    ...     position='topleft',
    ...     draw_options={'polyline': {'allowIntersection': False}},
    ...     edit_options={'poly': {'allowIntersection': False}}
    ... ).add_to(m)

    For more info please check
    https://leaflet.github.io/Leaflet.draw/docs/leaflet-draw-latest.html
    """
    # Jinja macro that emits the JS wiring the Leaflet.draw control into the
    # parent map and (optionally) the export button's click handler.
    _template = Template(u"""
        {% macro script(this, kwargs) %}
            var options = {
              position: {{ this.position|tojson }},
              draw: {{ this.draw_options|tojson }},
              edit: {{ this.edit_options|tojson }},
            }
            // FeatureGroup is to store editable layers.
            var drawnItems = new L.featureGroup().addTo(
                {{ this._parent.get_name() }}
            );
            options.edit.featureGroup = drawnItems;
            var {{ this.get_name() }} = new L.Control.Draw(
                options
            ).addTo( {{this._parent.get_name()}} );
            {{ this._parent.get_name() }}.on(L.Draw.Event.CREATED, function(e) {
                var layer = e.layer,
                    type = e.layerType;
                var coords = JSON.stringify(layer.toGeoJSON());
                layer.on('click', function() {
                    alert(coords);
                    console.log(coords);
                });
                drawnItems.addLayer(layer);
            });
            {{ this._parent.get_name() }}.on('draw:created', function(e) {
                drawnItems.addLayer(e.layer);
            });
            {% if this.export %}
            document.getElementById('export').onclick = function(e) {
                var data = drawnItems.toGeoJSON();
                var convertedData = 'text/json;charset=utf-8,'
                    + encodeURIComponent(JSON.stringify(data));
                document.getElementById('export').setAttribute(
                    'href', 'data:' + convertedData
                );
                document.getElementById('export').setAttribute(
                    'download', {{ this.filename|tojson }}
                );
            }
            {% endif %}
        {% endmacro %}
        """)

    def __init__(self, export=False, filename='data.geojson',
                 position='topleft', draw_options=None, edit_options=None):
        super(Draw, self).__init__()
        self._name = 'DrawControl'
        self.export = export
        self.filename = filename
        self.position = position
        self.draw_options = draw_options or {}
        self.edit_options = edit_options or {}

    def render(self, **kwargs):
        """Render the control and inject its CSS/JS assets into the figure."""
        super(Draw, self).render(**kwargs)

        figure = self.get_root()
        assert isinstance(figure, Figure), ('You cannot render this Element '
                                            'if it is not in a Figure.')

        # Import Javascripts
        for name, url in _default_js:
            figure.header.add_child(JavascriptLink(url), name=name)

        # Import Css
        for name, url in _default_css:
            figure.header.add_child(CssLink(url), name=name)

        # Inline CSS styling the floating "Export" link over the map.
        export_style = """
            <style>
                #export {
                    position: absolute;
                    top: 5px;
                    right: 10px;
                    z-index: 999;
                    background: white;
                    color: black;
                    padding: 6px;
                    border-radius: 4px;
                    font-family: 'Helvetica Neue';
                    cursor: pointer;
                    font-size: 12px;
                    text-decoration: none;
                    top: 90px;
                }
            </style>
        """
        export_button = """<a href='#' id='export'>Export</a>"""
        if self.export:
            figure.header.add_child(Element(export_style), name='export')
            figure.html.add_child(Element(export_button), name='export_button')
| StarcoderdataPython |
8015164 | <filename>preprocess.py<gh_stars>1-10
"""
@version: 2.0
@author: Jonah
@file: __init__.py
@time: 2021/11/10 12:56
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import math
import multiprocessing
import argparse
import time
from multiprocessing import cpu_count
import sys
from scipy.fftpack import fft
import csv
from plot_format import plot_norm
from kmeans import KernelKMeans, ICA
from utils import *
from wave_freq import *
from features import *
import warnings
from matplotlib.pylab import mpl
# os.getcwd()
np.seterr(invalid='ignore')
class Preprocessing:
def __init__(self, idx, thr_dB, magnification_dB, data_path, processor):
self.idx = idx
self.thr_dB = thr_dB
self.magnification_dB = magnification_dB
self.thr_V = pow(10, self.thr_dB / 20) / pow(10, 6)
self.counts = 0
self.duration = 0
self.amplitude = 0
self.rise_time = 0
self.energy = 0
self.RMS = 0
self.hit_num = 0
self.time = 0
self.channel_num = 0
self.sample_interval = 0
self.freq_max = 0
self.magnification = pow(10, self.magnification_dB / 20)
self.data_path = data_path
self.processor = processor
def skip_n_column(self, file, n=3):
for _ in range(n):
file.readline()
def cal_features(self, dataset, time_label, valid_wave_idx):
start = time_label[valid_wave_idx[0]]
end = time_label[valid_wave_idx[-1]]
self.duration = end - start
max_idx = np.argmax(abs(dataset))
self.amplitude = abs(dataset[max_idx])
self.rise_time = time_label[max_idx] - start
valid_data = dataset[valid_wave_idx[0]:(valid_wave_idx[-1] + 1)]
self.energy = np.sum(np.multiply(pow(valid_data, 2), self.sample_interval))
self.RMS = math.sqrt(self.energy / self.duration)
return valid_data
def cal_counts(self, valid_data):
self.counts = 0
N = len(valid_data)
for idx in range(1, N):
if valid_data[idx - 1] <= self.thr_V <= valid_data[idx]:
self.counts += 1
def cal_freq(self, valid_data, valid_wave_idx):
Fs = 1 / self.sample_interval
N = valid_wave_idx[-1] - valid_wave_idx[0] + 1
frq = (np.arange(N) / N) * Fs
fft_y = fft(valid_data)
abs_y = np.abs(fft_y) / N
half_frq = frq[range(int(N / 2))]
abs_y_half = abs_y[range(int(N / 2))]
abs_y_half[0] = 0
self.freq_max = half_frq[np.argmax(abs_y_half)]
def save_features(self, result):
valid, tra_1, tra_2, tra_3, tra_4 = [], [], [], [], []
txt_name = self.data_path.split('/')[-1] + '.txt'
f = open(txt_name, "w")
f.write("ID, Time(s), Chan, Thr(μV), Thr(dB), Amp(μV), Amp(dB), "
"RiseT(s), Dur(s), Eny(aJ), RMS(μV), Frequency(Hz), Counts\n")
pbar = tqdm(result, ncols=100)
for idx, i in enumerate(pbar):
tmp, tmp_tra_1, tmp_tra_2, tmp_tra_3, tmp_tra_4 = i.get()
valid += tmp
tra_1.append(tmp_tra_1)
tra_2.append(tmp_tra_2)
tra_3.append(tmp_tra_3)
tra_4.append(tmp_tra_4)
pbar.set_description("Exporting Data: {}/{}".format(idx + 1, self.processor))
valid = sorted(valid, key=lambda s: float(s.split(',')[0]))
for i in valid:
f.write(i)
f.close()
# print(valid_data)
return valid, tra_1, tra_2, tra_3, tra_4
def main(self, file_name, data=None, tra_1=None, tra_2=None, tra_3=None, tra_4=None, min_cnts=2):
    """Parse PAC waveform text files and extract AE features and waveforms.

    file_name -- iterable of waveform file paths to parse.
    min_cnts  -- minimum ring-down count for a hit to be exported.
    Returns (feature lines, waveforms chan1, chan2, chan3, chan4).

    Fix: the original declared the accumulator parameters with mutable
    default arguments ([]), which are shared across calls on the same
    object; fresh lists are now created per call (backward compatible).
    """
    data = [] if data is None else data
    tra_1 = [] if tra_1 is None else tra_1
    tra_2 = [] if tra_2 is None else tra_2
    tra_3 = [] if tra_3 is None else tra_3
    tra_4 = [] if tra_4 is None else tra_4
    pbar = tqdm(file_name, ncols=100)
    for name in pbar:
        with open(name, "r") as f:
            # Fixed-position header fields of the PAC text export format.
            self.skip_n_column(f)
            self.sample_interval = float(f.readline()[29:])
            self.skip_n_column(f)
            points_num = int(f.readline()[36:])
            self.channel_num = int(f.readline().strip()[16:])
            self.hit_num = int(f.readline()[12:])
            self.time = float(f.readline()[14:])
            # Remaining lines are raw voltage samples; undo the preamp gain.
            dataset = np.array([float(i.strip("\n")) for i in f.readlines()[1:]]) / self.magnification
            time_label = np.linspace(self.time, self.time + self.sample_interval * (points_num - 1), points_num)
            # calculate the duration, amplitude, rise_time, energy and counts
            valid_wave_idx = np.where(abs(dataset) >= self.thr_V)[0]
            # Store the full waveform (scaled to uV) per channel.
            if self.channel_num == 1:
                tra_1.append([self.time, self.channel_num, self.sample_interval, points_num, dataset*pow(10, 6), self.hit_num])
            elif self.channel_num == 2:
                tra_2.append([self.time, self.channel_num, self.sample_interval, points_num, dataset*pow(10, 6), self.hit_num])
            elif self.channel_num == 3:
                tra_3.append([self.time, self.channel_num, self.sample_interval, points_num, dataset*pow(10, 6), self.hit_num])
            elif self.channel_num == 4:
                tra_4.append([self.time, self.channel_num, self.sample_interval, points_num, dataset*pow(10, 6), self.hit_num])
            # Need at least two above-threshold samples to define a duration.
            if valid_wave_idx.shape[0] > 1:
                valid_data = self.cal_features(dataset, time_label, valid_wave_idx)
                self.cal_counts(valid_data)
                if self.counts > min_cnts:
                    self.cal_freq(valid_data, valid_wave_idx)
                    # CSV row matching the header written by save_features().
                    tmp_feature = '{}, {:.7f}, {}, {:.8f}, {:.1f}, {:.8f}, {:.1f}, {:.7f}, {:.7f}, {:.8f}, {:.8f}' \
                                  ', {:.8f}, {}\n'.format(self.hit_num, self.time, self.channel_num,
                                                          self.thr_V * pow(10, 6), self.thr_dB,
                                                          self.amplitude * pow(10, 6),
                                                          20 * np.log10(self.amplitude * pow(10, 6)),
                                                          self.rise_time, self.duration, self.energy * pow(10, 14),
                                                          self.RMS * pow(10, 6), self.freq_max, self.counts)
                    data.append(tmp_feature)
        pbar.set_description("Process: %s | Calculating: %s" % (self.idx, name.split('_')[2]))
    return data, tra_1, tra_2, tra_3, tra_4
def read_pac_data(self, file_name, tra_1=None, tra_2=None, tra_3=None, tra_4=None):
    """Reload raw waveforms (scaled to uV) from PAC text files, per channel.

    Returns (tra_1, tra_2, tra_3, tra_4), one list per channel.

    Fix: the original used mutable default arguments ([]) for the channel
    accumulators, shared across calls; fresh lists are now created per call.
    """
    tra_1 = [] if tra_1 is None else tra_1
    tra_2 = [] if tra_2 is None else tra_2
    tra_3 = [] if tra_3 is None else tra_3
    tra_4 = [] if tra_4 is None else tra_4
    pbar = tqdm(file_name, ncols=100)
    for name in pbar:
        with open(name, "r") as f:
            # Fixed-position header fields of the PAC text export format.
            self.skip_n_column(f)
            self.sample_interval = float(f.readline()[29:])
            self.skip_n_column(f)
            points_num = int(f.readline()[36:])
            self.channel_num = int(f.readline().strip()[16:])
            self.hit_num = int(f.readline()[12:])
            self.time = float(f.readline()[14:])
            # Raw samples, gain-corrected and converted to microvolts.
            dataset = np.array([float(i.strip("\n")) for i in f.readlines()[1:]]) / self.magnification * pow(10, 6)
            if self.channel_num == 1:
                tra_1.append([self.time, self.channel_num, self.sample_interval, points_num, dataset, self.hit_num])
            elif self.channel_num == 2:
                tra_2.append([self.time, self.channel_num, self.sample_interval, points_num, dataset, self.hit_num])
            elif self.channel_num == 3:
                tra_3.append([self.time, self.channel_num, self.sample_interval, points_num, dataset, self.hit_num])
            elif self.channel_num == 4:
                tra_4.append([self.time, self.channel_num, self.sample_interval, points_num, dataset, self.hit_num])
        pbar.set_description("Process: %s | Calculating: %s" % (self.idx, name.split('_')[2]))
    return tra_1, tra_2, tra_3, tra_4
def read_pac_features(self, res, min_cnts=2):
    """Parse exported feature lines, keeping hits with counts > *min_cnts*.

    Each line in *res* is one comma-separated feature row as written by
    save_features().  Returns (all rows, rows of channels 1-4) as lists
    of mixed int / rounded-float values.
    """
    pri, chan_1, chan_2, chan_3, chan_4 = [], [], [], [], []
    pbar = tqdm(res, ncols=100)
    for i in pbar:
        tmp = []
        ls = i.strip("\n").split(', ')
        # Last column is the ring-down count; filter out weak hits first.
        if int(ls[-1]) > min_cnts:
            # Per-column parse spec: 0 -> int(), otherwise round(float, r).
            # Columns: ID, Time, Chan, Thr(uV), Thr(dB), Amp(uV), Amp(dB),
            #          RiseT, Dur, Eny, RMS, Frequency, Counts.
            for r, j in zip([0, 7, 0, 8, 1, 8, 1, 7, 7, 8, 8, 8, 0], ls):
                tmp.append(int(j) if r == 0 else round(float(j), r))
            pri.append(tmp)
            # Column 2 holds the channel number (1-4).
            if int(ls[2]) == 1:
                chan_1.append(tmp)
            elif int(ls[2]) == 2:
                chan_2.append(tmp)
            elif int(ls[2]) == 3:
                chan_3.append(tmp)
            elif int(ls[2]) == 4:
                chan_4.append(tmp)
        pbar.set_description("Process: %s | Calculating: %s" % (self.idx, ls[0]))
    return pri, chan_1, chan_2, chan_3, chan_4
def convert_pac_data(file_list, data_path, processor, threshold_dB, magnification_dB):
    """Convert a directory of PAC waveform files to features and waveforms.

    Spawns *processor* worker processes, each parsing a slice of the files,
    then merges and exports the results.  Returns
    (waveforms chan1..4, feature matrix, feature rows of chan1..4).
    """
    # check existing file
    tar = data_path.split('/')[-1] + '.txt'
    if tar in file_list:
        # An earlier export exists; ask interactively before overwriting.
        print("=" * 46 + " Warning " + "=" * 45)
        while True:
            ans = input("The exported data file has been detected. Do you want to overwrite it: (Enter 'yes' or 'no') ")
            if ans.strip() == 'yes':
                os.remove(tar)
                break
            elif ans.strip() == 'no':
                sys.exit(0)
            print("Please enter 'yes' or 'no' to continue!")
        # Re-scan the directory after deleting the stale export.
        file_list = os.listdir(data_path)
    # Number of files handled by each worker process.
    each_core = int(math.ceil(len(file_list) / float(processor)))
    result, data_tra, tmp_all = [], [], []
    print("=" * 47 + " Start " + "=" * 46)
    start = time.time()
    # Multiprocessing acceleration
    pool = multiprocessing.Pool(processes=processor)
    for idx, i in enumerate(range(0, len(file_list), each_core)):
        process = Preprocessing(idx, threshold_dB, magnification_dB, data_path, processor)
        result.append(pool.apply_async(process.main, (file_list[i:i + each_core],)))
    # save_features() blocks on the async results and writes the export file.
    pri, tra_1, tra_2, tra_3, tra_4 = process.save_features(result)
    pool.close()
    pool.join()
    # Flatten per-worker waveform lists, per channel, sorted by hit number
    # (the last element of every waveform record).
    for idx, tra in enumerate([tra_1, tra_2, tra_3, tra_4]):
        tra = [j for i in tra for j in i]
        try:
            data_tra.append(sorted(tra, key=lambda x: x[-1]))
        except IndexError:
            data_tra.append([])
            print('Warning: There is no data in channel %d!' % idx)
    # Parse the CSV feature lines into a float matrix; column 2 is channel.
    pri = np.array([np.array(i.strip('\n').split(', ')).astype(np.float32) for i in pri])
    chan_1 = pri[np.where(pri[:, 2] == 1)[0]]
    chan_2 = pri[np.where(pri[:, 2] == 2)[0]]
    chan_3 = pri[np.where(pri[:, 2] == 3)[0]]
    chan_4 = pri[np.where(pri[:, 2] == 4)[0]]
    end = time.time()
    print("=" * 46 + " Report " + "=" * 46)
    print("Calculation Info--Quantity of valid data: %s" % pri.shape[0])
    print("Waveform Info--Channel 1: %d | Channel 2: %d | Channel 3: %d | Channel 4: %d" %
          (len(data_tra[0]), len(data_tra[1]), len(data_tra[2]), len(data_tra[3])))
    print("Features Info--All channel: %d | Channel 1: %d | Channel 2: %d | Channel 3: %d | Channel 4: %d" %
          (pri.shape[0], chan_1.shape[0], chan_2.shape[0], chan_3.shape[0], chan_4.shape[0]))
    print("Finishing time: {} | Time consumption: {:.3f} min".format(time.asctime(time.localtime(time.time())),
                                                                     (end - start) / 60))
    return data_tra[0], data_tra[1], data_tra[2], data_tra[3], pri, chan_1, chan_2, chan_3, chan_4
def main_read_pac_data(file_list, data_path, processor, threshold_dB, magnification_dB):
    """Reload raw waveforms from PAC text files using *processor* workers.

    Returns the per-channel waveform lists (channels 1-4), each sorted by
    hit number (the last element of every waveform record).
    """
    # check existing file
    tar = data_path.split('/')[-1] + '.txt'
    if tar in file_list:
        # Exclude the previously exported feature file from the work list.
        exist_idx = np.where(np.array(file_list) == tar)[0][0]
        file_list = file_list[0:exist_idx] + file_list[exist_idx+1:]
    # Number of files handled by each worker process.
    each_core = int(math.ceil(len(file_list) / float(processor)))
    result, tra_1, tra_2, tra_3, tra_4 = [], [], [], [], []
    data_tra = []
    print("=" * 47 + " Start " + "=" * 46)
    start = time.time()
    # Multiprocessing acceleration
    pool = multiprocessing.Pool(processes=processor)
    for idx, i in enumerate(range(0, len(file_list), each_core)):
        process = Preprocessing(idx, threshold_dB, magnification_dB, data_path, processor)
        result.append(pool.apply_async(process.read_pac_data, (file_list[i:i + each_core],)))
    # Keep the Qt event loop responsive while workers run.
    # NOTE(review): QApplication is not imported in this chunk - confirm the
    # import exists at file top when this is used from a GUI.
    QApplication.processEvents()
    pbar = tqdm(result, ncols=100)
    for idx, i in enumerate(pbar):
        # Block on each worker and collect its per-channel lists.
        tmp_1, tmp_2, tmp_3, tmp_4 = i.get()
        tra_1.append(tmp_1)
        tra_2.append(tmp_2)
        tra_3.append(tmp_3)
        tra_4.append(tmp_4)
        pbar.set_description("Exporting Data: {}/{}".format(idx + 1, processor))
    pool.close()
    pool.join()
    # Flatten each channel's per-worker lists and sort by hit number.
    for idx, tra in enumerate([tra_1, tra_2, tra_3, tra_4]):
        tra = [j for i in tra for j in i]
        try:
            data_tra.append(sorted(tra, key=lambda x: x[-1]))
        except IndexError:
            data_tra.append([])
            print('Warning: There is no data in channel %d!' % idx)
    end = time.time()
    print("=" * 46 + " Report " + "=" * 46)
    print("Channel 1: %d | Channel 2: %d | Channel 3: %d | Channel 4: %d" %
          (len(data_tra[0]), len(data_tra[1]), len(data_tra[2]), len(data_tra[3])))
    print("Finishing time: {} | Time consumption: {:.3f} min".format(time.asctime(time.localtime(time.time())),
                                                                     (end - start) / 60))
    return data_tra[0], data_tra[1], data_tra[2], data_tra[3]
def main_read_pac_features(data_path):
    """Load the exported '<dir>.txt' feature table into float32 arrays.

    Returns (all rows, rows of channels 1-4); column 2 holds the channel.
    """
    feature_file = data_path.split('/')[-1] + '.txt'
    with open(feature_file, 'r') as fin:
        # Skip the header row; drop trailing newline and stray commas.
        rows = [line.strip("\n").strip(',') for line in fin.readlines()[1:]]
    print("=" * 47 + " Start " + "=" * 46)
    tic = time.time()
    pri = np.array([np.array(row.strip('\n').split(', ')).astype(np.float32) for row in rows])
    # Partition rows by channel number (column index 2).
    per_channel = [pri[pri[:, 2] == ch] for ch in (1, 2, 3, 4)]
    toc = time.time()
    print("=" * 46 + " Report " + "=" * 46)
    print("All channel: %d | Channel 1: %d | Channel 2: %d | Channel 3: %d | Channel 4: %d" %
          (pri.shape[0], per_channel[0].shape[0], per_channel[1].shape[0],
           per_channel[2].shape[0], per_channel[3].shape[0]))
    print("Finishing time: {} | Time consumption: {:.3f} min".format(time.asctime(time.localtime(time.time())),
                                                                     (toc - tic) / 60))
    return (pri,) + tuple(per_channel)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-path", "--data_path", type=str,
default=r"D:\data\3D porous TC4-8mA-compression test-z1-0.01-20201010",
help="Absolute path of data(add 'r' in front)")
parser.add_argument("-thr", "--threshold_dB", type=int, default=25, help="Detection threshold")
parser.add_argument("-mag", "--magnification_dB", type=int, default=60, help="Magnification /dB")
parser.add_argument("-cpu", "--processor", type=int, default=cpu_count(), help="Number of Threads")
parser.add_argument("-cnts", "--min_cnts", type=int, default=2, help="Number of Threads")
opt = parser.parse_args()
print("=" * 44 + " Parameters " + "=" * 44)
print(opt)
opt.data_path = opt.data_path.replace('\\', '/')
os.chdir(opt.data_path)
file_list = os.listdir(opt.data_path)
# print(file_list)
data_tra_1, data_tra_2, data_tra_3, data_tra_4, data_pri, chan_1, chan_2, chan_3, chan_4 = convert_pac_data(file_list, opt.data_path, opt.processor, opt.threshold_dB, opt.magnification_dB)
# data_tra_1, data_tra_2, data_tra_3, data_tra_4 = main_read_pac_data(file_list, opt.data_path, opt.processor, opt.threshold_dB, opt.magnification_dB)
# data_pri, chan_1, chan_2, chan_3, chan_4 = main_read_pac_features(opt.data_path, opt.processor, opt.threshold_dB, opt.magnification_dB, 6)
# chan = chan_1
# Time, Amp, RiseT, Dur, Eny, RMS, Counts = chan[:, 1], chan[:, 5], chan[:, 7] * pow(10, 6), chan[:, 8] * pow(10, 6), \
# chan[:, 9], chan[:, 10], chan[:, -1]
# feature_idx = [Amp, Dur, Eny]
# xlabelz = ['Amplitude (μV)', 'Duration (μs)', 'Energy (aJ)']
# ylabelz = ['PDF(A)', 'PDF(D)', 'PDF(E)']
# color_1 = [255 / 255, 0 / 255, 102 / 255] # red
# color_2 = [0 / 255, 136 / 255, 204 / 255] # blue
# status = 'test'
# features = Features(color_1, color_2, Time, feature_idx, status)
# features.plot_correlation(Amp, Eny, xlabelz[0], xlabelz[2])
# features.plot_correlation(Dur, Amp, xlabelz[1], xlabelz[0])
# features.plot_correlation(Dur, Eny, xlabelz[1], xlabelz[2])
# waveform = Waveform(color_1, color_2, data_tra_1, opt.path, 'test', status, 'pac', 24) | StarcoderdataPython |
# <NAME>, 100%
# performance, O(d + n log*n)
# since all information is avaliable before a plan has to be made,
# there is no reason to make a plan at all.
# keep track of sets of nodes that a soldier *could* have travelled between
# everytime two nodes are fogged over together, merge them into one new node
# this is called disjoint set union, or DSU
# track how many soldiers are in each disjoint set, and evacuate the max possible
# when a vehicle is there
# just using DSU isn't fast enough, as each cell in each day's range has to be merged
# and each day's range could be the entire range of days, leading to O(d * n log*n) worst case
# luckily, each day's range is continuous, so the disjoint set can include a right_bound
# to indicate where it ends. Now only cells outside of the current range will be merged.
# Competitive-programming solution: evacuate soldiers using disjoint-set
# union (DSU).  Cells fogged over together are merged into one set; each
# set tracks the soldiers it contains and its rightmost cell, so every
# day's [l, r] range only merges sets not yet absorbed.
# Fix: the final print line was corrupted by a stray dataset marker.
N = int(input())
soldiers = [0] + [int(x) for x in input().split()]  # 1-indexed counts
saved = 0
parent = list(range(N + 1))       # DSU parent pointers
right_bound = list(range(N + 1))  # rightmost cell covered by each set


def parOf(node):
    """Return the DSU representative of *node*, compressing the path."""
    if node != parent[node]:
        parent[node] = parOf(parent[node])
    return parent[node]


for day in range(int(input())):
    l, r, V = [int(x) for x in input().split()]
    x = parOf(l)
    bound = right_bound[x] + 1
    # Merge every still-separate set inside [l, r] into x; right_bound
    # jumps let each cell be merged at most once over all days.
    while bound <= r:
        y = parOf(bound)
        soldiers[x] += soldiers[y]
        parent[y] = x
        right_bound[x] = right_bound[y]
        bound = right_bound[x] + 1
    # Evacuate as many soldiers as the vehicle capacity V allows.
    curr = min(soldiers[x], V)
    soldiers[x] -= curr
    saved += curr
print(saved)
# Repository: techsparksguru/python_ci_automation
import hmac
import json
from datetime import timedelta

from flask import Flask, jsonify, make_response, request, abort, url_for
from flask_httpauth import HTTPBasicAuth
from flask_jwt import JWT, jwt_required, current_identity
from werkzeug.security import safe_str_cmp
class User(object):
    """Minimal in-memory user record for the demo auth backends."""

    def __init__(self, id, email, username, password):
        self.id = id
        self.email = email
        self.username = username
        self.password = password

    def __repr__(self):
        # Serialise the public fields (never the password) as JSON.
        payload = {'id': self.id, 'username': self.username, 'email': self.email}
        return json.dumps(payload)
# Demo accounts; '<EMAIL>' / '<PASSWORD>' are anonymisation placeholders
# left by the dataset export.
users = [
    User(1, '<EMAIL>', 'Suhas', 'abcxyz'),
    User(2, '<EMAIL>', 'Pradeep', '<PASSWORD>'),
    User(3, '<EMAIL>', 'Test', 'abcxyz'),
    User(4, '<EMAIL>', 'Techsparks1', 'abcxyz'),
    User(5, '<EMAIL>', 'Techsparks2', 'abcxyz')
]

# In-memory task store shared by all API versions (not persisted).
tasks = [
    {
        'id': 1,
        'title': u'Buy groceries',
        'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',
        'done': False
    },
    {
        'id': 2,
        'title': u'Learn Python',
        'description': u'Need to find a good Python tutorial on the web',
        'done': False
    }
]

# Lookup tables used by the JWT callbacks (authenticate / identity).
email_table = {u.email: u for u in users}
userid_table = {u.id: u for u in users}
def authenticate(email, password):
    """flask-jwt credential check: return the User on success, else None.

    Fix: werkzeug.security.safe_str_cmp was removed in Werkzeug 2.1; the
    stdlib hmac.compare_digest is the documented constant-time replacement
    (requires ``import hmac`` at file top).
    """
    user = email_table.get(email, None)
    if user and hmac.compare_digest(user.password.encode('utf-8'), password.encode('utf-8')):
        return user
def identity(payload):
    """flask-jwt identity callback: map a decoded token payload to a User."""
    return userid_table.get(payload['identity'], None)
def make_public_task(task):
    """Return a copy of *task* where the 'id' field gains a companion URI."""
    public = {}
    for key, value in task.items():
        if key == 'id':
            public['id'] = value
            public['uri'] = url_for('get_tasks_v1', id=value, _external=True)
        else:
            public[key] = value
    return public
auth = HTTPBasicAuth()


@auth.get_password
def get_password(username):
    # Single hard-coded basic-auth account used by the v2 endpoints.
    if username == 'techsparksuser':
        return '<PASSWORD>'
    return None


@auth.error_handler
def unauthorized():
    # JSON 401 body for failed basic auth.
    return make_response(jsonify({'error': 'Unauthorized access'}), 401)


app = Flask(__name__)
app.debug = True
# NOTE(review): hard-coded secret key and debug=True are acceptable for a
# demo but must not ship to production.
app.config['SECRET_KEY'] = 'super-secret'
app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=36000)  # 10-hour tokens
app.config['JWT_AUTH_USERNAME_KEY'] = 'email'  # authenticate() keys on email
app.config['JWT_AUTH_URL_RULE'] = '/login'
jwt = JWT(app, authenticate, identity)


@app.errorhandler(404)
def not_found(error):
    # Uniform JSON 404 body.
    return make_response(jsonify({'error': 'Not found'}), 404)


@app.route('/user-identity')
@jwt_required()
def user_identified():
    # Echo the authenticated user's repr (JSON string from User.__repr__).
    return '%s' % current_identity
# --- GET endpoints: v1 unauthenticated, v2 HTTP basic auth, v3 JWT ---
@app.route('/todo/api/v1.0/tasks', methods=['GET'])
def get_tasks_v1():
    # Return all tasks with each id expanded into a self-describing URI.
    args = request.args  # NOTE(review): read but unused - confirm intent
    return jsonify({'tasks': [make_public_task(task) for task in tasks]})


@app.route('/todo/api/v2.0/tasks', methods=['GET'])
@auth.login_required
def get_tasks_v2():
    # Basic-auth variant; returns the raw task dicts (no URI expansion).
    return jsonify({'tasks': tasks})


@app.route('/todo/api/v3.0/tasks', methods=['GET'])
@jwt_required()
def get_tasks_v3():
    # JWT variant; returns the raw task dicts.
    return jsonify({'tasks': tasks})


@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task_v1(task_id):
    # Fetch a single task by id; 404 if absent.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    return jsonify({'task': task[0]})


@app.route('/todo/api/v2.0/tasks/<int:task_id>', methods=['GET'])
@auth.login_required
def get_task_v2(task_id):
    # Basic-auth variant of single-task fetch.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    return jsonify({'task': task[0]})


@app.route('/todo/api/v3.0/tasks/<int:task_id>', methods=['GET'])
@jwt_required()
def get_task_v3(task_id):
    # JWT variant of single-task fetch.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    return jsonify({'task': task[0]})
# --- POST endpoints (create): v1 open, v2 basic auth, v3 JWT ---
@app.route('/todo/api/v1.0/tasks', methods=['POST'])
def create_task_v1():
    # Require a JSON body with at least a 'title'; id is last id + 1.
    if not request.json or not 'title' in request.json:
        abort(400)
    task = {
        'id': tasks[-1]['id'] + 1,
        'title': request.json['title'],
        'description': request.json.get('description', ""),
        'done': False
    }
    tasks.append(task)
    return jsonify({'task': task}), 201


@app.route('/todo/api/v2.0/tasks', methods=['POST'])
@auth.login_required
def create_task_v2():
    # Basic-auth variant; identical create logic to v1.
    if not request.json or not 'title' in request.json:
        abort(400)
    task = {
        'id': tasks[-1]['id'] + 1,
        'title': request.json['title'],
        'description': request.json.get('description', ""),
        'done': False
    }
    tasks.append(task)
    return jsonify({'task': task}), 201


@app.route('/todo/api/v3.0/tasks', methods=['POST'])
@jwt_required()
def create_task_v3():
    # JWT variant; identical create logic to v1.
    if not request.json or not 'title' in request.json:
        abort(400)
    task = {
        'id': tasks[-1]['id'] + 1,
        'title': request.json['title'],
        'description': request.json.get('description', ""),
        'done': False
    }
    tasks.append(task)
    return jsonify({'task': task}), 201
# --- PUT endpoints (update): v1 open, v2 basic auth, v3 JWT ---
# NOTE(review): the title check uses `!= str` while the other two use
# `is not str`; behaviourally equivalent here, but stylistically mixed.
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['PUT'])
def update_task_v1(task_id):
    # Validate field types, then merge the provided fields into the task.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    if not request.json:
        abort(400)
    if 'title' in request.json and type(request.json['title']) != str:
        abort(400)
    if 'description' in request.json and type(request.json['description']) is not str:
        abort(400)
    if 'done' in request.json and type(request.json['done']) is not bool:
        abort(400)
    task[0]['title'] = request.json.get('title', task[0]['title'])
    task[0]['description'] = request.json.get('description', task[0]['description'])
    task[0]['done'] = request.json.get('done', task[0]['done'])
    return jsonify({'task': task[0]})


@app.route('/todo/api/v2.0/tasks/<int:task_id>', methods=['PUT'])
@auth.login_required
def update_task_v2(task_id):
    # Basic-auth variant; identical update logic to v1.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    if not request.json:
        abort(400)
    if 'title' in request.json and type(request.json['title']) != str:
        abort(400)
    if 'description' in request.json and type(request.json['description']) is not str:
        abort(400)
    if 'done' in request.json and type(request.json['done']) is not bool:
        abort(400)
    task[0]['title'] = request.json.get('title', task[0]['title'])
    task[0]['description'] = request.json.get('description', task[0]['description'])
    task[0]['done'] = request.json.get('done', task[0]['done'])
    return jsonify({'task': task[0]})


@app.route('/todo/api/v3.0/tasks/<int:task_id>', methods=['PUT'])
@jwt_required()
def update_task_v3(task_id):
    # JWT variant; identical update logic to v1.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    if not request.json:
        abort(400)
    if 'title' in request.json and type(request.json['title']) != str:
        abort(400)
    if 'description' in request.json and type(request.json['description']) is not str:
        abort(400)
    if 'done' in request.json and type(request.json['done']) is not bool:
        abort(400)
    task[0]['title'] = request.json.get('title', task[0]['title'])
    task[0]['description'] = request.json.get('description', task[0]['description'])
    task[0]['done'] = request.json.get('done', task[0]['done'])
    return jsonify({'task': task[0]})
# --- DELETE endpoints: v1 open, v2 basic auth, v3 JWT ---
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['DELETE'])
def delete_task_v1(task_id):
    # Remove the task with the given id; 404 if absent.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    tasks.remove(task[0])
    return jsonify({'result': True})


@app.route('/todo/api/v2.0/tasks/<int:task_id>', methods=['DELETE'])
@auth.login_required
def delete_task_v2(task_id):
    # Basic-auth variant; identical delete logic to v1.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    tasks.remove(task[0])
    return jsonify({'result': True})


@app.route('/todo/api/v3.0/tasks/<int:task_id>', methods=['DELETE'])
@jwt_required()
def delete_task_v3(task_id):
    # JWT variant; identical delete logic to v1.
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        abort(404)
    tasks.remove(task[0])
    return jsonify({'result': True})
@app.route("/")
def all_links():
links = []
methods = []
for rule in app.url_map.iter_rules():
links.append(rule.rule)
methods.append(list(rule.methods))
endpoints = list(zip(links, methods))
endpoints.pop()
endpoints.pop(8)
return jsonify({'endpoints': endpoints})
if __name__ == '__main__':
app.run() | StarcoderdataPython |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-18 19:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the CreditNote model.

    Adds an amount/text/pdf record tied to a user, with paid and
    sent_to_customer status flags.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("shop", "0024_auto_20160605_2126"),
    ]

    operations = [
        migrations.CreateModel(
            name="CreditNote",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created", models.DateTimeField(auto_now_add=True)),
                ("updated", models.DateTimeField(auto_now=True)),
                ("amount", models.DecimalField(decimal_places=2, max_digits=10)),
                ("text", models.TextField()),
                (
                    "pdf",
                    models.FileField(blank=True, null=True, upload_to=b"creditnotes/"),
                ),
                (
                    "paid",
                    models.BooleanField(
                        default=False,
                        help_text="Whether this creditnote has been paid.",
                        verbose_name="Paid?",
                    ),
                ),
                ("sent_to_customer", models.BooleanField(default=False)),
                (
                    "user",
                    models.ForeignKey(
                        help_text="The user this credit note belongs to.",
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="creditnotes",
                        to=settings.AUTH_USER_MODEL,
                        verbose_name="User",
                    ),
                ),
            ],
            options={"abstract": False},
        )
    ]
| StarcoderdataPython |
# File: letsadd/haversine/models.py
from django.db import models
from django.urls import reverse
from .managers import ParkQuerySet
class Park(models.Model):
    """A national park record with coordinates for distance queries.

    The latitude/longitude pair is consumed by ParkQuerySet (see
    managers.py) - presumably for haversine distance filtering; confirm
    against the manager implementation.
    """

    name = models.CharField(max_length=254)
    slug = models.SlugField(unique=True)  # used in the detail URL
    location = models.CharField(max_length=254)
    latitude = models.FloatField()
    longitude = models.FloatField()
    established = models.DateField()
    area = models.FloatField(help_text='acres')
    visitors = models.PositiveIntegerField(blank=True, null=True)
    description = models.TextField(blank=True)
    whs = models.BooleanField('World Heritage Site', default=False)
    br = models.BooleanField('Biosphere Reserve', default=False)
    url = models.URLField('URL')

    # Custom queryset manager providing the distance-aware query API.
    objects = ParkQuerySet.as_manager()

    class Meta:
        ordering = ['name']

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        # Detail view is keyed on the unique slug.
        return reverse('haversine:park_detail', args=[self.slug])
| StarcoderdataPython |
# (dataset metadata removed)
# code golf challenge
# https://code.golf/happy-numbers#python
# wren 20210608
# A happy number is defined by the following Sequence:
# Starting with any positive integer, replace the number
# by the sum of the squares of its digits in base-ten,
# and repeat the process until the number either equals 1
# (where it will stay), or it loops endlessly in a cycle
# that does not include 1. Those numbers for which this
# process ends in 1 are happy numbers, while those that do
# not end in 1 are sad numbers.
# For example, 19 is happy, as the associated Sequence is:
# 12 + 92 = 82
# 82 + 22 = 68
# 62 + 82 = 100
# 12 + 02 + 02 = 1.
# Print all the happy numbers from 1 to 200 inclusive, each on their own line.
# notes
# kind of cheated by just printing 1, though 1 should be accepted by the func
# function happy
def happy(number):
# the sum of each digit in the input number raised to the second power
# the number is converted to a string in order to iterate through the
# digits, then each digit is converted back to an intager and squared
# each squared digit is then added together
sum_number = sum(int(digit)**2 for digit in str(number))
# if that final sum of squared digits is already within the list (after the
# first list value) of sums, initialized in the for loop handing the
# number to the function, the function passes, being in this list means
# that we have already tried out the squared digits of the number and
# that this is not a happy number
if sum_number in contain[1:]:
pass
# elif conditionally, the sum of the squared digits is 1, this means
# that we have fund a happy number, and we can now print the First
# value in the initallized list, which is the number we are iterating
# through
elif sum_number == 1:
print(contain[0])
# if neither of these conditions are met, it measn that we have not yet
# reached our decision point for whether this is a happy number, and
# must recursivally pass the sum of the square digits to the happy function
# after first adding the sum to the initallized list for checking in the
# next pass
else:
contain.append(sum_number)
happy(sum_number)
# Check every number in the inclusive range 1..200 (range is zero based,
# hence the +1 on the upper bound).
for number in range(1,200+1):
    # initialize the number iterating through as the first value in a list
    # so that the function can work upon the variable
    contain = [number]
    # input the number iterating through to the happy function
    happy(number)
| StarcoderdataPython |
# (dataset metadata removed)
# Generated by Django 3.1 on 2020-11-10 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration for the manager app.

    Drops credential_json from CredentialDefinition and adds a
    support_revocation boolean (default True).
    """

    dependencies = [
        ("manager", "0006_auto_20201105_1007"),
    ]

    operations = [
        migrations.RemoveField(
            model_name="credentialdefinition",
            name="credential_json",
        ),
        migrations.AddField(
            model_name="credentialdefinition",
            name="support_revocation",
            field=models.BooleanField(default=True),
        ),
    ]
| StarcoderdataPython |
1811548 | """ This is the Entry point for Twitoff."""
from .app import create_app
APP = create_app() | StarcoderdataPython |
# (dataset metadata removed)
from fontbakery.codetesting import (assert_PASS,
assert_results_contain,
CheckTester,
TEST_FILE)
from fontbakery.checkrunner import FAIL
from fontbakery.profiles import opentype as opentype_profile
from fontTools.ttLib import TTFont
def test_check_dsig():
    """ Does the font have a DSIG table ?

    Exercises the com.google.fonts/check/dsig check in both directions:
    a font with a DSIG table must PASS, and the same font with the table
    deleted must FAIL with message id 'lacks-signature'.
    """
    check = CheckTester(opentype_profile,
                        "com.google.fonts/check/dsig")

    # Our reference Cabin Regular font is good (there's a DSIG table declared):
    ttFont = TTFont(TEST_FILE("cabin/Cabin-Regular.ttf"))

    # So it must PASS the check:
    assert_PASS(check(ttFont),
                'with a good font...')

    # Then we remove the DSIG table so that we get a FAIL:
    del ttFont['DSIG']
    assert_results_contain(check(ttFont),
                           FAIL, 'lacks-signature',
                           'with a font lacking a DSIG table...')
| StarcoderdataPython |
#!/usr/bin/env python
from __future__ import print_function
import skimage as skimage
from skimage import transform, color, exposure, io
from skimage.viewer import ImageViewer
import random
from random import choice
import numpy as np
from collections import deque
import time
import math
import os
import pandas as pd
import cv2
import csv
from PIL import Image
import json
import keras
from keras.models import model_from_json
from keras.models import Sequential, load_model, Model
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, Dense, Flatten, merge, MaxPooling2D, Input, AveragePooling2D, Lambda, Activation, Embedding
from keras.optimizers import SGD, Adam, rmsprop
from keras import backend as K
from keras.utils import np_utils
from keras.preprocessing.image import array_to_img, img_to_array
from vizdoom import DoomGame, ScreenResolution
from vizdoom import *
import itertools as it
from time import sleep
import tensorflow as tf
from networks import Networks
# Earlier 5-action set, kept disabled for reference:
#DEATHMATCH_ACTION5_NAME = [
#    "ATTACK",
#    "MOVE_FORWARD",
#    "MOVE_BACKWARD",
#    "TURN_LEFT",
#    "TURN_RIGHT"
#]
# Action names indexed by the model's action id.
# NOTE(review): the name still says "5" but the list now holds 7 actions.
DEATHMATCH_ACTION5_NAME = [
    "MOVE_LEFT",
    "MOVE_RIGHT",
    "ATTACK",
    "MOVE_FORWARD",
    "MOVE_BACKWARD",
    "TURN_LEFT",
    "TURN_RIGHT"
]
def preprocessImg(img, size):
    """Move the channel axis last, resize to *size* and convert to grayscale."""
    channels_last = np.rollaxis(img, 0, 3)  # axis 0 moved to the end
    resized = skimage.transform.resize(channels_last, size, mode='constant')
    return skimage.color.rgb2gray(resized)
def ResizeImg(img, size):
    """Move the channel axis last and resize to *size* (no grayscale step)."""
    channels_last = np.rollaxis(img, 0, 3)  # axis 0 moved to the end
    return skimage.transform.resize(channels_last, size, mode='constant')
# --- Run-mode switches and sample-recording settings ---
bTrain = True            # training mode flag
bUseImitation = False    # whether to drive actions from the imitation model
bRecordSamples = False   # whether to record (screen, action) samples
nMaxSamples = 1000       # cap on recorded samples
nSamples = 0             # running sample counter
gameCfg = "./scenarios/deathmatch_7action.cfg"  # ViZDoom scenario config
# This is for saving model of imitation learning.
model_path = "../ViZDoom-models/CarCloneModel-deathmatch-50000-epoch10-5action-256x256-modify1/"
class CNNAction:
    """Behavioural-cloning action provider backed by a saved Keras model.

    Loads the imitation model from ``model_path`` and maps game screens to
    action ids; can also record (screen, action) samples to a CSV + image
    directory for later training.
    """

    def __init__(self, gameName):
        # Load the serialized Keras architecture and weights.
        model_json = model_path + "test_model.json"
        model_h5 = model_path + "test_model.h5"
        with open(model_json, 'r') as jfile:
            self.model = model_from_json(json.load(jfile))
        self.model.compile("adam", "categorical_crossentropy")
        self.model.load_weights(model_h5)
        self.imgList = []  # rolling buffer of recent frames (see f_eval)
        self.model.summary()
        # Intermediate frame size and final network input size.
        self.w1 = 256
        self.h1 = 256
        self.inputW = 128
        self.inputH = 128
        self.frame_per_action = 4
        self.epsilon = 1.0
        self.initial_epsilon = 1.0
        self.final_epsilon = 0.0001
        self.observe = 2000
        # Performance Statistics
        self.stats_window_size = 50  # window size for computing rolling statistics
        self.mavg_score = []  # Moving Average of Survival Time
        self.var_score = []  # Variance of Survival Time
        self.mavg_ammo_left = []  # Moving Average of Ammo used
        self.mavg_kill_counts = []  # Moving Average of Kill Counts
        # Sample-recording output layout: ImitationData/<game>/img + test.csv
        dataPath = "ImitationData/" + gameName
        if not os.path.exists(dataPath):
            os.mkdir(dataPath)
        imgPath = dataPath + "/img"
        if not os.path.exists(imgPath):
            os.mkdir(imgPath)
        self.sampleNum = 0
        self.imgPath = imgPath
        self.dataPath = dataPath
        self.cvsPath = dataPath + "/test.csv"
        self.sampleCSVFile = open(self.cvsPath, "w")
        self.sampleCSVWriter = csv.writer(self.sampleCSVFile)
        self.sampleCSVWriter.writerow(["name", "action", "action_name"])

    def GenerateSamples(self, screen, action):
        """Record one (screen, action) pair: a resized JPEG plus a CSV row."""
        self.sampleNum = self.sampleNum + 1
        t = time.time()
        now = int(round(t*1000))
        timeStr = time.strftime('%Y-%m-%d-%H-%M-%S',time.localtime(now/1000))
        savedFileName = "%s/doom-%s-%d.jpg" % (self.imgPath, timeStr, self.sampleNum)
        self.sampleCSVWriter.writerow([savedFileName, action, DEATHMATCH_ACTION5_NAME[action]])
        self.sampleCSVFile.flush()
        # skimage.io.imsave("hy.jpg", screen.transpose(1, 2, 0))
        dst = ResizeImg(screen, (256, 256))
        skimage.io.imsave(savedFileName, dst)
        return

    def next_action(self, state, save_graph=False):
        """Return the model-predicted action id for *state*."""
        action_id = self.f_eval(state)
        return action_id

    def reset(self):
        # No per-episode state to clear in this implementation.
        pass
        # prev_state is only used for evaluation, so has a batch size of 1
        # self.prev_state = self.init_state_e

    def prepare_f_eval_args(self, state):
        """
        Prepare inputs for evaluation.
        """
        screen = np.float32(state)
        return screen

    def f_eval(self, state):
        """Run the imitation model on one frame and return the top action id.

        The multi-frame stacking logic below is disabled (commented out);
        only the single most recent frame is used.
        """
        screen = self.prepare_f_eval_args(state)
        img = screen
        # print (img.shape)
        # Resize to the intermediate size (channels-last for cv2).
        img = cv2.resize(img.transpose(1, 2, 0), (self.w1, self.h1), interpolation=cv2.INTER_AREA)
        self.imgList.append(img)
        # if len(self.imgList) < 4:
        #     return 0
        # img1Int = self.imgList[0].transpose(2, 1, 0).astype(int)
        img1 = array_to_img(self.imgList[0].astype(int))
        # img2 = array_to_img(self.imgList[1].astype(int))
        # img3 = array_to_img(self.imgList[2].astype(int))
        # img4 = array_to_img(self.imgList[3].astype(int))
        w = self.w1
        h = self.h1
        # Paste the frame onto a canvas (layout kept from the 4-frame
        # version), save a debug copy, then shrink to the network input.
        merge_img = Image.new('RGB', (w, h), 0xffffff)
        merge_img.paste(img1, (0, 0))
        # merge_img.paste(img2, (w, 0))
        # merge_img.paste(img3, (0, h))
        # merge_img.paste(img4, (w, h))
        merge_img.save("hy.jpg")
        merge_img = merge_img.resize((self.inputW, self.inputH))
        # Normalise pixels to [-0.5, 0.5] and add the batch dimension.
        img5 = img_to_array(merge_img).transpose(0, 1, 2)
        img5 = img5.astype("float32")
        img5 = (img5 * (1. / 255)) - 0.5
        imgs = img5[None, :, :, :]
        # print (imgs.shape)
        action_id = self.model.predict(imgs, batch_size=1)
        # Highest-scoring action first.
        action_list = np.argsort(-action_id, axis=1)
        self.imgList.pop(0)
        return int(action_list[0][0])
class C51Agent:
    """Categorical (C51) double-DQN agent for ViZDoom.

    The action-value distribution is represented on ``num_atoms`` fixed
    support points ``self.z`` spanning [v_min, v_max]; the network outputs,
    per action, a probability mass over those atoms.
    """

    def __init__(self, state_size, action_size, num_atoms, gameName):
        # Environment dimensions.
        self.state_size = state_size
        self.action_size = action_size
        # Hyper-parameters for the DQN.
        self.gamma = 0.99
        self.learning_rate = 0.0001
        self.epsilon = 1.0
        self.initial_epsilon = 1.0
        self.final_epsilon = 0.0001
        self.batch_size = 32
        self.observe = 2000
        self.explore = 100000  # orig: 50000
        self.frame_per_action = 4
        self.update_target_freq = 3000
        self.timestep_per_train = 100  # Number of timesteps between training interval
        # Fixed support (atoms) of the value distribution.
        self.num_atoms = num_atoms  # 51 for C51
        self.v_max = 30  # Max possible score for Defend the center is 26 - 0.1*26 = 23.4
        self.v_min = -10  # -0.1*26 - 1 = -3.6
        self.delta_z = (self.v_max - self.v_min) / float(self.num_atoms - 1)
        self.z = [self.v_min + i * self.delta_z for i in range(self.num_atoms)]
        # Replay memory, trimmed to max_memory transitions.
        self.memory = deque()
        self.max_memory = 100000  # orig: 50000 # number of previous transitions to remember
        # Online and target value-distribution networks (assigned by the caller).
        self.model = None
        self.target_model = None
        # Performance statistics.
        self.stats_window_size = 50  # window size for computing rolling statistics
        self.mavg_score = []  # Moving Average of Survival Time
        self.var_score = []  # Variance of Survival Time
        self.mavg_ammo_left = []  # Moving Average of Ammo used
        self.mavg_kill_counts = []  # Moving Average of Kill Counts
        # Imitation-learning sample recording (CSV of image path + action).
        dataPath = "ImitationData/" + gameName
        if not os.path.exists(dataPath):
            os.mkdir(dataPath)
        imgPath = dataPath + "/img"
        if not os.path.exists(imgPath):
            os.mkdir(imgPath)
        self.sampleNum = 0
        self.imgPath = imgPath
        self.dataPath = dataPath
        self.cvsPath = dataPath + "/test.csv"
        self.sampleCSVFile = open(self.cvsPath, "w")
        self.sampleCSVWriter = csv.writer(self.sampleCSVFile)
        self.sampleCSVWriter.writerow(["name", "action", "action_name"])

    def update_target_model(self):
        """Copy the online network's weights into the target network."""
        self.target_model.set_weights(self.model.get_weights())

    def GenerateSamples(self, screen, action):
        """Record one (frame, action) sample for imitation learning."""
        self.sampleNum = self.sampleNum + 1
        t = time.time()
        now = int(round(t * 1000))
        timeStr = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(now / 1000))
        savedFileName = "%s/doom-%s-%d.jpg" % (self.imgPath, timeStr, self.sampleNum)
        self.sampleCSVWriter.writerow([savedFileName, action, DEATHMATCH_ACTION5_NAME[action]])
        # Flush so already-recorded samples survive an abrupt process kill.
        self.sampleCSVFile.flush()
        dst = ResizeImg(screen, (256, 256))
        skimage.io.imsave(savedFileName, dst)
        return

    def get_action(self, state, bTrain=True):
        """Epsilon-greedy action selection (pure greedy when bTrain is False)."""
        if bTrain:
            if np.random.rand() <= self.epsilon:
                action_idx = random.randrange(self.action_size)
            else:
                action_idx = self.get_optimal_action(state)
        else:
            action_idx = self.get_optimal_action(state)
        return action_idx

    def get_optimal_action(self, state):
        """Return the action with the highest expected value E[z] for *state*."""
        z = self.model.predict(state)  # list of per-action atom distributions
        z_concat = np.vstack(z)
        # Expected value of each action's distribution: sum_i p_i * z_i.
        q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)
        action_idx = np.argmax(q)
        return action_idx

    def shape_reward(self, r_t, misc, prev_misc, t):
        """Shape the raw reward from game-variable deltas.

        misc is [KILLCOUNT, AMMO, HEALTH]: +1 per kill, -0.2 for spending
        ammo, -0.1 for losing health.
        """
        if (misc[0] > prev_misc[0]):  # kill
            r_t = r_t + 1
        if (misc[1] < prev_misc[1]):  # Use ammo
            r_t = r_t - 0.2
        if (misc[2] < prev_misc[2]):  # Loss HEALTH
            r_t = r_t - 0.1
        return r_t

    def replay_memory(self, s_t, action_idx, r_t, s_t1, is_terminated, t):
        """Store one <s, a, r, s'> transition, anneal epsilon, sync target net."""
        self.memory.append((s_t, action_idx, r_t, s_t1, is_terminated))
        if self.epsilon > self.final_epsilon and t > self.observe:
            self.epsilon -= (self.initial_epsilon - self.final_epsilon) / self.explore
        if len(self.memory) > self.max_memory:
            self.memory.popleft()
        # Periodically refresh the target network.
        if t % self.update_target_freq == 0:
            self.update_target_model()

    def train_replay(self):
        """Train on a random minibatch using the C51 categorical projection.

        Returns the Keras loss history list of the fit call.
        """
        num_samples = min(self.batch_size * self.timestep_per_train, len(self.memory))
        replay_samples = random.sample(self.memory, num_samples)
        state_inputs = np.zeros(((num_samples,) + self.state_size))
        next_states = np.zeros(((num_samples,) + self.state_size))
        # FIX: was the module-level global `action_size`; use the instance field.
        m_prob = [np.zeros((num_samples, self.num_atoms)) for i in range(self.action_size)]
        action, reward, done = [], [], []
        for i in range(num_samples):
            state_inputs[i, :, :, :] = replay_samples[i][0]
            action.append(replay_samples[i][1])
            reward.append(replay_samples[i][2])
            next_states[i, :, :, :] = replay_samples[i][3]
            done.append(replay_samples[i][4])
        # Double-DQN: select the next action with the online network...
        z = self.model.predict(next_states)  # Return a list [32x51, 32x51, 32x51]
        # FIX: ...but evaluate its distribution with the *target* network
        # (was self.model, which left the target network unused in training).
        z_ = self.target_model.predict(next_states)
        optimal_action_idxs = []
        z_concat = np.vstack(z)
        q = np.sum(np.multiply(z_concat, np.array(self.z)), axis=1)  # length (num_atoms x num_actions)
        q = q.reshape((num_samples, self.action_size), order='F')
        optimal_action_idxs = np.argmax(q, axis=1)
        # Project the next-state distribution (of the chosen action) onto the
        # fixed support of the current state (distributional Bellman update).
        # NOTE(review): when bj lands exactly on an atom, m_l == m_u and both
        # weights below are 0, dropping that probability mass -- confirm.
        for i in range(num_samples):
            if done[i]:  # Terminal state: distribution collapses to one point.
                Tz = min(self.v_max, max(self.v_min, reward[i]))
                bj = (Tz - self.v_min) / self.delta_z
                m_l, m_u = math.floor(bj), math.ceil(bj)
                m_prob[action[i]][i][int(m_l)] += (m_u - bj)
                m_prob[action[i]][i][int(m_u)] += (bj - m_l)
            else:
                for j in range(self.num_atoms):
                    Tz = min(self.v_max, max(self.v_min, reward[i] + self.gamma * self.z[j]))
                    bj = (Tz - self.v_min) / self.delta_z
                    m_l, m_u = math.floor(bj), math.ceil(bj)
                    m_prob[action[i]][i][int(m_l)] += z_[optimal_action_idxs[i]][i][j] * (m_u - bj)
                    m_prob[action[i]][i][int(m_u)] += z_[optimal_action_idxs[i]][i][j] * (bj - m_l)
        loss = self.model.fit(state_inputs, m_prob, batch_size=self.batch_size, epochs=1, verbose=0)
        return loss.history['loss']

    def load_model(self, name):
        """Load network weights from *name* into the online model."""
        self.model.load_weights(name)

    def save_model(self, name):
        """Save the online model's weights to *name*."""
        self.model.save_weights(name)
if __name__ == "__main__":
    # NOTE(review): gameCfg, bUseImitation, bTrain, bRecordSamples, nSamples,
    # nMaxSamples, preprocessImg, Networks and sleep are assumed to be
    # defined earlier in this file -- confirm.
    gameCfgFile = os.path.basename(gameCfg)
    gameName, extension = os.path.splitext(gameCfgFile)
    # Avoid Tensorflow eating up all GPU memory.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)
    # Set up the ViZDoom environment.
    game = DoomGame()
    game.load_config(gameCfg)
    game.set_sound_enabled(False)
    game.set_screen_resolution(ScreenResolution.RES_640X480)
    game.set_window_visible(True)
    game.init()
    game.new_episode()
    game_state = game.get_state()
    misc = game_state.game_variables  # [KILLCOUNT, AMMO, HEALTH]
    prev_misc = misc
    action_size = game.get_available_buttons_size()
    img_rows, img_cols = 64, 64
    # Convert image into Black and white
    img_channels = 4  # We stack 4 frames
    # C51
    num_atoms = 51
    state_size = (img_rows, img_cols, img_channels)
    if bUseImitation:
        # Imitation agent: a pre-trained CNN picks actions directly.
        agent = CNNAction(gameName)
    else:
        agent = C51Agent(state_size, action_size, num_atoms, gameName)
        agent.model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)
        agent.target_model = Networks.value_distribution_network(state_size, num_atoms, action_size, agent.learning_rate)
        if not bTrain:
            file = "./models/" + "c51_ddqn_" + gameName + ".h5"
            agent.load_model(file)
    # Start training
    # NOTE(review): CNNAction defines no initial_epsilon attribute, so this
    # looks like it would fail on the bUseImitation path; `epsilon` is also
    # never read again below -- confirm.
    epsilon = agent.initial_epsilon
    GAME = 0
    t = 0
    max_life = 0  # Maximum episode life (Proxy for agent performance)
    life = 0
    # Build the initial 4-frame stacked state.
    x_t = game_state.screen_buffer  # 480 x 640
    x_t = preprocessImg(x_t, size=(img_rows, img_cols))
    s_t = np.stack(([x_t]*4), axis=2)  # It becomes 64x64x4
    s_t = np.expand_dims(s_t, axis=0)  # 1x64x64x4
    is_terminated = game.is_episode_finished()
    # Buffers to compute rolling statistics.
    life_buffer, ammo_buffer, kills_buffer = [], [], []
    while not game.is_episode_finished():
        loss = 0
        r_t = 0
        a_t = np.zeros([action_size])
        # Choose an action: imitation CNN, or epsilon-greedy C51.
        if bUseImitation:
            action_idx = agent.next_action(game_state.screen_buffer)
        else:
            action_idx = agent.get_action(s_t, bTrain)
        if not bTrain and bRecordSamples:
            agent.GenerateSamples(game_state.screen_buffer, action_idx)
            # NOTE(review): nSamples is assumed to be initialised before this block.
            nSamples += 1
            if nSamples > nMaxSamples:
                break
        a_t[action_idx] = 1
        a_t = a_t.astype(int)
        game.set_action(a_t.tolist())
        skiprate = agent.frame_per_action
        game.advance_action(skiprate)
        game_state = game.get_state()  # Observe again after we take the action
        is_terminated = game.is_episode_finished()
        r_t = game.get_last_reward()  # each frame we get reward of 0.1, so 4 frames will be 0.4
        if (is_terminated):
            if (life > max_life):
                max_life = life
            GAME += 1
            life_buffer.append(life)
            ammo_buffer.append(misc[1])
            kills_buffer.append(misc[0])
            # print ("Episode Finish ", misc)
            print ("Episode: lifetime(%d) ammo(%d) kills(%d)" % (life, misc[1], misc[0]))
            game.new_episode()
            game_state = game.get_state()
            misc = game_state.game_variables
            x_t1 = game_state.screen_buffer
        # NOTE(review): repeats the assignment made in the branch above;
        # harmless but redundant when an episode just ended.
        x_t1 = game_state.screen_buffer
        misc = game_state.game_variables
        x_t1 = preprocessImg(x_t1, size=(img_rows, img_cols))
        x_t1 = np.reshape(x_t1, (1, img_rows, img_cols, 1))
        # Slide the newest frame into the 4-frame stack.
        s_t1 = np.append(x_t1, s_t[:, :, :, :3], axis=3)
        if bUseImitation:
            r_t = 0
        else:
            r_t = agent.shape_reward(r_t, misc, prev_misc, t)
        if (is_terminated):
            life = 0
        else:
            life += 1
        # update the cache
        prev_misc = misc
        if not bUseImitation:
            if bTrain:
                # save the sample <s, a, r, s'> to the replay memory and decrease epsilon
                agent.replay_memory(s_t, action_idx, r_t, s_t1, is_terminated, t)
                # Do the training
                if t > agent.observe and t % agent.timestep_per_train == 0:
                    loss = agent.train_replay()
            else:
                sleep(0.01)
        s_t = s_t1
        t += 1
        # save progress every 10000 iterations
        if not bUseImitation:
            if t % 10000 == 0 and bTrain:
                file = "./models/" + "c51_ddqn_" + gameName + ".h5"
                print("Now we save model: %s" %(file))
                agent.model.save_weights(file, overwrite=True)
            # print info
            state = ""
            if t <= agent.observe:
                state = "observe"
            elif t > agent.observe and t <= agent.observe + agent.explore:
                state = "explore"
            else:
                state = "train"
        else:
            state = "observe"
        if (is_terminated):
            if bUseImitation:
                print("TIME", t, "/ GAME", GAME, "/ ACTION", action_idx,
                      "/ LIFE", max_life, "/ LOSS", loss)
            else:
                print("TIME", t, "/ GAME", GAME, "/ STATE", state,
                      "/ EPSILON", agent.epsilon, "/ ACTION", action_idx,
                      "/ REWARD", r_t, "/ LIFE", max_life, "/ LOSS", loss)
            # Training times.
            if GAME > 5000:
                break
            # Save Agent's Performance Statistics
            if bUseImitation:
                if GAME % agent.stats_window_size == 0:
                    print("Update Rolling Statistics")
                    agent.mavg_score.append(np.mean(np.array(life_buffer)))
                    agent.var_score.append(np.var(np.array(life_buffer)))
                    agent.mavg_ammo_left.append(np.mean(np.array(ammo_buffer)))
                    agent.mavg_kill_counts.append(np.mean(np.array(kills_buffer)))
                    # Reset rolling stats buffer
                    life_buffer, ammo_buffer, kills_buffer = [], [], []
                    # Write Rolling Statistics to file
                    with open("statistics/imitation_stats.txt", "w") as stats_file:
                        stats_file.write('Game: ' + str(GAME) + '\n')
                        stats_file.write('Max Score: ' + str(max_life) + '\n')
                        stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\n')
                        stats_file.write('var_score: ' + str(agent.var_score) + '\n')
                        stats_file.write('mavg_ammo_left: ' + str(agent.mavg_ammo_left) + '\n')
                        stats_file.write('mavg_kill_counts: ' + str(agent.mavg_kill_counts) + '\n')
            else:
                if GAME % agent.stats_window_size == 0 and t > agent.observe:
                    print("Update Rolling Statistics")
                    agent.mavg_score.append(np.mean(np.array(life_buffer)))
                    agent.var_score.append(np.var(np.array(life_buffer)))
                    agent.mavg_ammo_left.append(np.mean(np.array(ammo_buffer)))
                    agent.mavg_kill_counts.append(np.mean(np.array(kills_buffer)))
                    # Reset rolling stats buffer
                    life_buffer, ammo_buffer, kills_buffer = [], [], []
                    # Write Rolling Statistics to file
                    file = "./statistics/" + "c51_ddqn_stats_" + gameName + ".txt"
                    with open(file, "w") as stats_file:
                        stats_file.write('Game: ' + str(GAME) + '\n')
                        stats_file.write('Max Score: ' + str(max_life) + '\n')
                        stats_file.write('mavg_score: ' + str(agent.mavg_score) + '\n')
                        stats_file.write('var_score: ' + str(agent.var_score) + '\n')
                        stats_file.write('mavg_ammo_left: ' + str(agent.mavg_ammo_left) + '\n')
                        stats_file.write('mavg_kill_counts: ' + str(agent.mavg_kill_counts) + '\n')
| StarcoderdataPython |
5158614 | from typing import List, Optional
from fastapi.encoders import jsonable_encoder
from .enums import ReportTypes
from .models import Report, ReportCreate, ReportUpdate
def get(*, db_session, report_id: int) -> Optional[Report]:
    """Return the report with the given id, or None if it does not exist."""
    query = db_session.query(Report).filter(Report.id == report_id)
    return query.one_or_none()
def get_most_recent_by_incident_id_and_type(
    *, db_session, incident_id: int, report_type: ReportTypes
) -> Optional[Report]:
    """Return the newest report of *report_type* for the incident, or None."""
    query = (
        db_session.query(Report)
        .filter(Report.incident_id == incident_id)
        .filter(Report.type == report_type)
    )
    return query.order_by(Report.created_at.desc()).first()
def get_all_by_incident_id_and_type(
    *, db_session, incident_id: int, report_type: ReportTypes
) -> Optional[Report]:
    """Return a query for all reports of *report_type* for the incident.

    NOTE(review): this returns the (unexecuted) query object, not a single
    Report -- the annotation looks inaccurate; confirm against callers.
    """
    query = db_session.query(Report).filter(Report.incident_id == incident_id)
    return query.filter(Report.type == report_type)
def get_all_by_type(*, db_session, report_type: ReportTypes) -> List[Optional[Report]]:
    """Return a query for every report of the given type."""
    query = db_session.query(Report)
    return query.filter(Report.type == report_type)
def get_all(*, db_session) -> List[Optional[Report]]:
    """Return a query over every report, unfiltered."""
    return db_session.query(Report)
def create(*, db_session, report_in: ReportCreate) -> Report:
    """Persist and return a new report built from *report_in*."""
    new_report = Report(**report_in.dict())
    db_session.add(new_report)
    db_session.commit()
    return new_report
def update(*, db_session, report: Report, report_in: ReportUpdate) -> Report:
    """Apply the fields explicitly set on *report_in* to *report* and persist it."""
    current = jsonable_encoder(report)
    # skip_defaults: only fields the caller actually supplied are applied.
    changes = report_in.dict(skip_defaults=True)
    for field in current:
        if field in changes:
            setattr(report, field, changes[field])
    db_session.add(report)
    db_session.commit()
    return report
def delete(*, db_session, report_id: int):
    """Delete the report with the given id (no-op if it does not exist)."""
    db_session.query(Report).filter(Report.id == report_id).delete()
    db_session.commit()
| StarcoderdataPython |
6491408 | <reponame>robot-1/demuxts<filename>DemuxTS/dvbobjects/descriptors.py
class descriptor():
    """Base class for DVB descriptors parsed from a transport stream."""

    def __init__(self, stream):
        self.stream = stream
        # Tag and length are filled in later by parsing code.
        self.tag = None
        self.length = None


class AssociationTagDescriptor(descriptor):
    """DSM-CC association tag descriptor."""

    def __init__(self, stream=None):
        # FIX: the original called super().__init__(mpkt) with an undefined
        # name, so instantiation always raised NameError.  Accept the stream
        # explicitly (defaulting to None to stay compatible with zero-arg calls).
        super(AssociationTagDescriptor, self).__init__(stream)
        self.association = None
        self.use = None
        self.selector_length = None
        self.transaction_id = None
        self.timeout = None

    def get_dsi_info(self):
        # Placeholder -- not yet implemented.
        pass

    def __str__(self):
        # NOTE(review): "Desciptor" typo preserved; callers may compare this string.
        return "Association Tag Desciptor Type"
11237747 | #!/usr/bin/python
"""
All hardcoded values across weatherSensor is handled here
"""
class Constant(object):
    """Registry of every hard-coded string used across weatherSensor.

    Configuration section/key names, sensor-type options, and logger /
    config-file locations all live here so no other module embeds
    literal text.
    """

    # [APP] section.
    CONFIG_SECTION_APP = 'APP'
    USE_MOCK_SENSOR = 'use_mock_sensor'
    POLLING_INTERVAL = 'polling_interval'

    # Per-sensor configuration sections.
    CONFIG_SECTION_TEMPERATURE = 'SENSOR_TEMPERATURE'
    CONFIG_SECTION_HUMIDITY = 'SENSOR_HUMIDITY'
    CONFIG_SECTION_LIGHT = 'SENSOR_LIGHT'
    CONFIG_SECTION_CPU = 'SENSOR_CPU'
    CONFIG_SECTION_IP_ADDRESS = 'SENSOR_IP_ADDRESS'

    # Keys shared by the sensor sections.
    SENSOR_ENABLE = 'sensor_enable'
    SENSOR_CALIBRATION = 'sensor_calibration'
    SENSOR_JSON_KEY = 'sensor_json_key'
    SENSOR_GPIO_PIN = 'sensor_gpio_pin'
    SENSOR_TYPE = 'sensor_type'
    SENSOR_TYPE_OPTION_HTU21D = 'HTU21D'
    SENSOR_TYPE_OPTION_SHT31D = 'SHT31'

    # [MQTT] section.
    CONFIG_SECTION_MQTT = 'MQTT'
    MQTT_HOST = 'mqtt_host'
    MQTT_PORT = 'mqtt_port'
    MQTT_TOPIC = 'mqtt_topic'

    # [WIFI] section.
    CONFIG_SECTION_WIFI = 'WIFI'
    SENSOR_IP_ADDRESS_INTERFACE = 'interface'
    WIFI_PING_HOST = 'wifi_ping_host'

    # Logging / configuration file locations.
    LOGGER_NAME = 'sensorLogger'
    LOGGER_FILE_NAME = 'config/logging.ini'
    CONFIG_FILE_NAME = ['config/configuration.ini', '../config/configuration.ini']

    def __init__(self):
        pass
| StarcoderdataPython |
8117612 | <filename>benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_ml/SystemIPC/cmp_lbm/power.py
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0625215,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.251796,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.441843,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.113259,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.196123,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.112482,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.421864,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0442103,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.66993,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0834736,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00410572,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0488594,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0303643,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.132333,
'Execution Unit/Register Files/Runtime Dynamic': 0.03447,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.135193,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.306863,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.42037,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 6.18823e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 6.18823e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 5.35724e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 2.05599e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000436186,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000613523,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000605007,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.02919,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.85673,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0587278,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0991423,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.16674,
'Instruction Fetch Unit/Runtime Dynamic': 0.188279,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0741613,
'L2/Runtime Dynamic': 0.046456,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.89657,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.426293,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0213349,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0213349,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.99773,
'Load Store Unit/Runtime Dynamic': 0.552844,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0526083,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.105217,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0186709,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0197777,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.115445,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00964849,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.306221,
'Memory Management Unit/Runtime Dynamic': 0.0294262,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 16.7765,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.291221,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00929577,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0519787,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.352496,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 2.58987,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.033408,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.228929,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.236032,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0524199,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0845514,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0426787,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.17965,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0237669,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.25319,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0445915,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00219873,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0261458,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0162609,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0707373,
'Execution Unit/Register Files/Runtime Dynamic': 0.0184597,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0634357,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.145481,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.977897,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 3.33075e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 3.33075e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 2.88174e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.10499e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00023359,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000329022,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000326259,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0156321,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 0.994333,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0314966,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0530935,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.26111,
'Instruction Fetch Unit/Runtime Dynamic': 0.100877,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0402462,
'L2/Runtime Dynamic': 0.0247094,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.59195,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.229922,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0114795,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0114794,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.64615,
'Load Store Unit/Runtime Dynamic': 0.298014,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0283064,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0566124,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.010046,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.010647,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.061824,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00517368,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.23519,
'Memory Management Unit/Runtime Dynamic': 0.0158207,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 13.0254,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.1173,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00379256,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.023859,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.144951,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.56227,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0323358,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.228086,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.228414,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0507559,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0818673,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0413239,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.173947,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0230314,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.23937,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0431523,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00212893,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0253139,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0157447,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0684662,
'Execution Unit/Register Files/Runtime Dynamic': 0.0178737,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.061415,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.140841,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.966126,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 3.23055e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 3.23055e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 2.79506e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.07176e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000226174,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000318736,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000316439,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0151358,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 0.962765,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0305082,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.051408,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.22801,
'Instruction Fetch Unit/Runtime Dynamic': 0.0976873,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.039096,
'L2/Runtime Dynamic': 0.0243007,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.58106,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.222969,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0111273,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0111273,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.63361,
'Load Store Unit/Runtime Dynamic': 0.288972,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0274381,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.054876,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00973786,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0103215,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0598612,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00501187,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.232698,
'Memory Management Unit/Runtime Dynamic': 0.0153334,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.9623,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.113514,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0036714,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0231038,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.140289,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.53271,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0316093,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.227516,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.222957,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0497706,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0802781,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0405217,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.17057,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0227405,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.23007,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0421213,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0020876,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0248052,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0154391,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0669266,
'Execution Unit/Register Files/Runtime Dynamic': 0.0175267,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0601618,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.137931,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.958921,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 3.21365e-05,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 3.21365e-05,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 2.7805e-05,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 1.06622e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000221784,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.000313862,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.000314761,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.014842,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 0.944077,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0300098,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0504101,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.20841,
'Instruction Fetch Unit/Runtime Dynamic': 0.0958906,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0391723,
'L2/Runtime Dynamic': 0.0245996,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.57754,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.221386,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0110133,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0110133,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.62954,
'Load Store Unit/Runtime Dynamic': 0.286713,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0271568,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0543139,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00963804,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0102229,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0586992,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00493036,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.231364,
'Memory Management Unit/Runtime Dynamic': 0.0151532,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.928,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.110802,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00359395,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0226729,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.137069,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.51835,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 9.432335722666922,
'Runtime Dynamic': 9.432335722666922,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.257062,
'Runtime Dynamic': 0.219756,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 55.9492,
'Peak Power': 89.0614,
'Runtime Dynamic': 7.42295,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 55.6921,
'Total Cores/Runtime Dynamic': 7.2032,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.257062,
'Total L3s/Runtime Dynamic': 0.219756,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}} | StarcoderdataPython |
309350 | from Utility import resources as ex
from module.keys import bot_support_server_id, patreon_role_id, patreon_super_role_id
# noinspection PyBroadException,PyPep8
class Patreon:
    """Helpers for checking and managing the bot's Patreon supporters.

    Patron status lives in two places: the Discord role on the bot support
    server, and a DB table of permanent patrons mirrored into ``ex.cache.patrons``.
    """

    @staticmethod
    async def get_patreon_users():
        """Return the DB rows (userid) of permanently registered patrons."""
        return await ex.conn.fetch("SELECT userid from patreon.users")

    @staticmethod
    async def get_patreon_role_members(super_patron=False):
        """Return the members holding the patron (or super patron) role.

        :param super_patron: when True, look up the super-patron role instead.
        """
        support_guild = ex.client.get_guild(int(bot_support_server_id))
        # API call will not show role.members, so resolve the role through the
        # cached guild object instead.
        role_id = patreon_super_role_id if super_patron else patreon_role_id
        patreon_role = support_guild.get_role(int(role_id))
        return patreon_role.members

    @staticmethod
    async def check_if_patreon(user_id, super_patron=False):
        """Return True if the user is a patron (reads straight from cache).

        ``ex.cache.patrons`` maps user id -> flag; when ``super_patron`` is
        requested, the cached flag must match it.
        """
        if user_id not in ex.cache.patrons:
            # Explicit False instead of the previous implicit None.
            return False
        if super_patron:
            return ex.cache.patrons.get(user_id) == super_patron
        return True

    @staticmethod
    async def add_to_patreon(user_id):
        """Permanently register ``user_id`` as a patron (DB insert + cache)."""
        try:
            user_id = int(user_id)
            await ex.conn.execute("INSERT INTO patreon.users(userid) VALUES($1)", user_id)
            ex.cache.patrons[user_id] = True
        except Exception:
            # Best effort: bad ids / duplicate inserts are deliberately ignored.
            # (Was a bare ``except:`` which also swallowed SystemExit etc.)
            pass

    @staticmethod
    async def remove_from_patreon(user_id):
        """Remove ``user_id`` from the permanent patrons (DB delete + cache)."""
        try:
            user_id = int(user_id)
            await ex.conn.execute("DELETE FROM patreon.users WHERE userid = $1", user_id)
            ex.cache.patrons.pop(user_id, None)
        except Exception:
            # Best effort, same rationale as add_to_patreon.
            pass

    async def reset_patreon_cooldown(self, ctx):
        """Reset the command cooldown for patrons invoking ``ctx``'s command."""
        # Super Patrons also have the normal Patron role, so one check suffices.
        if await self.check_if_patreon(ctx.author.id):
            ctx.command.reset_cooldown(ctx)
| StarcoderdataPython |
11323797 | <reponame>aryarm/happler
#!/usr/bin/env python
import sys
import time
import numpy as np
from pgenlib import PgenReader
# Variant window of interest on chromosome 1 (half-open [start, end)).
first_variant = 3220890
last_variant = 3253426
num_variants = last_variant - first_variant

# Restrict reading to the first N samples; N comes from the command line.
sample_subset = np.arange(int(sys.argv[1]), dtype=np.uint32)
reader = PgenReader(
    bytes("/projects/ps-gymreklab/resources/datasets/ukbiobank/array_imputed/pfile_converted/chr1.pgen", "utf8"),
    sample_subset=sample_subset,
)

total_samples = reader.get_raw_sample_ct()
# Two alleles per sample, one int32 each; filled in place by the reader.
allele_matrix = np.empty((num_variants, total_samples * 2), dtype=np.int32)
reader.read_alleles_range(first_variant, last_variant, allele_matrix)
| StarcoderdataPython |
9745479 | <gh_stars>100-1000
import tensorflow.keras as keras
from omlt.io import write_onnx_model_with_bounds
import pytest
# from conftest import get_neural_network_data
from keras.layers import Dense, Conv2D
from keras.models import Model, Sequential
from pyomo.common.fileutils import this_file_dir
from tensorflow.keras.optimizers import Adamax
def _dense_layer(units, kernel_seed, bias_seed, activation=None, input_dim=None):
    """One Dense layer with the fixed RandomNormal initializers used by every
    fixture model (kernel mean 1.0, bias mean 0.0, stddev 0.05); the seeds keep
    the initial weights reproducible across runs."""
    kwargs = {
        "units": units,
        "kernel_initializer": keras.initializers.RandomNormal(
            mean=1.0, stddev=0.05, seed=kernel_seed
        ),
        "bias_initializer": keras.initializers.RandomNormal(
            mean=0.0, stddev=0.05, seed=bias_seed
        ),
    }
    if activation is not None:
        kwargs["activation"] = activation
    if input_dim is not None:
        kwargs["input_dim"] = input_dim
    return Dense(**kwargs)


def _compile_fit_save(nn, x, y, save_name):
    """Compile with Adamax(0.01)/MAE, train with the shared hyper-parameters,
    and save the model under ``<this dir>/models/<save_name>``."""
    nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")
    nn.fit(x=x, y=y, validation_split=0.2, batch_size=16, verbose=1, epochs=15)
    nn.save(this_file_dir() + "/models/" + save_name)


def train_models():
    """Train and save all dense fixture models used by the test suite.

    NOTE(review): ``get_neural_network_data`` is called here but its import
    from ``conftest`` is commented out at the top of this file -- confirm the
    name is in scope before running this script.
    """
    # 1-3-1 two-layer variants; they differ only in hidden/output activations.
    variants = [
        ("keras_linear_131", None, None),
        ("keras_linear_131_sigmoid", "sigmoid", None),
        ("keras_linear_131_sigmoid_output_activation", "sigmoid", "sigmoid"),
        ("keras_linear_131_relu", "relu", None),
        ("keras_linear_131_relu_output_activation", "relu", "relu"),
        ("keras_linear_131_sigmoid_softplus_output_activation", "sigmoid", "softplus"),
    ]
    for name, hidden_activation, output_activation in variants:
        x, y, _ = get_neural_network_data("131")
        nn = Sequential(name=name)
        nn.add(_dense_layer(3, 42, 43, activation=hidden_activation, input_dim=1))
        nn.add(_dense_layer(1, 62, 63, activation=output_activation))
        _compile_fit_save(nn, x, y, name)

    # Wider 1-100-100-100-1 sigmoid network with a softplus output.  Note it
    # is saved under "big" while the Keras model name stays "keras_big".
    x, y, _ = get_neural_network_data("131")
    nn = Sequential(name="keras_big")
    N = 100
    nn.add(_dense_layer(N, 42, 43, activation="sigmoid", input_dim=1))
    nn.add(_dense_layer(N, 42, 43, activation="sigmoid"))
    nn.add(_dense_layer(N, 42, 43, activation="sigmoid"))
    nn.add(_dense_layer(1, 62, 63, activation="softplus"))
    _compile_fit_save(nn, x, y, "big")

    # 2-3-5-3 linear network trained on the "2353" dataset.
    x, y, _ = get_neural_network_data("2353")
    nn = Sequential(name="keras_linear_2353")
    nn.add(_dense_layer(3, 42, 43, input_dim=2))
    nn.add(_dense_layer(5, 52, 53))
    nn.add(_dense_layer(3, 62, 63))
    _compile_fit_save(nn, x, y, "keras_linear_2353")
def train_conv():
    """Build a tiny single-filter 2x2 ReLU conv net over a 1x7x7 input,
    convert it to ONNX, and write it out together with per-pixel input
    bounds for OMLT."""
    nn = Sequential(name="keras_conv_7x7_relu")
    nn.add(
        Conv2D(
            filters=1,
            kernel_size=(2, 2),
            activation="relu",
            data_format="channels_first",
            kernel_initializer=keras.initializers.RandomNormal(
                mean=1.0, stddev=0.05, seed=62
            ),
            input_shape=(1, 7, 7),
        )
    )
    nn.compile(optimizer=Adamax(learning_rate=0.01), loss="mae")

    import tf2onnx
    import tempfile

    onnx_model, _ = tf2onnx.convert.from_keras(nn)
    # One (0.0, 1.0) bound per pixel of the single-channel 7x7 input.
    input_bounds = {(0, i, j): (0.0, 1.0) for i in range(7) for j in range(7)}
    with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as f:
        write_onnx_model_with_bounds(f.name, onnx_model, input_bounds)
        print(f"Wrote ONNX model with bounds at {f.name}")
# Script entry point: retrain and re-export every fixture model.
if __name__ == "__main__":
    train_models()
    train_conv()
| StarcoderdataPython |
238791 | from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from cms_opencomparison import models
import httplib
def _fetch_from_grid(grid, path):
    """GET *path* from the grid's configured backend server.

    Returns the raw response body, or '' on any connection/HTTP failure
    (best-effort proxying: errors degrade to an empty payload).
    """
    try:
        if grid.is_ssl():
            conn = httplib.HTTPSConnection(grid.get_hostname(), grid.get_port())
        else:
            conn = httplib.HTTPConnection(grid.get_hostname(), grid.get_port())
        headers = {
            'User-Agent': 'django_cms_opencomparison',
        }
        conn.request('GET', path, "", headers)
        return conn.getresponse().read()
    except Exception:
        return ''


def get_grid(request, grid_id):
    """Proxy the JSON grid detail for *grid_id* from its backing server."""
    try:
        grid = models.Grid.objects.get(pk=grid_id)
        res = _fetch_from_grid(grid, grid.get_grid_url() + grid.slug + "/")
    except Exception:
        # Unknown grid id (or any lookup failure) -> empty body, as before.
        res = ''
    return HttpResponse(
        res,
        mimetype='application/json')


def get_package(request, grid_id, package_slug):
    """Proxy the JSON package detail for *package_slug* via the grid's server."""
    try:
        grid = models.Grid.objects.get(pk=grid_id)
        res = _fetch_from_grid(grid, grid.get_package_url() + package_slug + "/")
    except Exception:
        res = ''
    return HttpResponse(
        res,
        mimetype='application/json')
| StarcoderdataPython |
1764086 | # vim:fileencoding=utf-8:noet
""" python function """
# Copyright (c) 2010 - 2019, © Badassops LLC / <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*
#* File : bao_create.py
#* Description : python class to create or delete a VPC and it components
#* Author : <NAME> <<EMAIL>>
#* Version : 0.2
#* Date : Feb 21, 2019
#*
#* History :
#* Date: Author: Info:
#* Jun 1, 2010 LIS First Release
#* Mar 30, 2019 LIS refactored
import sys
from time import strftime
from bao_network import set_network_config, get_cidr
from bao_vpc import AwsVPC
from bao_eip import AwsEIP
from bao_subnet import AwsSubnet
from bao_internet_gateway import AwsInternetGateway
from bao_nat_gateway import AwsNatGateway
from bao_route_table import AwsRouteTable
class BaoCreate():
    """ Class to create a VPC and all of its components: subnets,
    internet gateway, per-zone NAT gateways and route tables.

    NOTE(review): the original description also mentioned deletion, but no
    delete path is implemented in this class -- confirm intent.
    """
    def __init__(self, **kwargs):
        """ initial the object

        Keyword Args:
            aws_conn:   established AWS connection object
            net_config: network configuration dict (CIDRs, zones, subnets)
            tag:        tag used to name/find every created resource
            ipv6:       whether to configure IPv6 on the subnets
        """
        self.aws_conn = kwargs.get('aws_conn', {})
        self.net_config = kwargs.get('net_config', {})
        self.tag = kwargs.get('tag', {})
        self.ipv6 = kwargs.get('ipv6', False)
        # Populated step by step as create() progresses.
        self.vpc_conn = None
        self.vpc_id = None
        self.vpc_route_table = None
        self.subnet_conn = None
        self.subnets_fe_id = []   # front-end subnet ids, in zone order
        self.subnets_be_id = {}   # zone name -> {'subnet_id': id}
        self.int_gate_id = None
        self.nat_gate_info = {}   # zone name -> NAT gateway id
    def _create_vpc(self):
        """ create the VPC and update the network configuration with the ipv6 detail

        Exits the process on any failure (no VPC id, duplicate tag, etc.).
        """
        # for create we only need to know the CIDR block first
        vpc_cidr = get_cidr(dc_cfg=self.net_config)
        self.vpc_conn = AwsVPC(aws_conn=self.aws_conn, tag=self.tag, cidr=vpc_cidr)
        if not self.vpc_conn:
            print('error AwsVPC\n')
            sys.exit(-1)
        # there should be no ID yet: a VPC with this tag must not already exist
        if self.vpc_conn.get_vpc_id() is not None:
            print('There is already a VPC with the given tag: {}, aborted.'.format(self.tag))
            sys.exit(-1)
        print('\t--< Start creating the VPC: {} >--'.format(strftime("%c")))
        if self.vpc_conn.create() is None:
            print('error creating the VPC\n')
            sys.exit(-1)
        # get the vpc id and ipv6 details and update the net_config dict
        self.vpc_id = self.vpc_conn.get_vpc_id()
        networkv6, cidrv6 = self.vpc_conn.get_vpc_cidr(ip_type='v6', split_cidr=True)
        self.net_config.update(set_network_config(dc_cfg=self.net_config, \
            dc_cidr_v6=cidrv6, dc_network_v6=networkv6))
        # get the main route table (reused later when creating fe-routes)
        self.vpc_route_table = self.vpc_conn.get_main_route_table()
        if self.vpc_route_table is None:
            print('error getting main route of the VPC\n')
            sys.exit(-1)
    def _create_subnets(self):
        """ create the fe/be subnets (one per zone) and keep a list of their ids """
        print('\t--< create the VPC\'s subnet >--')
        self.subnet_conn = AwsSubnet(aws_conn=self.aws_conn, tag=self.tag, vpc_id=self.vpc_id)
        if not self.subnet_conn:
            print('error AwsSubnet\n')
            sys.exit(-1)
        # front-end subnets: same index into zones / v4 CIDRs / v6 CIDRs
        subnet_position = 0
        for _ in self.net_config['vpc_fe_subnets']:
            subnet_id = self.subnet_conn.create_subnet(
                zone_name=self.net_config['dc_zones_names'][subnet_position], \
                subnet_cidr=self.net_config['vpc_fe_subnets'][subnet_position], \
                subnet_cidrv6=self.net_config['vpc_fe_subnets_v6'][subnet_position], \
                subnet_type='fe',
                ipv6=self.ipv6
            )
            if subnet_id is None:
                sys.exit(-1)
            subnet_position += 1
            self.subnets_fe_id.append(subnet_id)
        # back-end subnets: stored per zone name so routes/NAT can be matched later
        subnet_position = 0
        for _ in self.net_config['vpc_be_subnets']:
            subnet_id = self.subnet_conn.create_subnet(
                zone_name=self.net_config['dc_zones_names'][subnet_position], \
                subnet_cidr=self.net_config['vpc_be_subnets'][subnet_position], \
                subnet_cidrv6=self.net_config['vpc_be_subnets_v6'][subnet_position], \
                subnet_type='be',
                ipv6=self.ipv6
            )
            if subnet_id is None:
                sys.exit(-1)
            self.subnets_be_id[self.net_config['dc_zones_names'][subnet_position]] = {'subnet_id': subnet_id}
            subnet_position += 1
    def _create_internet_gateway(self):
        """ create the internet gateway and attach it to the VPC """
        print('\t--< create the internet gateway and attach to the VPC >--')
        int_gate_conn = AwsInternetGateway(aws_conn=self.aws_conn, tag=self.tag, vpc_id=self.vpc_id)
        if not int_gate_conn:
            print('error AwsInternetGateway\n')
            sys.exit(-1)
        self.int_gate_id = int_gate_conn.create_internet_gateway()
        if self.int_gate_id is None:
            sys.exit(-1)
        result = int_gate_conn.attach_internet_gateway()
        if result is None:
            sys.exit(-1)
    def _create_nat_gateways(self):
        """ create one NAT gateway per fe-subnet, each with its own EIP """
        # get the subnet ids (only the fe part of the returned tuple is needed)
        subnet_data_fe, _, _ = self.subnet_conn.get_subnet_info(fe_subnet=self.net_config['vpc_fe_subnets'], \
            be_subnet=self.net_config['vpc_be_subnets'])
        print('\t--< create the NAT gateway and attach to each fe-subnet with it own EIP >--')
        nat_gate_conn = AwsNatGateway(aws_conn=self.aws_conn, tag=self.tag)
        if not nat_gate_conn:
            print('error nat_gate_conn\n')
            sys.exit(-1)
        eip_conn = AwsEIP(aws_conn=self.aws_conn)
        if not eip_conn:
            print('error AwsEIP\n')
            sys.exit(-1)
        for subnet_id in subnet_data_fe:
            zone_name = subnet_data_fe[subnet_id]['zone_name']
            # allocate a dedicated EIP for this zone's NAT gateway
            eip_id = eip_conn.create_eip(tag=self.tag + '-' + 'nat_gate' + '-' + zone_name)
            if eip_id is None:
                sys.exit(-1)
            nat_gateway_id = nat_gate_conn.create_nat_gateway(eip_id=eip_id, subnet_id=subnet_id, \
                tag=self.tag + '-' + zone_name)
            if nat_gateway_id is None:
                sys.exit(-1)
            # remembered per zone so _create_routes can pair be-subnets with them
            self.nat_gate_info[zone_name] = nat_gateway_id
    def _create_routes(self):
        """
        create the route for the fe-subnets (shared, via the internet gateway)
        create the route for the be-subnets, each subnet get it own route and own NAT gateway
        """
        print('\t--< create the route for the fe-subnets >--')
        route_conn = AwsRouteTable(aws_conn=self.aws_conn, vpc_id=self.vpc_id, tag=self.tag)
        if not route_conn:
            print('error AwsRouteTable\n')
            sys.exit(-1)
        if route_conn.create_fe_route_table(subnets_id=self.subnets_fe_id, \
            internet_gateway=self.int_gate_id, main_route_table=self.vpc_route_table) is False:
            sys.exit(1)
        print('\t--< create the route for the be-subnets, 1 route per subnet with it own NAT gateway >--')
        for subnet in self.subnets_be_id:
            zone_name = subnet
            subnet_id = self.subnets_be_id[zone_name]['subnet_id']
            nat_gate_id = self.nat_gate_info[zone_name]
            if route_conn.create_be_route_table(subnet_id=subnet_id, \
                nat_gateway=nat_gate_id, zone_name=zone_name) is False:
                sys.exit(1)
    def create(self):
        """ create the VPC and its components, in dependency order """
        # start the creation process
        self._create_vpc()
        self._create_subnets()
        self._create_internet_gateway()
        self._create_nat_gateways()
        self._create_routes()
    def get_vpc_detail(self):
        """ return a dict summarising every resource id created so far """
        vpc_detail = {}
        vpc_detail['vpc_id'] = self.vpc_id
        vpc_detail['subnets_fe'] = self.subnets_fe_id
        vpc_detail['subnets_be'] = self.subnets_be_id
        vpc_detail['vpc_int_gate'] = self.int_gate_id
        vpc_detail['vpc_nat_gate'] = self.nat_gate_info
        vpc_detail['vpc_route_table'] = self.vpc_route_table
        return vpc_detail
| StarcoderdataPython |
6513707 | <filename>src/os_rotatefile/rotatefile.py<gh_stars>1-10
import os
import sys
from io import BytesIO
# Py2/Py3 compatibility aliases -- presumably used for isinstance() checks
# elsewhere in this module (not visible in this chunk); verify before removal.
_PY3 = sys.version_info[0] == 3
if _PY3:
    string_types = str
    integer_types = int
else:
    string_types = basestring
    integer_types = (int, long)
def _complain_ifclosed(closed):
if closed:
raise ValueError("I/O operation on closed file")
class RotateBase(object):
    """Common bookkeeping for rotating-file readers and writers.

    Rotated files live in one directory and are named ``<prefix><index>``
    where the prefix/directory come from *base_filename*.
    """

    def __init__(self, base_filename):
        # Split the base path into an absolute directory plus a filename prefix.
        self._prefix = os.path.basename(base_filename)
        self._path = os.path.abspath(os.path.dirname(base_filename))
        self._fp = None      # currently open file object, if any
        self._idx = -1       # index of the currently open rotated file
        self.closed = True

    def _get_filename(self, idx):
        """Absolute path of the rotated file with non-negative index *idx*."""
        if idx < 0:
            raise ValueError('idx must >= 0')
        return os.path.join(self._path, '%s%d' % (self._prefix, idx))

    def close(self):
        """Close the underlying file object; safe to call more than once."""
        if not self.closed:
            fp, self._fp = self._fp, None
            if fp is not None:
                fp.close()
            self.closed = True
class RotateReader(RotateBase):
    """Read a series of rotated files ``<base>0, <base>1, ...`` sequentially,
    presenting them as one continuous byte stream."""
    def __init__(self, base_filename, buffer_size='128k'):
        """Open the lowest-indexed rotated file.

        :param base_filename: path prefix of the rotated files.
        :param buffer_size: default chunk size for read() when no size is
            given; parsed by ``valid_size`` (defined elsewhere in this
            module -- presumably accepts suffixes like '128k', verify).
        """
        super(RotateReader, self).__init__(base_filename)
        self._idx = None      # None -> scan the directory on first _open_next()
        self._end = False     # True once the last rotated file is exhausted
        self._buffer_size = valid_size(buffer_size)
        self._open_next()
    def _open_next(self):
        """Advance to the next rotated file.

        On the first call, scans the directory for the smallest valid index
        and starts there.  Raises IOError when the next file does not exist;
        callers treat that as end-of-stream.
        """
        if self._end:
            return
        if self._idx is None:
            # First open: find the smallest non-negative index among the
            # existing "<prefix><digits>" files.
            for x in [x for x in os.listdir(self._path) if x.startswith(self._prefix)]:
                idx = x[len(self._prefix):]
                if not idx.isdigit():
                    continue
                # skip zero-padded names like "prefix007"; only "0" itself is valid
                if idx.startswith('0') and idx != '0':
                    continue
                idx = int(idx)
                if idx < 0:
                    continue
                self._idx = idx if self._idx is None else min(
                    self._idx, idx)
            if self._idx is not None:
                # step back one so the += 1 below lands on the found index
                self._idx -= 1
            else:
                raise IOError('file not found')
        self._idx += 1
        filename = self._get_filename(self._idx)
        # open() raises IOError if the next file is missing -> end of series
        self._fp = open(filename, "rb")
        self.closed = False
    def read(self, size=-1):
        """Read up to *size* bytes, transparently crossing file boundaries.

        A negative *size* means "one buffer_size chunk" (not "read all").
        Returns b'' once every rotated file has been consumed.
        """
        if size < 0:
            size = self._buffer_size
        _complain_ifclosed(self.closed)
        assert size >= 0, 'size must >= 0'
        if self._end or size == 0:
            return b''
        buf = BytesIO()
        need = size
        while buf.tell() < size:
            data = self._fp.read(need)
            if not data:
                # current file exhausted: hop to the next one, or stop at the end
                try:
                    self._fp.close()
                    self._open_next()
                    continue
                except IOError:
                    self._end = True
                    break
            buf.write(data)
            need = size - buf.tell()
        buf.seek(0)
        return buf.read()
    def readline(self):
        """Read one line, stitching together partial lines that span a
        file-rotation boundary.  Returns b'' at the end of the series."""
        _complain_ifclosed(self.closed)
        if self._end:
            return b''
        buffer = BytesIO()
        e = b''
        while e != b'\n':
            line = self._fp.readline()
            if not line:
                try:
                    self._fp.close()
                    self._open_next()
                except IOError:
                    self._end = True
                    break
            buffer.write(line)
            # peek at the last byte written to see whether the line is complete
            buffer.seek(-1, 1)
            e = buffer.read(1)
        buffer.seek(0)
        return buffer.read()
class RotateWriter(RotateBase):
    """Append binary data across rotated chunk files, starting a new chunk
    whenever the current one reaches roll_size bytes."""

    def __init__(self, base_filename, roll_size='1G'):
        """
        :param roll_size: maximum size of a single chunk file
            (parsed by valid_size); must be > 0.
        """
        super(RotateWriter, self).__init__(base_filename)
        self._roll_size = valid_size(roll_size)
        assert self._roll_size > 0, 'roll_size must > 0'
        self._size = -1  # bytes currently stored in the open chunk
        self._open_next()

    def _open_next(self):
        """Open the chunk file to append to.

        On the first call, resumes the highest-numbered existing chunk
        when it still has room, otherwise starts the next index.
        Creates the target directory if needed.
        """
        if not os.path.exists(self._path):
            os.makedirs(self._path)
        if self._idx < 0:
            # First open: find the highest valid chunk index on disk
            # (all-digit, non-zero-padded, non-negative suffixes only).
            for x in [x for x in os.listdir(self._path) if x.startswith(self._prefix)]:
                idx = x[len(self._prefix):]
                if not idx.isdigit():
                    continue
                if idx.startswith('0') and idx != '0':
                    continue
                idx = int(idx)
                if idx < 0:
                    continue
                self._idx = max(self._idx, int(idx))
            if self._idx >= 0:
                filename = self._get_filename(self._idx)
                size = os.path.getsize(filename)
                if size < self._roll_size:
                    # Newest chunk is not full yet: step back so the
                    # += 1 below re-selects the same index (resume it).
                    self._idx -= 1
        self._idx += 1
        filename = self._get_filename(self._idx)
        self._fp = open(filename, "ab")
        self._size = os.path.getsize(filename)
        self.closed = False

    def write(self, data, flush=False):
        """Append *data* (bytes), rolling over to new chunk files so that
        no chunk exceeds roll_size.

        :param flush: flush the file buffer after the final write.
        :raises TypeError: when *data* is not bytes.
        """
        _complain_ifclosed(self.closed)
        if not isinstance(data, bytes):
            raise TypeError('unsupported type: {}'.format(type(data).__name__))
        if self._fp is None:
            self._open_next()
        while True:
            can_write = self._roll_size - self._size
            if len(data) <= can_write:
                # Everything fits into the current chunk.
                self._fp.write(data)
                if flush:
                    self._fp.flush()
                self._size += len(data)
                break
            elif can_write > 0:
                # Fill the current chunk, then roll over with the rest.
                self._fp.write(data[:can_write])
                self._size += can_write
                self.close()
                self._open_next()
                data = data[can_write:]
            else:
                # Chunk already full: roll over before writing anything.
                self.close()
                self._open_next()

    def flush(self):
        """Flush the current chunk file's buffer."""
        _complain_ifclosed(self.closed)
        self._fp.flush()
def open_file(name, mode='r', **kwargs):
    """Open *name* for rotating reading ('r') or writing ('w').

    A plain existing file opened with mode 'r' is returned as a regular
    binary file object; otherwise the matching RotateReader/RotateWriter
    is constructed with **kwargs forwarded to it.

    :raises ValueError: when *name* has no filename part or *mode* is
        neither 'r' nor 'w'.
    """
    if not os.path.basename(name):
        raise ValueError("not support open path")
    if mode == 'r' and os.path.isfile(name):
        return open(name, 'rb')

    def not_support(name, **kwargs):
        raise ValueError("mode must be 'r' or 'w'")

    factories = {'w': RotateWriter, 'r': RotateReader}
    factory = factories.get(mode, not_support)
    return factory(name, **kwargs)
MAX_FILE_SIZE = 1024 ** 4  # hard upper bound for a parsed size (1 TiB)


def valid_size(size):
    """Parse *size* into a positive byte count.

    Accepts an int, or a string with an optional case-insensitive
    k/m/g/t suffix in powers of 1024 (e.g. '128k', '1.5M', 1024).

    :raises TypeError: *size* is neither int nor str (bool is rejected
        explicitly since bool is an int subclass).
    :raises ValueError: the value cannot be parsed, is <= 0, or is
        larger than MAX_FILE_SIZE.
    """
    if not isinstance(size, (integer_types, string_types)) or isinstance(size, bool):
        raise TypeError('size must be int or string type')
    if isinstance(size, integer_types):
        size = str(int(size))
    else:
        size = size.lower()
    multi = 1
    weight = {'k': 1, 'm': 2, 'g': 3, 't': 4}
    # Guard against the empty string: size[-1] on '' raised IndexError
    # instead of a meaningful ValueError.
    if size and size[-1] in weight:
        multi = pow(1024, weight[size[-1]])
        size = size[:-1]
    if not size:
        # '' or a bare suffix like 'k' carries no numeric part.
        raise ValueError('invalid size')
    r = int(float(size) * multi)
    if r <= 0:
        raise ValueError('size must > 0')
    elif r > MAX_FILE_SIZE:
        raise ValueError('size must <= %d' % MAX_FILE_SIZE)
    return r
| StarcoderdataPython |
5133713 | <filename>bot.py
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import fnmatch
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from shutil import rmtree
import telegram
from geoip import geolite2
try:
from telegram import Bot
except Exception as e:
logging.info(e)
print('''\nPython dependencies error:\n ~$ pip3 freeze | grep telegram
~$ pip3 uninstall <libs>\n ~$ pip3 install python-telegram-bot''')
sys.exit(0)
class Poster(object):
    """Walks a snapshot directory tree and posts camera snapshots to a
    Telegram chat, one message per image, with host metadata in the caption.

    Snapshot filenames are expected to follow the pattern
    ``<ip>_<port>_<login>_<password>_<channel>_<model>.jpg``.
    """

    def __init__(self, fdir, token, room_id, delete=False):
        """
        :param fdir: root directory containing the *.jpg snapshots
        :param token: Telegram bot API token
        :param room_id: target chat/channel id
        :param delete: when True, remove *fdir* after all posts are sent
        """
        logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
        self.fdir = Path(fdir)
        self.bot = telegram.Bot(token=token, request=telegram.utils.request.Request(connect_timeout=20, read_timeout=20))
        self.room_id = room_id
        # self.gl = geolite2.lookup()
        self.delete = delete

    def get_data(self):
        '''
        Gets every snapshot from the main folder and all subfolders.
        Data looks like this:
        {'1.1.1.1': [{'Port': '37777', 'Login': 'admin', 'Password': 'admin',
                      'Channel': '1', 'Model': 'XM',
                      'Path': '/.../1.1.1.1_37777_admin_admin_1_XM.jpg'}, ...]}
        '''
        self.prep_files_to_post = defaultdict(list)
        # TODO: grab all channels to an album
        for d, dirs, filenames in os.walk(self.fdir):
            for folder in dirs:
                if "trash" in folder:
                    continue
                # Join against the directory being walked (d), not the
                # root: joining self.fdir broke on nested subfolders.
                subdir = Path(d) / folder
                for host in fnmatch.filter(os.listdir(subdir), "*.jpg"):
                    ip, data = self.prep_data(subdir, host)
                    self.prep_files_to_post[ip].append(data)
        for host in fnmatch.filter(os.listdir(self.fdir), "*.jpg"):
            try:
                ip, data = self.prep_data(self.fdir, host)
                self.prep_files_to_post[ip].append(data)
            except Exception as e:
                # Malformed filename (too few '_'-separated fields): skip.
                continue
        return self.prep_files_to_post

    def prep_data(self, folder, host):
        '''
        Split a snapshot filename into its metadata fields.
        From 1.1.1.1_37777_admin_admin_1_XM.jpg returns:
        '1.1.1.1', {'Port': '37777', 'Login': 'admin', 'Password': 'admin',
                    'Channel': '1', 'Model': 'XM', 'Path': folder / host}
        '''
        self.data = host.split("_")
        model = self.data[5]
        # rstrip(".jpg") strips any trailing '.', 'j', 'p' or 'g'
        # characters and mangles model names; remove the extension only.
        if model.endswith(".jpg"):
            model = model[:-len(".jpg")]
        return self.data[0], {"Port": self.data[1], "Login": self.data[2], "Password": self.data[3], "Channel": self.data[4], "Model": model, "Path": folder / host}

    def sort_list(self, files_to_sort):
        '''
        Sorts the snapshot list of each host by numeric channel.
        '''
        def ckey(data):
            return int(data["Channel"])
        # Convert from defaultdict to a plain dict.
        self.files_to_post_sorted = dict(files_to_sort)
        for ip in self.files_to_post_sorted:
            self.files_to_post_sorted[ip] = sorted(self.files_to_post_sorted[ip], key=ckey)
        return self.files_to_post_sorted

    def post_from(self, files_to_post):
        """Post every prepared snapshot; optionally delete the source dir."""
        for ip in files_to_post:
            for data in files_to_post[ip]:
                self.ip = ip
                self.port = data["Port"]
                self.login = data["Login"]
                self.password = data["Password"]
                self.channel = data["Channel"]
                self.model = data["Model"]
                self.fpath = data["Path"]
                try:
                    logging.info("Trying to get data and post " + str(self.fpath))
                    self.post(self.ip, self.port, self.login, self.password, self.channel, self.model, self.fpath)
                except Exception as e:
                    logging.info("Cannot try: " + str(e))
                    continue
        if self.delete:
            rmtree(self.fdir)

    def post(self, ip, port, login, password, channel, model, photo):
        """Build the Markdown caption (with GeoIP location) and send one
        photo to the Telegram chat, retrying up to 5 times on timeouts."""
        try:
            self.state = geolite2.lookup(self.ip)
        except TypeError:
            print('''Python dependencies error:\n
~$ pip3 uninstall python-geoip python-geoip-python3\n ~$ pip3 install python-geoip-python3''')
            sys.exit(0)
        if self.state:
            self.state = str(self.state.country + " - " + self.state.timezone)
        else:
            self.state = 'Tap2Map'
        self.text = "[Shodan:](https://www.shodan.io/host/{}) [{}](tg://msg_url?url=vk.com/wall-163997495?q={})\n*Port:* `{}`\n*Login:* `{}`\n*Password:* `{}`\n*Location:* [{}](https://iplocation.com/?ip={})\n*Channel:* `{}`\n*Model:* `{}`".format(ip, ip, ip, port, login, password, self.state, ip, channel, model)
        logging.info("Got data: \n\t\t\t\t\tIP: {}\n\t\t\t\t\tPort: {}\n\t\t\t\t\tLogin: {}\n\t\t\t\t\tPassword: {}\n\t\t\t\t\tLocation: {}\n\t\t\t\t\tChannel: {}\n\t\t\t\t\tModel: {}".format(ip, port, login, password, self.state, channel, model))
        self.sent = False
        retry_c = 0
        while not self.sent:
            try:
                logging.info("Trying to send post...")
                with open(photo, 'rb') as f:
                    self.sent = self.bot.send_photo(chat_id=self.room_id, photo=f, caption=self.text, parse_mode=telegram.ParseMode.MARKDOWN, timeout=120)
                logging.info("Sent.")
            except Exception as e:
                if retry_c > 4:
                    break
                elif str(e) == 'Timed out':
                    logging.info("Cannot send post: {}. Sleeping for 5 seconds and trying again...".format(str(e)))
                    retry_c += 1
                    time.sleep(5)
                    pass
                else:
                    logging.info("Cannot send post: {}. Skiping that.".format(str(e)))
                    break
        # Throttle between posts to stay under Telegram rate limits.
        time.sleep(3)

    def start(self):
        """Collect, sort and post every snapshot under self.fdir."""
        logging.info("Starting bot...")
        self.post_from(self.sort_list(self.get_data()))
if __name__ == '__main__':
    # Fill in the target chat id and the bot API token before running.
    ROOM_ID = ''
    TOKEN = ''
    # Post snapshots found under the current working directory;
    # delete=False keeps the source files afterwards.
    poster = Poster(Path(os.getcwd()), TOKEN, ROOM_ID, delete=False)
    poster.start()
| StarcoderdataPython |
3295783 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
import json
from six import string_types
from toolz.curried import merge, excepts, first, compose, filter, map
@frappe.whitelist()
def apply_price_list(args, as_doc=False):
    """Wrapper over erpnext's apply_price_list that, for return
    transactions, substitutes each child item's rate with the batch-level
    price (Batch.px_price_list_rate) when one is set.

    :param args: transaction args as a dict or JSON string; items are
        matched to result children via 'child_docname'.
    :param as_doc: forwarded unchanged to erpnext's apply_price_list.
    """
    from erpnext.stock.get_item_details import apply_price_list

    def get_item_price(item, args):
        # toolz `compose` reads bottom-up: take args["items"], keep the
        # entries matching this child's docname, take the first match
        # (or {} when there is none), then read its batch_no.
        batch_no = compose(
            lambda x: x.get("batch_no"),
            excepts(StopIteration, first, lambda _: {}),
            filter(lambda x: x.get("child_docname") == item.get("child_docname")),
            lambda x: x.get("items"),
        )(args)
        if batch_no:
            # Batch-level override; falls through when unset or zero.
            price = frappe.get_cached_value("Batch", batch_no, "px_price_list_rate")
            if price:
                return price
        return item.get("price_list_rate")

    def proc_result(result, args):
        # Only return transactions get the per-item price override.
        if args.get("is_return"):
            return merge(
                result,
                {
                    "children": [
                        merge(x, {"price_list_rate": get_item_price(x, args)},)
                        for x in result.get("children")
                    ]
                },
            )
        return result

    _args = json.loads(args) if isinstance(args, string_types) else args
    return proc_result(apply_price_list(_args, as_doc), _args)
| StarcoderdataPython |
6617486 | <reponame>ravihuang/rfdbbot<filename>atests/libraries/RobotSqliteDatabase.py
import sqlite3
class RobotSqliteDatabase:
    """Robot Framework keyword library for simple SQLite row-count checks."""

    def __init__(self):
        # The connection is opened lazily via `Connect To Database`.
        self._connection = None

    def connect_to_database(self, db_file_path):
        """Open (or create) the SQLite database at *db_file_path*."""
        self._connection = sqlite3.connect(db_file_path)

    def close_connection(self):
        """Close the current database connection."""
        self._connection.close()

    def row_count_is_equal_to(self, count, db_table_name):
        """Fail unless *db_table_name* contains exactly *count* rows."""
        actual_count = self._number_of_rows_in(db_table_name)
        if int(count) != actual_count:
            message = 'Expected to have %s rows but was %s' % (count, actual_count)
            raise AssertionError(message)

    def _number_of_rows_in(self, db_table_name):
        # count() with no argument behaves like count(*) in SQLite.
        return self._execute('SELECT count() FROM %s' % db_table_name).fetchone()[0]

    def _execute(self, sql_statement):
        return self._connection.execute(sql_statement)
| StarcoderdataPython |
1843514 | #!/usr/bin/python3
from logic import AND, OR, NOT, ATOM
def at_least_one(formulas):
    # Disjunction: at least one of the given formulas must hold.
    return OR(formulas)
def all_pairs(lst):
    """Return every unordered pair (lst[i], lst[j]) with i < j, in order."""
    pairs = []
    for i, first in enumerate(lst):
        for second in lst[i + 1:]:
            pairs.append((first, second))
    return pairs
def at_most_one(formulas):
    """No two of the given formulas may hold simultaneously."""
    clauses = []
    for f1, f2 in all_pairs(formulas):
        clauses.append(NOT(AND([f1, f2])))
    return AND(clauses)
def exactly_one(formulas):
    # Exactly one formula holds: at-most-one AND at-least-one.
    return AND([at_most_one(formulas), at_least_one(formulas)])
# Translation of Sudoku to propositional logic
def create_variable_name(column, row, number):
    """Encode a cell/value triple as the atom name 'column,row,number'."""
    return "{},{},{}".format(column, row, number)
def variable(column, row, number):
    # Propositional atom meaning "cell (column, row) contains number".
    return ATOM(create_variable_name(column, row, number))
# Map 9X9 Sudoku instances to propositional formulas
#
# In formulas C1 to C4 below, instead of exactly1 it would be logically
# equivalent to use OR. However, the exactly1 allows Unit Propagation in DPLL
# to infer far more new literals, and cutting down the size of the search tree
# to a small fraction. In fact, our simple SAT solver needs at least a couple
# of hours (and possibly far far longer: these runs did not finish before we
# terminated them after 2 hours) to solve even simple instances if there is
# OR instead of exactly1 in these formulas.
def sudoku2fma(puzzle):
    """Encode a 9x9 Sudoku instance as a propositional formula.

    :param puzzle: iterable of (column, row, number) clue triples
    :return: a conjunction satisfiable iff the puzzle has a solution
    """
    # Every grid cell has exactly one value
    C1 = AND([exactly_one([variable(c, r, n) for n in range(1, 10)]) for r in range(1, 10) for c in range(1, 10)])
    # Every row has all numbers
    C2 = AND([exactly_one([variable(c, r, n) for c in range(1, 10)]) for r in range(1, 10) for n in range(1, 10)])
    # Every column has all numbers
    C3 = AND([exactly_one([variable(c, r, n) for r in range(1, 10)]) for c in range(1, 10) for n in range(1, 10)])
    # Every 3X3 sub-grid has all numbers
    C4 = AND(
        [exactly_one([variable(c + dc, r + dr, n) for dr in range(0, 3) for dc in range(0, 3)]) for c in range(1, 8, 3)
         for r in range(1, 8, 3) for n in range(1, 10)])
    # The solution respects the given clues
    C5 = AND([variable(x, y, n) for (x, y, n) in puzzle])
    return AND([C1, C2, C3, C4, C5])
# Output a satisfying valuation for Sudoku as a 9X9 grid
def showsudokuclues(clues):
    """Print the clue set (triples of (column, row, number)) as a 9x9
    grid with '.' for empty cells and |/= separators between 3x3 boxes.

    Row 9 is printed at the top (y counts down)."""
    for y in range(9, 0, -1):
        for x in range(1, 10):
            flag = False  # whether any clue was printed for this cell
            for n in range(1, 10):
                if (x, y, n) in clues:
                    print(str(n), end='')
                    flag = True
            if not flag:
                print(".", end='')
            if x in [3, 6]:
                print("|", end='')
        print("")
        if y in [7, 4]:
            print("===|===|===")
def showsudoku(V):
    """Print the 9x9 grid encoded by valuation *V* (mapping from atom
    names "col,row,num" to truth values), row 9 at the top."""
    for y in range(9, 0, -1):
        for x in range(1, 10):
            for n in range(1, 10):
                # Print each number this cell's atom is true for
                # (exactly one in a satisfying valuation).
                if V[create_variable_name(x, y, n)]:
                    print(str(n), end='')
            if x in [3, 6]:
                print("|", end='')
        print("")
        if y in [7, 4]:
            print("-----------")
| StarcoderdataPython |
5098704 | import logging
import threading
from abc import ABC
from abc import abstractmethod
from time import sleep
from typing import Optional
from typing import Union
from kubernetes.client import V1beta1PodDisruptionBudget
from kubernetes.client import V1DeleteOptions
from kubernetes.client import V1Deployment
from kubernetes.client import V1StatefulSet
from kubernetes.client.rest import ApiException
from paasta_tools.autoscaling.autoscaling_service_lib import autoscaling_is_paused
from paasta_tools.kubernetes_tools import create_deployment
from paasta_tools.kubernetes_tools import create_pod_disruption_budget
from paasta_tools.kubernetes_tools import create_stateful_set
from paasta_tools.kubernetes_tools import force_delete_pods
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import KubeDeployment
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.kubernetes_tools import list_all_deployments
from paasta_tools.kubernetes_tools import load_kubernetes_service_config_no_cache
from paasta_tools.kubernetes_tools import paasta_prefixed
from paasta_tools.kubernetes_tools import pod_disruption_budget_for_service_instance
from paasta_tools.kubernetes_tools import update_deployment
from paasta_tools.kubernetes_tools import update_stateful_set
from paasta_tools.utils import load_system_paasta_config
class Application(ABC):
    """Base wrapper around a rendered Kubernetes controller object
    (V1Deployment or V1StatefulSet) for one paasta service instance.

    Subclasses implement the controller-specific create/update/delete
    calls; this base class carries the shared Pod Disruption Budget
    handling."""

    def __init__(
        self,
        item: Union[V1Deployment, V1StatefulSet],
        logging=logging.getLogger(__name__),
    ) -> None:
        """
        This Application wrapper is an interface for creating/deleting k8s deployments and statefulsets
        soa_config is KubernetesDeploymentConfig. It is not loaded in init because it is not always required.
        :param item: Kubernetes Object(V1Deployment/V1StatefulSet) that has already been filled up.
        :param logging: where logs go
        """
        # Default into the "paasta" namespace when none was rendered.
        if not item.metadata.namespace:
            item.metadata.namespace = "paasta"
        # Recover the service identity from the paasta-prefixed labels.
        attrs = {
            attr: item.metadata.labels[paasta_prefixed(attr)]
            for attr in ["service", "instance", "git_sha", "config_sha"]
        }
        self.kube_deployment = KubeDeployment(replicas=item.spec.replicas, **attrs)
        self.item = item
        self.soa_config = None  # type: KubernetesDeploymentConfig
        self.logging = logging

    def load_local_config(
        self, soa_dir: str, cluster: str
    ) -> Optional[KubernetesDeploymentConfig]:
        """Lazily load (and cache) the soa-configs for this instance."""
        if not self.soa_config:
            self.soa_config = load_kubernetes_service_config_no_cache(
                service=self.kube_deployment.service,
                instance=self.kube_deployment.instance,
                cluster=cluster,
                soa_dir=soa_dir,
            )
        return self.soa_config

    def __str__(self):
        # service-instance-gitsha-configsha identifier.
        service = self.kube_deployment.service
        instance = self.kube_deployment.instance
        git_sha = self.kube_deployment.git_sha
        config_sha = self.kube_deployment.config_sha
        return f"{service}-{instance}-{git_sha}-{config_sha}"

    @abstractmethod
    def deep_delete(self, kube_client: KubeClient) -> None:
        """
        Remove all controllers, pods, and pod disruption budgets related to this application
        :param kube_client:
        """
        pass

    def create(self, kube_client: KubeClient):
        """
        Create all controllers, HPA, and pod disruption budgets related to this application
        :param kube_client:
        """
        pass

    def update(self, kube_client: KubeClient):
        """
        Update all controllers, HPA, and pod disruption budgets related to this application
        :param kube_client:
        """
        pass

    def update_related_api_objects(self, kube_client: KubeClient) -> None:
        """
        Update related Kubernetes API objects such as HPAs and Pod Disruption Budgets
        :param kube_client:
        """
        self.ensure_pod_disruption_budget(kube_client)

    def delete_pod_disruption_budget(self, kube_client: KubeClient) -> None:
        """Delete this app's PDB; a missing PDB (404) counts as success."""
        try:
            kube_client.policy.delete_namespaced_pod_disruption_budget(
                name=self.item.metadata.name,
                namespace=self.item.metadata.namespace,
                body=V1DeleteOptions(),
            )
        except ApiException as e:
            if e.status == 404:
                # Deployment does not exist, nothing to delete but
                # we can consider this a success.
                self.logging.debug(
                    "not deleting nonexistent pod disruption budget/{} from namespace/{}".format(
                        self.item.metadata.name, self.item.metadata.namespace
                    )
                )
            else:
                raise
        else:
            self.logging.info(
                "deleted pod disruption budget/{} from namespace/{}".format(
                    self.item.metadata.name, self.item.metadata.namespace
                )
            )

    def ensure_pod_disruption_budget(
        self, kube_client: KubeClient
    ) -> V1beta1PodDisruptionBudget:
        """Create or patch the PDB so max_unavailable matches config.

        max_unavailable comes from the instance's bounce_margin_factor
        when set, otherwise from the system-wide paasta default.
        """
        max_unavailable: Union[str, int]
        if "bounce_margin_factor" in self.soa_config.config_dict:
            # e.g. factor 0.95 -> at most "5%" unavailable during bounces.
            max_unavailable = (
                f"{int((1 - self.soa_config.get_bounce_margin_factor()) * 100)}%"
            )
        else:
            system_paasta_config = load_system_paasta_config()
            max_unavailable = system_paasta_config.get_pdb_max_unavailable()
        pdr = pod_disruption_budget_for_service_instance(
            service=self.kube_deployment.service,
            instance=self.kube_deployment.instance,
            max_unavailable=max_unavailable,
        )
        try:
            existing_pdr = kube_client.policy.read_namespaced_pod_disruption_budget(
                name=pdr.metadata.name, namespace=pdr.metadata.namespace
            )
        except ApiException as e:
            if e.status == 404:
                existing_pdr = None
            else:
                raise
        if existing_pdr:
            if existing_pdr.spec.min_available is not None:
                # min_available and max_unavailable are mutually exclusive
                # in the k8s API; leave manually-configured PDBs alone.
                logging.info(
                    "Not updating poddisruptionbudget: can't have both "
                    "min_available and max_unavailable"
                )
            elif existing_pdr.spec.max_unavailable != pdr.spec.max_unavailable:
                logging.info(f"Updating poddisruptionbudget {pdr.metadata.name}")
                return kube_client.policy.patch_namespaced_pod_disruption_budget(
                    name=pdr.metadata.name, namespace=pdr.metadata.namespace, body=pdr
                )
            else:
                logging.info(f"poddisruptionbudget {pdr.metadata.name} up to date")
        else:
            logging.info(f"creating poddisruptionbudget {pdr.metadata.name}")
            return create_pod_disruption_budget(
                kube_client=kube_client, pod_disruption_budget=pdr
            )
class DeploymentWrapper(Application):
    """Application backed by a Kubernetes Deployment, including HPA
    management for horizontally autoscaled instances."""

    def deep_delete(self, kube_client: KubeClient) -> None:
        """
        Remove all controllers, pods, and pod disruption budgets related to this application
        :param kube_client:
        """
        delete_options = V1DeleteOptions(propagation_policy="Foreground")
        try:
            kube_client.deployments.delete_namespaced_deployment(
                self.item.metadata.name,
                self.item.metadata.namespace,
                body=delete_options,
            )
        except ApiException as e:
            if e.status == 404:
                # Deployment does not exist, nothing to delete but
                # we can consider this a success.
                self.logging.debug(
                    "not deleting nonexistent deploy/{} from namespace/{}".format(
                        self.item.metadata.name, self.item.metadata.namespace
                    )
                )
            else:
                raise
        else:
            self.logging.info(
                "deleted deploy/{} from namespace/{}".format(
                    self.item.metadata.name, self.item.metadata.namespace
                )
            )
        self.delete_pod_disruption_budget(kube_client)
        self.delete_horizontal_pod_autoscaler(kube_client)

    def get_existing_app(self, kube_client: KubeClient):
        """Fetch the live Deployment object from the cluster."""
        return kube_client.deployments.read_namespaced_deployment(
            name=self.item.metadata.name, namespace=self.item.metadata.namespace
        )

    def create(self, kube_client: KubeClient) -> None:
        """Create the Deployment plus its PDB and (when configured) HPA."""
        create_deployment(kube_client=kube_client, formatted_deployment=self.item)
        self.ensure_pod_disruption_budget(kube_client)
        self.sync_horizontal_pod_autoscaler(kube_client)

    def deep_delete_and_create(self, kube_client: KubeClient) -> None:
        """Delete the Deployment, wait up to 60s for it to disappear
        (force-deleting leftover pods if needed), then recreate it."""
        self.deep_delete(kube_client)
        timer = 0
        while (
            self.kube_deployment in set(list_all_deployments(kube_client))
            and timer < 60
        ):
            sleep(1)
            timer += 1
        if timer >= 60 and self.kube_deployment in set(
            list_all_deployments(kube_client)
        ):
            # Still present after a minute: force-delete the pods.
            try:
                force_delete_pods(
                    self.item.metadata.name,
                    self.kube_deployment.service,
                    self.kube_deployment.instance,
                    self.item.metadata.namespace,
                    kube_client,
                )
            except ApiException as e:
                if e.status == 404:
                    # Deployment does not exist, nothing to delete but
                    # we can consider this a success.
                    self.logging.debug(
                        "not deleting nonexistent deploy/{} from namespace/{}".format(
                            self.kube_deployment.service, self.item.metadata.namespace
                        )
                    )
                else:
                    raise
            else:
                self.logging.info(
                    "deleted deploy/{} from namespace/{}".format(
                        self.kube_deployment.service, self.item.metadata.namespace
                    )
                )
        self.create(kube_client=kube_client)

    def update(self, kube_client: KubeClient) -> None:
        # If HPA is enabled, do not update replicas.
        # In all other cases, replica is set to max(instances, min_instances)
        if self.soa_config.config_dict.get("bounce_method", "") == "brutal":
            # Brutal bounce: delete + recreate in a background thread
            # (with its own KubeClient) instead of a rolling update.
            threading.Thread(
                target=self.deep_delete_and_create, args=[KubeClient()]
            ).start()
            return
        update_deployment(kube_client=kube_client, formatted_deployment=self.item)

    def update_related_api_objects(self, kube_client: KubeClient) -> None:
        """Update the PDB (via super) and reconcile the HPA."""
        super().update_related_api_objects(kube_client)
        self.sync_horizontal_pod_autoscaler(kube_client)

    def should_have_hpa(self):
        """Whether this instance should be managed by an HPA:
        autoscaling configured, not bespoke, and not stopped."""
        return (
            (
                self.soa_config.get_max_instances() is not None
                or self.soa_config.config_dict.get("horizontal_autoscaling", False)
            )
            # with bespoke autoscaler, setup_kubernetes_job sets the number of instances directly; no HPA is required.
            and self.soa_config.get_autoscaling_params()["decision_policy"] != "bespoke"
            and self.soa_config.get_desired_state() != "stop"
        )

    def sync_horizontal_pod_autoscaler(self, kube_client: KubeClient) -> None:
        """
        In order for autoscaling to work, there needs to be at least two configurations
        min_instnace, max_instance, and there cannot be instance.
        """
        self.logging.info(
            f"Syncing HPA setting for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
        )
        hpa_exists = self.exists_hpa(kube_client)
        if hpa_exists:
            if not self.should_have_hpa():
                # Remove HPA if autoscaling is disabled
                self.delete_horizontal_pod_autoscaler(kube_client)
                return
            elif autoscaling_is_paused():
                # Pin min_instances to the current replica count so a
                # paused autoscaler cannot scale the service down.
                self.logging.info(
                    f"Autoscaler is paused. Setting min instances to {self.item.spec.replicas}."
                    f"HPA will not scale down service."
                )
                self.soa_config.set_min_instances(self.item.spec.replicas)
        body = self.soa_config.get_autoscaling_metric_spec(
            name=self.item.metadata.name,
            cluster=self.soa_config.cluster,
            namespace=self.item.metadata.namespace,
        )
        if not body:
            # No metric spec could be built from the soa-config.
            self.logging.info(
                f"CRIT: autoscaling misconfigured for {self.kube_deployment.service}."
                f"{self.kube_deployment.instance}.Please correct the configuration and update pre-commit hook."
            )
            return
        self.logging.debug(body)
        if not hpa_exists:
            self.logging.info(
                f"Creating new HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}"
            )
            kube_client.autoscaling.create_namespaced_horizontal_pod_autoscaler(
                namespace=self.item.metadata.namespace, body=body, pretty=True
            )
        else:
            self.logging.info(
                f"Updating new HPA for {self.item.metadata.name}/name in {self.item.metadata.namespace}/namespace"
            )
            kube_client.autoscaling.replace_namespaced_horizontal_pod_autoscaler(
                name=self.item.metadata.name,
                namespace=self.item.metadata.namespace,
                body=body,
                pretty=True,
            )

    def exists_hpa(self, kube_client: KubeClient) -> bool:
        """True when an HPA with this app's name exists in its namespace."""
        return (
            len(
                kube_client.autoscaling.list_namespaced_horizontal_pod_autoscaler(
                    field_selector=f"metadata.name={self.item.metadata.name}",
                    namespace=self.item.metadata.namespace,
                ).items
            )
            > 0
        )

    def delete_horizontal_pod_autoscaler(self, kube_client: KubeClient) -> None:
        """Delete this app's HPA; a missing HPA (404) counts as success."""
        try:
            kube_client.autoscaling.delete_namespaced_horizontal_pod_autoscaler(
                name=self.item.metadata.name,
                namespace=self.item.metadata.namespace,
                body=V1DeleteOptions(),
            )
        except ApiException as e:
            if e.status == 404:
                # Deployment does not exist, nothing to delete but
                # we can consider this a success.
                self.logging.debug(
                    f"not deleting nonexistent HPA/{self.item.metadata.name} from namespace/{self.item.metadata.namespace}"
                )
            else:
                raise
        else:
            self.logging.info(
                "deleted HPA/{} from namespace/{}".format(
                    self.item.metadata.name, self.item.metadata.namespace
                )
            )
class StatefulSetWrapper(Application):
    """Application backed by a Kubernetes StatefulSet (no HPA handling)."""

    def deep_delete(self, kube_client: KubeClient) -> None:
        """
        Remove all controllers, pods, and pod disruption budgets related to this application
        :param kube_client:
        """
        delete_options = V1DeleteOptions(propagation_policy="Foreground")
        try:
            kube_client.deployments.delete_namespaced_stateful_set(
                self.item.metadata.name,
                self.item.metadata.namespace,
                body=delete_options,
            )
        except ApiException as e:
            if e.status == 404:
                # StatefulSet does not exist, nothing to delete but
                # we can consider this a success.
                self.logging.debug(
                    "not deleting nonexistent statefulset/{} from namespace/{}".format(
                        self.item.metadata.name, self.item.metadata.namespace
                    )
                )
            else:
                raise
        else:
            self.logging.info(
                "deleted statefulset/{} from namespace/{}".format(
                    self.item.metadata.name, self.item.metadata.namespace
                )
            )
        self.delete_pod_disruption_budget(kube_client)

    def create(self, kube_client: KubeClient):
        """Create the StatefulSet and its pod disruption budget."""
        create_stateful_set(kube_client=kube_client, formatted_stateful_set=self.item)
        self.ensure_pod_disruption_budget(kube_client)

    def update(self, kube_client: KubeClient):
        """Apply the rendered StatefulSet to the cluster."""
        update_stateful_set(kube_client=kube_client, formatted_stateful_set=self.item)
def get_application_wrapper(
    formatted_application: Union[V1Deployment, V1StatefulSet]
) -> Application:
    """Wrap a rendered Kubernetes object in the matching Application subclass.

    :param formatted_application: a fully-populated V1Deployment or V1StatefulSet
    :raises ValueError: for any other object type (ValueError subclasses
        Exception, so existing broad handlers still catch it)
    """
    app: Application
    if isinstance(formatted_application, V1Deployment):
        app = DeploymentWrapper(formatted_application)
    elif isinstance(formatted_application, V1StatefulSet):
        app = StatefulSetWrapper(formatted_application)
    else:
        # Include the offending type so the failure is diagnosable.
        raise ValueError(
            "Unknown kubernetes object to update: %s"
            % type(formatted_application).__name__
        )
    return app
| StarcoderdataPython |
4906114 | <reponame>ravngr/jtfadump2
#!/usr/bin/env python3
# -- coding: utf-8 --
import argparse
import code
import logging
import logging.config
import os
import sys
import time
import serial
import pyvisa
import yaml
import capture
import experiment
import exporter
import post_export
import post_process
import util
__author__ = '<NAME>'
__license__ = 'MIT'
__version__ = '0.1.0'
__maintainer__ = '<NAME>'
__email__ = '<EMAIL>'
__status__ = 'Development'
class ExperimentNode:
    """Node in the experiment tree.

    `size` counts this node plus all of its descendants; a node with no
    children (None or an empty list) stores its children as None.
    """

    def __init__(self, exp, children=None):
        self.experiment = exp
        if not children:
            # Leaf node: normalise missing/empty children to None.
            self.children = None
            self.size = 1
        else:
            self.children = children
            self.size = 1 + sum(node.size for node in children)
def _generate_module_args(global_config, parent, *args, **kwargs):
    """Instantiate the module class named by kwargs['class'].

    Global per-class defaults from *global_config* are merged with this
    instance's own kwargs (instance values win) and passed to the
    constructor via util.class_instance_from_dict.
    """
    class_name = kwargs.pop('class')
    # Copy the global defaults before overlaying instance options: the
    # previous code updated the shared global_config entry in place, so
    # one instance's parameters leaked into every later instance of the
    # same class.
    instance_config = dict(global_config.get(class_name, {}))
    instance_config.update(kwargs)
    return util.class_instance_from_dict(class_name, parent, *args, **instance_config)
def _generate_experiment_tree(module_config, global_module_config):
    """Recursively build an ExperimentNode tree from a config dict.

    Per-class defaults from *global_module_config* are overlaid with this
    node's own options ('class' selects the experiment class, 'child'
    lists nested node configs, everything else becomes constructor
    kwargs).
    """
    # Copy the global defaults: the previous code mutated the shared
    # global entry via update()/pop(), corrupting it for sibling nodes
    # of the same class.
    config = dict(global_module_config.get(module_config['class'], {}))
    config.update(module_config)
    c = util.class_from_str(config.pop('class'), experiment.__name__)
    child_nodes = []
    if 'child' in config:
        for child in config.pop('child'):
            child_nodes.append(_generate_experiment_tree(child, global_module_config))
    return ExperimentNode(c(**config), child_nodes)
def main():
    """Entry point: parse arguments, load YAML configuration, construct all
    modules and drive the experiment tree until every experiment is exhausted.

    Fixes applied relative to the original:
    * ``yaml.safe_load`` instead of loader-less ``yaml.load`` (a TypeError on
      PyYAML >= 6, and unsafe before that).
    * ``logging`` calls no longer pass the unsupported ``file=`` keyword
      (``Logger.error`` raised TypeError on it).
    * the capture-module presence check now tests the ``'capture'`` key
      (it previously re-tested ``'experiment'``).
    * ``!= 1`` instead of ``is not 1`` identity tests on ints.
    * ``warning()`` instead of deprecated ``warn()``; post-processors are
      named via ``type(p).__name__`` (instances have no ``__name__``).
    """
    # Get start time
    start_time = time.gmtime()
    start_time_str = time.strftime('%Y%m%d_%H%M%S', start_time)

    # Generate list of available modules for the --help epilog
    module_list = 'Experiment Modules:\n\t'
    module_list += '\n\t'.join(util.get_module_subclasses(experiment, experiment.Experiment))
    module_list += '\n\nCapture Modules:\n\t'
    module_list += '\n\t'.join(util.get_module_subclasses(capture, capture.Capture))
    module_list += '\n\nPost-Process Modules:\n\t'
    module_list += '\n\t'.join(util.get_module_subclasses(post_process, post_process.PostProcessor))
    module_list += '\n\nExporter Modules:\n\t'
    module_list += '\n\t'.join(util.get_module_subclasses(exporter, exporter.Exporter))
    module_list += '\n\nPost-Exporter Modules:\n\t'
    module_list += '\n\t'.join(util.get_module_subclasses(post_export, post_export.PostExporter))

    # Parse command line arguments
    parse = argparse.ArgumentParser(description='jtfadump2 Experiment System',
                                    formatter_class=argparse.RawDescriptionHelpFormatter, epilog=module_list)

    parse.add_argument('name', help='Prefix for results folder')
    parse.add_argument('config', help='YAML configuration file(s)', nargs='+')
    parse.add_argument('-v', '--verbose', help='Verbose output', dest='display_verbose', action='store_true')
    parse.set_defaults(display_verbose=False)
    parse.add_argument('--visa', help='Display VISA traffic in console', dest='display_visa', action='store_true')
    parse.set_defaults(display_visa=False)
    parse.add_argument('-q', '--quiet', help='Suppress info logging output', dest='display_quiet', action='store_true')
    # BUG FIX: this previously re-defaulted display_visa instead of display_quiet
    parse.set_defaults(display_quiet=False)
    parse.add_argument('-i', '--interactive', help='Drop to interactive python shell on error', dest='interactive',
                       action='store_true')
    parse.set_defaults(interactive=False)

    args = parse.parse_args()

    # Get experiment prefix
    experiment_name = args.name.strip().replace(' ', '_')

    # Load configuration YAML file(s); later files override earlier ones
    config = {}

    for f in args.config:
        with open(f, 'r') as h:
            # safe_load: configuration needs no arbitrary python objects, and
            # yaml.load() without a Loader is a TypeError on PyYAML >= 6.
            config.update(yaml.safe_load(h))

    # Setup directories
    result_path = os.path.realpath(config['result']['path'])

    if not os.path.exists(result_path) or not os.path.isdir(result_path):
        print("Result directory {} either doesn't exist or is not a writable directory".format(result_path),
              file=sys.stderr)
        return

    result_path = os.path.join(result_path, "{}-{}".format(experiment_name, start_time_str))

    # Create result directory
    os.makedirs(result_path, exist_ok=False)

    log_path = os.path.realpath(config['logging'].get('path', result_path))

    # Setup logging
    logging_config_dict = config.pop('logging')['config']

    # Generate log filename
    log_filename = os.path.join(log_path, "log-{}-{}.log".format(experiment_name, start_time_str))

    # If a handler filename is None then set the generated name as the target
    if logging_config_dict['handlers']:
        for (key, value) in logging_config_dict['handlers'].items():
            if 'filename' in value and not value['filename']:
                logging_config_dict['handlers'][key]['filename'] = log_filename

    # Apply logging configuration
    logging.config.dictConfig(logging_config_dict)
    root_logger = logging.getLogger(__name__)

    try:
        git_hash = util.get_git_hash()
    except OSError:
        git_hash = 'not found'

    root_logger.info("jtfadump2 {} | git hash: {}".format(__version__, git_hash))
    root_logger.info("python {}".format(sys.version))

    for m in [('pyserial', serial.VERSION), (pyvisa.__name__, pyvisa.__version__), ('pyyaml', yaml.__version__)]:
        root_logger.info("{} {}".format(m[0], m[1]))

    root_logger.info("Started: {}".format(time.strftime('%a, %d %b %Y %H:%M:%S +0000', start_time)))
    root_logger.info("Launch command: {}".format(' '.join(sys.argv)))
    root_logger.info("Result directory: {}".format(result_path))

    # Dump configuration to log
    root_logger.debug('--- BEGIN CONFIGURATION ---')

    for line in yaml.dump(config).split('\n'):
        root_logger.debug(line)

    root_logger.debug('--- END CONFIGURATION ---')

    # Validate required module sections. (Logger.error accepts no file=
    # keyword -- the original calls raised TypeError before logging anything.)
    if 'experiment' not in config or not config['experiment']:
        root_logger.error('Configuration must specify at least one experiment module!')
        return

    # BUG FIX: this check previously re-tested the 'experiment' key
    if 'capture' not in config or not config['capture']:
        root_logger.error('Configuration must specify at least one capture module!')
        return

    if 'export' not in config or not config['export']:
        root_logger.error('Configuration must specify at least one export module!')
        return

    # Get global module configuration (optional section)
    global_module_config = config.pop('modules', {})

    # Create modules
    experiment_nodes = []
    capture_modules = []
    post_process_modules = []
    export_modules = []
    post_export_modules = []

    # Setup experiment stack
    experiment_module_count = 0

    for node_config in config.pop('experiment'):
        node = _generate_experiment_tree(node_config, global_module_config)
        experiment_module_count += node.size
        experiment_nodes.append(node)

    # Generate other modules
    for module_config in config.pop('capture'):
        capture_modules.append(_generate_module_args(global_module_config, capture.__name__, **module_config))

    if 'post_process' in config and config['post_process']:
        for module_config in config.pop('post_process'):
            post_process_modules.append(_generate_module_args(global_module_config, post_process.__name__,
                                                              **module_config))

    for module_config in config.pop('export'):
        export_modules.append(_generate_module_args(global_module_config, exporter.__name__,
                                                    result_directory=result_path, **module_config))

    if 'post_export' in config and config['post_export']:
        for module_config in config.pop('post_export'):
            post_export_modules.append(_generate_module_args(global_module_config, post_export.__name__,
                                                             result_directory=result_path, **module_config))

    capture_module_count = len(capture_modules)
    post_process_module_count = len(post_process_modules)
    export_module_count = len(export_modules)
    post_export_module_count = len(post_export_modules)

    # Note loaded modules ('!= 1' replaces the original 'is not 1' identity
    # tests, which rely on int interning and are a SyntaxWarning)
    root_logger.info("Loaded {} experiment module{}".format(experiment_module_count,
                                                           's' if experiment_module_count != 1 else ''))
    root_logger.info("Loaded {} capture module{}".format(capture_module_count,
                                                         's' if capture_module_count != 1 else ''))
    root_logger.info("Loaded {} post-process module{}".format(post_process_module_count,
                                                              's' if post_process_module_count != 1 else ''))
    root_logger.info("Loaded {} export module{}".format(export_module_count,
                                                        's' if export_module_count != 1 else ''))
    root_logger.info("Loaded {} post-export module{}".format(post_export_module_count,
                                                             's' if post_export_module_count != 1 else ''))

    # Track running experiments in order to stop them properly
    running_experiments = []

    # Catch all exceptions for logging
    try:
        # Step experiment stack
        active_experiments = []

        while experiment_nodes:
            current_node = experiment_nodes[0]
            current_experiment = current_node.experiment

            if current_experiment.has_next():
                # Add experiment to active list
                if current_experiment not in active_experiments:
                    active_experiments.append(current_experiment)

                if current_experiment not in running_experiments:
                    running_experiments.append(current_experiment)

                # Step the active experiment
                current_experiment.step()

                if current_node.children:
                    # Append children to node stack
                    experiment_nodes[:0] = current_node.children
                else:
                    # Save data from this experiment
                    # Generate a unique identifier for the capture
                    capture_id = util.rand_hex_str(64)
                    capture_id_short = capture_id[-8::]

                    # Timestamp data and attach unique id
                    data = {
                        'cap_id': capture_id,
                        'cap_time': time.strftime('%a, %d %b %Y %H:%M:%S +0000'),
                        'cap_timestamp': time.time()
                    }

                    # Get current state of experiment
                    for e in active_experiments:
                        data.update(e.get_state())

                    # Capture data from all sources
                    for c in capture_modules:
                        data.update(c.get_data(active_experiments))

                    # Apply optional post-processors to data
                    if post_process_modules:
                        for p in post_process_modules:
                            d = p.process(data)

                            # Don't let badly written post-processors wipe out data
                            if d is not None:
                                data = d
                            else:
                                # warning() (warn() is deprecated); name the
                                # post-processor via its class -- instances
                                # have no __name__ attribute
                                root_logger.warning("PostProcessor {} returning no data".format(type(p).__name__))

                    # Export data
                    exported_files = []

                    for e in export_modules:
                        f = e.export(capture_id_short, data)

                        if f is not None:
                            exported_files.extend(f)

                    # Post-export
                    for pe in post_export_modules:
                        pe.process(exported_files)
            else:
                # Remove experiment both from the stack and from the active list
                experiment_nodes.pop(0)

                # Guard: an experiment that was exhausted before ever being
                # stepped was never added to the active list
                if current_experiment in active_experiments:
                    active_experiments.remove(current_experiment)

                # Reset experiment state in case it is re-used
                current_experiment.reset()

        root_logger.info('Experiment finished normally')
    except BaseException:
        # Deliberately catch everything (including KeyboardInterrupt) so the
        # failure is logged and optionally inspected before being re-raised.
        root_logger.exception('Caught exception', exc_info=True)

        if args.interactive:
            # Drop to interactive shell on error
            root_logger.warning('Dropping to interactive shell')
            code.interact(local=locals())

        raise
    finally:
        # Stop all running experiments
        for e in running_experiments:
            e.stop()

    root_logger.info('Exiting')
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
3465015 | <reponame>iTeam-co/pytglib
from ..utils import Object
class UpgradeBasicGroupChatToSupergroupChat(Object):
    """Telegram API request: upgrade a basic group chat to a supergroup.

    Creates a new supergroup from an existing basic group and sends the
    corresponding messageChatUpgradeTo and messageChatUpgradeFrom messages.
    Requires creator privileges and deactivates the original basic group.

    Attributes:
        ID (:obj:`str`): ``upgradeBasicGroupChatToSupergroupChat``

    Args:
        chat_id (:obj:`int`):
            Identifier of the chat to upgrade

    Returns:
        Chat

    Raises:
        :class:`telegram.Error`
    """

    # Wire-format type identifier for this request.
    ID = "upgradeBasicGroupChatToSupergroupChat"

    def __init__(self, chat_id, extra=None, **kwargs):
        # Identifier of the basic group chat to upgrade (int).
        self.chat_id = chat_id
        self.extra = extra

    @staticmethod
    def read(q: dict, *args) -> "UpgradeBasicGroupChatToSupergroupChat":
        # Deserialize from a raw response dictionary.
        return UpgradeBasicGroupChatToSupergroupChat(q.get('chat_id'))
| StarcoderdataPython |
9781808 | from typing import Any, Callable, Dict, List, Union
import numpy
from ppq.core import (DataType, OperationMeta, QuantizationStates,
TargetPlatform, TensorMeta, TensorQuantizationConfig,
empty_ppq_cache, ppq_warning)
from ppq.IR import BaseGraph, Operation, QuantableOperation, RunnableGraph
from ppq.IR.base.command import GraphDeployCommand
from ppq.quantization.qfunction.linear import PPQLinearQuantFunction
import torch
from .base import (GLOBAL_DISPATCHING_TABLE, BaseGraphExecutor,
QuantOPRuntimeHook, RuntimeHook)
from .op import TorchBackendContext
def build_meta(value: Any) -> TensorMeta:
if isinstance(value, torch.Tensor):
return TensorMeta.parsing_from_torch_tensor(value)
if isinstance(value, numpy.ndarray):
return TensorMeta.parsing_from_numpy_ndarray(value)
raise TypeError(f'Can not tracing meta for given value(type: {type(value)}), check your graph again.')
class TorchMetaDataTracingHook(RuntimeHook):
    """Runtime hook that records input/output TensorMeta for one operation."""

    def __init__(self, operation: Operation) -> None:
        # Metas are collected by the pre/post forward callbacks below.
        self.input_metas, self.output_metas = [], []
        super().__init__(operation, operation_meta=None)

    def pre_forward_hook(self, inputs: list, **kwargs) -> list:
        # Some operations receive None as an input value; those get a
        # placeholder meta (NONETYPE dtype, no shape) instead of a traced one.
        for tensor, var in zip(inputs, self._hook_to.inputs):
            if tensor is not None:
                self.input_metas.append(build_meta(tensor))
            else:
                ppq_warning(
                    f'Unexpected input value of operation {self._hook_to.name}, '
                    f'recieving "None" at its input {self._hook_to.inputs.index(var)}')
                self.input_metas.append(TensorMeta(dtype=DataType.NONETYPE, shape=None))
        return inputs

    def post_forward_hook(self, outputs: list, **kwargs) -> list:
        self.output_metas = [build_meta(tensor) for tensor in outputs]
        return outputs
class TorchQuantizeDelegate(Callable):
    """Base class for user-defined quantization functions (since PPQ 0.6.2).

    Subclass this and implement your quantization logic in ``__call__``, then
    hand an instance to ``TorchExecutor.register_quantize_delegate(config,
    delegate)``. From then on the executor delegates the quantization of every
    tensor governed by that TensorQuantizationConfig to your object instead of
    its default quantize function.

    Notes:
        * A delegate replaces the quantization *computation* only; it remains
          under the control of the PPQ quantization system. If the config has
          an inactive state (e.g. DEQUANTIZED) the executor never quantizes
          the related tensor, so the delegate never runs for it.
        * Keep tunable parameters as attributes on your delegate instance
          (``self.param1 = ...``); do not edit the config structure directly.
        * Detach with ``TorchExecutor.remove_quantize_delegate(config)``.
    """

    def __init__(self) -> None:
        super().__init__()

    def __call__(self, tensor: torch.Tensor,
                 config: TensorQuantizationConfig) -> torch.Tensor:
        raise NotImplementedError('Implement this function first.')
class TorchExecutor(BaseGraphExecutor, torch.nn.Module):
    """Torch-backed graph executor used for graph simulation and QAT training.

    All operation forward functions are implemented with pytorch, so gradients
    are recorded by the torch engine when executed outside a no_grad scope
    (``forward_with_gradient``). Quantization of operation inputs/outputs is
    applied through ``quantize_function``, which can be overridden per-config
    via ``register_quantize_delegate``.
    """
    def __init__(
        self, graph: BaseGraph, fp16_mode: bool = True,
        device: str = 'cuda') -> None:
        """
        TorchExecutor - executor object which use torch as its backend.
            torch backend is used to graph simulating & training(QAT)
        all operation forward functions are written with pytorch,
            so that they will have gradient recorded by torch engine.
        which means you can directly access to tensor.grad after using output.backward()
        Args:
            graph (BaseGraph):
                executing graph object,
                TorchExecutor will automatically send all graph parameters towards executing device.
            rounding_policy (RoundingPolicy)
                rounding_policy takes responsibility for quantizing input/output/parameters during graph executing.
                Notice that all quantizer will possess a platform specified rounding_policy (within BaseQuantizer._quant_function),
                    you are supposed to pass BaseQuantizer._quant_function._rounding_policy
                    to initialize this executor whenever you have a quantizer.
                Different rounding_policy will brings totally different quant behaviour and rounding.
                    It will greatly change the output of your network in some cases.
            fp16_mode (bool, optional): [whether the simulator is running in fp16 mode(unimplemented).]. Defaults to True.
            device (str, optional): [
                executing device, as same as torch.device,
                you can not select gpu to executing yet,
                graph will always be send to the very first visible cuda device.
            ]. Defaults to 'cuda'.
        """
        self._default_quant_fn = PPQLinearQuantFunction
        self._deployed = False
        self._device = device
        self._executing_context = TorchBackendContext(executing_device=self._device)
        super().__init__(graph)
        self._runnable_graph = RunnableGraph(self._graph)
        # Mapping: TensorQuantizationConfig -> TorchQuantizeDelegate override,
        # consulted by quantize_function().
        self._delegates = {}
        # fp16 is not available for now.
        self.fp16_mode = fp16_mode
        self.deploy()

    def register_quantize_delegate(
        self, config: TensorQuantizationConfig,
        delegator: TorchQuantizeDelegate):
        """Hand the quantization computation of *config* over to *delegator*.

        Since PPQ 0.6.2. Once registered, the executor no longer applies its
        default quant function to tensors governed by *config*; the delegate's
        ``__call__`` is used instead. The delegate replaces the computation
        only -- it is still gated by the config's state, so an inactive config
        (e.g. DEQUANTIZED) is never quantized and the delegate never runs.
        Remove with ``remove_quantize_delegate(config)``.

        Raises:
            TypeError: if *delegator* is not a TorchQuantizeDelegate or
                *config* is not a TensorQuantizationConfig.
        """
        if not isinstance(delegator, TorchQuantizeDelegate):
            raise TypeError(
                f'You can only register a TorchQuantizeDelegate as quantization delegator function,'
                f' however a/an {type(delegator)} was given')
        if not isinstance(config, TensorQuantizationConfig):
            raise TypeError(
                f'Except a TensorQuantizationConfig instance, however {type(config)} was passed.')
        self._delegates[config] = delegator

    def remove_quantize_delegate(
        self, config: TensorQuantizationConfig):
        """Detach the quantization delegate registered for *config*, if any.

        Since PPQ 0.6.2; counterpart of ``register_quantize_delegate``. After
        removal the executor's default quant function is used again for
        tensors governed by *config*. Silently does nothing when no delegate
        is registered for *config*.

        Raises:
            TypeError: if *config* is not a TensorQuantizationConfig.
        """
        if not isinstance(config, TensorQuantizationConfig):
            raise TypeError(
                f'Except a TensorQuantizationConfig instance, however {type(config)} was passed.')
        if config in self._delegates:
            self._delegates.pop(config)

    def deploy(self):
        """Deploy graph parameters towards target device.
        Raises:
            ValueError: [when target device is unacceptable]
        """
        self._deployed = True
        self._runnable_graph(GraphDeployCommand(device=self._device))

    def to(self, device: str):
        # just keep TorchExecutor behaving like torch.nn.Module:
        # move the graph to the new device and return self for chaining.
        self._device = torch.device(device)
        self.deploy()
        return self

    @ torch.no_grad()
    def forward(
        self,
        inputs: Union[dict, list, torch.Tensor],
        output_names:List[str] = None,
        hooks: Dict[str, RuntimeHook] = None
    ) -> List[torch.Tensor]:
        """Execute the graph. No gradients are stored or computed.

        Args:
            inputs (Union[dict, list, torch.Tensor]):
                a single tensor, a list of tensors, or a
                {variable name: value} dict feeding named graph variables.
            output_names (List[str], optional):
                names of the variables to return, in order.
                Defaults to the graph's declared outputs.
            hooks (Dict[str, RuntimeHook], optional):
                {operation name: hook} table for customizing operation
                behaviour and collating data during executing. Hooks inherit
                from ppq.executor.base.RuntimeHook; pre_forward_hook is called
                before each operation (and may rewrite its inputs),
                post_forward_hook after it. Quantable operations can use the
                richer ppq.executor.base.QuantOPRuntimeHook.

        Returns:
            List[torch.Tensor]: values of the requested output variables.
        """
        return self.__forward(
            inputs=inputs,
            output_names=output_names,
            executing_order=self._executing_order,
            hooks=hooks
        )

    def forward_with_gradient(
        self,
        inputs: Union[dict, list, torch.Tensor],
        output_names:List[str] = None,
        hooks: Dict[str, RuntimeHook] = None,
    ) -> List[torch.Tensor]:
        """Execute the graph with gradient recording enabled.

        Same contract as ``forward`` (see its docstring for the parameter
        semantics), but executed outside a no_grad scope so the torch engine
        records the computation graph and ``output.backward()`` works.

        Returns:
            List[torch.Tensor]: values of the requested output variables.
        """
        return self.__forward(
            inputs=inputs,
            output_names=output_names,
            executing_order=self._executing_order,
            hooks=hooks
        )

    def __forward(
        self,
        inputs: Union[dict, list, torch.Tensor],
        executing_order: List[Operation],
        output_names:List[str] = None,
        hooks: Dict[str, RuntimeHook] = None,
    ) -> List[torch.Tensor]:
        """Shared execution core behind forward / forward_with_gradient.

        Feeds *inputs* into graph variables, runs *executing_order* up to the
        last operation needed to produce *output_names* (quantizing inputs and
        outputs of QuantableOperations and invoking per-operation hooks), then
        returns the collected output values. Intermediate variable values are
        released as soon as all their consumers have run.
        """
        # processing with different input format
        if isinstance(inputs, dict):
            # directly feed value into variables
            for name, value in inputs.items():
                if name in self._graph.variables:
                    var = self._graph.variables[name]
                    var.value = value
                else:
                    print(f'Can not find variable {name} in your graph, please check.')
        else:
            inputs = self.prepare_input(inputs=inputs)
            for key, value in inputs.items():
                assert isinstance(value, torch.Tensor), \
                    f'TorchExecutor can only accept tensor as its input, while {type(value)} was given'
                # input is acceptable, feed input value
                self._graph_input_dictionary[key].value = value
        # processing with output
        # last_idx: 1 past the position (in executing_order) of the last
        # operation that must run to produce every requested output.
        last_idx = 0 # record last variable
        if output_names is None:
            output_names = [name for name in self._graph.outputs]
        for name in output_names:
            if name not in self._graph.variables:
                raise KeyError(f'You are requiring output value of variable {name}(is not a variable name), '
                               'however it is not a valid variable of current graph.')
            source_op = self._graph.variables[name].source_op
            if source_op is not None:
                last_idx = max(last_idx, executing_order.index(source_op) + 1)
        visited_op, result_collector = [], [None for _ in output_names]
        # output name can be the same as input name, collect them directly.
        for name in output_names:
            if name in inputs:
                result_collector[output_names.index(name)] = inputs[name]
        for operation in executing_order[: last_idx]:
            try:
                assert isinstance(operation, Operation), 'Oops, seems you got something weird in your graph'
                assert isinstance(operation.platform, TargetPlatform), (
                    f'Operation {operation.name} has an invalid platform setting, '
                    f'only PPQ.core.TargetPlatform is expected here, while {type(operation.platform)} was given')
                platform_dispatching_table = GLOBAL_DISPATCHING_TABLE[operation.platform]
                if operation.type not in platform_dispatching_table:
                    raise NotImplementedError(
                        f'Graph op: {operation.name}({operation.type}) '
                        f'has no backend implementation on target platform {operation.platform}.'
                        'Register this op to ppq.executor.base.py and ppq.executor.op first')
                operation_forward_func = platform_dispatching_table[operation.type]
                operation_runtime_hook = hooks[operation.name] if (hooks is not None) and (operation.name in hooks) else None
                inputs = [var.value for var in operation.inputs]
                # if operation is an QuantableOperation, we have to quant its inputs and outputs at first.
                if isinstance(operation, QuantableOperation):
                    input_configs = [_ for _ in operation.config.input_quantization_config]
                    inputs = [self.quantize_function(input, config) for input, config in zip(inputs, input_configs)]
                # PATCH 20220208
                # a requested output may also be some operation's input; collect
                # it here so the (possibly quantized) value is captured.
                for idx, var in enumerate(operation.inputs):
                    if var.name in output_names:
                        result_collector[output_names.index(var.name)] = inputs[idx]
                # invoking pre-forward hook
                if operation_runtime_hook is not None:
                    if isinstance(operation_runtime_hook, QuantOPRuntimeHook):
                        inputs = operation_runtime_hook.pre_forward_hook(
                            inputs=[var.value for var in operation.inputs],
                            quant_inputs=inputs, quant_configs=input_configs)
                    elif isinstance(operation_runtime_hook, RuntimeHook):
                        inputs = operation_runtime_hook.pre_forward_hook(inputs=inputs)
                    else: raise TypeError(f'invalid hook instance was given with operation: {operation}')
                # forward and collecting result
                outputs = operation_forward_func(operation, inputs, self._executing_context)
                outputs = outputs if isinstance(outputs, (list, tuple)) else [outputs]
                # keep the un-quantized (fp) outputs for QuantOPRuntimeHook
                fp_outputs = outputs
                # quantize all result if is necessary
                if isinstance(operation, QuantableOperation):
                    output_configs = [_ for _ in operation.config.output_quantization_config]
                    outputs = [self.quantize_function(output, config) for output, config in zip(outputs, output_configs)]
                # invoking post-forward hook
                if operation_runtime_hook is not None:
                    if isinstance(operation_runtime_hook, QuantOPRuntimeHook):
                        outputs = operation_runtime_hook.post_forward_hook(
                            outputs=fp_outputs, quant_outputs=outputs,
                            quant_configs=output_configs)
                    elif isinstance(operation_runtime_hook, RuntimeHook):
                        outputs = operation_runtime_hook.post_forward_hook(outputs=outputs)
                    else: raise TypeError(f'invalid hook instance was given with operation: {operation}')
                # feed value to graph variables.
                for output_idx, output_var in enumerate(operation.outputs):
                    output_var = operation.outputs[output_idx]
                    output_var.value = outputs[output_idx]
                    if output_var.name in output_names:
                        result_collector[output_names.index(output_var.name)] = outputs[output_idx]
            except Exception as _:
                # the original exception stays attached as __context__
                raise RuntimeError(f'Error happens when dealing with operation {str(operation)}')
            # remove useless value(runtime clear).
            # once every consumer of a non-parameter variable has executed,
            # its value can be released to keep memory usage bounded.
            visited_op.append(operation)
            for var in operation.inputs:
                if var.is_parameter: continue
                if all(op in visited_op for op in var.dest_ops):
                    var.value = None
        # clear all variable(static clear).
        for var in self._graph.variables.values():
            if not var.is_parameter:
                var.value = None
        # end for
        return result_collector

    @ torch.no_grad()
    @ empty_ppq_cache
    def tracing_operation_meta(
        self,
        inputs: Union[dict, list, torch.Tensor],
        output_names: List[str] = None,
    ) -> None:
        """Tracing meta data for each operation, if there are some already
        created meta data with your operation, They will be override without
        warrning.
        Args:
            inputs (Union[dict, list, torch.Tensor]): [description]
            output_names (List[str], optional): [description]. Defaults to None.
        """
        # attach one tracing hook per operation, run a forward pass, then
        # turn the collected metas into OperationMeta records.
        hooks = {}
        for op_name, operation in self._graph.operations.items():
            hooks[op_name] = TorchMetaDataTracingHook(operation=operation)
        self.__forward(
            inputs=inputs,
            output_names=output_names,
            executing_order=self._executing_order,
            hooks=hooks)
        for op_name, operation in self._graph.operations.items():
            operation.meta_data = OperationMeta(
                input_metas = hooks[op_name].input_metas,
                output_metas = hooks[op_name].output_metas,
                operation_name = operation.name,
                operation_type = operation.type,
                executing_order = self._executing_order.index(operation)
            )

    def load_graph(self, graph: BaseGraph) -> dict:
        """Replace the executing graph and redeploy it onto the current device."""
        super().load_graph(graph)
        self._deployed = False
        self._runnable_graph = RunnableGraph(self._graph)
        self._runnable_graph(GraphDeployCommand(device=self._device))

    def quantize_function(self, input: torch.Tensor, config: TensorQuantizationConfig = None) -> torch.Tensor:
        """Quantize *input* according to *config*.

        Inactive config states pass the tensor through untouched; a registered
        TorchQuantizeDelegate for *config* takes precedence over the default
        quant function.
        """
        if config is None: return self._default_quant_fn(input, config)
        elif not QuantizationStates.is_activated(config.state): return input
        elif config in self._delegates: return self._delegates[config](input, config)
        else: return self._default_quant_fn(input, config)

    def dummy_forward(self, hooks: Dict[str, RuntimeHook] = None) -> None:
        """This function allows you to execute entire graph without feeding any
        data. This feature is required for operation parameter quantization.
        See also: ppq.quantization.optim.ParameterQuantizePass.
        This function fakes some input tensors via operation metadata.
        ATTENTION: operation must have metadata before invoking this function
        (see tracing_operation_meta).
        Args:
            hooks (Dict[str, RuntimeHook], optional):
                {operation name: hook} table for customizing operation
                behaviour and collating data during executing; see
                ppq.executor.base.RuntimeHook / QuantOPRuntimeHook.
                Defaults to None.
        """
        # build dummy input based on meta data
        feed_dict = {}
        for var_name, input_var in self._graph.inputs.items():
            if len(input_var.dest_ops) == 0: continue
            dest_op = input_var.dest_ops[0]
            dest_idx = dest_op.inputs.index(input_var)
            assert isinstance(dest_op, Operation) and dest_op.meta_data is not None, \
                'Operation meta has not been traced. Please invoke TorchExecutor.tracing_meta_data() first'
            tensor_meta = dest_op.meta_data.input_metas[dest_idx]
            feed_dict[var_name] = tensor_meta.create_tensor(device=self._device)
        self.forward(inputs=feed_dict, hooks=hooks)

    def partial_graph_forward(
        self, operations: List[Operation],
        feed_dict: Dict[str, torch.Tensor],
        output_names:List[str]) -> List[torch.Tensor]:
        """This forward function allows you to execute a series operations in
        your graph. (only operations list in your params will be executed with
        this function) Which serves as a great feature for quantization aware
        training.
        Args:
            operations (List[Operation]):
                operations that you want to execute,
                notice that executing will strictly follow your operation order.
            feed_dict (Dict[str, torch.Tensor]):
                an dictionary contains {variable name: value}, as an input to this execution.
            output_names (List[str]):
                output variable names.
        Returns:
            List[torch.Tensor]: values of the requested output variables.
        """
        return self.__forward(
            inputs=feed_dict,
            output_names=output_names,
            executing_order=operations
        )
| StarcoderdataPython |
6401006 | import discord, os, dotenv
from discord.ext import commands
dotenv.load_dotenv()
TOKEN = os.getenv('TOKEN')
PREFIX = os.getenv('PREFIX')
# Bot instance: commands are matched against PREFIX, case-insensitively.
bot = commands.Bot(
    command_prefix=PREFIX,
    case_insensitive=True
)
# Events
@bot.event
async def on_ready():
    # Connection established: report the bot identity and guild count.
    print(f"Logged in as {bot.user.name} in {len(bot.guilds)} guilds!")
@bot.event
async def on_message(message):
    # Hand non-bot messages to the command processor; messages authored by
    # bots (including ourselves) are ignored.
    if not message.author.bot:
        await bot.process_commands(message)
# Commands. Should eventually start using cogs, but this is good for now
@bot.command(
    name='hello', aliases=['hi'],
    brief='Make the bot say "Hello"',  # Quick description of command (fixed unbalanced quote)
    description='Make the bot say "Hello"'  # Long description of command
)
async def hello(ctx):
    """Reply to the invoking user with a greeting that mentions them."""
    await ctx.send(f"Hello {ctx.message.author.mention}!")
if __name__ == '__main__':
    # Script entry point: start the bot. (Removed the dataset separator
    # '| StarcoderdataPython |' that was fused onto this line and made the
    # file a syntax error.)
    bot.run(TOKEN)
3291145 | <reponame>bethunebtj/BigDL<gh_stars>1-10
from bigdl.nn.layer import *
from optparse import OptionParser
from bigdl.nn.criterion import *
from bigdl.nn.initialization_method import *
from bigdl.optim.optimizer import *
from bigdl.transform.vision.image import *
from math import ceil
def t(input_t):
    """Convert a list into a 0..n keyed dict table; pass anything else through.

    A placeholder 0 is stored under key 0 so the real entries occupy keys
    1..len(input_t) (Lua/Torch-style 1-based indexing). Dictionaries (and any
    non-list value) are returned unchanged.
    """
    if type(input_t) is not list:
        # already a dictionary (or some other value): return it back
        return input_t
    # prepend the placeholder so real data starts from index 1
    return dict(enumerate([0] + input_t))
def inception_layer_v1(input_size, config, name_prefix=""):
    """Build one GoogLeNet-v1 inception block.

    `config` is a 1-indexed table (see t()) describing the four towers:
    [1] 1x1 conv, [2] 3x3 reduce + conv, [3] 5x5 reduce + conv,
    [4] max-pool + 1x1 projection.  The towers are concatenated along
    dimension 2 (channels).
    """
    merged = Concat(2)

    tower_1x1 = Sequential()
    tower_1x1.add(SpatialConvolution(input_size, config[1][1], 1, 1, 1, 1)
                  .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                  .set_name(name_prefix + "1x1"))
    tower_1x1.add(ReLU(True).set_name(name_prefix + "relu_1x1"))
    merged.add(tower_1x1)

    tower_3x3 = Sequential()
    tower_3x3.add(SpatialConvolution(input_size, config[2][1], 1, 1, 1, 1)
                  .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                  .set_name(name_prefix + "3x3_reduce"))
    tower_3x3.add(ReLU(True).set_name(name_prefix + "relu_3x3_reduce"))
    tower_3x3.add(SpatialConvolution(config[2][1], config[2][2], 3, 3, 1, 1, 1, 1)
                  .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                  .set_name(name_prefix + "3x3"))
    tower_3x3.add(ReLU(True).set_name(name_prefix + "relu_3x3"))
    merged.add(tower_3x3)

    tower_5x5 = Sequential()
    tower_5x5.add(SpatialConvolution(input_size, config[3][1], 1, 1, 1, 1)
                  .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                  .set_name(name_prefix + "5x5_reduce"))
    tower_5x5.add(ReLU(True).set_name(name_prefix + "relu_5x5_reduce"))
    tower_5x5.add(SpatialConvolution(config[3][1], config[3][2], 5, 5, 1, 1, 2, 2)
                  .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                  .set_name(name_prefix + "5x5"))
    tower_5x5.add(ReLU(True).set_name(name_prefix + "relu_5x5"))
    merged.add(tower_5x5)

    tower_pool = Sequential()
    tower_pool.add(SpatialMaxPooling(3, 3, 1, 1, 1, 1, to_ceil=True).set_name(name_prefix + "pool"))
    tower_pool.add(SpatialConvolution(input_size, config[4][1], 1, 1, 1, 1)
                   .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                   .set_name(name_prefix + "pool_proj"))
    tower_pool.add(ReLU(True).set_name(name_prefix + "relu_pool_proj"))
    merged.add(tower_pool).set_name(name_prefix + "output")

    return merged
def inception_v1_no_aux_classifier(class_num, has_dropout=True):
    """Build GoogLeNet (Inception v1) without the two auxiliary classifiers.

    :param class_num: number of output classes for the final linear layer.
    :param has_dropout: when True, insert the 40% dropout before the classifier.
    :return: a BigDL Sequential model ending in LogSoftMax.
    """
    model = Sequential()
    # Stem: 7x7/2 conv -> pool -> LRN -> 1x1 + 3x3 convs -> LRN -> pool
    model.add(SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3, 1, False)
              .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
              .set_name("conv1/7x7_s2"))
    model.add(ReLU(True).set_name("conv1/relu_7x7"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool1/3x3_s2"))
    model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("pool1/norm1"))
    model.add(SpatialConvolution(64, 64, 1, 1, 1, 1)
              .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
              .set_name("conv2/3x3_reduce"))
    model.add(ReLU(True).set_name("conv2/relu_3x3_reduce"))
    model.add(SpatialConvolution(64, 192, 3, 3, 1, 1, 1, 1)
              .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
              .set_name("conv2/3x3"))
    model.add(ReLU(True).set_name("conv2/relu_3x3"))
    model.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("conv2/norm2"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool2/3x3_s2"))
    # Inception stages 3a-5b; each config table is 1-indexed via t().
    model.add(inception_layer_v1(192, t([t([64]), t(
        [96, 128]), t([16, 32]), t([32])]), "inception_3a/"))
    model.add(inception_layer_v1(256, t([t([128]), t(
        [128, 192]), t([32, 96]), t([64])]), "inception_3b/"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
    model.add(inception_layer_v1(480, t([t([192]), t(
        [96, 208]), t([16, 48]), t([64])]), "inception_4a/"))
    model.add(inception_layer_v1(512, t([t([160]), t(
        [112, 224]), t([24, 64]), t([64])]), "inception_4b/"))
    model.add(inception_layer_v1(512, t([t([128]), t(
        [128, 256]), t([24, 64]), t([64])]), "inception_4c/"))
    model.add(inception_layer_v1(512, t([t([112]), t(
        [144, 288]), t([32, 64]), t([64])]), "inception_4d/"))
    model.add(inception_layer_v1(528, t([t([256]), t(
        [160, 320]), t([32, 128]), t([128])]), "inception_4e/"))
    model.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True))
    model.add(inception_layer_v1(832, t([t([256]), t(
        [160, 320]), t([32, 128]), t([128])]), "inception_5a/"))
    model.add(inception_layer_v1(832, t([t([384]), t(
        [192, 384]), t([48, 128]), t([128])]), "inception_5b/"))
    # Global 7x7 average pool, optional dropout, then the classifier head.
    model.add(SpatialAveragePooling(7, 7, 1, 1).set_name("pool5/7x7_s1"))
    if has_dropout:
        model.add(Dropout(0.4).set_name("pool5/drop_7x7_s1"))
    model.add(View([1024], num_input_dims=3))
    model.add(Linear(1024, class_num)
              .set_init_method(weight_init_method=Xavier(), bias_init_method=Zeros())
              .set_name("loss3/classifier"))
    model.add(LogSoftMax().set_name("loss3/loss3"))
    model.reset()
    return model
def inception_v1(class_num, has_dropout=True):
    """Build the full GoogLeNet (Inception v1) with both auxiliary classifiers.

    The model output concatenates three heads (main loss3 branch plus the
    two auxiliary heads loss1/loss2) via nested Concat containers.

    :param class_num: number of output classes for each classifier head.
    :param has_dropout: when True, insert dropout before each classifier.
    """
    # feature1: stem + inception 3a-3b + 4a, shared by all three heads.
    feature1 = Sequential()
    feature1.add(SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3, 1, False)
                 .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                 .set_name("conv1/7x7_s2"))
    feature1.add(ReLU(True).set_name("conv1/relu_7x7"))
    feature1.add(
        SpatialMaxPooling(3, 3, 2, 2, to_ceil=True)
        .set_name("pool1/3x3_s2"))
    feature1.add(SpatialCrossMapLRN(5, 0.0001, 0.75)
                 .set_name("pool1/norm1"))
    feature1.add(SpatialConvolution(64, 64, 1, 1, 1, 1)
                 .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                 .set_name("conv2/3x3_reduce"))
    feature1.add(ReLU(True).set_name("conv2/relu_3x3_reduce"))
    feature1.add(SpatialConvolution(64, 192, 3, 3, 1, 1, 1, 1)
                 .set_init_method(weight_init_method=Xavier(), bias_init_method=ConstInitMethod(0.1))
                 .set_name("conv2/3x3"))
    feature1.add(ReLU(True).set_name("conv2/relu_3x3"))
    feature1.add(SpatialCrossMapLRN(5, 0.0001, 0.75).set_name("conv2/norm2"))
    feature1.add(
        SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool2/3x3_s2"))
    feature1.add(inception_layer_v1(192, t([
        t([64]), t([96, 128]), t([16, 32]), t([32])]),
        "inception_3a/"))
    feature1.add(inception_layer_v1(256, t([
        t([128]), t([128, 192]), t([32, 96]), t([64])]),
        "inception_3b/"))
    feature1.add(
        SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool3/3x3_s2"))
    feature1.add(inception_layer_v1(480, t([
        t([192]), t([96, 208]), t([16, 48]), t([64])]),
        "inception_4a/"))
    # output1: first auxiliary classifier, branching off after inception 4a.
    output1 = Sequential()
    output1.add(SpatialAveragePooling(
        5, 5, 3, 3, ceil_mode=True).set_name("loss1/ave_pool"))
    output1.add(
        SpatialConvolution(512, 128, 1, 1, 1, 1).set_name("loss1/conv"))
    output1.add(ReLU(True).set_name("loss1/relu_conv"))
    # NOTE(review): View([128 * 4 * 4, 3]) — the trailing 3 is likely meant to
    # be num_input_dims (cf. View([1024], num_input_dims=3) in the no-aux
    # variant); confirm against the BigDL View API before changing.
    output1.add(View([128 * 4 * 4, 3]))
    output1.add(Linear(128 * 4 * 4, 1024).set_name("loss1/fc"))
    output1.add(ReLU(True).set_name("loss1/relu_fc"))
    if has_dropout:
        output1.add(Dropout(0.7).set_name("loss1/drop_fc"))
    output1.add(Linear(1024, class_num).set_name("loss1/classifier"))
    output1.add(LogSoftMax().set_name("loss1/loss"))
    # feature2: inception 4b-4d, continuing the main trunk after 4a.
    feature2 = Sequential()
    feature2.add(inception_layer_v1(512,
                                    t([t([160]), t([112, 224]), t([24, 64]), t([64])]),
                                    "inception_4b/"))
    feature2.add(inception_layer_v1(512,
                                    t([t([128]), t([128, 256]), t([24, 64]), t([64])]),
                                    "inception_4c/"))
    feature2.add(inception_layer_v1(512,
                                    t([t([112]), t([144, 288]), t([32, 64]), t([64])]),
                                    "inception_4d/"))
    # output2: second auxiliary classifier, branching off after inception 4d.
    output2 = Sequential()
    output2.add(SpatialAveragePooling(5, 5, 3, 3).set_name("loss2/ave_pool"))
    output2.add(
        SpatialConvolution(528, 128, 1, 1, 1, 1).set_name("loss2/conv"))
    output2.add(ReLU(True).set_name("loss2/relu_conv"))
    # NOTE(review): same View([..., 3]) question as output1 above.
    output2.add(View([128 * 4 * 4, 3]))
    output2.add(Linear(128 * 4 * 4, 1024).set_name("loss2/fc"))
    output2.add(ReLU(True).set_name("loss2/relu_fc"))
    if has_dropout:
        output2.add(Dropout(0.7).set_name("loss2/drop_fc"))
    output2.add(Linear(1024, class_num).set_name("loss2/classifier"))
    output2.add(LogSoftMax().set_name("loss2/loss"))
    # output3: main branch — inception 4e-5b + the primary classifier.
    output3 = Sequential()
    output3.add(inception_layer_v1(528,
                                   t([t([256]), t([160, 320]), t([32, 128]), t([128])]),
                                   "inception_4e/"))
    output3.add(SpatialMaxPooling(3, 3, 2, 2, to_ceil=True).set_name("pool4/3x3_s2"))
    output3.add(inception_layer_v1(832,
                                   t([t([256]), t([160, 320]), t([32, 128]), t([128])]),
                                   "inception_5a/"))
    output3.add(inception_layer_v1(832,
                                   t([t([384]), t([192, 384]), t([48, 128]), t([128])]),
                                   "inception_5b/"))
    output3.add(SpatialAveragePooling(7, 7, 1, 1).set_name("pool5/7x7_s1"))
    if has_dropout:
        output3.add(Dropout(0.4).set_name("pool5/drop_7x7_s1"))
    output3.add(View([1024, 3]))
    output3.add(Linear(1024, class_num)
                .set_init_method(weight_init_method=Xavier(), bias_init_method=Zeros())
                .set_name("loss3/classifier"))
    output3.add(LogSoftMax().set_name("loss3/loss3"))
    # Wire the heads together: (feature2 -> [output3 | output2]) | output1.
    split2 = Concat(2).set_name("split2")
    split2.add(output3)
    split2.add(output2)
    mainBranch = Sequential()
    mainBranch.add(feature2)
    mainBranch.add(split2)
    split1 = Concat(2).set_name("split1")
    split1.add(mainBranch)
    split1.add(output1)
    model = Sequential()
    model.add(feature1)
    model.add(split1)
    model.reset()
    return model
def get_inception_data(url, sc=None, data_type="train"):
    """Load the "train" or "val" Hadoop sequence files under `url` as an ImageFrame."""
    data_path = os.path.join(url, data_type)
    return SeqFileFolder.files_to_image_frame(url=data_path, sc=sc, class_num=1000)
def config_option_parser():
    """Build the command-line option parser for the Inception v1 trainer."""
    opt_parser = OptionParser()
    # Data / model locations.
    opt_parser.add_option("-f", "--folder", type=str, dest="folder", default="",
                          help="url of hdfs folder store the hadoop sequence files")
    opt_parser.add_option("--model", type=str, dest="model", default="", help="model snapshot location")
    opt_parser.add_option("--state", type=str, dest="state", default="", help="state snapshot location")
    opt_parser.add_option("--checkpoint", type=str, dest="checkpoint", default="", help="where to cache the model")
    opt_parser.add_option("-o", "--overwrite", action="store_true", dest="overwrite", default=False,
                          help="overwrite checkpoint files")
    # Training schedule.
    opt_parser.add_option("-e", "--maxEpoch", type=int, dest="maxEpoch", default=0, help="epoch numbers")
    opt_parser.add_option("-i", "--maxIteration", type=int, dest="maxIteration", default=62000, help="iteration numbers")
    opt_parser.add_option("-l", "--learningRate", type=float, dest="learningRate", default=0.01, help="learning rate")
    opt_parser.add_option("--warmupEpoch", type=int, dest="warmupEpoch", default=0, help="warm up epoch numbers")
    opt_parser.add_option("--maxLr", type=float, dest="maxLr", default=0.0, help="max Lr after warm up")
    opt_parser.add_option("-b", "--batchSize", type=int, dest="batchSize", help="batch size")
    opt_parser.add_option("--classNum", type=int, dest="classNum", default=1000, help="class number")
    opt_parser.add_option("--weightDecay", type=float, dest="weightDecay", default=0.0001, help="weight decay")
    opt_parser.add_option("--checkpointIteration", type=int, dest="checkpointIteration", default=620,
                          help="checkpoint interval of iterations")
    # Gradient clipping.
    opt_parser.add_option("--gradientMin", type=float, dest="gradientMin", default=0.0, help="min gradient clipping by")
    opt_parser.add_option("--gradientMax", type=float, dest="gradientMax", default=0.0, help="max gradient clipping by")
    opt_parser.add_option("--gradientL2NormThreshold", type=float, dest="gradientL2NormThreshold", default=0.0, help="gradient L2-Norm threshold")
    return opt_parser
if __name__ == "__main__":
# parse options
parser = config_option_parser()
(options, args) = parser.parse_args(sys.argv)
if not options.learningRate:
parser.error("-l --learningRate is a mandatory opt")
if not options.batchSize:
parser.error("-b --batchSize is a mandatory opt")
# init
sparkConf = create_spark_conf().setAppName("inception v1")
sc = get_spark_context(sparkConf)
redire_spark_logs()
show_bigdl_info_logs()
init_engine()
image_size = 224 # create dataset
train_transformer = Pipeline([PixelBytesToMat(),
Resize(256, 256),
RandomCropper(image_size, image_size, True, "Random", 3),
ChannelNormalize(123, 117, 104),
MatToTensor(to_rgb=False),
ImageFrameToSample(input_keys=["imageTensor"], target_keys=["label"])
])
raw_train_data = get_inception_data(options.folder, sc, "train")
train_data = DataSet.image_frame(raw_train_data).transform(train_transformer)
val_transformer = Pipeline([PixelBytesToMat(),
Resize(256, 256),
RandomCropper(image_size, image_size, False, "Center", 3),
ChannelNormalize(123, 117, 104),
MatToTensor(to_rgb=False),
ImageFrameToSample(input_keys=["imageTensor"], target_keys=["label"])
])
raw_val_data = get_inception_data(options.folder, sc, "val")
val_data = DataSet.image_frame(raw_val_data).transform(val_transformer)
# build model
if options.model != "":
# load model snapshot
inception_model = Model.load(options.model)
else:
inception_model = inception_v1_no_aux_classifier(options.classNum)
# set optimization method
iterationPerEpoch = int(ceil(float(1281167) / options.batchSize))
if options.maxEpoch:
maxIteration = iterationPerEpoch * options.maxEpoch
else:
maxIteration = options.maxIteration
warmup_iteration = options.warmupEpoch * iterationPerEpoch
if options.state != "":
# load state snapshot
optim = OptimMethod.load(options.state)
else:
if warmup_iteration == 0:
warmupDelta = 0.0
else:
if options.maxLr:
maxlr = options.maxLr
else:
maxlr = options.learningRate
warmupDelta = (maxlr - options.learningRate)/warmup_iteration
polyIteration = maxIteration - warmup_iteration
lrSchedule = SequentialSchedule(iterationPerEpoch)
lrSchedule.add(Warmup(warmupDelta), warmup_iteration)
lrSchedule.add(Poly(0.5, maxIteration), polyIteration)
optim = SGD(learningrate=options.learningRate, learningrate_decay=0.0, weightdecay=options.weightDecay,
momentum=0.9, dampening=0.0, nesterov=False,
leaningrate_schedule=lrSchedule)
# create triggers
if options.maxEpoch:
checkpoint_trigger = EveryEpoch()
test_trigger = EveryEpoch()
end_trigger = MaxEpoch(options.maxEpoch)
else:
checkpoint_trigger = SeveralIteration(options.checkpointIteration)
test_trigger = SeveralIteration(options.checkpointIteration)
end_trigger = MaxIteration(options.maxIteration)
# Optimizer
optimizer = Optimizer.create(
model=inception_model,
training_set=train_data,
optim_method=optim,
criterion=ClassNLLCriterion(),
end_trigger=end_trigger,
batch_size=options.batchSize
)
if options.checkpoint:
optimizer.set_checkpoint(checkpoint_trigger, options.checkpoint, options.overwrite)
if options.gradientMin and options.gradientMax:
optimizer.set_gradclip_const(options.gradientMin, options.gradientMax)
if options.gradientL2NormThreshold:
optimizer.set_gradclip_l2norm(options.gradientL2NormThreshold)
optimizer.set_validation(trigger=test_trigger,
val_rdd=val_data,
batch_size=options.batchSize,
val_method=[Top1Accuracy(), Top5Accuracy()])
trained_model = optimizer.optimize()
sc.stop()
| StarcoderdataPython |
9711608 | <reponame>philip-schrodt/ICEWS-to-jsonl
"""
utilDEDI2021.py
jsonl read/write for the DEDI programs and other utilities
NOTE 19.10.22: Those customized writeedit()/writesrc() -- originally added to provide a more readable
JSONL format for manual editing, which wasn't needed after a while -- were a bad idea,
or at least should have used a more systematic (and compatible) way of dealing with None
Programmer: <NAME> <<EMAIL>>
This code is covered under the MIT license: http://opensource.org/licenses/MIT
REVISION HISTORY:
31-Jul-2019: Initial version
07-Aug-2019: Added timestamp()
=========================================================================================================
"""
import datetime
import json
WEEK_SUFFIX = "Wk5"
MONTH_INFIX = "202101"
MONTH_SUFFIX = "-" + MONTH_INFIX + ".jsonl"
recOrder = ["ccode", "status",
"+date", "comment", "country",
"+id", "icewsid",
"-headline",
"-text",
"+size", "sizeCategory",
"+protesterdemand", "stateresponse",
"+protest", "protesterviolence", "protesteridentity",
"+event", "eventText",
"-location",
"+region", "version", "language", "publication", "year", "enddate", "citation", "codedDate", "coder"]
srcOrder = ["ccode", "status", "+id",
"+date", "comment", "country", "region", "event", "eventText",
"-headline",
"-text",
"+size", "sizeCategory",
"+protesterdemand", "stateresponse",
"+protesterviolence", "protesteridentity",
"-location",
"+region", "version", "language", "publication", "year", "enddate", "citation"]
def read_file(filename):
    """Yield successive records from a line-delimited JSON file.

    Lines are buffered until one beginning with "}" is reached; the buffer
    (plus a closing brace) is then parsed with json.loads and yielded.
    Literal tab characters are escaped so json.loads accepts them.
    """
    buffered = ""
    for line in open(filename, "r"):
        if not line.startswith("}"):
            if "\t" in line:
                # Raw tabs are illegal inside JSON strings; escape them.
                line = line.replace("\t", "\\t")
            buffered += line[:-1].strip()
            continue
        # print(buffered)  # debug: uncomment to locate badly formed records
        record = json.loads(buffered + "}")
        yield record
        buffered = ""
def writeedit(rec, fout):
    """ Write combined record """
    # Serializes `rec` to `fout` in a hand-rolled, editor-friendly JSON-like
    # layout, one entry per recOrder field.  Entry prefixes select the format:
    #   "-field": blank line, then the value via json.dumps (pretty-printed,
    #             except "location" which is dumped compactly)
    #   "+field": newline first, then inline  "field": "value",
    #   "field":  inline  "field": "value",
    fout.write('{\n')
    for fl in recOrder[:-1]:
        if fl.startswith("-"):
            fl = fl[1:]
            fout.write('\n"' + fl + '":\n')
            if fl == "location":
                # location is kept compact on a single line.
                fout.write(json.dumps(rec[fl]) + ",")
            else:
                fout.write(json.dumps(rec[fl], indent=2, sort_keys=True ) + ",")
        elif fl.startswith("+"):
            fout.write("\n")
            fl = fl[1:]
            fout.write('"' + fl + '": "' + str(rec[fl]) + '", ')
        else:
            if fl == "eventText":
                # NOTE(review): unlike writesrc(), the '"eventText": ' key is
                # not written here — only the bare value appears.  Confirm
                # whether this is intentional before relying on the output.
                fout.write(json.dumps(rec[fl]) + ",")
            else:
                fout.write('"' + fl + '": "' + str(rec[fl]) + '", ')
    # Final field closes the record with no trailing comma.
    fl = recOrder[-1]
    fout.write('"' + fl + '": "' + str(rec[fl]) + '"\n}\n')
def writesrc(rec, fout):
    """ Write original record """
    # Same hand-rolled layout as writeedit(), but ordered by srcOrder and
    # with eventText written together with its key.  Prefix convention:
    #   "-field": blank line + json.dumps value (location kept compact)
    #   "+field": newline first, then inline pair
    #   "field":  inline  "field": "value",
    fout.write('{\n')
    for fl in srcOrder[:-1]:
        if fl.startswith("-"):
            fl = fl[1:]
            fout.write('\n"' + fl + '":\n')
            if fl == "location":
                fout.write(json.dumps(rec[fl]) + ",")
            else:
                fout.write(json.dumps(rec[fl], indent=2, sort_keys=True ) + ",")
        elif fl.startswith("+"):
            fout.write("\n")
            fl = fl[1:]
            fout.write('"' + fl + '": "' + str(rec[fl]) + '", ')
        else:
            if fl == "eventText":
                # eventText is JSON-encoded to protect embedded quotes.
                fout.write('"eventText": '+ json.dumps(rec[fl]) + ",")
            else:
                fout.write('"' + fl + '": "' + str(rec[fl]) + '", ')
    # Final field closes the record with no trailing comma.
    fl = srcOrder[-1]
    fout.write('"' + fl + '": "' + str(rec[fl]) + '"\n}\n')
def timestamp():
    """Return a '-YYMMDD-HHMMSS.jsonl' file-name suffix for the current local time."""
    now = datetime.datetime.now()
    return "-{}-{}.jsonl".format(now.strftime("%Y%m%d")[2:], now.strftime("%H%M%S"))
def newdate(isodate, forward = False):
    """Shift an ISO 'YYYY-MM-DD' date string by one day.

    Uses datetime.date.fromisoformat (Python 3.7+) instead of the previous
    manual string slicing, as the original comment itself suggested.

    Args:
        isodate: date string in ISO 'YYYY-MM-DD' form.
        forward: move one day forward when True, otherwise one day back.

    Returns:
        Tuple of (ISO string, datetime.date) for the shifted date.
    """
    step = datetime.timedelta(days = 1)
    thedate = datetime.date.fromisoformat(isodate)
    thedate = thedate + step if forward else thedate - step
    return thedate.isoformat(), thedate
| StarcoderdataPython |
4893285 | import os
import pandas as pd
from scipy.stats import spearmanr
from larval_gonad.normalization import tpm
from larval_gonad.constants import L3_BULK, L3_SC
def main():
    """Write a table of pairwise Spearman p-values across bulk and single-cell samples."""
    combined = pd.concat(
        [read_l3_sc(), read_l3_bulk()], sort=True, axis=1, join="inner"
    )  # type: pd.DataFrame
    _, pvals = spearmanr(combined.values, axis=0)
    pval_table = pd.DataFrame(pvals, index=combined.columns, columns=combined.columns)
    pval_table.rename_axis("Spearman pval").to_csv(snakemake.output[0], sep="\t")
def read_l3_sc() -> pd.DataFrame:
    """Aggregate L3 single-cell counts per (FBgn, rep) and TPM-normalize them."""
    # Gene lengths indexed by FBgn, needed for TPM normalization.
    lengths = (
        pd.read_feather(snakemake.input.gene_annot)
        .set_index("FBgn")["length"]
        .squeeze()
    )
    # Sum raw counts per gene and replicate, pivot replicates to columns,
    # then apply the single-cell sample names.
    counts = pd.read_feather(snakemake.input.larval_scrnaseq)
    counts = counts.groupby(["FBgn", "rep"]).Count.sum().unstack()
    counts = counts.rename(columns=L3_SC)
    return tpm(counts, lengths).dropna()
def read_l3_bulk() -> pd.DataFrame:
    """Load the bulk TPM table, index it by FBgn, and apply bulk sample names."""
    bulk = pd.read_csv(snakemake.input.larval_bulk, sep="\t", index_col=0)
    return bulk.rename_axis("FBgn").rename(columns=L3_BULK)
if __name__ == "__main__":
if os.getenv("SNAKE_DEBUG", False):
from larval_gonad.debug import snakemake_debug
snakemake = snakemake_debug(
input=dict(
gene_annot="../../references/gene_annotation_dmel_r6-26.feather",
larval_scrnaseq="../../output/seurat3-cluster-wf/aggegated_gene_counts.feather",
larval_bulk="../../output/bulk-rnaseq-wf/rnaseq_aggregation/tpm_gene_level_counts.tsv",
)
)
main()
| StarcoderdataPython |
3382460 | <filename>password.py
import secrets

# Candidate characters: full lower/upper alphabet, digits 0-9, punctuation.
# (The previous pool accidentally omitted 'x', 'X', and '3'.)
symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_+}{|":<>?'

password_len = int(input('How many characters in the password do you want: '))
password_count = int(input('How many passwords do you want to generate: '))

for _ in range(password_count):
    # secrets.choice is a CSPRNG-backed pick, appropriate for passwords
    # (random.choice is not); ''.join avoids quadratic string building.
    password = ''.join(secrets.choice(symbols) for _ in range(password_len))
    print('Your password:', password)
| StarcoderdataPython |
3424738 | """
Show help on a pysoilmap command or show a list of all available commands.
"""
import pysoilmap.cli as cli
import click
@click.command('help', context_settings={'ignore_unknown_options': True})
@click.argument('command', nargs=-1)
def main(command):
    """Show pysoilmap command line help."""
    # (The docstring above doubles as this command's own click help text.)
    # Re-invoke the CLI with '--help' appended so click prints the help for
    # the given — possibly empty — subcommand path.
    cli.main(command + ('--help',))
| StarcoderdataPython |
9690958 | # -*- coding: utf-8 -*-
"""Format int of float."""
from fractions import Fraction
class Format:  # pylint: disable=R0903
    """Output formats for int or float values."""

    @staticmethod
    def get_fraction(val_float: float) -> str:  # noqa: WPS602
        """Return *val_float* rendered as a reduced fraction string."""
        fraction = Fraction(val_float).limit_denominator()
        return str(fraction)
| StarcoderdataPython |
8074926 | <filename>src/testing/TestON/tests/FUNCintentRest/FUNCintentRest.py
# Testing the basic intent functionality of ONOS
# TODO: Replace the CLI calls with REST API equivalents as they become available.
# - May need to write functions in the onosrestdriver.py file to do this
# TODO: Complete implementation of case 3000, 4000, and 6000 as REST API allows
# -Currently there is no support in the REST API for Multi to Single and Single to Multi point intents
# As such, these cases are incomplete and should not be enabled in the .params file
import time
import json
class FUNCintentRest:
    def __init__( self ):
        # Placeholder attribute conventionally defined on TestON test
        # classes; not otherwise used in this file.
        self.default = ''
    def CASE1( self, main ):
        import time
        import imp
        import re
        # NOTE(review): the triple-quoted block below is not a real docstring
        # (the imports precede it); it is just a no-op string expression.
        """
        - Construct tests variables
        - GIT ( optional )
            - Checkout ONOS master branch
            - Pull latest ONOS code
        - Building ONOS ( optional )
            - Install ONOS package
            - Build ONOS package
        """
        main.case( "Constructing test variables and building ONOS package" )
        main.step( "Constructing test variables" )
        main.caseExplanation = "This test case is mainly for loading " +\
                               "from params file, and pull and build the " +\
                               " latest ONOS package"
        stepResult = main.FALSE
        # Test variables: pulled from the .params file configured for the test.
        try:
            main.testOnDirectory = re.sub( "(/tests)$", "", main.testDir )
            main.apps = main.params[ 'ENV' ][ 'cellApps' ]
            gitBranch = main.params[ 'GIT' ][ 'branch' ]
            main.dependencyPath = main.testOnDirectory + \
                                  main.params[ 'DEPENDENCY' ][ 'path' ]
            main.topology = main.params[ 'DEPENDENCY' ][ 'topology' ]
            main.scale = ( main.params[ 'SCALE' ][ 'size' ] ).split( "," )
            if main.ONOSbench.maxNodes:
                main.maxNodes = int( main.ONOSbench.maxNodes )
            else:
                main.maxNodes = 0
            wrapperFile1 = main.params[ 'DEPENDENCY' ][ 'wrapper1' ]
            wrapperFile2 = main.params[ 'DEPENDENCY' ][ 'wrapper2' ]
            wrapperFile3 = main.params[ 'DEPENDENCY' ][ 'wrapper3' ]
            # Sleep intervals (seconds) used throughout the test cases.
            main.startUpSleep = int( main.params[ 'SLEEP' ][ 'startup' ] )
            main.checkIntentSleep = int( main.params[ 'SLEEP' ][ 'checkintent' ] )
            # NOTE(review): "removeIntentsleeo" looks like a typo of
            # "removeIntentSleep" — check which spelling other cases read
            # before renaming it.
            main.removeIntentsleeo = int( main.params[ 'SLEEP' ][ 'removeintent' ] )
            main.rerouteSleep = int( main.params[ 'SLEEP' ][ 'reroute' ] )
            main.fwdSleep = int( main.params[ 'SLEEP' ][ 'fwd' ] )
            main.addIntentSleep = int( main.params[ 'SLEEP' ][ 'addIntent' ] )
            main.checkTopoAttempts = int( main.params[ 'SLEEP' ][ 'topoAttempts' ] )
            gitPull = main.params[ 'GIT' ][ 'pull' ]
            main.numSwitch = int( main.params[ 'MININET' ][ 'switch' ] )
            main.numLinks = int( main.params[ 'MININET' ][ 'links' ] )
            main.cellData = {} # for creating cell file
            main.hostsData = {}
            # CLIs holds REST handles; CLIs2 holds the classic CLI handles.
            main.CLIs = []
            main.CLIs2 = []
            main.ONOSip = []
            main.scapyHostNames = main.params[ 'SCAPY' ][ 'HOSTNAMES' ].split( ',' )
            main.scapyHosts = [] # List of scapy hosts for iterating
            main.assertReturnString = '' # Assembled assert return string
            main.ONOSip = main.ONOSbench.getOnosIps()
            print main.ONOSip
            # Assigning ONOS cli handles to a list
            try:
                for i in range( 1, main.maxNodes + 1 ):
                    main.CLIs.append( getattr( main, 'ONOSrest' + str( i ) ) )
                    main.CLIs2.append( getattr( main, 'ONOScli' + str( i ) ) )
            except AttributeError:
                # Fewer drivers in the .topo file than nodes in the env:
                # fall back to however many handles were actually defined.
                main.log.warn( "A " + str( main.maxNodes ) + " node cluster " +
                               "was defined in env variables, but only " +
                               str( len( main.CLIs ) ) +
                               " nodes were defined in the .topo file. " +
                               "Using " + str( len( main.CLIs ) ) +
                               " nodes for the test." )
            # -- INIT SECTION, ONLY RUNS ONCE -- #
            # Load the helper wrapper modules from the dependency directory.
            main.startUp = imp.load_source( wrapperFile1,
                                            main.dependencyPath +
                                            wrapperFile1 +
                                            ".py" )
            main.intentFunction = imp.load_source( wrapperFile2,
                                                   main.dependencyPath +
                                                   wrapperFile2 +
                                                   ".py" )
            main.topo = imp.load_source( wrapperFile3,
                                         main.dependencyPath +
                                         wrapperFile3 +
                                         ".py" )
            # Push the custom Mininet topology file to the Mininet machine.
            # NOTE(review): the scp result is stored but never checked.
            copyResult1 = main.ONOSbench.scp( main.Mininet1,
                                              main.dependencyPath +
                                              main.topology,
                                              main.Mininet1.home + "custom/",
                                              direction="to" )
            if main.CLIs and main.CLIs2:
                stepResult = main.TRUE
            else:
                main.log.error( "Did not properly created list of ONOS CLI handle" )
                stepResult = main.FALSE
        except Exception as e:
            main.log.exception(e)
            main.cleanup()
            main.exit()
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="Successfully construct " +
                                        "test variables ",
                                 onfail="Failed to construct test variables" )
        if gitPull == 'True':
            main.step( "Building ONOS in " + gitBranch + " branch" )
            onosBuildResult = main.startUp.onosBuild( main, gitBranch )
            stepResult = onosBuildResult
            utilities.assert_equals( expect=main.TRUE,
                                     actual=stepResult,
                                     onpass="Successfully compiled " +
                                            "latest ONOS",
                                     onfail="Failed to compile " +
                                            "latest ONOS" )
        else:
            main.log.warn( "Did not pull new code so skipping mvn " +
                           "clean install" )
        main.ONOSbench.getVersion( report=True )
    def CASE2( self, main ):
        """
        - Set up cell
            - Create cell file
            - Set cell file
            - Verify cell file
        - Kill ONOS process
        - Uninstall ONOS cluster
        - Verify ONOS start up
        - Install ONOS cluster
        - Connect to cli
        """
        # main.scale[ 0 ] determines the current number of ONOS controller
        main.numCtrls = int( main.scale[ 0 ] )
        main.case( "Starting up " + str( main.numCtrls ) +
                   " node(s) ONOS cluster" )
        main.caseExplanation = "Set up ONOS with " + str( main.numCtrls ) +\
                               " node(s) ONOS cluster"
        #kill off all onos processes
        main.log.info( "Safety check, killing all ONOS processes" +
                       " before initiating environment setup" )
        time.sleep( main.startUpSleep )
        main.step( "Uninstalling ONOS package" )
        onosUninstallResult = main.TRUE
        for ip in main.ONOSip:
            onosUninstallResult = onosUninstallResult and \
                                  main.ONOSbench.onosUninstall( nodeIp=ip )
        stepResult = onosUninstallResult
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="Successfully uninstalled ONOS package",
                                 onfail="Failed to uninstall ONOS package" )
        time.sleep( main.startUpSleep )
        # Kill any ONOS processes still running on every known node.
        for i in range( main.maxNodes ):
            main.ONOSbench.onosDie( main.ONOSip[ i ] )
        print "NODE COUNT = ", main.numCtrls
        # Cell file only contains the IPs for the current scale's node count.
        tempOnosIp = []
        for i in range( main.numCtrls ):
            tempOnosIp.append( main.ONOSip[i] )
        main.ONOSbench.createCellFile( main.ONOSbench.ip_address,
                                       "temp", main.Mininet1.ip_address,
                                       main.apps, tempOnosIp )
        main.step( "Apply cell to environment" )
        cellResult = main.ONOSbench.setCell( "temp" )
        verifyResult = main.ONOSbench.verifyCell()
        stepResult = cellResult and verifyResult
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="Successfully applied cell to " + \
                                        "environment",
                                 onfail="Failed to apply cell to environment " )
        main.step( "Creating ONOS package" )
        packageResult = main.ONOSbench.onosPackage()
        stepResult = packageResult
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="Successfully created ONOS package",
                                 onfail="Failed to create ONOS package" )
        time.sleep( main.startUpSleep )
        main.step( "Installing ONOS package" )
        onosInstallResult = main.TRUE
        for i in range( main.numCtrls ):
            onosInstallResult = onosInstallResult and \
                                main.ONOSbench.onosInstall( node=main.ONOSip[ i ] )
        stepResult = onosInstallResult
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="Successfully installed ONOS package",
                                 onfail="Failed to install ONOS package" )
        time.sleep( main.startUpSleep )
        main.step( "Starting ONOS service" )
        stopResult = main.TRUE
        startResult = main.TRUE
        onosIsUp = main.TRUE
        for i in range( main.numCtrls ):
            onosIsUp = onosIsUp and main.ONOSbench.isup( main.ONOSip[ i ] )
        if onosIsUp == main.TRUE:
            main.log.report( "ONOS instance is up and ready" )
        else:
            # One recovery attempt: stop then start every node.
            main.log.report( "ONOS instance may not be up, stop and " +
                             "start ONOS again " )
            for i in range( main.numCtrls ):
                stopResult = stopResult and \
                             main.ONOSbench.onosStop( main.ONOSip[ i ] )
            for i in range( main.numCtrls ):
                startResult = startResult and \
                              main.ONOSbench.onosStart( main.ONOSip[ i ] )
        stepResult = onosIsUp and stopResult and startResult
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="ONOS service is ready",
                                 onfail="ONOS service did not start properly" )
        # Start an ONOS cli to provide functionality that is not currently
        # supported by the Rest API remove this when Leader Checking is supported
        # by the REST API
        main.step( "Start ONOS cli" )
        cliResult = main.TRUE
        for i in range( main.numCtrls ):
            cliResult = cliResult and \
                        main.CLIs2[ i ].startOnosCli( main.ONOSip[ i ] )
        stepResult = cliResult
        utilities.assert_equals( expect=main.TRUE,
                                 actual=stepResult,
                                 onpass="Successfully start ONOS cli",
                                 onfail="Failed to start ONOS cli" )
        # Remove the first element in main.scale list
        # (so the next run of this case uses the next cluster size).
        main.scale.remove( main.scale[ 0 ] )
        main.intentFunction.report( main )
def CASE8( self, main ):
# OLD FUNCintentRest CASE 8
# This remains here for archiving and reference purposes and will be
# removed when the new FUNCintentRest is verified to work.
# """
# Compare Topo
# """
# import json
# main.case( "Compare ONOS Topology view to Mininet topology" )
# main.caseExplanation = "Compare topology elements between Mininet" +\
# " and ONOS"
# main.step( "Gathering topology information" )
# # TODO: add a paramaterized sleep here
# devicesResults = main.TRUE # Overall Boolean for device correctness
# linksResults = main.TRUE # Overall Boolean for link correctness
# hostsResults = main.TRUE # Overall Boolean for host correctness
# devices = main.topo.getAllDevices( main )
# hosts = main.topo.getAllHosts( main )
# ports = main.topo.getAllPorts( main )
# links = main.topo.getAllLinks( main )
# clusters = main.topo.getAllClusters( main )
# mnSwitches = main.Mininet1.getSwitches()
# mnLinks = main.Mininet1.getLinks()
# mnHosts = main.Mininet1.getHosts()
# main.step( "Comparing MN topology to ONOS topology" )
# for controller in range( main.numCtrls ):
# controllerStr = str( controller + 1 )
# if devices[ controller ] and ports[ controller ] and\
# "Error" not in devices[ controller ] and\
# "Error" not in ports[ controller ]:
# currentDevicesResult = main.Mininet1.compareSwitches(
# mnSwitches,
# json.loads( devices[ controller ] ),
# json.loads( ports[ controller ] ) )
# else:
# currentDevicesResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentDevicesResult,
# onpass="ONOS" + controllerStr +
# " Switches view is correct",
# onfail="ONOS" + controllerStr +
# " Switches view is incorrect" )
# if links[ controller ] and "Error" not in links[ controller ]:
# currentLinksResult = main.Mininet1.compareLinks(
# mnSwitches, mnLinks,
# json.loads( links[ controller ] ) )
# else:
# currentLinksResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentLinksResult,
# onpass="ONOS" + controllerStr +
# " links view is correct",
# onfail="ONOS" + controllerStr +
# " links view is incorrect" )
# if hosts[ controller ] or "Error" not in hosts[ controller ]:
# currentHostsResult = main.Mininet1.compareHosts(
# mnHosts,
# json.loads( hosts[ controller ] ) )
# else:
# currentHostsResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentHostsResult,
# onpass="ONOS" + controllerStr +
# " hosts exist in Mininet",
# onfail="ONOS" + controllerStr +
# " hosts don't match Mininet" )
# NEW FUNCintentRest Case 8 as based off of the CASE 8 from FUNCintent
"""
Compare ONOS Topology to Mininet Topology
"""
import json
main.case( "Compare ONOS Topology view to Mininet topology" )
main.caseExplanation = "Compare topology elements between Mininet" +\
" and ONOS"
main.log.info( "Gathering topology information from Mininet" )
devicesResults = main.FALSE # Overall Boolean for device correctness
linksResults = main.FALSE # Overall Boolean for link correctness
hostsResults = main.FALSE # Overall Boolean for host correctness
deviceFails = [] # Nodes where devices are incorrect
linkFails = [] # Nodes where links are incorrect
hostFails = [] # Nodes where hosts are incorrect
attempts = main.checkTopoAttempts # Remaining Attempts
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
main.step( "Comparing Mininet topology to ONOS topology" )
while ( attempts >= 0 ) and\
( not devicesResults or not linksResults or not hostsResults ):
time.sleep( 2 )
if not devicesResults:
devices = main.topo.getAllDevices( main )
ports = main.topo.getAllPorts( main )
devicesResults = main.TRUE
deviceFails = [] # Reset for each failed attempt
if not linksResults:
links = main.topo.getAllLinks( main )
linksResults = main.TRUE
linkFails = [] # Reset for each failed attempt
if not hostsResults:
hosts = main.topo.getAllHosts( main )
hostsResults = main.TRUE
hostFails = [] # Reset for each failed attempt
# Check for matching topology on each node
for controller in range( main.numCtrls ):
controllerStr = str( controller + 1 ) # ONOS node number
# Compare Devices
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
try:
deviceData = json.loads( devices[ controller ] )
portData = json.loads( ports[ controller ] )
except (TypeError,ValueError):
main.log.error( "Could not load json: {0} or {1}".format( str( devices[ controller ] ), str( ports[ controller ] ) ) )
currentDevicesResult = main.FALSE
else:
currentDevicesResult = main.Mininet1.compareSwitches(
mnSwitches,deviceData,portData )
else:
currentDevicesResult = main.FALSE
if not currentDevicesResult:
deviceFails.append( controllerStr )
devicesResults = devicesResults and currentDevicesResult
# Compare Links
if links[ controller ] and "Error" not in links[ controller ]:
try:
linkData = json.loads( links[ controller ] )
except (TypeError,ValueError):
main.log.error("Could not load json:" + str( links[ controller ] ) )
currentLinksResult = main.FALSE
else:
currentLinksResult = main.Mininet1.compareLinks(
mnSwitches, mnLinks,linkData )
else:
currentLinksResult = main.FALSE
if not currentLinksResult:
linkFails.append( controllerStr )
linksResults = linksResults and currentLinksResult
# Compare Hosts
if hosts[ controller ] and "Error" not in hosts[ controller ]:
try:
hostData = json.loads( hosts[ controller ] )
except (TypeError,ValueError):
main.log.error("Could not load json:" + str( hosts[ controller ] ) )
currentHostsResult = main.FALSE
else:
currentHostsResult = main.Mininet1.compareHosts(
mnHosts,hostData )
else:
currentHostsResult = main.FALSE
if not currentHostsResult:
hostFails.append( controllerStr )
hostsResults = hostsResults and currentHostsResult
# Decrement Attempts Remaining
attempts -= 1
utilities.assert_equals( expect=[],
actual=deviceFails,
onpass="ONOS correctly discovered all devices",
onfail="ONOS incorrectly discovered devices on nodes: " +
str( deviceFails ) )
utilities.assert_equals( expect=[],
actual=linkFails,
onpass="ONOS correctly discovered all links",
onfail="ONOS incorrectly discovered links on nodes: " +
str( linkFails ) )
utilities.assert_equals( expect=[],
actual=hostFails,
onpass="ONOS correctly discovered all hosts",
onfail="ONOS incorrectly discovered hosts on nodes: " +
str( hostFails ) )
topoResults = hostsResults and linksResults and devicesResults
utilities.assert_equals( expect=main.TRUE,
actual=topoResults,
onpass="ONOS correctly discovered the topology",
onfail="ONOS incorrectly discovered the topology" )
def CASE9( self, main ):
    """
    Report errors/warnings/exceptions found in the ONOS log of the
    first node.
    """
    # Log levels / keywords to scan the ONOS log for.
    searchTerms = [ "INFO", "FOLLOWER", "WARN", "flow", "ERROR", "Except" ]
    main.log.info( "Error report: \n" )
    main.ONOSbench.logReport( globalONOSip[ 0 ], searchTerms, "s" )
    # main.ONOSbench.logReport( globalONOSip[1], [ "INFO" ], "d" )
def CASE10( self, main ):
    """
    Start the Mininet topology with OpenFlow 1.0 switches; exits the
    test if the topology does not come up.
    """
    main.OFProtocol = "1.0"
    main.log.report( "Start Mininet topology with OF 1.0 switches" )
    main.case( "Start Mininet topology with OF 1.0 switches" )
    main.caseExplanation = "Start mininet topology with OF 1.0 " +\
                           "switches to test intents, exits out if " +\
                           "topology did not start correctly"
    main.step( "Starting Mininet topology with OF 1.0 switches" )
    switchArgs = "--switch ovs,protocols=OpenFlow10"
    stepResult = main.Mininet1.startNet(
        topoFile=main.dependencyPath + main.topology,
        args=switchArgs )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully loaded topology",
                             onfail="Failed to load topology" )
    # Bail out of the whole test if the topology failed to load.
    if not stepResult:
        main.cleanup()
        main.exit()
def CASE11( self, main ):
    """
    Start the Mininet topology with OpenFlow 1.3 switches; exits the
    test if the topology does not come up.
    """
    main.OFProtocol = "1.3"
    main.log.report( "Start Mininet topology with OF 1.3 switches" )
    main.case( "Start Mininet topology with OF 1.3 switches" )
    main.caseExplanation = "Start mininet topology with OF 1.3 " +\
                           "switches to test intents, exits out if " +\
                           "topology did not start correctly"
    main.step( "Starting Mininet topology with OF 1.3 switches" )
    switchArgs = "--switch ovs,protocols=OpenFlow13"
    stepResult = main.Mininet1.startNet(
        topoFile=main.dependencyPath + main.topology,
        args=switchArgs )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully loaded topology",
                             onfail="Failed to load topology" )
    # Bail out of the whole test if the topology failed to load.
    if not stepResult:
        main.cleanup()
        main.exit()
def CASE12( self, main ):
    """
    Assign mastership of the Mininet switches to the ONOS controllers
    and verify each switch points at the first ONOS node.
    """
    import re
    main.case( "Assign switches to controllers" )
    main.step( "Assigning switches to controllers" )
    main.caseExplanation = "Assign OF " + main.OFProtocol +\
                           " switches to ONOS nodes"
    # Names of all switches in the topology: s1 .. sN
    switchList = [ 's' + str( i ) for i in range( 1, main.numSwitch + 1 ) ]
    # IPs of the ONOS nodes currently in the cluster
    tempONOSip = [ main.ONOSip[ i ] for i in range( main.numCtrls ) ]
    assignResult = main.Mininet1.assignSwController( sw=switchList,
                                                     ip=tempONOSip,
                                                     port='6653' )
    if not assignResult:
        main.cleanup()
        main.exit()
    # Confirm each switch's controller list includes the first ONOS node.
    for i in range( 1, main.numSwitch + 1 ):
        response = main.Mininet1.getSwController( "s" + str( i ) )
        print( "Response is " + str( response ) )
        if re.search( "tcp:" + main.ONOSip[ 0 ], response ):
            assignResult = assignResult and main.TRUE
        else:
            assignResult = main.FALSE
    utilities.assert_equals( expect=main.TRUE,
                             actual=assignResult,
                             onpass="Successfully assigned switches" +
                                    "to controller",
                             onfail="Failed to assign switches to " +
                                    "controller" )
def CASE13( self, main ):
    """
    Create and start a Scapy component for each configured host.
    """
    import json
    main.case( "Create scapy components" )
    main.step( "Create scapy components" )
    scapyResult = main.TRUE
    # Instantiate one Scapy host component per configured host name.
    for name in main.scapyHostNames:
        main.Scapy1.createHostComponent( name )
        main.scapyHosts.append( getattr( main, name ) )
    main.step( "Start scapy components" )
    # Bring up the CLI and Scapy on each component, then cache its
    # IP/MAC via updateSelf().
    for scapyHost in main.scapyHosts:
        scapyHost.startHostCli()
        scapyHost.startScapy()
        scapyHost.updateSelf()
        main.log.debug( scapyHost.name )
        main.log.debug( scapyHost.hostIp )
        main.log.debug( scapyHost.hostMac )
    utilities.assert_equals( expect=main.TRUE,
                             actual=scapyResult,
                             onpass="Successfully created Scapy Components",
                             onfail="Failed to discover Scapy Components" )
def CASE14( self, main ):
    """
    Discover all hosts and store their data in a dictionary via the
    intent wrapper functions.
    """
    main.case( "Discover all hosts" )
    stepResult = main.TRUE
    main.step( "Discover all ipv4 host hosts " )
    # One group of hosts on the default VLAN, then one group per
    # tagged VLAN.
    hostList = [
        [ "h1", "h3", "h8", "h9", "h11", "h16", "h17", "h19", "h24" ],
        [ "h4", "h12", "h20" ],
        [ "h5", "h13", "h21" ],
        [ "h6", "h14", "h22" ],
        [ "h7", "h15", "h23" ],
    ]
    stepResult = main.intentFunction.getHostsData( main, hostList )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully discovered hosts",
                             onfail="Failed to discover hosts" )
def CASE15( self, main ):
    """
    Discover all hosts with Scapy ARP packets and store their data in
    a dictionary.
    """
    import collections
    main.case( "Discover all hosts using scapy" )
    main.step( "Send packets from each host to the first host and confirm onos discovery" )
    if not main.scapyHosts:
        main.log.error( "No scapy hosts have been created" )
        main.skipCase()
    # Send ARP packets from each scapy host component so ONOS learns
    # the hosts, retrying the discovery check a few times.
    main.intentFunction.sendDiscoveryArp( main, main.scapyHosts )
    stepResult = utilities.retry( f=main.intentFunction.confirmHostDiscovery,
                                  retValue=main.FALSE,
                                  args=[ main ],
                                  attempts=main.checkTopoAttempts,
                                  sleep=2 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="ONOS correctly discovered all hosts",
                             onfail="ONOS incorrectly discovered hosts" )
    main.step( "Populate hostsData" )
    stepResult = main.intentFunction.populateHostData( main )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="Successfully populated hostsData",
                             onfail="Failed to populate hostsData" )
def CASE16( self, main ):
    """
    Balance mastership of switches across the ONOS cluster.

    Retries ``balanceMasters`` on the first CLI node and asserts on
    the result of that call.
    """
    main.case( "Balance mastership of switches" )
    main.step( "Balancing mastership of switches" )
    balanceResult = main.FALSE
    balanceResult = utilities.retry( f=main.CLIs2[ 0 ].balanceMasters,
                                     retValue=main.FALSE,
                                     args=[] )
    # Bug fix: the original asserted on an undefined 'stepResult',
    # which raised a NameError instead of reporting the real outcome.
    utilities.assert_equals( expect=main.TRUE,
                             actual=balanceResult,
                             onpass="Successfully balanced mastership of switches",
                             onfail="Failed to balance mastership of switches" )
def CASE17( self, main ):
    """
    Stop the Mininet topology and remove the Scapy host components so
    the next case can start fresh.
    """
    main.log.report( "Stop Mininet and Scapy" )
    main.case( "Stop Mininet and Scapy" )
    main.caseExplanation = "Stopping the current mininet topology " +\
                           "to start up fresh"
    main.step( "Stopping and Removing Scapy Host Components" )
    scapyResult = main.TRUE
    # Stop Scapy on every host component, then remove the components.
    for host in main.scapyHosts:
        scapyResult = scapyResult and host.stopScapy()
        main.log.info( "Stopped Scapy Host: {0}".format( host.name ) )
    for host in main.scapyHosts:
        scapyResult = scapyResult and main.Scapy1.removeHostComponent( host.name )
        main.log.info( "Removed Scapy Host Component: {0}".format( host.name ) )
    main.scapyHosts = []
    main.scapyHostIPs = []
    utilities.assert_equals( expect=main.TRUE,
                             actual=scapyResult,
                             onpass="Successfully stopped scapy and removed host components",
                             onfail="Failed to stop mininet and scapy" )
    main.step( "Stopping Mininet Topology" )
    mininetResult = main.Mininet1.stopNet()
    # Bug fix: the original asserted on an undefined 'stepResult'
    # instead of the actual Mininet stop result.
    utilities.assert_equals( expect=main.TRUE,
                             actual=mininetResult,
                             onpass="Successfully stop mininet",
                             onfail="Failed to stop mininet" )
    # Exit if mininet or scapy did not stop properly
    if not ( mininetResult and scapyResult ):
        main.cleanup()
        main.exit()
def CASE1000( self, main ):
    """
    Add host intents between 2 hosts:
        - Discover hosts
        - Add host intents
        - Check intents
        - Verify flows
        - Ping hosts
        - Reroute
            - Link down
            - Verify flows
            - Check topology
            - Ping hosts
            - Link up
            - Verify flows
            - Check topology
            - Ping hosts
        - Remove intents

    Exercises IPV4, dual-stack, same-switch (1HOP) and VLAN host
    intents, then verifies that ONOS leadership did not change.
    """
    import time
    import json
    import re
    # Assert variables - These variable's name|format must be followed
    # if you want to use the wrapper function
    assert main, "There is no main"
    assert main.CLIs, "There is no main.CLIs"
    assert main.Mininet1, "Mininet handle should be named Mininet1"
    assert main.numSwitch, "Placed the total number of switch topology in \
                            main.numSwitch"
    # Save leader candidates so we can verify leadership is unchanged
    # after all the intent operations.
    intentLeadersOld = main.CLIs2[ 0 ].leaderCandidates()
    main.case( "Host Intents Test - " + str( main.numCtrls ) +
               " NODE(S) - OF " + main.OFProtocol )
    main.caseExplanation = "This test case tests Host intents using " +\
                           str( main.numCtrls ) + " node(s) cluster;\n" +\
                           "Different type of hosts will be tested in " +\
                           "each step such as IPV4, Dual stack, VLAN " +\
                           "etc;\nThe test will use OF " + main.OFProtocol\
                           + " OVS running in Mininet"
    main.step( "IPV4: Add and test host intents between h1 and h9" )
    main.assertReturnString = "Assertion result for IPV4 host intent with mac addresses\n"
    host1 = { "name": "h1", "id": "00:00:00:00:00:01/-1" }
    host2 = { "name": "h9", "id": "00:00:00:00:00:09/-1" }
    testResult = main.FALSE
    installResult = main.intentFunction.installHostIntent( main,
                                                           name='IPV4',
                                                           onosNode='0',
                                                           host1=host1,
                                                           host2=host2 )
    # Only run the reroute/ping verification if the install succeeded.
    if installResult:
        testResult = main.intentFunction.testHostIntent( main,
                                                         name='IPV4',
                                                         intentId=installResult,
                                                         onosNode='0',
                                                         host1=host1,
                                                         host2=host2,
                                                         sw1='s5',
                                                         sw2='s2',
                                                         expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "DUALSTACK1: Add host intents between h3 and h11" )
    main.assertReturnString = "Assertion Result for dualstack IPV4 with MAC addresses\n"
    host1 = { "name": "h3", "id": "00:00:00:00:00:03/-1" }
    host2 = { "name": "h11", "id": "00:00:00:00:00:0B/-1" }
    testResult = main.FALSE
    installResult = main.intentFunction.installHostIntent( main,
                                                           name='DUALSTACK1',
                                                           onosNode='0',
                                                           host1=host1,
                                                           host2=host2 )
    if installResult:
        testResult = main.intentFunction.testHostIntent( main,
                                                         name='DUALSTACK1',
                                                         intentId=installResult,
                                                         onosNode='0',
                                                         host1=host1,
                                                         host2=host2,
                                                         sw1='s5',
                                                         sw2='s2',
                                                         expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "DUALSTACK2: Add host intents between h1 and h11" )
    main.assertReturnString = "Assertion Result for dualstack2 host intent\n"
    host1 = { "name": "h1" }
    host2 = { "name": "h11" }
    testResult = main.FALSE
    installResult = main.intentFunction.installHostIntent( main,
                                                           name='DUALSTACK2',
                                                           onosNode='0',
                                                           host1=host1,
                                                           host2=host2 )
    if installResult:
        testResult = main.intentFunction.testHostIntent( main,
                                                         name='DUALSTACK2',
                                                         intentId=installResult,
                                                         onosNode='0',
                                                         host1=host1,
                                                         host2=host2,
                                                         sw1='s5',
                                                         sw2='s2',
                                                         expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "1HOP: Add host intents between h1 and h3" )
    main.assertReturnString = "Assertion Result for 1HOP for IPV4 same switch\n"
    host1 = { "name": "h1" }
    host2 = { "name": "h3" }
    testResult = main.FALSE
    installResult = main.intentFunction.installHostIntent( main,
                                                           name='1HOP',
                                                           onosNode='0',
                                                           host1=host1,
                                                           host2=host2 )
    if installResult:
        testResult = main.intentFunction.testHostIntent( main,
                                                         name='1HOP',
                                                         intentId=installResult,
                                                         onosNode='0',
                                                         host1=host1,
                                                         host2=host2,
                                                         sw1='s5',
                                                         sw2='s2',
                                                         expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "VLAN1: Add vlan host intents between h4 and h12" )
    main.assertReturnString = "Assertion Result vlan IPV4\n"
    host1 = { "name": "h4", "id": "00:00:00:00:00:04/100" }
    host2 = { "name": "h12", "id": "00:00:00:00:00:0C/100" }
    testResult = main.FALSE
    installResult = main.intentFunction.installHostIntent( main,
                                                           name='VLAN1',
                                                           onosNode='0',
                                                           host1=host1,
                                                           host2=host2 )
    if installResult:
        testResult = main.intentFunction.testHostIntent( main,
                                                         name='VLAN1',
                                                         intentId=installResult,
                                                         onosNode='0',
                                                         host1=host1,
                                                         host2=host2,
                                                         sw1='s5',
                                                         sw2='s2',
                                                         expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "VLAN2: Add inter vlan host intents between h13 and h20" )
    main.assertReturnString = "Assertion Result different VLAN negative test\n"
    host1 = { "name": "h13" }
    host2 = { "name": "h20" }
    testResult = main.FALSE
    installResult = main.intentFunction.installHostIntent( main,
                                                           name='VLAN2',
                                                           onosNode='0',
                                                           host1=host1,
                                                           host2=host2 )
    if installResult:
        testResult = main.intentFunction.testHostIntent( main,
                                                         name='VLAN2',
                                                         intentId=installResult,
                                                         onosNode='0',
                                                         host1=host1,
                                                         host2=host2,
                                                         sw1='s5',
                                                         sw2='s2',
                                                         expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    # Change the following to use the REST API when leader checking is
    # supported by it
    main.step( "Confirm that ONOS leadership is unchanged" )
    intentLeadersNew = main.CLIs2[ 0 ].leaderCandidates()
    # Bug fix: capture the result of the leadership comparison. The
    # original discarded it and asserted on the stale 'testResult'
    # left over from the previous intent step.
    testResult = main.intentFunction.checkLeaderChange( intentLeadersOld,
                                                        intentLeadersNew )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass="ONOS Leaders Unchanged",
                             onfail="ONOS Leader Mismatch" )
    main.intentFunction.report( main )
def CASE2000( self, main ):
    """
    Add point intents between 2 hosts:
        - Get device ids | ports
        - Add point intents
        - Check intents
        - Verify flows
        - Ping hosts
        - Reroute
            - Link down
            - Verify flows
            - Check topology
            - Ping hosts
            - Link up
            - Verify flows
            - Check topology
            - Ping hosts
        - Remove intents

    Exercises point intents with no selector, IPV4 selectors (with and
    without MACs), SDN-IP style ICMP/TCP selectors, dual-stack hosts,
    VLAN hosts and a same-switch (1HOP) pair.
    """
    import time
    import json
    import re
    # Assert variables - These variable's name|format must be followed
    # if you want to use the wrapper function
    assert main, "There is no main"
    assert main.CLIs, "There is no main.CLIs"
    assert main.Mininet1, "Mininet handle should be named Mininet1"
    assert main.numSwitch, "Placed the total number of switch topology in \
                            main.numSwitch"
    main.case( "Point Intents Test - " + str( main.numCtrls ) +
               " NODE(S) - OF " + main.OFProtocol )
    main.caseExplanation = "This test case will test point to point" +\
                           " intents using " + str( main.numCtrls ) +\
                           " node(s) cluster;\n" +\
                           "Different type of hosts will be tested in " +\
                           "each step such as IPV4, Dual stack, VLAN etc" +\
                           ";\nThe test will use OF " + main.OFProtocol +\
                           " OVS running in Mininet"
    # No option point intents
    main.step( "NOOPTION: Add point intents between h1 and h9" )
    main.assertReturnString = "Assertion Result for NOOPTION point intent\n"
    senders = [
        { "name": "h1", "device": "of:0000000000000005/1" }
    ]
    recipients = [
        { "name": "h9", "device": "of:0000000000000006/1" }
    ]
    testResult = main.FALSE
    installResult = main.intentFunction.installPointIntent(
        main,
        name="NOOPTION",
        senders=senders,
        recipients=recipients )
    if installResult:
        testResult = main.intentFunction.testPointIntent(
            main,
            intentId=installResult,
            name="NOOPTION",
            senders=senders,
            recipients=recipients,
            sw1="s5",
            sw2="s2",
            expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "IPV4: Add point intents between h1 and h9" )
    main.assertReturnString = "Assertion Result for IPV4 point intent\n"
    senders = [
        { "name": "h1", "device": "of:0000000000000005/1", "mac": "00:00:00:00:00:01" }
    ]
    recipients = [
        { "name": "h9", "device": "of:0000000000000006/1", "mac": "00:00:00:00:00:09" }
    ]
    # Bug fix: reset testResult before each step so a failed install
    # cannot assert the stale result of a previous step.
    testResult = main.FALSE
    installResult = main.intentFunction.installPointIntent(
        main,
        name="IPV4",
        senders=senders,
        recipients=recipients,
        ethType="IPV4" )
    if installResult:
        testResult = main.intentFunction.testPointIntent(
            main,
            intentId=installResult,
            name="IPV4",
            senders=senders,
            recipients=recipients,
            sw1="s5",
            sw2="s2",
            expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "IPV4_2: Add point intents between h1 and h9" )
    main.assertReturnString = "Assertion Result for IPV4 no mac address point intents\n"
    senders = [
        { "name": "h1", "device": "of:0000000000000005/1" }
    ]
    recipients = [
        { "name": "h9", "device": "of:0000000000000006/1" }
    ]
    testResult = main.FALSE
    installResult = main.intentFunction.installPointIntent(
        main,
        name="IPV4_2",
        senders=senders,
        recipients=recipients,
        ethType="IPV4" )
    if installResult:
        testResult = main.intentFunction.testPointIntent(
            main,
            intentId=installResult,
            name="IPV4_2",
            senders=senders,
            recipients=recipients,
            sw1="s5",
            sw2="s2",
            expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "SDNIP-ICMP: Add point intents between h1 and h9" )
    main.assertReturnString = "Assertion Result for SDNIP-ICMP IPV4 using TCP point intents\n"
    senders = [
        { "name": "h1", "device": "of:0000000000000005/1", "mac": "00:00:00:00:00:01",
          "ip": main.h1.hostIp }
    ]
    recipients = [
        { "name": "h9", "device": "of:0000000000000006/1", "mac": "00:00:00:00:00:09",
          "ip": main.h9.hostIp }
    ]
    ipProto = main.params[ 'SDNIP' ][ 'icmpProto' ]
    # Uneccessary, not including this in the selectors
    tcpSrc = main.params[ 'SDNIP' ][ 'srcPort' ]
    tcpDst = main.params[ 'SDNIP' ][ 'dstPort' ]
    testResult = main.FALSE
    installResult = main.intentFunction.installPointIntent(
        main,
        name="SDNIP-ICMP",
        senders=senders,
        recipients=recipients,
        ethType="IPV4",
        ipProto=ipProto,
        tcpSrc=tcpSrc,
        tcpDst=tcpDst )
    if installResult:
        testResult = main.intentFunction.testPointIntent(
            main,
            intentId=installResult,
            name="SDNIP_ICMP",
            senders=senders,
            recipients=recipients,
            sw1="s5",
            sw2="s2",
            expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "SDNIP-TCP: Add point intents between h1 and h9" )
    main.assertReturnString = "Assertion Result for SDNIP-TCP IPV4 using ICMP point intents\n"
    mac1 = main.hostsData[ 'h1' ][ 'mac' ]
    mac2 = main.hostsData[ 'h9' ][ 'mac' ]
    ip1 = str( main.hostsData[ 'h1' ][ 'ipAddresses' ][ 0 ] ) + "/32"
    ip2 = str( main.hostsData[ 'h9' ][ 'ipAddresses' ][ 0 ] ) + "/32"
    ipProto = main.params[ 'SDNIP' ][ 'tcpProto' ]
    tcp1 = main.params[ 'SDNIP' ][ 'srcPort' ]
    tcp2 = main.params[ 'SDNIP' ][ 'dstPort' ]
    stepResult = main.intentFunction.pointIntentTcp(
        main,
        name="SDNIP-TCP",
        host1="h1",
        host2="h9",
        deviceId1="of:0000000000000005/1",
        deviceId2="of:0000000000000006/1",
        mac1=mac1,
        mac2=mac2,
        ethType="IPV4",
        ipProto=ipProto,
        ip1=ip1,
        ip2=ip2,
        tcp1=tcp1,
        tcp2=tcp2 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "DUALSTACK1: Add point intents between h3 and h11" )
    main.assertReturnString = "Assertion Result for Dualstack1 IPV4 with mac address point intents\n"
    senders = [
        { "name": "h3", "device": "of:0000000000000005/3", "mac": "00:00:00:00:00:03" }
    ]
    recipients = [
        { "name": "h11", "device": "of:0000000000000006/3", "mac": "00:00:00:00:00:0B" }
    ]
    testResult = main.FALSE
    installResult = main.intentFunction.installPointIntent(
        main,
        name="DUALSTACK1",
        senders=senders,
        recipients=recipients,
        ethType="IPV4" )
    if installResult:
        testResult = main.intentFunction.testPointIntent(
            main,
            intentId=installResult,
            name="DUALSTACK1",
            senders=senders,
            recipients=recipients,
            sw1="s5",
            sw2="s2",
            expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "VLAN: Add point intents between h5 and h21" )
    main.assertReturnString = "Assertion Result for VLAN IPV4 with mac address point intents\n"
    senders = [
        { "name": "h5", "device": "of:0000000000000005/5", "mac": "00:00:00:00:00:05" }
    ]
    recipients = [
        { "name": "h21", "device": "of:0000000000000007/5", "mac": "00:00:00:00:00:15" }
    ]
    testResult = main.FALSE
    # Bug fix: this step previously installed/tested the intent under
    # the copy-pasted name "DUALSTACK1"; label it "VLAN".
    installResult = main.intentFunction.installPointIntent(
        main,
        name="VLAN",
        senders=senders,
        recipients=recipients,
        ethType="IPV4" )
    if installResult:
        testResult = main.intentFunction.testPointIntent(
            main,
            intentId=installResult,
            name="VLAN",
            senders=senders,
            recipients=recipients,
            sw1="s5",
            sw2="s2",
            expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.step( "1HOP: Add point intents between h1 and h3" )
    main.assertReturnString = "Assertion Result for 1HOP IPV4 with no mac address point intents\n"
    senders = [
        { "name": "h1", "device": "of:0000000000000005/1", "mac": "00:00:00:00:00:01" }
    ]
    recipients = [
        { "name": "h3", "device": "of:0000000000000005/3", "mac": "00:00:00:00:00:03" }
    ]
    testResult = main.FALSE
    installResult = main.intentFunction.installPointIntent(
        main,
        name="1HOP IPV4",
        senders=senders,
        recipients=recipients,
        ethType="IPV4" )
    if installResult:
        testResult = main.intentFunction.testPointIntent(
            main,
            intentId=installResult,
            name="1HOP IPV4",
            senders=senders,
            recipients=recipients,
            sw1="s5",
            sw2="s2",
            expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.intentFunction.report( main )
def CASE3000( self, main ):
    """
    Add single point to multi point intents
        - Get device ids
        - Add single point to multi point intents
        - Check intents
        - Verify flows
        - Ping hosts
        - Reroute
            - Link down
            - Verify flows
            - Check topology
            - Ping hosts
            - Link up
            - Verify flows
            - Check topology
            - Ping hosts
        - Remove intents
    """
    # Sanity-check the TestON environment before using the wrappers.
    assert main, "There is no main"
    assert main.CLIs, "There is no main.CLIs"
    assert main.Mininet1, "Mininet handle should be named Mininet1"
    assert main.numSwitch, "Placed the total number of switch topology in \
                            main.numSwitch"
    main.case( "Single To Multi Point Intents Test - " +
               str( main.numCtrls ) + " NODE(S) - OF " + main.OFProtocol )
    main.caseExplanation = "This test case will test single point to" +\
                           " multi point intents using " +\
                           str( main.numCtrls ) + " node(s) cluster;\n" +\
                           "Different type of hosts will be tested in " +\
                           "each step such as IPV4, Dual stack, VLAN etc" +\
                           ";\nThe test will use OF " + main.OFProtocol +\
                           " OVS running in Mininet"
    # Step 1: intent with no selector options at all.
    main.step( "NOOPTION: Add single point to multi point intents" )
    stepResult = main.TRUE
    hostNames = [ 'h8', 'h16', 'h24' ]
    devices = [ 'of:0000000000000005/8', 'of:0000000000000006/8', \
                'of:0000000000000007/8' ]
    macs = [ '00:00:00:00:00:08', '00:00:00:00:00:10', '00:00:00:00:00:18' ]
    stepResult = main.intentFunction.singleToMultiIntent(
        main,
        name="NOOPTION",
        hostNames=hostNames,
        devices=devices,
        sw1="s5",
        sw2="s2",
        expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="NOOPTION: Successfully added single "
                                    + " point to multi point intents" +
                                    " with no match action",
                             onfail="NOOPTION: Failed to add single point"
                                    + " point to multi point intents" +
                                    " with no match action" )
    # Step 2: same endpoints, with IPV4 ethType and explicit MACs.
    main.step( "IPV4: Add single point to multi point intents" )
    stepResult = main.TRUE
    stepResult = main.intentFunction.singleToMultiIntent(
        main,
        name="IPV4",
        hostNames=hostNames,
        devices=devices,
        ports=None,
        ethType="IPV4",
        macs=macs,
        bandwidth="",
        lambdaAlloc=False,
        ipProto="",
        ipAddresses="",
        tcp="",
        sw1="s5",
        sw2="s2",
        expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="IPV4: Successfully added single "
                                    + " point to multi point intents" +
                                    " with IPV4 type and MAC addresses",
                             onfail="IPV4: Failed to add single point"
                                    + " point to multi point intents" +
                                    " with IPV4 type and MAC addresses" )
    # Step 3: IPV4 ethType but no MAC addresses supplied.
    main.step( "IPV4_2: Add single point to multi point intents" )
    stepResult = main.TRUE
    hostNames = [ 'h8', 'h16', 'h24' ]
    # NOTE(review): step is labelled IPV4_2 but passes name="IPV4" —
    # likely a copy-paste slip; confirm the intended intent label.
    stepResult = main.intentFunction.singleToMultiIntent(
        main,
        name="IPV4",
        hostNames=hostNames,
        ethType="IPV4",
        lambdaAlloc=False )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="IPV4_2: Successfully added single "
                                    + " point to multi point intents" +
                                    " with IPV4 type and no MAC addresses",
                             onfail="IPV4_2: Failed to add single point"
                                    + " point to multi point intents" +
                                    " with IPV4 type and no MAC addresses" )
    # Step 4: hosts that all sit on the same tagged VLAN.
    main.step( "VLAN: Add single point to multi point intents" )
    stepResult = main.TRUE
    hostNames = [ 'h4', 'h12', 'h20' ]
    devices = [ 'of:0000000000000005/4', 'of:0000000000000006/4', \
                'of:0000000000000007/4' ]
    macs = [ '00:00:00:00:00:04', '00:00:00:00:00:0C', '00:00:00:00:00:14' ]
    stepResult = main.intentFunction.singleToMultiIntent(
        main,
        name="VLAN",
        hostNames=hostNames,
        devices=devices,
        ports=None,
        ethType="IPV4",
        macs=macs,
        bandwidth="",
        lambdaAlloc=False,
        ipProto="",
        ipAddresses="",
        tcp="",
        sw1="s5",
        sw2="s2",
        expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="VLAN: Successfully added single "
                                    + " point to multi point intents" +
                                    " with IPV4 type and MAC addresses" +
                                    " in the same VLAN",
                             onfail="VLAN: Failed to add single point"
                                    + " point to multi point intents" +
                                    " with IPV4 type and MAC addresses" +
                                    " in the same VLAN")
def CASE4000( self, main ):
    """
    Add multi point to single point intents
        - Get device ids
        - Add multi point to single point intents
        - Check intents
        - Verify flows
        - Ping hosts
        - Reroute
            - Link down
            - Verify flows
            - Check topology
            - Ping hosts
            - Link up
            - Verify flows
            - Check topology
            - Ping hosts
        - Remove intents
    """
    # Sanity-check the TestON environment before using the wrappers.
    assert main, "There is no main"
    assert main.CLIs, "There is no main.CLIs"
    assert main.Mininet1, "Mininet handle should be named Mininet1"
    assert main.numSwitch, "Placed the total number of switch topology in \
                            main.numSwitch"
    main.case( "Multi To Single Point Intents Test - " +
               str( main.numCtrls ) + " NODE(S) - OF " + main.OFProtocol )
    main.caseExplanation = "This test case will test single point to" +\
                           " multi point intents using " +\
                           str( main.numCtrls ) + " node(s) cluster;\n" +\
                           "Different type of hosts will be tested in " +\
                           "each step such as IPV4, Dual stack, VLAN etc" +\
                           ";\nThe test will use OF " + main.OFProtocol +\
                           " OVS running in Mininet"
    # Step 1: intent with no selector options at all.
    main.step( "NOOPTION: Add multi point to single point intents" )
    stepResult = main.TRUE
    hostNames = [ 'h8', 'h16', 'h24' ]
    devices = [ 'of:0000000000000005/8', 'of:0000000000000006/8', \
                'of:0000000000000007/8' ]
    macs = [ '00:00:00:00:00:08', '00:00:00:00:00:10', '00:00:00:00:00:18' ]
    stepResult = main.intentFunction.multiToSingleIntent(
        main,
        name="NOOPTION",
        hostNames=hostNames,
        devices=devices,
        sw1="s5",
        sw2="s2",
        expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="NOOPTION: Successfully added multi "
                                    + " point to single point intents" +
                                    " with no match action",
                             onfail="NOOPTION: Failed to add multi point" +
                                    " to single point intents" +
                                    " with no match action" )
    # Step 2: same endpoints, with IPV4 ethType and explicit MACs.
    main.step( "IPV4: Add multi point to single point intents" )
    stepResult = main.TRUE
    stepResult = main.intentFunction.multiToSingleIntent(
        main,
        name="IPV4",
        hostNames=hostNames,
        devices=devices,
        ports=None,
        ethType="IPV4",
        macs=macs,
        bandwidth="",
        lambdaAlloc=False,
        ipProto="",
        ipAddresses="",
        tcp="",
        sw1="s5",
        sw2="s2",
        expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="IPV4: Successfully added multi point"
                                    + " to single point intents" +
                                    " with IPV4 type and MAC addresses",
                             onfail="IPV4: Failed to add multi point" +
                                    " to single point intents" +
                                    " with IPV4 type and MAC addresses" )
    # Step 3: IPV4 ethType but no MAC addresses supplied.
    main.step( "IPV4_2: Add multi point to single point intents" )
    stepResult = main.TRUE
    hostNames = [ 'h8', 'h16', 'h24' ]
    # NOTE(review): step is labelled IPV4_2 but passes name="IPV4" —
    # likely a copy-paste slip; confirm the intended intent label.
    stepResult = main.intentFunction.multiToSingleIntent(
        main,
        name="IPV4",
        hostNames=hostNames,
        ethType="IPV4",
        lambdaAlloc=False )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="IPV4_2: Successfully added multi point"
                                    + " to single point intents" +
                                    " with IPV4 type and no MAC addresses",
                             onfail="IPV4_2: Failed to add multi point" +
                                    " to single point intents" +
                                    " with IPV4 type and no MAC addresses" )
    # Step 4: hosts that all sit on the same tagged VLAN.
    main.step( "VLAN: Add multi point to single point intents" )
    stepResult = main.TRUE
    hostNames = [ 'h5', 'h13', 'h21' ]
    devices = [ 'of:0000000000000005/5', 'of:0000000000000006/5', \
                'of:0000000000000007/5' ]
    macs = [ '00:00:00:00:00:05', '00:00:00:00:00:0D', '00:00:00:00:00:15' ]
    stepResult = main.intentFunction.multiToSingleIntent(
        main,
        name="VLAN",
        hostNames=hostNames,
        devices=devices,
        ports=None,
        ethType="IPV4",
        macs=macs,
        bandwidth="",
        lambdaAlloc=False,
        ipProto="",
        ipAddresses="",
        tcp="",
        sw1="s5",
        sw2="s2",
        expectedLink=18 )
    utilities.assert_equals( expect=main.TRUE,
                             actual=stepResult,
                             onpass="VLAN: Successfully added multi point"
                                    + " to single point intents" +
                                    " with IPV4 type and MAC addresses" +
                                    " in the same VLAN",
                             onfail="VLAN: Failed to add multi point" +
                                    " to single point intents" )
def CASE5000( self, main ):
    """
    Tests Host Mobility
    Moves h1 from switch s5 to switch s6, verifies that ONOS discovers the
    new location, then installs and tests a host intent between the moved
    h1 and h9.
    """
    # Review changes vs. original:
    #  * removed a large block of dead commented-out code (an older version
    #    of this case kept above the live body),
    #  * initialize testResult before the `if installResult:` guard so the
    #    final assert cannot hit an undefined name when installation fails,
    #  * fixed the copy-pasted onfail message of the mobility check.
    assert main, "There is no main"
    assert main.CLIs, "There is no main.CLIs"
    assert main.Mininet1, "Mininet handle should be named Mininet1"
    assert main.numSwitch, "Placed the total number of switch topology in main.numSwitch"

    main.case( "Test host mobility with host intents " )
    main.step( "Testing host mobility by moving h1 from s5 to s6" )
    # h1PreMove is kept for debugging/log inspection; only the post-move
    # location is asserted below.
    h1PreMove = main.hostsData[ "h1" ][ "location" ][ 0:19 ]
    main.log.info( "Moving h1 from s5 to s6")
    main.Mininet1.moveHost( "h1","s5","s6" )

    # Send discovery ping from moved host
    # Moving the host brings down the default interfaces and creates a new one.
    # Scapy is restarted on this host to detect the new interface
    main.h1.stopScapy()
    main.h1.startScapy()

    # Discover new host location in ONOS and populate host data.
    # Host 1 IP and MAC should be unchanged
    main.intentFunction.sendDiscoveryArp( main, [ main.h1 ] )
    main.intentFunction.populateHostData( main )

    h1PostMove = main.hostsData[ "h1" ][ "location" ][ 0:19 ]
    utilities.assert_equals( expect="of:0000000000000006",
                             actual=h1PostMove,
                             onpass="Mobility: Successfully moved h1 to s6",
                             onfail="Mobility: Failed to move h1 to s6" )

    main.step( "IPV4: Add host intents between h1 and h9" )
    main.assertReturnString = "Assert result for IPV4 host intent between h1, moved, and h9\n"
    host1 = { "name":"h1","id":"00:00:00:00:00:01/-1" }
    host2 = { "name":"h9","id":"00:00:00:00:00:09/-1" }

    installResult = main.intentFunction.installHostIntent( main,
                                                           name='IPV4 Mobility IPV4',
                                                           onosNode='0',
                                                           host1=host1,
                                                           host2=host2)
    # Guard against referencing an undefined name if installation failed.
    testResult = main.FALSE
    if installResult:
        testResult = main.intentFunction.testHostIntent( main,
                                                         name='Host Mobility IPV4',
                                                         intentId = installResult,
                                                         onosNode='0',
                                                         host1=host1,
                                                         host2=host2,
                                                         sw1="s6",
                                                         sw2="s2",
                                                         expectedLink=18 )

    utilities.assert_equals( expect=main.TRUE,
                             actual=testResult,
                             onpass=main.assertReturnString,
                             onfail=main.assertReturnString )
    main.intentFunction.report( main )
| StarcoderdataPython |
225656 | <gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Photutils is an Astropy affiliated package to provide tools for
detecting and performing photometry of astronomical sources. It also
has tools for background estimation, ePSF building, PSF matching,
centroiding, and morphological measurements.
"""
# Affiliated packages may add whatever they like to this file, but
# should keep this content at the top.
# ----------------------------------------------------------------------------
from ._astropy_init import * # noqa
# ----------------------------------------------------------------------------
# Enforce Python version check during package import.
# This is the same check as the one at the top of setup.py
import sys
__minimum_python_version__ = '3.5'
__minimum_numpy_version__ = '1.11'
class UnsupportedPythonError(Exception):
    """Raised at import time when the running Python interpreter is older
    than the minimum version photutils supports."""
# Fail fast with a clear error rather than letting newer syntax break later.
if (sys.version_info <
        tuple((int(val) for val in __minimum_python_version__.split('.')))):
    raise UnsupportedPythonError('Photutils does not support Python < {}'
                                 .format(__minimum_python_version__))

# NOTE(review): __minimum_numpy_version__ is declared above but not checked
# here — presumably enforced elsewhere (setup.py); confirm.
# Only pull in the heavy subpackages when not running inside setup.py.
if not _ASTROPY_SETUP_:    # noqa
    from .aperture import *     # noqa
    from .background import *     # noqa
    from .centroids import *     # noqa
    from .detection import *     # noqa
    from .morphology import *     # noqa
    from .psf import *     # noqa
    from .segmentation import *     # noqa
| StarcoderdataPython |
155594 | <reponame>ThomasDoukas/NTUA_ATDS
from pyspark.sql import SparkSession
from io import StringIO
import csv
def split_complex(x):
    """Parse one CSV-encoded line into (first_field, (remaining_fields...)).

    Using csv.reader (rather than str.split) correctly handles quoted
    fields that themselves contain commas.
    """
    # next() takes the single parsed row without materializing a list.
    fields = next(csv.reader(StringIO(x), delimiter=','))
    return (fields[0], tuple(fields[1:]))
spark = SparkSession.builder.appName("Q1RDD").getOrCreate()
sc = spark.sparkContext

# Pipeline: for each year >= 2000, find the movie with the largest profit
# percentage (earnings - cost) / cost * 100; ties broken by smallest id.
# filter(date>2000, cost!=0, earnings!=0)
# map(year, (id, name, earnings percentage))
# reduce max(earnings percentage)
# sort by key
# NOTE(review): the year comparison is done on strings ("2021" >= "2000");
# this is correct only while years have exactly four digits.
rdd = sc.textFile("hdfs://master:9000/movies/movies.csv") \
    .map(lambda x: split_complex(x)) \
    .filter(lambda x: True if(x[1][2] and x[1][2].split("-")[0] >= "2000" and float(x[1][4])!=0 and float(x[1][5])!=0) else False) \
    .map(lambda x: (x[1][2].split("-")[0], (x[0], x[1][0], (float(x[1][5])-float(x[1][4]))*100/float(x[1][4])))) \
    .reduceByKey(lambda x, y: (x if (x[0] < y[0]) else y) if (x[2] == y[2]) else (x if (x[2] > y[2]) else y)) \
    .sortByKey() \
    .collect()

print("-------------------------------------------------------------")
print("Query 1 - RDD API Output")
print("(Year, (Movie_id, Title, Profit))")
for i in rdd:
    print(i)
print("-------------------------------------------------------------")
| StarcoderdataPython |
6557755 | """Plot the COOP Precipitation Reports, don't use lame-o x100"""
import datetime
import psycopg2.extras
from pyiem.reference import TRACE_VALUE
from pyiem.plot import MapPlot
from pyiem.util import get_dbconn
def n(val):
    """Format a precipitation amount for map display.

    Returns 'T' for a trace report, '0' for exactly zero, otherwise the
    value rendered with two decimal places.
    """
    if val == TRACE_VALUE:
        return 'T'
    if val == 0:
        return '0'
    return f'{val:.2f}'
def main():
    """Fetch today's IA_COOP 24-hour precipitation reports from the IEM
    database, plot them on an Iowa map, and queue the image via pqstr."""
    pgconn = get_dbconn('iem', user='nobody')
    icursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)

    lats = []
    lons = []
    vals = []
    valmask = []
    labels = []
    # pday >= 0 filters missing values; pday < 20 drops implausible reports.
    icursor.execute("""
        select id, ST_x(s.geom) as lon, ST_y(s.geom) as lat, pday
        from summary c, stations s
        WHERE day = 'TODAY' and pday >= 0 and pday < 20
        and s.network = 'IA_COOP' and s.iemid = c.iemid
    """)
    for row in icursor:
        lats.append(row['lat'])
        lons.append(row['lon'])
        vals.append(n(row['pday']))
        labels.append(row['id'])
        valmask.append(True)

    # NOTE(review): labels and valmask are collected but never passed to
    # plot_values — either wire them through or drop them.
    mp = MapPlot(title="Iowa COOP 24 Hour Precipitation", axisbg='white',
                 subtitle="ending approximately %s 7 AM" % (
                     datetime.datetime.now().strftime("%-d %b %Y"),))
    mp.plot_values(lons, lats, vals)
    # pqstr routes the finished image into the LDM product queue.
    pqstr = "plot ac %s iowa_coop_precip.png iowa_coop_precip.png png" % (
        datetime.datetime.now().strftime("%Y%m%d%H%M"), )
    mp.postprocess(pqstr=pqstr)
    mp.close()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1879537 | from math import dist, inf
from random import shuffle
from sklearn.datasets import load_iris as load
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
class KNN:
    """A from-scratch k-nearest-neighbours classifier.

    Uses Euclidean distance and an unweighted majority vote over the k
    closest training samples; ties are resolved in favour of the label
    encountered first among the nearest neighbours.
    """

    def __init__(self, k=5):
        self.__k = k
        self.__x_train = []
        self.__y_train = []

    def __euclid_distance(self, a, b):
        """Euclidean distance between two equal-length feature vectors."""
        return sum((p - q) ** 2 for p, q in zip(a, b)) ** 0.5

    def __find_minimum(self, distances):
        """Index of the first occurrence of the smallest distance."""
        return min(range(len(distances)), key=distances.__getitem__)

    def fit(self, x_train, y_train):
        """Memorize the training data (lazy learner: no work until predict)."""
        self.__x_train = x_train
        self.__y_train = y_train

    def predict(self, x_test):
        """Return the predicted label for each sample in x_test."""
        predictions = []
        for sample in x_test:
            distances = [self.__euclid_distance(sample, row)
                         for row in self.__x_train]
            votes = {}
            for _ in range(self.__k):
                nearest = self.__find_minimum(distances)
                label = self.__y_train[nearest]
                votes[label] = votes.get(label, 0) + 1
                # Knock the chosen neighbour out of later rounds.
                distances[nearest] = inf
            best_label, best_count = 0, 0
            for label, count in votes.items():
                if count > best_count:
                    best_label, best_count = label, count
            predictions.append(best_label)
        return predictions

    def accuracy(self, y_test, predictions):
        """Fraction of predictions matching the true labels."""
        correct = sum(1 for guess, truth in zip(predictions, y_test)
                      if guess == truth)
        return correct / len(predictions)
# Train/evaluate both the hand-rolled KNN and scikit-learn's on an 80/20
# iris split. shuffle=True without a fixed random_state means the printed
# scores vary between runs.
x_train, x_test, y_train, y_test = train_test_split(load().data, load().target, test_size=0.2, shuffle=True)

model = KNN(k=5)
model.fit(x_train, y_train)
predictions = model.predict(x_test)
print("My KNN Accuracy Score:", model.accuracy(y_test, predictions))

# scikit-learn way-> (same hyperparameters, brute-force search, uniform votes)
model = KNeighborsClassifier(n_neighbors=5, algorithm='brute', weights='uniform')
model.fit(x_train, y_train)
predictions = model.predict(x_test)
print("Scikit KNN Accuracy Score:", accuracy_score(y_test, predictions))
3539471 | <gh_stars>10-100
#!/usr/bin/env python3
# Copyright 2019 <NAME>, <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :sequential/smnist/train_split_smnist.py
# @author :mc
# @contact :<EMAIL>
# @created :20/03/2020
# @version :1.0
# @python_version :3.6.8
"""
Training a recurrent network and its associated hypernetwork on a continual
learning setting based on the SplitSMNIST Task.
"""
# Do not delete the following import for all executable scripts!
import __init__ # pylint: disable=unused-import
from argparse import Namespace
import matplotlib.pyplot as plt
import os
import sequential.train_utils_sequential as stu
import sequential.train_sequential as sts
from sequential.smnist import train_args_smnist
import sequential.smnist.train_utils_smnist as ctu
import utils.sim_utils as sutils
import sequential.smnist.hpconfig_smnist as hpsearch_cl
import sequential.smnist.hpconfig_smnist_multitask as hpsearch_mt
def run():
    """Run the SplitSMNIST continual-learning experiment end to end.

    Parses CLI arguments, builds the task handlers and networks, then
    trains the recurrent target network and its hypernetwork task by task.
    """
    #############
    ### Setup ###
    #############
    experiment = 'split_smnist'
    config = train_args_smnist.parse_cmd_arguments(mode=experiment)
    device, writer, logger = sutils.setup_environment(config)
    dhandlers = ctu._generate_tasks(config, logger, experiment=experiment)

    # We will use the namespace below to share miscellaneous information between
    # functions.
    shared = Namespace()
    shared.feature_size = dhandlers[0].in_shape[0]

    # Plot images.
    if config.show_plots:
        figure_dir = os.path.join(config.out_dir, 'figures')
        if not os.path.exists(figure_dir):
            os.makedirs(figure_dir)
        for t, dh in enumerate(dhandlers):
            dh.plot_samples('Test Samples - Task %d' % t,
                dh.get_train_inputs()[:8], outputs=dh.get_train_outputs()[:8],
                show=True, filename=os.path.join(figure_dir,
                'test_samples_task_%d.png' % t))

    target_net, hnet, dnet = stu.generate_networks(config, shared, dhandlers,
        device)

    # generate masks if needed
    ctx_masks = None
    if config.use_masks:
        ctx_masks = stu.generate_binary_masks(config, device, target_net)

    # We store the target network weights (excluding potential context-mod
    # weights after every task). In this way, we can quantify changes and
    # observe the "stiffness" of EWC.
    shared.tnet_weights = []
    # We store the context-mod weights (or all weights) coming from the hypernet
    # after every task, in order to quantify "forgetting". Note, the hnet
    # regularizer should keep them fix.
    shared.hnet_out = []

    # Get the task-specific functions for loss and accuracy.
    task_loss_func = ctu.get_loss_func(config, device, logger, ewc_loss=False)
    accuracy_func = ctu.get_accuracy_func(config)
    ewc_loss_func = ctu.get_loss_func(config, device, logger, ewc_loss=True) \
        if config.use_ewc else None
    replay_fcts = None
    if config.use_replay:
        replay_fcts = dict()
        replay_fcts['rec_loss'] = ctu.get_vae_rec_loss_func()
        replay_fcts['distill_loss'] = ctu.get_distill_loss_func()
        replay_fcts['soft_trgt_acc'] = ctu.get_soft_trgt_acc_func()

    # Hyperparameter-search summaries differ between multitask and
    # continual-learning runs.
    if config.multitask:
        summary_keywords=hpsearch_mt._SUMMARY_KEYWORDS
        summary_filename=hpsearch_mt._SUMMARY_FILENAME
    else:
        summary_keywords=hpsearch_cl._SUMMARY_KEYWORDS
        summary_filename=hpsearch_cl._SUMMARY_FILENAME

    ########################
    ### Train classifier ###
    ########################
    # Train the network task by task. Testing on all tasks is run after
    # finishing training on each task.
    ret, train_loss, test_loss, test_acc = sts.train_tasks(dhandlers,
        target_net, hnet, dnet, device, config, shared, logger, writer,
        ctx_masks, summary_keywords, summary_filename,
        task_loss_func=task_loss_func, accuracy_func=accuracy_func,
        ewc_loss_func=ewc_loss_func, replay_fcts=replay_fcts)

    stu.log_results(test_acc, config, logger)

    writer.close()

    # train_tasks returns -1 on success, otherwise the index of the last
    # completed task.
    if ret == -1:
        logger.info('Program finished successfully.')
        if config.show_plots:
            plt.show()
    else:
        logger.error('Only %d tasks have completed training.' % (ret+1))

if __name__ == '__main__':
    run()
| StarcoderdataPython |
4897303 | <reponame>m-stoeckel/argos-train<filename>argostrain/data.py
from argostrain.dataset import *
def prepare_data(source_path: Path, target_path: Path, run_path: Path, valid_size=2000):
    """Split a parallel corpus into train/validation files.

    Reads the source/target sentence files, holds out the first
    ``valid_size`` pairs for validation, and writes src/tgt train and
    validation files under ``run_path / "split_data"``.
    """
    # Open the inputs in a context manager so the handles are closed
    # (the original passed open files to FileDataset and leaked both).
    with source_path.open() as source_file, target_path.open() as target_file:
        dataset = FileDataset(source_file, target_file)
        print("Read data from file")
        # Materialize before the files close in case data() is lazy.
        source_data, target_data = dataset.data()
        source_data = list(source_data)
        target_data = list(target_data)

    assert len(source_data) > valid_size

    split_data_path = run_path / "split_data"
    split_data_path.mkdir(exist_ok=True)

    with (split_data_path / "src-val.txt").open("w") as source_valid_file:
        source_valid_file.writelines(source_data[:valid_size])
    with (split_data_path / "src-train.txt").open("w") as source_train_file:
        source_train_file.writelines(source_data[valid_size:])
    with (split_data_path / "tgt-val.txt").open("w") as target_valid_file:
        target_valid_file.writelines(target_data[:valid_size])
    with (split_data_path / "tgt-train.txt").open("w") as target_train_file:
        target_train_file.writelines(target_data[valid_size:])
    print("Done splitting data")
| StarcoderdataPython |
188443 | <gh_stars>1-10
from .launch_tools import *
from .arg_parse import get_args
| StarcoderdataPython |
1646736 | import inspect
import logging
from datetime import datetime
from typing import Optional, Set, Callable
from celery.utils import uuid
from server.queue.celery.task_metadata import TaskMetadata
from server.queue.celery.task_status import task_status
from server.queue.framework import TaskQueue, BaseObserver
from server.queue.model import Task, TaskStatus, TaskError
from server.queue.task_utils import task_status_filter
from task_queue.events import TASK_METADATA, RUNTIME_METADATA_ATTR
from task_queue.metadata import TaskRuntimeMetadata
# Default module logger
logger = logging.getLogger(__name__)
def _task_name(task):
return task.__qualname__
class CeleryTaskQueue(TaskQueue):
    """TaskQueue implementation backed by Celery plus a separate metadata
    backend that stores per-task request data and progress."""

    # Event type for task deletion
    TASK_DELETED_EVENT = "task-deleted"

    def __init__(self, app, backend, request_transformer, requests):
        """Initialize the queue.

        Args:
            app: the Celery application (must not be None).
            backend: metadata backend used to persist TaskMetadata.
            request_transformer: used to deserialize stored requests.
            requests: mapping of request type -> Celery task to invoke.
        """
        if app is None:
            raise ValueError("Celery app cannot be None")
        self.app = app
        self._celery_backend = backend
        self._celery_tasks = {}
        self._req_transformer = request_transformer
        for request_type, task in requests.items():
            self._celery_tasks[request_type] = task
        self._observers: Set[BaseObserver] = set()

    def dispatch(self, request):
        """Persist metadata for *request*, enqueue the matching Celery task,
        and return a new PENDING Task describing it."""
        # Resolve actual celery task to be invoked
        celery_task = self._get_celery_task(request)

        # Make sure the backend contains required task metadata
        task_id = uuid()
        meta = TaskMetadata(id=task_id, created=datetime.utcnow(), request=request)
        self._celery_backend.store_task_meta(task_id, meta.asdict())

        # Invoke celery task
        celery_task.apply_async(task_id=task_id, kwargs=request.kwargs())

        # Create a new task instance and return to the caller
        return Task(
            id=task_id, created=meta.created, status_updated=meta.created, request=request, status=TaskStatus.PENDING
        )

    def _get_celery_task(self, request):
        """Look up the Celery task registered for this request's type."""
        if type(request) not in self._celery_tasks:
            raise ValueError(f"Unsupported request type: {type(request)}")
        return self._celery_tasks[type(request)]

    def terminate(self, task_id):
        """Revoke (forcefully terminate) a running task if it exists."""
        if self.exists(task_id):
            async_result = self.app.AsyncResult(task_id)
            async_result.revoke(terminate=True, wait=False)

    def delete(self, task_id):
        """Terminate the task, delete its metadata and Celery result, then
        broadcast a task-deleted event."""
        self.terminate(task_id)
        if self.exists(task_id):
            self._celery_backend.delete_task_meta(task_id)
            async_result = self.app.AsyncResult(task_id)
            async_result.forget()
            self._notify_deleted(task_id)

    def get_task(self, task_id):
        """Return the Task with the given id, or None if unknown."""
        return self._construct_task(task_id, {})

    def _construct_task(self, task_id, active_task_meta):
        """Build a Task by merging stored metadata, the Celery AsyncResult
        state, and (optionally) worker 'active task' info keyed by id."""
        winnow_meta = self._get_task_meta(task_id)
        async_result = self.app.AsyncResult(task_id)
        if winnow_meta is None:
            return None
        status = task_status(async_result.status)
        status_updated = winnow_meta.created
        # Worker-reported active tasks override the backend status.
        if task_id in active_task_meta:
            status = TaskStatus.RUNNING
            status_updated = datetime.utcfromtimestamp(active_task_meta[task_id]["time_start"])
        # For finished tasks the completion time is authoritative.
        if status != TaskStatus.PENDING and status != TaskStatus.RUNNING:
            status_updated = async_result.date_done
        error = None
        if status == TaskStatus.FAILURE:
            error = self._construct_error(async_result)
        return Task(
            id=winnow_meta.id,
            created=winnow_meta.created,
            status_updated=status_updated,
            request=winnow_meta.request,
            status=status,
            error=error,
            progress=winnow_meta.progress,
            result=async_result.result,
        )

    def _get_task_meta(self, task_id, transaction=None) -> Optional[TaskMetadata]:
        """Fetch and deserialize stored TaskMetadata, or None if absent."""
        raw_meta = self._celery_backend.get_task_meta(task_id, transaction=transaction)
        if raw_meta is None:
            return None
        return TaskMetadata.fromdict(raw_meta, self._req_transformer)

    def _construct_error(self, async_result):
        """Build a TaskError from a failed AsyncResult (exception stored in
        async_result.result, traceback text in async_result.traceback)."""
        exc_type_name = None
        exc_module_name = None
        exc_message = None
        result = async_result.result
        if isinstance(result, Exception):
            exc_type = type(result)
            exc_type_name = getattr(exc_type, "__name__", None)
            exc_module = inspect.getmodule(exc_type)
            if exc_module is not None:
                exc_module_name = getattr(exc_module, "__name__", None)
            exc_message = str(result)
        return TaskError(
            exc_type=exc_type_name,
            exc_message=exc_message,
            exc_module=exc_module_name,
            traceback=async_result.traceback,
        )

    def _active_tasks_meta(self):
        """Collect worker-reported metadata of currently active tasks,
        indexed by task id.

        NOTE(review): not called anywhere in this file — _construct_task is
        always invoked with an empty dict; verify whether this is dead code.
        """
        metadata_index = {}
        celery_inspector = self.app.control.inspect()
        for metadata_entries in celery_inspector.active().values():
            for task_metadata in metadata_entries:
                metadata_index[task_metadata["id"]] = task_metadata
        return metadata_index

    def list_tasks(self, status=None, offset=0, limit=None):
        """Return (tasks, total_count) for tasks matching *status*,
        paginated by offset/limit.

        NOTE(review): with the default limit=None the expression
        ``offset + limit`` raises TypeError — callers appear to always pass
        a limit; confirm and either require it or handle None.
        """
        satisfies = task_status_filter(status)
        result = []
        filtered_count = 0
        for task_id in self._celery_backend.task_ids():
            task = self._construct_task(task_id, {})
            task_satisfies = satisfies(task)
            if task_satisfies and offset <= filtered_count < offset + limit:
                result.append(task)
            filtered_count += int(task_satisfies)
        return result, filtered_count

    def exists(self, task_id):
        """True if the metadata backend knows this task id."""
        return self._celery_backend.exists(task_id=task_id)

    def _make_event_handler(self, state, task_handler: Callable[[Task], None]):
        """Create Celery event-receiver callback, which will accept Celery
        events, create a task and pass the task to the actual handler.
        """

        def event_handler(event):
            """Receive event and pass the corresponding task to the handler."""
            state.event(event)
            self._update_meta_from_event(event)
            task = self.get_task(event["uuid"])
            if task is not None:
                task_handler(task)

        return event_handler

    def _notify(self, state, task_handler: Callable[[Task, BaseObserver], None]):
        """Create task handler that loops over the existing observers and apply the provided operation."""

        def notifier(task):
            """Notify each observer with the given task"""
            for observer in self._observers:
                try:
                    task_handler(task, observer)
                except Exception:
                    logger.exception("Error handling task update")

        return self._make_event_handler(state, notifier)

    def _update_meta_from_event(self, event):
        """Try to read TaskRuntimeMetadata from event and save it to the backend."""
        if RUNTIME_METADATA_ATTR not in event:
            return
        task_id = event["uuid"]
        try:
            with self._celery_backend.transaction(task_id) as txn:
                task_metadata = self._get_task_meta(task_id, transaction=txn)
                if task_metadata is None:
                    return
                runtime_metadata = TaskRuntimeMetadata.fromdict(event[RUNTIME_METADATA_ATTR])
                task_metadata.progress = runtime_metadata.progress
                self._celery_backend.begin_write_section(transaction=txn)  # Necessary for redis transactions
                self._celery_backend.store_task_meta(task_id, task_metadata.asdict(), transaction=txn)
        except Exception:
            logger.exception("Cannot update task metadata")

    def observe(self, observer: BaseObserver):
        """Add observer to the queue notification list."""
        self._observers.add(observer)

    def stop_observing(self, observer: BaseObserver):
        """Remove observer from the queue notification list."""
        self._observers.remove(observer)

    def _notify_deleted(self, task_id):
        """Send task-deleted event via the Celery message bus."""
        retry_policy = self.app.conf.task_publish_retry_policy
        with self.app.events.default_dispatcher() as dispatcher:
            dispatcher.send(type=self.TASK_DELETED_EVENT, uuid=task_id, retry=True, retry_policy=retry_policy)

    def listen(self):
        """Listen for queue events and notify observers.

        This is a blocking method, it should be executed in a background thread.
        """

        def handle_started(task):
            """Do handle task-started event."""
            # This is safe to force the RUNNING state
            # because "state-failed" and "state-succeeded"
            # events will be handled after that.
            task.status = TaskStatus.RUNNING
            for observer in self._observers:
                try:
                    observer.on_task_started(task)
                except Exception:
                    logger.exception("Error handling 'task-started' event")

        def announce_task_deleted(event):
            """Do handle task-deleted event."""
            task_id = event["uuid"]
            for observer in self._observers:
                try:
                    observer.on_task_deleted(task_id)
                except Exception:
                    logger.exception(f"Error handling '{self.TASK_DELETED_EVENT}' event")

        state = self.app.events.State()
        announce_task_sent = self._notify(state, lambda task, observer: observer.on_task_sent(task))
        announce_task_started = self._make_event_handler(state, handle_started)
        announce_succeeded_tasks = self._notify(state, lambda task, observer: observer.on_task_succeeded(task))
        announce_failed_tasks = self._notify(state, lambda task, observer: observer.on_task_failed(task))
        announce_revoked_tasks = self._notify(state, lambda task, observer: observer.on_task_revoked(task))
        announce_metadata_update = self._notify(state, lambda task, observer: observer.on_task_meta_updated(task))

        with self.app.connection() as connection:
            receiver = self.app.events.Receiver(
                connection,
                handlers={
                    "task-sent": announce_task_sent,
                    "task-started": announce_task_started,
                    "task-succeeded": announce_succeeded_tasks,
                    "task-failed": announce_failed_tasks,
                    "task-revoked": announce_revoked_tasks,
                    TASK_METADATA: announce_metadata_update,
                    self.TASK_DELETED_EVENT: announce_task_deleted,
                },
            )
            receiver.capture(limit=None, timeout=None, wakeup=True)
| StarcoderdataPython |
4800458 | <filename>tracalchemy/ticket.py<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2013 <NAME> <<EMAIL>>
#
# The MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from sqlalchemy import BigInteger, Integer, UnicodeText
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.orm import mapper, relation, relationship, Query
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.schema import (Column, ForeignKey, Index, PrimaryKeyConstraint,
Table)
from tracalchemy.model_util import metadata, split_cc_list
__all__ = ['Ticket']
# Schema of Trac's main "ticket" table (column names mirror Trac's model).
ticket_table = Table('ticket', metadata,
    Column('id', Integer, primary_key=True, autoincrement=True),
    Column('type', UnicodeText),
    Column('time', BigInteger, server_default=None),
    Column('changetime', BigInteger, server_default=None),
    Column('component', UnicodeText),
    Column('severity', UnicodeText),
    Column('priority', UnicodeText),
    Column('owner', UnicodeText),
    Column('reporter', UnicodeText),
    Column('cc', UnicodeText),
    Column('version', UnicodeText),
    Column('milestone', UnicodeText),
    Column('status', UnicodeText),
    Column('resolution', UnicodeText),
    Column('summary', UnicodeText),
    Column('description', UnicodeText),
    Column('keywords', UnicodeText),
    Index('ticket_time_idx', 'time'),
    Index('ticket_status_idx', 'status'),
)

# Per-ticket custom fields; composite primary key (ticket, name).
ticket_custom_table = Table('ticket_custom', metadata,
    Column('ticket', Integer, ForeignKey('ticket.id'), primary_key=True, nullable=False),
    Column('name', UnicodeText, primary_key=True, nullable=False),
    Column('value', UnicodeText)
)
class TicketCustom(object):
    """One custom-field (name, value) pair attached to a ticket."""

    def __init__(self, name, value):
        self.name = name
        self.value = value
class TicketQuery(Query):
    """Query subclass adding ticket-specific filter helpers."""

    def by_id(self, ticket_id):
        """Restrict the query to the ticket with the given primary key."""
        return self.filter(Ticket.id == ticket_id)
class Ticket(object):
    """ORM model for a Trac ticket; classically mapped to ticket_table."""

    # Proxy over the '_custom' relation (keyed by field name).
    # NOTE(review): the proxy exposes TicketCustom.name, not .value —
    # confirm callers actually want the field names here.
    custom = association_proxy('_custom', 'name', creator=TicketCustom)

    @classmethod
    def query(cls, session):
        """Return a TicketQuery over Ticket bound to *session*."""
        return TicketQuery([cls], session=session)

    @classmethod
    def example(cls, _session=None, **kwargs):
        """Test helper: build a Ticket from keyword attributes, optionally
        adding it to *_session*. Raises on unknown attribute names."""
        # Regular users should not need to install the TracDevPlatform plugin
        # so we must not put the import in the file header.
        # This method is only intended for testing so I guess it's ok to depend
        # on pythonic_testcase here.
        from trac_dev_platform.test.lib.pythonic_testcase import (assert_true,
            assert_is_empty)
        ticket = Ticket()
        for key in tuple(kwargs):
            assert_true(hasattr(ticket, key), message='Unknown attribute %r' % key)
            setattr(ticket, key, kwargs.pop(key))
        assert_is_empty(kwargs)
        if _session:
            _session.add(ticket)
        return ticket

    def __repr__(self):
        # Enumerate mapped columns so the repr stays in sync with the schema.
        columns = list(Ticket.__mapper__.columns)
        settings = map(lambda column: column.name + '=' + repr(getattr(self, column.name)), columns)
        return 'Ticket(%s)' % ', '.join(settings)

    def cc_list(self):
        """Return the cc field split into a list of addresses."""
        return split_cc_list(self.cc)
# Classical mappings: custom fields are loaded as a dict keyed by field
# name and deleted with their parent ticket.
mapper(TicketCustom, ticket_custom_table)
mapper(Ticket, ticket_table,
    properties={
        '_custom': relation(
            TicketCustom,
            collection_class=attribute_mapped_collection('name'),
            passive_deletes=True,
        ),
    })
# -----------------------------------------------------------------------------
# Trac's ticket change log: one row per (ticket, time, field) modification.
ticket_change_table = Table('ticket_change', metadata,
    Column('ticket', Integer, ForeignKey('ticket.id'), nullable=False),
    Column('time', BigInteger, nullable=False),
    Column('author', UnicodeText),
    Column('field', UnicodeText, nullable=False),
    Column('oldvalue', UnicodeText),
    Column('newvalue', UnicodeText),
    PrimaryKeyConstraint('ticket', 'time', 'field'),
    Index('ticket_change_ticket_idx', 'ticket'),
    Index('ticket_change_time_idx', 'time'),
)

class TicketChange(object):
    """One recorded change of a single ticket field at a point in time."""

    @classmethod
    def query(cls, session):
        """Return a plain Query over TicketChange bound to *session*."""
        return session.query(cls)

mapper(TicketChange, ticket_change_table,
    properties={
        # Expose the raw FK column under a clearer attribute name.
        'ticket_id': ticket_change_table.c.ticket,
        '_ticket': relationship(Ticket,
            primaryjoin=(Ticket.id==ticket_change_table.c.ticket),
            backref='changes'),
    },
)
| StarcoderdataPython |
3297875 | <gh_stars>0
"""Make test fixtures for testing tree_intersection."""
import pytest
from .bst import BST
@pytest.fixture
def first_bst():
    """Return a BST built from [20, 17, 21, 18, 22, 19]."""
    b = BST([20, 17, 21, 18, 22, 19])
    return b


@pytest.fixture
def second_bst():
    """Return a BST built from [13, 17, 21, 2, 22, 29]."""
    b = BST([13, 17, 21, 2, 22, 29])
    return b


@pytest.fixture
def third_bst():
    """Return a BST built from [4, 7, 23, 8, 2, 39]."""
    b = BST([4, 7, 23, 8, 2, 39])
    return b


@pytest.fixture
def fourth_bst():
    """Return a BST of animal-name strings."""
    b = BST(['dog', 'cat', 'fish', 'rat', 'parrot', 'snake'])
    return b


@pytest.fixture
def fifth_bst():
    """Return a second BST of strings, overlapping fourth_bst on several keys."""
    b = BST(['dog', 'pet', 'fish', 'gerbil', 'parrot', 'snake'])
    return b
| StarcoderdataPython |
1832014 | # --------------------------------------------Assignment Day 7 | 8th September 2020-------------------------------------------------------
'''Question 1: Write a program to copy the contents of one file to another using a for loop.Do not use built-in copy function'''
# Answer 1 ------------------
# Copy input.txt into output.txt line by line (append mode), without the
# built-in copy helpers, as the assignment requires.
with open("input.txt", "r") as src, open("output.txt", "a") as dst:
    for line in src:
        dst.write(line)
# ____________________________________________________________________________________________________________________________________________
'''Question 2: Write a Python program to find maximum and minimum values in the dictionary. Do not use built-in min
and max functions.'''
# Answer 2 ------------
# Find the extreme values of a dict without the built-in min/max functions:
# sort the values once, then read the first (minimum) and last (maximum).
dic = {'a': 23, 'b': 16, 'c': 52, 'd': 14, 'e': 5}
lst = sorted(dic.values())
print(f'The Minimum value of dictionary is {lst[0]}')
print(f'The Maximum value of dictionary is {lst[-1]}')
3552309 | <filename>new/src/30.01.2021/functions.py
def equate(number1, number2):
    """Return (number1 + 4*number2) * (number1 - 3*number2) + number1."""
    return (number1 + 4 * number2) * (number1 - 3 * number2) + number1

print(equate(5, 3))
def find_speed(kilometers, hours):
    """Return the average speed in km/h (distance divided by time)."""
    return kilometers / hours

print(find_speed(100, 3))
def smallest(number1, number2, number3):
    """Return the smallest of the three numbers.

    Fixes the original's bugs: the second branch compared the wrong pair,
    the third branch tested ``number1 < number1`` (always false), and ties
    fell through to an implicit None.
    """
    result = number1
    if number2 < result:
        result = number2
    if number3 < result:
        result = number3
    return result

print(smallest(3, 1000, 5))
3343288 | from . import classification, segmentation, object_detection
from .metric_keys import DetailMetricKey
| StarcoderdataPython |
349491 | #!/usr/bin/env python
import time, struct,sys
import bluetooth
from mindwavemobile.MindwaveDataPoints import AttentionDataPoint, EEGPowersDataPoint
from mindwavemobile.MindwaveDataPointReader import MindwaveDataPointReader
import numpy as np
import pylab as pl
def main():
    """Record Mindwave EEG/attention data until Ctrl-C, then dump the EEG
    band powers as packed doubles to the file named in sys.argv[1].

    (Python 2 script: uses `print` statements and xrange.)
    """
    mdpr = MindwaveDataPointReader()
    mdpr.start()
    eeg_datapoints = []
    attention_datapoints = []
    index = 0
    try:
        while(True):
            data = mdpr.readNextDataPoint()
            if (data.__class__ is AttentionDataPoint):
                attention_datapoints.append((time.time(),data))
            if (data.__class__ is EEGPowersDataPoint):
                eeg_datapoints.append((time.time(),data))
            # Running counter printed as simple progress feedback.
            index+=1
            print index
    except KeyboardInterrupt:
        pass
    # 9 little-endian-native doubles: timestamp + 8 EEG band powers.
    fmt = 'ddddddddd'
    dataFormat = []  # NOTE(review): never used — candidate for removal.
    file_ = open(sys.argv[1], 'wb')
    # Header: the struct format string padded to a fixed 25 bytes.
    file_.write(fmt.ljust(25,' '))
    for i in xrange(len(eeg_datapoints)):
        # NOTE(review): timestamp/attention are taken from the attention
        # list while band powers come from the EEG list at the same index —
        # the two lists are appended independently, so rows may not
        # correspond and `attention` is never written; verify intent.
        timestamp = attention_datapoints[i][0]
        attention = attention_datapoints[i][1]
        delta = eeg_datapoints[i][1].delta
        theta = eeg_datapoints[i][1].theta
        lowalpha = eeg_datapoints[i][1].lowAlpha
        highalpha = eeg_datapoints[i][1].highAlpha
        lowbeta = eeg_datapoints[i][1].lowBeta
        highbeta = eeg_datapoints[i][1].highBeta
        lowgamma = eeg_datapoints[i][1].lowGamma
        midgamma = eeg_datapoints[i][1].midGamma
        s = struct.pack(fmt,timestamp, delta, theta, lowalpha, highalpha, lowbeta, highbeta, lowgamma, midgamma)
        file_.write(s)
    file_.close()
if __name__ == '__main__':
main() | StarcoderdataPython |
import pandas as pd
import matplotlib.pyplot as plt

# Per-generation abundance scores exported by an earlier Neo4j query run.
csv_path = r"D:\BMSIS_YSP\reac-space-exp\neo4j_loader_and_queries\output\2021-01-26_17-18-42-588599\all_generations_abundance_scores.csv"

frame = pd.read_csv(csv_path)
print(frame.head())

# Area chart: relationship counts plotted against node-degree rank per generation.
frame.plot.area(x='rank_node_deg_by_gen', y='count_relationships')
plt.show()
5103545 | #!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os
#-------------------------------
def put_principal_data( principal_id, public_key_pem, sealed_private_key ):
    """Stub: persist a principal's key material. No-op; returns None."""
    return None
#-------------------------------
def delete_principal_data( principal_id ):
    """Stub: remove a principal's stored data. No-op; returns None."""
    return None
#-------------------------------
def get_principal_data( principal_id ):
    """Stub: look up a principal's stored data. No-op; returns None."""
    return None
#--------------------------------
def get_slice_secret( observer_pkey_pem, slice_name, slice_fk=None ):
    """Stub: fetch the shared secret for a slice. No-op; returns None."""
    return None
#-------------------------------
def put_slice_secret( observer_pkey_pem, slice_name, slice_secret, slice_fk=None, opencloud_slice=None ):
    """Stub: store the shared secret for a slice. No-op; returns None."""
    return None
#-------------------------------
def put_volumeslice_creds( volume_name, slice_name, creds ):
    """Stub: store credentials for a volume/slice pair. No-op; returns None."""
    return None
#-------------------------------
def get_volumeslice_volume_names( slice_name ):
    """Stub: list volume names attached to a slice. No-op; returns None."""
    return None
#-------------------------------
def get_volumeslice( volume_name, slice_name ):
    """Stub: fetch the record for a volume/slice pair. No-op; returns None."""
    return None
#-------------------------------
def get_slice_hostnames( slice_name ):
    """Stub: list hostnames belonging to a slice. No-op; returns None."""
    return None
| StarcoderdataPython |
11397167 | <filename>Logicals/May2019/29/textwrap.py<gh_stars>0
import textwrap
def wrap(string, max_width):
    """Wrap *string* at *max_width* columns, joining the lines with newlines.

    textwrap.fill(text, width) is documented as equivalent to
    "\\n".join(textwrap.wrap(text, width)).
    """
    return textwrap.fill(string, max_width)
def wrap_v2(string, max_width):
    """Wrap *string* at *max_width* columns; same contract as wrap()."""
    return '\n'.join(textwrap.wrap(string, max_width))
def main():
    """Read a string and a width from stdin, then print both wrappings."""
    text = input()
    width = int(input())
    print(wrap(text, width))
    print(wrap_v2(text, width))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.