| id | content |
|---|---|
183547
|
import time
from ..ExcelDataUtil.xlsxDataGetter import XlsxDataGetter
from ..ExcelDataUtil.xlsxDataWriter import XlsxDataWriter
from ..Util.dateUtil import timestamp2datetime
from ..Variables.Status import Status
class RealNotes(object):
def __init__(self, code, strategy, **kwargs):
self.file_name = code + ".xlsx"
self.code = code
self.strategy = strategy
self.operation_history = None
self.kwargs = kwargs
def pr_status(self):
if self.operation_history is None:
self.init_strategy()
self.strategy.print_status()
def calc_next_val(self):
if self.operation_history is None:
self.init_strategy()
tup_buy, tup_sell = self.strategy.calc_next_buy_sell_val()
buy_value, buy_shares, buy_money = tup_buy
print("\n\n下次买入价格: %-8s\t买入份额: %-8s\t买入金额: %-8s" % (buy_value, buy_shares, buy_money))
if tup_sell:
sell_value, sell_shares, sell_money = tup_sell
print("下次卖出价格: %-8s\t卖出份额: %-8s\t卖出金额: %-8s\n\n" % (sell_value, sell_shares, sell_money))
else:
print("份额已卖完,无下次卖出价格\n")
def buy(self, value, shares, ts=None):
operation_history = XlsxDataGetter.get_data(self.file_name, raise_if_not_exist=False)
# "value", "shares", "money", "date_str", "status", "timestamp"]
if shares % 100 != 0:
raise ValueError("请输入100的整数倍份额")
money = shares * value
if ts is None:
ts = int(time.time())
operation_history.append((value, shares, money, timestamp2datetime(ts), Status.BUY, ts))
XlsxDataWriter.write_data(self.file_name, operation_history)
def sell(self, value, shares, ts=None):
operation_history = XlsxDataGetter.get_data(self.file_name)
if ts is None:
ts = int(time.time())
operation_history.append((value, shares, value * shares, timestamp2datetime(ts), Status.SELL, ts))
XlsxDataWriter.write_data(self.file_name, operation_history)
def init_strategy(self):
self.operation_history = XlsxDataGetter.get_data(self.file_name)
self.strategy = self.strategy(self.operation_history, **self.kwargs)
def calc_curr_val(self, value):
if self.operation_history is None:
self.init_strategy()
tup_buy, tup_sell = self.strategy.calc_curr_buy_sell_val(value)
if tup_buy is not None:
buy_value, buy_shares, buy_money = tup_buy
print("\n当前净值跌幅过大,需要加大买入, 价格: %-8s\t买入份额: %-8s\t买入金额: %-8s" % (buy_value, buy_shares, buy_money))
elif tup_sell is not None:
sell_value, sell_shares, sell_money = tup_sell
print("当前净值涨幅过大: 需要加大卖出,价格: %-8s\t卖出份额: %-8s\t卖出金额: %-8s\n" % (sell_value, sell_shares, sell_money))
else:
print("\n当前净值波动在合理范围内\n")
|
183550
|
import csv
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
import scipy.signal
import matplotlib
plt.style.use('dark_background')
# with open('csv/run_PPO_summary-tag-Info_cumulative_reward.csv', newline='') as csvfile:
with open('csv/run_PPO_summary-tag-Info_episode_length.csv', newline='') as csvfile:
# with open('csv/run_PPO_summary-tag-Info_value_loss.csv', newline='') as csvfile:
# with open('csv/run_PPO_summary-tag-Info_policy_loss.csv', newline='') as csvfile:
csvreader = csv.reader(csvfile)
csvlist = np.array(list(csvreader))
steplist = csvlist[1:, 1].astype(np.int32)
datalist = csvlist[1:, 2].astype(np.float64) * 6
datalist = scipy.signal.medfilt(datalist, kernel_size=15)
fig, ax = plt.subplots()
xdata, ydata = [], []
ln, = plt.plot([], [], animated=True)
plt.xlabel('timesteps')
plt.ylabel('frames') # <-----
plt.title('episode length') # <-----
def init():
ax.set_ylim(0, 700) # <-----
ax.set_xlim(0, 15e6)
return ln,
def update(i):
j = int(i / 10)
delta = (i % 10) / 10
print(i, j)
xdata.append(steplist[j] + delta * (steplist[j + 1] - steplist[j]))
ydata.append(datalist[j] + delta * (datalist[j + 1] - datalist[j]))
ln.set_data(xdata, ydata)
return ln,
ani = FuncAnimation(fig, update, repeat=False, frames=(len(steplist) - 1) * 10,
init_func=init, blit=True, interval=1 / 60 * 1000)
ani.save('episode_length.mp4', dpi=120, writer='ffmpeg') # <-----
# plt.show()
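# A standalone sketch of the sub-frame interpolation used in update() above
# (illustrative only; not called by the animation):
def lerp(a, b, t):
    """Linearly interpolate between a and b for t in [0, 1)."""
    return a + t * (b - a)
# update(i) effectively plots (lerp(steplist[j], steplist[j + 1], t),
# lerp(datalist[j], datalist[j + 1], t)) with j = i // 10 and t = (i % 10) / 10.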
|
183551
|
from rapidfuzz import process, fuzz
from .load_dicts import FORMULA_URI_DICT, SMILES_URI_DICT, NAME_URI_DICT, \
FORMULA_KEYS, NAME_KEYS, SMILES_KEYS, ATTRIBUTE_URI_DICT, \
ATTRIBUTE_KEYS, CLASS_URI_DICT, CLASS_KEYS, process_species, process_species_reversed
def find_nearest_match(entity_value, entity_type):
# rst = URI, score, candidate
if entity_type == 'attribute':
rst = find_nearest_match_in_attributes(entity_value)
elif entity_type == 'class':
rst = find_nearest_match_classes(entity_value)
    elif entity_type == 'species':
        rst = find_nearest_match_species(entity_value)
    else:
        raise ValueError('Unknown entity_type: %s' % entity_type)
URI = [u.replace('http://www.wikidata.org/entity/', '') for u in rst[0]]
print('find_nearest_match - 16', URI)
score = rst[1]
candidate = rst[2]
return URI, candidate
def find_nearest_match_classes(_class):
_class = _class.upper()
KEYS = CLASS_KEYS
DICT = CLASS_URI_DICT
if _class not in DICT:
rst = process.extractOne(_class, KEYS, scorer=fuzz.ratio)
candidate = rst[0]
score = rst[1]
URI = DICT[candidate]
else:
score = 100
candidate = _class
URI = DICT[candidate]
return URI, score, candidate
def find_nearest_match_species(species):
species = process_species(species)
KEYS_LIST = [FORMULA_KEYS, SMILES_KEYS, NAME_KEYS]
DICT_LIST = [FORMULA_URI_DICT, SMILES_URI_DICT, NAME_URI_DICT]
LABELS = ['FORMULA', 'SMILE', 'NAME']
highest_score = 0
best_uri = []
best_label = ''
best_candidate = ''
for KEYS, DICTS, LABEL in zip(KEYS_LIST, DICT_LIST, LABELS):
rst = find_nearest_match_in_one_species(species, KEYS, DICTS)
URIS = rst[0]
score = rst[1]
candidate = rst[2]
if score > highest_score:
best_uri = URIS
best_label = LABEL
highest_score = score
best_candidate = candidate
return best_uri, highest_score, process_species_reversed(best_candidate), best_label
def find_nearest_match_in_one_species(species, KEYS, DICT):
if species not in DICT:
        rst = process.extractOne(species, KEYS, scorer=fuzz.ratio)
        score = rst[1]
        URI = DICT[rst[0]]
        candidate = process_species_reversed(rst[0])
else:
score = 100
candidate = process_species_reversed(species)
URI = DICT[species]
return URI, score, candidate
# it is exactly like the one for species
def find_nearest_match_in_attributes(attribute):
attribute = attribute.upper()
KEYS = ATTRIBUTE_KEYS
DICT = ATTRIBUTE_URI_DICT
if attribute not in DICT:
rst = process.extractOne(attribute, KEYS, scorer=fuzz.ratio)
candidate = rst[0]
score = rst[1]
URI = DICT[candidate]
else:
score = 100
candidate = attribute
URI = DICT[candidate]
return URI, score, candidate
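# Usage sketch ('H2O' and 'boiling point' are illustrative inputs; the URI
# dictionaries are loaded by load_dicts above):
#
#   uri, candidate = find_nearest_match('H2O', 'species')
#   uri, candidate = find_nearest_match('boiling point', 'attribute')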
|
183562
|
from parameterized import parameterized
from integration.helpers.base_test import BaseTest
class TestIntrinsicFunctionsSupport(BaseTest):
# test code definition uri object and serverless function properties support
@parameterized.expand(
[
"combination/intrinsics_code_definition_uri",
"combination/intrinsics_serverless_function",
]
)
def test_common_support(self, file_name):
# Just a simple deployment will validate that Code & Swagger files were accessible
        # Just a simple deployment will validate that all properties were resolved as expected
self.create_and_verify_stack(file_name, self.get_default_test_template_parameters())
    def test_serverless_api_properties_support(self):
self.create_and_verify_stack(
"combination/intrinsics_serverless_api", self.get_default_test_template_parameters()
)
# Examine each resource policy and confirm that ARN contains correct APIGW stage
lambda_function_name = self.get_physical_id_by_type("AWS::Lambda::Function")
lambda_client = self.client_provider.lambda_client
# This is a JSON string of resource policy
policy = lambda_client.get_policy(FunctionName=lambda_function_name)["Policy"]
# Instead of parsing the policy, we will verify that the policy contains certain strings
# that we would expect based on the resource policy
# This is the stage name specified in YAML template
api_stage_name = "devstage"
        # Paths are specified in the YAML template
get_api_policy_expectation = "*/GET/pathget"
post_api_policy_expectation = "*/POST/pathpost"
self.assertTrue(
get_api_policy_expectation in policy,
"{} should be present in policy {}".format(get_api_policy_expectation, policy),
)
self.assertTrue(
post_api_policy_expectation in policy,
"{} should be present in policy {}".format(post_api_policy_expectation, policy),
)
# Test for tags
function_result = lambda_client.get_function(FunctionName=lambda_function_name)
tags = function_result["Tags"]
self.assertIsNotNone(tags, "Expecting tags on function.")
self.assertTrue("lambda:createdBy" in tags, "Expected 'lambda:CreatedBy' tag key, but not found.")
self.assertEqual(tags["lambda:createdBy"], "SAM", "Expected 'SAM' tag value, but not found.")
self.assertTrue("TagKey1" in tags)
self.assertEqual(tags["TagKey1"], api_stage_name)
|
183580
|
import os
import config
import basehandler
import models
import accounts
import urllib
import admin
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext import db
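# Request flow for the 't' query parameter handled below (a summary inferred
# from the code):
#   t=''  -> render the "enter player name" form        (next_type='q')
#   t='q' -> look up the player and show current rating (next_type='s')
#   t='s' -> validate and store the new rating          (next_type='q')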
class AdjustScore(basehandler.BaseHandler):
def get(self):
account = self.has_access()
if not account:
self.redirect(users.create_login_url(self.request.uri))
return
type = self.request.get('t').lower()
u = self.request.get('u').lower()
player_name = self.request.get('p').lower()
message = ''
# if no type specified, there is no query
rating = 0
next_type = ''
if not type:
# The next request will be a query for a player name's score
next_type = 'q'
# Validate the user. Is this a query or set score request?
p = None
if type == 'q' or type == 's':
# This is a query for a user. Make sure the user is valid
if not u:
message = 'No player name entered!'
type = ''
next_type = 'q'
else:
p = models.PlayerModel.get(models.playermodel_key(u))
if not p:
message = 'Could not find player %s! Please try again:' % u
u = ''
type = ''
next_type = 'q'
# Step through the states
rating = 0
reason = ''
if type == 'q':
# The next request will be for changing a score
next_type = 's'
rating = p.rating
if type == 's':
# Is the new rating valid?
rating = self.request.get('s')
reason = self.request.get('r')
success = True
            try:
                rating = int(rating)
            except ValueError:
                success = False
if not success or rating >= 3000 or rating < 0:
message = '"%s" is an invalid score. Please try again:' % rating
rating = p.rating
type = 'q'
next_type = 's'
elif not reason:
message = 'Must enter a reason. Please try again:'
rating = p.rating
type = 'q'
next_type = 's'
else:
# Record this action
d = dict(action='adjust_score', player_name=u, old_rating=p.rating, new_rating=rating, reason=reason)
admin.save_action(account.name, self.request.remote_addr, d)
p.rating = rating
p.put()
message = 'Successfully set the score of player %s to %s, reason: %s.' % (u, rating, reason)
type = ''
next_type = 'q'
template_values = {
'tabs': config.get_tabs(player_name, account),
'selected_tab': config.TAB_NONE,
'form_url': config.ADJUSTSCORE_URL,
'message': message,
'player_name': player_name,
'u': u,
'rating': rating,
'type': type,
'reason': reason,
'next_type': next_type
}
self.response.headers['Content-Type'] = 'application/xhtml+xml'
path = os.path.join(os.path.dirname(__file__), 'adjustscore.xhtml')
self.response.out.write(template.render(path, template_values))
def has_access(self):
# Requires an authenticated user with proper access rights
account = accounts.account()
if account and account.ADJUST_SCORE_ACCESS_RIGHT:
return account
return None
|
183651
|
import pandas as pd
from report import reporting as rep
from checks import NaptanCheck
# %%
class MultiRoadName(NaptanCheck):
"""[summary] A collection of methods to check that the roads names contain
the correct types and collection of words.
Args:
NaptanCheck ([type]): [description]
Returns:
[type]: [description]
"""
# for reporting
check_name = "Check Multiroad Name words in stop"
check_warning_level = "low"
check_geographic_level = "stops"
@classmethod
def stop_with_multiple_road_names(cls, gdf, col_name="CommonName"):
"""[summary]CommonNames in NaPTAN should be simple and not composite.
Most examples of commonnames which include two of the designated
words are ones where two road names are used in a composite name,
contrary to NaPTAN guidance.
This uses regex, but they could be some other way of doing this...
Arguments:
df {[type]} -- [description]
"""
swmrn_gdf = gdf
swmrn_gdf[col_name] = swmrn_gdf[col_name].str.lower()
try:
            # leave this here; it's not being used, but keep it anyway
targets = [
"road",
"roads",
"street",
"streets",
"avenue",
"avenues",
"garden",
"gardens",
"lane",
"lanes",
"drive",
"drives",
"way",
"ways",
]
# regex patterns for detection.
pattern = r"\b(road|roads|\
street|streets|\
avenue|\avenues|\
garden|gardens|\
lane|lanes\
drive|drives\
way|ways)\b"
            fail_rds_re = (
                r"\b(street|streets|avenue|avenues|garden|"
                r"gardens|lane|lanes|drive|drives|way|ways)\b"
            )
            fail_aves_re = (
                r"\b(road|roads|street|streets|garden|gardens|"
                r"lane|lanes|drive|drives|way|ways)\b"
            )
            fail_gdns_re = (
                r"\b(road|roads|street|streets|avenue|avenues|"
                r"lane|lanes|drive|drives|way|ways)\b"
            )
            fail_lanes_re = (
                r"\b(road|roads|street|streets|avenue|avenues|"
                r"garden|gardens|drive|drives|way|ways)\b"
            )
            fail_drives_re = (
                r"\b(road|roads|street|streets|avenue|avenues|"
                r"garden|gardens|lane|lanes|way|ways)\b"
            )
            fail_ways_re = (
                r"\b(road|roads|street|streets|avenue|avenues|"
                r"garden|gardens|lane|lanes|drive|drives)\b"
            )
tn = swmrn_gdf[swmrn_gdf[col_name].str.contains(pattern, regex=True)]
roads = tn[tn[col_name].str.contains(r"\b(road|roads)\b")]
fail_rds = roads[roads[col_name].str.contains(fail_rds_re, regex=True)]
aves = tn[tn[col_name].str.contains(r"\b(avenue|avenues)\b")]
fail_aves = aves[aves[col_name].str.contains(fail_aves_re, regex=True)]
gdns = tn[tn[col_name].str.contains(r"\b(garden|gardens)\b")]
failgdns = gdns[gdns[col_name].str.contains(fail_gdns_re, regex=True)]
lanes = tn[tn[col_name].str.contains(r"\b(lane|lanes)\b")]
faillanes = lanes[lanes[col_name].str.contains(fail_lanes_re, regex=True)]
drives = tn[tn[col_name].str.contains(r"\b(drive|drives)\b")]
faildrives = drives[
drives[col_name].str.contains(fail_drives_re, regex=True)
]
ways = tn[tn[col_name].str.contains(r"\b(way|ways)\b")]
failways = ways[ways[col_name].str.contains(fail_ways_re, regex=True)]
all_dfs = [fail_rds, fail_aves, failgdns, faillanes, faildrives, failways]
failed_nodes = pd.concat(all_dfs)
failed_nodes[col_name] = failed_nodes[col_name].str.title()
rep.report_failing_nodes(
gdf, "Stop with Multiple road type names", failed_nodes
)
return failed_nodes
        except Exception:
            raise
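# Illustrative call (hypothetical GeoDataFrame; "Mill Lane Road" combines two
# road-type words and would be flagged, "High Street" would not):
#
#   import geopandas as gpd
#   gdf = gpd.GeoDataFrame({"CommonName": ["High Street", "Mill Lane Road"]})
#   failed = MultiRoadName.stop_with_multiple_road_names(gdf)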
|
183657
|
import asyncio
import json
from threading import Timer
import httpx
import pytest
from ariadne.asgi import GQL_CONNECTION_INIT, GQL_START
from websockets import connect
my_storage = {}
@pytest.fixture
def storage():
return my_storage
@pytest.mark.asyncio
async def test_create_user(host, credentials, storage):
query = """
mutation createUser($email: String!, $password: String!) {
createUser(email: $email, password: $password) {
id,
errors
}
}
"""
async with httpx.AsyncClient() as client:
response = await client.post(
f"http://{host}/",
timeout=60,
json={"query": query, "variables": credentials},
)
json_response = json.loads(response.text)
assert ("errors" in json_response) == False
assert json_response["data"]["createUser"]["id"] is not None
storage["user_id"] = json_response["data"]["createUser"]["id"]
@pytest.mark.asyncio
async def test_auth_user(host, credentials, storage):
query = """
mutation authUser($email: String!, $password: String!) {
createToken(email: $email, password: $password) {
errors,
token
}
}
"""
async with httpx.AsyncClient() as client:
response = await client.post(
f"http://{host}/",
headers={},
timeout=60,
json={"query": query, "variables": credentials},
)
json_response = json.loads(response.text)
assert ("errors" in json_response) == False
assert json_response["data"]["createToken"]["token"] is not None
storage["token"] = json_response["data"]["createToken"]["token"]
async def create_blog(host, storage):
query = """
mutation createblog($title: String!, $description: String!) {
createblog(title: $title, description: $description) {
errors
id
}
}
"""
token = storage["token"]
async with httpx.AsyncClient() as client:
response = await client.post(
f"http://{host}/",
headers={"Authorization": f"Bearer {token}"},
timeout=60,
json={
"query": query,
"variables": {"title": "title", "description": "description"},
},
)
json_response = json.loads(response.text)
assert ("errors" in json_response) == True
assert json_response["data"]["createblog"]["id"] is not None
@pytest.mark.asyncio
async def test_create_blog(server, host, storage):
await create_blog(host, storage)
@pytest.mark.asyncio
async def test_subscription(server, host, storage):
query = """
subscription reviewblog($token: String!) {
reviewblog(token: $token) {
errors
id
}
}
"""
variables = {"token": f'Bearer {storage["token"]}'}
ws = await connect(f"ws://{host}/", subprotocols=["graphql-ws"])
await ws.send(json.dumps({"type": GQL_CONNECTION_INIT}))
await ws.send(
json.dumps(
{"type": GQL_START, "payload": {"query": query, "variables": variables},}
)
)
received = await ws.recv()
assert received == '{"type": "connection_ack"}'
    def delay_create_blog(host, storage):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        loop.run_until_complete(create_blog(host, storage))
    timer = Timer(1.0, delay_create_blog, (host, storage))
timer.start()
received = await ws.recv()
await ws.close()
json_response = json.loads(received)
assert ("errors" in json_response) == False
assert json_response["payload"]["data"]["reviewblog"]["id"] is not None
|
183662
|
def stockmax(p):
    """Maximum profit when you may buy one share of the stock each day
    and sell all accumulated shares on any later day."""
    ind_max = p.index(max(p))  # find the (first) max price
    inv = sum(p[:ind_max])  # cost of buying one share on every day before the max
    pf = len(p[:ind_max]) * p[ind_max] - inv  # sell all of those shares at the max price
    if len(p[ind_max + 1:]) > 0:
        pf += stockmax(p[ind_max + 1:])  # repeat for the prices after the max
    return pf
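# Worked example: with prices [1, 2, 100], buy on the first two days (cost 3)
# and sell both shares at 100, for a profit of 2 * 100 - 3 = 197:
#
#   assert stockmax([1, 2, 100]) == 197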
|
183688
|
import csv
import numpy as np
import cv2
def resize_and_crop(image, img_size):
""" Resize an image to the given img_size by first rescaling it
and then applying a central crop to fit the given dimension. """
source_size = np.array(image.shape[:2], dtype=float)
target_size = np.array(img_size, dtype=float)
# Scale
scale = np.amax(target_size / source_size)
inter_size = np.round(source_size * scale).astype(int)
image = cv2.resize(image, (inter_size[1], inter_size[0]))
# Central crop
pad = np.round((source_size * scale - target_size) / 2.).astype(int)
image = image[pad[0]:(pad[0] + int(target_size[0])),
pad[1]:(pad[1] + int(target_size[1])), :]
return image
def read_timestamps(text_file):
"""
Read a text file containing the timestamps of images
and return a dictionary matching the name of the image
to its timestamp.
"""
timestamps = {'name': [], 'date': [], 'hour': [],
'minute': [], 'time': []}
with open(text_file, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=' ')
for row in reader:
timestamps['name'].append(row[0])
timestamps['date'].append(row[1])
hour = int(row[2])
timestamps['hour'].append(hour)
minute = int(row[3])
timestamps['minute'].append(minute)
timestamps['time'].append(hour + minute / 60.)
return timestamps
def ascii_to_string(s):
""" Convert the array s of ascii values into the corresponding string. """
return ''.join(chr(i) for i in s)
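# Usage sketch ('photo.jpg' is an illustrative path; assumes an image
# readable by OpenCV):
#
#   img = cv2.imread('photo.jpg')
#   thumb = resize_and_crop(img, (224, 224))  # rescale, then center-crop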
|
183741
|
import warnings
import os.path as osp
import tensorflow as tf
import numpy as np
import time
from tflearn import is_training
from in_out import create_dir
from general_utils import iterate_in_chunks
from latent_3d_points.neural_net import Neural_Net, MODEL_SAVER_ID
try:
from latent_3d_points.structural_losses.tf_nndistance import nn_distance
from latent_3d_points.structural_losses.tf_approxmatch import approx_match, match_cost
except Exception:
print('External Losses (Chamfer-EMD) cannot be loaded. Please install them first.')
exit()
class AutoEncoder(Neural_Net):
'''
An Auto-Encoder for point-clouds.
'''
def __init__(self, name, configuration, graph=None):
c = configuration
self.configuration = c
self.name = name
Neural_Net.__init__(self, name, graph)
self.n_input = c.n_input
self.n_output = c.n_output
self.batch_size=c.batch_size
in_shape = [c.batch_size] + self.n_input
out_shape = [c.batch_size] + self.n_output
with tf.variable_scope(name):
self.x = tf.placeholder(tf.float32, in_shape)
self.gt = self.x
self.z = c.encoder(self.x, **c.encoder_args)
assert self.z.get_shape()[1]==256
zerovector = tf.constant(0.0, dtype=tf.float32, shape=[self.z.get_shape()[0], 64])
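            # The 256-d code is split into four 64-d sub-codes; each sub-code
            # is decoded on its own (with the other three slots zeroed)
            # through a shared decoder, while branch 5 decodes the full code.
            # All five reconstructions are trained against the same ground
            # truth in _create_loss().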
with tf.variable_scope('sharedDecoder') as scope:
print(scope)
subcode1 = tf.concat( [self.z[:,0:64], zerovector, zerovector, zerovector] , axis=1 )
layer1 = c.decoder(subcode1, nameprefix='branch_5decoder', scope=scope, reuse=False, **c.decoder_args)
with tf.variable_scope('sharedDecoder', reuse=True) as scope:
print(scope)
subcode2 = tf.concat( [zerovector, self.z[:,64:128], zerovector, zerovector] , axis=1 )
layer2 = c.decoder(subcode2, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
with tf.variable_scope('sharedDecoder', reuse=True) as scope:
print(scope)
subcode3 = tf.concat( [zerovector, zerovector, self.z[:,128:192], zerovector] , axis=1 )
layer3 = c.decoder( subcode3, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
with tf.variable_scope('sharedDecoder', reuse=True) as scope:
print(scope)
subcode4 = tf.concat( [zerovector, zerovector, zerovector, self.z[:,192:256]] , axis=1 )
layer4 = c.decoder(subcode4, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
with tf.variable_scope('sharedDecoder', reuse=True) as scope:
print(scope)
layer5 = c.decoder(self.z, nameprefix='branch_5decoder', scope=scope, reuse=True, **c.decoder_args)
self.x_b1 = tf.reshape(layer1, [-1, self.n_output[0], self.n_output[1]])
self.x_b2 = tf.reshape(layer2, [-1, self.n_output[0], self.n_output[1]])
self.x_b3 = tf.reshape(layer3, [-1, self.n_output[0], self.n_output[1]])
self.x_b4 = tf.reshape(layer4, [-1, self.n_output[0], self.n_output[1]])
self.x_b5 = tf.reshape(layer5, [-1, self.n_output[0], self.n_output[1]])
self.x_all = tf.concat([self.x_b1,self.x_b2,self.x_b3,self.x_b4,self.x_b5], 2)
self.x_reconstr = self.x_b5
self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=c.saver_max_to_keep)
self._create_loss()
self._setup_optimizer()
# GPU configuration
if hasattr(c, 'allow_gpu_growth'):
growth = c.allow_gpu_growth
else:
growth = True
config = tf.ConfigProto()
config.gpu_options.allow_growth = growth
# Summaries
self.merged_summaries = tf.summary.merge_all()
self.train_writer = tf.summary.FileWriter(osp.join(configuration.train_dir, 'summaries'), self.graph)
# Initializing the tensor flow variables
self.init = tf.global_variables_initializer()
# Launch the session
self.sess = tf.Session(config=config)
self.sess.run(self.init)
def encode_layers(self, scopename, input_pcs, reuse=False ):
c = self.configuration
with tf.variable_scope(scopename, reuse=reuse ):
return c.encoder(input_pcs, **c.encoder_args )
def decode_layers(self, scopename, input_code, reuse=False ):
c = self.configuration
with tf.variable_scope(scopename, reuse=reuse ):
with tf.variable_scope('sharedDecoder', reuse=reuse) as scope:
print(scope)
layer5 = c.decoder( input_code, nameprefix='branch_5decoder', scope=scope, reuse=reuse, **c.decoder_args)
x_reconstr = tf.reshape(layer5, [-1, self.n_output[0], self.n_output[1]])
return x_reconstr
def _create_loss(self):
c = self.configuration
self.loss = 0
if c.loss == 'chamfer':
cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_b1, self.gt)
self.loss += tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1) * 0.1
cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_b2, self.gt)
self.loss += tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1) * 0.1
cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_b3, self.gt)
self.loss += tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1) * 0.1
cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_b4, self.gt)
self.loss += tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1) * 0.1
cost_p1_p2, _, cost_p2_p1, _ = nn_distance(self.x_b5, self.gt)
self.loss += tf.reduce_mean(cost_p1_p2) + tf.reduce_mean(cost_p2_p1)
self.match_errors = cost_p1_p2 + cost_p2_p1
elif c.loss == 'emd':
match = approx_match(self.x_b1, self.gt)
self.loss_1 = tf.reduce_mean(match_cost(self.x_b1, self.gt, match))
match = approx_match(self.x_b2, self.gt)
self.loss_2 = tf.reduce_mean(match_cost(self.x_b2, self.gt, match))
match = approx_match(self.x_b3, self.gt)
self.loss_3 = tf.reduce_mean(match_cost(self.x_b3, self.gt, match))
match = approx_match(self.x_b4, self.gt)
self.loss_4 = tf.reduce_mean(match_cost(self.x_b4, self.gt, match))
match = approx_match(self.x_b5, self.gt)
self.loss_5 = tf.reduce_mean(match_cost(self.x_b5, self.gt, match))
self.match_errors = match_cost(self.x_b5, self.gt, match) / self.n_input[0]
self.loss = self.loss_1 * 0.1 + self.loss_2* 0.1 + self.loss_3* 0.1 + self.loss_4* 0.1 + self.loss_5
else:
print("error! you must choose one!")
reg_losses = self.graph.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
if c.exists_and_is_not_none('w_reg_alpha'):
w_reg_alpha = c.w_reg_alpha
else:
w_reg_alpha = 1.0
print('reg_losses:')
print(reg_losses)
print('w_reg_alpha = ', w_reg_alpha)
for rl in reg_losses:
self.loss += (w_reg_alpha * rl)
def _setup_optimizer(self):
c = self.configuration
self.lr = c.learning_rate
if hasattr(c, 'exponential_decay') and hasattr(c, 'decay_steps'):
self.lr = tf.train.exponential_decay(c.learning_rate, self.epoch, c.decay_steps, decay_rate=0.5, staircase=True, name="learning_rate_decay")
self.lr = tf.maximum(self.lr, 1e-5)
tf.summary.scalar('learning_rate', self.lr)
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)
self.train_step = self.optimizer.minimize(self.loss)
def train(self, train_data, configuration, log_file=None ):
c = configuration
stats = []
if c.saver_step is not None:
create_dir(c.train_dir)
epoch = int(self.sess.run(self.epoch))
while epoch < c.training_epochs:
loss, duration = self._single_epoch_train(train_data, c)
epoch = int(self.sess.run(self.increment_epoch))
stats.append((epoch, loss, duration))
if epoch % c.loss_display_step == 0:
print("Epoch:", '%04d' % (epoch), 'training time (minutes)=', "{:.4f}".format(duration / 60.0), "loss=", "{:.9f}".format(loss))
if log_file is not None:
log_file.write('%04d\t%.9f\t%.4f\n' % (epoch, loss, duration / 60.0))
# Save the models checkpoint periodically.
if c.saver_step is not None and (epoch % c.saver_step == 0 or epoch - 1 == 0):
checkpoint_path = osp.join(c.train_dir, MODEL_SAVER_ID)
self.saver.save(self.sess, checkpoint_path, global_step=self.epoch)
if c.exists_and_is_not_none('summary_step') and (epoch % c.summary_step == 0 or epoch - 1 == 0):
summary = self.sess.run(self.merged_summaries)
self.train_writer.add_summary(summary, epoch)
return stats
def _single_epoch_train(self, train_data, configuration ):
n_examples = train_data.num_examples
epoch_loss = 0.
batch_size = configuration.batch_size
n_batches = int(n_examples / batch_size)
start_time = time.time()
# Loop over all batches
for _ in range(n_batches):
batch_i, _, _ = train_data.next_batch(batch_size)
_, loss = self.partial_fit(batch_i)
epoch_loss += loss
epoch_loss /= n_batches
duration = time.time() - start_time
if configuration.loss == 'emd':
epoch_loss /= len(train_data.point_clouds[0])
return epoch_loss, duration
    def partial_fit(self, X):
        '''Trains the model with mini-batches of input data.
        Returns:
            The reconstructed (output) point-clouds.
            The loss of the mini-batch.
        '''
        is_training(True, session=self.sess)
        try:
            _, loss, recon = self.sess.run((self.train_step, self.loss, self.x_reconstr), feed_dict={self.x: X})
        finally:
            is_training(False, session=self.sess)
        return recon, loss
def reconstruct(self, X, GT=None, compute_loss=True):
'''Use AE to reconstruct given data.
GT will be used to measure the loss (e.g., if X is a noisy version of the GT)'''
if compute_loss:
loss = self.loss
else:
loss = self.no_op
if GT is None:
return self.sess.run((self.x_reconstr, loss), feed_dict={self.x: X})
else:
return self.sess.run((self.x_reconstr, loss), feed_dict={self.x: X, self.gt: GT})
def transform(self, X):
'''Transform data by mapping it into the latent space.'''
return self.sess.run(self.z, feed_dict={self.x: X})
def decode(self, z):
if np.ndim(z) == 1: # single example
z = np.expand_dims(z, 0)
return self.sess.run((self.x_reconstr), {self.z: z})
def get_latent_codes(self, pclouds ):
''' wrapper of self.transform, to get the latent (bottle-neck) codes for a set of input point clouds.
Args:
pclouds (N, K, 3) numpy array of N point clouds with K points each.
'''
num2skip = len(pclouds) % self.batch_size
idx = np.arange(len(pclouds)- num2skip)
latent_codes = []
for b in iterate_in_chunks(idx, self.batch_size):
latent_codes.append(self.transform(pclouds[b]))
# deal with remainder
if num2skip>0:
theRestData = pclouds[ len(pclouds)-num2skip : len(pclouds) ]
theRestData = np.tile( theRestData, (self.batch_size,1, 1) )
encodeResult = self.transform(theRestData[0:self.batch_size])
latent_codes.append( encodeResult[0:num2skip] )
return np.vstack(latent_codes)
def get_point_clouds(self, latentcodes ):
num2skip = len(latentcodes) % self.batch_size
idx = np.arange(len(latentcodes)-num2skip)
pointclouds = []
for b in iterate_in_chunks(idx, self.batch_size):
pointclouds.append(self.decode(latentcodes[b]))
# deal with remainder
if num2skip>0:
theRestData = latentcodes[ len(latentcodes)-num2skip : len(latentcodes) ]
theRestData = np.tile( theRestData, (self.batch_size,1) )
decodeResult = self.decode( theRestData[0:self.batch_size] )
pointclouds.append( decodeResult[0:num2skip] )
return np.vstack(pointclouds)
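# Minimal usage sketch (the Configuration object, the encoder/decoder
# builders and the training-data loader come from the surrounding
# latent_3d_points project; the names below are assumptions):
#
#   conf = Configuration(...)           # n_input=[2048, 3], loss='chamfer', ...
#   ae = AutoEncoder('part_ae', conf)
#   stats = ae.train(train_data, conf)
#   codes = ae.get_latent_codes(point_clouds)  # (N, 256) bottleneck codes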
|
183789
|
class BaseContest(object):
'''BaseContest is an abstract class for contest-specific modifications to
Marathoner.
'''
def __init__(self, project):
self.project = project
self.maximize = project.maximize
def extract_score(self, visualizer_stdout, solution_stderr):
'''Extract raw score and return it.
@param visualizer_stdout: output received from visualizer's stdout
@type visualizer_stdout: list of lines
@param solution_stderr: output received from solution's stderr
@type solution_stderr: list of lines
'''
raise NotImplementedError()
# single-test callbacks
def single_test_starting(self, seed):
'''Called before running the single test.'''
raise NotImplementedError()
def single_test_ending(self, seed, visualizer_stdout, solution_stderr,
best_score, current_score):
'''Called after the single test *successfully* finished.
@param best_score: best score for the current test. Updated with the
`current_score` already.
@type best_score: Score
@param current_score: score for the current test
@type current_score: Score
'''
raise NotImplementedError()
# multi-test callbacks
def multiple_tests_starting(self, num_tests):
'''Called before running the batch of tests.
@param num_tests: number of tests to be run.
'''
raise NotImplementedError()
def one_test_starting(self, seed):
'''Called before running the test from the batch.'''
raise NotImplementedError()
def one_test_ending(self, seed, visualizer_stdout, solution_stderr,
best_score, current_score):
'''Called after the test from the batch *successfully* finished.'''
raise NotImplementedError()
def multiple_tests_ending(self, num_tests):
'''Called after running the batch of tests.
        @param num_tests: number of tests that actually ran. Can be lower
            than the number of tests sent to `multiple_tests_starting()`
            if the user kills execution. Basically it is the number of times
            `one_test_ending()` was called.
'''
raise NotImplementedError()
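# A minimal concrete subclass sketch (hypothetical; a real contest class must
# implement every callback above, most of which can simply `pass`):
#
#   class MyContest(BaseContest):
#       def extract_score(self, visualizer_stdout, solution_stderr):
#           for line in visualizer_stdout:
#               if line.startswith('Score ='):
#                   return float(line.split('=')[1])
#           raise ValueError('No score found in visualizer output')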
|
183838
|
from mstrio.connection import get_connection
from mstrio.server.job_monitor import (Job, JobStatus, JobType, kill_all_jobs, kill_jobs,
list_jobs, ObjectType, PUName)
from mstrio.server.project import Project
from mstrio.users_and_groups.user import User
# connect to environment without providing user credentials
# variable `workstationData` is stored within Workstation
conn = get_connection(workstationData)
# get project by name
project = Project(connection=conn, name="MicroStrategy Tutorial")
# get user by name
admin = User(connection=conn, name="Administrator")
# get list of `Job` objects for all jobs on the environment
jobs = list_jobs(conn)
print('All jobs on the environment as objects')
print(jobs)
if jobs:
    # instantiate an existing job using the constructor
job = Job(conn, id=jobs[0].id) # example job ID
# get properties for the first job on the list
props = job.list_properties()
    print('Properties of a given job')
print(props)
# kill the job
job.kill()
# get list of dicts representing job information for all jobs on the environment
jobs = list_jobs(conn, to_dictionary=True)
print('All jobs on the environment as dictionaries')
print(jobs)
# get a list of `Job` objects filtered by job status, project and object type
jobs = list_jobs(conn, status=JobStatus.LOADING_PROMPT, project=project,
object_type=ObjectType.CUBE)
print("All jobs filtered by 'LOADINIG_PROMPT' status, project and 'CUBE' type")
print(jobs)
# get a list of `Job` objects filtered by job type and job owner
jobs = list_jobs(conn, type=JobType.INTERACTIVE, user=admin)
print("All jobs filtered by 'INTERACTIVE' type and 'Administrator' user")
print(jobs)
# get a list of `Job` objects filtered by elapsed time and memory usage
# NOTE: memory_usage filter works for 11.3.3+ I-Server versions
slow_jobs = list_jobs(conn, elapsed_time="gt:10000", memory_usage="gt:500")
print("List of job objects filtered by elapsed time and memory usage")
print(slow_jobs)
if slow_jobs:
# kill jobs by passing either Job objects or ids
result = kill_jobs(conn, slow_jobs)
    # kill_jobs returns Success, PartialSuccess or MSTRException; you can
    # check which jobs were killed and which were not, and why, in case of
    # PartialSuccess
print("killed jobs")
print(result.succeeded)
print("not killed jobs")
print(result.failed)
# kill jobs running over 5 hours
time_hours = 5
elapsed_t = time_hours * 60**2  # convert to seconds (for 11.3.3+ I-Server versions)
if conn.iserver_version == '11.3.0200':
    elapsed_t = 1000 * elapsed_t  # convert to milliseconds (for the 11.3.2 I-Server version)
elapsed_t = f'gt:{elapsed_t}'  # format the filter (valid operators: 'gt:' and 'lt:')
try:
kill_all_jobs(conn, elapsed_time=elapsed_t, force=True)
except ValueError as e:
print(e)
# kill filtered jobs similarly to list_jobs() using single function
try:
result = kill_all_jobs(conn, pu_name=PUName.SQL_ENGINE, memory_usage="gt:800")
# you can easily evaluate if all passed jobs were killed using bool()
# evaluation on result, which will return True if result is Success and
# False if result is PartialSuccess
print("Result of killing jobs:")
print(bool(result))
except ValueError as e:
print(e)
|
183847
|
import os
import parser
import unittest
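# Each .ods fixture under ./output_test/ is expected to yield exactly one
# employee record, so every test asserts on employees[0].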
class TestParser(unittest.TestCase):
# Tests for parsers for the months from July 2019 to November 2020
def test_active_members_jan_2020(self):
self.maxDiff = None
expected = {
"reg": "3725",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE SAO BERNARDO DO CAMPO",
"active": True,
"income": {
"total": 40253.32,
"wage": 33689.1,
"perks": {
"total": 960.0,
"food": 960.0,
"vacation_pecuniary": 0.0,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 5604.22,
"trust_position": 0.0,
"others_total": 5604.22,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 5604.22,
"GRAT. NATUREZA ESPECIAL": 0.0,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 12623.0,
"prev_contribution": 3705.8,
"ceil_retention": 0.0,
"income_tax": 8917.2,
},
}
files = (
"./output_test/Membros_ativos-01-2020.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-01-2020.ods",
)
employees = parser.parse_active_members(files, "01", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_aug_2019(self):
self.maxDiff = None
expected = {
"reg": "2602",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA MILITAR",
"active": True,
"income": {
"total": 40213.32,
"wage": 33689.1,
"perks": {"total": 920.0, "food": 920.0, "vacation_pecuniary": 0.0},
"other": {
"total": 5604.22,
"trust_position": 0.0,
"others_total": 5604.22,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 5604.22,
"GRAT. NATUREZA ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 12518.73,
"prev_contribution": 3705.8,
"ceil_retention": 0.0,
"income_tax": 8812.93,
},
}
files = (
"./output_test/Membros_ativos-08-2019.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-08-2019.ods",
)
employees = parser.parse_active_members(files, "08", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_dez_2019(self):
self.maxDiff = None
expected = {
"reg": "2970",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE JABOTICABAL",
"active": True,
"income": {
"total": 89843.27,
"wage": 33689.1,
"perks": {"total": 33900.45},
"other": {
"total": 22253.72,
"trust_position": 0.0,
"others_total": 22253.72,
"others": {
"Gratificação Natalina": 22253.72,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 23764.14,
"prev_contribution": 9859.5,
"ceil_retention": 0.0,
"income_tax": 13904.64,
},
}
files = ("./output_test/Membros_ativos-12-2019.ods",)
employees = parser.parse_active_members(files, "12", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_members_jan_2020(self):
self.maxDiff = None
expected = {
"reg": "137707",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIAS DE JUSTICA",
"active": False,
"income": {
"total": 52739.74,
"wage": 35159.83,
"perks": {"total": 0.0},
"other": {
"total": 17579.91,
"trust_position": 0.0,
"others_total": 17579.91,
"others": {
"Gratificação Natalina": 17579.91,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 11884.88,
"prev_contribution": 4487.91,
"ceil_retention": 0.0,
"income_tax": 7396.97,
},
}
files = ("./output_test/Membros_inativos-01-2020.ods",)
employees = parser.unused_parse_inactive_members(files, "01", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_members_dez_2019(self):
self.maxDiff = None
expected = {
"reg": "948780",
"name": "<NAME>",
"role": "PROCURADOR DE JUSTICA",
"type": "membro",
"workplace": "PROCURADORIA DE JUSTICA CRIMINAL",
"active": False,
"income": {
"total": 57373.46,
"wage": 36742.01,
"perks": {"total": 0.0},
"other": {
"total": 20631.45,
"trust_position": 1506.96,
"others_total": 19124.49,
"others": {
"Gratificação Natalina": 19124.49,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 24777.39,
"prev_contribution": 8591.43,
"ceil_retention": 0.0,
"income_tax": 16185.96,
},
}
files = ("./output_test/Membros_inativos-12-2019.ods",)
employees = parser.unused_parse_inactive_members(files, "12", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_servants_jan_2020(self):
self.maxDiff = None
expected = {
"reg": "1174",
"name": "<NAME>",
"role": "OFICIAL DE PROMOTORIA I",
"type": "servidor",
"workplace": "AREA REGIONAL DE SAO JOSE DO RIO PRETO",
"active": False,
"income": {
"total": 8458.57,
"wage": 8458.57,
"perks": {"total": 0.0},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 1372.38,
"prev_contribution": 604.18,
"ceil_retention": 0.0,
"income_tax": 768.2,
},
}
files = ("./output_test/Servidores_inativos-01-2020.ods",)
employees = parser.unused_parse_inactive_servants(files, "01", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_servants_dez_2019(self):
self.maxDiff = None
expected = {
"reg": "953273",
"name": "<NAME>",
"role": "AUXILIAR DE PROMOTORIA I",
"type": "servidor",
"workplace": "AREA DE TRANSPORTES",
"active": False,
"income": {
"total": 10768.1,
"wage": 6674.54,
"perks": {"total": 356.24},
"other": {
"total": 3737.32,
"trust_position": 0.0,
"others_total": 3737.32,
"others": {
"Gratificação Natalina": 3737.32,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 1546.78,
"prev_contribution": 447.16,
"ceil_retention": 0.0,
"income_tax": 1099.62,
},
}
files = ("./output_test/Servidores_inativos-12-2019.ods",)
employees = parser.unused_parse_inactive_servants(files, "12", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_jan_2020(self):
self.maxDiff = None
expected = {
"reg": "1469",
"name": "<NAME>",
"role": " AUXILIAR DE PROMOTORIA I ",
"type": "servidor",
"workplace": " AREA REGIONAL DA CAPITAL ",
"active": True,
"income": {
"total": 7874.06,
"wage": 6623.36,
"perks": {"total": 1250.7},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 1612.73,
"prev_contribution": 861.02,
"ceil_retention": 0.0,
"income_tax": 751.71,
},
}
files = ("./output_test/Servidores_ativos-01-2020.ods",)
employees = parser.unused_parse_active_servants(files, "01", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_dez_2019(self):
self.maxDiff = None
expected = {
"reg": "4895",
"name": "<NAME>",
"role": "OFICIAL DE PROMOTORIA I",
"type": "servidor",
"workplace": "SERVICO TECNICO ADMINISTRATIVO DE TIETE",
"active": True,
"income": {
"total": 11246.37,
"wage": 6267.32,
"perks": {"total": 1169.44},
"other": {
"total": 3809.61,
"trust_position": 333.44,
"others_total": 3476.17,
"others": {
"Gratificação Natalina": 3476.17,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 3259.08,
"prev_contribution": 1757.98,
"ceil_retention": 0.0,
"income_tax": 1501.1,
},
}
files = ("./output_test/Servidores_ativos-12-2019.ods",)
employees = parser.unused_parse_active_servants(files, "12", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
# Tests for active members who have different table formats
def test_active_members_january_2019(self):
self.maxDiff = None
expected = {
"reg": "3725",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE SAO BERNARDO DO CAMPO",
"active": True,
"income": {
"total": 34609.1,
"wage": 33689.1,
"perks": {"total": 920.0, "food": 920.0, "housing_aid": 0.0},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporárias": 0.0,
},
},
},
"discounts": {
"total": 11081.84,
"prev_contribution": 3705.8,
"ceil_retention": 0.0,
"income_tax": 7376.04,
},
}
files = ("./output_test/Membros_ativos-01-2019.ods",)
employees = parser.parse_active_members(files, "01", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_march_2019(self):
self.maxDiff = None
expected = {
"reg": "3725",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE SAO BERNARDO DO CAMPO",
"active": True,
"income": {
"total": 38651.78,
"wage": 33689.1,
"perks": {"total": 920.0, "food": 920.0, "ferias em pecunia": 0.0},
"other": {
"total": 4042.68,
"trust_position": 0.0,
"others_total": 4042.68,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporárias": 4042.68,
},
},
},
"discounts": {
"total": 12193.58,
"prev_contribution": 3705.8,
"ceil_retention": 0.0,
"income_tax": 8487.78,
},
}
files = ("./output_test/Membros_ativos-03-2019.ods",)
employees = parser.parse_active_members(files, "03", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_june_2019(self):
self.maxDiff = None
expected = {
"reg": "207",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE TAUBATE",
"active": True,
"income": {
"total": 34609.1,
"wage": 33689.1,
"perks": {
"total": 920.0,
"food": 920.0,
"ferias em pecunia": 0.0,
"LP em pecunia": 0.0,
},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporarias": 0.0,
},
},
},
"discounts": {
"total": 11081.84,
"prev_contribution": 3705.8,
"income_tax": 7376.04,
},
}
files = ("./output_test/Membros_ativos-06-2019.ods",)
employees = parser.parse_active_members(files, "06", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
    # Tests for inactive members who have different table formats
def test_inactive_members_january_2019(self):
self.maxDiff = None
expected = {
"reg": "137707",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIAS DE JUSTICA",
"active": False,
"income": {
"total": 45627.43,
"wage": 30418.28,
"other": {
"total": 15209.15,
"others_total": 15209.15,
"others": {"Gratificação Natalina": 15209.15},
},
},
"discounts": {
"total": 18959.9,
"prev_contribution": 6514.5,
"ceil_retention": 0.0,
"income_tax": 26667.53,
},
}
files = ("./output_test/Membros_inativos-01-2019.ods",)
employees = parser.unused_parse_inactive_members(files, "01", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_members_may_2019(self):
self.maxDiff = None
expected = {
"reg": "137707",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIAS DE JUSTICA",
"active": False,
"income": {
"total": 35159.83,
"wage": 35159.83,
"perks": {"total": 0.0, "food": 0.0, "ferias em pecunia": 0.0},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 10614.29,
"prev_contribution": 3225.24,
"income_tax": 7389.05,
"ceil_retention": 0.0,
},
}
files = ("./output_test/Membros_inativos-05-2019.ods",)
employees = parser.unused_parse_inactive_members(files, "05", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_members_june_2019(self):
self.maxDiff = None
expected = {
"reg": "137707",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIAS DE JUSTICA",
"active": False,
"income": {
"total": 35159.83,
"wage": 35159.83,
"perks": {"total": 0.0, "food": 0.0, "ferias em pecunia": 0.0},
"other": {
"total": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 10614.29,
"prev_contribution": 3225.24,
"income_tax": 7389.05,
"ceil_retention": 0.0,
},
}
files = ("./output_test/Membros_inativos-06-2019.ods",)
employees = parser.unused_parse_inactive_members(files, "06", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
    # Tests for active servants who have different table formats
def test_active_servants_january_2019(self):
self.maxDiff = None
expected = {
"reg": "1469",
"name": "<NAME>",
"role": "AREA REGIONAL DA CAPITAL",
"type": "servidor",
"workplace": "AUXILIAR DE PROMOTORIA I",
"active": True,
"income": {
"total": 7573.02,
"wage": 6371.44,
"perks": {
"total": 1201.58,
"food": 920.0,
"transportation": 281.58,
"pre_school": 0.0,
},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Gratificação de Qualificação": 0.0,
},
},
},
"discounts": {
"total": 1518.32,
"prev_contribution": 828.27,
"ceil_retention": 0.0,
"income_tax": 690.05,
},
}
files = ("./output_test/Servidores_ativos-01-2019.ods",)
employees = parser.unused_parse_active_servants(files, "01", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
    # Tests for inactive servants who have different table formats
def test_inactive_servants_january_2019(self):
self.maxDiff = None
expected = {
"reg": "1174",
"name": "<NAME>",
"role": "OFICIAL DE PROMOTORIA I",
"type": "servidor",
"workplace": "AREA REGIONAL DE SAO JOSE DO RIO PRETO",
"active": False,
"income": {
"total": 8180.43,
"wage": 8180.43,
"other": {
"total": 0.0,
"others_total": 0.0,
"others": {"Gratificação Natalina": 0.0},
},
},
"discounts": {
"total": 1266.27,
"prev_contribution": 584.7,
"income_tax": 681.57,
},
}
files = ("./output_test/Servidores_inativos-01-2019.ods",)
employees = parser.unused_parse_inactive_servants(files, "01", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_servants_may_2019(self):
self.maxDiff = None
expected = {
"reg": "4324",
"name": "<NAME>",
"role": "OFICIAL DE PROMOTORIA I",
"type": "servidor",
"workplace": "SERVICO TECNICO ADMINISTRATIVO DE ARACATUBA",
"active": False,
"income": {
"total": 8900.62,
"wage": 5244.0,
"perks": {
"total": 3496.0,
"food": 0.0,
"transportation": 0.0,
"ferias em pecunia": 3496.0,
},
"other": {
"total": 160.62,
"trust_position": 80.31,
"others_total": 80.31,
"others": {
"Gratificação Natalina": 0.0,
"Abono de Permanência": 0.0,
"Outras Remunerações Temporárias": 80.31,
},
},
},
"discounts": {
"total": 701.3,
"prev_contribution": 106.48,
"income_tax": 594.82,
},
}
files = ("./output_test/Servidores_inativos-05-2019.ods",)
employees = parser.unused_parse_inactive_servants(files, "05", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_servants_june_2019(self):
self.maxDiff = None
expected = {
"reg": "386921",
"name": "<NAME>",
"role": "AUXILIAR DE PROMOTORIA I",
"type": "servidor",
"workplace": "AREA DE ATIVIDADES COMPLEMENTARES",
"active": False,
"income": {
"total": 6313.87,
"wage": 5259.27,
"perks": {"total": 726.78, "food": 562.22, "transportation": 164.56},
"other": {
"total": 327.82,
"trust_position": 0.0,
"others_total": 327.82,
"others": {
"Gratificação Natalina": 0.0,
"Abono de Permanência": 327.82,
"Gratificação de Qualificação": 0.0,
},
},
},
"discounts": {
"total": 559.27,
"prev_contribution": 459.95,
"income_tax": 99.32,
},
}
files = ("./output_test/Servidores_inativos-06-2019.ods",)
employees = parser.unused_parse_inactive_servants(files, "06", "2019")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
# Tests for archives containing indemnity funds and temporary remuneration
def test_inactive_members_aug(self):
self.maxDiff = None
expected = {
"reg": "322959",
"name": "<NAME>",
"role": "PROCURADOR DE JUSTICA",
"type": "membro",
"workplace": "PROCURADORIA DE JUSTICA CRIMINAL",
"active": False,
"income": {
"total": 88834.87,
"wage": 38072.09,
"perks": {"total": 50762.78, "vacation_pecuniary": 50762.78},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 11004.51,
"prev_contribution": 5115.36,
"ceil_retention": 0.0,
"income_tax": 16119.87,
},
}
files = (
"./output_test/Membros_inativos-08-2020.ods",
"./output_test/Membros_inativos-Verbas Indenizatorias-08-2020.ods",
)
employees = parser.unused_parse_inactive_members(files, "08", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_members_sept(self):
self.maxDiff = None
expected = {
"reg": "730",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA INTERMEDIARIA)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE BIRIGUI",
"active": False,
"income": {
"total": 46228.93,
"wage": 32004.65,
"perks": {
"total": 14224.28,
"food": 0.0,
"vacation_pecuniary": 14224.28,
},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 10936.73,
"prev_contribution": 4144.57,
"ceil_retention": 0.0,
"income_tax": 6792.16,
},
}
files = (
"./output_test/Membros_inativos-09-2020.ods",
"./output_test/Membros_inativos-Verbas Indenizatorias-09-2020.ods",
)
employees = parser.unused_parse_inactive_members(files, "09", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_members_nov(self):
self.maxDiff = None
expected = {
"reg": "204231",
"name": "<NAME>",
"role": "PROCURADOR DE JUSTICA",
"type": "membro",
"workplace": "PROCURADORIA DE JUSTICA CRIMINAL",
"active": False,
"income": {
"total": 54466.36,
"wage": 37707.48,
"perks": {
"total": 16758.88,
"vacation_pecuniary": 16758.88,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
},
},
},
"discounts": {
"total": 13651.37,
"prev_contribution": 5725.76,
"ceil_retention": 0.0,
"income_tax": 7925.61,
},
}
files = (
"./output_test/Membros_inativos-11-2020.ods",
"./output_test/Membros_inativos-Verbas Indenizatorias-11-2020.ods",
)
employees = parser.unused_parse_inactive_members(files, "11", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_servants_sept(self):
self.maxDiff = None
expected = {
"reg": "165",
"name": "<NAME>",
"role": "OFICIAL DE PROMOTORIA I",
"type": "servidor",
"workplace": "CENTRAL DE INQUERITOS POLICIAIS E PROCESSOS - CIPP",
"active": False,
"income": {
"total": 20577.06,
"wage": 12543.02,
"perks": {"total": 487.75, "food": 457.15, "transportation": 30.6},
"other": {
"total": 7546.29,
"trust_position": 0.0,
"others_total": 7546.29,
"others": {
"Gratificação Natalina": 6271.5,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 961.22,
"INSALUBRIDADE": 0.0,
"QUALIFICACAO": 313.57,
},
},
},
"discounts": {
"total": 3838.08,
"prev_contribution": 1557.99,
"ceil_retention": 0.0,
"income_tax": 2280.09,
},
}
files = (
"./output_test/Servidores_inativos-09-2020.ods",
"./output_test/Servidores_inativos-Verbas Indenizatorias-09-2020.ods",
)
employees = parser.unused_parse_inactive_servants(files, "09", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_servants_aug(self):
self.maxDiff = None
expected = {
"reg": "551283",
"name": "<NAME>",
"role": "OFICIAL DE PROMOTORIA I",
"type": "servidor",
"workplace": "AREA DE DOCUMENTACAO E DIVULGACAO",
"active": False,
"income": {
"total": 12298.84,
"wage": 10295.25,
"perks": {
"total": 563.32,
"food": 685.72,
"transportation": -122.4,
"vacation_pecuniary": 0.0,
},
"other": {
"total": 1440.27,
"trust_position": 0.0,
"others_total": 1440.27,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 1061.07,
"INSALUBRIDADE": 0.0,
"QUALIFICACAO": 379.2,
},
},
},
"discounts": {
"total": 3061.69,
"prev_contribution": 713.81,
"ceil_retention": 0.0,
"income_tax": 2347.88,
},
}
files = (
"./output_test/Servidores_inativos-08-2020.ods",
"./output_test/Servidores_inativos-Verbas Indenizatorias-08-2020.ods",
)
employees = parser.unused_parse_inactive_servants(files, "08", "2020")
        # Assertions
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_july_2019(self):
self.maxDiff = None
expected = {
"reg": "3725",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE SAO BERNARDO DO CAMPO",
"active": True,
"income": {
"total": 40213.32,
"wage": 33689.1,
"perks": {"total": 920.0, "food": 920.0, "vacation_pecuniary": 0.0},
"other": {
"total": 5604.22,
"trust_position": 0.0,
"others_total": 5604.22,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 5604.22,
"GRAT. NATUREZA ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 12623.0,
"prev_contribution": 3705.8,
"ceil_retention": 0.0,
"income_tax": 8917.2,
},
}
files = (
"./output_test/Membros_ativos-07-2019.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-07-2019.ods",
)
employees = parser.parse_active_members(files, "07", "2019")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_march_2020(self):
self.maxDiff = None
expected = {
"reg": "3725",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE SAO BERNARDO DO CAMPO",
"active": True,
"income": {
"total": 54113.85,
"wage": 33689.1,
"perks": {
"total": 15932.93,
"food": 960.0,
"compensatory_leave": 0.0,
"vacation_pecuniary": 14972.93,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 4491.82,
"trust_position": 0.0,
"others_total": 4491.82,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 4491.82,
"GRAT. NATUREZA ESPECIAL": 0.0,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 12317.09,
"prev_contribution": 3705.8,
"ceil_retention": 0.0,
"income_tax": 8611.29,
},
}
files = (
"./output_test/Membros_ativos-03-2020.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-03-2020.ods",
)
employees = parser.parse_active_members(files, "03", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_april_2020(self):
self.maxDiff = None
expected = {
"reg": "2526",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE PIRACICABA",
"active": True,
"income": {
"total": 50745.0,
"wage": 33689.1,
"perks": {
"total": 15932.93,
"food": 960.0,
"compensatory_leave": 0.0,
"vacation_pecuniary": 14972.93,
},
"other": {
"total": 1122.97,
"trust_position": 0.0,
"others_total": 1122.97,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 0.0,
"GRAT. NATUREZA ESPECIAL": 0.0,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 1122.97,
},
},
},
"discounts": {
"total": 11390.66,
"prev_contribution": 3705.8,
"ceil_retention": 0.0,
"income_tax": 7684.86,
},
}
files = (
"./output_test/Membros_ativos-04-2020.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-04-2020.ods",
)
employees = parser.parse_active_members(files, "04", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_aug_2020(self):
self.maxDiff = None
expected = {
"reg": "8505",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA INICIAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE ITARARE",
"active": True,
"income": {
"total": 54428.71,
"wage": 30405.3,
"perks": {
"total": 960.0,
"food": 960.0,
"transportation": 0.0,
"pre_school": 0.0,
"vacation_pecuniary": 0.0,
"premium_license_pecuniary": 0.0,
"compensatory_leave": 0.0,
},
"other": {
"total": 23063.41,
"trust_position": 0.0,
"others_total": 23063.41,
"others": {
"Gratificação Natalina": 15202.65,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"INSALUBRIDADE": 0.0,
"SUBS. DE FUNÇÃO": 0.0,
"VIATURA": 0.0,
"GRAT. CUMULATIVA": 6737.8,
"GRAT. DE QUALIFICAÇÃO": 0.0,
"GRAT. NATUREZA ESPECIAL": 1122.96,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 15281.2,
"prev_contribution": 6912.3,
"ceil_retention": 0.0,
"income_tax": 8368.9,
},
}
files = (
"./output_test/Membros_ativos-08-2020.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-08-2020.ods",
)
employees = parser.parse_active_members(files, "08", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
    def test_active_members_oct_2020(self):
self.maxDiff = None
expected = {
"reg": "4435",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE GUARULHOS",
"active": True,
"income": {
"total": 40263.97,
"wage": 33689.1,
"perks": {"total": 960.0, "vacation_pecuniary": 0.0},
"other": {
"total": 5614.87,
"trust_position": 0.0,
"others_total": 5614.87,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 0.0,
"GRAT. NATUREZA ESPECIAL": 4491.9,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 1122.97,
},
},
},
"discounts": {
"total": 13707.61,
"prev_contribution": 5197.77,
"ceil_retention": 0.0,
"income_tax": 8509.84,
},
}
files = (
"./output_test/Membros_ativos-10-2020.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-10-2020.ods",
)
employees = parser.parse_active_members(files, "10", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_july_2019(self):
self.maxDiff = None
expected = {
"reg": "7103",
"name": "<NAME>",
"role": "AUXILIAR DE PROMOTORIA I",
"type": "servidor",
"workplace": "CENTRO TEC. INF. E COMUNICACAO",
"active": True,
"income": {
"total": 6967.67,
"wage": 3108.6,
"perks": {
"total": 3781.36,
"food": 920.0,
"transportation": 314.16,
"pre_school": 423.0,
"vacation_pecuniary": 2124.2,
},
"other": {
"total": 77.71,
"trust_position": 0.0,
"others_total": 77.71,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"INSALUBRIDADE": 0.0,
"SUBSTITUIÇÃO DE FUNÇÃO": 0.0,
"VIATURA": 0.0,
"GRAT. DE QUALIFICAÇÂO": 77.71,
},
},
},
"discounts": {
"total": 475.96,
"prev_contribution": 404.11,
"ceil_retention": 0.0,
"income_tax": 71.85,
},
}
files = (
"./output_test/Servidores_ativos-07-2019.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-07-2019.ods",
)
employees = parser.unused_parse_active_servants(files, "07", "2019")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_oct_2019(self):
self.maxDiff = None
expected = {
"reg": "5641",
"name": "<NAME>",
"role": "AUXILIAR DE PROMOTORIA I",
"type": "servidor",
"workplace": "SERVICO TECNICO ADMINISTRATIVO DE SANTOS",
"active": True,
"income": {
"total": 8711.12,
"wage": 3170.76,
"perks": {
"total": 3887.73,
"transportation": 314.16,
"food": 920.0,
"pre_school": 0.0,
"vacation_pecuniary": 2653.57,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 1652.63,
"trust_position": 712.52,
"others_total": 940.11,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Adic. Insalubridade": 712.52,
"Subst. Eventual": 0.0,
"Ato Norm 766/2013": 130.51,
"Gratificação Qualificação": 97.08,
},
},
},
"discounts": {
"total": 705.18,
"prev_contribution": 507.43,
"ceil_retention": 0.0,
"income_tax": 197.75,
},
}
files = (
"./output_test/Servidores_ativos-10-2019.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-10-2019.ods",
)
employees = parser.unused_parse_active_servants(files, "10", "2019")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_nov_2019(self):
self.maxDiff = None
expected = {
"reg": "4921",
"name": "<NAME>",
"role": "ASSESSOR DO MP",
"type": "servidor",
"workplace": "SERVICO TECNICO ADMINISTRATIVO DE SANTOS",
"active": True,
"income": {
"total": 19286.37,
"wage": 16552.89,
"perks": {
"total": 1259.2,
"transportation": 299.2,
"food": 960.0,
"pre_school": 0.0,
"vacation_pecuniary": 0.0,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 1474.28,
"trust_position": 761.76,
"others_total": 712.52,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Adic. Insalubridade": 712.52,
"Subst. Eventual": 0.0,
"Ato Norm 766/2013": 0.0,
"Gratificação Qualificação": 0.0,
},
},
},
"discounts": {
"total": 4899.1,
"prev_contribution": 987.63,
"ceil_retention": 0.0,
"income_tax": 3911.47,
},
}
files = (
"./output_test/Servidores_ativos-11-2019.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-11-2019.ods",
)
employees = parser.unused_parse_active_servants(files, "11", "2019")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_mar_2020(self):
self.maxDiff = None
expected = {
"reg": "5641",
"name": "<NAME>UZ",
"role": " AUXILIAR DE PROMOTORIA I ",
"type": "servidor",
"workplace": " SERVICO TECNICO ADMINISTRATIVO DE SANTOS ",
"active": True,
"income": {
"total": 6117.71,
"wage": 3295.87,
"perks": {
"total": 1296.6,
"transportation": 336.6,
"food": 960.0,
"pre_school": 0.0,
"vacation_pecuniary": 0.0,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 1525.24,
"trust_position": 712.52,
"others_total": 812.72,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Adic. Insalubridade": 712.52,
"Subst. Eventual": 0.0,
"Ato Norm 766/2013": 0.0,
"Gratificação Qualificação": 100.2,
},
},
},
"discounts": {
"total": 716.43,
"prev_contribution": 521.08,
"ceil_retention": 0.0,
"income_tax": 195.35,
},
}
files = (
"./output_test/Servidores_ativos-03-2020.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-03-2020.ods",
)
employees = parser.unused_parse_active_servants(files, "03", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_april_2020(self):
self.maxDiff = None
expected = {
"reg": "5057",
"name": "<NAME>",
"role": " AUXILIAR DE PROMOTORIA I ",
"type": "servidor",
"workplace": " SERVICO TECNICO ADMINISTRATIVO DE PRESIDENTE PRUDENTE ",
"active": True,
"income": {
"total": 8602.73,
"wage": 3738.17,
"perks": {
"total": 1113.0,
"transportation": 153.0,
"food": 960.0,
"pre_school": 0.0,
"vacation_pecuniary": 0.0,
},
"other": {
"total": 3751.56,
"trust_position": 712.52,
"others_total": 3039.04,
"others": {
"Gratificação Natalina": 2215.25,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Adic. Insalubridade": 712.52,
"Subst. Eventual": 0.0,
"Ato Norm 766/2013": 0.0,
"Gratificação Qualificação": 111.27,
},
},
},
"discounts": {
"total": 1192.12,
"prev_contribution": 981.03,
"ceil_retention": 0.0,
"income_tax": 211.09,
},
}
files = (
"./output_test/Servidores_ativos-04-2020.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-04-2020.ods",
)
employees = parser.unused_parse_active_servants(files, "04", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_aug_2020(self):
self.maxDiff = None
expected = {
"reg": "5068",
"name": "<NAME>",
"role": "AUXILIAR DE PROMOTORIA I",
"type": "servidor",
"workplace": "SERVICO TECNICO-ADMINISTRATIVO DE RIBEIRAO PRETO",
"active": True,
"income": {
"total": 5392.04,
"wage": 3609.93,
"perks": {
"total": 929.4,
"transportation": -30.6,
"food": 960.0,
"pre_school": 0.0,
"vacation_pecuniary": 0.0,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 852.71,
"trust_position": 0.0,
"others_total": 852.71,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Adic. Insalubridade": 743.87,
"Substituição de Função": 0.0,
"Viatura": 0.0,
"Gratificação Qualificação": 108.84,
},
},
},
"discounts": {
"total": 785.75,
"prev_contribution": 539.08,
"ceil_retention": 0.0,
"income_tax": 246.67,
},
}
files = (
"./output_test/Servidores_ativos-08-2020.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-08-2020.ods",
)
employees = parser.unused_parse_active_servants(files, "08", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_oct_2020(self):
self.maxDiff = None
expected = {
"reg": "1469",
"name": "<NAME>",
"role": "AUX. DE PROMOTORIA ENCARREGADO",
"type": "servidor",
"workplace": "AREA REGIONAL DA CAPITAL",
"active": True,
"income": {
"total": 7705.76,
"wage": 6623.36,
"perks": {
"total": 1082.4,
"transportation": 122.4,
"food": 960.0,
"pre_school": 0.0,
"vacation_pecuniary": 0.0,
},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Adic. Insalubridade": 0.0,
"Substituição de Função": 0.0,
"Viatura": 0.0,
"Gratificação Qualificação": 0.0,
},
},
},
"discounts": {
"total": 1580.82,
"prev_contribution": 867.25,
"ceil_retention": 0.0,
"income_tax": 713.57,
},
}
files = (
"./output_test/Servidores_ativos-10-2020.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-10-2020.ods",
)
employees = parser.unused_parse_active_servants(files, "10", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
    def test_active_members_dez_2020(self):
self.maxDiff = None
expected = {
"reg": "2250",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE ARACATUBA",
"active": True,
"income": {
"total": 87007.52,
"wage": 33689.1,
"perks": {
"total": 30905.86,
"food": 960.0,
"vacation_pecuniary": 29945.86,
},
"other": {
"total": 22412.56,
"trust_position": 0.0,
"others_total": 22412.56,
"others": {
"Gratificação Natalina": 20728.12,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 0.0,
"GRAT. NATUREZA ESPECIAL": 1684.44,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 23737.92,
"prev_contribution": 8461.43,
"ceil_retention": 0.0,
"income_tax": 15276.49,
},
}
files = (
"./output_test/Membros_ativos-12-2020.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-12-2020.ods",
)
employees = parser.parse_active_members(files, "12", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_servants_dez_2020(self):
self.maxDiff = None
expected = {
"reg": "10322",
"name": "<NAME>",
"role": "ANALISTA JURIDICO DO MP",
"type": "servidor",
"workplace": "AREA REGIONAL DA CAPITAL",
"active": True,
"income": {
"total": 25645.96,
"wage": 8145.02,
"perks": {
"total": 12512.12,
"transportation": -122.4,
"food": 960.0,
"pre_school": 0.0,
"vacation_pecuniary": 11674.52,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 4988.82,
"trust_position": 0.0,
"others_total": 4988.82,
"others": {
"Gratificação Natalina": 4377.95,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Adic. Insalubridade": 0.0,
"Substituição de Função": 0.0,
"Gratificação Qualificação": 610.87,
},
},
},
"discounts": {
"total": 3636.88,
"prev_contribution": 978.0,
"ceil_retention": 0.0,
"income_tax": 2658.88,
},
}
files = (
"./output_test/Servidores_ativos-12-2020.ods",
"./output_test/Servidores_ativos-Verbas Indenizatorias-12-2020.ods",
)
employees = parser.unused_parse_active_servants(files, "12", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_inactive_servants_dez_2020(self):
self.maxDiff = None
expected = {
"reg": "1210",
"name": "<NAME>",
"role": "OFICIAL DE PROMOTORIA I",
"type": "servidor",
"workplace": "SERVICO TECNICO-ADMINISTRATIVO DE CAMPINAS",
"active": False,
"income": {
"total": 15203.92,
"wage": 11468.15,
"perks": {
"total": -1266.0,
"food": -960.0,
"transportation": -306.0,
"vacation_pecuniary": 0.0,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 5001.77,
"trust_position": 0.0,
"others_total": 5001.77,
"others": {
"Gratificação Natalina": 5734.08,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": -732.31,
"INSALUBRIDADE": 0.0,
},
},
},
"discounts": {
"total": 4349.13,
"prev_contribution": 1709.24,
"ceil_retention": 0.0,
"income_tax": 2639.89,
},
}
files = (
"./output_test/Servidores_inativos-12-2020.ods",
"./output_test/Servidores_inativos-Verbas Indenizatorias-12-2020.ods",
)
employees = parser.unused_parse_inactive_servants(files, "12", "2020")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_jan_2018(self):
self.maxDiff = None
expected = {
"reg": "1632",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DA CAPITAL",
"active": True,
"income": {
"total": 40321.3,
"wage": 28947.55,
"perks": {
"total": 5277.73,
"food": 900.0,
"housing_aid": 4377.73,
"vacation_pecuniary": 0.0,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 6096.02,
"trust_position": 0.0,
"others_total": 6096.02,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporárias": 6096.02,
},
},
},
"discounts": {
"total": 10357.72,
"prev_contribution": 3184.23,
"ceil_retention": 0.0,
"income_tax": 7173.49,
},
}
files = ("./output_test/Membros_ativos-01-2018.ods",)
employees = parser.parse_active_members(files, "01", "2018")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
    def test_active_members_oct_2018(self):
self.maxDiff = None
expected = {
"reg": "3339",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE BRAGANCA PAULISTA",
"active": True,
"income": {
"total": 76325.45,
"wage": 28947.55,
"perks": {
"total": 43894.46,
"food": 920.0,
"housing_aid": 4377.73,
"vacation_pecuniary": 38596.73,
},
"other": {
"total": 3483.44,
"trust_position": 0.0,
"others_total": 3483.44,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporárias": 3483.44,
},
},
},
"discounts": {
"total": 10253.45,
"prev_contribution": 3184.23,
"ceil_retention": 0.0,
"income_tax": 7069.22,
},
}
files = ("./output_test/Membros_ativos-10-2018.ods",)
employees = parser.parse_active_members(files, "10", "2018")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_fev_2019(self):
self.maxDiff = None
expected = [
{
"reg": "3818",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DA REGIONAL DA LAPA",
"active": True,
"income": {
"total": 35944.25,
"wage": 33689.1,
"perks": {"total": 920.0, "food": 920.0, "ferias em pecunia": 0.0},
"other": {
"total": 1335.15,
"trust_position": 1335.15,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporárias": 0.0,
},
},
},
"discounts": {
"total": 11864.3,
"prev_contribution": 3852.66,
"ceil_retention": 0.0,
"income_tax": 8011.64,
},
},
{
"reg": "594674",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE CATANDUVA",
"active": True,
"income": {
"total": 39424.55,
"wage": 33689.1,
"perks": {"total": 920.0, "food": 920.0, "ferias em pecunia": 0.0},
"other": {
"total": 4815.45,
"trust_position": 0.0,
"others_total": 4815.45,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporárias": 4815.45,
},
},
},
"discounts": {
"total": 12486.15,
"prev_contribution": 3746.79,
"ceil_retention": 0.0,
"income_tax": 8739.36,
},
},
{
"reg": "2245",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE GUARULHOS",
"active": True,
"income": {
"total": 35944.25,
"wage": 33689.1,
"perks": {"total": 920.0, "food": 920.0, "ferias em pecunia": 0.0},
"other": {
"total": 1335.15,
"trust_position": 1335.15,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"Outras remunerações temporárias": 0.0,
},
},
},
"discounts": {
"total": 12614.14,
"prev_contribution": 3893.66,
"ceil_retention": 0.0,
"income_tax": 8720.48,
},
},
]
files = ("./output_test/Membros_ativos-02-2019.ods",)
employees = parser.parse_active_members(files, "02", "2019")
        # Checks
self.assertEqual(3, len(employees))
self.assertEqual(employees, expected)
def test_active_members_nov_2020(self):
self.maxDiff = None
expected = [
{
"reg": "3818",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA REGIONAL DA LAPA",
"active": True,
"income": {
"total": 49622.03,
"wage": 33689.1,
"perks": {
"total": 15932.93,
"food": 960.0,
"vacation_pecuniary": 14972.93,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 0.0,
"GRAT. NATUREZA ESPECIAL": 0.0,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 13043.27,
"prev_contribution": 5557.77,
"ceil_retention": 0.0,
"income_tax": 7485.5,
},
},
{
"reg": "594674",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE CATANDUVA",
"active": True,
"income": {
"total": 49622.03,
"wage": 33689.1,
"perks": {
"total": 15932.93,
"food": 960.0,
"vacation_pecuniary": 14972.93,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 0.0,
"trust_position": 0.0,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 0.0,
"GRAT. NATUREZA ESPECIAL": 0.0,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 9054.1,
"prev_contribution": 5197.77,
"ceil_retention": 0.0,
"income_tax": 3856.33,
},
},
{
"reg": "2245",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DO PATRIMONIO PUBLICO E SOCIAL",
"active": True,
"income": {
"total": 51550.58,
"wage": 34601.97,
"perks": {
"total": 16526.33,
"food": 960.0,
"vacation_pecuniary": 15566.33,
"premium_license_pecuniary": 0.0,
},
"other": {
"total": 422.28,
"trust_position": 422.28,
"others_total": 0.0,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 0.0,
"GRAT. NATUREZA ESPECIAL": 0.0,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 12685.57,
"prev_contribution": 5411.4,
"ceil_retention": 0.0,
"income_tax": 7274.17,
},
},
]
files = (
"./output_test/Membros_ativos-11-2020.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-11-2020.ods",
)
employees = parser.parse_active_members(files, "11", "2020")
        # Checks
self.assertEqual(3, len(employees))
self.assertEqual(employees, expected)
def test_active_members_jan_2021(self):
self.maxDiff = None
expected = {
"reg": "2250",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE ARACATUBA",
"active": True,
"income": {
"total": 56546.97,
"wage": 33689.1,
"perks": {"total": 960.0, "food": 960.0},
"other": {
"total": 21897.87,
"trust_position": 0.0,
"others_total": 21897.87,
"others": {
"Gratificação Natalina": 16844.55,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 0.0,
"GRAT. NATUREZA ESPECIAL": 5053.32,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 14637.3,
"prev_contribution": 7679.57,
"ceil_retention": 0.0,
"income_tax": 6957.73,
},
}
files = (
"./output_test/Membros_ativos-01-2021.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-01-2021.ods",
)
employees = parser.parse_active_members(files, "01", "2021")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_fev_2021(self):
self.maxDiff = None
expected = {
"reg": "3182",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE SAO BERNARDO DO CAMPO",
"active": True,
"income": {
"total": 56659.29,
"wage": 33689.1,
"perks": {"total": 960.0, "food": 960.0, "vacation_pecuniary": 0.0},
"other": {
"total": 22010.19,
"trust_position": 0.0,
"others_total": 22010.19,
"others": {
"Gratificação Natalina": 16844.55,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 4042.68,
"GRAT. NATUREZA ESPECIAL": 1122.96,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 16068.74,
"prev_contribution": 7679.57,
"ceil_retention": 0.0,
"income_tax": 8389.17,
},
}
files = (
"./output_test/Membros_ativos-02-2021.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-02-2021.ods",
)
employees = parser.parse_active_members(files, "02", "2021")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
def test_active_members_mar_2021(self):
self.maxDiff = None
expected = {
"reg": "3339",
"name": "<NAME>",
"role": "PROMOTOR DE JUSTICA (ENTRANCIA FINAL)",
"type": "membro",
"workplace": "PROMOTORIA DE JUSTICA DE BRAGANCA PAULISTA",
"active": True,
"income": {
"total": 49659.38,
"wage": 33689.1,
"perks": {
"total": 10467.8,
"food": 960.0,
"vacation_pecuniary": 7486.46,
"health": 2021.34,
},
"other": {
"total": 5502.48,
"trust_position": 0.0,
"others_total": 5502.48,
"others": {
"Gratificação Natalina": 0.0,
"Férias (1/3 constitucional)": 0.0,
"Abono de Permanência": 0.0,
"GRAT. CUMULATIVA": 3818.04,
"GRAT. NATUREZA ESPECIAL": 1684.44,
"GRAT. DE GRUPO DE ATUAÇÃO ESPECIAL": 0.0,
},
},
},
"discounts": {
"total": 13564.87,
"prev_contribution": 5187.35,
"ceil_retention": 0.0,
"income_tax": 8377.52,
},
}
files = (
"./output_test/Membros_ativos-03-2021.ods",
"./output_test/Membros_ativos-Verbas Indenizatorias-03-2021.ods",
)
employees = parser.parse_active_members(files, "03", "2021")
        # Checks
self.assertEqual(1, len(employees))
self.assertDictEqual(employees[0], expected)
if __name__ == "__main__":
unittest.main()
|
183849
|
from .module import PytorchModel, TrainingInterface
from .scheduler import ConstantScheduler, TeacherForcingScheduler, \
OptimizerScheduler, ParameterScheduler
from .manager import LogPathManager, DataLoaders, SummaryWriters
from .example import MinExponentialLR
|
183881
|
from BlindMIUtil import *
from dataLoader import *
import tensorflow as tf
from tensorflow.keras.models import load_model
from sklearn import svm
import sys
import os
import numpy as np
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
tf.config.experimental.set_memory_growth(tf.config.experimental.list_physical_devices('GPU')[0], True)
DATA_NAME = sys.argv[1] if len(sys.argv) > 1 else "CIFAR"
TARGET_MODEL_GENRE = sys.argv[2] if len(sys.argv) > 2 else "ResNet50"
TARGET_WEIGHTS_PATH = "weights/Target/{}_{}.hdf5".format(DATA_NAME, TARGET_MODEL_GENRE)
def BlindMI_1class(x_, y_true, target_model):
    '''
    One-class SVM variant: a one-class SVM is trained on generated non-members and then predicts
    whether each sample was part of the target model's training set. If the data has already been
    shuffled, the shuffling step can simply be removed.
    :param x_: the data to be classified as trained (member) or untrained (non-member)
    :param y_true: the labels of the data (unused by this attack variant)
    :param target_model: the target model to probe
    :return: the predicted membership labels
    '''
    y_pred = target_model.predict(x_)
mix = np.sort(y_pred, axis=1)[:, ::-1][:, :3]
nonMem_pred = target_model.predict(sobel(x_))
nonMem = np.sort(nonMem_pred, axis=1)[:, ::-1][:, :3]
cls = svm.OneClassSVM(nu=0.9, kernel='sigmoid', gamma='scale')
cls.fit(nonMem)
m_pred = [i if i == 1 else 0 for i in cls.predict(mix)]
return m_pred
(x_train_tar, y_train_tar), (x_test_tar, y_test_tar), m_true = globals()['load_' + DATA_NAME]('TargetModel')
Target_Model = load_model(TARGET_WEIGHTS_PATH)
m_pred = BlindMI_1class(np.r_[x_train_tar, x_test_tar], np.r_[y_train_tar, y_test_tar], Target_Model)
evaluate_attack(m_true, m_pred)
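# A minimal sketch of the kind of summary such an evaluation typically reports (an assumption:
# this is NOT the actual BlindMIUtil.evaluate_attack implementation): plain membership accuracy
# computed from the ground-truth membership mask m_true and the predictions m_pred.
def _attack_accuracy(m_true_arr, m_pred_arr) -> float:
    m_true_arr = np.asarray(m_true_arr).ravel()
    m_pred_arr = np.asarray(m_pred_arr).ravel()
    return float(np.mean(m_true_arr == m_pred_arr))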
|
183883
|
from math import *
import random
import timeit
import matplotlib.pyplot as plt
n, omega, N = 12, 2400, 1024
result = ''
result22 = ''
def show_graphic(num: int, signal: list, title: str):
plt.plot(list(range(0, num)), signal)
plt.grid(True)
plt.title(title)
plt.show()
def random_signal():
all_harm = []
for i in range(n):
A = random.randint(0, 100)
phi = random.randint(0, 10)
single_harm = []
for j in range(N):
y = A * sin(((omega * (n - i)) / n) * j + phi)
single_harm.append(y)
all_harm.append(single_harm)
harm_sum = []
for k in range(N):
sum_num = 0
for l in range(n):
sum_num += all_harm[l][k]
harm_sum.append(sum_num)
return harm_sum
def math_wait(N, signal):
res = 0
for p in range(N):
res += signal[p]
return res / N
def dispersion(N, signal, mx):
res = 0
for p in range(N):
res += pow(signal[p] - mx, 2)
return res / N
sig1 = random_signal()
sig2 = random_signal()
sig_z = random_signal()
start_mx = timeit.default_timer()
m1 = math_wait(N, sig1)
end_mx = timeit.default_timer()
m2 = math_wait(N, sig2)
mx_time = end_mx - start_mx
result1 = "Mx = " + str(m1) + ", time = " + str(mx_time) + " seconds"
print(result1)
start_disp = timeit.default_timer()
disp = dispersion(N, sig1, m1)
end_disp = timeit.default_timer()
disp_time = end_disp - start_disp
result2 = "\nDx = " + str(disp) + ", time = " + str(disp_time) + " seconds"
print(result2)
N2 = int(N / 2) - 1
def calc_r(N2, sig1, sig2, m1, m2):
res = []
for k in range(N2):
r = 0
for j in range(N2):
r += (sig1[j] - m1) * (sig2[j + k] - m2)
res.append(r / (N2 - 1))
return res
start_rxx = timeit.default_timer()
rxx = calc_r(N2, sig1, sig1, m1, m1)
end_rxx = timeit.default_timer()
rxx_time = end_rxx - start_rxx
result3 = "Rxx time = " + str(rxx_time) + " seconds"
print("Rxx = ", rxx, ", time = ", end_rxx - start_rxx, " seconds")
start_rxy = timeit.default_timer()
rxy = calc_r(N2, sig1, sig2, m1, m2)
end_rxy = timeit.default_timer()
rxy_time = end_rxy - start_rxy
result4 = "\nRxy time = " + str(rxy_time) + " seconds"
print("Rxy = ", rxy, ", time = ", end_rxy - start_rxy, " seconds")
# Lab 2.1
start_f_normal_start = timeit.default_timer()
F = []
for i in range(N):
real_part = 0
im_part = 0
for j in range(N):
real_part += sig1[j] * cos(2 * pi * i * j / N)
im_part += sig1[j] * sin(2 * pi * i * j / N)
f_p = sqrt(pow(real_part, 2) + pow(im_part, 2))
F.append(f_p)
f_normal_end = timeit.default_timer()
f_normal_time = f_normal_end - start_f_normal_start
def generate_table():
all_values = []
for i in range(N):
all_fs = []
for j in range(N):
cos_sin_values = []
cos_value = cos(2 * pi * i * j / N)
sin_value = sin(2 * pi * i * j / N)
cos_sin_values.append(cos_value)
cos_sin_values.append(sin_value)
all_fs.append(cos_sin_values)
all_values.append(all_fs)
return all_values
def use_table(table_input, signal1):
    spectrum = []
    for i in range(N):
        real = 0
        img = 0
        for j in range(N):
            real += signal1[j] * table_input[i][j][0]
            img += signal1[j] * table_input[i][j][1]
        f_p = sqrt(pow(real, 2) + pow(img, 2))
        spectrum.append(f_p)
    return spectrum
table = generate_table()
# time only the transform that re-uses the precomputed cos/sin table, so it can be
# compared fairly against the straightforward DFT timed above
start_f_table = timeit.default_timer()
f_new = use_table(table, sig1)
f_table_end = timeit.default_timer()
f_table_time = f_table_end - start_f_table
print("F table time: ", f_table_time)
print("F normal time: ", f_normal_time)
print("Diff: ", f_table_time - f_normal_time)
show_graphic(N, F, 'F')
show_graphic(N, f_new, 'F Table')
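# A minimal cross-check (not part of the original lab code): the magnitude spectrum computed by
# the table-based DFT above should match numpy's FFT magnitude up to floating point error.
def _verify_dft(signal, spectrum):
    import numpy as np
    reference = np.abs(np.fft.fft(np.asarray(signal)))
    return np.allclose(np.asarray(spectrum), reference, rtol=1e-6)
# print(_verify_dft(sig1, f_new))  # expected: True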
|
183884
|
import csv
import json
import sys
from pathlib import Path
from typing import Dict, List
from simcore_service_webserver.projects.projects_db import _convert_to_schema_names
SEPARATOR = ","
current_file = Path(sys.argv[0] if __name__ == "__main__" else __file__).resolve()
current_dir = current_file.parent
def load_csv(csv_filepath: Path) -> List[Dict]:
headers, items = [], []
with open(csv_filepath, mode="r", encoding="utf-8-sig") as fhandler:
reader = csv.reader(fhandler, delimiter=",", quotechar='"')
for row in reader:
if row:
if not headers:
headers = row
else:
item = {key: row[i] for i, key in enumerate(headers)}
items.append(item)
return items
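# A minimal usage sketch (hypothetical file name, not part of the original script): load_csv maps
# every non-empty row to a dict keyed by the header row, with all values kept as strings.
def _example_load_csv() -> List[Dict]:
    example = current_dir / "example_projects.csv"
    example.write_text('name,published\n"demo project",1\n', encoding="utf-8")
    return load_csv(example)  # -> [{'name': 'demo project', 'published': '1'}]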
def load_projects(csv_path: Path):
""" Returns schema-compatible projects
"""
db_projects = load_csv(csv_path)
_projects = []
fake_email = "<EMAIL>"
# process
for db_prj in db_projects:
if int(db_prj.get("published", 0) or 0) == 1:
prj = _convert_to_schema_names(db_prj, fake_email)
# jsonifies
dump = prj["workbench"]
# TODO: use Encoder instead?
dump = (
dump.replace("False", "false")
.replace("True", "true")
.replace("None", "null")
)
try:
prj["workbench"] = json.loads(dump)
except json.decoder.JSONDecodeError as err:
print(err)
# TODO: validate against project schema!!
_projects.append(prj)
else:
print("skipping {}".format(db_prj["name"]))
return _projects
def main():
"""
Converts csv exported from db into project schema-compatible json files
"""
for db_csv_export in current_dir.glob("template*.csv"):
data_projects = load_projects(db_csv_export)
json_path = db_csv_export.with_suffix(".json")
with open(json_path, "w") as fh:
json.dump(data_projects, fh, indent=2)
if __name__ == "__main__":
main()
|
183891
|
def flatten(myList):
newList = []
for item in myList:
if type(item) == list:
newList.extend(flatten(item))
else:
newList.append(item)
return newList
def main():
myList1 = [1,2,3,[1,2],5,[3,4,5,6,7]]
print(flatten(myList1))
myList2 = [1,[2,[3,[4,[5,[6,[7,[8,[9]]]]]]]]]
print(flatten(myList2))
if __name__ == '__main__':
main()
|
183928
|
import conx as cx
import numpy as np
from keras.datasets import mnist
from keras.utils import (to_categorical, get_file)
description = """
Original source: http://yann.lecun.com/exdb/mnist/
The MNIST dataset contains 70,000 images of handwritten digits (zero
to nine) that have been size-normalized and centered in a square grid
of pixels. Each image is a 28 × 28 × 1 array of floating-point numbers
representing grayscale intensities ranging from 0 (black) to 1
(white). The target data consists of one-hot binary vectors of size
10, corresponding to the digit classification categories zero through
nine. Some example MNIST images are shown below:

"""
def mnist_h5(*args, **kwargs):
"""
Load the Keras MNIST dataset from an H5 file.
"""
import h5py
path = "mnist.h5"
url = "https://raw.githubusercontent.com/Calysto/conx-data/master/mnist/mnist.h5"
path = get_file(path, origin=url)
h5 = h5py.File(path, "r")
dataset = cx.Dataset()
dataset._inputs = h5["inputs"]
dataset._targets = h5["targets"]
dataset._labels = h5["labels"]
dataset.h5 = h5
dataset.name = "MNIST-H5"
dataset.description = description
dataset._cache_values()
return dataset
def mnist(*args, **kwargs):
from keras.datasets import mnist
import keras.backend as K
# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float16')
x_test = x_test.astype('float16')
inputs = np.concatenate((x_train,x_test)) / 255
    labels = np.concatenate((y_train,y_test)) # ints, 0 to 9
###########################################
# fix mis-labeled image(s) in Keras dataset
labels[10994] = 9
###########################################
targets = to_categorical(labels).astype("uint8")
labels = np.array([str(label) for label in labels], dtype=str)
dataset = cx.Dataset()
dataset.load_direct([inputs], [targets], [labels])
return dataset
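# A minimal sketch (not part of the original loader) of the one-hot target encoding used above:
# to_categorical maps integer digit labels to rows of a 10-wide one-hot matrix.
def _one_hot_example():
    return to_categorical(np.array([0, 3, 9])).astype("uint8")
    # -> [[1 0 0 0 0 0 0 0 0 0]
    #     [0 0 0 1 0 0 0 0 0 0]
    #     [0 0 0 0 0 0 0 0 0 1]]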
|
183942
|
import hashlib
import json
import random
import re
import time
import requests
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36",
"Referer": "http://fanyi.youdao.com/"
}
def test01():
"""36kr.com的内容是存放在script标签中"""
class Kr36(object):
def __init__(self):
self.url = 'https://36kr.com/'
self.headers = headers
self.file = open('36kr.json', 'w', encoding='utf8')
def get_data(self):
resp = requests.get(url=self.url, headers=self.headers)
return resp.content.decode()
def parse_data(self, data):
            # extract the page body; all the content turns out to live inside a <script> tag
result = re.findall('<script>var prop=(.*?)</script>', data, re.S)[0]
            # json.loads fails on the raw payload because it contains '=' etc.; strip the non-standard JSON part
result_list = re.sub(',locational={.*', '', result)
dict_data = json.loads(result_list)['feedPostsLatest|post']
            # pick out only the fields we need
data_list = []
for data in dict_data:
temp = {}
temp['cover'] = data['cover']
temp['title'] = data['title']
data_list.append(temp)
return data_list
def save_data(self, data_list):
for data in data_list:
str_data = json.dumps(data, ensure_ascii=False) + ",\n"
self.file.write(str_data)
def __del__(self):
self.file.close()
        def run(self):
            data = self.get_data()
            data_list = self.parse_data(data)
            # persist the parsed items; without this call the output file stays empty
            self.save_data(data_list)
Kr36().run()
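# A minimal sketch (toy input, not the real 36kr page) of the two-step extraction used in
# Kr36.parse_data above: pull the <script> payload, strip the non-JSON tail, then json.loads it.
def _kr36_extract_example():
    html = '<script>var prop={"feedPostsLatest|post":[]},locational={"x":1}</script>'
    payload = re.findall('<script>var prop=(.*?)</script>', html, re.S)[0]
    return json.loads(re.sub(',locational={.*', '', payload))  # -> {'feedPostsLatest|post': []}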
def test02():
"""有道词典翻译"""
class Youdao(object):
def __init__(self):
self.url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
self.headers = headers
self.post_data = None
def generate_post_data(self, word):
now = int(time.time() * 1000)
random_num = random.randint(0, 9)
salt = str(now + random_num)
S = 'fanyideskweb'
n = word
r = salt
D = ''
md5Str = S + n + r + D
            # create an md5 object
md5 = hashlib.md5()
            # feed in the data
md5.update(md5Str.encode())
            # get the hex digest
o = md5.hexdigest()
self.post_data = {
'i': word,
'from': 'AUTO',
'to': 'AUTO',
                'smartresult': 'dict',
'client': 'fanyideskweb',
'salt': '155617457<PASSWORD>',
'sign': '69aad56937660f19b95143d9f9164165',
'ts': '1556174579900',
'bv': '8738acdfb64ced94051e576f287e2052',
'doctype': 'json',
'version': '2.1',
'keyfrom': 'fanyi.web',
'action': 'FY_BY_CLICKBUTTION',
}
def get_data(self):
resp = requests.post(url=self.url, headers=self.headers, data=self.post_data)
print(resp.content.decode())
return resp.content.decode()
def parse_data(self, data):
dict_data = json.loads(data)
result = dict_data['translateResult'][0][0]['tgt']
print(result)
def run(self):
# import sys
# word = sys.argv[1]
self.generate_post_data('黄金')
data = self.get_data()
self.parse_data(data)
Youdao().run()
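# A minimal sketch of the signing scheme outlined in generate_post_data above: the sign is the hex
# MD5 of client + word + salt + secret. The real secret (D) is unknown here, so it is left empty
# and the resulting value will not match what the live API expects.
def _youdao_sign_example(word: str, salt: str) -> str:
    return hashlib.md5(('fanyideskweb' + word + salt + '').encode()).hexdigest()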
def main():
# test01()
test02()
if __name__ == '__main__':
main()
|
183971
|
import bspump.unittest
import bspump.common
class TestNullSink(bspump.unittest.ProcessorTestCase):
def test_null(self):
events = [
(None, "Don't let this out!"),
]
self.set_up_processor(bspump.common.NullSink)
output = self.execute(
events
)
self.assertEqual(
[event for context, event in output],
[]
)
|
184003
|
from pupa.scrape import Scraper
from pupa.scrape import Event
import datetime as dt
import lxml.html
import re
CAL_PAGE = ("http://www.cityoftemecula.org/Temecula/Visitors/Calendar.htm")
class TemeculaEventScraper(Scraper):
def lxmlize(self, url):
entry = self.urlopen(url)
page = lxml.html.fromstring(entry)
page.make_links_absolute(url)
return page
def cleanup(self, foo):
        foo = re.sub(r"\s+", " ", foo).strip()
return foo
def scrape(self):
page = self.lxmlize(CAL_PAGE)
form = page.xpath("//form[@name='Form1']")
form = form[0] if form else None
if form is None:
raise Exception("Erm, crud.")
page = self.do_post_back(form, 'Listview1$ddlCategory', '', **{
"Listview1:ddlCategory": "1"
})
for event in self.scrape_event_page(page):
event.add_source(CAL_PAGE)
yield event
    def get_start_end(self, obj):
        date = obj['Date:']
        times = obj['time']
        # combine the date with the explicit start/end times rather than relying on dict order
        start = "%s %s" % (date, times["start"])
        end = "%s %s" % (date, times["end"])
        return (dt.datetime.strptime(x, "%A, %B %d, %Y %I:%M %p")
                for x in (start, end))
def scrape_event_page(self, page):
for entry in page.xpath(
"//table[@id='Listview1_DataGrid1']//tr[@class='mainText']"):
title = None
ret = {}
for block in entry.xpath(".//td[@class='mainText']"):
entries = block.xpath("./*")
if "table" in (x.tag for x in entries):
continue
info = [self.cleanup(x.text_content()) for x in entries]
if title is None:
title = info[1]
continue
key = info.pop(0)
val = None
if "Time: " in key:
_, val = key.split("Time: ", 1)
start, end = val.split(" - ", 1)
val = {"start": start,
"end": end}
key = "time"
else:
val = info.pop(0) if info else None
ret[key] = val
if info != []:
raise Exception("Erm. odd scrape.")
if title is None:
continue
ret['title'] = title
start, end = self.get_start_end(ret)
ret['time']['start'], ret['time']['end'] = start, end
event = Event(name=ret['Description:'] or "TBA",
location=ret['Location:'],
when=ret['time']['start'],
end=ret['time']['end'])
yield event
def post_back(self, form, **kwargs):
block = {name: value for name, value in [(obj.name, obj.value)
for obj in form.xpath(".//input")]}
block.update(kwargs)
ret = lxml.html.fromstring(self.urlopen(form.action, block))
ret.make_links_absolute(form.action)
return ret
def do_post_back(self, form, event_target, event_argument, **kwargs):
block = kwargs
event_argument = ":".join(event_argument.split("$"))
block['__EVENTTARGET'] = event_target
block['__EVENTARGUMENT'] = event_argument
return self.post_back(form, **block)
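# A minimal sketch (comments only, not executed) of the ASP.NET postback built above: in scrape(),
# do_post_back(form, 'Listview1$ddlCategory', '', **{"Listview1:ddlCategory": "1"}) re-submits the
# form with every existing <input> value, the extra dropdown field, plus
# {'__EVENTTARGET': 'Listview1$ddlCategory', '__EVENTARGUMENT': ''} posted back to form.action.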
|
184035
|
import datetime
from django.utils import timezone
from django.test import TestCase
from hknweb.events.tests.models.utils import ModelFactory
class EventModelTests(TestCase):
def setUp(self):
user = ModelFactory.create_user()
event_type = ModelFactory.create_event_type()
event_name = "custom event name"
event = ModelFactory.create_event(
name=event_name,
event_type=event_type,
created_by=user,
)
self.user = user
self.event_type = event_type
self.event_name = event_name
self.event = event
def test_default_event_creation(self):
self.assertIs(self.user, self.event.created_by)
self.assertIs(self.event_type, self.event.event_type)
self.assertIs(self.event_name, self.event.name)
def test_repr(self):
expected = "Event(name={}, location={})".format(
self.event_name,
self.event.location,
)
actual = repr(self.event)
self.assertEqual(expected, actual)
def test_str(self):
expected = "{} - {} to {}".format(
self.event_name,
self.event.start_time,
self.event.end_time,
)
actual = str(self.event)
self.assertEqual(expected, actual)
def test_get_absolute_url(self):
expected = "/events/{}".format(self.event.id)
actual = self.event.get_absolute_url()
self.assertEqual(expected, actual)
def test_semester_with_month_lt_7_returns_spring(self):
current_time = timezone.now()
time = datetime.datetime(
year=current_time.year,
month=6,
day=current_time.day,
)
self.event.start_time = time
expected = "{} {}".format("Spring", time.year)
actual = self.event.semester
self.assertEqual(expected, actual)
def test_semester_with_month_geq_7_returns_fall(self):
current_time = timezone.now()
time = datetime.datetime(
year=current_time.year,
month=7,
day=current_time.day,
)
self.event.start_time = time
expected = "{} {}".format("Fall", time.year)
actual = self.event.semester
self.assertEqual(expected, actual)
def test_on_waitlist_without_waitlist_returns_false(self):
self.assertFalse(self.event.on_waitlist(None))
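# A minimal sketch (hypothetical helper; the real property lives on the Event model) of the
# semester rule exercised by the two tests above: months 1-6 map to "Spring", months 7-12 to "Fall".
def _semester_for(start_time: datetime.datetime) -> str:
    term = "Spring" if start_time.month < 7 else "Fall"
    return "{} {}".format(term, start_time.year)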
|
184062
|
from __future__ import print_function
from orphics import maps,io,cosmology,symcoupling as sc,stats,lensing
from enlib import enmap,bench
import numpy as np
import os,sys
cache = True
hdv = False
deg = 5
px = 1.5
shape,wcs = maps.rect_geometry(width_deg = deg,px_res_arcmin=px)
mc = sc.LensingModeCoupling(shape,wcs)
pol = "TE"
# for t in mc.integrands['test']:
# print(t['l1'])
# print(t['l2'])
# print(t['other'])
# print("----")
# print(len(mc.integrands['test']))
theory = cosmology.default_theory(lpad=20000)
noise_t = 27.0
noise_p = 40.0*np.sqrt(2.)
fwhm = 7.0
# noise_t = 10.0
# noise_p = 14.0*np.sqrt(2.)
# fwhm = 2.0
kbeam = maps.gauss_beam(fwhm,mc.modlmap)
ells = np.arange(0,3000,1)
lbeam = maps.gauss_beam(fwhm,ells)
ntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./kbeam**2.)
nee = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
nbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./kbeam**2.)
lntt = np.nan_to_num((noise_t*np.pi/180./60.)**2./lbeam**2.)
lnee = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
lnbb = np.nan_to_num((noise_p*np.pi/180./60.)**2./lbeam**2.)
ellmin = 20
ellmax = 3000
xmask = maps.mask_kspace(shape,wcs,lmin=ellmin,lmax=ellmax)
ymask = xmask
with bench.show("ALcalc"):
AL = mc.AL(pol,xmask,ymask,ntt,nee,nbb,theory=theory,hdv=hdv,cache=cache)
val = mc.NL_from_AL(AL)
bin_edges = np.arange(10,2000,40)
cents,nkk = stats.bin_in_annuli(val,mc.modlmap,bin_edges)
ls,hunls = np.loadtxt("../alhazen/data/hu_"+pol.lower()+".csv",delimiter=',',unpack=True)
pl = io.Plotter(yscale='log')
pl.add(ells,theory.gCl('kk',ells),lw=3,color='k')
pl.add(cents,nkk,ls="--")
pl.add(ls,hunls*2.*np.pi/4.,ls="-.")
oest = ['TE','ET'] if pol=='TE' else [pol]
ls,nlkks,theory,qest = lensing.lensing_noise(ells,lntt,lnee,lnbb,
ellmin,ellmin,ellmin,
ellmax,ellmax,ellmax,
bin_edges,
theory=theory,
estimators = oest,
unlensed_equals_lensed=False,
width_deg=10.,px_res_arcmin=1.0)
pl.add(ls,nlkks['mv'],ls="-")
with bench.show("ALcalc"):
cross = mc.cross(pol,pol,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
ynoise_t=None,ynoise_e=None,ynoise_b=None,
cross_xnoise_t=None,cross_ynoise_t=None,
cross_xnoise_e=None,cross_ynoise_e=None,
cross_xnoise_b=None,cross_ynoise_b=None,
theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
# cross = mc.cross(pol,pol,theory,xmask,ymask,noise_t=ntt,noise_e=nee,noise_b=nbb,
# ynoise_t=None,ynoise_e=None,ynoise_b=None,
# cross_xnoise_t=0,cross_ynoise_t=0,
# cross_xnoise_e=0,cross_ynoise_e=0,
# cross_xnoise_b=0,cross_ynoise_b=0,
# theory_norm=None,hdv=hdv,save_expression="current",validate=True,cache=True)
Nlalt = mc.NL(AL,AL,cross)
cents,nkkalt = stats.bin_in_annuli(Nlalt,mc.modlmap,bin_edges)
pl.add(cents,nkkalt,marker="o",alpha=0.2)
pl.done()
print("nffts : ",mc.nfft,mc.nifft)
|
184089
|
from ._line import Line
from ._colorbar import ColorBar
from plotly.graph_objs.histogram.marker import colorbar
|
184095
|
from typing import Callable, Any, List
import numpy as np
import math
import autograd
from .dataset import Dataset
class DataLoader(object):
"""
Dataloader class
"""
def __init__(self, dataset: Dataset, batch_size: int = 1, shuffle: bool = True,
collate_fn: Callable[[List], Any] = None) -> None:
"""
Constructor
:param dataset: (Dataset) Dataset
:param batch_size: (int) Batch size to be utilized
:param shuffle: (bool) If true dataset is shuffled
:param collate_fn: (Callable) Function to perform batching
"""
# Check parameter
assert batch_size > 0, 'Batch size must be bigger than 0.'
# Save parameters
self.dataset = dataset
self.dataset_len = len(dataset)
self.batch_size = batch_size
self.shuffle = shuffle
self.collate_fn = collate_fn
# Make indexes
self.indexes = np.arange(self.dataset_len)
# Shuffle indexes if utilized
if self.shuffle:
np.random.shuffle(self.indexes)
def __len__(self) -> int:
"""
Returns the length of the dataloader
:return: (int) Length
"""
return len(self.dataset) // self.batch_size
def __iter__(self) -> Any:
"""
Iter method iterates over the whole dataset and batches the dataset output
:return: (Any) Batch objects
"""
        for batch_index in range(math.ceil(self.dataset_len / self.batch_size)):
            for batch in range(self.batch_size):
                if batch_index * self.batch_size + batch < self.dataset_len:
                    instances = self.dataset[self.indexes[batch_index * self.batch_size + batch]]
                    if batch == 0:
                        # First sample of the batch: start one list per returned field
                        return_values = [[value] for value in instances]
                    else:
                        # Later samples: append each field to the corresponding list
                        for field_index, instance in enumerate(instances):
                            return_values[field_index].append(instance)
            # Apply collate operation
            if self.collate_fn is None:
                yield tuple(autograd.stack(column) for column in return_values)
            else:
                yield self.collate_fn(return_values)
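# A minimal usage sketch (assumptions: the wrapped object only needs __len__ and __getitem__
# returning a tuple of fields, so it is duck-typed rather than subclassing Dataset; a custom
# collate_fn is passed so the sketch does not depend on autograd.stack).
if __name__ == "__main__":
    class _ToyDataset:
        def __len__(self) -> int:
            return 10
        def __getitem__(self, index: int):
            return index, index * 2
    loader = DataLoader(_ToyDataset(), batch_size=4, shuffle=False,
                        collate_fn=lambda columns: tuple(np.array(column) for column in columns))
    for inputs, targets in loader:
        print(inputs, targets)  # [0 1 2 3] [0 2 4 6], then [4 5 6 7] ..., then [8 9] [16 18]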
|
184116
|
from quest.admin import admin_site
from .models import Goal, Task, TaskStatus
admin_site.register(Goal)
admin_site.register(Task)
admin_site.register(TaskStatus)
|
184196
|
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (PointGenerator, multi_apply, multiclass_nms_kp,
point_target_kp)
from mmdet.ops import DeformConv
from ..builder import build_loss
from ..registry import HEADS
from ..utils import ConvModule, bias_init_with_prob
class MultiColumnDeformConvBlock(nn.Module):
def __init__(self,
in_channels=256,
feat_channels=256,
gradient_mul=0.1):
super().__init__()
self.gradient_mul = gradient_mul
self.deform_offset_dim = 2 * (9 + 25 + 49)
        # initialize dcn base offsets
# DeformConv3x3
self.dcn_kernel_3 = int(np.sqrt(9))
self.dcn_pad_3 = int((self.dcn_kernel_3 - 1) / 2)
dcn_base_3 = np.arange(-self.dcn_pad_3,
self.dcn_pad_3 + 1).astype(np.float64)
dcn_base_y_3 = np.repeat(dcn_base_3, self.dcn_kernel_3)
dcn_base_x_3 = np.tile(dcn_base_3, self.dcn_kernel_3)
dcn_base_offset_3 = np.stack(
[dcn_base_y_3, dcn_base_x_3], axis=1).reshape((-1))
self.dcn_base_offset_3 = torch.tensor(dcn_base_offset_3).view(
1, -1, 1, 1)
# DeformConv5x5
self.dcn_kernel_5 = int(np.sqrt(25))
self.dcn_pad_5 = int((self.dcn_kernel_5 - 1) / 2)
dcn_base_5 = np.arange(-self.dcn_pad_5,
self.dcn_pad_5 + 1).astype(np.float64)
dcn_base_y_5 = np.repeat(dcn_base_5, self.dcn_kernel_5)
dcn_base_x_5 = np.tile(dcn_base_5, self.dcn_kernel_5)
dcn_base_offset_5 = np.stack(
[dcn_base_y_5, dcn_base_x_5], axis=1).reshape((-1))
self.dcn_base_offset_5 = torch.tensor(dcn_base_offset_5).view(1, -1, 1, 1)
# DeformConv7x7
self.dcn_kernel_7 = int(np.sqrt(49))
self.dcn_pad_7 = int((self.dcn_kernel_7 - 1) / 2)
dcn_base_7 = np.arange(-self.dcn_pad_7,
self.dcn_pad_7 + 1).astype(np.float64)
dcn_base_y_7 = np.repeat(dcn_base_7, self.dcn_kernel_7)
dcn_base_x_7 = np.tile(dcn_base_7, self.dcn_kernel_7)
dcn_base_offset_7 = np.stack(
[dcn_base_y_7, dcn_base_x_7], axis=1).reshape((-1))
self.dcn_base_offset_7 = torch.tensor(dcn_base_offset_7).view(
1, -1, 1, 1)
# initialize Layers
self.dfmconv_3 = DeformConv(in_channels, feat_channels,
self.dcn_kernel_3, 1,
self.dcn_pad_3)
self.dfmconv_5 = DeformConv(in_channels, feat_channels,
self.dcn_kernel_5, 1,
self.dcn_pad_5)
self.dfmconv_7 = DeformConv(in_channels, feat_channels,
self.dcn_kernel_7, 1,
self.dcn_pad_7)
# initialize weights
normal_init(self.dfmconv_3, std=0.01)
normal_init(self.dfmconv_5, std=0.01)
normal_init(self.dfmconv_7, std=0.01)
def forward(self, feat, deform_offset):
assert deform_offset.size(1) == self.deform_offset_dim
deform_offset_3 = deform_offset[:, :2*9, :, :]
deform_offset_5 = deform_offset[:, 2*9:2*(9+25), :, :]
deform_offset_7 = deform_offset[:, 2*(9+25):2*(9+25+49), :, :]
dcn_base_offset_3 = self.dcn_base_offset_3.type_as(feat)
dcn_base_offset_5 = self.dcn_base_offset_5.type_as(feat)
dcn_base_offset_7 = self.dcn_base_offset_7.type_as(feat)
dcn_offset_grad_mul_3 = self.gradient_mul * deform_offset_3 \
+ (1 - self.gradient_mul) * deform_offset_3.detach()
dcn_offset_3 = dcn_offset_grad_mul_3 - dcn_base_offset_3
dcn_offset_grad_mul_5 = self.gradient_mul * deform_offset_5 \
+ (1 - self.gradient_mul) * deform_offset_5.detach()
dcn_offset_5 = dcn_offset_grad_mul_5 - dcn_base_offset_5
dcn_offset_grad_mul_7 = self.gradient_mul * deform_offset_7 \
+ (1 - self.gradient_mul) * deform_offset_7.detach()
dcn_offset_7 = dcn_offset_grad_mul_7 - dcn_base_offset_7
dfmconv_feat_3 = self.dfmconv_3(feat, dcn_offset_3)
dfmconv_feat_5 = self.dfmconv_5(feat, dcn_offset_5)
dfmconv_feat_7 = self.dfmconv_7(feat, dcn_offset_7)
dfmconv_feat = torch.cat(
[dfmconv_feat_3, dfmconv_feat_5, dfmconv_feat_7],
dim=1)
return dfmconv_feat
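# A minimal sketch (not used by the block above) of how the 3x3 base offset grid is constructed:
# nine (y, x) pairs covering [-1, 0, 1] x [-1, 0, 1], flattened y-first, matching dcn_base_offset_3.
def _dcn_base_offset_3x3_example() -> np.ndarray:
    base = np.arange(-1, 2).astype(np.float64)             # [-1, 0, 1]
    base_y = np.repeat(base, 3)                            # [-1,-1,-1, 0, 0, 0, 1, 1, 1]
    base_x = np.tile(base, 3)                              # [-1, 0, 1, -1, 0, 1, -1, 0, 1]
    return np.stack([base_y, base_x], axis=1).reshape(-1)  # shape (18,): y1, x1, y2, x2, ...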
class KpDetModule(nn.Module):
""" Sequential Block
"""
def __init__(self,
deform_conv,
cls_out_channels,
in_channels=256,
feat_channels=256,
num_reppts=9,
num_keypts=17,
gradient_mul=0.1,
transform_method='minmax',
moment_mul=0.01):
super().__init__()
self.deform_conv = deform_conv
self.gradient_mul = gradient_mul
self.transform_method = transform_method
self.moment_mul = moment_mul
keypts_out_dim = 2 * num_keypts
deform_offset_dim = 2 * (9 + 25 + 49)
self.relu = nn.ReLU(inplace=False)
        # initialize conv layers
if deform_conv:
self.deform_offset_out = nn.Conv2d(
keypts_out_dim, deform_offset_dim, 1, 1, 0)
self.cls_dfm_block = MultiColumnDeformConvBlock(
in_channels, feat_channels, gradient_mul)
self.cls_out = nn.Conv2d(
feat_channels*3, cls_out_channels, 1, 1, 0)
self.bbox_param_dfm_block = MultiColumnDeformConvBlock(
in_channels, feat_channels, gradient_mul)
self.bbox_param_out = nn.Conv2d(
feat_channels*3, 4, 1, 1, 0)
self.kpt_dfm_block = MultiColumnDeformConvBlock(
in_channels, feat_channels, gradient_mul)
self.kpt_out = nn.Conv2d(
feat_channels*3, keypts_out_dim, 1, 1, 0)
else:
self.cls_conv = nn.Conv2d(
in_channels, feat_channels, 3, 1, 1)
self.cls_out = nn.Conv2d(
feat_channels, cls_out_channels, 1, 1, 0)
self.bbox_param_conv = nn.Conv2d(
in_channels, feat_channels, 3, 1, 1)
self.bbox_param_out = nn.Conv2d(
feat_channels, 4, 1, 1, 0)
self.kpt_conv = nn.Conv2d(
in_channels, feat_channels, 3, 1, 1)
self.kpt_out = nn.Conv2d(
feat_channels, keypts_out_dim, 1, 1, 0)
# init weights
bias_cls = bias_init_with_prob(0.01)
if self.deform_conv:
normal_init(self.deform_offset_out, std=0.01)
else:
normal_init(self.cls_conv, std=0.01)
normal_init(self.bbox_param_conv, std=0.01)
normal_init(self.kpt_conv, std=0.01)
normal_init(self.cls_out, std=0.01, bias=bias_cls)
normal_init(self.bbox_param_out, std=0.01)
normal_init(self.kpt_out, std=0.01)
def forward(self, cls_feat, pts_feat, kpt_offset_prev=None):
if self.deform_conv:
deform_offset = self.deform_offset_out(kpt_offset_prev)
cls_dfm_feat = self.relu(
self.cls_dfm_block(cls_feat, deform_offset))
cls_score_map = self.cls_out(cls_dfm_feat)
kpt_dfm_feat = self.relu(
self.kpt_dfm_block(pts_feat, deform_offset))
kpt_offset = self.kpt_out(kpt_dfm_feat)
kpt_offset = kpt_offset + kpt_offset_prev.detach()
bbox_param_dfm_feat = self.relu(
self.bbox_param_dfm_block(pts_feat, deform_offset))
bbox_param = self.bbox_param_out(bbox_param_dfm_feat)
bbox_offset = self.points2bbox(
kpt_offset.detach(), tranfer_param=bbox_param)
else:
cls_score_map = self.cls_out(self.relu(self.cls_conv(cls_feat)))
kpt_offset = self.kpt_out(self.relu(
self.kpt_conv(pts_feat)))
bbox_param = self.bbox_param_out(self.relu(
self.bbox_param_conv(pts_feat)))
bbox_offset = self.points2bbox(
kpt_offset.detach(), tranfer_param=bbox_param)
return cls_score_map, bbox_offset, kpt_offset
def points2bbox(self, pts, y_first=True, tranfer_param=None):
"""
Converting the points set into bounding box.
:param pts: the input points sets (fields), each points
set (fields) is represented as 2n scalar.
        :param y_first: if y_first=True, the point set is represented as
[y1, x1, y2, x2 ... yn, xn], otherwise the point set is
represented as [x1, y1, x2, y2 ... xn, yn].
:param transfer_param:
size: [B, 4, H, W]
Meaning of each channel:
- translate_x
- translate_y
- scale_x
- scale_y
:return: each points set is converting to a bbox [x1, y1, x2, y2].
"""
pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
...]
pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
...]
if self.transform_method == 'minmax':
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_up = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],
dim=1)
elif self.transform_method == 'moment':
pts_y_mean = pts_y.mean(dim=1, keepdim=True)
pts_x_mean = pts_x.mean(dim=1, keepdim=True)
pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
moment_transfer = (self.moment_transfer * self.moment_mul) + (
self.moment_transfer.detach() * (1 - self.moment_mul))
moment_width_transfer = moment_transfer[0]
moment_height_transfer = moment_transfer[1]
half_width = pts_x_std * torch.exp(moment_width_transfer)
half_height = pts_y_std * torch.exp(moment_height_transfer)
bbox = torch.cat([
pts_x_mean - half_width, pts_y_mean - half_height,
pts_x_mean + half_width, pts_y_mean + half_height
],
dim=1)
elif self.transform_method == 'minmax_param':
assert tranfer_param is not None
bbox_left = pts_x.min(dim=1, keepdim=True)[0]
bbox_right = pts_x.max(dim=1, keepdim=True)[0]
bbox_top = pts_y.min(dim=1, keepdim=True)[0]
bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]
bbox_center_x = (bbox_left + bbox_right) / 2
bbox_center_y = (bbox_top + bbox_bottom) / 2
tranfer_param = (tranfer_param * self.moment_mul) + (
tranfer_param.detach() * (1 - self.moment_mul))
half_width = (bbox_center_x - bbox_left) * torch.exp(
tranfer_param[:, 0:1, :, :])
half_height = (bbox_center_y - bbox_top) * torch.exp(
tranfer_param[:, 1:2, :, :])
bbox_center_x = bbox_center_x + tranfer_param[:, 2:3, :, :]
bbox_center_y = bbox_center_y + tranfer_param[:, 3:4, :, :]
bbox = torch.cat([
bbox_center_x - half_width, bbox_center_y - half_height,
bbox_center_x + half_width, bbox_center_y + half_height
],
dim=1)
elif self.transform_method == 'moment_param':
assert tranfer_param is not None
pts_y_mean = pts_y.mean(dim=1, keepdim=True)
pts_x_mean = pts_x.mean(dim=1, keepdim=True)
pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)
pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)
tranfer_param = (tranfer_param * self.moment_mul) + (
tranfer_param.detach() * (1 - self.moment_mul))
pts_x_mean = pts_x_mean + tranfer_param[:, 0:1, :, :]
pts_y_mean = pts_y_mean + tranfer_param[:, 1:2, :, :]
half_width = pts_x_std * torch.exp(tranfer_param[:, 2:3, :, :])
half_height = pts_y_std * torch.exp(tranfer_param[:, 3:4, :, :])
bbox = torch.cat([
pts_x_mean - half_width, pts_y_mean - half_height,
pts_x_mean + half_width, pts_y_mean + half_height
],
dim=1)
else:
raise NotImplementedError
return bbox
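# Editor's sketch (not part of the original module): with
# transform_method='minmax' a point set collapses to its axis-aligned box.
# For one location with predicted points (x, y) = (1, 2), (4, 6), (3, 1):
#   bbox_left  = min(1, 4, 3) = 1    bbox_up     = min(2, 6, 1) = 1
#   bbox_right = max(1, 4, 3) = 4    bbox_bottom = max(2, 6, 1) = 6
# giving [1, 1, 4, 6] in (x1, y1, x2, y2) order. The *_param variants then
# translate the box center and rescale the half extents with the learned
# 4-channel transfer parameters.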
@HEADS.register_module
class CascadeKpDetHead(nn.Module):
"""RepPoint head.
Args:
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of channels of the feature map.
point_feat_channels (int): Number of channels of points features.
stacked_convs (int): How many conv layers are used.
gradient_mul (float): The multiplier to gradients from
points refinement and recognition.
point_strides (Iterable): points strides.
transform_method (str): The methods to transform RepPoints to bbox.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
num_reppts=9,
num_keypts=17,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
conv_cfg=None,
norm_cfg=None,
loss_cls_1=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=0.5),
loss_cls_2=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=0.5),
loss_cls_3=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox_1=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
loss_bbox_2=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
loss_bbox_3=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
loss_kpt_1=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
loss_kpt_2=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),
loss_kpt_3=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
use_grid_points=False,
center_init=True,
transform_method='moment',
moment_mul=0.01):
super().__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.point_feat_channels = point_feat_channels
self.stacked_convs = stacked_convs
self.num_keypts = num_keypts
        self.num_reppts = 9 + 25 + 49  # hardcoded to 83 points, overriding the num_reppts argument
self.gradient_mul = gradient_mul
self.point_base_scale = point_base_scale
self.point_strides = point_strides
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.use_sigmoid_cls = loss_cls_3.get('use_sigmoid', False)
self.sampling = loss_cls_3['type'] not in ['FocalLoss']
self.loss_cls_1 = build_loss(loss_cls_1)
self.loss_cls_2 = build_loss(loss_cls_2)
self.loss_cls_3 = build_loss(loss_cls_3)
self.loss_bbox_1 = build_loss(loss_bbox_1)
self.loss_bbox_2 = build_loss(loss_bbox_2)
self.loss_bbox_3 = build_loss(loss_bbox_3)
self.loss_kpt_1 = build_loss(loss_kpt_1)
self.loss_kpt_2 = build_loss(loss_kpt_2)
self.loss_kpt_3 = build_loss(loss_kpt_3)
self.use_grid_points = use_grid_points
self.center_init = center_init
self.transform_method = transform_method
if self.transform_method == 'moment':
self.moment_transfer = nn.Parameter(
data=torch.zeros(2), requires_grad=True)
self.moment_mul = moment_mul
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes - 1
else:
self.cls_out_channels = self.num_classes
self.point_generators = [PointGenerator() for _ in self.point_strides]
self._init_layers()
def _init_layers(self):
self.relu = nn.ReLU(inplace=False)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
# stage 1
self.kp_det_1 = KpDetModule(
False,
self.cls_out_channels,
self.feat_channels,
self.point_feat_channels,
self.num_reppts,
self.num_keypts,
self.gradient_mul,
self.transform_method,
self.moment_mul)
# stage 2
self.kp_det_2 = KpDetModule(
True,
self.cls_out_channels,
self.feat_channels,
self.point_feat_channels,
self.num_reppts,
self.num_keypts,
self.gradient_mul,
self.transform_method,
self.moment_mul)
# stage 3
self.kp_det_3 = KpDetModule(
True,
self.cls_out_channels,
self.feat_channels,
self.point_feat_channels,
self.num_reppts,
self.num_keypts,
self.gradient_mul,
self.transform_method,
self.moment_mul)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
def points2kpt(self, pts, y_first=True):
"""
        Convert a point set into keypoints.
        :param pts: the input point sets (fields); each point set
            is represented by 2n scalars.
        :param y_first: if y_first=True, the point set is represented as
            [y1, x1, y2, x2 ... yn, xn]; otherwise the point set is
            represented as [x1, y1, x2, y2 ... xn, yn].
        :return: each point set is converted to a keypoint list
            [x1, y1, x2, y2 ... xk, yk].
"""
pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])
pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,
...]
pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,
...]
pts = torch.cat([pts_x, pts_y], dim=2).view(*pts.shape)
return pts
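    # Editor's sketch: points2kpt only reorders coordinate pairs. A y-first
    # offset holding [y1, x1, y2, x2] = [2, 1, 6, 4] at one location comes out
    # as [x1, y1, x2, y2] = [1, 2, 4, 6]; the tensor shape is unchanged.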
def forward_single(self, x):
cls_feat = x
pts_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
pts_feat = reg_conv(pts_feat)
# stage 1
cls_score_map_1, box_offset_1, kpt_offset_1 = \
self.kp_det_1(cls_feat, pts_feat)
# stage 2
cls_score_map_2, box_offset_2, kpt_offset_2 = \
self.kp_det_2(cls_feat, pts_feat, kpt_offset_1)
# stage 3
cls_score_map_3, box_offset_3, kpt_offset_3 = \
self.kp_det_3(cls_feat, pts_feat, kpt_offset_2)
return (cls_score_map_1, cls_score_map_2, cls_score_map_3,
box_offset_1, box_offset_2, box_offset_3,
kpt_offset_1, kpt_offset_2, kpt_offset_3)
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_points(self, featmap_sizes, img_metas):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
Returns:
tuple: points of each image, valid flags of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# points center for one time
multi_level_points = []
for i in range(num_levels):
points = self.point_generators[i].grid_points(
featmap_sizes[i], self.point_strides[i])
multi_level_points.append(points)
points_list = [[point.clone() for point in multi_level_points]
for _ in range(num_imgs)]
# for each image, we compute valid flags of multi level grids
valid_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
for i in range(num_levels):
point_stride = self.point_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w, _ = img_meta['pad_shape']
valid_feat_h = min(int(np.ceil(h / point_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / point_stride)), feat_w)
flags = self.point_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w))
multi_level_flags.append(flags)
valid_flag_list.append(multi_level_flags)
return points_list, valid_flag_list
def offset_to_pts(self, center_list, pred_list, y_first=True):
"""Change from point offset to point coordinate.
"""
num_points = pred_list[0].size(1) // 2
pts_list = []
for i_lvl in range(len(self.point_strides)):
pts_lvl = []
for i_img in range(len(center_list)):
pts_center = center_list[i_img][i_lvl][:, :2].repeat(
1, num_points)
pts_shift = pred_list[i_lvl][i_img]
if y_first:
yx_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * num_points)
y_pts_shift = yx_pts_shift[..., 0::2]
x_pts_shift = yx_pts_shift[..., 1::2]
xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)
xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)
else:
xy_pts_shift = pts_shift.permute(1, 2, 0).view(
-1, 2 * num_points)
xy_pts_shift = xy_pts_shift.view(*xy_pts_shift.shape[:-1], -1)
pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center
pts_lvl.append(pts)
pts_lvl = torch.stack(pts_lvl, 0)
pts_list.append(pts_lvl)
return pts_list
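    # Editor's sketch: offsets are expressed in units of the level stride, so
    # a shift of (x, y) = (-1.0, 0.5) at a grid point centered on (160, 96)
    # with stride 16 becomes the absolute point
    # (160 + 16 * (-1.0), 96 + 16 * 0.5) = (144, 104).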
def loss_single(self, cls_score_1, cls_score_2, cls_score_3,
kpt_pred_1, kpt_pred_2, kpt_pred_3,
bbox_pred_1, bbox_pred_2, bbox_pred_3,
labels, label_weights,
bbox_gt, bbox_weights,
kpt_gt, kpt_weights,
stride, num_total_samples):
# classification loss
labels = labels.reshape(-1)
label_weights = label_weights.reshape(-1)
cls_score_1 = cls_score_1.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels)
cls_score_2 = cls_score_2.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels)
cls_score_3 = cls_score_3.permute(0, 2, 3, 1).reshape(
-1, self.cls_out_channels)
loss_cls_1 = self.loss_cls_1(
cls_score_1,
labels,
label_weights,
avg_factor=num_total_samples)
loss_cls_2 = self.loss_cls_2(
cls_score_2,
labels,
label_weights,
avg_factor=num_total_samples)
loss_cls_3 = self.loss_cls_3(
cls_score_3,
labels,
label_weights,
avg_factor=num_total_samples)
# bbox loss
bbox_gt = bbox_gt.reshape(-1, 4)
bbox_weights = bbox_weights.reshape(-1, 4)
bbox_pred_1 = bbox_pred_1.reshape(-1, 4)
bbox_pred_2 = bbox_pred_2.reshape(-1, 4)
bbox_pred_3 = bbox_pred_3.reshape(-1, 4)
normalize_term = self.point_base_scale * stride
loss_bbox_1 = self.loss_bbox_1(
bbox_pred_1 / normalize_term,
bbox_gt / normalize_term,
bbox_weights,
avg_factor=num_total_samples)
loss_bbox_2 = self.loss_bbox_2(
bbox_pred_2 / normalize_term,
bbox_gt / normalize_term,
bbox_weights,
avg_factor=num_total_samples)
loss_bbox_3 = self.loss_bbox_3(
bbox_pred_3 / normalize_term,
bbox_gt / normalize_term,
bbox_weights,
avg_factor=num_total_samples)
# keypoint loss
kpt_gt = kpt_gt.reshape(-1, self.num_keypts * 2)
kpt_weights = kpt_weights.reshape(-1, self.num_keypts * 2)
kpt_pos_num = kpt_weights.sum(1)
kpt_weights[kpt_pos_num > 0] /= kpt_pos_num[
kpt_pos_num > 0].unsqueeze(1)
kpt_weights *= 4
kpt_pred_1 = kpt_pred_1.reshape(-1, self.num_keypts * 2)
kpt_pred_2 = kpt_pred_2.reshape(-1, self.num_keypts * 2)
kpt_pred_3 = kpt_pred_3.reshape(-1, self.num_keypts * 2)
normalize_term = self.point_base_scale * stride
loss_kpt_1 = self.loss_kpt_1(
kpt_pred_1 / normalize_term,
kpt_gt / normalize_term,
kpt_weights,
avg_factor=num_total_samples)
loss_kpt_2 = self.loss_kpt_2(
kpt_pred_2 / normalize_term,
kpt_gt / normalize_term,
kpt_weights,
avg_factor=num_total_samples)
loss_kpt_3 = self.loss_kpt_3(
kpt_pred_3 / normalize_term,
kpt_gt / normalize_term,
kpt_weights,
avg_factor=num_total_samples)
return (loss_cls_1, loss_cls_2, loss_cls_3,
loss_bbox_1, loss_bbox_2, loss_bbox_3,
loss_kpt_1, loss_kpt_2, loss_kpt_3)
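    # Editor's sketch of the keypoint-weight normalisation above, assuming the
    # targets carry a weight of 1 for every annotated keypoint coordinate: a
    # positive sample with 5 visible keypoints has 10 non-zero entries, each
    # becomes 4 / 10 = 0.4, so every positive contributes the same total
    # weight of 4 to the keypoint loss regardless of how many joints are
    # labelled.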
def loss(self,
cls_scores_1,
cls_scores_2,
cls_scores_3,
box_offset_preds_1,
box_offset_preds_2,
box_offset_preds_3,
kpt_preds_1,
kpt_preds_2,
kpt_preds_3,
gt_bboxes,
gt_labels,
gt_keypoints,
img_metas,
cfg,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores_3]
assert len(featmap_sizes) == len(self.point_generators)
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
center_list, valid_flag_list = self.get_points(featmap_sizes,
img_metas)
# prediction of the 1st stage
kpt_coord_preds_1 = self.offset_to_pts(center_list, kpt_preds_1)
box_coord_preds_1 = self.offset_to_pts(
center_list, box_offset_preds_1, y_first=False)
# target for the 2nd stage
kpt_coord_preds_2 = self.offset_to_pts(center_list, kpt_preds_2)
box_coord_preds_2 = self.offset_to_pts(
center_list, box_offset_preds_2, y_first=False)
# target for the 3rd stage
kpt_coord_preds_3 = self.offset_to_pts(center_list, kpt_preds_3)
box_coord_preds_3 = self.offset_to_pts(
center_list, box_offset_preds_3, y_first=False)
# target for all stages
if cfg.uniform.assigner['type'] == 'PointAssigner':
# Assign target for center list
candidate_list = center_list
else:
            raise NotImplementedError
cls_reg_targets = point_target_kp(
candidate_list,
valid_flag_list,
gt_bboxes,
gt_keypoints,
img_metas,
cfg.uniform,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=self.sampling)
(labels_list, label_weights_list,
bbox_gt_list, candidate_list, bbox_weights_list,
keypoint_gt_list, keypoint_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos +
num_total_neg if self.sampling else num_total_pos)
# compute loss
(losses_cls_1, losses_cls_2, losses_cls_3,
losses_bbox_1, losses_bbox_2, losses_bbox_3,
losses_kpt_1, losses_kpt_2, losses_kpt_3) = multi_apply(
self.loss_single,
cls_scores_1,
cls_scores_2,
cls_scores_3,
kpt_coord_preds_1,
kpt_coord_preds_2,
kpt_coord_preds_3,
box_coord_preds_1,
box_coord_preds_2,
box_coord_preds_3,
labels_list,
label_weights_list,
bbox_gt_list,
bbox_weights_list,
keypoint_gt_list,
keypoint_weights_list,
self.point_strides,
num_total_samples=num_total_samples)
loss_dict_all = {
'loss_cls_1': losses_cls_1,
'loss_cls_2': losses_cls_2,
'loss_cls_3': losses_cls_3,
'loss_bbox_1': losses_bbox_1,
'loss_bbox_2': losses_bbox_2,
'loss_bbox_3': losses_bbox_3,
'loss_kpt_1': losses_kpt_1,
'loss_kpt_2': losses_kpt_2,
'loss_kpt_3': losses_kpt_3
}
return loss_dict_all
def get_bboxes(self,
cls_scores_1,
cls_scores_2,
cls_scores_3,
box_offset_preds_1,
box_offset_preds_2,
box_offset_preds_3,
kpt_preds_1,
kpt_preds_2,
kpt_preds_3,
img_metas,
cfg,
rescale=False,
nms=True):
cls_score_final = cls_scores_3
keypts_preds_final = kpt_preds_3
bbox_preds_final = box_offset_preds_3
assert len(cls_score_final) == len(keypts_preds_final) \
== len(bbox_preds_final)
bbox_preds = bbox_preds_final
kpt_preds = [
self.points2kpt(keypts_pred)
for keypts_pred in keypts_preds_final
]
num_levels = len(cls_score_final)
mlvl_points = [
self.point_generators[i].grid_points(cls_score_final[i].size()[-2:],
self.point_strides[i])
for i in range(num_levels)
]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_score_final[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach()
for i in range(num_levels)
]
kpt_pred_list = [
kpt_preds[i][img_id].detach()
for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
kpt_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale, nms)
result_list.append(proposals)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
kpt_preds,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False,
nms=True):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points) \
== len(kpt_preds)
mlvl_bboxes = []
mlvl_kpts = []
mlvl_scores = []
num_kpt = self.num_keypts
num_kp_channel = kpt_preds[0].size(0) // num_kpt
assert num_kp_channel == 2 or num_kp_channel == 3
for i_lvl, (cls_score, bbox_pred, kpt_pred, points) in enumerate(
zip(cls_scores, bbox_preds, kpt_preds, mlvl_points)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:] \
== kpt_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
if num_kp_channel == 3:
kpt_pred = kpt_pred.permute(1, 2, 0).reshape(
-1, num_kpt * num_kp_channel)
# if kpt visibility is not predicted, set it to 1
elif num_kp_channel == 2:
kpt_pred = kpt_pred.permute(1, 2, 0).reshape(
-1, num_kpt, num_kp_channel)
pad_ones = kpt_pred.new_full(kpt_pred[:, :, :1].size(), 1)
kpt_pred = torch.cat([kpt_pred, pad_ones], dim=2)
kpt_pred = kpt_pred.reshape(-1, num_kpt * 3)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, 1:].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
kpt_pred = kpt_pred[topk_inds, :]
scores = scores[topk_inds, :]
bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)
kpt_pos_center = points[:, :2].unsqueeze(dim=1)
bboxes = bbox_pred * self.point_strides[i_lvl] + bbox_pos_center
kpt_pred = kpt_pred.view(-1, num_kpt, 3)
kpt_pred[:, :, :2] = kpt_pred[:, :, :2] \
* self.point_strides[i_lvl] + kpt_pos_center
kpts = kpt_pred
x1 = bboxes[:, 0].clamp(min=0, max=img_shape[1])
y1 = bboxes[:, 1].clamp(min=0, max=img_shape[0])
x2 = bboxes[:, 2].clamp(min=0, max=img_shape[1])
y2 = bboxes[:, 3].clamp(min=0, max=img_shape[0])
bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
            # clamp keypoint x/y to the image bounds (kpts is [N, num_kpt, 3] here)
            kpts[:, :, 0] = kpts[:, :, 0].clamp(min=0, max=img_shape[1])
            kpts[:, :, 1] = kpts[:, :, 1].clamp(min=0, max=img_shape[0])
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_kpts.append(kpts)
mlvl_bboxes = torch.cat(mlvl_bboxes)
mlvl_kpts = torch.cat(mlvl_kpts)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_kpts[:, :, 0:2] = mlvl_kpts[:, :, 0:2] \
/ mlvl_kpts.new_tensor(scale_factor)
mlvl_kpts = mlvl_kpts.reshape(-1, num_kpt*3)
mlvl_scores = torch.cat(mlvl_scores)
        # kpt mAP increases after multiplying by the bbox score
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
if nms:
det_bboxes, det_labels, det_kpts = multiclass_nms_kp(
mlvl_bboxes,
mlvl_scores,
mlvl_kpts,
cfg.score_thr,
cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels, det_kpts
else:
return mlvl_bboxes, mlvl_scores, mlvl_kpts
|
184256
|
import os
import wandb
import numpy as np
import pandas as pd
api = wandb.Api()
wandb_entity = os.environ['WANDB_ENTITY']
# Project is specified by <entity/project-name>
runs = api.runs(f"{wandb_entity}/invalid_action_masking")
analysis = True
summary_list = []
config_list = []
name_list = []
for run in runs:
# run.summary are the output key/values like accuracy.
# We call ._json_dict to omit large files
summary_json_dict = run.summary._json_dict
if analysis:
summary_json_dict = summary_json_dict.copy()
history = pd.DataFrame(run.scan_history())
history['rollling_e'] = history['charts/episode_reward'].dropna().rolling(10).mean()
first_best_reward_idx = (history["rollling_e"] >= 40.0).idxmax()
if history.iloc[first_best_reward_idx]["rollling_e"] >= 40.0:
summary_json_dict["first_learned_timestep"] = history.iloc[first_best_reward_idx]["global_step"] / 500000
else:
summary_json_dict["first_learned_timestep"] = 1
summary_json_dict["first_reward_timestep"] = history.iloc[(history['charts/episode_reward'] > 0).idxmax()]["global_step"] / 500000
# mask removed logic
if run.config["exp_name"] == "ppo":
history['evals_rollling_e'] = history['evals/charts/episode_reward'].dropna().rolling(10).mean()
first_best_reward_idx = (history["evals_rollling_e"] >= 40.0).idxmax()
if history.iloc[first_best_reward_idx]["evals_rollling_e"] >= 40.0:
summary_json_dict["evals_first_learned_timestep"] = history.iloc[first_best_reward_idx]["global_step"] / 500000
else:
summary_json_dict["evals_first_learned_timestep"] = 1
summary_json_dict["evals_first_reward_timestep"] = history.iloc[(history['charts/episode_reward'] > 0).idxmax()]["global_step"] / 500000
summary_json_dict["charts/episode_reward"] = history["charts/episode_reward"][-10:].mean()
summary_json_dict["losses/approx_kl"] = history['losses/approx_kl'].astype(np.float64).dropna()[-10:].mean()
summary_json_dict['stats/num_invalid_action_null'] = history['stats/num_invalid_action_null'].dropna()[-10:].mean()
summary_json_dict['stats/num_invalid_action_busy_unit'] = history['stats/num_invalid_action_busy_unit'].dropna()[-10:].mean()
summary_json_dict['stats/num_invalid_action_ownership'] = history['stats/num_invalid_action_ownership'].dropna()[-10:].mean()
if run.config["exp_name"] == "ppo":
summary_json_dict['evals/charts/episode_reward'] = history['evals/charts/episode_reward'][-10:].mean()
summary_json_dict['evals/stats/num_invalid_action_null'] = history['evals/stats/num_invalid_action_null'].dropna()[-10:].mean()
summary_json_dict['evals/stats/num_invalid_action_busy_unit'] = history['evals/stats/num_invalid_action_busy_unit'].dropna()[-10:].mean()
summary_json_dict['evals/stats/num_invalid_action_ownership'] = history['evals/stats/num_invalid_action_ownership'].dropna()[-10:].mean()
summary_list.append(summary_json_dict)
    # run.config holds the input hyperparameters.
# We remove special values that start with _.
config = {k:v for k,v in run.config.items() if not k.startswith('_')}
config_list.append(config)
# run.name is the name of the run.
name_list.append(run.name)
summary_df = pd.DataFrame.from_records(summary_list)
config_df = pd.DataFrame.from_records(config_list)
name_df = pd.DataFrame({'name': name_list})
all_df = pd.concat([name_df, config_df,summary_df], axis=1)
all_df.to_csv("project.csv")
all_df["losses/approx_kl"] = all_df["losses/approx_kl"].astype(np.float64)
# mask removal
mask_removed = all_df[all_df["exp_name"]=="ppo"].copy()
mask_removed['charts/episode_reward'] = mask_removed['evals/charts/episode_reward']
mask_removed['stats/num_invalid_action_null'] = mask_removed['evals/stats/num_invalid_action_null']
mask_removed['stats/num_invalid_action_busy_unit'] = mask_removed['evals/stats/num_invalid_action_busy_unit']
mask_removed['stats/num_invalid_action_ownership'] = mask_removed['evals/stats/num_invalid_action_ownership']
mask_removed['first_learned_timestep'] = mask_removed['evals_first_learned_timestep']
mask_removed['first_reward_timestep'] = mask_removed['evals_first_reward_timestep']
mask_removed["exp_name"] = "masking removed"
final_all_df = all_df.append(mask_removed, ignore_index=True)
# change names
final_all_df.loc[final_all_df["gym_id"]=="MicrortsMining4x4F9-v0", "gym_id"] = '04x04'
final_all_df.loc[(final_all_df["gym_id"]=="MicrortsMining10x10F9-v0"), "gym_id"] = '10x10'
final_all_df.loc[final_all_df["gym_id"]=="MicrortsMining16x16F9-v0", "gym_id"] = '16x16'
final_all_df.loc[final_all_df["gym_id"]=="MicrortsMining24x24F9-v0", "gym_id"] = '24x24'
final_all_df.loc[final_all_df["exp_name"]=="masking removed", "exp_name"] = 'Masking removed'
final_all_df.loc[(final_all_df["exp_name"]=="ppo"), "exp_name"] = 'Invalid action masking'
final_all_df.loc[final_all_df["exp_name"]=="ppo_no_adj", "exp_name"] = 'Naive invalid action masking'
final_all_df.loc[final_all_df["exp_name"]=="ppo_no_mask", "exp_name"] = 'Invalid action penalty'
results_df = final_all_df.fillna(0).groupby(
['exp_name','gym_id',"invalid_action_penalty"]
).mean()[[
'charts/episode_reward',
'losses/approx_kl',
'stats/num_invalid_action_null',
'stats/num_invalid_action_busy_unit',
'stats/num_invalid_action_ownership',
"first_learned_timestep",
"first_reward_timestep"
]]
final_print_df = results_df.round(2)
final_print_df['losses/approx_kl'] = results_df['losses/approx_kl'].round(5)
# final_print_df['first_learned_timestep'] = results_df['first_learned_timestep'].round(4)
# final_print_df['first_reward_timestep'] = results_df['first_reward_timestep'].round(4)
final_print_df['first_learned_timestep'] = pd.Series(["{0:.2f}%".format(val * 100) for val in results_df['first_learned_timestep'].round(4)], index = results_df.index)
final_print_df['first_reward_timestep'] = pd.Series(["{0:.2f}%".format(val * 100) for val in results_df['first_reward_timestep'].round(4)], index = results_df.index)
print(final_print_df.to_latex())
print(final_print_df.drop(columns=['losses/approx_kl']).to_latex())
# calculate the first time the algorithm solves the environment
# , 'losses/value_loss',
# 'losses/policy_loss', 'charts/episode_reward',
# , ,
# ,
# 'charts/episode_reward/ResourceGatherRewardFunction',
# 'evals/charts/episode_reward', 'evals/stats/num_invalid_action_null',
# 'evals/stats/num_invalid_action_busy_unit',
# 'evals/stats/num_invalid_action_ownership'
|
184271
|
from .base import *
DEBUG = True
if DEBUG is True:
from .dev import *
else:
from .production import *
# Site name
SITE_NAME = 'manage'
# Admin site home page
SITE_PAGE = '/%s/service/article/' % SITE_NAME
|
184278
|
from CoreFoundation import (
CFPreferencesCopyValue,
kCFPreferencesAnyHost,
kCFPreferencesAnyUser,
)
factoid = "updates_app_autoupdate"
def fact():
"""Returns the status of automatic updates to MAS apps"""
status = "disabled"
pref = CFPreferencesCopyValue(
"AutoUpdate",
"/Library/Preferences/com.apple.commerce.plist",
kCFPreferencesAnyUser,
kCFPreferencesAnyHost,
)
if pref:
status = "enabled"
return {factoid: status}
if __name__ == "__main__":
print("<result>%s</result>" % fact()[factoid])
|
184310
|
from homeassistant.const import (
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
TEMP_CELSIUS,
POWER_WATT,
)
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
)
from homeassistant.components.integration.sensor import (
TRAPEZOIDAL_METHOD,
IntegrationSensor,
)
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
import logging
_LOGGER = logging.getLogger(__name__)
from .const import DOMAIN, CONF_TARIFF
from .managers import UtilityManager, EnergyManager
from .hilo_device import HiloBaseEntity
SENSOR_ATTRIBUTES = ["Power", "CurrentTemperature"]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
domain_config = hass.data[DOMAIN]
power_entities = []
temperature_entities = []
energy_entities = []
cost_entities = []
if domain_config.generate_energy_meters:
energy_manager = await EnergyManager().init(hass, domain_config.energy_meter_period)
utility_manager = UtilityManager(domain_config.energy_meter_period)
for d in domain_config.devices:
# We really only care about devices with a power meter or temperature
if "Power" in d.supported_attributes:
d._power_entity = PowerSensor(d, domain_config.scan_interval)
power_entities.append(d._power_entity)
            # If we opt out of generating energy meters we just create the power sensors
if not domain_config.generate_energy_meters:
continue
# This creates the sensor using the "integration" platform
d._energy_entity = EnergySensor(d)
energy_entities.append(d._energy_entity)
energy_entity = f"hilo_energy_{slugify(d.name)}"
if energy_entity == "hilo_energy_total":
_LOGGER.error(
"An hilo entity can't be named 'total' because it conflicts with the generate name for the smart energy meter"
)
continue
if d.name == "SmartEnergyMeter":
energy_entity = "hilo_energy_total"
utility_manager.add_meter(energy_entity)
energy_manager.add_to_dashboard(energy_entity)
if "CurrentTemperature" in d.supported_attributes:
d._temperature_entity = TemperatureSensor(d, domain_config.scan_interval)
temperature_entities.append(d._temperature_entity)
async_add_entities(power_entities + energy_entities + temperature_entities)
if not domain_config.generate_energy_meters:
return
    # Creating cost sensors based on the plan.
    # This will generate hilo_rate_(low|medium|high) sensors which can be
    # referred to later in the energy dashboard based on the selected tariff.
for tarif, amount in CONF_TARIFF.get(domain_config.hq_plan_name).items():
sensor_name = f"hilo_rate_{tarif}"
cost_entities.append(
HiloCostSensor(sensor_name, domain_config.hq_plan_name, amount)
)
cost_entities.append(
HiloCostSensor("hilo_rate_current", domain_config.hq_plan_name)
)
async_add_entities(cost_entities)
    # This sets up the utility_meter platform
await utility_manager.update(hass, config, async_add_entities)
# This sends the entities to the energy dashboard
await energy_manager.update()
class TemperatureSensor(HiloBaseEntity, Entity):
def __init__(self, d, scan_interval):
super().__init__(d, scan_interval)
self._name = f"{self._name}_temperature"
_LOGGER.debug(f"Setting up TemperatureSensor entity: {self._name}")
@property
def state(self):
return str(int(self._get("CurrentTemperature", 0)))
@property
def state_class(self):
return STATE_CLASS_MEASUREMENT
@property
def device_class(self):
return DEVICE_CLASS_TEMPERATURE
@property
def unit_of_measurement(self):
return TEMP_CELSIUS
async def _async_update(self):
return
class PowerSensor(HiloBaseEntity, Entity):
def __init__(self, d, scan_interval):
super().__init__(d, scan_interval)
self._name = f"{self._name}_power"
_LOGGER.debug(f"Setting up PowerSensor entity: {self._name}")
@property
def state(self):
return str(int(self._get("Power", 0)))
@property
def state_class(self):
return STATE_CLASS_MEASUREMENT
@property
def device_class(self):
return DEVICE_CLASS_POWER
@property
def unit_of_measurement(self):
return POWER_WATT
async def _async_update(self):
        # Other devices are updated within their own
        # classes. If we don't skip them here, they will
        # be double-polled
if self.d.device_type != "Meter":
return
_LOGGER.debug(f"{self.d._tag} Updating")
await self.d.async_update_device()
class HiloCostSensor(RestoreEntity):
def __init__(self, name, plan_name, amount=0):
self.data = None
self._name = name
self.plan_name = plan_name
self._amount = amount
self._last_update = dt_util.utcnow()
_LOGGER.info(f"Initializing energy cost sensor {name} {plan_name} ")
@property
def name(self):
return self._name
@property
def icon(self):
return "mdi:cash"
@property
def state(self):
return self._amount
@property
def should_poll(self) -> bool:
return False
@property
def state_class(self):
return STATE_CLASS_MEASUREMENT
@property
def device_class(self):
return "monetary"
@property
def unit_of_measurement(self):
return "$/kWh"
@property
def device_state_attributes(self):
return {"last_update": self._last_update, "Cost": self.state}
async def async_added_to_hass(self):
"""Handle entity about to be added to hass event."""
await super().async_added_to_hass()
last_state = await self.async_get_last_state()
if last_state:
self._last_update = dt_util.utcnow()
self._amount = last_state.state
async def async_update(self):
return
class EnergySensor(IntegrationSensor):
def __init__(self, d):
self.d = d
self._name = f"hilo_energy_{slugify(self.d.name)}"
self._unit_of_measurement = ENERGY_WATT_HOUR
self._unit_prefix = None
self._last_update = None
if self.d.name == "SmartEnergyMeter":
self._name = "hilo_energy_total"
self._unit_of_measurement = ENERGY_KILO_WATT_HOUR
self._unit_prefix = "k"
if d.device_type == "Thermostat":
self._unit_of_measurement = ENERGY_KILO_WATT_HOUR
self._unit_prefix = "k"
self._source = f"sensor.{slugify(self.d.name)}_power"
super().__init__(
self._source,
self._name,
2,
self._unit_prefix,
"h",
self._unit_of_measurement,
TRAPEZOIDAL_METHOD,
)
self._state = 0
self._last_period = 0
_LOGGER.debug(f"Setting up EnergySensor entity: {self._name}")
@property
def icon(self):
return "mdi:lightning-bolt"
@property
def state_class(self):
return STATE_CLASS_MEASUREMENT
@property
def device_class(self):
return DEVICE_CLASS_ENERGY
@property
def unit_of_measurement(self):
return self._unit_of_measurement
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
_LOGGER.debug(f"Added to hass: {self._name}")
await super().async_added_to_hass()
if state := await self.async_get_last_state():
self._state = state.state
|
184353
|
from __future__ import print_function
from fenics import *
from mshr import *
import numpy as np
from scipy import integrate
set_log_level(LogLevel.INFO)
T = 500.0 # final time
num_steps = 1000 # number of time steps
dt = T / num_steps # time step size
mu = 16 # dynamic viscosity
rho = 1 # density
save_int = 5
inflowVel=8
# mesh parameters
degree = 2
Lx = 5000
Ly = 5000
nx = 72
ny = 72
RD=126
#WTG parameters
numturbs = 9
#number of inflow direction bins
bins = 1
WTGexp = 6.
radius = RD/2.
thickness = RD/10.
numRefine = 1
A=RD # weird for 2D
HH=80
initExtent=1.
mlDenom=5
restart = False
randStart = False
gridStart = True
optimize = False
loadnuT = False
mesh = RectangleMesh(Point(-Lx/2., -Ly/2.), Point(Lx/2., Ly/2.), nx, ny)
site_x = 1000
site_y = 1000
refine_x = 1100
refine_y = 1100
def refine_mesh(mesh, refine_x, refine_y):
#refines the mesh around the site boundaries
h = mesh.hmin()
cell_markers = MeshFunction('bool',mesh, mesh.topology().dim())
cell_markers.set_all(False)
for cell in cells(mesh):
if (cell.midpoint()[0] > -(refine_x)) and (abs(cell.midpoint()[1]) < refine_y ):
cell_markers[cell] = True
mesh = refine(mesh, cell_markers)
return mesh
def refine_mesh2(mesh, refine_x, refine_y):
#refines the mesh around the site boundaries
h = mesh.hmin()
cell_markers = MeshFunction('bool',mesh, mesh.topology().dim())
cell_markers.set_all(False)
for cell in cells(mesh):
if (cell.midpoint()[0]**2 + cell.midpoint()[1]**2 < refine_x**2+refine_y**2 ):
cell_markers[cell] = True
mesh = refine(mesh, cell_markers)
return mesh
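# Editor's note: refine_mesh2 flags every cell whose midpoint lies inside a
# disc of radius sqrt(refine_x**2 + refine_y**2) (~1556 m with the values
# below) around the origin, so with numRefine = 1 the mesh is refined once
# over that central disc.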
for nums in range(numRefine):
print('refining mesh')
mesh=refine_mesh2(mesh, refine_x, refine_y)
h = mesh.hmin()
Re = Lx*8/mu
print(Re)
print(mesh.hmin())
print(inflowVel*dt/mesh.hmin())
alpha = 7*pi/64
# Define function spaces
V = VectorFunctionSpace(mesh, 'P', 2)
Q = FunctionSpace(mesh, 'P', 1)
print(V.dim())
def WTGdist(x,y):
return np.exp(-((x/thickness)**WTGexp + (y/radius)**WTGexp))
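# Editor's sketch: WTGdist is a smooth indicator of the rotor footprint. With
# thickness = RD/10 = 12.6 and radius = RD/2 = 63, WTGdist(0, 0) = exp(0) = 1,
# WTGdist(thickness, 0) = exp(-1) ~ 0.37, and the sixth-power exponent makes
# the value drop off sharply outside the |x| < thickness, |y| < radius box.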
def createLayout(numturbs):
mx=[]
my=[]
mz=[]
if randStart == True:
for i in range(numturbs):
mx.append(Constant(np.random.uniform(low=-(site_x - radius),high=(site_x - radius))))
my.append(Constant(np.random.uniform(low=-(site_y - radius), high=(site_y - radius))))
mz.append(Constant(HH))
elif gridStart ==True:
if numturbs == 16:
rows = 4
cols = 4
xpos = np.linspace(-initExtent*(site_x - radius),initExtent*(site_x - radius),cols)
ypos = np.linspace(-initExtent*(site_y - radius),initExtent*(site_y - radius),rows)
for i in range(rows):
for j in range(cols):
mx.append(Constant(xpos[j]))
my.append(Constant(ypos[i]))
# # some starting noise sometimes helps
# mx.append(Constant(xpos[j]+5.*np.random.randn()))
# my.append(Constant(ypos[i]+5.*np.random.randn()))
mz.append(Constant(HH))
if numturbs == 9:
rows = 3
cols = 3
xpos = np.linspace(-site_x,site_x,cols)
ypos = np.linspace(-site_y,site_y,rows)
for i in range(rows):
for j in range(cols):
mx.append(Constant(xpos[j]))
my.append(Constant(ypos[i]))
# # some starting noise sometimes helps
# mx.append(Constant(xpos[j]+5.*np.random.randn()))
# my.append(Constant(ypos[i]+5.*np.random.randn()))
mz.append(Constant(HH))
if numturbs == 1:
mx.append(Constant(-1500))
my.append(Constant(0))
mz.append(Constant(HH))
if numturbs == 2:
mx.append(Constant(-1500))
mx.append(Constant(-1500 + 7*RD))
my.append(Constant(0))
my.append(Constant(0))
mz.append(Constant(HH))
mz.append(Constant(HH))
if numturbs == 3:
mx.append(Constant(-1000))
mx.append(Constant(0))
mx.append(Constant(1000))
my.append(Constant(0))
my.append(Constant(0))
my.append(Constant(0))
mz.append(Constant(HH))
mz.append(Constant(HH))
mz.append(Constant(HH))
if numturbs == 4:
mx.append(Constant(-1200))
mx.append(Constant(-400))
mx.append(Constant(400))
mx.append(Constant(1200))
my.append(Constant(0))
my.append(Constant(0))
my.append(Constant(0))
my.append(Constant(0))
mz.append(Constant(HH))
mz.append(Constant(HH))
mz.append(Constant(HH))
mz.append(Constant(HH))
return mx, my, mz
def createRotatedTurbineForce(mx,my,ma,A,beta,numturbs,alpha,V):
x=SpatialCoordinate(mesh)
tf = Function(V)
for i in range(numturbs):
WTGbase = project(Expression(("cos(yaw)","-sin(yaw)"),yaw=myaw[i],degree=2),V)
# WTGbase = project(Expression(("0","1"),yaw=myaw[i],degree=2),V)
#rotation
mxrot = cos(alpha)*mx[i] - sin(alpha)*my[i]
myrot = sin(alpha)*mx[i] + cos(alpha)*my[i]
# mxrot=mx[i]
# myrot=my[i]
x_centered=x[0]-mxrot
y_centered=x[1]-myrot
x_centered_rotated = x_centered*cos(myaw[i]) + y_centered*sin(myaw[i])
y_centered_rotated = -x_centered*sin(myaw[i]) + y_centered*cos(myaw[i])
# tf = tf+ 0.0001*exp(-(((x[0] - mx[i])/thickness)**WTGexp +(((x[1] - my[i])**2)/radius**2)**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered/thickness)**WTGexp + ((y_centered-radius/2.)/(radius/2.))**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered/thickness)**WTGexp + ((y_centered+radius/2.)/(radius/2.))**WTGexp))*WTGbase
tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered_rotated/thickness)**WTGexp + ((y_centered_rotated-radius/2.)/(radius/2.))**WTGexp))*WTGbase
tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-((x_centered_rotated/thickness)**WTGexp + ((y_centered_rotated+radius/2.)/(radius/2.))**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-(((x[0]*cos(myaw[i]) - x - mxrot)/thickness)**WTGexp + ((x[1] - myrot-radius/2.)/(radius/2.))**WTGexp))*WTGbase
# tf = tf + 0.5*4.*A*ma[i]/(1.-ma[i])/beta*exp(-(((x[0]*cos(myaw[i]) - mxrot)/thickness)**WTGexp + ((x[1] - myrot+radius/2.)/(radius/2.))**WTGexp))*WTGbase
return tf
#boundary conditions
class walls(SubDomain):
def inside(self, x, on_boundary):
return near(x[1]**2 - (Ly/2.)**2, 0.) and on_boundary
class inflow(SubDomain):
def inside(self, x, on_boundary):
return near(x[0],-(Lx/2.)) and on_boundary
class outflow(SubDomain):
def inside(self, x, on_boundary):
return near(x[0],Lx/2.) and on_boundary
wavenum=2*pi/(Ly/4.)
wavenum2=2*pi/(Ly/4.)
freq=2*pi/200.
wavenummod=wavenum
wavenum2mod=wavenum2
freqmod=freq
inflowExpr=Expression(("inflowVel + 0.1*sin(freq*t + wavenum*x[1])","0. + 0.1*sin(freq*t + wavenum2*x[1]) "), inflowVel=inflowVel,t=0,wavenum=wavenum,wavenum2=wavenum2,freq=freq,degree=2)
# inflowExpr=Expression(("inflowVel + 0.05*sin(2*pi*t/100. + wavenum*x[1]) + perturbx*0.2*sin(2*pi*t/100. + wavenum2*x[1]+pi/2.)","0. + 0.01*sin(2*pi*t/100. + wavenum*x[1])+ perturby*0.2*sin(2*pi*t/100. + wavenum2*x[1])"), inflowVel=inflowVel,t=0,perturbx=0,perturby=0,wavenum=wavenum,wavenum2=wavenum2,degree=2)
# inflowExpr=Expression(("inflowVel + 0.5*sin(2*pi*t/100. + wavenum*x[1])","0. + 0.25*sin(2*pi*t/100.)"), inflowVel=inflowVel,t=0,wavenum=wavenum,degree=2)
# inflowExpr=Expression(("inflowVel","0."), inflowVel=inflowVel,degree=2)
# lateral BC
bcu_inflow = DirichletBC(V, inflowExpr, inflow())
# bcu_walls = DirichletBC(V, Expression(("0","0."), inflowVel=inflowVel,degree=2), walls())
bcp_outflow = DirichletBC(Q, Constant(0), outflow())
# bc1a = DirichletBC(V.sub(1), Constant(0.0), NoSlipBoundary())
# inflow BC
# bc2 = DirichletBC(V, Constant((inflowVel,0.0)), InflowBoundary())
# bc2a = DirichletBC(VQ.sub(0).sub(0), Constant(8.), InflowBoundary())
# bcp = [DirichletBC(Q, Constant(0), OutflowBoundary())]
bcp=[bcp_outflow]
# bcu = [bcu_inflow,bcu_walls]
bcu = [bcu_inflow]
# Define trial and test functions
u = TrialFunction(V)
v = TestFunction(V)
p = TrialFunction(Q)
q = TestFunction(Q)
# Define functions for solutions at previous and current time steps
u_n = Function(V)
u_ = Function(V)
p_n = Function(Q)
p_ = Function(Q)
# Define expressions used in variational forms
U = 0.5*(u_n + u)
n = FacetNormal(mesh)
f = Constant((0, 0))
k = Constant(dt)
mu = Constant(mu)
rho = Constant(rho)
mx,my,mz = createLayout(numturbs)
ma=[Constant(mm) for mm in 0.33*np.ones(numturbs)]
# right hand rule from above
# myaw=[Constant(pi/8.),Constant(0),Constant(0)]
yaw=0
myaw = [Constant(mm) for mm in (yaw*pi/180.)*np.ones(numturbs)]
beta = integrate.dblquad(WTGdist,-3*radius,3*radius,lambda x: -3*radius,lambda x: 3*radius)
B=beta[0]
f = createRotatedTurbineForce(mx,my,ma,A,B,numturbs,alpha,V)
# Define symmetric gradient
def epsilon(u):
return sym(nabla_grad(u))
# Define stress tensor
def sigma(u, p):
return 2*mu*epsilon(u) - p*Identity(len(u))
# Define variational problem for step 1
F1 = rho*dot((u - u_n) / k, v)*dx \
+ rho*dot(dot(u_n, nabla_grad(u_n)), v)*dx \
+ inner(sigma(U, p_n), epsilon(v))*dx \
+ dot(p_n*n, v)*ds - dot(mu*nabla_grad(U)*n, v)*ds \
+ dot(f*(cos(myaw[0])**2*u_n[0]*u_n[0]+sin(myaw[0])**2*u_n[1]*u_n[1]), v)*dx # inner? other form of vel?
a1 = lhs(F1)
L1 = rhs(F1)
# Define variational problem for step 2
a2 = dot(nabla_grad(p), nabla_grad(q))*dx
L2 = dot(nabla_grad(p_n), nabla_grad(q))*dx - (1/k)*div(u_)*q*dx
# Define variational problem for step 3
a3 = dot(u, v)*dx
L3 = dot(u_, v)*dx - k*dot(nabla_grad(p_ - p_n), v)*dx
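# Editor's note: steps 1-3 above are the standard incremental pressure
# correction (IPCS) splitting. Step 1 solves for a tentative velocity u_ using
# the old pressure p_n, step 2 solves a Poisson problem for the corrected
# pressure p_, and step 3 projects the velocity as
#   u = u_ - dt * grad(p_ - p_n),
# which is what a3/L3 encode with k = dt.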
# Assemble matrices
A1 = assemble(a1)
A2 = assemble(a2)
A3 = assemble(a3)
# Apply boundary conditions to matrices
[bc.apply(A1) for bc in bcu]
[bc.apply(A2) for bc in bcp]
# Create XDMF files for visualization output
# ufile = File('output/fields/velocity_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.pvd')
# pfile = File('output/fields/pressure_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.pvd')
xdmffile_u = XDMFFile('output/velocity_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
xdmffile_p = XDMFFile('output/pressure_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # xdmffile_tf = XDMFFile('2DDynamic/turbine_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # Create time series (for use in reaction_system.py)
# timeseries_u = TimeSeries('output/velocity_series_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# timeseries_p = TimeSeries('output/pressure_series_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha)+'.xdmf')
# # Save mesh to file (for use in reaction_system.py)
# File('navier_stokes_cylinder/cylinder.xml.gz') << mesh
# Create progress bar
# progress = Progress('Time-stepping')
# set_log_level(PROGRESS)
# ufile = File('output/u_'+str(float(mu))+'.pvd')
# pfile = File('output/p_'+str(float(mu))+'.pvd')
# DoF=len(u_.vector()[:])
# snapshots = np.zeros((DoF,int(num_steps/save_int)))
# uInterp = Function(V)
# uInterp=project(Expression(("x[0]","x[1]"),degree=2),V)
# basePositions=uInterp.vector()[:]
# np.save('output/basePositions_'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha),basePositions)
# Time-stepping
t = 0
count=0
for n in range(num_steps):
# Update current time
t += dt
# bcu_inflow.perturbx=.1*np.random.rand()
# bcu_inflow.perturby=.1*np.random.rand()
inflowExpr.t=t
# wavenummod = wavenummod + .01*np.random.randn()*wavenum
# wavenum2mod = wavenum2mod+ .01*np.random.randn()*wavenum2
# freqmod = freqmod+ .01*np.random.randn()*wavenum2
# inflowExpr.wavenum=wavenummod
# inflowExpr.wavenum2=wavenum2mod
# inflowExpr.freq=freqmod
bcu_inflow = DirichletBC(V, inflowExpr, inflow())
bcu=[bcu_inflow]
# Step 1: Tentative velocity step
b1 = assemble(L1)
[bc.apply(b1) for bc in bcu]
solve(A1, u_.vector(), b1, 'bicgstab', 'hypre_amg')
# Step 2: Pressure correction step
b2 = assemble(L2)
[bc.apply(b2) for bc in bcp]
solve(A2, p_.vector(), b2, 'bicgstab', 'hypre_amg')
# Step 3: Velocity correction step
b3 = assemble(L3)
solve(A3, u_.vector(), b3, 'cg', 'sor')
# Update previous solution
u_n.assign(u_)
p_n.assign(p_)
if n % save_int ==0:
# Save solution to file (XDMF/HDF5)
# ufile << u_
# pfile << p_
xdmffile_u.write(u_, t)
xdmffile_p.write(p_, t)
# xdmffile_tf.write(project(f,V),t)
# # Save nodal values to file
# timeseries_u.store(u_.vector(), t)
# timeseries_p.store(p_.vector(), t)
# snapshots[:,count]=u_.vector()[:]
print(t)
# print(wavenummod/wavenum)
# print(wavenum2mod/wavenum2)
# print(freqmod/freq)
count+=1
# # Update progress bar
# progress.update(t / T)
# print('u max:', u_.vector().array().max())
# Hold plot
# interactive()
# np.save('output/snapshots'+str(numturbs) + '_' + str(int(np.round(Re))) + '_' + str(yaw) + '_' + str(alpha),snapshots)
|
184363
|
import re as RE
import operator
from functools import reduce  # reduce is not a builtin in Python 3
from logging import info, getLogger
getLogger().setLevel(1)
def conv(input,output):
info("reading input file "+input)
fd=open(input)
line=reduce(operator.add,fd.readlines())
fd.close()
info("pruning trailing comments, labels, declarations and indentation")
# this is for the comments
line=RE.sub(r'\n\s*/\*(.*?)\*/\s*\n',r'\n*\1\n',line)
line=RE.sub(r'^\s*/\*(.*?)\*/\s*\n',r'\n*\1\n',line)
line=RE.sub(r'\s*//.*\n',r'\n',line)
line=RE.sub(r'\s*#pragma.*\n',r'\n',line)
line=RE.sub(r'(/\*.*?\*/)|(;)',r'',line)
line=RE.sub(r'[ \t]*\n+[ \t]*',r'\n',line)
# this is for the declarations
line=RE.sub(r'(short|int)\s*\*?\s*(\w+)(\s*,\s*\*?\s*(\w+))*',r'',line)
# this is for labels
line=RE.sub(r'\n\s*\w+:\s*',r'',line)
# are we in degraded mode ?
degraded_mode=line.find("P=")>=0
info("managing substitution")
if output.name[-4:] == ".asm":
def print_fields(fields):
padding=24
max_delay=5
res=fields[0].center(padding)
for field in fields[1:]:
res+='||'+field.center(padding)
res+='\n'
for i in range(max_delay):
res+="".center(padding)
for field in ["","","",""]:
res+='||'+field.center(padding)
res+='\n'
return res
elif output.name[-4:] == ".asl":
def print_fields(fields):
res=''
for field in fields:
if field: res+= field
return res.strip() + '\n'
def print_im(lhs,rhs): return print_fields(["im"+(','+lhs if lhs else '') +'='+rhs,' ',' ',' ',' '])
def print_ma(lhs,rhs): return print_fields([' ',"ma"+(","+lhs if lhs else '') +'='+rhs,' ',' ',' '])
def print_re(lhs, rhs): return print_fields([' ',' ',"P"+ (','+lhs if lhs else '') + "=" + rhs,' ',' '])
def print_do(field): return print_fields([' ',' ',' ',field,' '])
def print_return(field): return print_fields([' ',' ',' ',' ',field])
def re(m,i):return "re("+m.group(i)+")"
def im(m,i):return "i"+m.group(i)
def ma(m,i):return "m"+m.group(i)
#imX = FIFOY
line=RE.sub(r'im([0-9]+) = (FIFO[0-9]+)',
lambda m:print_im(im(m,1),m.group(2)),
line)
#maX = FIFOY
line=RE.sub(r'ma([0-9]+) = (FIFO[0-9]+)',
lambda m:print_ma(ma(m,1),m.group(2)),
line)
#imX = imY+Z*S
line=RE.sub(r'im([0-9]+) = im([0-9]+)([\+-])([0-9]+\*)S',
lambda m:print_im(im(m,1),im(m,2)+"+"+m.group(4)+("S" if m.group(3)=="+" else "N")),
line)
#maX = maY+Z
line=RE.sub(r'ma([0-9]+) = ma([0-9]+)([\+\-])([0-9]+)',
lambda m:print_ma(ma(m,1),ma(m,2)+"+"+m.group(4)+"*"+("E" if m.group(3)=="+" else "W" ) ),
line)
#imX = imY+Z
line=RE.sub(r'im([0-9]+) = im([0-9]+)([\+\-])([0-9]+)',
lambda m:print_im(im(m,1),im(m,2)+"+"+m.group(4)+"*"+("E" if m.group(3)=="+" else "W" ) ),
line)
#*++imX = reY
line=RE.sub(r'\*\+\+im([0-9]+) = re([0-9]+)',
lambda m:print_im(im(m,1),im(m,1)+"+1*E")+print_re("im",re(m,2)+"*re(0)"),
line)
#*imX = reY
line=RE.sub(r'\*im([0-9]+) = re([0-9]+)',
lambda m:print_im("",im(m,1))+print_re("",re(m,2))+print_re("im","P"),
line)
#*++imX = reY+reZ
line=RE.sub(r'\*\+\+im([0-9]+) = re([0-9]+)\+re([0-9]+)',
lambda m:print_re("",re(m,2))+print_im(im(m,1),im(m,1)+"+1*E")+print_re("im","P+"+re(m,3)+"*re(0)"),
line)
#*++imX = P
line=RE.sub(r'\*\+\+im([0-9]+) = P',
lambda m:print_im(im(m,1),im(m,1)+"+1*E")+print_re("im","P"),
line)
#*imX = P
line=RE.sub(r'\*im([0-9]+) = P',
lambda m:print_im("",im(m,1))+print_re("im","P"),
line)
#reX = *++imY*reZ
line=RE.sub(r're([0-9]+) = \*\+\+im([0-9]+)\*re([0-9]+)',
lambda m:print_re("",re(m,3))+print_im(im(m,2),im(m,2)+"+1*E")+print_re(re(m,1),"P+im*re(0)"),
line)
#P = *++imY*reZ
line=RE.sub(r'P = \*\+\+im([0-9]+)\*re([0-9]+)',
lambda m:print_im(im(m,1),im(m,1)+"+1*E")+print_re("","im*"+re(m,2)),
line)
#reX = *++imY**++maZ
line=RE.sub(r're([0-9]+) = \*\+\+im([0-9]+)\*\*\+\+ma([0-9]+)',
lambda m:print_im(im(m,2),im(m,2)+"+1*E")+print_ma(ma(m,3),ma(m,3)+"+1*E")+print_re(re(m,1),"im*ma"),
line)
#reX = reY+*imZ**maW
line=RE.sub(r're([0-9]+) = re([0-9]+)([\+\*])\*im([0-9]+)([\+\*])\*ma([0-9]+)',
lambda m:print_im("",im(m,4))+print_ma("",ma(m,6))+print_re("","im"+m.group(5)+"ma")+print_re(re(m,1),"P"+m.group(3)+re(m,2)+"*re(0)"),
line)
#*imX = *imY+*imZ
line=RE.sub(r'\*im([0-9]+) = \*im([0-9]+)([\+\*])\*im([0-9]+)',
lambda m:(print_re("re(1)","P") if degraded_mode else "" )+print_im("",im(m,4))+print_re("","im*re(0)")+print_im("",im(m,2))+print_re("","P"+m.group(3)+"im*re(0)")+print_im("",im(m,1))+print_re("im","P")+(print_re("","re(1)") if degraded_mode else ""),
line)
#*imX = *imY+*maZ
line=RE.sub(r'\*im([0-9]+) = \*im([0-9]+)([\+\*])\*ma([0-9]+)',
lambda m:(print_re("re(1)","P") if degraded_mode else "" )+print_im("",im(m,2))+print_ma("",ma(m,4))+print_re("","P"+m.group(3)+"im*ma")+print_im("",im(m,1))+print_re("im","P")+(print_re("","re(1)") if degraded_mode else ""),
line)
#*(++imX) = *(++imY)+*maZ
line=RE.sub(r'\*\+\+im([0-9]+) = \*\+\+im([0-9]+)([\+\*])\*ma([0-9]+)',
lambda m:print_im(im(m,2),im(m,2)+"+1*E")+print_re("","im*re(0)")+print_ma("",ma(m,4))+print_im(im(m,1),im(m,1)+"+1*E")+print_re("im","P+re(1)*ma"),
line)
#P = P+*imZ**maW
line=RE.sub(r'P = P([\+\*])\*im([0-9]+)([\+\*])\*ma([0-9]+)',
lambda m:print_im("",im(m,2))+print_ma("",ma(m,4))+print_re("","P"+m.group(1)+"im"+m.group(3)+"ma"),
line)
#P = P+*++imZ**++maW
line=RE.sub(r'P = P([\+\*])\*\+\+im([0-9]+)([\+\*])\*\+\+ma([0-9]+)',
lambda m:print_im(im(m,2),im(m,2)+"+1*E")+print_ma(ma(m,4),ma(m,4)+"+1*E")+print_re("","P"+m.group(1)+"im"+m.group(3)+"ma"),
line)
#reX = *(maY+X)
line=RE.sub(r're([0-9]+) = \*\(ma([0-9]+)\+([0-9]+)\)',
lambda m:print_ma("",ma(m,2)+"+"+m.group(3)+"*E")+print_re(re(m,1),"re(0)*ma"),
line)
#reX = *maY
line=RE.sub(r're([0-9]+) = \*ma([0-9]+)',
lambda m:print_ma("",ma(m,2))+print_re(re(m,1),"re(0)*ma"),
line)
#imX = imY
line=RE.sub(r'im([0-9]+) = im([0-9]+)',
lambda m:print_im(im(m,1),im(m,2)),
line)
#reX = Y (special case for Y=0)
line=RE.sub(r're([0-9]+) = ([0-9]+)',
lambda m:print_re(re(m,1),(m.group(2) if m.group(2)!="0" else "P-P")),
line)
#P = Y (special case for Y=0)
line=RE.sub(r'P = ([0-9]+)',
lambda m:print_re("",(m.group(1) if m.group(1)!="0" else "P-P")),
line)
#reX = reY + reZ
line=RE.sub(r're([0-9]+) = re([0-9]+)\+re([0-9]+)',
lambda m:print_re("",re(m,2))+print_re(re(m,1),"P+"+re(m,3)+"*re(0)"),
line)
#reX = reY
line=RE.sub(r're([0-9]+) = re([0-9]+)',
lambda m:print_re(re(m,1),re(m,2)),
line)
#P = reY
line=RE.sub(r'P = re([0-9]+)',
lambda m:print_re("",re(m,1)),
line)
#maX = maY
line=RE.sub(r'ma([0-9]+) = ma([0-9]+)',
lambda m:print_ma(ma(m,1),ma(m,2)),
line)
#*imX = MIN(reY,*imZ)
def handler0(m):
# it's ok to use re(0-5) they are reserved for internal use
s=print_im("",im(m,4))
if m.group(2) == "MIN":
s+=print_re("As",re(m,3)+"-im*re(0)")
else:
s+=print_re("As","im*re(0)-"+re(m,3))
s+=print_re("re(2)","im*re(0)")
s+=print_re("",re(m,3))
s+=print_im("",im(m,1))
s+=print_re("im","if(As=1,P,re(2))")
return s
line=RE.sub(r'\*im([0-9]+) = (MIN|MAX)\(re([0-9]+), \*im([0-9]+)\)',
handler0,
line)
#*++imX = MIN(reY,*++imZ)
def handler0(m):
# it's ok to use re(0-5) they are reserved for internal use
s=print_im("",im(m,4)+"+1*E")
if m.group(2) == "MIN":
s+=print_re("As",re(m,3)+"-im*re(0)")
else:
s+=print_re("As","im*re(0)-"+re(m,3))
s+=print_re("re(2)","im*re(0)")
s+=print_re("",re(m,3))
s+=print_im("",im(m,1)+"+1*E")
s+=print_re("im","if(As=1,P,re(2))")
return s
line=RE.sub(r'\*\+\+im([0-9]+) = (MIN|MAX)\(re([0-9]+), \*\+\+im([0-9]+)\)',
handler0,
line)
#P = MIN(P,*imZ)
def handler1(m):
# it's ok to use re(0-5) they are reserved for internal use
s=print_im("",im(m,2))
s+=print_re("re(1)","P")
s+=print_re("As","P-im*re(0)")
if m.group(1) == "MIN":
s+=print_re("","if(As=1,P,re(1))")
else:
s+=print_re("","if(As=0,P,re(1))")
return s
line=RE.sub(r'P = (MIN|MAX)\(P, \*im([0-9]+)\)',
handler1,
line)
#reX = MIN(reY,*imZ)
def handler1(m):
# it's ok to use re(0-5) they are reserved for internal use
s=print_im("",im(m,4))
if m.group(2) == "MIN":
s+=print_re("As",re(m,3)+"-im*re(0)")
else:
s+=print_re("As","im*re(0)-"+re(m,3))
s+=print_re(re(m,1),"im*re(0)")
s+=print_re("",re(m,3))
s+=print_re(re(m,1),"if(As=1,P,"+re(m,1)+")")
return s
line=RE.sub(r're([0-9]+) = (MIN|MAX)\(re([0-9]+), \*im([0-9]+)\)',
handler1,
line)
#reX = MIN(reY,*++imZ)
def handler1(m):
# it's ok to use re(0-5) they are reserved for internal use
s=print_im("",im(m,4)+"+1*E")
if m.group(2) == "MIN":
s+=print_re("As",re(m,3)+"-im*re(0)")
else:
s+=print_re("As","im*re(0)-"+re(m,3))
s+=print_re(re(m,1),"im*re(0)")
s+=print_re("",re(m,3))
s+=print_re(re(m,1),"if(As=1,P,"+re(m,1)+")")
return s
line=RE.sub(r're([0-9]+) = (MIN|MAX)\(re([0-9]+), \*\+\+im([0-9]+)\)',
handler1,
line)
#reX=MIN(*imY,*imZ)
def handler2(m):
# it's ok to use re(0-5) they are reserved for internal use
s =print_im("",im(m,3))
s+=print_re("re(1)","im*re(0)")
s+=print_im("",im(m,4))
if m.group(2) == "MIN":
s+=print_re("As","P-im*re(0)")
else:
s+=print_re("As","im*re(0)-P")
s+=print_re("re(2)","im*re(0)")
s+=print_re("","re(1)")
s+=print_re(re(m,1),"if(As=1,P,re(2))")
return s
line=RE.sub(r're([0-9]+) = (MIN|MAX)\(\*im([0-9]+), \*im([0-9]+)\)',
handler2,
line)
#reX=MIN(*++imY,*++imZ)
def handler2(m):
# it's ok to use re(0-5) they are reserved for internal use
s =print_im("",im(m,3)+"+1*E")
s+=print_re("re(1)","im*re(0)")
s+=print_im("",im(m,4)+"+1*E")
if m.group(2) == "MIN":
s+=print_re("As","P-im*re(0)")
else:
s+=print_re("As","im*re(0)-P")
s+=print_re("re(2)","im*re(0)")
s+=print_re("","re(1)")
s+=print_re(re(m,1),"if(As=1,P,re(2))")
return s
line=RE.sub(r're([0-9]+) = (MIN|MAX)\(\*\+\+im([0-9]+), \*\+\+im([0-9]+)\)',
handler2,
line)
# void microcode(short *FIFO0, short *FIFO1, short *FIFO2, short iter1, short iter2)
line=RE.sub(r'void ([a-zA-Z]\w*)\(.*?\)\s*{',
lambda m: 'prog '+m.group(1)+ "\nsub "+m.group(1) +'\n' + print_re("re(0)","1"), #re(0) is reserved for this value
line)
line=RE.sub(r'}\s*$',print_return("return")+r'endsub\nendprog\n',line)
# for(re0 = 0 re0 <= N0 re0 += 1) {
line=RE.sub(r'for\s*\([^N]*(N[0-9])[^\n]*',
lambda m:print_do("do_"+m.group(1)),
line)
line=RE.sub(r'}',
lambda m:print_do("loop"),
            line) # this only works under the assumption that indentation for the loop body adds at least a tab before } and that no other {} are left
#prune duplicated newlines
line=RE.sub(r'\n+',r'\n',line)
if output.name[-4:] == ".asl":
info("add some tabbing")
tline=""
tab=0
for l in line.split('\n'):
if l[:4] == "loop":
tab-=1
tline+=" "*tab*4 + l + '\n'
if l[:4] == "do_N":
tab+=1
line=tline
# small optimization step
line=RE.sub(r'((im,i([0-9]+)=[^\n]*\n)\s*im=i([0-9]+)\n)',
lambda m:m.groups(0)[1] if m.groups(0)[2] == m.groups(0)[3] else m.groups(0)[0],
line)
line=RE.sub(r'\n+',r'\n',line)
info("resulting file")
output.write(line)
|
184365
|
import numba
import numpy as np
NP_COMPLEX = np.complex128
NUMBA_COMPLEX = numba.complex128
NP_FLOAT = np.float64
NUMBA_FLOAT = numba.float64
|
184370
|
from .algorithms import OnlineNNClassifier, OnlineNNRuLSIF
from .rulsif import RuLSIF
from .dataset import generate_dataset
from .viz import display
__all__ = [
'OnlineNNClassifier', 'OnlineNNRuLSIF', 'RuLSIF', 'generate_dataset', 'display'
]
|
184401
|
from fastai.callbacks.mixup import *
from fastai.tabular import *
class TabMixUpCallback(LearnerCallback):
"Callback that creates the mixed-up input and target."
def __init__(self, learn:Learner, alpha:float=0.3, stack_x:bool=False, stack_y:bool=True):
super().__init__(learn)
self.alpha,self.stack_x,self.stack_y = alpha,stack_x,stack_y
def on_train_begin(self, **kwargs):
if self.stack_y: self.learn.loss_func = MixUpLoss(self.learn.loss_func)
def on_batch_begin(self, last_input, last_target, train, **kwargs):
"Applies mixup to `last_input` and `last_target` if `train`."
if not train: return
new_input = []
lambd_gnd = np.random.beta(self.alpha, self.alpha, last_target.size(0))
lambd_gnd = np.concatenate([lambd_gnd[:,None], 1-lambd_gnd[:,None]], 1).max(1)
shuffle = torch.randperm(last_target.size(0)).to(last_input[0].device)
y1 = last_target[shuffle]
x_cat = last_input[0]
if self.model.n_emb != 0:
x = [e(x_cat[:,i]) for i,e in enumerate(self.model.embeds)]
x_cat = torch.cat(x, 1)
lambd = x_cat.new(lambd_gnd)
x1 = x_cat[shuffle]
out_shape = [lambd.size(0)] + [1 for _ in range(len(x1.shape) - 1)]
lambd = tensor(lambd)
new_input.append((x_cat * lambd.view(out_shape) + x1 * (1-lambd).view(out_shape)))
new_input.append(last_input[1])
new_target = torch.cat([last_target[:,None].float(), y1[:,None].float(), lambd[:,None].float()], 1)
return {'last_input': new_input, 'last_target': new_target}
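    # Editor's sketch of the interpolation above: with lambda = 0.7, an
    # embedded row x_cat = [1.0, 2.0] and its shuffled partner x1 = [3.0, 4.0]
    # mix into 0.7 * [1.0, 2.0] + 0.3 * [3.0, 4.0] = [1.6, 2.6], while the
    # target keeps (y, y1, lambda) so MixUpLoss can blend the two per-item
    # losses with the same lambda.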
def on_train_end(self, **kwargs):
if self.stack_y: self.learn.loss_func = self.learn.loss_func.get_old()
def mytabular_learner(data:DataBunch, layers:Collection[int], emb_szs:Dict[str,int]=None, metrics=None,
ps:Collection[float]=None, emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, **learn_kwargs):
"Get a `Learner` using `data`, with `metrics`, including a `TabularModel` created using the remaining params."
emb_szs = data.get_emb_szs(ifnone(emb_szs, {}))
model = myTabularModel(emb_szs, len(data.cont_names), out_sz=data.c, layers=layers, ps=ps, emb_drop=emb_drop,
y_range=y_range, use_bn=use_bn)
return Learner(data, model, metrics=metrics, **learn_kwargs)
class myTabularModel(Module):
"Basic model for tabular data."
def __init__(self, emb_szs:ListSizes, n_cont:int, out_sz:int, layers:Collection[int], ps:Collection[float]=None,
emb_drop:float=0., y_range:OptRange=None, use_bn:bool=True, bn_final:bool=False):
super().__init__()
ps = ifnone(ps, [0]*len(layers))
ps = listify(ps, layers)
self.embeds = nn.ModuleList([embedding(ni, nf) for ni,nf in emb_szs])
self.emb_drop = nn.Dropout(emb_drop)
self.bn_cont = nn.BatchNorm1d(n_cont)
n_emb = sum(e.embedding_dim for e in self.embeds)
self.n_emb,self.n_cont,self.y_range = n_emb,n_cont,y_range
sizes = self.get_sizes(layers, out_sz)
actns = [nn.ReLU(inplace=True) for _ in range(len(sizes)-2)] + [None]
layers = []
for i,(n_in,n_out,dp,act) in enumerate(zip(sizes[:-1],sizes[1:],[0.]+ps,actns)):
layers += bn_drop_lin(n_in, n_out, bn=use_bn and i!=0, p=dp, actn=act)
if bn_final: layers.append(nn.BatchNorm1d(sizes[-1]))
self.layers = nn.Sequential(*layers)
def get_sizes(self, layers, out_sz):
return [self.n_emb + self.n_cont] + layers + [out_sz]
    def forward(self, x_cat:Tensor, x_cont:Tensor) -> Tensor:
        if self.n_emb != 0:
            if not self.training:
                # at train time TabMixUpCallback has already applied the embeddings
                x = [e(x_cat[:,i]) for i,e in enumerate(self.embeds)]
                x_cat = torch.cat(x, 1)
            x = self.emb_drop(x_cat)
if self.n_cont != 0:
x_cont = self.bn_cont(x_cont)
x = torch.cat([x.float(), x_cont], 1) if self.n_emb != 0 else x_cont
x = self.layers(x)
if self.y_range is not None:
x = (self.y_range[1]-self.y_range[0]) * torch.sigmoid(x) + self.y_range[0]
return x
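# --- Hedged usage sketch (not part of the original module) ---
# Assuming `data` is a tabular DataBunch with categorical and continuous
# columns already declared, mixup can be attached to training like this:
#   learn = mytabular_learner(data, layers=[200, 100], metrics=accuracy)
#   learn.callbacks.append(TabMixUpCallback(learn, alpha=0.3))
#   learn.fit_one_cycle(5)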
|
184442
|
import numpy as np
import matplotlib.pyplot as plt
import torch
from collections import namedtuple
Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'done'))
class DeepRLTrainer:
NB_EPISODES = 3000
SAVE_EVERY = 500
INFO_EVERY = 50
SOFT_MAX = False
DEVICE = 'cpu'
def __init__(self, environment, agent, save_path):
self.env = environment
self.agent = agent
self.save_path = save_path
self.total_rewards = []
self.avg_rewards = []
self.tiles_visited = []
self.avg_tiles_visited = []
self.nb_steps = []
self.avg_nb_steps = []
self.cc_counter = 0
self.nb_complete_cov = []
self.terrain_diffs = []
self.avg_terrain_diffs = []
def train(self):
for i in range(DeepRLTrainer.NB_EPISODES):
current_state = torch.tensor(self.env.reset(), dtype=torch.float,
device=DeepRLTrainer.DEVICE)
done = False
info = {}
self.agent.update_epsilon(i)
while not done:
action = self.agent.select_action(
current_state, soft_max=DeepRLTrainer.SOFT_MAX
)
n_state, reward, done, info = self.env.step(action)
action = torch.tensor(action, dtype=torch.int64,
device=DeepRLTrainer.DEVICE)
n_state = torch.tensor(n_state, dtype=torch.float,
device=DeepRLTrainer.DEVICE)
reward = torch.tensor(reward, dtype=torch.float,
device=DeepRLTrainer.DEVICE)
done = torch.tensor(done, dtype=torch.bool,
device=DeepRLTrainer.DEVICE)
self.agent.observe_transition(Transition(
current_state, action, n_state, reward, done
), device=DeepRLTrainer.DEVICE)
current_state = n_state
if info["full_cc"]:
self.cc_counter += 1
print(f"COMPLETE COVERAGE: {self.cc_counter}")
self.total_rewards.append(info["total_reward"])
self.nb_steps.append(info["nb_steps"])
self.tiles_visited.append(info["total_covered_tiles"])
self.nb_complete_cov.append(self.cc_counter)
self.terrain_diffs.append(info["total_pos_terr_diff"])
avg_start = 0 if i < DeepRLTrainer.SAVE_EVERY else -DeepRLTrainer.SAVE_EVERY
self.avg_rewards.append(np.average(self.total_rewards[avg_start:]))
self.avg_tiles_visited.append(np.average(self.tiles_visited[avg_start:]))
self.avg_nb_steps.append(np.average(self.nb_steps[avg_start:]))
self.avg_terrain_diffs.append(np.average(self.terrain_diffs[avg_start:]))
episode_nb = i + 1
if episode_nb % DeepRLTrainer.INFO_EVERY == 0:
print(f"Episode {episode_nb}")
print(f"average total reward: {self.avg_rewards[-1]}")
print(f"average nb steps: {self.avg_nb_steps[-1]}")
print(f"average nb tiles visited: {self.avg_tiles_visited[-1]}")
print(f"average positive terrain diff: {self.avg_terrain_diffs[-1]}")
print(f"epsilon: {self.agent.epsilon}")
print()
if episode_nb % DeepRLTrainer.SAVE_EVERY == 0:
x = range(episode_nb)
plt.clf()
plt.plot(x, self.total_rewards, x, self.avg_rewards)
plt.legend(['total rewards', 'average total rewards'])
plt.title('Total reward for every episode')
plt.savefig(self.save_path + f"rewards.png")
np.save(self.save_path + f"rewards.npy", self.total_rewards)
np.save(self.save_path + f"avg_rewards.npy", self.avg_rewards)
plt.clf()
plt.plot(x, self.tiles_visited, x, self.avg_tiles_visited)
plt.legend(['nb tiles visited', 'average nb tile visited'])
plt.title('Number of tiles visited for every episode')
plt.savefig(self.save_path + f"tiles_visited.png")
np.save(self.save_path + f"tiles_visited.npy", self.tiles_visited)
np.save(self.save_path + f"avg_tiles_visited.npy", self.avg_tiles_visited)
plt.clf()
plt.plot(x, self.nb_steps, x, self.avg_nb_steps)
plt.legend(['nb steps', 'average nb steps'])
plt.title('Number of steps for every episode')
plt.savefig(self.save_path + f"nb_steps.png")
np.save(self.save_path + f"nb_steps.npy", self.nb_steps)
np.save(self.save_path + f"avg_nb_steps.npy", self.avg_nb_steps)
plt.clf()
plt.plot(x, self.nb_complete_cov)
plt.legend(['nb complete coverage runs'])
plt.title('Nb of complete coverage runs')
plt.savefig(self.save_path + f"nb_complete_covs.png")
np.save(self.save_path + f"nb_complete_covs.npy", self.nb_complete_cov)
plt.clf()
plt.plot(x, self.terrain_diffs, x, self.avg_terrain_diffs)
plt.legend(['terrain differences', 'average terrain differences'])
plt.title('Total terrain differences for every episode')
plt.savefig(self.save_path + f"terrain_diffs.png")
np.save(self.save_path + f"terrain_diffs.npy", self.terrain_diffs)
np.save(self.save_path + f"avg_terrain_diffs.npy", self.avg_terrain_diffs)
self.agent.save(self.save_path, episode_nb)
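# Hedged usage sketch (assumptions: `env` is a gym-like environment whose
# `step` returns an info dict with the keys read above, and `agent` exposes
# select_action/observe_transition/update_epsilon/save):
#   trainer = DeepRLTrainer(env, agent, save_path="./results/")
#   trainer.train()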
|
184495
|
DATABASE_ENGINE = 'sqlite3'
SECRET_KEY = 'abcd123'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': './example.db',
}
}
INSTALLED_APPS = (
'django_bouncy',
)
BOUNCY_TOPIC_ARN = ['arn:aws:sns:us-east-1:250214102493:Demo_App_Unsubscribes']
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = [
'--with-xunit',
'--nologcapture',
'--cover-package=django_bouncy',
]
|
184503
|
from robox import Options, Robox
with Robox() as robox:
page = robox.open("https://httpbin.org/forms/post")
form = page.get_form()
form.fill_in("custname", value="foo")
form.check("topping", values=["Onion"])
form.choose("size", option="Medium")
form.fill_in("comments", value="all good in the hood")
form.fill_in("delivery", value="13:37")
page = page.submit_form(form)
assert page.url == "https://httpbin.org/post"
with Robox(options=Options(obey_robotstxt=True)) as robox:
robox.open("https://news.ycombinator.com/")
|
184540
|
import os
import asyncio
import json
from pyppeteer import launch
network = []
javascript = []
async def intercept_network_response(response):
network.append(str(response.status) + response.url)
async def intercept_console(response):
javascript.append(response.text)
async def collect_msgs_and_screenshot(url, ss_path):
browser = await launch({'args': ['--disable-dev-shm-usage', '--no-sandbox']})
page = await browser.newPage()
page.on('response', intercept_network_response)
page.on('console', intercept_console)
page.on('requestfailed', intercept_console)
page.on('pageerror', intercept_console)
page.on('error', intercept_console)
await page.goto(url)
await page.screenshot({'path': ss_path, 'fullPage': True})
await browser.close()
def collect_data(url, output_folder, output_filename):
if not os.path.exists("./tmp"):
os.mkdir("./tmp")
output_folder = os.path.join("./tmp", output_folder)
if not os.path.exists(output_folder):
os.mkdir(output_folder)
asyncio.get_event_loop().run_until_complete(
collect_msgs_and_screenshot(url, os.path.join(output_folder, output_filename)))
with open(os.path.join(output_folder, output_filename.split('.')[0] + '_js_log.json'), 'w') as f:
json.dump(javascript, f, indent=2)
with open(os.path.join(output_folder, output_filename.split('.')[0] + '_network_log.json'), 'w') as f:
json.dump(network, f, indent=2)
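if __name__ == '__main__':
    # Minimal smoke test (assumption: network access and a Chromium build
    # usable by pyppeteer are available). Writes the screenshot plus the
    # JS and network JSON logs under ./tmp/example/.
    collect_data('https://example.com', 'example', 'page.png')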
|
184606
|
import asyncio
import json
import time
import unittest
import requests
import websockets
from BaseTest import BaseTest, BASE_URL, BASE_WS_URL_CUSTOM, BASE_WS_URL_JSONRPC, BaseAsyncTest
class IntentionsHttpTestCase(BaseTest):
@classmethod
def setUpClass(cls):
BaseTest.setUpClass()
def setUp(self):
pass
def reloadAll(self):
pass
def test_create_intentions_block(self):
r = requests.post(
"%s/intentions/ProtocolLanguage.sandbox/5465070037663859703/createBlock"
% BASE_URL
)
self.assertEqual(200, r.status_code)
data = r.json()
self.assertEqual(True, data["success"])
uuid = data["value"]
time.sleep(0.5)
r = requests.get("%s/intentions/%s" % (BASE_URL, uuid))
self.assertEqual(200, r.status_code)
data = r.json()
self.assertEqual(True, data["success"], "returned %s" % str(data))
self.assertEqual(
[
{"index": 0, "description": "Intention on Protocol Element"},
{"index": 1, "description": "Intention on Protocol"},
],
data["value"],
)
class IntentionsWsTestCase(BaseAsyncTest):
@classmethod
def setUpClass(cls):
BaseAsyncTest.setUpClass()
def setUp(self):
pass
def reloadAll(self):
pass
def test_create_intentions_block_custom(self):
async def f():
websocket = await websockets.connect(BASE_WS_URL_CUSTOM)
await websocket.send(
json.dumps(
{
"type": "CreateIntentionsBlock",
"node": {
"model": "ProtocolLanguage.sandbox",
"id": {"regularId": 5465070037663859703},
},
}
)
)
response = json.loads(await websocket.recv())
self.assertEqual("CreateIntentionsBlockAnswer", response["type"])
uuid = response["blockUUID"]
await websocket.send(
json.dumps({"type": "GetIntentionsBlock", "blockUUID": uuid})
)
response = json.loads(await websocket.recv())
self.assertEqual("GetIntentionsBlockAnswer", response["type"])
self.assertEqual(
[
{"index": 0, "description": "Intention on Protocol Element"},
{"index": 1, "description": "Intention on Protocol"},
],
response["intentions"],
)
await websocket.close()
asyncio.get_event_loop().run_until_complete(f())
def test_delete_intentions_block_custom(self):
async def f():
websocket = await websockets.connect(BASE_WS_URL_CUSTOM)
await websocket.send(
json.dumps(
{
"type": "CreateIntentionsBlock",
"node": {
"model": "ProtocolLanguage.sandbox",
"id": {"regularId": 5465070037663859703},
},
}
)
)
response = json.loads(await websocket.recv())
self.assertEqual("CreateIntentionsBlockAnswer", response["type"])
uuid = response["blockUUID"]
await websocket.send(
json.dumps({"type": "DeleteIntentionsBlock", "blockUUID": uuid})
)
await websocket.send(
json.dumps({"type": "GetIntentionsBlock", "blockUUID": uuid})
)
response = json.loads(await websocket.recv())
self.assertEqual("GetIntentionsBlockAnswer", response["type"])
self.assertEqual(False, response["result"]["success"])
await websocket.close()
asyncio.get_event_loop().run_until_complete(f())
def test_execute_intention_custom(self):
async def f():
websocket = await websockets.connect(BASE_WS_URL_CUSTOM)
await websocket.send(
json.dumps(
{
"type": "CreateIntentionsBlock",
"node": {
"model": "ProtocolLanguage.sandbox",
"id": {"regularId": 5465070037663859703},
},
}
)
)
response = json.loads(await websocket.recv())
self.assertEqual("CreateIntentionsBlockAnswer", response["type"])
uuid = response["blockUUID"]
await websocket.send(
json.dumps({"type": "ExecuteIntention", "blockUUID": uuid, "index": 0})
)
await websocket.close()
asyncio.get_event_loop().run_until_complete(f())
def test_create_intentions_block_jsonrpc(self):
async def f():
websocket = await websockets.connect(BASE_WS_URL_JSONRPC)
await websocket.send(
json.dumps(
{
"method": "CreateIntentionsBlock",
"params": {
"node": {
"model": "ProtocolLanguage.sandbox",
"id": {"regularId": 5465070037663859703},
}
},
"id": "req-a-123"
}
)
)
response = json.loads(await websocket.recv())
self.assertEqual("CreateIntentionsBlockAnswer", response["result"]["type"])
self.assertEqual("req-a-123", response["id"])
uuid = response["result"]["blockUUID"]
await websocket.send(
json.dumps({"method": "GetIntentionsBlock",
"params": {"blockUUID": uuid},
"id": "req-a-124"})
)
response = json.loads(await websocket.recv())
self.assertEqual("GetIntentionsBlockAnswer", response["result"]["type"])
self.assertEqual(
[
{"index": 0, "description": "Intention on Protocol Element"},
{"index": 1, "description": "Intention on Protocol"},
],
response["result"]["intentions"],
)
self.assertEqual("req-a-124", response["id"])
await websocket.close()
asyncio.get_event_loop().run_until_complete(f())
def test_delete_intentions_block_jsonrpc(self):
async def f():
websocket = await websockets.connect(BASE_WS_URL_JSONRPC)
await websocket.send(
json.dumps(
{
"method": "CreateIntentionsBlock",
"params": {
"node": {
"model": "ProtocolLanguage.sandbox",
"id": {"regularId": 5465070037663859703},
}
},
"id": "req-a-125"
}
)
)
response = json.loads(await websocket.recv())
self.assertEqual("CreateIntentionsBlockAnswer", response['result']["type"])
uuid = response['result']["blockUUID"]
await websocket.send(
json.dumps({"method": "DeleteIntentionsBlock", "params": {"blockUUID": uuid}})
)
await websocket.send(
json.dumps({"method": "GetIntentionsBlock", "params": {"blockUUID": uuid}})
)
response = json.loads(await websocket.recv())
self.assertEqual("GetIntentionsBlockAnswer", response["result"]["type"])
self.assertEqual(False, response["result"]["result"]["success"])
await websocket.close()
asyncio.get_event_loop().run_until_complete(f())
def test_execute_intention_jsonrpc(self):
async def f():
websocket = await websockets.connect(BASE_WS_URL_JSONRPC)
await websocket.send(
json.dumps(
{
"method": "CreateIntentionsBlock",
"params": {
"node": {
"model": "ProtocolLanguage.sandbox",
"id": {"regularId": 5465070037663859703},
}
}
}
)
)
response = json.loads(await websocket.recv())
self.assertEqual("CreateIntentionsBlockAnswer", response["result"]["type"])
uuid = response["result"]["blockUUID"]
await websocket.send(
json.dumps({"method": "ExecuteIntention", "params": {"blockUUID": uuid, "index": 0}})
)
await websocket.close()
asyncio.get_event_loop().run_until_complete(f())
if __name__ == "__main__":
import os
import sys
sys.path.append(os.getcwd())
unittest.main()
|
184615
|
from functools import partial
import numpy as np
import torch
from torch import nn
from counterfactualms.arch.layers import Conv2d, ConvTranspose2d
class HierarchicalEncoder(nn.Module):
def __init__(self, num_convolutions=3, filters=(16,32,64,128,256), latent_dim=100,
input_size=(1,128,128), use_weight_norm=False, use_spectral_norm=False,
hierarchical_layers=(1,3,5), div_factor=8):
super().__init__()
self.num_convolutions = num_convolutions
self.filters = filters
if use_weight_norm and use_spectral_norm:
raise ValueError('Cannot use both weight norm and spectral norm.')
self.use_weight_norm = use_weight_norm
self.use_spectral_norm = use_spectral_norm
self.hierarchical_layers = hierarchical_layers
self.div_factor = div_factor
self.down_layers = nn.ModuleList([])
self.resolution_layers = nn.ModuleList([])
self.intermediate_shapes = []
self.out_layers = nn.ModuleList([])
cur_channels = input_size[0]
for i, c in enumerate(filters):
resolution_layer = []
for _ in range(0, num_convolutions - 1):
resolution_layer += self._conv_layer(cur_channels, c)
cur_channels = c
self.resolution_layers.append(nn.Sequential(*resolution_layer))
if i in self.hierarchical_layers:
out_channels = max(cur_channels // div_factor, 1)
self.out_layers.append(self._conv(cur_channels, out_channels, 1, bias=True))
self.intermediate_shapes.append(np.array(input_size) // (2 ** i))
self.intermediate_shapes[-1][0] = out_channels
self.down_layers.append(nn.Sequential(*self._down_conv_layer(cur_channels, c)))
cur_channels = c
if len(filters) in self.hierarchical_layers:
self.intermediate_shapes.append(np.array(input_size) // (2 ** len(filters)))
self.intermediate_shapes[-1][0] = cur_channels
self.fc = nn.Sequential(
nn.Linear(np.prod(self.intermediate_shapes[-1]), latent_dim, bias=False),
nn.BatchNorm1d(latent_dim),
nn.LeakyReLU(.1, inplace=True)
)
@property
def _conv(self):
return partial(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
def _conv_layer(self, ci, co):
return [self._conv(ci, co, 3, 1, 1, bias=False),
nn.BatchNorm2d(co, momentum=0.05),
nn.LeakyReLU(.1, inplace=True)]
def _down_conv_layer(self, ci, co):
return [self._conv(ci, co, 4, 2, 1, bias=False),
nn.BatchNorm2d(co, momentum=0.05),
nn.LeakyReLU(.1, inplace=True)]
def forward(self, x):
out = []
c = 0
for i, (conv, down) in enumerate(zip(self.resolution_layers, self.down_layers)):
x = conv(x)
if i in self.hierarchical_layers:
out.append(self.out_layers[c](x))
c += 1
x = down(x)
if len(self.filters) in self.hierarchical_layers:
x = x.view(-1, np.prod(self.intermediate_shapes[-1]))
out.append(self.fc(x))
return out
class HierarchicalDecoder(nn.Module):
def __init__(self, num_convolutions=3, filters=(256,128,64,32,16), latent_dim=100, output_size=(1,128,128),
upconv=False, use_weight_norm=False, use_spectral_norm=False, hierarchical_layers=(1,3,5),
context_dim=4, div_factor=8):
super().__init__()
self.num_convolutions = num_convolutions
self.filters = filters
self.upconv = upconv
if use_weight_norm and use_spectral_norm:
raise ValueError('Cannot use both weight norm and spectral norm.')
self.use_weight_norm = use_weight_norm
self.use_spectral_norm = use_spectral_norm
self.hierarchical_layers = hierarchical_layers
hierarchical_layers_ = [h for h in hierarchical_layers if h != len(filters)]
self.context_dim = context_dim
self.div_factor = div_factor
self.resolution_layers = nn.ModuleList([])
self.up_layers = nn.ModuleList([])
self.intermediate_shapes = []
self.context_attention = nn.ModuleList([])
cur_channels = filters[0]
self.start_context_attention = self._attn(cur_channels)
self.start_up_layer = nn.Sequential(*self._upsample_layer(cur_channels, cur_channels))
if len(filters) in hierarchical_layers:
self.intermediate_shapes.append(np.array(output_size) // (2 ** (len(filters))))
self.intermediate_shapes[-1][0] = cur_channels
for i, c in enumerate(filters[1:], 1):
resolution_layer = []
i = (len(filters) - i)
input_layer = i in hierarchical_layers_
in_channels = max(cur_channels // div_factor, 1)
for j in range(0, num_convolutions - 1):
ci = (in_channels+cur_channels) if j == 0 and input_layer else cur_channels
resolution_layer += self._conv_layer(ci, cur_channels)
self.resolution_layers.append(nn.Sequential(*resolution_layer))
self.context_attention.append(self._attn(cur_channels))
self.up_layers.append(nn.Sequential(*self._upsample_layer(cur_channels, c)))
if input_layer:
self.intermediate_shapes.append(np.array(output_size) // (2 ** i))
self.intermediate_shapes[-1][0] = in_channels
cur_channels = c
final_layer = self._conv_layer(cur_channels, cur_channels)
final_layer.append(self._conv(cur_channels, output_size[0], 1, 1, bias=True))
self.final_layer = nn.Sequential(*final_layer)
self.fc = nn.Sequential(
nn.Linear(latent_dim, np.prod(self.intermediate_shapes[0]), bias=False),
nn.BatchNorm1d(np.prod(self.intermediate_shapes[0])),
nn.LeakyReLU(.1, inplace=True)
)
@property
def _conv(self):
return partial(Conv2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
@property
def _conv_transpose(self):
return partial(ConvTranspose2d, use_weight_norm=self.use_weight_norm, use_spectral_norm=self.use_spectral_norm)
def _conv_layer(self, ci, co):
return [self._conv(ci, co, 3, 1, 1, bias=False),
nn.BatchNorm2d(co, momentum=0.05),
nn.LeakyReLU(.1, inplace=True)]
def _upsample_layer(self, ci, co):
if self.upconv:
layer = [nn.Upsample(scale_factor=2, mode='nearest'),
self._conv(ci, co, kernel_size=5, stride=1, padding=2, bias=False)]
else:
layer = [self._conv_transpose(ci, co, kernel_size=4, stride=2, padding=1, bias=False)]
layer += [nn.BatchNorm2d(co, momentum=0.05),
nn.LeakyReLU(.1, inplace=True)]
return layer
def _attn(self, co):
hidden_dim = max(co // 4, self.context_dim)
return nn.Sequential(nn.Linear(self.context_dim, hidden_dim),
nn.LeakyReLU(0.1, inplace=True),
nn.Linear(hidden_dim, co),
nn.Sigmoid())
def forward(self, x, ctx):
assert x[0].size(0) == ctx.size(0)
batch_size = ctx.size(0)
layers = zip(self.resolution_layers, self.up_layers, self.context_attention)
ctx_attn = self.start_context_attention(ctx).view(batch_size, -1, 1, 1)
y = self.fc(x.pop()).view(-1, *self.intermediate_shapes[0])
y = self.start_up_layer(y) * ctx_attn
for i, (conv, up, attn) in enumerate(layers, 1):
i = len(self.filters) - i
output_layer = i in self.hierarchical_layers
ctx_attn = attn(ctx).view(batch_size, -1, 1, 1)
if output_layer:
y = torch.cat([y, x.pop()], 1)
y = conv(y) * ctx_attn
y = up(y)
y = self.final_layer(y)
return y
if __name__ == "__main__":
hl = (1, 2, 3, 4, 5)
filters = [20, 40, 80, 160, 320]
div_factor = 80
img_shape = (3,128,128)
enc = HierarchicalEncoder(
hierarchical_layers=hl, filters=filters,
div_factor=div_factor, input_size=img_shape
)
dec = HierarchicalDecoder(
hierarchical_layers=hl, filters=filters[::-1],
div_factor=div_factor, output_size=img_shape
)
print(enc.intermediate_shapes)
print(dec.intermediate_shapes)
ctx = torch.randn(2, 4)
x = torch.randn(2, *img_shape)
y = enc(x)
z = dec(y, ctx)
assert z.shape == x.shape
print(enc)
print(dec)
|
184638
|
import lief
def lief_from_raw(bytes):
"""Create a lief binary object from raw bytes"""
b_list = list(bytes)
lief_binary = lief.parse(raw=b_list)
return lief_binary
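# Hedged usage sketch ('sample.exe' is a hypothetical local binary):
#   with open('sample.exe', 'rb') as f:
#       binary = lief_from_raw(f.read())
#   print(binary.entrypoint if binary else 'parse failed')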
|
184687
|
graphite_url = 'http://graphite.intra.douban.com'
graphite_index_url = graphite_url + '/metrics/index.json'
metrics_file = 'metrics.json'
diamond_cache = 'diamond.cache'
debug = False
listen_host = '0.0.0.0'
listen_port = 8808
try:
from local_config import *
except ImportError:
pass
|
184688
|
import torch
import torch.nn as nn
# model
class net_le(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
return torch.le(inputs[0], inputs[1])
_model_ = net_le()
# dummy input for onnx generation
_dummy_ = [torch.randn(1, 2, 3, 3), torch.randn(1, 2, 3, 3)]
|
184705
|
from pepnet.encoder import Encoder
from nose.tools import eq_
import numpy as np
def test_encoder_index_lists():
encoder = Encoder()
S_idx = encoder.index_dict["S"]
A_idx = encoder.index_dict["A"]
index_lists = encoder.encode_index_lists(["SSS", "AAA", "SAS"])
eq_(index_lists, [
[S_idx, S_idx, S_idx],
[A_idx, A_idx, A_idx],
[S_idx, A_idx, S_idx]
])
def test_encoder_prepare_sequences_padding():
encoder = Encoder()
eq_(encoder.prepare_sequences(["SISI"], 5), ["SISI-"])
def test_encoder_prepare_sequences_start_token():
encoder = Encoder(add_start_tokens=True)
eq_(encoder.prepare_sequences(["SISI"], 5), ["^SISI-"])
def test_encoder_prepare_sequences_stop_token():
encoder = Encoder(add_stop_tokens=True)
eq_(encoder.prepare_sequences(["SISI"], 5), ["SISI$-"])
def test_encoder_index_array():
encoder = Encoder()
S_idx = encoder.index_dict["S"]
A_idx = encoder.index_dict["A"]
assert S_idx > 0
assert A_idx > 0
X = encoder.encode_index_array(["SSS", "AAA", "SASA"], max_peptide_length=4)
expected = np.array([
[S_idx, S_idx, S_idx, 0],
[A_idx, A_idx, A_idx, 0],
[S_idx, A_idx, S_idx, A_idx]
])
assert (X == expected).all()
def test_encoder_FOFE():
# turn off the gap character '-' used for ends of shorter sequences
encoder = Encoder(variable_length_sequences=False)
x = encoder.encode_FOFE(["AAA", "SSS", "SASA"])
eq_(x.shape, (3, 20))
def test_encoder_FOFE_bidirectional():
# turn off the gap character '-' used for ends of shorter sequences
encoder = Encoder(variable_length_sequences=False)
x = encoder.encode_FOFE(["AAA", "SSS", "SASA"], bidirectional=True)
eq_(x.shape, (3, 40))
def test_encoder_blosum():
encoder = Encoder(variable_length_sequences=False)
x = encoder.encode_blosum(["AAA", "SSS", "EEE"])
eq_(x.shape, (3, 3, 20))
def test_encoder_pmbec():
encoder = Encoder(variable_length_sequences=False)
x = encoder.encode_pmbec(["AAA", "SSS", "EEE"])
eq_(x.shape, (3, 3, 20))
def test_encoder_onehot():
encoder = Encoder(variable_length_sequences=False)
x = encoder.encode_onehot(["AAA", "SSS", "EEE"])
eq_(x.shape, (3, 3, 20))
def test_encoder_blosum_with_positional_features():
encoder = Encoder(
variable_length_sequences=False,
add_normalized_position=True,
add_normalized_centrality=True)
x = encoder.encode_blosum(["AAA", "SSS", "EEE"])
eq_(x.shape, (3, 3, 22))
def test_encoder_pmbec_with_positional_features():
encoder = Encoder(
variable_length_sequences=False,
add_normalized_position=True,
add_normalized_centrality=True)
x = encoder.encode_pmbec(["AAA", "SSS", "EEE"])
eq_(x.shape, (3, 3, 22))
def test_encoder_onehot_with_positional_features():
encoder = Encoder(
variable_length_sequences=False,
add_normalized_position=True,
add_normalized_centrality=True)
x = encoder.encode_onehot(["AAA", "SSS", "EEE"])
eq_(x.shape, (3, 3, 22))
|
184724
|
def multiplication_table(row, col):
return [range(a, a * col + 1, a) for a in xrange(1, row + 1)]
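# Example (Python 2, since xrange is used):
#   >>> multiplication_table(3, 4)
#   [[1, 2, 3, 4], [2, 4, 6, 8], [3, 6, 9, 12]]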
|
184728
|
from _tutorial import *
explorerhat = None
name = ''
horse = '''
>>\.
/_ )`.
/ _)`^)`. _.---. _
(_,' \ `^-)"" `.\\
| | \\
\ / |
/ \ /.___.'\ (\ (_
< ,"|| \ |`. \`-'
\\\\ () )| )/
hjw |_>|> /_] //
/_] /_]'''
def importme():
global explorerhat
try:
import explorerhat
except ImportError:
output(horse)
type_write("\nWoah! Hold your horses, you've not installed the library!")
time.sleep(0.2)
type_write("\nI'm going to send you back to the command line where you should type:")
time.sleep(0.2)
type_write("\nsudo pip install explorerhat")
time.sleep(0.2)
type_write("\nGot it?")
wait_for_space()
exit()
def check_for_pro():
try:
explorerhat.analog.one.read()
    except Exception:
type_write_warning("Uh oh! You need an Explorer HAT Pro!")
time.sleep(1)
exit()
def get_name():
global name
type_write("Hi! Who are you?\n")
prompt()
return get_input()
add_placeholder("{name}", get_name())
|
184752
|
from unittest import TestCase
from irlib.progress import Progress
class TestProgress(TestCase):
def setUp(self):
self.p = Progress(n=1002, percent=10)
def test_progress_counter(self):
total = 0
for i in range(0,1002):
total += self.p.show(message='Testing progress:', silent=True)
self.assertEqual(total,10)
|
184795
|
import os
from PIL import Image
class SlideCrack(object):
def __init__(self, gap_img, bg):
self.gap_img = gap_img
self.bg = bg
@staticmethod
def pixel_is_equal(image1, image2, x, y):
"""
判断两张图片的像素是否相等,不想等即为缺口位置
:param image1:
:param image2:
:param x: x坐标
:param y: y 坐标
:return:
"""
# 取两个图片的像素点
pixel1 = image1.load()[x, y]
pixel2 = image2.load()[x, y]
threshold = 60 # 像素色差
if abs(pixel1[0]-pixel2[0]) < threshold and abs(pixel1[1]-pixel2[1]) < threshold and abs(pixel1[2]-pixel2[2]) <threshold:
return True
else:
return False
def get_gap(self, image1, image2):
"""
获取缺口位置
:param image1:完整图片
:param image2: 带缺口的图片
:return:
"""
left = 50 # 设置一个起始量,因为验证码一般不可能在左边,加快识别速度
for i in range(left, image1.size[0]):
for j in range(image1.size[1]):
if not self.pixel_is_equal(image1, image2, i, j):
left = i
return left
return left
def run(self):
image1 = Image.open(self.bg)
image2 = Image.open(self.gap_img)
        # locate the gap position
gap = self.get_gap(image1, image2)
return gap
def get_distance():
img1 = "gap.png" # 带缺口的背景图
img2 = "full.png" # 不带缺口的背景图
gt = SlideCrack(img1, img2)
val = gt.run()
os.remove('full.webp')
os.remove('full.png')
os.remove('gap.webp')
os.remove('gap.png')
return val
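# Hedged usage sketch: after the captcha backgrounds have been downloaded as
# full.png/full.webp and gap.png/gap.webp (the names hard-coded above), the
# horizontal offset of the gap is simply:
#   distance = get_distance()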
|
184810
|
import logging
import os
import csv
import codecs
from decimal import Decimal as D
import nose
from . import pygrowup
from six.moves import zip
class WHOResult(object):
def __init__(self, indicator, values):
self.indicator = indicator
columns = 'id,region,GENDER,agemons,WEIGHT,_HEIGHT,measure,oedema,HEAD,MUAC,TRI,SUB,SW,agedays,CLENHEI,CBMI,ZWEI,ZLEN,ZWFL,ZBMI,FWEI,FLEN,FWFL,FBMI'
data = list(zip(columns.split(','), values))
for k, v in data:
setattr(self, k.lower(), v)
self.age = self.agemons
if int(self.gender) == 1:
self.gender = "M"
elif int(self.gender) == 2:
self.gender = "F"
else:
self.gender = None
def __repr__(self):
rep = self.indicator + " " + str(self.id) + " " + self.agemons
if all([self.gender, self.height]):
rep = rep + " (" + ", ".join([self.gender, self.height]) + ")"
return rep
@property
def result(self):
if self.indicator == "lhfa":
return self.zlen
if self.indicator in ["wfl", "wfh"]:
return self.zwfl
if self.indicator == "wfa":
return self.zwei
if self.indicator == "bmifa":
return self.zbmi
@property
def measurement(self):
if self.indicator == "lhfa":
return self._height
if self.indicator in ["wfl", "wfh"]:
return self.weight
if self.indicator == "wfa":
return self.weight
if self.indicator == "bmifa":
return self.cbmi
@property
def height(self):
if self.indicator in ["lhfa", "wfl", "wfh", "wfa"]:
return self._height
return None
def compare_result(who):
our_result = None
logging.debug(who.indicator.upper() + " (" + str(who.measurement) + ") " + who.gender + " " + who.age + " " + str(who.height))
calc = pygrowup.Calculator(include_cdc=True, log_level='DEBUG')
if who.measurement:
our_result = calc.zscore_for_measurement(who.indicator, who.measurement,
who.age, who.gender, who.height)
logging.info("THEM: " + str(who.result))
if who.result not in ['', ' ', None]:
if our_result is not None:
logging.info("US : " + str(our_result))
diff = calc.context.subtract(D(who.result), D(our_result))
logging.info("DIFF: " + str(abs(diff)))
assert abs(diff) <= D('1')
def test_generator():
# software uses error-prone floating-point calculations
module_dir = os.path.split(os.path.abspath(__file__))[0]
test_file = os.path.join(module_dir, 'testdata', 'survey_z_rc.csv')
csvee = codecs.open(test_file, "rU", encoding='utf-8', errors='ignore')
reader = csv.reader(csvee, dialect="excel")
# skip column labels
next(reader)
for row in reader:
for indicator in ["lhfa", "wfl", "wfh"]:
who = WHOResult(indicator, row)
# ignore these two cases
if who.id in ["287", "381"]:
continue
# also ignore other cases that are missing
# height or length data required for these calculations
if who.height not in ['', ' ', None]:
yield compare_result, who
for indicator in ["wfa", "bmifa"]:
who = WHOResult(indicator, row)
# ignore these two cases
if who.id in ["287", "381"]:
continue
yield compare_result, who
def test_bmifa_bug():
calc = pygrowup.Calculator(include_cdc=True, log_level='DEBUG')
# test to verify fix of https://github.com/ewheeler/pygrowup/issues/6
# where age of exactly 3 months would not be resolved to either
# 0-13 week table or to 0-2 years table
does_not_raise = calc.zscore_for_measurement('bmifa', 32.0,
3, 'F', 50)
assert does_not_raise == D('7.41')
should_use_bmifa_girls_0_13 = calc.zscore_for_measurement('bmifa', 32.0,
2.9, 'F', 50)
assert should_use_bmifa_girls_0_13 == D('7.53')
should_use_bmifa_girls_0_2 = calc.zscore_for_measurement('bmifa', 32.0,
3.1, 'F', 50)
assert should_use_bmifa_girls_0_2 == D('7.41')
if __name__ == '__main__':
nose.main()
|
184862
|
from ocdskit.cli.__main__ import main
from tests import assert_streaming
def test_command(capsys, monkeypatch):
assert_streaming(capsys, monkeypatch, main, ['split-record-packages', '1'],
['realdata/record-package_package.json'], ['realdata/record-package_split.json'])
|
184863
|
import math
import sys
import time
import numpy as np
from fj_refactored import fisher_jenks, fj_generate_sample
def testfull():
"""
Tests the fully enumerated Fisher-Jenks implementation
"""
cores = [1,2,4,16,32]
classes = [5,6,7]
data_sizes = [500, 1000, 2500, 5000, 7500, 10000, 12500, 15000, 17500, 20000, 22500, 25000]
for c in cores:
for d in data_sizes:
for k in classes:
data = np.random.ranf(size=d)
try:
t1 = time.time()
#wrapped in try since we will blow out RAM at some point
classification = fisher_jenks(data, k, c)
t2 = time.time()
print "Processed {0} data points in {1} classes using {2} cores. Total time: {3}".format(d, k, c, t2-t1)
data = None
except KeyboardInterrupt:
print "Aborting"
sys.exit(1)
except:
print "FAILURE: {0} data points.".format(d)
def testsample():
"""
Tests the sampled Fisher-Jenks implementation
"""
cores = [1,2,4,16,32]
classes = [5,6,7]
data_sizes = [10000, 20000, 40000, 80000, 160000, 320000, 640000, 1280000,
2560000, 5120000, 10240000, 20480000, 40960000, 81920000,
163840000, 327680000, 655360000]
for c in cores:
for d in data_sizes:
for k in classes:
#Generate the test data and save to disk
data = np.random.ranf(size=d)
nobs = len(data)
np.save('testfile.npy', data)
data = None
#Compute the sample size as the sqrt of nobs
sqrt = math.sqrt(nobs)
if sqrt > 40000:
sqrt = 40000
pct = sqrt / float(d)
#Load the data back into memory as a mmapped file
f = np.load('testfile.npy', mmap_mode='r+')
t1 = time.time()
data = fj_generate_sample(f, pct=pct)
t2 = time.time()
print "Randomly sampling {0} percent of {1} observations for a run size of {2} observations took {3} seconds.".format(pct, nobs, sqrt, t2 - t1)
try:
t1 = time.time()
#wrapped in try since we will blow out RAM at some point
classification = fisher_jenks(data, k, c)
t2 = time.time()
print "Processed {0} data points in {1} classes using {2} cores. Total time: {3}".format(d, k, c, t2-t1)
except KeyboardInterrupt:
print "Aborting"
sys.exit(1)
except:
print "FAILURE: {0} data points.".format(d)
if __name__ == '__main__':
#Test the fully enumerated FJ
testfull()
#Test FJ using sampling
testsample()
|
184877
|
import numpy as np
from imread import ijrois
from . import file_path
def test_rois_smoke():
rois = ijrois.read_roi_zip(file_path('rois.zip'))
assert len(rois) == 4
r = ijrois.read_roi(open(file_path('0186-0099.roi'), 'rb'))
assert any([np.array_equal(ri, r) for ri in rois])
|
184881
|
import pandas as pd
import sqlalchemy
from constants import DB_FOLDER, SYMBOL
import matplotlib.pyplot as plt
def create_engine(symbol):
engine = sqlalchemy.create_engine(f"sqlite:///{DB_FOLDER}/{symbol}-stream.db")
return engine
def fetch_dataframe(symbol, engine):
try:
return pd.read_sql(symbol, engine)
except Exception as e:
print(f'Exception: {e}')
return None
def plot_stats(symbol, dataframe, y_axis="Price", x_axis="Time"):
dataframe.plot(title=symbol, x=x_axis, y=y_axis)
plt.show()
def create_frame(msg):
pd.set_option("precision", 18)
df = pd.DataFrame([msg])
df = df.loc[:, ["s", "E", "p"]]
df.columns = ["symbol", "Time", "Price"]
df.Price = df.Price.astype(float)
df.Time = pd.to_datetime(df.Time, unit="ms")
return df
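if __name__ == "__main__":
    # Quick sanity check with a hand-made trade message (field values are
    # illustrative, not real market data; assumes the project's `constants`
    # module is importable since it is loaded at the top of this file).
    sample_msg = {"s": "BTCUSDT", "E": 1609459200000, "p": "29000.01"}
    print(create_frame(sample_msg))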
|
184909
|
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.utils as nn_utils
import torch.backends.cudnn as cudnn
from torch.nn import SyncBatchNorm
import torch.optim.lr_scheduler as lr_scheduler
from torch.nn.parallel import DistributedDataParallel
import utils
from utils import CONFIG
import networks
class Trainer(object):
def __init__(self,
train_dataloader,
test_dataloader,
logger,
tb_logger):
# Save GPU memory.
cudnn.benchmark = False
self.train_dataloader = train_dataloader
self.test_dataloader = test_dataloader
self.logger = logger
self.tb_logger = tb_logger
self.model_config = CONFIG.model
self.train_config = CONFIG.train
self.log_config = CONFIG.log
self.loss_dict = {'rec': None,
'comp': None,
'smooth_l1':None,
'grad':None,
'gabor':None}
self.test_loss_dict = {'rec': None,
'smooth_l1':None,
'mse':None,
'sad':None,
'grad':None,
'gabor':None}
self.grad_filter = torch.tensor(utils.get_gradfilter()).cuda()
self.gabor_filter = torch.tensor(utils.get_gaborfilter(16)).cuda()
self.build_model()
self.resume_step = None
self.best_loss = 1e+8
utils.print_network(self.G, CONFIG.version)
if self.train_config.resume_checkpoint:
self.logger.info('Resume checkpoint: {}'.format(self.train_config.resume_checkpoint))
self.restore_model(self.train_config.resume_checkpoint)
if self.model_config.imagenet_pretrain and self.train_config.resume_checkpoint is None:
self.logger.info('Load Imagenet Pretrained: {}'.format(self.model_config.imagenet_pretrain_path))
if self.model_config.arch.encoder == "vgg_encoder":
utils.load_VGG_pretrain(self.G, self.model_config.imagenet_pretrain_path)
else:
utils.load_imagenet_pretrain(self.G, self.model_config.imagenet_pretrain_path)
def build_model(self):
self.G = networks.get_generator(encoder=self.model_config.arch.encoder, decoder=self.model_config.arch.decoder)
self.G.cuda()
if CONFIG.dist:
self.logger.info("Using pytorch synced BN")
self.G = SyncBatchNorm.convert_sync_batchnorm(self.G)
self.G_optimizer = torch.optim.Adam(self.G.parameters(),
lr = self.train_config.G_lr,
betas = [self.train_config.beta1, self.train_config.beta2])
if CONFIG.dist:
# SyncBatchNorm only supports DistributedDataParallel with single GPU per process
self.G = DistributedDataParallel(self.G, device_ids=[CONFIG.local_rank], output_device=CONFIG.local_rank)
else:
self.G = nn.DataParallel(self.G)
self.build_lr_scheduler()
def build_lr_scheduler(self):
"""Build cosine learning rate scheduler."""
self.G_scheduler = lr_scheduler.CosineAnnealingLR(self.G_optimizer,
T_max=self.train_config.total_step
- self.train_config.warmup_step)
def reset_grad(self):
"""Reset the gradient buffers."""
self.G_optimizer.zero_grad()
def restore_model(self, resume_checkpoint):
"""
        Restore the trained generator.
:param resume_checkpoint: File name of checkpoint
:return:
"""
pth_path = os.path.join(self.log_config.checkpoint_path, '{}.pth'.format(resume_checkpoint))
checkpoint = torch.load(pth_path, map_location = lambda storage, loc: storage.cuda(CONFIG.gpu))
self.resume_step = checkpoint['iter']
self.logger.info('Loading the trained models from step {}...'.format(self.resume_step))
self.G.load_state_dict(checkpoint['state_dict'], strict=True)
if not self.train_config.reset_lr:
if 'opt_state_dict' in checkpoint.keys():
try:
self.G_optimizer.load_state_dict(checkpoint['opt_state_dict'])
except ValueError as ve:
self.logger.error("{}".format(ve))
else:
self.logger.info('No Optimizer State Loaded!!')
if 'lr_state_dict' in checkpoint.keys():
try:
self.G_scheduler.load_state_dict(checkpoint['lr_state_dict'])
except ValueError as ve:
self.logger.error("{}".format(ve))
else:
self.G_scheduler = lr_scheduler.CosineAnnealingLR(self.G_optimizer,
T_max=self.train_config.total_step - self.resume_step - 1)
if 'loss' in checkpoint.keys():
self.best_loss = checkpoint['loss']
def train(self):
data_iter = iter(self.train_dataloader)
if self.train_config.resume_checkpoint:
start = self.resume_step + 1
else:
start = 0
moving_max_grad = 0
moving_grad_moment = 0.999
max_grad = 0
for step in range(start, self.train_config.total_step + 1):
            try:
                image_dict = next(data_iter)
            except StopIteration:
                # dataloader exhausted: restart the iterator for the next pass
                data_iter = iter(self.train_dataloader)
                image_dict = next(data_iter)
image, alpha, trimap = image_dict['image'], image_dict['alpha'], image_dict['trimap']
image = image.cuda()
alpha = alpha.cuda()
trimap = trimap.cuda()
# train() of DistributedDataParallel has no return
self.G.train()
log_info = ""
loss = 0
"""===== Update Learning Rate ====="""
if step < self.train_config.warmup_step and self.train_config.resume_checkpoint is None:
cur_G_lr = utils.warmup_lr(self.train_config.G_lr, step + 1, self.train_config.warmup_step)
utils.update_lr(cur_G_lr, self.G_optimizer)
else:
self.G_scheduler.step()
cur_G_lr = self.G_scheduler.get_lr()[0]
"""===== Forward G ====="""
alpha_pred, info_dict = self.G(image, trimap) # info_dict: intermediate feature of networks like attention
weight = utils.get_unknown_tensor(trimap)
"""===== Calculate Loss ====="""
if self.train_config.rec_weight > 0:
self.loss_dict['rec'] = self.regression_loss(alpha_pred, alpha, loss_type='l1', weight=weight) \
* self.train_config.rec_weight
if self.train_config.smooth_l1_weight > 0:
self.loss_dict['smooth_l1'] = self.smooth_l1(alpha_pred, alpha, weight=weight) \
* self.train_config.smooth_l1_weight
if self.train_config.comp_weight > 0:
self.loss_dict['comp'] = self.composition_loss(alpha_pred, image_dict['fg'].cuda(),
image_dict['bg'].cuda(), image, weight=weight) \
* self.train_config.comp_weight
if self.train_config.grad_weight > 0:
self.loss_dict['grad'] = self.grad_loss(alpha_pred, alpha, weight=weight, grad_filter=self.grad_filter) \
* self.train_config.grad_weight
if self.train_config.gabor_weight > 0:
self.loss_dict['gabor'] = self.gabor_loss(alpha_pred, alpha, weight=weight, gabor_filter=self.gabor_filter) \
* self.train_config.gabor_weight
for loss_key in self.loss_dict.keys():
if self.loss_dict[loss_key] is not None and loss_key in ['rec', 'comp', 'smooth_l1', 'grad', 'gabor']:
loss += self.loss_dict[loss_key]
"""===== Back Propagate ====="""
self.reset_grad()
loss.backward()
"""===== Clip Large Gradient ====="""
if self.train_config.clip_grad:
if moving_max_grad == 0:
moving_max_grad = nn_utils.clip_grad_norm_(self.G.parameters(), 1e+6)
max_grad = moving_max_grad
else:
max_grad = nn_utils.clip_grad_norm_(self.G.parameters(), 2 * moving_max_grad)
moving_max_grad = moving_max_grad * moving_grad_moment + max_grad * (
1 - moving_grad_moment)
"""===== Update Parameters ====="""
self.G_optimizer.step()
"""===== Write Log and Tensorboard ====="""
# stdout log
if step % self.log_config.logging_step == 0:
# reduce losses from GPUs
if CONFIG.dist:
self.loss_dict = utils.reduce_tensor_dict(self.loss_dict, mode='mean')
loss = utils.reduce_tensor(loss)
# create logging information
for loss_key in self.loss_dict.keys():
if self.loss_dict[loss_key] is not None:
log_info += loss_key.upper() + ": {:.4f}, ".format(self.loss_dict[loss_key])
self.logger.debug("Image tensor shape: {}. Trimap tensor shape: {}".format(image.shape, trimap.shape))
log_info = "[{}/{}], ".format(step, self.train_config.total_step) + log_info
log_info += "lr: {:6f}".format(cur_G_lr)
self.logger.info(log_info)
# tensorboard
if step % self.log_config.tensorboard_step == 0 or step == start: # and step > start:
self.tb_logger.scalar_summary('Loss', loss, step)
# detailed losses
for loss_key in self.loss_dict.keys():
if self.loss_dict[loss_key] is not None:
self.tb_logger.scalar_summary('Loss_' + loss_key.upper(),
self.loss_dict[loss_key], step)
self.tb_logger.scalar_summary('LearnRate', cur_G_lr, step)
if self.train_config.clip_grad:
self.tb_logger.scalar_summary('Moving_Max_Grad', moving_max_grad, step)
self.tb_logger.scalar_summary('Max_Grad', max_grad, step)
# write images to tensorboard
if step % self.log_config.tensorboard_image_step == 0 or step == start:
if self.model_config.trimap_channel == 3:
trimap = trimap.argmax(dim=1, keepdim=True)
alpha_pred[trimap==2] = 1
alpha_pred[trimap==0] = 0
image_set = {'image': (utils.normalize_image(image[-1, ...]).data.cpu().numpy()
* 255).astype(np.uint8),
'trimap': (trimap[-1, ...].data.cpu().numpy() * 127).astype(np.uint8),
'alpha': (alpha[-1, ...].data.cpu().numpy() * 255).astype(np.uint8),
'alpha_pred': (alpha_pred[-1, ...].data.cpu().numpy() * 255).astype(np.uint8)}
if info_dict is not None:
for key in info_dict.keys():
if key.startswith('offset'):
image_set[key] = utils.flow_to_image(info_dict[key][0][-1,...].data.cpu()
.numpy()).transpose([2, 0, 1]).astype(np.uint8)
# write softmax_scale to offset image
scale = info_dict[key][1].cpu()
image_set[key] = utils.put_text(image_set[key], 'unknown: {:.2f}, known: {:.2f}'
.format(scale[-1,0].item(), scale[-1,1].item()))
else:
image_set[key] = (utils.normalize_image(info_dict[key][-1,...]).data.cpu().numpy()
* 255).astype(np.uint8)
self.tb_logger.image_summary(image_set, step)
"""===== TEST ====="""
if ((step % self.train_config.val_step) == 0 or step == self.train_config.total_step):# and step > start:
self.G.eval()
test_loss = 0
log_info = ""
self.test_loss_dict['mse'] = 0
self.test_loss_dict['sad'] = 0
for loss_key in self.loss_dict.keys():
if loss_key in self.test_loss_dict and self.loss_dict[loss_key] is not None:
self.test_loss_dict[loss_key] = 0
with torch.no_grad():
for image_dict in self.test_dataloader:
image, alpha, trimap = image_dict['image'], image_dict['alpha'], image_dict['trimap']
alpha_shape = image_dict['alpha_shape']
image = image.cuda()
alpha = alpha.cuda()
trimap = trimap.cuda()
alpha_pred, info_dict = self.G(image, trimap)
h, w = alpha_shape
alpha_pred = alpha_pred[..., :h, :w]
trimap = trimap[..., :h, :w]
weight = utils.get_unknown_tensor(trimap)
# value of MSE/SAD here is different from test.py and matlab version
self.test_loss_dict['mse'] += self.mse(alpha_pred, alpha, weight)
self.test_loss_dict['sad'] += self.sad(alpha_pred, alpha, weight)
if self.train_config.rec_weight > 0:
self.test_loss_dict['rec'] += self.regression_loss(alpha_pred, alpha, weight=weight) \
* self.train_config.rec_weight
if self.train_config.smooth_l1_weight > 0:
self.test_loss_dict['smooth_l1'] += self.smooth_l1(alpha_pred, alpha, weight=weight) \
* self.train_config.smooth_l1_weight
if self.train_config.grad_weight > 0:
self.test_loss_dict['grad'] = self.grad_loss(alpha_pred, alpha, weight=weight,
grad_filter=self.grad_filter) \
* self.train_config.grad_weight
if self.train_config.gabor_weight > 0:
self.test_loss_dict['gabor'] = self.gabor_loss(alpha_pred, alpha, weight=weight,
gabor_filter=self.gabor_filter) \
* self.train_config.gabor_weight
# reduce losses from GPUs
if CONFIG.dist:
self.test_loss_dict = utils.reduce_tensor_dict(self.test_loss_dict, mode='mean')
"""===== Write Log and Tensorboard ====="""
# stdout log
for loss_key in self.test_loss_dict.keys():
if self.test_loss_dict[loss_key] is not None:
self.test_loss_dict[loss_key] /= len(self.test_dataloader)
# logging
log_info += loss_key.upper()+": {:.4f} ".format(self.test_loss_dict[loss_key])
self.tb_logger.scalar_summary('Loss_'+loss_key.upper(),
self.test_loss_dict[loss_key], step, phase='test')
if loss_key in ['rec', 'smooth_l1', 'grad', 'gabor']:
test_loss += self.test_loss_dict[loss_key]
self.logger.info("TEST: LOSS: {:.4f} ".format(test_loss)+log_info)
self.tb_logger.scalar_summary('Loss', test_loss, step, phase='test')
if self.model_config.trimap_channel == 3:
trimap = trimap.argmax(dim=1, keepdim=True)
alpha_pred[trimap==2] = 1
alpha_pred[trimap==0] = 0
image_set = {'image': (utils.normalize_image(image[-1, ...]).data.cpu().numpy()
* 255).astype(np.uint8),
'trimap': (trimap[-1, ...].data.cpu().numpy() * 127).astype(np.uint8),
'alpha': (alpha[-1, ...].data.cpu().numpy() * 255).astype(np.uint8),
'alpha_pred': (alpha_pred[-1, ...].data.cpu().numpy() * 255).astype(np.uint8)}
if info_dict is not None:
for key in info_dict.keys():
if key.startswith('offset'):
image_set[key] = utils.flow_to_image(info_dict[key][0][-1,...].data.cpu()
.numpy()).transpose([2, 0, 1]).astype(np.uint8)
# write softmax_scale to offset image
scale = info_dict[key][1].cpu()
image_set[key] = utils.put_text(image_set[key], 'unknown: {:.2f}, known: {:.2f}'
.format(scale[-1,0].item(), scale[-1,1].item()))
else:
image_set[key] = (utils.normalize_image(info_dict[key][-1,...]).data.cpu().numpy()
* 255).astype(np.uint8)
self.tb_logger.image_summary(image_set, step, phase='test')
"""===== Save Model ====="""
if (step % self.log_config.checkpoint_step == 0 or step == self.train_config.total_step) \
and CONFIG.local_rank == 0 and (step > start):
                self.logger.info('Saving the trained models from step {}...'.format(step))
self.save_model("latest_model", step, loss)
if self.test_loss_dict['mse'] < self.best_loss:
self.best_loss = self.test_loss_dict['mse']
self.save_model("best_model", step, loss)
    def save_model(self, checkpoint_name, step, loss):
        """Save the trained generator together with optimizer and scheduler state."""
        torch.save({
            'iter': step,
            'loss': loss,
            'state_dict': self.G.state_dict(),
            'opt_state_dict': self.G_optimizer.state_dict(),
            'lr_state_dict': self.G_scheduler.state_dict()
        }, os.path.join(self.log_config.checkpoint_path, '{}.pth'.format(checkpoint_name)))
@staticmethod
def regression_loss(logit, target, loss_type='l1', weight=None):
"""
Alpha reconstruction loss
:param logit:
:param target:
:param loss_type: "l1" or "l2"
:param weight: tensor with shape [N,1,H,W] weights for each pixel
:return:
"""
if weight is None:
if loss_type == 'l1':
return F.l1_loss(logit, target)
elif loss_type == 'l2':
return F.mse_loss(logit, target)
else:
raise NotImplementedError("NotImplemented loss type {}".format(loss_type))
else:
if loss_type == 'l1':
return F.l1_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
elif loss_type == 'l2':
return F.mse_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
else:
raise NotImplementedError("NotImplemented loss type {}".format(loss_type))
@staticmethod
def smooth_l1(logit, target, weight):
loss = torch.sqrt((logit * weight - target * weight)**2 + 1e-6)
loss = torch.sum(loss) / (torch.sum(weight) + 1e-8)
return loss
@staticmethod
def mse(logit, target, weight):
# return F.mse_loss(logit * weight, target * weight, reduction='sum') / (torch.sum(weight) + 1e-8)
return Trainer.regression_loss(logit, target, loss_type='l2', weight=weight)
@staticmethod
def sad(logit, target, weight):
return F.l1_loss(logit * weight, target * weight, reduction='sum') / 1000
@staticmethod
def composition_loss(alpha, fg, bg, image, weight, loss_type='l1'):
"""
Alpha composition loss
"""
merged = fg * alpha + bg * (1 - alpha)
return Trainer.regression_loss(merged, image, loss_type=loss_type, weight=weight)
@staticmethod
def gabor_loss(logit, target, gabor_filter, loss_type='l2', weight=None):
""" pass """
gabor_logit = F.conv2d(logit, weight=gabor_filter, padding=2)
gabor_target = F.conv2d(target, weight=gabor_filter, padding=2)
return Trainer.regression_loss(gabor_logit, gabor_target, loss_type=loss_type, weight=weight)
@staticmethod
def grad_loss(logit, target, grad_filter, loss_type='l1', weight=None):
""" pass """
grad_logit = F.conv2d(logit, weight=grad_filter, padding=1)
grad_target = F.conv2d(target, weight=grad_filter, padding=1)
grad_logit = torch.sqrt((grad_logit * grad_logit).sum(dim=1, keepdim=True) + 1e-8)
grad_target = torch.sqrt((grad_target * grad_target).sum(dim=1, keepdim=True) + 1e-8)
return Trainer.regression_loss(grad_logit, grad_target, loss_type=loss_type, weight=weight)
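if __name__ == '__main__':
    # Sanity check of the weighted regression loss on random CPU tensors
    # (assumes the repo's utils/networks modules are importable, since they
    # are loaded at module import time; the static loss helpers themselves
    # need neither CONFIG nor a GPU).
    _pred, _gt = torch.rand(2, 1, 8, 8), torch.rand(2, 1, 8, 8)
    _weight = torch.ones(2, 1, 8, 8)
    print(Trainer.regression_loss(_pred, _gt, loss_type='l1', weight=_weight))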
|
184922
|
import numpy as np
import torch.nn as nn
from networks.ResidualBlocks import ResidualBlock1dTransposeConv
def make_res_block_decoder_feature_generator(channels_in, channels_out, a_val=2.0, b_val=0.3):
    upsample = None
if channels_in != channels_out:
upsample = nn.Sequential(nn.ConvTranspose1d(channels_in, channels_out,
kernel_size=1,
stride=1,
padding=0,
dilation=1,
output_padding=0),
nn.BatchNorm1d(channels_out))
layers = []
layers.append(ResidualBlock1dTransposeConv(channels_in, channels_out,
kernelsize=1,
stride=1,
padding=0,
dilation=1,
o_padding=0,
upsample=upsample,
a=a_val, b=b_val))
return nn.Sequential(*layers)
def make_layers_resnet_decoder_feature_generator(start_channels, end_channels, a=2.0, b=0.3, l=1):
    layers = []
num_decompr_layers = int(1/float(l)*np.floor(np.log(end_channels / float(start_channels))))
for k in range(0, num_decompr_layers):
in_channels = start_channels*(2 ** (l*k))
out_channels = start_channels*(2 ** (l*(k+1)))
        resblock = make_res_block_decoder_feature_generator(in_channels, out_channels, a_val=a, b_val=b)
layers.append(resblock)
if start_channels*(2 ** (l*num_decompr_layers)) < end_channels:
        resblock = make_res_block_decoder_feature_generator(start_channels*(2 ** (l*num_decompr_layers)), end_channels, a_val=a, b_val=b)
layers.append(resblock)
return nn.Sequential(*layers)
class FeatureGenerator(nn.Module):
def __init__(self, in_channels, out_channels, a, b, generation_power):
super(FeatureGenerator, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.a = a
        self.b = b
        self.generation_power = generation_power
self.feature_generator = make_layers_resnet_decoder_feature_generator(self.in_channels,
self.out_channels,
a=self.a,
b=self.b,
l=self.generation_power)
def forward(self, z):
        features = self.feature_generator(z)
        return features
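# Hedged usage sketch (shapes illustrative; requires `import torch` and the
# networks package on the path):
#   fg = FeatureGenerator(in_channels=8, out_channels=64, a=2.0, b=0.3,
#                         generation_power=1)
#   features = fg(torch.randn(4, 8, 16))  # (batch, channels, length)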
|
184930
|
import os
from typing import NamedTuple
from unittest.mock import Mock, sentinel
import pytest
import requests
import requests_mock
import vcr
from apiclient import APIClient
from apiclient.request_formatters import BaseRequestFormatter
from apiclient.response_handlers import BaseResponseHandler
BASE_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
VCR_CASSETTE_DIR = os.path.join(BASE_DIR, "vcr_cassettes")
api_client_vcr = vcr.VCR(
serializer="yaml",
cassette_library_dir=VCR_CASSETTE_DIR,
record_mode="once",
match_on=["uri", "method", "query"],
)
error_cassette_vcr = vcr.VCR(
serializer="yaml", cassette_library_dir=VCR_CASSETTE_DIR, record_mode="once", match_on=["uri"]
)
@pytest.fixture
def cassette():
with api_client_vcr.use_cassette("cassette.yaml") as cassette:
yield cassette
@pytest.fixture
def error_cassette():
with error_cassette_vcr.use_cassette("error_cassette.yaml") as cassette:
yield cassette
@pytest.fixture
def mock_requests() -> requests_mock.Mocker:
with requests_mock.mock() as _mocker:
yield _mocker
class MockClient(NamedTuple):
client: Mock
request_formatter: Mock
response_handler: Mock
@pytest.fixture
def mock_client():
# Build our fully mocked client
_mock_client: APIClient = Mock(spec=APIClient)
mock_request_formatter: BaseRequestFormatter = Mock(spec=BaseRequestFormatter)
mock_response_handler: BaseResponseHandler = Mock(spec=BaseResponseHandler)
_mock_client.get_default_query_params.return_value = {}
_mock_client.get_default_headers.return_value = {}
_mock_client.get_default_username_password_authentication.return_value = None
_mock_client.get_request_timeout.return_value = 30.0
_mock_client.get_session.return_value = requests.session()
mock_request_formatter.format.return_value = {}
_mock_client.get_request_formatter.return_value = mock_request_formatter
mock_response_handler.get_request_data.return_value = sentinel.result
_mock_client.get_response_handler.return_value = mock_response_handler
return MockClient(
client=_mock_client, request_formatter=mock_request_formatter, response_handler=mock_response_handler
)
|
184952
|
from __future__ import print_function, division
#
import sys,os
quspin_path = os.path.join(os.getcwd(),"../../")
sys.path.insert(0,quspin_path)
#
from quspin.operators import hamiltonian, exp_op # Hamiltonians, operators and exp_op
from quspin.basis import spin_basis_1d # Hilbert space spin basis
import numpy as np # generic math functions
#
##### define model parameters #####
L=4 # system size
J=1.0 # spin interaction
g=0.809 # transverse field
h=0.9045 # parallel field
#
##### construct basis
basis=spin_basis_1d(L=L)
# define PBC site-coupling lists for operators
x_field=[[g,i] for i in range(L)]
z_field=[[h,i] for i in range(L)]
J_nn=[[J,i,(i+1)%L] for i in range(L)] # PBC
# static and dynamic lists
static=[["zz",J_nn],["z",z_field],["x",x_field]]
dynamic=[]
###### construct Hamiltonian
H=hamiltonian(static,dynamic,dtype=np.float64,basis=basis)
#
###### compute evolution operator as matrix exponential
start, stop, N_t = 0.0, 4.0, 21 # time vector parameters
# define evolution operator
U=exp_op(H,a=-1j,start=start,stop=stop,num=N_t,endpoint=True,iterate=True)
print(U)
#
# compute domain wall initial state
dw_str = "".join("1" for i in range(L//2)) + "".join("0" for i in range(L-L//2))
i_0 = basis.index(dw_str) # find index of product state in basis
psi = np.zeros(basis.Ns) # allocate space for state
psi[i_0] = 1.0 # set MB state to be the given product state
#
##### calculate time-evolved state by successive application of matrix exponential
psi_t=U.dot(psi) # create generator object to apply matrix exponential on the initial state
print(psi_t)
for psi_i in psi_t:
print("evolved state:", psi_i)
|
185013
|
import re
import string
DEFAULT_TOKENIZER_DELIMITER = ' '
def remove_all_whitespace(str):
"""
Strips all whitespace from a given string.
:return: new string without whitespaces, will return the original string if it is empty or None
"""
if str:
return re.sub(r'\s+', '', str)
else:
return str
def tokenize(str, delimiter=DEFAULT_TOKENIZER_DELIMITER):
"""
Splits a string by a given delimiter. Default delimiter is a single whitespace.
:return: list of string tokens, will return the original string if it is empty or None
"""
if str:
return str.split(delimiter)
else:
return str
def normalize(str):
"""
    Normalizes a string by lower-casing it and removing all punctuation.
:param str: string to be normalized
:return: normalized string, if str is None or empty it returns the original string
"""
if str:
        if isinstance(str, unicode):
            not_letters_or_digits = u'!"#%\'()*+,-./:;<=>?@[\]^_`{|}~'
            translate_to = u''
            translate_table = dict((ord(char), translate_to) for char in not_letters_or_digits)
            # lower-case here as well, matching the documented behaviour of the non-unicode branch
            return str.lower().translate(translate_table)
else:
return str.lower().translate(string.maketrans("",""), string.punctuation)
else:
return str
def get_stem(word):
#TODO: Research stemming libraries and implement method using library functions
return word
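# Examples:
#   >>> remove_all_whitespace(' a b\tc ')
#   'abc'
#   >>> tokenize('a,b,c', ',')
#   ['a', 'b', 'c']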
|
185045
|
import unittest
import sympy
from means.approximation.mea.eq_central_moments import eq_central_moments
from means.core import Moment
from means.util.sympyhelpers import to_sympy_matrix, assert_sympy_expressions_equal
class CentralMomentsTestCase(unittest.TestCase):
def test_centralmoments_using_p53model(self):
"""
        Given the p53 model hard-coded below, the result of the central moments should match the expected one exactly
:return:
"""
counter_nvecs = [[0, 0, 0], [0, 0, 2], [0, 1, 1], [0, 2, 0], [1, 0, 1], [1, 1, 0], [2, 0, 0]]
mcounter_nvecs = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 2], [0, 1, 1], [0, 2, 0],
[1, 0, 1], [1, 1, 0], [2, 0, 0]]
counter = [Moment(c,sympy.Symbol("YU{0}".format(i))) for i,c in enumerate(counter_nvecs)]
mcounter = [Moment(c,sympy.Symbol("y_{0}".format(i))) for i,c in enumerate(mcounter_nvecs)]
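        # each Moment pairs an n-vector (moment order per species) with a symbolic placeholder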
m = to_sympy_matrix([
['c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0)',
0,
0,
0,
'c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0)',
0,
'-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2'],
[
'c_3*y_0 - c_4*y_1',
0,
0,
0,
0,
0,
0],
[
'c_4*y_1 - c_5*y_2',
0,
0,
0,
0,
0,
0
]])
species = to_sympy_matrix(['y_0', 'y_1', 'y_2'])
propensities = to_sympy_matrix(['c_0',
'c_1 * y_0',
'c_2*y_0*y_2/(c_6 + y_0)',
'c_3*y_0',
'c_4*y_1',
'c_5*y_2'])
stoichiometry_matrix = to_sympy_matrix([[1, -1, -1, 0, 0, 0],
[0, 0, 0, 1, -1, 0],
[0, 0, 0, 0, 1, -1]])
answer = eq_central_moments(counter, mcounter, m, species, propensities, stoichiometry_matrix, 2)
expected = to_sympy_matrix([
[" 2*c_4*y_1*y_2 + c_4*y_1 - 2*c_5*y_2**2 + c_5*y_2 - 2*y_1*(c_4*y_1 - c_5*y_2)"," -2*c_5"," 2*c_4"," 0"," 0"," 0"," 0"],
["c_3*y_0*y_2 + c_4*y_1**2 - c_4*y_1*y_2 - c_4*y_1 - c_5*y_1*y_2 - y_1*(c_3*y_0 - c_4*y_1) - y_2*(c_4*y_1 - c_5*y_2)"," 0"," -c_4 - c_5"," c_4"," c_3"," 0"," 0"],
["2*c_3*y_0*y_1 + c_3*y_0 - 2*c_4*y_1**2 + c_4*y_1 - 2*y_2*(c_3*y_0 - c_4*y_1)"," 0"," 0"," -2*c_4"," 0"," 2*c_3","0"],
["c_0*y_2 - c_1*y_0*y_2 - c_2*y_0*y_2**2/(c_6 + y_0) + c_4*y_0*y_1 - c_5*y_0*y_2 - y_1*(c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0)) - y_3*(c_4*y_1 - c_5*y_2)"," -c_2*y_0/(c_6 + y_0)"," 0"," 0"," -c_1 + 2*c_2*y_0*y_2/(c_6 + y_0)**2 - 2*c_2*y_2/(c_6 + y_0) - c_5 - y_1*(c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0))","c_4"," -c_2*y_0*y_2**2/(c_6 + y_0)**3 + c_2*y_2**2/(c_6 + y_0)**2 - y_1*(-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2)"],
["c_0*y_1 - c_1*y_0*y_1 - c_2*y_0*y_1*y_2/(c_6 + y_0) + c_3*y_0**2 - c_4*y_0*y_1 - y_2*(c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0)) - y_3*(c_3*y_0 - c_4*y_1)"," 0"," -c_2*y_0/(c_6 + y_0)"," 0"," c_2*y_0*y_1/(c_6 + y_0)**2 - c_2*y_1/(c_6 + y_0) - y_2*(c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0))"," -c_1 + c_2*y_0*y_2/(c_6 + y_0)**2 - c_2*y_2/(c_6 + y_0) - c_4"," -c_2*y_0*y_1*y_2/(c_6 + y_0)**3 + c_2*y_1*y_2/(c_6 + y_0)**2 + c_3 - y_2*(-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2)"],
["2*c_0*y_0 + c_0 - 2*c_1*y_0**2 + c_1*y_0 - 2*c_2*y_0**2*y_2/(c_6 + y_0) + c_2*y_0*y_2/(c_6 + y_0) - 2*y_3*(c_0 - c_1*y_0 - c_2*y_0*y_2/(c_6 + y_0))"," 0"," 0"," 0"," 2*c_2*y_0**2/(c_6 + y_0)**2 - 4*c_2*y_0/(c_6 + y_0) - c_2*y_0/(c_6 + y_0)**2 + c_2/(c_6 + y_0) - 2*y_3*(c_2*y_0/(c_6 + y_0)**2 - c_2/(c_6 + y_0))"," 0"," -2*c_1 - 2*c_2*y_0**2*y_2/(c_6 + y_0)**3 + 4*c_2*y_0*y_2/(c_6 + y_0)**2 + c_2*y_0*y_2/(c_6 + y_0)**3 - 2*c_2*y_2/(c_6 + y_0) - c_2*y_2/(c_6 + y_0)**2 - 2*y_3*(-c_2*y_0*y_2/(c_6 + y_0)**3 + c_2*y_2/(c_6 + y_0)**2)"]
])
assert_sympy_expressions_equal(answer, expected)
def test_centralmoments_using_MM_model(self):
"""
        Given the MM model hard-coded below, the result of the central moments should match the expected one exactly
:return:
"""
counter_nvecs = [[0, 0], [0, 2], [1, 1], [2, 0]]
mcounter_nvecs = [[0, 0], [0, 1], [1, 0], [0, 2], [1, 1], [2, 0]]
counter = [Moment(c,sympy.Symbol("YU{0}".format(i))) for i,c in enumerate(counter_nvecs)]
mcounter = [Moment(c,sympy.Symbol("y_{0}".format(i))) for i,c in enumerate(mcounter_nvecs)]
m = to_sympy_matrix([
['-c_0*y_0*(y_0 + y_1 - 181) + c_1*(-y_0 - y_1 + 301)',
0,
'-c_0',
'-c_0'],
[
'c_2*(-y_0 - y_1 + 301)',
0,
0,
0]
])
species = sympy.Matrix(map(sympy.var, ['y_0', 'y_1']))
propensities = to_sympy_matrix(['c_0*y_0*(y_0 + y_1 - 181)',
'c_1*(-y_0 - y_1 + 301)',
'c_2*(-y_0 - y_1 + 301)'])
stoichiometry_matrix = sympy.Matrix([[-1, 1, 0],
[0, 0, 1]])
expected = to_sympy_matrix([
["c_2*(-y_0 - y_1 + 301)"," -2*c_2"," -2*c_2"," 0"],
["-c_0*y_0*y_1*(y_0 + y_1 - 181) + c_1*y_1*(-y_0 - y_1 + 301) + c_2*y_0*(-y_0 - y_1 + 301) - c_2*y_2*(-y_0 - y_1 + 301) - y_1*(-c_0*y_0*(y_0 + y_1 - 181) + c_1*(-y_0 - y_1 + 301))"," -c_0*y_0 - c_1"," -c_0*y_0 - c_0*(y_0 + y_1 - 181) - c_1 - c_2"," -c_2"],
["-2*c_0*y_0**2*(y_0 + y_1 - 181) + c_0*y_0*(y_0 + y_1 - 181) + 2*c_1*y_0*(-y_0 - y_1 + 301) + c_1*(-y_0 - y_1 + 301) - 2*y_2*(-c_0*y_0*(y_0 + y_1 - 181) + c_1*(-y_0 - y_1 + 301))"," 0"," -4*c_0*y_0 + 2*c_0*y_2 + c_0 - 2*c_1"," -4*c_0*y_0 + 2*c_0*y_2 - 2*c_0*(y_0 + y_1 - 181) + c_0 - 2*c_1"]
])
answer = eq_central_moments(counter, mcounter, m, species, propensities, stoichiometry_matrix, 2)
assert_sympy_expressions_equal(answer, expected)
|
185059
|
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import tensorflow as tf
from ccgnet import experiment as exp
from ccgnet.finetune import *
from ccgnet import layers
from ccgnet.layers import *
import numpy as np
import time
import random
from sklearn.metrics import balanced_accuracy_score
from ccgnet.Dataset import Dataset, DataLoader
from Featurize.Coformer import Coformer
from Featurize.Cocrystal import Cocrystal
def verify_dir_exists(dirname):
if os.path.isdir(os.path.dirname(dirname)) == False:
os.makedirs(os.path.dirname(dirname))
def make_dataset(fp_size, radii):
data1 = Dataset(abs_path+'/CC_Table/CC_Table.tab', mol_blocks_dir=abs_path+'/Mol_Blocks.dir')
data1.make_embedding_dataset(fp_type='ecfp', nBits=fp_size, radii=radii, processes=15, make_dataframe=True)
return data1
def build_model(
layer_1_size,
layer_2_size,
layer_3_size,
act_func,
dropout,
merge,
forward_layer_1_size,
forward_layer_2_size,
forward_layer_3_size,
forward_act_func,
forward_dropout
):
class DNN_5(object):
def build_model(self, inputs, is_training, global_step=None):
fps = inputs[0]
labels = inputs[1]
tags = inputs[2]
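            # fps holds the concatenated fingerprint of both coformers; reshaping to half the width gives one row per coformer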
fps = tf.reshape(fps, [-1, int(fps.get_shape()[-1].value/2)])
with tf.compat.v1.variable_scope('FC_1') as scope:
fps = tf.compat.v1.layers.dense(fps, layer_1_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
            if layer_2_size is not None:
with tf.compat.v1.variable_scope('FC_2') as scope:
fps = tf.compat.v1.layers.dense(fps, layer_2_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
            if layer_3_size is not None:
with tf.compat.v1.variable_scope('FC_3') as scope:
fps = tf.compat.v1.layers.dense(fps, layer_3_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
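            # recombine the two per-coformer embeddings into a single pair representation (element-wise sum or concatenation)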
if merge == 'add':
with tf.compat.v1.variable_scope('merge_add') as scope:
fp_size = fps.get_shape()[-1].value
fps = tf.reshape(fps, [-1, 2, fp_size])
fps = tf.reduce_sum(fps, axis=1)
elif merge == 'concat':
with tf.compat.v1.variable_scope('merge_concat') as scope:
fp_size = fps.get_shape()[-1].value
fps = tf.reshape(fps, [-1, fp_size*2])
with tf.compat.v1.variable_scope('Forward_FC_1') as scope:
fps = tf.compat.v1.layers.dense(fps, forward_layer_1_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = forward_act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
            if forward_layer_2_size is not None:
with tf.compat.v1.variable_scope('Forward_FC_2') as scope:
fps = tf.compat.v1.layers.dense(fps, forward_layer_2_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = forward_act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
            if forward_layer_3_size is not None:
with tf.compat.v1.variable_scope('Forward_FC_3') as scope:
fps = tf.compat.v1.layers.dense(fps, forward_layer_3_size)
fps = tf.compat.v1.layers.batch_normalization(fps, training=is_training)
fps = forward_act_func(fps)
fps = tf.compat.v1.layers.dropout(fps, dropout, training=is_training)
fps = layers.make_fc_layer(fps, 2, is_training=is_training, with_bn=False, act_func=None)
return fps, labels
return DNN_5()
def black_box_function(args_dict):
    tf.compat.v1.reset_default_graph()
fp_size = args_dict['fp_size']
radii = args_dict['fp_radii']
batch_size = args_dict['batch_size']
layer_1_size = args_dict['layer_1_size']
layer_2_size = args_dict['layer_2_size']
layer_3_size = args_dict['layer_3_size']
act_fun = args_dict['act_fun']
dropout = args_dict['dropout']
merge = args_dict['merge']
forward_layer_1_size = args_dict['forward_layer_1_size']
forward_layer_2_size = args_dict['forward_layer_2_size']
forward_layer_3_size = args_dict['forward_layer_3_size']
forward_act_fun = args_dict['forward_act_fun']
forward_dropout = args_dict['forward_dropout']
    # data splitting
data = make_dataset(fp_size, radii)
train_data, valid_data = data.split(train_samples=train_samples, valid_samples=valid_samples, with_fps=True)
# make save dir
snapshot_path = abs_path+'/bayes_snapshot/'
model_name = 'BayesOpt-FP/'
verify_dir_exists(snapshot_path+model_name)
if os.listdir(snapshot_path+model_name) == []:
dataset_name = 'Step_0/'
else:
l_ = [int(i.split('_')[1]) for i in os.listdir(snapshot_path+model_name) if 'Step_' in i]
dataset_name = 'Step_{}/'.format(max(l_)+1)
# training
    tf.compat.v1.reset_default_graph()
model = build_model(layer_1_size, layer_2_size, layer_3_size, act_fun, dropout, merge, forward_layer_1_size,
forward_layer_2_size, forward_layer_3_size, forward_act_fun, forward_dropout)
model = exp.Model(model, train_data, valid_data, with_test=False, snapshot_path=snapshot_path, use_subgraph=False, use_desc=False, build_fc=True,
model_name=model_name, dataset_name=dataset_name+'/time_0')
history = model.fit(num_epoch=100, save_info=True, save_att=False, silence=0, train_batch_size=batch_size,
max_to_keep=1, metric='loss')
loss = min(history['valid_cross_entropy'])
    tf.compat.v1.reset_default_graph()
print('\nLoss: {}'.format(loss))
print(str(args_dict))
return loss
from hyperopt import hp
import hyperopt.pyll.stochastic
args_dict = {
'fp_size': hp.choice('fp_size', [128,256,512,1024,2048,4096]),
'fp_radii': hp.choice('fp_radii', (1,2,3)),
'batch_size':hp.choice('batch_size', (64,128,256)),
'layer_1_size':hp.choice('layer_1_size', (128,256,512,1024,2048)),
'layer_2_size':hp.choice('layer_2_size', (128,256,512,1024,2048, None)),
'layer_3_size':hp.choice('layer_3_size', (128,256,512,1024,2048, None)),
'act_fun':hp.choice('act_fun', (tf.nn.relu, tf.nn.elu, tf.nn.tanh)),
'dropout':hp.uniform('dropout', 0.0, 0.75),
'merge':hp.choice('merge',('add', 'concat')),
'forward_layer_1_size':hp.choice('forward_layer_1_size', (128,256,512,1024,2048)),
'forward_layer_2_size':hp.choice('forward_layer_2_size', (128,256,512,1024,2048, None)),
'forward_layer_3_size':hp.choice('forward_layer_3_size', (128,256,512,1024,2048, None)),
'forward_act_fun':hp.choice('forward_act_fun', (tf.nn.relu, tf.nn.elu, tf.nn.tanh)),
'forward_dropout':hp.uniform('forward_dropout', 0.0, 0.75),
}
from hyperopt import fmin, tpe, hp, Trials
abs_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
fold_10 = eval(open(abs_path+'/Fold_10.dir').read())
Samples = fold_10['fold-0']['train']+fold_10['fold-0']['valid']
## sample splitting
random.shuffle(Samples)
num_sample = len(Samples)
train_num = int(0.9 * num_sample)
train_samples = Samples[:train_num]
valid_samples = Samples[train_num:]
trials = Trials()
best = fmin(
fn=black_box_function,
space=args_dict,
algo=tpe.suggest,
max_evals=100,
trials=trials,
trials_save_file='trials_save_file-FP')
print('best:')
print(best)
|
185064
|
import pytest
import sqlalchemy as sa
from postgresql_audit import (
add_column,
alter_column,
change_column_name,
remove_column,
rename_table
)
from .utils import last_activity
@pytest.mark.usefixtures('activity_cls', 'table_creator')
class TestRenameTable(object):
def test_only_updates_given_table(
self,
session,
article,
user,
connection,
versioning_manager
):
rename_table(connection, 'user', 'user2')
activity = session.query(versioning_manager.activity_cls).filter_by(
table_name='article'
).one()
assert activity
def test_updates_table_name(self, session, user, connection):
rename_table(connection, 'user', 'user2')
activity = last_activity(connection)
assert activity['table_name'] == 'user2'
@pytest.mark.usefixtures('activity_cls', 'table_creator')
class TestChangeColumnName(object):
def test_only_updates_given_table(
self,
session,
article,
user,
connection,
versioning_manager
):
change_column_name(connection, 'user', 'name', 'some_name')
activity = session.query(versioning_manager.activity_cls).filter_by(
table_name='article'
).one()
assert 'name' in activity.changed_data
def test_updates_changed_data(self, session, user, connection):
change_column_name(connection, 'user', 'name', 'some_name')
activity = last_activity(connection)
assert activity['changed_data'] == {
'id': user.id,
'some_name': 'John',
'age': 15
}
def test_updates_old_data(self, session, user, connection):
user.name = 'Luke'
session.commit()
change_column_name(connection, 'user', 'name', 'some_name')
activity = last_activity(connection)
assert activity['old_data'] == {
'id': user.id,
'some_name': 'John',
'age': 15
}
@pytest.mark.usefixtures('activity_cls', 'table_creator')
class TestRemoveColumn(object):
def test_only_updates_given_table(
self,
session,
article,
user,
connection,
versioning_manager
):
remove_column(connection, 'user', 'name')
activity = session.query(versioning_manager.activity_cls).filter_by(
table_name='article'
).one()
assert 'name' in activity.changed_data
def test_updates_changed_data(self, session, user, connection):
remove_column(connection, 'user', 'name')
activity = last_activity(connection)
assert activity['old_data'] == {}
assert activity['changed_data'] == {
'id': user.id,
'age': 15
}
def test_updates_old_data(self, session, user, connection):
user.name = 'Luke'
session.commit()
remove_column(connection, 'user', 'name')
activity = last_activity(connection)
assert activity['old_data'] == {
'id': user.id,
'age': 15
}
@pytest.mark.usefixtures('activity_cls', 'table_creator')
class TestAddColumn(object):
def test_only_updates_given_table(
self,
session,
article,
user,
connection,
versioning_manager
):
add_column(connection, 'user', 'some_column')
activity = session.query(versioning_manager.activity_cls).filter_by(
table_name='article'
).one()
assert 'some_column' not in activity.changed_data
def test_updates_changed_data(self, session, user, connection):
add_column(connection, 'user', 'some_column')
activity = last_activity(connection)
assert activity['old_data'] == {}
assert activity['changed_data'] == {
'id': user.id,
'age': 15,
'name': 'John',
'some_column': None
}
def test_updates_old_data(self, session, user, connection):
user.name = 'Luke'
session.commit()
add_column(connection, 'user', 'some_column')
activity = last_activity(connection)
assert activity['old_data'] == {
'id': user.id,
'age': 15,
'name': 'John',
'some_column': None
}
assert activity['changed_data'] == {'name': 'Luke'}
@pytest.mark.usefixtures('activity_cls', 'table_creator')
class TestAlterColumn(object):
def test_only_updates_given_table(
self,
session,
article,
user,
connection,
versioning_manager
):
alter_column(
connection,
'user',
'id',
lambda value, activity_table: sa.cast(value, sa.Text)
)
activity = session.query(versioning_manager.activity_cls).filter_by(
table_name='article'
).one()
assert isinstance(activity.changed_data['id'], int)
def test_updates_changed_data(self, session, user, connection):
alter_column(
connection,
'user',
'id',
lambda value, activity_table: sa.cast(value, sa.Text)
)
activity = last_activity(connection)
assert activity['changed_data'] == {
'id': str(user.id),
'age': 15,
'name': 'John'
}
def test_updates_old_data(self, session, user, connection):
user.name = 'Luke'
session.commit()
alter_column(
connection,
'user',
'id',
lambda value, activity_table: sa.cast(value, sa.Text)
)
activity = last_activity(connection)
assert activity['old_data'] == {
'id': str(user.id),
'age': 15,
'name': 'John'
}
|
185137
|
from office365.sharepoint.changes.change import Change
class ChangeWeb(Change):
@property
def web_id(self):
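        """Identifier of the changed web."""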
return self.properties.get("WebId", None)
|
185169
|
from pathlib import Path
path = Path().absolute()
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = "sqlite:///" + str(path) + "/Database/database.db"
SECRET_KEY = "e9515dfe457bfe64c1c30d73e161de0f76f6b03f"
|
185242
|
from .decorator import log_on_start, log_on_error, log_on_end, log_exception
__all__ = ["log_on_start", "log_on_error", "log_on_end", "log_exception"]
|
185259
|
import os
from substance.logs import *
from substance import (Command, Box, EngineProfile)
from substance.exceptions import (InvalidOptionError)
class Create(Command):
def getUsage(self):
return "substance engine init [options] [ENGINE NAME]"
def getHelpTitle(self):
return "Create a new engine configuration"
def getShellOptions(self, optparser):
optparser.set_description("Create a new substance engine")
optparser.add_option("--devroot", dest="devroot",
help="Path to local devroot directory.")
optparser.add_option('--devroot-mode', dest="devroot_mode",
help="devroot sync mode", default="unison")
optparser.add_option("--mount", dest="mounts",
help="Mount host path to engine path", nargs=10)
optparser.add_option("--driver", dest="driver",
help="Virtualization driver for this engine")
optparser.add_option("--memory", type="int",
dest="memory", help="Machine memory allocation")
optparser.add_option("--cpus", type="int",
dest="cpus", help="Machine vCPU allocation")
optparser.add_option("--box", type="str",
dest="box", help="Engine box image")
return optparser
def main(self):
name = self.getInputName()
options = self.buildConfigFromArgs().bind(self.buildProfileFromArgs) \
.catch(self.exitError).getOK()
self.core.createEngine(name, config=options['config'], profile=options['profile']) \
.bind(dinfo("Engine \"%s\" has been created", name)) \
            .catch(self.exitError)
def buildConfigFromArgs(self, config={}):
opts = {}
boxName = self.core.getDefaultBoxString(
) if not self.options.box else self.options.box
box = self.core.readBox(boxName)
if box.isFail():
return box
opts['box'] = box.getOK().boxstring
if self.options.driver:
if not self.validateDriver(self.options.driver):
return Fail(InvalidOptionError("Driver %s is not valid." % self.options.driver))
opts['driver'] = self.options.driver
if self.options.devroot:
if not os.path.isdir(self.options.devroot):
return Fail(InvalidOptionError("Devroot path %s does not exist." % self.options.devroot))
opts['devroot'] = {} if 'devroot' not in opts else opts['devroot']
opts['devroot']['path'] = self.options.devroot
if self.options.devroot_mode:
# XXX Fix hardcoded values.
if self.options.devroot_mode not in ['rsync', 'sharedfolder', 'unison']:
return Fail(InvalidOptionError("Devroot mode '%s' is not valid."))
opts['devroot'] = {} if 'devroot' not in opts else opts['devroot']
opts['devroot']['mode'] = self.options.devroot_mode
config['config'] = opts
return OK(config)
def buildProfileFromArgs(self, config={}):
profile = EngineProfile()
if self.options.memory and self.validateInteger(self.options.memory):
profile.memory = self.options.memory
if self.options.cpus and self.validateInteger(self.options.cpus):
profile.cpus = self.options.cpus
config['profile'] = profile
return OK(config)
|
185280
|
from examples.paper.initialize import *
# user settings
settings = {
#
# audit settings
'data_name': 'credit',
'method_name': 'logreg',
'normalize_data': True,
'force_rational_actions': False,
#
# script flags
'audit_recourse': True,
'plot_audits': True,
'print_flag': True,
'save_flag': True,
'randomseed': 2338,
#
# placeholders
'method_suffixes': [''],
'audit_suffixes': [''],
}
# file names
output_dir = results_dir / settings['data_name']
output_dir.mkdir(exist_ok = True)
if settings['normalize_data']:
settings['method_suffixes'].append('normalized')
if settings['force_rational_actions']:
settings['audit_suffixes'].append('rational')
# set file header
settings['dataset_file'] = '%s/%s_processed.csv' % (data_dir, settings['data_name'])
settings['file_header'] = '%s/%s_%s%s' % (output_dir, settings['data_name'], settings['method_name'], '_'.join(settings['method_suffixes']))
settings['audit_file_header'] = '%s%s' % (settings['file_header'], '_'.join(settings['audit_suffixes']))
settings['model_file'] = '%s_models.pkl' % settings['file_header']
settings['audit_file'] = '%s_audit_results.pkl' % settings['audit_file_header']
pp.pprint(settings)
# data set
data_df = pd.read_csv(settings['dataset_file'])
data = {
'outcome_name': data_df.columns[0],
'variable_names': data_df.columns[1:].tolist(),
'X': data_df.iloc[:, 1:],
'y': data_df.iloc[:, 0]
}
scaler = None
data['X_train'] = data['X']
data['scaler'] = None
if settings['normalize_data']:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler(copy = True, with_mean = True, with_std = True)
data['X_scaled'] = pd.DataFrame(scaler.fit_transform(data['X'].to_numpy(dtype = float), data['y'].values), columns = data['X'].columns)
data['X_train'] = data['X_scaled']
data['scaler'] = scaler
# action set
default_bounds = (1.0, 99.0, 'percentile')
custom_bounds = None
immutable_variables = []
if settings['data_name'] == 'credit':
immutable_names = ['Female', 'Single', 'Married']
immutable_names += list(filter(lambda x: 'Age' in x or 'Overdue' in x, data['variable_names']))
default_bounds = (0.1, 99.9, 'percentile')
custom_bounds = {'Female': (0, 100, 'p'), 'Married': (0, 100, 'p')}
data['immutable_variable_names'] = [n for n in immutable_names if n in data['variable_names']]
action_set = ActionSet(X = data['X'], custom_bounds = custom_bounds, default_bounds = default_bounds)
action_set[data['immutable_variable_names']].mutable = False
action_set['EducationLevel'].step_direction = 1
payment_fields = list(filter(lambda x: 'Amount' in x, data['variable_names']))
action_set[payment_fields].step_type = 'absolute'
action_set[payment_fields].step_size = 50
for p in payment_fields:
action_set[p].update_grid()
# model
model_stats = pickle.load(open(settings['model_file'], 'rb'))
all_models = model_stats.pop('all_models')
### Create Flipset
clf = all_models['C_0.02__max_iter_1000__penalty_l1__solver_saga__tol_1e-08']
yhat = clf.predict(X = data['X_train'])
coefficients, intercept = undo_coefficient_scaling(clf, scaler = data['scaler'])
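# align the action set's step directions with the model coefficients so that actions move points toward the favourable prediction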
action_set.align(coefficients)
predicted_neg = np.flatnonzero(yhat < 1)
U = data['X'].iloc[predicted_neg].values
k = 4
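# build a flipset (a list of candidate action sets that flip the model's prediction) for one individual predicted negatively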
fb = Flipset(x = U[k], action_set = action_set, coefficients = coefficients, intercept = intercept)
fb.populate(enumeration_type = 'distinct_subsets', total_items = 14)
print(fb)
#### Run Audit ####
audit_results = {}
for key, clf in all_models.items():
if settings['method_name'] == 'logreg':
model_name = 1. / float(key.split('_')[1])
else:
model_name = float(key.split('_')[1])
# unscale coefficients
if scaler is not None:
coefficients, intercept = undo_coefficient_scaling(coefficients = np.array(clf.coef_).flatten(), intercept = clf.intercept_[0], scaler = scaler)
else:
coefficients, intercept = np.array(clf.coef_).flatten(), clf.intercept_[0]
auditor = RecourseAuditor(action_set, coefficients = coefficients, intercept = intercept)
audit_results[model_name] = auditor.audit(X = data['X'])
if settings['save_flag']:
pickle.dump(audit_results, file = open(settings['audit_file'], 'wb'), protocol=2)
#### Plots ####
if settings['plot_audits']:
if settings['method_name'] == 'logreg':
xlabel = '$\ell_1$-penalty (log scale)'
else:
xlabel = '$C$-penalty (log scale)'
# percent of points without recourse
feasibility_df = {}
obj_val = {}
for model_name in sorted(audit_results):
recourse_df = pd.DataFrame(audit_results[model_name])
recourse_cost = recourse_df.loc[lambda df: df.feasible].loc[:, 'total_cost']
feasibility_df[model_name] = recourse_df['feasible'].mean()
obj_val[model_name] = recourse_cost.mean()
# feasibility plot
f, ax = create_figure(fig_size = (6, 6))
t_found = pd.Series(feasibility_df)
t_found.plot(ax = ax, color = 'black', marker='o')
plt.semilogx()
ax.set_xlabel(xlabel)
ax.set_ylabel('% of Individuals with Recourse')
ax.set_ylim(0, 1.02)
ax.yaxis.set_major_formatter(mtick.PercentFormatter(1.0, decimals = 0))
ax = fix_font_sizes(ax)
f.savefig('%s_recourse_feasibility.pdf' % settings['audit_file_header'], bbox_inches = 'tight')
plt.close()
cost_df = {k: pd.DataFrame(v) for k, v in audit_results.items()}
cost_df = pd.concat([cost_df[k]['total_cost'].to_frame('%f' % k) for k in sorted(cost_df.keys())], axis=1).replace([-np.inf, np.inf], np.nan)
# plot cost distribution
f, ax = create_figure(fig_size = (6, 6))
sns.violinplot(data = cost_df, ax = ax, linewidth = 0.5, cut = 0, inner = 'quartile', color = "gold", scale = 'width')
ax.set_xlabel(xlabel)
ax.set_ylabel('Cost of Recourse')
ax.set_ylim(bottom = 0, top = 1)
xtick_labels = []
# for xt in ax.get_xticklabels():
# v = np.log10(float(xt.get_text()))
# label = '$10^{%.0f}$' % v if v == np.round(v, 0) else ' '
# xtick_labels.append(label)
ax.set_xticklabels(xtick_labels)
for l in ax.lines:
l.set_linewidth(3.0)
l.set_linestyle('-')
l.set_solid_capstyle('butt')
ax = fix_font_sizes(ax)
f.savefig('%s_recourse_cost_distribution.pdf' % settings['audit_file_header'], bbox_inches = 'tight')
plt.close()
# store median cost
cost_df.median(axis = 0).to_csv('%s_median_cost_df.csv' % settings['audit_file_header'])
# plot the mean cost of recourse
f, ax = create_figure(fig_size = (6, 6))
ts_m = pd.Series(obj_val)
ax = ts_m.plot(ax = ax, color = 'black', marker = 'o')
plt.semilogx()
plt.xlabel(xlabel)
plt.ylabel('Mean Cost of Recourse')
ax = fix_font_sizes(ax)
f.savefig('%s_recourse_cost.pdf' % settings['audit_file_header'], bbox_inches = 'tight')
plt.close()
## dev
import os
import pickle
data, scaler = load_data()
key = 'C_0.001000__max_iter_1000__penalty_l1__solver_saga__tol_1e-08'
all_models = pickle.load(open(settings['model_file'], 'rb'))
clf = all_models[key]
scaler_pkl = os.path.join(os.path.dirname(settings['model_file']), 'scaler.pkl')
scaler = pickle.load(open(scaler_pkl, 'rb'))
immutable_names = ['Female', 'Single', 'Married']
immutable_names += list(filter(lambda x: 'Age' in x or 'Overdue' in x, data['variable_names']))
default_bounds = (0.1, 99.9, 'percentile')
custom_bounds = {'Female': (0, 100, 'p'), 'Married': (0, 100, 'p')}
data['immutable_variable_names'] = [n for n in immutable_names if n in data['variable_names']]
action_set = ActionSet(X=data['X'], custom_bounds=custom_bounds, default_bounds=default_bounds)
action_set[data['immutable_variable_names']].mutable = False
action_set['EducationLevel'].step_direction = 1
payment_fields = list(filter(lambda x: 'Amount' in x, data['variable_names']))
action_set[payment_fields].step_type = 'absolute'
action_set[payment_fields].step_size = 50
for p in payment_fields:
action_set[p].update_grid()
# unscale coefficients
if scaler is not None:
coefficients, intercept = undo_coefficient_scaling(coefficients=np.array(clf.coef_).flatten(),
intercept=clf.intercept_[0], scaler=scaler)
else:
coefficients, intercept = np.array(clf.coef_).flatten(), clf.intercept_[0]
## run audit
print("Auditing for model %s..." % key)
auditor = RecourseAuditor(
action_set,
coefficients=coefficients,
intercept=intercept
)
auditor.audit(X=data['X'])
|
185293
|
import unittest
import urllib.parse
from ingenico.connect.sdk.defaultimpl.authorization_type import AuthorizationType
from ingenico.connect.sdk.defaultimpl.default_authenticator import DefaultAuthenticator
from ingenico.connect.sdk.request_header import RequestHeader
class DefaultAuthenticatorTest(unittest.TestCase):
"""Tests that the DefaultAuthenticator is capable of converting a set of request headers to a POST request
and that it is capable of providing the correct signature"""
def test_canonicalized_header_value(self):
"""Tests that the to_canonicalize_header function correctly removes control characters and excessive whitespace
"""
authenticator = DefaultAuthenticator(AuthorizationType.get_authorization("v1HMAC"), "apiKeyId", "secretApiKey")
self.assertEqual("aap noot", authenticator.to_canonicalize_header_value("aap\nnoot "))
self.assertEqual("aap noot", authenticator.to_canonicalize_header_value(" aap\r\n noot"))
def test_to_data_to_sign(self):
"""Tests that the to_data_to_sign function correctly constructs a POST request for multiple headers"""
authenticator = DefaultAuthenticator(AuthorizationType.get_authorization("v1HMAC"), "apiKeyId", "secretApiKey")
http_headers = [RequestHeader("X-GCS-ServerMetaInfo",
"{\"platformIdentifier\":\"Windows 7/6.1 Java/1.7 (Oracle Corporation; "
"Java HotSpot(TM) 64-Bit Server VM; 1.7.0_45)\",\"sdkIdentifier\":\"1.0\"}"),
RequestHeader("Content-Type", "application/json"),
RequestHeader("X-GCS-ClientMetaInfo", "{\"aap\",\"noot\"}"),
RequestHeader("User-Agent", "Apache-HttpClient/4.3.4 (java 1.5)"),
RequestHeader("Date", "Mon, 07 Jul 2014 12:12:40 GMT")]
expected_start = "POST\n" \
"application/json\n"
expected_end = "x-gcs-clientmetainfo:{\"aap\",\"noot\"}\n" \
"x-gcs-servermetainfo:{\"platformIdentifier\":\"Windows 7/6.1 Java/1.7 " \
"(Oracle Corporation; Java HotSpot(TM) 64-Bit Server VM; 1.7.0_45)\"," \
"\"sdkIdentifier\":\"1.0\"}\n" \
"/v1/9991/services%20bla/convert/amount?aap=noot&mies=geen%20noot\n"
url = urllib.parse.urlparse("http://localhost:8080/v1/9991/services%20bla/convert/amount?aap=noot&mies=geen%20noot")
data_to_sign = authenticator.to_data_to_sign("POST", url, http_headers)
actual_start = data_to_sign[:22]
actual_end = data_to_sign[52:]
self.assertEqual(expected_start, actual_start)
self.assertEqual(expected_end, actual_end)
def test_create_authentication_signature(self):
"""Tests if the default authenticator creates the correct signature"""
authenticator = DefaultAuthenticator(AuthorizationType.get_authorization("v1HMAC"), "apiKeyId", "secretApiKey")
data_to_sign = "DELETE\n" \
"application/json\n" \
"Fri, 06 Jun 2014 13:39:43 GMT\n" \
"x-gcs-clientmetainfo:processed header value\n" \
"x-gcs-customerheader:processed header value\n" \
"x-gcs-servermetainfo:processed header value\n" \
"/v1/9991/tokens/123456789\n"
authentication_signature = authenticator.create_authentication_signature(data_to_sign)
self.assertEqual("VfnXpPBQQoHZivTcAg0JvOWkhnzlPnaCPKpTQn/uMJM=", authentication_signature)
def test_create_authentication_signature_2(self):
"""Tests if the default authenticator creates the correct signature"""
authenticator = DefaultAuthenticator(AuthorizationType.get_authorization("v1HMAC"), "apiKeyId", "<KEY>")
data_to_sign = "GET\n" \
"\n" \
"Fri, 06 Jun 2014 13:39:43 GMT\n" \
"/v1/9991/tokens/123456789\n"
authentication_signature = authenticator.create_authentication_signature(data_to_sign)
self.assertEqual("9ond5EIN05dBXJGCLRK5om9pxHsyrh/12pZJ7bvmwNM=", authentication_signature)
if __name__ == '__main__':
unittest.main()
|
185294
|
import numpy as np
from scipy.fft import dct, idct
import math
def idct_basis_2d(len_basis, num_basis):
'''
Generate basic 2D DCT basis for dictionary learning
Inputs:
len_basis: length of the flattened atom, e.g. 36 for 6x6 basis
        num_basis: number of atoms; usually overcomplete (larger than len_basis)
Returns:
DCT basis in [len_basis, num_basis]
'''
assert len_basis <= num_basis, 'should be over-complete dictionary'
ODCT = idct(np.identity(math.ceil(num_basis ** 0.5)), norm='ortho', axis=0)
ODCT = ODCT[:math.ceil(len_basis ** 0.5), :]
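    # the Kronecker product of the 1-D basis with itself yields a separable 2-D basis over flattened patches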
ODCT = np.kron(ODCT, ODCT)
ODCT = np.column_stack((ODCT[:, 0], ODCT[:, 1:] - np.mean(ODCT[:, 1:], axis=0)))
ODCT = ODCT / np.linalg.norm(ODCT, axis=0)
ODCT = ODCT[:, :num_basis]
return ODCT
def idct_basis_3d(len_basis, num_basis):
'''
Generate basic 3D DCT basis for dictionary learning
Inputs:
len_basis: length of the flattened atom, e.g. 216 for 6x6x6 basis
        num_basis: number of atoms; usually overcomplete (larger than len_basis)
Returns:
DCT basis in [len_basis, num_basis]
'''
assert len_basis <= num_basis, 'should be over-complete dictionary'
ODCT = idct(np.identity(math.ceil(num_basis ** (1 / 3))), norm='ortho', axis=0)
ODCT = ODCT[:math.ceil(len_basis ** (1 / 3)), :]
ODCT = np.kron(ODCT, np.kron(ODCT, ODCT))
ODCT = np.column_stack((ODCT[:, 0], ODCT[:, 1:] - np.mean(ODCT[:, 1:], axis=0)))
ODCT = ODCT / np.linalg.norm(ODCT, axis=0)
ODCT = ODCT[:, :num_basis]
return ODCT
|
185325
|
from typing import List
class Solution:
def nextGreatestLetter(self, letters: List[str], target: str) -> str:
left, right = 0, len(letters) - 1
while left <= right:
middle = left + (right - left) // 2
if letters[middle] <= target: left = middle + 1
else: right = middle - 1
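        # if target >= every letter, left == len(letters); the modulo wraps the answer back to letters[0]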
return letters[left % len(letters)]
|
185344
|
import torch
EPSILON = 1E-10
def xyxy_to_xywh(boxes_xyxy):
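    # boxes use inclusive pixel coordinates, hence the +1 when computing width and height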
assert torch.all(boxes_xyxy[..., 0] < boxes_xyxy[..., 2])
assert torch.all(boxes_xyxy[..., 1] < boxes_xyxy[..., 3])
return torch.cat([
(boxes_xyxy[..., [0]] + boxes_xyxy[..., [2]]) / 2.,
(boxes_xyxy[..., [1]] + boxes_xyxy[..., [3]]) / 2.,
boxes_xyxy[..., [2]] - boxes_xyxy[..., [0]] + 1.,
boxes_xyxy[..., [3]] - boxes_xyxy[..., [1]] + 1.
], dim=-1)
def xywh_to_xyxy(boxes_xywh):
assert torch.all(boxes_xywh[..., [2, 3]] > 0)
return torch.cat([
boxes_xywh[..., [0]] - 0.5 * (boxes_xywh[..., [2]] - 1),
boxes_xywh[..., [1]] - 0.5 * (boxes_xywh[..., [3]] - 1),
boxes_xywh[..., [0]] + 0.5 * (boxes_xywh[..., [2]] - 1),
boxes_xywh[..., [1]] + 0.5 * (boxes_xywh[..., [3]] - 1)
], dim=-1)
def deltas_to_boxes(deltas, anchors, input_size):
"""
:param deltas: dxdydwdh format
:param anchors: xywh format
:param input_size: input image size in hw format
:return: boxes in xyxy format
"""
boxes_xywh = torch.cat([
anchors[..., [0]] + anchors[..., [2]] * deltas[..., [0]],
anchors[..., [1]] + anchors[..., [3]] * deltas[..., [1]],
anchors[..., [2]] * torch.exp(deltas[..., [2]]),
anchors[..., [3]] * torch.exp(deltas[..., [3]])
], dim=2)
boxes_xyxy = xywh_to_xyxy(boxes_xywh)
boxes_xyxy[..., [0, 2]] = torch.clamp(boxes_xyxy[..., [0, 2]], 0, input_size[1] - 1)
boxes_xyxy[..., [1, 3]] = torch.clamp(boxes_xyxy[..., [1, 3]], 0, input_size[0] - 1)
return boxes_xyxy
def compute_overlaps(boxes1, boxes2):
"""
Compute IoUs between two sets of boxes.
boxes1 and boxes2 must have the same shape.
:param boxes1: xyxy format
:param boxes2: xyxy format
:return:
"""
lr = torch.clamp_min(torch.min(boxes1[..., [2]], boxes2[..., [2]]) -
torch.max(boxes1[..., [0]], boxes2[..., [0]]), 0)
tb = torch.clamp_min(torch.min(boxes1[..., [3]], boxes2[..., [3]]) -
torch.max(boxes1[..., [1]], boxes2[..., [1]]), 0)
inter = lr * tb
union = (boxes1[..., [2]] - boxes1[..., [0]]) * (boxes1[..., [3]] - boxes1[..., [1]]) + \
(boxes2[..., [2]] - boxes2[..., [0]]) * (boxes2[..., [3]] - boxes2[..., [1]]) - inter
return inter / (union + EPSILON)
def safe_softmax(probs, dim=None):
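    # subtract the per-dimension maximum before exponentiating for numerical stability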
exp = torch.exp(probs - torch.max(probs, dim=dim, keepdim=True)[0])
return exp / torch.sum(exp, dim=dim, keepdim=True)
|
185386
|
l = [[True] * 4 for _ in range(4)]
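# count self-avoiding paths from (r, c) to the bottom-right cell (3, 3) of a 4x4 grid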
def s(r, c):
if r == 3 and c == 3:
return 1
elif max(r, c) > 3 or min(r, c) < 0:
return 0
elif not l[r][c]:
return 0
else:
l[r][c] = False
cnt = s(r + 1, c) + s(r - 1, c) + s(r, c + 1) + s(r, c - 1)
l[r][c] = True
return cnt
print(s(0, 0))
|
185414
|
from datetime import datetime
from unittest.mock import Mock
import pytest
from toucan_connectors.google_sheets_2.google_sheets_2_connector import GoogleSheets2Connector
from toucan_connectors.http_api.http_api_connector import HttpAPIConnector
from toucan_connectors.json_wrapper import JsonWrapper
from toucan_connectors.oauth2_connector.oauth2connector import (
AuthFlowNotFound,
NoInstanceUrl,
NoOAuth2RefreshToken,
OAuth2Connector,
OAuth2ConnectorConfig,
)
from toucan_connectors.snowflake_oauth2.snowflake_oauth2_connector import SnowflakeoAuth2Connector
from toucan_connectors.toucan_connector import get_oauth2_configuration
FAKE_AUTHORIZATION_URL = 'http://localhost:4242/foobar'
FAKE_TOKEN_URL = 'http://service/token_endpoint'
SCOPE: str = 'openid email https://www.googleapis.com/auth/spreadsheets.readonly'
@pytest.fixture
def oauth2_connector(secrets_keeper):
return OAuth2Connector(
auth_flow_id='test',
authorization_url=FAKE_AUTHORIZATION_URL,
scope=SCOPE,
config=OAuth2ConnectorConfig(client_id='', client_secret=''),
redirect_uri='',
token_url=FAKE_TOKEN_URL,
secrets_keeper=secrets_keeper,
)
def test_build_authorization_url(mocker, oauth2_connector, secrets_keeper):
"""
It should return the authorization URL
"""
mock_create_authorization_url: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.create_authorization_url',
return_value=('authorization_url', 'state'),
)
url = oauth2_connector.build_authorization_url()
assert mock_create_authorization_url.called
assert url == 'authorization_url'
assert secrets_keeper.load('test')['state'] == 'state'
def test_retrieve_tokens(mocker, oauth2_connector, secrets_keeper):
"""
It should retrieve tokens and save them
"""
secrets_keeper.save('test', {'state': JsonWrapper.dumps({'token': 'the_token'})})
mock_fetch_token: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.fetch_token',
return_value={'access_token': 'dummy_token'},
)
oauth2_connector.retrieve_tokens(
f'http://localhost/?state={JsonWrapper.dumps({"token": "the_token"})}'
)
mock_fetch_token.assert_called()
assert secrets_keeper.load('test')['access_token'] == 'dummy_token'
def test_fail_retrieve_tokens(oauth2_connector, secrets_keeper):
"""
    It should fail if the stored state does not match the received state
"""
secrets_keeper.save('test', {'state': JsonWrapper.dumps({'token': 'the_token'})})
with pytest.raises(AssertionError):
oauth2_connector.retrieve_tokens(
f'http://localhost/?state={JsonWrapper.dumps({"token": "bad_token"})}'
)
def test_get_access_token(oauth2_connector, secrets_keeper):
"""
It should return the last saved access_token
"""
secrets_keeper.save('test', {'access_token': 'dummy_token'})
assert oauth2_connector.get_access_token() == 'dummy_token'
def test_get_access_token_expired(mocker, oauth2_connector, secrets_keeper):
"""
It should refresh the token if it expired
"""
secrets_keeper.save(
'test',
{
'access_token': '<PASSWORD>',
'expires_at': datetime.fromtimestamp(0),
'refresh_token': '<PASSWORD>',
},
)
mock_refresh_token: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.refresh_token',
return_value={'access_token': 'new_token'},
)
access_token = oauth2_connector.get_access_token()
mock_refresh_token.assert_called_once_with(FAKE_TOKEN_URL, refresh_token='dummy_refresh_token')
assert access_token == 'new_token'
def test_get_access_token_expired_int_type(mocker, oauth2_connector, secrets_keeper):
"""
It should refresh the token if it expired
"""
secrets_keeper.save(
'test',
{
'access_token': '<PASSWORD>',
'expires_at': 123,
'refresh_token': '<PASSWORD>',
},
)
mock_refresh_token: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.refresh_token',
return_value={'access_token': 'new_token'},
)
access_token = oauth2_connector.get_access_token()
mock_refresh_token.assert_called_once_with(FAKE_TOKEN_URL, refresh_token='<PASSWORD>')
assert access_token == 'new_token'
def test_get_access_token_expired_bool_type(mocker, oauth2_connector, secrets_keeper):
"""
It should refresh the token if it expired
"""
secrets_keeper.save(
'test',
{
'access_token': '<PASSWORD>',
'expires_at': True,
'refresh_token': '<PASSWORD>',
},
)
mock_refresh_token: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.refresh_token',
return_value={'access_token': 'new_token'},
)
access_token = oauth2_connector.get_access_token()
mock_refresh_token.assert_called_once_with(FAKE_TOKEN_URL, refresh_token='<PASSWORD>')
assert access_token == 'new_token'
def test_get_access_token_expired_no_refresh_token(mocker, oauth2_connector, secrets_keeper):
"""
It should fail to refresh the token if no refresh token is provided
"""
secrets_keeper.save(
'test', {'access_token': '<PASSWORD>_token', 'expires_at': datetime.fromtimestamp(0)}
)
mock_refresh_token: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.refresh_token',
return_value={'access_token': 'new_token'},
)
with pytest.raises(NoOAuth2RefreshToken):
oauth2_connector.get_access_token()
mock_refresh_token.assert_not_called()
def test_get_access_data(mocker, oauth2_connector, secrets_keeper):
mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.refresh_token',
return_value={
'access_token': 'new_access_token',
'refresh_token': '<PASSWORD>token',
'instance_url': 'new_instance_url',
},
)
secrets_keeper.save(
'test',
{
'access_token': 'old_token',
'refresh_token': 'old_<PASSWORD>token',
'instance_url': 'old_instance_url',
},
)
access_data = oauth2_connector.get_access_data()
assert access_data['access_token'] == 'new_access_token'
assert access_data['refresh_token'] == 'new_refresh_token'
assert access_data['instance_url'] == 'new_instance_url'
def test_get_access_data_without_refresh(mocker, oauth2_connector, secrets_keeper):
mock_refresh_token: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.refresh_token',
return_value={
'access_token': 'new_access_token',
'refresh_token': 'new_<PASSWORD>token',
'instance_url': 'new_instance_url',
},
)
secrets_keeper.save('test', {'access_token': 'old_token'})
with pytest.raises(NoOAuth2RefreshToken):
oauth2_connector.get_access_data()
mock_refresh_token.assert_not_called()
def test_get_access_data_without_instance(mocker, oauth2_connector, secrets_keeper):
mock_refresh_token: Mock = mocker.patch(
'toucan_connectors.oauth2_connector.oauth2connector.OAuth2Session.refresh_token',
return_value={'access_token': 'new_access_token', 'refresh_token': 'new_refresh_token'},
)
secrets_keeper.save('test', {'access_token': 'old_token', 'refresh_token': 'old_refresh_token'})
with pytest.raises(NoInstanceUrl):
oauth2_connector.get_access_data()
mock_refresh_token.assert_not_called()
def test_should_throw_if_authflow_id_not_found(oauth2_connector, secrets_keeper):
with pytest.raises(AuthFlowNotFound):
oauth2_connector.retrieve_tokens(
f'http://localhost/?state={JsonWrapper.dumps({"token": "bad_token"})}'
)
def test_should_return_if_is_instance_oauth2_connector(oauth2_connector):
assert get_oauth2_configuration(GoogleSheets2Connector) == (True, 'instance')
assert get_oauth2_configuration(HttpAPIConnector) == (False, None)
assert get_oauth2_configuration(SnowflakeoAuth2Connector) == (True, 'connector')
def test_get_refresh_token(mocker, oauth2_connector):
mocked_keeper = mocker.patch.object(
oauth2_connector,
'secrets_keeper',
)
mocked_load = mocked_keeper.load
mocked_load.return_value = {'refresh_token': 'bla'}
token = oauth2_connector.get_refresh_token()
assert token == 'bla'
|
185416
|
import logging
from typing import Optional
class LazyLogger:
log_level: int = logging.WARNING
log_format: str = "%(name)s: %(levelname)-8s %(message)s"
logger_name: str = "dynamoquery"
    _lazy_logger: Optional[logging.Logger] = None
@property
def _logger(self) -> logging.Logger:
if self._lazy_logger is None:
self._lazy_logger = self._get_default_logger()
return self._lazy_logger
def _get_default_logger(self) -> logging.Logger:
logger = logging.getLogger(self.logger_name)
if not logger.handlers:
formatter = logging.Formatter(self.log_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
stream_handler.setLevel(self.log_level)
logger.addHandler(stream_handler)
logger.setLevel(self.log_level)
return logger
|
185431
|
from django.db import models
import datetime
#---------------- Drive Profile Logic Function ------------------------#
class Category(models.Model):
name = models.CharField(max_length=300)
def __str__(self):
return self.name
class SubCategory(models.Model):
name = models.CharField(max_length=300)
def __str__(self):
return self.name
class ExperimentType(models.Model):
category = models.ForeignKey(Category, on_delete=models.CASCADE)
subcategory = models.ForeignKey(SubCategory, on_delete=models.CASCADE)
cell_id_active = models.BooleanField(default=True)
start_cycle_active = models.BooleanField(default=True)
voltage_active = models.BooleanField(default=True)
voltage_name = models.CharField(max_length = 50, default = 'upper_cutoff_voltage')
temperature_active = models.BooleanField(default=True)
temperature_name = models.CharField(max_length=50, default = 'temperature')
drive_profile_active = models.BooleanField(default=False)
AC_active = models.BooleanField(default=False)
AC_increment_active = models.BooleanField(default=False)
charger_active = models.BooleanField(default=False)
version_number_active = models.BooleanField(default=False)
charger = models.CharField(max_length=50, default = '')
shorthand = models.CharField(max_length=10, default = '')
def __str__(self):
return '{} ({})'.format(self.subcategory.name, self.category.name)
class ChargerDriveProfile(models.Model):
drive_profile = models.CharField(max_length=50)
test = models.CharField(max_length=200)
description = models.CharField(max_length=1000)
x_name = models.CharField(max_length=50)
y_name = models.CharField(max_length=50)
z_name = models.CharField(max_length=50)
x_active = models.BooleanField(default=True)
y_active = models.BooleanField(default=False)
z_active = models.BooleanField(default=False)
def __str__(self):
return '{} ({})'.format(self.test, self.drive_profile)
def print_voltage(x):
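    # format a voltage as a compact digit string for filenames, e.g. 4.2 -> '42', 4.25 -> '425', 0.8 -> '08'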
print("x = {}".format(x))
if round((x * 100)) % 10 != 0:
y = str(int(x * 100))
elif round((x * 100)) % 10 == 0:
y = str(int(x * 10))
if x < 1:
y = '0' + y
if x == 0:
y = "00"
if x >= 10:
y = "voltage was invalid: {} volts is too high. Only supports voltages less than 10.".format(x)
return y
class ValidMetadata(models.Model):
ANODE = 'A'
CATHODE = 'C'
AC_CHOICES = [
(ANODE, 'Anode'),
(CATHODE, 'Cathode'),
]
experiment_type = models.ForeignKey(ExperimentType, on_delete=models.CASCADE, null=True)
charID = models.CharField(max_length=5, null=True)
cell_id = models.IntegerField(null=True)
start_cycle = models.IntegerField(null=True)
voltage = models.FloatField(null=True)
temperature = models.IntegerField(null=True)
AC = models.CharField(
max_length=1,
choices=AC_CHOICES,
null=True)
AC_increment = models.IntegerField(null=True)
version_number = models.IntegerField(null=True)
drive_profile = models.ForeignKey(ChargerDriveProfile, on_delete=models.CASCADE,null=True)
drive_profile_x_numerator = models.IntegerField(null=True)
drive_profile_x_denominator = models.IntegerField(null=True)
drive_profile_y_numerator = models.IntegerField(null=True)
drive_profile_y_denominator = models.IntegerField(null=True)
drive_profile_z = models.FloatField(null=True)
date = models.DateField(null=True)
def __str__(self):
return '{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}_{}'.format(
self.experiment_type,
self.charID,
self.cell_id,
self.start_cycle,
self.voltage,
self.temperature,
self.AC,
self.AC_increment,
self.version_number,
self.drive_profile,
self.drive_profile_x_numerator,
self.drive_profile_x_denominator,
self.drive_profile_y_numerator,
self.drive_profile_y_denominator,
self.drive_profile_z,
self.date)
@property
def is_valid(self):
return (( self.charID is not None) and
(not self.experiment_type.cell_id_active or self.cell_id is not None) and
(not self.experiment_type.start_cycle_active or self.start_cycle is not None) and
(not self.experiment_type.voltage_active or self.voltage is not None) and
(not self.experiment_type.temperature_active or self.temperature is not None) and
(not self.experiment_type.AC_active or self.AC is not None) and
(not self.experiment_type.AC_increment_active or self.AC_increment is not None) and
(not self.experiment_type.version_number_active or self.version_number is not None) and
( self.date is not None))
@property
def get_profile(self):
x_value = ''
y_value = ''
z_value = ''
if self.drive_profile.x_active:
x_value = self.drive_profile.x_name + '=' + str(self.drive_profile_x_numerator) + '/' + str(
self.drive_profile_x_denominator)
if self.drive_profile.y_active:
y_value = ', ' + self.drive_profile.y_name + '=' + str(self.drive_profile_y_numerator) + '/' + str(
self.drive_profile_y_denominator)
if self.drive_profile.z_active:
z_value = ', ' + self.drive_profile.z_name + '=' + str(self.drive_profile_z)
return x_value \
+ y_value \
+ z_value
@property
def get_filename(self):
#TODO: implement printing drive profiles if it ever becomes useful
if not self.is_valid:
return None
filename_printed_fields = []
        if self.experiment_type.subcategory.name != 'exsitu':
filename_printed_fields += [str(self.charID),str(self.experiment_type.shorthand)]
if self.experiment_type.AC_active and not self.experiment_type.AC_increment_active:
filename_printed_fields.append(str(self.AC))
if self.experiment_type.AC_active and self.experiment_type.AC_increment_active:
filename_printed_fields.append('{}{}'.format(str(self.AC),str(self.AC_increment)))
if self.experiment_type.cell_id_active:
filename_printed_fields.append(str(self.cell_id))
if self.experiment_type.charger != '':
filename_printed_fields.append(str(self.experiment_type.charger))
if self.experiment_type.start_cycle_active:
filename_printed_fields.append('c{}'.format(str(self.start_cycle)))
if self.experiment_type.voltage_active:
filename_printed_fields.append("{:03d}V".format(int(self.voltage * 100)))
if self.experiment_type.temperature_active:
filename_printed_fields.append('{}C'.format(self.temperature))
filename_printed_fields.append(self.date.strftime("%y%m%d"))
        if self.experiment_type.subcategory.name == 'exsitu':
filename='Ex-situ Gas Checkin_v{}.xls'.format(str(self.version_number))
else:
filename= '_'.join(filename_printed_fields)
return filename
class DatabaseFile(models.Model):
'''
Note that valid_metadata is null if filename hasn't been parsed.
'''
filename = models.CharField(max_length=300)
root = models.CharField(max_length=300)
last_modified = models.DateTimeField(default=datetime.datetime(1970, 1, 1))
filesize = models.IntegerField(default=0) # in bytes
valid_metadata = models.OneToOneField(ValidMetadata, on_delete=models.SET_NULL, null=True)
is_valid = models.BooleanField(default=False)
deprecated = models.BooleanField(default=False)
def __str__(self):
return self.filename
def set_valid_metadata(self,
valid_metadata = None,
experiment_type = None,
charID = None,
cell_id = None,
start_cycle = None,
voltage = None,
temperature = None,
AC = None,
AC_increment = None,
version_number = None,
drive_profile=None,
drive_profile_x_numerator=None,
drive_profile_x_denominator=None,
drive_profile_y_numerator=None,
drive_profile_y_denominator=None,
drive_profile_z=None,
date = None):
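        # return early when the incoming metadata matches what is already stored; otherwise overwrite the stored values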
if self.valid_metadata is not None:
print('datafile metadata was', self.valid_metadata)
if valid_metadata is not None:
# both exist.
if ((self.valid_metadata.experiment_type == valid_metadata.experiment_type) and
(self.valid_metadata.charID == valid_metadata.charID) and
(self.valid_metadata.cell_id == valid_metadata.cell_id) and
(self.valid_metadata.voltage == valid_metadata.voltage) and
(self.valid_metadata.temperature == valid_metadata.temperature) and
(self.valid_metadata.date == valid_metadata.date) and
(self.valid_metadata.version_number == valid_metadata.version_number) and
(self.valid_metadata.AC == valid_metadata.AC) and
(self.valid_metadata.AC_increment == valid_metadata.AC_increment) and
(self.valid_metadata.drive_profile == valid_metadata.drive_profile) and
(self.valid_metadata.drive_profile_x_numerator == valid_metadata.drive_profile_x_numerator) and
(self.valid_metadata.drive_profile_x_denominator == valid_metadata.drive_profile_x_denominator) and
(self.valid_metadata.drive_profile_y_numerator == valid_metadata.drive_profile_y_numerator) and
(self.valid_metadata.drive_profile_y_denominator == valid_metadata.drive_profile_y_denominator) and
(self.valid_metadata.drive_profile_z == valid_metadata.drive_profile_z)
):
return
else:
if (
((experiment_type is None) or (experiment_type == self.valid_metadata.experiment_type)) and
((charID is None) or (charID == self.valid_metadata.charID)) and
((cell_id is None) or (cell_id == self.valid_metadata.cell_id)) and
((start_cycle is None) or (start_cycle == self.valid_metadata.start_cycle)) and
((voltage is None) or (voltage == self.valid_metadata.voltage)) and
((temperature is None) or (temperature == self.valid_metadata.temperature)) and
((AC is None) or (AC == self.valid_metadata.AC)) and
((AC_increment is None) or (AC_increment == self.valid_metadata.AC_increment)) and
((version_number is None) or (version_number == self.valid_metadata.version_number)) and
((drive_profile is None) or (drive_profile == self.valid_metadata.drive_profile)) and
((drive_profile_x_numerator is None) or (drive_profile_x_numerator == self.valid_metadata.drive_profile_x_numerator)) and
((drive_profile_x_denominator is None) or (drive_profile_x_denominator == self.valid_metadata.drive_profile_x_denominator)) and
((drive_profile_y_numerator is None) or (drive_profile_y_numerator == self.valid_metadata.drive_profile_y_numerator)) and
((drive_profile_y_denominator is None) or (drive_profile_y_denominator == self.valid_metadata.drive_profile_y_denominator)) and
((drive_profile_z is None) or (drive_profile_z == self.valid_metadata.drive_profile_z)) and
((date is None) or (date == self.valid_metadata.date))):
return
if valid_metadata is not None:
# get the new one.
self.valid_metadata.delete()
valid_metadata.save()
self.valid_metadata = valid_metadata
else:
if experiment_type is not None:
self.valid_metadata.experiment_type = experiment_type
if charID is not None:
self.valid_metadata.charID = charID
if cell_id is not None:
self.valid_metadata.cell_id = cell_id
if start_cycle is not None:
self.valid_metadata.start_cycle = start_cycle
if voltage is not None:
self.valid_metadata.voltage = voltage
if temperature is not None:
self.valid_metadata.temperature = temperature
if AC is not None:
self.valid_metadata.AC = AC
if AC_increment is not None:
self.valid_metadata.AC_increment = AC_increment
if version_number is not None:
self.valid_metadata.version_number = version_number
if drive_profile is not None:
self.valid_metadata.drive_profile = drive_profile
if drive_profile_x_numerator is not None:
self.valid_metadata.drive_profile_x_numerator = drive_profile_x_numerator
if drive_profile_x_denominator is not None:
self.valid_metadata.drive_profile_x_denominator = drive_profile_x_denominator
if drive_profile_y_numerator is not None:
self.valid_metadata.drive_profile_y_numerator = drive_profile_y_numerator
if drive_profile_y_denominator is not None:
self.valid_metadata.drive_profile_y_denominator = drive_profile_y_denominator
if drive_profile_z is not None:
self.valid_metadata.drive_profile_z = drive_profile_z
if date is not None:
self.valid_metadata.date = date
self.valid_metadata.save()
self.is_valid = self.valid_metadata.is_valid
self.save()
else:
if valid_metadata is not None:
valid_metadata.save()
self.valid_metadata = valid_metadata
self.is_valid = self.valid_metadata.is_valid
self.save()
else:
return
|
185438
|
from __future__ import print_function
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.io as sio
import scipy.sparse as sp
import scipy.sparse.linalg as slinalg
import scipy.linalg as linalg
from scipy.sparse.linalg.eigen.arpack import eigsh
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
import sys
from os import path
import copy
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import time
import random
import tensorflow as tf
# import matplotlib.pyplot as plt
def save_sparse_csr(filename, array):
np.savez(filename, data=array.data, indices=array.indices,
indptr=array.indptr, shape=array.shape)
def load_sparse_csr(filename):
loader = np.load(filename)
return sp.csr_matrix((loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
    return np.array(mask, dtype=bool)
def get_triplet(y_train, train_mask, max_triplets):
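    # build (anchor, positive, negative) index triplets from the labelled rows of y_train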
# print('y_train----',y_train.shape)
index_nonzero = y_train.nonzero()
# for i in range(y_train.shape[1]):
# label_count.append(index_nonzero[1][[index_nonzero[1]==i]].size)
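    # number of labelled training examples per class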
label_count = np.sum(y_train, axis=0)
all_count = np.sum(label_count)
index_nonzero = np.transpose(np.concatenate((index_nonzero[0][np.newaxis,:], index_nonzero[1]\
[np.newaxis, :]),axis=0)).tolist()
index_nonzero = sorted(index_nonzero, key = lambda s: s[1])
#print(index_nonzero)
#print(label_count)
def get_one_triplet(input_index, index_nonzero, label_count, all_count, max_triplets):
triplet = []
if label_count[input_index[1]]==0:
return 0
else:
# print('max_triplets', max_triplets)
# print(all_count)
# print(label_count[input_index[1]])
n_triplets = min(max_triplets, int(all_count-label_count[input_index[1]]))
# print('----------')
for j in range(int(label_count[input_index[1]])-1):
positives = []
negatives = []
for k, (value, label) in enumerate(index_nonzero):
                    # find a positive sample; if it is the only sample, choose itself
if label == input_index[1] and (value != input_index[0] or label_count[input_index[1]]==1):
positives.append(index_nonzero[k])
if label != input_index[1]:
negatives.append(index_nonzero[k])
# print('positives' ,positives)
# print('negatives', negatives)
negatives = random.sample(list(negatives), n_triplets)
for value, label in negatives:
triplet.append([input_index[0], positives[j][0], value])
return triplet
triplet = []
for i, j in enumerate(index_nonzero):
triple = get_one_triplet(j, index_nonzero, label_count, all_count,max_triplets)
if triple == 0:
continue
else:
triplet.extend(triple)
np_triple = np.concatenate(np.array([triplet]), axis = 1)
return np_triple
def load_data(dataset_str, train_size, validation_size, model_config, shuffle=True):
"""Load data."""
if dataset_str in ['USPS-Fea', 'CIFAR-Fea', 'Cifar_10000_fea', 'Cifar_R10000_fea', 'MNIST-Fea', 'MNIST-10000', 'MNIST-5000']:
data = sio.loadmat('data/{}.mat'.format(dataset_str))
l = data['labels'].flatten()
labels = np.zeros([l.shape[0],np.max(data['labels'])+1])
labels[np.arange(l.shape[0]), l.astype(np.int8)] = 1
features = data['X']
sample = features[0].copy()
adj = data['G']
else:
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder) + 1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
# features = sp.eye(features.shape[0]).tolil()
# features = sp.lil_matrix(allx)
labels = np.vstack((ally, ty))
# labels = np.vstack(ally)
if dataset_str.startswith('nell'):
# Find relation nodes, add them as zero-vecs into the right position
test_idx_range_full = range(allx.shape[0], len(graph))
isolated_node_idx = np.setdiff1d(test_idx_range_full, test_idx_reorder)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - allx.shape[0], :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - allx.shape[0], :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_all = np.setdiff1d(range(len(graph)), isolated_node_idx)
if not os.path.isfile("data/planetoid/{}.features.npz".format(dataset_str)):
print("Creating feature vectors for relations - this might take a while...")
features_extended = sp.hstack((features, sp.lil_matrix((features.shape[0], len(isolated_node_idx)))),
dtype=np.int32).todense()
features_extended[isolated_node_idx, features.shape[1]:] = np.eye(len(isolated_node_idx))
features = sp.csr_matrix(features_extended, dtype=np.float32)
print("Done!")
save_sparse_csr("data/planetoid/{}.features".format(dataset_str), features)
else:
features = load_sparse_csr("data/planetoid/{}.features.npz".format(dataset_str))
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
features[test_idx_reorder, :] = features[test_idx_range, :]
labels[test_idx_reorder, :] = labels[test_idx_range, :]
features = preprocess_features(features, feature_type=model_config['feature'])
global all_labels
all_labels = labels.copy()
# split the data set
idx = np.arange(len(labels))
no_class = labels.shape[1] # number of class
# validation_size = validation_size * len(idx) // 100
# if not hasattr(train_size, '__getitem__'):
train_size = [train_size for i in range(labels.shape[1])]
if shuffle:
np.random.shuffle(idx)
idx_train = []
count = [0 for i in range(no_class)]
label_each_class = train_size
next = 0
for i in idx:
if count == label_each_class:
break
next += 1
for j in range(no_class):
if labels[i, j] and count[j] < label_each_class[j]:
idx_train.append(i)
count[j] += 1
test_size = model_config['test_size']
if model_config['validate']:
if test_size:
assert next+validation_size<len(idx)
idx_val = idx[next:next+validation_size]
assert next+validation_size+test_size < len(idx)
idx_test = idx[-test_size:] if test_size else idx[next+validation_size:]
else:
if test_size:
assert next+test_size<len(idx)
idx_val = idx[-test_size:] if test_size else idx[next:]
idx_test = idx[-test_size:] if test_size else idx[next:]
# else:
# labels_of_class = [0]
# while (np.prod(labels_of_class) == 0):
# np.random.shuffle(idx)
# idx_train = idx[0:int(len(idx) * train_size // 100)]
# labels_of_class = np.sum(labels[idx_train], axis=0)
# idx_val = idx[-500 - validation_size:-500]
# idx_test = idx[-500:]
print('labels of each class : ', np.sum(labels[idx_train], axis=0))
# idx_val = idx[len(idx) * train_size // 100:len(idx) * (train_size // 2 + 50) // 100]
# idx_test = idx[len(idx) * (train_size // 2 + 50) // 100:len(idx)]
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
# else:
# idx_test = test_idx_range.tolist()
# idx_train = range(len(y))
# idx_val = range(len(y), len(y) + 500)
#
# train_mask = sample_mask(idx_train, labels.shape[0])
# val_mask = sample_mask(idx_val, labels.shape[0])
# test_mask = sample_mask(idx_test, labels.shape[0])
#
# y_train = np.zeros(labels.shape)
# y_val = np.zeros(labels.shape)
# y_test = np.zeros(labels.shape)
# y_train[train_mask, :] = labels[train_mask, :]
# y_val[val_mask, :] = labels[val_mask, :]
# y_test[test_mask, :] = labels[test_mask, :]
size_of_each_class = np.sum(labels[idx_train], axis=0)
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return tf.SparseTensorValue(coords, values, np.array(shape, dtype=np.int64))
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features, feature_type):
if feature_type == 'bow':
# """Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
# normalize(features, norm='l1', axis=1, copy=False)
elif feature_type == 'tfidf':
transformer = TfidfTransformer(norm=None, use_idf=True, smooth_idf=True, sublinear_tf=False)
features = transformer.fit_transform(features)
elif feature_type == 'none':
features = sp.csr_matrix(sp.eye(features.shape[0]))
else:
raise ValueError('Invalid feature type: ' + str(feature_type))
return features
def normalize_adj(adj, type='sym'):
"""Symmetrically normalize adjacency matrix."""
if type == 'sym':
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
# d_inv_sqrt = np.power(rowsum, -0.5)
# d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
# return adj*d_inv_sqrt*d_inv_sqrt.flatten()
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
elif type == 'rw':
rowsum = np.array(adj.sum(1))
d_inv = np.power(rowsum, -1.0).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = sp.diags(d_inv)
adj_normalized = d_mat_inv.dot(adj)
return adj_normalized
def preprocess_adj(adj, type='sym', loop=True):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
if loop:
adj = adj + sp.eye(adj.shape[0])
adj_normalized = normalize_adj(adj, type=type) #
return sparse_to_tuple(adj_normalized)
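# Illustrative sketch (not part of the original pipeline): the effect of the
# GCN renormalization trick A_hat = D^{-1/2} (A + I) D^{-1/2} on a toy graph.
def _demo_preprocess_adj():
    # Two mutually connected nodes; after adding self-loops every node has
    # degree 2, so each entry of the normalized matrix becomes 0.5.
    toy_adj = sp.csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32))
    print(preprocess_adj(toy_adj, type='sym', loop=True))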
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
# largest_eigval, _ = eigsh(laplacian, 1, which='LM')
# scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k + 1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], laplacian))
return sparse_to_tuple(t_k)
def absorption_probability(W, alpha, stored_A=None, column=None):
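    """Return A = (L + alpha * I)^{-1}, where L = D - W is the graph
    Laplacian of the adjacency matrix W.
    Loosely, A[i, j] measures how much influence (absorption probability)
    node j has on node i under restart strength alpha.  If `stored_A` is
    given, the full matrix is cached at '<stored_A><alpha>.npz'; if `column`
    is given, only those columns are solved for instead of inverting L fully.
    """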
try:
# raise Exception('DEBUG')
A = np.load(stored_A + str(alpha) + '.npz')['arr_0']
print('load A from ' + stored_A + str(alpha) + '.npz')
if column is not None:
P = np.zeros(W.shape)
P[:, column] = A[:, column]
return P
else:
return A
    except Exception:
        # no cached A found (or stored_A is None): compute it from scratch
# W=sp.csr_matrix([[0,1],[1,0]])
# alpha = 1
n = W.shape[0]
print('Calculate absorption probability...')
W = W.copy().astype(np.float32)
D = W.sum(1).flat
L = sp.diags(D, dtype=np.float32) - W
L += alpha * sp.eye(W.shape[0], dtype=L.dtype)
L = sp.csc_matrix(L)
# print(np.linalg.det(L))
if column is not None:
A = np.zeros(W.shape)
# start = time.time()
A[:, column] = slinalg.spsolve(L, sp.csc_matrix(np.eye(L.shape[0], dtype='float32')[:, column])).toarray()
# print(time.time()-start)
return A
else:
# start = time.time()
A = slinalg.inv(L).toarray()
# print(time.time()-start)
if stored_A:
np.savez(stored_A + str(alpha) + '.npz', A)
return A
# fletcher_reeves
# slinalg.solve(L, np.ones(L.shape[0]))
# A_ = np.zeros(W.shape)
# I = sp.eye(n)
# Di = sp.diags(np.divide(1,np.array(D)+alpha))
# for i in range(10):
# # A_=
# A_ = Di*(I+W.dot(A_))
# print(time.time()-start)
def fletcher_reeves(A, B):
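    """Solve A X = B column-wise with a fixed 10 iterations of the linear
    conjugate-gradient method (the rsnew / rsold update below is the
    Fletcher-Reeves beta rule, hence the name).
    """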
# A=np.array(A)
X = np.zeros(B.shape)
r = np.array(B - A.dot(X))
rsold = (r * r).sum(0)
p = r
for i in range(10):
Ap = np.array(A.dot(p))
pAp = (p * Ap).sum(0)
alpha = rsold / pAp
X += alpha * p
r -= alpha * Ap
rsnew = (r * r).sum(0)
        # a convergence check on rsnew could break out of the loop here
p = r + rsnew / rsold * p
rsold = rsnew
return X
def cotraining(W, t, alpha, y_train, train_mask, stored_A=None):
A = absorption_probability(W, alpha, stored_A, train_mask)
y_train = y_train.copy()
train_index = np.where(train_mask)[0]
already_labeled = np.sum(y_train, axis=1)
# if not isinstance(features, np.ndarray):
# features = features.toarray()
print("Additional Label:")
if not hasattr(t, '__getitem__'):
t = [t for _ in range(y_train.shape[1])]
for i in range(y_train.shape[1]):
y = y_train[:, i:i + 1]
a = A.dot(y)
a[already_labeled > 0] = 0
# a[W.dot(y) > 0] = 0
gate = (-np.sort(-a, axis=0))[t[i]]
index = np.where(a.flat > gate)[0]
# x1 = features[index, :].reshape((-1, 1, features.shape[1]))
# x2 = features[y_train[:, i].astype(np.bool)].reshape((1, -1, features.shape[1]))
# D = np.sum((x1 - x2) ** 2, axis=2) ** 0.5
# D = np.mean(D, axis=1)
# gate = 100000000 if t[i] >= D.shape[0] else np.sort(D, axis=0)[t[i]]
# index = index[D<gate]
train_index = np.hstack([train_index, index])
y_train[index, i] = 1
correct_label_count(index, i)
print()
train_mask = sample_mask(train_index, y_train.shape[0])
return y_train, train_mask
def selftraining(prediction, t, y_train, train_mask):
new_gcn_index = np.argmax(prediction, axis=1)
confidence = np.max(prediction, axis=1)
sorted_index = np.argsort(-confidence)
no_class = y_train.shape[1] # number of class
if hasattr(t, '__getitem__'):
assert len(t) >= no_class
index = []
count = [0 for i in range(no_class)]
for i in sorted_index:
for j in range(no_class):
if new_gcn_index[i] == j and count[j] < t[j] and not train_mask[i]:
index.append(i)
count[j] += 1
else:
index = sorted_index[:t]
indicator = np.zeros(train_mask.shape, dtype=np.bool)
indicator[index] = True
indicator = np.logical_and(np.logical_not(train_mask), indicator)
prediction = np.zeros(prediction.shape)
prediction[np.arange(len(new_gcn_index)), new_gcn_index] = 1.0
prediction[train_mask] = y_train[train_mask]
correct_labels = np.sum(prediction[indicator] * all_labels[indicator], axis=0)
count = np.sum(prediction[indicator], axis=0)
    print('Additional Label:')
for i, j in zip(correct_labels, count):
print(int(i), '/', int(j), sep='', end='\t')
print()
y_train = np.copy(y_train)
train_mask = np.copy(train_mask)
train_mask[indicator] = 1
y_train[indicator] = prediction[indicator]
return y_train, train_mask
def lp(adj, alpha, y_train, train_mask, y_test, stored_A=None):
P = absorption_probability(adj, alpha, stored_A=stored_A, column=train_mask)
P = P[:, train_mask]
# nearest clssifier
predicted_labels = np.argmax(P, axis=1)
# prediction = alpha*P
prediction = np.zeros(P.shape)
prediction[np.arange(P.shape[0]), predicted_labels] = 1
y = np.sum(train_mask)
label_per_sample = np.vstack([np.zeros(y), np.eye(y)])[np.add.accumulate(train_mask) * train_mask]
sample2label = label_per_sample.T.dot(y_train)
prediction = prediction.dot(sample2label)
test_acc = np.sum(prediction * y_test) / np.sum(y_test)
test_acc_of_class = np.sum(prediction * y_test, axis=0) / np.sum(y_test, axis=0)
# print(test_acc, test_acc_of_class)
return test_acc, test_acc_of_class, prediction
def union_intersection(prediction, t, y_train, train_mask, W, alpha, stored_A, union_or_intersection):
no_class = y_train.shape[1] # number of class
# gcn index
new_labels_gcn = np.argmax(prediction, axis=1)
confidence = np.max(prediction, axis=1)
sorted_index = np.argsort(-confidence)
if not hasattr(t, '__getitem__'):
t = [t for i in range(no_class)]
assert len(t) >= no_class
count = [0 for i in range(no_class)]
index_gcn = [[] for i in range(no_class)]
for i in sorted_index:
j = new_labels_gcn[i]
if count[j] < t[j] and not train_mask[i]:
index_gcn[j].append(i)
count[j] += 1
# lp
A = absorption_probability(W, alpha, stored_A, train_mask)
train_index = np.where(train_mask)[0]
already_labeled = np.sum(y_train, axis=1)
index_lp = []
for i in range(no_class):
y = y_train[:, i:i + 1]
a = np.sum(A[:, y.flat > 0], axis=1)
a[already_labeled > 0] = 0
# a[W.dot(y) > 0] = 0
gate = (-np.sort(-a, axis=0))[t[i]]
index = np.where(a.flat > gate)[0]
index_lp.append(index)
# print(list(map(len, index_gcn)))
# print(list(map(len, index_lp)))
y_train = y_train.copy()
print("Additional Label:")
for i in range(no_class):
assert union_or_intersection in ['union', 'intersection']
if union_or_intersection == 'union':
index = list(set(index_gcn[i]) | set(index_lp[i]))
else:
index = list(set(index_gcn[i]) & set(index_lp[i]))
y_train[index, i] = 1
train_mask[index] = True
print(np.sum(all_labels[index, i]), '/', len(index), sep='', end='\t')
return y_train, train_mask
def ap_approximate(adj, features, alpha, k):
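    """k-step power-iteration approximation of absorption-probability
    smoothing applied to `features`: the loop accumulates the geometric
    series alpha/(alpha+1) * sum_{i=0}^{k-1} A_hat^i @ features, where
    A_hat is the row-normalized (adj + I) scaled by 1/(alpha+1) -- a
    random-walk-normalized analogue of the dense solve in
    absorption_probability.
    """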
adj = normalize(adj + sp.eye(adj.shape[0]), 'l1', axis=1) / (alpha + 1)
# D = sp.diags(np.array(adj.sum(axis=1)).flatten())+alpha*sp.eye(adj.shape[0])
# D = D.power(-1)
# adj = D*adj
# features = D*alpha*features
if sp.issparse(features):
features = features.toarray()
new_feature = np.zeros(features.shape)
for _ in range(k):
new_feature = adj * new_feature + features
new_feature *= alpha / (alpha + 1)
return new_feature
all_labels = None
# dataset = None
def correct_label_count(indicator, i):
count = np.sum(all_labels[:, i][indicator])
if indicator.dtype == np.bool:
total = np.where(indicator)[0].shape[0]
elif indicator.dtype in [np.int, np.int8, np.int16, np.int32, np.int64]:
total = indicator.shape[0]
else:
raise TypeError('indicator must be of data type np.bool or np.int')
# print(" for class {}, {}/{} is correct".format(i, count, total))
print(count, '/', total, sep='', end='\t')
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def preprocess_model_config(model_config):
if model_config['Model'] not in [17, 23]:
model_config['connection'] = list(model_config['connection'])
# judge if parameters are legal
for c in model_config['connection']:
if c not in ['c', 'd', 'r', 'f', 'C']:
raise ValueError(
'connection string specified by --connection can only contain "c", "d", "r", "f", "C" but "{}" found'.format(
c))
for i in model_config['layer_size']:
if not isinstance(i, int):
raise ValueError('layer_size should be a list of int, but found {}'.format(model_config['layer_size']))
if i <= 0:
                raise ValueError('layer_size must be greater than 0, but found {}'.format(i))
if not len(model_config['connection']) == len(model_config['layer_size']) + 1:
raise ValueError('length of connection string should be equal to length of layer_size list plus 1')
# Generate name
if not model_config['name']:
model_name = str(model_config['Model'])
if model_config['Model'] != 'lp':
model_name += '_' + model_config['connection'][0]
for char, size in \
zip(model_config['connection'][1:], model_config['layer_size']):
model_name += str(size) + char
if model_config['conv'] == 'cheby':
model_name += '_cheby' + str(model_config['max_degree'])
elif model_config['conv'] == 'taubin':
model_name += '_conv_taubin' + str(model_config['taubin_lambda']) \
+ '_' + str(model_config['taubin_mu']) \
+ '_' + str(model_config['taubin_repeat'])
elif model_config['conv'] == 'test21':
model_name += '_' + 'conv_test21' + '_' + str(model_config['alpha']) + '_' + str(model_config['beta'])
elif model_config['conv'] == 'gcn_unnorm':
model_name += '_' + 'gcn_unnorm'
elif model_config['conv'] == 'gcn_noloop':
model_name += '_' + 'gcn_noloop'
if model_config['validate']:
model_name += '_validate'
if model_config['Model'] == 'cotraining':
model_name += '_alpha_' + str(
model_config['alpha'])
# if model_config['Model'] == 'selftraining':
# Model_to_add_label = copy.deepcopy(model_config)
# if 'Model_to_add_label' in Model_to_add_label:
# del Model_to_add_label['Model_to_add_label']
# if 'Model_to_predict' in Model_to_add_label:
# del Model_to_add_label['Model_to_predict']
# Model_to_add_label.update({'Model': 'GCN'})
# model_config['Model_to_add_label'] = Model_to_add_label
# preprocess_model_config(model_config['Model_to_add_label'])
#
# Model_to_predict = copy.deepcopy(model_config)
# if 'Model_to_add_label' in Model_to_predict:
# del Model_to_predict['Model_to_add_label']
# if 'Model_to_predict' in Model_to_predict:
# del Model_to_predict['Model_to_predict']
# Model_to_predict.update({'Model': 'GCN'})
# model_config['Model_to_predict'] = Model_to_predict
# preprocess_model_config(model_config['Model_to_predict'])
# model_name = 'Model' + str(model_config['Model']) \
# + '_{' + model_config['Model_to_add_label']['name'] + '}' \
# + '_{' + model_config['Model_to_predict']['name'] + '}'
if model_config['Model'] in ['union', 'intersection','lp']:
model_name += '_alpha_' + str(model_config['alpha'])
if model_config['Model'] in ['union', 'intersection', 'selftraining']:
Model_to_add_label = copy.deepcopy(model_config)
if 'Model_to_add_label' in Model_to_add_label:
del Model_to_add_label['Model_to_add_label']
if 'Model_to_predict' in Model_to_add_label:
del Model_to_add_label['Model_to_predict']
Model_to_add_label.update({'Model': 'GCN'})
model_config['Model_to_add_label'] = Model_to_add_label
preprocess_model_config(model_config['Model_to_add_label'])
Model_to_predict = copy.deepcopy(model_config)
if 'Model_to_add_label' in Model_to_predict:
del Model_to_predict['Model_to_add_label']
if 'Model_to_predict' in Model_to_predict:
del Model_to_predict['Model_to_predict']
Model_to_predict.update({'Model': 'GCN'})
model_config['Model_to_predict'] = Model_to_predict
preprocess_model_config(model_config['Model_to_predict'])
model_config['name'] = model_name
if __name__ == '__main__':
pass
|
185452
|
import logging
from django.core.management.base import BaseCommand
import stats.models
import openkamer.dossier
import openkamer.parliament
import openkamer.kamervraag
logger = logging.getLogger(__name__)
class Command(BaseCommand):
MAX_TRIES = 3
def add_arguments(self, parser):
# Named (optional) arguments
parser.add_argument(
'--skip-existing',
action='store_true',
dest='skip-existing',
default=False,
help='Do not create items that already exist.',
)
def handle(self, *args, **options):
openkamer.parliament.create_parliament_and_government()
failed_dossiers = openkamer.dossier.create_wetsvoorstellen_all(options['skip-existing'])
if failed_dossiers:
logger.error('the following dossiers failed: ' + str(failed_dossiers))
openkamer.kamervraag.create_kamervragen(year=2019)
stats.models.update_all()
|
185471
|
import os
import numpy as np
from PIL import Image
from fnmatch import fnmatch
def remove_low_resolution_images(path, min_resolution=20):
"""
    Remove all images whose resolution falls below a minimum from a directory tree.
Parameters
-------------------------
path: str
Path to the folder that we would like to clear out.
min_resolution: int, optional
        Images whose width or height is below this value are removed.
"""
    if not isinstance(min_resolution, int):
        raise ValueError('The min resolution should be an integer')
print('I am working on the {} directory'.format(path))
count = 0
pattern = "*.jpg"
    for dir_path, _, files in os.walk(path):
        for name in files:
            temp_name = os.path.join(dir_path, name)
            # only open files that match the pattern; PIL raises on
            # arbitrary non-image files
            if not fnmatch(temp_name, pattern):
                continue
            temp_shape = np.shape(np.array(Image.open(temp_name)))
            if temp_shape[0] < min_resolution or temp_shape[1] < min_resolution:
                os.remove(temp_name)
                count += 1
print('I am done here and I removed {} files'.format(count))
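# Illustrative usage (path and threshold are made-up examples):
# remove_low_resolution_images('./dataset/images', min_resolution=32)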
|
185501
|
import os
import unittest
from tempfile import mkstemp
from clips import Environment, Symbol, InstanceName
from clips import CLIPSError, ClassDefaultMode, LoggingRouter
DEFCLASSES = [
"""
(defclass AbstractClass (is-a USER)
(role abstract))
""",
"""(defclass InheritClass (is-a AbstractClass))""",
"""
(defclass ConcreteClass (is-a USER)
(slot Slot (type SYMBOL) (allowed-values value another-value)))
""",
"""
(defclass MessageHandlerClass (is-a USER)
(slot One)
(slot Two))
""",
"""
(defmessage-handler MessageHandlerClass test-handler ()
(+ ?self:One ?self:Two))
"""
]
DEFINSTANCES = """(definstances MAIN::defined-instances
(c1 of ConcreteClass (Slot a-slot)))
"""
class TempFile:
"""Cross-platform temporary file."""
name = None
def __enter__(self):
fobj, self.name = mkstemp()
os.close(fobj)
return self
def __exit__(self, *_):
os.remove(self.name)
class TestClasses(unittest.TestCase):
def setUp(self):
self.env = Environment()
self.env.add_router(LoggingRouter())
for defclass in DEFCLASSES:
self.env.build(defclass)
def test_classes(self):
"""Classes wrapper test."""
self.assertEqual(
self.env.default_mode, ClassDefaultMode.CONVENIENCE_MODE)
self.env.default_mode = ClassDefaultMode.CONSERVATION_MODE
self.assertEqual(
self.env.default_mode, ClassDefaultMode.CONSERVATION_MODE)
defclass = self.env.find_class('USER')
self.assertTrue(defclass in self.env.classes())
with self.assertRaises(LookupError):
self.env.find_class('NonExisting')
defclass = self.env.find_class('ConcreteClass')
defclass.make_instance('some-instance')
defclass.make_instance('test-instance')
instance = self.env.find_instance('test-instance')
self.assertTrue(instance in self.env.instances())
with self.assertRaises(LookupError):
self.env.find_instance('non-existing-instance')
self.assertTrue(self.env.instances_changed)
self.assertFalse(self.env.instances_changed)
with TempFile() as tmp:
saved = self.env.save_instances(tmp.name)
self.env.reset()
loaded = self.env.load_instances(tmp.name)
self.assertEqual(saved, loaded)
with TempFile() as tmp:
saved = self.env.save_instances(tmp.name)
self.env.reset()
loaded = self.env.restore_instances(tmp.name)
self.assertEqual(saved, loaded)
with TempFile() as tmp:
saved = self.env.save_instances(tmp.name, binary=True)
self.env.reset()
loaded = self.env.load_instances(tmp.name)
self.assertEqual(saved, loaded)
def test_abstract_class(self):
"""Abstract class test."""
superclass = self.env.find_class('USER')
subclass = self.env.find_class('InheritClass')
defclass = self.env.find_class('AbstractClass')
self.assertTrue(defclass.abstract)
self.assertFalse(defclass.reactive)
self.assertEqual(defclass.name, 'AbstractClass')
self.assertEqual(defclass.module.name, 'MAIN')
self.assertTrue(defclass.deletable)
self.assertTrue(defclass.subclass(superclass))
self.assertTrue(defclass.superclass(subclass))
self.assertEqual(tuple(defclass.subclasses()), (subclass, ))
self.assertEqual(tuple(defclass.superclasses()), (superclass, ))
with self.assertRaises(CLIPSError):
defclass.make_instance('foobar')
defclass.undefine()
def test_concrete_class(self):
"""Concrete class test."""
defclass = self.env.find_class('ConcreteClass')
self.assertFalse(defclass.abstract)
self.assertTrue(defclass.reactive)
self.assertEqual(defclass.name, 'ConcreteClass')
self.assertEqual(defclass.module.name, 'MAIN')
self.assertTrue(defclass.deletable)
self.assertFalse(defclass.watch_instances)
defclass.watch_instances = True
self.assertTrue(defclass.watch_instances)
self.assertFalse(defclass.watch_slots)
defclass.watch_slots = True
self.assertTrue(defclass.watch_slots)
defclass.undefine()
def test_slot(self):
"""Slot test."""
defclass = self.env.find_class('ConcreteClass')
slot = tuple(defclass.slots())[0]
self.assertFalse(slot.public)
self.assertTrue(slot.writable)
self.assertTrue(slot.accessible)
self.assertTrue(slot.initializable)
self.assertEqual(slot.name, 'Slot')
self.assertEqual(slot.types, ('SYMBOL', ))
self.assertEqual(slot.sources, (defclass.name, ))
self.assertEqual(slot.range, Symbol('FALSE'))
self.assertEqual(slot.facets, ('SGL', 'STC', 'INH', 'RW', 'LCL', 'RCT',
'EXC', 'PRV', 'RW', 'put-Slot'))
self.assertEqual(slot.cardinality, ())
self.assertEqual(slot.default_value, Symbol('value'))
self.assertEqual(slot.allowed_values, ('value', 'another-value'))
self.assertEqual(tuple(slot.allowed_classes()), ())
def test_make_instance(self):
"""Instance test."""
defclass = self.env.find_class('ConcreteClass')
instance_name = self.env.eval(
'(make-instance test-name-instance of ConcreteClass)')
self.assertEqual(instance_name, 'test-name-instance')
self.assertTrue(isinstance(instance_name, InstanceName))
instance = defclass.make_instance()
self.assertEqual(instance.name, 'gen1')
instance = defclass.make_instance('test-instance', Slot=Symbol('value'))
self.assertTrue(instance in defclass.instances())
self.assertEqual(instance.name, 'test-instance')
self.assertEqual(instance.instance_class, defclass)
self.assertEqual(instance['Slot'], Symbol('value'))
self.assertEqual(
str(instance), '[test-instance] of ConcreteClass (Slot value)')
self.assertEqual(
repr(instance),
'Instance: [test-instance] of ConcreteClass (Slot value)')
self.assertEqual(dict(instance), {'Slot': Symbol('value')})
instance.delete()
with self.assertRaises(LookupError):
self.env.find_instance('test-instance')
instance = defclass.make_instance('test-instance')
instance.unmake()
with self.assertRaises(LookupError):
self.env.find_instance('test-instance')
def test_make_instance_errors(self):
"""Instance errors."""
defclass = self.env.find_class('ConcreteClass')
with self.assertRaises(KeyError):
defclass.make_instance('some-instance', NonExistingSlot=1)
with self.assertRaises(TypeError):
defclass.make_instance('some-instance', Slot="wrong type")
with self.assertRaises(ValueError):
defclass.make_instance('some-instance', Slot=Symbol('wrong-value'))
def test_modify_instance(self):
"""Instance slot modification test."""
defclass = self.env.find_class('ConcreteClass')
defclass.make_instance('some-instance')
instance = defclass.make_instance('test-instance', Slot=Symbol('value'))
instance.modify_slots(Slot=Symbol('another-value'))
self.assertEqual(instance['Slot'], Symbol('another-value'))
instance.delete()
def test_message_handler(self):
"""MessageHandler test."""
defclass = self.env.find_class('MessageHandlerClass')
handler = defclass.find_message_handler('test-handler')
expected_str = "(defmessage-handler MAIN::MessageHandlerClass " + \
"test-handler () (+ ?self:One ?self:Two))"
self.assertTrue(handler.deletable)
self.assertEqual(handler.type, 'primary')
self.assertEqual(handler.name, 'test-handler')
self.assertTrue(handler in defclass.message_handlers())
self.assertEqual(str(handler), expected_str)
self.assertEqual(repr(handler), 'MessageHandler: ' + expected_str)
self.assertFalse(handler.watch)
handler.watch = True
self.assertTrue(handler.watch)
handler.undefine()
def test_message_handler_instance(self):
"""MessageHandler instance test."""
defclass = self.env.find_class('MessageHandlerClass')
instance = defclass.make_instance('test-instance', One=1, Two=2)
self.assertEqual(instance.send('test-handler'), 3)
def test_defined_instances(self):
"""DefinedInstances tests."""
self.env.build(DEFINSTANCES)
definstances = self.env.find_defined_instances('defined-instances')
listed = list(self.env.defined_instances())
self.assertEqual(definstances, listed[0])
self.assertEqual(definstances.name, 'defined-instances')
self.assertEqual(
str(definstances),
'(definstances MAIN::defined-instances (c1 of ConcreteClass (Slot a-slot)))')
self.assertEqual(definstances.module.name, 'MAIN')
self.assertTrue(definstances.deletable)
definstances.undefine()
with self.assertRaises(LookupError):
self.env.find_defined_instances('defined-instances')
with self.assertRaises(CLIPSError):
print(definstances)
|
185511
|
def test_home_retorna_status_code_200(client):
response = client.get("/")
assert response.status_code == 200
def test_home_retorna_texto_ola(client):
response = client.get("/")
assert response.text == "ola"
def test_echo_retorna_status_code_200(client):
response = client.get("/echo")
assert response.status_code == 200
def test_home_retorna_texto_echo_man(client):
response = client.get("/echo")
assert response.text == "echo man!"
|
185512
|
import numpy as np
import torch
from pytorchrl.distributions.base import Distribution
from pytorchrl.misc.tensor_utils import constant
class DiagonalGaussian(Distribution):
"""
Instead of a distribution, rather a collection of distribution.
"""
def __init__(self, means, log_stds):
"""
Parameters
----------
means (Variable):
log_stds (Variable):
"""
self.means = means
self.log_stds = log_stds
# dim is the dimension of action space
self.dim = self.means.size()[-1]
@classmethod
def from_dict(cls, means, log_stds):
"""
Parameters
----------
means (Variable):
log_std (Variable):
"""
return cls(means=means, log_stds=log_stds)
def entropy(self):
"""
Entropy of gaussian distribution is given by
1/2 * log(2 * \pi * e * sigma^2)
= log(sqrt(2 * \pi * e) * sigma))
= log(sigma) + log(sqrt(2 * \pi * e))
"""
return np.sum(self.log_stds.data.numpy() + np.log(np.sqrt(2 * np.pi * np.e)), axis=-1)
def log_likelihood(self, a):
"""
Compute log likelihood of a.
Parameters
----------
a (Variable):
Returns
-------
logli (Variable)
"""
# First cast into float tensor
a = a.type(torch.FloatTensor)
# Convert into a sample of standard normal
zs = (a - self.means) / (self.log_stds.exp())
        # Standard diagonal-Gaussian log-density:
        # log N(a) = -sum(log_std) - 0.5 * sum(z^2) - 0.5 * dim * log(2*pi).
        # Summing over axis -1 (the action dimension) yields one
        # log-likelihood per sample.
logli = - self.log_stds.sum(-1) - \
constant(0.5) * zs.pow(2).sum(-1) - \
constant(0.5) * constant(float(self.dim)) * constant(float(np.log(2 * np.pi)))
return logli
def kl_div(self, other):
"""
Given the distribution parameters of two diagonal multivariate Gaussians,
compute their KL divergence (vectorized)
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence#Kullback.E2.80.93Leibler_divergence_for_multivariate_normal_distributions
In general, for two n-dimensional distributions, we have
D_KL(N1||N2) =
1/2 ( tr(Σ_2^{-1}Σ_1) + (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) - n + ln(det(Σ_2) / det(Σ_1)) )
Here, Σ_1 and Σ_2 are diagonal. Hence this equation can be simplified.
In terms of the parameters of this method,
determinant of diagonal matrix is product of diagonal, thus
- ln(det(Σ_2) / det(Σ_1)) = sum(2 * (log_stds_2 - log_stds_1), axis=-1)
inverse of diagonal matrix is the diagonal matrix of elements at diagonal inverted, thus
- (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) = sum((means_1 - means_2)^2 / vars_2, axis=-1)
trace is sum of the diagonal elements
- tr(Σ_2^{-1}Σ_1) = sum(vars_1 / vars_2, axis=-1)
Where
- vars_1 = exp(2 * log_stds_1)
- vars_2 = exp(2 * log_stds_2)
Combined together, we have
D_KL(N1||N2)
= 1/2 ( tr(Σ_2^{-1}Σ_1) + (μ_2 - μ_1)^T Σ_2^{-1} (μ_2 - μ_1) - n + ln(det(Σ_2) / det(Σ_1)) )
= sum(1/2 * ((vars_1 - vars_2) / vars_2 + (means_1 - means_2)^2 / vars_2 + 2 * (log_stds_2 - log_stds_1)), axis=-1)
= sum( ((means_1 - means_2)^2 + vars_1 - vars_2) / (2 * vars_2) + (log_stds_2 - log_stds_1)), axis=-1)
Parameters
----------
other (DiagonalGaussian):
Returns
-------
kl_div (Variable):
"""
# Constant should wrap in Variable to multiply with another Variable
        # The expression below matches the closed-form KL derived in the
        # docstring, with vars = exp(2 * log_stds).
variance = (constant(2.0) * self.log_stds).exp()
other_variance = (constant(2.0) * other.log_stds).exp()
numerator = (self.means - other.means).pow(2) + \
variance - other_variance
denominator = constant(2.0) * other_variance + constant(1e-8)
        # Summing over axis -1 reduces the per-dimension KL terms to one
        # value per sample.
kl_div = (numerator / denominator + other.log_stds - self.log_stds).sum(-1)
return kl_div
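# Illustrative sanity check (not part of the original module): the closed-form
# kl_div above should agree with torch.distributions' diagonal-Gaussian KL
# up to the 1e-8 stabilizer. All shapes and values below are made up.
def _check_kl_against_torch():
    means1, log_stds1 = torch.zeros(1, 3), torch.zeros(1, 3)
    means2, log_stds2 = torch.ones(1, 3), torch.full((1, 3), 0.5)
    d1 = DiagonalGaussian(means1, log_stds1)
    d2 = DiagonalGaussian(means2, log_stds2)
    ref = torch.distributions.kl.kl_divergence(
        torch.distributions.Normal(means1, log_stds1.exp()),
        torch.distributions.Normal(means2, log_stds2.exp())).sum(-1)
    print(d1.kl_div(d2), ref)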
|
185535
|
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
def create_model():
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(150, 150, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# so far, the model outputs 3D feature maps (height, width, features)
# then, add two fully connected layers
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
# end the model with 10 units and a sigmoid activation
model.add(Dense(10))
model.add(Activation('sigmoid'))
return model
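# Illustrative usage (optimizer and loss are assumptions, not from the
# original training script):
# model = create_model()
# model.compile(loss='binary_crossentropy', optimizer='rmsprop',
#               metrics=['accuracy'])
# model.summary()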
|
185539
|
from xgboost_ray.tests.utils import create_parquet
def main():
create_parquet(
"example.parquet",
num_rows=1_000_000,
num_partitions=100,
num_features=8,
num_classes=2)
if __name__ == "__main__":
main()
|
185569
|
from motionAE.src.models import lstmCVAE2
from torch.distributions.kl import kl_divergence
from torch.distributions.normal import Normal
import torch
from motionAE.src.motionCVAETrainer import motionCVAETrainer
import numpy as np
class motionCVAE2Trainer(motionCVAETrainer):
def load_param(self, arg_parser, **kwargs):
super().load_param(arg_parser, **kwargs)
def build_model(self):
if self.architecture == 'lstmCVAE2':
self.model = lstmCVAE2(
self.input_length, self.dim_pose, self.dim_z, len(self.all_classes))
else:
            raise ValueError('unknown architecture: {}'.format(self.architecture))
self.model = self.model.cuda()
def sample(self, batch_size=20, used_class=None, gpu=True):
if used_class is not None:
class_vector = self.one_hot_encoder.transform([used_class])
else:
class_vector = self.one_hot_encoder.transform([self.used_class])
class_vector = np.tile(class_vector, (batch_size, 1))
        z_sample_shape = [batch_size, self.dim_z]
z_sample = np.random.normal(size=z_sample_shape)
self.model.decoder.eval()
if gpu:
z_sample = self._to_torch(z_sample)
class_vector = self._to_torch(class_vector)
else:
z_sample = torch.from_numpy(z_sample.astype(np.float32))
class_vector = torch.from_numpy(class_vector.astype(np.float32))
with torch.no_grad():
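            # Reparameterize with the learned class-conditional prior:
            # scale the standard-normal draw by the class encoder's std and
            # shift by its mean, i.e. z ~ N(mu_c, exp(log_var_c)).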
mu_c, log_var_c = self.model.encoder_class(class_vector)
std = torch.exp(0.5 * log_var_c)
z_sample = z_sample * std + mu_c
motions = self.model.decoder(z_sample, class_vector)
if gpu:
motions = self._to_numpy(motions)
else:
motions = motions.numpy()
self.model.decoder.train()
return motions
def kld_loss(self, *args):
mu = args[3]
log_var = args[4]
mu_c = args[6]
log_var_c = args[7]
q = Normal(mu, torch.exp(0.5 * log_var))
pi = Normal(mu_c, torch.exp(0.5 * log_var_c))
return torch.mean(torch.sum(kl_divergence(q, pi), dim=1))
# sigma1 : mu, log_var
# sigma2 : mu_c, log_var_c
# return torch.mean(-0.5 * torch.sum(1 - log_var_c + log_var - ((mu - mu_c) ** 2 + log_var.exp()) / log_var_c.exp() , dim=1), dim=0).cuda()
|
185578
|
import ui
def hide_action(sender):
s = sender.superview
s['view1'].hidden = True
s2 = s['view2']
def a():
        s2.transform = ui.Transform.scale(1.0, 1.33).concat(ui.Transform.translation(0, -100))
ui.animate(a, 1.0)
def reveal_action(sender):
s = sender.superview
s2 = s['view2']
def a():
        s2.transform = ui.Transform.scale(1.0, 1.0).concat(ui.Transform.translation(0, 0))
s['view1'].hidden = False
ui.animate(a, 1.0)
v = ui.load_view()
v.present('sheet')
|
185593
|
import torch
import torch.nn as nn
class contrastive_loss(nn.Module):
def __init__(self, tau=1, normalize=False):
super(contrastive_loss, self).__init__()
self.tau = tau
self.normalize = normalize
def forward(self, xi, xj):
x = torch.cat((xi, xj), dim=0)
is_cuda = x.is_cuda
sim_mat = torch.mm(x, x.T)
if self.normalize:
sim_mat_denom = torch.mm(torch.norm(x, dim=1).unsqueeze(1), torch.norm(x, dim=1).unsqueeze(1).T)
sim_mat = sim_mat / sim_mat_denom.clamp(min=1e-16)
sim_mat = torch.exp(sim_mat / self.tau)
        # The diagonal (self-similarity) terms are not masked out here because
        # masked_fill_ is not differentiable -> instead, exp(1 / tau) is
        # subtracted from the row sums when the loss is computed below.
        # diag_ind = torch.eye(xi.size(0) * 2).bool()
        # diag_ind = diag_ind.cuda() if is_cuda else diag_ind
        # sim_mat = sim_mat.masked_fill_(diag_ind, 0)
# top
if self.normalize:
sim_mat_denom = torch.norm(xi, dim=1) * torch.norm(xj, dim=1)
sim_match = torch.exp(torch.sum(xi * xj, dim=-1) / sim_mat_denom / self.tau)
else:
sim_match = torch.exp(torch.sum(xi * xj, dim=-1) / self.tau)
sim_match = torch.cat((sim_match, sim_match), dim=0)
norm_sum = torch.exp(torch.ones(x.size(0)) / self.tau)
norm_sum = norm_sum.cuda() if is_cuda else norm_sum
loss = torch.mean(-torch.log(sim_match / (torch.sum(sim_mat, dim=-1) - norm_sum)))
return loss
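# Illustrative usage (batch size and embedding dim are made up): xi and xj
# hold the representations of two augmented views of the same batch.
# criterion = contrastive_loss(tau=0.5, normalize=True)
# xi, xj = torch.randn(8, 128), torch.randn(8, 128)
# loss = criterion(xi, xj)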
|
185645
|
from inspect import getmembers
from fastapi import FastAPI
from tortoise.contrib.starlette import register_tortoise
from app.config import tortoise_config
from app.utils.api.router import TypedAPIRouter
def init(app: FastAPI):
"""
    Init routers, database connections, etc.
:return:
"""
init_routers(app)
init_db(app)
def init_db(app: FastAPI):
"""
Init database models.
:param app:
:return:
"""
register_tortoise(
app,
db_url=tortoise_config.db_url,
generate_schemas=tortoise_config.generate_schemas,
modules=tortoise_config.modules,
)
def init_routers(app: FastAPI):
"""
Initialize routers defined in `app.api`
:param app:
:return:
"""
from app.core import routers
routers = [o[1] for o in getmembers(routers) if isinstance(o[1], TypedAPIRouter)]
for router in routers:
app.include_router(**router.dict())
|
185657
|
from EventEngine.DyEvent import *
class DyStockStrategyState:
running = 'sRunning'
monitoring = 'sMonitoring'
backTesting = 'sBackTesting'
def __init__(self, *states):
self._state = None
self.add(*states)
@property
def state(self):
if self._state is None:
return '空'
state = self._state.replace(self.running, '运行')
state = state.replace(self.monitoring, '监控')
state = state.replace(self.backTesting, '回测')
return state
def add(self, *states):
if self._state:
self._state += ('+' + '+'.join(states))
else:
if states:
self._state = '+'.join(states)
def isState(self, state):
if self._state is None:
if state is None:
return True
else:
return False
if state in self._state:
return True
return False
def remove(self, *states):
if not self._state: return
curStates = self._state.split('+')
for state in states:
if state in curStates:
curStates.remove(state)
curStates = '+'.join(curStates)
if not curStates:
curStates = None
self._state = curStates
def checkState(self, state, strategyCls, eventEngine):
if self.isState(state):
return
self.add(state)
if self._state == state:
event = DyEvent(DyEventType.startStockCtaStrategy)
event.data['class'] = strategyCls
event.data['state'] = DyStockStrategyState(self._state)
else:
event = DyEvent(DyEventType.changeStockCtaStrategyState)
event.data['class'] = strategyCls
event.data['state'] = DyStockStrategyState(*self._state.split('+'))
eventEngine.put(event)
def uncheckState(self, state, strategyCls, eventEngine):
if not self.isState(state):
return
self.remove(state)
if not self._state:
event = DyEvent(DyEventType.stopStockCtaStrategy)
event.data['class'] = strategyCls
else:
event = DyEvent(DyEventType.changeStockCtaStrategyState)
event.data['class'] = strategyCls
event.data['state'] = DyStockStrategyState(self._state)
eventEngine.put(event)
def checkAll(self, strategyCls, eventEngine):
""" check '运行' 和 '监控' """
if self.isState(DyStockStrategyState.running) and self.isState(DyStockStrategyState.monitoring):
return
if self._state is None:
event = DyEvent(DyEventType.startStockCtaStrategy)
event.data['class'] = strategyCls
event.data['state'] = DyStockStrategyState(DyStockStrategyState.running, DyStockStrategyState.monitoring)
self.add(DyStockStrategyState.running, DyStockStrategyState.monitoring)
else:
if self.isState(DyStockStrategyState.running):
self.add(DyStockStrategyState.monitoring)
else:
self.add(DyStockStrategyState.running)
event = DyEvent(DyEventType.changeStockCtaStrategyState)
event.data['class'] = strategyCls
event.data['state'] = DyStockStrategyState(DyStockStrategyState.monitoring, DyStockStrategyState.running)
eventEngine.put(event)
def uncheckAll(self, strategyCls, eventEngine):
if self._state is None:
return
self._state = None
event = DyEvent(DyEventType.stopStockCtaStrategy)
event.data['class'] = strategyCls
eventEngine.put(event)
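# Illustrative usage of the state container alone (event engine omitted):
# s = DyStockStrategyState(DyStockStrategyState.running)
# s.add(DyStockStrategyState.monitoring)   # -> 'sRunning+sMonitoring'
# assert s.isState(DyStockStrategyState.running)
# s.remove(DyStockStrategyState.running)   # -> only 'sMonitoring' remains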
|
185658
|
import os
import sys
import random
import datetime
import time
import shutil
import numpy as np
import pandas as pd
import scipy.io
import scipy.signal
import math
from skimage.measure import compare_ssim as sk_ssim
import torch
from torch import nn
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
class AverageMeter(object):
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def calc_psnr(output, target):
    mse = nn.MSELoss()(output, target)
    # PSNR = 10 * log10(MAX^2 / MSE), with the peak signal value squared
    psnr = 10 * math.log10(torch.max(output) ** 2 / mse)
    return psnr
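# Quick arithmetic check: with peak value 1.0 and MSE 0.01,
# PSNR = 10 * log10(1.0 ** 2 / 0.01) = 20 dB.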
def calc_ssim(output, target):
    ssim = 0.
    output = output.cpu().detach().numpy()
    target = target.cpu().detach().numpy()
    if output.ndim == 4:
        batch_size = output.shape[0]
        for i in range(batch_size):
            # move the channel axis last: (C, H, W) -> (H, W, C)
            output_i = np.moveaxis(np.squeeze(output[i, :, :, :]), 0, -1)
            target_i = np.moveaxis(np.squeeze(target[i, :, :, :]), 0, -1)
            # data_range is the value span of the reference image
            ssim += sk_ssim(output_i, target_i,
                            data_range=target_i.max() - target_i.min(),
                            multichannel=True)
    else:
        output_i = np.moveaxis(np.squeeze(output), 0, -1)
        target_i = np.moveaxis(np.squeeze(target), 0, -1)
        batch_size = 1
        ssim += sk_ssim(output_i, target_i,
                        data_range=target_i.max() - target_i.min(),
                        multichannel=True)
    ssim = ssim / batch_size
    return ssim
|
185691
|
import argparse
import pyBigWig
def extract_regions(bwfile, bedfile, chsizefile, bwoutfile):
bw = pyBigWig.open(bwfile)
bed_el = []
intervals = []
for line in open(bedfile):
cols = line.strip().split()
intervals.append(bw.intervals(cols[0], int(cols[1]), int(cols[2])))
bed_el.append((cols[0], int(cols[1]), int(cols[2])))
bwout = pyBigWig.open(bwoutfile, 'w')
# add header
header = []
for line in open(chsizefile):
line = line.strip().split()
header.append((line[0], int(line[1])))
bwout.addHeader(header)
for i, iv in enumerate(intervals):
chroms = [bed_el[i][0]] * len(iv)
starts = [t[0] for t in iv]
ends = [t[1] for t in iv]
values = [t[2] for t in iv]
bwout.addEntries(chroms, starts, ends, values)
bw.close()
bwout.close()
def main():
parser = argparse.ArgumentParser(
description='Extract a set of BED regions from a BigWig file')
parser.add_argument('-bw', '--bigwig', type=str,
default='',
help="input BigWig file")
parser.add_argument('-b', '--bed', type=str,
default='',
help="input BED file")
parser.add_argument('-c', '--chromsizes', type=str,
default='',
help="input TSV file with chromosome size")
parser.add_argument('-bwout', '--bigwigout', type=str,
default='',
help="output BigWig file")
args = parser.parse_args()
extract_regions(bwfile=args.bigwig, bedfile=args.bed,
chsizefile=args.chromsizes, bwoutfile=args.bigwigout)
if __name__ == '__main__':
main()
|
185695
|
from common import *
import datetime
import argparse
import time
here = os.path.abspath(os.path.dirname(__file__))
app_dir = os.path.join(here, '../dgl/multi_gpu')
"""
if log_dir is not None, it will only parse logs
"""
def motivation_test(log_folder=None):
tic = time.time()
if log_folder:
mock = True
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
mock = False
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=2,
num_col=4
).update_col_definition(
col_id=0,
definition='sample_time'
).update_col_definition(
col_id=1,
definition='copy_time'
).update_col_definition(
col_id=2,
definition='train_time'
).update_col_definition(
col_id=3,
definition='epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 3],
BOOL_use_gpu_sampling='use_gpu_sampling'
).update_row_definition(
row_id=1,
col_range=[0, 3],
BOOL_use_gpu_sampling='no_use_gpu_sampling'
).create()
ConfigList(
test_group_name='DGL motivation test'
).select(
'app',
[App.gcn]
).select(
'dataset',
[Dataset.papers100M]
).override(
'num_epoch',
[10]
).override(
'num_sampling_worker',
[24] # Because it may race with extracting.
).override(
'BOOL_use_gpu_sampling',
['use_gpu_sampling', 'no_use_gpu_sampling']
).override(
'BOOL_pipelining',
['no_pipelining']
).override(
'devices',
['0'],
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('motivation test uses {:.4f} secs'.format(toc - tic))
def breakdown_test(log_folder=None):
tic = time.time()
if log_folder:
mock = True
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
mock = False
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=12,
num_col=3
).update_col_definition(
col_id=0,
definition='sample_time'
).update_col_definition(
col_id=1,
definition='copy_time'
).update_col_definition(
col_id=2,
definition='train_time'
).update_row_definition(
row_id=0,
col_range=[0, 2],
app=App.gcn,
dataset=Dataset.products
).update_row_definition(
row_id=1,
col_range=[0, 2],
app=App.gcn,
dataset=Dataset.papers100M
).update_row_definition(
row_id=2,
col_range=[0, 2],
app=App.gcn,
dataset=Dataset.twitter
).update_row_definition(
row_id=3,
col_range=[0, 2],
app=App.gcn,
dataset=Dataset.uk_2006_05
).update_row_definition(
row_id=4,
col_range=[0, 2],
app=App.graphsage,
dataset=Dataset.products
).update_row_definition(
row_id=5,
col_range=[0, 2],
app=App.graphsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=6,
col_range=[0, 2],
app=App.graphsage,
dataset=Dataset.twitter
).update_row_definition(
row_id=7,
col_range=[0, 2],
app=App.graphsage,
dataset=Dataset.uk_2006_05
).update_row_definition(
row_id=8,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.products
).update_row_definition(
row_id=9,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=10,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.twitter
).update_row_definition(
row_id=11,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.uk_2006_05
).create()
ConfigList(
test_group_name='DGL breakdown test'
).select(
'app',
[App.gcn, App.graphsage, App.pinsage]
).override(
'num_epoch',
[10]
).combo(
'app',
[App.pinsage],
'num_sampling_worker',
[40]
# ).combo(
# 'app',
# [App.gcn, App.graphsage],
# 'num_sampling_worker',
# [8]
).combo(
'app',
[App.gcn],
'fanout',
['5 10 15']
).combo(
'app',
[App.graphsage],
'fanout',
['25 10']
# ).combo(
# 'app',
# [App.pinsage],
# 'num_epoch',
# [1]
).combo(
'app',
[App.gcn, App.graphsage],
'BOOL_use_gpu_sampling',
['use_gpu_sampling']
).override(
'BOOL_pipelining',
['no_pipelining']
).override(
'devices',
['0'],
).combo(
'dataset',
[Dataset.uk_2006_05],
'BOOL_validate_configs',
['validate_configs']
).combo(
'app',
[App.pinsage],
'BOOL_validate_configs',
['validate_configs']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('breakdown test uses {:.4f} secs'.format(toc - tic))
def pinsage_breakdown_test(log_folder=None, mock=False):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=4,
num_col=3
).update_col_definition(
col_id=0,
definition='sample_time'
).update_col_definition(
col_id=1,
definition='copy_time'
).update_col_definition(
col_id=2,
definition='train_time'
).update_row_definition(
row_id=0,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.products
).update_row_definition(
row_id=1,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.twitter
).update_row_definition(
row_id=2,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=3,
col_range=[0, 2],
app=App.pinsage,
dataset=Dataset.uk_2006_05
).create()
ConfigList(
test_group_name='DGL PinSAGE breakdown test'
).select(
'app',
[App.pinsage]
).override(
'num_epoch',
[10]
).override(
'BOOL_use_gpu_sampling',
['use_gpu_sampling']
).override(
'BOOL_pipelining',
['no_pipelining']
).override(
'devices',
['0']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('pinsage breakdown test uses {:.4f} secs'.format(toc - tic))
def scalability_test(log_folder=None, mock=False):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=8,
num_col=4
).update_col_definition(
col_id=0,
definition='sample_time'
).update_col_definition(
col_id=1,
definition='copy_time'
).update_col_definition(
col_id=2,
definition='train_time'
).update_col_definition(
col_id=3,
definition='epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 2],
devices='0',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=1,
col_range=[0, 2],
devices='0 1',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=2,
col_range=[0, 2],
devices='0 1 2',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=3,
col_range=[0, 2],
devices='0 1 2 3',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=4,
col_range=[0, 2],
devices='0 1 2 3 4',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=5,
col_range=[0, 2],
devices='0 1 2 3 4 5',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=6,
col_range=[0, 2],
devices='0 1 2 3 4 5 6',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=7,
col_range=[0, 2],
devices='0 1 2 3 4 5 6 7',
BOOL_pipelining='no_pipelining'
).update_row_definition(
row_id=0,
col_range=[3, 3],
devices='0',
BOOL_pipelining='pipelining'
).update_row_definition(
row_id=1,
col_range=[3, 3],
devices='0 1',
BOOL_pipelining='pipelining'
).update_row_definition(
row_id=2,
col_range=[3, 3],
devices='0 1 2',
BOOL_pipelining='pipelining'
).update_row_definition(
row_id=3,
col_range=[3, 3],
devices='0 1 2 3',
BOOL_pipelining='pipelining'
).update_row_definition(
row_id=4,
col_range=[3, 3],
devices='0 1 2 3 4',
BOOL_pipelining='pipelining'
).update_row_definition(
row_id=5,
col_range=[3, 3],
devices='0 1 2 3 4 5',
BOOL_pipelining='pipelining'
).update_row_definition(
row_id=6,
col_range=[3, 3],
devices='0 1 2 3 4 5 6',
BOOL_pipelining='pipelining'
).update_row_definition(
row_id=7,
col_range=[3, 3],
devices='0 1 2 3 4 5 6 7',
BOOL_pipelining='pipelining'
).create()
ConfigList(
test_group_name='DGL scalability test'
).select(
'app',
[App.gcn]
).select(
'dataset',
[Dataset.papers100M]
).override(
'fanout',
['5 10 15']
).override(
'num_epoch',
[10]
).override(
'devices',
['0', '0 1', '0 1 2', '0 1 2 3', '0 1 2 3 4',
'0 1 2 3 4 5', '0 1 2 3 4 5 6', '0 1 2 3 4 5 6 7'],
).override(
'BOOL_pipelining',
['pipelining', 'no_pipelining']
).override(
'BOOL_use_gpu_sampling',
['use_gpu_sampling']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir,
left_wrap='',
right_wrap='',
sep='\t'
)
toc = time.time()
print('scalability test uses {:.4f} secs'.format(toc - tic))
def scalability_twitter_test(log_folder=None, mock=False):
tic = time.time()
if log_folder:
log_dir = os.path.join(os.path.join(here, f'run-logs/{log_folder}'))
else:
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=8,
num_col=1
).update_col_definition(
col_id=0,
definition='epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 0],
devices='0',
).update_row_definition(
row_id=1,
col_range=[0, 0],
devices='0 1',
).update_row_definition(
row_id=2,
col_range=[0, 0],
devices='0 1 2',
).update_row_definition(
row_id=3,
col_range=[0, 0],
devices='0 1 2 3',
).update_row_definition(
row_id=4,
col_range=[0, 0],
devices='0 1 2 3 4',
).update_row_definition(
row_id=5,
col_range=[0, 0],
devices='0 1 2 3 4 5',
).update_row_definition(
row_id=6,
col_range=[0, 0],
devices='0 1 2 3 4 5 6',
).update_row_definition(
row_id=7,
col_range=[0, 0],
devices='0 1 2 3 4 5 6 7',
).create()
ConfigList(
test_group_name='DGL Twitter scalability test'
).select(
'app',
[App.gcn]
).select(
'dataset',
[Dataset.twitter]
).override(
'fanout',
['5 10 15']
).override(
'num_epoch',
[10]
).override(
'devices',
['0', '0 1', '0 1 2', '0 1 2 3', '0 1 2 3 4',
'0 1 2 3 4 5', '0 1 2 3 4 5 6', '0 1 2 3 4 5 6 7'],
).override(
'BOOL_pipelining',
['pipelining']
).override(
'BOOL_use_gpu_sampling',
['use_gpu_sampling']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir,
left_wrap='',
right_wrap='',
sep='\t'
)
toc = time.time()
print('scalability twitter test uses {:.4f} secs'.format(toc - tic))
def overall_perf_test(log_folder=None):
tic = time.time()
if log_folder:
mock = True
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
else:
mock = False
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=6,
num_col=1
).update_col_definition(
col_id=0,
definition='epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 0],
app=App.gcn,
dataset=Dataset.products
).update_row_definition(
row_id=1,
col_range=[0, 0],
app=App.gcn,
dataset=Dataset.papers100M
).update_row_definition(
row_id=2,
col_range=[0, 0],
app=App.gcn,
dataset=Dataset.twitter
).update_row_definition(
row_id=3,
col_range=[0, 0],
app=App.graphsage,
dataset=Dataset.products
).update_row_definition(
row_id=4,
col_range=[0, 0],
app=App.graphsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=5,
col_range=[0, 0],
app=App.graphsage,
dataset=Dataset.twitter
).create()
ConfigList(
test_group_name='DGL overall performance test'
).select(
'app',
[App.gcn, App.graphsage]
).select(
'dataset',
[Dataset.products, Dataset.papers100M, Dataset.twitter]
).override(
'num_epoch',
[10]
).override(
'devices',
['0 1 2 3 4 5 6 7'],
).override(
'BOOL_use_gpu_sampling',
['use_gpu_sampling']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).override(
'BOOL_pipelining',
['pipelining']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('DGL overall performance test uses {:.4f} secs'.format(toc - tic))
def pinsage_overall_test(log_folder=None, mock=False):
tic = time.time()
if log_folder:
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
else:
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=4,
num_col=1
).update_col_definition(
col_id=0,
definition='epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 0],
app=App.pinsage,
dataset=Dataset.products
).update_row_definition(
row_id=1,
col_range=[0, 0],
app=App.pinsage,
dataset=Dataset.twitter
).update_row_definition(
row_id=2,
col_range=[0, 0],
app=App.pinsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=3,
col_range=[0, 0],
app=App.pinsage,
dataset=Dataset.uk_2006_05
).create()
ConfigList(
test_group_name='DGL PinSAGE overall performance test'
).select(
'app',
[App.pinsage]
).override(
'num_epoch',
[10]
).override(
'devices',
['0 1 2 3 4 5 6 7'],
).override(
'BOOL_use_gpu_sampling',
['use_gpu_sampling']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).override(
'BOOL_pipelining',
['pipelining']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('DGL Pinsage overall performance test uses {:.4f} secs'.format(toc - tic))
def cpu_overall_perf_test(log_folder=None):
tic = time.time()
if log_folder:
mock = True
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
else:
mock = False
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=6,
num_col=1
).update_col_definition(
col_id=0,
definition='epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 0],
app=App.gcn,
dataset=Dataset.products
).update_row_definition(
row_id=1,
col_range=[0, 0],
app=App.gcn,
dataset=Dataset.papers100M
).update_row_definition(
row_id=2,
col_range=[0, 0],
app=App.gcn,
dataset=Dataset.twitter
).update_row_definition(
row_id=3,
col_range=[0, 0],
app=App.graphsage,
dataset=Dataset.products
).update_row_definition(
row_id=4,
col_range=[0, 0],
app=App.graphsage,
dataset=Dataset.papers100M
).update_row_definition(
row_id=5,
col_range=[0, 0],
app=App.graphsage,
dataset=Dataset.twitter
).create()
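    # Hedged reading of the chain below: `combo` appears to scope an override
    # to the listed apps, so PinSAGE runs a single epoch while GCN/GraphSAGE
    # keep ten epochs and use CPU-based sampling with 24 worker processes.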
ConfigList(
test_group_name='DGL CPU overall performance test'
).override(
'num_epoch',
[10]
).combo(
'app',
[App.pinsage],
'num_epoch',
[1]
).override(
'devices',
['0 1 2 3 4 5 6 7'],
).override(
'num_sampling_worker',
[24]
).override(
'BOOL_pipelining',
['pipelining']
).combo(
'app',
[App.gcn, App.graphsage],
'BOOL_use_gpu_sampling',
['no_use_gpu_sampling']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('DGL CPU overall performance test uses {:.4f} secs'.format(toc - tic))
def motivation2_test(log_folder, mock):
tic = time.time()
if log_folder:
        log_dir = os.path.join(here, f'run-logs/{log_folder}')
else:
log_dir = os.path.join(
here, f'run-logs/logs_dgl_{datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}')
log_table = LogTable(
num_row=1,
num_col=4
).update_col_definition(
col_id=0,
definition='sample_time'
).update_col_definition(
col_id=1,
definition='copy_time'
).update_col_definition(
col_id=2,
definition='train_time'
).update_col_definition(
col_id=3,
definition='epoch_time'
).update_row_definition(
row_id=0,
col_range=[0, 3],
app=App.gcn
).create()
ConfigList(
test_group_name='DGL Motivation 2 test'
).select(
'app',
[App.gcn]
).select(
'dataset',
[Dataset.papers100M]
).override(
'num_epoch',
[3]
).override(
'devices',
['0 1 2 3 4 5 6 7'],
).override(
'num_sampling_worker',
[40]
).override(
'BOOL_pipelining',
['no_pipelining']
).override(
'BOOL_use_gpu_sampling',
['no_use_gpu_sampling']
# ).override(
# 'BOOL_validate_configs',
# ['validate_configs']
).run(
appdir=app_dir,
logdir=log_dir,
mock=mock
).parse_logs(
logtable=log_table,
logdir=log_dir
)
toc = time.time()
print('DGL motivation 2 test uses {:.4f} secs'.format(toc - tic))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("DGL runner")
argparser.add_argument('-l', '--log-folder', default=None)
argparser.add_argument('-m', '--mock', action='store_true', default=False)
args = argparser.parse_args()
# motivation_test(args.log_folder)
# breakdown_test(args.log_folder)
# scalability_test(args.log_folder, args.mock)
# scalability_pipeline_test(args.log_folder, args.mock)
# overall_perf_test(args.log_folder)
# cpu_overall_perf_test(args.log_folder)
# motivation2_test(args.log_folder, args.mock)
# scalability_twitter_test(args.log_folder, args.mock)
pinsage_breakdown_test(args.log_folder, args.mock)
# pinsage_overall_test(args.log_folder, args.mock)
|
185696
|
from .translated_object import TranslatedObject
from .base_translator import BaseTranslator
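# A User-Agent string mimicking the official Android Google Translate client;
# presumably this makes the unofficial API serve the same responses it gives
# the mobile app (an inference, not documented in this file).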
BASE_HEADERS: dict = {
"User-Agent": "GoogleTranslate/6.6.1.RC09.302039986 (Linux; U; Android 9; Redmi Note 8)",
}
|
185698
|
import io
import pathlib
import pytest
from mopidy.m3u import translator
from mopidy.m3u.translator import path_to_uri
from mopidy.models import Playlist, Ref, Track
def loads(s, basedir):
return translator.load_items(io.StringIO(s), basedir)
def dumps(items):
fp = io.StringIO()
translator.dump_items(items, fp)
return fp.getvalue()
@pytest.mark.parametrize(
"path,scheme,expected",
[
("test", None, "m3u:test"),
("test.m3u", None, "m3u:test.m3u"),
("./test.m3u", None, "m3u:test.m3u"),
("foo/../test.m3u", None, "m3u:test.m3u"),
("Test Playlist.m3u", None, "m3u:Test%20Playlist.m3u"),
("test.mp3", "file", "file:///test.mp3"),
],
)
def test_path_to_uri(path, scheme, expected):
if scheme is not None:
assert path_to_uri(pathlib.Path(path), scheme) == expected
else:
assert path_to_uri(pathlib.Path(path)) == expected
def test_latin1_path_to_uri():
bytes_path = "æøå.m3u".encode("latin-1")
path = pathlib.Path(bytes_path.decode(errors="surrogateescape"))
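    # The undecodable latin-1 bytes survive decoding via surrogateescape and are
    # percent-encoded back as the raw bytes (%E6%F8%E5) rather than as UTF-8.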
assert translator.path_to_uri(path) == "m3u:%E6%F8%E5.m3u"
def test_utf8_path_to_uri():
bytes_path = "æøå.m3u".encode()
path = pathlib.Path(bytes_path.decode())
assert translator.path_to_uri(path) == "m3u:%C3%A6%C3%B8%C3%A5.m3u"
@pytest.mark.parametrize(
"path,expected",
[
("test", "test"),
("test.m3u", "test"),
("../test.m3u", "test"),
("testæ.m3u", "testæ"),
],
)
def test_name_from_path(path, expected):
assert translator.name_from_path(pathlib.Path(path)) == expected
def test_path_from_name():
from mopidy.m3u.translator import path_from_name
assert path_from_name("test") == pathlib.Path("test")
assert path_from_name("test", ".m3u") == pathlib.Path("test.m3u")
assert path_from_name("foo/bar", sep="-") == pathlib.Path("foo-bar")
@pytest.mark.parametrize(
"path,expected",
[
("test.m3u", ("m3u:test.m3u", "test")),
("Test Playlist.m3u", ("m3u:Test%20Playlist.m3u", "Test Playlist")),
],
)
def test_path_to_ref(path, expected):
from mopidy.m3u.translator import path_to_ref
result = path_to_ref(pathlib.Path(path))
assert Ref.playlist(uri=expected[0], name=expected[1]) == result
@pytest.mark.parametrize(
"contents,basedir,expected",
[
("", ".", None),
("test.mp3", "/playlists", ("file:///playlists/test.mp3", "test")),
("../test.mp3", "/playlists", ("file:///test.mp3", "test")),
("/test.mp3", ".", ("file:///test.mp3", "test")),
("file:///test.mp3", ".", ("file:///test.mp3", None)),
("http://example.com/stream", ".", ("http://example.com/stream", None)),
(
"#EXTM3U\n#EXTINF:42,Test\nfile:///test.mp3\n",
".",
("file:///test.mp3", "Test"),
),
(
"#EXTM3U\n#EXTINF:-1,Test\nhttp://example.com/stream\n",
".",
("http://example.com/stream", "Test"),
),
],
)
def test_load_items(contents, basedir, expected):
result = loads(contents, pathlib.Path(basedir))
if expected is not None:
assert [Ref.track(uri=expected[0], name=expected[1])] == result
else:
assert [] == result
def test_dump_items():
assert dumps([]) == ""
assert dumps([Ref.track(uri="file:///test.mp3")]) == ("file:///test.mp3\n")
assert dumps([Ref.track(uri="file:///test.mp3", name="test")]) == (
"#EXTM3U\n" "#EXTINF:-1,test\n" "file:///test.mp3\n"
)
assert dumps([Track(uri="file:///test.mp3", name="test", length=42)]) == (
"#EXTM3U\n" "#EXTINF:-1,test\n" "file:///test.mp3\n"
)
assert dumps([Track(uri="http://example.com/stream")]) == (
"http://example.com/stream\n"
)
assert dumps([Track(uri="http://example.com/stream", name="Test")]) == (
"#EXTM3U\n" "#EXTINF:-1,Test\n" "http://example.com/stream\n"
)
def test_playlist():
from mopidy.m3u.translator import playlist
path = pathlib.Path("test.m3u")
assert playlist(path) == Playlist(uri="m3u:test.m3u", name="test")
assert playlist(path, [Ref(uri="file:///test.mp3")], 1) == Playlist(
uri="m3u:test.m3u",
name="test",
tracks=[Track(uri="file:///test.mp3")],
last_modified=1000,
)
|
185734
|
import random
import unittest
from podman import PodmanClient
from podman.errors import NotFound
from podman.tests.integration import base
class VolumesIntegrationTest(base.IntegrationTest):
def setUp(self):
super().setUp()
self.client = PodmanClient(base_url=self.socket_uri)
self.addCleanup(self.client.close)
def test_volume_crud(self):
"""Test Volume CRUD."""
volume_name = f"volume_{random.getrandbits(160):x}"
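        # 160 random bits rendered as hex make a name that is vanishingly
        # unlikely to collide with volumes left over from earlier runs.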
self.assertFalse(
            self.client.volumes.exists(volume_name), "Storage is corrupted from a previous run"
)
with self.subTest("Create"):
volume = self.client.volumes.create(volume_name)
self.assertEqual(volume.name, volume_name)
with self.subTest("Get"):
actual = self.client.volumes.get(volume_name)
self.assertDictEqual(actual.attrs, volume.attrs)
self.assertTrue(self.client.volumes.exists(volume_name))
with self.subTest("List"):
report = self.client.volumes.list()
names = [i.name for i in report]
self.assertIn(volume_name, names)
with self.subTest("Remove"):
self.client.volumes.remove(volume_name, force=True)
with self.assertRaises(NotFound):
self.client.volumes.get(volume_name)
def test_inspect_404(self):
with self.assertRaises(NotFound):
self.client.volumes.get("NoSuchVolume")
if __name__ == '__main__':
unittest.main()
|
185747
|
import unittest
from test.robotTestUtil import RobotTestUtil
class MyTestCase(unittest.TestCase):
def test_pose(self):
robot = RobotTestUtil.make_fake_dash()
packet = {}
packet['2002'] = {
'x' : 1.2,
'y' : 3.4,
'degree': 5.6,
}
robot.sensors.parse(packet)
sensor = robot.sensors.pose
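        # The expected values below imply the parser rotates the reported pose
        # by 90 degrees into the robot frame: x' = -y, y' = x (inferred from
        # the asserted outputs, not from parser documentation).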
        self.assertAlmostEqual(sensor.x, -3.4)
        self.assertAlmostEqual(sensor.y, 1.2)
        self.assertAlmostEqual(sensor.degrees, 5.6)
        self.assertIsNone(sensor.watermark_measured)
        self.assertAlmostEqual(sensor.watermark_inferred, 0.0)
packet['2002'] = {
'x' : 1.2,
'y' : 3.4,
'degree' : 5.6,
'watermark': 3,
}
robot.sensors.parse(packet)
sensor = robot.sensors.pose
        self.assertAlmostEqual(sensor.x, -3.4)
        self.assertAlmostEqual(sensor.y, 1.2)
        self.assertAlmostEqual(sensor.degrees, 5.6)
        self.assertAlmostEqual(sensor.watermark_measured, 3)
        self.assertAlmostEqual(sensor.watermark_inferred, 3)
if __name__ == '__main__':
unittest.main()
|
185753
|
from PIL import Image
import numpy as np
import cv2
PAPER_EXT = {".gloria_chx": "gloria_chx_open_image"}
def gloria_chx_open_image(img):
def _resize_img(img, scale):
"""
Args:
img - image as numpy array (cv2)
scale - desired output image-size as scale x scale
Return:
image resized to scale x scale with shortest dimension 0-padded
"""
size = img.shape
max_dim = max(size)
max_ind = size.index(max_dim)
# Resizing
        if max_ind == 0:
            # image is taller than it is wide
            wpercent = scale / float(size[0])
            hsize = int(float(size[1]) * wpercent)
            desirable_size = (scale, hsize)
        else:
            # image is wider than it is tall
            hpercent = scale / float(size[1])
            wsize = int(float(size[0]) * hpercent)
            desirable_size = (wsize, scale)
        resized_img = cv2.resize(
            img, desirable_size[::-1], interpolation=cv2.INTER_AREA
        )  # cv2.resize expects (width, height), so the (height, width) tuple is flipped
# Padding
if max_ind == 0:
# height fixed at scale, pad the width
pad_size = scale - resized_img.shape[1]
left = int(np.floor(pad_size / 2))
right = int(np.ceil(pad_size / 2))
top = int(0)
bottom = int(0)
else:
# width fixed at scale, pad the height
pad_size = scale - resized_img.shape[0]
top = int(np.floor(pad_size / 2))
bottom = int(np.ceil(pad_size / 2))
left = int(0)
right = int(0)
resized_img = np.pad(
resized_img, [(top, bottom), (left, right)], "constant", constant_values=0
)
return resized_img
x = cv2.imread(str(img), 0)
x = _resize_img(x, 256)
img = Image.fromarray(x).convert("RGB")
return img
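
# Minimal usage sketch (not part of the original module): "example.png" is a
# placeholder path for any readable grayscale image.
if __name__ == "__main__":
    out = gloria_chx_open_image("example.png")
    print(out.size, out.mode)  # expected: (256, 256) RGB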
|
185760
|
import copy
from dataclasses import dataclass, field
import functools
from typing import Optional, Tuple
from pycparser import c_ast as ca
from .compiler import Compiler
from .randomizer import Randomizer
from .scorer import Scorer
from .perm.perm import EvalState
from .perm.ast import apply_ast_perms
from .helpers import try_remove
from .profiler import Profiler
from . import ast_util
@dataclass
class CandidateResult:
"""Represents the result of scoring a candidate, and is sent from child to
parent processes, or server to client with p@h."""
score: int
hash: Optional[str]
source: Optional[str]
profiler: Optional[Profiler] = None
@dataclass
class Candidate:
"""
    Represents an AST candidate created from a source which can be randomized
(possibly multiple times), compiled, and scored.
"""
ast: ca.FileAST
fn_index: int
rng_seed: int
randomizer: Randomizer
score_value: Optional[int] = field(init=False, default=None)
score_hash: Optional[str] = field(init=False, default=None)
_cache_source: Optional[str] = field(init=False, default=None)
@staticmethod
@functools.lru_cache(maxsize=16)
def _cached_shared_ast(
source: str, fn_name: str
) -> Tuple[ca.FuncDef, int, ca.FileAST]:
ast = ast_util.parse_c(source)
orig_fn, fn_index = ast_util.extract_fn(ast, fn_name)
ast_util.normalize_ast(orig_fn, ast)
return orig_fn, fn_index, ast
@staticmethod
def from_source(
source: str, eval_state: EvalState, fn_name: str, rng_seed: int
) -> "Candidate":
# Use the same AST for all instances of the same original source, but
# with the target function deeply copied. Since we never change the
        # AST outside of the target function, this is fine, and it saves a lot
        # of time (deepcopy is really slow).
orig_fn, fn_index, ast = Candidate._cached_shared_ast(source, fn_name)
ast = copy.copy(ast)
ast.ext = copy.copy(ast.ext)
fn_copy = copy.deepcopy(orig_fn)
ast.ext[fn_index] = fn_copy
apply_ast_perms(fn_copy, eval_state)
return Candidate(
ast=ast,
fn_index=fn_index,
rng_seed=rng_seed,
randomizer=Randomizer(rng_seed),
)
def randomize_ast(self) -> None:
self.randomizer.randomize(self.ast, self.fn_index)
self._cache_source = None
def get_source(self) -> str:
if self._cache_source is None:
self._cache_source = ast_util.to_c(self.ast)
return self._cache_source
def compile(self, compiler: Compiler, show_errors: bool = False) -> Optional[str]:
source: str = self.get_source()
return compiler.compile(source, show_errors=show_errors)
def score(self, scorer: Scorer, o_file: Optional[str]) -> CandidateResult:
self.score_value = None
self.score_hash = None
try:
self.score_value, self.score_hash = scorer.score(o_file)
finally:
if o_file:
try_remove(o_file)
return CandidateResult(
score=self.score_value, hash=self.score_hash, source=self.get_source()
)
|
185763
|
import pickle
import re
from unidecode import unidecode
from collections import defaultdict
from math import log
from opentapioca.readers.dumpreader import WikidataDumpReader
separator_re = re.compile(r'[,\-_/:;!?)]? [,\-_/:;!?(]?')
def tokenize(phrase):
"""
Split a text into lists of words
"""
words = [
unidecode(word.strip())
for word in separator_re.split(' '+phrase+' ')
]
return [w for w in words if w]
class BOWLanguageModel(object):
def __init__(self):
self.total_count = 0
self.word_count = defaultdict(int)
self.smoothing = 1
self.log_quotient = None
self.threshold = 2
def ingest(self, words):
"""
Ingests a sequence of words in the language model
"""
for word in words:
self.word_count[word] += 1
self.total_count += len(words)
def ingest_phrases(self, phrases):
"""
Given a list of strings (phrases), deduplicate all
their words and ingest them.
"""
word_set = set()
for phrase in phrases:
word_set |= set(tokenize(phrase))
self.ingest(word_set)
def log_likelihood(self, phrase):
"""
Returns the log-likelihood of the phrase
"""
words = tokenize(phrase)
return sum(self._word_log_likelihood(word) for word in words)
def _word_log_likelihood(self, word):
"""
        The log-likelihood for a single word
"""
if self.log_quotient is None:
self._update_log_quotient()
return log(float(self.smoothing + self.word_count[word])) - self.log_quotient
def _update_log_quotient(self):
"""
Updates the precomputed quotient
"""
        self.log_quotient = log(self.smoothing * (1 + len(self.word_count)) + self.total_count)
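        # Equivalently, the smoothed likelihood is
        # P(w) = (count(w) + s) / (s * (|V| + 1) + N), where s = self.smoothing,
        # |V| = len(self.word_count) and N = self.total_count.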
def load(self, filename):
"""
Loads a pre-trained language model
"""
with open(filename, 'rb') as f:
dct = pickle.load(f)
self.total_count = dct['total_count']
self.word_count = defaultdict(int, dct['word_count'])
self._update_log_quotient()
def save(self, filename):
"""
Saves the language model to a file
"""
print('saving language model')
with open(filename, 'wb') as f:
            pickle.dump(
                {
                    'total_count': self.total_count,
                    'word_count': [
                        (w, c) for w, c in self.word_count.items()
                        if c >= self.threshold
                    ],
                },
                f,
            )
@classmethod
def train_from_dump(cls, filename):
"""
Trains a bag of words language model from either a .txt
file (in which case it is read as plain text) or a .json.bz2
file (in which case it is read as a wikidata dump).
"""
bow = BOWLanguageModel()
if filename.endswith('.txt'):
with open(filename, 'r') as f:
for line in f:
bow.ingest_phrases([line.strip()])
elif filename.endswith('.json.bz2'):
with WikidataDumpReader(filename) as reader:
for idx, item in enumerate(reader):
if idx % 10000 == 0:
print(idx)
enlabel = item.get('labels', {}).get('en', {}).get('value')
endesc = item.get('descriptions', {}).get('en', {}).get('value')
if enlabel:
# Fetch aliases
enaliases = [
alias['value']
for alias in item.get('aliases', {}).get('en', [])
]
bow.ingest_phrases(enaliases + [enlabel])
else:
raise ValueError('invalid filename provided (must end in .txt or .json.bz2)')
return bow
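
# Minimal usage sketch (not part of the original module): train on a small
# in-memory corpus instead of a dump file, then score a phrase.
if __name__ == '__main__':
    bow = BOWLanguageModel()
    bow.ingest_phrases(['the quick brown fox', 'the lazy dog'])
    print(bow.log_likelihood('quick fox'))  # sum of per-word log-likelihoods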
|