content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
from gurobipy import *
from prediction.predictor import predict_scores
from utils.progress import inhour
import time
import numpy as np
def sample_users(num_users, num_users_sampled):
    """Draw `num_users_sampled` distinct user indices uniformly from range(num_users)."""
    return np.random.choice(num_users, size=num_users_sampled, replace=False)
def sample_items(candidate_items, num_items_sampled):
    """Draw `num_items_sampled` distinct items uniformly from `candidate_items`."""
    return np.random.choice(candidate_items, num_items_sampled, replace=False)
def sample_keyphrase(remaining_keyphrases, keyphrase_popularity):
    """Pick the critique keyphrase: the most popular among the remaining ones.

    The original body referenced undefined names (`self`,
    `remaining_keyphrases`) and returned nothing, so it raised NameError on
    any call; the inputs are now explicit parameters and the chosen
    keyphrase is returned.

    :param remaining_keyphrases: index array of keyphrases still available
    :param keyphrase_popularity: popularity score per keyphrase id
    :return: id of the most popular remaining keyphrase
    """
    remaining_keyphrases = np.asarray(remaining_keyphrases)
    popularity = np.asarray(keyphrase_popularity)
    # Critique the most popular keyphrase in the remaining set.
    return remaining_keyphrases[np.argmax(popularity[remaining_keyphrases])]
def get_max_length(df, num_keyphrases, max_iteration):
    """Mean number of existing keyphrases over finished sessions, capped at
    `max_iteration`.

    :param df: session log with 'result' and 'num_existing_keyphrases' columns
    :param num_keyphrases: unused; kept for interface compatibility
    :param max_iteration: upper cap applied to 'num_existing_keyphrases'
    :return: mean of the capped 'num_existing_keyphrases' column
    """
    # .copy() avoids mutating a view of the caller's frame (the original
    # .loc assignment on a boolean-mask slice triggered
    # SettingWithCopyWarning and relied on pandas copy semantics).
    df_s_f = df[(df['result'] == 'successful') | (df['result'] == 'fail')].copy()
    df_s_f['num_existing_keyphrases'] = df_s_f['num_existing_keyphrases'].clip(upper=max_iteration)
    return df_s_f['num_existing_keyphrases'].mean()
def get_average_length(df, n):
    """Average per-user critiquing-session length at target rank `n`.

    :param df: session log with 'result', 'target_rank', 'user_id', 'iteration'
    :param n: target rank to filter on
    :return: (mean of per-user mean iterations, 1.96 * standard error)
    """
    # Only sessions that actually terminated count toward length statistics.
    finished = df[(df['result'] == 'successful') | (df['result'] == 'fail')]
    at_rank = finished[finished['target_rank'] == n]
    per_user = at_rank.groupby('user_id', as_index=False).agg({'iteration': 'mean'})['iteration'].to_numpy()
    half_width = 1.96 * np.std(per_user) / np.sqrt(len(per_user))
    return (np.average(per_user), half_width)
def get_success_num(df, n):
    """Count sessions that ended 'successful' at target rank `n`."""
    mask = (df['result'] == 'successful') & (df['target_rank'] == n)
    return int(mask.sum())
def get_fail_num(df, n):
    """Count sessions that ended 'fail' at target rank `n`."""
    mask = (df['result'] == 'fail') & (df['target_rank'] == n)
    return int(mask.sum())
def get_success_rate(df, n):
    """Per-user success rate among finished sessions at target rank `n`.

    :param df: session log with 'result', 'target_rank', 'user_id' columns
    :param n: target rank to filter on
    :return: (mean per-user success rate, 1.96 * standard error)
    """
    # Keep only sessions that terminated (either successfully or by failing).
    df_s_f = df[(df['result'] == 'successful') | (df['result'] == 'fail')]
    # One row per user: the list of that user's session results at rank n.
    # NOTE(review): the groupby(...).apply(list).reset_index(name=...) chain
    # depends on pandas-version-specific apply semantics — verify on upgrade.
    df_list_result = df_s_f[df_s_f['target_rank']==n].groupby('user_id', as_index=False)['result'].apply(list).reset_index(name='result')
    # Fraction of 'successful' outcomes within each user's result list.
    successful_rate = df_list_result['result'].apply(lambda r: r.count("successful")/len(r)).to_numpy()
    return (np.average(successful_rate), 1.96*np.std(successful_rate)/np.sqrt(len(successful_rate)))
def count_occurrence(x):
    """Number of 'successful' entries in the sequence `x`."""
    return sum(item == "successful" for item in x)
def add_pop(x, item_pop_index):
    """Position of item `x` within the popularity-ordered index array."""
    matches = np.flatnonzero(item_pop_index == x)
    return matches[0]
##################################
# Baseline Methods
##################################
def LPUAC(initial_prediction_u, keyphrase_freq, affected_items, unaffected_items, num_keyphrases, query, test_user, item_latent, reg):
    """Uniform Average Critiquing (UAC) baseline.

    The initial prediction and each of the K critiques receive the same
    weight 1/(K+1).

    :param initial_prediction_u: pre-critiquing item scores for the user
    :param keyphrase_freq: user-by-keyphrase frequency matrix
    :param affected_items: unused; kept for interface parity with LP methods
    :param unaffected_items: unused; kept for interface parity with LP methods
    :param num_keyphrases: unused; kept for interface parity with LP methods
    :param query: indices of the critiqued keyphrases
    :param test_user: row index of the user in `keyphrase_freq`
    :param item_latent: item latent factor matrix
    :param reg: regressor mapping keyphrase space to user latent space
    :return: (new_prediction, lambdas) — blended scores and critique weights
    """
    critiqued_vector = np.zeros(keyphrase_freq[0].shape)
    for q in query:
        # Negate the user's observed frequency to express the critique.
        critiqued_vector[q] = -keyphrase_freq[test_user][q]
    num_critiques = len(query)
    # All weights are equal to 1/(K+1); hoisted out of the loop since it is
    # loop-invariant.
    optimal_lambda = 1 / (1 + num_critiques)
    lambdas = [optimal_lambda] * num_critiques
    for k in range(num_critiques):
        critiqued_vector[query[k]] *= optimal_lambda
    critique_score = predict_scores(matrix_U=reg.predict(critiqued_vector.reshape(1, -1)),
                                    matrix_V=item_latent)
    # The initial prediction carries the same 1/(K+1) weight as each critique.
    new_prediction = optimal_lambda * initial_prediction_u + critique_score.flatten()
    return new_prediction, lambdas
def LPBAC(initial_prediction_u, keyphrase_freq, affected_items, unaffected_items, num_keyphrases, query, test_user, item_latent, reg):
    """Balanced Average Critiquing (BAC) baseline.

    The initial prediction keeps weight 1/2 while the K critiques share the
    other half (each weighted 1/(2K)).

    :param initial_prediction_u: pre-critiquing item scores for the user
    :param keyphrase_freq: user-by-keyphrase frequency matrix
    :param affected_items: unused; kept for interface parity with LP methods
    :param unaffected_items: unused; kept for interface parity with LP methods
    :param num_keyphrases: unused; kept for interface parity with LP methods
    :param query: indices of the critiqued keyphrases; must be non-empty
        (K >= 1), otherwise 1/(2K) is undefined
    :param test_user: row index of the user in `keyphrase_freq`
    :param item_latent: item latent factor matrix
    :param reg: regressor mapping keyphrase space to user latent space
    :return: (new_prediction, lambdas) — blended scores and critique weights
    """
    critiqued_vector = np.zeros(keyphrase_freq[0].shape)
    for q in query:
        # Negate the user's observed frequency to express the critique.
        critiqued_vector[q] = -keyphrase_freq[test_user][q]
    num_critiques = len(query)
    # BAC: all critique weights are equal to 1/(2K); hoisted out of the loop
    # since it is loop-invariant.  (The original comment said "UAC", which
    # was a copy-paste leftover.)
    optimal_lambda = 1 / (2 * num_critiques)
    lambdas = [optimal_lambda] * num_critiques
    for k in range(num_critiques):
        critiqued_vector[query[k]] *= optimal_lambda
    critique_score = predict_scores(matrix_U=reg.predict(critiqued_vector.reshape(1, -1)),
                                    matrix_V=item_latent)
    # Initial prediction keeps half the total weight.
    new_prediction = (1 / 2) * initial_prediction_u + critique_score.flatten()
    return new_prediction, lambdas
##################################
# LP Objectives
##################################
def LP1SimplifiedOptimize(initial_prediction_u, keyphrase_freq, affected_items, unaffected_items, num_keyphrases, query, test_user, item_latent, reg):
    """Solve the simplified LP-1 objective with Gurobi to choose per-critique
    weights, then blend the critique score into the initial prediction.

    One continuous variable lamb in [-1, 1] is created per critiqued
    keyphrase; the objective pushes affected items' scores down relative to
    unaffected items' scores (MINIMIZE of affected-minus-unaffected, each
    side scaled by the other side's cardinality to balance the two sums).

    :param initial_prediction_u: pre-critiquing item scores for the user
    :param keyphrase_freq: user-by-keyphrase frequency matrix
    :param affected_items: item ids whose rank should move
    :param unaffected_items: item ids whose rank should stay
    :param num_keyphrases: unused here
    :param query: indices of the critiqued keyphrases
    :param test_user: row index of the user in `keyphrase_freq`
    :param item_latent: item latent factor matrix
    :param reg: linear regressor; its coef_ maps keyphrases to latent space
    :return: (new_prediction, lambdas) — re-ranked scores and optimal weights
    """
    critiqued_vector = np.zeros(keyphrase_freq[0].shape)
    for q in query:
        # Critique magnitude: at least the user's mean keyphrase frequency.
        critiqued_vector[q] = -max(keyphrase_freq[test_user][q],keyphrase_freq[test_user].mean())
    num_critiques = len(query)
    W2 = reg.coef_
    # W[i][q]: contribution of keyphrase q to item i's score (latent x coef).
    W = item_latent.dot(W2)
    num_affected_items = len(affected_items)
    num_unaffected_items = len(unaffected_items)
    start_time = time.time()
    # Model
    m = Model("LP1Simplified")
    m.setParam('OutputFlag', 0)
    # Assignment variables: one bounded weight per critiqued keyphrase.
    lambs = []
    for k in range(num_critiques):
        lambs.append(m.addVar(lb=-1,
                              ub=1,
                              vtype=GRB.CONTINUOUS,
                              name="lamb%d" % query[k]))
    # Affected sum is scaled by num_unaffected_items and vice versa so both
    # terms are comparable regardless of set sizes.
    m.setObjective(quicksum(initial_prediction_u[affected_item] * num_unaffected_items + quicksum(lambs[k] * critiqued_vector[query[k]] * W[affected_item][query[k]] * num_unaffected_items for k in range(num_critiques)) for affected_item in affected_items) - quicksum(initial_prediction_u[unaffected_item] * num_affected_items + quicksum(lambs[k] * critiqued_vector[query[k]] * W[unaffected_item][query[k]] * num_affected_items for k in range(num_critiques)) for unaffected_item in unaffected_items), GRB.MINIMIZE)
    # Optimize
    m.optimize()
    # print("Elapsed: {}".format(inhour(time.time() - start_time)))
    # Scale each critiqued entry by its optimal weight.  Variable order in
    # m.getVars() matches the creation order above.
    lambdas = []
    for k in range(num_critiques):
        optimal_lambda = m.getVars()[k].X
        lambdas.append(optimal_lambda)
        critiqued_vector[query[k]] *= optimal_lambda
    critique_score = predict_scores(matrix_U=reg.predict(critiqued_vector.reshape(1, -1)),
                                    matrix_V=item_latent)
    new_prediction = initial_prediction_u + critique_score.flatten()
    return new_prediction, lambdas
##################################
# LP-Ranking
##################################
def LPRank(initial_prediction_u, keyphrase_freq, affected_items, unaffected_items, num_keyphrases,
           query, test_user, item_latent, reg, Z_pre, Z, thetas_pre, lamb = 10, bound_range = 2):
    """
    Incremental Approach

    Rank-based LP: choose weights theta (one for the initial prediction plus
    one per critique) so that, relative to the previous iteration's scores
    (thetas_pre / Z_pre), affected items move down and unaffected items move
    up, with slack variables xi and an L1-style pull of each theta toward
    the uniform value 1/(K+1).

    :param Z_pre: previous iteration's projection matrix
    :param Z: current projection matrix
    :param thetas_pre: weights from the previous iteration
    :param lamb: regularization strength on the slack variables
    :param bound_range: box bound for each theta, i.e. theta in [-b, b]
    :return: (new_prediction, thetas)
    """
    critiqued_vector = np.zeros(keyphrase_freq[0].shape)
    for q in query:
        # Critique magnitude: at least the user's mean keyphrase frequency.
        critiqued_vector[q] = -max(keyphrase_freq[test_user][q],keyphrase_freq[test_user].mean())
    num_critiques = len(query)
    num_affected_items = len(affected_items)
    num_unaffected_items = len(unaffected_items)
    # Model
    m = Model("LPRank")
    m.setParam('OutputFlag', 0)  # set to 1 for outputing details
    # Assignment variables
    thetas = []
    us = []
    xi_pos = []
    xi_neg = []
    # weight thetas: index 0 weights the initial prediction, 1..K the critiques
    for k in range(num_critiques + 1):
        thetas.append(m.addVar(lb=-bound_range,
                               ub=bound_range,
                               vtype=GRB.CONTINUOUS,
                               name="theta%d" % k))
    # np.array so thetas.dot(...) builds Gurobi linear expressions below.
    thetas = np.array(thetas)
    # dummy variable u for absolute theta (linearizes |theta - 1/(K+1)|)
    for k in range(num_critiques + 1):
        us.append(m.addVar(vtype=GRB.CONTINUOUS,
                           name="u%d" % k))
    # slack variables xi: allow soft violation of the ranking constraints
    for i in range(num_affected_items):
        xi_pos.append(m.addVar(lb = 0,
                               vtype = GRB.CONTINUOUS,
                               name = "xi_pos%d" % i ))
    for i in range(num_unaffected_items):
        xi_neg.append(m.addVar(lb = 0,
                               vtype = GRB.CONTINUOUS,
                               name = "xi_neg%d" % i ))
    ## constraints
    # constraints for dummy variable u's: u_k >= |theta_k - 1/(K+1)|
    for k in range(num_critiques+1):
        m.addConstr(us[k] >= thetas[k] - 1/(num_critiques+1))
        m.addConstr(us[k] >= 1/(num_critiques+1) - thetas[k])
    # Affected items rank higher: previous score must exceed new score by a
    # margin of 1, minus slack.
    for j in range(num_affected_items):
        m.addConstr( thetas_pre.dot(Z_pre.dot(item_latent[affected_items[j]])) - thetas.dot(Z.dot(item_latent[affected_items[j]])) >= 1 - xi_pos[j], name = "pos_constraint%d" % j )
    # Unaffected items: new score must exceed previous score by a margin of 1.
    for j in range(num_unaffected_items):
        m.addConstr( thetas.dot(Z.dot(item_latent[unaffected_items[j]])) >= thetas_pre.dot(Z_pre.dot(item_latent[unaffected_items[j]])) + 1 - xi_neg[j], name = "neg_constraint%d" % j )
    m.setObjective(quicksum(us) + lamb * (quicksum(xi_pos)+quicksum(xi_neg)), GRB.MINIMIZE)  # Single regularization
    # Optimize
    m.optimize()
    # Save optimal thetas
    thetas = []
    for k in range(num_critiques+1):
        optimal_theta = m.getVarByName("theta%d" % k).X
        thetas.append(optimal_theta)
    # thetas[0] weights the initial prediction; thetas[1..K] scale critiques.
    for k in range(num_critiques):
        critiqued_vector[query[k]] *= thetas[k+1]
    # Get rating score
    critique_score = predict_scores(matrix_U=reg.predict(critiqued_vector.reshape(1, -1)),
                                    matrix_V=item_latent)
    new_prediction = thetas[0]*initial_prediction_u + critique_score.flatten()
    return new_prediction, thetas
def LPRank2(initial_prediction_u, keyphrase_freq, affected_items, unaffected_items, num_keyphrases,
            query, test_user, item_latent, reg, Z_pre, Z, lamb = 10, bound_range = 2):
    """
    Non Incremental Approach

    Same LP as LPRank, except the ranking constraints compare the candidate
    scores against the original `initial_prediction_u` instead of the
    previous iteration's weighted scores (no thetas_pre / Z_pre reference
    point in the constraints).

    :param lamb: regularization strength on the slack variables
    :param bound_range: box bound for each theta, i.e. theta in [-b, b]
    :return: (new_prediction, thetas)
    """
    critiqued_vector = np.zeros(keyphrase_freq[0].shape)
    for q in query:
        # Critique magnitude: at least the user's mean keyphrase frequency.
        critiqued_vector[q] = -max(keyphrase_freq[test_user][q],keyphrase_freq[test_user].mean())
    num_critiques = len(query)
    num_affected_items = len(affected_items)
    num_unaffected_items = len(unaffected_items)
    # Model
    m = Model("LPRank")
    m.setParam('OutputFlag', 0)  # set to 1 for outputing details
    # Assignment variables
    thetas = []
    us = []
    xi_pos = []
    xi_neg = []
    # weight thetas: index 0 weights the initial prediction, 1..K the critiques
    for k in range(num_critiques + 1):
        thetas.append(m.addVar(lb=-bound_range,
                               ub=bound_range,
                               vtype=GRB.CONTINUOUS,
                               name="theta%d" % k))
    # np.array so thetas.dot(...) builds Gurobi linear expressions below.
    thetas = np.array(thetas)
    # dummy variable u for absolute theta (linearizes |theta - 1/(K+1)|)
    for k in range(num_critiques + 1):
        us.append(m.addVar(vtype=GRB.CONTINUOUS,
                           name="u%d" % k))
    # slack variables xi: allow soft violation of the ranking constraints
    for i in range(num_affected_items):
        xi_pos.append(m.addVar(lb = 0,
                               vtype = GRB.CONTINUOUS,
                               name = "xi_pos%d" % i ))
    for i in range(num_unaffected_items):
        xi_neg.append(m.addVar(lb = 0,
                               vtype = GRB.CONTINUOUS,
                               name = "xi_neg%d" % i ))
    ## constraints
    # constraints for dummy variable u's: u_k >= |theta_k - 1/(K+1)|
    for k in range(num_critiques+1):
        m.addConstr(us[k] >= thetas[k] - 1/(num_critiques+1))
        m.addConstr(us[k] >= 1/(num_critiques+1) - thetas[k])
    # Affected items rank higher
    for j in range(num_affected_items):
        m.addConstr( initial_prediction_u[affected_items[j]] - thetas.dot(Z.dot(item_latent[affected_items[j]])) >= 1 - xi_pos[j], name = "pos_constraint%d" % j )
    # Unaffected items rank lower
    for j in range(num_unaffected_items):
        m.addConstr( thetas.dot(Z.dot(item_latent[unaffected_items[j]])) >= initial_prediction_u[unaffected_items[j]] + 1 - xi_neg[j], name = "neg_constraint%d" % j )
    m.setObjective(quicksum(us) + lamb * (quicksum(xi_pos)+quicksum(xi_neg)), GRB.MINIMIZE)  # Single regularization
    # Optimize
    m.optimize()
    # Save optimal thetas
    thetas = []
    for k in range(num_critiques+1):
        optimal_theta = m.getVarByName("theta%d" % k).X
        thetas.append(optimal_theta)
    # thetas[0] weights the initial prediction; thetas[1..K] scale critiques.
    for k in range(num_critiques):
        critiqued_vector[query[k]] *= thetas[k+1]
    # Get rating score
    critique_score = predict_scores(matrix_U=reg.predict(critiqued_vector.reshape(1, -1)),
                                    matrix_V=item_latent)
    new_prediction = thetas[0]*initial_prediction_u + critique_score.flatten()
    return new_prediction, thetas
|
nilq/baby-python
|
python
|
import json
from rest_framework.fields import MISSING_ERROR_MESSAGE
from rest_framework.relations import *
from django.utils.translation import ugettext_lazy as _
from rest_framework_json_api.exceptions import Conflict
from rest_framework_json_api.utils import Hyperlink, \
get_resource_type_from_queryset, get_resource_type_from_instance, \
get_included_serializers, get_resource_type_from_serializer
class ResourceRelatedField(PrimaryKeyRelatedField):
    """JSON:API relationship field.

    Serializes a related object as a resource identifier object
    ({'type': ..., 'id': ...}) and can render 'self'/'related' links for the
    relationship.  Incoming data must be a resource identifier object (or a
    JSON string encoding one).
    """
    # Overridable view names used to build the relationship's links.
    self_link_view_name = None
    related_link_view_name = None
    related_link_lookup_field = 'pk'
    default_error_messages = {
        'required': _('This field is required.'),
        'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
        'incorrect_type': _('Incorrect type. Expected resource identifier object, received {data_type}.'),
        'incorrect_relation_type': _('Incorrect relation type. Expected {relation_type}, received {received_type}.'),
        'missing_type': _('Invalid resource identifier object: missing \'type\' attribute'),
        'missing_id': _('Invalid resource identifier object: missing \'id\' attribute'),
        'no_match': _('Invalid hyperlink - No URL match.'),
    }

    def __init__(self, self_link_view_name=None, related_link_view_name=None, **kwargs):
        """Accept link view names plus optional 'related_link_lookup_field',
        'related_link_url_kwarg' and 'model' keyword arguments."""
        if self_link_view_name is not None:
            self.self_link_view_name = self_link_view_name
        if related_link_view_name is not None:
            self.related_link_view_name = related_link_view_name
        self.related_link_lookup_field = kwargs.pop('related_link_lookup_field', self.related_link_lookup_field)
        # URL kwarg defaults to the lookup field name when not given.
        self.related_link_url_kwarg = kwargs.pop('related_link_url_kwarg', self.related_link_lookup_field)
        # check for a model class that was passed in for the relation type
        model = kwargs.pop('model', None)
        if model:
            self.model = model
        # We include this simply for dependency injection in tests.
        # We can't add it as a class attributes or it would expect an
        # implicit `self` argument to be passed.
        self.reverse = reverse
        super(ResourceRelatedField, self).__init__(**kwargs)

    def use_pk_only_optimization(self):
        # We need the real object to determine its type...
        return False

    def conflict(self, key, **kwargs):
        """
        A helper method that simply raises a validation error.
        """
        try:
            msg = self.error_messages[key]
        except KeyError:
            class_name = self.__class__.__name__
            msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
            raise AssertionError(msg)
        message_string = msg.format(**kwargs)
        # 409 Conflict rather than a plain validation error.
        raise Conflict(message_string)

    def get_url(self, name, view_name, kwargs, request):
        """
        Given a name, view name and kwargs, return the URL that hyperlinks to the object.
        May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
        attributes are not configured to correctly match the URL conf.
        """
        # Return None if the view name is not supplied
        if not view_name:
            return None
        # Return the hyperlink, or error if incorrectly configured.
        try:
            url = self.reverse(view_name, kwargs=kwargs, request=request)
        except NoReverseMatch:
            msg = (
                'Could not resolve URL for hyperlinked relationship using '
                'view name "%s".'
            )
            raise ImproperlyConfigured(msg % view_name)
        if url is None:
            return None
        return Hyperlink(url, name)

    def get_links(self, obj=None, lookup_field='pk'):
        """Build the 'self' and 'related' links for this relationship.

        Falls back to the view's URL kwargs when no object is given.  Either
        link is omitted when its view name is not configured.
        """
        request = self.context.get('request', None)
        view = self.context.get('view', None)
        return_data = OrderedDict()
        kwargs = {lookup_field: getattr(obj, lookup_field) if obj else view.kwargs[lookup_field]}
        self_kwargs = kwargs.copy()
        self_kwargs.update({'related_field': self.field_name if self.field_name else self.parent.field_name})
        self_link = self.get_url('self', self.self_link_view_name, self_kwargs, request)
        related_kwargs = {self.related_link_url_kwarg: kwargs[self.related_link_lookup_field]}
        related_link = self.get_url('related', self.related_link_view_name, related_kwargs, request)
        if self_link:
            return_data.update({'self': self_link})
        if related_link:
            return_data.update({'related': related_link})
        return return_data

    def to_internal_value(self, data):
        """Validate a resource identifier object and resolve it to an instance.

        Accepts a JSON-encoded string or a dict; requires 'type' and 'id'
        keys and raises Conflict when 'type' doesn't match the queryset's
        resource type.
        """
        if isinstance(data, six.text_type):
            try:
                data = json.loads(data)
            except ValueError:
                # show a useful error if they send a `pk` instead of resource object
                self.fail('incorrect_type', data_type=type(data).__name__)
        if not isinstance(data, dict):
            self.fail('incorrect_type', data_type=type(data).__name__)
        expected_relation_type = get_resource_type_from_queryset(self.queryset)
        if 'type' not in data:
            self.fail('missing_type')
        if 'id' not in data:
            self.fail('missing_id')
        if data['type'] != expected_relation_type:
            self.conflict('incorrect_relation_type', relation_type=expected_relation_type, received_type=data['type'])
        # Delegate pk resolution to PrimaryKeyRelatedField.
        return super(ResourceRelatedField, self).to_internal_value(data['id'])

    def to_representation(self, value):
        """Render the related instance as {'type': ..., 'id': ...}."""
        if getattr(self, 'pk_field', None) is not None:
            pk = self.pk_field.to_representation(value.pk)
        else:
            pk = value.pk
        # check to see if this resource has a different resource_name when
        # included and use that name
        resource_type = None
        root = getattr(self.parent, 'parent', self.parent)
        field_name = self.field_name if self.field_name else self.parent.field_name
        if getattr(root, 'included_serializers', None) is not None:
            includes = get_included_serializers(root)
            if field_name in includes.keys():
                resource_type = get_resource_type_from_serializer(includes[field_name])
        resource_type = resource_type if resource_type else get_resource_type_from_instance(value)
        return OrderedDict([('type', resource_type), ('id', str(pk))])

    @property
    def choices(self):
        """Choices keyed by the JSON-encoded resource identifier object."""
        queryset = self.get_queryset()
        if queryset is None:
            # Ensure that field.choices returns something sensible
            # even when accessed with a read-only field.
            return {}
        return OrderedDict([
            (
                json.dumps(self.to_representation(item)),
                self.display_value(item)
            )
            for item in queryset
        ])
class SerializerMethodResourceRelatedField(ResourceRelatedField):
    """Resource-related field whose value may come from a method on the
    parent serializer instead of a model attribute."""

    def get_attribute(self, instance):
        # Prefer a callable named by `source` on the parent serializer.
        if self.source and hasattr(self.parent, self.source):
            maybe_method = getattr(self.parent, self.source)
            if callable(maybe_method):
                return maybe_method(instance)
        # Deliberately skip ResourceRelatedField: resolve via the base
        # relational field's attribute lookup.
        return super(ResourceRelatedField, self).get_attribute(instance)
|
nilq/baby-python
|
python
|
import kube_vars as globalvars
import kube_factory as factory
import kube_secret
import kube_servicecheck
import kube_pvc
if __name__=="__main__":
    # Unified CLI entry point.  The root argparse parser (with subcommand
    # subparsers registered elsewhere, e.g. in git_handler.py) is fetched
    # from the global registry; to add a new subcommand, follow that same
    # registration pattern.
    parser = globalvars.get_value('RootCMDParser')
    # FIX: parse the real command line.  Previously a hard-coded debug
    # argument list ("createsecret -tenant demo ...") was passed to
    # parse_args(), which silently ignored whatever the user typed.
    args = parser.parse_args()
    print(args)
    # config and configtype select how the kube client is initialised;
    # context and apiversion have no CLI flags here and stay None.
    config, configtype, context, apiversion = None, None, None, None
    if hasattr(args, 'config'):
        config = args.config
    if hasattr(args, 'configtype'):
        configtype = args.configtype
    factory.Factory_InitKubeClient(configtype, config, context, apiversion)
    # Dispatch to the handler the chosen subparser attached via set_defaults(func=...).
    args.func(args)
|
nilq/baby-python
|
python
|
import sys
from PIL import Image, ImageDraw
# Steganography demo: encode an ASCII message into the RGB values of specific
# pixel regions of a sprite, save it, then read the pixels back and decode.
# Each pixel stores three characters (one per R/G/B channel).

# Rectangular regions (x0, y0, x1, y1), inclusive, that are safe to write:
# 64 + 128 + 64 = 256 pixels = 768 characters in total.
WRITABLES = [(0, 0, 7, 7), (24, 0, 39, 7), (56, 0, 63, 7)]
imagePath = "schoolgirlsweater_tanukirotate.png"
img = Image.open(imagePath)
draw = ImageDraw.Draw(img)


def _advance(xy, writeArea):
    """Move the pixel cursor one step, wrapping within the current writable
    rectangle and jumping to the next rectangle when this one is full.
    Mutates `xy` in place and returns the (possibly advanced) region index."""
    if xy[0] >= WRITABLES[writeArea][2]:
        # End of the row: wrap to the region's left edge, next row down.
        xy[0] = WRITABLES[writeArea][0]
        xy[1] = xy[1] + 1
    else:
        xy[0] = xy[0] + 1
    if xy[1] > WRITABLES[writeArea][3] and writeArea + 1 < len(WRITABLES):
        # Region exhausted: start at the top-left of the next region.
        writeArea = writeArea + 1
        xy[0] = WRITABLES[writeArea][0]
        xy[1] = WRITABLES[writeArea][1]
    return writeArea


# --- Read and validate the message ---
lengthPass = False
length = 0
while not lengthPass:
    # FIX: corrected "messaage" typo in the prompt.
    msg = input("Enter the message to encode (768 characters max):\n")
    length = len(msg)
    if length > 768:
        print("Message is too long, please try again")
    else:
        lengthPass = True

# Character codes, zero-padded to exactly 768 entries (the builtin name
# `ascii` is no longer shadowed).
codes = [ord(c) for c in msg]
codes = codes + [0] * (768 - length)

# --- Encode: write three codes per pixel as an (R, G, B) triple ---
it = iter(codes)
rgb = zip(it, it, it)
writeArea = 0
xy = [0, 0]
for z in rgb:
    draw.point(xy, fill=z)
    writeArea = _advance(xy, writeArea)
img.save(imagePath)

# --- Decode: read the same 256 pixels back and rebuild the string ---
img = Image.open(imagePath).convert("RGB")
px = img.load()
writeArea = 0
xy = [0, 0]
decodedString = []
for i in range(256):
    decodedString.append(px[xy[0], xy[1]])
    writeArea = _advance(xy, writeArea)
# Flatten the (R, G, B) triples and convert the codes back to characters
# (padding NUL bytes are kept, matching the original behaviour).
decodedString = [i for sub in decodedString for i in sub]
decodedString = ''.join(chr(i) for i in decodedString)
print("Decoded String:")
print(decodedString)
|
nilq/baby-python
|
python
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import time
import unittest
import common.mqtt_connection as mqtt_connection
import common.mqtt_messages as mqtt_messages
import server.MQTT_callbacks as MQTT_callbacks
import server.storage as server_storage
from main import do_global_config
class TestServerMQTTCallbacks(unittest.TestCase):
    """Unit tests for the server-side MQTT callbacks (heartbeat / order)."""

    # FIX: setUpClass must be a classmethod per the unittest protocol; the
    # original bare `def setUpClass() -> None:` only worked by accident of
    # zero-argument access on the class object.
    @classmethod
    def setUpClass(cls) -> None:
        # Load global configuration once for the whole test class.
        do_global_config()

    def test_able_to_process_heartbeat(self):
        """A heartbeat message registers the machine's levels and timestamp."""
        heartbeat = {
            "machine_levels": {
                "coffee_mg_level": 10,
                "milk_mg_level": 10,
                "sugar_mg_level": 10,
                "water_mg_level": 10
            },
            "status": "OK",
            "id_machine": "UNIT_TEST",
        }
        MQTT_callbacks.receive_heartbeat(heartbeat)
        self.assertTrue("UNIT_TEST" in server_storage.coffee_machines_last_heartbeat)
        self.assertTrue("UNIT_TEST" in server_storage.coffee_machines_levels)

    def test_able_to_receive_order(self):
        """An order log updates the machine's stored ingredient levels."""
        order_log = {
            "machine_levels": {
                "coffee_mg_level": 10,
                "milk_mg_level": 10,
                "sugar_mg_level": 10,
                "water_mg_level": 10
            },
            "success": "OK",
            "machine_id": "UNIT_TEST",
            "timestamp": time.time(),
            "coffee_name": "good coffee",
        }
        MQTT_callbacks.receive_order(order_log)
        self.assertTrue("UNIT_TEST" in server_storage.coffee_machines_levels)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import os,copy
import pandas as pd
from collections import OrderedDict
from pypospack.pyposmat.data import PyposmatDataAnalyzer
from pypospack.pyposmat.data import PyposmatDataFile
from pypospack.pyposmat.data import PyposmatConfigurationFile
# Input/output locations: the raw results file is re-written with derived
# QOIs (bulk/shear moduli) filled in.
_fn_config = os.path.join("resources","pyposmat.config.in")
_fn_results_in = os.path.join("resources","pyposmat.results.0.out")
_fn_results_out = os.path.join("resources","pyposmat.results.0a.out")

# Read the configuration to learn the full QOI list and their target values.
config = PyposmatConfigurationFile()
config.read(_fn_config)
qoi_targets = config.qoi_targets
print(config.qoi_targets)
print(list(config.qois))

# Source data file with the raw simulation results.
data_in = PyposmatDataFile()
data_in.read(_fn_results_in)

# Output data file: same parameters, but the QOI/error columns follow the
# configuration's QOI list (which may include derived quantities).
data_out = PyposmatDataFile()
data_out.parameter_names = data_in.parameter_names
data_out.qoi_names = list(config.qois)
data_out.error_names = ['{}.err'.format(q) for q in data_out.qoi_names]
# Column order: sim_id, parameters, QOIs, errors.
data_out.names = ["sim_id"]\
        +data_out.parameter_names\
        +data_out.qoi_names\
        +data_out.error_names
data_out.types = ["sim_id"]\
        +len(data_out.parameter_names)*['param']\
        +len(data_out.qoi_names)*['qoi']\
        +len(data_out.error_names)*['err']
def calculate_bulk_modulus(c11, c12, c44):
    """Bulk modulus of a cubic crystal from its elastic constants,
    B = (c11 + 2*c12) / 3.  (c44 is unused but kept for a uniform signature.)"""
    trace_term = c11 + 2 * c12
    return trace_term / 3
def calculate_shear_modulus(c11, c12, c44):
    """Shear modulus of a cubic crystal from its elastic constants,
    G = (c11 - c12) / 2.  (c44 is unused but kept for a uniform signature.)"""
    difference = c11 - c12
    return difference / 2
# Build the output rows: copy parameters, copy or derive each QOI, then
# compute the signed error against the configured targets.
data_out_lists = []
for i, row in data_in.df.iterrows():
    in_row_results = row.to_dict(into=OrderedDict)
    out_row_results = OrderedDict()
    # Pass through sim_id and all parameter columns unchanged.
    for k in (["sim_id"] + data_out.parameter_names):
        out_row_results[k] = in_row_results[k]
    # Copy each QOI; when it is absent from the raw results, derive it from
    # the elastic constants.
    for k in data_out.qoi_names:
        try:
            out_row_results[k] = in_row_results[k]
        except KeyError:
            if k == 'Ni_fcc.B':
                c11 = in_row_results['Ni_fcc.c11']
                c12 = in_row_results['Ni_fcc.c12']
                c44 = in_row_results['Ni_fcc.c44']
                out_row_results[k] = calculate_bulk_modulus(c11, c12, c44)
            elif k == 'Ni_fcc.G':
                c11 = in_row_results['Ni_fcc.c11']
                c12 = in_row_results['Ni_fcc.c12']
                c44 = in_row_results['Ni_fcc.c44']
                # FIX: the shear modulus was previously computed with
                # calculate_bulk_modulus, giving B where G was expected.
                out_row_results[k] = calculate_shear_modulus(c11, c12, c44)
            else:
                # Unknown missing QOI: surface the error instead of guessing.
                raise
    # Signed error columns: value minus configured target.
    for k in data_out.qoi_names:
        out_row_results["{}.err".format(k)] = out_row_results[k] - qoi_targets[k]
    data_out_lists.append([out_row_results[k] for k in data_out.names])
data_out.df = pd.DataFrame(data_out_lists, columns=data_out.names)
data_out.write(_fn_results_out)
|
nilq/baby-python
|
python
|
def slugify(text):
    """Return *text* with every non-alphanumeric character removed, so the
    result is safe to use as a file name.

    :param text: text to be modified.
    :return: modified text
    """
    kept = [ch for ch in text if ch.isalnum()]
    return "".join(kept)
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.4 on 2021-06-21 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds a nullable Address.country FK and
    relaxes Country.value to an optional CharField."""

    dependencies = [
        ('pvs_suban', '0009_auto_20210621_1753'),
    ]

    operations = [
        # Nullable FK so existing Address rows remain valid; SET_NULL keeps
        # addresses when their country is deleted.
        migrations.AddField(
            model_name='address',
            name='country',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='countries', to='pvs_suban.country'),
        ),
        migrations.AlterField(
            model_name='country',
            name='value',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
    ]
|
nilq/baby-python
|
python
|
from PyQt5.QtWidgets import QBoxLayout
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QDialogButtonBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QMenu
from PyQt5.QtWidgets import QPushButton
from PyQt5.Qt import pyqtSignal
from PyQt5.Qt import QAction
from PyQt5.QtWidgets import QMessageBox
from PyQt5.Qt import QFontMetrics
from PyQt5.Qt import Qt
from PyQt5.Qt import QUrl
from PyQt5.QtWidgets import QFormLayout
from PyQt5.QtWidgets import QPlainTextEdit
from mc.tools.IconProvider import IconProvider
from mc.tools.EnhancedMenu import Menu, Action
from mc.common.globalvars import gVar
from mc.common import const
from .BookmarkItem import BookmarkItem
class BookmarksFoldersMenu(QMenu):
    """Cascading menu of all bookmark folders (toolbar / menu / unsorted),
    emitting folderSelected when the user picks one."""

    def __init__(self, parent=None):
        '''
        @param: parent QWidget
        '''
        super().__init__(parent)
        self._selectedFolder = None  # BookmarkItem
        self._init()

    def selectedFolder(self):
        '''
        @return: BookmarkItem
        '''
        return self._selectedFolder

    # Q_SIGNALS:
    folderSelected = pyqtSignal(BookmarkItem)

    # private Q_SLOTS:
    def _folderChoosed(self):
        """Slot for a folder action: emit folderSelected with the action's
        BookmarkItem payload (ignores unrelated senders)."""
        act = self.sender()
        if not isinstance(act, QAction):
            return
        folder = act.data()
        if not isinstance(folder, BookmarkItem):
            return
        self.folderSelected.emit(folder)

    def _ADD_MENU(self, name):
        """Add a top-level submenu for the bookmarks root folder returned by
        gVar.app.bookmarks().<name>()."""
        bookmarks = gVar.app.bookmarks()
        item = getattr(bookmarks, name)()
        menu = self.addMenu(item.icon(), item.title())
        self._createMenu(menu, item)

    # private:
    def _init(self):
        # The three bookmark roots shown by the browser.
        self._ADD_MENU('toolbarFolder')
        self._ADD_MENU('menuFolder')
        self._ADD_MENU('unsortedFolder')

    def _createMenu(self, menu, parent):
        '''
        Recursively populate `menu` with a "Choose ..." action for `parent`
        plus one submenu per child folder.
        @param: menu QMenu
        @param: parent BookmarkItem
        '''
        act = menu.addAction(_('Choose %s') % parent.title())
        act.setData(parent)
        act.triggered.connect(self._folderChoosed)
        menu.addSeparator()
        for child in parent.children():
            if not child.isFolder(): continue
            m = menu.addMenu(child.icon(), child.title())
            self._createMenu(m, child)
class BookmarksFoldersButton(QPushButton):
    """Push button displaying the currently selected bookmark folder; its
    drop-down menu (BookmarksFoldersMenu) lets the user pick another one."""

    def __init__(self, parent, folder=None):
        '''
        @param: parent QWidget
        @param: folder BookmarkItem — initial selection; defaults to the
            last used folder when None
        '''
        super().__init__(parent)
        self._menu = BookmarksFoldersMenu(self)  # BookmarksFoldersMenu
        self._selectedFolder = None  # BookmarkItem
        if folder:
            self._selectedFolder = folder
        else:
            self._selectedFolder = gVar.app.bookmarks().lastUsedFolder()
        self._init()
        self._menu.folderSelected.connect(self.setSelectedFolder)

    def selectedFolder(self):
        '''
        @return: BookmarkItem
        '''
        return self._selectedFolder

    # Q_SIGNALS:
    selectedFolderChanged = pyqtSignal(BookmarkItem)

    # public Q_SLOTS:
    def setSelectedFolder(self, folder):
        '''
        Update the button's text/icon to show `folder` as selected.
        @param: folder BookmarkItem — must be a folder item
        '''
        assert(folder)
        assert(folder.isFolder())
        self._selectedFolder = folder
        self.setText(folder.title())
        self.setIcon(folder.icon())
        # Only emit when invoked as a slot (i.e. from the menu), not during
        # the initial programmatic setup in _init().
        if self.sender():
            self.selectedFolderChanged.emit(folder)

    def _init(self):
        self.setMenu(self._menu)
        self.setSelectedFolder(self._selectedFolder)
class BookmarksTools(object):
@classmethod
def addBookmarkDialog(cls, parent, url, title, folder=None):
'''
@brief: Add Bookmark Dialogs
@param: parent QWidget
@param: url QUrl
@param: title QString
@param: folder BookmarkItem
'''
dialog = QDialog(parent)
layout = QBoxLayout(QBoxLayout.TopToBottom, dialog)
label = QLabel(dialog)
edit = QLineEdit(dialog)
folderButton = BookmarksFoldersButton(dialog, folder)
box = QDialogButtonBox(dialog)
box.addButton(QDialogButtonBox.Ok)
box.addButton(QDialogButtonBox.Cancel)
box.rejected.connect(dialog.reject)
box.accepted.connect(dialog.accept)
layout.addWidget(label)
layout.addWidget(edit)
layout.addWidget(folderButton)
layout.addWidget(box)
label.setText(_('Choose name and location of this bookmark.'))
edit.setText(title)
edit.setCursorPosition(0)
dialog.setWindowIcon(IconProvider.iconForUrl(url))
dialog.setWindowTitle(_('Add New Bookmark'))
size = dialog.size()
size.setWidth(350)
dialog.resize(size)
dialog.exec_()
if dialog.result() == QDialog.Rejected or not edit.text():
del dialog
return False
bookmark = BookmarkItem(BookmarkItem.Url)
bookmark.setTitle(edit.text())
bookmark.setUrl(url)
gVar.app.bookmarks().addBookmark(folderButton.selectedFolder(), bookmark)
del dialog
return True
@classmethod
def bookmarkAllTabsDialog(cls, parent, tabWidget, folder=None):
'''
@param: parent QWidget
@param: tabWidget TabWidget
@param: folder BookmarkItem
'''
assert(tabWidget)
dialog = QDialog(parent)
layout = QBoxLayout(QBoxLayout.TopToBottom, dialog)
label = QLabel(dialog)
folderButton = BookmarksFoldersButton(dialog, folder)
box = QDialogButtonBox(dialog)
box.addButton(QDialogButtonBox.Ok)
box.addButton(QDialogButtonBox.Cancel)
box.rejected.connect(dialog.reject)
box.accepted.connect(dialog.accept)
layout.addWidget(label)
layout.addWidget(folderButton)
layout.addWidget(box)
label.setText(_('Choose folder for bookmarks:'))
dialog.setWindowTitle(_('Bookmark All Tabs'))
size = dialog.size()
size.setWidth(350)
dialog.resize(size)
dialog.exec_()
if dialog.result() == QDialog.Rejected:
return False
for tab in tabWidget.allTabs(False):
if tab.url().isEmpty(): continue
bookmark = BookmarkItem(BookmarkItem.Url)
bookmark.setTitle(tab.title())
bookmark.setUrl(tab.url())
gVar.app.bookmarks().addBookmark(folderButton.selectedFolder(), bookmark)
del dialog
return True
@classmethod
def editBookmarkDialog(cls, parent, item):
'''
@param: parent QWidget
@param: item BookmarkItem
'''
dialog = QDialog(parent)
layout = QFormLayout(dialog)
title = QLineEdit()
address = QLineEdit()
keyword = QLineEdit()
description = QPlainTextEdit()
box = QDialogButtonBox(dialog)
box.addButton(QDialogButtonBox.Ok)
box.addButton(QDialogButtonBox.Cancel)
box.rejected.connect(dialog.reject)
box.accepted.connect(dialog.accept)
layout.addRow(_('Title:'), title)
title.setText(item.title())
if not item.isFolder():
layout.addRow(_('Address:'), address)
address.setText(item.urlString())
layout.addRow(_('Keyword:'), keyword)
keyword.setText(item.keyword())
layout.addRow(_('Description:'), description)
description.document().setPlainText(item.description())
layout.addWidget(box)
dialog.setWindowIcon(item.icon())
dialog.setWindowTitle(_('Edit Bookmark'))
dialog.exec_()
if dialog.result() == QDialog.Rejected:
del dialog
return False
item.setTitle(title.text())
if not item.isFolder():
item.setUrl(QUrl.fromEncoded(address.text().encode()))
item.setKeyword(keyword.text())
item.setDescription(description.toPlainText())
del dialog
return True
@classmethod
def openBookmark(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
assert(window)
if not item or not item.isUrl():
return
item.updateVisitCount()
window.loadAddress(item.url())
@classmethod
def openBookmarkInNewTab(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
assert(window)
if not item:
return
if item.isFolder():
cls.openFolderInTabs(window, item)
elif item.isUrl():
item.updateVisitCount()
window.tabWidget().addViewByUrlTitle(item.url(), item.title(),
gVar.appSettings.newTabPosition)
@classmethod
def openBookmarkInNewWindow(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
if not item.isUrl():
return
item.updateVisitCount()
gVar.app.createWindow(const.BW_NewWindow, item.url())
@classmethod
def openBookmarkInNewPrivateWindow(cls, window, item):
'''
@param: window BrowserWindow
@param: item BookmarkItem
'''
if not item.isUrl():
return
item.updateVisitCount()
gVar.app.startPrivateBrowsing(item.url())
@classmethod
def openFolderInTabs(cls, window, folder):
'''
@param: window BrowserWindow
@param: folder BookmarkItem
'''
assert(window)
assert(folder.isFolder())
showWarning = len(folder.children()) > 10
if not showWarning:
for child in folder.children():
if child.isFolder():
showWarning = True
break
if showWarning:
button = QMessageBox.warning(window, _('Confirmation'),
_('Are you sure you want to open all bookmarks from "%s" folder in tabs?') % folder.title(),
QMessageBox.Yes | QMessageBox.No)
if button != QMessageBox.Yes:
return
for child in folder.children():
if child.isUrl():
cls.openBookmarkInNewTab(window, child)
elif child.isFolder():
cls.openFolderInTabs(window, child)
@classmethod
def addActionToMenu(cls, receiver, menu, item):
'''
@param: receiver QObject
@param: menu Menu
@param: item BookmarkItem
'''
assert(menu)
assert(item)
type_ = item.type()
if type_ == BookmarkItem.Url:
cls.addUrlToMenu(receiver, menu, item)
elif type_ == BookmarkItem.Folder:
cls.addFolderToMenu(receiver, menu, item)
elif type_ == BookmarkItem.Separator:
cls.addSeparatorToMenu(menu, item)
@classmethod
def addFolderToMenu(cls, receiver, menu, folder):
    '''
    Add *folder* as a submenu of *menu*, populated with its contents.

    @param: receiver QObject receiving the actions' signals
    @param: menu Menu
    @param: folder BookmarkItem (must be a folder)
    '''
    assert(menu)
    assert(folder)
    assert(folder.isFolder())
    subMenu = Menu(menu)
    # Elide over-long titles (250 px) so the menu keeps a reasonable width.
    title = QFontMetrics(subMenu.font()).elidedText(folder.title(), Qt.ElideRight, 250)
    subMenu.setTitle(title)
    subMenu.setIcon(folder.icon())
    cls.addFolderContentsToMenu(receiver, subMenu, folder)
    # QAction
    act = menu.addMenu(subMenu)
    act.setData(folder)
    act.setIconVisibleInMenu(True)
@classmethod
def addUrlToMenu(cls, receiver, menu, bookmark):
    '''
    Add a URL bookmark as a triggerable action of *menu*.

    Wires plain / ctrl / shift activation signals to the receiver's
    private handler slots.

    @param: receiver QObject with _bookmark*Activated slots
    @param: menu Menu
    @param: bookmark BookmarkItem (must be a URL)
    '''
    assert(menu)
    assert(bookmark)
    assert(bookmark.isUrl())
    act = Action(menu)
    # Elide over-long titles (250 px) so the menu keeps a reasonable width.
    title = QFontMetrics(act.font()).elidedText(bookmark.title(), Qt.ElideRight, 250)
    act.setText(title)
    act.setData(bookmark)
    act.setIconVisibleInMenu(True)
    act.triggered.connect(receiver._bookmarkActivated)
    act.ctrlTriggered.connect(receiver._bookmarkCtrlActivated)
    act.shiftTriggered.connect(receiver._bookmarkShiftActivated)
    menu.addAction(act)
@classmethod
def addSeparatorToMenu(cls, menu, separator):
    '''
    Append a separator line to *menu*.

    @param: menu Menu
    @param: separator BookmarkItem (must be a separator; only validated)
    '''
    assert(menu)
    assert(separator.isSeparator())
    menu.addSeparator()
@classmethod
def addFolderContentsToMenu(cls, receiver, menu, folder):
    '''
    Populate *menu* with all children of *folder* and hook menu signals.

    Shows a disabled 'Empty' placeholder entry when the folder has no
    children.

    @param: receiver QObject with _menuAboutToShow/_menuMiddleClicked slots
    @param: menu Menu
    @param: folder BookmarkItem
    '''
    menu.aboutToShow.connect(receiver._menuAboutToShow)
    menu.menuMiddleClicked.connect(receiver._menuMiddleClicked)
    for child in folder.children():
        cls.addActionToMenu(receiver, menu, child)
    if menu.isEmpty():
        menu.addAction(_('Empty')).setDisabled(True)
#@classmethod
#def migrateBookmarksIfNecessary(cls, bookmarks):
# '''
# @brief: Migration from Sql Bookmarks (returns tree if bookmarks migrated)
# '''
# pass
|
nilq/baby-python
|
python
|
"""Merge constrained primitives as property constraints."""
import collections
from typing import Tuple, Optional, List, Mapping, MutableMapping, Sequence
from icontract import ensure
from aas_core_codegen import intermediate
from aas_core_codegen.common import Error
from aas_core_codegen.infer_for_schema import (
_len as infer_for_schema_len,
_pattern as infer_for_schema_pattern,
)
from aas_core_codegen.infer_for_schema._types import (
ConstraintsByProperty,
LenConstraint,
PatternConstraint,
)
@ensure(lambda result: (result[0] is not None) ^ (result[1] is not None))
def _infer_len_constraints_by_constrained_primitive(
    symbol_table: intermediate.SymbolTable,
) -> Tuple[
    Optional[MutableMapping[intermediate.ConstrainedPrimitive, LenConstraint]],
    Optional[List[Error]],
]:
    """Infer the constraints on ``len(.)`` of the constrained primitives."""
    # NOTE (mristin, 2022-02-11):
    # We do this inference in two passes. In the first pass, we only infer
    # the constraints defined for the constrained primitive and ignore the ancestors.
    # In the second pass, we stack the constraints of the ancestors as well.
    errors = []  # type: List[Error]

    # Pass 1: constraints declared directly on each constrained primitive.
    first_pass: MutableMapping[
        intermediate.ConstrainedPrimitive, LenConstraint
    ] = collections.OrderedDict()
    for symbol in symbol_table.symbols:
        if isinstance(symbol, intermediate.ConstrainedPrimitive):
            (
                len_constraint,
                len_constraint_errors,
            ) = infer_for_schema_len.infer_len_constraint_of_self(
                constrained_primitive=symbol
            )
            if len_constraint_errors is not None:
                errors.extend(len_constraint_errors)
            else:
                assert len_constraint is not None
                first_pass[symbol] = len_constraint
    if len(errors) > 0:
        return None, errors

    # Pass 2: stack the ancestors' constraints. Topological order guarantees
    # every parent is already present in ``second_pass`` before its child.
    second_pass: MutableMapping[
        intermediate.ConstrainedPrimitive, LenConstraint
    ] = collections.OrderedDict()
    for symbol in symbol_table.symbols_topologically_sorted:
        if isinstance(symbol, intermediate.ConstrainedPrimitive):
            # NOTE (mristin, 2022-02-11):
            # We make the copy in order to avoid bugs when we start processing
            # the inheritances.
            len_constraint = first_pass[symbol].copy()
            for inheritance in symbol.inheritances:
                inherited_len_constraint = second_pass.get(inheritance, None)
                assert (
                    inherited_len_constraint is not None
                ), "Expected topological order"
                # Tighten the interval: keep the larger minimum and
                # the smaller maximum of child vs. ancestor.
                if inherited_len_constraint.min_value is not None:
                    len_constraint.min_value = (
                        max(
                            len_constraint.min_value, inherited_len_constraint.min_value
                        )
                        if len_constraint.min_value is not None
                        else inherited_len_constraint.min_value
                    )
                if inherited_len_constraint.max_value is not None:
                    len_constraint.max_value = (
                        min(
                            len_constraint.max_value, inherited_len_constraint.max_value
                        )
                        if len_constraint.max_value is not None
                        else inherited_len_constraint.max_value
                    )
            second_pass[symbol] = len_constraint
    assert len(errors) == 0
    return second_pass, None
def _infer_pattern_constraints_by_constrained_primitive(
    symbol_table: intermediate.SymbolTable,
    pattern_verifications_by_name: infer_for_schema_pattern.PatternVerificationsByName,
) -> MutableMapping[intermediate.ConstrainedPrimitive, List[PatternConstraint]]:
    """Infer the pattern constraints of the constrained strings."""
    # NOTE (mristin, 2022-02-11):
    # We do this inference in two passes. In the first pass, we only infer
    # the constraints defined for the constrained primitive and ignore the ancestors.
    # In the second pass, we stack the constraints of the ancestors as well.

    # Pass 1: patterns declared directly on each constrained *string*
    # primitive (non-string constrainees carry no patterns).
    first_pass: MutableMapping[
        intermediate.ConstrainedPrimitive,
        List[PatternConstraint],
    ] = collections.OrderedDict()
    for symbol in symbol_table.symbols:
        if (
            isinstance(symbol, intermediate.ConstrainedPrimitive)
            and symbol.constrainee is intermediate.PrimitiveType.STR
        ):
            pattern_constraints = infer_for_schema_pattern.infer_patterns_on_self(
                constrained_primitive=symbol,
                pattern_verifications_by_name=pattern_verifications_by_name,
            )
            first_pass[symbol] = pattern_constraints

    # Pass 2: prepend the ancestors' patterns (ancestor patterns come first).
    # NOTE(review): this iterates ``first_pass`` in ``symbol_table.symbols``
    # order, yet the asserts below expect topological order -- presumably
    # ``symbols`` is already topologically sorted; confirm upstream.
    second_pass: MutableMapping[
        intermediate.ConstrainedPrimitive,
        List[PatternConstraint],
    ] = collections.OrderedDict()
    for symbol in first_pass:
        # NOTE (mristin, 2022-02-11):
        # We make the copy in order to avoid bugs when we start processing
        # the inheritances.
        pattern_constraints = first_pass[symbol][:]
        for inheritance in symbol.inheritances:
            assert inheritance in first_pass, (
                f"We are processing the constrained primitive {symbol.name!r}. "
                f"However, its parent, {inheritance.name!r}, has not been processed in "
                f"the first pass. Something probably went wrong in the first pass."
            )
            inherited_pattern_constraints = second_pass.get(inheritance, None)
            assert inherited_pattern_constraints is not None, (
                f"Expected topological order. However, the symbol {symbol.name!r} "
                f"is being processed before one of its parents, {inheritance.name!r}."
            )
            pattern_constraints = inherited_pattern_constraints + pattern_constraints
        second_pass[symbol] = pattern_constraints
    return second_pass
@ensure(lambda result: (result[0] is not None) ^ (result[1] is not None))
def infer_constraints_by_class(
    symbol_table: intermediate.SymbolTable,
) -> Tuple[
    Optional[MutableMapping[intermediate.ClassUnion, ConstraintsByProperty]],
    Optional[List[Error]],
]:
    """Infer the constraints from the invariants and constrained primitives."""
    errors = []  # type: List[Error]

    # Map verification-function name -> pattern verification, used to
    # recognize pattern-checking invariants.
    pattern_verifications_by_name = (
        infer_for_schema_pattern.map_pattern_verifications_by_name(
            verifications=symbol_table.verification_functions
        )
    )

    # Constraints carried by the constrained primitives themselves;
    # these are merged into the constraints of the properties typed by them.
    (
        len_constraints_by_constrained_primitive,
        some_errors,
    ) = _infer_len_constraints_by_constrained_primitive(symbol_table=symbol_table)
    if some_errors is not None:
        errors.extend(some_errors)
    if len(errors) > 0:
        return None, errors
    assert len_constraints_by_constrained_primitive is not None
    patterns_by_constrained_primitive = (
        _infer_pattern_constraints_by_constrained_primitive(
            symbol_table=symbol_table,
            pattern_verifications_by_name=pattern_verifications_by_name,
        )
    )

    result: MutableMapping[
        intermediate.ClassUnion, ConstraintsByProperty
    ] = collections.OrderedDict()
    for symbol in symbol_table.symbols:
        if not isinstance(
            symbol, (intermediate.AbstractClass, intermediate.ConcreteClass)
        ):
            continue

        # region Infer constraints on ``len(.)``
        len_constraints_by_property: MutableMapping[
            intermediate.Property, LenConstraint
        ] = collections.OrderedDict()
        (
            len_constraints_from_invariants,
            len_constraints_errors,
        ) = infer_for_schema_len.len_constraints_from_invariants(cls=symbol)
        if len_constraints_errors is not None:
            errors.extend(len_constraints_errors)
            continue
        assert len_constraints_from_invariants is not None

        patterns_by_property: MutableMapping[
            intermediate.Property, List[PatternConstraint]
        ] = collections.OrderedDict()
        patterns_from_invariants_by_property = (
            infer_for_schema_pattern.patterns_from_invariants(
                cls=symbol, pattern_verifications_by_name=pattern_verifications_by_name
            )
        )

        # region Merge the length constraints
        for prop in symbol.properties:
            # NOTE (mristin, 2022-03-03):
            # We need to go beneath ``Optional`` as the constraints are applied even
            # if a property is optional. In cases where cardinality is affected by
            # ``Optional``, the client code needs to cover them separately.
            type_anno = intermediate.beneath_optional(prop.type_annotation)
            len_constraint_from_type: Optional[LenConstraint] = None
            len_constraint_from_invariants = len_constraints_from_invariants.get(
                prop, None
            )
            if isinstance(type_anno, intermediate.OurTypeAnnotation) and isinstance(
                type_anno.symbol, intermediate.ConstrainedPrimitive
            ):
                len_constraint_from_type = len_constraints_by_constrained_primitive.get(
                    type_anno.symbol, None
                )

            # Merge the constraint from the type and from the invariants
            if (
                len_constraint_from_type is None
                and len_constraint_from_invariants is None
            ):
                # Neither source constrains this property.
                pass
            elif (
                len_constraint_from_type is not None
                and len_constraint_from_invariants is None
            ):
                # Only record non-trivial constraints (at least one bound set).
                if (
                    len_constraint_from_type.min_value is not None
                    or len_constraint_from_type.max_value is not None
                ):
                    len_constraints_by_property[prop] = len_constraint_from_type
            elif (
                len_constraint_from_type is None
                and len_constraint_from_invariants is not None
            ):
                if (
                    len_constraint_from_invariants.min_value is not None
                    or len_constraint_from_invariants.max_value is not None
                ):
                    len_constraints_by_property[prop] = len_constraint_from_invariants
            elif (
                len_constraint_from_type is not None
                and len_constraint_from_invariants is not None
            ):
                # NOTE (mristin, 2022-03-02):
                # We have to make the bounds *stricter* since both
                # the type constraints and the invariant(s) need to be satisfied.
                min_value = infer_for_schema_len.max_with_none(
                    len_constraint_from_type.min_value,
                    len_constraint_from_invariants.min_value,
                )
                max_value = infer_for_schema_len.min_with_none(
                    len_constraint_from_type.max_value,
                    len_constraint_from_invariants.max_value,
                )
                if (
                    min_value is not None
                    and max_value is not None
                    and min_value > max_value
                ):
                    errors.append(
                        Error(
                            symbol.parsed.node,
                            f"The inferred minimum and maximum value on len(.) "
                            f"is contradictory: "
                            f"minimum = {min_value}, maximum = {max_value}; "
                            f"please check the invariants and "
                            f"any involved constrained primitives",
                        )
                    )
                    continue
                if min_value is not None or max_value is not None:
                    len_constraints_by_property[prop] = LenConstraint(
                        min_value=min_value, max_value=max_value
                    )
            else:
                # Unreachable: the four cases above are exhaustive.
                # NOTE(review): the second placeholder is missing the ``=``
                # of the debug f-string spec (cf. the first) -- cosmetic only.
                raise AssertionError(
                    f"Unhandled case: "
                    f"{len_constraint_from_type=}, {len_constraint_from_invariants}"
                )
        # endregion

        # region Infer constraints on string patterns
        for prop in symbol.properties:
            # NOTE (mristin, 2022-03-03):
            # We need to go beneath ``Optional`` as the constraints are applied even
            # if a property is optional. In cases where cardinality is affected by
            # ``Optional``, the client code needs to cover them separately.
            type_anno = intermediate.beneath_optional(prop.type_annotation)
            patterns_from_type: List[PatternConstraint] = []
            patterns_from_invariants = patterns_from_invariants_by_property.get(
                prop, []
            )
            if isinstance(type_anno, intermediate.OurTypeAnnotation) and isinstance(
                type_anno.symbol, intermediate.ConstrainedPrimitive
            ):
                patterns_from_type = patterns_by_constrained_primitive.get(
                    type_anno.symbol, []
                )
            # Patterns simply stack; all of them must hold.
            merged = patterns_from_type + patterns_from_invariants
            if len(merged) > 0:
                patterns_by_property[prop] = merged
        # endregion

        result[symbol] = ConstraintsByProperty(
            len_constraints_by_property=len_constraints_by_property,
            patterns_by_property=patterns_by_property,
        )
    if len(errors) > 0:
        return None, errors
    return result, None
@ensure(lambda result: (result[0] is not None) ^ (result[1] is not None))
def merge_constraints_with_ancestors(
    symbol_table: intermediate.SymbolTable,
    constraints_by_class: Mapping[intermediate.ClassUnion, ConstraintsByProperty],
) -> Tuple[
    Optional[MutableMapping[intermediate.ClassUnion, ConstraintsByProperty]],
    Optional[Error],
]:
    """
    Merge the constraints over all the classes with their ancestors.

    Usually, when you generate a schema, you do *not* want to inherit the constraints
    over the properties. Most schema engines will do that for you and you want to be
    as explicit as possible in the schema for readability (whereas merged constraints
    might not be as readable, since you do not explicitly see their origin).

    However, for some applications we indeed want to stack the constraints and merge
    them. For example, this is the case when we (semi-)automatically generate test
    data. In those cases, you should use this function.

    The length constraints are merged by picking the smaller interval that fits.
    Patterns are simply stacked together.
    """
    new_constraints_by_class: MutableMapping[
        intermediate.ClassUnion, ConstraintsByProperty
    ] = collections.OrderedDict()
    for symbol in symbol_table.symbols_topologically_sorted:
        if not isinstance(
            symbol, (intermediate.AbstractClass, intermediate.ConcreteClass)
        ):
            continue
        this_constraints_by_props = constraints_by_class[symbol]
        new_len_constraints_by_property: MutableMapping[
            intermediate.Property, LenConstraint
        ] = collections.OrderedDict()
        new_patterns_by_property: MutableMapping[
            intermediate.Property, Sequence[PatternConstraint]
        ] = collections.OrderedDict()
        for prop in symbol.properties:
            # region Merge len constraints

            # Collect this class' constraint plus the (already merged)
            # constraints of all parents, then intersect the intervals.
            len_constraints = []
            this_len_constraint = (
                this_constraints_by_props.len_constraints_by_property.get(prop, None)
            )
            if this_len_constraint is not None:
                len_constraints.append(this_len_constraint)
            for parent in symbol.inheritances:
                # NOTE (mristin, 2022-05-15):
                # Assume here that all the ancestors already inherited their constraints
                # due to the topological order in the iteration.
                that_constraints_by_props = new_constraints_by_class[parent]
                that_len_constraint = (
                    that_constraints_by_props.len_constraints_by_property.get(
                        prop, None
                    )
                )
                if that_len_constraint is not None:
                    len_constraints.append(that_len_constraint)

            # Pick the largest minimum and the smallest maximum over all
            # collected constraints (the tightest interval that fits).
            min_value = None
            max_value = None
            for len_constraint in len_constraints:
                if min_value is None:
                    min_value = len_constraint.min_value
                else:
                    if len_constraint.min_value is not None:
                        min_value = max(len_constraint.min_value, min_value)
                if max_value is None:
                    max_value = len_constraint.max_value
                else:
                    if len_constraint.max_value is not None:
                        max_value = min(len_constraint.max_value, max_value)
            if (
                min_value is not None
                and max_value is not None
                and min_value > max_value
            ):
                return None, Error(
                    symbol.parsed.node,
                    f"We could not stack the length constraints "
                    f"on the property {prop.name} as they are contradicting: "
                    f"min_value == {min_value} and max_value == {max_value}. "
                    f"Please check the invariants and the invariants of all "
                    f"the ancestors.",
                )
            if min_value is not None or max_value is not None:
                new_len_constraints_by_property[prop] = LenConstraint(
                    min_value=min_value, max_value=max_value
                )
            # endregion

            # region Merge patterns
            # NOTE (mristin, 2022-05-15):
            # The following logic has quadratic time complexity, but it seems that
            # the runtime is currently no problem in practice.
            patterns = []  # type: List[PatternConstraint]
            this_patterns = this_constraints_by_props.patterns_by_property.get(
                prop, None
            )
            if this_patterns is not None:
                patterns.extend(this_patterns)
            set_of_this_patterns = (
                set()
                if this_patterns is None
                else set(this_pattern.pattern for this_pattern in this_patterns)
            )
            for parent in symbol.inheritances:
                # NOTE (mristin, 2022-05-15):
                # Assume here that all the ancestors already inherited their constraints
                # due to the topological order in the iteration.
                that_constraints_by_props = new_constraints_by_class[parent]
                that_patterns = that_constraints_by_props.patterns_by_property.get(
                    prop, None
                )
                if that_patterns is not None:
                    for that_pattern in that_patterns:
                        # NOTE (mristin, 2022-06-15):
                        # We have to make sure that we do not inherit the same pattern
                        # from the parent.
                        #
                        # This is particularly important if the inherited property is a
                        # constrained primitive. In that case, if we didn't check for
                        # the duplicates, we would inherit the same pattern multiple
                        # times as we can not distinguish whether the pattern
                        # comes from an invariant of the parent or an invariant of
                        # the constrained primitive.
                        if that_pattern.pattern not in set_of_this_patterns:
                            patterns.append(that_pattern)
            if len(patterns) > 0:
                new_patterns_by_property[prop] = patterns
            # endregion
        new_constraints_by_class[symbol] = ConstraintsByProperty(
            len_constraints_by_property=new_len_constraints_by_property,
            patterns_by_property=new_patterns_by_property,
        )
    return new_constraints_by_class, None
|
nilq/baby-python
|
python
|
from core.models import UrineDrugScreen
from rest_framework import serializers
class UrineDrugScreenSerializer(serializers.HyperlinkedModelSerializer):
    """DRF serializer exposing all recorded fields of a urine drug screen."""

    class Meta:
        # Fields cover test metadata (participant, date, temperature,
        # pregnancy test) plus the individual panel results.
        model = UrineDrugScreen
        fields = ('id', 'participant_id', 'date_of_test', 'uds_temp', 'pregnancy_test', 'opiates', 'fentanyl', 'bup', 'coc', 'amp', 'm_amp', 'thc', 'mtd', 'pcp', 'bar', 'bzo', 'tca', 'oxy')
|
nilq/baby-python
|
python
|
from typing import Optional, List
from pydantic import BaseModel
from feeder.api.models import BasePaginatedList
class GenericResponse(BaseModel):
    """Default success envelope for endpoints without a payload."""
    success: str = "ok"
class FrontButton(BaseModel):
    """Enable/disable toggle for the feeder's physical front button."""
    enable: bool = True
class UTCOffset(BaseModel):
    """Device timezone expressed as an offset from UTC."""
    # NOTE(review): unit not shown here; the default -7 suggests hours
    # (US Pacific/Mountain) -- confirm against the device API.
    utc_offset: int = -7
class TriggerFeeding(BaseModel):
    """Request body for triggering a manual feeding."""
    # NOTE(review): portion unit not shown here (0.0625 = 1/16, presumably
    # cups) -- confirm against the device API.
    portion: float = 0.0625
class FeedEvent(BaseModel):
    """One dispensing event reported by a feeder device.

    Field semantics are not documented in this module; the names below are
    taken as-is from the device payload.
    """
    device_name: Optional[str]  # human-friendly name; may be absent
    device_hid: str  # hardware identifier of the reporting device
    timestamp: int
    start_time: int
    end_time: int
    pour: Optional[int]
    full: Optional[int]
    grams_expected: int  # requested amount
    grams_actual: int  # dispensed amount
    hopper_start: int
    hopper_end: int
    source: int
    fail: bool  # True when the feed did not complete successfully
    trip: Optional[bool]
    lrg: Optional[bool]
    vol: Optional[bool]
    bowl: Optional[bool]
    recipe_id: str
    error: Optional[str]  # device-reported error message, if any
class FeedHistory(BasePaginatedList):
    """Paginated list of feed events."""
    data: List[FeedEvent]
|
nilq/baby-python
|
python
|
from al_utils.vaal_util import train_vae, train_vae_disc
from al_utils import vae_sampling as vs
import sys
import pickle
import torch
import numpy as np
import os
from copy import deepcopy
from pycls.core.config import custom_dump_cfg
import pycls.datasets.loader as imagenet_loader
def save_numpy_arrays(arrays, names, parent_dir, saveinText=False):
    """Save each array under ``parent_dir`` using the matching name.

    Arrays and names are matched positionally. With ``saveinText`` the
    arrays are written as integer text files (``.txt``), otherwise in the
    binary ``.npy`` format. ``parent_dir`` is concatenated verbatim, so it
    must already end with a path separator.
    """
    for idx, arr in enumerate(arrays):
        base = parent_dir + names[idx]
        if saveinText:
            out_path = base + ".txt"
            np.savetxt(out_path, arr, fmt="%d")
        else:
            out_path = base + ".npy"
            np.save(out_path, arr)
        print("Saved {} at path: {} !!".format(names[idx], out_path))
# #train task model
def vaal_active_sampling(cfg, dataObj, debug=False):
    """Implements VAAL sampling.

    Args:
        cfg: Reference to the config yaml
        dataObj: Reference to data class
        debug (bool, optional): Switch for debug mode. Defaults to False.
    """
    # Temporarily switch the training image size to the VAAL-specific size
    # (restored at the end for ImageNet).
    temp_old_im_size = cfg.TRAIN.IM_SIZE
    if cfg.TRAIN.DATASET.lower() == "imagenet":
        cfg.TRAIN.IM_SIZE = cfg.VAAL.IM_SIZE  # args.vaal_im_size
        print("cfg.TRAIN.IM_SIZE: ", cfg.TRAIN.IM_SIZE)
        print("cfg.VAAL.IM_SIZE: ", cfg.VAAL.IM_SIZE)

    # Load the current labeled (lSet) / unlabeled (uSet) index partitions.
    lSet_path = cfg.ACTIVE_LEARNING.LSET_PATH
    uSet_path = cfg.ACTIVE_LEARNING.USET_PATH
    if debug:
        print("lSetPath: {}".format(lSet_path))
    if debug:
        print("uSetPath: {}".format(uSet_path))
    lSet = np.load(lSet_path, allow_pickle=True)
    uSet = np.load(uSet_path, allow_pickle=True)
    print("---------Loaded partitions--------")
    print("lSet: {}, uSet: {}".format(len(lSet), len(uSet)))

    # ImageNet is loaded via its own loader (workers forced to 0 and
    # restored later); other datasets build an un-augmented dataset in
    # eval mode so the VAE sees clean inputs.
    if cfg.TRAIN.DATASET.upper() == "IMAGENET":
        temp_cfg_worker = cfg.DATA_LOADER.NUM_WORKERS
        cfg.DATA_LOADER.NUM_WORKERS = 0
    if cfg.TRAIN.DATASET == "IMAGENET":
        dataObj = None
        noAugDataset = None
    elif cfg.TRAIN.DATASET == "STL10":
        oldmode = dataObj.eval_mode
        dataObj.eval_mode = True
        noAugDataset, _ = dataObj.getDatasetForVAAL(
            save_dir=cfg.TRAIN_DIR, isTrain=True, isDownload=True
        )
        dataObj.eval_mode = oldmode
    else:
        oldmode = dataObj.eval_mode
        dataObj.eval_mode = True
        noAugDataset, _ = dataObj.getDataset(
            save_dir=cfg.TRAIN_DIR, isTrain=True, isDownload=True
        )
        dataObj.eval_mode = oldmode

    # First train vae and disc
    vae, disc = train_vae_disc(cfg, lSet, uSet, noAugDataset, dataObj, debug)

    # Build a sequential loader over the unlabeled pool for scoring.
    if cfg.TRAIN.DATASET == "IMAGENET":
        temp_vaal_bs = cfg.VAAL.VAE_BS
        cfg.VAAL.VAE_BS = cfg.TRAIN.BATCH_SIZE
        uSetLoader = imagenet_loader.construct_loader_no_aug(
            cfg, indices=uSet, isShuffle=False, isDistributed=False
        )  # , isVaalSampling=True)
        cfg.VAAL.VAE_BS = temp_vaal_bs
    else:
        uSetLoader = dataObj.getSequentialDataLoader(
            indexes=uSet,
            batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
            data=noAugDataset,
        )

    # Do active sampling
    print("Setting vae and disc in eval mode..!")
    vae.eval()
    disc.eval()
    print("Done!!")
    sampler = vs.AdversarySampler(budget=cfg.ACTIVE_LEARNING.BUDGET_SIZE)
    print("call vae sampling to get activeSet")
    activeSet, uSet = sampler.sample_for_labeling(
        vae=vae, discriminator=disc, unlabeled_dataloader=uSetLoader, uSet=uSet, cfg=cfg
    )
    # Newly selected samples move into the labeled set.
    lSet = np.append(lSet, activeSet)

    # save arrays in npy format
    save_numpy_arrays(
        [lSet, uSet, activeSet], ["lSet", "uSet", "activeSet"], cfg.OUT_DIR
    )
    # save arrays in txt format
    save_numpy_arrays(
        [lSet, uSet, activeSet],
        ["lSet", "uSet", "activeSet"],
        cfg.OUT_DIR,
        saveinText=True,
    )

    # Restore the ImageNet-specific overrides made above.
    if cfg.TRAIN.DATASET.lower() == "imagenet":
        cfg.TRAIN.IM_SIZE = temp_old_im_size
        cfg.DATA_LOADER.NUM_WORKERS = temp_cfg_worker

    # Dump cfg file -- write a follow-up config pointing at the new
    # partitions so the next AL iteration can pick them up.
    temp_cfg = deepcopy(cfg)
    temp_cfg.ACTIVE_LEARNING.ACTIVATE = True
    temp_cfg.ACTIVE_LEARNING.LSET_PATH = os.path.join(temp_cfg.OUT_DIR, "lSet.npy")
    temp_cfg.ACTIVE_LEARNING.USET_PATH = os.path.join(temp_cfg.OUT_DIR, "uSet.npy")
    custom_dump_cfg(temp_cfg)
def vaal_active_sampling_minus_disc(cfg, dataObj, debug=False):
    """VAAL[-d]: VAAL sampling without the discriminator.

    Trains only the VAE and selects points by comparing unlabeled samples
    against the labeled set in VAE space.

    Args:
        cfg: Reference to the config yaml
        dataObj: Reference to data class (ignored for ImageNet)
        debug (bool, optional): Switch for debug mode. Defaults to False.
    """
    # Load the current labeled/unlabeled index partitions.
    lSet_path = cfg.ACTIVE_LEARNING.LSET_PATH
    uSet_path = cfg.ACTIVE_LEARNING.USET_PATH
    lSet = np.load(lSet_path, allow_pickle=True)
    uSet = np.load(uSet_path, allow_pickle=True)
    # trainDataset = dataObj.getDataset(save_dir=cfg.TRAIN_DIR, isTrain=True, isDownload=True)
    if cfg.TRAIN.DATASET == "IMAGENET":
        dataObj = None
        noAugDataset = None
    else:
        # Build an un-augmented dataset in eval mode for the VAE.
        oldmode = dataObj.eval_mode
        dataObj.eval_mode = True
        noAugDataset, _ = dataObj.getDataset(
            save_dir=cfg.TRAIN_DIR, isTrain=True, isDownload=True
        )
        dataObj.eval_mode = oldmode

    # First train vae
    vae = train_vae(cfg, lSet, uSet, noAugDataset, dataObj, debug)

    # Loaders over both sets: labeled (reference) and unlabeled (candidates).
    if cfg.TRAIN.DATASET == "IMAGENET":
        lSetLoader = imagenet_loader.construct_loader_no_aug(
            cfg, indices=lSet, isShuffle=False, isDistributed=False
        )  # , isVaalSampling=True)
        uSetLoader = imagenet_loader.construct_loader_no_aug(
            cfg, indices=uSet, isShuffle=False, isDistributed=False
        )  # , isVaalSampling=True)
    else:
        lSetLoader = dataObj.getIndexesDataLoader(
            indexes=lSet,
            batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
            data=noAugDataset,
        )
        uSetLoader = dataObj.getSequentialDataLoader(
            indexes=uSet,
            batch_size=int(cfg.TRAIN.BATCH_SIZE / cfg.NUM_GPUS),
            data=noAugDataset,
        )

    # Do active sampling
    vae.eval()
    sampler = vs.AdversarySampler(budget=cfg.ACTIVE_LEARNING.BUDGET_SIZE)
    with torch.no_grad():
        activeSet, uSet = sampler.vae_sample_for_labeling(
            vae=vae,
            uSet=uSet,
            lSet=lSet,
            unlabeled_dataloader=uSetLoader,
            lSetLoader=lSetLoader,
        )
    # Newly selected samples move into the labeled set; persist both formats.
    lSet = np.append(lSet, activeSet)
    save_numpy_arrays(
        [lSet, uSet, activeSet], ["lSet", "uSet", "activeSet"], cfg.OUT_DIR
    )
    save_numpy_arrays(
        [lSet, uSet, activeSet],
        ["lSet", "uSet", "activeSet"],
        cfg.OUT_DIR,
        saveinText=True,
    )
# Script entry point: this module is launched as a subprocess with the path
# to a pickle file holding the (cfg, dataObj) pair prepared by the parent.
tempArgsFile = sys.argv[1]

# Getting back the objects:
with open(tempArgsFile, "rb") as f:  # Python 3: open(..., 'rb')
    cfg, dataObj = pickle.load(f)

# Dispatch on the configured sampling function; unknown values fall through
# silently (no else branch).
if cfg.ACTIVE_LEARNING.SAMPLING_FN == "vaal":
    # Run original vaal
    print("--------------------------")
    print("Running VAAL Sampling")
    print("--------------------------")
    print("dataObj: {}".format(dataObj))
    vaal_active_sampling(cfg, dataObj, debug=True)
elif cfg.ACTIVE_LEARNING.SAMPLING_FN == "vaal_minus_disc":
    # Run vaal[-d]
    print("--------------------------")
    print("Running VAAL MINUS DISC Sampling")
    print("--------------------------")
    vaal_active_sampling_minus_disc(cfg, dataObj, debug=True)
|
nilq/baby-python
|
python
|
import sys
import os
import datetime
import psycopg2
import pandas
from subprocess import call, Popen
print "dropping temporary members from database..."
conn_string = "dbname='hamlethurricane' user=postgres port='5432' host='127.0.0.1' password='password'"
try:
conn = psycopg2.connect(conn_string)
except Exception as e:
print str(e)
sys.exit()
hurricane_name = 'ARTHUR'
dataframe_cur = conn.cursor()
dataframe_sql = """Select * from hurricane_{}""".format(hurricane_name)
dataframe_cur.execute(dataframe_sql)
data = dataframe_cur.fetchall()
colnames = [desc[0] for desc in dataframe_cur.description]
dataframe = pandas.DataFrame(data)
dataframe.columns = colnames
conn.commit()
range_feat = range(len(dataframe))
range_feat_strp = str(range_feat).strip('[]')
range_feat_strp_v2 = range_feat_strp.split(',')
drop_dismembered_cur = conn.cursor()
for key in range(1, len(dataframe)):
sql = """drop table if exists {}_{} cascade""".format(hurricane_name, key)
drop_dismembered_cur.execute(sql)
conn.commit()
conn.close()
|
nilq/baby-python
|
python
|
######################################################################
######################################################################
# Copyright Tsung-Hsien Wen, Cambridge Dialogue Systems Group, 2016 #
######################################################################
######################################################################
import numpy as np
import theano
import theano.tensor as T
# numerical stability
eps = 1e-7
# gradient clipping
class GradClip(theano.compile.ViewOp):
    """Identity op whose gradient is clipped to a fixed interval.

    Subclassing ``theano.compile.ViewOp`` keeps the forward pass a no-op
    view of the input; only the backward pass is altered.
    """

    def __init__(self, clip_lower_bound, clip_upper_bound):
        self.clip_lower_bound = clip_lower_bound
        self.clip_upper_bound = clip_upper_bound
        assert(self.clip_upper_bound >= self.clip_lower_bound)

    def grad(self, args, g_outs):
        # Clip every incoming output-gradient elementwise to the interval.
        return [T.clip(g_out, self.clip_lower_bound, self.clip_upper_bound) for g_out in g_outs]
def clip_gradient(x, bound):
    """Wrap *x* so its gradient is clipped to [-bound, bound]."""
    grad_clip = GradClip(-bound, bound)
    try:
        # Register a canonicalization that removes the op from the forward
        # graph (it only matters for gradients). Registering the same name
        # twice raises ValueError, which is deliberately ignored.
        T.opt.register_canonicalize(theano.gof.OpRemove(grad_clip), name='grad_clip_%.1f' % (bound))
    except ValueError:
        pass
    return grad_clip(x)
# obtain sent logprob by summing over word logprob
def collectSentLogp(p, cutoff_t, cutoff_b):
    """Sum per-word log10 probabilities into per-sentence log probabilities.

    p        -- word probabilities, time-major; transposed to batch-major.
    cutoff_t -- per-sentence lengths (words beyond are padding).
    cutoff_b -- number of valid sentences in the batch.
    Returns a vector of log10 sentence probabilities for the first
    cutoff_b sentences.
    """
    q = p.dimshuffle(1, 0)

    def sump(p_b, stop_t):
        # Sum log-probs only up to the sentence's true length.
        logp = T.sum(T.log10(p_b[:stop_t]))
        return logp

    cutoff_logp, _ = theano.scan(fn=sump,
                                 sequences=[q[:cutoff_b], cutoff_t[:cutoff_b]],
                                 outputs_info=[None])
    return cutoff_logp
# Node class for performing beam search
class BeamSearchNode(object):
    """A single hypothesis in beam-search decoding.

    Holds the recurrent state (h, c), a back-pointer to the previous node,
    the word chosen at this step, the accumulated log-probability and the
    hypothesis length.
    """

    def __init__(self, h, c, prevNode, wordid, logp, leng):
        self.h = h
        self.c = c
        self.prevNode = prevNode
        self.wordid = wordid
        self.logp = logp
        self.leng = leng
        self.sv = None  # filled in later by the decoder

    def eval(self):
        """Length-normalised score; hypotheses longer than 40 are penalised."""
        score = self.logp / float(self.leng - 1 + eps)
        if self.leng > 40:
            score -= 40.0
        return score
# basic class for Recurrent Language Generator
class BaseRLG(object):
    """Base class for recurrent language generators.

    Subclasses provide the theano training graph (``unroll``/``_recur``)
    and the numpy decoding routines (``beamSearch``/``sample``/``_gen``).
    """

    def __init__(self, gentype, beamwidth, overgen,
                 vocab_size, hidden_size, batch_size, da_sizes):
        # Hyperparameters; the short names mirror the notation used in
        # the concrete generator implementations.
        self.gentype = gentype
        self.di = vocab_size      # vocabulary size
        self.dh = hidden_size     # recurrent hidden size
        self.db = batch_size
        self.dfs = da_sizes       # dialogue-act feature sizes
        self.overgen = overgen    # number of over-generated candidates
        self.beamwidth = beamwidth

    def _init_params(self):
        # TODO: function for initialise weight matrices
        pass

    def unroll(self):
        # TODO: unrolling function in theano, for training
        pass

    def _recur(self):
        # TODO: per step recurrence function in theano, for training
        pass

    def beamSearch(self):
        # TODO: generation function in numpy, beam search decoding
        pass

    def sample(self):
        # TODO: generation function in numpy, random sampling
        pass

    def _gen(self):
        # TODO: per step generation function in numpy, for decoding
        pass

    def loadConverseParams(self):
        # TODO: load numpy parameters
        pass

    def setParams(self, params):
        """Copy the given values into the theano shared parameters."""
        for idx, shared in enumerate(self.params):
            shared.set_value(params[idx])

    def getParams(self):
        """Fetch the current parameter values."""
        values = []
        for shared in self.params:
            values.append(shared.get_value())
        return values

    def numOfParams(self):
        """Total number of scalar parameters."""
        return sum(p.get_value().size for p in self.params)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2018-07-30 19:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the integer ``cpus`` field (default 1) to the VMFlavor model."""

    dependencies = [
        ('data', '0053_merge_20180615_1859'),
    ]

    operations = [
        migrations.AddField(
            model_name='vmflavor',
            name='cpus',
            field=models.IntegerField(default=1, help_text='How many CPUs are assigned to this flavor'),
        ),
    ]
|
nilq/baby-python
|
python
|
"""Allow _view property in graph file schema
Revision ID: 8f6d4eef042d
Revises: a2316139e9a3
Create Date: 2021-10-20 10:04:21.668552
"""
import hashlib
import json
from os import path
import fastjsonschema
import sqlalchemy as sa
from alembic import context
from alembic import op
from sqlalchemy import table, column, and_
from sqlalchemy.orm import Session
from migrations.utils import window_chunk
# revision identifiers, used by Alembic.
revision = '8f6d4eef042d'
down_revision = 'a2316139e9a3'
branch_labels = None
depends_on = None
# reference to this directory
directory = path.realpath(path.dirname(__file__))
with open(path.join(directory, '../upgrade_data/graph_v3.json'), 'r') as f:
# Use this method to validate the content of an enrichment table
validate_graph = fastjsonschema.compile(json.load(f))
def drop_view_property():
    """
    We start using _view property in graph files. By design it should not exist (file
    format documentation prohibits properties starting with _). However, our schema do not used
    to validate this constrain. Just to be sure this migration runs check to delete _view
    property from existing files (new uploads with this property will not pass schema validation).
    """
    conn = op.get_bind()
    session = Session(conn)
    # Lightweight table stubs -- only the columns touched below.
    t_files = table(
        'files',
        column('content_id', sa.Integer),
        column('mime_type', sa.String))
    t_files_content = table(
        'files_content',
        column('id', sa.Integer),
        column('raw_file', sa.LargeBinary),
        column('checksum_sha256', sa.Binary)
    )
    # Stream all graph files (files joined to files_content) so memory
    # stays bounded even for large installations.
    files = conn.execution_options(stream_results=True).execute(sa.select([
        t_files_content.c.id,
        t_files_content.c.raw_file
    ]).where(
        and_(
            t_files.c.mime_type == 'vnd.lifelike.document/graph',
            t_files.c.content_id == t_files_content.c.id
        )
    ))
    for chunk in window_chunk(files, 25):
        for id, content in chunk:
            graph = json.loads(content)
            # NOTE(review): the docstring says "_view" but the key removed
            # here is '_views' -- confirm which spelling the format uses.
            if '_views' in graph:
                del graph['_views']
                # Re-validate and write back the cleaned file with a fresh
                # checksum.
                validate_graph(graph)
                raw_file = json.dumps(graph).encode('utf-8')
                checksum_sha256 = hashlib.sha256(raw_file).digest()
                session.execute(
                    t_files_content.update().where(
                        t_files_content.c.id == id
                    ).values(
                        raw_file=raw_file,
                        checksum_sha256=checksum_sha256
                    )
                )
                session.flush()
    session.commit()
def upgrade():
    """Schema upgrade; data migration runs only with `-x data_migrate=true`."""
    if context.get_x_argument(as_dictionary=True).get('data_migrate', None):
        data_upgrades()
def downgrade():
    """Schema downgrade; data migration runs only with `-x data_migrate=true`.

    Fix: this previously called ``data_upgrades()`` -- an apparent
    copy/paste slip, since ``data_downgrades()`` existed but was never
    invoked anywhere. (Both currently perform the same ``_views`` cleanup,
    so observable behavior is unchanged.)
    """
    if context.get_x_argument(as_dictionary=True).get('data_migrate', None):
        data_downgrades()
def data_upgrades():
    """Forward data migration: strip the _views property from graph files."""
    drop_view_property()
def data_downgrades():
    """Backward data migration: same cleanup (the removal is one-way)."""
    drop_view_property()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from abc import ABC, abstractmethod
from fuocore.models import (
SongModel,
ArtistModel,
AlbumModel,
PlaylistModel,
LyricModel,
UserModel,
)
class AbstractProvider(ABC):
    """Abstract music resource provider.

    Concrete providers must supply an ``identifier`` and a ``name`` and
    should override the model classes below with their own subclasses.
    """

    # A well behaved provider should implement its own models.
    Song = SongModel
    Artist = ArtistModel
    Album = AlbumModel
    Playlist = PlaylistModel
    Lyric = LyricModel
    User = UserModel

    @property
    @abstractmethod
    def identifier(self):
        """Unique identifier of the provider."""

    @property
    @abstractmethod
    def name(self):
        """Human-readable name of the provider."""
|
nilq/baby-python
|
python
|
import warnings
from mmdet.models.builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
ROI_EXTRACTORS, SHARED_HEADS, build)
from .registry import FUSION_LAYERS, MIDDLE_ENCODERS, VOXEL_ENCODERS
from mmdet3d.datasets.pipelines import Compose
def build_backbone(cfg):
    """Build backbone.

    ``cfg`` is a registry config dict; construction is delegated to
    ``mmdet.models.builder.build`` with the BACKBONES registry.
    """
    return build(cfg, BACKBONES)
def build_neck(cfg):
    """Build neck from a registry config dict (NECKS registry)."""
    return build(cfg, NECKS)
def build_roi_extractor(cfg):
    """Build RoI feature extractor from a config dict (ROI_EXTRACTORS registry)."""
    return build(cfg, ROI_EXTRACTORS)
def build_shared_head(cfg):
    """Build shared head of detector from a config dict (SHARED_HEADS registry)."""
    return build(cfg, SHARED_HEADS)
def build_head(cfg):
    """Build head from a config dict (HEADS registry)."""
    return build(cfg, HEADS)
def build_loss(cfg):
    """Build loss function from a config dict (LOSSES registry)."""
    return build(cfg, LOSSES)
def build_detector(cfg, train_cfg=None, test_cfg=None):
    """Build detector.

    Passing ``train_cfg``/``test_cfg`` here is deprecated; prefer embedding
    them inside ``cfg``. Specifying them in both places is rejected.
    """
    if train_cfg is not None or test_cfg is not None:
        warnings.warn(
            'train_cfg and test_cfg is deprecated, '
            'please specify them in model', UserWarning)
    # Refuse ambiguous double specification of either config.
    assert cfg.get('train_cfg') is None or train_cfg is None, \
        'train_cfg specified in both outer field and model field '
    assert cfg.get('test_cfg') is None or test_cfg is None, \
        'test_cfg specified in both outer field and model field '
    return build(cfg, DETECTORS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
def build_voxel_encoder(cfg):
    """Build voxel encoder from a config dict (VOXEL_ENCODERS registry)."""
    return build(cfg, VOXEL_ENCODERS)
def build_middle_encoder(cfg):
    """Build middle level encoder from a config dict (MIDDLE_ENCODERS registry)."""
    return build(cfg, MIDDLE_ENCODERS)
def build_fusion_layer(cfg):
    """Build fusion layer from a config dict (FUSION_LAYERS registry)."""
    return build(cfg, FUSION_LAYERS)
|
nilq/baby-python
|
python
|
from flask import Flask
from . import api_credentials_provider
def create_app(test_config=None) -> Flask:
    """Application factory for the service.

    Creates and configures the Flask app, defines an HTTP ping endpoint
    and registers the search blueprint.

    Returns:
        Flask: the configured flask app
    """
    app = Flask(__name__, instance_relative_config=True)

    if not test_config:
        # Production path: pull the Giphy API key from the credentials provider.
        app.config.from_mapping(
            GIPHY_API_KEY=api_credentials_provider.resolve_credentials()
        )
    else:
        app.config.from_mapping(test_config)

    @app.route("/ping")
    def ping():
        return "OK"

    from . import search
    app.register_blueprint(search.bp)
    return app
|
nilq/baby-python
|
python
|
# encapsulation
def outer(num1):
    """Demonstrate encapsulation: a helper function nested inside another."""
    print("outer")

    def inner_increment(value):
        # Inner helper only visible inside outer(); returns value + 1.
        print("inner")
        return value + 1

    incremented = inner_increment(num1)
    print(num1, incremented)


outer(10)
|
nilq/baby-python
|
python
|
'''
Extract special sequences from fasta file. You can specify which sequences get extracted by using the a separator.
Usage: python extract <fasta_file> <output_file> <separator>
Author: Nicolas Schmelling
'''
from Bio import SeqIO
import sys
def extract(fasta_file, output_file, separator):
    """Write to output_file every record whose description contains separator."""
    with open(output_file, "w") as handle:
        # Collect matching records, then write them all in one call.
        matching = [
            record
            for record in SeqIO.parse(fasta_file, "fasta")
            if separator in record.description
        ]
        SeqIO.write(matching, handle, "fasta")
if __name__ == "__main__":
    # CLI entry: python extract <fasta_file> <output_file> <separator>
    fasta_file = sys.argv[1]
    output_file = sys.argv[2]
    separator = sys.argv[3]
    extract(fasta_file, output_file, separator)
|
nilq/baby-python
|
python
|
"""Utilities for interacting with ProxyStore"""
import proxystore as ps
from typing import Any, Optional, Union
from colmena.models import SerializationMethod
class ColmenaSerializationFactory(ps.store.redis.RedisFactory):
    """Custom Factory for using Colmena serialization utilities"""
    def __init__(self,
                 key: str,
                 name: str,
                 hostname: str,
                 port: int,
                 serialization_method: Union[str, SerializationMethod] = SerializationMethod.PICKLE,
                 **kwargs) -> None:
        """Init ColmenaSerialization Factory
        Args:
            key (str): key corresponding to object in Redis.
            name (str): name of store to retrieve objects from.
            hostname (str): hostname of Redis server containing object.
            port (int): port of Redis server containing object.
            serialization_method (str): Colmena serialization method to use
                for deserializing the object when resolved from Redis.
            kwargs: keyword arguments to pass to the RedisFactory.
        """
        # Stored so __getnewargs_ex__ can reproduce this factory on unpickle.
        self.serialization_method = serialization_method
        self.kwargs = kwargs
        super(ColmenaSerializationFactory, self).__init__(
            key, name, hostname, port, **kwargs
        )
    def __getnewargs_ex__(self):
        """Helper method for pickling
        Note:
            We override default pickling behavior because a Factory may contain
            a Future if it is being asynchronously resolved and Futures cannot
            be pickled.
        """
        return (self.key, self.name, self.hostname, self.port), {
            'serialization_method': self.serialization_method,
            **self.kwargs
        }
    def resolve(self) -> Any:
        """Fetch the raw object from Redis and deserialize it with the
        configured Colmena serialization method."""
        obj_str = super(ColmenaSerializationFactory, self).resolve()
        return SerializationMethod.deserialize(self.serialization_method, obj_str)
def proxy(obj: Any,
          key: Optional[str] = None,
          is_serialized: bool = False,
          serialization_method: Union[str, SerializationMethod] = SerializationMethod.PICKLE,
          **kwargs) -> ps.proxy.Proxy:
    """Place object in Value Server and return Proxy
    Args:
        obj: object to be placed in Value Server and proxied.
        key (str): optional key to associate with object. By default, ProxyStore
            will create a key for the object (default: None).
        is_serialized (bool): True if obj is already serialized (default: False).
        serialization_method (str): serialization method to use for the object
            (default: SerializationMethod.PICKLE).
        kwargs (dict): keyword arguments to pass to ProxyStore.store.redis.RedisStore.proxy().
    Returns:
        ps.proxy.Proxy
    """
    store = ps.store.get_store('redis')
    # Serialize with Colmena's method so ColmenaSerializationFactory can
    # deserialize symmetrically on resolve().
    if not is_serialized:
        obj = SerializationMethod.serialize(serialization_method, obj)
    return store.proxy(
        obj,
        key,
        serialize=False,  # Do not use ProxyStore serialization utilities
        serialization_method=serialization_method,
        factory=ColmenaSerializationFactory,
        **kwargs
    )
def resolve_proxies_async(args: Union[object, list, tuple, dict]) -> None:
    """Begin asynchronously resolving all proxies in input

    Scan inputs for instances of `Proxy` and begin asynchronously resolving.
    This is useful if you have one or more proxies that will be needed soon
    so the underlying objects can be asynchronously resolved to reduce the
    cost of the first access to the proxy.

    Args:
        args (object, list, tuple, dict): possible object or
            iterable of objects that may be ObjectProxy instances
    """
    def resolve_async_if_proxy(obj: Any) -> None:
        # Only Proxy instances are touched; anything else is ignored.
        if isinstance(obj, ps.proxy.Proxy):
            ps.proxy.resolve_async(obj)

    if isinstance(args, ps.proxy.Proxy):
        resolve_async_if_proxy(args)
    elif isinstance(args, (list, tuple)):
        for item in args:
            resolve_async_if_proxy(item)
    elif isinstance(args, dict):
        # Iterate values directly instead of indexing each key (avoids a
        # redundant per-entry lookup; only values can be proxies here).
        for value in args.values():
            resolve_async_if_proxy(value)
|
nilq/baby-python
|
python
|
from ... import error
from ..entity import Entity
from ..component import Component
__all__ = ["Parent"]
class Parent(Component):
    """Component linking an entity to its parent entity."""

    def __init__(self, parent: Entity):
        self._parent = parent

    def parent(self, err=True) -> Entity:
        """Return the stored parent; raise ParentError when missing and err is set."""
        stored = self._parent
        if stored is None and err:
            raise error.ecs.ParentError(self.entity)
        return stored

    def __repr__(self) -> str:
        return f"{super().__repr__()}<{self._parent}>"
|
nilq/baby-python
|
python
|
from microbit import *
import utime
class Rangefinder:
    """Driver for a single-pin ultrasonic rangefinder on the micro:bit."""

    def __init__(self, pin):
        '''Setup a rangefinder on the specified pin'''
        self.pin = pin

    def distance_cm(self):
        '''Returns the distance from a rangefinder in cm, or -1 on timeout'''
        # Trigger pulse: drive low, settle, pulse high, then release low.
        self.pin.write_digital(0)
        utime.sleep_us(200)
        self.pin.write_digital(1)
        utime.sleep_us(500)
        self.pin.write_digital(0)
        init = utime.ticks_us()
        stop = init
        start = init
        timeout = 100000  # microseconds to wait before giving up
        # Wait for the echo line to go high (start of echo).
        while not self.pin.read_digital():
            if utime.ticks_us() - init > timeout:
                return -1
            start = utime.ticks_us()
        # Measure how long the echo line stays high (end of echo).
        while self.pin.read_digital():
            if utime.ticks_us() - start > timeout:
                return -1
            stop = utime.ticks_us()
        # Echo time (us) * 343 m/s, halved for the round trip, in cm:
        # us * 1e-6 s * 34300 cm/s / 2 == us * 343 / 20000.
        # Fix: removed leftover debug print(stop, start) and unused `flag`.
        return (stop - start) * 343 / 20000
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from backend.accounts import bcs_perm
from backend.iam.permissions.resources.namespace_scoped import NamespaceScopedPermCtx, NamespaceScopedPermission
from backend.resources.namespace.utils import get_namespaces_by_cluster_id
from backend.templatesets.legacy_apps.configuration import constants, models
from backend.templatesets.legacy_apps.configuration.yaml_mode.res2files import get_resource_file, get_template_files
def get_namespace_id(access_token, project_id, cluster_id, namespace):
    """Return the id of *namespace* in the given cluster, or raise ValidationError."""
    namespaces = get_namespaces_by_cluster_id(access_token, project_id, cluster_id)
    matches = [ns["id"] for ns in namespaces if ns["name"] == namespace]
    if matches:
        return matches[0]
    raise serializers.ValidationError(_("项目(id:{})下不存在命名空间({}/{})").format(project_id, cluster_id, namespace))
def add_fields_in_template_files(version_id, req_template_files):
    """
    add id and content fields in template_files

    For each requested resource, load the stored resource files of the
    given template version; when the request names specific files, keep
    only those (matched by name).

    Raises:
        serializers.ValidationError: unknown version id or empty ``files``.
    """
    try:
        ventity = models.VersionedEntity.objects.get(id=version_id)
    except models.VersionedEntity.DoesNotExist:
        raise serializers.ValidationError(f"template version(id:{version_id}) does not exist")
    entity = ventity.get_entity()
    template_files = []
    for res_file in req_template_files:
        res_name = res_file["resource_name"]
        # entity maps resource name -> comma-separated resource file ids.
        res_file_ids = entity[res_name].split(",")
        resource_file = get_resource_file(res_name, res_file_ids, "id", "name", "content")
        if "files" not in res_file:
            # No explicit selection: take all files of this resource.
            template_files.append(resource_file)
            continue
        if not res_file["files"]:
            raise serializers.ValidationError(f"empty parameter files in template_files({res_name})")
        # Keep only the requested files, matched by name.
        resource_file_map = {f["name"]: f for f in resource_file["files"]}
        files = [resource_file_map[f["name"]] for f in res_file["files"]]
        template_files.append({"resource_name": res_name, "files": files})
    return template_files
class NamespaceInfoSLZ(serializers.Serializer):
    """Serializer for a (cluster_id, namespace name) pair."""
    cluster_id = serializers.CharField()
    name = serializers.CharField()
class TemplateReleaseSLZ(serializers.Serializer):
    """Serializer validating a YAML templateset release request."""
    project_id = serializers.CharField()
    template_name = serializers.CharField()
    show_version_name = serializers.CharField()
    template_files = serializers.JSONField(required=False)
    namespace_info = NamespaceInfoSLZ()
    template_variables = serializers.JSONField(default={})
    def _validate_template_files(self, data):
        """
        template_files: [{'resource_name': 'Deployment', 'files': [{'name': ''}]}]

        When template_files is omitted, all files of the shown version are
        used; otherwise the requested subset is resolved and enriched.
        """
        if "template_files" not in data:
            data["template_files"] = get_template_files(data["show_version"].real_version_id, "id", "name", "content")
            return
        template_files = data["template_files"]
        if not template_files:
            raise serializers.ValidationError("empty parameter template_files")
        try:
            data["template_files"] = add_fields_in_template_files(data["show_version"].real_version_id, template_files)
        except Exception as err:
            raise serializers.ValidationError(f"invalid parameter template_files: {err}")
    def _validate_namespace_info(self, data):
        """Resolve the namespace id and check the user's namespace permission."""
        request = self.context["request"]
        namespace_info = data["namespace_info"]
        namespace_info["id"] = get_namespace_id(
            request.user.token.access_token, data["project_id"], namespace_info["cluster_id"], namespace_info["name"]
        )
        perm_ctx = NamespaceScopedPermCtx(
            username=request.user.username,
            project_id=data["project_id"],
            cluster_id=namespace_info["cluster_id"],
            name=namespace_info["name"],
        )
        NamespaceScopedPermission().can_use(perm_ctx)
    def validate(self, data):
        # Resolve the template, then the shown version, then namespace and
        # files — each step raises ValidationError on failure.
        template_name = data["template_name"]
        try:
            template = models.Template.objects.get(
                project_id=data["project_id"], name=template_name, edit_mode=constants.TemplateEditMode.YAML.value
            )
            data["template"] = template
        except models.Template.DoesNotExist:
            raise serializers.ValidationError(_("YAML模板集(name:{})不存在").format(template_name))
        try:
            show_version = models.ShowVersion.objects.get(name=data["show_version_name"], template_id=template.id)
            data["show_version"] = show_version
        except models.ShowVersion.DoesNotExist:
            raise serializers.ValidationError(
                _("YAML模板集(name:{})不存在版本{}").format(template_name, data["show_version_name"])
            )
        self._validate_namespace_info(data)
        self._validate_template_files(data)
        return data
|
nilq/baby-python
|
python
|
def test_requirements(supported_configuration):
    """Placeholder test; currently only exercises the fixture."""
    pass
|
nilq/baby-python
|
python
|
import os
import sys
import math
import scipy.signal
import schemasim.schemas.l0_schema_templates as st
class PhysicalCondition(st.RoleDefiningSchema):
    """Base schema for physical conditions attached to simulated objects."""
    def __init__(self):
        super().__init__()
        self._type = "PhysicalCondition"
        self._meta_type.append("PhysicalCondition")
        self._roles = {}
    def isDefaultCompatible(self):
        # Plain physical conditions are not compatible with defaults.
        return False
class Default(PhysicalCondition):
    """Physical condition that is compatible with the default setup."""
    def __init__(self):
        super().__init__()
        self._type = "Default"
        self._meta_type.append("DefaultPhysicalCondition")
        self._roles = {}
    def isDefaultCompatible(self):
        return True
class CollisionEnabled(Default):
    """Marks *obj* as participating in collisions (default-compatible)."""
    def __init__(self, obj=None):
        super().__init__()
        self._type = "CollisionEnabled"
        self._meta_type.append("CollisionEnabled")
        self._roles = {"obj": obj}
class CollisionDisabled(PhysicalCondition):
    """Marks *obj* as excluded from collisions (not default-compatible)."""
    def __init__(self, obj=None):
        super().__init__()
        self._type = "CollisionDisabled"
        self._meta_type.append("CollisionDisabled")
        self._roles = {"obj": obj}
class PhysicsPrimitiveQuality(st.RoleDefiningSchema):
    """Schema scoring a single physics parameter ("quality") of an object.

    The reference value (_normal) starts at *default* but is taken from the
    object's own parameters when available.
    """
    def __init__(self, obj=None, quality="", default=1.0):
        super().__init__()
        self._type = "PhysicsPrimitiveQuality"
        self._meta_type.append("PhysicsPrimitiveQuality")
        self._normal = default
        # Prefer the object's own parameter value as the reference.
        if (None != obj) and ("ParameterizedSchema" in obj._meta_type) and (quality in obj._parameters):
            self._normal = obj._parameters[quality]
        self._roles = {"obj": obj}
        self._quality = quality
    def getReferenceValue(self):
        """Value subclasses compare the measured quality against."""
        return self._normal
    def _getQuality(self):
        """Current value of the quality: object's parameter if set, else _normal."""
        retq = self._normal
        if (None != self._roles['obj']) and (self._quality in self._roles['obj']._parameters):
            retq = self._roles['obj']._parameters[self._quality]
        return retq
    def evaluateFrame(self, frameData, sim):
        # Base class: every frame trivially satisfies the quality.
        return True, 1.0
    def filterPD(self, rpd, sim, strictness=0.005):
        # Base class: leave the probability distribution unchanged.
        return rpd
class MassSettingSchema(PhysicsPrimitiveQuality):
    """Quality schema scoring how close an object's mass is to a reference."""
    def __init__(self, obj=None):
        super().__init__(obj=obj, quality="mass")
        self._type = "MassSettingSchema"
        self._meta_type.append("MassSettingSchema")
    def evaluateFrame(self, frameData, sim):
        # Exponential falloff; score > 0.2 counts as satisfied.
        mass = self._getQuality()
        ref = self.getReferenceValue()
        sc = math.exp(-math.fabs(mass - ref)/(ref/5.0))
        return (0.2 < sc), sc
    def filterPD(self, rpd, sim, strictness=0.005):
        # NOTE(review): `space` is unused here — confirm sim.space() has no
        # required side effect before removing.
        space = sim.space()
        ref = self.getReferenceValue()
        for c in rpd:
            c[0] = c[0]*math.exp(-math.fabs(c[1] - ref)/(ref/5.0))
        return rpd
class Heavy(MassSettingSchema):
    """Mass quality with reference 5x the object's normal mass."""
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Heavy"
        self._meta_type.append("Heavy")
    def getReferenceValue(self):
        return 5*self._normal
class VeryHeavy(MassSettingSchema):
    """Mass quality with reference 25x the object's normal mass."""
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VeryHeavy"
        self._meta_type.append("VeryHeavy")
    def getReferenceValue(self):
        return 25*self._normal
class Lightweight(MassSettingSchema):
    """Mass quality with reference 0.2x the object's normal mass."""
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Lightweight"
        self._meta_type.append("Lightweight")
    def getReferenceValue(self):
        return 0.2*self._normal
class VeryLightweight(MassSettingSchema):
    """Mass quality with reference 0.04x the object's normal mass."""
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VeryLightweight"
        self._meta_type.append("VeryLightweight")
    def getReferenceValue(self):
        return 0.04*self._normal
class RestitutionSettingSchema(PhysicsPrimitiveQuality):
    """Quality schema scoring an object's restitution (bounciness)."""
    def __init__(self, obj=None):
        super().__init__(obj=obj, quality="restitution")
        self._type = "RestitutionSettingSchema"
        self._meta_type.append("RestitutionSettingSchema")
    def evaluateFrame(self, frameData, sim):
        # Exponential falloff with fixed width 0.1; > 0.2 counts as satisfied.
        restitution = self._getQuality()
        ref = self.getReferenceValue()
        sc = math.exp(-math.fabs(restitution - ref)/(0.1))
        return (0.2 < sc), sc
    def filterPD(self, rpd, sim, strictness=0.005):
        # NOTE(review): `space` is unused — confirm sim.space() is side-effect
        # free before removing.
        space = sim.space()
        ref = self.getReferenceValue()
        for c in rpd:
            c[0] = c[0]*math.exp(-math.fabs(c[1] - ref)/(0.1))
        return rpd
class Elastic(RestitutionSettingSchema):
    """Elastic object: restitution reference value 0.6.

    Bug fix: previously subclassed MassSettingSchema, so the 0.6 reference
    was scored against the "mass" quality; elasticity is restitution.
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Elastic"
        self._meta_type.append("Elastic")
    def getReferenceValue(self):
        return 0.6
class VeryElastic(RestitutionSettingSchema):
    """Very elastic object: restitution reference value 0.8.

    Bug fix: previously subclassed MassSettingSchema (wrong quality).
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VeryElastic"
        self._meta_type.append("VeryElastic")
    def getReferenceValue(self):
        return 0.8
class Inelastic(RestitutionSettingSchema):
    """Inelastic object: restitution reference value 0.3.

    Bug fix: previously subclassed MassSettingSchema (wrong quality).
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Inelastic"
        self._meta_type.append("Inelastic")
    def getReferenceValue(self):
        return 0.3
class VeryInelastic(RestitutionSettingSchema):
    """Very inelastic object: restitution reference value 0.1.

    Bug fix: previously subclassed MassSettingSchema (wrong quality).
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VeryInelastic"
        self._meta_type.append("VeryInelastic")
    def getReferenceValue(self):
        return 0.1
class FrictionSettingSchema(PhysicsPrimitiveQuality):
    """Quality schema scoring an object's friction coefficient."""
    def __init__(self, obj=None):
        super().__init__(obj=obj, quality="friction")
        self._type = "FrictionSettingSchema"
        self._meta_type.append("FrictionSettingSchema")
    def evaluateFrame(self, frameData, sim):
        # Exponential falloff with fixed width 0.1; > 0.2 counts as satisfied.
        friction = self._getQuality()
        ref = self.getReferenceValue()
        sc = math.exp(-math.fabs(friction - ref)/(0.1))
        return (0.2 < sc), sc
    def filterPD(self, rpd, sim, strictness=0.005):
        # NOTE(review): `space` is unused — confirm sim.space() is side-effect
        # free before removing.
        space = sim.space()
        ref = self.getReferenceValue()
        for c in rpd:
            c[0] = c[0]*math.exp(-math.fabs(c[1] - ref)/(0.1))
        return rpd
class Frictious(FrictionSettingSchema):
    """High-friction object: friction reference value 0.6.

    Bug fix: previously subclassed MassSettingSchema, so the 0.6 reference
    was scored against the "mass" quality instead of "friction".
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Frictious"
        self._meta_type.append("Frictious")
    def getReferenceValue(self):
        return 0.6
class Slippery(FrictionSettingSchema):
    """Low-friction object: friction reference value 0.3.

    Bug fix: previously subclassed MassSettingSchema (wrong quality).
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Slippery"
        self._meta_type.append("Slippery")
    def getReferenceValue(self):
        return 0.3
class VeryFrictious(FrictionSettingSchema):
    """Very high-friction object: friction reference value 0.8.

    Bug fix: previously subclassed MassSettingSchema (wrong quality).
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VeryFrictious"
        self._meta_type.append("VeryFrictious")
    def getReferenceValue(self):
        return 0.8
class VerySlippery(FrictionSettingSchema):
    """Very low-friction object: friction reference value 0.1.

    Bug fix: previously subclassed MassSettingSchema (wrong quality).
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VerySlippery"
        self._meta_type.append("VerySlippery")
    def getReferenceValue(self):
        return 0.1
class ParticleNumSettingSchema(PhysicsPrimitiveQuality):
    """Quality schema for the number of simulated particles (reference 30)."""
    def __init__(self, obj=None):
        super().__init__(obj=obj, quality="particle_num")
        self._type = "ParticleNumSettingSchema"
        self._meta_type.append("ParticleNumSettingSchema")
        self._normal = 30
    def evaluateFrame(self, frameData, sim):
        # Particle count never invalidates a frame.
        return True, 1.0
    def filterPD(self, rpd, sim, strictness=0.005):
        # NOTE(review): `space` is unused — confirm sim.space() is side-effect
        # free before removing.
        space = sim.space()
        for c in rpd:
            c[0] = c[0]*math.exp(-math.fabs(c[1] - self._normal)/(self._normal/5.0))
        return rpd
class Plentiful(ParticleNumSettingSchema):
    """Particle-count quality with reference 50."""
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Plentiful"
        self._meta_type.append("Plentiful")
        self._normal = 50
class Scarce(ParticleNumSettingSchema):
    """Particle-count quality with reference 15."""
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "Scarce"
        self._meta_type.append("Scarce")
        self._normal = 15
class VeryPlentiful(ParticleNumSettingSchema):
    """Particle-count quality with reference 90.

    Bug fix: _type/_meta_type previously said "Plentiful" (copy-paste),
    making VeryPlentiful indistinguishable from Plentiful by type.
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VeryPlentiful"
        self._meta_type.append("VeryPlentiful")
        self._normal = 90
class VeryScarce(ParticleNumSettingSchema):
    """Particle-count quality with reference 5.

    Bug fix: _type/_meta_type previously said "Scarce" (copy-paste),
    making VeryScarce indistinguishable from Scarce by type.
    """
    def __init__(self, obj=None):
        super().__init__(obj=obj)
        self._type = "VeryScarce"
        self._meta_type.append("VeryScarce")
        self._normal = 5
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Georg Brandl <g.brandl@fz-juelich.de>
#
# *****************************************************************************
"""Base classes for YAML based datasinks."""
from datetime import datetime
from time import time as currenttime
import quickyaml
from nicos import session
from nicos.core import NicosError
from nicos.devices.datasinks.image import SingleFileSinkHandler
from nicos.utils import AutoDefaultODict
def nice_datetime(dt):
    """Format a datetime (or a POSIX float timestamp) as whole-second ISO-8601."""
    when = datetime.fromtimestamp(dt) if isinstance(dt, float) else dt
    return when.replace(microsecond=0).isoformat()
class YAMLBaseFileSinkHandler(SingleFileSinkHandler):
    """Sink handler that serializes experiment/measurement metadata and
    instrument data to a YAML document; subclasses supply the instrument
    payload via _write_instr_data."""

    filetype = 'MLZ.YAML'  # to be overwritten in derived classes
    max_yaml_width = 120
    accept_final_images_only = True
    yaml_array_handling = quickyaml.ARRAY_AS_SEQ
    # Parallel lists: units[i] is the unit recorded for objects[i].
    objects = ['angle', 'clearance', 'current', 'displacement', 'duration',
               'energy', 'frequency', 'temperature', 'wavelength',
               'offset', 'width', 'height', 'length']
    units = ['deg', 'mm', 'A', 'mm', 's', 'meV', 'hertz', 'K', 'A',
             'mm', 'mm', 'mm', 'mm']

    def _readdev(self, devname, mapper=lambda x: x):
        """Read a device's value through *mapper*; None if the device errors."""
        try:
            return mapper(session.getDevice(devname).read())
        except NicosError:
            return None

    def _devpar(self, devname, parname, mapper=lambda x: x):
        """Read a device parameter through *mapper*; None if the device errors."""
        try:
            return mapper(getattr(session.getDevice(devname), parname))
        except NicosError:
            return None

    def _dict(self):
        # Nested ordered dict that auto-creates missing keys.
        return AutoDefaultODict()

    def _flowlist(self, *args):
        # YAML flow-style (inline) list.
        return quickyaml.flowlist(*args)

    def writeData(self, fp, image):
        """Save in YAML format."""
        fp.seek(0)
        expdev = session.experiment
        instrdev = session.instrument
        o = AutoDefaultODict()
        # Instrument section.
        instr = o['instrument']
        instr['name'] = instrdev.instrument
        instr['facility'] = instrdev.facility
        instr['operator'] = ', '.join(instrdev.operators)
        instr['website'] = instrdev.website
        instr['references'] = [AutoDefaultODict({'doi': instrdev.doi})]
        o['format']['identifier'] = self.__class__.filetype
        for obj, unit in zip(self.objects, self.units):
            o['format']['units'][obj] = unit
        # Experiment section: proposal info and authors.
        exp = o['experiment']
        exp['number'] = expdev.propinfo.get('session', expdev.proposal)
        exp['proposal'] = expdev.proposal
        exp['title'] = expdev.title
        exp['authors'] = []
        for user in expdev.propinfo.get('users', []):
            a = AutoDefaultODict()
            a['name'] = user['name']
            a['affiliation'] = user.get('affiliation')
            a['roles'] = self._flowlist(['principal_investigator'])
            exp['authors'].append(a)
        for user in expdev.propinfo.get('localcontacts', []):
            a = AutoDefaultODict()
            a['name'] = user['name']
            a['affiliation'] = user.get('affiliation')
            a['roles'] = self._flowlist(['local_contact'])
            exp['authors'].append(a)
        # Measurement section: identifiers, timing, sample environment.
        meas = o['measurement']
        meas['number'] = self.dataset.number
        meas['unique_identifier'] = '%s/%s/%s' % (
            expdev.proposal, self.dataset.counter, self.dataset.number)
        hist = meas['history']
        hist['started'] = nice_datetime(self.dataset.started)
        hist['stopped'] = nice_datetime(currenttime())
        sample = meas['sample']['description']
        sample['name'] = expdev.sample.samplename
        env = meas['sample']['environment'] = []
        stats = self.dataset.valuestats
        for (info, val) in zip(self.dataset.envvalueinfo,
                               self.dataset.envvaluelist):
            entry = self._dict()
            entry['name'] = info.name
            entry['unit'] = info.unit
            entry['value'] = val
            if info.name in stats:
                entry['mean'] = stats[info.name][0]
                entry['stddev'] = stats[info.name][1]
                entry['min'] = stats[info.name][2]
                entry['max'] = stats[info.name][3]
            env.append(entry)
        # Subclass hook adds the instrument-specific payload.
        self._write_instr_data(meas, image)
        quickyaml.Dumper(width=self.max_yaml_width,
                         array_handling=self.yaml_array_handling).dump(o, fp)
        fp.flush()

    def _write_instr_data(self, meas_root, image):
        # Abstract: derived sinks must write their instrument data here.
        raise NotImplementedError('implement _write_instr_data')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2020 Intel Corporation
""" ETCD Data Write Tool """
import argparse
import logging
import os
import sys
import eis_integ
def parse_arguments(_cli_args):
    """ Parse argument passed to function

    Bug fix: the passed argument list was previously ignored —
    parse_args() fell back to sys.argv, making the parameter dead.
    """
    parser = argparse.ArgumentParser(description=
        "Adds the contents of the json file to the etcd database.")
    parser.add_argument("arg", help=
        "Name of the json file whose contents should be added to the database.")
    return parser.parse_args(_cli_args)
def main(args):
    """ Calls the eis_integ.etcd_put_json function to add the contents of the json file
    to the etcd database """
    eis_integ.init_logger()
    # etcd endpoint and TLS material come from the environment; fail early
    # when the cert/key path variables are missing or invalid.
    os.environ["ETCDCTL_ENDPOINTS"] = "https://" + eis_integ.extract_etcd_endpoint()
    eis_integ.check_path_variable("ETCDCTL_CACERT", os.environ.get("ETCDCTL_CACERT"))
    eis_integ.check_path_variable("ETCDCTL_CERT", os.environ.get("ETCDCTL_CERT"))
    eis_integ.check_path_variable("ETCDCTL_KEY", os.environ.get("ETCDCTL_KEY"))
    print("Update the etcd database or add {} file contents to the etcd database".format(args.arg))
    eis_integ.etcd_put_json(eis_integ.load_json(args.arg))
    return eis_integ.CODES.NO_ERROR
if __name__ == '__main__':
    try:
        # Exit with the numeric value of the returned status code.
        sys.exit(main(parse_arguments(sys.argv[1:])).value)
    except eis_integ.EisIntegError as exception:
        logging.error("Error while adding entries to ETCD database: %s", exception)
        sys.exit(exception.code.value)
|
nilq/baby-python
|
python
|
from app.validation.validation import validate, ARGS, KWARGS
import json
import os
__db_items__ = "db/items"
class Item:
    """File-backed item store: each item lives in db/items/<name>.json."""

    def __init__(self):
        pass

    @validate(4, ARGS)
    def save(self, id, name, price, qty):
        """Persist an item as a JSON file keyed by its name."""
        with open(f"{__db_items__}/{name}.json", 'w') as f:
            data = {
                'id': id,
                'name': name,
                'price': price,
                'qty': qty
            }
            json.dump(data, f)
            print("Item Saved")

    def __get_item_list(self, name):
        """Return the stored filename for *name*, or None when absent."""
        try:
            item_list = os.listdir(__db_items__)
            return [x for x in item_list if x == f"{name}.json"][0]
        except (OSError, IndexError):
            # Missing db directory (OSError) or no matching file (IndexError);
            # previously a blanket `except Exception` hid real errors.
            return None

    @validate(1, ARGS)
    def find(self, name):
        """Load, print and return the item dict; print 'No Item' when absent."""
        item = self.__get_item_list(name)
        if item is not None:
            with open(f"{__db_items__}/{item}", "r") as f:
                data = json.load(f)
                print(f"\nID: {data['id']} Name: {data['name']} Price: {data['price']} QTY: {data['qty']}", end='\n')
                return data
        else:
            print("No Item")

    def getAll(self):
        """Print and return every stored item, or None when the store is empty."""
        files = os.listdir(__db_items__)
        all_items = []
        if files:
            for fs in files:
                with open(f"{__db_items__}/{fs}", "r") as f:
                    data = json.load(f)
                    print(f"ID: {data['id']} Name: {data['name']} Price: {data['price']} QTY: {data['qty']}", end='\n')
                    all_items.append(data)
            return all_items
        else:
            print("No Items found.!")
            return None

    @validate(1, ARGS)
    def is_item_exist(self, name):
        """Return True when an item file for *name* exists."""
        item = self.__get_item_list(name)
        if item is not None:
            # `item` is "<name>.json", so a substring check suffices;
            # previously spelled item.__contains__(name).
            return name in item
        else:
            return False
|
nilq/baby-python
|
python
|
import json
from setuptools import setup, find_packages
from pydoccano import __version__
def requirements():
    """Build an install_requires list from Pipfile.lock's default section.

    Pinned versions ('==') are relaxed to minimums ('>='); git entries are
    rendered as editable VCS requirements.
    """
    with open('Pipfile.lock', "r") as lockfile:
        packages = json.load(lockfile)['default']
        reqs = []
        for pkg, spec in packages.items():
            try:
                reqs.append(pkg + spec['version'].replace('==', '>='))
            except KeyError:
                reqs.append(f"-e git+{spec['git']}@{spec['ref']}#egg={pkg}")
        return reqs
# Distribution metadata; install_requires is generated from Pipfile.lock
# by requirements() above.
# NOTE(review): the author string ends with a stray ')' — confirm intended.
setup(
    name='pydoccano',
    version=__version__,
    description='This package for API of doccano',
    author='Bogdan Evstratenko)',
    author_email='evstrat.bg@gmail.com',
    url='https://github.com/evstratbg/pydoccano',
    packages=find_packages(),
    python_requires='>=3.7',
    install_requires=requirements(),
)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import time
from multiprocessing import Process
import pytest
import six
import thriftpy2
from thriftpy2.http import make_server as make_http_server, \
make_client as make_http_client
from thriftpy2.protocol import TApacheJSONProtocolFactory
from thriftpy2.rpc import make_server as make_rpc_server, \
make_client as make_rpc_client
from thriftpy2.thrift import TProcessor, TType
from thriftpy2.transport import TMemoryBuffer
from thriftpy2.transport.buffered import TBufferedTransportFactory
def recursive_vars(obj):
    """Recursively convert *obj* into plain str/bytes/number/dict/list values.

    Objects exposing __dict__ are converted via vars(); sets become lists.
    Anything unmatched (e.g. None) falls through and yields None implicitly.
    """
    if isinstance(obj, six.string_types):
        return six.ensure_str(obj)
    if isinstance(obj, six.binary_type):
        return six.ensure_binary(obj)
    if isinstance(obj, (int, float, bool)):
        return obj
    if isinstance(obj, dict):
        return {k: recursive_vars(v) for k, v in obj.items()}
    if isinstance(obj, (list, set)):
        return [recursive_vars(v) for v in obj]
    if hasattr(obj, '__dict__'):
        return recursive_vars(vars(obj))
def test_thrift_transport():
    """Round-trip an Apache-Thrift-generated JSON request through the
    thriftpy2 TApacheJSON protocol and check the reply matches the input."""
    test_thrift = thriftpy2.load(
        "apache_json_test.thrift",
        module_name="test_thrift"
    )
    Test = test_thrift.Test
    Foo = test_thrift.Foo
    # Object covering every field type in the test .thrift definition.
    test_object = Test(
        tbool=False,
        tbyte=16,
        tdouble=1.234567,
        tlong=123123123,
        tshort=123,
        tint=12345678,
        tstr="Testing String",
        tsetofints={1, 2, 3, 4, 5},
        tmap_of_int2str={
            1: "one",
            2: "two",
            3: "three"
        },
        tlist_of_strings=["how", "do", "i", "test", "this?"],
        tmap_of_str2foo={'first': Foo("first"), "2nd": Foo("baz")},
        tmap_of_str2foolist={
            'test': [Foo("test list entry")]
        },
        tmap_of_str2mapofstring2foo={
            "first": {
                "second": Foo("testing")
            }
        },
        tmap_of_str2stringlist={
            "words": ["dog", "cat", "pie"],
            "other": ["test", "foo", "bar", "baz", "quux"]
        },
        tfoo=Foo("test food"),
        tlist_of_foo=[Foo("1"), Foo("2"), Foo("3")],
        tlist_of_maps2int=[
            {"one": 1, "two": 2, "three": 3}
        ],
        tmap_of_int2foo={
            1: Foo("One"),
            2: Foo("Two"),
            5: Foo("Five")
        },
        tbinary=b"\x01\x0fabc123\x00\x02"
    )
    # A request generated by apache thrift that matches the above object
    request_data = b"""[1,"test",1,0,{"1":{"rec":{"1":{"tf":0},"2":{"i8":16},
    "3":{"i16":123},"4":{"i32":12345678},"5":{"i64":123123123},"6":
    {"dbl":1.234567},"7":{"str":"Testing String"},"8":{"lst":["str",5,
    "how","do","i","test","this?"]},"9":{"map":["i32","str",3,{"1":"one",
    "2":"two","3":"three"}]},"10":{"set":["i32",5,1,2,3,4,5]},
    "11":{"map":["str","rec",2,{"first":{"1":{"str":"first"}},"2nd":
    {"1":{"str":"baz"}}}]},"12":{"map":["str","lst",
    2,{"words":["str",3,"dog","cat","pie"],"other":["str",5,"test",
    "foo","bar","baz","quux"]}]},"13":{"map":["str",
    "map",1,{"first":["str","rec",1,{"second":{"1":{"str":"testing"}}}]}]},
    "14":{"lst":["rec",3,{"1":{"str":"1"}},
    {"1":{"str":"2"}},{"1":{"str":"3"}}]},"15":{"rec":{"1":{
    "str":"test food"}}},"16":{"lst":["map",1,["str","i32",
    3,{"one":1,"two":2,"three":3}]]},"17":{"map":["str","lst",1,{"test":
    ["rec",1,{"1":{"str":"test list entry"}}]}]},
    "18":{"map":["i32","rec",3,{"1":{"1":{"str":"One"}},"2":{"1":
    {"str":"Two"}},"5":{"1":{"str":"Five"}}}]},
    "19":{"str":"AQ9hYmMxMjMAAg=="}}}}]"""
    class Handler:
        @staticmethod
        def test(t):
            # t should match the object above
            expected_a = recursive_vars(t)
            expected_b = recursive_vars(test_object)
            if TType.STRING != TType.BINARY:
                assert expected_a == expected_b
            return t
    tp2_thrift_processor = TProcessor(test_thrift.TestService, Handler())
    tp2_factory = TApacheJSONProtocolFactory()
    iprot = tp2_factory.get_protocol(TMemoryBuffer(request_data))
    obuf = TMemoryBuffer()
    oprot = tp2_factory.get_protocol(obuf)
    tp2_thrift_processor.process(iprot, oprot)
    # output buffers should be the same
    final_data = obuf.getvalue()
    # Compare the parsed payloads (request field '1' vs reply field '0').
    assert json.loads(request_data.decode('utf8'))[4]['1'] == \
        json.loads(final_data.decode('utf8'))[4]['0']
@pytest.mark.parametrize('server_func', [(make_rpc_server, make_rpc_client),
                                         (make_http_server, make_http_client)])
def test_client(server_func):
    """Echo a Test object through a real server process and compare.

    NOTE(review): the `server_func` (server, client) pair is never used —
    both parametrized cases run the HTTP server/client; confirm intent.
    """
    test_thrift = thriftpy2.load(
        "apache_json_test.thrift",
        module_name="test_thrift"
    )
    class Handler:
        @staticmethod
        def test(t):
            return t
    def run_server():
        server = make_http_server(
            test_thrift.TestService,
            handler=Handler(),
            host='localhost',
            port=9090,
            proto_factory=TApacheJSONProtocolFactory(),
            trans_factory=TBufferedTransportFactory()
        )
        server.serve()
    # Serve from a child process so the test process can act as client.
    proc = Process(target=run_server, )
    proc.start()
    time.sleep(0.25)
    try:
        test_object = test_thrift.Test(
            tdouble=12.3456,
            tint=567,
            tstr='A test \'{["string',
            tmap_of_bool2str={True: "true string", False: "false string"},
            tmap_of_bool2int={True: 0, False: 1}
        )
        client = make_http_client(
            test_thrift.TestService,
            host='localhost',
            port=9090,
            proto_factory=TApacheJSONProtocolFactory(),
            trans_factory=TBufferedTransportFactory()
        )
        res = client.test(test_object)
        assert recursive_vars(res) == recursive_vars(test_object)
    finally:
        proc.terminate()
        time.sleep(1)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
""" """
# Script information for the file.
__author__ = "Philippe T. Pinard"
__email__ = "philippe.pinard@gmail.com"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2015 Philippe T. Pinard"
__license__ = "GPL v3"
# Standard library modules.
import unittest
import logging
import os
import tempfile
import shutil
# Third party modules.
import numpy as np
import tifffile
# Local modules.
from pyhmsa.datafile import DataFile
from pyhmsa.spec.condition.acquisition import AcquisitionRasterXY
from pyhmsa.spec.datum.imageraster import ImageRaster2D
from pyhmsa.type.numerical import _SUPPORTED_DTYPES
from pyhmsa_tiff.fileformat.exporter.tiff import ExporterTIFF, ExporterTIFFMultiPage
# Globals and constants variables.
def _create_datafile():
    """Build a DataFile with one random 60x50 image per supported dtype."""
    datafile = DataFile()
    acquisition = AcquisitionRasterXY(60, 50, (0.1, 'nm'), (0.1, 'nm'))
    for dtype in _SUPPORTED_DTYPES:
        image = ImageRaster2D(60, 50, dtype=dtype)
        image[:] = np.random.random((60, 50)) * 255
        image.conditions.add('Acq', acquisition)
        datafile.data.add(dtype.name, image)
    return datafile
class TestExporterTIFF(unittest.TestCase):
    """Exercise ExporterTIFF: one TIFF file should be written per datum."""

    def setUp(self):
        super().setUp()
        self.tmpdir = tempfile.mkdtemp()
        self.exp = ExporterTIFF(compress=9)
        self.datafile = _create_datafile()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def testexport(self):
        self.exp.export(self.datafile, self.tmpdir)
        filepaths = self.exp.get()
        self.assertEqual(len(filepaths), len(_SUPPORTED_DTYPES))
        for filepath in filepaths:
            with tifffile.TiffFile(filepath) as tif:
                actual = tif.asarray()
                # The datum identifier is encoded in the filename after '_'.
                basename = os.path.splitext(os.path.basename(filepath))[0]
                identifier = basename.split('_')[1]
                expected = self.datafile.data[identifier]
                np.testing.assert_almost_equal(actual, expected.T, 4)
class TestExporterTIFFMultiPage(unittest.TestCase):
    """Exercise ExporterTIFFMultiPage: all data go into one multi-page TIFF."""

    def setUp(self):
        super().setUp()
        self.tmpdir = tempfile.mkdtemp()
        self.exp = ExporterTIFFMultiPage(compress=9)
        self.datafile = _create_datafile()

    def tearDown(self):
        super().tearDown()
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def testexport(self):
        self.exp.export(self.datafile, self.tmpdir)
        filepaths = self.exp.get()
        # A single file is expected, holding one page per datum.
        self.assertEqual(1, len(filepaths))
        with tifffile.TiffFile(filepaths[0]) as tif:
            self.assertEqual(len(_SUPPORTED_DTYPES), len(tif.pages))
# Run the test suite directly with debug-level logging enabled.
if __name__ == '__main__': #pragma: no cover
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
|
nilq/baby-python
|
python
|
import argparse
from multiprocessing import Pool
from grit.occlusion_detection.occlusion_detection_geometry import OcclusionDetector2D
from grit.core.base import create_folders
from igp2.data import ScenarioConfig
def prepare_episode_occlusion_dataset(params):
    """Extract occlusions for one (scenario, episode) pair.

    ``params`` is a (scenario_name, episode_idx, debug, debug_steps) tuple so
    this function can be mapped directly over a multiprocessing pool.
    """
    scenario_name, episode_idx, debug, debug_steps = params
    print('scenario {} episode {}'.format(scenario_name, episode_idx))
    detector = OcclusionDetector2D(scenario_name, episode_idx, debug=debug, debug_steps=debug_steps)
    detector.extract_occlusions()
    print('finished scenario {} episode {}'.format(scenario_name, episode_idx))
def main():
    """Parse CLI options and run occlusion extraction over every episode."""
    parser = argparse.ArgumentParser(description='Process the dataset')
    parser.add_argument('--scenario', type=str, help='Name of scenario to process', default=None)
    parser.add_argument('--workers', type=int, help='Number of multiprocessing workers', default=8)
    parser.add_argument('--debug',
                        help="if set, we plot all the occlusions in a frame for each vehicle."
                             "If --debug_steps is also True, this takes precedence and --debug_steps will be"
                             "deactivated.",
                        action='store_true')
    parser.add_argument('--debug_steps',
                        help="if set, we plot the occlusions created by each obstacle. "
                             "If --debug is set, --debug_steps will be disabled.",
                        action='store_true')
    args = parser.parse_args()

    create_folders()

    # --debug takes precedence over --debug_steps when both are requested.
    if args.debug and args.debug_steps:
        args.debug_steps = False

    if args.scenario is None:
        scenarios = ['heckstrasse', 'bendplatz', 'frankenberg', 'round']
    else:
        scenarios = [args.scenario]

    # One work item per (scenario, episode) pair.
    params_list = []
    for scenario_name in scenarios:
        scenario_config = ScenarioConfig.load(f"scenarios/configs/{scenario_name}.json")
        params_list.extend((scenario_name, episode_idx, args.debug, args.debug_steps)
                           for episode_idx in range(len(scenario_config.episodes)))

    with Pool(args.workers) as pool:
        pool.map(prepare_episode_occlusion_dataset, params_list)
# Script entry point.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# This is the base module that will be imported by Django.
# Try to import the custom settings.py file, which will in turn import one of the deployment targets.
# If it doesn't exist we assume this is a vanilla development environment and import .deployments.settings_dev.
try:
    from .settings import * # noqa
except ImportError as e:
    # Fall back to the dev settings only when the custom settings module is
    # genuinely absent; any other ImportError (e.g. a typo inside settings.py)
    # must propagate to the caller.
    # NOTE(review): matching the exception message string is brittle — it
    # depends on the exact package path 'config.settings.settings'; confirm
    # it matches this project's layout.
    if e.msg == "No module named 'config.settings.settings'":
        from .settings_dev import * # noqa
    else:
        raise
|
nilq/baby-python
|
python
|
http://stackoverflow.com/questions/2339101/knights-shortest-path-chess-question
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import pickle
import os
import json
import glob
import scipy
from ngboost import NGBRegressor
from ngboost.distns import Normal
from ngboost.learners import default_tree_learner
from ngboost.scores import MLE, LogScore
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error, mean_absolute_error, accuracy_score, confusion_matrix
from classes.qrfr import QuantileRandomForestRegressor as qfrfQuantileRandomForestRegressor
class ModelTrainer:
    """
    This class will perform training on datasets. This could happen during the grid search for the best combination
    of weights, or, once the weights are assessed, for the creation of the final models
    """

    def __init__(self, features_analyzer, input_gatherer, forecast_type, cfg, logger):
        """
        Constructor
        :param features_analyzer: Features Analyzer
        :type features_analyzer: FeaturesAnalyzer
        :param input_gatherer: Inputs Gatherer
        :type input_gatherer: InputsGatherer
        :param forecast_type: Forecast type (MOR | EVE)
        :type forecast_type: str
        :param cfg: FTP parameters for the files exchange
        :type cfg: dict
        :param logger: Logger
        :type logger: Logger object
        """
        # set the variables
        self.features_analyzer = features_analyzer
        self.forecast_type = forecast_type
        self.input_gatherer = input_gatherer
        self.cfg = cfg
        self.logger = logger
        # Populated by get_datasets(); per-region dataframes.
        self.dataFrames = None
        # Container for trained models.
        self.models = {}

    def get_datasets(self):
        # Pull the dataframes prepared by the FeaturesAnalyzer instance.
        self.dataFrames = self.features_analyzer.dataFrames

    def get_accuracy_threshold(self, threshold, prediction, measured):
        """
        Calculate accuracy of predictions whose measured value is above a certain threshold
        :param threshold: threshold level
        :type threshold: float
        :param prediction: predicted data
        :type prediction: numpy.array
        :param measured: measured data
        :type measured: numpy.array
        :return: accuracy score
        :rtype: float
        """
        lcl_acc = 0.0
        # Accuracy is only computed when at least one measurement exceeds the
        # threshold; otherwise 0.0 is returned.
        if not measured.loc[measured > threshold].empty:
            lcl_acc = accuracy_score(self.get_classes(prediction.loc[measured > threshold]),
                                     self.get_classes(measured.loc[measured > threshold]))
        return lcl_acc

    @staticmethod
    def calc_mae_rmse_threshold(meas, pred, th):
        # MAE/RMSE restricted to samples whose measured value is >= th.
        # Returns (-1.0, -1.0) as a sentinel when no sample passes the mask.
        mask = meas.values >= th
        if len(pred[mask]) > 0:
            return round(mean_absolute_error(meas[mask], pred[mask]), 3), \
                   round(np.sqrt(mean_squared_error(meas[mask], pred[mask])), 3)
        else:
            return -1.0, -1.0

    @staticmethod
    def calc_mape_threshold(meas, pred, th):
        # MAPE (in percent, 1 decimal) restricted to samples with measured
        # value >= th; -1.0 sentinel when the mask selects nothing.
        mask = meas.values >= th
        if len(pred[mask]) > 0:
            return round(np.mean(np.abs((meas[mask].values - pred[mask].values) / meas[mask].values)) * 100, 1)
        else:
            return -1.0

    def calculate_KPIs(self, region, prediction, measured, weights, ngbPars=None):
        """
        For each fold and/or train/test separation, return the KPIs to establish the best weights combination
        :param prediction: predicted data
        :type prediction: numpy.array
        :param measured: measured data
        :type measured: numpy.array
        :return: pandas DF with KPIs for each dataset provided
        :rtype: pandas.DataFrame
        """
        threshold1 = self.cfg['regions'][region]['featuresAnalyzer']['threshold1']
        threshold2 = self.cfg['regions'][region]['featuresAnalyzer']['threshold2']
        threshold3 = self.cfg['regions'][region]['featuresAnalyzer']['threshold3']
        w1 = weights['w1']
        w2 = weights['w2']
        w3 = weights['w3']
        lcl_acc_1 = round(self.get_accuracy_threshold(threshold1, prediction, measured), 3)
        lcl_acc_2 = round(self.get_accuracy_threshold(threshold2, prediction, measured), 3)
        lcl_acc_3 = round(self.get_accuracy_threshold(threshold3, prediction, measured), 3)
        lcl_acc = round(accuracy_score(self.get_classes(prediction), self.get_classes(measured)), 3)
        lcl_rmse = round((mean_squared_error(measured, prediction) ** 0.5), 3)
        lcl_mae = round(mean_absolute_error(measured, prediction), 3)
        lcl_cm = confusion_matrix(self.get_classes(prediction), self.get_classes(measured))
        mae1, rmse1 = self.calc_mae_rmse_threshold(meas=measured, pred=prediction, th=threshold1)
        mae2, rmse2= self.calc_mae_rmse_threshold(meas=measured, pred=prediction, th=threshold2)
        mae3, rmse3= self.calc_mae_rmse_threshold(meas=measured, pred=prediction, th=threshold3)
        # Two output layouts: the hyperparameter-optimization one additionally
        # records the NGBoost parameters ('ne', 'lr') used for the run.
        if ngbPars is None:
            df_KPIs = pd.DataFrame([[w1, w2, w3, lcl_acc_1, lcl_acc_2, lcl_acc_3, lcl_acc, rmse1, rmse2, rmse3, lcl_rmse,
                                     mae1, mae2, mae3, lcl_mae, str(lcl_cm.flatten().tolist())]],
                                   columns=['w1', 'w2', 'w3', 'Accuracy_1', 'Accuracy_2', 'Accuracy_3', 'Accuracy',
                                            'RMSE1', 'RMSE2', 'RMSE3', 'RMSE', 'MAE1', 'MAE2', 'MAE3', 'MAE', 'ConfMat'])
        else:
            df_KPIs = pd.DataFrame([[w1, w2, w3, ngbPars['numberEstimators'], ngbPars['learningRate'], lcl_acc_1,
                                     lcl_acc_2, lcl_acc_3, lcl_acc, rmse1, rmse2, rmse3, lcl_rmse,
                                     mae1, mae2, mae3, lcl_mae, str(lcl_cm.flatten().tolist())]],
                                   columns=['w1', 'w2', 'w3', 'ne', 'lr', 'Accuracy_1', 'Accuracy_2', 'Accuracy_3', 'Accuracy',
                                            'RMSE1', 'RMSE2', 'RMSE3', 'RMSE', 'MAE1', 'MAE2', 'MAE3', 'MAE', 'ConfMat'])
        return df_KPIs

    def get_numpy_df(self, df_x, df_y):
        # Drop the leading 'date' column from both frames and return float64
        # numpy copies of equal length.
        x_data_no_date = df_x.iloc[:, 1:]
        y_data_no_date = df_y.iloc[:, 1:]
        assert (len(x_data_no_date) == len(y_data_no_date))
        x_data = np.array(x_data_no_date, dtype='float64')
        y_data = np.array(y_data_no_date, dtype='float64')
        return x_data, y_data

    def remove_date(self, X, Y):
        # Remove the 'date' column (assumed to be the first column) from both
        # dataframes, asserting before and after.
        assert 'date' in X.columns.values
        assert 'date' in Y.columns.values
        X = X.iloc[:, 1:]
        Y = Y.iloc[:, 1:]
        assert 'date' not in X.columns.values
        assert 'date' not in Y.columns.values
        return X, Y

    def convert_to_series(self, prediction, Y):
        """
        Convert dataframes to series for easier KPIs calculation
        """
        assert (len(prediction) == len(Y))
        prediction = pd.Series(prediction, index=Y.index)
        measured = pd.Series(Y.iloc[:, 0], index=Y.index)
        return prediction, measured

    @staticmethod
    def calc_prob_interval(pred_dataset, lower_limit, upper_limit):
        # Empirical probability that a sample lies strictly inside
        # (lower_limit, upper_limit).
        mask = np.logical_and(pred_dataset > lower_limit, pred_dataset < upper_limit)
        return len(pred_dataset[mask]) / len(pred_dataset)

    @staticmethod
    def handle_qrf_output(cfg, qrf, input_vals, region_code):
        # Predict the configured quantiles and the sample distribution for a
        # single input row, then summarize them as interval probabilities.
        qntls = np.array(cfg['regions'][region_code]['forecaster']['quantiles'])
        pred_qntls, pred_dataset = qrf.predict(input_vals, qntls)
        pred_dataset = pred_dataset[0]
        pred_qntls = pred_qntls[0]
        ths = cfg['regions'][region_code]['forecaster']['thresholds']
        eps = np.finfo(np.float32).eps
        dict_probs = {'thresholds': {}, 'quantiles': {}}
        # Get probabilities to be in configured thresholds
        for i in range(1, len(ths)):
            dict_probs['thresholds']['[%i:%i]' % (ths[i-1], ths[i])] = ModelTrainer.calc_prob_interval(pred_dataset, ths[i-1], ths[i]-eps)
        # Open-ended tail interval above the last threshold.
        dict_probs['thresholds']['[%i:%f]' % (ths[i], np.inf)] = ModelTrainer.calc_prob_interval(pred_dataset, ths[i], np.inf)
        # Get probabilities to be in the configured quantiles
        for i in range(0, len(qntls)):
            dict_probs['quantiles']['perc%.0f' % (qntls[i]*100)] = pred_qntls[i]
        return dict_probs

    @staticmethod
    def handle_ngb_normal_dist_output(cfg, mu, sigma, region_code):
        # Build a Normal(mu, sigma) and sample its inverse CDF on a fine grid
        # to approximate interval probabilities and quantile values.
        dist = scipy.stats.norm(loc=mu, scale=sigma)
        # QUANTILES
        # dist.ppf(0.1)
        # dist.ppf([0.1, 0.5])
        # VALUES FOR PROB
        # dist.ppf(0.1)
        # dist.ppf([0.1, 0.5])
        samples = []
        # 999 evenly spaced probability points in (0, 1).
        for i in range(1, 1000):
            samples.append(dist.ppf(float(i / 1000)))
        samples = np.array(samples)
        ths = cfg['regions'][region_code]['forecaster']['thresholds']
        eps = np.finfo(np.float32).eps
        dict_probs = {'thresholds': {}, 'quantiles': {}}
        for i in range(1, len(ths)):
            dict_probs['thresholds']['[%i:%i]' % (ths[i-1], ths[i])] = ModelTrainer.calc_prob_interval(samples, ths[i-1], ths[i]-eps)
        # Open-ended tail interval above the last threshold.
        dict_probs['thresholds']['[%i:%f]' % (ths[i], np.inf)] = ModelTrainer.calc_prob_interval(samples, ths[i], np.inf)
        # Get probabilities to be in the configured quantiles
        for q in cfg['regions'][region_code]['forecaster']['quantiles']:
            dict_probs['quantiles']['perc%.0f' % (q*100)] = dist.ppf(q)
        return dict_probs

    def fold_training(self, region, train_index, test_index, X, Y, weights, ngbPars=None):
        """
        For each fold and/or tran/test separation, create the model and calculate KPIs to establish the best weights
        combination
        :param train_index: indexes of dataset that compose train set
        :type train_index: pandas.Index
        :param test_index: indexes of dataset that compose test set
        :type test_index: pandas.Index
        :param X: design matrix
        :type X: pandas.DataFrame
        :param Y: response vector
        :type Y: pandas.DataFrame
        :return: prediction performed on test dataset
        :rtype: numpy.array
        """
        Xtrain, Xtest = np.array(X.loc[train_index, :]), np.array(X.loc[test_index, :])
        Ytrain, Ytest = Y.loc[train_index].reset_index(drop=True), Y.loc[test_index].reset_index(drop=True)
        assert len(Xtrain) == len(Ytrain)
        assert len(Xtest) == len(Ytest)
        # NOTE(review): 'weights' is forwarded as train_NGB_model's
        # 'target_data' argument; confirm the dict also carries the
        # 'numberEstimatorsNGB'/'learningRateNGB' keys when ngbPars is None.
        ngb = self.train_NGB_model(region, Xtrain, Ytrain, weights, ngbPars)[0]
        return ngb.predict(Xtest)

    def train_NGB_model(self, region, Xtrain, Ytrain, target_data, ngbPars=None):
        """
        Return the NGB model trained on the available data
        :param Xtrain: indexes of dataset that compose train set
        :type Xtrain: np.array()
        :param Ytrain: indexes of dataset that compose test set
        :type Ytrain: pandas.DataFrame
        :return: prediction model
        :rtype: ngboost.NGBRegressor
        """
        if 'weights' in target_data.keys():
            # MT case
            weights = target_data['weights'][self.forecast_type]
        else:
            # HPOPT case
            weights = target_data
        if ngbPars is None:
            # Usage of the configured parameters
            n_est = target_data['numberEstimatorsNGB'][self.forecast_type]
            l_rate = target_data['learningRateNGB'][self.forecast_type]
        else:
            # Usage of the parameters passed as arguments
            n_est = ngbPars['numberEstimators']
            l_rate = ngbPars['learningRate']
        threshold1 = self.cfg['regions'][region]['featuresAnalyzer']['threshold1'] # It should be 240
        threshold2 = self.cfg['regions'][region]['featuresAnalyzer']['threshold2'] # It should be 180
        threshold3 = self.cfg['regions'][region]['featuresAnalyzer']['threshold3'] # It should be 120 (old but wrong 135)
        w1 = weights['w1']
        w2 = weights['w2']
        w3 = weights['w3']
        # Per-sample weights: the higher the measured value, the larger the
        # weight (w1 above threshold1, then w2, then w3, else 1.0).
        weight = np.array(
            [w1 if x >= threshold1 else w2 if x >= threshold2 else w3 if x >= threshold3 else 1.0 for x in
             np.array(Ytrain)],
            dtype='float64')
        assert len(weight) == len(Ytrain)
        ngb = NGBRegressor(n_estimators=n_est, learning_rate=l_rate, Dist=Normal,
                           Base=default_tree_learner, natural_gradient=True, verbose=False,
                           Score=MLE, random_state=500).fit(Xtrain, np.array(Ytrain).ravel(), sample_weight=weight)
        return ngb, weight

    def error_data(self, pred, Y, fold, weights):
        """
        Create pandas df with weights, fold, measurements and predictions
        :param pred: predicted data
        :type pred: numpy.array
        :param Y: measured data
        :type Y: pandas.Series
        :param fold: current fold of Cross Validation
        :type fold: int
        :return: pandas DF with information
        :rtype: pandas.DataFrame
        """
        Y = np.array(Y.values)
        assert len(pred) == len(Y)
        df_pred = pd.DataFrame()
        df_pred['w1'] = [weights['w1']] * len(Y)
        df_pred['w2'] = [weights['w2']] * len(Y)
        df_pred['w3'] = [weights['w3']] * len(Y)
        df_pred['Fold'] = [fold] * len(Y)
        df_pred['Measurements'] = Y
        df_pred['Prediction'] = pred
        return df_pred

    def get_weights_folder_results(self, region, target_column, weights):
        # Build (and create on disk if missing) the grid-search results folder
        # '<root>/gs/<target>/<w1-..._w2-..._w3-...>/' and return its path.
        root_output_folder_path = self.input_gatherer.output_folder_creator(region)
        str_ws = ''
        for kw in weights.keys():
            str_ws = '%s%s-%s_' % (str_ws, kw, weights[kw])
        # Drop the trailing underscore.
        str_ws = str_ws[0:-1]
        if not os.path.exists(root_output_folder_path + 'gs'):
            os.mkdir(root_output_folder_path + 'gs')
        if not os.path.exists(root_output_folder_path + 'gs' + os.sep + target_column):
            os.mkdir(root_output_folder_path + 'gs' + os.sep + target_column)
        if not os.path.exists(root_output_folder_path + 'gs' + os.sep + target_column + os.sep + str_ws):
            os.mkdir(root_output_folder_path + 'gs' + os.sep + target_column + os.sep + str_ws)
        return '%s%s%s%s%s%s%s' % (root_output_folder_path, 'gs', os.sep, target_column, os.sep, str_ws, os.sep)

    def training_cross_validated_fs(self, features, region, target_column, df_x, df_y, weights):
        # Cross-validated training with per-fold feature selection.
        # Two modes: NGBoost hyperparameter optimization (if-branch, last fold
        # only) and plain K-fold CV (else-branch).
        df_x = df_x.reset_index(drop=True)
        df_y = df_y.reset_index(drop=True)
        # Dataset preparation for CV
        df_x_tmp = df_x
        df_y_tmp = df_y
        df_x_tmp = df_x_tmp.drop(['date'], axis=1)
        df_y_tmp = df_y_tmp.drop(['date'], axis=1)
        cv_folds = self.cfg['regions'][region]['gridSearcher']['numFolds']
        if self.cfg['regions'][region]['gridSearcher']['shuffle'] is True:
            kf = KFold(n_splits=cv_folds, shuffle=self.cfg['regions'][region]['gridSearcher']['shuffle'],
                       random_state=self.cfg['regions'][region]['gridSearcher']['randomState'])
        else:
            kf = KFold(n_splits=cv_folds, shuffle=False, random_state=None)
        np_x = df_x_tmp.to_numpy()
        np_y = df_y_tmp.to_numpy()
        fold = 1
        if self.cfg['regions'][region]['gridSearcher']['hyperParsOptimizationNGB'] is not None:
            df_pred = pd.DataFrame(columns=['w1', 'w2', 'w3', 'ne', 'lr', 'Measurements', 'Prediction'])
            for train_index, test_index in kf.split(np_x):
                # Consider only the last fold
                if fold == cv_folds:
                    # HPOPT only on the last fold
                    ngb_prediction = np.empty(len(test_index))
                    df_pred = pd.DataFrame(columns=['w1', 'w2', 'w3', 'Fold', 'ne', 'lr', 'Measurements', 'Prediction'])
                    # Get the I/O datasets for the training and the test
                    X_train, X_test = np_x[train_index], np_x[test_index]
                    y_train, y_test = np_y[train_index], np_y[test_index]
                    # Reduce the dataset to consider only to the current fold
                    df_x = df_x.iloc[test_index[0]:test_index[-1] + 1]
                    df_y = df_y.iloc[test_index[0]:test_index[-1] + 1]
                    df_kpis = None
                    # Grid over the configured (numEstimators x learningRate)
                    # combinations, accumulating one KPI row per combination.
                    for ne in self.cfg['regions'][region]['gridSearcher']['hyperParsOptimizationNGB']['numEstimators']:
                        for lr in self.cfg['regions'][region]['gridSearcher']['hyperParsOptimizationNGB']['learningRate']:
                            self.logger.info('HPOPT -> region: %s, target: %s, weights: %s -> '
                                             'Started FS fold %i/%i; (ne=%i, lr=%s)' % (region, target_column, weights,
                                                                                        fold, cv_folds, ne, str(lr)))
                            ngbPars = { 'numberEstimators': ne, 'learningRate': lr }
                            selected_features = self.features_analyzer.important_features(region,
                                                                                         X_train,
                                                                                         y_train,
                                                                                         features[1:],
                                                                                         weights,
                                                                                         ngbPars)[0]
                            X, Y = self.get_reduced_dataset(df_x, df_y, selected_features)
                            X, Y = self.remove_date(X, Y)
                            self.logger.info('HPOPT -> region: %s, target: %s, weights: %s -> '
                                             'Ended FS fold %i/%i; (ne=%i, lr=%s)' % (region, target_column, weights,
                                                                                      fold, cv_folds, ne, str(lr)))
                            # Perform the training using the training folds and the prediction with the test fold
                            self.logger.info('HPOPT -> region: %s, target: %s, weights: %s -> '
                                             'Started model training fold %i/%i; (ne=%i, lr=%s)' % (region,
                                                                                                    target_column,
                                                                                                    weights,
                                                                                                    fold,
                                                                                                    cv_folds,
                                                                                                    ne, str(lr)))
                            # todo this part below should be investigated
                            # NOTE(review): the model is trained on the FULL
                            # feature set (X_train), not on the reduced X/Y
                            # produced by the feature selection above; confirm
                            # whether the FS output should feed the training.
                            ngb = self.train_NGB_model(region, X_train, y_train, weights, ngbPars)[0]
                            ngb_prediction = ngb.predict(X_test)
                            # pred = self.fold_training(region, train_index, test_index, X, Y, weights, ngbPars)
                            # ngb_prediction = pred
                            self.logger.info('HPOPT -> region: %s, target: %s, weights: %s -> '
                                             'Ended model training fold %i/%i; (ne=%i, lr=%s)' % (region,
                                                                                                  target_column,
                                                                                                  weights,
                                                                                                  fold,
                                                                                                  cv_folds,
                                                                                                  ne, str(lr)))
                            prediction, measured = self.convert_to_series(ngb_prediction, Y)
                            if df_kpis is None:
                                df_kpis = self.calculate_KPIs(region, prediction, measured, weights, ngbPars)
                            else:
                                kpis = self.calculate_KPIs(region, prediction, measured, weights, ngbPars)
                                # NOTE(review): DataFrame.append was removed in
                                # pandas >= 2.0; pd.concat([df_kpis, kpis]) is
                                # the equivalent — confirm the pinned version.
                                df_kpis = df_kpis.append(kpis)
                    # Return after the last fold's grid has been evaluated.
                    return df_kpis, None
                fold += 1
        else:
            ngb_prediction = np.empty(len(df_y))
            df_pred = pd.DataFrame(columns=['w1', 'w2', 'w3', 'Fold', 'Measurements', 'Prediction'])
            for train_index, test_index in kf.split(np_x):
                # Get the I/O datasets for the training and the test
                X_train, X_test = np_x[train_index], np_x[test_index]
                y_train, y_test = np_y[train_index], np_y[test_index]
                # Perform the FS using the training folds
                self.logger.info('Region: %s, target: %s, weights: %s -> Started FS fold %i/%i' % (region,
                                                                                                   target_column,
                                                                                                   weights,
                                                                                                   fold,
                                                                                                   cv_folds))
                selected_features = self.features_analyzer.important_features(region, X_train, y_train, features[1:],
                                                                              weights)[0]
                X, Y = self.get_reduced_dataset(df_x, df_y, selected_features)
                X, Y = self.remove_date(X, Y)
                self.logger.info('Region: %s, target: %s, weights: %s -> Ended FS fold %i/%i' % (region, target_column,
                                                                                                 weights, fold, cv_folds))
                # Perform the training using the training folds and the prediction with the test fold
                self.logger.info('Region: %s, target: %s, weights: %s -> Started model training fold %i/%i' % (region,
                                                                                                               target_column,
                                                                                                               weights,
                                                                                                               fold,
                                                                                                               cv_folds))
                pred = self.fold_training(region, train_index, test_index, X, Y, weights)
                # Accumulate the fold's test predictions into the full vector.
                ngb_prediction[test_index] = pred
                self.logger.info('Region: %s, target: %s, weights: %s -> Ended model training fold %i/%i' % (region,
                                                                                                             target_column,
                                                                                                             weights,
                                                                                                             fold,
                                                                                                             cv_folds))
                # Concat the prediction results
                df_pred = pd.concat([df_pred, self.error_data(pred, Y.loc[test_index], fold, weights)], ignore_index=True,
                                    axis=0)
                fold += 1
            prediction, measured = self.convert_to_series(ngb_prediction, Y)
            return self.calculate_KPIs(region, prediction, measured, weights), df_pred

    def get_weights(self, input_file_name):
        # Recover the weights dict from a grid-search path component of the
        # form 'w1-10_w2-20_w3-30' (second-to-last element of the path).
        w = {}
        str_w = ''
        for elem in input_file_name.split(os.sep)[-2].split('_'):
            code, val = elem.split('-')
            w[code] = int(val)
            str_w += val + '-'
        return w, str_w[:-1]

    def train_final_models(self, k_region, target_signal, hps=None):
        """
        Calculates the KPIs for a set of weight with multiple Feature selection: First we create the folds of the cross
        validation, then for each fold we do the feature selection and locally calculate the KPIs
        """
        target_data = self.cfg['regions'][k_region]['finalModelCreator']['targets'][target_signal]
        self.get_datasets()
        key = k_region
        df = self.dataFrames[key]
        fp = self.input_gatherer.output_folder_creator(key)
        _, _, _, df_x, df_y = self.features_analyzer.dataset_splitter(key, df, target_signal)
        # Check if there is a hyperparameters optimization or not
        if hps is None:
            suffix = self.cfg['regions'][k_region]['finalModelCreator']['signalsFileSuffix']
            input_files = glob.glob('%s*%s%s.json' % (fp, target_signal, suffix))
        else:
            str_hpars = 'ne%i-lr%s' % (hps['numberEstimators'], str(hps['learningRate']).replace('.', ''))
            suffix = str_hpars
            input_files = glob.glob('%shpo%s%s%s*%s*.json' % (fp, os.sep, suffix, os.sep, target_signal))
        for input_file in input_files:
            selected_features = json.loads(open(input_file).read())['signals']
            X, Y = self.get_reduced_dataset(df_x, df_y, selected_features)
            X, Y = self.remove_date(X, Y)
            target_data['weights'] = self.cfg['regions'][k_region]['featuresAnalyzer']['targetColumns'][target_signal]['weights']
            target_data['numberEstimatorsNGB'] = self.cfg['regions'][k_region]['featuresAnalyzer']['targetColumns'][target_signal]['numberEstimatorsNGB']
            target_data['learningRateNGB'] = self.cfg['regions'][k_region]['featuresAnalyzer']['targetColumns'][target_signal]['learningRateNGB']
            start_year = self.cfg['datasetSettings']['years'][0]
            end_year = self.cfg['datasetSettings']['years'][-1]
            self.logger.info('Train models for %s - %s; period [%s:%s], case %s, weights: %s' % (k_region,
                                                                                                 target_signal,
                                                                                                 start_year,
                                                                                                 end_year,
                                                                                                 self.forecast_type,
                                                                                                 target_data['weights']))
            # Train NGB model
            self.logger.info('Target %s -> NGBoost model training start' % target_signal)
            ngb, weight = self.train_NGB_model(k_region, X, Y, target_data, hps)
            self.logger.info('Target %s -> NGBoost model training end' % target_signal)
            # Train QRF model
            rfqr = None
            # self.logger.info('RFQR model training start')
            # rfqr = RandomForestQuantileRegressor(n_estimators=1000).fit(X, np.array(Y).ravel())
            # self.logger.info('RFQR model training end')
            self.logger.info('Target %s -> pyquantrf RFQR model training start' % target_signal)
            # The QRF reuses the NGB estimators count and the NGB sample weights.
            rfqr_w = qfrfQuantileRandomForestRegressor(nthreads=4,
                                                       n_estimators=target_data['numberEstimatorsNGB'][self.forecast_type],
                                                       min_samples_leaf=10)
            rfqr_w.fit(X, np.array(Y).ravel(), sample_weight=weight)
            self.logger.info('Target %s -> pyquantrf RFQR model training end' % target_signal)
            # Check if there is a hyperparameters optimization or not
            if hps is None:
                # NOTE(review): str_lr is only used by the commented-out str_hp
                # below; it is currently dead code.
                str_lr = str('%.3f' % target_data['learningRateNGB'][self.forecast_type]).replace('.','')
                # str_hp = 'w1%iw2%iw3%ine%ilr%s' % (target_data['weights'][self.forecast_type]['w1'],
                #                                    target_data['weights'][self.forecast_type]['w2'],
                #                                    target_data['weights'][self.forecast_type]['w3'],
                #                                    target_data['numberEstimatorsNGB'][self.forecast_type],
                #                                    str_lr)
                file_name_noext = fp + 'predictor_' + target_data['label'] + '_' + \
                                  self.cfg['regions'][k_region]['finalModelCreator']['identifier']
            else:
                file_name_noext = '%shpo%spredictor_%s_%s' % (fp, os.sep,target_data['label'],
                                                              str_hpars.replace('-', ''))
            # Persist the models plus the inputs/metadata JSON companions.
            pickle.dump([ngb, rfqr, rfqr_w], open('%s.pkl' % file_name_noext, 'wb'))
            json.dump({"signals": list(selected_features)}, open('%s.json' % file_name_noext.replace('predictor', 'inputs'), 'w'))
            metadata = {
                "region": k_region,
                "case": self.forecast_type,
                "weights": {
                    "w1": target_data['weights'][self.forecast_type]['w1'],
                    "w2": target_data['weights'][self.forecast_type]['w2'],
                    "w3": target_data['weights'][self.forecast_type]['w3'],
                },
                "NGBoostParameters": {
                    "estimatorsNumber": target_data['numberEstimatorsNGB'][self.forecast_type],
                    "learningRate": target_data['learningRateNGB'][self.forecast_type],
                    "numberSelectedFeatures": len(selected_features)
                }
            }
            json.dump(metadata, open('%s.json' % file_name_noext.replace('predictor', 'metadata'), 'w'))

    @staticmethod
    def get_reduced_dataset(df_x, df_y, selected_features):
        """
        Extract a smaller dataframe with the selected features as columns. Keep the date and refresh indices
        :param selected_features: list of selected features
        :type selected_features: list
        :param df_x: design matrix
        :type df_x: pandas.DataFrame
        :param df_y: response vector
        :type df_y: pandas.DataFrame
        :return: pandas DF with reduced columns
        :rtype: pandas.DataFrame, pandas.DataFrame
        """
        lcl_df_x = df_x.loc[:, ['date'] + selected_features]
        lcl_df_y = df_y
        # Date must be there
        assert len(lcl_df_x.columns.values) == len(selected_features) + 1
        assert len(lcl_df_y.columns.values) == 2
        assert len(lcl_df_y) == len(lcl_df_x)
        lcl_df_x = lcl_df_x.reset_index(drop=True)
        lcl_df_y = lcl_df_y.reset_index(drop=True)
        return lcl_df_x, lcl_df_y

    def get_classes(self, prediction):
        # Discretize continuous values into 6 ordinal classes with fixed bin
        # edges (60, 120, 135, 180, 240); used by the accuracy KPIs.
        y_classes = []
        for element in prediction:
            if element < 60:
                y_classes.append(0)
            elif element < 120:
                y_classes.append(1)
            elif element < 135:
                y_classes.append(2)
            elif element < 180:
                y_classes.append(3)
            elif element < 240:
                y_classes.append(4)
            else:
                y_classes.append(5)
        return y_classes
|
nilq/baby-python
|
python
|
"""Runs the webserver."""
from absl import app
from absl import flags
from absl import logging
from icubam import config
from icubam.db import store
# Command-line flags: configuration/env selection plus the target user.
flags.DEFINE_string('config', 'resources/config.toml', 'Config file.')
flags.DEFINE_string('dotenv_path', None, 'Optionally specifies the .env path.')
flags.DEFINE_enum('mode', 'dev', ['prod', 'dev'], 'Run mode.')
# NOTE(review): the help texts below look copy-pasted ('File for the db.');
# 'email' selects the user to update and 'password' is the new password.
flags.DEFINE_string('email', None, 'File for the db.')
flags.DEFINE_string('password', None, 'File for the db.')
FLAGS = flags.FLAGS
def main(argv):
    """Update a user's password in the sqlite store.

    Looks the user up by --email; acting as an admin (an existing one, or a
    freshly created default admin), writes the hash of --password.
    """
    cfg = config.Config(
        FLAGS.config, mode=FLAGS.mode, env_path=FLAGS.dotenv_path
    )
    factory = store.create_store_factory_for_sqlite_db(cfg)
    db = factory.create()
    user_id = db.get_user_by_email(FLAGS.email)
    if user_id is None:
        logging.error(f"No user for email {FLAGS.email}")
        return
    # Pick an admin id for the update call; create the default admin if the
    # store has none yet.
    admins = db.get_admins()
    if not admins:
        admin_id = db.add_default_admin()
    else:
        admin_id = admins[0].user_id
    # FIX: renamed local 'hash' -> 'password_hash'; the original shadowed the
    # builtin hash().
    password_hash = db.get_password_hash(FLAGS.password)
    db.update_user(admin_id, user_id, dict(password_hash=password_hash))
# absl entry point: parses flags before invoking main(argv).
if __name__ == '__main__':
    app.run(main)
|
nilq/baby-python
|
python
|
from time import sleep
def msg(string):
    """Print *string* framed above and below by a line of tildes."""
    border = '~' * (len(string) + 2)
    print(border)
    print(f' {string} ')
    print(border)
# Interactive loop: ask for a function/library name and show its help()
# page until the user types 'fim' ("end").
while True:
    print('\33[30;42m', end='')
    msg('Sistema de ajuda PyHELP')
    user = str(input('\033[mFunção ou Biblioteca \033[32m>>>\033[m '))
    if user.lower() == 'fim':
        break
    print('\033[30;44m', end='')
    msg(f'Acessando o menu do comando {user}')
    sleep(1.5)
    print('\033[47m', end='')
    # FIX: help() prints the page itself and returns None; the original
    # print(help(user)) emitted a spurious "None" after every page.
    help(user)
print('\033[41mMuito obrigado por usar o sistema de ajuda PyHELP, volte sempre!')
|
nilq/baby-python
|
python
|
import unittest
from streamlink.plugins.schoolism import Schoolism
class TestPluginSchoolism(unittest.TestCase):
    """Tests for the Schoolism plugin: URL matching and playlist parsing."""

    def test_can_handle_url(self):
        positive_urls = [
            'https://www.schoolism.com/watchLesson.php',
        ]
        for candidate in positive_urls:
            self.assertTrue(Schoolism.can_handle_url(candidate))

    def test_can_handle_url_negative(self):
        negative_urls = [
            'https://www.schoolism.com',
        ]
        for candidate in negative_urls:
            self.assertFalse(Schoolism.can_handle_url(candidate))

    def test_playlist_parse_subs(self):
        # Playlist JS where each part carries a subtitle track.
        with_subs = """var allVideos=[
            {sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/44/2/part1.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Digital Painting - Lesson 2 - Part 1",playlistTitle:"Part 1",}], subtitles: [{
                "default": true,
                kind: "subtitles", srclang: "en", label: "English",
                src: "https://s3.amazonaws.com/schoolism-encoded/44/subtitles/2/2-1.vtt",
            }],
            },
            {sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/44/2/part2.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Digital Painting - Lesson 2 - Part 2",playlistTitle:"Part 2",}], subtitles: [{
                "default": true,
                kind: "subtitles", srclang: "en", label: "English",
                src: "https://s3.amazonaws.com/schoolism-encoded/44/subtitles/2/2-2.vtt",
            }]
            }];
        """
        parsed = Schoolism.playlist_schema.validate(with_subs)
        self.assertIsNotNone(parsed)
        self.assertEqual(2, len(parsed))

    def test_playlist_parse(self):
        # Playlist JS without any subtitle tracks.
        without_subs = """var allVideos=[
            {sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/14/1/part1.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Gesture Drawing - Lesson 1 - Part 1",playlistTitle:"Part 1",}],},
            {sources:[{type:"application/x-mpegurl",src:"https://d8u31iyce9xic.cloudfront.net/14/1/part2.m3u8?Policy=TOKEN&Signature=TOKEN&Key-Pair-Id=TOKEN",title:"Gesture Drawing - Lesson 1 - Part 2",playlistTitle:"Part 2",}]}
        ];
        """
        parsed = Schoolism.playlist_schema.validate(without_subs)
        self.assertIsNotNone(parsed)
        self.assertEqual(2, len(parsed))
|
nilq/baby-python
|
python
|
""""Process the results of DrFact and DrKIT"""
import json
import sys
from tqdm import tqdm
# CLI: <prediction_file> <output_file>
prediction_file = sys.argv[1]
output_file = sys.argv[2]

outputs = []
with open(prediction_file) as f:
    print("Reading", f.name)
    # First line is a header; process the remaining JSONL records.
    lines = f.read().splitlines()
    for line in tqdm(lines[1:], desc="Processing %s"%f.name):
        instance = json.loads(line)
        pred = instance["predictions"]
        record = dict(qid = instance["qas_id"], \
                      question = pred["question"], \
                      predictions_K = {100: pred["top_5000_predictions"]}  # TODO: add more
                      )
        outputs.append(record)

with open(output_file, "w") as f:
    print("Writing", f.name)
    for record in outputs:
        f.write(json.dumps(record) + "\n")
|
nilq/baby-python
|
python
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals


def main():
    """Print whether '234' consists solely of decimal characters (it does)."""
    result = '234'.isdecimal()
    print(result)


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
def main():
    """Read two unary (Peano-style) numerals from stdin and return the numeral
    for their product.

    Each input line encodes a number as its count of 'S' characters, e.g.
    'S(S(0))' is 2.  A zero product is returned as the int 0, which prints
    identically to the numeral '0'.
    """
    x, y = c(input()), c(input())
    if x * y == 0:
        return 0
    # x*y nested successors around zero: S(S(...S(0)...))
    return "S" + ("(S" * ((x * y) - 1)) + "(0" + (")" * (x * y))
def c(x):
    """Return the value of a unary numeral: the number of 'S' characters in *x*."""
    return sum(1 for ch in x if ch == 'S')
# Script entry point: read the two numerals and print their product.
if __name__ == '__main__':
    print(main())
|
nilq/baby-python
|
python
|
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import os
import stack.commands
# Base class combining host-argument parsing with the stacki `iterate`
# command plumbing; the concrete Command below supplies the behavior.
class command(stack.commands.HostArgumentProcessor,
	stack.commands.iterate.command):
	pass
class Command(command):
	"""
	Iterate sequentially over a list of hosts. This is used to run
	a shell command on the frontend with '%' wildcard expansion for
	every host specified.

	<arg optional='1' type='string' name='host' repeat='1'>
	Zero, one or more host names. If no host names are supplied iterate over
	all hosts except the frontend.
	</arg>

	<param optional='0' type='string' name='command'>
	The shell command to be run for each host. The '%' character is used as
	a wildcard to indicate the hostname. Quoting of the '%' to expand to a
	literal is accomplished with '%%'.
	</param>

	<example cmd='iterate host backend command="scp file %:/tmp/"'>
	Copies file to the /tmp directory of every backend node
	</example>
	"""

	def run(self, params, args):
		# 'command' is required (the True flag makes fillParams raise if absent).
		(cmd, ) = self.fillParams([ ('command', None, True) ])

		self.beginOutput()

		hosts = []
		if len(args) == 0:
			#
			# no hosts are supplied. we need to exclude the frontend
			#
			for host in self.getHostnames(args):
				if host == self.db.getHostname('localhost'):
					#
					# don't include the frontend
					#
					continue
				hosts.append(host)
		else:
			hosts = self.getHostnames(args)

		for host in hosts:
			# Turn the wildcard '%' into the hostname, and '%%' into
			# a single '%'.  A one-character scan with lookahead (next)
			# and a memory of the last emitted token (prev).
			s = ''
			prev = ''
			for i in range(0, len(cmd)):
				curr = cmd[i]
				try:
					# NOTE(review): shadows the builtin next(); harmless here.
					next = cmd[i + 1]
				except:
					# Past the end of the command: empty lookahead.
					next = ''

				if curr == '%':
					if prev != '%' and next != '%':
						# Lone '%': substitute the hostname.
						s += host
						prev = host
						continue # consume '%'
					elif prev == '%':
						# Second '%' of a '%%' pair: emit a literal percent.
						s += '%'
						prev = '*'
						continue # consume '%'
					# First '%' of '%%': emit nothing; prev becomes '%' below.
				else:
					s += curr

				prev = curr

			# Run the expanded command locally (output is not captured).
			os.system(s)
			# for line in os.popen(s).readlines():
			# 	self.addOutput(host, line[:-1])

		self.endOutput(padChar='')
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
"""
Master program """
import multiprocessing
import math
import queue
import time
import pickle
import numpy as np
import pygame
import zmq
class CommProcess(multiprocessing.Process):
    """Communicates with robot.

    Daemon process that drains the command queue and forwards only the most
    recent drive command to the robot over a ZeroMQ REQ socket.
    """

    def __init__(self, image_queue, command_queue):
        super().__init__(daemon=True)
        # Set True to leave the communication loop.
        self.done = False
        # Incoming camera frames (currently unused by the GUI).
        self.image_queue = image_queue
        # (left, right) speed tuples from the GUI; any item that is not a
        # 2-element pair (e.g. the 'q' string) is treated as a quit request.
        self.command_queue = command_queue

    def run(self):
        port = 15787
        context = zmq.Context()
        robot = context.socket(zmq.REQ)
        robot.connect('tcp://zeitgeist.local:{}'.format(port))
        while not self.done:
            command_flag = False
            try:
                # Drain the queue so only the newest command survives.
                while True:
                    command = self.command_queue.get(block=False)
                    command_flag = True
            except queue.Empty:
                pass
            if command_flag:
                if not len(command) == 2:
                    # Quit sentinel: anything that is not a (left, right) pair.
                    self.done = True
                    break
                # REQ/REP: every send must be matched by a recv.
                robot.send_string('c {} {}'.format(command[0], command[1]))
                print("Sent to robot: {}".format(command))
                response = robot.recv_string()
                print("Received from robot: {}".format(response))
        # Tell the robot we are disconnecting before exiting.
        robot.send_string('q')
        robot.recv_string()
def run():
    """Pygame GUI mapping mouse position to differential-drive motor speeds.

    The mouse position relative to the window centre is converted into
    (left, right) wheel speeds and pushed onto the command queue consumed by
    CommProcess.  Closing the window enqueues 'q' so the comm process shuts
    down cleanly; space toggles the motors on/off.
    """
    # current speeds
    left = 0
    right = 0
    # Last seen mouse position relative to the window centre.  Initialised
    # here so the movement log line below cannot hit an unbound name when a
    # key event changes the speeds before any mouse motion has occurred
    # (previously a NameError).
    x = 0
    y = 0
    # speed factor (left and right can be +/- 31 max)
    speed = 16.0
    # screen radius
    size = 250
    # deadzone size
    deadzone = 10
    # motors on
    enabled = True

    pygame.init()
    screen = pygame.display.set_mode((2*size, 2*size))

    image_queue = multiprocessing.Queue()
    command_queue = multiprocessing.Queue()
    robot = CommProcess(image_queue, command_queue)
    robot.start()

    done = False
    while not done:
        old_left = left
        old_right = right
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
                command_queue.put('q')
            if event.type == pygame.MOUSEMOTION:
                # Window coords -> centred coords with y pointing up.
                x = event.pos[0] - size
                y = -(event.pos[1] - size)
                if x**2 + y**2 < deadzone**2:
                    # Inside the deadzone: stop.
                    left = 0
                    right = 0
                else:
                    # Forward component from y, turning component from x.
                    left = right = speed*y/size
                    left += speed*x/size
                    right -= speed*x/size
                    left = int(left)
                    right = int(right)
            if event.type == pygame.KEYDOWN:
                if event.key == 32:  # space toggles the motors
                    enabled = not enabled
                    left = 0
                    right = 0
        # Only send when the speeds actually changed.
        if old_left != left or old_right != right:
            print('GUI sensed movement: ({},{}):\t{}\t{}'.format(x, y, left, right))
            command_queue.put((left, right))
        # Image display is not wired up yet:
        # try:
        #     image = image_queue.get(block=False)
        #     print(image.shape)
        # except queue.Empty:
        #     pass
        if enabled:
            screen.fill((0, 0, 0))
            pygame.draw.circle(screen, (255, 255, 0), (size, size), deadzone)
        else:
            screen.fill((255, 255, 255))
        pygame.display.flip()
    time.sleep(1)  # allow time for threads to finish


if __name__ == "__main__":
    run()
|
nilq/baby-python
|
python
|
# Generated by Django 3.0.8 on 2020-07-15 09:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the People model (candidate profiles)."""

    dependencies = [
        ('Resources', '0008_resume'),
    ]

    operations = [
        migrations.CreateModel(
            name='People',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('date_of_birth', models.DateField(blank=True, null=True)),
                ('About', models.TextField(help_text='Resume of candidate')),
                # SET_NULL keeps the person when a referenced lookup row is deleted.
                ('education', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Resources.Edu_Qualification')),
                ('experience', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Resources.Experience')),
                ('ratings', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='Resources.Rating')),
                ('skills', models.ManyToManyField(help_text='enter your skills(Technical or personal ...etc)', to='Resources.Skill')),
                ('technologies', models.ManyToManyField(help_text='enter technology you are experience on(if freshers mention Fresher)', to='Resources.Technology')),
            ],
            options={
                'ordering': ['first_name'],
            },
        ),
    ]
|
nilq/baby-python
|
python
|
import logging
import os
import subprocess
import sys

import yaml
from cryptography.fernet import Fernet
secrets_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'secrets.yaml')
def decrypt_secrets(cfg):
    """Decrypt every entry of secrets.yaml into *cfg* using the Fernet master key.

    Reads the master key from <fernet_tokens_path>/fernet_tokens.txt, decrypts
    each ciphertext stored in secrets.yaml, and stores the plaintext back into
    *cfg* under the same key.  Returns the updated cfg dict.
    """
    with open(os.path.join(cfg['global']['fernet_tokens_path'], 'fernet_tokens.txt'), 'r') as token_file:
        master_key = token_file.read()
    with open(secrets_file_path, 'r') as ymlfile:
        # safe_load avoids arbitrary object construction from the YAML file
        # (yaml.load without an explicit Loader is deprecated and unsafe).
        secrets_file = yaml.safe_load(ymlfile)
    cipher_suite = Fernet(str.encode(master_key))
    for password_key in secrets_file:
        decrypted_password = cipher_suite.decrypt(secrets_file[password_key])
        cfg[str(password_key)] = decrypted_password.decode("utf-8")
    return cfg
def generate_secrets_file(cfg):
    """Interactively (re)build the encrypted secrets.yaml file.

    Generates a fresh Fernet master key, writes it next to the configured
    token path, prompts the operator for each password listed in
    secrets.yaml, stores the plaintext in *cfg* and the ciphertext back into
    secrets.yaml.  Returns the updated cfg dict.
    """
    master_key = Fernet.generate_key()
    with open(os.path.join(cfg['global']['fernet_tokens_path'], 'fernet_tokens.txt'), 'w') as token_file:
        token_file.write(master_key.decode())
    # Fixed: `logging` was referenced here without ever being imported, so
    # this function raised NameError; the import now lives at the top of
    # the file.
    logging.info('Master key has been generated and stored in: ' + os.path.join(cfg['global']['fernet_tokens_path']))
    with open(secrets_file_path, 'r') as ymlfile:
        secrets_file = yaml.safe_load(ymlfile)
    # The cipher depends only on the master key, so build it once.
    cipher_suite = Fernet(master_key)
    for password_key in secrets_file:
        password_raw_value = input("Enter the password for " + str(password_key) + " :" + "\n")
        cfg[str(password_key)] = password_raw_value
        ciphered_text = cipher_suite.encrypt(str.encode(password_raw_value))  # Fernet requires bytes
        secrets_file[password_key] = ciphered_text
    with open(secrets_file_path, 'w') as f:
        yaml.dump(secrets_file, f)
    return cfg
|
nilq/baby-python
|
python
|
#Test metadata model
from src.models import metadata
from src import data
from src import utils
import torch
import os
from pytorch_lightning import Trainer
ROOT = os.path.dirname(os.path.dirname(data.__file__))
def test_metadata():
    """A site-only metadata model maps 20 site ids to 10 class scores."""
    site_ids = torch.zeros(20)
    model = metadata.metadata(sites=1, classes=10)
    scores = model(site_ids.int())
    assert scores.shape == (20, 10)
def test_metadata_sensor_fusion():
    """Fusing image crops with site metadata yields one score per class."""
    fusion = metadata.metadata_sensor_fusion(bands=3, sites=1, classes=10)
    crops = torch.randn(20, 3, 11, 11)
    site_ids = torch.zeros(20)
    scores = fusion(crops, site_ids.int())
    assert scores.shape == (20, 10)
def test_MetadataModel(config, dm):
    """Smoke-test one fast_dev_run training pass of the fused metadata model."""
    fusion = metadata.metadata_sensor_fusion(sites=1, classes=3, bands=3)
    wrapper = metadata.MetadataModel(
        model=fusion, classes=3, label_dict=dm.species_label_dict, config=config)
    Trainer(fast_dev_run=True).fit(wrapper, datamodule=dm)
|
nilq/baby-python
|
python
|
from PyQt5 import QtWebEngineWidgets, QtWidgets
from tootbox.core.framework import LayoutView
from tootbox.views.toot import Toot
class Timeline(LayoutView):
    """Scrollable list of toots with a manual refresh button.

    New toots are inserted at the top; scrolling near the bottom is the hook
    for requesting older toots (not implemented yet).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.initialize_ui()

    def initialize_ui(self):
        """Build the refresh button above a scrollable, stretch-padded toot list."""
        self.button = QtWidgets.QPushButton("Refresh!")

        self.toot_list = QtWidgets.QVBoxLayout()
        self.toot_list.setContentsMargins(0, 0, 0, 0)
        self.toot_list.setSpacing(20)
        # Trailing stretch keeps toots packed at the top of the layout.
        self.toot_list.addStretch()

        self.scrollbox = QtWidgets.QScrollArea()
        self.scrollbox.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
        self.scrollbox.setFrameStyle(0)
        self.scrollbox.setWidgetResizable(True)
        self.scrollbox.verticalScrollBar().valueChanged.connect(self.timeline_scrolled)

        scrollContainer = QtWidgets.QWidget()
        scrollContainer.setLayout(self.toot_list)
        self.scrollbox.setWidget(scrollContainer)

        vbox = QtWidgets.QVBoxLayout()
        vbox.addWidget(self.button)
        vbox.addWidget(self.scrollbox)
        self.setLayout(vbox)

    def update_list(self, toots):
        """Prepend *toots* to the list (reversed so the newest ends up on top)."""
        for toot in reversed(toots):
            t = Toot(toot)
            # Index 0 keeps new items above the stretch at the bottom.
            self.toot_list.insertWidget(0, t)

    def timeline_scrolled(self, value):
        """Scrollbar callback: past 90% of the range, more toots should load.

        NOTE(review): divides by the scrollbar maximum — would raise
        ZeroDivisionError if this slot ever fires while maximum is 0; confirm
        Qt never emits valueChanged in that state.
        """
        maximum = self.scrollbox.verticalScrollBar().maximum()
        scroll_perc = value / maximum
        if scroll_perc > 0.9:
            # Request more toots
            pass
|
nilq/baby-python
|
python
|
from __future__ import unicode_literals
import csv
import io
from django.conf import settings
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.utils.text import slugify
from yourapp.signals import csv_uploaded
from yourapp.validators import csv_file_validator
def upload_csv_file(instance, filename):
    """upload_to callback for CSVUpload.file.

    Builds a per-upload path of the form 'csv/<n>/<username>/<filename>',
    where <n> is one greater than the user's latest upload id (1 for the
    first upload).
    """
    qs = instance.__class__.objects.filter(user=instance.user)
    if qs.exists():
        num_ = qs.last().id + 1
    else:
        num_ = 1
    # Fixed: the original returned a literal placeholder instead of the
    # uploaded file's name, so every upload for a user collided on one path.
    return f'csv/{num_}/{instance.user.username}/{filename}'
class CSVUpload(models.Model):
    """A user-submitted CSV file; `completed` flips once it has been parsed."""
    # on_delete is mandatory on ForeignKey since Django 2.0 (the file already
    # uses f-strings, i.e. py3.6+); CASCADE removes a user's uploads with the
    # user.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    file = models.FileField(upload_to=upload_csv_file, validators=[csv_file_validator])
    completed = models.BooleanField(default=False)

    def __str__(self):
        return self.user.username
def convert_header(csvHeader):
    """Turn the raw first header row into lowercase snake_case column names."""
    first_row = csvHeader[0]
    columns = []
    for name in first_row.split(","):
        columns.append(name.replace(' ', '_').lower())
    return columns
def csv_upload_post_save(sender, instance, created, *args, **kwargs):
    """post_save handler: parse the uploaded CSV once and emit csv_uploaded.

    Each data row becomes a {column_name: value} dict (column names from the
    first header row via convert_header) and the full list is sent through
    the csv_uploaded signal.  `completed` guards against re-parsing on the
    save() performed at the end of this handler.
    """
    if not instance.completed:
        csv_file = instance.file
        decoded_file = csv_file.read().decode('utf-8')
        io_string = io.StringIO(decoded_file)
        # NOTE(review): delimiter=';' makes each logical CSV line arrive as a
        # single field, which is then split on ',' below — presumably to
        # sidestep quoting; confirm against real upload data.
        reader = csv.reader(io_string, delimiter=';', quotechar='|')
        header_ = next(reader)
        header_cols = convert_header(header_)
        parsed_items = []
        '''
        if using a custom signal
        '''
        for line in reader:
            parsed_row_data = {}
            i = 0
            # The whole row is in line[0]; split it into column values.
            row_item = line[0].split(',')
            for item in row_item:
                key = header_cols[i]
                parsed_row_data[key] = item
                i+=1
            parsed_items.append(parsed_row_data)
        csv_uploaded.send(sender=instance, user=instance.user, csv_file_list=parsed_items)
        '''
        if using a model directly
        for line in reader:
            new_obj = YourModelKlass()
            i = 0
            row_item = line[0].split(',')
            for item in row_item:
                key = header_cols[i]
                setattr(new_obj, key) = item
                i+=1
            new_obj.save()
        '''
        # Mark parsed so the save() below does not re-trigger this handler's body.
        instance.completed = True
        instance.save()
# Register the parser for CSVUpload saves.  Fixed: the original referenced an
# undefined name `StaffCSVUpload`, which raised NameError at import time; the
# model defined in this module is CSVUpload.
post_save.connect(csv_upload_post_save, sender=CSVUpload)
|
nilq/baby-python
|
python
|
import cv2
import numpy as np

# Let's load a simple image with 3 black squares
# NOTE(review): imread returns None (no exception) if 'Hough.jpg' is missing;
# cvtColor would then fail — verify the file ships with this demo.
image = cv2.imread('Hough.jpg')
cv2.waitKey(0)

# Grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find Canny edges (hysteresis thresholds 30/200)
edged = cv2.Canny(gray, 30, 200)
cv2.waitKey(0)

# Finding Contours
# Use a copy of the image e.g. edged.copy()
# since findContours alters the image
contours, hierarchy = cv2.findContours(edged,
    cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

cv2.imshow('Canny Edges After Contouring', edged)
cv2.waitKey(0)

print("Number of Contours found = " + str(len(contours)))

# Draw all contours
# -1 signifies drawing all contours
cv2.drawContours(image, contours, -1, (0, 255, 0), 3)

cv2.imshow('Contours', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
import numpy as np
import tensorrt as trt
import cv2
import os
import pycuda.autoinit
import pycuda.driver as cuda
try:
from . import TRT_exec_tools
except ImportError:
import TRT_exec_tools
class Semantic_Segmentation:
    """TensorRT-backed semantic segmentation runner for fixed-size BGR frames."""

    def __init__(self, trt_engine_path):
        """
        Parameters:
        -----------
        trt_engine_path: string
            Path to TRT engine.
        """
        # Create a Context on this device,
        self.cfx = cuda.Device(0).make_context()
        TRT_LOGGER = trt.Logger()
        TRT_LOGGER.min_severity = trt.Logger.Severity.VERBOSE
        # Load TRT Engine
        with open(trt_engine_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
            self.engine = runtime.deserialize_cuda_engine(f.read())
        # Create Context
        self.context = self.engine.create_execution_context()
        # Allocate buffers required for engine
        self.inputs, self.outputs, self.bindings, self.stream = TRT_exec_tools.allocate_buffers(self.engine)
        # Input image height
        self.height = 720
        # Input image width
        self.width = 1280
        # RGBA Colour map for segmentation display
        self.colour_map = None

    def segment_image(self, image):
        """
        Run inference on one frame and store the raw output map on self.depth.

        Parameters:
        -----------
        image: np.array
            HWC uint8 BGR
        """
        assert image.shape == (self.height, self.width, 3)
        # Infer
        self.inputs[0].host = np.ascontiguousarray(image.astype('float32')).ravel()
        # Make self the active context, pushing it on top of the context stack.
        self.cfx.push()
        trt_outputs = TRT_exec_tools.do_inference_v2(
            context=self.context,
            bindings=self.bindings,
            inputs=self.inputs,
            outputs=self.outputs,
            stream=self.stream,
        )
        # Remove any context from the top of the context stack, deactivating it.
        self.cfx.pop()
        o = trt_outputs[0].reshape(self.height, self.width)
        # HW np.array uint8
        # NOTE(review): the attribute is named `depth` although this class is
        # a segmentation runner — looks copy-pasted from a depth model; the
        # stored map is the engine's per-pixel output.  Confirm consumers.
        self.depth = o
if __name__ == "__main__":
import time
model = Semantic_Segmentation("sample.trt")
N = 500
images = np.random.randint(0, 255, size=[N, model.height, model.width, 3], dtype='uint8')
t1 = time.perf_counter()
for i in images:
model.segment_image(i)
t2 = time.perf_counter()
print(N/(t2-t1))
model.cfx.pop()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Smoke-test script: fetch weekly history bars for one security through the
# DefaultDataSource wrapper around jqdatasdk.
from quant_benchmark.data.base_data_source.default_data_source import DefaultDataSource
import jqdatasdk
#see https://github.com/JoinQuant/jqdatasdk/blob/master/tests/test_api.py
# SECURITY NOTE(review): credentials are hard-coded; move them to environment
# variables or a config file before sharing or deploying this script.
jqdatasdk.auth(username='13922819479', password='123456')
data_source = DefaultDataSource()
order_book_id = "000001.XSHE"  # SZSE-listed security in jqdata notation
bar_count = 10
dt = "2019-09-20"
# Ten weekly ("1w") bars ending at dt.
data = data_source.history_bars(order_book_id=order_book_id, bar_count=bar_count, frequency="1w", dt=dt)
|
nilq/baby-python
|
python
|
def f06(word1='paraparaparadise', word2='paragraph'):
    """Print the union, difference and intersection of the character-bigram
    sets of *word1* and *word2* (NLP-100 exercise 06)."""
    def bigrams(word):
        # Adjacent character pairs, joined into 2-character strings.
        return {a + b for a, b in zip(word, word[1:])}

    X = bigrams(word1)
    Y = bigrams(word2)
    print(X | Y)
    print(X - Y)
    print(X & Y)
|
nilq/baby-python
|
python
|
import math
def get_digit(n, i):
    """Return the i-th decimal digit of *n*, counting from the least
    significant digit (i = 0)."""
    shifted = n // (10 ** i)
    return shifted % 10
def int_len(n):
    """Number of decimal digits in the non-negative integer *n* (0 has one)."""
    return 1 if n == 0 else int(math.log10(n)) + 1
def get_new_recipes(n):
    """Split the non-negative integer *n* into its decimal digits, most
    significant first (0 -> [0]).

    Implemented over the decimal string so the digit count stays exact for
    integers of any size; the previous float-log10 based count can be off by
    one for very large values.
    """
    return [int(digit) for digit in str(n)]
def part1(count):
    """Return the ten recipe scores that follow the first *count* recipes
    (Advent of Code 2018, day 14, part 1)."""
    scores = [3, 7]
    pos_a, pos_b = 0, 1
    while len(scores) < count + 10:
        a, b = scores[pos_a], scores[pos_b]
        scores.extend(get_new_recipes(a + b))
        # Each elf steps forward 1 + its current score, wrapping around.
        pos_a = (1 + pos_a + a) % len(scores)
        pos_b = (1 + pos_b + b) % len(scores)
    return ''.join(str(s) for s in scores[count:count + 10])
def part2(goal):
    """Return how many recipes precede the first occurrence of the digit
    string *goal* on the scoreboard (part 2)."""
    scores = [3, 7]
    pos_a, pos_b = 0, 1
    while True:
        a, b = scores[pos_a], scores[pos_b]
        scores.extend(get_new_recipes(a + b))
        pos_a = (1 + pos_a + a) % len(scores)
        pos_b = (1 + pos_b + b) % len(scores)
        # At most two digits are appended per step, so checking the last ten
        # characters is enough to catch any new occurrence of goal.
        tail = ''.join(str(s) for s in scores[-10:])
        if goal in tail:
            return len(scores) - 10 + tail.index(goal)
if __name__ == '__main__':
    INPUT = 793061  # puzzle input
    print(part1(INPUT))
    # runs for about a minute or so on my machine
    print(part2(str(INPUT)))
|
nilq/baby-python
|
python
|
from ._Activate import *
from ._Deactivate import *
from ._Completed import *
from ._Startup import *
from ._Shutdown import *
from ._Recs import *
|
nilq/baby-python
|
python
|
# %% codecell
import os
import numpy as np
from tqdm import tqdm
from shutil import copyfile
# %% codecell
# Reverse mapping: species/class name -> numeric label used by the classifier
# (season-2 label space of the GNP camera-trap dataset).
class_indices_S2_rev = {
    'Waterbuck': 1,
    'Baboon': 2,
    'Warthog': 3,
    'Bushbuck': 4,
    'Impala': 5,
    'Oribi': 6,
    'Elephant': 7,
    'Genet': 8,
    'Nyala': 9,
    'Setup': 10,
    'Bushpig': 11,
    'Porcupine': 12,
    'Civet': 13,
    'Vervet': 14,
    'Reedbuck': 15,
    'Kudu': 16,
    'Buffalo': 17,
    'Sable_antelope': 18,
    'Duiker_red': 19,
    'Hartebeest': 20,
    'Wildebeest': 21,
    'Guineafowl_helmeted': 22,
    'Hare': 23,
    'Duiker_common': 24,
    'Fire': 25,
    'Mongoose_marsh': 26,
    'Aardvark': 27,
    'Honey_badger': 28,
    'Hornbill_ground': 29,
    'Mongoose_slender': 30,
    'Mongoose_bushy_tailed': 31,
    'Samango': 32,
    'Mongoose_white_tailed': 33,
    'Mongoose_banded': 34,
    'Mongoose_large_grey': 35,
    'Bushbaby': 36,
    'Guineafowl_crested': 37,
    'Eland': 38,
    'Lion': 39,
    'Serval': 40
}
# Forward mapping: numeric label -> class name.
class_indices_S2 = {class_indices_S2_rev[k]: k for k in class_indices_S2_rev}
# %% codecell
root = '/home/zhmiao/datasets/ecology/GNP'
# Earlier experiment outputs, kept for reference:
# confident_path = '/home/zhmiao/repos/AnimalActiveLearing_srv/weights/GTPSMemoryStage2_ConfPseu/051620_MOZ_S2_0_preds_conf.txt'
# confident_path = '/home/zhmiao/repos/AnimalActiveLearing_srv/weights/GTPSMemoryStage2_ConfPseu_SoftIter/072520_MOZ_S2_soft_iter_0_preds_conf.txt'
confident_path = '/home/zhmiao/repos/AnimalActiveLearning/weights/SemiStage2OLTR_Energy/111620_MOZ_PSLABEL_OLTR_Energy_0_preds_conf.txt'
# %% codecell
# Each line is "<relative/file/path> <predicted_label>"; rsplit on the last
# space so paths containing spaces survive.
f = open(confident_path, 'r')
file_id_list = []
cat_list = []
for line in tqdm(f):
    line_sp = line.replace('\n', '').rsplit(' ', 1)
    file_id = line_sp[0]
    cat = class_indices_S2[int(line_sp[1])]
    file_id_list.append(file_id)
    cat_list.append(cat)
f.close()
# %% codecell
file_id_list = np.array(file_id_list)
cat_list = np.array(cat_list)
# %% codecell
# Sample 1000 predictions (with replacement) for manual spot-checking.
np.random.seed(10)
rand_idx = np.random.choice(range(len(cat_list)), 1000)
file_id_sel = file_id_list[rand_idx]
cat_sel = cat_list[rand_idx]
# %% codecell
save_root = os.path.join(root, 'S3_pickout_soft_iter_120220')
os.makedirs(save_root, exist_ok=True)
# %% codecell
# Copy each sampled image into save_root, flattening the directory structure
# (':::' replaces '/') and appending the predicted class to the file name.
for file_id, cat in tqdm(zip(file_id_sel, cat_sel)):
    from_path = os.path.join(root, file_id)
    file_id = file_id.replace('/', ':::')
    save_path = os.path.join(save_root, file_id)
    if '.JPG' in save_path:
        save_path = save_path.replace('.JPG', '_{}.JPG'.format(cat))
    elif '.jpg' in save_path:
        save_path = save_path.replace('.jpg', '_{}.jpg'.format(cat))
    copyfile(from_path, save_path)
# %%
|
nilq/baby-python
|
python
|
import csv
import os
import random
from .utils import write_into_file, train_val_test_split
def preprocess(in_csv_paths, out_dir_path, val_split, test_split):
    """Merge labelled CSVs, shuffle, split, and write full/train/val/test files."""
    dataset = []
    for path in in_csv_paths:
        with open(path, "r") as handle:
            for record in csv.DictReader(handle):
                dataset.append((record["text"], record["label"]))
    random.shuffle(dataset)
    train_set, val_set, test_set = train_val_test_split(dataset, val_split, test_split)
    splits = {
        "data_full.csv": dataset,
        "data_train.csv": train_set,
        "data_val.csv": val_set,
        "data_test.csv": test_set,
    }
    for filename, rows in splits.items():
        write_into_file(rows, out_path=os.path.join(out_dir_path, filename))
|
nilq/baby-python
|
python
|
"""Output timeseries in NetCDF format.
"""
import glob,os,sys
import pandas as pd
import datetime as dt
import copy
def defaultExtensions():
    """File extensions handled by this output writer."""
    extensions = ['.nc']
    return extensions
def NCfile(filename, datas):
    """Write each timeseries dataframe in *datas* to a NetCDF file.

    Each entry of *datas* is a dict with a 'dataframe' and per-column
    'metadata' carrying 'units' and 'long_name'.  With several datasets the
    output name is derived from *filename* by appending the dataset index.
    Inputs are deep-copied so the caller's data is never mutated.
    """
    datas = copy.deepcopy(datas)
    fileout = copy.deepcopy(filename)
    for i, df in enumerate(datas):
        if len(datas) > 1:
            # NOTE(review): the derived name uses a '.txt' suffix even though
            # the payload is NetCDF — looks copy-pasted from a text writer;
            # kept as-is to preserve existing output paths.
            fileout = filename[:-3] + str(i) + '.txt'
        # Drop the column that duplicates the index before export.
        del df['dataframe'][df['dataframe'].index.name]
        xar = df['dataframe'].to_xarray()
        # Attach units/long_name metadata as NetCDF variable attributes.
        # (The original loop shadowed the outer `i` with an unused index.)
        for col in df['dataframe'].columns:
            attr = {}
            uni = df['metadata'][col]['units']
            if uni and uni != 'None':
                attr['units'] = uni
            uni = df['metadata'][col]['long_name']
            if uni and uni != 'None':
                attr['long_name'] = uni
            xar[col].attrs = attr
        xar.to_netcdf(path=fileout, mode='w')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class CassandraCluster(object):

    """Implementation of the 'CassandraCluster' model.

    Specifies an Object containing information about a Cassandra cluster.

    Attributes:
        primary_host (string): Primary host from this Cassandra cluster.
        seeds (list of string): Seeds of this Cassandra Cluster.

    """

    # Mapping from Model property names to API property names.
    _names = {
        "primary_host": 'primaryHost',
        "seeds": 'seeds',
    }

    def __init__(self, primary_host=None, seeds=None):
        """Constructor for the CassandraCluster class"""
        self.primary_host = primary_host
        self.seeds = seeds

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build an instance of this model from an API response dictionary.

        Args:
            dictionary (dict): keys MUST match the API property names.

        Returns:
            CassandraCluster: the populated instance, or None when
            *dictionary* is None.
        """
        if dictionary is None:
            return None
        return cls(dictionary.get('primaryHost'),
                   dictionary.get('seeds'))
|
nilq/baby-python
|
python
|
"""
A Work-In-Progress agent using Tensorforce
"""
from . import BaseAgent
from .. import characters
class TensorForceAgent(BaseAgent):
    """The TensorForceAgent. Acts through the algorithm, not here."""

    def __init__(self, character=characters.Bomber, algorithm='ppo'):
        super(TensorForceAgent, self).__init__(character)
        self.algorithm = algorithm

    def act(self, obs, action_space):
        """This agent has its own way of inducing actions. See train_with_tensorforce."""
        return None

    def initialize(self, env):
        """Build the TensorForce agent for *env*.

        Only 'ppo' is supported; returns None for any other algorithm.
        """
        from gym import spaces
        from tensorforce.agents import PPOAgent

        if self.algorithm == "ppo":
            if type(env.action_space) == spaces.Tuple:
                # One named int action per sub-space of the Tuple.
                # Fixed: the spec type must be the string 'int' (matching the
                # non-Tuple branch below); the builtin `int` is not a valid
                # TensorForce action type spec.
                actions = {
                    str(num): {
                        'type': 'int',
                        'num_actions': space.n
                    }
                    for num, space in enumerate(env.action_space.spaces)
                }
            else:
                actions = dict(type='int', num_actions=env.action_space.n)
            return PPOAgent(
                states=dict(type='float', shape=env.observation_space.shape),
                actions=actions,
                network=[
                    dict(type='dense', size=64),
                    dict(type='dense', size=64)
                ],
                batching_capacity=1000,
                step_optimizer=dict(
                    type='adam',
                    learning_rate=1e-4
                )
            )
        return None
|
nilq/baby-python
|
python
|
# Dataquest exercise script.  NOTE(review): `reviews`, `norm_reviews` and
# `plt` (matplotlib.pyplot) are provided by the hosted exercise environment;
# running this file standalone requires defining/importing them first.

## 2. Frequency Distribution ##
fandango_distribution = reviews['Fandango_Ratingvalue'].value_counts().sort_index()
imdb_distribution = reviews['IMDB_norm'].value_counts().sort_index()
print(fandango_distribution)
print('--'*12)
print(imdb_distribution)

## 4. Histogram In Matplotlib ##
fig, ax = plt.subplots()
plt.hist(reviews['Fandango_Ratingvalue'], range=(0,5))
plt.show()

## 5. Comparing histograms ##
# Four stacked histograms on a shared 0-5 rating scale and 0-50 y-limit so
# the distributions are directly comparable.
fig = plt.figure(figsize=(5,20))
ax1 = fig.add_subplot(4,1,1)
ax2 = fig.add_subplot(4,1,2)
ax3 = fig.add_subplot(4,1,3)
ax4 = fig.add_subplot(4,1,4)
ax1.hist(reviews['Fandango_Ratingvalue'], bins=20, range=(0,5))
ax1.set_title('Distribution of Fandango Ratings')
ax1.set_ylim(0, 50)
ax3.hist(reviews['Metacritic_user_nom'], bins=20, range=(0,5))
ax3.set_title('Distribution of Metacritic Ratings')
ax3.set_ylim(0, 50)
ax2.hist(reviews['RT_user_norm'],bins=20, range=(0,5))
ax2.set_title('Distribution of Rotten Tomatoes Ratings')
ax2.set_ylim(0, 50)
ax4.hist(reviews['IMDB_norm'], bins=20, range=(0,5))
ax4.set_title('Distribution of IMDB Ratings')
ax4.set_ylim(0,50)
plt.show()

## 7. Box Plot ##
fig, ax = plt.subplots()
ax.boxplot(norm_reviews['RT_user_norm'])
ax.set_ylim(0,5)
ax.set_xticklabels(['Rotten Tomatoes'])
plt.show()

## 8. Multiple Box Plots ##
num_cols = ['RT_user_norm', 'Metacritic_user_nom', 'IMDB_norm', 'Fandango_Ratingvalue']
fig, ax = plt.subplots()
ax.boxplot(norm_reviews[num_cols].values)
ax.set_xticklabels(num_cols, rotation=90)
ax.set_ylim(0,5)
plt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# 03. "Pi" (NLP-100 exercise 03)
# Print the length of each word, stripping commas and periods so punctuation
# does not count toward a word's length (the lengths spell out digits of pi).
sentence = "Now I need a drink, alcoholic of course, after the heavy lectures involving quantum mechanics."
print([len(c.strip(",.")) for c in sentence.split()])
|
nilq/baby-python
|
python
|
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.db import models
from problem.models import Submit, ProblemStats, LogEvent
@receiver(post_save, sender=Submit)
def update_problem_status(sender, instance, **kwargs):
    """Recompute per-problem submit statistics whenever a Submit is saved.

    Only each student's "best" submit is counted: OK first, then awaiting
    review, then everything else (newest wins within a bucket).  The counts
    are stored on the problem's ProblemStats row as green/yellow/red.
    """
    try:
        stats: ProblemStats = ProblemStats.objects.get(problem=instance.problem)
    except ProblemStats.DoesNotExist:
        # First submit for this problem: create the stats row lazily.
        stats: ProblemStats = ProblemStats(problem=instance.problem)
    # Rank submits OK (0) < awaiting (1) < other (2), then pick one row per
    # student via order_by(...).distinct('student') (PostgreSQL DISTINCT ON).
    queryset = Submit.objects.annotate(
        ordering=models.Case(
            models.When(status="OK", then=models.Value(0)),
            models.When(status="AW", then=models.Value(1)),
            default=models.Value(2),
            output_field=models.IntegerField()
        )
    ).filter(problem=instance.problem).order_by('student', 'ordering', '-id').distinct('student')
    # Bucket the per-student best submits into accepted / pending / rejected.
    stats.green = len(list((filter(lambda x: x.status == Submit.OK, queryset))))
    stats.yellow = len(list((filter(lambda x: x.status in [Submit.AWAITING_MANUAL, Submit.DEFAULT_STATUS], queryset))))
    stats.red = len(list((filter(
        lambda x: x.status not in [Submit.OK, Submit.AWAITING_MANUAL, Submit.DEFAULT_STATUS],
        queryset
    ))))
    stats.save()
@receiver(post_save, sender=Submit)
def create_log_event(sender, instance: Submit, created, **kwargs):
    """Append a LogEvent for every new Submit and every status change."""
    log_event = LogEvent(problem=instance.problem, student=instance.student)
    if created:
        # Brand-new submit: log a TYPE_SUBMIT event authored by the student.
        log_event.type = LogEvent.TYPE_SUBMIT
        log_event.submit = instance
        log_event.author = instance.student
        log_event.data = dict(message=instance.id)
        log_event.save()
        return
    # Only saves that explicitly list 'status' in update_fields are treated
    # as status transitions; other field updates produce no log entry.
    if kwargs['update_fields'] and 'status' in kwargs['update_fields']:
        log_event.type = LogEvent.TYPE_STATUS_CHANGE
        log_event.submit = instance
        if log_event.submit.updated_by:
            # Attribute the change to the reviewer when known.
            log_event.author = log_event.submit.updated_by
        log_event.data = dict(message=f'Статус изменён на {instance.status}')
        log_event.save()
        return
# NOTE(review): both handlers are already registered by their @receiver
# decorators above.  Django's Signal.connect dedupes on (receiver, sender),
# so these explicit calls are no-ops and the weak=False flag never takes
# effect — consider removing either the decorators or these two lines.
post_save.connect(update_problem_status, weak=False, sender=Submit)
post_save.connect(create_log_event, weak=False, sender=Submit)
|
nilq/baby-python
|
python
|
# Read the trip distance and average speed, then report the travel time
# broken into whole hours and minutes.
distance_km = float(input('Informe a distância em KM: '))
avg_speed = float(input('Informe a velocidade média: '))
total_hours = distance_km / avg_speed
whole_hours = total_hours // 1
minutes = (total_hours - whole_hours) * 60
print(f'O tempo da viagem será de {whole_hours:.0f} horas e {minutes:.0f} minutos.')
|
nilq/baby-python
|
python
|
# from .runner import main
|
nilq/baby-python
|
python
|
import re
from typing import List, Dict, Type
import pkgutil
import inspect
import importlib
import HABApp
from HABApp.core import Items
from HABApp.core.items.base_item import BaseItem
import zone_api.core.actions as actions
from zone_api import platform_encapsulator as pe
from zone_api import device_factory as df
from zone_api.alert_manager import AlertManager
from zone_api.core.action import Action
from zone_api.core.devices.activity_times import ActivityTimes
from zone_api.core.devices.gas_sensor import NaturalGasSensor, SmokeSensor, Co2GasSensor, RadonGasSensor
from zone_api.core.immutable_zone_manager import ImmutableZoneManager
from zone_api.core.zone import Zone, Level
from zone_api.core.zone_manager import ZoneManager
from zone_api.core.neighbor import NeighborType, Neighbor
"""
This module contains functions to construct an ImmutableZoneManager using the following convention
for the OpenHab items.
1. The zones are defined as a String item with this pattern Zone_{name}:
String Zone_GreatRoom
{ level="FF", displayIcon="player", displayOrder="1",
openSpaceSlaveNeighbors="FF_Kitchen" }
- The levels are the reversed mapping of the enums in Zone::Level.
- Here are the list of supported attributes: level, external, openSpaceNeighbors,
openSpaceMasterNeighbors, openSpaceSlaveNeighbors, displayIcon, displayOrder.
2. The individual OpenHab items are named after this convention:
{zone_id}_{device_type}_{device_name}.
Here's an example:
Switch FF_Office_LightSwitch "Office Light" (gWallSwitch, gLightSwitch, gFirstFloorLightSwitch)
[shared-motion-sensor]
{ channel="zwave:device:9e4ce05e:node8:switch_binary", durationInMinutes="15" }
"""
def parse(activity_times: ActivityTimes, actions_package: str = "zone_api.core.actions",
          actions_path: List[str] = actions.__path__) -> ImmutableZoneManager:
    """
    - Parses the zones and devices from the remote OpenHab items (via the REST API).
    - Adds devices to the zones.
    - Adds default actions to the zones.
    - For each action, invoke Action::on_startup method.
    - Start the scheduler service.
    :return:
    """
    # Item-name regex -> device factory.  '[^g]' prefixes exclude OpenHab
    # group items (conventionally named 'g...'); negative lookaheads skip
    # items whose temperature/humidity belongs to computers or weather.
    mappings = {
        '.*AlarmPartition$': df.create_alarm_partition,
        '.*_ChromeCast$': df.create_chrome_cast,
        '.*Door$': df.create_door,
        '[^g].*_Window$': df.create_window,
        '.*_Camera$': df.create_camera,
        '[^g].*MotionSensor$': df.create_motion_sensor,
        '[^g].*LightSwitch.*': df.create_switches,
        '.*FanSwitch.*': df.create_switches,
        '.*Wled_MasterControls.*': df.create_switches,
        '[^g].*_Illuminance.*': df.create_illuminance_sensor,
        '[^g](?!.*Weather).*Humidity$': df.create_humidity_sensor,
        '[^g].*_NetworkPresence.*': df.create_network_presence_device,
        '[^g].*_Plug$': df.create_plug,
        '[^g].*_Co2$': df.create_gas_sensor(Co2GasSensor),
        '[^g].*_NaturalGas$': df.create_gas_sensor(NaturalGasSensor),
        '[^g].*_RadonGas$': df.create_gas_sensor(RadonGasSensor),
        '[^g].*_Smoke$': df.create_gas_sensor(SmokeSensor),
        '.*_Tv$': df.create_television_device,
        '.*_Thermostat_EcobeeName$': df.create_ecobee_thermostat,
        # not matching "FF_Office_Computer_Dell_GpuTemperature"
        '[^g](?!.*Computer)(?!.*Weather).*Temperature$': df.create_temperature_sensor,
        '[^g].*WaterLeakState$': df.create_water_leak_sensor,
        '[^g].*_TimeOfDay$': df.create_astro_sensor,
        '.*_Computer_[^_]+$': df.create_computer,
        '.*_Weather_Temperature$': df.create_weather,
    }

    zm: ZoneManager = ZoneManager()
    immutable_zm = zm.get_immutable_instance()
    immutable_zm = immutable_zm.set_alert_manager(AlertManager())

    # Zone id -> Zone; zones are immutable, so every mutation below replaces
    # the stored instance.
    zone_mappings = {}
    for zone in _parse_zones():
        zone_mappings[zone.get_id()] = zone

    # Match every OpenHab item against the factory table and attach the
    # created device to the zone encoded in the item's name.
    items: List[BaseItem] = Items.get_all_items()
    for item in items:
        for pattern in mappings.keys():
            device = None
            if re.match(pattern, item.name) is not None:
                device = mappings[pattern](immutable_zm, item)
            if device is not None:
                zone_id = df.get_zone_id_from_item_name(item.name)
                if zone_id is None:
                    pe.log_warning("Can't get zone id from item name '{}'".format(item.name))
                    continue
                if zone_id not in zone_mappings.keys():
                    pe.log_warning("Invalid zone id '{}'".format(zone_id))
                    continue
                zone = zone_mappings[zone_id].add_device(device)
                zone_mappings[zone_id] = zone

    # Add specific devices to the Virtual Zone
    zone = next((z for z in zone_mappings.values() if z.get_name() == 'Virtual'), None)
    if zone is not None:
        zone = zone.add_device(activity_times)
        zone_mappings[zone.get_id()] = zone

    # Discover action classes and wire them into the matching zones.
    action_classes = get_action_classes(actions_package, actions_path)
    zone_mappings = add_actions(zone_mappings, action_classes)

    for z in zone_mappings.values():
        zm.add_zone(z)

    # Triggers Action::on_startup hooks and starts the scheduler service.
    immutable_zm.start()

    return immutable_zm
def _parse_zones() -> List[Zone]:
    """
    Parses items with the zone pattern in the name and constructs the associated Zone objects.
    :return: List[Zone]
    """
    pattern = 'Zone_([^_]+)'
    zones: List[Zone] = []
    items = Items.get_all_items()
    for item in items:
        match = re.search(pattern, item.name)
        if not match:
            continue
        zone_name = match.group(1)
        # Request every metadata namespace this function reads below.
        # Fix: 'closeSpaceNeighbors' was missing from the original request even
        # though neighbor_type_mappings reads it, so those neighbours were lost.
        item_def = HABApp.openhab.interface.get_item(
            item.name,
            "level, external, closeSpaceNeighbors, openSpaceNeighbors, openSpaceMasterNeighbors, "
            "openSpaceSlaveNeighbors, displayIcon, displayOrder")
        metadata = item_def.metadata
        level = Level(df.get_meta_value(metadata, "level"))
        external = df.get_meta_value(metadata, "external", False)
        display_icon = df.get_meta_value(metadata, "displayIcon", '')
        display_order = int(df.get_meta_value(metadata, "displayOrder", 9999))
        zone = Zone(zone_name, [], level, [], {}, external, display_icon, display_order)
        # Metadata namespace name -> neighbour type for that namespace.
        neighbor_type_mappings = {
            'closeSpaceNeighbors': NeighborType.CLOSED_SPACE,
            'openSpaceNeighbors': NeighborType.OPEN_SPACE,
            'openSpaceMasterNeighbors': NeighborType.OPEN_SPACE_MASTER,
            'openSpaceSlaveNeighbors': NeighborType.OPEN_SPACE_SLAVE,
        }
        for neighbor_type_str, neighbor_type in neighbor_type_mappings.items():
            neighbor_str = df.get_meta_value(metadata, neighbor_type_str)
            if neighbor_str is None:
                continue
            # Neighbour ids are stored as a comma-separated list.
            for neighbor_id in neighbor_str.split(','):
                neighbor = Neighbor(neighbor_id.strip(), neighbor_type)
                zone = zone.add_neighbor(neighbor)
        zones.append(zone)
    return zones
def add_actions(zone_mappings: Dict, action_classes: List[Type]) -> Dict:
    """
    Create action instances from action_classes and add them to the zones.
    A set of filters are applied to ensure that only the applicable actions are added to each zone.
    As the Zone class is immutable, a new Zone instance is created after adding an action. As such, a zone_mappings
    dictionary must be provided; it is updated in place and also returned.
    :param zone_mappings: mappings from zone_id string to a Zone instance.
    :param action_classes: the list of action types.
    :return: the updated zone_mappings dictionary.
    """
    for clazz in action_classes:
        action: Action = clazz()
        # Snapshot the values: the loop body replaces entries in zone_mappings,
        # and mutating a dict while iterating its live .values() view is fragile.
        for zone in list(zone_mappings.values()):
            if not _can_add_action_to_zone(zone, action):
                continue
            # Stateful actions need a fresh instance per zone; stateless ones
            # may share a single instance across all zones.
            if action.must_be_unique_instance():
                zone = zone.add_action(clazz())
            else:
                zone = zone.add_action(action)
            zone_mappings[zone.get_id()] = zone
    return zone_mappings
def _can_add_action_to_zone(zone: Zone, action: Action) -> bool:
    """
    Determine whether an action may be attached to a zone.
    The zone must contain every device type the action requires and satisfy the
    action's internal/external, level and zone-name-pattern constraints.
    :return: True when every filter passes
    """
    # The zone must have at least one device of every required type.
    for device_type in action.get_required_devices():
        if len(zone.get_devices_by_type(device_type)) == 0:
            return False
    # Internal/external applicability filters.
    if zone.is_internal() and not action.is_applicable_to_internal_zone():
        return False
    if zone.is_external() and not action.is_applicable_to_external_zone():
        return False
    # An empty level list means "applies to every level".
    applicable_levels = action.get_applicable_levels()
    if len(applicable_levels) > 0 and zone.get_level() not in applicable_levels:
        return False
    # Optional regex restriction on the zone name.
    zone_name_pattern = action.get_applicable_zone_name_pattern()
    if zone_name_pattern is not None and not re.search(zone_name_pattern, zone.get_name()):
        return False
    return True
def get_action_classes(actions_package: str = "zone_api.core.actions",
                       actions_path: List[str] = actions.__path__) -> List[Type]:
    """
    Retrieve a list of action class types defined in the actions_path with the given actions_package.
    To avoid loading the non-action classes (the package might contain helper modules), the following restrictions
    are used:
      1. The normalized action name must be the same as the normalized module name.
         e.g. action 'ManagePlugs' is defined in the file 'manage_plugs.py'.
      2. The class defined in the module must be an instance of 'Action'.
    :param str actions_package: the package of the action classes.
    :param str actions_path: the absolute path to the action classes.
    :return: the list of matching action class types.
    """
    classes = []
    for importer, module_name, is_pkg in pkgutil.iter_modules(actions_path):
        module = importlib.import_module(f"{actions_package}.{module_name}")
        # Hoisted out of the member loop: it depends only on the module name.
        normalized_module_name = module_name.replace('_', '').lower()
        for name, value in inspect.getmembers(module, inspect.isclass):
            if name.lower() != normalized_module_name:
                continue
            try:
                clazz = getattr(module, name)
                # Instantiate to confirm the class really is an Action; classes
                # whose constructor requires arguments are skipped (TypeError).
                if isinstance(clazz(), Action):
                    classes.append(clazz)
            except (AttributeError, TypeError):
                pass
    return classes
|
nilq/baby-python
|
python
|
from unittest import TestCase
from unittest.mock import patch, call
import os
import pytest
from osbot_utils.utils.Dev import pprint
from osbot_utils.utils.Json import json_load_file
from cdr_plugin_folder_to_folder.processing.Analysis_Elastic import Analysis_Elastic
from cdr_plugin_folder_to_folder.utils.testing.Setup_Testing import Setup_Testing
# Directory holding the test fixture files, resolved relative to this module.
FIXTURE_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'test_files',
)
class test_Report_Elastic(TestCase):
    """
    Integration tests for Analysis_Elastic against a live Elastic server.
    NOTE(review): the class name says 'Report_Elastic' but the code under test
    is Analysis_Elastic — confirm which component this suite is meant to cover.
    All tests are skipped when no Elastic server is reachable (see setUp).
    """
    # Kept for parity with sibling suites; not used by these tests.
    test_file = None
    @classmethod
    def setUpClass(cls) -> None:
        # SHA-256 of the fixture file; also the key into analysis.json.
        cls.file_hash = '458d2ceb57b1bac2866c43e16cc9392b017aa48f0689876df25399d0f7ad198c'
    @classmethod
    def tearDownClass(cls) -> None:
        pass
    def setUp(self) -> None:
        # Load the analysis fixture and connect to Elastic; skip when unavailable.
        analysis_file_path = os.path.join(FIXTURE_DIR, 'analysis.json')
        assert os.path.isfile(analysis_file_path)
        self.analysis_data = json_load_file(analysis_file_path)
        assert self.analysis_data is not None
        # The fixture is expected to contain exactly 6 top-level entries.
        assert len(self.analysis_data ) == 6
        self.original_hash = self.analysis_data[self.file_hash]['original_hash']
        assert self.original_hash == self.file_hash
        self.analysis_elastic = Analysis_Elastic()
        self.analysis_elastic.setup()
        if self.analysis_elastic.enabled is False:
            pytest.skip('Elastic server not available')
    def test_add_analysis(self):
        # Round-trip: add an analysis, read it back, delete it, confirm it is gone.
        analysis_add_report = self.analysis_elastic.add_analysis(self.analysis_data)
        assert analysis_add_report.get('_shards').get('successful') == 1
        assert self.analysis_elastic.get_analysis (original_hash=self.original_hash) == self.analysis_data[self.file_hash]
        assert self.analysis_elastic.delete_analysis(original_hash=self.original_hash).get('result') == 'deleted'
        assert self.analysis_elastic.get_analysis (original_hash=self.original_hash) == {}
    def test_clear_all_report(self):
        # After a bulk delete the index must report zero analyses.
        self.analysis_elastic.delete_all_analysis()
        assert len(self.analysis_elastic.get_all_analysis()) == 0
|
nilq/baby-python
|
python
|
from seleniumbase import BaseCase
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from parameterized import parameterized
import pytest
from test_thermal.test_CompanyAdmin.test_RoleManagement.RoleManageBase import RoleManageBase
from utilities import utilities
from utilities.Authority import Authority
from config.constants import *
class TestRead(RoleManageBase):
    """
    Regression tests for reading company-admin roles and their authority info.
    Test data is loaded at class-definition time so @parameterized.expand can
    consume it while the class body executes.
    """
    # Test-number base used to derive this suite's scenario marker ids.
    TNB = COMPANYADMIN_ROLE_READ_TNB
    datafile = COMPANYADMIN_ROLE_READ_DATA_FILE
    # Rows for the 'normal' read scenario, parsed from the CSV data file.
    test_data_read_normal = utilities.gen_testdata(datafile, delimiter=',', header=0, scenario='normal')
    @parameterized.expand(test_data_read_normal)
    @pytest.mark.scenario_regression_companyadmin(TNB + 1)
    def test_read_normal(self, companyname, rolename, expectation):
        # Read a role and compare the outcome against the expectation column.
        self.role_read(companyname, rolename, expectation)
    # Authority fixture is fetched when the class body runs; rows may be empty.
    authority = Authority()
    test_data_read_check_authority_info = authority.get_role_authority_info()
    @parameterized.expand(test_data_read_check_authority_info, skip_on_empty=True) # skip_on_empty=True doesn't work
    @pytest.mark.scenario_regression_companyadmin(TNB + 2)
    @pytest.mark.scenario_regression(TNB + 1)
    def test_read_check_authority_info(self, company, role, email_list):
        # Verify the role's authority email list matches the fetched fixture.
        self.check_authority_info(company=company, role=role, email_list=email_list, expectation="pass")
|
nilq/baby-python
|
python
|
"""
Here we're going to code for the local rotations. We're doing an object oriented approach
Left and right are in reference to the origin
"""
__version__ = 1.0
__author__ = 'Katie Kruzan'
import string # just to get the alphabet easily iterable
import sys # This just helps us in our printing
from typing import Dict # This helps us in our documentation
# Getting the structure for the classes we're putting together
class Segment:
    """
    Represents an outer segment of the circle and the mysteries it holds.
    Each segment sits between two outer nodes (left and right with respect
    to the origin).
    """
    def __init__(self, name: str):
        """
        Create a segment with no neighbours attached yet.
        :param name: identifier for this segment; by convention a negative integer as a string
        """
        self.leftOuter = None
        self.rightOuter = None
        self.name = name

    def getName(self) -> str:
        """Return this segment's identifier."""
        return self.name

    def getLeftOuter(self):
        """Return the outer node on this segment's origin-side left."""
        return self.leftOuter

    def getRightOuter(self):
        """Return the outer node on this segment's origin-side right."""
        return self.rightOuter

    def setLeftOuter(self, left):
        """
        Attach ``left`` as this segment's left outer node; when that node has
        no right segment yet, back-link it to this segment.
        :param left: outer node object
        :return: None
        """
        self.leftOuter = left
        if left.getRightSegment() is None:
            left.setRightSegment(self)

    def setRightOuter(self, right):
        """
        Attach ``right`` as this segment's right outer node; when that node has
        no left segment yet, back-link it to this segment.
        :param right: outer node object
        :return: None
        """
        self.rightOuter = right
        if right.getLeftSegment() is None:
            right.setLeftSegment(self)

    def isValidObject(self) -> bool:
        """Return True when both neighbouring outer nodes have been attached."""
        return self.leftOuter is not None and self.rightOuter is not None

    def toString(self) -> str:
        """Return a human-readable description of this segment's neighbours."""
        return f'left Outer: {self.leftOuter.getName()}\nright Outer: {self.rightOuter.getName()}'
class Outer:
    """
    An outer vertex: adjacent to one inner vertex and flanked by two outer
    segments.
    """
    def __init__(self, name: str):
        """
        Create an outer node with no connections yet.
        :param name: identifier; by convention a positive integer as a string
        """
        self.adjInner = None
        self.leftSegment = None
        self.rightSegment = None
        self.name = name

    def getName(self) -> str:
        """Return this outer node's identifier."""
        return self.name

    def getLeftSegment(self) -> Segment:
        """Return the segment on this node's origin-side left."""
        return self.leftSegment

    def getRightSegment(self) -> Segment:
        """Return the segment on this node's origin-side right."""
        return self.rightSegment

    def getAdjInner(self):
        """Return the inner node adjacent to this outer node."""
        return self.adjInner

    def setLeftSegment(self, left: Segment):
        """
        Attach ``left`` as this node's left segment; back-link it when its
        right outer slot is still empty.
        :param left: segment object
        :return: None
        """
        self.leftSegment = left
        if left.getRightOuter() is None:
            left.setRightOuter(self)

    def setRightSegment(self, right: Segment):
        """
        Attach ``right`` as this node's right segment; back-link it when its
        left outer slot is still empty.
        :param right: segment object
        :return: None
        """
        self.rightSegment = right
        if right.getLeftOuter() is None:
            right.setLeftOuter(self)

    def setAdjInner(self, inner):
        """
        Attach ``inner`` as this node's adjacent inner node; back-link it when
        its outer slot is still empty.
        :param inner: inner node object
        :return: None
        """
        self.adjInner = inner
        if inner.getAdjOuter() is None:
            inner.setAdjOuter(self)

    def isValidObject(self) -> bool:
        """Return True when both segments and the adjacent inner node are attached."""
        return not (self.leftSegment is None or self.rightSegment is None or self.adjInner is None)

    def toString(self) -> str:
        """Return a human-readable description of this node's connections."""
        return (f'left Segment: {self.leftSegment.getName()}'
                f'\nright Segment: {self.rightSegment.getName()}'
                f'\nadj Inner: {self.adjInner.getName()}')
class Inner:
    """
    An inner vertex: adjacent to one outer vertex and linked to the inner
    vertices on either side of it.
    """
    def __init__(self, name: str):
        """
        Create an inner node with no connections yet.
        :param name: identifier; by convention a lowercase letter
        """
        self.adjOuter = None
        self.leftInner = None
        self.rightInner = None
        self.name = name

    def getName(self) -> str:
        """Return this inner node's identifier."""
        return self.name

    def getLeftInner(self):
        """Return the inner node on this node's origin-side left."""
        return self.leftInner

    def getRightInner(self):
        """Return the inner node on this node's origin-side right."""
        return self.rightInner

    def getAdjOuter(self) -> Outer:
        """Return the outer node adjacent to this inner node."""
        return self.adjOuter

    def setLeftInner(self, left):
        """
        Attach ``left`` as this node's left inner neighbour; back-link it when
        its right inner slot is still empty.
        :param left: inner node object
        :return: None
        """
        self.leftInner = left
        if left.getRightInner() is None:
            left.setRightInner(self)

    def setRightInner(self, right):
        """
        Attach ``right`` as this node's right inner neighbour; back-link it
        when its left inner slot is still empty.
        :param right: inner node object
        :return: None
        """
        self.rightInner = right
        if right.getLeftInner() is None:
            right.setLeftInner(self)

    def setAdjOuter(self, outer: Outer):
        """
        Attach ``outer`` as this node's adjacent outer node; back-link it when
        its inner slot is still empty.
        :param outer: outer node object
        :return: None
        """
        self.adjOuter = outer
        if outer.getAdjInner() is None:
            outer.setAdjInner(self)

    def isValidObject(self) -> bool:
        """Return True when both inner neighbours and the adjacent outer node are attached."""
        return not (self.leftInner is None or self.rightInner is None or self.adjOuter is None)

    def toString(self) -> str:
        """Return a human-readable description of this node's connections."""
        return (f'left Inner: {self.leftInner.getName()}'
                f'\nright Inner: {self.rightInner.getName()}'
                f'\nadj Outer: {self.adjOuter.getName()}')
def standardCircle(num_verts: int) -> (Dict[str, Segment], Dict[str, Outer], Dict[str, Inner]):
    """
    This will go through and initialize our standard starting circle
    :param num_verts: the number of outer nodes we will have
        (must be <= 52: inner nodes are labelled with string.ascii_letters)
    :returns: tuple(segs, outs, inns)
        -segs - dictionary of str: Segment objects in the circle \\
        -outs - dictionary of str: Outer objects in the circle \\
        -inns - dictionary of str: Inner objects in the circle
    """
    # Initializing our dictionaries
    segs = dict()
    outs = dict()
    inns = dict()
    # Running through the number of vertices we will be ending up with
    for i in range(num_verts):
        # start with an inner node - labeling with lowercase letters ('a', 'b', ...)
        inn = Inner(string.ascii_letters[i])
        # If we aren't on the first one, connect it to the previous one.
        if i != 0:
            inn.setLeftInner(inns[string.ascii_letters[i - 1]])
        # If we've hit the end of the line, go ahead and close up the circle.
        if i == num_verts - 1:
            inn.setRightInner(inns[string.ascii_letters[0]])
        # then make the outer (named "1".."num_verts")
        out = Outer(str(i + 1))
        # Go ahead and connect the inner we just made with this outer node
        out.setAdjInner(inn)
        # If we aren't on the first one, connect it to the previous iteration's segment
        if i != 0:
            out.setLeftSegment(segs[str(-i)])
        # Now time to make the segment (named "-1".."-num_verts")
        seg = Segment(str(-i - 1))
        # Go ahead and connect the outer node we just made with this segment
        seg.setLeftOuter(out)
        # If we're at the end of the circle, then we close it up. Otherwise, move on
        if i == num_verts - 1:
            seg.setRightOuter(outs[str(1)])
        # add them to our dictionaries, keyed by their names
        segs[seg.getName()] = seg
        outs[out.getName()] = out
        inns[inn.getName()] = inn
    # If we've made it here, then we've made the full circle and are ready to return it
    return segs, outs, inns
def findTheFace(source_in: Inner) -> list:
    """
    This will take an inner node and use the algorithm to walk the face that it is on.
    The order of the face will be i, o, s, o, i repeat
    Note: the final loop iteration appends the starting inner node again, so the
    last element of the returned list is the same object as the first.
    :param source_in: Inner node object we are starting from.
    :return: face: a list representing the face. This list is of inner, outer, and segment objects in the
        order i, o, s, o, i, repeat.
    """
    # initialize the list
    face = list()
    # starting the face with the source inner node.
    face.append(source_in)
    # initialize the ending inner node we will be using for comparison
    end_in = None
    # As long as we haven't looped back around, go through the following process.
    while source_in != end_in:
        # inner: find adjacent outer
        face.append(face[-1].getAdjOuter())
        # outer: go to right seg
        face.append(face[-1].getRightSegment())
        # segment: go to right outer
        face.append(face[-1].getRightOuter())
        # outer: then adj inner
        face.append(face[-1].getAdjInner())
        # then left inner and repeat.
        # set this inner node as our node to compare to our starting node.
        end_in = face[-1].getLeftInner()
        face.append(end_in)
    return face
def faceCannonOrder(face: list) -> list:
    """
    Just list the face with the face elements in order.
    We will do it with the first numerical face, and then go right before it for an order that will be consistent.
    :param face: a list representing the face. This list is of inner, outer, and segment objects in the
        order i, o, s, o, i, repeat.
    :return: ordered face in canonical order
    """
    # find the first numerical face then go right before it
    # initialize face num as a relatively high number we won't encounter
    # NOTE(review): 333 is an 'infinity' sentinel — assumes no numeric name reaches 333.
    facenum = 333
    # initialize the int for where we will split the list
    start_ind = 0
    # loop through and find the face we want to find
    for i in range(len(face)):
        try:
            if int(face[i].getName()) < facenum:
                # To get here, we must have found a lower face
                # keep track of where this is located in the list (one element before it)
                start_ind = i - 1
                # make our current lowest face the new lowest face to keep comparing to.
                facenum = int(face[i].getName())
        # if we try casting a letter to a number, python will get upset, but that also means we're looking at
        # an inner node, which we don't want for this anyways.
        except ValueError:
            continue
    # make our ordered face getting from the starting index to the end, then wrapping around and getting the rest of
    # the face
    ord_face = face[start_ind:] + face[:start_ind]
    # go through and make sure we don't have any duplicate elements right by each other. If we do, then drop them.
    # NOTE(review): only the first adjacent duplicate pair is removed (break after pop);
    # the face walk repeats its starting element, so exactly one duplicate is expected.
    for i in range(len(ord_face) - 1):
        if ord_face[i].toString() == ord_face[i + 1].toString():
            ord_face.pop(i)
            break
    # return the ordered face
    return ord_face
def grabAllTheFaces(inns: Dict[str, Inner]) -> list:
    """
    Collect the distinct faces of the circle, each put in canonical order.
    :param inns: dictionary of Inner objects whose faces will be walked
    :return: faces: list of distinct faces in canonical order
    """
    # the faces found so far
    faces = list()
    # every element seen on some face, used for the completeness check below
    covered = set()
    for key in inns:
        # walk the face this inner node lies on, then canonicalize it
        canonical = faceCannonOrder(findTheFace(inns[key]))
        # skip faces we've already captured
        if canonical in faces:
            continue
        faces.append(canonical)
        covered.update(canonical)
    # every inner node contributes one inner, one outer and one segment
    if len(covered) == (3 * len(inns)):
        print('We got em!!!')
    return faces
def printCircleStatus(segs: Dict[str, Segment], outs: Dict[str, Outer], inns: Dict[str, Inner]):
    """
    Helper function that dumps the full connection state of the circle to the console.
    :param segs: dictionary of str: Segment objects in the circle
    :param outs: dictionary of str: Outer objects in the circle
    :param inns: dictionary of str: Inner objects in the circle
    :return: None
    """
    # Print each group under its header: key on one line, description below it.
    for header, group in (('\nSegments:', segs), ('\nOuters:', outs), ('\nInners:', inns)):
        print(header)
        for key in group:
            print()
            print(key)
            print(group[key].toString())
if __name__ == '__main__':
    # This is where you change the variables.
    # must be a positive integer > 2 (and <= 52: inner nodes are labelled with ascii letters)
    verts = 12
    # Must be a string with spaces between each element. If you want to denote multiple cycles, you must add a |
    switch_txt = '2 3 4 5 | 12 7'
    # we're going to make a list of all the switches and all the cycles
    switches = list()
    # first, we get the cycles, split by '|'
    cycles = switch_txt.split('|')
    for c in cycles:
        # We're going to split the switch into a list split by the whitespace
        s = c.strip().split()
        # Then we're going to append the switches in the cycle to the new list
        switches.append(s)
    # Go ahead and make the standard circle given the number of vertices we want to use.
    segments, outers, inners = standardCircle(verts)
    # Go through and grab the faces for our standard circle
    facs = grabAllTheFaces(inners)
    print('\nPrinting the faces')
    for f in facs:
        print()
        for p in f:
            sys.stdout.write(p.getName() + ' ')
    # Go through and do the switches for each cycle
    for switch in switches:
        for num in range(len(switch)):
            # store the current part of the switch we're working on
            cs = switch[num]
            # store the next part of the switch we're working on, looping to the beginning if we're at the end
            ns = switch[(num + 1) % len(switch)]
            # Do the actual switch
            # Getting the new inner and outer validly switched up
            # (switch entries are outer-node names, i.e. 1-based integers)
            inners[string.ascii_letters[int(cs) - 1]].setAdjOuter(outers[ns])
            outers[ns].setAdjInner(inners[string.ascii_letters[int(cs) - 1]])
    # print how the final rotation sits
    printCircleStatus(segments, outers, inners)
    # Go through and generate and print the new faces
    new_facs = grabAllTheFaces(inners)
    print('\nPrinting the new faces')
    for f in new_facs:
        print()
        for p in f:
            sys.stdout.write(p.getName() + ' ')
|
nilq/baby-python
|
python
|
from controller.csi_general import csi_pb2
# Filesystem types the driver can create on a volume.
SUPPORTED_FS_TYPES = ["ext4", "xfs"]
access_mode = csi_pb2.VolumeCapability.AccessMode
# Only single-node-writer volumes are supported.
SUPPORTED_ACCESS_MODE = [access_mode.SINGLE_NODE_WRITER]
# VolumeCapabilities fields which specify if it is volume with fs or raw block volume
VOLUME_CAPABILITIES_FIELD_ACCESS_TYPE_MOUNT = 'mount'
VOLUME_CAPABILITIES_FIELD_ACCESS_TYPE_BLOCK = 'block'
# Keys expected in the secret that holds storage-array credentials.
SECRET_USERNAME_PARAMETER = "username"
SECRET_PASSWORD_PARAMETER = "password"
SECRET_ARRAY_PARAMETER = "management_address"
# StorageClass parameter names.
PARAMETERS_POOL = "pool"
PARAMETERS_CAPABILITIES_SPACEEFFICIENCY = "SpaceEfficiency"
PARAMETERS_VOLUME_NAME_PREFIX = "volume_name_prefix"
PARAMETERS_SNAPSHOT_NAME_PREFIX = "snapshot_name_prefix"
# Delimiters used when encoding/decoding IDs and parameter strings.
PARAMETERS_CAPACITY_DELIMITER = "="
PARAMETERS_CAPABILITIES_DELIMITER = "="
PARAMETERS_OBJECT_ID_DELIMITER = ":"
PARAMETERS_NODE_ID_DELIMITER = ";"
PARAMETERS_FC_WWN_DELIMITER = ":"
# NOTE(review): despite the plural name this is a count (2), not a collection — confirm intent.
SUPPORTED_CONNECTIVITY_TYPES = 2
OBJECT_TYPE_NAME_VOLUME = "volume"
OBJECT_TYPE_NAME_SNAPSHOT = "snapshot"
VOLUME_SOURCE_SNAPSHOT = "snapshot"
VOLUME_SOURCE_VOLUME = "volume"
|
nilq/baby-python
|
python
|
import pybamm
import numpy as np
import sys
# set logging level
pybamm.set_logging_level("INFO")
# load (1+1D) SPM model: potential-pair current collector, lumped thermal
# NOTE(review): the original comment said SPMe but the constructor below is SPM.
options = {
    "current collector": "potential pair",
    "dimensionality": 1,
    "thermal": "lumped",
}
model = pybamm.lithium_ion.SPM(options)
# create geometry
geometry = model.default_geometry
# load parameter values and process model and geometry
param = model.default_parameter_values
C_rate = 1
# 1C current scaled by the current-collector area (24 A.m-2 nominal)
current_1C = 24 * param.process_symbol(pybamm.geometric_parameters.A_cc).evaluate()
param.update(
    {
        "Typical current [A]": C_rate * current_1C,
        #"Initial temperature [K]": 298.15,
        #"Negative current collector conductivity [S.m-1]": 1e7,
        #"Positive current collector conductivity [S.m-1]": 1e7,
        "Heat transfer coefficient [W.m-2.K-1]": 1,
    }
)
param.process_model(model)
param.process_geometry(geometry)
# set mesh
var = pybamm.standard_spatial_vars
var_pts = {var.x_n: 5, var.x_s: 5, var.x_p: 5, var.r_n: 10, var.r_p: 10, var.z: 15}
# depending on number of points in y-z plane may need to increase recursion depth...
sys.setrecursionlimit(10000)
mesh = pybamm.Mesh(geometry, model.default_submesh_types, var_pts)
# discretise model
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
# solve model -- simulate one hour discharge (time is nondimensional, scaled by tau)
tau = param.evaluate(pybamm.standard_parameters_lithium_ion.tau_discharge)
t_end = 3600 / tau
t_eval = np.linspace(0, t_end, 120)
solution = model.default_solver.solve(model, t_eval)
# plot
output_variables = [
    "X-averaged negative particle surface concentration [mol.m-3]",
    "X-averaged positive particle surface concentration [mol.m-3]",
    "X-averaged cell temperature [K]",
    #"Local potenital difference [V]",
    "Current collector current density [A.m-2]",
    "Terminal voltage [V]",
    "Volume-averaged cell temperature [K]",
]
plot = pybamm.QuickPlot(model, mesh, solution, output_variables)
plot.dynamic_plot()
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, RIXEN@TUM.DE.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
"""
Mapping Module
"""
# -- STANDARD MAPPING --
from .mapping_standard import *
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : download.py
@Time : 2020/11/08
@Author : Yaronzz
@Version : 1.0
@Contact : yaronhuang@foxmail.com
@Desc :
'''
import os
import aigpy
import logging
import lyricsgenius
from tidal_dl.settings import Settings
from tidal_dl.tidal import TidalAPI
from tidal_dl.enum import Type, AudioQuality, VideoQuality
from tidal_dl.model import Track, Video, Album
from tidal_dl.printf import Printf
from tidal_dl.decryption import decrypt_security_token
from tidal_dl.decryption import decrypt_file
# Module-wide Tidal API client; credentials are injected via __loadAPI__/__loadVideoAPI__.
API = TidalAPI()
def __loadAPI__(user):
    """Copy the logged-in user's audio credentials onto the shared API client."""
    key = API.key
    key.accessToken = user.accessToken
    key.userId = user.userid
    key.countryCode = user.countryCode
    # API.key.sessionId = user.sessionid1
def __loadVideoAPI__(user):
    """Copy the logged-in user's video credentials onto the shared API client."""
    key = API.key
    key.accessToken = user.accessToken
    key.userId = user.userid
    key.countryCode = user.countryCode
    # API.key.sessionId = user.sessionid2 if not aigpy.string.isNull(user.sessionid2) else user.sessionid1
def __getIndexStr__(index):
    """
    Format a track index for filenames, zero-padding single digits to two characters.
    :param index: track number (non-negative int)
    :return: e.g. 5 -> "05", 10 -> "10", 123 -> "123"
    """
    # The original had a redundant `index < 99` branch whose body was identical
    # to the final return; both cases collapse to plain str().
    if index < 10:
        return "0" + str(index)
    return str(index)
def __getExtension__(url):
    """Infer the audio/video file extension from a stream URL ('.m4a' by default)."""
    for ext in ('.flac', '.mp4'):
        if ext in url:
            return ext
    return '.m4a'
def __getArtists__(array):
    """Return the names of the given artist objects, preserving order."""
    return [artist.name for artist in array]
def __parseContributors__(roleType, Contributors):
    """
    Collect the names of contributors whose role matches roleType.
    Returns None when the payload is missing or malformed (best-effort parse).
    """
    if Contributors is None:
        return None
    try:
        entries = Contributors['items']
        return [entry['name'] for entry in entries if entry['role'] == roleType]
    except:
        # Deliberate best-effort: any malformed payload yields None.
        return None
# Shared Genius client used for lyric lookups (note: name appears to be a typo for GENIUS).
GEMIUS = lyricsgenius.Genius('vNKbAWAE3rVY_48nRaiOrDcWNLvsxS-Z8qyG5XfEzTOtZvkTfg6P3pxOVlA2BjaW')
def __getLyrics__(trackName, artistName, proxy):
    """Best-effort lyrics lookup via Genius; returns '' on any failure."""
    try:
        # Route the Genius session through the proxy when one is configured.
        if not aigpy.string.isNull(proxy):
            proxy_url = f'http://{proxy}'
            GEMIUS._session.proxies = {
                'http': proxy_url,
                'https': proxy_url,
            }
        return GEMIUS.search_song(trackName, artistName).lyrics
    except:
        return ""
def __setMetaData__(track, album, filepath, contributors, lyrics):
    """
    Write tags and cover art for a downloaded track file.
    :param track: track model supplying title/artists/track numbers
    :param album: album model supplying album-level tags and the cover id
    :param filepath: path of the audio file to tag
    :param contributors: raw contributors payload (used for the Composer tag)
    :param lyrics: lyrics text to embed
    :return: None
    """
    obj = aigpy.tag.TagTool(filepath)
    obj.album = track.album.title
    obj.title = track.title
    # Append the version (e.g. remix name) to the title when present.
    if not aigpy.string.isNull(track.version):
        obj.title += ' (' + track.version + ')'
    obj.artist = __getArtists__(track.artists)
    obj.copyright = track.copyRight
    obj.tracknumber = track.trackNumber
    obj.discnumber = track.volumeNumber
    obj.composer = __parseContributors__('Composer', contributors)
    obj.isrc = track.isrc
    obj.albumartist = __getArtists__(album.artists)
    obj.date = album.releaseDate
    obj.totaldisc = album.numberOfVolumes
    obj.lyrics = lyrics
    # Total track count is only written for single-disc releases.
    if obj.totaldisc <= 1:
        obj.totaltrack = album.numberOfTracks
    # 1280x1280 cover art URL, embedded on save.
    coverpath = API.getCoverUrl(album.cover, "1280", "1280")
    obj.save(coverpath)
    return
def __convertToM4a__(filepath, codec):
    """
    Rename a downloaded '.mp4' audio file to '.m4a'.
    Files with ac4/mha1 codecs, or paths without '.mp4', are returned unchanged.
    """
    if 'ac4' in codec or 'mha1' in codec:
        return filepath
    if '.mp4' not in filepath:
        return filepath
    # Replace any existing target first, then rename in place.
    newpath = filepath.replace('.mp4', '.m4a')
    aigpy.path.remove(newpath)
    os.rename(filepath, newpath)
    return newpath
def __stripPathParts__(stripped_path, separator):
    """
    Strip whitespace around every separator-delimited part of a path.
    :param stripped_path: the path string to clean
    :param separator: the path separator to split on (e.g. '/' or '\\')
    :return: the path with each part stripped, rejoined with the separator
    """
    parts = stripped_path.split(separator)
    # Rejoining with the separator fixes the old index()-based last-part check,
    # which left a stray trailing separator whenever a part appeared twice
    # (list.index returns the FIRST occurrence, so the real last part was missed).
    return separator.join(part.strip() for part in parts).strip()
def __stripPath__(path):
    """Normalize a path by stripping whitespace around both '/' and '\\' separators."""
    for separator in ("/", "\\"):
        path = __stripPathParts__(path, separator)
    return path.strip()
# "{ArtistName}/{Flag} [{AlbumID}] [{AlbumYear}] {AlbumTitle}"
def __getAlbumPath__(conf: Settings, album):
    """
    Build the album download directory from conf.albumFolderFormat.
    Placeholders {ArtistName}, {Flag}, {AlbumID}, {AlbumYear} and {AlbumTitle}
    are substituted; the result is rooted at <downloadPath>/Album/.
    :param conf: user settings (download path, quality, folder format)
    :param album: album model object
    :return: the album directory path
    """
    base = conf.downloadPath + '/Album/'
    artist = aigpy.path.replaceLimitChar(album.artists[0].name, '-')
    # album folder pre: [ME][ID] — M is stripped unless Master quality, E unless explicit tagging
    flag = API.getFlag(album, Type.Album, True, "")
    if conf.audioQuality != AudioQuality.Master:
        flag = flag.replace("M", "")
    if not conf.addExplicitTag:
        flag = flag.replace("E", "")
    if not aigpy.string.isNull(flag):
        flag = "[" + flag + "] "
    sid = str(album.id)
    # album and addyear (year is the trailing component of the release date)
    albumname = aigpy.path.replaceLimitChar(album.title, '-')
    year = ""
    if album.releaseDate is not None:
        year = aigpy.string.getSubOnlyEnd(album.releaseDate, '-')
    # retpath: fall back to the default format when none is configured
    retpath = conf.albumFolderFormat
    if retpath is None or len(retpath) <= 0:
        retpath = Settings.getDefaultAlbumFolderFormat()
    retpath = retpath.replace(R"{ArtistName}", artist.strip())
    retpath = retpath.replace(R"{Flag}", flag)
    retpath = retpath.replace(R"{AlbumID}", sid)
    retpath = retpath.replace(R"{AlbumYear}", year)
    retpath = retpath.replace(R"{AlbumTitle}", albumname.strip())
    retpath = __stripPath__(retpath.strip())
    return base + retpath
def __getAlbumPath2__(conf, album):
    """
    Build the album download directory using the fixed legacy layout
    <downloadPath>/Album/<artist>/<[flags] [id] [year] albumtitle>/.
    :param conf: user settings
    :param album: album model object
    :return: directory path ending with '/'
    """
    # outputdir/Album/artist/
    artist = aigpy.path.replaceLimitChar(album.artists[0].name, '-').strip()
    base = conf.downloadPath + '/Album/' + artist + '/'
    # album folder pre: [ME][ID] — M is stripped unless Master quality, E unless explicit tagging
    flag = API.getFlag(album, Type.Album, True, "")
    if conf.audioQuality != AudioQuality.Master:
        flag = flag.replace("M", "")
    if not conf.addExplicitTag:
        flag = flag.replace("E", "")
    if not aigpy.string.isNull(flag):
        flag = "[" + flag + "] "
    sid = "[" + str(album.id) + "] " if conf.addAlbumIDBeforeFolder else ""
    # album and addyear
    albumname = aigpy.path.replaceLimitChar(album.title, '-').strip()
    year = ""
    if conf.addYear and album.releaseDate is not None:
        year = "[" + aigpy.string.getSubOnlyEnd(album.releaseDate, '-') + "] "
    return base + flag + sid + year + albumname + '/'
def __getPlaylistPath__(conf, playlist):
    """Build the download directory for a playlist: <downloadPath>/Playlist/<title>/."""
    safe_title = aigpy.path.replaceLimitChar(playlist.title, '-')
    return conf.downloadPath + '/Playlist/' + safe_title + '/'
# "{TrackNumber} - {ArtistName} - {TrackTitle}{ExplicitFlag}"
def __getTrackPath__(conf: Settings, track, stream, album=None, playlist=None):
    """Build the full output file path for a track from conf.trackFileFormat.

    Substitutes {TrackNumber}, {ArtistName}, {TrackTitle}, {ExplicitFlag},
    {AlbumYear} and {AlbumTitle} placeholders, then appends the stream's
    file extension.  Fix: the original dereferenced album.title and left
    `base` undefined when album was None.
    """
    base = ''
    if album is not None:
        base = __getAlbumPath__(conf, album) + '/'
        if album.numberOfVolumes > 1:
            base += 'CD' + str(track.volumeNumber) + '/'
    if playlist is not None and conf.usePlaylistFolder:
        base = __getPlaylistPath__(conf, playlist)
    # track number: playlist position wins inside playlist folders
    number = __getIndexStr__(track.trackNumber)
    if playlist is not None and conf.usePlaylistFolder:
        number = __getIndexStr__(track.trackNumberOnPlaylist)
    # artist
    artist = aigpy.path.replaceLimitChar(track.artists[0].name, '-')
    # title (append version when present)
    title = track.title
    if not aigpy.string.isNull(track.version):
        title += ' (' + track.version + ')'
    title = aigpy.path.replaceLimitChar(title, '-')
    # explicit marker
    explicit = "(Explicit)" if conf.addExplicitTag and track.explicit else ''
    # album title and release year — only available when an album was given
    albumname = ''
    year = ""
    if album is not None:
        albumname = aigpy.path.replaceLimitChar(album.title, '-')
        if album.releaseDate is not None:
            year = aigpy.string.getSubOnlyEnd(album.releaseDate, '-')
    # extension derived from the stream url
    extension = __getExtension__(stream.url)
    retpath = conf.trackFileFormat
    if retpath is None or len(retpath) <= 0:
        retpath = Settings.getDefaultTrackFileFormat()
    retpath = retpath.replace(R"{TrackNumber}", number)
    retpath = retpath.replace(R"{ArtistName}", artist.strip())
    retpath = retpath.replace(R"{TrackTitle}", title)
    retpath = retpath.replace(R"{ExplicitFlag}", explicit)
    retpath = retpath.replace(R"{AlbumYear}", year)
    retpath = retpath.replace(R"{AlbumTitle}", albumname.strip())
    retpath = retpath.strip()
    return base + retpath + extension
def __getTrackPath2__(conf, track, stream, album=None, playlist=None):
    """Build a track file path in the legacy fixed layout:
    <base><number><artist><title><explicit><extension>
    """
    if album is not None:
        base = __getAlbumPath__(conf, album)
        if album.numberOfVolumes > 1:
            base += 'CD' + str(track.volumeNumber) + '/'
    if playlist is not None and conf.usePlaylistFolder:
        base = __getPlaylistPath__(conf, playlist)
    # separator between number/artist/title segments
    sep = ' - ' if conf.addHyphen else ' '
    # optional track-number prefix (playlist position inside playlists)
    numberPart = ''
    if conf.useTrackNumber:
        index = track.trackNumberOnPlaylist if playlist is not None else track.trackNumber
        numberPart = __getIndexStr__(index) + sep
    # optional leading artist name
    artistPart = ''
    if conf.artistBeforeTitle:
        artistPart = aigpy.path.replaceLimitChar(track.artists[0].name, '-') + sep
    # explicit marker
    explicitPart = "(Explicit)" if conf.addExplicitTag and track.explicit else ''
    # title (with version suffix when present)
    trackTitle = track.title
    if not aigpy.string.isNull(track.version):
        trackTitle += ' - ' + track.version
    trackTitle = aigpy.path.replaceLimitChar(trackTitle, '-')
    return base + numberPart + artistPart.strip() + trackTitle + explicitPart + __getExtension__(stream.url)
def __getVideoPath__(conf, video, album=None, playlist=None):
    """Build the .mp4 output path for a video.

    Folder preference: album folder, then playlist folder, then the
    generic downloadPath/Video/ directory.
    """
    if album is not None and album.title is not None:
        base = __getAlbumPath__(conf, album)
    elif playlist is not None and conf.usePlaylistFolder:
        base = __getPlaylistPath__(conf, playlist)
    else:
        base = conf.downloadPath + '/Video/'
    # separator between number/artist/title segments
    sep = ' - ' if conf.addHyphen else ' '
    numberPart = __getIndexStr__(video.trackNumber) + sep if conf.useTrackNumber else ''
    artistPart = aigpy.path.replaceLimitChar(video.artists[0].name, '-') + sep if conf.artistBeforeTitle else ''
    explicitPart = "(Explicit)" if conf.addExplicitTag and video.explicit else ''
    titlePart = aigpy.path.replaceLimitChar(video.title, '-')
    return base + numberPart + artistPart.strip() + titlePart + explicitPart + ".mp4"
def __isNeedDownload__(path, url):
    """Return True when the local file is absent/empty or smaller than the remote file."""
    localSize = aigpy.file.getSize(path)
    if localSize <= 0:
        return True
    # re-download only when the local copy is incomplete
    return localSize < aigpy.net.getSize(url)
def __downloadVideo__(conf, video: Video, album=None, playlist=None):
    """Download one video via its m3u8 stream; reports success/failure via Printf.

    Fix: the original called Printf.video(video, stream) before checking the
    API error message, so a failed lookup handed an invalid stream to Printf.
    """
    if video.allowStreaming is False:
        Printf.err("Download failed! " + video.title + ' not allow streaming.')
        return
    msg, stream = API.getVideoStreamUrl(video.id, conf.videoQuality)
    # bail out on an API error *before* using the stream object
    if not aigpy.string.isNull(msg) or stream is None:
        Printf.err(video.title + "." + msg)
        return
    Printf.video(video, stream)
    path = __getVideoPath__(conf, video, album, playlist)
    logging.info("[DL Video] name=" + aigpy.path.getFileName(path) + "\nurl=" + stream.m3u8Url)
    check, msg = aigpy.m3u8.download(stream.m3u8Url, path)
    if check is True:
        Printf.success(aigpy.path.getFileName(path))
    else:
        Printf.err("\nDownload failed!" + msg + '(' + aigpy.path.getFileName(path) + ')')
def __downloadTrack__(conf: Settings, track: Track, album=None, playlist=None):
    """Download one track: fetch the stream, write it, decrypt if needed, tag it.

    Failures are reported via Printf.err; the function never raises.
    Fix: validate the API result *before* handing the stream to Printf.track —
    the original printed a possibly-None stream first.
    """
    try:
        if track.allowStreaming is False:
            Printf.err("Download failed! " + track.title + ' not allow streaming.')
            return
        msg, stream = API.getStreamUrl(track.id, conf.audioQuality)
        if not aigpy.string.isNull(msg) or stream is None:
            Printf.err(track.title + "." + msg)
            return
        Printf.track(track, stream)
        path = __getTrackPath__(conf, track, stream, album, playlist)
        # skip files that are already fully downloaded
        if conf.checkExist and __isNeedDownload__(path, stream.url) == False:
            Printf.success(aigpy.path.getFileName(path) + " (skip:already exists!)")
            return
        logging.info("[DL Track] name=" + aigpy.path.getFileName(path) + "\nurl=" + stream.url)
        tool = aigpy.download.DownloadTool(path + '.part', [stream.url])
        check, err = tool.start(conf.showProgress)
        if not check:
            Printf.err("Download failed! " + aigpy.path.getFileName(path) + ' (' + str(err) + ')')
            return
        # encrypted -> decrypt and remove encrypted file
        if aigpy.string.isNull(stream.encryptionKey):
            os.replace(path + '.part', path)
        else:
            key, nonce = decrypt_security_token(stream.encryptionKey)
            decrypt_file(path + '.part', path, key, nonce)
            os.remove(path + '.part')
        path = __convertToM4a__(path, stream.codec)
        # metadata: contributors and (optionally) lyrics
        contributors = API.getTrackContributors(track.id)
        lyrics = ''
        if conf.addLyrics:
            lyrics = __getLyrics__(track.title, track.artists[0].name, conf.lyricsServerProxy)
        __setMetaData__(track, album, path, contributors, lyrics)
        Printf.success(aigpy.path.getFileName(path))
    except Exception as e:
        Printf.err("Download failed! " + track.title + ' (' + str(e) + ')')
def __downloadCover__(conf, album):
    """Save the 1280x1280 album cover as cover.jpg in the album folder.

    No-op when album is None or no cover url is available.
    """
    if album is None:  # fix: identity comparison instead of `== None`
        return
    path = __getAlbumPath__(conf, album) + '/cover.jpg'
    url = API.getCoverUrl(album.cover, "1280", "1280")
    if url is not None:
        aigpy.net.downloadFile(url, path)
def __saveAlbumInfo__(conf, album, tracks):
    """Write AlbumInfo.txt (album metadata plus a per-CD track listing)
    into the album folder.  No-op when album is None.
    """
    if album is None:  # fix: identity comparison instead of `== None`
        return
    path = __getAlbumPath__(conf, album) + '/AlbumInfo.txt'
    infos = ""
    infos += "[ID] %s\n" % (str(album.id))
    infos += "[Title] %s\n" % (str(album.title))
    infos += "[Artists] %s\n" % (str(album.artist.name))
    infos += "[ReleaseDate] %s\n" % (str(album.releaseDate))
    infos += "[SongNum] %s\n" % (str(album.numberOfTracks))
    infos += "[Duration] %s\n" % (str(album.duration))
    infos += '\n'
    # fix: idiomatic for-range instead of a manual while-True counter
    for volume in range(1, int(album.numberOfVolumes) + 1):
        infos += "===========CD %d=============\n" % volume
        for item in tracks:
            if item.volumeNumber != volume:
                continue
            infos += '{:<8}'.format("[%d]" % item.trackNumber)
            infos += "%s\n" % item.title
    aigpy.file.write(path, infos, "w+")
def __album__(conf, obj):
    """Download every track and video of an album, plus the optional
    AlbumInfo.txt and cover image."""
    Printf.album(obj)
    msg, tracks, videos = API.getItems(obj.id, Type.Album)
    if not aigpy.string.isNull(msg):
        Printf.err(msg)
        return
    if conf.saveAlbumInfo:
        __saveAlbumInfo__(conf, obj, tracks)
    if conf.saveCovers:
        __downloadCover__(conf, obj)
    for track in tracks:
        __downloadTrack__(conf, track, obj)
    for video in videos:
        __downloadVideo__(conf, video, obj)
def __track__(conf, obj):
    """Download a single track, first resolving its full album for cover/metadata."""
    msg, album = API.getAlbum(obj.album.id)
    if conf.saveCovers:
        __downloadCover__(conf, album)
    __downloadTrack__(conf, obj, album)
def __video__(conf, obj):
    """Download a single video; its album (if any) determines the output folder."""
    # Printf.video(obj)
    __downloadVideo__(conf, obj, obj.album)
def __artist__(conf, obj):
    """Download every album of an artist (optionally including EPs/singles).

    Fix: the original called len(albums) before checking the API error
    message, so a failed lookup could crash on an invalid result.
    """
    msg, albums = API.getArtistAlbums(obj.id, conf.includeEP)
    if not aigpy.string.isNull(msg):
        Printf.err(msg)
        return
    Printf.artist(obj, len(albums))
    for item in albums:
        __album__(conf, item)
def __playlist__(conf, obj):
    """Download all tracks (each with its resolved album) and videos of a playlist."""
    Printf.playlist(obj)
    msg, tracks, videos = API.getItems(obj.uuid, Type.Playlist)
    if not aigpy.string.isNull(msg):
        Printf.err(msg)
        return
    for position, track in enumerate(tracks, start=1):
        # resolve the owning album; its error message is intentionally ignored
        _, album = API.getAlbum(track.album.id)
        track.trackNumberOnPlaylist = position
        __downloadTrack__(conf, track, album, obj)
    for video in videos:
        __downloadVideo__(conf, video, None)
def file(user, conf, string):
    """Read a text file of links/IDs and download each non-comment line."""
    txt = aigpy.file.getContent(string)
    if aigpy.string.isNull(txt):
        Printf.err("Nothing can read!")
        return
    for line in txt.split('\n'):
        if aigpy.string.isNull(line):
            continue
        # skip comment ('#') and section-header ('[') lines
        if line.startswith(('#', '[')):
            continue
        start(user, conf, line)
def start(user, conf, string):
    """Parse *string* (space-separated links/IDs/file paths) and download each item.

    A token that is an existing local file delegates to file() (and stops
    processing further tokens); otherwise each token is resolved through the
    API and dispatched by its resolved type.
    """
    __loadAPI__(user)
    if aigpy.string.isNull(string):
        Printf.err('Please enter something.')
        return
    strings = string.split(" ")
    for item in strings:
        if aigpy.string.isNull(item):
            continue
        if os.path.exists(item):
            # a local file of links: process it and stop handling further tokens
            file(user, conf, item)
            return
        msg, etype, obj = API.getByString(item)
        if etype == Type.Null or not aigpy.string.isNull(msg):
            Printf.err(msg + " [" + item + "]")
            return
        if etype == Type.Album:
            __album__(conf, obj)
        if etype == Type.Track:
            __track__(conf, obj)
        if etype == Type.Video:
            # videos require the separate video API session
            __loadVideoAPI__(user)
            __video__(conf, obj)
        if etype == Type.Artist:
            __artist__(conf, obj)
        if etype == Type.Playlist:
            __playlist__(conf, obj)
|
nilq/baby-python
|
python
|
"""
plots how total workseting set increase over time
"""
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
from utils.common import *
import bisect
# Slab class sizes in bytes (memcached-style growth schedule).  When slab
# accounting is enabled, each object is charged the size of the smallest
# slab class that can hold it.
SLAB_SIZES = [96, 120, 152, 192, 240, 304, 384, 480, 600, 752, 944, 1184, 1480, 1856, 2320, 2904, 3632, 4544, 5680, 7104, 8880,
              11104, 13880, 17352, 21696, 27120, 33904, 42384, 52984, 66232, 82792, 103496, 129376, 161720, 202152, 252696,
              315872, 394840, 524288, 655360, 819200, 1024000, 1280000, 1600000, 2000000, 2500000, 3125000, 3906250,
              ]
def _cal_total_workingset_size(trace_reader, window=300, consider_ttl=True, slab_sizes=None):
    """Calculate how the total working-set size changes over time.

    Returns a list with one cumulative working-set size (bytes) per *window*
    seconds of trace time.  When *consider_ttl* is True, objects are evicted
    when their TTL elapses.  When *slab_sizes* is given, each object is
    charged the size of the smallest slab class that fits it.
    Results are cached on disk via save_metadata/load_metadata.
    """
    metadata_name = "ttl_w{}_{}{}_{}.pickle".format(
        window, consider_ttl, "_slab" if slab_sizes is not None else "",
        trace_reader.trace_path.split("/")[-1])
    loaded = load_metadata(metadata_name)
    if loaded is not None:
        return loaded
    ttl_obj = defaultdict(list)   # expiry timestamp -> objects expiring then
    workingset = {}               # obj_id -> charged size
    workingset_size = 0
    workingset_size_list = []
    sz_to_slab_mapping = {}       # memoized object-size -> slab-size lookups
    start_ts, current_ts, last_window_ts = -1, 0, 0
    for req in trace_reader:
        current_ts = req.real_time
        if start_ts == -1:
            start_ts = req.real_time
        if req.op == "set" or req.op == "add":
            if req.obj_id not in workingset:
                sz = req.obj_size
                if slab_sizes is not None:
                    # charge the smallest slab class that holds sz
                    if sz not in sz_to_slab_mapping:
                        # fix: bisect_left keeps an exact slab-size match in its
                        # own class; bisect_right skipped to the next larger one
                        sz_slab = slab_sizes[bisect.bisect_left(slab_sizes, sz)]
                        sz_to_slab_mapping[sz] = sz_slab
                    sz = sz_to_slab_mapping[sz]
                workingset_size += sz
                workingset[req.obj_id] = sz
                if consider_ttl and req.ttl != 0:
                    ttl_obj[current_ts + req.ttl].append(req.obj_id)
        if consider_ttl and current_ts in ttl_obj:
            # evict everything expiring at exactly this timestamp
            # NOTE(review): expiry timestamps with no matching request are
            # never processed — confirm traces have per-second requests
            for obj in ttl_obj[current_ts]:
                workingset_size -= workingset[obj]
                del workingset[obj]
            del ttl_obj[current_ts]
        if (req.real_time - start_ts) % window == 0 and req.real_time != last_window_ts:
            workingset_size_list.append(workingset_size)
            # print("{} append {}".format(req.real_time, workingset_size))
            last_window_ts = req.real_time
    save_metadata(workingset_size_list, metadata_name)
    trace_reader.reset()
    return workingset_size_list
def plot_total_workingset_size(trace_reader, window, consider_ttl=True, slab_sizes=None):
    """Plot working-set size over time (no-ttl vs ttl, optionally slab-charged).

    Returns a dict mapping curve label -> final working-set size (bytes).
    NOTE: `plt` here is the project's wrapper from utils.common (it supports
    set_n_colors / nomarker / no_save_plot_data), not bare matplotlib.
    """
    figname = "{}/{}_{}_workingset".format(FIG_DIR, trace_reader.trace_path.split("/")[-1], window)
    if consider_ttl:
        figname = "{}_ttl".format(figname)
    if slab_sizes is not None and slab_sizes is not False:
        figname = "{}_slab".format(figname)
    # slab_sizes=True selects the default slab table
    if slab_sizes is True:
        slab_sizes = SLAB_SIZES
    n_color = 2
    if slab_sizes:
        n_color = 4
    plt.set_n_colors(n_color)
    ret_dict = {}
    # baseline curve: ignore TTLs and slab charging
    workingset_size_list = _cal_total_workingset_size(trace_reader, window, False, slab_sizes=None)
    plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
             [sz/MB for sz in workingset_size_list], nomarker=True, label="no-ttl")
    ret_dict["no-ttl"] = workingset_size_list[-1]
    if consider_ttl:
        workingset_size_list = _cal_total_workingset_size(trace_reader, window, True, slab_sizes=None)
        plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
                 [sz/MB for sz in workingset_size_list], nomarker=True, label="ttl")
        ret_dict["ttl"] = workingset_size_list[-1]
    if slab_sizes:
        # slab-charged variants, with and without TTL eviction
        workingset_size_list = _cal_total_workingset_size(trace_reader, window, False, slab_sizes=slab_sizes)
        plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
                 [sz/MB for sz in workingset_size_list], nomarker=True, label="no-ttl-slab")
        ret_dict["no-ttl-slab"] = workingset_size_list[-1]
        workingset_size_list = _cal_total_workingset_size(trace_reader, window, True, slab_sizes=slab_sizes)
        plt.plot([i*window/3600 for i in range(len(workingset_size_list))],
                 [sz/MB for sz in workingset_size_list], nomarker=True, label="ttl-slab")
        ret_dict["ttl-slab"] = workingset_size_list[-1]
    # switch to a log scale when ignoring TTLs inflates the curve by >100x
    if "ttl" in ret_dict and ret_dict["no-ttl"]/ret_dict["ttl"] > 100:
        plt.yscale("log")
    plt.xlabel("Time (hour)")
    plt.ylabel("Working set size (MB)")
    # plt.ylabel("Working set size (# million Obj)")
    plt.legend()
    plt.grid(linestyle="--")
    plt.savefig(figname, no_save_plot_data=True)
    plt.clf()
    return ret_dict
if __name__ == "__main__":
    # CLI entry point: plot the working-set growth of one trace file.
    import argparse
    ap = argparse.ArgumentParser()
    ap.add_argument("--trace", type=str, help="trace path")
    ap.add_argument("--window", type=int, default=300, help="window size")
    p = ap.parse_args()
    # TwrShortBinTraceReader is imported from utils.common above
    reader = TwrShortBinTraceReader(p.trace)
    plot_total_workingset_size(reader, p.window)
|
nilq/baby-python
|
python
|
from setuptools import setup
# Minimal package metadata for the `carpet` package.
setup(name='carpet',
      version='2021',
      # description='',
      url='https://cfaed.tu-dresden.de/friedrich-home',
      author='Anton Solovev, Benjamin M Friedrich',
      license='MIT',
      packages=['carpet'],
      zip_safe=False)
|
nilq/baby-python
|
python
|
'''
Convert a neighbors file to human-readable format,
optionally including preferred string expansion.
'''
from hedgepig_logger import log
from .. import nn_io
if __name__ == '__main__':
    def _cli():
        """Parse command-line options; -i/-o/-v are required."""
        import optparse
        parser = optparse.OptionParser(usage='Usage: %prog')
        parser.add_option('-i', '--input', dest='inputf',
            help='(REQUIRED) input neighbors file')
        parser.add_option('-o', '--output', dest='outputf',
            help='(REQUIRED) output remapped neighbors file')
        parser.add_option('-v', '--vocab', dest='vocabf',
            help='(REQUIRED) neighbor ID <-> key mapping file')
        parser.add_option('-k', '--nearest-neighbors', dest='k',
            help='number of nearest neighbors to use in statistics (default: %default)',
            type='int', default=5)
        parser.add_option('-m', '--string-map', dest='string_mapf',
            help='file mapping embedding keys to strings')
        parser.add_option('--with-distances', dest='with_distances',
            action='store_true', default=False,
            help='neighbor files have distance information')
        parser.add_option('-l', '--logfile', dest='logfile',
            help='name of file to write log contents to (empty for stdout)',
            default=None)
        (options, args) = parser.parse_args()
        if not options.inputf:
            parser.error('Must provide --input')
        elif not options.outputf:
            parser.error('Must provide --output')
        elif not options.vocabf:
            parser.error('Must provide --vocab')
        return options
    options = _cli()
    log.start(options.logfile)
    log.writeConfig([
        ('Input neighbors file', options.inputf),
        ('Remapped neighbors file', options.outputf),
        ('Number of nearest neighbors to pull', options.k),
        ('String map file', options.string_mapf),
        ('Vocab file', options.vocabf),
        ('Using distance information', options.with_distances),
    ], 'Neighborhood file remapping')
    # node_map: neighbor IDs <-> embedding keys
    node_map = nn_io.readNodeMap(options.vocabf)
    neighbors = nn_io.readNeighborFile(
        options.inputf,
        k=options.k,
        node_map=node_map,
        with_distances=options.with_distances
    )
    # optional key -> human-readable string expansion
    if options.string_mapf:
        log.writeln('Reading string map from %s...' % options.string_mapf)
        string_map = nn_io.readStringMap(options.string_mapf, lower_keys=True)
        log.writeln('Mapped strings for {0:,} keys.\n'.format(len(string_map)))
        remap_key = lambda key: '%s (%s)' % (key, string_map.get(key, '-UNKNOWN-'))
    else:
        string_map = None
        remap_key = lambda key: key
    log.writeln('Writing remapped neighbor info to %s...' % options.outputf)
    log.track(' >> Wrote {0:,} neighbor sets', writeInterval=100)
    with open(options.outputf, 'w') as stream:
        for (key, nbrs) in neighbors.items():
            # NOTE(review): the elements of nbrs are remapped here AND again
            # via remap_key() in the write loop below — looks like a double
            # remap when a string map is supplied; verify intended output.
            if options.with_distances:
                nbrs = [
                    (remap_key(k), dist)
                    for (k,dist) in nbrs
                ]
            else:
                nbrs = [remap_key(k) for k in nbrs]
            stream.write('--------------------------------\n')
            stream.write('{0}\n'.format(remap_key(key)))
            for nbr_info in nbrs:
                if options.with_distances:
                    stream.write(' {0} --> {1}\n'.format(
                        remap_key(nbr_info[0]),
                        nbr_info[1]
                    ))
                else:
                    stream.write(' {0}\n'.format(remap_key(nbr_info)))
            #nn_io.writeNeighborFileLine(
            # stream,
            # remap_key(key),
            # nbrs,
            # with_distances=options.with_distances
            #)
            log.tick()
    log.flushTracker()
    log.stop()
|
nilq/baby-python
|
python
|
from django.db import models
# Create your models here.
class Book(models.Model):
    """A book record with a title and a decimal price."""
    title = models.CharField(max_length=32)
    # up to 8 digits total, 2 of them after the decimal point
    price = models.DecimalField(max_digits=8, decimal_places=2)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Show a live webcam feed until Esc is pressed or the camera stops delivering frames."""
import cv2
# from opencvutils.video import Camera
cam = cv2.VideoCapture(0)
# cam.init(cameraNumber=0, win=(640, 480))
while True:
    ret, img = cam.read()
    # fix: check the read result instead of wrapping imshow in a bare
    # `except:` — the bare except also swallowed KeyboardInterrupt and
    # hid real errors
    if not ret or img is None:
        break
    cv2.imshow('img', img)
    if cv2.waitKey(1) == 27:
        break  # esc to quit
cam.release()  # fix: release the capture device before exiting
cv2.destroyAllWindows()
print('bye ...')
|
nilq/baby-python
|
python
|
import requests
import json
from xlwt import *

# Fetch the follower list, dump it to JSON, and export it as a spreadsheet.
url = "https://api.github.com/users/andrewbeattycourseware/followers"
filename = 'githubusers.json'
# Follower fields to export, in spreadsheet column order.
FIELDS = [
    "login", "id", "node_id", "avatar_url", "gravatar_id", "url", "html_url",
    "followers_url", "gists_url", "starred_url", "subscriptions_url",
    "organizations_url", "repos_url", "events_url", "received_events_url",
    "type", "site_admin",
]
response = requests.get(url)
data = response.json()
print(data)
for user in data:
    print(user)
# Save the raw JSON payload for reference.
with open(filename, 'w') as f:
    json.dump(data, f, indent=4)
# Write a header row, then one row per follower.
# fix: replaced 34 duplicated ws.write lines with a data-driven loop over FIELDS.
w = Workbook()
ws = w.add_sheet('githubusers')
for col, field_name in enumerate(FIELDS):
    ws.write(0, col, field_name)
for row, user in enumerate(data, start=1):
    for col, field_name in enumerate(FIELDS):
        ws.write(row, col, user[field_name])
w.save('githubusers.xls')
print(response.status_code)
print(response.text)
|
nilq/baby-python
|
python
|
from .cell_level_analysis import CellLevelAnalysis
from .pixel_level_analysis import PixellevelAnalysis
from .feature_extraction import InstanceFeatureExtraction
from .background_extraction import ExtractBackground
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
Copyright (c) 2014, pietro partescano
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Redis.Cache.Py nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import redis
from redis.cache import config, errors, utilities
class _RedisDal(object):
"""
"""
def __init__(self):
self._db = redis.StrictRedis(host=config.RedisConnectionString, port=config.RedisConnectionStringPort, db=config.RedisDatabase)
pass
def SetTTL(self, key, ttl):
if( key is None or str(key).strip() == ""):
raise errors.ArgumentError("Parameter is invalid (key)")
try:
result = self._db.expire(key, ttl)
return result
except (Exception):
raise
finally:
pass
pass
def DeleteTTL(self, key):
if( key is None or str(key).strip() == ""):
raise errors.ArgumentError("Parameter is invalid (key)")
try:
result = self._db.persist(key)
return result
except (Exception):
raise
finally:
pass
pass
def ItemDelete(self, key):
if( key is None or str(key).strip() == ""):
raise errors.ArgumentError("Parameter is invalid (key)")
try:
result = self._db.delete(key)
return result
except (Exception):
raise
finally:
pass
pass
def ItemExist(self, key):
if( key is None or str(key).strip() == ""):
raise errors.ArgumentError("Parameter is invalid (key)")
try:
result = self._db.exists(key)
return result
except (Exception):
raise
finally:
pass
pass
def AddListItem(self, key, value):
if( key is None or str(key).strip() == "" or value is None or str(value).strip() == ""):
raise errors.ArgumentError("Parameter is invalid (key or value)")
try:
#value = utilities._ConvertObjToRedisValue(value) #TODO
result = self._db.rpush(key, value)
return result
except (Exception):
raise
finally:
pass
pass
def UpdateTTL_Item(self, key, ttl):
if( key is None or str(key).strip() == "" or ttl is None or str(ttl).strip() == ""):
raise errors.ArgumentError("Parameter is invalid (key or value)")
try:
result = self._db.lset(key, 0, ttl)
return result
except (Exception):
raise
finally:
pass
pass
def AddListItemWithTTL(self, key, value, value_ttl):
if( key is None or str(key).strip() == ""
or value is None or str(value).strip() == ""
or value_ttl is None or str(value_ttl).strip() == ""
):
raise errors.ArgumentError("Parameter is invalid (key or value or value_ttl)")
try:
result = self._db.rpush(key, value_ttl)
result = self._db.rpush(key, value)
return result
except (Exception):
raise
finally:
pass
pass
def GetListItem(self, key):
if( key is None or str(key).strip() == "" ):
raise errors.ArgumentError("Parameter is invalid (key)")
try:
val = self._db.lrange(key, 0, 1)
if(val is None or len(val)==0):
result = None
else:
#val = utilities._ConvertRedisValueToObject(value, t) #TODO
result = (val[0], val[1])
return result
except (Exception):
raise
finally:
pass
pass
|
nilq/baby-python
|
python
|
import xml.etree.ElementTree as ET
import urllib2
from sqlalchemy import and_
from datetime import datetime
from .ConnectDB_ParseExcel import *
from stp0_loadCVs import Load_CV_To_DB
from stp4_loadDataValue.helper import LoadingUtils
class CUAHSI_importer():
'''
This class is used to get data putting to WaMDaM database
from data responded of web.
'''
def __init__(self):
self.setup = DB_Setup()
self.__session = self.setup.get_session()
def load_data(self, response_data):
'''
:param resphonse_string: responded data from web
:return: None
'''
# Firstly, load CV data
instance_cvData = Load_CV_To_DB(None)
instance_cvData.load_data()
instance_cvData.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data to add within Organizations table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Organizations;')
organizationID = 0
for n in recordCountResult:
organizationID = int(n[0])
organizationID += 1
organizationName = 'CUAHSI' #response_data.timeSeries[0].values[0].source[0].organization
organizationWebpage = 'http://hydroportal.cuahsi.org/nwisdv/cuahsi_1_1.asmx?op=GetValuesObject'
# Check whether same name exist in Organizations table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.Organizations).filter(
SqlAlchemy.Organizations.OrganizationName == organizationName).first().OrganizationID
except:
pass
if exsting is None:
org = SqlAlchemy.Organizations()
# org.OrganizationID = organizationID
org.OrganizationName = organizationName
org.OrganizationWebpage = organizationWebpage
self.setup.push_data(org)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within People table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM People;')
personID = 0
for n in recordCountResult:
personID = int(n[0])
personID += 1
personName = "Unknown"
# Check whether same name exist in People table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.People).filter(
SqlAlchemy.People.PersonName == personName).first().PersonID
except:
pass
if exsting is None:
people = SqlAlchemy.People()
people.PersonID = personID
people.PersonName = personName
people.OrganizationID = organizationID
self.setup.push_data(people)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within Sources table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Sources;')
sourceID = 0
for n in recordCountResult:
sourceID = int(n[0])
sourceID += 1
source_name = "CUAHSI Water One Flow"
# Check whether same name exist in Sources table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.Sources).filter(
SqlAlchemy.Sources.SourceName == source_name).first().SourceID
except:
pass
if exsting is None:
sources = SqlAlchemy.Sources()
sources.SourceID = sourceID
sources.SourceName = source_name
sources.SourceWebpage = "http://hydroportal.cuahsi.org/nwisdv/cuahsi_1_1.asmx?WSDL"
sources.PersonID = personID
self.setup.push_data(sources)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within Methods table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Methods;')
methodID = 0
for n in recordCountResult:
methodID = int(n[0])
methodID += 1
method_name = "CUAHSI/ODM"
# Check whether same name exist in Methods table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.Methods).filter(
SqlAlchemy.Methods.MethodName == method_name).first().MethodID
except:
pass
if exsting is None:
methods = SqlAlchemy.Methods()
methods.MethodID = methodID
methods.MethodName = method_name
methods.MethodWebpage = "https://water.usbr.gov/query.php"
methods.MethodTypeCV = "Derivation"
methods.PersonID = personID
self.setup.push_data(methods)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within ResourceTypes table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM ResourceTypes;')
resourceTypeID = 0
for n in recordCountResult:
resourceTypeID = int(n[0])
resourceTypeID += 1
resource_type = "CUAHSI web service"
# Check whether same name exist in ResourceTypes table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.ResourceTypes).filter(
SqlAlchemy.ResourceTypes.ResourceType == resource_type).first().ResourceTypeID
except:
pass
if exsting is None:
resourceTypes = SqlAlchemy.ResourceTypes()
resourceTypes.ResourceTypeID = resourceTypeID
resourceTypes.ResourceType = resource_type
resourceTypes.ResourceTypeAcronym = "CUAHSI"
resourceTypes.MethodID = methodID
self.setup.push_data(resourceTypes)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within ObjectTypes table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM ObjectTypes;')
objectTypeID = 0
for n in recordCountResult:
objectTypeID = int(n[0])
objectTypeID += 1
objecttype = "site"
# Check whether same name exist in ObjectTypes table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.ObjectTypes).filter(
SqlAlchemy.ObjectTypes.ObjectType == objecttype).first().ObjectTypeID
except:
pass
if exsting is None:
objectTypes = SqlAlchemy.ObjectTypes()
objectTypes.ObjectTypeID = objectTypeID
objectTypes.ObjectType = objecttype
objectTypes.ObjectTypeCV = 'Site'
objectTypes.ObjectTypologyCV = "Node"
objectTypes.ResourceTypeID = resourceTypeID
self.setup.push_data(objectTypes)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within Attributes table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Attributes;')
attributesID = 0
for n in recordCountResult:
attributesID = int(n[0])
attributesID += 1
attribute_name = response_data.timeSeries[0].variable.variableName
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.Attributes).filter(
SqlAlchemy.Attributes.AttributeName == attribute_name).first().AttributeID
except:
pass
if exsting is None:
attributes = SqlAlchemy.Attributes()
attributes.AttributeID = attributesID
attributes.AttributeName = attribute_name
try:
attributes.ObjectTypeID = self.__session.query(SqlAlchemy.ObjectTypes).filter(
SqlAlchemy.ObjectTypes.ObjectType == 'site').first().ObjectTypeID
except:
raise Exception('Error \n Could not find {} in ObjectTypes'
.format('site'))
attributes.UnitName = 'ft3/s'
if attributes.UnitName=='ft3/s':
attributes.UnitNameCV = 'cubic foot per second'
attributes.AttributeDataTypeCV = 'TimeSeries'
attributes.AttributeNameCV = 'Flow'
self.setup.push_data(attributes)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within MasterNetworks table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM MasterNetworks;')
masterNetworkID = 0
for n in recordCountResult:
masterNetworkID = int(n[0])
masterNetworkID += 1
masternetwork_name = "CUAHSI"
# Check whether same name exist in MasterNetworks table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.MasterNetworks).filter(
SqlAlchemy.MasterNetworks.MasterNetworkName == masternetwork_name).first().MasterNetworkID
except:
pass
if exsting is None:
masterNetworks = SqlAlchemy.MasterNetworks()
masterNetworks.MasterNetworkID = masterNetworkID
masterNetworks.MasterNetworkName = masternetwork_name
self.setup.push_data(masterNetworks)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within Scenarios table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Scenarios;')
scenarioID = 0
for n in recordCountResult:
scenarioID = int(n[0])
scenarioID += 1
scenario_name = "CUAHSI data as-is"
# Check whether same name exist in Scenarios table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.Scenarios).filter(
SqlAlchemy.Scenarios.ScenarioName == scenario_name).first().ScenarioID
except:
pass
if exsting is None:
scenarios = SqlAlchemy.Scenarios()
scenarios.ScenarioID = scenarioID
scenarios.ScenarioName = 'AS-is'
scenarios.MasterNetworkID = masterNetworkID
scenarios.ScenarioParentName = 'self'
scenarios.ScenarioType = 'Baseline'
scenarios.ScenarioStartDate = '1900-10-01'
scenarios.ScenarioEndDate = '2020-10-01'
scenarios.TimeStepValue = 1
scenarios.TimeStepUnitCV = 'day'
self.setup.push_data(scenarios)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within Instances table
recordCountResult = self.__session.execute('SELECT COUNT(*) FROM Instances;')
instanceID = 0
for n in recordCountResult:
instanceID = int(n[0])
instanceID += 1
node_instance_name = response_data.timeSeries[0].sourceInfo.siteName
# not working
# node_instance_Longitude = response_data.timeSeries[0].sourceInfo.Longitude
# Check whether same name exist in Instances table
exsting = None
try:
exsting = self.__session.query(SqlAlchemy.Instances).filter(
SqlAlchemy.Instances.InstanceName == node_instance_name).first().InstanceID
except:
pass
if exsting is None:
instances = SqlAlchemy.Instances()
instances.InstanceID = instanceID
instances.InstanceName = node_instance_name
if node_instance_name=='BEAR RIVER BL STEWART DAM NR MONTPELIER, ID':
instances.InstanceNameCV = 'USGS 10046500 BEAR RIVER BL STEWART DAM NR MONTPELIER, ID'
else:
instances.InstanceNameCV =''
if node_instance_name=='BEAR RIVER NEAR UTAH-WYOMING STATE LINE':
instances.Longitude_x='-111.062'
instances.Latitude_y = '42.211'
self.setup.push_data(instances)
self.setup.add_data()
instance_name = node_instance_name
#////////////////////////////////////////////////////////////////////#
# Load data for ValuesMapper, Mapping, ScenarioMapping, TimeSeries and TimeSeriesValues table
# Add data for ValuesMapper
valuesMapper = SqlAlchemy.ValuesMapper()
try:
valuesMapper.ValuesMapperID = int(self.__session.query(SqlAlchemy.ValuesMapper).order_by(
SqlAlchemy.ValuesMapper.ValuesMapperID.desc()).first().ValuesMapperID)
valuesMapper.ValuesMapperID += 1
except:
valuesMapper.ValuesMapperID = 1
self.setup.push_data(valuesMapper)
self.setup.add_data()
#///////////////////////////////////#
# Add data for Mapping
attrib_id, instance_id, scenario_id, source_id, method_id = LoadingUtils.get_ids_from_names({'ObjectType': objecttype,
'AttributeName': attribute_name,
'InstanceName': instance_name,
'ScenarioName': scenario_name,
'SourceName': source_name,
'MethodName': method_name}, self.__session)
dataval_map = SqlAlchemy.Mappings()
dataval_map.AttributeID = attrib_id
dataval_map.InstanceID = instance_id
dataval_map.SourceID = source_id
dataval_map.MethodID = method_id
dataval_map.ValuesMapperID = valuesMapper.ValuesMapperID
self.setup.push_data(dataval_map)
self.setup.add_data()
#///////////////////////////////////#
# Add data for ScenarioMappings
scenariomap = SqlAlchemy.ScenarioMappings()
scenariomap.ScenarioID = scenario_id
datavalues = self.__session.query(SqlAlchemy.Mappings).filter(
and_(
SqlAlchemy.Mappings.AttributeID == attrib_id,
SqlAlchemy.Mappings.InstanceID == instance_id,
SqlAlchemy.Mappings.SourceID == source_id,
SqlAlchemy.Mappings.MethodID == method_id
)
).first()
if datavalues:
scenariomap.MappingID = datavalues.MappingID
else:
scenariomap.MappingID = self.__session.query(SqlAlchemy.Mappings).filter(
and_(
SqlAlchemy.Mappings.AttributeID == attrib_id,
SqlAlchemy.Mappings.InstanceID == instance_id,
SqlAlchemy.Mappings.SourceID == source_id,
SqlAlchemy.Mappings.MethodID == method_id
)
).first().MappingID
# if the current mappingid - scenarioid does not exist, a new
# one is created else the old is reused.
try:
test = self.__session.query(SqlAlchemy.ScenarioMappings).filter(
and_(
SqlAlchemy.ScenarioMappings.MappingID == scenariomap.MappingID,
SqlAlchemy.ScenarioMappings.ScenarioID == scenariomap.ScenarioID
)
).first().ScenarioMappingID
except:
self.setup.push_data(scenariomap)
self.setup.add_data()
#///////////////////////////////////#
# Add data within TimeSeries table
timeSeries = SqlAlchemy.TimeSeries()
timeSeries.YearType = 'CalendarYear'
timeSeries.AggregationStatisticCV = "Average"
timeSeries.AggregationInterval = 1
timeSeries.IntervalTimeUnitCV = "day"
timeSeries.ValuesMapperID = valuesMapper.ValuesMapperID
self.setup.push_data(timeSeries)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
# Add data within TimeSeriesValues table
values = response_data.timeSeries[0].values[0].value
for value in values:
timeSeriesValues = SqlAlchemy.TimeSeriesValues()
timeSeriesValues.TimeSeriesID = timeSeries.TimeSeriesID
timeSeriesValues.DateTimeStamp = datetime(value._dateTime.year, value._dateTime.month, value._dateTime.day,
value._dateTime.hour, value._dateTime.minute, value._dateTime.second)
try:
timeSeriesValues.DataValue = value.value
except:
timeSeriesValues.DataValue = 0.0
self.setup.push_data(timeSeriesValues)
self.setup.add_data()
#////////////////////////////////////////////////////////////////////#
|
nilq/baby-python
|
python
|
"""Fsubs config."""
|
nilq/baby-python
|
python
|
"""Utility functions to check attributes returned in API responses and read from the AWS S3."""
import datetime
import re
def check_attribute_presence(node, attribute_name):
    """Check the attribute presence in the given dictionary or list.

    To be used to check the deserialized JSON data etc.
    """
    # For a list the candidate attributes are its elements; for a dict, its keys.
    present = node if type(node) is list else node.keys()
    assert attribute_name in node, \
        "'%s' attribute is expected in the node, " \
        "found: %s attributes " % (attribute_name, ", ".join(present))
def check_attributes_presence(node, attribute_names):
    """Check the presence of all attributes in the dictionary or in the list.

    To be used to check the deserialized JSON data etc.
    """
    # The set of attributes present in the node does not change while we
    # iterate, so compute it once instead of on every loop iteration.
    found_attributes = node if type(node) is list else node.keys()
    for attribute_name in attribute_names:
        assert attribute_name in node, \
            "'%s' attribute is expected in the node, " \
            "found: %s attributes " % (attribute_name, ", ".join(found_attributes))
def check_and_get_attribute(node, attribute_name):
    """Check the attribute presence and if the attribute is found, return its value."""
    # Raises a descriptive AssertionError when the attribute is missing.
    check_attribute_presence(node, attribute_name)
    return node[attribute_name]
def check_uuid(uuid):
    """Check if the string contains a proper UUID.

    Supported format: 71769af6-0a39-4242-94be-1f84f04c8a56
    """
    # Raw string: '\Z' is a regex end-of-string anchor, not a string escape;
    # without the r-prefix, newer Pythons warn about an invalid escape sequence.
    regex = re.compile(r'^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z',
                       re.I)
    match = regex.match(uuid)
    return bool(match)
def check_timestamp(timestamp):
    """Check if the string contains proper timestamp value.

    The following four formats are supported:
    2017-07-19 13:05:25.041688
    2017-07-17T09:05:29.101780
    2017-07-19 13:05:25
    2017-07-17T09:05:29
    """
    assert timestamp is not None
    assert isinstance(timestamp, str)

    # Some attributes contain a timestamp without the fractional-seconds
    # part; normalize those by appending a zero fraction.
    if len(timestamp) == len("YYYY-mm-dd HH:MM:SS") and '.' not in timestamp:
        timestamp += '.0'
    assert len(timestamp) >= len("YYYY-mm-dd HH:MM:SS.")

    # Date and time may be separated either by a space or by 'T'; pick the
    # matching strptime format accordingly.
    # (see https://www.tutorialspoint.com/python/time_strptime.htm)
    if timestamp[10] == "T":
        timeformat = "%Y-%m-%dT%H:%M:%S.%f"
    else:
        timeformat = "%Y-%m-%d %H:%M:%S.%f"

    # strptime raises ValueError for malformed input, which is the check.
    datetime.datetime.strptime(timestamp, timeformat)
def check_job_token_attributes(token):
    """Check that the given JOB token contains all required attributes."""
    # Every rate-limit attribute must be present and be a non-negative integer.
    for attribute in ("limit", "remaining", "reset"):
        assert attribute in token
        assert int(token[attribute]) >= 0
def check_status_attribute(data):
    """Check the value of the status attribute, that should contain just two allowed values."""
    allowed_states = ("success", "error")
    status = check_and_get_attribute(data, "status")
    assert status in allowed_states
def check_summary_attribute(data):
    """Check the summary attribute that can be found all generated metadata."""
    summary = check_and_get_attribute(data, "summary")
    # A summary may be serialized either as a list or as a dictionary.
    assert type(summary) in (list, dict)
def release_string(ecosystem, package, version=None):
    """Construct a string with ecosystem:package or ecosystem:package:version tuple."""
    # A missing version is rendered literally as 'None', matching callers'
    # expectations for partial release strings.
    return "%s:%s:%s" % (ecosystem, package, version)
def check_release_attribute(data, ecosystem, package, version=None):
    """Check the content of _release attribute.

    Check that the attribute _release contains proper release string for given ecosystem
    and package.
    """
    expected = release_string(ecosystem, package, version)
    check_attribute_presence(data, "_release")
    assert data["_release"] == expected
def check_schema_attribute(data, expected_schema_name, expected_schema_version):
    """Check the content of the schema attribute.

    This attribute should contains dictionary with name and version that are checked as well.
    """
    # Read the toplevel 'schema' node, then its 'name' and 'version'.
    schema = check_and_get_attribute(data, "schema")
    actual_name = check_and_get_attribute(schema, "name")
    actual_version = check_and_get_attribute(schema, "version")

    assert actual_name == expected_schema_name, \
        "Schema name '{n1}' is different from expected name '{n2}'".format(
            n1=actual_name, n2=expected_schema_name)
    # ATM we are able to check just one fixed version.
    assert actual_version == expected_schema_version, \
        "Schema version {v1} is different from expected version {v2}".format(
            v1=actual_version, v2=expected_schema_version)
def check_audit_metadata(data):
    """Check the metadata stored in the _audit attribute.

    Check if all common attributes can be found in the audit node
    in the component or package metadata.
    """
    audit = check_and_get_attribute(data, "_audit")

    check_attribute_presence(audit, "version")
    assert audit["version"] == "v1"

    # Both timestamps must be present and parse in a supported format.
    for timestamp_attribute in ("started_at", "ended_at"):
        check_attribute_presence(audit, timestamp_attribute)
        check_timestamp(audit[timestamp_attribute])
def get_details_node(context):
    """Get content of details node, given it exists."""
    # The deserialized S3 payload is stored on the behave context.
    return check_and_get_attribute(context.s3_data, 'details')
def check_cve_value(cve, with_score=False):
    """Check CVE values in CVE records.

    Accepts plain identifiers like CVE-2012-1150 or, with with_score=True,
    identifiers with an appended CVSS score like CVE-2012-1150:5.0.
    """
    # Raw strings: '\d' is a regex class, not a string escape; without the
    # r-prefix, newer Pythons warn about invalid escape sequences.
    if with_score:
        # please note that in graph DB, the CVE entries have the following format:
        # CVE-2012-1150:5.0
        # don't ask me why, but the score is stored in one field together with ID itself
        # the : character is used as a separator
        pattern = r"CVE-(\d{4})-\d{4,}:(\d+\.\d+)"
    else:
        pattern = r"CVE-(\d{4})-\d{4,}"
    match = re.fullmatch(pattern, cve)
    assert match is not None, "Improper CVE number %s" % cve
    year = int(match.group(1))
    current_year = datetime.datetime.now().year
    # well the lower limit is a bit arbitrary
    # (according to SRT guys it should be 1999)
    assert year >= 1999 and year <= current_year
    if with_score:
        score = float(match.group(2))
        assert score >= 0.0 and score <= 10.0
|
nilq/baby-python
|
python
|
# UFO Lorentz structures for scalar-to-diphoton effective vertices
# (spins [3, 3, 1] = two vector bosons and one scalar).  Each FTriPhoton*
# form factor multiplies a gauge-invariant photon-pair tensor; the names
# suggest top-, bottom-, W-, and generic-scalar loop contributions, with
# the 'Odd' variants using the CP-odd epsilon-tensor structure.
# NOTE(review): the FTriPhoton* form factors are defined elsewhere in the
# model — confirm their loop-function semantics against the model source.
gagaStop = Lorentz(name = 'gagaStop',
                   spins = [ 3, 3, 1 ],
                   structure = 'FTriPhotonTop(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')
gagaSbot = Lorentz(name = 'gagaSbot',
                   spins = [ 3, 3, 1 ],
                   structure = 'FTriPhotonBot(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')
gagaSW = Lorentz(name = 'gagaSW',
                 spins = [ 3, 3, 1 ],
                 structure = 'FTriPhotonW(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')
gagaSS = Lorentz(name = 'gagaSS',
                 spins = [ 3, 3, 1 ],
                 structure = 'FTriPhotonS(2*P(-1,1)*P(-1,2)) * (Metric(1,2)*P(-1,1)*P(-1,2) - P(2,1)*P(1,2))')
gagaSOddtop = Lorentz(name = 'gagaSOddtop',
                      spins = [ 3, 3, 1 ],
                      structure = 'FTriPhotonOddTop(2*P(-1,1)*P(-1,2)) * (Epsilon(1,2,-1,-2)*P(-1,1)*P(-2,2))')
gagaSOddbot = Lorentz(name = 'gagaSOddbot',
                      spins = [ 3, 3, 1 ],
                      structure = 'FTriPhotonOddBot(2*P(-1,1)*P(-1,2)) * (Epsilon(1,2,-1,-2)*P(-1,1)*P(-2,2))')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Zero-DCE++: Learning to Enhance Low-Light Image via Zero-Reference Deep Curve
Estimation
Zero-DCE++ has a fast inference speed (1000/11 FPS on single GPU/CPU for an
image with a size of 1200*900*3) while keeping the enhancement performance of
Zero-DCE.
References:
https://github.com/Li-Chongyi/Zero-DCE_extension
"""
from __future__ import annotations
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn
from torch import Tensor
from onevision.factory import ACT_LAYERS
from onevision.factory import IMAGE_ENHANCEMENT
from onevision.factory import LOW_LIGHT_IMAGE_ENHANCEMENT
from onevision.factory import MODELS
from onevision.models.enhancement.image_enhancer import ImageEnhancer
from onevision.models.enhancement.zerodce.loss import CombinedLoss
from onevision.nn import DepthwiseConv
from onevision.nn import PointwiseConv
from onevision.type import Indexes
from onevision.type import Pretrained
from onevision.type import Tensors
__all__ = [
"ZeroDCEPP",
]
# MARK: - Modules
class CSDNTem(nn.Module):
    """Depthwise-separable convolution block: depthwise conv followed by a
    pointwise (1x1) conv that maps to *out_channels*."""

    # MARK: Magic Functions

    def __init__(self, in_channels: int, out_channels: int):
        super().__init__()
        # Depthwise stage: one filter per input channel (groups == channels).
        self.dw_conv = DepthwiseConv(
            in_channels=in_channels, out_channels=in_channels, padding=1,
            groups=in_channels, bias=True,
        )
        # Pointwise stage mixes channel information.
        self.pw_conv = PointwiseConv(
            in_channels=in_channels, out_channels=out_channels, padding=0,
            groups=1
        )

    # MARK: Forward Pass

    def forward(self, x: Tensor) -> Tensor:
        return self.pw_conv(self.dw_conv(x))
# MARK: - ZeroDCE++
@MODELS.register(name="zerodce++")
@IMAGE_ENHANCEMENT.register(name="zerodce++")
@LOW_LIGHT_IMAGE_ENHANCEMENT.register(name="zerodce++")
class ZeroDCEPP(ImageEnhancer):
    """
    References:
        https://github.com/Li-Chongyi/Zero-DCE

    Args:
        name (str, optional):
            Name of the backbone. Default: `zerodce++`.
        out_indexes (Indexes):
            List of output tensors taken from specific layers' indexes.
            If `>= 0`, return the ith layer's output.
            If `-1`, return the final layer's output. Default: `-1`.
        pretrained (Pretrained):
            Use pretrained weights. If `True`, returns a model pre-trained on
            ImageNet. If `str`, load weights from saved file. Default: `True`.
            - If `True`, returns a model pre-trained on ImageNet.
            - If `str` and is a weight file(path), then load weights from
              saved file.
            - In each inherited model, `pretrained` can be a dictionary's
              key to get the corresponding local file or url of the weight.
    """

    model_zoo = {
        "sice": dict(
            path="https://github.com/Li-Chongyi/Zero-DCE_extension/blob/main/Zero-DCE%2B%2B/snapshots_Zero_DCE%2B%2B/Epoch99.pth",
            file_name="zerodce++_sice.pth", num_classes=None,
        ),
    }

    # MARK: Magic Functions

    def __init__(
        self,
        # Hyperparameters
        scale_factor: int = 1,
        channels    : int = 32,
        act                              = nn.ReLU(inplace=True),
        # BaseModel's args
        basename    : Optional[str] = "zerodce++",
        name        : Optional[str] = "zerodce++",
        num_classes : Optional[int] = None,
        out_indexes : Indexes       = -1,
        pretrained  : Pretrained    = False,
        *args, **kwargs
    ):
        # Zero-DCE is trained with its characteristic no-reference loss.
        kwargs["loss"] = CombinedLoss(
            spa_weight     = 1.0,
            exp_patch_size = 16,
            exp_mean_val   = 0.6,
            exp_weight     = 10.0,
            col_weight     = 5.0,
            tv_weight      = 1600.0,
        )
        super().__init__(
            basename    = basename,
            name        = name,
            num_classes = num_classes,
            out_indexes = out_indexes,
            pretrained  = pretrained,
            *args, **kwargs
        )
        # NOTE: Get Hyperparameters
        self.scale_factor = scale_factor
        self.channels     = channels

        # NOTE: Features
        # Zero-DCE DWC + p-shared
        self.e_conv1  = CSDNTem(3, self.channels)
        self.e_conv2  = CSDNTem(self.channels, self.channels)
        self.e_conv3  = CSDNTem(self.channels, self.channels)
        self.e_conv4  = CSDNTem(self.channels, self.channels)
        self.e_conv5  = CSDNTem(self.channels * 2, self.channels)
        self.e_conv6  = CSDNTem(self.channels * 2, self.channels)
        self.e_conv7  = CSDNTem(self.channels * 2, 3)
        self.upsample = nn.UpsamplingBilinear2d(scale_factor=self.scale_factor)
        self.act      = act
        if isinstance(self.act, str):
            self.act = ACT_LAYERS.build(name=self.act)

        # NOTE: Load Pretrained
        if self.pretrained:
            self.load_pretrained()
        else:
            self.apply(self.weights_init)

    # MARK: Configure

    def weights_init(self, m):
        """Initialize conv/batchnorm weights from N(0/1, 0.02)."""
        classname = m.__class__.__name__
        if classname.find("Conv") != -1:
            m.weight.data.normal_(0.0, 0.02)
        elif classname.find("BatchNorm") != -1:
            m.weight.data.normal_(1.0, 0.02)
            m.bias.data.fill_(0)

    # MARK: Forward Pass

    def forward_once(self, x: Tensor, *args, **kwargs) -> Tensors:
        """Forward pass once. Implement the logic for a single forward pass.

        Args:
            x (Tensor):
                Input of shape [B, C, H, W].

        Returns:
            yhat (Tensors):
                Tuple of (curve parameter map, enhanced image).
        """
        # Optionally estimate the curve map at a reduced resolution.
        if self.scale_factor == 1:
            x_down = x
        else:
            x_down = F.interpolate(
                x, scale_factor=1.0 / self.scale_factor, mode="bilinear"
            )

        x1  = self.act(self.e_conv1(x_down))
        x2  = self.act(self.e_conv2(x1))
        x3  = self.act(self.e_conv3(x2))
        x4  = self.act(self.e_conv4(x3))
        # Skip connections concatenate earlier feature maps.
        x5  = self.act(self.e_conv5(torch.cat([x3, x4], 1)))
        x6  = self.act(self.e_conv6(torch.cat([x2, x5], 1)))
        # torch.tanh: torch.nn.functional.tanh is deprecated.
        x_r = torch.tanh(self.e_conv7(torch.cat([x1, x6], 1)))
        if self.scale_factor != 1:
            x_r = self.upsample(x_r)

        # NOTE: Enhance — apply the quadratic curve LE(x) = x + r*(x^2 - x)
        # eight times with the same parameter map.
        x = x + x_r * (torch.pow(x, 2) - x)
        x = x + x_r * (torch.pow(x, 2) - x)
        x = x + x_r * (torch.pow(x, 2) - x)
        enhance_image_1 = x + x_r * (torch.pow(x, 2) - x)
        x = enhance_image_1 + x_r * (torch.pow(enhance_image_1, 2) - enhance_image_1)
        x = x + x_r * (torch.pow(x, 2) - x)
        x = x + x_r * (torch.pow(x, 2) - x)
        enhance_image = x + x_r * (torch.pow(x, 2) - x)
        return x_r, enhance_image
|
nilq/baby-python
|
python
|
"""Number constraint names."""
from jsonvl._utilities.venum import Venum
class NumberConstraintNames(Venum):
    """Constraints applied to number types."""
    # Comparison constraint identifiers as they appear in schema documents.
    LT = 'lt'    # strictly less than
    GT = 'gt'    # strictly greater than
    LTE = 'lte'  # less than or equal
    GTE = 'gte'  # greater than or equal
    EQ = 'eq'    # equal
|
nilq/baby-python
|
python
|
import time
import logging
import numpy as np
import torch
import torch.nn as nn
from data import augment, TensorDataset
from diffaugment import DiffAugment
from utils import get_time
def epoch(mode, dataloader, net, optimizer, criterion, args, aug):
    """Run one pass over *dataloader*; return (mean loss, mean accuracy).

    When mode == 'train' the network is updated per batch; any other mode
    only evaluates.  When *aug* is true, batches are augmented first (DSA
    or classic augmentation depending on args.dsa).
    """
    net = net.to(args.device)
    criterion = criterion.to(args.device)
    if mode == 'train':
        net.train()
    else:
        net.eval()

    total_loss = 0.0
    total_correct = 0
    total_samples = 0
    for batch in dataloader:
        images = batch[0].float().to(args.device)
        if aug:
            if args.dsa:
                images = DiffAugment(images, args.dsa_strategy, param=args.dsa_param)
            else:
                images = augment(images, args.dc_aug_param, device=args.device)
        labels = batch[1].long().to(args.device)
        batch_size = labels.shape[0]

        output = net(images)
        loss = criterion(output, labels)
        predictions = np.argmax(output.cpu().data.numpy(), axis=-1)

        total_loss += loss.item() * batch_size
        total_correct += np.sum(np.equal(predictions, labels.cpu().data.numpy()))
        total_samples += batch_size

        if mode == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    return total_loss / total_samples, total_correct / total_samples
def evaluate_synset(it_eval, net, images_train, labels_train, testloader, args):
    """Train *net* on the synthetic set and report train/test accuracy.

    Trains for args.epoch_eval_train epochs with SGD, decaying the learning
    rate once (x0.1) just past the halfway point, then evaluates a single
    pass on *testloader*.  Returns (net, train accuracy, test accuracy).
    """
    net = net.to(args.device)
    images_train = images_train.to(args.device)
    labels_train = labels_train.to(args.device)

    lr = float(args.lr_net)
    total_epochs = int(args.epoch_eval_train)
    decay_points = [total_epochs // 2 + 1]
    optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
    criterion = nn.CrossEntropyLoss().to(args.device)

    dst_train = TensorDataset(images_train, labels_train)
    trainloader = torch.utils.data.DataLoader(dst_train, batch_size=args.batch_train, shuffle=True, num_workers=0)

    start = time.time()
    for ep in range(total_epochs + 1):
        loss_train, acc_train = epoch('train', trainloader, net, optimizer, criterion, args, aug=True)
        if ep in decay_points:
            # Decay the learning rate by rebuilding the optimizer.
            lr *= 0.1
            optimizer = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
    time_train = time.time() - start

    loss_test, acc_test = epoch('test', testloader, net, optimizer, criterion, args, aug=False)
    logging.info('%s Evaluate_%02d: epoch = %04d train time = %d s train loss = %.6f train acc = %.4f, test acc = %.4f' % (get_time(), it_eval, total_epochs, int(time_train), loss_train, acc_train, acc_test))
    return net, acc_train, acc_test
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect():
    """Connect to the PostgreSQL database. Returns a database connection."""
    # Assumes a reachable PostgreSQL instance with a 'tournament' database;
    # psycopg2 raises OperationalError otherwise.
    return psycopg2.connect("dbname=tournament")
def deleteMatches():
    """Remove all the match records from the database."""
    conn = connect()
    cur = conn.cursor()
    cur.execute("DELETE FROM matches")
    conn.commit()
    conn.close()
def deletePlayers():
    """Remove all the player records from the database."""
    conn = connect()
    cur = conn.cursor()
    # Players live in the 'members' table.
    cur.execute("DELETE FROM members")
    conn.commit()
    conn.close()
def deleteTournaments():
    """Remove all the tournament records from the database."""
    conn = connect()
    cur = conn.cursor()
    cur.execute("DELETE FROM tournaments")
    conn.commit()
    conn.close()
def countTournaments():
    """Returns the number of tournaments currently registered."""
    conn = connect()
    cur = conn.cursor()
    cur.execute("SELECT COUNT(*) FROM tournaments")
    (total,) = cur.fetchone()
    conn.commit()
    conn.close()
    return total
def countPlayers():
    """Returns the number of players currently registered."""
    conn = connect()
    cur = conn.cursor()
    cur.execute("SELECT COUNT(*) FROM members")
    (total,) = cur.fetchone()
    conn.commit()
    conn.close()
    return total
def registerTournament(name):
    """Adds a tournament to the database.

    Args:
      name: the name of the tournament (need not be unique).
    """
    conn = connect()
    cur = conn.cursor()
    # Parameterized query guards against SQL injection.
    cur.execute("INSERT INTO tournaments (name) VALUES (%s)", (name,))
    conn.commit()
    conn.close()
# Add this to be able to reference unique id of each tournament
def getTournamentId(name):
    """Returns unique ID of registered tournament.

    Args:
      name: the name of the tournament.
    """
    conn = connect()
    cur = conn.cursor()
    cur.execute("SELECT id FROM tournaments WHERE name = (%s)", (name,))
    (idNumber,) = cur.fetchone()
    conn.commit()
    conn.close()
    return idNumber
def registerPlayer(name, tournamentId):
    """Adds a player to the tournament database.

    The database assigns a unique serial id number for the player.

    Args:
      name: the player's full name (need not be unique).
      tournamentId: the unique ID (number) of the tournament the player is registering for.
    """
    conn = connect()
    cur = conn.cursor()
    cur.execute("INSERT INTO members (name, tournament) VALUES (%s, %s)", (name, tournamentId))
    conn.commit()
    conn.close()
def playerStandings(tournamentId):
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played

    Args:
      tournamentId: the unique ID (number) of the tournament.
    """
    conn = connect()
    cur = conn.cursor()
    # 'standings' is a database view aggregating wins per member.
    cur.execute("SELECT * FROM standings WHERE tournament = %s ORDER BY wins DESC", (tournamentId,))
    rows = cur.fetchall()
    conn.commit()
    conn.close()
    return rows
def reportMatch(tournamentId, winner, loser):
    """Records the outcome of a single match between two players.

    Args:
      tournamentId: the unique ID of the tournament.
      winner: the ID number of the player who won.
      loser: the ID number of the player who lost.
    """
    conn = connect()
    cur = conn.cursor()
    cur.execute("INSERT INTO matches (tournament, winner, loser) VALUES (%s, %s, %s)",
                (tournamentId, winner, loser))
    # Keep the denormalized win/loss tallies on the members table in sync.
    cur.execute("UPDATE members SET wins = wins + 1 WHERE id = (%s)", (winner,))
    cur.execute("UPDATE members SET losses = losses + 1 WHERE id = (%s)", (loser,))
    conn.commit()
    conn.close()
def swissPairings(tournamentId):
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings.  Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name

    Args:
      tournamentId: the unique ID of the tournament.
    """
    DB = connect()
    c = DB.cursor()
    c.execute("SELECT id, name FROM members WHERE tournament = (%s) ORDER BY wins DESC", (tournamentId,))
    results = c.fetchall()
    DB.commit()
    DB.close()

    # Pair adjacent players in the standings: (0,1), (2,3), ...
    # Floor division ('//') is required here: on Python 3, '/' on two ints
    # yields a float and range() would raise TypeError.  With an odd number
    # of players the trailing unpaired player is dropped, as before.
    pairings = []
    for first in range(0, (len(results) // 2) * 2, 2):
        pairings.append(results[first] + results[first + 1])
    return pairings
|
nilq/baby-python
|
python
|
# Read two integers; ensure a <= b by swapping when necessary.
a, b = [int(x) for x in input().split()]
if a > b:
    a, b = b, a
# Round 'a' up and 'b' down to the nearest even numbers, so [a, b]
# spans exactly the even integers of the original range.
if a & 1:
    a += 1
if b & 1:
    b -= 1
# Sum of the arithmetic series a, a+2, ..., b: count * (first + last) / 2.
print(((b - a) // 2 + 1) * (a + b) // 2)
|
nilq/baby-python
|
python
|
from ..SimpleSymbolDownloader import SymbolDownloader
from ..symbols.Generic import Generic
from time import sleep
from ..compat import text
import requests
class TigerDownloader(SymbolDownloader):
    """Symbol downloader for the 'tiger' source with exponential-back-off retries."""

    def __init__(self):
        SymbolDownloader.__init__(self, "tiger")

    def _add_queries(self, prefix=''):
        """Queue the fixed set of ticker queries, skipping duplicates."""
        elements = ['A', 'APPL', 'MSFT']
        for element in elements:
            if element not in self.queries:  # Avoid having duplicates in list
                self.queries.append(element)

    def nextRequest(self, insecure=False, pandantic=False):
        """Fetch the next query's symbols, retrying transient HTTP failures.

        Exponential back-off: up to 5 retries sleeping 5, 25, 125, 625 and
        3125 seconds respectively, then the exception is re-raised.
        Returns the decoded symbols and also stores them in self.symbols
        keyed by ticker.
        """
        self._nextQuery()
        success = False
        retryCount = 0
        json = None
        maxRetries = 5
        while not success:
            try:
                json = self._fetch(insecure)
                success = True
            except (requests.HTTPError,
                    requests.exceptions.ChunkedEncodingError,
                    requests.exceptions.ReadTimeout,
                    requests.exceptions.ConnectionError):
                if retryCount < maxRetries:
                    attempt = retryCount + 1
                    # Integer power; the original called math.pow() without
                    # importing math, which raised NameError instead of
                    # actually retrying.
                    sleepAmt = 5 ** attempt
                    print("Retry attempt: " + str(attempt) + " of " + str(maxRetries) + "."
                          " Sleep period: " + str(sleepAmt) + " seconds."
                          )
                    sleep(sleepAmt)
                    retryCount = attempt
                else:
                    raise
        (symbols, count) = self.decodeSymbolsContainer(json)
        for symbol in symbols:
            self.symbols[symbol.ticker] = symbol
        if count > 10:
            # This should never happen with this API, it always returns at most 10 items
            raise Exception("Funny things are happening: count "
                            + text(count)
                            + " > 10. "
                            + "Content:"
                            + "\n"
                            + repr(json))
        self.done = self._getQueryIndex() + 1 >= len(self.queries)
        return symbols

    def decodeSymbolsContainer(self, json):
        """Convert the API's JSON payload into Generic symbol objects.

        Returns (symbols, item count); count is 0 for an empty payload.
        """
        items = json['data']['items']
        symbols = [
            Generic(text(row['symbol']), row['name'], row['exch'],
                    row['exchDisp'], row['type'], row['typeDisp'])
            for row in items
        ]
        return (symbols, len(items))

    def getRowHeader(self):
        """Extend the base CSV header with display/type columns."""
        return SymbolDownloader.getRowHeader(self) + ["exchangeDisplay", "Type", "TypeDisplay"]
|
nilq/baby-python
|
python
|
from flask import Blueprint
from app.actor import get_hello
import jsonpickle
# Blueprint exposing the /rest/hello endpoints.
hello_resource = Blueprint('hello_resource', __name__)
@hello_resource.route('/rest/hello/', defaults={'name': 'world'})
@hello_resource.route('/rest/hello/<name>')
def get(name):
    """Return the greeting for *name* (defaults to 'world') serialized as JSON."""
    return jsonpickle.encode(get_hello.run(name), unpicklable=False)
|
nilq/baby-python
|
python
|
"""
Copyright 2020 Alexander Brauckmann.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
from absl import logging as lg
from yacos.info.compy.llvm_seq import LLVMSeqBuilder
program_1fn_2 = """
int bar(int a) {
if (a > 10)
return a;
return -1;
}
"""
program_fib = """
int fib(int x) {
switch(x) {
case 0:
return 0;
case 1:
return 1;
default:
return fib(x-1) + fib(x-2);
}
}
"""
def verify_data_dir():
    """Abort the test run when the YaCoS data directory is missing."""
    top_dir = os.path.join(os.environ.get('HOME'), '.local')
    yacos_dir = os.path.join(top_dir, 'yacos')
    if not os.path.isdir(yacos_dir):
        lg.error('YaCoS data does not exist.')
        sys.exit(1)
def test_construct_with_custom_visitor():
    """Construction."""
    verify_data_dir()
    source = os.path.join(os.environ.get('HOME'), '.local', 'yacos', 'tests',
                          'program_1fn_2.c')
    builder = LLVMSeqBuilder()
    # Building the representation must not raise.
    _ = builder.info_to_representation(builder.source_to_info(source))
def test_plot(tmpdir):
    """General tests: Plot."""
    verify_data_dir()
    source = os.path.join(os.environ.get('HOME'), '.local', 'yacos', 'tests',
                          'program_fib.c')
    builder = LLVMSeqBuilder()
    seq = builder.info_to_representation(builder.source_to_info(source))
    outfile = os.path.join(tmpdir, "syntax_seq.png")
    seq.draw(path=outfile, width=8)
    # The plot file must have been written to disk.
    assert os.path.isfile(outfile)
|
nilq/baby-python
|
python
|
# coding=utf-8
# Copyright (c) 2016-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import pytest
import requests
from f5_openstack_agent.lbaasv2.drivers.bigip.resource_helper import \
ResourceType
requests.packages.urllib3.disable_warnings()
LOG = logging.getLogger(__name__)
@pytest.fixture(scope="module")
def services():
    """Load the listener_update service-request fixture as parsed JSON."""
    neutron_services_filename = (
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            '../../testdata/service_requests/listener_update.json')
    )
    # Use a context manager so the file handle is closed; the original
    # json.load(open(...)) leaked it.
    with open(neutron_services_filename) as fp:
        return json.load(fp)
def get_next_listener(service_iterator, icontrol_driver, bigip, env_prefix):
    """Apply the next service request and return the resulting BIG-IP virtual."""
    service = service_iterator.next()
    listener = service['listeners'][0]
    partition = '{0}_{1}'.format(env_prefix, listener['tenant_id'])
    icontrol_driver._common_service_handler(service)
    virtual_name = '{0}_{1}'.format(env_prefix, listener['id'])
    return bigip.get_resource(
        ResourceType.virtual, virtual_name, partition=partition)
def get_folder_name(service, env_prefix):
    """Build the BIG-IP partition name from the env prefix and tenant id."""
    tenant_id = service['loadbalancer']['tenant_id']
    return '{0}_{1}'.format(env_prefix, tenant_id)
def test_listener_update(
        track_bigip_cfg,
        bigip,
        services,
        icd_config,
        icontrol_driver):
    """Step through the fixture's service requests in order and verify the
    BIG-IP virtual server state after each listener update."""
    env_prefix = 'TEST'
    service_iter = iter(services)
    # Create loadbalancer
    service = service_iter.next()
    icontrol_driver._common_service_handler(service)
    # Create listener (no name, description)
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert not hasattr(l, 'description')
    assert l.connectionLimit == 0
    assert l.enabled
    # Update name ('spring'). Description is changed to include name.
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring:'
    assert l.connectionLimit == 0
    assert l.enabled
    # Update description ('has sprung')
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring: has-sprung'
    assert l.connectionLimit == 0
    assert l.enabled
    # Update connection limit (200)
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring: has-sprung'
    assert l.connectionLimit == 200
    assert l.enabled
    # Update admin_state_up (False)
    l = get_next_listener(service_iter, icontrol_driver, bigip, env_prefix)
    assert l.name.startswith('TEST_')
    assert l.description == 'spring: has-sprung'
    assert l.connectionLimit == 200
    assert l.disabled
    # Delete listener
    service = service_iter.next()
    folder = get_folder_name(service, env_prefix)
    icontrol_driver._common_service_handler(service)
    # Delete loadbalancer
    service = service_iter.next()
    icontrol_driver._common_service_handler(service, delete_partition=True)
    # All objects deleted
    assert not bigip.folder_exists(folder)
|
nilq/baby-python
|
python
|
from __future__ import annotations
import logging
import re
from dataclasses import dataclass
from datetime import date
from wordgame_bot.attempt import Attempt, AttemptParser
from wordgame_bot.exceptions import InvalidFormatError, ParsingError
from wordgame_bot.guess import Guesses, GuessInfo
INCORRECT_GUESS_SCORE = 8
@dataclass
class HeardleAttemptParser(AttemptParser):
    """Parses a raw Heardle share message into a HeardleAttempt."""

    attempt: str
    error: str = ""  # TODO

    def parse(self) -> HeardleAttempt:
        """Parse the attempt, recording and re-raising any parsing error."""
        try:
            return self.parse_attempt()
        except ParsingError as parse_error:
            self.handle_error(parse_error)

    def parse_attempt(self) -> HeardleAttempt:
        """Build the attempt from the header line and the guess rows."""
        lines = self.get_lines()
        info = HeardleGuessInfo(lines[0])
        guesses = Guesses(lines[1][1:], INCORRECT_GUESS_SCORE, "🟩", "🟩🟥⬜️", 1)
        info.score = guesses.correct_guess
        return HeardleAttempt(info, guesses)

    def get_lines(self) -> list[str]:
        """Return the non-blank, stripped lines of the raw attempt text."""
        stripped = (line.strip() for line in self.attempt.strip().split("\n"))
        lines = [line for line in stripped if line]
        # A valid share message has exactly two or three usable lines.
        if not 1 < len(lines) <= 3:
            raise InvalidFormatError(self.attempt)
        return lines

    def handle_error(self, error: ParsingError):
        """Log the failure, remember its message, then propagate it."""
        logging.warning(f"{error!r}")
        self.error = str(error.message)
        raise error
@dataclass
class HeardleGuessInfo(GuessInfo):
    """Header-line information for a Heardle attempt.

    The header looks like ``#Heardle #42``; the puzzle number follows the
    second ``#``.  Heardle headers carry no score, so ``parse_score()``
    returns None and the score is filled in later from the guess rows.
    """

    # Reference date for the day counter — presumably the game's first
    # puzzle; TODO confirm against the rest of the package.
    creation_day: date = date(2022, 2, 25)
    valid_format = re.compile("^#Heardle #[0-9]+$")

    def validate_format(self):
        """Raise InvalidFormatError unless the header matches '#Heardle #<n>'."""
        self.info = self.info.strip()
        if self.valid_format.match(self.info) is None:
            raise InvalidFormatError(self.info)

    def extract_day_and_score(self):
        """Pull the puzzle number out of the header; no score is available here."""
        info_parts = self.info.split(" ")
        self.day = info_parts[1][1:]  # drop the leading '#'
        self.score = None

    def parse_day(self) -> int:
        self.validate_day()
        return int(self.day)

    def parse_score(self) -> None:
        # Fixed annotation: this always returns None (the old `-> int` was
        # wrong); the real score is derived from the guess grid by the parser.
        return None
@dataclass
class HeardleAttempt(Attempt):
    """A parsed Heardle attempt; scoring behaviour is inherited from Attempt."""

    @property
    def maxscore(self) -> int:
        # Maximum score a single Heardle round can contribute.
        return 10

    @property
    def gamemode(self) -> str:
        # Single-letter tag identifying the Heardle game mode.
        return "H"
|
nilq/baby-python
|
python
|
# https://leetcode.com/problems/split-array-into-fibonacci-sequence/description/
#
# algorithms
# Medium (35.25%)
# Total Accepted: 12,752
# Total Submissions: 36,179
# beats 66.41% of python submissions
class Solution(object):
    def splitIntoFibonacci(self, S):
        """
        :type S: str
        :rtype: List[int]

        Split the digit string S into a Fibonacci-like sequence of at least
        three numbers (each number is the sum of the previous two, fits in a
        signed 32-bit int, and has no leading zeros except the number 0
        itself).  Returns the first valid split found, or [] if none exists.
        """
        length = len(S)
        if length < 3:
            return []
        # Each piece must fit in a signed 32-bit integer per the problem.
        threshold = 2 ** 31 - 1
        result = []  # filled with the first complete split found

        def backtrack(idx, path):
            if result:
                # A solution was already found on another branch; stop early.
                return
            if idx == length:
                if len(path) >= 3:
                    result.extend(path)
                return
            # A leading '0' can only stand for the single number 0, and only
            # when 0 is consistent with the two preceding numbers.
            if S[idx] == '0':
                if len(path) < 2 or path[-1] + path[-2] == 0:
                    backtrack(idx + 1, path + [0])
                return
            # range() instead of Python-2-only xrange(): works on 2 and 3.
            for end in range(idx, length):
                value = int(S[idx:end + 1])
                if value > threshold:
                    break
                if len(path) < 2:
                    backtrack(end + 1, path + [value])
                elif path[-1] + path[-2] == value:
                    backtrack(end + 1, path + [value])
                elif path[-1] + path[-2] < value:
                    # Longer prefixes only grow; no further match possible.
                    break

        backtrack(0, [])
        return result
|
nilq/baby-python
|
python
|
from django import forms
from formfactory import clean_methods
@clean_methods.register
def check_if_values_match(form_instance, **kwargs):
    """Clean method verifying that two form fields hold the same value
    (e.g. a password and its confirmation when a contact updates a password).

    Raises:
        forms.ValidationError: if ``first_field`` and ``second_field`` differ.
    """
    first_field = form_instance.cleaned_data["first_field"]
    second_field = form_instance.cleaned_data["second_field"]
    # `!=` reads more directly than `not ... == ...`.
    if first_field != second_field:
        raise forms.ValidationError(
            "The values you entered are not equal."
        )
|
nilq/baby-python
|
python
|
# File: worker.py
# Aim: Backend worker of the http server
# Imports
import os
import sys
from . import CONFIG
from .local_tools import Tools
# Shared helper instance used by Worker.response() to build HTTP responses.
tools = Tools()

CONFIG.logger.debug('Worker imported in HTTP package')

# Import other workers
# NOTE(review): the PinYin input-method package lives outside this project
# tree, so its parent directory is appended to sys.path before importing —
# fragile, as it depends on the relative on-disk layout of both checkouts.
other_folders = [
    os.path.join(
        os.path.dirname(__file__),  # HTTP
        '..',  # Server
        '..',  # SocketServerInPython
        '..',  # [some parDir]
        'PinYinInputMethod')
]

sys.path.append(other_folders[0])
from inputMethod.web_compat import Worker as PinYinWorker
# Module-level PinYin worker; consulted first for every GET request.
pinYin_worker = PinYinWorker()
# Defines
class Worker(object):
    """Backend worker: resolves request paths to files and builds responses."""

    def __init__(self):
        # Plain string instead of an f-string with no placeholders.
        CONFIG.logger.info('Worker initialized')

    def _synchronize_settings(self):
        """Re-read directory and file-type settings from CONFIG.

        Called at the start of every response() so configuration edits take
        effect without restarting the server.
        """
        self.src_dir = CONFIG.get('Runtime', 'srcDir')
        self.default_src_dir = CONFIG.get('Default', 'srcDir')
        self.known_types = CONFIG.get_section('KnownTypes')
        CONFIG.logger.debug(
            'Worker synchronized settings: src_dir={}, default_src_dir={}, known_types={}'
            .format(self.src_dir, self.default_src_dir, self.known_types))

    def fullpath(self, path):
        """Resolve [path] from a request to an existing file on disk.

        Checks srcDir first, then defaultSrcDir, and returns the full path
        of the first existing file; returns None when the file is found in
        neither directory.
        """
        # `directory` rather than `dir`, to avoid shadowing the builtin.
        for directory, name in zip([self.src_dir, self.default_src_dir],
                                   ['srcDir', 'defaultSrcDir']):
            full = os.path.join(directory, path)
            if os.path.isfile(full):
                CONFIG.logger.debug(
                    f'Found {path} in {name}, using it in response')
                return full
        # Can not find the file in either dir
        CONFIG.logger.warning(
            f'Can not find {path} in known dirs, return None')
        return None

    def response(self, request):
        """Build the HTTP response for [request].

        Only 'GET' is handled; any other method falls through and returns
        None, which the caller must handle.
        """
        self._synchronize_settings()
        method = request['method']
        path = request['path'][1:]  # drop the leading '/'
        if method == 'GET':
            # Customized workers get the first chance to answer.
            res = pinYin_worker.response(path)
            if res is not None:
                return tools.make_response(resType='application/json',
                                           resContent=res)
            full = self.fullpath(path)
            if full is None:
                return tools.make_response(resCode='HTTP/1.1 404',
                                           resContent=f'Not Found {path}')
            # NOTE(review): a path with no '.' makes ext the whole path and
            # falls back to 'text/html' via known_types.get() below.
            ext = path.split('.')[-1]
            resType = 'Content-Type: {}'.format(
                self.known_types.get(ext, 'text/html'))
            with open(full, 'rb') as f:
                resContent = f.read()
            return tools.make_response(resType=resType, resContent=resContent)
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.