text stringlengths 38 1.54M |
|---|
import tensorflow as tf
import numpy as np
from env_2 import *
import argparse
from tensorflow import keras
from keras import models
from keras import layers
import matplotlib.pyplot as plt
def produceSections(env, env_dims=25, sight_dim=2):
    """Collect every classified drone-image patch whose sight window fits
    fully inside the env grid, flattened to [n, window*window, 3]."""
    sim = env.sim
    window = sight_dim * 2 + 1
    span = env_dims - sight_dim * 2
    patches = [
        sim.getClassifiedDroneImageAt(sight_dim + r, sight_dim + c)
        for r in range(span)
        for c in range(span)
    ]
    return np.reshape(np.asarray(patches), [-1, window * window, 3])
def produceLabels(sections, sight_dim):
    """Label each section 1 if any of its (2*sight_dim+1)^2 pixels has
    channel 0 as its (possibly tied) maximum value, else 0."""
    pixels = (sight_dim * 2 + 1) ** 2
    labels = np.zeros(len(sections))
    for idx, section in enumerate(sections):
        channel_max = section[:pixels].max(axis=-1)
        if np.any(channel_max == section[:pixels, 0]):
            labels[idx] = 1
    return labels
def shuffleData(sections, labels):
    """Shuffle sections and labels in unison.

    Returns (shuffled_sections, shuffled_labels) as numpy arrays.

    The original built the result one element at a time with np.delete
    (O(n^2)) and, because it indexed with length-1 arrays, produced spurious
    singleton axes that callers had to np.squeeze away. A single random
    permutation is O(n) and returns cleanly-shaped arrays (squeeze in the
    caller is now a no-op).
    """
    perm = np.random.permutation(len(labels))
    return np.asarray(sections)[perm], np.asarray(labels)[perm]
def prepareData(args):
    """Build shuffled train/validation/test splits from every image in the
    'Train Images' directory.

    args: parsed CLI namespace; reads args.env_dims and args.sight_dim.
    Returns (training_x, validation_x, test_x, training_y, validation_y,
    test_y) with each x flattened to [n, (2*sight_dim+1)^2 * 3].

    NOTE(review): `os` is not imported in this file directly; presumably
    `from env_2 import *` leaks it into scope — confirm.
    """
    environments = []
    img_dir = 'Train Images'
    for filename in os.listdir(img_dir):
        if filename.endswith(".jpg") or filename.endswith(".png") or filename.endswith(".TIF") or filename.endswith(".JPG"):
            env = Env(args, os.path.join(img_dir, filename))
            environments.append(env)
        else:
            continue
    # sections from the first environment, then append the rest
    x = produceSections(environments[0], args.env_dims, args.sight_dim)
    for i in range(len(environments) - 1):
        x = np.concatenate([x, produceSections(environments[i + 1], args.env_dims, args.sight_dim)], axis=0)
    x = np.asarray(x)
    y = produceLabels(x, args.sight_dim)
    y = np.asarray(y)
    x, y = shuffleData(x, y)
    # shuffleData may add singleton axes; drop them
    x = np.squeeze(x)
    y = np.squeeze(y)
    # 80% train+validation / 20% test
    eighty = int(round(len(x) * .8))
    train_split_x = x[:eighty, :, :]
    train_split_y = y[:eighty]
    test_x = x[eighty:, :, :]
    test_y = y[eighty:]
    # of the 80%: 80% train / 20% validation
    eighty = int(round(len(train_split_x) * .8))
    training_x = train_split_x[:eighty, :, :]
    validation_x = train_split_x[eighty:, :, :]
    training_y = train_split_y[:eighty]
    validation_y = train_split_y[eighty:]
    # flatten each section into one feature vector per sample
    training_x = np.reshape(training_x, [-1, ((args.sight_dim * 2 + 1) * (args.sight_dim * 2 + 1)) * 3])
    validation_x = np.reshape(validation_x, [-1, ((args.sight_dim * 2 + 1) * (args.sight_dim * 2 + 1)) * 3])
    test_x = np.reshape(test_x, [-1, ((args.sight_dim * 2 + 1) * (args.sight_dim * 2 + 1)) * 3])
    return training_x, validation_x, test_x, training_y, validation_y, test_y
def train_model(args):
    """Train a small dense binary classifier on flattened image sections.

    args: parsed CLI namespace; uses args.sight_dim (via prepareData) to
    size the input layer.
    Returns the trained keras model after printing its test-set evaluation.
    """
    train_x, val_x, test_x, train_y, val_y, test_y = prepareData(args)
    print(train_x.shape, val_x.shape, test_x.shape)
    # Input width must match the flattened section size from prepareData;
    # the previous hard-coded 75 was only correct for sight_dim == 2.
    input_dim = ((args.sight_dim * 2 + 1) ** 2) * 3
    model = models.Sequential()
    model.add(layers.Dense(64, activation='relu', input_shape=[input_dim]))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(train_x, train_y, epochs=300, batch_size=128,
                        validation_data=(val_x, val_y))
    results = model.evaluate(test_x, test_y)
    print(results)
    return model
#model = train_model(args) #This model is pre-trained
|
WORKLOAD_TO_CREDIT = {'36h': 1, '72h': 2, '108h': 3}


class Materia(object):
    """A course ("materia"): id, name, workload string, and prerequisites.

    NOTE(review): the class attribute `ref_database` and the static method
    of the same name collide: the `def` overwrites the attribute, and the
    first call to `Materia.ref_database(ref)` then overwrites the method
    with the ref itself. This works for the set-once usage in `__main__`,
    but the setter is gone afterwards. Kept as-is for compatibility.
    """
    ref_database = None

    @staticmethod
    def ref_database(ref):
        """Install the database object that `find` searches (see NOTE)."""
        Materia.ref_database = ref

    @staticmethod
    def find(obj, minimum=None, limit=None, detail=False, fast=False):
        """Fuzzy-find a Materia whose alternative names best match `obj`.

        minimum: drop candidates scoring below this ratio.
        limit  : return the top-`limit` [materia, score] pairs as a list.
        detail : return [materia, score] instead of just the materia.
        fast   : stop scanning at the first exact (ratio == 100) match.
        Raises ValueError when the two best candidates tie.
        """
        from auxiliar.word import ratio
        if not fast:
            result = [[m, max([ratio(a, obj) for a in m.alternativos])]
                      for m in Materia.ref_database.materias.values()]
        else:
            result = []
            for m in Materia.ref_database.materias.values():
                r = [m, max([ratio(a, obj) for a in m.alternativos])]
                result += [r]
                if r[1] == 100:  # exact match: stop scanning
                    break
        result.sort(key=lambda x: x[1], reverse=True)
        if minimum is not None:
            result = list(filter(lambda x: x[1] >= minimum, result))
        if limit is not None:
            # BUG FIX: the old slice `limit if limit >= len(result) else
            # len(result)` never truncated anything; cap at `limit`.
            return result[:limit]
        if len(result) == 0:
            return None
        elif len(result) == 1:
            if detail:
                return result[0]
            return result[0][0]
        else:
            if result[0][1] == result[1][1]:
                raise ValueError('Multiple correspondences for: <{}>'.format(obj))
            else:
                if detail:
                    return result[0]
                return result[0][0]

    def __init__(self, _id, nome, carga):
        super().__init__()
        self.id = _id
        self.nome = nome
        self.alternativos = []     # alternative names used for fuzzy matching
        self.carga = carga         # workload string, e.g. '36h'
        self._ids_requisites = []
        self._objs_requisites = []
        self._ref_materias = None  # id -> Materia map (see ref_materiais)
        self.extra = None

    def __repr__(self):
        return "({}) {}".format(self.id, self.nome)

    def __eq__(self, other):
        return self.id == other.id

    def __hash__(self):
        # defining __eq__ alone made instances unhashable; hash on the
        # same identity key so dict/set membership keeps working
        return hash(self.id)

    def add_requisite(self, other):
        """Register a prerequisite, given by id (int/str) or as a Materia."""
        if isinstance(other, (int, str)):
            self._ids_requisites.append(other)
        else:
            self._objs_requisites.append(other)
            self._ids_requisites.append(other.id)

    def ref_materiais(self, ref):
        """Install the id -> Materia mapping used to resolve requisites."""
        self._ref_materias = ref

    @property
    def requisites(self) -> list:
        """Prerequisites as Materia objects (resolved lazily when a
        reference map is installed)."""
        if self._ref_materias is not None:
            self._objs_requisites = [self._ref_materias[i] for i in self._ids_requisites]
        return self._objs_requisites

    @property
    def credit(self):
        """Credit count derived from the workload string."""
        return WORKLOAD_TO_CREDIT[self.carga]
if __name__ == "__main__":
    from access.bridge import Bridge
    database = Bridge()
    database.sync()
    # installs the database on the class (this also overwrites the setter
    # itself; see the ref_database definition above)
    Materia.ref_database(database)
    # smoke test: fuzzy-find one course by name
    print(Materia.find('Teoria Numerica e Criptografia'))
    print('')
# Packages
import pandas as pd
import numpy as np
import math
import itertools
# Options
pd.set_option("display.max_rows", 100)
# Read in data (aggregated)
# NOTE(review): hard-coded absolute Windows path — only runs on the
# author's machine.
iowa = pd.read_csv('C:/Users/Mason/Desktop/Git Repositories/DATA401/Project1/agg_data.csv')
# Clean data
iowa = iowa.dropna() # drop all NA values
# Choosing variables that make sense:
all_features = ['County', 'Year', 'Bottles Sold', 'Sale (Dollars)', 'Population']
response_var = 'Volume Sold (Liters)'
# module-level response vector, read directly by the model-building functions
y = iowa[response_var].to_numpy()
# Dummifying categorical variables
numeric_vars = ['Bottles Sold', 'Sale (Dollars)', 'Population']
categorical_vars = ['County', 'Year']
# data = iowa[numeric_vars]
#
# for variable in categorical_vars:
# dummified = pd.get_dummies(iowa[variable])
# data = pd.concat([data, dummified], axis=1)
# model is defined as [[list of features], [B_hat vector], p, AIC, BIC]
models = []
def get_X_matrix(feature_list, iowa, categorical_vars):
    """Arguments:
    feature_list: a list of features being used in the model
    iowa: the original data
    categorical_vars: a list of which variables in the original data are categorical
    Returns: a 2D numpy array, with categorical variables turned into indicators"""
    design = iowa[feature_list]
    for column in feature_list:
        if column not in categorical_vars:
            continue
        # expand the categorical column into one indicator per level,
        # then drop the original column
        indicators = pd.get_dummies(design[column])
        design = pd.concat([design, indicators], axis=1)
        remaining = [c for c in design.columns if c != column]
        design = design[remaining]
    return design.to_numpy()
def get_num_parameters(feature_list, categorical_vars, iowa):
    """Arguments:
    feature_list: a list of column names being used in the model
    categorical_vars: a list of which variables in the original data are categorical
    iowa: the original data
    Returns: an integer p, equal to the number of parameters being estimated in the model"""
    total = 1  # intercept
    for column in feature_list:
        if column in categorical_vars:
            # one indicator coefficient per level, minus the baseline
            total += len(iowa[column].unique()) - 1
        else:
            total += 1  # a single slope for a numeric feature
    return total
def get_Beta_vector(X, y):
    """Arguments:
    X: the design matrix as a numpy array
    y: the response variable as a 1 x n numpy array
    Returns: a numpy array of length p of coefficients for all predictor variables"""
    X_t = np.transpose(X)
    # Solve the normal equations (X'X) b = X'y directly instead of forming
    # the explicit inverse — same result, numerically more stable/cheaper.
    return np.linalg.solve(X_t.dot(X), X_t.dot(y))
def get_SSE(y, y_hat):
    """Arguments:
    y: the response variable as a 1 x n numpy array
    y_hat: the predicted values of the response variable as a 1 x n numpy array
    Returns: the Sum of Squared Error, a float"""
    # vectorized: one C-level pass instead of a Python loop over indices
    residuals = np.asarray(y) - np.asarray(y_hat)
    return float(np.sum(residuals ** 2))
def get_AIC(y, y_hat, p):
    """Arguments:
    y: the response variable as a 1 x n numpy array
    y_hat: the predicted values of the response variable as a 1 x n numpy array
    p: the number of parameters being estimated in the model, an integer
    Returns: the AIC, a float

    NOTE(review): this is the course's SSE + 2p variant, not the
    likelihood-based AIC definition — confirm intended.
    """
    penalty = 2 * p
    return get_SSE(y, y_hat) + penalty
def get_BIC(y, y_hat, p):
    """Arguments:
    y: the response variable as a 1 x n numpy array
    y_hat: the predicted values of the response variable as a 1 x n numpy array
    p: the number of parameters being estimated in the model, an integer
    Returns: the BIC, a float"""
    sample_size = len(y)
    return get_SSE(y, y_hat) + p * math.log(sample_size)
def print_models(models):
    """Pretty-print each model in a readable form.

    Each model is [feature_list, B_hat vector, p, AIC, BIC]; the
    coefficient vector is deliberately not printed.
    """
    for feats, _betas, p, aic, bic in models:
        print("Features:", ", ".join(feats))
        print("p =", p)
        print("AIC =", aic)
        print("BIC =", bic)
        print("-" * 20)
def get_forward_stepwise_models(all_features, categorical_vars, iowa):
    """Fit the nested sequence of models that adds features in list order.

    all_features    : a list of all feature names
    categorical_vars: which variables in the original data are categorical
    iowa            : the original data frame
    Returns a list of models, each [feature_list, B_hat, p, AIC, BIC].
    NOTE(review): reads the module-level response vector `y` directly.
    """
    fitted = []
    for count in range(1, len(all_features) + 1):
        chosen = all_features[:count]  # forward order: first `count` features
        X = get_X_matrix(chosen, iowa, categorical_vars)
        betas = get_Beta_vector(X, y)
        n_params = get_num_parameters(chosen, categorical_vars, iowa)
        predictions = X.dot(betas)
        fitted.append([chosen,
                       betas,
                       n_params,
                       get_AIC(y, predictions, n_params),
                       get_BIC(y, predictions, n_params)])
    return fitted
def get_backward_stepwise_models(all_features, categorical_vars, iowa):
    """Fit models removing features from the end of the list, one at a time.

    all_features    : a list of all feature names
    categorical_vars: which variables in the original data are categorical
    iowa            : the original data frame
    Returns a list of models, each [feature_list, B_hat, p, AIC, BIC].
    NOTE(review): reads the module-level response vector `y` directly.
    """
    fitted = []
    for count in range(len(all_features), 0, -1):
        chosen = all_features[:count]  # largest model first, shrinking
        X = get_X_matrix(chosen, iowa, categorical_vars)
        betas = get_Beta_vector(X, y)
        n_params = get_num_parameters(chosen, categorical_vars, iowa)
        predictions = X.dot(betas)
        fitted.append([chosen,
                       betas,
                       n_params,
                       get_AIC(y, predictions, n_params),
                       get_BIC(y, predictions, n_params)])
    return fitted
def get_best_subsets_models(all_features, categorical_vars, iowa):
    """Fit one model per non-empty subset of all_features.

    all_features    : a list of all feature names
    categorical_vars: which variables in the original data are categorical
    iowa            : the original data frame
    Returns a list of models, each [feature_list, B_hat, p, AIC, BIC].
    NOTE(review): reads the module-level response vector `y` directly.
    """
    fitted = []
    for size in range(1, len(all_features) + 1):
        for subset in itertools.combinations(all_features, size):
            chosen = list(subset)
            X = get_X_matrix(chosen, iowa, categorical_vars)
            betas = get_Beta_vector(X, y)
            n_params = get_num_parameters(chosen, categorical_vars, iowa)
            predictions = X.dot(betas)
            fitted.append([chosen,
                           betas,
                           n_params,
                           get_AIC(y, predictions, n_params),
                           get_BIC(y, predictions, n_params)])
    return fitted
models = get_best_subsets_models(all_features, categorical_vars, iowa)
print_models(models)
|
from db_schema import *
def get_user_by_session(cookie_session):
    '''
    Return the User that owns the given session cookie hash.

    Looks up the Session document by its hash, then the User by the
    session's userID field.
    NOTE(review): raises the ODM's DoesNotExist when either lookup fails —
    callers receive the exception, not None. Confirm that is intended.
    '''
    session = Session.objects.get(session_hash=cookie_session)
    user = User.objects.get(id=session['userID'])
    return user
def get_restaurants(session, category, distance, lat, lon):
    '''
    Returns the closest restaurants to the user
    '''
    # "neutral" sessions use the caller-supplied coordinates; otherwise
    # look the location up from the session's user
    if session != "neutral":
        user = get_user_by_session(session)
        lon, lat = get_user_location(user)
    print(lon,lat)
    sphere = [[lon, lat], distance/6371.0]
    if category == 'all':
        return Restaurant.objects(location__geo_within_sphere=sphere)
    return Restaurant.objects(location__geo_within_sphere=sphere, category_code=category)
def get_locations_by_user(userID):
    """Return the Location document for userID, or [] when the lookup fails.

    Deliberately best-effort: missing/duplicate documents yield [].
    """
    try:
        return Location.objects.get(userID=userID)
    except Exception:
        # was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort fallback but narrower
        return []
def get_user_location(user):
    """Return the coordinate pair stored on the user's current_location
    (callers unpack it as (lon, lat))."""
    current = user['current_location']
    return current['coordinates']
if __name__ == '__main__':
    # NOTE(review): imported for its side effect of opening the database
    # connection — confirm.
    from connect_db import *
    #print(get_user_by_session("20b7d99251fe04dd2ef62728a876da0f"))
    # smoke test: fetch one restaurant by primary key and print its image URL
    r = Restaurant.objects.get(pk="5b690b4373c32e764246268c")
    print(r['img_url'])
# Cost of one animal of each kind.
chicken = 23
goat = 678
pig = 1296
cow = 3848
sheep = 6769
animals = ['chicken', 'goat', 'pig', 'cow', 'sheep']
a = int(input())
# Spend the money on the most expensive affordable animal; print how many
# fit in the budget, pluralizing when more than one.
for price, singular, plural in (
    (sheep, 'sheep', 'sheep'),
    (cow, 'cow', 'cows'),
    (pig, 'pig', 'pigs'),
    (goat, 'goat', 'goats'),
    (chicken, 'chicken', 'chickens'),
):
    if a >= price:
        count = a // price
        print('{} {}'.format(count, singular if count < 2 else plural))
        break
else:
    # cannot afford even one chicken
    print('None')
|
#!/usr/bin/env python3
# Key-code constants for non-printing keys. The values appear to match the
# Arduino Keyboard library's Keyboard.h HID key codes — TODO confirm against
# the consumer of this module.

# Modifier keys
KEY_LEFT_CTRL = 128
KEY_LEFT_SHIFT = 129
KEY_LEFT_ALT = 130
KEY_LEFT_GUI = 131
KEY_RIGHT_CTRL = 132
KEY_RIGHT_SHIFT = 133
KEY_RIGHT_ALT = 134
KEY_RIGHT_GUI = 135

# Arrow keys
KEY_UP_ARROW = 218
KEY_DOWN_ARROW = 217
KEY_LEFT_ARROW = 216
KEY_RIGHT_ARROW = 215

# Editing / navigation keys
KEY_BACKSPACE = 178
KEY_TAB = 179
KEY_RETURN = 176
KEY_ESC = 177
KEY_INSERT = 209
KEY_DELETE = 212
KEY_PAGE_UP = 211
KEY_PAGE_DOWN = 214
KEY_HOME = 210
KEY_END = 213
KEY_CAPS_LOCK = 193

# Function keys F1-F24
KEY_F1 = 194
KEY_F2 = 195
KEY_F3 = 196
KEY_F4 = 197
KEY_F5 = 198
KEY_F6 = 199
KEY_F7 = 200
KEY_F8 = 201
KEY_F9 = 202
KEY_F10 = 203
KEY_F11 = 204
KEY_F12 = 205
KEY_F13 = 240
KEY_F14 = 241
KEY_F15 = 242
KEY_F16 = 243
KEY_F17 = 244
KEY_F18 = 245
KEY_F19 = 246
KEY_F20 = 247
KEY_F21 = 248
KEY_F22 = 249
KEY_F23 = 250
KEY_F24 = 251
|
#!/bin/env python3
## \file autogallery.py Generate static HTML browsable gallery from files hieararchy
from WebpageUtils import *
import os
import glob
import sys
from optparse import OptionParser
import datetime
def pdfs_to_svgs(basedir, do_gzip=True):
    """Convert every PDF directly under basedir to SVG (gzipped to .svgz when
    do_gzip), skipping files whose converted output is already up to date."""
    import shlex  # local import: quote paths against spaces/shell metacharacters
    for f in glob.glob(basedir + "/*.pdf"):
        fsvg = f[:-3] + "svg"
        fsvgz = fsvg + ('z' if do_gzip else '')
        # regenerate when the output is missing or older than the source PDF
        makesvg = not os.path.exists(fsvgz)
        if not makesvg:
            makesvg = os.stat(fsvgz).st_mtime < os.stat(f).st_mtime
        if makesvg:
            # previously the raw paths were interpolated unquoted, which
            # broke on spaces and allowed shell metacharacters through
            os.system("pdf2svg %s %s" % (shlex.quote(f), shlex.quote(fsvg)))
            if do_gzip:
                q = shlex.quote(fsvg)
                os.system("gzip %s; mv %s.gz %sz" % (q, q, q))
def makegallery(basedir, css=None, logo=None):
    """Write an index.html into every directory under basedir, embedding SVG
    figures and linking sub-directories and pdf/txt/tsv files.

    css : optional stylesheet copied to basedir/sitestyle.css
    logo: optional image copied to basedir/logo.<ext>
    NOTE(review): paths are interpolated unquoted into `cp` shell commands —
    breaks on spaces; confirm inputs are controlled.
    """
    if css: os.system("cp " + css + " " + basedir + "/sitestyle.css")
    if logo: os.system("cp %s %s/logo.%s" % (logo, basedir, logo.split('.')[-1]))
    # first pass: ensure every PDF has an up-to-date SVG next to it
    for path, ds, fs in os.walk(basedir):
        pdfs_to_svgs(path)
    # second pass: emit one index.html per directory
    for path, ds, fs in os.walk(basedir):
        pname = path.strip("/").split("/")[-1]
        Page, b = makePageStructure(pname, css="/sitestyle.css")
        h1 = addTag(b, "h1", {}, pname)
        # navigation links ([Up] is omitted at the gallery root)
        if path != basedir: addTag(h1, "a", {"href": "../index.html"}, "[Up]")
        addTag(h1, "a", {"href": "/index.html"}, "[Home]")
        linklist = []
        ds.sort()
        for d in ds:
            li = ET.Element("li")
            addTag(li, "a", {"href": "%s/index.html" % d}, d)
            linklist.append(li)
        fs.sort()
        for f in fs:
            sfx = f.split(".")[-1]
            pfx = f[:-len(sfx) - 1]
            if pfx == "logo": continue  # the site logo is not gallery content
            if sfx in ["svg", "svgz"]:
                # embed the image with a caption linking its source PDF
                fg = addTag(b, "figure", {"style": "display:inline-block"})
                addTag(fg, "img", {"src": f, "class": "lightbg"})
                cc = []
                if os.path.exists(path + "/" + pfx + ".pdf"):
                    cc.append(makeLink(pfx + ".pdf", pfx + ".pdf"))
                else: cc.append(f + " ")
                cc.append("generated " + datetime.datetime.fromtimestamp(os.stat(path + "/" + f).st_mtime).strftime('%a, %b %-d %-H:%M:%S'))
                addTag(fg, "figcaption", {}, cc)
            if sfx in ["pdf", "txt", "tsv"]:
                # PDFs already shown via their SVG are not listed again
                if sfx == "pdf" and os.path.exists(path + "/" + pfx + ".svgz"): continue
                li = ET.Element("li")
                addTag(li, "a", {"href": f}, f + ", generated " + datetime.datetime.fromtimestamp(os.stat(path + "/" + f).st_mtime).strftime('%a, %b %-d %-H:%M:%S'))
                linklist.append(li)
        if linklist: addTag(b, "ul", {}, linklist)
        open(path + "/index.html", "w").write("<!DOCTYPE html>\n" + prettystring(Page))
if __name__ == "__main__":
    # NOTE(review): optparse is deprecated in favor of argparse; kept as-is.
    parser = OptionParser()
    parser.add_option("--dir", help="base content directory")
    parser.add_option("--css", default="web_interface/sitestyle.css", help="css file to copy to base")
    parser.add_option("--logo", help="logo.svg file to copy to base")
    options, args = parser.parse_args()
    # nothing happens unless --dir was given
    if options.dir: makegallery(options.dir, options.css, options.logo)
|
from PyObjCTools.TestSupport import TestCase
import LocalAuthenticationEmbeddedUI
class TestCallableMetadata(TestCase):
    """PyObjC framework-binding smoke test."""

    def test_callable_metadata_is_sane(self):
        # validates the bridge metadata of every callable exposed by the
        # LocalAuthenticationEmbeddedUI framework binding
        self.assertCallableMetadataIsSane(LocalAuthenticationEmbeddedUI)
|
"""."""
from django.contrib.auth.backends import BaseBackend
from user.models import Users
class Backend(BaseBackend):
    """Authentication backend checking credentials against user.models.Users."""

    def authenticate(self, request, username=None, password=None):
        """Return the matching user when the password verifies, else None."""
        try:
            user = Users.objects.get(username=username)
        except Users.DoesNotExist:
            return None
        if user.check_password(password):
            return user
        return None

    def get_user(self, user_id):
        """Return the Users instance for user_id, or None.

        Django's auth machinery requires the user OBJECT here; the previous
        version returned user.id, which broke request.user resolution.
        """
        try:
            return Users.objects.get(id=user_id)
        except Users.DoesNotExist:
            return None
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # landing page
    url(r'^$', views.render_index),
    # NOTE(review): several patterns below are not anchored with `$`, so
    # e.g. 'create-post/extra' also matches — confirm this is intended.
    url(r'^create-post/', views.render_edit_blog_post),
    # edit an existing post; the captured group is the post id
    url(r'^edit-post/([0-9]+)', views.render_edit_blog_post),
    url(r'^posts/([0-9]+)/?$', views.render_blog_post),
    # API Views
    url(r'^api/posts/([0-9]+)?', views.BlogPostViewSet.as_view()),
]
# Customize this starter script by adding code
# to the run_script function. See the Help for
# complete information on how to create a script
# and use Script Runner.
from django.db.models.lookups import Year
""" Your Description of the script goes here """
# Some commonly used imports
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
def run_script(iface):
print 'hi'
prefix = '/home/zia/Documents/Test/QGIS_Python_Book/ms_rails_mstm'
rails = QgsVectorLayer(prefix + 'ms_rails_mstm.shp', 'Railways', 'ogr')
rules = (
('heavily used', '"DEN09CODE" > 3', 'red', (0, 6000000)),
('moderatly used', '"DEN09CODE" > 1 AND "DEN09CODE" < 4', 'orange', (0, 1500000)),
('lightly used', '"DEN09CODE" < 2', 'grey', (0, 250000)),
)
sym_rails = QgsSymbolV2.defaultSymbol(rails.geometryType())
rend_rails = QgsRuleBasedRendererV2(sym_rails)
root_rule = rend_rails.rootRule()
for label, exp, color, scale in rules:
#create a clone of the default rule
rule = root_rule.children()[0].clone()
#set the label exp and color
rule.setLabel(label)
rule.setFilterExpression(exp)
rule.symbol().SetColor(QColor(color))
#set the scale limits if they have been specified
if scale is not None:
rule.setScaleMinDenom(scale[0])
rule.setScaleMaxDenom(scale[1])
#append the rule to the list of rules
root_rule.appendChild(rule)
root_rule.removeChildAt(0)
rails.setRendererV2(rend_rails)
jax = QgsVectorLayer('/home/zia/Documents/Test/QGIS_Python_Book/jackson/jackson.shp', 'Jackson', 'ogr')
jax_style = {}
jax_style['color'] = '#ffff00'
jax_style['name'] = 'regular_star'
jax_style['outline'] = '#000000'
jax_style['outline-width'] = '1'
jax_style['size'] = '8'
sym_jax = QgsSimpleMarkerSymbolLayerV2.create(jax_style)
jax.rendererV2().symbols()[0].changeSymbolLayer(0, sym_jax)
ms = QgsVectorLayer('/home/zia/Documents/Test/QGIS_Python_Book/Mississippi/mississippi.shp', 'Missi', 'ogr')
ms_style = {}
ms_style['color'] = '#F7F5EB'
sym_ms = QgsSimpleFillSymbolLayerV2.create(ms_style)
ms.rendererV2().symbols()[0].changeSymbolsLayer(0, sym_ms)
QgsMapLayerRegistry.instance().addMapLayers([jax, rails, ms])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-05 20:22
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated squashed migration: creates the BountiesTimeline
    per-day analytics snapshot (counts, rates and cumulative totals).
    Generated code — edit only via new migrations."""

    # squashes migrations 0001-0006 of the analytics app
    replaces = [('analytics', '0001_initial'), ('analytics', '0002_auto_20180502_0054'), ('analytics', '0003_auto_20180503_0305'), ('analytics', '0004_auto_20180503_0342'), ('analytics', '0005_bountiestimeline_schema'), ('analytics', '0006_auto_20180504_0536')]
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='BountiesTimeline',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateField()),
                ('bounties_issued', models.PositiveIntegerField(default=0)),
                ('fulfillments_submitted', models.PositiveIntegerField(default=0)),
                ('fulfillments_accepted', models.PositiveIntegerField(default=0)),
                ('fulfillments_pending_acceptance', models.PositiveIntegerField(default=0)),
                ('fulfillment_acceptance_rate', models.FloatField(default=0)),
                ('bounty_fulfilled_rate', models.FloatField(default=0)),
                ('avg_fulfiller_acceptance_rate', models.FloatField(default=0)),
                ('avg_fulfillment_amount', models.FloatField(default=0)),
                ('total_fulfillment_amount', models.DecimalField(decimal_places=0, default=0, max_digits=64)),
                ('bounty_draft', models.PositiveIntegerField(default=0)),
                ('bounty_active', models.PositiveIntegerField(default=0)),
                ('bounty_completed', models.PositiveIntegerField(default=0)),
                ('bounty_expired', models.PositiveIntegerField(default=0)),
                ('bounty_dead', models.PositiveIntegerField(default=0)),
                ('bounties_issued_cum', models.PositiveIntegerField(default=0)),
                ('fulfillments_accepted_cum', models.PositiveIntegerField(default=0)),
                ('fulfillments_submitted_cum', models.PositiveIntegerField(default=0)),
                ('platform', models.CharField(blank=True, max_length=64)),
            ],
        ),
    ]
|
# Generated by Django 2.0 on 2018-11-14 05:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds defaults to passport email/tel fields.
    The verbose_name strings are Chinese UI labels ("user email", "phone")
    and must stay as-is. Generated code — edit only via new migrations."""

    dependencies = [
        ('users', '0002_auto_20181114_1136'),
    ]
    operations = [
        migrations.AlterField(
            model_name='passport',
            name='email',
            field=models.EmailField(default='', max_length=254, verbose_name='用户邮箱'),
        ),
        migrations.AlterField(
            model_name='passport',
            name='tel',
            field=models.CharField(default='', max_length=11, verbose_name='电话'),
        ),
    ]
|
#basic stuff
import json
#date-time stuff
import datetime
#data and geo stuff
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from shapely.ops import unary_union
# data access
import mysql.connector
def create_dateslist(startdate, enddate):
    '''
    Returns a list of dates
    Starting 1 day before the startdate and ending 1 day after the enddate.
    This approach is needed as we convert UTC to local time later on
    '''
    one_day = datetime.timedelta(days=1)
    first = datetime.datetime.strptime(startdate, "%Y-%m-%d") - one_day
    last = datetime.datetime.strptime(enddate, "%Y-%m-%d") + one_day
    total_days = (last - first).days
    return [
        [d.year, d.month, d.day]
        for d in (first + one_day * i for i in range(total_days + 1))
    ]
def create_fileslist(retrieval_set,
                     path_to_geojson_files):
    '''
    Build the list of GeoJSON file paths named by a retrieval-set json file.

    retrieval_set        : filename of a set description under <base>/sets/
    path_to_geojson_files: base folder. NOTE(review): paths are joined as
                           base + entry + "/" + file with no separator after
                           base — callers appear to pass a trailing slash;
                           confirm.
    Returns a list of single-item lists, one per file (shape kept for
    backward compatibility with existing callers).
    '''
    # Read sets from json
    with open(path_to_geojson_files + "/sets/" + retrieval_set, 'r') as jsonf:
        sets = json.load(jsonf)
    files_list = []
    for entry_name, entry in sets.items():
        if entry_name == 'description':
            continue
        # BUG FIX: the old code looped over every key of the entry and
        # re-appended the whole 'files' list each time, duplicating every
        # path whenever an entry had more than one key.
        for fname in entry.get('files', []):
            files_list.append([path_to_geojson_files + entry_name + "/" + fname])
    return files_list
def create_boundingbox(files_wanted_list):
    '''
    Compute a single bounding box covering the geometries of every GeoJSON
    file in files_wanted_list.

    Returns:
        boundingbox_geojson : the box as a GeoJSON string (GeoSeries.to_json)
        latlonmminmax_list  : [min_lat, min_lon, max_lat, max_lon]

    NOTE(review): raises NameError when files_wanted_list is empty
    (min_lon etc. are never initialized) — confirm callers guarantee at
    least one file.
    '''
    lat_lon_init = False
    # loop trough all files in folder
    for filename in files_wanted_list:
        # entries are single-item lists; strip the "['...']" repr artifacts
        fname = str(filename).replace("['", '').replace("']", '')
        df_location = gpd.read_file(fname)
        # https://gis.stackexchange.com/questions/266730/filter-by-bounding-box-in-geopandas
        a_polygon = Polygon(unary_union(df_location['geometry']))
        bbox = a_polygon.bounds  # shapely order: (minx, miny, maxx, maxy) = (min_lon, min_lat, max_lon, max_lat)
        # initialize running extremes from the first file
        if lat_lon_init != True:
            min_lon = bbox[0]
            min_lat = bbox[1]
            max_lon = bbox[2]
            max_lat = bbox[3]
            lat_lon_init = True
        # (lat,lon)
        # checks are only valid for NL. To make it work for rest of world, additional tests are needed.
        if min_lon > bbox[0]: min_lon = bbox[0]
        if min_lat > bbox[1]: min_lat = bbox[1]
        if max_lon < bbox[2]: max_lon = bbox[2]
        if max_lat < bbox[3]: max_lat = bbox[3]
        latlonmminmax_list = [min_lat, min_lon, max_lat, max_lon]
    # Create the bounding box to be able to check it
    lb = Point(min_lon, min_lat)
    rb = Point(max_lon, min_lat)
    lo = Point(min_lon, max_lat)
    ro = Point(max_lon, max_lat)
    pointList = [lb, lo, ro, rb]
    poly = Polygon([[p.x, p.y] for p in pointList])
    # convert polygon to geojson format
    boundingbox_geojson = gpd.GeoSeries([poly]).to_json()
    # display(boundingbox_geojson)
    return boundingbox_geojson, latlonmminmax_list
def retrieve_ships_from_repository(credentialsfile,
                                   which_db, which_day,
                                   lat_min, lat_max, lon_min, lon_max,
                                   which_locations,
                                   tempfile="", verbose=False):
    '''
    Fetch one day of AIS ship records inside a bounding box from MySQL.

    credentialsfile: json file with database access credentials
    which_db : the database to use (a year_month string)
    which_day : the day in that month (a table in the database)
    lat_min / lat_max : latitude bounds in degrees
    lon_min / lon_max : longitude bounds in degrees
    which_locations: label stored with each row as `locations_set`
    tempfile : when non-empty, folder + filename for an .xlsx dump
    verbose : show some additional info. NOTE(review): uses IPython's
              `display`, which only exists inside a notebook — confirm.
    Returns a pandas DataFrame of the query result.
    '''
    if verbose: display(credentialsfile)
    # Read settings from json file
    with open(credentialsfile, 'r') as jsonf:
        SETTINGS = json.load(jsonf)
    # settings for db access
    db_host = SETTINGS['database']['host']
    db_user = SETTINGS['database']['user']
    db_pass = SETTINGS['database']['password']
    cnxn = mysql.connector.connect(user=db_user, password=db_pass,
                                   host=db_host, db=which_db)
    try:
        # NOTE(review): values are interpolated into the SQL text; that is
        # tolerable for trusted numeric config, but switch to query
        # parameters if any of these inputs can be untrusted (the table
        # name still cannot be parameterized).
        querypart = ("SELECT mmsi, name, callsign, type " +
                     ", longitude AS longitude_val" +
                     ", latitude AS latitude_val" +
                     ", cog, sog, heading, navstat, draught" +
                     ", time AS ships_time_UTC" +
                     ", '" + which_locations + "' AS locations_set" +
                     " FROM " + "`" + str(which_day).zfill(2) + "`" +
                     " WHERE (longitude >= " + str(lon_min) + " AND longitude <= " + str(lon_max) + ")" +
                     " AND (latitude >= " + str(lat_min) + " AND latitude <= " + str(lat_max) + ")" +
                     ";")
        if verbose: display(querypart)
        df_from_db = pd.read_sql_query(querypart, cnxn)
        if verbose: display(tempfile)
        if tempfile != "": df_from_db.to_excel(tempfile)
        if verbose: display(df_from_db)
        # DataFrame.append was removed in pandas 2.0; concat replaces it
        ships_from_db_df = pd.concat([pd.DataFrame(), df_from_db], ignore_index=True)
        if verbose: display(len(ships_from_db_df))
    finally:
        # close database connection even when the query fails
        cnxn.close()
    return ships_from_db_df
def create_ais_baseline_df(dates_wanted,
                           db_credentials,
                           min_lat,
                           max_lat,
                           min_lon,
                           max_lon,
                           retrieval_set,
                           output_temp,
                           verbose=False):
    '''
    Retrieve and de-duplicate AIS records for every [year, month, day] in
    dates_wanted, issuing one database query per day.

    dates_wanted  : list of [year, month, day] (see create_dateslist)
    db_credentials: path to the credentials json
    min_lat..max_lon: bounding box in degrees
    retrieval_set : label passed through to the repository query
    output_temp   : folder/prefix for per-day .xlsx dumps
    verbose : show some additional info (notebook `display`)
    Returns one combined, duplicate-free pandas DataFrame.
    '''
    df_ais_baseline = pd.DataFrame()
    # loop trough all the dates in the list
    for counter, (year, month, day) in enumerate(dates_wanted):
        if verbose: display(year, month, day)
        rows_before = len(df_ais_baseline)
        # database name is "<year>_<zero-padded month>"
        the_database = str(year) + "_" + str(month).zfill(2)
        if verbose: display('database :' + the_database)
        day_df = retrieve_ships_from_repository(
            credentialsfile=db_credentials,
            which_db=the_database,
            which_day=day,
            lat_min=min_lat,
            lat_max=max_lat,
            lon_min=min_lon,
            lon_max=max_lon,
            which_locations=retrieval_set,
            tempfile=output_temp + datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S") + "_" + retrieval_set + "_" + str(counter).zfill(3) + ".xlsx",
            verbose=verbose)
        # DataFrame.append was removed in pandas 2.0; concat replaces it
        df_ais_baseline = pd.concat([df_ais_baseline, day_df], ignore_index=True)
        if verbose: display("run: " + str(day).zfill(2) + "; length added: " + str(len(df_ais_baseline) - rows_before))
    # days overlap at the UTC/local boundary; drop duplicate rows
    df_ais_baseline.drop_duplicates(inplace=True)
    return df_ais_baseline
|
import hashlib
import json
from time import time
from urllib.parse import urlparse
from uuid import uuid4
import requests
from flask import Flask, jsonify, request
class Blockchain:
def __init__(self):
    self.current_transactions = []  # transactions waiting for the next block
    self.chain = []                 # the blockchain itself (list of block dicts)
    self.nodes = set()              # peer netlocs (see register_node)
    # Create the genesis block
    self.new_block(previous_hash='1', proof=100)
def register_node(self, address):
    """
    Add a new node to the list of nodes
    :param address: Address of node. Eg. 'http://192.168.0.5:5000'
    """
    # keep only the host:port part; set membership de-duplicates peers
    parsed_url = urlparse(address)
    self.nodes.add(parsed_url.netloc)
def valid_chain(self, chain):
"""
Determine if a given blockchain is valid
:param chain: A blockchain
:return: True if valid, False if not
"""
last_block = chain[0]
current_index = 1
while current_index < len(chain):
block = chain[current_index]
print(f'{last_block}')
print(f'{block}')
print("\n-----------\n")
# Check that the hash of the block is correct
if block['previous_hash'] != self.hash(last_block):
return False
# Check that the Proof of Work is correct
if not self.valid_proof(last_block['proof'], block['proof']):
return False
|
import tensorflow as tf
import tflearn
from keras.layers import Activation, Input, Concatenate, Conv2D, Dense, Reshape
from keras.models import Model
def determine_shape(input_amount, kernel_size, padding, stride):
    """Output length of a 1-D convolution: int((n - k + 2p)/s + 1)."""
    span = input_amount - kernel_size + 2 * padding
    return int(span / stride + 1)
class ActorNetwork(object):
"""
Input to the network is the state, output is the action
under a deterministic policy.
The output layer activation is a tanh to keep the action
between -action_bound and action_bound
"""
def __init__(self, sess, asset_features_shape, action_dim, action_bound, learning_rate, tau, batch_size):
self.sess = sess
self.asset_features_shape = asset_features_shape #[num_assets, history_length, num_features]
self.a_dim = action_dim # [num_assets]
self.action_bound = action_bound
self.learning_rate = learning_rate
self.tau = tau
self.batch_size = batch_size
# Actor Network
self.asset_inputs, self.portfolio_inputs, self.scaled_out = self.create_actor_network()
self.soft_out = Activation('softmax')(self.scaled_out)
self.network_params = tf.trainable_variables()
# Target Network
self.target_asset_inputs, self.target_portfolio_inputs, self.target_scaled_out = self.create_actor_network()
self.target_soft_out = Activation('softmax')(self.target_scaled_out)
self.target_network_params = tf.trainable_variables()[len(self.network_params):]
self.assign_target_network_params = \
[self.target_network_params[i].assign(self.network_params[i]) for i in range(len(self.target_network_params))]
# Op for periodically updating target network with online network
# weights
self.update_target_network_params = \
[self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) +
tf.multiply(self.target_network_params[i], 1. - self.tau))
for i in range(len(self.target_network_params))]
# This gradient will be provided by the critic network
self.action_gradient = tf.placeholder(tf.float32, [None, self.a_dim])
# Combine the gradients here
self.unnormalized_actor_gradients = tf.gradients(
self.soft_out, self.network_params, -self.action_gradient)
self.actor_gradients = list(map(lambda x: tf.div(x, self.batch_size), self.unnormalized_actor_gradients))
# Optimization Op
self.optimize = tf.train.AdamOptimizer(self.learning_rate).\
apply_gradients(zip(self.actor_gradients, self.network_params))
self.num_trainable_vars = len(
self.network_params) + len(self.target_network_params)
def create_actor_network(self):
    """Build the actor graph: conv feature extractor over per-asset history,
    with the current portfolio weights appended as an extra feature channel.

    Returns:
        (asset_inputs, portfolio_inputs, net): the two Input tensors and the
        un-normalised per-asset output of shape [batch_size, a_dim].
    """
    # Fix: removed leftover debug print() calls of intermediate tensor shapes.
    asset_inputs = Input(shape=self.asset_features_shape)  # [batch_size, num_assets, history_length, features]
    portfolio_inputs = Input(shape=[self.a_dim])  # current portfolio weight per asset
    portfolio_inputs_reshaped = Reshape((self.a_dim, 1, 1))(portfolio_inputs)
    # Width-3 kernel slides over the history axis of each asset independently.
    net = Conv2D(filters=3, kernel_size=[1, 3])(asset_inputs)
    net = Activation('relu')(net)
    # Second conv spans the whole remaining history axis, collapsing it to 1.
    net = Conv2D(filters=20, kernel_size=[1, determine_shape(input_amount=self.asset_features_shape[1],
                                                             kernel_size=3,
                                                             padding=0,
                                                             stride=1)])(net)  # [batch_size, num_assets, 1, 20]
    net = Activation('relu')(net)
    # Append the current portfolio weight as a 21st channel per asset.
    net = Concatenate(axis=-1)([portfolio_inputs_reshaped, net])
    net = Conv2D(filters=1, kernel_size=[1, 1])(net)  # [batch_size, num_assets, 1, 1]
    net = Reshape((self.a_dim,))(net)
    return asset_inputs, portfolio_inputs, net
def train(self, asset_inputs, portfolio_inputs, a_gradient):
    """Run one actor optimisation step on the given minibatch.

    ``a_gradient`` is dQ/da supplied by the critic network.
    """
    feed = {
        self.asset_inputs: asset_inputs,
        self.portfolio_inputs: portfolio_inputs,
        self.action_gradient: a_gradient,
    }
    self.sess.run(self.optimize, feed_dict=feed)
def predict(self, asset_inputs, portfolio_inputs):
    """Return the online network's scaled (pre-softmax) action output."""
    feed = {
        self.asset_inputs: asset_inputs,
        self.portfolio_inputs: portfolio_inputs,
    }
    return self.sess.run(self.scaled_out, feed_dict=feed)
def predict_target(self, asset_inputs, portfolio_inputs):
    """Return the target network's softmaxed action for the given inputs."""
    feed = {
        self.target_asset_inputs: asset_inputs,
        self.target_portfolio_inputs: portfolio_inputs,
    }
    return self.sess.run(self.target_soft_out, feed_dict=feed)
def assign_target_network(self):
    """Hard-copy the online weights into the target network (initialisation)."""
    copy_ops = self.assign_target_network_params
    self.sess.run(copy_ops)
def update_target_network(self):
    """Soft-update: theta_target <- tau*theta_online + (1 - tau)*theta_target."""
    soft_update_ops = self.update_target_network_params
    self.sess.run(soft_update_ops)
def get_num_trainable_vars(self):
    """Total variable count across the online and target networks."""
    total = self.num_trainable_vars
    return total
class CriticNetwork(object):
    """
    Input to the network is the state and action, output is Q(s,a).
    The action must be obtained from the output of the Actor network.
    """

    def __init__(self, sess, asset_features_shape, action_dim, learning_rate, tau, gamma, num_actor_vars):
        """Build online/target critic graphs plus the loss and update ops.

        ``num_actor_vars`` is how many trainable variables the actor created
        first; it is used to slice this critic's variables out of
        tf.trainable_variables().  NOTE: this makes construction order-dependent
        (the actor MUST be built before the critic).
        """
        self.sess = sess
        self.asset_features_shape = asset_features_shape
        self.a_dim = action_dim
        self.learning_rate = learning_rate
        self.tau = tau
        # Discount factor; stored for callers — not used inside this class.
        self.gamma = gamma
        # Create the critic network
        self.asset_inputs, self.portfolio_inputs, self.action, self.out = self.create_critic_network()
        self.network_params = tf.trainable_variables()[num_actor_vars:]
        # Target Network
        self.target_asset_inputs, self.target_portfolio_inputs, self.target_action, self.target_out = self.create_critic_network()
        self.target_network_params = tf.trainable_variables()[(len(self.network_params) + num_actor_vars):]
        # Hard copy of online weights into the target network.
        self.assign_target_network_params = \
            [self.target_network_params[i].assign(self.network_params[i]) for i in range(len(self.target_network_params))]
        # Op for periodically updating target network with online network
        # weights with regularization
        self.update_target_network_params = \
            [self.target_network_params[i].assign(tf.multiply(self.network_params[i], self.tau) \
                + tf.multiply(self.target_network_params[i], 1. - self.tau))
             for i in range(len(self.target_network_params))]
        # Network target (y_i)
        self.predicted_q_value = tf.placeholder(tf.float32, [None, 1])
        # weights for prioritized experience replay
        self.weights = tf.placeholder(tf.float32, [None, 1])
        # Define loss and optimization Op: importance-weighted MSE between the
        # TD target and the online Q estimate.
        self.loss = tf.square(self.predicted_q_value - self.out)
        self.loss = tf.multiply(self.weights, self.loss)
        self.loss = tf.reduce_mean(self.loss)
        #self.loss = tflearn.mean_square(self.predicted_q_value, self.out)
        self.optimize = tf.train.AdamOptimizer(
            self.learning_rate).minimize(self.loss)
        # Get the gradient of the net w.r.t. the action.
        # For each action in the minibatch (i.e., for each x in xs),
        # this will sum up the gradients of each critic output in the minibatch
        # w.r.t. that action. Each output is independent of all
        # actions except for one.
        self.action_grads = tf.gradients(self.out, self.action)

    def create_critic_network(self):
        """Build one critic graph; same conv trunk as the actor, with the
        proposed action concatenated before the final Q head.

        Returns:
            (asset_inputs, portfolio_inputs, action_inputs, out) where ``out``
            is the scalar Q-value per sample.
        """
        asset_inputs = Input(shape=self.asset_features_shape) # [batch_size, num_assets, history_length, features]
        portfolio_inputs = Input(shape=[self.a_dim]) # current portfolio weight per asset
        action_inputs = Input(shape=[self.a_dim])
        portfolio_inputs_reshaped = Reshape((self.a_dim, 1, 1))(portfolio_inputs)
        net = Conv2D(filters=3, kernel_size=[1, 3])(asset_inputs)
        net = Activation('relu')(net)
        # Second conv collapses the remaining history axis to length 1.
        net = Conv2D(filters=20, kernel_size=[1, determine_shape(input_amount=self.asset_features_shape[1],
                                                                 kernel_size=3,
                                                                 padding=0,
                                                                 stride=1)])(net) # [batch_size, num_assets, 1, 20]
        net = Activation('relu')(net)
        net = Concatenate(axis=-1)([portfolio_inputs_reshaped, net])
        net = Conv2D(filters=1, kernel_size=[1, 1])(net) # [batch_size, num_assets, 1, 1]
        net = Reshape((self.a_dim,))(net)
        # Join per-asset state features with the candidate action.
        net = Concatenate(axis=-1)([net, action_inputs]) # [batch_size, ]
        net = Reshape((2*self.a_dim,))(net)
        out = Dense(1)(net)
        return asset_inputs, portfolio_inputs, action_inputs, out

    def train(self, asset_inputs, portfolio_inputs, action, predicted_q_value, weights):
        """One critic optimisation step; returns [loss, Q estimates, None]."""
        return self.sess.run([self.loss, self.out, self.optimize], feed_dict={
            self.asset_inputs: asset_inputs,
            self.portfolio_inputs: portfolio_inputs,
            self.action: action,
            self.predicted_q_value: predicted_q_value,
            self.weights: weights
        })

    def predict(self, asset_inputs, portfolio_inputs, action):
        """Online-network Q(s, a) for the given batch."""
        return self.sess.run(self.out, feed_dict={
            self.asset_inputs: asset_inputs,
            self.portfolio_inputs: portfolio_inputs,
            self.action: action
        })

    def predict_target(self, asset_inputs, portfolio_inputs, action):
        """Target-network Q(s, a) for the given batch."""
        return self.sess.run(self.target_out, feed_dict={
            self.target_asset_inputs: asset_inputs,
            self.target_portfolio_inputs: portfolio_inputs,
            self.target_action: action
        })

    def action_gradients(self, asset_inputs, portfolio_inputs, actions):
        """dQ/da used by the actor's policy-gradient update."""
        return self.sess.run(self.action_grads, feed_dict={
            self.asset_inputs: asset_inputs,
            self.portfolio_inputs: portfolio_inputs,
            self.action: actions
        })

    def update_target_network(self):
        """Soft-update target weights towards the online weights (rate tau)."""
        self.sess.run(self.update_target_network_params)

    def assign_target_network(self):
        """Hard-copy online weights into the target network (initialisation)."""
        self.sess.run(self.assign_target_network_params)
class Solution:
    def reverseWords(self, s: str) -> str:
        """Reverse the characters of each space-separated word in *s* while
        keeping the original word order and separators.

        Uses split(" ") (not split()) so consecutive spaces survive the
        round-trip as empty fields.
        """
        # Fix: dropped the pointless enumerate() whose index was never used,
        # and use a generator instead of materialising a list.
        return " ".join(word[::-1] for word in s.split(" "))
|
import numpy as np
import matplotlib.pyplot as plt
# Load the training dataset
# (each row is a 400-pixel digit vector; see visualize_digit below)
train_features = np.load("train_features.npy")
# int8 cast keeps the label array compact; labels are small integers.
train_labels = np.load("train_labels.npy").astype("int8")
n_train = train_labels.shape[0]  # number of training examples
def visualize_digit(features, label):
    """Display one digit.

    ``features`` is a flat vector of 400 pixel values; it is reshaped to a
    20x20 grayscale image and shown with the label in the x-axis caption.
    """
    image = features.reshape(20, 20)
    plt.imshow(image, cmap="binary")
    plt.xlabel("Digit with label " + str(label))
    plt.show()
# Visualize a digit
# visualize_digit(train_features[0,:], train_labels[0])
# TODO: Plot three images with label 0 and three images with label 1
# Linear regression
# TODO: Solve the linear regression problem, regressing
# X = train_features against y = 2 * train_labels - 1
# TODO: Report the residual error and the weight vector
# Load the test dataset
# It is good practice to do this after the training has been
# completed to make sure that no training happens on the test
# set!
test_features = np.load("test_features.npy")  # same layout as train_features
test_labels = np.load("test_labels.npy").astype("int8")
n_test = test_labels.shape[0]  # number of test examples
# TODO: Implement the classification rule and evaluate it
# on the training and test set
# TODO: Implement classification using random features
import itertools
def roundrobin(*iterables):
    """roundrobin('ABC', 'D', 'EF') --> A D E B F C

    Interleave the given iterables one element at a time, dropping each
    iterable from the rotation once it is exhausted.  Itertools recipe.
    """
    # Recipe credited to George Sakkis
    # Fixes: removed stray fourth quote that leaked a '"' into the docstring,
    # and renamed the loop variable so it no longer shadows the builtin next().
    num_active = len(iterables)
    nexts = itertools.cycle(iter(it).__next__ for it in iterables)
    while num_active:
        try:
            for nxt in nexts:
                yield nxt()
        except StopIteration:
            # Remove the iterator we just exhausted from the cycle.
            num_active -= 1
            nexts = itertools.cycle(itertools.islice(nexts, num_active))
def repeatfunc(func, times=None, *args):
    """Repeat calls to func with specified arguments.

    Itertools recipe.
    Example: repeatfunc(random.random)

    ``times=None`` yields an endless stream of calls; otherwise exactly
    ``times`` calls are made.
    """
    arg_stream = itertools.repeat(args) if times is None else itertools.repeat(args, times)
    return itertools.starmap(func, arg_stream)
def repeat_infinitely(func, *args):
    """Flatten an endless stream of ``func(*args)`` results into one iterator.

    Each call is expected to return an iterable; their elements are chained.
    """
    endless_results = repeatfunc(func, None, *args)
    return itertools.chain.from_iterable(endless_results)
|
#!/usr/bin/env python
# Public API: only these submodules are exported by ``from <package> import *``.
__all__ = [ 'classifier', 'utils' ]
#from .features import * # Ensures that all the modules have been loaded in their new locations *first*.
#from . import packageA # imports WrapperPackage/packageA
#import sys
#sys.modules['packageA'] = packageA # creates a packageA entry in sys.modules
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-07-20 22:02
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the ``Day`` model and repoints the
    ``conference_day`` field on ``event`` and ``talkschedule`` to a foreign
    key on ``schedule.Day``.

    NOTE(review): generated file — do not edit the operations; this migration
    may already be applied/recorded in deployed databases.
    """

    dependencies = [
        ('schedule', '0003_event'),
    ]

    operations = [
        migrations.CreateModel(
            name='Day',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('conference_day', models.CharField(max_length=30)),
            ],
            options={
                'managed': True,
            },
        ),
        # CASCADE: deleting a Day deletes the events/schedules that point at it.
        migrations.AlterField(
            model_name='event',
            name='conference_day',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.Day'),
        ),
        migrations.AlterField(
            model_name='talkschedule',
            name='conference_day',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='schedule.Day'),
        ),
    ]
|
import torch.nn as nn
class CNN(nn.Module):
    """Convolutional classifier over 153 classes.

    Six conv stages (Conv -> BatchNorm -> ReLU, with interleaved MaxPool and
    Dropout2d) reduce a 3x224x224 input to an 8x15x15 feature map, followed by
    a three-layer fully connected head.  ``forward`` returns raw logits of
    shape (N, 153).
    """

    def __init__(self):
        super(CNN, self).__init__()
        # CONV/FC -> BatchNorm -> ReLU(or other activation) -> Dropout -> CONV/FC ->
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),            # 224 -> 112
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),            # 112 -> 56
            nn.Dropout2d(p=.25),
            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),            # 56 -> 28
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),            # 28 -> 14
            nn.Dropout2d(p=.25),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 8, kernel_size=3, stride=1, padding=2),  # padding=2 grows 14 -> 16
            nn.BatchNorm2d(8),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=1),  # 16 -> 15
            nn.Dropout2d(p=.25),
        )
        self.fc = nn.Sequential(
            nn.Linear(8 * 15 * 15, 1024),
            nn.ReLU(inplace=True),
            # Fix: plain Dropout on flattened 2-D activations — Dropout2d
            # expects NCHW input and is wrong/deprecated for linear layers.
            nn.Dropout(p=.25),
            nn.Linear(1024, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=.25),
            nn.Linear(512, 153),
        )
        # (Removed a large block of commented-out layer-by-layer prototype code.)

    def forward(self, x):
        """Run the conv stack, flatten to (N, 8*15*15), then classify."""
        out = self.cnn(x)
        out = out.view(-1, 8 * 15 * 15)
        out = self.fc(out)
        return out
# Module-level instance created at import time for consumers of this module.
model=CNN()
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.test_mail_full.tests.common import TestMailFullCommon, TestRecipients
class TestSMSPost(TestMailFullCommon, TestRecipients):
    """Tests for posting SMS on records via ``_message_sms`` and the template
    helpers: message internals, recipient/number resolution, sanitization,
    and template rendering.

    TODO
      * add tests for new mail.message and mail.thread fields;
    """

    @classmethod
    def setUpClass(cls):
        # One test record with a customer partner plus distinct mobile/phone
        # numbers, so number-field resolution can be exercised.
        super(TestSMSPost, cls).setUpClass()
        cls._test_body = 'VOID CONTENT'
        cls.test_record = cls.env['mail.test.sms'].with_context(**cls._test_context).create({
            'name': 'Test',
            'customer_id': cls.partner_1.id,
            'mobile_nbr': cls.test_numbers[0],
            'phone_nbr': cls.test_numbers[1],
        })
        cls.test_record = cls._reset_mail_context(cls.test_record)

    # --- message internals -------------------------------------------------

    def test_message_sms_internals_body(self):
        # HTML body is normalised (<br/> -> <br>) and a plain-text version is sent.
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms('<p>Mega SMS<br/>Top moumoutte</p>', partner_ids=self.partner_1.ids)
        self.assertEqual(messages.body, '<p>Mega SMS<br>Top moumoutte</p>')
        self.assertEqual(messages.subtype_id, self.env.ref('mail.mt_note'))
        self.assertSMSNotification([{'partner': self.partner_1}], 'Mega SMS\nTop moumoutte', messages)

    def test_message_sms_internals_check_existing(self):
        # A failed notification can be retried via _notify_record_by_sms with
        # check_existing=True instead of creating a duplicate.
        with self.with_user('employee'), self.mockSMSGateway(sim_error='wrong_number_format'):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=self.partner_1.ids)
        self.assertSMSNotification([{'partner': self.partner_1, 'state': 'exception', 'failure_type': 'sms_number_format'}], self._test_body, messages)
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            test_record._notify_record_by_sms(messages, {'partners': [{'id': self.partner_1.id, 'notif': 'sms'}]}, check_existing=True)
        self.assertSMSNotification([{'partner': self.partner_1}], self._test_body, messages)

    def test_message_sms_internals_sms_numbers(self):
        # Explicit sms_numbers are notified in addition to partner recipients.
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=self.partner_1.ids, sms_numbers=self.random_numbers)
        self.assertSMSNotification([{'partner': self.partner_1}, {'number': self.random_numbers_san[0]}, {'number': self.random_numbers_san[1]}], self._test_body, messages)

    def test_message_sms_internals_subtype(self):
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms('<p>Mega SMS<br/>Top moumoutte</p>', subtype_id=self.env.ref('mail.mt_comment').id, partner_ids=self.partner_1.ids)
        self.assertEqual(messages.body, '<p>Mega SMS<br>Top moumoutte</p>')
        self.assertEqual(messages.subtype_id, self.env.ref('mail.mt_comment'))
        self.assertSMSNotification([{'partner': self.partner_1}], 'Mega SMS\nTop moumoutte', messages)

    def test_message_sms_internals_pid_to_number(self):
        # sms_pid_to_number overrides the number used for specific partners.
        pid_to_number = {
            self.partner_1.id: self.random_numbers[0],
            self.partner_2.id: self.random_numbers[1],
        }
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2).ids, sms_pid_to_number=pid_to_number)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'number': self.random_numbers_san[0]},
            {'partner': self.partner_2, 'number': self.random_numbers_san[1]}],
            self._test_body, messages)

    # --- recipient resolution per model ------------------------------------

    def test_message_sms_model_partner(self):
        with self.with_user('employee'), self.mockSMSGateway():
            messages = self.partner_1._message_sms(self._test_body)
            messages |= self.partner_2._message_sms(self._test_body)
        self.assertSMSNotification([{'partner': self.partner_1}, {'partner': self.partner_2}], self._test_body, messages)

    def test_message_sms_model_partner_fallback(self):
        # Without a mobile number the partner's phone is used instead.
        self.partner_1.write({'mobile': False, 'phone': self.random_numbers[0]})
        with self.mockSMSGateway():
            messages = self.partner_1._message_sms(self._test_body)
            messages |= self.partner_2._message_sms(self._test_body)
        self.assertSMSNotification([{'partner': self.partner_1, 'number': self.random_numbers_san[0]}, {'partner': self.partner_2}], self._test_body, messages)

    def test_message_sms_model_w_partner_only(self):
        with self.with_user('employee'):
            record = self.env['mail.test.sms.partner'].create({'customer_id': self.partner_1.id})
            with self.mockSMSGateway():
                messages = record._message_sms(self._test_body)
        self.assertSMSNotification([{'partner': self.partner_1}], self._test_body, messages)

    def test_message_sms_model_w_partner_only_void(self):
        with self.with_user('employee'):
            record = self.env['mail.test.sms.partner'].create({'customer_id': False})
            with self.mockSMSGateway():
                messages = record._message_sms(self._test_body)
            # should not crash but have a failed notification
            self.assertSMSNotification([{'partner': self.env['res.partner'], 'number': False, 'state': 'exception', 'failure_type': 'sms_number_missing'}], self._test_body, messages)

    def test_message_sms_model_w_partner_m2m_only(self):
        with self.with_user('employee'):
            record = self.env['mail.test.sms.partner.2many'].create({'customer_ids': [(4, self.partner_1.id)]})
            with self.mockSMSGateway():
                messages = record._message_sms(self._test_body)
            self.assertSMSNotification([{'partner': self.partner_1}], self._test_body, messages)
            # TDE: should take first found one according to partner ordering
            with self.with_user('employee'):
                record = self.env['mail.test.sms.partner.2many'].create({'customer_ids': [(4, self.partner_1.id), (4, self.partner_2.id)]})
            with self.mockSMSGateway():
                messages = record._message_sms(self._test_body)
            self.assertSMSNotification([{'partner': self.partner_2}], self._test_body, messages)

    # --- number-field resolution -------------------------------------------

    def test_message_sms_on_field_w_partner(self):
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, number_field='mobile_nbr')
        self.assertSMSNotification([{'partner': self.partner_1, 'number': self.test_record.mobile_nbr}], self._test_body, messages)

    def test_message_sms_on_field_wo_partner(self):
        self.test_record.write({'customer_id': False})
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, number_field='mobile_nbr')
        self.assertSMSNotification([{'number': self.test_record.mobile_nbr}], self._test_body, messages)

    def test_message_sms_on_field_wo_partner_wo_value(self):
        """ Test record without a partner and without phone values. """
        self.test_record.write({
            'customer_id': False,
            'phone_nbr': False,
            'mobile_nbr': False,
        })
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body)
        # should not crash but have a failed notification
        self.assertSMSNotification([{'partner': self.env['res.partner'], 'number': False, 'state': 'exception', 'failure_type': 'sms_number_missing'}], self._test_body, messages)

    def test_message_sms_on_field_wo_partner_default_field(self):
        # Default resolution picks phone_nbr first...
        self.test_record.write({'customer_id': False})
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body)
        self.assertSMSNotification([{'number': self.test_numbers_san[1]}], self._test_body, messages)

    def test_message_sms_on_field_wo_partner_default_field_2(self):
        # ...and falls back to mobile_nbr when phone_nbr is empty.
        self.test_record.write({'customer_id': False, 'phone_nbr': False})
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body)
        self.assertSMSNotification([{'number': self.test_numbers_san[0]}], self._test_body, messages)

    # --- explicit numbers and partner_ids ----------------------------------

    def test_message_sms_on_numbers(self):
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, sms_numbers=self.random_numbers_san)
        self.assertSMSNotification([{'number': self.random_numbers_san[0]}, {'number': self.random_numbers_san[1]}], self._test_body, messages)

    def test_message_sms_on_numbers_sanitization(self):
        # Raw numbers are sanitised before sending.
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, sms_numbers=self.random_numbers)
        self.assertSMSNotification([{'number': self.random_numbers_san[0]}, {'number': self.random_numbers_san[1]}], self._test_body, messages)

    def test_message_sms_on_partner_ids(self):
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2).ids)
        self.assertSMSNotification([{'partner': self.partner_1}, {'partner': self.partner_2}], self._test_body, messages)

    def test_message_sms_on_partner_ids_default(self):
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body)
        self.assertSMSNotification([{'partner': self.test_record.customer_id, 'number': self.test_numbers_san[1]}], self._test_body, messages)

    def test_message_sms_on_partner_ids_w_numbers(self):
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=self.partner_1.ids, sms_numbers=self.random_numbers[:1])
        self.assertSMSNotification([{'partner': self.partner_1}, {'number': self.random_numbers_san[0]}], self._test_body, messages)

    # --- template-based sending --------------------------------------------

    def test_message_sms_with_template(self):
        sms_template = self.env['sms.template'].create({
            'name': 'Test Template',
            'model_id': self.env['ir.model']._get('mail.test.sms').id,
            'body': 'Dear ${object.display_name} this is an SMS.',
        })
        with self.with_user('employee'):
            with self.mockSMSGateway():
                test_record = self.env['mail.test.sms'].browse(self.test_record.id)
                messages = test_record._message_sms_with_template(template=sms_template)
        self.assertSMSNotification([{'partner': self.partner_1, 'number': self.test_numbers_san[1]}], 'Dear %s this is an SMS.' % self.test_record.display_name, messages)

    def test_message_sms_with_template_fallback(self):
        # Unknown template xmlid -> the rendered fallback body is used.
        with self.with_user('employee'):
            with self.mockSMSGateway():
                test_record = self.env['mail.test.sms'].browse(self.test_record.id)
                messages = test_record._message_sms_with_template(template_xmlid='test_mail_full.this_should_not_exists', template_fallback='Fallback for ${object.id}')
        self.assertSMSNotification([{'partner': self.partner_1, 'number': self.test_numbers_san[1]}], 'Fallback for %s' % self.test_record.id, messages)

    def test_message_sms_with_template_xmlid(self):
        sms_template = self.env['sms.template'].create({
            'name': 'Test Template',
            'model_id': self.env['ir.model']._get('mail.test.sms').id,
            'body': 'Dear ${object.display_name} this is an SMS.',
        })
        self.env['ir.model.data'].create({
            'name': 'this_should_exists',
            'module': 'test_mail_full',
            'model': sms_template._name,
            'res_id': sms_template.id,
        })
        with self.with_user('employee'):
            with self.mockSMSGateway():
                test_record = self.env['mail.test.sms'].browse(self.test_record.id)
                messages = test_record._message_sms_with_template(template_xmlid='test_mail_full.this_should_exists')
        self.assertSMSNotification([{'partner': self.partner_1, 'number': self.test_numbers_san[1]}], 'Dear %s this is an SMS.' % self.test_record.display_name, messages)
class TestSMSPostException(TestMailFullCommon, TestRecipients):
    """Failure-path tests: invalid numbers and simulated gateway errors
    (credit, server crash, unregistered account, wrong number format), both
    global and per-number."""

    @classmethod
    def setUpClass(cls):
        super(TestSMSPostException, cls).setUpClass()
        cls._test_body = 'VOID CONTENT'
        cls.test_record = cls.env['mail.test.sms'].with_context(**cls._test_context).create({
            'name': 'Test',
            'customer_id': cls.partner_1.id,
        })
        cls.test_record = cls._reset_mail_context(cls.test_record)
        # Third partner so per-number errors can be checked against
        # untouched "sent" siblings.
        cls.partner_3 = cls.env['res.partner'].with_context({
            'mail_create_nolog': True,
            'mail_create_nosubscribe': True,
            'mail_notrack': True,
            'no_reset_password': True,
        }).create({
            'name': 'Ernestine Loubine',
            'email': 'ernestine.loubine@agrolait.com',
            'country_id': cls.env.ref('base.be').id,
            'mobile': '0475556644',
        })

    def test_message_sms_w_numbers_invalid(self):
        random_numbers = self.random_numbers + ['6988754']
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, sms_numbers=random_numbers)
        # invalid numbers are still given to IAP currently as they are
        self.assertSMSNotification([{'number': self.random_numbers_san[0]}, {'number': self.random_numbers_san[1]}, {'number': random_numbers[2]}], self._test_body, messages)

    def test_message_sms_w_partners_nocountry(self):
        # Sanitisation still works when the partner has no country set.
        self.test_record.customer_id.write({
            'mobile': self.random_numbers[0],
            'phone': self.random_numbers[1],
            'country_id': False,
        })
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=self.test_record.customer_id.ids)
        self.assertSMSNotification([{'partner': self.test_record.customer_id}], self._test_body, messages)

    def test_message_sms_w_partners_falsy(self):
        # TDE FIXME: currently sent to IAP
        self.test_record.customer_id.write({
            'mobile': 'youpie',
            'phone': 'youpla',
        })
        with self.with_user('employee'), self.mockSMSGateway():
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=self.test_record.customer_id.ids)
        # self.assertSMSNotification({self.test_record.customer_id: {}}, {}, self._test_body, messages)

    def test_message_sms_w_numbers_sanitization_duplicate(self):
        pass
        # TDE FIXME: not sure
        # random_numbers = self.random_numbers + [self.random_numbers[1]]
        # random_numbers_san = self.random_numbers_san + [self.random_numbers_san[1]]
        # with self.with_user('employee'), self.mockSMSGateway():
        #     messages = self.test_record._message_sms(self._test_body, sms_numbers=random_numbers)
        # self.assertSMSNotification({}, {random_numbers_san[0]: {}, random_numbers_san[1]: {}, random_numbers_san[2]: {}}, self._test_body, messages)

    def test_message_sms_crash_credit(self):
        # Global 'credit' error flags every notification as sms_credit failure.
        with self.with_user('employee'), self.mockSMSGateway(sim_error='credit'):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2).ids)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'state': 'exception', 'failure_type': 'sms_credit'},
            {'partner': self.partner_2, 'state': 'exception', 'failure_type': 'sms_credit'},
        ], self._test_body, messages)

    def test_message_sms_crash_credit_single(self):
        # Per-number error: only the targeted number fails, others are sent.
        with self.with_user('employee'), self.mockSMSGateway(nbr_t_error={self.partner_2.phone_get_sanitized_number(): 'credit'}):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2 | self.partner_3).ids)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'state': 'sent'},
            {'partner': self.partner_2, 'state': 'exception', 'failure_type': 'sms_credit'},
            {'partner': self.partner_3, 'state': 'sent'},
        ], self._test_body, messages)

    def test_message_sms_crash_server_crash(self):
        with self.with_user('employee'), self.mockSMSGateway(sim_error='jsonrpc_exception'):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2 | self.partner_3).ids)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'state': 'exception', 'failure_type': 'sms_server'},
            {'partner': self.partner_2, 'state': 'exception', 'failure_type': 'sms_server'},
            {'partner': self.partner_3, 'state': 'exception', 'failure_type': 'sms_server'},
        ], self._test_body, messages)

    def test_message_sms_crash_unregistered(self):
        with self.with_user('employee'), self.mockSMSGateway(sim_error='unregistered'):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2).ids)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'state': 'exception', 'failure_type': 'sms_acc'},
            {'partner': self.partner_2, 'state': 'exception', 'failure_type': 'sms_acc'},
        ], self._test_body, messages)

    def test_message_sms_crash_unregistered_single(self):
        with self.with_user('employee'), self.mockSMSGateway(nbr_t_error={self.partner_2.phone_get_sanitized_number(): 'unregistered'}):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2 | self.partner_3).ids)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'state': 'sent'},
            {'partner': self.partner_2, 'state': 'exception', 'failure_type': 'sms_acc'},
            {'partner': self.partner_3, 'state': 'sent'},
        ], self._test_body, messages)

    def test_message_sms_crash_wrong_number(self):
        with self.with_user('employee'), self.mockSMSGateway(sim_error='wrong_number_format'):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2).ids)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'state': 'exception', 'failure_type': 'sms_number_format'},
            {'partner': self.partner_2, 'state': 'exception', 'failure_type': 'sms_number_format'},
        ], self._test_body, messages)

    def test_message_sms_crash_wrong_number_single(self):
        with self.with_user('employee'), self.mockSMSGateway(nbr_t_error={self.partner_2.phone_get_sanitized_number(): 'wrong_number_format'}):
            test_record = self.env['mail.test.sms'].browse(self.test_record.id)
            messages = test_record._message_sms(self._test_body, partner_ids=(self.partner_1 | self.partner_2 | self.partner_3).ids)
        self.assertSMSNotification([
            {'partner': self.partner_1, 'state': 'sent'},
            {'partner': self.partner_2, 'state': 'exception', 'failure_type': 'sms_number_format'},
            {'partner': self.partner_3, 'state': 'sent'},
        ], self._test_body, messages)
class TestSMSApi(TestMailFullCommon):
    """Tests of the mass scheduling helper ``_message_sms_schedule_mass``:
    plain body vs. template, with and without keeping a log note."""

    @classmethod
    def setUpClass(cls):
        super(TestSMSApi, cls).setUpClass()
        cls._test_body = 'Zizisse an SMS.'
        # Three mail.test.sms records plus an SMS template for that model.
        cls._create_records_for_batch('mail.test.sms', 3)
        cls.sms_template = cls._create_sms_template('mail.test.sms')

    def test_message_schedule_sms(self):
        """Plain body, no log note: one outgoing SMS per record customer."""
        with self.with_user('employee'):
            with self.mockSMSGateway():
                self.env['mail.test.sms'].browse(self.records.ids)._message_sms_schedule_mass(body=self._test_body, mass_keep_log=False)
        for record in self.records:
            self.assertSMSOutgoing(record.customer_id, None, content=self._test_body)

    def test_message_schedule_sms_w_log(self):
        """Plain body with mass_keep_log: outgoing SMS plus a log note."""
        with self.with_user('employee'):
            with self.mockSMSGateway():
                self.env['mail.test.sms'].browse(self.records.ids)._message_sms_schedule_mass(body=self._test_body, mass_keep_log=True)
        for record in self.records:
            self.assertSMSOutgoing(record.customer_id, None, content=self._test_body)
            self.assertSMSLogged(record, self._test_body)

    def test_message_schedule_sms_w_template(self):
        """Template-rendered body, no log note."""
        with self.with_user('employee'):
            with self.mockSMSGateway():
                self.env['mail.test.sms'].browse(self.records.ids)._message_sms_schedule_mass(template=self.sms_template, mass_keep_log=False)
        for record in self.records:
            self.assertSMSOutgoing(record.customer_id, None, content='Dear %s this is an SMS.' % record.display_name)

    def test_message_schedule_sms_w_template_and_log(self):
        """Template-rendered body with a log note per record."""
        with self.with_user('employee'):
            with self.mockSMSGateway():
                self.env['mail.test.sms'].browse(self.records.ids)._message_sms_schedule_mass(template=self.sms_template, mass_keep_log=True)
        for record in self.records:
            self.assertSMSOutgoing(record.customer_id, None, content='Dear %s this is an SMS.' % record.display_name)
            self.assertSMSLogged(record, 'Dear %s this is an SMS.' % record.display_name)
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# --------------------------------------------------------------------------
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
from ._internal import _KeyVaultClientBase
from .keys._client import KeyClient
from .secrets._client import SecretClient
if TYPE_CHECKING:
# pylint:disable=unused-import
from azure.core import Configuration
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpTransport
from typing import Any, Optional
class VaultClient(_KeyVaultClientBase):
    """VaultClient is a high-level interface for managing a vault's resources.
    Example:
        .. literalinclude:: ../tests/test_examples_vault_client.py
            :start-after: [START create_vault_client]
            :end-before: [END create_vault_client]
            :language: python
            :caption: Creates a new instance of VaultClient
    """
    def __init__(self, vault_url, credential, config=None, transport=None, api_version=None, **kwargs):
        # type: (str, TokenCredential, Configuration, Optional[HttpTransport], Optional[str], **Any) -> None
        super(VaultClient, self).__init__(
            vault_url, credential, config=config, transport=transport, api_version=api_version, **kwargs
        )
        # Sub-clients reuse the generated client (self._client) created by the
        # base class so they share its pipeline and authentication.
        self._secrets = SecretClient(self.vault_url, credential, generated_client=self._client, **kwargs)
        self._keys = KeyClient(self.vault_url, credential, generated_client=self._client, **kwargs)
    @property
    def secrets(self):
        """
        :rtype: ~azure.security.keyvault.secrets.SecretClient
        """
        return self._secrets
    @property
    def keys(self):
        """
        :rtype: ~azure.security.keyvault.keys.KeyClient
        """
        return self._keys
    # @property
    # def certificates(self):
    #     """
    #     :rtype: ~azure.security.keyvault.certificates.CertificateClient
    #     """
    #     pass
|
import rospy
import util
dataHolder = util.DataHolder()
def work_laser(conn, cmsg, handler=None):
    """Reply to a laser request: send both halves of the cached scan."""
    print(cmsg)
    for span in ('0~180', '540~720'):
        conn.send(handle_laser_data(span, 5))
def work_enc(conn, cmsg, handler=None):
    """Reply with the current encoder readings as 'enc <right> <left>'."""
    conn.send('enc %s %s' % (dataHolder.rencoder, dataHolder.lencoder))
def work_vel(conn, cmsg, handler=None):
    """Push a fixed velocity command into the handler.

    TODO: map the incoming C++ cmsg into this dictionary instead of using
    hard-coded values.
    """
    command = dict(
        linear_x=10,
        linear_y=10,
        linear_z=0,
        ang_x=0,
        ang_y=0,
        ang_z=10,
        speed=10,
        turn=10,
    )
    handler.update(**command)
"""
Callback function
"""
def callback_laser(msg):
dataHolder.laser_data = msg.ranges
def callback_Rencoder(msg):
dataHolder.rencoder = msg.data
def callback_Lencoder(msg):
dataHolder.lencoder = msg.data
def handle_laser_data(ranges, decimal):
    """Build a laser-data message string from the cached scan.

    :param ranges: '0~180' (first 180 readings) or '540~720' (readings from
        index 540 onward). Any other value raises ValueError.
    :param decimal: unused; kept for backward compatibility with callers.
    :return: "<range-tag> " followed by the readings formatted with ".5f".
    :raises ValueError: when ranges is not one of the two supported tags.
    """
    if ranges not in ('0~180', '540~720'):
        # The original asserted, then called rospy.logfatal("... ", e) with an
        # extra arg but no %s placeholder (a TypeError in itself), and finally
        # fell through to return an unbound `message`. Log properly, fail fast.
        rospy.logfatal("Wrong Range 0-180, 540-720: %s", ranges)
        raise ValueError("ranges must be '0~180' or '540~720'")
    data = dataHolder.laser_data
    if ranges == '0~180':
        # NOTE(review): readings are concatenated with NO separator, exactly
        # as before -- confirm the receiver parses fixed-width fields.
        message = '0~180 ' + "".join(format(x, ".5f") for x in data[:180])
        rospy.loginfo('Send Laserdata 0~180')
    else:
        message = '540~720 ' + "".join(format(x, ".5f") for x in data[540:])
        rospy.loginfo('Send Laserdata 540~720')
    return message
|
#!/usr/bin/python3
#
# Author:
# @oldboy21
# https://github.com/oldboy21/smbsr/
import socket
import argparse
import logging
import sys
import ipaddress
import urllib
import tempfile
import re
from smb import *
from smb.SMBConnection import SMBConnection
from smb.SMBHandler import SMBHandler
from io import BytesIO
import masscan
import _thread
import threading
from threading import Lock
import random
import uuid
import sys
import os
import sqlite3
import csv
from itertools import compress
import datetime
import faulthandler
import concurrent.futures
class Database:
    """Small thread-safe wrapper around the smbsr SQLite results store."""
    def __init__(self, db_file):
        self.db_file = db_file
    def connect_database(self):
        """Open the SQLite connection (shared across threads), cursor and lock."""
        self.conn = sqlite3.connect(self.db_file, check_same_thread=False)
        self.cursor = self.conn.cursor()
        self.lock = threading.Lock()
    def create_database(self):
        """Connect and create the two result tables if they do not exist."""
        self.connect_database()
        try:
            smb_match_table = """ CREATE TABLE IF NOT EXISTS smbsr (
                                        id integer PRIMARY KEY AUTOINCREMENT,
                                        file text NOT NULL,
                                        share text NOT NULL,
                                        ip text NOT NULL,
                                        position text NOT NULL,
                                        matchedWith text NOT NULL,
                                        tsCreated text NOT NULL,
                                        tsModified text NOT NULL,
                                        tsAccessed text NOT NULL,
                                        count integer NOT NULL
                                    ); """
            smb_files_table = """ CREATE TABLE IF NOT EXISTS smbfile (
                                        id integer PRIMARY KEY AUTOINCREMENT,
                                        file text NOT NULL,
                                        share text NOT NULL,
                                        ip text NOT NULL,
                                        tsCreated text NOT NULL,
                                        tsModified text NOT NULL,
                                        tsAccessed text NOT NULL
                                    ); """
            if self.cursor is not None:
                self.create_table(smb_match_table)
                self.create_table(smb_files_table)
        except Exception as e:
            logger.error("Encountered error while creating the database: " + str(e))
            sys.exit(1)
    def exportToCSV(self):
        """Dump both result tables to CSV files in the working directory."""
        cursor = self.cursor
        # newline='' as recommended by the csv docs (no blank rows on Windows).
        with open('smbsr_results.csv', 'w', newline='') as f:
            csv.writer(f).writerows(cursor.execute("SELECT * from smbsr"))
        with open('smbsrfile_results.csv', 'w', newline='') as g:
            csv.writer(g).writerows(cursor.execute("SELECT * from smbfile"))
    def commit(self):
        self.conn.commit()
    def create_table(self, create_table_sql):
        """Execute one CREATE TABLE statement, logging (not raising) failures."""
        try:
            self.cursor.execute(create_table_sql)
        except Exception as e:
            logger.info(e)
    def insertFinding(self, filename, share, ip, line, matched_with, times):
        """Record a content match, or bump its count when already recorded.

        Uses parameterized queries throughout: the original interpolated
        remote-controlled strings straight into SQL (injection-prone) and its
        WHERE clause never actually filtered on the filename, so repeated
        matches produced duplicate rows instead of incrementing `count`.

        :param times: (created, modified, accessed) formatted timestamps.
        """
        with self.lock:
            cursor = self.cursor
            checkQuery = "SELECT id FROM smbsr WHERE ip=? AND share=? AND file=?"
            results = cursor.execute(checkQuery, (ip, share, filename)).fetchall()
            if len(results) == 0:
                insertFindingQuery = "INSERT INTO smbsr (file, share, ip, position, matchedWith, tsCreated, tsModified, tsAccessed, count) VALUES (?,?,?,?,?,?,?,?,?)"
                cursor.execute(insertFindingQuery, (filename, share, ip, line, matched_with, times[0], times[1], times[2], 1))
            else:
                updateQuery = "UPDATE smbsr SET count = count + 1 WHERE ip=? AND share=? AND file=?"
                cursor.execute(updateQuery, (ip, share, filename))
            self.commit()
    def insertFileFinding(self, filename, share, ip, times):
        """Record an 'interesting file' hit (matched by extension or name)."""
        with self.lock:
            cursor = self.cursor
            insertFindingQuery = "INSERT INTO smbfile (file, share, ip, tsCreated, tsModified, tsAccessed) VALUES (?,?,?,?,?,?)"
            cursor.execute(insertFindingQuery, (filename, share, ip, times[0], times[1], times[2]))
            self.commit()
class HW(object):
    """Core SMB scanning worker.

    Connects to hosts, walks their shares recursively and records, via the
    Database wrapper, files whose content or name matches the word list.
    """
    def __init__(self, options, db):
        super(HW, self).__init__()
        self.options = options
        # One pysmb connection object per instance, reused across hosts.
        self.conn = SMBConnection(options.username,options.password,options.fake_hostname,'netbios-server-name',options.domain,use_ntlm_v2=True,is_direct_tcp=True)
        self.db = db
    def get_bool(self,prompt):
        """Prompt until the user answers y/n; return the answer as a bool."""
        while True:
            try:
                return {"y":True,"n":False}[input(prompt).lower()]
            except KeyError:
                print("Invalid input please enter [y/n]")
    def retrieveTimes(self, share, filename):
        """Return [created, modified, accessed] formatted timestamps of a remote file."""
        times = []
        attributes = self.conn.getAttributes(share, filename)
        ts_created = datetime.datetime.fromtimestamp(attributes.create_time).strftime('%Y-%m-%d %H:%M:%S')
        ts_accessed = datetime.datetime.fromtimestamp(attributes.last_access_time).strftime('%Y-%m-%d %H:%M:%S')
        ts_modified = datetime.datetime.fromtimestamp(attributes.last_write_time).strftime('%Y-%m-%d %H:%M:%S')
        times.append(ts_created)
        times.append(ts_modified)
        times.append(ts_accessed)
        return times
    def passwordHW(self,text, filename,to_match, counter, IP, share):
        """Record a DB finding for every word-list entry contained in `text`.

        `counter` is the 1-based line number inside the file being parsed.
        Matching is case-insensitive substring containment.
        """
        results = []
        output = False
        for substring in to_match:
            results.append(substring.lower() in text.lower())
        output=any(results)
        if output:
            # Indices of the word-list entries that matched this line.
            m = [i for i, x in enumerate(results) if x]
            for z in m:
                logger.info("Found interesting match in " + filename + " with " + to_match[z] +", line: " + str(counter))
                self.db.insertFinding(filename, share, IP, str(counter), to_match[z], self.retrieveTimes(share,filename))
    def parse(self, share, filename, to_match, IP):
        """Download a remote file (subject to blacklist and size limits) and
        scan it line by line; also record 'interesting' files whose extension
        or base name matches the configured lists."""
        line_counter = 0
        file_obj = tempfile.NamedTemporaryFile()
        file_ext = (filename.split('/')[-1]).split('.')[-1]
        if file_ext.lower() in self.options.file_extensions_black.split(','):
            logger.info("This extensions is blacklisted")
        else:
            if file_ext.lower() in self.options.file_interesting.split(','):
                logger.info("Found interesting file: " + filename)
                self.db.insertFileFinding(filename, share, IP, self.retrieveTimes(share,filename))
            if (filename.split('/')[-1]).split('.')[0].lower() in to_match:
                logger.info("Found interesting file named " + filename)
                self.db.insertFileFinding(filename, share, IP, self.retrieveTimes(share,filename))
            filesize = (self.conn.getAttributes(share, filename)).file_size
            if filesize > self.options.max_size:
                logger.info("Skipping file " + filename + ", it is too big and you said i can't handle it")
            else:
                file_attributes, filesize = self.conn.retrieveFile(share, filename, file_obj)
                file_obj.seek(0)
                lines = file_obj.readlines()
                for line in lines:
                    line_counter+=1
                    try:
                        self.passwordHW((line.decode("utf-8")).strip("\n"), filename,to_match, line_counter, IP, share)
                    except Exception as e:
                        # A non-UTF-8 (likely binary) line aborts the whole file.
                        logger.warning("Encountered exception while reading: " + str(e))
                        break
        file_obj.close()
    def walk_path(self,path,shared_folder,IP,to_match):
        """Recursively walk `path` inside `shared_folder`, honouring the
        folder blacklist and the -depth limit, parsing every file found."""
        #print (depth)
        try:
            for p in self.conn.listPath(shared_folder, path):
                if p.filename!='.' and p.filename!='..':
                    parentPath = path
                    if not parentPath.endswith('/'):
                        parentPath += '/'
                    if p.isDirectory:
                        if p.filename.lower() in self.options.folder_black.split(','):
                            logger.info('Skipping ' + p.filename + " since blacklisted")
                            continue
                        else:
                            # Depth is measured by counting '/' in the path.
                            if parentPath.count('/') <= self.options.depth:
                                logger.info("Visiting subfolder " + str(p.filename))
                                self.walk_path(parentPath+p.filename,shared_folder,IP,to_match)
                            else:
                                logger.info("Skipping " + str(parentPath+p.filename) + ", too deep")
                                continue
                    else:
                        logger.info( 'File: '+ parentPath+p.filename )
                        self.parse(shared_folder, parentPath+p.filename, to_match, IP)
        except Exception as e:
            logger.warning("Error while listing paths in shares: " + str(e))
    def shareAnalyze(self,IPaddress, to_match):
        """Sequentially scan every host in `IPaddress` (iterable of IP strings),
        skipping special shares and NETLOGON/IPC$."""
        for ip in IPaddress:
            logger.info("Checking SMB share on: " + ip)
            #domain_name = 'domainname'
            #conn = SMBConnection(options.username,options.password,options.fake_hostname,'netbios-server-name','SECRET.LOCAL',use_ntlm_v2=True,is_direct_tcp=True)
            try:
                self.conn.connect(ip, 445)
            except Exception as e:
                logger.warning("Detected error while connecting to " + str(ip) + " with message " + str(e))
                continue
            try:
                shares = self.conn.listShares()
            except Exception as e:
                logger.warning("Detected error while listing shares on " + str(ip) + " with message " + str(e))
                continue
            for share in shares:
                if not share.isSpecial and share.name not in ['NETLOGON', 'IPC$']:
                    logger.info('Listing file in share: ' + share.name)
                    try:
                        sharedfiles = self.conn.listPath(share.name, '/')
                    except Exception as e:
                        logger.warning("Detected error while listing shares on " + str(ip) + " with message " + str(e))
                        continue
                    self.walk_path("/",share.name,ip, to_match)
            self.conn.close()
    def shareAnalyzeLightning(self,to_analyze, to_match):
        """Scan a single host popped from the shared `to_analyze` list
        (entry point used by the thread-pool workers)."""
        ip = to_analyze.pop(0)
        logger.info("Checking SMB share on: " + ip)
        #domain_name = 'domainname'
        #conn = SMBConnection(options.username,options.password,options.fake_hostname,'netbios-server-name','SECRET.LOCAL',use_ntlm_v2=True,is_direct_tcp=True)
        try:
            self.conn.connect(ip, 445)
        except Exception as e:
            logger.info("Detected error while connecting to " + str(ip) + " with message " + str(e))
            # NOTE(review): in a worker thread sys.exit only raises SystemExit
            # in that thread; it does not stop the whole program.
            sys.exit(1)
        ##NEED TO STOP HERE
        try:
            shares = self.conn.listShares()
        except Exception as e:
            logger.info("Detected error while listing shares on " + str(ip) + " with message " + str(e))
            sys.exit()
        for share in shares:
            if not share.isSpecial and share.name not in ['NETLOGON', 'IPC$']:
                logger.info('Listing file in share: ' + share.name)
                try:
                    sharedfiles = self.conn.listPath(share.name, '/')
                except Exception as e:
                    logger.warning("Could not list path on share " + share.name + " due to: " + str(e))
                self.walk_path("/",share.name,ip, to_match)
        self.conn.close()
    def scanNetwork(self):
        """Resolve/validate targets from -IP or -ip-list-path; when -masscan
        is enabled, port-scan 445 first and return only hosts with it open."""
        target = self.options.IP
        file_target = self.options.ip_list_path
        temp = []
        final = []
        if file_target != "unset":
            with open(file_target) as f:
                temp = [line.rstrip() for line in f]
            f.close()
        else:
            temp.append(target)
        for i in temp:
            # Accept dotted-quad IPs with an optional /CIDR suffix.
            valid = re.match("^([0-9]{1,3}\.){3}[0-9]{1,3}($|/([0-9]{1,2}))$", i)
            if not valid:
                logger.info("You entered an hostname, looking up " + i)
                try:
                    final.append(socket.gethostbyname(i))
                except socket.gaierror:
                    logger.warning("\nHostname could not be resolved: " + i)
                    #sys.exit(1)
            else:
                final.append(i)
        ranges = ','.join(final)
        if not self.options.masscan:
            # Without masscan only plain IPs are supported (no CIDR ranges).
            for x in final:
                ipcheck = re.match("^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$", x)
                if not ipcheck:
                    logger.error("Hey there, if you do not use masscan you can't give me CIDR as input")
                    sys.exit(1)
            return final
        mass = masscan.PortScanner()
        mass.scan(ranges, ports='445', arguments='--rate 1000')
        to_analyze = []
        logger.info('Starting masscan to discover SMB ports open')
        for key in mass.scan_result['scan']:
            if mass.scan_result['scan'][key]['tcp'][445]['state'] == 'open':
                to_analyze.append(key)
        return to_analyze
    def readMatches(self):
        """Return the word-list file as a list of stripped lines; exit on error."""
        filepath = self.options.word_list_path
        try:
            with open(filepath) as f:
                lines = [line.rstrip() for line in f]
            f.close()
            return lines
        except Exception as e:
            logger.error("Exception while reading the file " + str(e))
            sys.exit(1)
class smbworker (threading.Thread):
    """Thread that analyzes one SMB host popped from the shared IP list."""
    def __init__(self, name, options, ip, to_match, db):
        threading.Thread.__init__(self)
        self.name = name
        self.options = options
        self.ip = ip
        self.to_match = to_match
        self.db = db
    def run(self):
        logger.info("Starting " + self.name)
        # Each worker gets its own HW instance (own SMB connection).
        HW(self.options, self.db).shareAnalyzeLightning(self.ip, self.to_match)
        logger.info("Exiting " + self.name)
def setupPersistence(db, dbfile):
    """Create the results database on first run, otherwise just connect."""
    if os.path.exists(dbfile):
        logger.info("Database already existing")
        db.connect_database()
    else:
        logger.info("Database not found, creating ...")
        db.create_database()
        logger.info("Database created successfully")
if __name__ == '__main__':
    # ---------------- command line ----------------
    parser = argparse.ArgumentParser(add_help=True, description="SMB Password Revealer ")
    parser.add_argument('-username', action='store', default='anonymous', type=str, help='Username for authenticated scan')
    parser.add_argument('-password', action='store', default='s3cret', type=str, help='Password for authenticated scan')
    parser.add_argument('-domain', action='store', default='SECRET.LOCAL', help='Domain for authenticated scan')
    parser.add_argument('-fake-hostname', action='store', default='localhost', help='Computer hostname SMB connection will be from')
    parser.add_argument('-word-list-path', action="store", type=str, help="File containing the string to look for", required=True)
    parser.add_argument('-max-size', action="store", default=50000, type=int, help="Maximum size of the file to be considered for scanning (bytes)")
    parser.add_argument('-file-extensions-black', action='store', type=str, default='none', help='Comma separated file extensions to skip while secrets harvesting')
    parser.add_argument('-multithread', action='store_true', default=False, help="Assign a thread to any IP to scan")
    parser.add_argument('-masscan', action='store_true', default=False, help="Scan for 445 before trying to analyze the share")
    parser.add_argument('-T', action='store', default=10, type=int, help="Define the number of thread to use")
    parser.add_argument('-logfile', action='store', default='smbsr.log', type=str, help='Log file name')
    # Bug fix: the help text was copy-pasted from -logfile ('Log file name').
    parser.add_argument('-dbfile', action='store', default='./smbsr.db', type=str, help='Database file name')
    parser.add_argument('-file-interesting', action='store', default='none', type=str, help='Comma separated file extensions you want to be notified about')
    parser.add_argument('-folder-black', action='store', default='none', type=str, help='Comma separated folder names to skip during the analysis, keep in mind subfolders are also skipped')
    parser.add_argument('-csv', action='store_true', default=False, help='Export results to CSV files in the project folder')
    parser.add_argument('-depth', action='store', default=100000000, type=int, help='How recursively deep you want to go while looking for secrets')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-ip-list-path', action="store", default="unset", type=str, help="File containing IP to scan")
    group.add_argument('-IP', action="store", help='IP address, CIDR or hostname')
    options = parser.parse_args()
    faulthandler.enable()
    # ---------------- logging: same format to file and stdout ----------------
    formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s')
    logger = logging.getLogger('logger')
    #cleaning handlers
    logging.getLogger().handlers = []
    logger.handlers = []
    logger.setLevel(logging.INFO)
    infoHandler = logging.FileHandler(options.logfile)
    infoHandler.setLevel(logging.INFO)
    infoHandler.setFormatter(formatter)
    stdoutHandler = logging.StreamHandler(sys.stdout)
    stdoutHandler.setLevel(logging.INFO)
    stdoutHandler.setFormatter(formatter)
    logger.addHandler(stdoutHandler)
    logger.addHandler(infoHandler)
    if len(sys.argv) == 1:
        parser.print_help()
        print ("\nExamples: ")
        print("\t./smb-secrets-revealer.py -IP 127.0.0.1/localhost -word-list-path tomatch.txt\n")
        sys.exit(1)
    # ---------------- persistence + scan ----------------
    db = Database(options.dbfile)
    setupPersistence(db, options.dbfile)
    locker = Lock()
    smbHW = HW(options, db)
    to_match = smbHW.readMatches()
    to_analyze = smbHW.scanNetwork()
    if options.multithread is True:
        #multithreading function call
        logger.info("Lighting!!")
        with concurrent.futures.ThreadPoolExecutor(max_workers=options.T) as executor:
            # Bug fix: the original `while len(to_analyze) > 0` loop kept
            # submitting tasks while workers pop()ed concurrently, queueing an
            # unbounded number of tasks that then failed on an empty list.
            # Submit exactly one task per pending host instead; each worker
            # pops its own IP from the shared list.
            for _ in range(len(to_analyze)):
                smbHW = HW(options, db)
                executor.submit(smbHW.shareAnalyzeLightning, to_analyze, to_match)
    else:
        smbHW.shareAnalyze(to_analyze, to_match)
    if options.csv:
        db.exportToCSV()
    print ("Hope you found something good mate!")
|
class Solution(object):
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence.

        :type n: int
        :rtype: str
        """
        # Base case: first term is "1". The original returned the *integer*
        # 1 here, violating the declared str return type.
        if n < 2:
            return "1"
        ret_str = "1"
        while n > 1:
            temp, current_num = '', 0
            for i, v in enumerate(ret_str):
                if i > 0 and v != ret_str[i - 1]:
                    # Run ended: emit "<count><digit>" for the previous run.
                    temp += str(current_num) + ret_str[i - 1]
                    current_num = 1
                else:
                    current_num += 1
            # Flush the final run (ret_str is never empty, so current_num >= 1).
            ret_str = temp + str(current_num) + ret_str[-1]
            n -= 1
        return ret_str
if __name__ == '__main__':
    s = Solution()
    # Python 2 print statement -- run this file with a Python 2 interpreter.
    print s.countAndSay(4)
    # for i in range(10):
    #     print s.countAndSay(i)
|
# coding=utf-8
import os
from collections import namedtuple
from SRC import settings
from SRC.common import xmlHelper
from SRC.common.const import RunStatus, RunResult, TestType
from SRC.common.fileHelper import isNoneOrEmpty, delsuffix, isAbsolutePath, delFirstChar
from SRC.common.loga import putSystemLog
# One test scene: its id plus the list of test-case descriptors it contains.
TestScene = namedtuple('testScene', ['sceneId', 'testCaseList'])
class TestPlan():
    '''
    Test-plan entity class.
    Currently covers UI tests and interface (API) tests.
    Depending on the test type, different content is read from the
    test-plan configuration file.
    To extend it, follow the original format.
    '''
    def __init__(self, xmlPath, testType):
        self.xmlPath = xmlPath  # path of the test-plan file
        self.testType = testType  # type of the test plan
        self.initData()  # initialize the data containers
    def initData(self):
        '''
        Initialize the data containers.
        :return:
        '''
        self.sceneList = []  # list of scenes
        if self.testType == TestType.UI:
            self.hubDict = {}  # used for UI tests (browser -> hub URL)
        elif self.testType == TestType.INTERFACE:
            pass
    def loadXml(self):
        """Parse the test-plan XML: hub info (UI only) plus the scene list."""
        try:
            tree = xmlHelper.read_xml(self.xmlPath)
            if self.testType == TestType.UI:  # UI tests need the hub info
                connection_nodes = xmlHelper.find_nodes(tree, settings.XML_TAG['testPlan']['connection'])
                if len(connection_nodes) > 0:
                    self.setHubDict(connection_nodes[0])
            scene_nodes = xmlHelper.find_nodes(tree, settings.XML_TAG['testPlan']['scene'])
            if len(scene_nodes) > 0:
                self.setSceneList(scene_nodes)
        except Exception as e:
            putSystemLog('[ERROR-0003-0]:解析测试方案配置文件引发的异常.%s' % (e), None, True, RunStatus.END, RunResult.ERROR, True,
                         '异常')
            raise
    def setHubDict(self, connection_node):
        """Fill self.hubDict (browser name -> remote hub URL) exactly once."""
        if len(self.hubDict) != 0:
            return
        hubDict = {}
        hub_nodes = xmlHelper.find_nodes(connection_node, settings.XML_TAG['testPlan']['hub'])
        hub_nodes = xmlHelper.get_node_by_keyvalue(hub_nodes, {settings.XML_TAG['testPlan']['enabled']: 'True'}, True)
        for hub in hub_nodes:
            browser = hub.get(settings.XML_TAG['testPlan']['browser'])
            if not isNoneOrEmpty(browser):
                remotePath = hub.text.strip() if hub.text is not None else ''
                hubDict[browser.lower().strip()] = remotePath
        self.hubDict = hubDict
    def setSceneList(self, scene_nodes):
        """Build self.sceneList from the scene nodes."""
        for scene_node in scene_nodes:
            sceneId=scene_node.get(settings.XML_TAG['testPlan']['sceneid'])
            # NOTE(review): this overwrites the global id each iteration, so
            # settings.SCENEID ends up holding the last scene's id.
            settings.SCENEID = sceneId
            testCaseList = self.getTestCaseListInScene(scene_node)
            testScene = TestScene(sceneId, testCaseList)
            self.sceneList.append(testScene)
    def getTestCaseListInScene(self, scene_node):
        """Collect the enabled test cases of one scene as descriptor dicts."""
        testCaseList = []
        testCase_nodes = xmlHelper.find_nodes(scene_node, settings.XML_TAG['testPlan']['testCase'])
        testCase_nodes = xmlHelper.get_node_by_keyvalue(testCase_nodes,
                                                        {settings.XML_TAG['testPlan']['enabled']: 'True'}, True)
        # testCase_nodes = [node for node in testCase_nodes if not isNoneOrEmpty(node.text)]  # when empty, the built-in test case is used
        for testCase_node in testCase_nodes:
            testCasePath = self.getTestCasePath(testCase_node)
            paramPath = self.getParamPath(testCase_node)
            scriptId = self.getScriptId(testCase_node)
            testCaseList.append({'paramPath': paramPath, 'testCase': testCasePath, 'scriptId': scriptId})
        return testCaseList
    def getTestCasePath(self, testCase_node):
        """Return the test-case path without its '.py' suffix (or None)."""
        path=testCase_node.text
        if isNoneOrEmpty(path):
            return None
        return delsuffix(path.strip(), '.py')  # strip the suffix
    def getParamPath(self, testCase_node):
        """Resolve the parameter-file attribute to a path under the data dir."""
        paramPath = testCase_node.get(settings.XML_TAG['testPlan']['paramPath'])
        if not isNoneOrEmpty(paramPath):
            paramPath = delsuffix(paramPath.strip(), '.xml', False)  # append the suffix
            if not isAbsolutePath(paramPath):
                paramPath = delFirstChar(paramPath, ['/', '\\'])
                paramPath = os.path.join(settings.PARAMETERIZATION['dataDir'], paramPath).replace('\\', '/')
        else:
            paramPath =None
        return paramPath
    def getScriptId(self,testCase_node):
        return testCase_node.get(settings.XML_TAG['testPlan']['scriptId'])  # script id
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/1/26 18:39
# @Author : ysy
# 对计算器进行测试
import sys
import pytest
import yaml
# Make the project root importable so `pythoncode.Calculator` resolves when
# the tests are run from this directory.
sys.path.append('..')
print(sys.path)  # debug aid: shows the effective import path
from pythoncode.Calculator import Calculator
def get_datas():
    """Load the parametrized inputs for the addition tests from calc.yml."""
    with open("./dates/calc.yml") as stream:
        document = yaml.safe_load(stream)
    return document['add']['datas']
def get_datas2():
    """Load the parametrized inputs for the division tests from calc.yml."""
    with open("./dates/calc.yml") as stream:
        document = yaml.safe_load(stream)
    return document['div']['datas']
class TestCalc:
    """Data-driven tests for Calculator.add / Calculator.div.

    Inputs come from ./dates/calc.yml via get_datas()/get_datas2(), which the
    parametrize decorators evaluate once at class-definition time.
    """
    def setup(self):
        # Runs before each test method (pytest nose-style setup).
        print('【开始计算】')
    def teardown(self):
        # Runs after each test method.
        print('【结束计算】')
    @pytest.mark.parametrize("a,b,result", get_datas())
    def test_add(self, a, b, result):
        calc = Calculator()
        assert result == calc.add(a, b)
    @pytest.mark.parametrize("x,y,result2", get_datas2())
    def test_div(self, x, y, result2):
        calc = Calculator()
        assert result2 == calc.div(x, y)
|
__author__ = 'yiqibai'
#encoding=utf-8
import pymongo
import re, numpy
import LLDAInference
from optparse import OptionParser
IP = "129.63.16.167" # ip mongodb
PORT = 27017 # port mongodb
DB = "KuaiWenCategory" # db mongodb
COLLECTION = "APP" # collection mongodb
'''top numbers of keywords under each category to save in db,
if training data [0, 500], set TPNUM = 50
if training data [500, 1000], set TPNUM = 100
if training data [1000, 2000], set TPNUM = 150
if training data [2000, infinity], set TPNUM = 200
'''
TPNUM = 100
'''load training corpus'''
def load_traincorpus(fname):
    '''
    :param fname: training-corpus filename (eg: total200)
    :return: (labelmap.keys(), corpus, labels) where
        corpus = [d1, d2, ...], di = [w1, w2, ...]
        labels = [dl1, dl2, ...], dli = list of labels for the corresponding
            doc in corpus (None for unlabeled lines)
        labelmap.keys() = every label seen
    '''
    corpus = []
    labels = []
    labelmap = dict()
    with open(fname, 'r') as f:
        for line in f:
            mt = re.match(r'\[(.+?)\](.+)', line)
            if mt:
                # "[l1,l2,...]text" -> label list + document text.
                label1 = []
                for x in mt.group(1).split(','):
                    labelmap[x] = 1
                    label1.append(x)
                txt = mt.group(2)
            else:
                # Unlabeled line. The original left `txt` unset here, which
                # crashed with NameError when the FIRST line had no labels and
                # silently duplicated the previous document otherwise.
                label1 = None
                txt = line.rstrip('\n')
            doc = txt.split(" ")
            if len(doc) > 0:
                corpus.append(doc)
                labels.append(label1)
    return labelmap.keys(), corpus, labels
'''save phi to db'''
def save2mongo(phiData):
    '''
    Persist the per-category keyword table to the configured MongoDB
    collection (best effort: errors are printed, not raised).
    :param phiData: {category : {keyword : weight}} dict stored as one document
    '''
    try:
        con = pymongo.MongoClient(IP, PORT) #get data from tencent server
        dn = DB
        cn = COLLECTION
        db = con[dn] # database handle
        cur = db[cn]
        # NOTE(review): Collection.save() is removed in modern pymongo;
        # replace_one/insert_one is the current equivalent.
        cur.save(phiData)
    except Exception, e:
        print e.message
    # NOTE(review): if MongoClient() itself raised, `con` is unbound here and
    # this line raises NameError.
    con.close()
'''main file alpha = 50 / T, beta = 0.1'''
def TrainLLDA(trainfile):
    '''
    Train a Labeled-LDA model on `trainfile`, collect the top keywords per
    label, save them to MongoDB via save2mongo and dump phi to file "PHI".
    :param trainfile: training-corpus filename (eg: total200)
    '''
    lbfilter = "common"  # label excluded from the exported keyword table
    #load training data
    labelset, corpus, labels = load_traincorpus(trainfile)
    #set parameters
    K = len(labelset)
    # NOTE(review): OptionParser reads sys.argv here, so command-line flags
    # override alpha/beta/iteration even though trainfile is a parameter.
    parser = OptionParser()
    parser.add_option("-f", dest="filename", help="corpus filename")
    parser.add_option("--alpha", dest="alpha", type="float", help="parameter alpha", default = 50.0/ K)
    parser.add_option("--beta", dest="beta", type="float", help="parameter beta", default = 0.1)
    parser.add_option("-i", dest="iteration", type="int", help="iteration count", default = 64) #iteration
    (options, args) = parser.parse_args()
    #training
    print 'training starts'
    llda = LLDAInference.LLDA(options.alpha, options.beta)
    vocabulary = llda.set_corpus(labelset, corpus, labels)
    for i in range(options.iteration):
        print i, '\r',
        llda.inference()
    #convert phi to phidict
    phi = llda.phi()
    T_W_Dist = dict()
    for k, label in enumerate(labelset):
        if label == lbfilter:
            continue
        print "\n-- label %d : %s" % (k, label)
        T_W_Dist[label] = dict()
        for w in numpy.argsort(-phi[k])[:TPNUM]: #get TPNUM keywords under each category
            print "%s: %.4f" % (llda.vocas[w], phi[k,w])
            T_W_Dist[label][llda.vocas[w]] = phi[k,w]
    #save phi to database
    save2mongo(T_W_Dist)
    #save phi to file
    numpy.savetxt("PHI", phi, delimiter=",")
# Train on the bundled corpus as soon as this module is executed/imported.
TrainLLDA("total200")
|
from src.vision.presentation.repositories.depthImagePointsRepository import depthImagePointsRepository
class Transform3DPointsToDepthFileService:
    """Persists a set of 3D points and their 2D projections to a depth file.

    Thin orchestration wrapper around depthImagePointsRepository.
    """
    def __init__(self):
        self.depthImageRepository = depthImagePointsRepository()
    def execute(self, points_3d, points_a_2d, calibration_data, image):
        # calibration_data is currently unused -- kept for interface parity?
        # NOTE(review): output path 'bin/second_test.txt' is hard-coded.
        self.depthImageRepository.saveInPathWithImage(
            'bin/second_test.txt',
            points_3d,
            points_a_2d,
            (image.shape[0], image.shape[1])
        )
        # Immediately read the file back (result is discarded) -- presumably a
        # sanity check that the write round-trips; confirm intent.
        self.depthImageRepository.readFromPath('bin/second_test.txt')
|
#Write a program to prompt the user for hours and rate per hour using input to compute gross pay. Pay the hourly rate for the hours up to 40 and 1.5 times the hourly rate for all hours worked above 40 hours. Use 45 hours and a rate of 10.50 per hour to test the program (the pay should be 498.75). You should use input to read a string and float() to convert the string to a number. Do not worry about error checking the user input - assume the user types numbers properly.
h=input('Enter the hours:')
r=input('Enter the rate:')
hrs = float(h)
rte = float(r)
if hrs<=40:
print(hrs*rte)
if hrs>40:
print(40*rte+(hrs-40)*rte*1.5)
|
import theano
class Model(object):
    """Abstract interface for models offering test/train forward passes."""

    def test_fprop(self, input_state):
        """Forward-propagate in test (inference) mode; subclasses override."""
        pass

    def train_fprop(self, input_state):
        """Forward-propagate in training mode; subclasses override."""
        pass
class Sequential(Model):
    """Feed-forward container that chains layers one after another."""

    def __init__(self, input_var, output_var, verbose=True):
        """
        PARAM:
            input_var (T.vector() | T.matrix() | T.tensor3() | T.tensor4()):
                tensor variable matching the dimensionality of the dataset
                input X
            output_var (T.vector() | T.matrix() | T.tensor3() | T.tensor4()):
                tensor variable matching the dimensionality of the dataset
                output y
            verbose (bool):
                collect per-layer stats during forward propagation if True
        """
        self.input_var = input_var
        self.output_var = output_var
        self.layers = []
        self.verbose = verbose

    def add(self, layer):
        """Append a layer to the end of the stack."""
        self.layers.append(layer)

    def pop(self, index):
        """Remove and return the layer at `index`."""
        return self.layers.pop(index)

    def test_fprop(self, input_state, layers=None):
        """Chain _test_fprop through the selected layer indices.

        Returns (final_state, [(stat_name, stat_value), ...]).
        """
        if layers is None:
            layers = range(len(self.layers))
        collected = []
        state = input_state
        for idx in layers:
            layer = self.layers[idx]
            out = layer._test_fprop(state)
            if self.verbose:
                prefix = str(idx) + '_' + layer.__class__.__name__ + '_'
                for tag, val in layer._layer_stats(state, out):
                    collected.append((prefix + tag, val))
            state = out
        return state, collected

    def train_fprop(self, input_state, layers=None):
        """Chain _train_fprop through the selected layer indices
        (same return convention as test_fprop)."""
        if layers is None:
            layers = range(len(self.layers))
        collected = []
        state = input_state
        for idx in layers:
            layer = self.layers[idx]
            out = layer._train_fprop(state)
            if self.verbose:
                prefix = str(idx) + '_' + layer.__class__.__name__ + '_'
                for tag, val in layer._layer_stats(state, out):
                    collected.append((prefix + tag, val))
            state = out
        return state, collected

    def fprop(self, input_values):
        """Numerically evaluate the whole stack on concrete input values."""
        return self.fprop_layers(input_values)

    def fprop_layers(self, input_values, layers=None):
        """Compile a theano function over `layers` and evaluate it."""
        output, _stats = self.test_fprop(self.input_var, layers)
        if isinstance(self.input_var, (list, tuple)):
            fn = theano.function(self.input_var, output, on_unused_input='warn', allow_input_downcast=True)
        else:
            fn = theano.function([self.input_var], output, on_unused_input='warn', allow_input_downcast=True)
        if isinstance(input_values, tuple):
            return fn(*input_values)
        return fn(input_values)

    def get_layers(self):
        """Return the underlying layer list (not a copy)."""
        return self.layers
|
#list of reserved words
RESERVED_WORDS = ['break', 'do', 'instanceof', 'typeof', 'case', 'else', 'new', 'var', 'catch', 'finally', 'return',
                  'void', 'continue', 'for',
                  'switch', 'while', 'debugger', 'function', 'this', 'with', 'default', 'if', 'throw', 'delete', 'in',
                  'try']
# Words reserved for future use, plus the additional ones reserved only in
# strict mode.
FUTURE_RESERVED_WORDS = ['class', 'enum', 'extends', 'super', 'const', 'export', 'import']
FUTURE_STRICT_RESERVED_WORDS = ['implements', 'let', 'private', 'public', 'yield', 'interface', 'package', 'protected',
                                'static']
# Punctuators -- note '/' and '/=' are absent: they are handled separately
# (DIV_PUNCTUATOR) because of the division / regex-literal ambiguity.
PUNCTUATORS = ['{', '}', '(', ')', '[', ']', '.', ';', ',', '<', '>', '<=', '>=', '==', '!=', '===', '!==', '+', '-',
               '*', '%', '++', '--', '<<', '>>'
    , '>>>', '&', '|', '^', '!', '~', '&&', '||', '?', ':', '=', '+=', '-=', '*=', '%=', '<<=', '>>=', '>>>=', '&=',
               '|=', '^=']
# Single-character escape sequences: escape char -> the character it denotes.
SINGLE_CHARACTER_ESC_SEQ = {'b': '\u0008', 't': '\u0009', 'n': '\u000A', 'v': '\u000B', 'f': '\u000C', 'r': '\u000D',
                            '"': '\u0022', '\'': '\u0027',
                            '\\': '\u005c'}
#tokens
class TOK:
    """Integer token-type codes emitted by the Lexer."""
    LT = 1                # line terminator
    SINGLE_COMMENT = 2
    MULTI_COMMENT = 3
    MULTINL_COMMENT = 4
    ID = 5                # identifier
    NUMERIC = 6
    WS = 7                # whitespace
    RESERVED = 8
    FUTURE_RESERVED = 9
    PUNCTUATOR = 10
    NULL = 11
    BOOL = 12
    STRING = 13
    REGEXP = 14
    DIV_PUNCTUATOR = 15   # '/' or '/=' (disambiguated from regex literals)
    UNKNOWN = 999
    EOF = 1000
    ERROR = 1001
class LexerException(Exception):
    """Raised internally on malformed input; caught by Lexer.getNext."""
    pass
def isHexDigit(chr):
    """Return True when *chr* is a hexadecimal digit character (0-9, a-f, A-F)."""
    return chr.lower() in 'abcdef' or chr.isdigit()
def isLineTerm(c):
    """True when *c* is an ECMAScript LineTerminator (LF, CR, LS U+2028, PS U+2029)."""
    # note: '\n' and '\u000a' are the same character, so LF is listed twice
    return c in '\n\u000a\u000d\u2028\u2029'
def isIDStart(c):
    """True when *c* may start an identifier (letter, underscore, or dollar)."""
    if c.isalpha():
        return True
    return c in '_$'
def isIDPart(c):
    """True when *c* may appear inside an identifier: an identifier start
    character (letter, underscore, dollar) or a numeric character."""
    return c.isalpha() or c in '_$' or c.isnumeric()
def isWS(c):
    """True when *c* is treated as whitespace by the lexer.

    Covers space, tab, FF, VT, NBSP and the Unicode space separators. '\\n'
    and '\\r' are listed too, although getNext classifies them as line
    terminators before ever consulting this predicate.
    """
    return c in {'\u0020', '\u0009', '\n', '\r', '\u000c', '\u000b', '\u00a0', '\u1680',
                 '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006',
                 '\u2007', '\u2008', '\u2009', '\u200A', '\u200B', '\u202F', '\u3000'}
def hexToDec(char):
    """Map a single hex digit character to its numeric value 0-15.

    Raises ValueError for anything that is not a hex digit.
    """
    lowered = char.lower()
    return '0123456789abcdef'.index(lowered)
class Lexer:
    """Hand-written ECMAScript (JavaScript) tokenizer.

    ``pointer`` marks the start of the current lexeme and ``forward`` the scan
    position. Call ``setSrc`` before scanning; ``getToken`` is the public
    entry point and skips whitespace/comments according to its mode flags.
    """
    def __init__(self):
        self.state = 0          # unused by the methods below; kept for compatibility
        self.src = ''
        self.pointer = -1       # start of the current lexeme
        self.forward = -1       # scan position
        self.eof = False        # kept for compatibility; isEOF() is computed
        self.strictMode = False # enables FUTURE_STRICT_RESERVED_WORDS
    def isEOF(self):
        """True when the scan position has run past the end of the source."""
        return self.forward >= len(self.src)
    def lookup(self):
        """Peek at the character under the scan position without consuming it."""
        return self.src[self.forward]
    def setSrc(self, js):
        """Load a new source string and reset all scanner state."""
        self.src = js
        self.pointer = 0
        self.forward = 0
        self.eof = False
        self.prevPos = 0
        self.line = self.lastLine = 1
        self.column = self.lastColumn = 1
    def extract(self, tokenType):
        """Return (tokenType, lexeme) for the span [pointer, forward)."""
        return tokenType, self.src[self.pointer:self.forward]
    def getNext(self, REMode=False):
        """Scan and return the next raw token as (type, lexeme).

        When ``REMode`` is set, a '/' starts a regular expression literal
        instead of a division punctuator. Lexer errors are reported as
        TOK.UNKNOWN tokens rather than raised.
        """
        try:
            if self.isEOF():
                return TOK.EOF, ''
            self.pointer = self.forward
            # line terminators and whitespace
            if isLineTerm(self.lookup()):
                self.goForward()
                return self.extract(TOK.LT)
            if isWS(self.lookup()):
                self.goForward()
                return TOK.WS, ''
            # comments, regexps and division all start with '/'
            if self.lookup() == '/':
                self.goForward()
                if self.lookup() == '/':
                    return self.getSingleComment()
                if self.lookup() == '*':
                    return self.getMultiComment()
                if REMode:
                    return self.getRegExp()
                else:
                    if not self.isEOF() and self.src[self.forward] == '=':
                        self.goForward()
                    return self.extract(TOK.DIV_PUNCTUATOR)
            # identifiers, keywords and literals spelled like identifiers
            if isIDStart(self.src[self.forward]):
                self.goForward()
                while not self.isEOF() and isIDPart(self.src[self.forward]):
                    self.goForward()
                return self.getIDOrReserved()
            # numbers starting with 0: either '0.xxx' or a hex literal '0x...'
            if self.src[self.forward] == '0':
                self.goForward()
                if not self.isEOF() and self.src[self.forward] == '.':
                    self.goForward()
                    return self.getNumericAfterDot()
                #hex digits
                if not self.isEOF() and self.src[self.forward] in 'xX':
                    self.goForward()
                    # BUG FIX: guard against '0x' at end of input before
                    # indexing (previously raised an uncaught IndexError).
                    if not self.isEOF() and isHexDigit(self.src[self.forward]):
                        while not self.isEOF() and (
                                self.src[self.forward].isdigit() or self.src[self.forward].lower() in 'abcdef'):
                            self.goForward()
                        return self.extractNumeric()
                    raise LexerException('Illegal')
                return self.extractNumeric()
            # decimal literals 1-9...
            if self.src[self.forward].isnumeric() and self.src[self.forward] != '0':
                self.goForward()
                while not self.isEOF() and self.src[self.forward].isdigit():
                    self.goForward()
                if not self.isEOF() and self.src[self.forward] == '.':
                    self.goForward()
                    return self.getNumericAfterDot()
                if not self.isEOF() and self.src[self.forward] in 'eE':
                    self.goForward()
                    return self.getNumericExp()
                return self.extractNumeric()
            # '.' starts a fraction when followed by a digit, else it is a punctuator
            if self.src[self.forward] == '.':
                self.goForward()
                # BUG FIX: a trailing '.' at end of input is the dot punctuator
                # (previously raised an uncaught IndexError).
                if not self.isEOF() and self.src[self.forward].isnumeric():
                    return self.getNumericAfterDot()
                return TOK.PUNCTUATOR, '.'
            #check punctuators - must be after all rules which starts from one of punctuator
            for i in [4, 3, 2, 1]:
                if (self.forward + i <= len(self.src)) and self.src[self.forward:self.forward + i] in PUNCTUATORS:
                    self.goForward(i)
                    return self.extract(TOK.PUNCTUATOR)
            #string literals
            if self.src[self.forward] in ['"', "'"]:
                return self.getString()
            self.goForward()
        except LexerException as e:
            print(e)
        return self.extract(TOK.UNKNOWN)
    def getMultiComment(self):
        """Scan a /* ... */ comment; MULTINL_COMMENT when it spans lines.

        NOTE(review): an unterminated comment runs to EOF and falls off the
        loop returning None, which callers do not handle — confirm whether
        that can occur in practice.
        """
        state = 2
        nl = False
        self.goForward()
        while not self.isEOF():
            if self.src[self.forward] == '\n': nl = True
            if state == 2:
                if self.src[self.forward] == '*':
                    state = 3
                    self.goForward()
                else:
                    # state = 2
                    self.goForward()
            if state == 3:
                if self.src[self.forward] == '*':
                    # state = 3
                    self.goForward()
                elif self.src[self.forward] == '/':
                    # state = 4
                    self.goForward()
                    return TOK.MULTINL_COMMENT if nl else TOK.MULTI_COMMENT, self.src[self.pointer + 2:self.forward - 2]
                else:
                    state = 2
                    self.goForward()
    def getSingleComment(self):
        """Scan a // comment up to (not including) the line terminator."""
        while not self.isEOF() and not isLineTerm(self.src[self.forward]):
            self.goForward()
        return TOK.SINGLE_COMMENT, self.src[self.pointer + 2:self.forward]
    def getToken(self, REMode=False, LTMode=False):
        """Return the next significant token, skipping whitespace and comments.

        REMode: '/' begins a regular expression literal.
        LTMode: line terminators (and multi-line comments, which count as one)
        are reported as TOK.LT instead of being skipped.
        """
        self.prevPos = self.forward
        self.lastLine = self.line
        self.lastColumn = self.column
        token = self.getNext(REMode)
        while token[0] == TOK.SINGLE_COMMENT or token[0] == TOK.MULTI_COMMENT or token[0] == TOK.WS\
                or (token[0] == TOK.LT and not LTMode) or (token[0] == TOK.MULTINL_COMMENT and not LTMode):
            self.lastLine = self.line
            self.lastColumn = self.column
            token = self.getNext(REMode)
        if token[0] == TOK.MULTINL_COMMENT and LTMode:
            return TOK.LT, ''
        return token
    def extractNumeric(self):
        """Finish a numeric literal; an identifier character glued to the
        number (e.g. '3a') makes it a TOK.ERROR."""
        if not self.isEOF() and isIDStart(self.src[self.forward]):
            return self.extract(TOK.ERROR)
        return self.extract(TOK.NUMERIC)
    def getNumericAfterDot(self):
        """Scan the fractional digits and optional exponent after a '.'."""
        while not self.isEOF() and self.src[self.forward].isnumeric():
            self.goForward()
        if not self.isEOF() and self.src[self.forward] in 'eE':
            self.goForward()
            return self.getNumericExp()
        return self.extractNumeric()
    def getNumericExp(self):
        """Scan the exponent part after 'e'/'E'; the sign is optional."""
        # BUG FIX: guard both lookups against end of input (previously '1e'
        # at EOF raised an uncaught IndexError).
        if not self.isEOF() and self.src[self.forward] in '+-':
            self.goForward()
        if not self.isEOF() and self.src[self.forward].isnumeric():
            while not self.isEOF() and self.src[self.forward].isnumeric():
                self.goForward()
            return self.extractNumeric()
        return TOK.UNKNOWN, ''
    def getIDOrReserved(self):
        """Classify the identifier just scanned: keyword, literal or plain ID."""
        id = self.src[self.pointer: self.forward]
        if id in RESERVED_WORDS:
            return self.extract(TOK.RESERVED)
        if id in FUTURE_RESERVED_WORDS:
            return self.extract(TOK.FUTURE_RESERVED)
        if self.strictMode and id in FUTURE_STRICT_RESERVED_WORDS:
            return self.extract(TOK.FUTURE_RESERVED)
        if id == 'null':
            return self.extract(TOK.NULL)
        if id == 'true':
            return self.extract(TOK.BOOL)
        if id == 'false':
            return self.extract(TOK.BOOL)
        return self.extract(TOK.ID)
    def getString(self):
        """Scan a quoted string literal, decoding escape sequences.

        Returns (TOK.STRING, decoded_value) on success; an unterminated string
        or a bad escape yields a TOK.ERROR token.
        """
        quote = self.src[self.forward]
        token = ''
        self.goForward()
        try:
            while not self.isEOF() and self.src[self.forward] != quote:
                if self.src[self.forward] == '\\':
                    self.goForward()
                    # a backslash-newline is a line continuation: emits nothing
                    if isLineTerm(self.src[self.forward]):
                        self.goForward()
                    else:
                        token += self.getEscapeSeq()
                else:
                    token += self.src[self.forward]
                    self.goForward()
            if not self.isEOF() and self.src[self.forward] == quote:
                self.goForward()
                return TOK.STRING, token
        # BUG FIX: was a bare 'except:'; only the scanner's own failure modes
        # (bad escape, running off the end of the source) should be swallowed.
        except (LexerException, IndexError):
            pass
        return self.extract(TOK.ERROR)
    def getEscapeSeq(self):
        """Decode one escape sequence following a backslash in a string."""
        if not self.isEOF():
            #single escape character
            if self.src[self.forward] in SINGLE_CHARACTER_ESC_SEQ:
                self.goForward()
                return SINGLE_CHARACTER_ESC_SEQ[self.src[self.forward - 1]]
            # NonEscapeCharacter: anything else stands for itself
            if not isLineTerm(self.src[self.forward]) and not self.src[self.forward].isnumeric() and not self.src[
                self.forward] in 'xu':
                self.goForward()
                return self.src[self.forward - 1]
            if self.src[self.forward] == 'u':
                # BUG FIX: all four hex digits must be checked (the first two
                # were tested twice) and decoded with hexToDec — int() only
                # handled decimal digits, so escapes like '\\uea12' failed.
                if isHexDigit(self.src[self.forward + 1]) and isHexDigit(self.src[self.forward + 2]) and isHexDigit(
                        self.src[self.forward + 3]) and isHexDigit(self.src[self.forward + 4]):
                    code = 4096 * hexToDec(self.src[self.forward + 1]) + 256 * hexToDec(
                        self.src[self.forward + 2]) + 16 * hexToDec(
                        self.src[self.forward + 3]) + hexToDec(self.src[self.forward + 4])
                    self.goForward(5)
                    return chr(code)
            if self.src[self.forward] == 'x':
                if isHexDigit(self.src[self.forward + 1]) and isHexDigit(self.src[self.forward + 2]):
                    code = 16 * hexToDec(self.src[self.forward + 1]) + hexToDec(self.src[self.forward + 2])
                    self.goForward(3)
                    return chr(code)
            # '\0' is NUL as long as it is not followed by another digit
            if self.src[self.forward] == '0' and self.forward < len(self.src) - 1 and not self.src[
                self.forward + 1].isdigit():
                self.goForward()
                return '\0'
        raise LexerException('error parsing string escape sequence')
    def getRegExp(self):
        """Scan a regular expression literal (the leading '/' is consumed).

        NOTE(review): indexing here is unguarded; an unterminated regexp at
        end of input raises IndexError, which getNext does not catch.
        """
        #RegularExpressionFirstChar
        if not isLineTerm(self.src[self.forward]) and not self.src[self.forward] in '*\\/[':
            self.goForward()
        elif self.src[self.forward] == '\\':
            self.goForward()
            if isLineTerm(self.src[self.forward]):
                return self.extract(TOK.ERROR)
            self.goForward()
        elif self.src[self.forward] == '[':
            self.getRegExpClass()
        else:
            return self.extract(TOK.ERROR)
        #RegularExpressionChars
        while self.src[self.forward] != '/':
            if not isLineTerm(self.src[self.forward]) and not self.src[self.forward] in '\\/[':
                self.goForward()
            elif self.src[self.forward] == '\\':
                self.goForward()
                if isLineTerm(self.src[self.forward]):
                    return self.extract(TOK.ERROR)
                self.goForward()
            elif self.src[self.forward] == '[':
                self.getRegExpClass()
            else:
                return self.extract(TOK.ERROR)
        self.goForward()
        #RegularExpressionFlags
        while self.forward < len(self.src) and isIDPart(self.src[self.forward]):
            self.goForward()
        return self.extract(TOK.REGEXP)
    def getRegExpClass(self):
        """Scan a [...] character class inside a regular expression literal."""
        self.goForward()
        while self.src[self.forward] != ']':
            if self.src[self.forward] == '\\':
                self.goForward()
                if isLineTerm(self.src[self.forward]):
                    raise LexerException('Error parsing RegExp class - unsuspected LineTerminator')
                self.goForward()
            elif not isLineTerm(self.src[self.forward]) and not self.src[self.forward] in ']\\':
                self.goForward()
            else:
                raise LexerException('Error parsing RegExp')
        self.goForward()
    def rewind(self):
        """Undo the last getToken: move the scanner back to where it started."""
        # BUG FIX: 'self.position' was a typo that left self.pointer stale;
        # .position is still assigned in case external code reads it.
        self.pointer = self.position = self.forward = self.prevPos
    def getLastTokenPos(self):
        """Line/column where the most recent getToken started."""
        return {'line': self.lastLine, 'column': self.lastColumn}
    def goForward(self, step=1):
        """Advance the scan position by *step* characters, tracking line/column."""
        while step > 0:
            if self.forward < len(self.src) and isLineTerm(self.src[self.forward]):
                self.line += 1
                self.column = 1
            else:
                self.column += 1
            self.forward += 1
            step -= 1
def tokenToStr(token, value=None):
    """Render a token as a human-readable string (for error messages).

    Accepts either a ``(type, lexeme)`` tuple as the first argument, or a
    separate token type code plus lexeme.
    """
    # Idiom fixes: compare to None with 'is', test the type with isinstance.
    if value is None and isinstance(token, tuple):
        value = token[1]
        token = token[0]
    if token == TOK.ID:
        return 'Identifier'
    if token == TOK.EOF:
        return 'EOF'
    if token == TOK.PUNCTUATOR:
        return value
    if token == TOK.STRING:
        return 'String'
    return 'token:' + str(token) + ' \'' + value + '\''
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.externals import joblib
# from sklearn.feature_selection import SelectKBest, chi2
# from sklearn.svm import LinearSVC
# from sklearn.feature_selection import VarianceThreshold
# from sklearn.feature_selection import SelectFromModel
# from sklearn.ensemble import ExtraTreesClassifier
# Categorical tool-identifier columns, mapped to the CSV file base name used
# to persist their integer encoding (see create/read_Tools_Dictionary below).
toolCSV = {'TOOL_ID': 'TOOL_ID', 'TOOL_ID (#1)': 'TOOL_ID (#1)', 'TOOL_ID (#2)': 'TOOL_ID (#2)',
           'TOOL_ID (#3)': 'TOOL_ID (#3)',
           'TOOL': 'TOOL', 'TOOL (#1)': 'TOOL (#1)', 'TOOL (#2)': 'TOOL (#2)',
           'Tool': 'Tool_small', 'Tool (#2)': 'Tool (#2)_small', 'tool (#1)': 'tool (#1)_small'}
# 'Tool (#1)': 'Tool (#1)_num', 'tool': 'tool_num', 'Tool (#3)': 'Tool (#3)_num'}
def create_Tools_Dictionary(df):
    """Integer-encode every tool column of *df* in place.

    For each column listed in ``toolCSV``, assigns an arbitrary integer code
    to each distinct value, saves the mapping to .\\toolCSV\\<name>.csv so
    ``read_Tools_Dictionary`` can replay it, then rewrites the column.
    Returns the mutated frame.
    """
    for column in df.columns.values:
        if column not in toolCSV:
            continue
        mapping = {}
        next_code = 0
        for element in set(df[column].values.tolist()):
            if element not in mapping:
                mapping[element] = next_code
                next_code += 1
        # Persist the value -> code table before rewriting the column.
        pd.DataFrame(mapping, index=[0]).to_csv(".\\toolCSV\\%s.csv" % (toolCSV[column]))
        df[column] = [mapping[element] for element in df[column]]
    return df
def read_Tools_Dictionary(df):
    """Re-apply the persisted tool-column encodings to *df* in place.

    Loads the value -> code table written by ``create_Tools_Dictionary`` for
    each tool column and rewrites the column. Returns the mutated frame.
    """
    for column in df.columns.values:
        if column not in toolCSV:
            continue
        saved = pd.read_csv(".\\toolCSV\\%s.csv" % (toolCSV[column]))
        mapping = saved.to_dict('index')[0]
        print(mapping)
        df[column] = [mapping[element] for element in df[column]]
    return df
def create_unique_Dictionary(unique_elements):
    """Map each distinct element to a sequential integer code (first-seen order)."""
    # dict.fromkeys dedupes while preserving encounter order, matching the
    # original counter-based loop exactly.
    return {element: code for code, element in enumerate(dict.fromkeys(unique_elements))}
def create_NumClass_Dictionary(df):
    """Integer-encode low-cardinality non-tool columns of *df* in place.

    A column (not listed in ``toolCSV``) with fewer than 5 distinct string
    values is treated as a class column: its values are coded as integers and
    the mapping is saved to .\\col_class_CSV\\<column>.csv.
    Returns (mutated frame, list of encoded column names).
    """
    encoded_columns = []
    for column in df.columns.values:
        if column in toolCSV:
            continue
        unique_elements = sorted(set(df[column].astype(str)))
        if len(unique_elements) >= 5:
            continue
        mapping = create_unique_Dictionary(unique_elements)
        df[column] = [mapping[value] for value in df[column].astype(str)]
        pd.DataFrame(mapping, index=[0]).to_csv(".\\col_class_CSV\\%s.csv" % column)
        encoded_columns.append(column)
    return df, encoded_columns
def read_NumClass_Dictionary(df):
    """Re-apply persisted class-column encodings to *df* in place.

    The set of encoded columns is read from class_cols.txt; each mapping comes
    from the CSV written by ``create_NumClass_Dictionary``. Returns the frame.
    """
    class_cols = np.loadtxt("class_cols.txt", dtype=bytes).astype(str)
    for column in df.columns.values:
        if column not in class_cols:
            continue
        saved = pd.read_csv(".\\col_class_CSV\\%s.csv" % column)
        mapping = saved.to_dict('index')[0]
        df[column] = [mapping[value] for value in df[column].astype(str)]
    return df
def drop_fill(df):
    """Drop the ID column and impute zeros, in place: 0 -> NaN -> column median -> 0.

    Zeros are treated as missing values. A column that is entirely zero has a
    NaN median, so the median fill is a no-op there and the final fillna(0)
    restores it to zero (leaving it constant, to be dropped downstream).
    Returns the mutated frame.
    """
    # Keyword form: the positional axis argument was removed in pandas 2.0.
    df.drop(columns=['ID'], inplace=True)
    df.replace(0, np.nan, inplace=True)
    df.fillna(df.median(), inplace=True)
    df.fillna(0, inplace=True)  # all-zero columns still hold NaN here
    return df
def calculate_mse(_x, _y):
    """Scaled error between two arrays: the L2 norm of (x - y) divided by len(y).

    NOTE(review): despite the name this is not the textbook MSE (mean of
    squared errors); callers appear to rely on this particular scaling.
    """
    difference = _x - _y
    return np.linalg.norm(difference) / len(_y)
def dropCharacter(df):
    """Drop (near-)constant columns from *df* in place.

    A column whose standard deviation is <= 1e-14 carries no information for
    feature selection and is removed. Returns (mutated frame, list of dropped
    column names).
    """
    zero_std_cols = [
        header
        for header, std in zip(df.columns.values, df.std().values.tolist())
        if std <= 1e-14
    ]
    # Keyword form: the positional axis argument was removed in pandas 2.0.
    df.drop(columns=zero_std_cols, inplace=True)
    return df, zero_std_cols
def scale_train(x):
    """Fit a StandardScaler on the training data, persist it, and return the scaled data.

    The fitted scaler is dumped to .\\model\\scaler.save so that scale_load
    can apply the identical transform at inference time.

    Note: ~35 lines of commented-out feature-selection experiments
    (chi2 / VarianceThreshold / L1 / ExtraTrees) were removed here; recover
    them from version control if needed.
    """
    scaler = preprocessing.StandardScaler().fit(x)
    scaled_x = scaler.transform(x)
    joblib.dump(scaler, ".\\model\\scaler.save")
    return scaled_x
def scale_load(x):
    """Apply the StandardScaler persisted by scale_train to new data."""
    return joblib.load(".\\model\\scaler.save").transform(x)
def findColumns(df, lower, upper):
    """Drop every column whose mean lies within [lower, upper] (inclusive).

    Note the inversion: the *selected* columns are the ones removed.
    Returns (mutated frame, list of removed column names).
    """
    colmean = df.mean()
    chosen_Col = [column for column in df.columns.values
                  if lower <= colmean[column] <= upper]
    # print("chosen cols:",chosen_Col)
    # Keyword form: the positional axis argument was removed in pandas 2.0.
    df.drop(columns=chosen_Col, inplace=True)
    return df, chosen_Col
|
# Modify to Client instead of APIClient?
from docker import Client
import json
import argparse
def _argument_parser():
    """Parse the launch arguments: --mode/-m (command mode) and --package/-p."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", "-m", help="set command mode")
    parser.add_argument("--package", "-p", help="package name")
    return parser.parse_args()
# Returns the ID of a container if it's running. If it can't be find, return False
def _get_container_id(client):
for container in client.containers():
if 'development-server' in container['Image']:
found_container = True
running_container = container['Id']
return running_container
running_container = 'NONE'
return running_container
# Returns true or false if the image for the given environment exists
def _get_image(client):
image_name = 'development-server'
if image_name in str(json.dumps(client.images())):
print(image_name + ' image found')
return True
else:
print(image_name + ' image not found')
return False
def build_image(client, dockerfile_path):
    """Build the 'development-server' image from the Dockerfile at *dockerfile_path*.

    The docker build API returns a stream of status lines; it must be fully
    consumed for the build to run to completion. (The original collected the
    lines into an unused 'response' list next to commented-out debug code.)
    """
    print('Building image...')
    for _line in client.build(path=dockerfile_path, rm=True, tag='development-server'):
        pass
def run_container(client):
    """Create and start a 'development-server' container.

    Container port 22 (ssh) is published on host port 22000.
    Returns (0, exposed_port).
    """
    exposed_port = 22000
    container = client.create_container(
        image='development-server',
        name='development-server',
        detach=True,
        ports=[22],
        # BUG FIX: was cli.create_host_config — it reached for the module
        # level 'cli' global instead of the injected client.
        host_config=client.create_host_config(port_bindings={
            22: exposed_port
        })
    )
    client.start(container=container.get('Id'))
    return 0, exposed_port
def remove_previous_instance(client):
    """Kill and remove any running development-server container.

    Returns 0 when a container was removed, None when nothing was running.
    """
    container_id = _get_container_id(client)
    if container_id == 'NONE':
        print('No previous containers found.')
        return None
    print('Killing previous "developer-server" container')
    client.kill(container_id)
    print('Removing container')
    client.remove_container(container_id)
    return 0
def delete_image(client):
    """Delete the development-server image, stopping its container first if needed.

    Returns True when the image was deleted, False when it did not exist.
    """
    if not _get_image(client):
        print('Image "development-server" not found')
        return False
    if _get_container_id(client) == 'NONE':
        print('No running containers found, deleting image...')
    else:
        print('Found running containers, deleting image...')
        remove_previous_instance(client)
    client.remove_image('development-server')
    print('Image succesfully deleted')
    return True
def execute_commands(client, command, workdir=''):
    """Run *command* inside the development-server container.

    Returns the raw exec output (bytes), or an explanatory string when no
    container is currently running. ``workdir`` optionally sets the working
    directory for the exec instance.
    """
    container_id = _get_container_id(client)
    if container_id == 'NONE':
        return 'Container not found. {} was not launched'.format(command)
    if workdir:
        exec_id = client.exec_create(container_id, command, workdir=workdir)
    else:
        exec_id = client.exec_create(container_id, command)
    return client.exec_start(exec_id)
if __name__ == '__main__':
    # Talk to the local docker daemon over its unix socket.
    cli = Client(base_url='unix://var/run/docker.sock')
    # Argument parsing
    args = _argument_parser()
    deploy_mode = args.mode
    # --package is only read for the package-management modes.
    if deploy_mode == 'install_package' or deploy_mode == 'delete_package':
        package = args.package
    if deploy_mode == 'status':
        # Report whether the development-server container is up.
        if _get_container_id(cli) != 'NONE':
            print('Development server is running!')
        else:
            print('Development server is not currently running.')
    elif deploy_mode == 'init':
        # Clean up older images
        #delete_image(cli)
        remove_previous_instance(cli)
        # Build new images
        build_image(cli, '/home/jenkins/development_server')
        # Create and run new container
        result, exposed_port = run_container(cli)
        print('Created container with the image "development-server", exposing ssh at port {}'.format(str(exposed_port)))
    elif deploy_mode == 'remove_server':
        remove_previous_instance(cli)
        print('Removed development-server instance')
    elif deploy_mode == 'update_packages':
        # Upgrade every outdated pip package inside the container.
        print(execute_commands(cli, "pip list --outdated --format=freeze | grep -v '^\-e' | cut -d = -f 1 | xargs -n1 pip install -U").decode('utf-8'))
    elif deploy_mode == 'get_from_git':
        # Clone/install projects from GitLab via the helper script in the container.
        print(execute_commands(cli, "sh bash_git_clone.sh", workdir='/home/developer'))
        print('Successfully installed packages from GitLab')
    elif deploy_mode == 'install_package':
        print(execute_commands(cli, 'pip install {}'.format(package)).decode('utf-8'))
    elif deploy_mode == 'delete_package':
        print(execute_commands(cli, 'pip uninstall -y {}'.format(package)).decode('utf-8'))
    elif deploy_mode == 'get_packages':
        print(execute_commands(cli, 'pip freeze').decode('utf-8'))
    else:
        print('Unsupported mode {}'.format(deploy_mode))
|
import batchglm.train.tf1.train as train_utils
from batchglm.train.tf1.base import ProcessModelBase, TFEstimatorGraph
from batchglm import pkg_constants
|
#!/home/user/virtualenv/venv_name/3.5/bin/python
from flup.server.fcgi import WSGIServer
from flaskapp import create_app
if __name__ == '__main__':
    # Build the Flask application and serve it over FastCGI (flup's WSGIServer
    # speaks FCGI to a front-end web server such as nginx/lighttpd).
    app = create_app()
    WSGIServer(app).run()
|
# -*- coding: UTF-8 -*-
def _is_balanced(line):
    """Return True when *line* consists solely of properly matched '[] ()'.

    Any character other than the four bracket characters makes the line
    invalid, exactly as in the original single-loop implementation (which
    latched valid=False and kept scanning — the verdict is identical).
    """
    stack = []
    for char in line:
        if char in '[(':
            stack.append(char)
        elif char in '])':
            if not stack:
                return False
            top = stack.pop()
            if (char == ']' and top != '[') or (char == ')' and top != '('):
                return False
        else:
            return False
    # Leftover openers mean an unclosed bracket.
    return not stack
def main():
    """Read a case count, then one line per case; print Yes/No per line
    depending on whether its brackets are balanced."""
    outputs = []
    cases = int(input())
    for _ in range(cases):
        line = input().strip()
        outputs.append('Yes' if _is_balanced(line) else 'No')
    for item in outputs:
        print(item)
# Script entry point.
if __name__ == '__main__':
    main()
# 000562_04_ex01_template.py
def a_descriptive_name(optional_arguments):
    """Documentation string (describes the purpose of the function)."""
    # code
    # code
    # code
    # code
    # return optional_value
# Source: https://github.com/slackapi/bolt-python/blob/main/tests/scenario_tests/test_slash_command.py
import json
from time import time, sleep
from typing import Tuple
from unittest.mock import patch
from slack_sdk.signature import SignatureVerifier
from slack_sdk.web import WebClient
from slack_bolt.app import App
from slack_bolt.request import BoltRequest
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
from fip_slack_bot.main import message_live, message_meuh
from fip_slack_bot.models import Track
class TestSlashCommand:
    """Slash-command dispatch tests for the FIP bot, against a mocked Slack Web API."""
    signing_secret = "secret"
    valid_token = "xoxb-valid"
    mock_api_server_base_url = "http://localhost:8888"
    signature_verifier = SignatureVerifier(signing_secret)
    web_client = WebClient(
        token=valid_token,
        base_url=mock_api_server_base_url,
    )
    def setup_method(self):
        # Isolate each test from real Slack credentials in the environment.
        self.old_os_env = remove_os_env_temporarily()
        setup_mock_web_api_server(self)
    def teardown_method(self):
        cleanup_mock_web_api_server(self)
        restore_os_env(self.old_os_env)
    def generate_signature(self, body: str, timestamp: str):
        """Compute the x-slack-signature header value for *body*."""
        return self.signature_verifier.generate_signature(
            body=body,
            timestamp=timestamp,
        )
    def build_headers(self, timestamp: str, body: str):
        """Headers Slack would attach to a signed slash-command POST."""
        return {
            "content-type": ["application/x-www-form-urlencoded"],
            "x-slack-signature": [self.generate_signature(body, timestamp)],
            "x-slack-request-timestamp": [timestamp],
        }
    def build_valid_request(self) -> BoltRequest:
        """A correctly signed /hello-world request."""
        timestamp, body = str(int(time())), json.dumps(slash_command_body)
        return BoltRequest(body=body, headers=self.build_headers(timestamp, body))
    def test_mock_server_is_running(self):
        resp = self.web_client.api_test()
        # FIX (idiom): compare against None with 'is not', not '!='.
        assert resp is not None
    def test_success(self):
        app = App(
            client=self.web_client,
            signing_secret=self.signing_secret,
        )
        app.command("/hello-world")(commander)
        request = self.build_valid_request()
        response = app.dispatch(request)
        assert response.status == 200
        assert self.mock_received_requests["/auth.test"] == 1
    def test_process_before_response(self):
        app = App(
            client=self.web_client,
            signing_secret=self.signing_secret,
            process_before_response=True,
        )
        app.command("/hello-world")(commander)
        request = self.build_valid_request()
        response = app.dispatch(request)
        assert response.status == 200
        assert self.mock_received_requests["/auth.test"] == 1
    def test_failure(self):
        # No handler registered -> 404; registering an unrelated command
        # must still yield 404 for /hello-world.
        app = App(
            client=self.web_client,
            signing_secret=self.signing_secret,
        )
        request = self.build_valid_request()
        response = app.dispatch(request)
        assert response.status == 404
        assert self.mock_received_requests["/auth.test"] == 1
        app.command("/another-one")(commander)
        response = app.dispatch(request)
        assert response.status == 404
        assert self.mock_received_requests["/auth.test"] == 1
    def build_custom_valid_request(self, slash_body: str) -> BoltRequest:
        """A correctly signed request for an arbitrary urlencoded command body.

        FIX: the annotation said Tuple[str], but every caller passes a plain
        urlencoded string fixture.
        """
        timestamp, body = str(int(time())), json.dumps(slash_body)
        return BoltRequest(body=body, headers=self.build_headers(timestamp, body))
    def test_message_live(self, mocker):
        mocker.patch(
            "fip_slack_bot.main.get_live_on_FIP", return_value=resp_get_live_on_FIP
        )
        app = App(
            client=self.web_client,
            signing_secret=self.signing_secret,
        )
        app.command("/whatsonfip")(message_live)
        request = self.build_custom_valid_request(whatsonfip_command_body)
        response = app.dispatch(request)
        # might be asynchronous
        sleep(0.1)
        assert response.status == 200
        assert self.mock_received_requests["/chat.postMessage"] == 1
        assert (
            self.mock_received_requests_body["/chat.postMessage"][-1]
            == whatsonfip_result_body
        )
    def test_message_meuh(self, mocker):
        mocker.patch(
            "fip_slack_bot.main.get_live_on_meuh", return_value=resp_get_live_on_meuh
        )
        app = App(
            client=self.web_client,
            signing_secret=self.signing_secret,
        )
        app.command("/meuh")(message_meuh)
        request = self.build_custom_valid_request(meuh_command_body)
        response = app.dispatch(request)
        # might be asynchronous
        sleep(0.1)
        assert response.status == 200
        assert self.mock_received_requests["/chat.postMessage"] == 1
        assert (
            self.mock_received_requests_body["/chat.postMessage"][-1]
            == meuh_result_body
        )
# Fixture: the track returned by the patched get_live_on_FIP / get_live_on_meuh.
resp_get_live_on_FIP = Track(
    **{
        "title": "Root down (and get it)",
        "album": "Root down (live)",
        "artist": "Jimmy Smith",
        "year": 1972,
        "label": "VERVE",
        "musical_kind": "Jazz ",
        "external_urls": {
            "deezer": "https://www.deezer.com/track/2461366",
            "itunes": "https://music.apple.com/fr/album/root-down-and-get-it-alternate-take/1442939892?i=1442940484&uo=4",
            "spotify": "https://open.spotify.com/track/19PG9tIlRRi56n7Tgywkxm",
        },
        "cover_url": "https://cdn.radiofrance.fr/s3/cruiser-production/2019/12/afe28a90-5f53-46f9-b8ad-f0afa0c59c4d/266x266_rf_omm_0000230568_dnc.0057956636.jpg",
    }
)
# The RadioMeuh test reuses the same track fixture.
resp_get_live_on_meuh = resp_get_live_on_FIP
# Urlencoded payload of a /whatsonfip slash command, as Slack would POST it.
whatsonfip_command_body = (
    "token=verification_token"
    "&team_id=T111"
    "&team_domain=loudnaround.org"
    "&channel_id=C111"
    "&channel_name=fip"
    "&user_id=W111"
    "&user_name=baloo"
    "&command=%2Fwhatsonfip"
    "&text="
    "&enterprise_id=E111"
    "&enterprise_name=LNA"
    "&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT111%2F111%2Fxxxxx"
    "&trigger_id=111.111.xxx"
)
# Urlencoded payload of a /meuh slash command.
meuh_command_body = (
    "token=verification_token"
    "&team_id=T111"
    "&team_domain=loudnaround.org"
    "&channel_id=C111"
    "&channel_name=fip"
    "&user_id=W111"
    "&user_name=baloo"
    "&command=%2Fmeuh"
    "&text="
    "&enterprise_id=E111"
    "&enterprise_name=LNA"
    "&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT111%2F111%2Fxxxxx"
    "&trigger_id=111.111.xxx"
)
# Expected chat.postMessage body produced by message_live for the track fixture above.
whatsonfip_result_body = {
    "text": "*Root down (and get it)*\n_Jimmy Smith_\nRoot down (live) - 1972",
    "blocks": [
        {
            "type": "section",
            "text": {"type": "mrkdwn", "text": "*Live on FIP !* for <@W111>"},
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Listen :radio:", "emoji": True},
                "value": "Listen to FIP",
                "url": "https://www.fip.fr",
                "action_id": "FIP",
            },
        },
        {"type": "divider"},
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*Root down (and get it)*\n_Jimmy Smith_\nRoot down (live) - 1972",
            },
            "accessory": {
                "type": "image",
                "image_url": "https://cdn.radiofrance.fr/s3/cruiser-production/2019/12/afe28a90-5f53-46f9-b8ad-f0afa0c59c4d/266x266_rf_omm_0000230568_dnc.0057956636.jpg",
                "alt_text": "Root down (and get it), by Jimmy Smith",
            },
        },
        {"type": "divider"},
        {
            "type": "actions",
            "elements": [
                {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Spotify", "emoji": True},
                    "value": "Listen on Spotify",
                    "url": "https://open.spotify.com/track/19PG9tIlRRi56n7Tgywkxm",
                    "action_id": "spotify",
                },
                {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Deezer", "emoji": True},
                    "value": "Listen on Deezer",
                    "url": "https://www.deezer.com/track/2461366",
                    "action_id": "deezer",
                },
                {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "iTunes", "emoji": True},
                    "value": "Listen on iTunes",
                    "url": "https://music.apple.com/fr/album/root-down-and-get-it-alternate-take/1442939892?i=1442940484&uo=4",
                    "action_id": "itunes",
                },
            ],
        },
        {
            "type": "context",
            "elements": [
                {
                    "type": "image",
                    "image_url": "https://upload.wikimedia.org/wikipedia/fr/thumb/d/d5/FIP_logo_2005.svg/240px-FIP_logo_2005.svg.png",
                    "alt_text": "FIP",
                },
                {"type": "mrkdwn", "text": "Try */whatsonfip* yourself !\n"},
            ],
        },
    ],
    "attachments": None,
    "thread_ts": None,
    "channel": "C111",
    "unfurl_links": None,
    "unfurl_media": None,
}
# Expected chat.postMessage body produced by message_meuh — identical to the
# FIP message except for the header section (RadioMeuh branding and link).
meuh_result_body = {
    "text": "*Root down (and get it)*\n_Jimmy Smith_\nRoot down (live) - 1972",
    "blocks": [
        {
            "type": "section",
            "text": {"type": "mrkdwn", "text": "*Live on RadioMeuh !* for <@W111>"},
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Listen :radio:", "emoji": True},
                "value": "Listen to RadioMeuh",
                "url": "https://www.radiomeuh.com/",
                "action_id": "RadioMeuh",
            },
        },
        {"type": "divider"},
        {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*Root down (and get it)*\n_Jimmy Smith_\nRoot down (live) - 1972",
            },
            "accessory": {
                "type": "image",
                "image_url": "https://cdn.radiofrance.fr/s3/cruiser-production/2019/12/afe28a90-5f53-46f9-b8ad-f0afa0c59c4d/266x266_rf_omm_0000230568_dnc.0057956636.jpg",
                "alt_text": "Root down (and get it), by Jimmy Smith",
            },
        },
        {"type": "divider"},
        {
            "type": "actions",
            "elements": [
                {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Spotify", "emoji": True},
                    "value": "Listen on Spotify",
                    "url": "https://open.spotify.com/track/19PG9tIlRRi56n7Tgywkxm",
                    "action_id": "spotify",
                },
                {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Deezer", "emoji": True},
                    "value": "Listen on Deezer",
                    "url": "https://www.deezer.com/track/2461366",
                    "action_id": "deezer",
                },
                {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "iTunes", "emoji": True},
                    "value": "Listen on iTunes",
                    "url": "https://music.apple.com/fr/album/root-down-and-get-it-alternate-take/1442939892?i=1442940484&uo=4",
                    "action_id": "itunes",
                },
            ],
        },
        {
            "type": "context",
            "elements": [
                {
                    "type": "image",
                    "image_url": "https://upload.wikimedia.org/wikipedia/fr/thumb/d/d5/FIP_logo_2005.svg/240px-FIP_logo_2005.svg.png",
                    "alt_text": "FIP",
                },
                {"type": "mrkdwn", "text": "Try */whatsonfip* yourself !\n"},
            ],
        },
    ],
    "attachments": None,
    "thread_ts": None,
    "channel": "C111",
    "unfurl_links": None,
    "unfurl_media": None,
}
# Generic /hello-world slash-command payload used by the dispatch tests.
slash_command_body = (
    "token=verification_token"
    "&team_id=T111"
    "&team_domain=test-domain"
    "&channel_id=C111"
    "&channel_name=random"
    "&user_id=W111"
    "&user_name=primary-owner"
    "&command=%2Fhello-world"
    "&text=Hi"
    "&enterprise_id=E111"
    "&enterprise_name=Org+Name"
    "&response_url=https%3A%2F%2Fhooks.slack.com%2Fcommands%2FT111%2F111%2Fxxxxx"
    "&trigger_id=111.111.xxx"
)
def commander(ack, body, payload, command):
    """Handler for the generic tests: Bolt hands the same command payload to
    all three parameter names, then the request is acknowledged."""
    assert body == command and payload == command
    ack()
|
# Reads N, then L and C, then an NxN integer matrix. Collects every element
# lying in row L or column C (each cell once) and prints their sum minus the
# intersection element (L-1, C-1).
N = int(input())
dois_num = input().split()
L1, C1 = dois_num
L = int(L1)
C = int(C1)
# Creating the NxN matrix
matriz = []  # initializing
[matriz.append([]) for n in range(0,N)]  # creating rows
# Reading the matrix as strings:
matriz_string = []
[matriz_string.append(input().split()) for n in range(0,N)]  # one row of tokens per line
# Converting the string matrix into a matrix of integers:
[[matriz[n].insert(i,int(matriz_string[n][i])) for i in range(0,N)] for n in range(0,N)]
# Counting values
cont = 0  # unused
a=[]
# Collect each cell of row L-1 or column C-1 exactly once.
[[a.append(matriz[n][i]) for i in range(0,N) if i==C-1 or n==L-1]for n in range(0,N)]
print(sum(a)-matriz[L-1][C-1])
|
# Exercise: hex(234) turns the decimal 234 into the hex string '0xea';
# now convert the hex string '0xea' back to decimal using the builtin int.
print(hex(234))
# BUG FIX: int(0xea) received an int literal (already 234) and never exercised
# the string conversion the task asks for; parse the string with base 16
# instead. The printed output (234) is unchanged.
print(int('0xea', 16))
# Classes converting an input decimal number to hex and back to decimal.
class Sixteen:
    """Prompts for a decimal number and prints its hexadecimal form.

    NOTE(review): the result is published through a module-level global
    ``sixteen`` (shadowing this method's name) so that Ten.ten can read it —
    fragile coupling; Ten.ten fails unless sixteen() ran first.
    """
    def __init__(self):
        self.value = 0  # never read afterwards; presumably a leftover placeholder
    def sixteen(self):
        # Prompt (Korean): "Enter a decimal number:"
        value = int(input('10진수 수를 입력하세요: '))
        ten = value
        global sixteen
        sixteen = hex(ten)
        # Message (Korean): "The value of {} converted to hex is {}."
        print('입력하신 {}를 16진수로 변환한 값은 {}입니다.'.format(ten, sixteen))
class Ten(Sixteen):
    """Converts the hex string produced by Sixteen.sixteen back to decimal."""
    def ten(self):
        # Reads the module-level global set by sixteen(); int(s, 16) parses hex.
        ten = int(sixteen, 16)
        # Message (Korean): "The hex value {} converted to decimal is {}."
        print('16진수인 {}를 10진수로 변환한 값은 {}입니다.'.format(sixteen, ten))
# Demo: prompt for a decimal, show it as hex, then convert that hex back.
change = Ten()
change.sixteen()
change.ten()
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
# Keep a reference to the builtin list: the paging function below is named
# 'list' and shadows it.
builtin_list = list
# Module-level PyMongo handle, populated by init_app().
mongo = None
def _id(id):
    """Coerce *id* to a bson ObjectId; values that already are one pass through."""
    return id if isinstance(id, ObjectId) else ObjectId(id)
# [START from_mongo]
def from_mongo(data):
    """Translate a MongoDB document into the application's format.

    Mirrors the document's '_id' as a string 'id' field (mutating *data*);
    falsy input (None, empty dict) yields None.
    """
    if data:
        data['id'] = str(data['_id'])
        return data
    return None
# [END from_mongo]
def init_app(app):
    """Bind the module-level PyMongo handle to the Flask app (app-factory pattern).

    FIX: the original constructed PyMongo(app) — which already initializes the
    extension — and then called init_app(app) a second time. Construct
    uninitialized and initialize exactly once.
    """
    global mongo
    mongo = PyMongo()
    mongo.init_app(app)
def list(limit=10, cursor=None):
    """Return up to *limit* jupes ordered by title, plus the next cursor.

    :param limit: maximum number of documents to return per page.
    :param cursor: number of documents to skip (as produced by a previous
        call); ``None``/falsy means start from the beginning.
    :return: ``(jupes, next_page)`` where ``next_page`` is ``None`` when the
        result set is exhausted.

    NOTE: intentionally shadows the builtin ``list`` (module API name); the
    builtin is preserved above as ``builtin_list``.
    """
    cursor = int(cursor) if cursor else 0
    # Bug fix: the limit parameter was previously ignored (hard-coded
    # limit=10 in find()), so a caller-supplied limit returned the wrong
    # page size and broke the next_page computation below.
    results = mongo.db.jupes.find(skip=cursor, limit=limit).sort('title')
    jupes = builtin_list(map(from_mongo, results))
    next_page = cursor + limit if len(jupes) == limit else None
    return (jupes, next_page)
def read(id):
    """Fetch a single jupe by *id* and normalise it for the application."""
    result = mongo.db.jupes.find_one({'_id': _id(id)})
    return from_mongo(result)
def create(data):
    """Insert *data* as a new jupe and return the stored document."""
    result = mongo.db.jupes.insert_one(data)
    return read(result.inserted_id)
def update(data, id):
    """Replace the jupe identified by *id* with *data* and return it."""
    mongo.db.jupes.replace_one({'_id': _id(id)}, data)
    return read(id)
def delete(id):
    """Remove the jupe identified by *id*; returns ``None``."""
    mongo.db.jupes.delete_one({'_id': _id(id)})
# Prompt for a YouTube URL and download the video's highest-resolution
# progressive stream into the current working directory.
from pytube import YouTube
url=input ("enter the video link\n")
myvideo=YouTube(url)
print(myvideo.title)
# Rebinds myvideo from the YouTube object to its best-resolution Stream.
myvideo=myvideo.streams.get_highest_resolution()
myvideo.download()
from __future__ import absolute_import
from __future__ import print_function
from keras.models import Sequential, Graph
from keras.layers.core import Layer, Activation, Dense, Flatten, Reshape, Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D
import keras.utils.layer_utils as layer_utils
# NOTE(review): this script targets the long-obsolete Keras 0.x API
# (Sequential/Graph with positional layer sizes, the Merge layer, and
# layer_utils.print_layer_shapes); it will not run on any modern Keras.
# Kept verbatim as a legacy shape-inspection test.
print('-- Sequential model')
# Left branch: conv -> pool -> flatten -> dense on a 1x28x28 input.
left = Sequential()
left.add(Convolution2D(32, 1, 3, 3, border_mode='valid'))
left.add(MaxPooling2D(pool_size=(2, 2)))
left.add(Flatten())
left.add(Dense(32 * 13 * 13, 50))
left.add(Activation('relu'))
# Right branch: a single dense layer on a flat 784-vector input.
right = Sequential()
right.add(Dense(784, 30))
right.add(Activation('relu'))
# Merge both branches by concatenation (50 + 30 = 80 features).
model = Sequential()
model.add(Merge([left, right], mode='concat'))
model.add(Dense(80, 10))
model.add(Activation('softmax'))
layer_utils.print_layer_shapes(model, [(1, 1, 28, 28), (1, 784)])
print('-- Graph model')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=4)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Convolution2D(32, 1, 3, 3), name='conv1', input='input2')
graph.add_node(Flatten(), name='flatten1', input='conv1')
graph.add_node(Dense(32 * 13 * 13, 10), name='dense4', input='flatten1')
# NOTE(review): 'sum' merge of dense1 (16) and dense3 (4) outputs looks
# shape-mismatched — presumably intentional for this shape-printing test.
graph.add_output(name='output1', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output2', inputs=['dense1', 'dense4'], merge_mode='concat')
layer_utils.print_layer_shapes(graph, {'input1': (1, 32), 'input2': (1, 1, 28, 28)})
print('Test script complete')
|
from django.http import HttpResponse,HttpResponseRedirect,JsonResponse
from django.shortcuts import render,redirect,get_object_or_404
from django.views.decorators.http import require_http_methods
from apps.common.decorators import ajax_required
from django.contrib.auth import get_user_model
from django.shortcuts import render,redirect
from django.urls import reverse
from apps.comment.models import Comment
from apps.activity.models import Action
from apps.post.models import Post
from apps.activity import utils
@ajax_required
@require_http_methods(["POST"])
def comment_add(request):
    """AJAX endpoint: attach a comment to a post.

    Expects ``post_id`` and ``comment_text`` in the POST body. Creates the
    comment only if an identical (user, post, content) triple does not
    already exist, records an activity action on creation, and returns a
    JSON payload with the creation flag and the post's comment count.
    """
    post = get_object_or_404(Post, id=request.POST.get('post_id'))
    user = request.user
    text = request.POST.get('comment_text')
    comment_obj, comment_added = Comment.objects.get_or_create(
        user=user,
        post=post,
        content=text,
    )
    if comment_added:
        utils.create_action(user, 'commented on', post)
    return JsonResponse({
        'comment_added': comment_added,
        'comments_count': post.comments_count,
    })
#!/usr/bin/env python
# encoding: utf-8
'''
# @Time : 2020/11/8 9:32
# @Author : shursulei
# @Site :
# @File : random_datetime.py
# @Software: PyCharm Community Edition
# @describe:
'''
# Probability curve: the relative weight with which each element is drawn.
# Probability table: the curve expanded element-by-element.
# Example:
#   original data:     [0, 1, 2]
#   probability curve: [2, 3, 4] — meaning 0 is drawn with probability
#                      2/(2+3+4), 1 with probability 3/(2+3+4), and so on.
#   probability table: [0, 0, 1, 1, 1, 2, 2, 2, 2] — the data repeated by its
#                      weights; random.choice(table) then samples directly
#                      according to the curve.
import calendar
import random
from datetime import datetime
from tools.rand import random_avg_split
def random_probabilities(origin_probability, num, rate=0.8):
    """Produce *num* randomly perturbed copies of a probability curve.

    Each weight of ``origin_probability`` is scaled up and split into *num*
    fluctuating parts via ``random_avg_split``; zipping the per-weight splits
    back together yields one perturbed curve per split index. Use with
    ``random_pick`` or an expanded probability table (below).

    :param origin_probability: base curve, e.g. [2, 15, 5, 2]
    :param num: number of perturbed curves to generate
    :param rate: fluctuation rate — 0 is fully random, 1 fully even
    :return: list of *num* tuples, each one perturbed curve
    """
    scaled = [weight * int(num * 10) for weight in origin_probability]
    splits_per_weight = [random_avg_split(total, num, rate) for total in scaled]
    return list(zip(*splits_per_weight))
def expand_probability_table(origin, origin_probability):
    """Expand raw values into a flat probability table.

    Each value of *origin* is repeated ``int(weight)`` times according to the
    matching entry of *origin_probability*; ``random.choice`` on the result
    then samples with the curve's probabilities. Example: ``([0, 1], [2, 3])``
    yields ``[0, 0, 1, 1, 1]`` (0 drawn with probability 2/5, 1 with 3/5).
    """
    table = []
    for value, weight in zip(origin, origin_probability):
        table.extend([value] * int(weight))
    return table
def random_probability_tables(origin, origin_probability, num, rate=0.8):
    """Generate *num* perturbed probability curves and expand each to a table.

    :param origin: raw values, e.g. [0, 1]
    :param origin_probability: base curve, e.g. [2, 3] (0 drawn with
        probability 2/5, 1 with 3/5)
    :param num: number of tables to produce (e.g. 365 to expand a yearly
        curve into one table per day)
    :param rate: fluctuation rate — 0 fully random, 1 fully even
    :return: list of expanded probability tables
    """
    curves = random_probabilities(origin_probability, num, rate)
    return [expand_probability_table(origin, curve) for curve in curves]
def random_pick(target_list, weights):
    """Draw one element of *target_list* using the given relative *weights*."""
    (picked,) = random.choices(target_list, weights, k=1)
    return picked
# Hourly probability-curve demo.
# Expand the 24 hours into a probability table by each hour's weight; drawing
# one element from that table with random.choice yields an hour distributed
# per the hourly curve.
# This approach (random.choice on the pre-expanded table) is much faster than
# random_pick.
# Note: this function only demonstrates how to build the table — because of
# the expansion cost it should not be called directly per sample.
def __probability_demo():
    # Demo only: build the hourly probability table once, then sample an hour.
    _24hour = [i for i in range(24)]
    hour_probability = [5, 3, 1, 2, 1, 1, 2, 3, 8, 10, 14, 12, 10, 12, 12, 11, 10, 8, 8, 20, 8, 8, 8, 5]
    hour_probability_table = expand_probability_table(_24hour, hour_probability)  # keep this reference and reuse it for repeated random.choice calls
    hour = random.choice(hour_probability_table)  # the sampled hour
    return hour
class RandomDatetimeGenerator:
    """Draws random times of day whose distribution follows a perturbed
    per-day probability curve for one calendar year."""

    def __init__(self, year: int, probability):
        # One perturbed curve per day of the year; calendar.isleap returns a
        # bool, so 365 + isleap gives 365 or 366 curves.
        self.year = year
        self.day_probability = random_probabilities(probability, 365 + calendar.isleap(year))
        # Candidate minutes of the day, 0..1439.
        # NOTE(review): random_pick needs weights the same length as the
        # population, so each day's curve — and therefore the *probability*
        # argument — must presumably be 24*60 entries long. Confirm callers.
        self.minute_table = [i for i in range(24 * 60)]

    def get_datetime(self, dt: datetime):
        """Return *dt* with hour/minute drawn from that day's probability
        curve and a uniformly random second."""
        # if self.year != dt.year:
        #     raise ValueError(f"Year {dt.year} from datetime not equals Generator's {self.year}")
        probability = self.day_probability[dt.timetuple().tm_yday - 1]
        minute_of_day = random_pick(self.minute_table, probability)
        return dt.replace(hour=minute_of_day // 60, minute=minute_of_day % 60, second=random.randint(0, 59))
|
# Task 1: match a 163.com e-mail address whose local part (before the @) is
# 4 to 20 characters long, e.g. hello@163.com.
import re

email = 'hello@163.com'
# Bug fix: the previous pattern r'[^@]*' merely grabbed everything before the
# '@' and ignored both the 4-20 length rule and the @163.com requirement.
# \w{4,20} enforces the local-part length and the $ anchor makes the whole
# string have to be a 163.com address.
result = re.match(r'^\w{4,20}@163\.com$', email)
print(result.group())
|
import numpy as np
import math
from math import pi,log
import sys
import ephem as eph
import random as rnd
from dm_functions import *
from neutrino_functions_lowerflux import *
from constants import *
from create_lookupfiles_lowerflux import *
import scipy
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import interp1d,SmoothBivariateSpline
from scipy.stats import norm
##############################################################################
###For test modus of the 2dimensional pdf's
### (test=1 tests the pdf's)
test=0
show_hist=0
print_info=0
if test==1:
print ''
print '##############################################################'
print 'TEST MODE'
print '##############################################################'
print ''
###initialise observer, CygnusA and the Sun
###
line = "Cygnus A,f|J,19:59:28.4,40:44:01.0,1260,2000"
cygnA = ephem.readdb(line)
sun=ephem.Sun()
obs=eph.Observer()
obs.pressure=0.
obs.temp=0.
obs.lon='0.'
obs.lat='0.'
obs.elevation=0.
sun.compute(obs)
rnd.seed()
###flux uncertainties
### (extrapolated ten years)
nu_sigma=np.array([0.08,0.1,0.1])#in fraction of N
###Choose values for simulation
###
steps=4000 #num of steps for integrating the pdf's
N_bins=50 #num of bins for distributions of test-statistic
accuracy=0.0075 #amount by how much both Q_pdf integrals
#are required to be similar when calculating
#the overlap
factor=20 #num of loops when generating the toy models/events
###For the pdf's:
###
N_tt=10 #num of lookup tables for DM, i.e. time bins
N_min_nu=15000 #num of created events to create 2d pdf neutrinos
N_min_DM=10000 #num of created events to create 2d pdf dark matter
#total number of events to generate 2d pdf=
#factor * N_min_(nu/DM)
###For the pseudo experiments:
###
source_length=5000 #number of events in event pool
#total pool size = factor * source_length
N_Q=10 #num of times to vary the fluxes when evaluating Q
#important for neutrino flux uncertainties:
#we have to vary the expectations
N_sim=10 #num of pseudo experiments generated in simulation
#total number of pseudo experiments =
#factor * N_sim * N_Q
mode_max_Mdet=False
if mode_max_Mdet==True: gain_direction=True
###choose detector set-up
### (choose further parameters in constants.py)
### (keep observation time unchanged)
M_det=1.e6#g
t0=eph.date('2014/01/01')
t1=eph.date('2015/01/01')
T_det=(float(t1)-float(t0))
t1_f=float(t1)-float(t0)
###specify mass and cross section range
###
m_DM_array=np.array([6., 30., 100., 1000.])
sigma=np.logspace(-40,-52, 60)
filename_dm='testrun.txt'
f=open(filename_dm,'w')
f.close()
np.random.seed()
##############################################################################
if channel_Erec==True and channel_time==False:
print ''
print 'including recoil energy without time information is not possible'
print ''
sys.exit()
if test==0:
print ''
print '###################################################################'
print 'START'
print '###################################################################'
print ''
print '####################################'
print '#detector specifications:'
print '#mass target material: A=', A,'(N',N,' Z',Z,')'
print '#detector mass = ',M_det/1000.,'kg'
print '#exposure time = ', T_det,' live days'
print '#lower threshold: ',E_thr
print '#upper threshold: ',upper_threshold
print '####################################'
print ''
print 'NEUTRINOS'
print ''
print 'creating lookup tables...'
create_all_neutrino_lookuptables(Npoints=250,Nsteps=500)
print ' DONE!'
print ''
print 'calculating expected neutrino events...'
###calculate number of expected events
### (assuming flux at central value)
time_array_nu,rate_array_nu,dt_nu,N_sun_arr,N_atmo_arr,N_dsnb_arr=\
tot_rate_nu(M_det,E_thr,t0,t1,steps=100)
N_sun0=np.sum(N_sun_arr)
N_atmo0=np.sum(N_atmo_arr)
N_dsnb0=np.sum(N_dsnb_arr)
mu_nu_0=N_sun0+N_atmo0+N_dsnb0
N_nu0=mu_nu_0
N_tot=mu_nu_0
print N_tot
N_nu_avg=int(mu_nu_0)
rest=mu_nu_0-N_nu_avg
if rest>0.5:
N_nu_avg+=1
if test==1:
N_nu_avg=39
###create the pdf's for the neutrino signals
###
bin_prob_B_t=rate_array_nu/N_nu0
acc_prob_B_t=np.cumsum(bin_prob_B_t)
rnd_B_t=interp1d(acc_prob_B_t,time_array_nu,kind='linear')
rate_array_nu[0]=rate_array_nu[1]
pdf_nu_t=interp1d(time_array_nu,rate_array_nu/N_nu0/dt_nu,kind='linear')
###get the relative parts of neutrino sources
### (assuming central flux values)
sun_ratio=N_sun0/N_tot
atmo_ratio=N_atmo0/N_tot
dsnb_ratio=N_dsnb0/N_tot
ratio_array=np.array([sun_ratio,atmo_ratio,dsnb_ratio])
N_nu_arr=np.array([N_sun0,N_atmo0,N_dsnb0])
mu_nu=N_tot
if channel_Erec==True:
###define the grid
###
x_edge,y_edge=[],[]
for ii in range (N_erec-1):
x_edge.append(0.5*(E_rec_bin[ii]+E_rec_bin[ii+1]))
for ii in range (N_theta-1):
y_edge.append(0.5*(theta_bin[ii]+theta_bin[ii+1]))
###shift to cover the whole grid when interpolating
###
x_edge[0]=E_thr
x_edge[-1]=upper_threshold
y_edge[0]=-1.
y_edge[-1]=1.
dtheta=1.*(y_edge[-1]-y_edge[0])/N_theta
derec=1.*(x_edge[-1]-x_edge[0])/N_erec
###create 2d pdf
###
if test==0:
print ''
print '2 dimensional E_rec-cos_theta pdf'
try:
name=basenamepdf+'_nu.txt'
with open(name):
print ' 2d neutrino pdf exist!'
print ' File read!'
except:
print ' Creating the pdf...'
pdf_nu=np.zeros((N_erec-1,N_theta-1))
for ff in range (factor):
N_arr=N_nu_arr
ratio_arr=N_arr/(np.sum(N_arr))
for ii in range (3):
pdf_nu_tmp=recoil_nu(E_thr,ii,\
N_min=int(N_min_nu*ratio_arr[ii]),mode=0)
pdf_nu=pdf_nu+pdf_nu_tmp*ratio_arr[ii]
pdf_nu_tmp=[]
pdf_nu=1.*pdf_nu/factor
f=open(basenamepdf+'_nu.txt','w')
f.close()
tmp=np.where(pdf_nu>0,pdf_nu,10.)
tmp_min=np.min(tmp)
f=open(basenamepdf+'_nu.txt','a')
for ii in range (len(x_edge)):
for jj in range (len(y_edge)):
if pdf_nu[ii][jj]<tmp_min:
pdf_nu[ii][jj]=1./(factor*N_min_nu)
f.write(str(x_edge[ii])+' '+\
str(y_edge[jj])+' '+str(pdf_nu[ii][jj])+'\n')
f.close()
print ' File created!'
###Normalise properly
### (because of shifting edges and
### assigning non-zeros values to bins with no events)
name=basenamepdf+'_nu.txt'
data=np.loadtxt(name)
pdf_val=data[:,2]
pdf_val=np.reshape(pdf_val,(N_erec-1,N_theta-1))
Pb_ipl=RectBivariateSpline(x_edge,y_edge,pdf_val,kx=1,ky=1)
norm_Pb=Pb_ipl.integral(min(x_edge),max(x_edge),\
min(y_edge),max(y_edge))
Pb_ipl=RectBivariateSpline(x_edge,y_edge,pdf_val/norm_Pb,kx=1,ky=1)
###marginalise over angle if only recoil information should be used
###
p_margin=[]
for ii in range (len(x_edge)):
p_margin_tmp=0.
for jj in range (len(y_edge)-1):
p_margin_tmp+=0.5*(pdf_val[ii][jj]+pdf_val[ii][jj+1])*dtheta
p_margin.append(p_margin_tmp)
norm=0.
for ii in range (len(x_edge)-1):
derec=x_edge[ii+1]-x_edge[ii]
norm+=0.5*(p_margin[ii]+p_margin[ii+1])*derec
Pb_noangle_ipl=interp1d(x_edge,p_margin/norm,kind='linear')
if test==1:
D_Erec=upper_threshold-E_thr
D_cos=2.
D_time=365.25
x_test=np.array([E_thr,E_thr,upper_threshold,upper_threshold])
y_test=np.array([-1.,1.,-1.,1.])
pdf_nu=9.54/(D_Erec*D_cos)*np.ones(4)
Pb_ipl=SmoothBivariateSpline(x_test,y_test,pdf_nu,kx=1,ky=1)
pdf_margin=np.array([pdf_nu[0],pdf_nu[2]])
Pb_noangle_ipl=interp1d(np.array([E_thr,upper_threshold]),pdf_margin)
pdf_nu_t=interp1d(time_array_nu,1./D_time*np.ones(len(time_array_nu)),\
kind='linear')
#E_test=np.arange(E_thr,upper_threshold,1.)
#theta_test=np.arange(-1.,1.,0.2)
#f=open('pdfs/test_nu.txt','w')
#f.close()
#for ii in range (len(E_test)):
# for jj in range (len(theta_test)):
# f=open('pdfs/test_nu.txt','a')
# f.write(str(E_test[ii])+' '+str(theta_test[jj])\
# +' '+str(float(Pb_ipl(E_test[ii],theta_test[jj])))+'\n')
###simulate neutrino events
###
if test==0:
print ''
print 'simulating neutrino events...'
###solar neutrinos
###
print ' solar...'
t_src_solar_nu=[]
E_rec_src_solar_nu=[]
cos_src_solar_nu=[]
solar_length=max(10,min(int(1.*N_sun0*N_sim),source_length))
void_source=np.zeros(solar_length)
for ff in range (factor):
t_src_solar_nu.append(void_source)
E_rec_src_solar_nu.append(void_source)
cos_src_solar_nu.append(void_source)
if channel_time==True:
t_src_solar_nu=[]
for ff in range (factor):
r_array=np.random.uniform(0.,1.,solar_length)
t_src_solar_nu.append(rnd_B_t(r_array))
if channel_Erec==True:
E_rec_src_solar_nu,cos_src_solar_nu=[],[]
for ff in range (factor):
E_rec_tmp,cos_tmp=recoil_nu(E_thr,0,N_evt=solar_length,mode=1)
E_rec_src_solar_nu.append(E_rec_tmp)
cos_src_solar_nu.append(cos_tmp)
###atmospheric neutrinos
### (minimum number of events to be gerated
### for the pool of neutrinos is 10 for each species)
print ' atmospheric...'
t_src_atmo_nu=[]
E_rec_src_atmo_nu=[]
cos_src_atmo_nu=[]
atmo_length=max(10,min(int(1.*N_atmo0*N_sim),source_length))
void_source=np.zeros(atmo_length)
for ff in range (factor):
t_src_atmo_nu.append(void_source)
E_rec_src_atmo_nu.append(void_source)
cos_src_atmo_nu.append(void_source)
if channel_time==True:
t_src_atmo_nu=[]
for ff in range (factor):
r_array=np.random.uniform(0.,1.,atmo_length)
t_src_atmo_nu.append(r_array*t1_f)
if channel_Erec==True:
E_rec_src_atmo_nu,cos_src_atmo_nu=[],[]
for ff in range (factor):
E_rec_tmp,cos_tmp=recoil_nu(E_thr,1,N_evt=atmo_length,mode=1)
E_rec_src_atmo_nu.append(E_rec_tmp)
cos_src_atmo_nu.append(cos_tmp)
###DSNB neutrinos
###
print ' DSNB...'
t_src_dsnb_nu=[]
E_rec_src_dsnb_nu=[]
cos_src_dsnb_nu=[]
dsnb_length=max(10,min(int(N_dsnb0*N_sim),source_length))
void_source=np.zeros(dsnb_length)
for ff in range (factor):
t_src_dsnb_nu.append(void_source)
E_rec_src_dsnb_nu.append(void_source)
cos_src_dsnb_nu.append(void_source)
if channel_time==True:
t_src_dsnb_nu=[]
for ff in range (factor):
r_array=np.random.uniform(0.,1.,dsnb_length)
t_src_dsnb_nu.append(t1_f*r_array)
if channel_Erec==True:
E_rec_src_dsnb_nu,cos_src_dsnb_nu=[],[]
for ff in range (factor):
E_rec_tmp,cos_tmp=recoil_nu(E_thr,2,N_evt=dsnb_length,mode=1)
E_rec_src_dsnb_nu.append(E_rec_tmp)
cos_src_dsnb_nu.append(cos_tmp)
del E_rec_tmp
del cos_tmp
if test==0:
print ' DONE!'
print ''
print ''
print 'DARK MATTER'
count=0
###loop over DM mass
###
s_int=0
for mm in range (len(m_DM_array)):
print ''
print ' ',m_DM_array[mm],'GeV'
###calculate the general annual modulation for that DM mass
###
print 'calculating expected dark matter events...'
time_array_DM,rate_array_DM,N_DM0,dt_DM=\
tot_rate_dm(M_det,E_thr,m_DM_array[mm],t0,t1,steps=100)
print ' DONE! (depends on cross section)'
if N_DM0==0:
continue
bin_prob_DM_t=rate_array_DM/N_DM0
acc_prob_DM_t=np.cumsum(bin_prob_DM_t)
rnd_DM_t=interp1d(acc_prob_DM_t,time_array_DM,kind='linear')
rate_array_DM[0]=rate_array_DM[1]
pdf_DM_t=interp1d(time_array_DM,rate_array_DM/N_DM0/dt_DM,kind='linear')
if test==1:
N_DM_exp=10
###get 2d pdf for recoil energy and cos_theta_sun for DM
###
H_out=np.zeros((N_erec-1,N_theta-1))
t0_f=float(t0)
t1_f=float(t1)-t0_f
t0_f=0.
t_edges=np.linspace(t0_f,t1_f,N_tt)
if channel_Erec==True:
if test==0:
print ''
print '2d E_rec-cos_theta pdf for different times of the year'
try:
name=basenamepdf+'_DM_'+str(m_DM_array[mm])+'_'+str(0)+'.txt'
with open(name):
print ' Files for 2d pdf exist!'
print ' All files read!'
except:
print ' Creating the pdf...'
for ff in range (factor):
H_tmp=recoil_dm(E_thr,m_DM_array[mm],\
t0,t1,N_t=N_tt,N_min=N_min_DM,mode=0)
H_out=H_out+H_tmp
print ' File created!'
H_out=1.*H_out/factor
for hh in range (len(H_out)):
f=open(basenamepdf+'_DM_'+str(m_DM_array[mm])+'_'+\
str(hh)+'.txt','w')
f.close()
f=open(basenamepdf+'_DM_'+str(m_DM_array[mm])+'_'+\
str(hh)+'.txt','a')
HHnz=np.where(H_out[hh]>0,H_out[hh],10.)
HHmin=np.min(HHnz)
for ii in range (len(x_edge)):
for jj in range (len(y_edge)):
if H_out[hh][ii][jj]<HHmin:
H_out[hh][ii][jj]=1./(factor*N_min_DM)
f.write(str(x_edge[ii])+' '+str(y_edge[jj])\
+' '+str(H_out[hh][ii][jj])+'\n')
f.close()
###Normalise properly
###
f_array=[]
for tt in range (N_tt):
name=basenamepdf+'_DM_'+str(m_DM_array[mm])+'_'+str(tt)+'.txt'
data=np.loadtxt(name)
pdf_val=data[:,2]
pdf_val=np.reshape(pdf_val,(N_erec-1,N_theta-1))
f_ipl=RectBivariateSpline(x_edge,y_edge,pdf_val,kx=1,ky=1)
norm_Psb=f_ipl.integral(min(x_edge),max(x_edge),\
min(y_edge),max(y_edge))
f_ipl=RectBivariateSpline(x_edge,y_edge,\
pdf_val/norm_Psb,kx=1,ky=1)
f_array.append(f_ipl)
###marginalise over the angle
### (if only recoil information should be used)
if channel_angle==False or gain_direction==True:
f_array_noangle=[]
for tt in range (N_tt):
name=basenamepdf+'_DM_'+str(m_DM_array[mm])+'_'+\
str(tt)+'.txt'
data=np.loadtxt(name)
pdf_val=data[:,2]
pdf_val=np.reshape(pdf_val,(N_erec-1,N_theta-1))
p_margin=[]
for ii in range(len(x_edge)):
p_margin_tmp=0.
for jj in range (len(y_edge)-1):
p_margin_tmp+=0.5*\
(pdf_val[ii][jj]+pdf_val[ii][jj+1])*dtheta
p_margin.append(p_margin_tmp)
norm=0.
for ii in range (len(x_edge)-1):
derec=x_edge[ii+1]-x_edge[ii]
norm+=0.5*(p_margin[ii]+p_margin[ii+1])*derec
f_ipl=interp1d(x_edge,p_margin/norm,kind='linear')
f_array_noangle.append(f_ipl)
###Test interpolation
###
#ee=np.linspace(E_thr,upper_threshold,100)
#plt.plot(ee,f_array_noangle[6](ee),'ro')
#plt.plot(ee,Pb_noangle_ipl(ee),'bo')
#plt.show()
#name=basenamepdf+'_DM_'+str(m_DM_array[mm])+'_'+str(8)+'.txt'
#data=np.loadtxt(name)
#x_val=data[:,0]
#pdf_val=data[:,2]
#ee=np.linspace(E_thr,upper_threshold,100)
#thth=-1.
#plt.plot(x_val,pdf_val,'bo')
#for ii in range (len(ee)):
# plt.plot(ee[ii],f_array[8](ee[ii],thth),'ro')
#plt.yscale('log')
#plt.show()
if test==1:
t0_f=float(t0)
t1_f=float(t1)-t0_f
t0_f=0.
t_edges=np.linspace(t0_f,t1_f,N_tt)
f_array,f_array_noangle=[],[]
for ii in range (N_tt):
pdf_dm=5.5/(D_Erec*D_cos)*np.ones(4)
x_test=np.array([E_thr,E_thr,upper_threshold,upper_threshold])
y_test=np.array([-1.,1.,-1.,1.])
f_ipl=SmoothBivariateSpline(x_test,y_test,pdf_dm,kx=1,ky=1)
f_array.append(f_ipl)
pdf_dm=np.array([pdf_dm[0],pdf_dm[2]])
f_ipl=interp1d(np.array([E_thr,upper_threshold]),pdf_dm)
f_array_noangle.append(f_ipl)
kk=0
pdf_array=5./D_time*np.ones(len(time_array_nu))
pdf_DM_t=interp1d(time_array_DM,pdf_array,kind='linear')
###simulate DM events
###
print 'simulate DM events...'
t_src_DM,E_rec_src_DM,cos_src_DM=[],[],[]
void_source=np.zeros(source_length)
for ff in range (factor):
t_src_DM.append(void_source)
E_rec_src_DM.append(void_source)
cos_src_DM.append(void_source)
if test==0:
if channel_time==True:
t_src_DM=[]
for ff in range (factor):
r_array=np.random.uniform(0.,1.,source_length)
t_tmp=rnd_DM_t(r_array)
t_src_DM.append(t_tmp)
if channel_Erec==True:
t_src_DM,E_rec_src_DM,cos_src_DM=[],[],[]
for ff in range (factor):
t_tmp,E_rec_tmp,cos_tmp=\
recoil_dm(E_thr,m_DM_array[mm],t0,t1,N_tt,\
N_evt=source_length,mode=1)
t_src_DM.append(t_tmp)
E_rec_src_DM.append(E_rec_tmp)
cos_src_DM.append(cos_tmp)
del E_rec_tmp
del cos_tmp
if test==1:
N_DM=10
n_source_DM=N_DM*np.ones(N_sim,dtype=int)
E_rec_src_DM=np.ones((factor,N_sim*N_DM))*25.
t_src_DM=np.ones((factor,N_sim*N_DM))*100.
cos_src_DM=np.ones((factor,N_sim*N_DM))*0.1
src_DM=np.array(zip(t_src_DM,E_rec_src_DM,cos_src_DM))
print ' DONE!'
if test==0:
print ''
print 'loop over cross section...'
print ''
print "DM_mass sigma_SI N_dm N_nu"
###loop over cross section
###
#for ss in range (len(sigma)):
scan=True
waszero=False
wasbig=False
mem1=0
mem2=0
scanned=np.array([])
while scan==True:
jump=False
if s_int<0:
break
if s_int in scanned:
jump=True
if jump==False:
ss=s_int
###initialise arrays
###
Q_B=[]
Q_SB=[]
t_array_SB=[]
###calculate expected dark matter events by multiplying with sigma
###
if test==0:
N_DM1=N_DM0*sigma[ss]
N_DM=int(N_DM1)
rest=N_DM1-N_DM
if rest>0.5:
N_DM+=+1
if N_DM<1. and waszero==False and wasbig==False:
s_int-=1
continue
if N_DM<1:
break
if N_DM>isoevents*N_nu_avg:
s_int+=1
continue
N_DM_exp=N_DM
mu_DM=N_DM_exp
print ("%2E %2E %i %i " % (m_DM_array[mm],\
sigma[ss],N_DM_exp,N_nu_avg))
Q_B_angle,Q_B_erec=[],[]
Q_SB_angle,Q_SB_erec=[],[]
angle_info_DM,erec_info_DM=[],[]
for ff in range (factor):
#######################
###B only hypothesis###
#######################
N_nu_exp=N_sun0+N_atmo0+N_dsnb0
ratio_solar=1.*N_sun0/N_nu_exp
ratio_atmo=1.*N_atmo0/N_nu_exp
ratio_dsnb=1.*N_dsnb0/N_nu_exp
N_nu=N_nu_exp
n_nu_arr=np.random.poisson(N_nu,N_sim)
NN_nu=np.sum(n_nu_arr)
if print_info==1:
print 'B only hypothesis'
print ''
print '(mu_DM_0, mu_nu_0) ~ (center of Poisson) '
print N_DM_exp,N_nu_exp
print ''
print 'n_nu_arr (drawn from the Poisson)'
print n_nu_arr[:12]
print ''
print 'get neutrino events...'
r_solar=ratio_solar*np.ones(NN_nu)
r_atmo=ratio_atmo*np.ones(NN_nu)
r_dsnb=ratio_dsnb*np.ones(NN_nu)
###split up in solar, atmo and dsnb neutrinos
###
r_array=np.random.uniform(0.,1.,NN_nu)
N_solar=np.where(r_array<r_solar,1,0)
N_dsnb=np.where(r_array>(r_solar+r_atmo),1,0)
N_atmo=1-N_solar-N_dsnb
NN_solar=np.sum(N_solar)
NN_atmo=np.sum(N_atmo)
NN_dsnb=np.sum(N_dsnb)
NN_tot=NN_solar+NN_atmo+NN_dsnb
#print 1.*NN_solar/NN_tot,1.*NN_atmo/NN_tot,1.*NN_dsnb/NN_tot
sample_prob_nu_B=np.zeros(N_sim)
t_array_solar,t_array_atmo,t_array_dsnb=[],[],[]
E_rec_array_solar,E_rec_array_atmo,E_rec_array_dsnb=[],[],[]
cos_array_solar,cos_array_atmo,cos_array_dsnb=[],[],[]
###simulate neutrino events
###
if channel_time==True:
prob_nu_B=np.zeros(NN_nu)
###if energy information is used, do proper simulation
###
if channel_Erec==True:
###solar
###
if solar_length>1:
while ((len(E_rec_array_solar))<NN_solar):
jj=np.random.randint(0,solar_length-1,\
solar_length)
t_array_tmp=t_src_solar_nu[ff][jj]
E_rec_array_tmp=E_rec_src_solar_nu[ff][jj]
cos_array_tmp=cos_src_solar_nu[ff][jj]
t_array_solar=np.concatenate(\
(t_array_solar,t_array_tmp))
E_rec_array_solar=np.concatenate(\
(E_rec_array_solar,E_rec_array_tmp))
cos_array_solar=np.concatenate(\
(cos_array_solar,cos_array_tmp))
t_array_solar=t_array_solar[:NN_solar]
E_rec_array_solar=E_rec_array_solar[:NN_solar]
cos_array_solar=cos_array_solar[:NN_solar]
###atmo
###
if atmo_length>1:
while ((len(E_rec_array_atmo))<NN_atmo):
jj=np.random.randint(0,atmo_length-1,\
atmo_length)
t_array_tmp=t_src_atmo_nu[ff][jj]
E_rec_array_tmp=E_rec_src_atmo_nu[ff][jj]
cos_array_tmp=cos_src_atmo_nu[ff][jj]
t_array_atmo=np.concatenate(\
(t_array_atmo,t_array_tmp))
E_rec_array_atmo=np.concatenate(\
(E_rec_array_atmo,E_rec_array_tmp))
cos_array_atmo=np.concatenate(\
(cos_array_atmo,cos_array_tmp))
t_array_atmo=t_array_atmo[:NN_atmo]
E_rec_array_atmo=E_rec_array_atmo[:NN_atmo]
cos_array_atmo=cos_array_atmo[:NN_atmo]
###dsnb
###
if dsnb_length>1:
while ((len(E_rec_array_dsnb))<NN_dsnb):
jj=np.random.randint(0,dsnb_length-1,\
dsnb_length)
t_array_tmp=t_src_dsnb_nu[ff][jj]
E_rec_array_tmp=E_rec_src_dsnb_nu[ff][jj]
cos_array_tmp=cos_src_dsnb_nu[ff][jj]
t_array_dsnb=np.concatenate(\
(t_array_dsnb,t_array_tmp))
E_rec_array_dsnb=np.concatenate(\
(E_rec_array_dsnb,E_rec_array_tmp))
cos_array_dsnb=np.concatenate(\
(cos_array_dsnb,cos_array_tmp))
t_array_dsnb=t_array_dsnb[:NN_dsnb]
E_rec_array_dsnb=E_rec_array_dsnb[:NN_dsnb]
cos_array_dsnb=cos_array_dsnb[:NN_dsnb]
###stick together in correct order
###
E_rec_nu=np.zeros(NN_nu)
cos_nu=np.zeros(NN_nu)
t_nu=np.zeros(NN_nu)
if solar_length>1:
ij=np.nonzero(N_solar>0.5)
E_rec_nu[ij]=E_rec_array_solar
cos_nu[ij]=cos_array_solar
t_nu[ij]=t_array_solar
if atmo_length>1:
ij=np.nonzero(N_atmo>0.5)
E_rec_nu[ij]=E_rec_array_atmo
cos_nu[ij]=cos_array_atmo
t_nu[ij]=t_array_atmo
if dsnb_length>1:
ij=np.nonzero(N_dsnb>0.5)
E_rec_nu[ij]=E_rec_array_dsnb
cos_nu[ij]=cos_array_dsnb
t_nu[ij]=t_array_dsnb
del t_array_solar
del t_array_atmo
del t_array_dsnb
del E_rec_array_solar
del E_rec_array_atmo
del E_rec_array_dsnb
del cos_array_solar
del cos_array_atmo
del cos_array_dsnb
if print_info==1:
print 'DONE!'
print 'evaluate Q for B only'
###calculate Pb_B
###
mu_nu=np.zeros((N_sim,N_Q))
for ii in range (9):
if N_sun_arr[ii]>0.:
N_nu_solar=np.random.normal(N_sun_arr[ii],\
nu_sigma[0]*N_sun_arr[ii],(N_sim,N_Q))
N_nu_solar=np.where(N_nu_solar<0.,0.,N_nu_solar)
mu_nu+=N_nu_solar
for ii in range (4):
if N_atmo_arr[ii]>0.:
N_nu_atmo=np.random.normal(N_atmo_arr[ii],\
nu_sigma[1]*N_atmo_arr[ii],(N_sim,N_Q))
N_nu_atmo=np.where(N_nu_atmo<0.,0.,N_nu_atmo)
mu_nu+=N_nu_atmo
for ii in range (3):
if N_dsnb_arr[ii]>0.:
N_nu_dsnb=np.random.normal(N_dsnb_arr[ii],\
nu_sigma[2]*N_dsnb_arr[ii],(N_sim,N_Q))
N_nu_dsnb=np.where(N_nu_dsnb<0.,0.,N_nu_dsnb)
mu_nu+=N_nu_dsnb
if test==1:
n_nu_arr=N_nu_avg*np.ones(N_sim,dtype=int)
E_rec_nu=2.*E_thr*np.ones(N_sim*N_nu_avg)
cos_nu=0.5*np.ones(N_sim*N_nu_avg)
t_nu=100.*np.ones(N_sim*N_nu_avg)
mu_nu=N_nu_avg*np.ones((N_sim,N_Q))
NN_nu=N_nu_avg*N_sim
mu_nu=np.hstack(mu_nu)
n_nu_arr=np.tile(n_nu_arr,N_Q)
if channel_time==True:
if channel_Erec==True:
dif=np.zeros((len(t_edges),len(t_nu)))
for ii in range(len(t_edges)):
dif[ii]=abs(t_edges[ii]-t_nu)
dif=np.reshape(dif,(len(t_nu),len(t_edges)))
id1_nu=np.argmin(dif,axis=1)
t0_nu=t_edges[id1_nu]
id2_nu=np.where(id1_nu==N_tt-1,id1_nu-1,0)
id2_nu=np.where(id1_nu==0,1,id2_nu)
id2_nu_tmp1=np.where(id2_nu==0,id1_nu+1,id2_nu)
t1_nu=t_edges[id2_nu_tmp1]
id2_nu_tmp2=np.where(id2_nu==0,id1_nu-1,id2_nu)
t2_nu=t_edges[id2_nu_tmp2]
id2_nu=np.where(abs(t1_nu-t_nu)>abs(t2_nu-t_nu),\
id2_nu_tmp2,id2_nu)
id2_nu=np.where(abs(t1_nu-t_nu)<abs(t2_nu-t_nu),\
id2_nu_tmp1,id2_nu)
d1=abs(t_nu-t_edges[id1_nu])
d2=abs(t_nu-t_edges[id2_nu])
pdf1,pdf2=np.zeros(NN_nu),np.zeros(NN_nu)
for ii in range (N_tt):
pdf1=np.where(id1_nu==ii,\
f_array[ii].ev(E_rec_nu,cos_nu),pdf1)
pdf2=np.where(id2_nu==ii,\
f_array[ii].ev(E_rec_nu,cos_nu),pdf2)
prob_nu_B_angle=Pb_ipl.ev(E_rec_nu,cos_nu)
prob_nu_S_angle=pdf1+d1/(d1+d2)*(pdf2-pdf1)
for ii in range (N_tt):
pdf1=np.where(id1_nu==ii,\
f_array_noangle[ii](E_rec_nu),pdf1)
pdf2=np.where(id2_nu==ii,\
f_array_noangle[ii](E_rec_nu),pdf2)
prob_nu_B_erec=Pb_noangle_ipl(E_rec_nu)
prob_nu_S_erec=pdf1+d1/(d1+d2)*(pdf2-pdf1)
prob_nu_S_time=pdf_DM_t(t_nu)
prob_nu_B_time=pdf_nu_t(t_nu)
prob_nu_S_time=np.tile(prob_nu_S_time,N_Q)
prob_nu_B_time=np.tile(prob_nu_B_time,N_Q)
prob_nu_S_angle=np.tile(prob_nu_S_angle,N_Q)
prob_nu_B_angle=np.tile(prob_nu_B_angle,N_Q)
prob_nu_S_erec=np.tile(prob_nu_S_erec,N_Q)
prob_nu_B_erec=np.tile(prob_nu_B_erec,N_Q)
n_split=np.cumsum(n_nu_arr)
n_split=np.delete(n_split,-1)
prob_nu_B_time=np.split(prob_nu_B_time,n_split)
prob_nu_S_time=np.split(prob_nu_S_time,n_split)
prob_nu_B_angle=np.split(prob_nu_B_angle,n_split)
prob_nu_S_angle=np.split(prob_nu_S_angle,n_split)
prob_nu_B_erec=np.split(prob_nu_B_erec,n_split)
prob_nu_S_erec=np.split(prob_nu_S_erec,n_split)
for ii in range (N_sim*N_Q):
time_info=np.log(1.+1.*mu_DM/mu_nu[ii]*\
prob_nu_S_time[ii]/prob_nu_B_time[ii])
angle_info=np.log(1.+1.*mu_DM/mu_nu[ii]*\
prob_nu_S_angle[ii]/prob_nu_B_angle[ii])
erec_info=np.log(1.+1.*mu_DM/mu_nu[ii]*\
prob_nu_S_erec[ii]/prob_nu_B_erec[ii])
pre=-mu_DM+n_nu_arr[ii]*(np.log(mu_nu[ii])-\
np.log(mu_DM+mu_nu[ii]))
angle_info=pre+np.sum(time_info)+np.sum(angle_info)
erec_info=pre+np.sum(time_info)+np.sum(erec_info)
Q_B_angle=np.concatenate(\
(Q_B_angle,np.array([angle_info])))
Q_B_erec=np.concatenate(\
(Q_B_erec,np.array([erec_info])))
del prob_nu_B_time
del prob_nu_S_time
del prob_nu_B_angle
del prob_nu_S_angle
del prob_nu_B_erec
del prob_nu_S_erec
if print_info==1:
print 'DONE!'
#########################
###Now, S+B hypothesis###
#########################
N_SB=N_DM_exp+N_nu_exp
if print_info==1:
print 'S+B hypothesis'
print ''
print 'N_DM_exp, N_nu_exp, N_SB'
print N_DM_exp,N_nu_exp
print ''
print 'get neutrino events'
NN_SB=np.random.poisson(N_SB,N_sim)
ratio_solar=1.*N_sun0/N_SB
ratio_atmo=1.*N_atmo0/N_SB
ratio_dsnb=1.*N_dsnb0/N_SB
ratio_dm=1.*N_DM/N_SB
###get number of DM and neutrino events
###
n_arr=np.sum(NN_SB)
r_dm=ratio_dm*np.ones(n_arr)
r_solar=ratio_solar*np.ones(n_arr)
r_atmo=ratio_atmo*np.ones(n_arr)
r_dsnb=ratio_dsnb*np.ones(n_arr)
###split up in solar, atmo and dsnb neutrinos
###
r_array=np.random.uniform(0.,1.,n_arr)
N_solar=np.where(r_array<r_solar,1,0)
N_dm=np.where(r_array>(r_solar+r_atmo+r_dsnb),1,0)
N_atmo=1-N_solar-N_dm
N_atmo=np.random.uniform(0.,r_atmo+r_dsnb)*N_atmo
N_atmo=np.where(N_atmo>r_dsnb,1,0)
N_dsnb=1-N_dm-N_solar-N_atmo
NN_solar=np.sum(N_solar)
NN_atmo=np.sum(N_atmo)
NN_dsnb=np.sum(N_dsnb)
NN_DM=np.sum(N_dm)
NN_tot=NN_solar+NN_atmo+NN_dsnb+NN_DM
NN_nu=NN_tot-NN_DM
###simulate DM events
###
tmp=np.cumsum(NN_SB)
i_max=len(tmp)
tmp=np.split(N_dm,tmp)
n_array_DM=np.zeros(i_max,dtype=int)
for ii in range (i_max):
n_array_DM[ii]=int(sum(tmp[ii]))
n_nu_arr=NN_SB-n_array_DM
if test==1:
n_array_DM=N_DM_exp*np.ones(N_sim,dtype=int)
if channel_time==True:
t_array_DM,E_rec_array_DM,cos_array_DM=[],[],[]
while ((len(t_array_DM))<NN_DM):
jj=np.random.randint(0,source_length-1,source_length)
t_array_tmp=t_src_DM[ff][jj]
E_rec_array_tmp=E_rec_src_DM[ff][jj]
cos_array_tmp=cos_src_DM[ff][jj]
t_array_DM=np.concatenate((t_array_DM,t_array_tmp))
E_rec_array_DM=np.concatenate(\
(E_rec_array_DM,E_rec_array_tmp))
cos_array_DM=np.concatenate(\
(cos_array_DM,cos_array_tmp))
t_array_DM=t_array_DM[:NN_DM]
E_rec_array_DM=E_rec_array_DM[:NN_DM]
cos_array_DM=cos_array_DM[:NN_DM]
###simulate neutrino events
###
t_array_solar,t_array_atmo,t_array_dsnb=[],[],[]
E_rec_array_solar,E_rec_array_atmo,E_rec_array_dsnb=[],[],[]
cos_array_solar,cos_array_atmo,cos_array_dsnb=[],[],[]
if channel_time==True:
###if energy information is used, do proper simulation
###
if channel_Erec==True:
###solar
###
if solar_length>1:
while ((len(E_rec_array_solar))<NN_solar):
jj=np.random.randint(0,solar_length-1,\
solar_length)
t_array_tmp=t_src_solar_nu[ff][jj]
E_rec_array_tmp=E_rec_src_solar_nu[ff][jj]
cos_array_tmp=cos_src_solar_nu[ff][jj]
t_array_solar=np.concatenate(\
(t_array_solar,t_array_tmp))
E_rec_array_solar=np.concatenate(\
(E_rec_array_solar,E_rec_array_tmp))
cos_array_solar=np.concatenate(\
(cos_array_solar,cos_array_tmp))
t_array_solar=t_array_solar[:NN_solar]
E_rec_array_solar=E_rec_array_solar[:NN_solar]
cos_array_solar=cos_array_solar[:NN_solar]
###atmo
###
if atmo_length>1:
while ((len(E_rec_array_atmo))<NN_atmo):
jj=np.random.randint(0,atmo_length-1,\
atmo_length)
t_array_tmp=t_src_atmo_nu[ff][jj]
E_rec_array_tmp=E_rec_src_atmo_nu[ff][jj]
cos_array_tmp=cos_src_atmo_nu[ff][jj]
t_array_atmo=np.concatenate(\
(t_array_atmo,t_array_tmp))
E_rec_array_atmo=np.concatenate(\
(E_rec_array_atmo,E_rec_array_tmp))
cos_array_atmo=np.concatenate(\
(cos_array_atmo,cos_array_tmp))
t_array_atmo=t_array_atmo[:NN_atmo]
E_rec_array_atmo=E_rec_array_atmo[:NN_atmo]
cos_array_atmo=cos_array_atmo[:NN_atmo]
###dsnb
###
if dsnb_length>1:
while ((len(E_rec_array_dsnb))<NN_dsnb):
jj=np.random.randint(0,dsnb_length-1,\
dsnb_length)
t_array_tmp=t_src_dsnb_nu[ff][jj]
E_rec_array_tmp=E_rec_src_dsnb_nu[ff][jj]
cos_array_tmp=cos_src_dsnb_nu[ff][jj]
t_array_dsnb=np.concatenate(\
(t_array_dsnb,t_array_tmp))
E_rec_array_dsnb=np.concatenate(\
(E_rec_array_dsnb,E_rec_array_tmp))
cos_array_dsnb=np.concatenate(\
(cos_array_dsnb,cos_array_tmp))
t_array_dsnb=t_array_dsnb[:NN_dsnb]
E_rec_array_dsnb=E_rec_array_dsnb[:NN_dsnb]
cos_array_dsnb=cos_array_dsnb[:NN_dsnb]
###stick together in correct order
###
E_rec_nu=np.zeros(NN_tot)
cos_nu=np.zeros(NN_tot)
t_nu=np.zeros(NN_tot)
if solar_length>1:
ij=np.nonzero(N_solar>0.5)
E_rec_nu[ij]=E_rec_array_solar
cos_nu[ij]=cos_array_solar
t_nu[ij]=t_array_solar
if atmo_length>1:
ij=np.nonzero(N_atmo>0.5)
E_rec_nu[ij]=E_rec_array_atmo
cos_nu[ij]=cos_array_atmo
t_nu[ij]=t_array_atmo
if dsnb_length>1:
ij=np.nonzero(N_dsnb>0.5)
E_rec_nu[ij]=E_rec_array_dsnb
cos_nu[ij]=cos_array_dsnb
t_nu[ij]=t_array_dsnb
ij=np.nonzero(t_nu)
t_nu=t_nu[ij]
cos_nu=cos_nu[ij]
E_rec_nu=E_rec_nu[ij]
del t_array_solar
del t_array_atmo
del t_array_dsnb
del E_rec_array_solar
del E_rec_array_atmo
del E_rec_array_dsnb
del cos_array_solar
del cos_array_atmo
del cos_array_dsnb
if print_info==1:
print 'DONE!'
###calculate Pb_SB and Psb_SB
###
mu_nu=np.zeros((N_sim,N_Q))
for ii in range (9):
if N_sun_arr[ii]>0.:
N_nu_solar=np.random.normal(N_sun_arr[ii],\
nu_sigma[0]*N_sun_arr[ii],(N_sim,N_Q))
N_nu_solar=np.where(N_nu_solar<0.,0.,N_nu_solar)
mu_nu+=N_nu_solar
for ii in range (4):
if N_atmo_arr[ii]>0.:
N_nu_atmo=np.random.normal(N_atmo_arr[ii],\
nu_sigma[1]*N_atmo_arr[ii],(N_sim,N_Q))
N_nu_atmo=np.where(N_nu_atmo<0.,0.,N_nu_atmo)
mu_nu+=N_nu_atmo
for ii in range (3):
if N_dsnb_arr[ii]>0.:
N_nu_dsnb=np.random.normal(N_dsnb_arr[ii],\
nu_sigma[2]*N_dsnb_arr[ii],(N_sim,N_Q))
N_nu_dsnb=np.where(N_nu_dsnb<0.,0.,N_nu_dsnb)
mu_nu+=N_nu_dsnb
if test==1:
n_nu_arr=N_nu_avg*np.ones(N_sim,dtype=int)
E_rec_nu=2.*E_thr*np.ones(N_sim*N_nu_avg)
cos_nu=0.5*np.ones(N_sim*N_nu_avg)
t_nu=100.*np.ones(N_sim*N_nu_avg)
mu_nu=N_nu_avg*np.ones((N_sim,N_Q))
NN_nu=N_nu_avg*N_sim
mu_nu=np.hstack(mu_nu)
n_nu_arr=np.tile(n_nu_arr,N_Q)
n_array_DM=np.tile(n_array_DM,N_Q)
###Dark Matter
### (i.e. Signal)
if print_info==1:
print 'mu nu'
print mu_nu[:12]
print ''
print 'n_DM'
print n_array_DM[:12]
print ''
print 'cos_DM'
print cos_array_DM[:12]
print ''
print 'E_rec_DM'
print E_rec_array_DM[:12]
print ''
print 'calculate S+B dark matter'
if channel_time==True:
prob_DM_B=np.ones(NN_DM)
prob_DM_SB=np.ones(NN_DM)
if channel_Erec==True:
dif=np.zeros((len(t_edges),len(t_array_DM)))
for ii in range(len(t_edges)):
dif[ii]=abs(t_edges[ii]-t_array_DM)
dif=np.reshape(dif,(len(t_array_DM),len(t_edges)))
id1_DM=np.argmin(dif,axis=1)
t0_DM=t_edges[id1_DM]
id2_DM=np.where(id1_DM==N_tt-1,id1_DM-1,0)
id2_DM=np.where(id1_DM==0,1,id2_DM)
id2_DM_tmp1=np.where(id2_DM==0,id1_DM+1,id2_DM)
t1_DM=t_edges[id2_DM_tmp1]
id2_DM_tmp2=np.where(id2_DM==0,id1_DM-1,id2_DM)
t2_DM=t_edges[id2_DM_tmp2]
id2_DM=np.where(abs(t1_DM-t_array_DM)>\
abs(t2_DM-t_array_DM),id2_DM_tmp2,id2_DM)
id2_DM=np.where(abs(t1_DM-t_array_DM)<\
abs(t2_DM-t_array_DM),id2_DM_tmp1,id2_DM)
d1=abs(t_array_DM-t_edges[id1_DM])
d2=abs(t_array_DM-t_edges[id2_DM])
pdf1,pdf2=np.zeros(NN_DM),np.zeros(NN_DM)
#if channel_angle==True:
for ii in range (N_tt):
pdf1=np.where(id1_DM==ii,f_array[ii].ev(\
E_rec_array_DM,cos_array_DM),pdf1)
pdf2=np.where(id2_DM==ii,f_array[ii].ev(\
E_rec_array_DM,cos_array_DM),pdf2)
prob_DM_S_angle=pdf1+d1/(d1+d2)*(pdf2-pdf1)
prob_DM_B_angle=Pb_ipl.ev(E_rec_array_DM,cos_array_DM)
#if channel_angle==False:
for ii in range (N_tt):
pdf1=np.where(id1_DM==ii,f_array_noangle[ii](\
E_rec_array_DM),pdf1)
pdf2=np.where(id2_DM==ii,f_array_noangle[ii](\
E_rec_array_DM),pdf2)
prob_DM_S_erec=pdf1+d1/(d1+d2)*(pdf2-pdf1)
prob_DM_B_erec=Pb_noangle_ipl(E_rec_array_DM)
prob_DM_S_time=pdf_DM_t(t_array_DM)
prob_DM_B_time=pdf_nu_t(t_array_DM)
prob_DM_S_time=np.tile(prob_DM_S_time,N_Q)
prob_DM_B_time=np.tile(prob_DM_B_time,N_Q)
prob_DM_S_angle=np.tile(prob_DM_S_angle,N_Q)
prob_DM_B_angle=np.tile(prob_DM_B_angle,N_Q)
prob_DM_S_erec=np.tile(prob_DM_S_erec,N_Q)
prob_DM_B_erec=np.tile(prob_DM_B_erec,N_Q)
n_split=np.cumsum(n_array_DM)
n_split=np.delete(n_split,-1)
prob_DM_B_time=np.split(prob_DM_B_time,n_split)
prob_DM_S_time=np.split(prob_DM_S_time,n_split)
prob_DM_B_angle=np.split(prob_DM_B_angle,n_split)
prob_DM_S_angle=np.split(prob_DM_S_angle,n_split)
prob_DM_B_erec=np.split(prob_DM_B_erec,n_split)
prob_DM_S_erec=np.split(prob_DM_S_erec,n_split)
angle_info_DM,erec_info_DM=[],[]
for ii in range (N_sim*N_Q):
time_info=np.array([np.sum(np.log(\
1.+1.*mu_DM/mu_nu[ii]*prob_DM_S_time[ii]/\
prob_DM_B_time[ii]))])
angle_info=np.array([np.sum(np.log(\
1.+1.*mu_DM/mu_nu[ii]*prob_DM_S_angle[ii]/\
prob_DM_B_angle[ii]))])
erec_info=np.array([np.sum(np.log(\
1.+1.*mu_DM/mu_nu[ii]*prob_DM_S_erec[ii]/\
prob_DM_B_erec[ii]))])
pre=np.array([-mu_DM+\
(n_nu_arr[ii]+n_array_DM[ii])*\
(np.log(mu_nu[ii])-np.log(mu_nu[ii]+mu_DM))])
angle_info_DM=np.concatenate(\
(angle_info_DM,pre+time_info+angle_info))
erec_info_DM=np.concatenate(\
(erec_info_DM,pre+time_info+erec_info))
del prob_DM_B_time
del prob_DM_S_time
del prob_DM_B_angle
del prob_DM_S_angle
del prob_DM_B_erec
del prob_DM_S_erec
if print_info==1:
print 'DONE!'
###neutrinos
### (i.e. Background)
if print_info==1:
print 'n_nu'
print n_nu_arr[:12]
print ''
print 'cos_nu'
print cos_nu[:12]
print ''
print 'E_rec_nu'
print E_rec_nu[:12]
print ''
print 'calculate S+B neutrinos'
if channel_time==True:
prob_nu_SB=np.ones(NN_nu)
if channel_Erec==True:
dif=np.zeros((len(t_edges),len(t_nu)))
for ii in range(len(t_edges)):
dif[ii]=abs(t_edges[ii]-t_nu)
dif=np.reshape(dif,(len(t_nu),len(t_edges)))
id1_nu=np.argmin(dif,axis=1)
t0_nu=t_edges[id1_nu]
id2_nu=np.where(id1_nu==N_tt-1,id1_nu-1,0)
id2_nu=np.where(id1_nu==0,1,id2_nu)
id2_nu_tmp1=np.where(id2_nu==0,id1_nu+1,id2_nu)
t1_nu=t_edges[id2_nu_tmp1]
id2_nu_tmp2=np.where(id2_nu==0,id1_nu-1,id2_nu)
t2_nu=t_edges[id2_nu_tmp2]
id2_nu=np.where(abs(t1_nu-t_nu)>\
abs(t2_nu-t_nu),id2_nu_tmp2,id2_nu)
id2_nu=np.where(abs(t1_nu-t_nu)<\
abs(t2_nu-t_nu),id2_nu_tmp1,id2_nu)
d1=abs(t_nu-t_edges[id1_nu])
d2=abs(t_nu-t_edges[id2_nu])
pdf1,pdf2=np.zeros(NN_nu),np.zeros(NN_nu)
for ii in range (N_tt):
pdf1=np.where(id1_nu==ii,f_array[ii].ev(\
E_rec_nu,cos_nu),pdf1)
pdf2=np.where(id2_nu==ii,f_array[ii].ev(\
E_rec_nu,cos_nu),pdf2)
prob_nu_B_angle=Pb_ipl.ev(E_rec_nu,cos_nu)
prob_nu_S_angle=pdf1+d1/(d1+d2)*(pdf2-pdf1)
for ii in range (N_tt):
pdf1=np.where(id1_nu==ii,f_array_noangle[ii](\
E_rec_nu),pdf1)
pdf2=np.where(id2_nu==ii,f_array_noangle[ii](\
E_rec_nu),pdf2)
prob_nu_B_erec=Pb_noangle_ipl(E_rec_nu)
prob_nu_S_erec=pdf1+d1/(d1+d2)*(pdf2-pdf1)
prob_nu_S_time=pdf_DM_t(t_nu)
prob_nu_B_time=pdf_nu_t(t_nu)
prob_nu_S_time=np.tile(prob_nu_S_time,N_Q)
prob_nu_B_time=np.tile(prob_nu_B_time,N_Q)
prob_nu_S_angle=np.tile(prob_nu_S_angle,N_Q)
prob_nu_B_angle=np.tile(prob_nu_B_angle,N_Q)
prob_nu_S_erec=np.tile(prob_nu_S_erec,N_Q)
prob_nu_B_erec=np.tile(prob_nu_B_erec,N_Q)
n_split=np.cumsum(n_nu_arr)
n_split=np.delete(n_split,-1)
prob_nu_B_time=np.split(prob_nu_B_time,n_split)
prob_nu_S_time=np.split(prob_nu_S_time,n_split)
prob_nu_B_angle=np.split(prob_nu_B_angle,n_split)
prob_nu_S_angle=np.split(prob_nu_S_angle,n_split)
prob_nu_B_erec=np.split(prob_nu_B_erec,n_split)
prob_nu_S_erec=np.split(prob_nu_S_erec,n_split)
if print_info==1:
print 'DONE!'
print 'put everything together'
angle_info_nu,erec_info_nu=[],[]
for ii in range (N_sim*N_Q):
time_info=np.array([np.sum(np.log(\
1.+1.*mu_DM/mu_nu[ii]*prob_nu_S_time[ii]/\
prob_nu_B_time[ii]))])
angle_info=np.array([np.sum(np.log(\
1.+1.*mu_DM/mu_nu[ii]*prob_nu_S_angle[ii]/\
prob_nu_B_angle[ii]))])
erec_info=np.array([np.sum(np.log(\
1.+1.*mu_DM/mu_nu[ii]*prob_nu_S_erec[ii]/\
prob_nu_B_erec[ii]))])
angle_info_nu=np.concatenate(\
(angle_info_nu,time_info+angle_info))
erec_info_nu=np.concatenate(\
(erec_info_nu,time_info+erec_info))
Q_SB_angle=np.concatenate(\
(Q_SB_angle,angle_info_DM+angle_info_nu))
Q_SB_erec=np.concatenate(
(Q_SB_erec,erec_info_DM+erec_info_nu))
del prob_nu_B_time
del prob_nu_S_time
del prob_nu_B_angle
del prob_nu_S_angle
del prob_nu_B_erec
del prob_nu_S_erec
if print_info==1:
print 'DONE!'
print 'Q-statistics...'
###calculate the values of the test statistics
###
for kk in range (2):
if kk==0:
Q_B=-2*Q_B_angle
Q_SB=-2*Q_SB_angle
if kk==1:
Q_B=-2*Q_B_erec
Q_SB=-2*Q_SB_erec
if print_info==1:
print 'Q B'
print Q_B[:12]
print ''
print 'Q SB'
print Q_SB[:12]
print ''
#plt.figure(1)
#plt.hist(Q_B,bins=np.linspace(min(Q_B),max(Q_B),100))
#plt.figure(3)
#plt.hist(Q_SB,bins=np.linspace(min(Q_SB),max(Q_SB),100))
#print 'Next two only non-zero if Q_B>75'
#out1=np.where(Q_B>75,Psb_array_B,0)
#print 'Psb array nu'
#print out1
#out1=np.where(Q_B>75,Pb_array_B,0)
#print 'Pb array nu'
#print out1
#print''
if test==1:
if Q_B[1] < (-36.) and Q_B[1] > (-39.):
print ' Q_B OK!'
if Q_SB[1] < (-50.) and Q_SB[1] > (-53.):
print ' Q_SB OK!'
#print Q_B,Q_SB
else :
print 'some error!'
print Q_B[:10], Q_SB[:10]
sys.exit()
###Now, do statistics with it
###
bins_B=np.linspace(min(Q_B),max(Q_B),N_bins)
bins_SB=np.linspace(min(Q_SB),max(Q_SB),N_bins)
hist_B,Q_grid_B=np.histogram(Q_B,bins=bins_B,normed=True)
hist_SB,Q_grid_SB=np.histogram(Q_SB,bins=bins_SB,normed=True)
Q_min=np.min(Q_grid_SB)
Q_max=np.max(Q_grid_B)
Q_int=np.linspace(Q_min,Q_max,steps)
dQ_int=1.*(Q_max-Q_min)/steps
###The distributions with correct normalisation
###
def pdf_B(Qv):
if Qv<min(Q_grid_B) or Qv>max(Q_grid_B) \
or Qv==max(Q_grid_B):
return 0.
Qv=np.array([Qv,Qv])
id=np.digitize(Qv,bins=Q_grid_B)
id=id[0]-1
return hist_B[id]
def pdf_SB(Qv):
if Qv<min(Q_grid_SB) or Qv>max(Q_grid_SB) :
return 0.
Qv=np.array([Qv,Qv])
id=np.digitize(Qv,bins=Q_grid_SB)
id=id[0]-1
return hist_SB[id]
###calculate overlap of both distributions
###
cl=[]
int_B=0.
int_SB=0.
f_B0=pdf_B(Q_int[0])
f_SB0=pdf_SB(Q_int[0])
for ii in range(steps-1):
f_B1=pdf_B(Q_int[ii+1])
f_SB1=pdf_SB(Q_int[ii+1])
int_B+=dQ_int*0.5*(f_B0+f_B1)
int_SB+=dQ_int*0.5*(f_SB0+f_SB1)
f_B0=f_B1
f_SB0=f_SB1
if np.min(Q_grid_SB)<np.min(Q_grid_B):
if(abs((1.-int_SB)-int_B)<accuracy):
if(int_B>0.):
cl.append(abs(1.-int_SB))
else:
cl.append(0.)
else:
if(abs((1.-int_B)-int_SB)<accuracy):
if(int_SB>0.):
cl.append(abs(1.-int_B))
else:
cl.append(0.)
if(int_SB>0.995):
break
###Test distributions
###
#if print_info==0:
# qq=np.linspace(min(Q_grid_SB),max(Q_grid_B),250)
# plt.figure(2)
# plt.hist(Q_B,bins=Q_grid_B,\
# facecolor='b',alpha=0.5,normed=1.)
# plt.hist(Q_SB,bins=Q_grid_SB,\
# facecolor='r',alpha=0.5,normed=1.)
# #for ii in range (len(qq)):
# # plt.plot(qq[ii],pdf_B(qq[ii]),'bo')
# # plt.plot(qq[ii],pdf_SB(qq[ii]),'ro')
# plt.show(all)
if(len(cl)==0):
cl_avg=0.
else:
cl_avg=1.*np.sum(cl)/len(cl)
if kk==0:
cl_angle=cl_avg
if kk==1:
cl_erec=cl_avg
###write to file
###
f=open(filename_dm,'a')
f.write(str(M_det)+ ' '+str(N_nu_exp)+' '+\
str(m_DM_array[mm])+' '+str(sigma[ss])+\
' '+str(N_DM_exp)+' '+str(cl_angle)+' '+str(cl_erec)+'\n')
f.close()
if cl_angle<0.00001 and mem1==0:#and cl_erec<0.00001 and mem1==0:
waszero=True
mem1=1
if cl_angle>0.00135 and mem2==0:#cl_erec>0.1 and mem2==0:
wasbig=True
mem2=1
if wasbig==True and waszero==True:
scan=False
scanned=np.concatenate((scanned,np.array([s_int])))
if waszero==True:
s_int+=1
if waszero==False:
s_int-=1
if scan==False:
if waszero==True and wasbig==False:
s_int+=1
else:
s_int-=1
print ''
print ''
print ''
print '####################################################################'
print 'END'
print '####################################################################'
print ''
|
import os.path as osp
from setuptools import find_packages, setup
def get_version():
    """Return the package version string parsed from icrawler/version.py.

    The version module is executed in an isolated namespace instead of
    relying on ``exec`` mutating ``locals()``, whose behavior inside a
    function body is implementation-defined in Python 3.
    """
    version_file = osp.join(osp.dirname(__file__), 'icrawler/version.py')
    namespace = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
def readme():
    """Read and return the contents of README.rst located next to this file."""
    path = osp.join(osp.dirname(__file__), 'README.rst')
    with open(path, 'r') as fh:
        return fh.read()
# Package metadata for the icrawler distribution.
setup(
    name='icrawler',
    version=get_version(),  # read dynamically from icrawler/version.py
    description='A mini framework of image crawlers',
    long_description=readme(),  # README.rst, rendered on PyPI
    keywords='image crawler spider',
    packages=find_packages(),
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Topic :: Utilities',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    url='https://github.com/hellock/icrawler',
    author='Kai Chen',
    author_email='chenkaidev@gmail.com',
    license='MIT',
    # Runtime dependencies: HTML parsing (bs4/lxml), HTTP (requests),
    # py2/3 compatibility (six), and image handling (Pillow).
    install_requires=[
        'beautifulsoup4>=4.4.1',
        'lxml',
        'requests>=2.9.1',
        'six>=1.10.0',
        'Pillow'
    ],
    zip_safe=False  # keep the package unzipped on disk after install
) # yapf: disable
|
from distutils.core import setup
from setuptools import find_packages
# Package metadata for the pyesapi distribution.
# NOTE(review): `setup` comes from distutils.core although setuptools is
# also imported above; setuptools' setup is preferred since distutils is
# deprecated — confirm before switching the import.
setup(
    name='pyesapi',
    version='0.2.3',
    description='Python interface to Eclipse Scripting API',
    author='Michael Folkerts, Varian Medical Systems',
    author_email='Michael.Folkerts@varian.com',
    license='MIT',
    packages=find_packages(),
    install_requires=[
        'numpy',
        'scipy',
        'pythonnet', # v2.3.0 tested to work with python 3.6
        'pynetdicom'
    ],
)
|
# Demo of chained assignment, tuples, dicts and list slicing.
# Chained assignment binds all three names to the same int object.
x = y = z = 50
print(x)
print(y)
print(z)
# Renamed from `tuple` to avoid shadowing the builtin type.
sample_tuple = ('rahul', 100, 60.4, 'deepak')
print(sample_tuple)
print(sample_tuple[2:])  # slice from index 2 to the end
dictionary = {'name': 'charlie', 'id': 100, 'dept': 'it'}
print(dictionary)
print(dictionary.keys())
print(dictionary.values())
# Renamed from `list` to avoid shadowing the builtin type.
sample_list = ['aman', 678, 20.4, 'saurav']
print(sample_list[1:3])  # elements at indices 1 and 2
# 🚨 Don't change the code below 👇
student_heights = input("Input a list of student heights ").split()
for n in range(0, len(student_heights)):
    student_heights[n] = int(student_heights[n])
# 🚨 Don't change the code above 👆
# Write your code below this row 👇
# Manual average: accumulate the total and the count without sum()/len().
sum_of_heights = 0
count = 0
for height in student_heights:
    sum_of_heights += height
    count += 1
print(f"average of height {sum_of_heights / count} ")
# Same computation with the built-ins, rounded to the nearest integer.
average = sum(student_heights) / len(student_heights)
print(f"average of height {round(average)} ")
|
""" Everything in here goes with zero indexing. It could have been one indexed,
except I wasn't sure if it was okay to make Just return 1 more than its arg.
Probably better not to do that. So I left everything here 0 indexed and
then add 1 to the Gi Gj Ai Aj when converting to compressed sparse format
in functions_return
"""
from encoder import create_encoder
from ... import codes
from ... properties.abstract_dim import AbstractDim
def constant(x):
    """Literal rendering of a constant coefficient."""
    value = x.value
    return str(value)
def eye(x):
    """Scaled sparse identity of size n in Matlab syntax."""
    return toMatlab(x.coeff) + " * speye(" + str(x.n) + ")"
def ones(x):
    """Scaled ones-vector: a row vector when transposed, else a column."""
    scale = toMatlab(x.coeff)
    if x.transpose:
        return scale + " * ones(1," + str(x.n) + ")"
    return scale + " * ones(" + str(x.n) + ",1)"
def trans(x):
    """Matlab non-conjugate transpose of the encoded argument."""
    return "(" + toMatlab(x.arg) + ").'"
def parameter(x):
    """Reference to a named entry of the Matlab `params` struct."""
    return "params." + str(x.value)
def scalar_parameter(x):
    """Reference to a named scalar entry of the `params` struct."""
    return "params." + str(x.value)
def negate(x):
    """Unary minus applied to the encoded argument."""
    return "-(" + toMatlab(x.arg) + ")"
def add(x):
    """Sum of the encoded left and right operands."""
    return toMatlab(x.left) + " + " + toMatlab(x.right)
def mul(x):
    """Product of the encoded left and right operands."""
    return toMatlab(x.left) + " * " + toMatlab(x.right)
def just(elem):
    """Pass-through encoding of a wrapped expression."""
    return str(toMatlab(elem.x))
def loop_rows(x):
    """0-based row indices of the matrix's nonzeros, with optional stride/offset."""
    mat = toMatlab(x.matrix)
    expr = "mod(find(%s)-1,size(%s,1))" % (mat, mat)
    stride = getattr(x, 'stride', 1)
    if stride != 1:
        expr = "%d*%s" % (stride, expr)
    offset = getattr(x, 'offset', 0)
    if offset != 0:
        expr = "%s + %s" % (offset, expr)
    return expr
def loop_cols(x):
    """0-based column indices of the matrix's nonzeros, with optional stride/offset."""
    mat = toMatlab(x.matrix)
    expr = "floor((find(%s)-1)/size(%s,1))" % (mat, mat)
    stride = getattr(x, 'stride', 1)
    if stride != 1:
        expr = "%d*%s" % (stride, expr)
    offset = getattr(x, 'offset', 0)
    if offset != 0:
        expr = "%s + %s" % (offset, expr)
    return expr
def loop_over(x):
return "nonzeros(%s)" % (x.op % toMatlab(x.matrix))
def _range(x):
if x.stride == 1: return "(%s:%s)'" % (x.start, x.end-1)
else: return "(%s:%s:%s)'" % (x.start, x.stride, x.end-1)
def repeat(x):
return "%s*ones(%s,1)" % (toMatlab(x.obj), x.n)
def assign(x):
    """Matlab assignment statement, coercing the right-hand side to sparse."""
    # echu: probably will be source of some massive bugs in matlab
    # see, for instance, the python_encoder
    return toMatlab(x.lhs) + " = sparse(" + toMatlab(x.rhs) + ");"
def nnz(x):
    """Number-of-nonzeros attribute access on the encoded object."""
    return toMatlab(x.obj) + ".nnz"
# Dispatch table mapping code-tree node types to their Matlab encoders.
lookup = {
    codes.ConstantCoeff: constant,
    codes.EyeCoeff: eye,
    codes.OnesCoeff: ones,
    codes.TransposeCoeff: trans,
    codes.ParameterCoeff: parameter,
    # Consistency fix: scalar parameters now use their dedicated encoder.
    # Previously this entry pointed at `parameter`, leaving
    # `scalar_parameter` as dead code (both emit the same string today).
    codes.ScalarParameterCoeff: scalar_parameter,
    codes.NegateCoeff: negate,
    codes.AddCoeff: add,
    codes.MulCoeff: mul,
    codes.Just: just,
    codes.LoopRows: loop_rows,
    codes.LoopCols: loop_cols,
    codes.LoopOver: loop_over,
    codes.Range: _range, # "range" is reserved
    codes.Repeat: repeat,
    codes.Assign: assign,
    codes.NNZ: nnz,
    str: lambda x: x,
    int: lambda x: str(x),
    float: lambda x: str(x),
    AbstractDim: lambda x: str(x)
}

# The module-level encoder: recursively renders a code tree to Matlab text.
toMatlab = create_encoder(lookup)
|
from torchvision import models
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from delf import delf_densenet169, init_resnet101gem, init_delf_pca
def _freeze_params(module):
    # Helper: stop gradient flow through (pretrained) weights.
    for param in module.parameters():
        param.requires_grad = False

def initialize_model(model_name, num_classes = 2000, use_pretrained=True):
    """Build a (mostly frozen) backbone for feature extraction / fine-tuning.

    For the classifier-style backbones the pretrained weights are frozen
    first and the final layer is then replaced with a fresh nn.Linear, so
    only the new head remains trainable.

    Args:
        model_name: one of "resnet50", "resnet101", "densenet169",
            "resnext50", "resnext101", "delf", "resnet101gem",
            "delf_pca", "densenet_class".
        num_classes: output dimension of the replacement head.
        use_pretrained: load pretrained (ImageNet) weights when True.

    Returns:
        The constructed model; exits the process on an unknown name.
    """
    model_ft = None
    if model_name == "resnet50":
        """ Resnet50
        """
        # torchvision ResNets expose the head as `.fc`; they have no
        # `.features` attribute (the old code raised AttributeError here).
        model_ft = models.resnet50(pretrained=use_pretrained)
        _freeze_params(model_ft)  # freeze BEFORE swapping in the new head
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)  # trainable head
    elif model_name == "resnet101":
        """ Resnet101
        """
        model_ft = models.resnet101(pretrained=use_pretrained)
        _freeze_params(model_ft)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
    elif model_name == "densenet169":
        """ Densenet169
        """
        model_ft = models.densenet169(pretrained=use_pretrained)
        _freeze_params(model_ft.features)
        # Rebuild as features -> ReLU -> global average pool.  The DenseNet
        # classifier layer is dropped entirely by the [:-1] slice, so there
        # is no need to replace it first (the old replacement was dead code).
        features = list(model_ft.children())[:-1]
        features.append(nn.ReLU(inplace=True))
        features.append(nn.AdaptiveAvgPool2d((1, 1)))
        model_ft = nn.Sequential(*features)
    elif model_name == "resnext50":
        """ Resnext50
        """
        # ResNeXt models also use `.fc`, not `.classifier`
        # (the old code raised AttributeError here).
        model_ft = models.resnext50_32x4d(pretrained=use_pretrained)
        _freeze_params(model_ft)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
    elif model_name == "resnext101":
        """ Resnext101
        """
        model_ft = models.resnext101_32x8d(pretrained=use_pretrained)
        _freeze_params(model_ft)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
    elif model_name == "delf":
        """ DELF using pretrained Densenet169 features """
        model_ft = delf_densenet169()
        _freeze_params(model_ft)
    elif model_name == "resnet101gem":
        model_ft = init_resnet101gem()
        _freeze_params(model_ft)
    elif model_name == "delf_pca":
        model_ft = init_delf_pca()
        _freeze_params(model_ft)
    elif model_name == "densenet_class":
        model_ft = init_densenet169()
        _freeze_params(model_ft)
    else:
        print("Invalid model name, exiting...")
        exit()
    return model_ft
def load_model(model_name, model_path):
    """Instantiate `model_name` and load matching weights from `model_path`.

    Checkpoint entries whose keys are unknown to the freshly built model
    are dropped before loading, which tolerates checkpoints saved from
    slightly different wrappers (e.g. with/without DataParallel).
    """
    model = initialize_model(model_name, use_pretrained=True)
    # Send the model to GPU
    if torch.cuda.device_count() > 0:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    else:
        print("Let's use CPU!")
    model_dict = model.state_dict()
    # map_location lets GPU-saved checkpoints deserialize on CPU-only
    # hosts (the old call raised a CUDA deserialization error there).
    map_location = None if torch.cuda.is_available() else 'cpu'
    load_dict = torch.load(model_path, map_location=map_location)
    # Keep only the checkpoint entries the current model actually has;
    # strict=False matches that intent (a filtered dict may legitimately
    # be missing keys, which strict loading would reject).
    filtered = {k: v for k, v in load_dict.items() if k in model_dict}
    model.load_state_dict(filtered, strict=False)
    return model
class Pool(nn.Module):
    """Global average pooling that flattens its input to (batch, dim)."""

    def __init__(self, dim):
        super(Pool, self).__init__()
        self.dim = dim  # channel count expected in the input tensor

    def forward(self, x):
        pooled = F.adaptive_avg_pool2d(x, (1, 1))
        return pooled.view(-1, self.dim)
def init_densenet169():
    """Pretrained DenseNet-169 backbone followed by ReLU and global pooling."""
    backbone = models.densenet169(pretrained=True)
    layers = list(backbone.children())[:-1]  # drop the classifier head
    layers.append(nn.ReLU(inplace=True))
    layers.append(Pool(1664))  # DenseNet-169 produces 1664 feature channels
    return nn.Sequential(*layers)
|
# module for comparing stats and making recommendations
"""
Read team names from user input, retrieve features of teams from MySQL DB, compute odds of winning and recommend features to focus on
"""
import numpy as np
import pandas as pd
import pymysql as mdb
from sklearn import linear_model
def QueryTeamData(tgtName, yourName, db):
    """
    Get data from MySQL DB
    """
    # Each team has its own table named after the team (spaces -> underscores).
    # NOTE(review): table names are built by string concatenation from
    # user-supplied team names; identifiers cannot be bound as SQL
    # parameters in pymysql, so callers must validate the names upstream.
    #cur = db.cursor()
    teamTgtPipe = pd.io.sql.read_sql(sql = "SELECT * FROM " + tgtName.replace(' ', '_'), con = db)
    teamYourPipe = pd.io.sql.read_sql(sql = "SELECT * FROM " + yourName.replace(' ', '_'), con = db)
    return teamTgtPipe, teamYourPipe
def PredictOp(teamTgtPipe, teamYourPipe, tgtName, teamModels):
    """Score tgtName's model on current-season average stats.

    Returns (DataFrame of feature names and model coefficients,
    predicted probability that tgtName wins).
    """
    coef = teamModels[tgtName]
    # Get stats of Team A's win and lose matches - only 20 features are saved
    features = list(coef['features'])
    # Get stats of Team B: Revert the features of A to retrieve features for B
    featureYour = []
    featureTgt = []
    for ii in features:
        if '_op' in ii:
            featureYour.append(ii[:-3])  # strip the '_op' suffix
        else:
            featureTgt.append(ii)
    # Season 2014-15 rows only.  NOTE(review): .ix is removed in modern
    # pandas; this module targets the older Python 2 / pandas stack.
    dfTgt = teamTgtPipe[teamTgtPipe['season'] == 1415].ix[:, featureTgt]
    dfYour = teamYourPipe[teamYourPipe['season'] == 1415].ix[:, featureYour]
    dfYour.columns = dfYour.columns + '_op'
    # Get mean and reorder into the original feature order
    bb = pd.concat([dfTgt.mean(), dfYour.mean()])
    bb = bb.reindex(features)
    model = coef['model']
    featureCoef = pd.DataFrame({'features': features, 'coef': model.coef_[0]})
    return featureCoef, model.predict_proba(bb)[0][1] # The prob of tgtTeam win
def GetActions(features):
    """
    Get action recommendations for your team, given your features,
    meaning you want INCREASE those with POSITIVE COEF, DECREASE those with NEGATIVE COEF
    """
    featureOP = []       # opponent stats to suppress
    featureYMore = []    # your volume stats to do more of
    featureYAcc = []     # your accuracy stats to improve
    featureImprove = []  # raw feature names selected for improvement
    count = 0
    # Walk from the largest coefficients downward (input arrives ascending).
    for index, row in features[::-1].iterrows():
        if count > 7: # Don't recommend more than 8 actions
            break
        # Possession/scoring stats are outcomes, not actionable behaviors.
        if ('poss' not in row['features']) and ('scoring' not in row['features']):
            if '_op' in row['features']:
                if (row['coef'] < 0) and ('accuracy' not in row['features']) and ('accurate' not in row['features']):
                    featureOP.append(row['features'][:-3].replace('_', ' ').title())
                    featureImprove.append(row['features'])
                    count += 1
            else:
                if row['coef'] > 0:
                    if 'accuracy' not in row['features']:
                        featureYMore.append(row['features'].replace('_', ' ').title())
                        featureImprove.append(row['features'])
                        count += 1
                    else:
                        featureYAcc.append(row['features'].replace('_', ' ').title())
                        featureImprove.append(row['features'])
                        count += 1
    # Whether show 2 columns or 3
    useTwoCol = True
    if useTwoCol:
        actions = pd.DataFrame([featureYAcc + featureYMore, featureOP], index = ['Your', 'OP']).T
    else:
        actions = pd.DataFrame([featureYAcc, featureYMore, featureOP], index = ['YAcc', 'YMore', 'OP']).T
    nDimActions = actions.shape
    actions = actions.values.tolist()
    ## Make the actions more readable
    for ii in np.arange(nDimActions[0]):
        for jj in np.arange(nDimActions[1]):
            #print actions[ii][jj]
            if actions[ii][jj] == None:
                actions[ii][jj] = ' '  # blank cell for display
            else:
                # Expand abbreviations used by the data source.
                actions[ii][jj] = actions[ii][jj].replace('Att', 'Attempt').replace('Obox', 'Outside the Penalty Box').replace('Ibox', 'Inside the Penalty Box').replace('Total ', '').replace('Fwd', 'Forward').replace('18Yardplus', 'Outside the Penalty Box').replace('18Yard', 'Inside the Penalty Box')
                if 'Accuracy' in actions[ii][jj]:
                    actions[ii][jj] = actions[ii][jj][9:] + ' Accuracy'
                else:
                    actions[ii][jj] = '# of ' + actions[ii][jj]
                    # Pluralize counts, skipping already-plural words and
                    # the penalty-box location phrases.
                    if ("alls" not in actions[ii][jj]) and ("Penalty Box" not in actions[ii][jj]):
                        if "Won" in actions[ii][jj]:
                            actions[ii][jj] = actions[ii][jj][:-4] + 's Won'
                        elif actions[ii][jj][-2:] != 'ss':
                            actions[ii][jj] = actions[ii][jj] + 's'
                        else:
                            actions[ii][jj] = actions[ii][jj] + 'es'
    #print actions
    return actions, featureImprove
def ImprovedScore(tgtName, yourName, teamModels, featureImprove, teamTgtPipe, teamYourPipe, Imp = 0.1):
    """
    Given 10% improvement at the suggested features, how much more likely you are going to win
    """
    ## Put featureImprove into target model - need to reverse _op and non _op
    coef = teamModels[tgtName]
    # Get stats of Team A's win and lose matches - only 20 features are saved
    features = list(coef['features'])
    # Get stats of Team B: Revert the features of A to retrieve features for B
    featureYour = []
    featureTgt = []
    for ii in features:
        if '_op' in ii:
            featureYour.append(ii[:-3])
        else:
            featureTgt.append(ii)
    dfTgt = teamTgtPipe[teamTgtPipe['season'] == 1415].ix[:, featureTgt]
    dfYour = teamYourPipe[teamYourPipe['season'] == 1415].ix[:, featureYour]
    dfYour.columns = dfYour.columns + '_op'
    # Get mean and reorder into the original feature order
    bb = pd.concat([dfTgt.mean(), dfYour.mean()])
    bb = bb.reindex(features)
    model = coef['model']
    # Nudge each suggested feature by Imp in the direction that HURTS the
    # target team (the sign of the model coefficient picks the direction).
    for ii in bb.iteritems():
        if ((ii[0] + '_op') in featureImprove) or ((ii[0][:-3]) in featureImprove):
            if model.coef_[0][features.index(ii[0])] < 0:
                bb[ii[0]] *= 1 + Imp
            else:
                bb[ii[0]] *= 1 - Imp
    probTgt = model.predict_proba(bb)[0][1]
    ## Put featureImprove into your model
    coef = teamModels[yourName]
    # Get stats of Team A's win and lose matches - only 20 features are saved
    features = list(coef['features'])
    # Get stats of Team B: Revert the features of A to retrieve features for B
    featureYour = []
    featureTgt = []
    for ii in features:
        if '_op' in ii:
            featureTgt.append(ii[:-3])
        else:
            featureYour.append(ii)
    dfTgt = teamTgtPipe[teamTgtPipe['season'] == 1415].ix[:, featureTgt]
    dfYour = teamYourPipe[teamYourPipe['season'] == 1415].ix[:, featureYour]
    dfTgt.columns = dfTgt.columns + '_op'
    # Get mean and reorder into the original feature order
    bb = pd.concat([dfTgt.mean(), dfYour.mean()])
    bb = bb.reindex(features)
    model = coef['model']
    # Nudge the same features in the direction that HELPS your team.
    for ii in bb.iteritems():
        if ii[0] in featureImprove:
            if model.coef_[0][features.index(ii[0])] > 0:
                bb[ii[0]] *= 1 + Imp
            else:
                bb[ii[0]] *= 1 - Imp
    probYour = model.predict_proba(bb)[0][1]
    # Renormalized head-to-head win probability after the improvements.
    return round(probYour / (probYour + probTgt), 2)
def PredictMatch(yourName, tgtName, teamModels, db):
    """
    The main function to make prediction, recommend features, and compute improvement
    """
    teamTgtPipe, teamYourPipe = QueryTeamData(tgtName, yourName, db)
    featureCoefTgt, probTgt = PredictOp(teamTgtPipe, teamYourPipe, tgtName, teamModels)
    featureCoefYour, probYour = PredictOp(teamYourPipe, teamTgtPipe, yourName, teamModels)
    # Head-to-head win probability, renormalized over the two model outputs.
    odds = round(probYour / (probYour + probTgt), 2)
    # In featureCoefYour, you want INCREASE those with POSITIVE COEF, DECREASE those with NEGATIVE COEF
    # In featureCoefTgt, you want to do the opposite
    # reverse both the sign of the coef, and '_op' in features so as to be the same with featureCoefYour
    featureCoefTgt['coef'] = - featureCoefTgt['coef']
    featureCoefTgt.features = [ii[:-3] if "_op" in ii else ii + '_op' for ii in featureCoefTgt.features]
    # Combine only the most important 10 features
    # featureBoth = featureCoefTgt[11:].append(featureCoefYour[11:])
    # Combine only all the most important features
    featureBoth = featureCoefTgt.append(featureCoefYour)
    # get action recommendations
    # Somehow the pandas here uses a deprecated para cols, instead of the new one subset
    #featureBoth.drop_duplicates(subset = 'features', take_last = True, inplace = True)
    featureBoth.drop_duplicates(cols = 'features', take_last = True, inplace = True)
    actions, featureImprove = GetActions(featureBoth)
    Imp = 0.1  # assume a 10% improvement on each recommended feature
    oddsNew = ImprovedScore(tgtName, yourName, teamModels, featureImprove, teamTgtPipe, teamYourPipe, Imp)
    return odds, oddsNew, actions
# This file is meant to be imported; running it directly just says so.
# (Python 2 print statement — the whole module targets Python 2.)
if __name__ == '__main__':
    print 'I\'m a module'
|
class Solution:
    # @return a string
    def intToRoman(self, num):
        """Convert an integer in 1..3999 to its canonical Roman numeral.

        Uses the standard greedy algorithm over descending value/symbol
        pairs, with the subtractive forms (CM, CD, XC, XL, IX, IV) listed
        explicitly.  This replaces the previous eight-way branch on the
        per-unit digit count and no longer raises KeyError for
        out-of-range input (it simply ignores what it cannot encode).
        """
        pairs = [
            (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
            (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
            (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
        ]
        parts = []
        for value, symbol in pairs:
            count, num = divmod(num, value)
            parts.append(symbol * count)
        return ''.join(parts)
# Manual check (Python 2 syntax): 3999 is the largest representable value.
if __name__ == '__main__':
    test = Solution()
    print test.intToRoman(3999)
    raw_input()  # keep the console window open until Enter is pressed
from django.conf.urls import url
from fetch.views import GetProductScoreView, GetFullProductInfoView
# URL routes for the fetch app; <pk> captures the numeric product primary key.
urlpatterns = [
    url(r'^product/overall-score/(?P<pk>\d+)/$', GetProductScoreView.as_view()),
    url(r'^product/full/(?P<pk>\d+)/$', GetFullProductInfoView.as_view()),
]
|
# use binary search to find an element in a list
import random
import time
start_time = time.time()  # NOTE(review): recorded but never reported
# 500 distinct values drawn from 0..999, so roughly half of all searches miss.
a = random.sample(range(0, 1000), 500)
a.sort()  # binary search requires sorted input
print(a)
number = int(input("Which number do you want to search for: "))
def BinarySearch(a, number):
    """Classic two-pointer binary search over the sorted list `a`.

    Returns the matching element if `number` is present, otherwise None.
    Unlike the original slicing version this uses O(1) extra memory
    (no per-step list copies) and does not crash on an empty list.
    Callers only compare the result against `number`, so returning None
    on a miss (instead of the nearest element) is compatible.
    """
    lo, hi = 0, len(a) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if a[mid] == number:
            return a[mid]
        if number < a[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    return None
def ForSearch(a, number):
    """Linear scan: return the matching element, or None when absent."""
    return next((element for element in a if element == number), None)
# Each search returns an element of `a`; membership is decided by comparing
# that returned element against the searched number.
if(number == BinarySearch(a, number)) :
    print("The number was found with binary search")
else:
    print("The number was not found with binary search")
if(number == ForSearch(a, number)) :
    print("The number was also found with a for loop")
else:
    print("The number was also not found with a for loop")
|
""" The aim is to run a regression of 8 outcome variables on a set of covariates
related to the size of firms. If the coefficient of 'treatsmall' or 'treatlarge' are
significant, we could conclude that the age of the firm determines the effectiveness
of incentives. The significance of 'treatsmall'('treatlarge') means that small(large) firms
are affected by the public subsidy.
"""
import statsmodels.formula.api as sm
def get_covariates(degree):
    """Return the regressor names for a polynomial specification.

    Args:
        degree (int): degree of the score polynomial.

    Returns:
        list of covariate names: degree 0 keeps only the group/treatment
        dummies, degree 1 adds the linear score interactions, and higher
        degrees append the powered terms ssmallK/slargeK/streatsmallK/
        streatlargeK for K = 2..degree.
    """
    base = [
        "smallm",
        "largem",
        "treatsmall",
        "treatlarge",
        "ssmall",
        "slarge",
        "streatsmall",
        "streatlarge",
    ]
    if degree == 0:
        return base[:4]
    if degree == 1:
        return base
    powered = [
        f"{stem}{k}"
        for k in range(2, degree + 1)
        for stem in ("ssmall", "slarge", "streatsmall", "streatlarge")
    ]
    return base + powered
def regress(independent_Variable, dataframe, degree):
    """OLS of the outcome on the degree-`degree` covariate set.

    Args:
        independent_Variable: name of the outcome column in `dataframe`.
        dataframe (pd.DataFrame): sample to fit on.
        degree (int): degree of the score polynomial.

    Returns:
        statsmodels regression summary, with standard errors clustered
        on the running variable `score` and t-based inference.
    """
    formula = "{} ~ {}".format(independent_Variable,
                               "+".join(get_covariates(degree)))
    model = sm.ols(formula=formula, data=dataframe)
    fitted = model.fit(
        cov_type="cluster",
        cov_kwds={"groups": dataframe["score"]},
        use_t=True,
    )
    return fitted.summary()
|
class Huffman(object):
    """Build a Huffman tree from the character frequencies of a text file.

    The file is read on construction; `create_tree` then repeatedly merges
    the two lightest nodes until a single root remains.
    """

    def __init__(self, filename):
        # Per-instance state.  The original class-level lists/dicts were
        # shared across ALL instances, so a second Huffman object would
        # silently inherit the first one's data.
        self.data = []       # [character, frequency] pairs
        self.NodeList = []   # working list of nodes, kept sorted by weight
        self.stat = {}       # character -> frequency
        self.filename = filename
        self.read_file()

    def read_file(self):
        """Count character frequencies (newlines stripped) in the file."""
        text = ''
        stat = {}
        # `with` guarantees the handle is closed (the old code leaked it).
        with open(self.filename) as infile:
            for line in infile:
                text = text + line.rstrip('\n')
        for ch in text:
            stat[ch] = stat.get(ch, 0) + 1
        print(stat)
        self.stat = stat
        # dict.iteritems() does not exist in Python 3; items() works in both.
        for ch, count in stat.items():
            self.data.append([ch, count])

    def make_node(self):
        """Create one leaf node per distinct character, sorted by weight."""
        for ch, count in self.data:
            self.NodeList.append(HuffmanNode(ch, count))
        self.insertionSort()

    def create_tree(self):
        """Merge the two lightest nodes until a single root remains."""
        self.make_node()
        while len(self.NodeList) != 1:
            temp1 = self.NodeList.pop(0)
            temp2 = self.NodeList.pop(0)
            # Internal (merged) nodes are relabeled with their weight.
            if temp1.item not in self.stat:
                temp1.item = str(temp1.weight)
            if temp2.item not in self.stat:
                temp2.item = str(temp2.weight)
            merged = HuffmanNode('0', temp1.weight + temp2.weight, temp1, temp2)
            self.NodeList.append(merged)
            self.insertionSort()
            # Removed: the original incremented an undefined `height` here,
            # which raised NameError on the very first merge.
        self.NodeList[0].item = str(self.NodeList[0].weight)

    def print_tree(self):
        """Print node items level by level, one line per depth."""
        track_depth = self.NodeList[0].depth
        queue = [self.NodeList[0]]
        while queue:
            temp = queue.pop(0)
            if temp.depth != track_depth:
                print('\n')
                track_depth = temp.depth
            # Python 3 form of the old `print ' '+item,` statement:
            # stay on the same line, items space-separated.
            print(' ' + temp.item, end='')
            if temp.left is not None:
                queue.append(temp.left)
            if temp.right is not None:
                queue.append(temp.right)

    def level_order(self):
        """Print (item, weight) for every node in breadth-first order."""
        queue = [self.NodeList[0]]
        while queue:
            temp = queue.pop(0)
            print(temp.item, temp.weight)
            if temp.left is not None:
                queue.append(temp.left)
            if temp.right is not None:
                queue.append(temp.right)

    def depth_deter(self):
        """Assign each node its depth (root = 0), printing as it walks."""
        queue = [self.NodeList[0]]
        self.NodeList[0].depth = 0
        while queue:
            temp = queue.pop(0)
            print(temp.item, temp.weight, temp.depth)
            if temp.left is not None:
                self.change_depth(temp.left, temp.depth + 1)
                queue.append(temp.left)
            if temp.right is not None:
                self.change_depth(temp.right, temp.depth + 1)
                queue.append(temp.right)

    def change_depth(self, node, depth):
        # Kept as a method for compatibility with existing callers.
        node.depth = depth

    def search_key(self, item, str, node):
        """Print the bit string (0 = left, 1 = right) that encodes `item`.

        NOTE(review): the accumulator parameter is named `str`, shadowing
        the builtin; the name is kept so existing keyword callers work.
        """
        if node is not None:
            if node.item == item:
                print(str)
            else:
                self.search_key(item, str + '0', node.left)
                self.search_key(item, str + '1', node.right)

    def insertionSort(self):
        """Stable in-place insertion sort of NodeList by ascending weight."""
        for i in range(1, len(self.NodeList)):
            key = self.NodeList[i]
            j = i - 1
            while j >= 0 and self.NodeList[j].weight > key.weight:
                self.NodeList[j + 1] = self.NodeList[j]
                j = j - 1
            self.NodeList[j + 1] = key
class HuffmanNode(object):
    """A single node of a Huffman tree."""

    def __init__(self, item, weight, left=None, right=None, depth=0):
        self.item = item      # character for leaves, label for internal nodes
        self.weight = weight  # frequency (leaf) or combined frequency (internal)
        self.left = left      # left child, or None for a leaf
        self.right = right    # right child, or None for a leaf
        self.depth = depth    # distance from the root (filled in later)
# Build the Huffman tree for the sample text, annotate node depths,
# then print the tree level by level.
Huffmancode=Huffman('char_info.txt')
Huffmancode.create_tree()
Huffmancode.depth_deter()
Huffmancode.print_tree()
#Huffmancode.search_key('f','',Huffmancode.NodeList[0])
|
import re
from functools import partial
from palett.fluo import fluo_vector
from palett.presets import ATLAS, SUBTLE
from texting import LF, TB, fold, has_ansi, ripper, VO
from texting.enum.regexes import LITERAL
# Default tokenizer: splits text on the LITERAL word-boundary pattern.
splitter = partial(ripper, re.compile(LITERAL))
def deco_str(
        text,
        width=80,
        indent=0,
        first_line_indent=0,
        presets=(ATLAS, SUBTLE),
        effects=None,
        vectify=splitter,
        joiner=None
):
    """Fold and colorize `text`.

    Empty text yields ''; text that already contains ANSI escapes is
    returned untouched.  Long text is folded to `width` with tab-based
    continuation indents before coloring.
    """
    size = len(text)
    if not size:
        return ''
    if has_ansi(text):
        return text
    if width and size > width:
        delim = LF + TB * (indent if indent else 0)
        text = fold(text, width=width, delim=delim,
                    first_line_indent=first_line_indent)
    if presets:
        text = fluo_string(text, presets, effects, vectify, joiner)
    return text
def fluo_string(text, presets, effects, vectify, joiner):
    """Split `text` into tokens, colorize them in place, and rejoin."""
    words = vectify(text)
    fluo_vector(words, presets, effects, mutate=True)
    if joiner:
        return joiner(words)
    return VO.join(words)
|
import enum
from typing import Union, Optional, List
from idefix import Bot, Direction, RelativeDirection, Wall, Board
@enum.unique
class Command(enum.Enum):
    """Commands a bot can be instructed to perform.

    Values are plain ints.  The original declarations carried trailing
    commas, which silently made each value a 1-tuple such as (0,).
    """
    Forward = 0
    TurnLeft = 1
    TurnRight = 2
    Wall = 3
    WriteInfo = 4
class Instruction:
    """One queued command for a bot, convertible to a JSON-friendly dict."""

    __slots__ = ['bot', 'command', 'args']

    def __init__(self, bot: Bot, command: Command, args: Optional[List] = None):
        self.bot = bot
        self.command = command
        self.args = args

    def to_json_dict(self):
        """Serialize as {'bot': ..., 'cmd': ...}, adding 'args' when present."""
        payload = {'bot': self.bot.name, 'cmd': self.command.name}
        if self.args is not None:
            payload['args'] = self.args
        return payload

    def __repr__(self):
        return "Instruction(" + repr(self.to_json_dict()) + ")"
class ProxyBot(Bot):
    """Wraps another Bot and records every command issued through it.

    Each mutating call is appended to ``command_list`` as an
    Instruction and then forwarded verbatim to the wrapped bot, so the
    proxy behaves exactly like the original while leaving an audit
    trail that can be serialized.
    """
    def __init__(self, bot: Bot):
        super().__init__(skip_init=True)
        self.bot = bot
        self.command_list = []  # type: List[Instruction]

    def clear_command_list(self):
        """Drop all recorded instructions."""
        self.command_list = []

    def _cmd(self, cmd: Command, args: Optional[List] = None):
        # Record one instruction against the wrapped bot.
        self.command_list.append(Instruction(self.bot, cmd, args))

    # --- state delegated to the wrapped bot --------------------------------
    @property
    def pos(self):
        return self.bot.pos

    @pos.setter
    def pos(self, value):
        self.bot.pos = value

    @property
    def dir(self):
        return self.bot.dir

    @dir.setter
    def dir(self, value):
        self.bot.dir = value

    @property
    def color(self):
        return self.bot.color

    @property
    def name(self):
        return self.bot.name

    # --- recorded commands -------------------------------------------------
    def wall(self, dir: Union[Direction, RelativeDirection]) -> Wall:
        self._cmd(Command.Wall, [dir.value])
        return self.bot.wall(dir)

    def forward(self, count: int = 1, *args, **kwargs) -> None:
        self._cmd(Command.Forward, [count])
        return self.bot.forward(count, *args, **kwargs)

    def turn_left(self, *args, **kwargs):
        self._cmd(Command.TurnLeft)
        return self.bot.turn_left(*args, **kwargs)

    def turn_right(self, *args, **kwargs):
        self._cmd(Command.TurnRight)
        return self.bot.turn_right(*args, **kwargs)

    def write_info(self, board: Board, *args, **kwargs):
        self._cmd(Command.WriteInfo)
        return self.bot.write_info(board, *args, **kwargs)

    def __repr__(self):
        return "ProxyBot('{}')".format(self.bot.name)
|
from testr.packages import check_files
# Scan test_*.log files from the test run for 'warning'/'error' lines,
# ignoring the known-benign messages listed in allows (regexes or plain
# substrings).
check_files('test_*.log', ['warning', 'error'],
            allows=[r'/kadi/settings.py:\d\d: UserWarning:',
                    r'warnings.warn\(message\)',
                    'Unable to change file mode',
                    'unable to get COBSRQID',
                    'alter_validators_add_error_messages',
                    'dropping state because of insufficent event time pad',
                    'negative event duration',
                    'Coarse OBC',
                    'no starcat for obsid 0',
                    'from sot/matching-blocks-error-message'])
|
import re
from setuptools import setup
# Read the license text and the mvnc version, then register the package.
license_text = ""
version = ""
with open("../../LICENSE", encoding="utf-8") as f:
    license_text = "\n" + f.read()
# The VERSION file carries a line like "mvnc 2.08.01"; extract the dotted
# number.  The raw string fixes the invalid \w/\d escape warnings, and the
# context manager fixes the original unclosed file handle.
with open("../../VERSION", encoding="utf-8") as f:
    for line in f:
        m = re.search(r"mvnc[^\w]*?([\d\.]+)", line)
        if m:
            version = m.group(1)
            break
setup(
    name = "mvnc",
    version = version,
    author = "Intel Corporation",
    description = ("mvnc python api"),
    license = license_text,
    keywords = "",
    url = "http://developer.movidius.com",
    packages = ["mvnc"],
    package_dir = {"mvnc": "mvnc"},
    install_requires = [
        "numpy",
    ],
    long_description = "-",
    classifiers = [
        "Development Status :: 5 - Production/Stable",
        "Topic :: Software Development :: Libraries",
        "License :: Other/Proprietary License",
    ],
)
|
'''
Created on 29/mag/2010
@author: pgcd
'''
import re
from django.utils.html import escape
from almparse.signals import parsing_done
class Node(object):
    """One markup construct: a recognizer regex plus the HTML tag it
    renders to.

    ``children`` maps regex group names to sub-Nodes that may occur
    inside this construct; ``parse`` recurses through them.  ``attrs``
    holds default HTML attributes and ``allowed_attributes`` whitelists
    which user-supplied ``(key:value)`` tokens may pass through.
    """
    # Shared regex fragments, interpolated into child patterns through
    # '%(_start)s'-style placeholders (see __init__).
    defaults = {'_start':r'(?<![a-zA-Z0-9])',
                '_end':r'(?![a-zA-Z0-9|])',
                '_startline':r'(?<=^)\s*',
                '_endline':r'\s*(?=\n|$)',
                '_escape':r'\|?'
               }
    def __init__(self, regex = '', tag = '', children = False, attrs = None, allowed_attributes = None, regex_priority = 100):
        # An empty regex matches the whole text ('^.+$'): the root node case.
        self.regex = regex % self.defaults if regex else '^.+$'
        self.tag = tag
        self.attrs = attrs or {}
        self.allowed_attributes = allowed_attributes or []
        self.children = children
        # Lower values are placed earlier in the union built by build_regex().
        self.regex_priority = regex_priority
    def update_attributes(self, body, basedict = None):
        """Strip leading '(key:value)' tokens off *body* and merge them over
        the node's default attrs.  Returns ``(remaining_body, attrs)``."""
        _ = True
        basedict = basedict or {}
        while _:
            at = re.search(r'^(\((?P<key>\w+):(?P<quote>[\'"]?)(?P<val>.*?)(?P=quote)\))(?!\s)', body)
            if at is not None:
                basedict.update({at.group('key'):at.group('val')})
                body = body[at.end():]
            else:
                _ = False
        attrs = self.attrs.copy()
        attrs.update(basedict)
        return (body, attrs)
    def safe_attributes(self, basedict = None):
        """Return only the entries of *basedict* whose keys appear in
        ``allowed_attributes``."""
        safedict = {}
        basedict = basedict or {}
        for d in basedict:
            if d in self.allowed_attributes:
                safedict[d] = basedict[d]
        return safedict
    def build_regex(self):
        """Compile the union of all children's regexes, grouped in ascending
        ``regex_priority`` order so lower-priority patterns come first."""
        prlst = []
        for x in self.children.items():
            if x[1].regex_priority not in prlst:
                prlst.append(x[1].regex_priority)
        prlst.sort()
        regexlst = [[] for x in prlst]
        for x in self.children.items():
            for i, p in enumerate(prlst):
                if x[1].regex_priority == p:
                    regexlst[i].append(x[1].regex)
                    break
            else:
                pass # uh? if parsing reaches this point there's a bug
        r = r"%s" % '|'.join([r"%s" % '|'.join([x for x in xl]) for xl in regexlst])
        rx = re.compile(r, re.S)
        return rx
    def build(self, body, attrs):
        """Render *body* inside this node's tag with the escaped,
        whitelisted *attrs*; childless tagged nodes render self-closing."""
        #This method should be somehow overridden in subclasses
#        body, attrs = self.update_attributes(body)
        attrs = self.safe_attributes(attrs)
        if self.tag:
            result = "<%(tag)s%(attrs)s />" if self.children == False else "<%(tag)s%(attrs)s>%(content)s</%(tag)s>"
            attrs = (" " + " ".join(['%s="%s"' % (x[0], escape(x[1])) for x in attrs.items()])) if attrs else ''
            result = result % {'tag':self.tag, 'content':body, 'attrs': attrs}
        else:
            result = body
        return result
    def parse(self, text, groupdict = None): #IGNORE:W0613
        """Recursively translate the markup in *text* into HTML."""
        def xlate(t):
            # Find the first child match, hand its content to that child's
            # parse(), then continue translating after the match.
            rx = self.build_regex()
            m = rx.search(t)
            if not m:
                return t
            head, tail = m.span()
            if m.lastgroup:
                try:
                    content_node = m.group(m.lastgroup + '_content')
                except IndexError:
                    # No explicit *_content group: the content is the group
                    # immediately following the named one.
                    content_node = m.group(m.lastindex + 1)
                text = self.children[m.lastgroup].parse(content_node, m.groupdict())
            else:
                text = m.group()
            return t[:head] + text + xlate(t[tail:])
        text, attrs = self.update_attributes(text) # Text gets cleaned here.
        if not self.children:
            return self.build(text, attrs)
        return self.build(xlate(text), attrs)
class RawHtmlNode(Node):
    """Raw span: content is HTML-escaped verbatim and never re-parsed."""
    def parse(self, text, groupdict = None, *args, **kwargs):#IGNORE:W0613
        return escape(text)
class CodeNode(Node):
    """Code block: the content is escaped, newlines become <br /> tags, and
    the (restricted) children are then parsed by the base class."""
    def parse(self, text, groupdict = None, *args, **kwargs):#IGNORE:W0613
        escaped = escape(text).replace('\n', '<br />')
        return super(CodeNode, self).parse(escaped, *args, **kwargs)
class LineBreakNode(Node):
    """Explicit line break: any match renders as a bare <br /> tag."""
    def parse(self, text, groupdict = None, *args, **kwargs):#IGNORE:W0613
        return '<br />'
class LinkNode(Node):
    """Explicit link: the body is split into an href plus an optional
    (possibly quoted) label."""
    def update_attributes(self, body, basedict = None):
        """Extract href/content from *body*; a scheme-less target gets an
        'http://' prefix.  Returns ``(visible_text, attrs)``."""
        attrs = self.attrs.copy()
        m = re.search(r'^\s*(?P<href>\S+)\s*((?P<q>[\'"]?)(?P<content>(.+)?)(?P=q))?\s*$', body)
        if m is None:  # TODO: fix regex so that this check is unnecessary
            attrs.update({'href':'', 'content':''})
        else:
            attrs.update(m.groupdict())
        # The visible text falls back to the href itself.
        body = attrs.get('content') or attrs['href']
        # Prefix a scheme unless the target is absolute or already has one.
        attrs['href'] = re.sub("^(?!/|http://|ftp://|https://)", "http://", attrs['href'], 1)
        return (body, attrs)
class QuoteNode(Node):
    """Quote construct: extracts an optional '@user #post' attribution
    header into data- attributes and strips leading '>' markers."""
    def update_attributes(self, body, basedict = None):
        attrs = self.attrs.copy()
        # Drop the leading '>' quote markers from every line.
        body = re.sub(r'(?P<st>^|\n)>\s?', '\g<st>', body)
        try:
            # '@user #123' attribution at the start of the quote.  When the
            # search fails, at is None and at.groupdict() raises
            # AttributeError -- that is the no-attribution exit below.
            at = re.search(r'^\s?@(?P<user>\[?.*[^~]\]\s*(?=#)|[^[][^\s#]+)\s?(?P<post>\#\d+(?=\s|$))?', body)
            attrs.update(at.groupdict())
            trim = at.end('user')
            if attrs['post']:
                attrs['data-related-post'] = attrs['post'][1:]
                trim = at.end('post')
            attrs['data-related-user'] = attrs['user'].lstrip('[').rstrip('] ')
            if self.tag == 'q':
                # Inline quotes drop the attribution text entirely.
                body = body[trim:].lstrip()
            else:
                # Blockquotes keep the attribution on its own first line.
                body = body[:trim] + '\n' + body[trim:].lstrip()
        except AttributeError:
            pass
        return (body, attrs)
#    def parse(self, text, groupdict):
#        text = re.sub(r'(?P<st>^|\n)>\s?', '\g<st>', text)
#        return super(QuoteNode, self).parse(text, groupdict)
class ImgNode(Node):
    """Image construct: src plus an optional quoted title parsed from the
    body; alt falls back to the src when no title is present."""
    def update_attributes(self, body, basedict = None):
        """Split *body* into src + optional title.  Returns ``('', attrs)``
        because an <img> has no rendered content."""
        body, attrs = super(ImgNode, self).update_attributes(body)
        match = re.search(r'(?m)^\s*(?P<src>\S+)\s*((?P<q>[\'"]?)(?P<title>(.+?)?)(?P=q)?)?\s*$', body)
        if match: #TODO: fix regex so that this check is unnecessary
            attrs.update(match.groupdict())
        else:
            attrs.update({'src':''})
        # BUGFIX: groupdict() always inserts a 'title' key (possibly None or
        # ''), so dict.get's default was never used and alt could end up
        # None; fall back to src explicitly instead.
        attrs['alt'] = attrs.get('title') or attrs['src']
        return ('', attrs)
class AutoImgNode(Node):
    """Bare image URL: the URL itself becomes both src and alt."""
    def update_attributes(self, body, basedict = None):
        attrs = dict(self.attrs)
        attrs['src'] = body
        attrs['alt'] = body
        return ('', attrs)
class AutoLinkNode(Node):
    """Bare URL: becomes the href while staying visible as the link text."""
    def update_attributes(self, body, basedict = None):
        attrs = dict(self.attrs)
        attrs['href'] = body
        return (body, attrs)
class ListNode(Node):
    """Ordered/unordered list: re-splits its body into <li> items using the
    captured list marker, preserving nesting through marker prefixes."""
    def parse(self, text, groupdict = None):
        # The marker that introduced this list (e.g. '* ', '# ', or a
        # nested combination thereof).
        list_marker = groupdict.get('list_marker') or groupdict.get('ol_marker') or groupdict.get('ul_marker')
        tabs = ' ' * (len(list_marker))
        # One <li>: everything up to the next occurrence of the same marker
        # (not followed by a deeper marker) or the end of the string.
        regex = r'''(?x)(?s)
        (?P<li>[ \t]*
            (?P<list_marker>%(list_marker)s)
            (?P<li_content>.+?)
            (?=
                \n[ \t]*%(list_marker)s(?![*#])
                |
                $(?# The LI ends before the end of the string )
            )
        )''' % {'list_marker':re.escape(list_marker)}
        def xlate(t):
            # Same driver loop as Node.parse's xlate, but against the
            # li-splitting regex above.
            rx = re.compile(regex, re.S)
            m = rx.search(t)
            if not m:
                return t
            head, tail = m.span()
            if m.lastgroup:
                try:
                    content_node = m.group(m.lastgroup + '_content')
                except IndexError:
                    content_node = m.group(m.lastindex + 1)
                text = self.children[m.lastgroup].parse(content_node, m.groupdict())
            else:
                text = m.group()
            return t[:head].rstrip('\n') + '\n' + tabs + text + xlate(t[tail:])
        return '\n' + tabs + self.build(xlate(text), {}) + '\n'
class MacroNode(Node):
    """Database-backed macro: looks the macro up by name and applies its
    regex substitution to the captured content."""
    def parse(self, text, groupdict = None):
        # Imported lazily to avoid a circular import at module load time.
        from almparse.models import Macro
        try:
            m = Macro.objects.get(name = groupdict['macro_name']) #TODO: Filter by user/group/perm
        except Macro.DoesNotExist: #@UndefinedVariable
            # NOTE(review): this appends the macro name *after* the closing
            # '%}' -- confirm the intended message layout.
            return groupdict['macro'].replace('%}', " -- macro not available%}" + groupdict['macro_name']) #TODO: Something prettier
        if m.regex_match:
            if groupdict.get('macro_content'):
                return re.sub(m.regex_match, m.regex_replace, groupdict['macro_content'])
            else:
                return groupdict['macro'].replace('%}', " -- content required%}") #TODO: Something prettier
        else:
            # Macros without a match pattern are plain-text substitutions.
            return m.regex_replace
class PluginNode(Node):
    # Placeholder for the '<<<...>>>' plugin syntax; inherits Node behavior
    # unchanged.
    pass
# Block-level constructs.  Each regex carries a named group whose name
# matches its key, so Node.parse can dispatch on m.lastgroup.
BlockNodes = {
        'macro':MacroNode(regex = r'(?P<macro>\{%%\s*(?P<macro_name>[^:]+?)\s*(?:%%\}|:(?P<macro_content>[^<].+?[^~])?\s*%%\}))'),
        'plugin':PluginNode(regex = r'(?P<plugin><<<(.+?)>>>)'), #TODO:
        'blockquote':QuoteNode(tag = 'blockquote', allowed_attributes = "cite data-related-user data-related-post".split(), regex = r'(?s)(?P<blockquote>(?:^|\n)(>.*?))(?:$|\n(?!>))'),
        'center':Node(tag = 'div', attrs = {'class':"text-center"}, regex = r'(?P<center>->(.*?[^~])<-)', allowed_attributes = ['class']),
        'left':Node(tag = 'div', attrs = {'class':"text-left"}, regex = r'(?P<left><-(.*?[^~])<-)', allowed_attributes = ['class']),
        'right':Node(tag = 'div', attrs = {'class':"text-right"}, regex = r'(?P<right>->((?:[^<]|<[^-])*?[^~])->)', allowed_attributes = ['class']),
        'force':Node(regex = r'(?P<force>\|(.*?[^~])\|)', regex_priority = 50), #force must come after forcein (see below)
        'code':CodeNode(tag = 'code', regex = r'(?P<code>\{\{\{(.*?[^~])\}\}\})', allowed_attributes = ['lang']),
}
# Constructs that render without parsed children (raw spans, images, br).
EmptyNodes = {
        'raw': RawHtmlNode(regex = r'(?P<raw>==(.*?[^~])==)'),
        'img':ImgNode(tag = 'img', regex = r'(?P<img>(?<!{)\{\{([^{].+?[^~}])\}\})', allowed_attributes = "src title alt width height border".split()),
        #'img':ImgNode(tag = 'img', regex = r'(?<!{)(?P<img>\{\{(\s*[^{\s]\S*\s*((?P<auximgquote>[\'"]?)(.*?[^~}])?(?P=auximgquote)?)?\s*)\}\})', allowed_attributes = "src title alt width height border".split()),
        'br':LineBreakNode(regex = r'(?P<br>(?<=\n))', tag = 'br', regex_priority = 10),
        'autoimg': AutoImgNode(tag = 'img',
            regex = r'%(_start)s(?P<autoimg>(http://\S+(?:jpg|jpeg|gif|png)))%(_end)s', allowed_attributes = "src title alt".split()),
}
# Inline (span-level) constructs: emphasis, links, spoilers, escapes.
InlineNodes = {
        'forcein':Node(regex = r'(?P<forcein>\|((?P<aux1>[=-_]{2}|[*_^])(\|(?P<aux2>[=-_]{2}|[*_^]).*?(?P=aux2)\||.)*?(?P=aux1))\|)', regex_priority = 20),
        'spoiler':Node(regex = r'%(_start)s(?P<spoiler>\^(.*?[^~])\^)%(_end)s', tag = 'span', attrs = {'class':'spoiler'}, allowed_attributes = ['class']),
        'strong':Node(regex = r'%(_start)s(?P<strong>\*([^~*]|[^* ].*?[^~])\*)%(_end)s', tag = 'strong'),
#        'em':Node(regex = r'%(_start)s(?P<em>_([^_ ].*?[^~])\_)%(_end)s', tag = 'em'),
        'del':Node(regex = r'%(_start)s(?P<del>--(.*?[^~])--)%(_end)s', tag = 'del'),
        'ins':Node(regex = r'%(_start)s(?P<ins>__(.*?[^~])__)%(_end)s', tag = 'ins'),
        'em':Node(regex = r'%(_start)s(?P<em>_([^~_]|[^_].*?[^~_])_(?!_))%(_end)s', tag = 'em'),
        'q':QuoteNode(tag = 'q', allowed_attributes = "data-related-user data-related-post".split(), regex = r'(?P<q>(?<!")""([^"].*?[^~])"")'),
        'a': LinkNode(tag = 'a', regex = r'(?P<a>\[\[(.*?[^~])\]\])', allowed_attributes = "href title".split(" ")),
        #'a': LinkNode(tag = 'a', regex = r'(?P<a>\[\[(\s*\S+\s*((?P<auxaquote>[\'"]?)(.*?[^~])(?P=auxaquote)\s*)|[^~])\]\])', allowed_attributes = "href title".split(" ")),
        'escape':Node(regex = r'(?P<escape>~(.))', tag = 'span', attrs = {'class':'escaped'}, allowed_attributes = ['class']),
        'autolink': AutoLinkNode(tag = 'a',
#            regex = r'(?P<autolink>((?=^|(?<=\s))(?:ftp|https?)://\S+[^.,;:!?]))(?<!\.jpg|\.gif|\.png)(?<!\.jpeg)(?=\s|$|[.,;:!?])', allowed_attributes = ["href"]),
            regex = r'(?P<autolink>(\b(?:(?:https?|ftp)://)((\S+?)(?!\.jpg|\.gif|\.png|jpeg)\S{4})(?=[.,;:!?]\W|\s|$)))', allowed_attributes = ["href"]),
}
# Headings: **** h1 ****, *** h2 ***, ** h3 ** on their own lines.
TitleNodes = {
    'h1':Node(regex = r'%(_startline)s(?P<h1>\*{4}\s?(.*?[^~])\s?\*{4})%(_endline)s', tag = 'h1'),
    'h2':Node(regex = r'%(_startline)s(?P<h2>\*{3}\s?([^ *].*?[^~])\s?\*{3})%(_endline)s', tag = 'h2'),
    'h3':Node(regex = r'%(_startline)s(?P<h3>\*\*\s?([^ *].*?[^~])\s?\*\*)%(_endline)s', tag = 'h3'),
}
ListNodes = {
'ol':ListNode(tag = 'ol', regex = r'''(?x)(?s)
(?:^|\n)
[ \t]*
(?P<ol>(
(?P<ol_marker>(?:[*#]\s)*(?:\#\s))
[^*#].*?
))
(?=\n\n|\n[ \t]*(?!(?P=ol_marker))|$)
'''),
# 'ul':Node(tag='ul', regex=r'(?:^|\n)(?P<ul>([ \t]*\*[^*#].*(\n|$)([ \t]*[^\s*#].*(\n|$))*([ \t]*[*#]{2}.*(\n|$))*)+)'),
'ul':ListNode(tag = 'ul', regex = r'''(?x)(?s)
(?:^|\n)
[ \t]*
(?P<ul>(
(?P<ul_marker>(?:[*#]\s)*(?:\*\s))
[^*#].*?
))
(?=\n(?=\n)|\n[ \t]*(?!(?P=ul_marker))|$)
'''),
}
# --- wire the registries together ---------------------------------------
# List items may contain nested lists, all inline markup and forced text.
li_node = Node(tag = 'li')
li_node.children = ListNodes
li_node.children.update(InlineNodes)
li_node.children['force'] = BlockNodes['force']
ListNodes['ul'].children = {'li':li_node}
ListNodes['ol'].children = {'li':li_node}
# Root registry: every construct reachable from the document root.
Nodes = {}
Nodes.update(InlineNodes)
Nodes.update(BlockNodes)
Nodes.update(EmptyNodes)
Nodes.update(ListNodes)
Nodes.update(TitleNodes)
for n in BlockNodes:
    BlockNodes[n].children = Nodes #They should have all the possible children
for n, v in TitleNodes.items():
    v.children = InlineNodes # note: this actually imports all the EmptyNodes as InlineNodes's children (see below)
    #v.children = InlineNodes.copy() # use this instead of the previous one to remove images and br from titles!
    v.children['raw'] = EmptyNodes['raw']
for n, v in InlineNodes.items():
    v.children = InlineNodes
    v.children.update(EmptyNodes)
    #v.children['autoimg'] = Nodes['autoimg']
    #v.children['br'] = Nodes['br']
    #v.children['forcein'] = Nodes['forcein']
    #v.children['autolink'] = Nodes['autolink']
# Links may only contain images; autolinks and code contain nothing.
Nodes['a'].children = {'autoimg':Nodes['autoimg'], 'img':Nodes['img']}
Nodes['autolink'].children = {}
Nodes['code'].children = {} #{'br': Nodes['br']}
def transform(instr):
    """
    This is actually the main function: parse the *instr* markup, notify
    the ``parsing_done`` signal with the result, and return the HTML.

    NOTE(review): ``unicode`` makes this Python-2-only; under Python 3
    this final line raises NameError -- confirm the target runtime
    before porting.
    """
    root = Node(children = Nodes)
    r = root.parse(instr)
    parsing_done.send(root.__class__, text = r)
    return unicode(r)
|
from com.sun.star.table.CellHoriJustify import STANDARD as HORIZONTAL_STANDARD
from com.sun.star.table.CellHoriJustify import LEFT as HORIZONTAL_LEFT
from com.sun.star.table.CellHoriJustify import CENTER as HORIZONTAL_CENTER
from com.sun.star.table.CellHoriJustify import RIGHT as HORIZONTAL_RIGHT
# For some reason UNO api has two enums CellVertJustify and CellVertJustify2
# for vertical alignment and from which latter one is the correct one.
from com.sun.star.table.CellVertJustify2 import STANDARD as VERTICAL_STANDARD
from com.sun.star.table.CellVertJustify2 import TOP as VERTICAL_TOP
from com.sun.star.table.CellVertJustify2 import CENTER as VERTICAL_CENTER
from com.sun.star.table.CellVertJustify2 import BOTTOM as VERTICAL_BOTTOM
from movelister.core import cursor
from movelister.utils.alignment import HorizontalAlignment, VerticalAlignment
def setHorizontalAlignmentToRange(sheet, alignment, startColumn, amount):
    """
    This function sets the horizontal alignment of columns in a given range
    to a chosen HorizontalAlignment (enum).  Unknown values fall back to
    the UNO STANDARD justification.
    """
    area = cursor.getSheetContent(sheet)
    cellRange = sheet.getCellRangeByPosition(startColumn, 0, startColumn + amount, len(area) - 1)
    # BUGFIX: the LEFT branch previously assigned HORIZONTAL_STANDARD,
    # leaving the imported HORIZONTAL_LEFT constant unused; map LEFT to the
    # explicit LEFT justification like CENTER/RIGHT do.
    if alignment == HorizontalAlignment.LEFT:
        cellRange.HoriJustify = HORIZONTAL_LEFT
    elif alignment == HorizontalAlignment.CENTER:
        cellRange.HoriJustify = HORIZONTAL_CENTER
    elif alignment == HorizontalAlignment.RIGHT:
        cellRange.HoriJustify = HORIZONTAL_RIGHT
    else:
        cellRange.HoriJustify = HORIZONTAL_STANDARD
def setVerticalAlignmentToRange(sheet, alignment, startCol, startRow, endCol, endRow):
    """
    Apply *alignment* (a VerticalAlignment enum member) to the cell area
    bounded by the given start/end column and row indices.  Unknown
    values fall back to the UNO STANDARD justification.
    """
    justify_for = {
        VerticalAlignment.TOP: VERTICAL_TOP,
        VerticalAlignment.CENTER: VERTICAL_CENTER,
        VerticalAlignment.BOTTOM: VERTICAL_BOTTOM,
    }
    cellRange = sheet.getCellRangeByPosition(startCol, startRow, endCol, endRow)
    cellRange.VertJustify = justify_for.get(alignment, VERTICAL_STANDARD)
def setOptimalWidthToRange(sheet, startColumn, amount):
    """
    Flag every column in [startColumn, startColumn + amount] for
    automatic (optimal) width by setting OptimalWidth to 1 (true).
    """
    lastColumn = startColumn + amount
    columns = sheet.getCellRangeByPosition(startColumn, 0, lastColumn, 1).getColumns()
    columns.OptimalWidth = 1
|
# -*- coding: utf-8 -*-
"""
Copyright 2019 KineticSkunk ITS, Cape Town, South Africa.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from appium.webdriver.common.multi_action import MultiAction
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium import webdriver
from toolium.pageelements import PageElement
from toolium.pageelements import PageElements
from toolium.pageobjects.page_object import PageObject
from appium.webdriver.common.mobileby import MobileBy
from appium.webdriver.common.touch_action import TouchAction
from pageobjects.custom_logger import CustomLogger
from pageobjects.login import LoginPageObject
from pageobjects.home import HomePageObject
import time
class CommonPageObject(PageObject):
    """Base page object with shared element and gesture helpers.

    Maintains a name -> PageElement registry (``page_elements``) filled
    by ``init_web_page_elements`` and provides click/typing/checkbox
    helpers that verify attribute values, plus Appium touch gestures.
    """
    cl = CustomLogger()
    # Caches shared by the helper methods below.
    mobile_page = None
    page_element = None
    page_elements = {}
    mobile_page_element = None
    mobile_page_elements = None
    actual_value = None
    connectivity = None
    # UiAutomator expression that scrolls an item with the given text into view.
    option_locator = 'new UiScrollable(new UiSelector().scrollable(true).instance(0))' \
                     '.scrollIntoView(new UiSelector().text("{}").instance(0));'

    def init_web_page(self, page_name):
        """Initialize mobile pages: resolve *page_name* to a page object,
        navigate to the configured URL and wait for the page to load."""
        self.cl.auto_log_info("Attempting to load page {}".format(page_name))
        switcher = {
            "LoginPageObject": LoginPageObject(),
            "HomePageObject": HomePageObject()
        }
        self.mobile_page = switcher.get(page_name, None)
        self.cl.auto_log_info("Mobile page {} = '{}'".format(page_name, self.mobile_page.__class__.__name__))
        if self.mobile_page is not None:
            url = self.config.get('Test', 'url')
            self.mobile_page = self.mobile_page.wait_until_loaded(self.driver.get('{}'.format(url)))
            self.cl.auto_log_info("Loaded page {}".format(self.mobile_page.__class__.__name__))
        else:
            self.cl.auto_log_error("Failed to load page {}".format(page_name))
        return self

    def init_web_page_elements(self, mobile_page=None):
        """Method to initialize page elements: register every PageElement /
        PageElements attribute of *mobile_page* keyed by attribute name."""
        self.cl.auto_log_info("The mobile_page object = {}".format(mobile_page.__class__.__name__))
        for attribute, value in list(mobile_page.__dict__.items()) + list(
                mobile_page.__class__.__dict__.items()):
            # BUGFIX: 'and' binds tighter than 'or', so the original test
            # admitted any PageElements attribute even when it was 'parent';
            # the parentheses restore the intended exclusion.
            if attribute != 'parent' and (isinstance(value, PageElement) or isinstance(value, PageElements)):
                self.cl.auto_log_info("Element name = {}, value = {}".format(attribute, value))
                self.page_elements[attribute] = value
        return self

    def get_element(self, element_name=None):
        """This method get a given page element, or None when it is missing."""
        try:
            if element_name in self.page_elements:
                self.cl.auto_log_info("Found element {} in '{}'".format(element_name,
                                                                        self.page_elements.__class__.__name__))
                self.cl.auto_log_debug("Element {} = '{}'".format(element_name, self.page_elements[element_name]))
                assert self.page_elements[element_name] is not None
        except AssertionError as error:
            self.cl.auto_log_error("Element {} = {}'".format(element_name, "None"))
            # BUGFIX: the message had no placeholder, so the error text was
            # silently dropped from the log.
            self.cl.auto_log_error("Error message = {}".format(error))
            return None
        else:
            return self.page_elements[element_name]

    def get_mobile_element_attribute_value(self, element_name=None, attribute_name=None):
        """This method get a given page element and verifies with attribute
        values; returns the attribute value or None."""
        actual_value = None
        if element_name is not None:
            actual_value = self.get_element(element_name).get_attribute(attribute_name)
            self.cl.auto_log_debug("{}.{} = {}".format(element_name, attribute_name, actual_value))
        else:
            self.cl.auto_log_error("{} = {}".format(element_name, "None"))
        return actual_value

    def click_element(self, element_name=None, attribute=None, value=None):
        """This method clicks page elements and verifies with attribute
        values; returns True on success, False on verification failure."""
        try:
            self.mobile_page_element = self.get_element(element_name)
            assert str(self.get_mobile_element_attribute_value(element_name, attribute)).lower() == \
                str(value).lower()
        except AssertionError as error:
            self.cl.auto_log_error("{}.{} <> {}'".format(element_name, attribute, value))
            # BUGFIX: include the error text (placeholder was missing).
            self.cl.auto_log_error("Error message = {}".format(error))
            return False
        else:
            self.mobile_page_element.click()
            self.cl.auto_log_debug("Clicked on element '{}'".format(element_name))
            return True
        finally:
            self.mobile_page_element = None

    def set_element_attribute_value(self, element_name=None, attribute=None, value=None):
        """This method set a attribute value to a page element.  Only 'text'
        and 'value' are supported; returns True/False."""
        try:
            self.cl.auto_log_debug("Attempting to set '{}.{}' to '{}'".format(element_name, attribute, value))
            self.mobile_page_element = self.utils.get_web_element(self.get_element(element_name))
            if str(attribute).lower().__eq__("text"):
                self.mobile_page_element.send_keys(value)
            elif str(attribute).lower().__eq__("value"):
                self.mobile_page_element.send_keys(value)
            else:
                err_msg = ("Attribute '{}' is not supported or cannot be set".format(attribute))
                self.cl.auto_log_error(err_msg)
                raise Exception(err_msg)
            # assert str(self.get_mobile_element_attribute_value(element_name, attribute)).lower() == str(value).lower()
        except AssertionError as error:
            err_msg = ("Failed to set '{}.{}' to '{}'".format(element_name, attribute, value))
            self.cl.auto_log_error(err_msg)
            # BUGFIX: include the error text (placeholder was missing).
            self.cl.auto_log_error("Error message = {}".format(error))
            return False
        else:
            return True
        finally:
            self.mobile_page_element = None

    def select_list_value(self, element_name=None, value=None):
        """This method select a list value in list view."""
        self.cl.auto_log_info("Attempting to select list value '{}' in list '{}'".format(value, element_name))
        el = self.get_list_element(element_name, value)
        if el is not None:
            self.utils.get_web_element(el).click()
            return True
        # NOTE(review): mobile_page_element is reset to None by
        # get_list_element's finally block and is never False, so this
        # branch (and the return after the raise) looks unreachable.
        elif self.mobile_page_element is False:
            err_msg = ("List value '{}' was not found in '{}'".format(value, element_name))
            self.cl.auto_log_error(err_msg)
            raise Exception(err_msg)
        return False

    def get_list_element(self, element_name=None, value=None):
        """This method get a list element in list view by matching its
        'text' attribute case-insensitively; returns the element or None."""
        try:
            self.cl.auto_log_info("Attempting to locate list value '{}' in list '{}'".format(value, element_name))
            self.mobile_page_elements = self.get_element(element_name)
            if self.mobile_page_elements is not None:
                for el in self.mobile_page_elements.web_elements:
                    self.cl.auto_log_info("List value = {}".format(el.get_attribute("text")))
                    if value.lower() == str(el.get_attribute("text")).lower():
                        self.cl.auto_log_info("List value {} found = '{}'".format(el.get_attribute("text"), True))
                        return el
                self.cl.auto_log_error("List value {} found = '{}'".format(value, False))
                return None
            else:
                err_msg = "Element '{}' is undefined".format(element_name)
                self.cl.auto_log_error(err_msg)
                raise Exception(err_msg)
        finally:
            self.mobile_page_elements = None

    def check_element(self, element_name=None, checks=None):
        """This method select the checkbox: *checks* is 'checked' or
        'unchecked'; returns True on success, False on verification failure."""
        try:
            switcher = {
                "checked": True,
                "unchecked": False
            }
            perf_check = switcher.get(checks, None)
            msg = None
            if perf_check is True:
                msg = "Attempting to check element = {}".format(element_name)
            elif perf_check is False:
                msg = "Attempting to uncheck element = {}".format(element_name)
            elif perf_check is None:
                err_msg = "'{}' is not a supported check box action".format(checks)
                self.cl.auto_log_error(err_msg)
                raise Exception(err_msg)
            self.cl.auto_log_info(msg)
            self.mobile_page_element = self.utils.get_web_element(self.get_element(element_name))
            # TODO add instance check
            if perf_check is True:
                msg = "Checking '{}'".format(element_name)
                self.mobile_page_element.check()
            elif perf_check is False:
                msg = "Un-checking '{}'".format(element_name)
                self.mobile_page_element.uncheck()
            self.cl.auto_log_info(msg)
            if perf_check is True:
                assert self.mobile_page_element.is_selected() is True
            elif perf_check is False:
                assert self.mobile_page_element.is_selected() is False
            # BUGFIX: previously fell through returning None on success;
            # return True to match click_element/set_element_attribute_value.
            return True
        except AssertionError as error:
            err_msg = ("Failed to '{}' checkbox '{}'".format(checks, element_name))
            self.cl.auto_log_error(err_msg)
            # BUGFIX: include the error text (placeholder was missing).
            self.cl.auto_log_error("Error message = {}".format(error))
            return False
        finally:
            self.mobile_page_element = None

    def clear_element(self, element_name):
        """This method clear the element value."""
        self.cl.auto_log_info("Attempting to clear element value '{}'".format(element_name))
        el = self.get_element(element_name)
        if el is not None:
            self.utils.get_web_element(el).clear()
            return True
        # NOTE(review): returns self (truthy) when the element is missing,
        # unlike the sibling helpers that return False -- confirm callers
        # before changing.
        return self

    def switch_off_connectivity(self):
        """This method toggles the wifi and waits for the state change."""
        self.connectivity = \
            self.driver.toggle_wifi()
        time.sleep(9)

    def scroll_into_view(self, element_name):
        """This method scrolls an element into view via UiAutomator."""
        self.driver.find_element(MobileBy.ANDROID_UIAUTOMATOR, self.option_locator.format(element_name))
        self.cl.auto_log_info("Clicked on element '{}'".format(element_name))
        return self

    def drag_element(self, first_element=None, second_element=None):
        """This method drags a element to another element."""
        # NOTE(review): the two lookups below log at error level for what
        # are informational messages.
        el1 = self.utils.get_web_element(self.get_element(first_element))
        self.cl.auto_log_error("Attempting to find element value '{}'".format(first_element))
        el2 = self.utils.get_web_element(self.get_element(second_element))
        self.cl.auto_log_error("Attempting to find element value '{}'".format(second_element))
        self.driver.scroll(el1, el2)
        return self

    def zoom_element(self, first_element=None, second_element=None):
        """This method uses touch action to zoom an element."""
        el1 = self.utils.get_web_element(self.get_element(first_element))
        self.cl.auto_log_error("Attempting to find element value '{}'".format(first_element))
        el2 = self.utils.get_web_element(self.get_element(second_element))
        self.cl.auto_log_error("Attempting to find element value '{}'".format(second_element))
        # Two opposing long-press drags performed simultaneously.
        action1 = TouchAction()
        action1.long_press(el1, 10, 20).move_to(el2, 10, 200).release()
        action2 = TouchAction()
        action2.long_press(el2, 10, 10).move_to(el1, 10, 100).release()
        ma = MultiAction(self.driver)
        ma.add(action1, action2)
        ma.perform()

    def drag_hold(self, element_name=None):
        """This method uses multi touch action to double tap and drag element."""
        el1 = self.utils.get_web_element(self.get_element(element_name))
        self.cl.auto_log_error("Attempting to find element value '{}'".format(element_name))
        action0 = TouchAction().tap(el1).move_to(el1, 10, 200)
        action1 = TouchAction().tap(el1).move_to(el1, 200, 10)
        ma = MultiAction(self.driver)
        ma.add(action0, action1)
        ma.perform()
|
#!/usr/bin/env python
""""tfim_diag.py
Chris Herdman
06.07.2017
--Exact diagonalization for transverse field Ising models
--Requires: tfim.py, numpy, scipy.sparse, scipy.linalg, progressbar
"""
import thesis.hamiltonian.tfim as tfim
import numpy as np
from scipy import sparse
from scipy.sparse import linalg as spla
from scipy import linalg
import argparse
###############################################################################
def main():
# Parse command line arguements
###################################
parser = argparse.ArgumentParser(description=(
"Exact numerical diagonalization of "
"transverse field Ising Models of the form:\n"
"H = -\sum_{ij} J_{ij}\sigma^z_i \sigma^z_j"
"- h \sum_i \sigma^x_i") )
parser.add_argument('lattice_specifier',
help=( "Either: L (linear dimensions of the system)"
" or the filename base of matrix files") )
parser.add_argument('-D', type=int,default=1,
help='Number of spatial dimensions')
parser.add_argument('--obc',action='store_true',
help='Open boundary condintions (deault is PBC)')
parser.add_argument('--h_min', type=float, default=0.0,
help='Minimum value of the transverse field')
parser.add_argument('--h_max', type=float, default=4.0,
help='Maximum value of the transverse field')
parser.add_argument('--dh', type=float, default=0.5,
help='Tranverse fied step size')
parser.add_argument('-J', type=float, default=1.0,
help='Nearest neighbor Ising coupling')
parser.add_argument('-k', type=int,default=3,
help='Number eigenvalues to resolve')
parser.add_argument('-o', default='output', help='output filename base')
parser.add_argument('--full',action='store_true',
help='Full (rather than Lanczos) diagonalization')
parser.add_argument('--save_state',action='store_true',
help='Save ground state to file')
parser.add_argument('--init_v0',action='store_true',
help='Start Lanzcos with previous ground state')
parser.add_argument('--load', action='store_true',
help='Load matrices from file' )
parser.add_argument('--fidelity', action='store_true',
help='Compute fidelities' )
parser.add_argument('--delta_h_F0', type=float, default = 1E-4,
help='Inital \Delta h for fidelity' )
parser.add_argument('--N_F_steps', type=int, default = 3,
help='Number of steps for fidelity' )
parser.add_argument('--overlap', action='store_true',
help='Compute the overlap distribution' )
parser.add_argument('--N_ovlp_samples', type=int, default = 10**4,
help='Number of samples of the overlap distribution' )
parser.add_argument('--SK', action='store_true',
help='SK model with infinite range ZZ interactions' )
args = parser.parse_args()
###################################
# Load matricies from file
###################################
load_matrices = args.load
if load_matrices:
loaded_params, JZZ, ZZ, Mz, Ms = tfim.load_diag_ME(
args.lattice_specifier)
Mx = tfim.load_Mx(args.lattice_specifier)
###################################
# Set calculation Parameters
###################################
out_filename = args.o + '.dat'
if load_matrices:
L = loaded_params['L']
D = len(L)
PBC = loaded_params['PBC']
J = loaded_params['J']
else:
D = args.D
L = [ int(args.lattice_specifier) for d in range(D) ]
PBC = not args.obc
J = args.J
k = args.k
init_v0 = args.init_v0
full_diag = args.full
SK = args.SK
save_state = args.save_state
if save_state:
state_filename = args.o + '_psi0.dat'
fidelity_on = args.fidelity
if fidelity_on:
delta_h_F0 = args.delta_h_F0
N_F_steps = args.N_F_steps
dhf = np.flip(delta_h_F0/(2**(np.arange(N_F_steps))),axis=0)
F2 = np.zeros(dhf.shape)
F2_filename = args.o + '_F2.dat'
overlap_on = args.overlap
if overlap_on:
N_ovlp_samples = args.N_ovlp_samples
Pq_filename = args.o + '_Pq.dat'
h_arr = np.arange(args.h_min,args.h_max+args.dh/2,args.dh)
parameter_string = ("D = {}, L = {}, PBC = {}, J = {},"
" k = {}".format(D, L, PBC, J, k) )
print('\tStarting tfim_diag using parameters:\t' + parameter_string)
###################################
# Setup physical quantities
##################################
# Quantities to write ouput file
phys_keys = ['h', 'e0', 'Delta_1', 'Delta_2', 'Mx', 'Mz2', 'Cnn', 'Ms2']
phys = {} # Dictionary for values
##################################
# Build lattice and basis
###################################
lattice = tfim.Lattice(L, PBC)
N = lattice.N
basis = tfim.IsingBasis(lattice)
###################################
# Setup output data files
##################################
width = 25
precision = 16
header_list = [tfim.phys_labels[key] for key in phys_keys]
header = ''.join(['{:>{width}}'.format(head,width=width)
for head in header_list])
out_file = open(out_filename, 'w')
print( "\tData will write to {}".format(out_filename) )
out_file.write( '#\ttfim_diag parameters:\t' + parameter_string + '\n'
+ '#' + header[1:] + '\n' )
if save_state:
state_file = open(state_filename, 'w')
print( "\tGround state will write to {}".format(state_filename) )
state_file.write(
"# tfim_diag parameters:\t{}\n".format(parameter_string)
+ "#{:>{width_h}}{:>{width_psi}}\n".format( 'h', '\psi_0' ,
width_h= ( width - 1 ), width_psi=(width +1 ) ) )
if fidelity_on:
F2_header =( "#{:>{width}}".format( 'h', width=(width - 1) )
+ ''.join(['{:{width}.{prec}e}'.format(dhfi,
width=(width+1), prec=(precision-1)) for dhfi in dhf] ) )
F2_file = open(F2_filename, 'w')
print( "\tFidelities will write to {}".format(F2_filename) )
F2_file.write( '#\ttfim_diag parameters:\t' + parameter_string + '\n'
+ '#' + F2_header[1:] + '\n' )
if overlap_on:
q = np.arange(-N,N+1,2)/float(N)
Pq_header =( "#{:>{width}}".format( 'h', width=(width - 1) )
+ ''.join(['{:{width}.{prec}e}{:>{width}}'.format(qi,
'error', width=(width+1), prec=(precision-1)
) for qi in q] ) )
Pq_file = open(Pq_filename, 'w')
print( "\tOverlap distributions will write to {}".format(Pq_filename) )
Pq_file.write( '#\ttfim_diag parameters:\t' + parameter_string + '\n'
+ '#' + Pq_header[1:] + '\n' )
##################################
# Build Matricies
###################################
if not load_matrices:
print( '\tBuilding matrices...' )
JZZ, ZZ = tfim.z_correlations_NN(lattice,basis,J)
Mz, Ms = tfim.z_magnetizations(lattice,basis)
Mx = tfim.build_Mx(lattice,basis)
if SK:
Jij = tfim.Jij_instance(N,J)
#Jij = np.ones((N/2,N))/N
JZZ = tfim.JZZ_SK(basis,Jij)
###################################
# Main Diagonalization Loop
#######################################################
if full_diag:
print("\tStarting full diagaonalization with h in ({},{}), "
"dh = {}".format(h_arr[0], h_arr[-1],args.dh) )
else:
print("\tStarting sparse diagaonalization with k={} and "
"h in ({},{}), dh ={}".format(k,h_arr[0], h_arr[-1],args.dh) )
v0 = None
for h in h_arr:
H = -JZZ - h*Mx
if full_diag:
# Full diagonalize
E,v = linalg.eigh(H.todense())
else:
# Sparse diagonalize
E,v = spla.eigsh(H, k=k, which='SA', v0=v0)
# Sort eigenvalues/vectors
sort_order = np.argsort(E)
E = E[sort_order]
v = v[:,sort_order]
# Grab Energies & ground state
e0 = E[0]/N
Delta = E - E[0]
psi0 = v[:,0]
# Set starting vector for Lanczos:
if not full_diag and init_v0:
v0 = psi0
# Compute expectation values
###################################
Mx0 = np.real((psi0.conj().T).dot(Mx.dot(psi0)))/N
Mz20 = np.real((psi0.conj().T).dot((Mz.power(2)).dot(psi0)))/(N**2)
Cnn = np.real((psi0.conj().T).dot(ZZ.dot(psi0)))/lattice.N_links
Ms20 = np.real((psi0.conj().T).dot((Ms.power(2)).dot(psi0)))/(N**2)
###################################
# Compute fidelities
###################################
if fidelity_on:
for i, dhfi in enumerate(dhf):
H_F = -JZZ - (h+dhfi)*Mx
E_F,v_F = spla.eigsh(H_F, k=2, which='SA', v0=psi0)
# Sort eigenvalues/vectors
sort_order_F = np.argsort(E_F)
E_F = E_F[sort_order_F]
v_F = v_F[:,sort_order_F]
F2[i] = (np.absolute(np.vdot(v_F[:,0], psi0)))**2
###################################
# Overlap distribution
###################################
if overlap_on:
Pq,Pq_err,q = basis.sample_overlap_distribution(psi0,N_ovlp_samples)
###################################
# Put physical values in phys dictionary
###################################
phys['h'] = h
phys['e0'] = e0
phys['Delta_1'] = Delta[1]
phys['Delta_2'] = Delta[2]
phys['Mx'] = Mx0
phys['Mz2'] = Mz20
phys['Cnn'] = Cnn
phys['Ms2'] = Ms20
###################################
# Write data to output files
###################################
data_list = [phys[key] for key in phys_keys]
data_line = ''.join(['{:{width}.{prec}e}'.format(data,width=width,
prec=precision) for data in data_list])
out_file.write(data_line+ '\n')
# Write psi0 to file
if save_state:
np.savetxt(state_file,
np.concatenate(([h],psi0)).reshape((1,psi0.shape[0]+1)),
fmt='%{}.{}e'.format(width,precision-1) )
# Write fidelities to file
if fidelity_on:
np.savetxt(F2_file,
np.concatenate(([h],F2)).reshape((1,F2.shape[0]+1)),
fmt='%{}.{}e'.format(width,precision-1) )
# Write overlap distribution to file
if overlap_on:
Pq_line = np.zeros(1+2*len(Pq))
Pq_line[0] = h
Pq_line[1::2] = Pq
Pq_line[2::2] = Pq_err
np.savetxt(Pq_file, Pq_line.reshape((1,Pq_line.shape[0])),
fmt='%{}.{}e'.format(width,precision-1) )
#######################################################
# Close files
out_file.close()
if save_state:
state_file.close()
if fidelity_on:
F2_file.close()
if overlap_on:
Pq_file.close()
# Script entry point: run the diagonalization driver.
if __name__ == "__main__":
    main()
|
# -*- coding: utf8 -*-
import math
import numpy
from functools import reduce
from rclpy.duration import Duration
from sensor_msgs.msg import Imu
import rclpy
from rclpy.node import Node
from sklearn.base import BaseEstimator
from sklearn.metrics import accuracy_score
class FallChecker(BaseEstimator):
    """Threshold-based fall detector for a humanoid robot.

    Combines fused orientation angles (derived from the IMU quaternion)
    with gyro rates to decide whether the robot is falling and in which
    direction.  Thresholds default to ROS parameters read from *node*
    unless overridden via constructor arguments.  The class also exposes
    a minimal scikit-learn estimator interface (fit/score/predict) so it
    can be scored against learned classifiers.
    """
    def __init__(self, node, thresh_gyro_pitch=None,
                 thresh_gyro_roll=None,
                 thresh_orient_pitch=None,
                 thresh_orient_roll=None,
                 smoothing=None):
        self.node = node
        # Each threshold falls back to the node's ROS parameter when the
        # corresponding constructor argument is None.
        self.thresh_gyro_pitch = self.node.get_parameter("falling_thresh_gyro_pitch").get_parameter_value().double_value \
            if thresh_gyro_pitch is None else thresh_gyro_pitch
        self.thresh_gyro_roll = self.node.get_parameter("falling_thresh_gyro_roll").get_parameter_value().double_value \
            if thresh_gyro_roll is None else thresh_gyro_roll
        # Orientation thresholds are configured in degrees, stored in radians.
        self.thresh_orient_pitch = math.radians(self.node.get_parameter("falling_thresh_orient_pitch").get_parameter_value().double_value) \
            if thresh_orient_pitch is None else thresh_orient_pitch
        self.thresh_orient_roll = math.radians(self.node.get_parameter("falling_thresh_orient_roll").get_parameter_value().double_value) \
            if thresh_orient_roll is None else thresh_orient_roll
        # Window (seconds) over which check_falling() results are smoothed.
        self.smoothing = self.node.get_parameter("smooth_threshold").get_parameter_value().double_value if smoothing is None else smoothing
        self.smoothing_list = []  # recent (timestamp, result) pairs
        self.counter = 0
        self.last_result = 0
        # Direction codes returned by check_falling() / check_fallen().
        self.STABLE = 0
        self.FRONT = 1
        self.BACK = 2
        self.LEFT = 3
        self.RIGHT = 4
    def update_reconfigurable_values(self, config, level):
        """Dynamic-reconfigure callback: refresh thresholds from *config*."""
        # Dynamic Reconfigure
        self.thresh_gyro_pitch = config["falling_thresh_gyro_pitch"]
        self.thresh_gyro_roll = config["falling_thresh_gyro_roll"]
        # Orientation values arrive in degrees; convert to radians.
        self.thresh_orient_pitch = math.radians(config["falling_thresh_orient_pitch"])
        self.thresh_orient_roll = math.radians(config["falling_thresh_orient_roll"])
        return config
    def check_falling(self, not_much_smoothed_gyro, quaternion):
        """Checks if the robot is currently falling and in which direction.

        Returns one of STABLE/FRONT/BACK/LEFT/RIGHT, smoothed over the
        last ``self.smoothing`` seconds of results.
        """
        # Convert quaternion to fused angles
        fused_roll, fused_pitch, _ = self.fused_from_quat(quaternion)
        # setting the fall quantification function
        roll_fall_quantification = self.calc_fall_quantification(
            self.thresh_orient_roll,
            self.thresh_gyro_roll,
            fused_roll,
            not_much_smoothed_gyro[0])
        pitch_fall_quantification = self.calc_fall_quantification(
            self.thresh_orient_pitch,
            self.thresh_gyro_pitch,
            fused_pitch,
            not_much_smoothed_gyro[1])
        if roll_fall_quantification + pitch_fall_quantification == 0:
            result = self.STABLE
        else:
            # The axis with the larger quantification decides the direction.
            if pitch_fall_quantification > roll_fall_quantification:
                # detect the falling direction
                if fused_pitch < 0:
                    result = self.BACK
                # detect the falling direction
                else:
                    result = self.FRONT
            else:
                # detect the falling direction
                if fused_roll < 0:
                    result = self.LEFT
                # detect the falling direction
                else:
                    result = self.RIGHT
        # Prune old elements from smoothing history
        self.smoothing_list = list(filter(
            lambda x: x[0] > self.node.get_clock().now() - Duration(seconds=self.smoothing),
            self.smoothing_list))
        # Add the current element
        self.smoothing_list.append((self.node.get_clock().now(), result))
        # List only including the results not the whole tuples
        results_list = list(zip(*self.smoothing_list))[1]
        # Check if stable is not in the list otherwise say we are stable
        # This smooths the output but prevents the output of stable when jittering between e.g. right and front
        if self.STABLE in results_list:
            result = self.STABLE
        return result
    def calc_fall_quantification(self, falling_threshold_orientation, falling_threshold_gyro, current_axis_euler,
                                 current_axis__gyro):
        """Return a fall severity for one axis; 0 means 'not falling'."""
        # check if you are moving forward or away from the perpendicular position, by comparing the signs.
        moving_more_upright = numpy.sign(current_axis_euler) != numpy.sign(current_axis__gyro)
        # Check if the orientation is over the point of no return threshold
        over_point_of_no_return = abs(current_axis_euler) > falling_threshold_orientation
        # Calculate quantification if we are moving away from our upright position or if we are over the point of no return
        if not moving_more_upright or over_point_of_no_return:
            # Orientation scalar in [0, 1]: 1 when upright, 0 at/over the
            # orientation threshold.  (Original spelling: "skalar".)
            skalar = max((falling_threshold_orientation - abs(current_axis_euler)) / falling_threshold_orientation, 0)
            # checking if the rotation velocity is lower than the the threshold
            if falling_threshold_gyro * skalar < abs(current_axis__gyro):
                # returning the fall quantification function
                return abs(current_axis__gyro) * (1 - skalar)
        return 0
    def fit(self, x, y):
        """No-op: this classifier is purely rule-based."""
        # we have to do nothing, as we are not actually fitting any model
        self.node.get_logger().warn("You can not train this type of classifier", once=True)
        pass
    def score(self, X, y, sample_weight=None):
        """sklearn-style accuracy of predict(X) against labels *y*."""
        return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
    def predict(self, x):
        """Run check_falling over each sample row of *x*.

        Columns 3:6 feed the gyro argument and 6:10 the quaternion,
        matching check_falling's signature.
        """
        # only take gyro and orientation from data
        y = []
        for entry in x:
            prediction = self.check_falling(entry[3:6], entry[6:10])
            y.append(prediction)
        return y
    def check_fallen(self, quaternion, not_much_smoothed_gyro):
        """Check if the robot has fallen and is lying on the floor. Returns animation to play, if necessary."""
        # Only report 'fallen' when the robot is nearly motionless
        # (mean absolute gyro below 0.2 -- presumably rad/s, TODO confirm).
        if numpy.mean(numpy.abs(not_much_smoothed_gyro)) >= 0.2:
            return None
        # Convert quaternion to fused angles
        fused_roll, fused_pitch, _ = self.fused_from_quat(quaternion)
        # Decides which side is facing downwards.
        if fused_pitch > math.radians(60):
            self.node.get_logger().info("FALLEN TO THE FRONT")
            return self.FRONT
        if fused_pitch < math.radians(-60):
            self.node.get_logger().info("FALLEN TO THE BACK")
            return self.BACK
        if fused_roll > math.radians(60):
            self.node.get_logger().info("FALLEN TO THE RIGHT")
            return self.RIGHT
        if fused_roll < math.radians(-60):
            self.node.get_logger().info("FALLEN TO THE LEFT")
            return self.LEFT
        # If no side is facing downwards, the robot is not fallen yet.
        return None
    def fused_from_quat(self, q):
        """Convert quaternion *q* to (fused_roll, fused_pitch, fused_yaw).

        *q* is expected in (x, y, z, w) order -- the sin(pitch) term below
        is the standard 2*(y*w - x*z) identity for that ordering.
        """
        # Fused yaw of Quaternion
        fused_yaw = 2.0 * math.atan2(q[2], q[3])  # Output of atan2 is [-pi,pi], so this expression is in [-2*pi,2*pi]
        if fused_yaw > math.pi:
            fused_yaw -= 2 * math.pi  # fused_yaw is now in[-2 * pi, pi]
        if fused_yaw <= -math.pi:
            fused_yaw += 2 * math.pi  # fused_yaw is now in (-pi, pi]
        # Calculate the fused pitch and roll
        stheta = 2.0 * (q[1] * q[3] - q[0] * q[2])
        sphi = 2.0 * (q[1] * q[2] + q[0] * q[3])
        # Clamp before asin to guard against numerical drift outside [-1, 1].
        if stheta >= 1.0:  # Coerce stheta to[-1, 1]
            stheta = 1.0
        elif stheta <= -1.0:
            stheta = -1.0
        if sphi >= 1.0:  # Coerce sphi to[-1, 1]
            sphi = 1.0
        elif sphi <= -1.0:
            sphi = -1.0
        fused_pitch = math.asin(stheta)
        fused_roll = math.asin(sphi)
        return fused_roll, fused_pitch, fused_yaw
|
"""Helper script to launch the bot."""
# Standard library:
import configparser
import logging
import os
import re
from typing import List
# disquip.
from disquip.bot import BotHelper, DisQuipBot
from disquip.discover import AudioCollection
from disquip.normalize import normalize
def split(str_in: str) -> List[str]:
    """Return the comma-separated pieces of *str_in*, whitespace-trimmed."""
    pieces = re.split(r'\s*,\s*', str_in)
    return [piece.strip() for piece in pieces]
def main():
    """Parse disquip.ini and fire up the bot!"""
    # Load the INI configuration; everything below reads from it.
    config = configparser.ConfigParser()
    config.read('disquip.ini')
    dqc = config['disquip']
    # Set up logging before any other component can emit messages.
    logging.basicConfig(
        level=getattr(logging, dqc['log_level'].upper()),
        format="%(asctime)s [%(levelname)s] [%(name)s]: %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S%z"
    )
    audio_extensions = split(dqc['audio_extensions'])
    ffmpeg_path = dqc['ffmpeg_path']
    if int(dqc['pre_normalize']):
        # Mirror the audio tree into a sibling "<dir>_normalized" directory.
        audio_directory = dqc['audio_directory'] + '_normalized'
        try:
            os.mkdir(audio_directory)
        except FileExistsError:
            pass  # Already created on a previous run; nothing to do.
        for sub_dir in os.scandir(dqc['audio_directory']):
            if not sub_dir.is_dir():
                continue  # Only sub-directories hold audio stores.
            # Write loudness-normalized copies into the mirrored sub-dir.
            normalize(in_dir=sub_dir.path,
                      out_dir=os.path.join(audio_directory, sub_dir.name),
                      extensions=audio_extensions, ffmpeg_path=ffmpeg_path)
    else:
        audio_directory = dqc['audio_directory']
    # Index every audio file under the chosen directory.
    audio_collection = AudioCollection(
        top_dir=audio_directory,
        audio_store_kw_args={'audio_extensions': audio_extensions}
    )
    # Command aliases: comma-separated values in the [aliases] section.
    aliases = {key: split(value) for key, value in config['aliases'].items()}
    bot_helper = BotHelper(
        cmd_prefix=dqc['cmd_prefix'], audio_collection=audio_collection,
        aliases=aliases)
    bot = DisQuipBot(bot_helper=bot_helper, ffmpeg=ffmpeg_path)
    # Blocking call: connects to Discord and runs until shutdown.
    bot.run(dqc['api_token'])
# Script entry point.
if __name__ == '__main__':
    main()
|
# 1081. Smallest Subsequence of Distinct Characters
class Solution:
    def smallestSubsequence(self, text: str) -> str:
        """Return the lexicographically smallest subsequence of *text*
        containing every distinct character exactly once.

        Greedy monotonic-stack scan: keep the partial answer as a stack
        and pop any larger character that still occurs later in *text*.
        """
        final_index = {ch: pos for pos, ch in enumerate(text)}
        kept = []
        for pos, ch in enumerate(text):
            if ch in kept:
                continue
            # Pop kept characters larger than ch that reappear after pos;
            # they can be re-added later in a better position.
            while kept and kept[-1] > ch and pos < final_index[kept[-1]]:
                kept.pop()
            kept.append(ch)
        return "".join(kept)
from __future__ import division
from builtins import object
import numpy as np
from scipy.ndimage import convolve
from sporco import fft
class TestSet01(object):
    """Tests for sporco.fft norm identities and FFT-based convolution."""
    def setup_method(self, method):
        # Deterministic fixtures for every test in this class.
        np.random.seed(12345)
    def test_01(self):
        # Parseval-style identity for the complex FFT.
        signal = np.random.randn(16, 8)
        spectrum = fft.fftn(signal, axes=(0,))
        energy = np.linalg.norm(signal)**2
        energy_freq = fft.fl2norm2(spectrum, axis=(0,))
        assert np.abs(energy - energy_freq) < 1e-12
    def test_02(self):
        # Same identity for the real FFT, which needs the input shape.
        signal = np.random.randn(16, )
        spectrum = fft.rfftn(signal, axes=(0,))
        energy = np.linalg.norm(signal)**2
        energy_freq = fft.rfl2norm2(spectrum, xs=signal.shape, axis=(0,))
        assert np.abs(energy - energy_freq) < 1e-12
    def test_03(self):
        # 2-D convolution via fftconv against a hand-computed result.
        lhs = np.array([[0, 1], [2, 3]])
        rhs = np.array([[4, 5], [6, 7]])
        expected = np.array([[38, 36], [30, 28]])
        assert np.allclose(fft.fftconv(lhs, rhs, axes=(0, 1)), expected)
    def test_04(self):
        # With an origin shift, fftconv matches scipy.ndimage.convolve.
        kernel = np.random.randn(5,)
        impulse = np.zeros((12,))
        impulse[4] = 1.0
        reference = convolve(impulse, kernel)
        via_fft = fft.fftconv(kernel, impulse, axes=(0,), origin=(2,))
        assert np.allclose(reference, via_fft)
|
import pygame
import img
#imagen (image) -> surface
#pantalla (screen) -> surface (the main one)
#posicion (position) -> (int, int)
#directorio (directory) -> str
class background(img.img):
    """Background image drawn on a screen surface.

    Thin subclass of ``img.img`` whose only difference is the default
    asset directory ("bg/").  It is recommended to keep a single
    background per screen.
    """
    # __init__: self, surface, str, bool, str, (int, int) -> background
    def __init__(self, pantalla = None, imagen = "",alpha = False, directorio = "bg/", posicion = (0,0)):
        # Delegate straight to the img base class (explicit base call).
        img.img.__init__(self,pantalla,imagen,alpha,directorio,posicion)
|
# -*- coding: utf-8 -*-
# This module is not used directly but is referenced by config.yaml.
# This must be imported before `pywb` is imported in `via.app`.
import via.rewriter # noqa: F401
from via._version import get_version
# Public API of this package: just the version string, resolved once
# at import time.
__all__ = ("__version__",)
__version__ = get_version()
|
# Program to read a string and print it in reverse order.
s = input("Enter your name")
length = len(s)
print("length of the string", length)
# Walk the characters back-to-front, space-separated on one line.
for ch in reversed(s):
    print(ch, end=" ")
|
import os
import re
import torch
import torchvision.transforms as transforms
import torchvision.datasets as torchdata
import torchvision.models as torchmodels
import numpy as np
import shutil
from random import randint, sample
from utils.fmow_dataloader import CustomDatasetFromImages
def save_args(__file__, args):
    """Snapshot the running script and its CLI arguments into args.cv_dir."""
    # Copy the script itself so the checkpoint directory is self-describing.
    script_name = os.path.basename(__file__)
    shutil.copy(script_name, args.cv_dir)
    # Record the argparse namespace as plain text alongside it.
    args_path = args.cv_dir + '/args.txt'
    with open(args_path, 'w') as f:
        f.write(str(args))
def performance_stats(policies, rewards, matches):
    """Aggregate training statistics over a list of mini-batches.

    Args:
        policies: list of (B, P) 0/1 float tensors of sampled patch policies.
        rewards: list of reward tensors.
        matches: list of float tensors with 1.0 for correct predictions.

    Returns:
        Tuple of (accuracy, mean reward, mean patches-per-image,
        std of patches-per-image, set of distinct policy bit-strings).
    """
    # Print the performace metrics including the average reward, average number
    # and variance of sampled num_patches, and number of unique policies
    policies = torch.cat(policies, 0)
    rewards = torch.cat(rewards, 0)
    accuracy = torch.cat(matches, 0).mean()
    reward = rewards.mean()
    num_unique_policy = policies.sum(1).mean()
    variance = policies.sum(1).std()
    # FIX: np.int / np.str were deprecated aliases removed in NumPy >= 1.24;
    # the builtin int / str types are the documented replacements.
    policy_set = [p.cpu().numpy().astype(int).astype(str) for p in policies]
    policy_set = set([''.join(p) for p in policy_set])
    return accuracy, reward, num_unique_policy, variance, policy_set
def compute_reward(preds, targets, policy, penalty):
    """Reward favoring policies that drop patches only when the
    classifier still categorizes the image correctly.

    Returns (reward column vector, per-sample correctness as floats).
    """
    # Fraction of patches the agent chose to keep, per sample.
    fraction_kept = policy.sum(1).float() / policy.size(1)
    # Quadratic sparsity bonus: fewer patches -> larger bonus.
    sparsity_bonus = 1.0 - fraction_kept ** 2
    _, predicted = preds.max(1)
    correct = (predicted == targets).data
    # Correct samples earn the bonus; misclassified ones get the penalty.
    reward = sparsity_bonus.clone()
    reward[~correct] = penalty
    reward = reward.unsqueeze(1)
    return reward, correct.float()
def get_transforms(rnet, dset):
    """Return (train, test) torchvision transform pipelines for *dset*.

    *rnet* is unused but kept for interface compatibility with callers
    that pass both halves of the model tag.
    """
    if dset=='C10' or dset=='C100':
        # CIFAR statistics, expressed on the [0, 1] tensor scale.
        mean = [x/255.0 for x in [125.3, 123.0, 113.9]]
        std = [x/255.0 for x in [63.0, 62.1, 66.7]]
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    elif dset=='ImgNet':
        # Standard ImageNet statistics.
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        # FIX: transforms.Scale was a deprecated alias removed from
        # torchvision; Resize is the documented, behaviorally identical
        # replacement.
        transform_train = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    elif dset=='fMoW':
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        transform_train = transforms.Compose([
            transforms.Resize(224),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
        transform_test = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean, std)
        ])
    return transform_train, transform_test
def agent_chosen_input(input_org, policy, mappings, patch_size):
    """ Generate masked images w.r.t policy learned by the agent.

    Args:
        input_org: (B, C, H, W) image batch (4-D, spatial dims last).
        policy: (B, P) tensor; entry 1 keeps patch index ``pl_ind``.
        mappings: list of [row, col] top-left offsets, one per patch index.
        patch_size: side length of each square patch.

    Returns:
        CUDA tensor of the same shape where dropped patches are zeroed.
    """
    input_full = input_org.clone()
    # Start from an all-zero canvas of the same shape (default float dtype).
    sampled_img = torch.zeros([input_org.shape[0], input_org.shape[1], input_org.shape[2], input_org.shape[3]])
    for pl_ind in range(policy.shape[1]):
        # Per-sample keep mask for this patch index.
        mask = (policy[:, pl_ind] == 1).cpu()
        # Copy the patch for all samples, then zero it for samples whose
        # policy dropped it (mask broadcast over channel/height/width).
        sampled_img[:, :, mappings[pl_ind][0]:mappings[pl_ind][0]+patch_size, mappings[pl_ind][1]:mappings[pl_ind][1]+patch_size] = input_full[:, :, mappings[pl_ind][0]:mappings[pl_ind][0]+patch_size, mappings[pl_ind][1]:mappings[pl_ind][1]+patch_size]
        sampled_img[:, :, mappings[pl_ind][0]:mappings[pl_ind][0]+patch_size, mappings[pl_ind][1]:mappings[pl_ind][1]+patch_size] *= mask.unsqueeze(1).unsqueeze(1).unsqueeze(1).float()
    input_org = sampled_img
    return input_org.cuda()
def action_space_model(dset):
    """Model the action space by tiling the image into equal patches.

    Returns (list of [row, col] patch offsets, image size, patch size).
    """
    # Image/patch geometry per dataset family.
    if dset == 'C10' or dset == 'C100':
        img_size, patch_size = 32, 8
    elif dset == 'fMoW':
        img_size, patch_size = 224, 56
    elif dset == 'ImgNet':
        img_size, patch_size = 224, 56
    # Enumerate the top-left corner of every non-overlapping patch,
    # row-major over the grid.
    offsets = range(0, img_size, patch_size)
    mappings = [[cl, rw] for cl in offsets for rw in offsets]
    return mappings, img_size, patch_size
# Pick from the datasets available and the hundreds of models we have lying around depending on the requirements.
def get_dataset(model, root='data/'):
    """Build (trainset, testset) for the dataset encoded in *model*.

    *model* is '<rnet>_<dset>' (e.g. 'R32_C10'); only the dset half
    selects the data source, the rnet half is forwarded to
    get_transforms.  CIFAR sets download on demand; ImgNet/fMoW expect
    data already present under *root*.
    """
    rnet, dset = model.split('_')
    transform_train, transform_test = get_transforms(rnet, dset)
    if dset=='C10':
        trainset = torchdata.CIFAR10(root=root, train=True, download=True, transform=transform_train)
        testset = torchdata.CIFAR10(root=root, train=False, download=True, transform=transform_test)
    elif dset=='C100':
        trainset = torchdata.CIFAR100(root=root, train=True, download=True, transform=transform_train)
        testset = torchdata.CIFAR100(root=root, train=False, download=True, transform=transform_test)
    elif dset=='ImgNet':
        trainset = torchdata.ImageFolder(root+'/ImageNet/train/', transform_train)
        testset = torchdata.ImageFolder(root+'/ImageNet/test/', transform_test)
    elif dset=='fMoW':
        trainset = CustomDatasetFromImages(root+'/fMoW/train.csv', transform_train)
        testset = CustomDatasetFromImages(root+'/fMoW/test.csv', transform_test)
    return trainset, testset
def get_model(model):
    """Instantiate (high-res classifier, low-res classifier, agent) for *model*."""
    from models import resnet
    # Shared layer configurations: the two classifiers always use the
    # same block counts; the agent is a much shallower network.
    clf_layers = [3, 4, 6, 3]
    small_agent_layers = [1, 1, 1, 1]
    big_agent_layers = [2, 2, 2, 2]
    # Third ResNet argument is 3 for CIFAR-scale and 7 for large inputs
    # -- presumably the first-conv kernel size; confirm in models/resnet.py.
    if model == 'R32_C10':
        rnet_hr = resnet.ResNet(resnet.BasicBlock, clf_layers, 3, 10)
        rnet_lr = resnet.ResNet(resnet.BasicBlock, clf_layers, 3, 10)
        agent = resnet.ResNet(resnet.BasicBlock, small_agent_layers, 3, 16)
    elif model == 'R32_C100':
        rnet_hr = resnet.ResNet(resnet.BasicBlock, clf_layers, 3, 100)
        rnet_lr = resnet.ResNet(resnet.BasicBlock, clf_layers, 3, 100)
        agent = resnet.ResNet(resnet.BasicBlock, small_agent_layers, 3, 16)
    elif model == 'R50_ImgNet':
        rnet_hr = resnet.ResNet(resnet.BasicBlock, clf_layers, 7, 1000)
        rnet_lr = resnet.ResNet(resnet.BasicBlock, clf_layers, 7, 1000)
        agent = resnet.ResNet(resnet.BasicBlock, big_agent_layers, 3, 16)
    elif model == 'R34_fMoW':
        rnet_hr = resnet.ResNet(resnet.BasicBlock, clf_layers, 7, 62)
        rnet_lr = resnet.ResNet(resnet.BasicBlock, clf_layers, 7, 62)
        agent = resnet.ResNet(resnet.BasicBlock, big_agent_layers, 3, 16)
    return rnet_hr, rnet_lr, agent
|
"""Copyright 2016 Mirantis, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
from fuelweb_test import logwrap
from keystoneauth1.identity import v2 # TODO(otsvigun) v3
from keystoneauth1 import session
from fuelweb_test.settings import KEYSTONE_CREDS
from fuelweb_test.settings import PATH_TO_CERT
from fuelweb_test.settings import VERIFY_SSL
from fuelweb_test.settings import DISABLE_SSL
class ContrailClient(object):
    """Contrail utilite wrapper: thin JSON/REST client over keystoneauth."""
    def __init__(self, controller_node_ip, contrail_port=8082,
                 credentials=KEYSTONE_CREDS, **kwargs):
        """Create ContrailClient object.

        :param controller_node_ip: host serving both the Contrail API and
            the Keystone v2 endpoint (port 5000).
        :param contrail_port: Contrail API port, default 8082.
        :param credentials: mapping with Keystone credentials; defaults to
            the shared KEYSTONE_CREDS settings dict.
        """
        if DISABLE_SSL:
            self.url = "http://{0}:{1}".format(
                controller_node_ip, contrail_port)
            self.keystone_url = "http://{0}:5000/v2.0".format(
                controller_node_ip)
        else:
            self.url = "https://{0}:{1}".format(
                controller_node_ip, contrail_port)
            self.keystone_url = 'https://{0}:5000/v2.0/'.format(
                controller_node_ip)
        insecure = not VERIFY_SSL
        # FIX: update a *copy*.  The default for ``credentials`` is the
        # module-level KEYSTONE_CREDS dict; mutating it in place leaked the
        # 'ca_cert'/'insecure' keys into every other consumer of that
        # settings dict (classic mutable-default-argument bug).
        credentials = dict(credentials)
        credentials.update({'ca_cert': PATH_TO_CERT, 'insecure': insecure})
        auth = v2.Password(auth_url=self.keystone_url,
                           username=KEYSTONE_CREDS['username'],
                           password=KEYSTONE_CREDS['password'],
                           tenant_name=KEYSTONE_CREDS['tenant_name'])
        # NOTE(review): certificate verification is unconditionally disabled
        # here even when VERIFY_SSL is set -- confirm this is intended.
        self._client = session.Session(auth=auth, verify=False)
    @property
    def client(self):
        """Authenticated keystoneauth session."""
        return self._client
    def _get(self, data_path):
        """GET ``data_path`` relative to the API root; return decoded JSON."""
        return self.client.get(url=self.url + data_path).json()
    def _delete(self, data_path):
        """DELETE ``data_path`` relative to the API root; return decoded JSON."""
        return self.client.delete(url=self.url + data_path).json()
    def _post(self, data_path, **kwargs):
        """POST to ``data_path`` (one connect retry); return decoded JSON."""
        return self.client.post(
            url=self.url + data_path, connect_retries=1, **kwargs).json()
    def _put(self, data_path, **kwargs):
        """PUT to ``data_path`` (one connect retry); return decoded JSON."""
        return self.client.put(
            url=self.url + data_path, connect_retries=1, **kwargs).json()
    @logwrap
    def create_network(self, net_name, net_attr):
        """Create virtual-network.

        :param net_name: type list, fq_name components of the network
            (tenant/project and network name)
        :param net_attr: type dictionary, network attributes
        """
        data = {
            "virtual-network": {
                "parent_type": "project",
                "fq_name": net_name,
                "network_ipam_refs": net_attr}}
        return self._post('/virtual-networks', json=data)
    def add_router_interface(self, network, route_table, attr=None):
        """Add router interface to network.

        :param network: type dictionary, network
        :param route_table: type dictionary, router
        :param attr: type dictionary, parameters of router interface (optional)
        """
        data = {"virtual-network": {'fq_name': network['fq_name'],
                                    'route_table_refs': [{
                                        'to': route_table['fq_name'], "attr": attr}]}}
        return self._put(
            '/virtual-network/{0}'.format(network['uuid']), json=data)
    def get_route_tables(self):
        """Get route tables."""
        return self._get('/route-tables')
    def get_networks(self):
        """Get networks."""
        return self._get('/virtual-networks')
    def get_router_by_name(self, name):
        """Get router by name.

        :param name: type string, name of router.
        :return dictionary
        """
        route_tables = self.get_route_tables()['route-tables']
        route_table = [
            route for route in route_tables
            if name in route['fq_name']]
        # Raises IndexError when no route table matches *name*.
        return route_table.pop()
    def get_projects(self):
        """Get projects."""
        return self._get('/projects')
    def get_project_by_name(self, name):
        """Get project by name.

        :param name: type string, name of project.
        :return dictionary
        """
        projects = self.get_projects()
        project = [p for p in projects if name in p['fq_name']]
        # Raises IndexError when no project matches *name*.
        return project.pop()
    def get_instance_by_id(self, instance_id):
        """Get instance by id.

        :param instance_id: type string, instance id.
        :return dictionary
        """
        return self._get('/virtual-machine/{0}'.format(instance_id))
    def get_net_by_id(self, net_id):
        """Get network by id.

        :param net_id: type string, network id.
        :return dictionary
        """
        return self._get('/virtual-network/{0}'.format(net_id))
    def get_bgp_routers(self):
        """Get bgp routers."""
        return self._get('/bgp-routers')
    def get_bgp_by_id(self, bgp_id):
        """Get bgp router by id.

        :param bgp_id: type string, bgp router id.
        :return dictionary
        """
        return self._get('/bgp-router/{0}'.format(bgp_id))
|
from homework.homework3.task4 import is_armstrong
def test_positive_small():
    # Single-digit numbers are trivially Armstrong numbers (9 == 9**1).
    assert is_armstrong(9)
def test_positive():
    # Classic 3-digit Armstrong number: 153 == 1**3 + 5**3 + 3**3.
    assert is_armstrong(153)
def test_negative():
    # 20 != 2**2 + 0**2, so it is not an Armstrong number.
    assert not is_armstrong(20)
|
# coding:utf-8
from boto3.session import Session
import os
import ConfigParser
import json
# import datetime
from datetime import datetime, timedelta
import pytz
import boto3
import requests
from myutils import contains
from my_aws_utils import store_json_to_s3, get_json_from_s3
config = ConfigParser.SafeConfigParser()
config.read('./config/config.ini')
bucket_name = 'nu.mine.kino.temperature'
# Compare the latest delay list on S3 with the previous snapshot and
# notify Slack when they differ.
# NOTE(review): the original Japanese comment said "fetch from the net and
# store to S3, restricted to selected lines", but this handler only reads
# from S3 -- the fetcher presumably lives in another job; confirm.
def lambda_handler(event, context):
    # Current and previous delay snapshots, both stored as JSON on S3.
    delayList = get_json_from_s3(bucket_name, 'delay.json')
    delayPrevList = get_json_from_s3(bucket_name, 'delay_prev.json')
    if (compare(delayList, delayPrevList)):
        print('DIFFなし')
        # message = '電車運行情報で更新情報なし'
        # send_mail(message, delayList, delayPrevList)
    else:
        print('DIFFアリ')
        message = '電車運行情報で更新情報あり'
        send_mail(message, delayList, delayPrevList)
    # Save the latest snapshot as "previous" for the next invocation.
    result = json.dumps(delayList, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
    store_json_to_s3(bucket_name, 'delay_prev.json', result)
def send_mail(message, delayList, delayPrevList):
    """Format the delay list into a human-readable report and deliver it.

    Despite the name, the active delivery path is Slack; the SNS/e-mail
    path below is disabled and kept only for reference.  *message* and
    *delayPrevList* are unused by the active code path.
    """
    send_message = '電車運行情報\n\n'
    jst = pytz.timezone('Asia/Tokyo')
    for i, obj in enumerate(delayList):
        # One line per delayed railway: name plus last-update time (JST).
        update_time = datetime.fromtimestamp(obj['lastupdate_gmt'], tz=jst)
        send_message += '・%s (%s)\n' % (obj['name'], update_time.strftime("%H:%M"))
    send_message += '\n'
    send_message += '(カッコは更新時刻)\n'
    send_message += 'https://www.tetsudo.com/traffic/\n'
    send_message += 'https://rti-giken.jp/fhc/api/train_tetsudo/\n'
    # Dead code below: the former SNS (e-mail) delivery path with raw
    # JSON dumps of both lists appended to the message.
    # send_message += '\n\n\n\n------------------\n'
    # send_message += 'delayList\n'
    # send_message += '------------------\n'
    #
    # for i, obj in enumerate(delayList):
    #     send_message += json.dumps(obj, ensure_ascii=False) + '\n'
    #
    # send_message += '\n'
    # send_message += 'delayPrevList\n'
    # send_message += '------------------\n'
    #
    # for i, obj in enumerate(delayPrevList):
    #     send_message += json.dumps(obj, ensure_ascii=False) + '\n'
    #
    # send_message += '------------------\n'
    # topic = config.get('aws', 'topic')
    # subject = message
    # region = 'ap-northeast-1'
    #
    # sns = boto3.client('sns')
    # response = sns.publish(
    #     TopicArn=topic,
    #     Message=send_message,
    #     Subject=subject,
    #     MessageStructure='Raw'
    # )
    send_slack(send_message)
def send_slack(message):
    """Post *message* to the #rail_info channel via an incoming webhook."""
    # Webhook path (after the common prefix) comes from config.ini.
    webhook_url = 'https://hooks.slack.com/services' + config.get('slack', 'bot_url')
    payload = {"text": message, "channel": "#rail_info", }
    body = json.dumps(payload)
    requests.post(webhook_url, data=body, headers={'Content-Type': 'application/json'})
def compare(delayList, delayPrevList):
    """Return True when both delay lists contain the same railways (by name).

    Debug prints are kept: they end up in the Lambda's CloudWatch log.
    """
    for i, obj in enumerate(delayList):
        print(obj)
    print('------------------')
    for i, obj in enumerate(delayPrevList):
        print(obj)
    print('------------------')
    # Different lengths mean the set of delayed lines changed.
    if (len(delayList) != len(delayPrevList)):
        return False
    # Same size: every current entry must also appear in the previous list.
    for i, delay in enumerate(delayList):
        print(delay['name'], ' で処理開始')
        # BUG FIX: compare against the *previous* list.  The old code called
        # containsRailInfo(delay, delayList) -- matching the current list
        # against itself -- so a renamed entry could never be detected when
        # the list lengths were equal.
        matchFlag = containsRailInfo(delay, delayPrevList)
        # False here means no previous entry matched this name.
        if (not (matchFlag)):
            return False
    return True
def compareRailInfo(a, b):
    """Return True when two RailInfo dicts describe the same line.

    Equality is decided by the ``name`` field alone.
    """
    return a['name'] == b['name']
def containsRailInfo(target, list):
    """Return True when *list* holds a RailInfo with the same name as *target*.

    NOTE: the second parameter shadows the builtin ``list``; the name is
    kept for backward compatibility with keyword callers.
    """
    return any(compareRailInfo(target, element) for element in list)
def main():
    """Ad-hoc manual checks: timestamp/JST formatting, then a_test()."""
    jst = pytz.timezone('Asia/Tokyo')
    # Naive vs. timezone-aware rendering of the same epoch timestamp.
    print(datetime.fromtimestamp(1479460502))
    date = datetime.fromtimestamp(1479460502, tz=jst)
    print(date)
    print(date.strftime("%Y/%m/%d %H:%M:%S"))
    a_test()
def a_test():
    """Manual check of containsRailInfo with inline JSON fixtures.

    Prints False: the target's "source" field differs ("RSSa"), but
    matching is by "name" only, so this actually prints True -- run to
    confirm.  NOTE(review): kept exactly as authored.
    """
    # Single RailInfo entry (note the "RSSa" value in "source").
    jsonStr1 = '''
    {
        "lastupdate_gmt": 1479471904,
        "name": "京急線",
        "company": "京急電鉄",
        "source": "鉄道com RSSa"
    }
    '''
    # List of two RailInfo entries; the second shares the target's name.
    jsonStr2 = '''
    [
        {
            "company": "都営地下鉄",
            "lastupdate_gmt": 1479472082,
            "name": "浅草線",
            "source": "鉄道com RSS"
        },
        {
            "company": "京急電鉄",
            "lastupdate_gmt": 1479471904,
            "name": "京急線",
            "source": "鉄道com RSS"
        }
    ]
    '''
    target = json.loads(jsonStr1)
    list = json.loads(jsonStr2)
    print(containsRailInfo(target, list))
if __name__ == "__main__":
    # Invoke the Lambda handler directly (empty event/context) for local runs.
    lambda_handler('', '')
    # main()
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2014 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth.decorators import login_required, \
permission_required
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template.context import RequestContext
from django.utils.translation import ugettext as _
from dialer_audio.constants import AUDIO_COLUMN_NAME
from dialer_audio.forms import DialerAudioFileForm
from audiofield.models import AudioFile
from common.common_functions import get_pagination_vars
import os.path
audio_redirect_url = '/module/audio/'
@permission_required('audiofield.view_audiofile', login_url='/')
@login_required
def audio_list(request):
    """AudioFile list for the logged in user
    **Attributes**:
        * ``template`` - frontend/audio/audio_list.html
    **Logic Description**:
        * List all audios which belong to the logged in user.
    """
    # Whitelist of sortable columns; anything else falls back to 'id'.
    sort_col_field_list = ['id', 'name', 'updated_date']
    default_sort_field = 'id'
    pagination_data = get_pagination_vars(
        request, sort_col_field_list, default_sort_field)
    PAGE_SIZE = pagination_data['PAGE_SIZE']
    sort_order = pagination_data['sort_order']
    # Only the current user's audio files, in the requested order.
    audio_list = AudioFile.objects.filter(user=request.user).order_by(sort_order)
    domain = Site.objects.get_current().domain
    template = 'frontend/audio/audio_list.html'
    data = {
        'audio_list': audio_list,
        'total_audio': audio_list.count(),
        'PAGE_SIZE': PAGE_SIZE,
        'AUDIO_COLUMN_NAME': AUDIO_COLUMN_NAME,
        'col_name_with_order': pagination_data['col_name_with_order'],
        'domain': domain,
        'msg': request.session.get('msg'),
        'AUDIO_DEBUG': settings.AUDIO_DEBUG,
    }
    # Flash messages are one-shot: clear them once captured in ``data``.
    request.session['msg'] = ''
    request.session['error_msg'] = ''
    return render_to_response(template, data,
                              context_instance=RequestContext(request))
@permission_required('audiofield.add_audiofile', login_url='/')
@login_required
def audio_add(request):
    """Add new Audio for the logged in user
    **Attributes**:
        * ``form`` - DialerAudioFileForm
        * ``template`` - frontend/audio/audio_change.html
    **Logic Description**:
        * Add a new audio which will belong to the logged in user
          via the DialerAudioFileForm & get redirected to the audio list
    """
    form = DialerAudioFileForm()
    if request.method == 'POST':
        form = DialerAudioFileForm(request.POST, request.FILES)
        if form.is_valid():
            # Attach the uploading user before the first save.
            obj = form.save(commit=False)
            obj.user = request.user
            obj.save()
            # One-shot flash message shown on the list page.
            request.session["msg"] = _('"%(name)s" added.') % \
                {'name': request.POST['name']}
            return HttpResponseRedirect(audio_redirect_url)
    # GET, or POST with validation errors: re-render the form.
    template = 'frontend/audio/audio_change.html'
    data = {
        'form': form,
        'action': 'add',
        'AUDIO_DEBUG': settings.AUDIO_DEBUG,
    }
    return render_to_response(template, data,
                              context_instance=RequestContext(request))
def delete_audio_file(obj):
    """Remove *obj*'s audio file from disk, if any; always return True."""
    audio_file = obj.audio_file
    if audio_file:
        file_path = audio_file.path
        # Guard against a dangling record whose file is already gone.
        if os.path.exists(file_path):
            os.remove(file_path)
    return True
@permission_required('audiofield.delete_audiofile', login_url='/')
@login_required
def audio_del(request, object_id):
    """Delete a audio for a logged in user
    **Attributes**:
        * ``object_id`` - Selected audio object
        * ``object_list`` - Selected audio objects
    **Logic Description**:
        * Delete selected the audio from the audio list
    """
    if int(object_id) != 0:
        # Single-record delete; 404 unless the record belongs to this user.
        audio = get_object_or_404(
            AudioFile, pk=int(object_id), user=request.user)
        request.session["msg"] = _('"%(name)s" is deleted.') % {'name': audio.name}
        # 1) remove audio file from drive
        delete_audio_file(audio)
        # 2) delete audio
        audio.delete()
    else:
        try:
            # When object_id is 0 (Multiple records delete)
            values = request.POST.getlist('select')
            # SECURITY FIX: the previous code interpolated raw POST values
            # into ``.extra(where=['id IN (%s)' % values])``, which allowed
            # SQL injection.  ``pk__in`` keeps the query parameterized and
            # still restricts to the current user's records.
            audio_list = AudioFile.objects \
                .filter(user=request.user, pk__in=values)
            request.session["msg"] = _('%(count)s audio(s) are deleted.') \
                % {'count': audio_list.count()}
            # 1) remove audio file from drive
            for audio in audio_list:
                delete_audio_file(audio)
            # 2) delete audio
            audio_list.delete()
        except Exception:
            # Invalid ids or DB errors surface as a 404, matching the
            # previous behaviour (was a bare ``except:``).
            raise Http404
    return HttpResponseRedirect(audio_redirect_url)
@permission_required('audiofield.change_audiofile', login_url='/')
@login_required
def audio_change(request, object_id):
    """Update (or delete) an audio file of the logged-in user.

    **Attributes**:

        * ``form`` - DialerAudioFileForm
        * ``template`` - frontend/audio/audio_change.html

    **Logic Description**:

        * Update the audio belonging to the logged-in user via the form
          and redirect to the audio list; a POST carrying ``delete``
          removes the record instead.
    """
    obj = get_object_or_404(AudioFile, pk=object_id, user=request.user)
    form = DialerAudioFileForm(instance=obj)
    if request.method == 'POST':
        if request.POST.get('delete'):
            # BUG FIX: this previously called audio_change() recursively;
            # the request still carries 'delete', so it never terminated.
            # Delegate to audio_del, which actually removes the record.
            audio_del(request, object_id)
            return HttpResponseRedirect(audio_redirect_url)
        else:
            form = DialerAudioFileForm(
                request.POST, request.FILES, instance=obj)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(audio_redirect_url)
    template = 'frontend/audio/audio_change.html'
    data = {
        'form': form,
        'action': 'update',
        'AUDIO_DEBUG': settings.AUDIO_DEBUG,
    }
    return render_to_response(template, data,
                              context_instance=RequestContext(request))
|
import unittest
from io import StringIO
import sys
import import_ipynb
class Test(unittest.TestCase):
    """Grade the Divisibility_with_Conditionals notebook.

    Importing the notebook (enabled by import_ipynb) executes it; it is
    expected to define an integer ``x`` and to have written its answer
    line to ``output.txt``.
    """

    def setUp(self):
        # Importing runs the notebook; read ``x`` from it.
        import Divisibility_with_Conditionals
        self.exercise = Divisibility_with_Conditionals
        # Recompute the expected message; the order matters: 6 is checked
        # first, then 2, then 3 — mirroring the required conditional chain.
        if self.exercise.x % 6 == 0:
            self.truth = 'x is divisible by 6'
        elif self.exercise.x % 2 == 0:
            self.truth = 'x is divisible by 2'
        elif self.exercise.x % 3 == 0:
            self.truth = 'x is divisible by 3'
        else:
            self.truth = 'x is not divisible by 2 or 3'

    # def test_x(self):
    #     self.assertEqual(self.exercise.x, 12)

    def test_truth(self):
        # NOTE(review): readline() keeps a trailing '\n' if one was
        # written — presumably the notebook writes without one; confirm.
        with open('output.txt', 'r') as f:
            data = f.readline()
        self.assertEqual(self.truth, data)


if __name__ == '__main__':
    unittest.main()
|
from django.test import TestCase, SimpleTestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from .models import Content
# Create your tests here.
class JokesTests(TestCase):
    """Model and CRUD-view tests for the Content ("joke") app."""

    def setUp(self):
        # One user and one joke exist for every test.
        self.user = get_user_model().objects.create_user(
            username = 'tester',
            email = 'tester@email.com',
            password = 'pass'
        )
        self.content = Content.objects.create(
            title = 'this is a joke',
            author = self.user,
            body = 'this is the joke body',
        )

    def test_string_representation(self):
        # str(content) is expected to equal the title.
        content = Content(title='joke')
        self.assertEqual(str(content), content.title)

    def test_jokes_content(self):
        self.assertEqual(f'{self.content.title}', 'this is a joke')
        self.assertEqual(f'{self.content.author}', "tester")
        self.assertEqual(f'{self.content.body}', 'this is the joke body')

    def test_jokes_list_view(self):
        response = self.client.get(reverse('list'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'this is a joke')
        self.assertTemplateUsed(response, 'list.html')

    def test_jokes_detail_view(self):
        # Relies on the fixture row getting pk=1 in a fresh test DB.
        response = self.client.get('/content/1/')
        no_response = self.client.get('/content/100000/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(no_response.status_code, 404)
        self.assertContains(response, 'this is the joke body')
        self.assertTemplateUsed(response, 'details.html')

    def test_jokes_create_view(self):
        # NOTE(review): a successful CreateView normally redirects (302);
        # expecting 200 here presumably means the form page is re-rendered
        # with the new content — confirm against the view.
        response = self.client.post(reverse('new'), {
            'title': 'another joke',
            'author': self.user,
            'body': "this is another joke",
        })
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'another joke')
        self.assertContains(response, 'this is another joke')

    def test_jokes_update_view(self):
        response = self.client.post(reverse('update',args='1'), {
            'title': 'Updated joke',
            'body': 'Updated joke body',
        })
        self.assertEqual(response.status_code, 302)

    def test_snack_delete_view(self):
        response = self.client.get(reverse('delete',args='1'))
        self.assertEqual(response.status_code, 200)
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import base64
import json
from synchronizers.new_base.ansible_helper import *
from mock_modelaccessor import *
import syncstep
class SyncControllerSlicePrivileges(syncstep.SyncStep):
    """Sync ControllerSlicePrivilege objects to their backend controller."""
    provides = [SlicePrivilege]
    requested_interval = 0
    observes = ControllerSlicePrivilege
    playbook = 'sync_controller_users.yaml'

    def map_sync_inputs(self, controller_slice_privilege):
        """Build the playbook input dict for one slice privilege.

        Returns None (skip) when the controller has no admin user.
        Raises when the privilege's user has no site.
        """
        if not controller_slice_privilege.controller.admin_user:
            return
        roles = [controller_slice_privilege.slice_privilege.role.role]
        # setup user home slice roles at controller
        if not controller_slice_privilege.slice_privilege.user.site:
            raise Exception('Sliceless user %s' % controller_slice_privilege.slice_privilege.user.email)
        user_fields = {
            'endpoint': controller_slice_privilege.controller.auth_url,
            'endpoint_v3': controller_slice_privilege.controller.auth_url_v3,
            'domain': controller_slice_privilege.controller.domain,
            'name': controller_slice_privilege.slice_privilege.user.email,
            'email': controller_slice_privilege.slice_privilege.user.email,
            'password': controller_slice_privilege.slice_privilege.user.remote_password,
            'admin_user': controller_slice_privilege.controller.admin_user,
            'admin_password': controller_slice_privilege.controller.admin_password,
            'ansible_tag': '%s@%s@%s' % (controller_slice_privilege.slice_privilege.user.email.replace('@', '-at-'), controller_slice_privilege.slice_privilege.slice.name, controller_slice_privilege.controller.name),
            'admin_tenant': controller_slice_privilege.controller.admin_tenant,
            'roles': roles,
            'tenant': controller_slice_privilege.slice_privilege.slice.name}
        return user_fields

    def map_sync_outputs(self, controller_slice_privilege, res):
        """Record the backend role id returned by the playbook run."""
        controller_slice_privilege.role_id = res[0]['id']
        controller_slice_privilege.save()

    def delete_record(self, controller_slice_privilege):
        """Remove the role assignment from the backend controller."""
        controller_register = json.loads(controller_slice_privilege.controller.backend_register)
        if (controller_register.get('disabled', False)):
            raise InnocuousException('Controller %s is disabled' % controller_slice_privilege.controller.name)
        if controller_slice_privilege.role_id:
            driver = self.driver.admin_driver(controller=controller_slice_privilege.controller)
            user = ControllerUser.objects.filter(
                controller_id=controller_slice_privilege.controller.id,
                user_id=controller_slice_privilege.slice_privilege.user.id
            )[0]
            # NOTE(review): this filters ControllerSlice by user_id, which
            # looks like a copy-paste from the user query above — it
            # presumably should select by the privilege's slice; confirm
            # the ControllerSlice schema before changing.
            slice = ControllerSlice.objects.filter(
                controller_id=controller_slice_privilege.controller.id,
                user_id=controller_slice_privilege.slice_privilege.user.id
            )[0]
            driver.delete_user_role(
                user.kuser_id,
                slice.tenant_id,
                # BUG FIX: was ``slice_prvilege`` (typo), which raised
                # AttributeError on every delete.
                controller_slice_privilege.slice_privilege.role.role
            )
|
""""
On affiche sous forme de graphique le temps de convergence en fonction a la taille de l'échantillon
"""
import matplotlib.pyplot as plt
import json
# Load a previously generated result file; ``with`` guarantees the file
# handle is closed (the original opened it via a ``file`` variable that
# both shadowed the builtin and was never closed).
with open("dataGreedySansContrainte.json", "r") as data_file:
    obj = json.load(data_file)
# Sort the (size, time) pairs, in case they are out of order.
obj = sorted(obj)
# x axis: sample size (number of cities); y axis: convergence time.
x = []
y = []
for f in obj:
    x.append(f[0])
    y.append(f[1])
# Render the plot.
plt.title("Graphique du temps de convergence en fonction de la taille de l'échantillion")
plt.plot(x, y)
plt.xlabel('Taille de l\'échantilltion en nombre de ville')
plt.ylabel('Temps de convergence en seconde')
plt.show()
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
# NOTE(review): named BLACK, but in OpenCV's BGR channel order
# [255, 0, 0] is blue — confirm the intended border colour.
BLACK = [255,0,0]
img1 = cv2.imread('bde3727f3a9e8b2b58f383ebc762b2157eb50cdbff23e69b025418b43967556b.png')
origin_height,origin_width=img1.shape[0],img1.shape[1]
new_size=800
# Pad the image on the bottom and right edges up to new_size x new_size.
#boundary: top, bottom, left, right
constant= cv2.copyMakeBorder(img1,0,new_size-origin_height,0,new_size-origin_width,cv2.BORDER_CONSTANT,value=BLACK)
# Keep the original area plus a 10 px strip of the padding.
cropped = constant[0:origin_height+10, 0:origin_width+10]
plt.imshow(constant,'gray'),plt.title('CONSTANT')
plt.show()
plt.imshow(cropped,'gray')
plt.show()
|
import os
from os import path
from FacebookPostsScraper import FacebookPostsScraper as Fps
from database import database as dbs
from debuging import debuging as dbg
from textprocessor import textprocessor as txtpr
from pprint import pprint as pp
from flask import Flask, jsonify, make_response
import json
app = Flask(__name__)
# In-memory registry of known task ids served by this API.
tasks = [{'id': 1}]
@app.errorhandler(404)
def not_found(error):
    """Return a JSON 404 body instead of Flask's default HTML error page."""
    payload = jsonify({'error': 'Not found'})
    return make_response(payload, 404)
@app.route('/todo/api/v1.0/tasks/<int:task_id>', methods=['GET'])
def get_task(task_id):
    """Scrape the Facebook profile configured for *task_id*, process the
    post text and return the resulting "podium" data as JSON."""
    task = [task for task in tasks if task['id'] == task_id]
    if len(task) == 0:
        # BUG FIX: the original called not_found(404) without returning,
        # so unknown task ids still fell through to the scraping code.
        return not_found(404)
    data_user = dbs.conect_graphql(task_id)
    #id_user_facebok, occasion_name = json.dumps(dataUser)
    #id_user_facebok = 'https://www.facebook.com/cesarsalascasasmx/'
    id_user_facebok = data_user['data']['social_network']['url_social_network']
    occasion_name = data_user['data']['social_network']['occasion_name']
    # SECURITY: hard-coded Facebook credentials committed to source.
    # Move them to environment variables / a secrets store and rotate
    # this password.
    email = 'sue@synapbox.com'
    password = 'Suevcello30'
    # Instantiate the scraper object
    fps = Fps(email, password, post_url_text='Full Story')
    # Example with single profile
    single_profile = id_user_facebok  # facebook user
    data = fps.get_posts_from_profile(single_profile)
    fps.posts_to_csv('my_posts')  # You can export the posts as CSV document
    # get data directory (using getcwd() is needed to support running example in generated IPython notebook)
    # NOTE(review): inside a function "__file__" is never in locals(), so
    # this always falls back to os.getcwd() — confirm that is intended.
    d = path.dirname(__file__) if "__file__" in locals() else os.getcwd()
    # Close the CSV handle deterministically (was left open before).
    with open(path.join(d, 'my_posts.csv')) as csv_file:
        text = csv_file.read()
    if len(text) > 0:
        text = dbg.denoise_text(text)
        text = dbg.give_emoji_free_text(r'', text)
        text = dbg.replace_char(text)
        text = dbg.normalize(text)
        data = dbg.parce_json(text)
        data_podium = txtpr.processWords(text, occasion_name)
    else:
        data_podium = {'error': 'informacion no encontrada'}
    # Synchronous mutation
    dataResponse = dbs.podium_graphql(data_podium)
    #dataResult = dbs.search_result_graphql(task_id, data)
    print(data_podium)
    return json.dumps(dataResponse)
if __name__ == '__main__':
app.run(debug=True) |
#!/usr/bin/env python
import numpy as np
from sklearn.linear_model import SGDRegressor
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import RBFSampler
class QLearnRBF:
    """Q-learning agent over RBF features with one linear model per action.

    Observations are standardised, passed through a bank of RBF samplers,
    and each action's value is estimated by its own SGD linear regressor.
    """

    def __init__(self, env=None, n_actions=None, epsilon=0.1, lr=0.1,
                 gamma=0.99, reg_term=0.0, rbf_samplers=None,
                 observation_examples=None):
        # Use ``is (not) None``: ``== None`` misbehaves on array-like
        # objects that overload equality (the original used ``== None``).
        if env is not None:
            self.env = env
            self.actions = range(env.action_space.n)
        else:
            self.actions = range(n_actions)
        self.epsilon = epsilon  # Exploration constant (Epsilon-Greedy)
        self.gamma = gamma      # Discount factor
        # RBF kernels implementation example with Scikit-learn (RBF feature extraction)
        if rbf_samplers is None:
            if observation_examples is None:
                # ``env`` must be provided on this branch so we can sample.
                observation_examples = np.array(
                    [env.observation_space.sample() for x in range(10000)])
            scaler = StandardScaler()
            scaler.fit(observation_examples)
            # Concatenation of several RBF kernels with different variance
            featurizer = FeatureUnion([
                ("rbf1", RBFSampler(gamma=5.0, n_components=500)),
                ("rbf2", RBFSampler(gamma=2.0, n_components=500)),
                ("rbf3", RBFSampler(gamma=1.0, n_components=500)),
                ("rbf4", RBFSampler(gamma=0.5, n_components=500))
            ])
            featurizer.fit(scaler.transform(observation_examples))
        else:
            # NOTE(review): observation_examples is required here —
            # scaler.fit(None) would fail; confirm callers always pass it
            # together with rbf_samplers.
            scaler = StandardScaler()
            scaler.fit(observation_examples)
            featurizer = FeatureUnion(rbf_samplers)
            featurizer.fit(scaler.transform(observation_examples))
        self.scaler = scaler
        self.featurizer = featurizer
        self.models = []
        # One linear model per action (estimation of V(s) per action)
        for _ in self.actions:
            # SGDRegressor defaults, for reference:
            # loss='squared_loss', penalty='l2', alpha=0.0001, l1_ratio=0.15,
            # fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
            # epsilon=0.1, random_state=None, learning_rate='invscaling',
            # eta0=0.01, power_t=0.25, warm_start=False, average=False
            model = SGDRegressor(eta0=lr, alpha=reg_term, learning_rate="constant")
            # Parameter initialization: one dummy update so predict()
            # works before the first learn() call.
            model.partial_fit(self.transform([observation_examples[0]]), [0])
            self.models.append(model)

    def transform(self, observations):
        '''
        Applies the RBF feature extractor to the given observations.
        '''
        scaled = self.scaler.transform(observations)
        return self.featurizer.transform(scaled)

    def getQ(self, state):
        '''
        Get the estimated value of *state* for each action.
        '''
        X = self.transform([state])
        return np.stack([m.predict(X) for m in self.models]).T

    def chooseAction(self, state):
        '''
        Epsilon-Greedy action selection.
        '''
        if np.random.random() < self.epsilon:
            return np.random.choice(self.actions)
        else:
            return np.argmax(self.getQ(state))

    def learn(self, current_state, current_action, next_reward, next_state):
        '''
        One TD(0) update of the linear model for *current_action*.
        '''
        # TODO: targets = [r + self.gamma*next_q if not done else r for
        # r, next_q, done in zip(rewards, next_Q, dones)] -> treat a
        # terminal state as having return 0; check if this improves
        # performance.
        # Compute the estimation of the expected return
        G = next_reward + self.gamma * np.max(self.getQ(next_state))
        self.models[current_action].partial_fit(self.transform([current_state]), [G])
from django.test import TestCase, Client
from django.core.urlresolvers import resolve
from django.core.cache import cache
from django.http import HttpRequest, HttpResponseRedirect
from django.core.urlresolvers import reverse
from ghostnames.views import list_names, choose_ghost_name
from ghostnames.models import Username, Ghost
from ghostnames.forms import available_ghosts
class SimpleTestHomePage(TestCase):
    """URL routing and rendering tests for the ghostnames list page."""

    def setUp(self):
        self.client = Client()

    def test_root_url_resolves_to_home_page_view(self):
        found = resolve('/ghostnames/')
        self.assertEqual(found.func, list_names)

    def test_home_page_returns_correct_html(self):
        # Call the view directly with a bare request and inspect raw HTML.
        request = HttpRequest()
        response = list_names(request)
        self.assertTrue(response.content.startswith(b'<html>'))
        self.assertIn(b'<title>Ghost Name Picker</title>', response.content)
        self.assertTrue(response.content.endswith(b'</html>'))

    def test_homepage(self):
        # '/' permanently redirects (301) to '/ghostnames/'.
        response = self.client.get('/')
        self.assertEqual(response.status_code, 301)
        response = self.client.get('/ghostnames/')
        self.assertEqual(response.status_code, 200)

    def test_misconfigured_choose_name_url_resolves_to_home_page_view(self):
        # Both a concrete pk and a missing pk resolve to the chooser view.
        found = resolve('/ghostnames/choose/124')
        self.assertEqual(found.func, choose_ghost_name)
        found = resolve('/ghostnames/choose/')
        self.assertEqual(found.func, choose_ghost_name)

    def test_homepage_has_usernames_on_it(self):
        # Users with a ghostname appear on the page; 'derek' has none and
        # must not be listed.
        users = ['andy antman alpha', 'brian beast beta', 'charlie chopper chase']
        Username.objects.create(firstname='derek',lastname='deamon')
        for user_fn in users:
            firstname, ghostname, lastname=user_fn.split(' ')
            Username.objects.create(firstname=firstname,lastname=lastname, ghostname=ghostname)
        response = self.client.get('/')
        self.assertEqual(response.status_code, 301)
        response = self.client.get('/ghostnames/')
        # NOTE(review): `str in response.content` only works on Python 2
        # (content is bytes on Python 3); the django.core.urlresolvers
        # import suggests a Python 2 / Django < 2 codebase — confirm.
        for user_fn in users:
            firstname, ghostname, lastname=user_fn.split(' ')
            self.assertTrue(firstname in response.content)
            self.assertTrue(lastname in response.content)
            self.assertTrue(ghostname in response.content)
        self.assertTrue('derek' not in response.content)
        self.assertTrue('deamon' not in response.content)
class SimpleSubmitName(TestCase):
    """Tests for submitting a (firstname, lastname) pair on the list page."""

    def setUp(self):
        self.client = Client()

    def test_submit_name_to_create_ghost_name(self):
        """
        requirement:
        Includes a button for new users to enter their name and select a ghost name.
        """
        # A valid POST redirects (302) to the ghost-name chooser.
        response = self.client.post('/ghostnames/', {'firstname': 'john',
                                                     'lastname': 'smith'}
                                    )
        self.assertEqual(response.status_code, 302)

    def test_home_page_can_save_a_POST_request(self):
        # Posting directly to the view creates exactly one Username row
        # with the submitted names.
        self.assertEqual(Username.objects.all().count(),0)
        request = HttpRequest()
        request.method = 'POST'
        firstname = 'Andy'
        lastname = 'Alpha'
        request.POST['firstname'] = firstname
        request.POST['lastname'] = lastname
        response = list_names(request)
        self.assertEqual(Username.objects.all().count(), 1)
        created_user = Username.objects.all()[0]
        self.assertEqual(created_user.firstname, firstname)
        self.assertEqual(created_user.lastname, lastname)

    def test_home_page_saved_POST_now_choose_a_ghost_name(self):
        # After saving, the redirect target is the chooser page, which
        # shows the just-submitted names.
        response = self.client.post('/ghostnames/',
                                    {'firstname': 'brian',
                                     'lastname': 'beta'}
                                    )
        self.assertEqual(response.status_code, 302)
        self.assertTrue('choose' in response['Location'])
        response = self.client.get(response['Location'])
        # NOTE(review): str-in-bytes membership implies Python 2 — confirm.
        self.assertTrue('brian' in response.content)
        self.assertTrue('beta' in response.content)
class GhostNameAssignmentTests(TestCase):
    """Tests for the Ghost pool: availability, initialisation, assignment."""

    def setUp(self):
        self.client = Client()

    def test_get_available_ghost_names_when_all_are_available(self):
        # available_ghosts(3) returns 3 of the 4 available names.
        ghostnames = ['Betelgeuse','Bhoot','Bloody Mary','Bogle']
        for name in ghostnames:
            Ghost.objects.create(name=name,taken='available')
        next_ghosts = available_ghosts(3)
        next_ghosts_names = [n.name for n in next_ghosts]
        # NOTE(review): asserting the FIRST three assumes available_ghosts
        # returns rows in creation order — confirm its ordering contract.
        for name in ghostnames[:3]:
            self.assertTrue(name in next_ghosts_names)

    def test_get_available_ghost_names_when_some_are_taken(self):
        # Only the two ghosts marked 'available' may be returned.
        ghostnames = ['Betelgeuse','Bhoot','Bloody Mary','Bogle']
        for i, name in enumerate(ghostnames):
            taken = 'available' if i<2 else 'taken'
            g = Ghost.objects.create(name=name, taken=taken)
        next_ghosts = available_ghosts(3)
        next_ghosts_names = [n.name for n in next_ghosts]
        for name in ghostnames[:2]:
            self.assertTrue(name in next_ghosts_names)

    def test_initialize(self):
        # Ghost.initialize() seeds exactly 43 ghosts.
        Ghost.initialize()
        all_ghosts_count = Ghost.objects.count()
        self.assertTrue(all_ghosts_count == 43)

    def test_initialize_has_no_newlines_in_names(self):
        # Seeded names must be stripped of newline characters.
        Ghost.initialize()
        all_ghosts = Ghost.objects.all()
        all_names_with_newline = [g.name for g in all_ghosts if '\n' in g.name]
        self.assertTrue(len(all_names_with_newline) == 0)

    def test_initialize_only_runs_once(self):
        # A second initialize() must not duplicate the pool.
        Ghost.initialize()
        Ghost.initialize()
        all_ghosts_count = Ghost.objects.count()
        self.assertTrue(all_ghosts_count == 43)

    def test_select_ghost_name_is_saved(self):
        # Choosing a name stores it on the user, marks the ghost taken
        # and redirects.
        Ghost.initialize()
        thisuser =Username.objects.create(firstname='brian',
                                          lastname='beta'
                                          )
        first_ghost= available_ghosts(3)[0]
        first_ghostname = first_ghost.name
        self.assertTrue(first_ghost.taken == 'available')
        response = self.client.post('/ghostnames/choose/%s' % thisuser.pk,
                                    {
                                        'ghost_name': first_ghost.name
                                    })
        self.assertTrue(Username.objects.get(lastname='beta').ghostname == first_ghostname)
        first_ghost = Ghost.objects.get(name = first_ghostname)
        self.assertTrue(first_ghost.taken == 'taken')
        self.assertEqual(response.status_code, 302)

    def test_select_ghost_name_releases_old_name(self):
        # Re-visiting the chooser frees the previously held ghost name.
        Ghost.initialize()
        first_ghost= available_ghosts(3)[0]
        thisuser =Username.objects.create(firstname='brian',
                                          lastname='beta',
                                          ghostname=first_ghost.name
                                          )
        response = self.client.get('/ghostnames/choose/%s' % thisuser.pk)
        self.assertNotEqual(
            Username.objects.get(firstname='brian').ghostname,
            first_ghost.name)
        self.assertNotEqual(
            Ghost.objects.get(name=first_ghost.name).taken,
            'taken')
|
import os
# App-store category landing pages.
APPLESTORE_CATEGORIES = "https://itunes.apple.com/us/genre/ios/id36?mt=8"
GOOGLESTORE_CATEGORIES = "https://play.google.com/store/apps/top"
# AppDB service host/port.
APPDB_HOST='127.0.0.1'
APPDB_PORT=9000
# MongoDB settings for the app documents collection.
APPDB_MONGO = {
    'CONNECTION': 'mongodb://127.0.0.1:27017',
    'DATABASE': 'twoappstores',
    'COLLECTION': 'apps'
}
# Absolute path of the directory containing this config file.
BASE_DIR = os.path.realpath(os.path.dirname(__file__))
# URLDB service host/port.
URLDB_HOST='127.0.0.1'
URLDB_PORT=9001
# MongoDB settings for the URL collection.
URLDB_MONGO = {
    'CONNECTION': 'mongodb://127.0.0.1:27017',
    'DATABASE': 'twoappstores',
    'COLLECTION': 'urls'
}
TIME_BETWEEN_REQUESTS = [1, 5] |
import os
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import numpy
import itertools
import random
import cv2
import math
import config
import tensorflow as tf
version = "ABCD"
CHARS_W_BLANK = config.CHARS + " "
BG_PATH = "/python/chinese_rec/bgs"
FONT_HEIGHT = 32
# Build the character image set for one font file at a height of 32 px.
def make_char_ims(font_path, output_height):
    """Yield (char, image) pairs for every char in CHARS_W_BLANK.

    Each image is a 2-D float32 array in [0, 1] of height *output_height*.
    """
    font_size = output_height * 4
    # Load the font file and create a font object.
    font = ImageFont.truetype(font_path, font_size)
    # Maximum glyph height over all characters.
    height = max(font.getsize(c)[1] for c in CHARS_W_BLANK)
    for c in CHARS_W_BLANK:
        # Glyph width for this character.
        width = font.getsize(c)[0]
        # Canvas with zero alpha, onto which the glyph is drawn.
        im = Image.new("RGBA", (width, height), (255, 255, 255, 0))
        draw = ImageDraw.Draw(im)
        # Draw the glyph in white.
        draw.text((0, 0), c, (255, 255, 255), font=font)
        # Scale so the output height is exactly output_height (32 px).
        scale = float(output_height) / height
        im = im.resize((int(width * scale), output_height), Image.ANTIALIAS)
        # numpy.array(im) is height x width x 4 (RGBA); keep the first
        # channel and normalise to [0, 1].
        yield c, numpy.array(im)[:, :, 0].astype(numpy.float32) / 255.
# Load every .ttf font in a directory and pre-render its character images.
def load_fonts(folder_path):
    """Return (font_filenames, {font_filename: {char: image}})."""
    fonts = [f for f in os.listdir(folder_path) if f.endswith('.ttf')]
    font_char_ims = {
        font: dict(make_char_ims(os.path.join(folder_path, font),
                                 FONT_HEIGHT))
        for font in fonts
    }
    return fonts, font_char_ims
# Pick a background image for a synthetic sample.
def generate_bg(num_bg_images):
    """Load a random grayscale background from BG_PATH, scaled to [0, 1]."""
    fname = BG_PATH + "/{:08d}.jpg".format(random.randint(0, num_bg_images - 1))
    bg = cv2.imread(fname, cv2.IMREAD_GRAYSCALE) / 255.
    # NOTE(review): cv2.resize expects (width, height); confirm
    # config.IMG_SHAPE is ordered accordingly (numpy shapes are (h, w)).
    bg = cv2.resize(bg, config.IMG_SHAPE)
    return bg
def rounded_rect(shape, radius):
    """Return a mask of *shape* that is 1 inside a rounded rectangle."""
    mask = numpy.ones(shape)
    # Zero out the four corner squares...
    mask[:radius, :radius] = 0.0
    mask[-radius:, :radius] = 0.0
    mask[:radius, -radius:] = 0.0
    mask[-radius:, -radius:] = 0.0
    height, width = shape[0], shape[1]
    # ...then draw a filled circle back into each corner, leaving only
    # the outer rounded region masked out.
    for center in ((radius, radius),
                   (radius, height - radius),
                   (width - radius, radius),
                   (width - radius, height - radius)):
        cv2.circle(mask, center, radius, 1.0, -1)
    return mask
def generate_code():
    """Generate a random code string in one of four layouts.

    Layout is chosen uniformly from ``version``:
      A - 16 chars as four space-separated groups of 4
      B - 19 contiguous chars
      C - 19 chars split into groups of 6 and 13
      default - 19 chars as groups of 4, 4, 4, 4, 3

    Characters are drawn uniformly from config.CHARS; draw order and
    output format are identical to the original repeated-choice version.
    """
    def group(n):
        # n random characters from the configured charset.
        return "".join(random.choice(config.CHARS) for _ in range(n))

    v = random.choice(version)
    # 16 chars, space-separated groups of 4
    if v == "A":
        return " ".join(group(4) for _ in range(4))
    # 19 contiguous chars
    if v == "B":
        return group(19)
    # 19 chars: first 6, then 13
    if v == "C":
        return "{} {}".format(group(6), group(13))
    # Default: 19 chars in groups of 4, 4, 4, 4 and a final 3
    return " ".join([group(4), group(4), group(4), group(4), group(3)])
def pick_colors():
    """Draw a (text, plate) grey-level pair with contrast >= 0.3.

    Both values are uniform in [0, 1); the darker one becomes the text
    colour. Pairs are re-drawn until plate - text >= 0.3.
    """
    while True:
        text_color = random.random()
        plate_color = random.random()
        # Ensure the text is the darker of the two.
        if text_color > plate_color:
            text_color, plate_color = plate_color, text_color
        if plate_color - text_color >= 0.3:
            return text_color, plate_color
def euler_to_mat(yaw, pitch, roll):
    """Return the 3x3 rotation matrix Rz(roll) * Rx(pitch) * Ry(yaw).

    Each factor rotates clockwise about its axis; yaw (Y) is applied
    first, then pitch (X), then roll (Z).
    """
    cy, sy = math.cos(yaw), math.sin(yaw)
    yaw_mat = numpy.matrix([[cy, 0., sy],
                            [0., 1., 0.],
                            [-sy, 0., cy]])
    cp, sp = math.cos(pitch), math.sin(pitch)
    pitch_mat = numpy.matrix([[1., 0., 0.],
                              [0., cp, -sp],
                              [0., sp, cp]])
    cr, sr = math.cos(roll), math.sin(roll)
    roll_mat = numpy.matrix([[cr, -sr, 0.],
                             [sr, cr, 0.],
                             [0., 0., 1.]])
    return roll_mat * pitch_mat * yaw_mat
# Render a synthetic plate image.
def generate_plate(font_height, char_ims):
    """Render a random 5-char code on a plate with random padding/contrast.

    Returns (plate_image, rounded_corner_mask, code_without_spaces).
    """
    # Horizontal padding.
    h_padding = random.uniform(0.2, 0.4) * font_height
    # Vertical padding.
    v_padding = random.uniform(0.1, 0.3) * font_height
    spacing = font_height * random.uniform(0.05, 0.1)
    radius = 1 + int(font_height * 0.1 * random.random())
    # NOTE(review): only the first 5 chars of the generated code are kept,
    # and they may include a space (rendered via CHARS_W_BLANK) — confirm
    # this truncation is intended.
    code = generate_code()[0:5]
    text_width = sum(char_ims[c].shape[1] for c in code)
    text_width += (len(code) - 1) * spacing
    out_shape = (int(font_height + v_padding * 2),
                 int(text_width + h_padding * 2))
    text_color, plate_color = pick_colors()
    text_mask = numpy.zeros(out_shape)
    x = h_padding
    y = v_padding
    # Blit each character image into the text mask, left to right.
    for c in code:
        char_im = char_ims[c]
        ix, iy = int(x), int(y)
        text_mask[iy:iy + char_im.shape[0], ix:ix + char_im.shape[1]] = char_im
        x += char_im.shape[1] + spacing
    # Blend plate and text grey levels through the mask.
    plate = (numpy.ones(out_shape) * plate_color * (1. - text_mask) +
             numpy.ones(out_shape) * text_color * text_mask)
    return plate, rounded_rect(out_shape, radius), code.replace(" ", "")
# Compose one training/test image.
def generate_im(char_ims, num_bg_images):
    """Paste a random plate at a random position onto a random background.

    Returns (image, code).
    """
    # Pick a background image.
    bg = generate_bg(num_bg_images)
    plate, plate_mask, code = generate_plate(FONT_HEIGHT, char_ims)
    # NOTE(review): randint raises if the plate is larger than the
    # background — assumes backgrounds always exceed plate size; confirm.
    x = numpy.random.randint(0, bg.shape[0] - plate.shape[0])
    y = numpy.random.randint(0, bg.shape[1] - plate.shape[1])
    bg[x:plate.shape[0] + x, y:plate.shape[1] + y] = plate
    return bg, code
# Infinite generator of test images.
def generate_ims():
    """
    Generate number plate images.

    :return:
        Iterable of number plate images.
    """
    variation = 1.0  # NOTE(review): unused — confirm whether it can be removed.
    fonts, font_char_ims = load_fonts("./fonts")
    num_bg_images = len(os.listdir(BG_PATH))
    # When islice reaches its stop, this generator no longer receives
    # next() calls and simply waits to be garbage-collected.
    while True:
        # print(font_char_ims[num])
        # Compose an image using a randomly chosen font's character set.
        yield generate_im(font_char_ims[random.choice(fonts)], num_bg_images)
if __name__ == "__main__":
if not os.path.isdir("./test"):
os.mkdir("test")
im_gen = itertools.islice(generate_ims(), 100)
for img_idx, (im, c) in enumerate(im_gen):
fname = "test/{:08d}_{}.png".format(img_idx, c)
print(fname)
cv2.imwrite(fname, im * 255.)
|
#!/usr/bin/env python
import argparse
import json
import sys
import chainer
import numpy
import nets
import nlp_utils
def setup_model(device, model_setup):
    """Load a trained text classifier and its vocabulary.

    *model_setup* is the path to the setup JSON written at training time
    (keys: model, layer, unit, dropout, n_class, vocab_path, model_path).
    Returns (model, vocab, setup).
    """
    # NOTE(review): ``args`` is the module-level namespace parsed under
    # __main__; calling this from an importing module would NameError.
    sys.stderr.write(json.dumps(args.__dict__, indent=2) + '\n')
    # Use context managers so the JSON file handles are closed
    # deterministically (the original leaked them via json.load(open(...))).
    with open(model_setup) as setup_file:
        setup = json.load(setup_file)
    sys.stderr.write(json.dumps(setup, indent=2) + '\n')
    with open(setup['vocab_path']) as vocab_file:
        vocab = json.load(vocab_file)
    n_class = setup['n_class']
    # Setup a model
    if setup['model'] == 'rnn':
        Encoder = nets.RNNEncoder
    elif setup['model'] == 'cnn':
        Encoder = nets.CNNEncoder
    elif setup['model'] == 'bow':
        Encoder = nets.BOWMLPEncoder
    encoder = Encoder(n_layers=setup['layer'], n_vocab=len(vocab),
                      n_units=setup['unit'], dropout=setup['dropout'])
    model = nets.TextClassifier(encoder, n_class)
    chainer.serializers.load_npz(setup['model_path'], model)
    model.to_device(device)  # Copy the model to the device
    return model, vocab, setup
def run_online(device):
    # Predict a label for each stdin line, one example at a time.
    # NOTE(review): relies on module-level globals ``model``, ``vocab``
    # and ``setup`` assigned under __main__ — confirm before reuse.
    print('Enter inputs for Online Predictions')
    for l in sys.stdin:
        l = l.strip()
        if not l:
            print('# blank line')
            continue
        text = nlp_utils.normalize_text(l)
        words = nlp_utils.split_text(text, char_based=setup['char_based'])
        xs = nlp_utils.transform_to_array([words], vocab, with_label=False)
        xs = nlp_utils.convert_seq(xs, device=device, with_label=False)
        # Inference only: no train-mode behaviour, no gradient graph.
        with chainer.using_config('train', False), chainer.no_backprop_mode():
            prob = model.predict(xs, softmax=True)[0]
        answer = int(model.xp.argmax(prob))
        score = float(prob[answer])
        # One "label<TAB>score<TAB>words" line per example.
        print('{}\t{:.4f}\t{}'.format(answer, score, ' '.join(words)))
def run_batch(device, batchsize=64):
    # Predict labels for stdin lines in batches of *batchsize*.
    # NOTE(review): relies on module-level globals ``model``, ``vocab``
    # and ``setup`` assigned under __main__ — confirm before reuse.
    def predict_batch(words_batch):
        # Vectorise the batch, run inference without gradients, then print
        # one "label<TAB>score<TAB>words" line per example.
        xs = nlp_utils.transform_to_array(words_batch, vocab, with_label=False)
        xs = nlp_utils.convert_seq(xs, device=device, with_label=False)
        with chainer.using_config('train', False), chainer.no_backprop_mode():
            probs = model.predict(xs, softmax=True)
        answers = model.xp.argmax(probs, axis=1)
        scores = probs[model.xp.arange(answers.size), answers].tolist()
        for words, answer, score in zip(words_batch, answers, scores):
            print('{}\t{:.4f}\t{}'.format(answer, score, ' '.join(words)))

    batch = []
    print('Enter inputs for Batch Predictions')
    for l in sys.stdin:
        l = l.strip()
        if not l:
            # A blank line flushes the pending batch.
            if batch:
                predict_batch(batch)
                batch = []
            print('# blank line')
            continue
        text = nlp_utils.normalize_text(l)
        words = nlp_utils.split_text(text, char_based=setup['char_based'])
        batch.append(words)
        if len(batch) >= batchsize:
            predict_batch(batch)
            batch = []
    # Flush whatever is left at EOF.
    if batch:
        predict_batch(batch)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Chainer example: Text Classification')
    parser.add_argument('--device', '-d', type=str, default='-1',
                        help='Device specifier. Either ChainerX device '
                        'specifier or an integer. If non-negative integer, '
                        'CuPy arrays with specified device id are used. If '
                        'negative integer, NumPy arrays are used')
    parser.add_argument('--model-setup', required=True,
                        help='Model setup dictionary.')
    group = parser.add_argument_group('deprecated arguments')
    group.add_argument('--gpu', '-g', dest='device',
                       type=int, nargs='?', const=0,
                       help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()
    device = chainer.get_device(args.device)
    device.use()
    model, vocab, setup = setup_model(device, args.model_setup)
    # NOTE(review): CPU (numpy backend) runs the one-at-a-time path and
    # GPU runs the batched path — confirm this dispatch is intended and
    # not inverted.
    if device.xp is numpy:
        run_online(device)
    else:
        run_batch(device)
|
from __future__ import print_function
__copyright__ = """
Copyright 2016 Lukasz Tracewski
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
__Modifications_copyright__ = """
Copyright 2019 Samapriya Roy
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
"""
Modifications to file:
- Uses selenium based upload instead of simple login
- Removed multipart upload
- Added poster for streaming upload
"""
import ast
import csv
import getpass
import glob
import logging
import os
import sys
lp = os.path.dirname(os.path.realpath(__file__))
sys.path.append(lp)
import time
import subprocess
import json
import manifest_lib
import pandas as pd
import ee
import requests
import retrying
from google.cloud import storage
from .metadata_loader import load_metadata_from_csv, validate_metadata_from_csv
from selenium import webdriver
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from requests_toolbelt import MultipartEncoder
ee.Initialize()
def selupload(
    user,
    source_path,
    destination_path,
    manifest=None,
    metadata_path=None,
    nodata_value=None,
    bucket_name=None,
):
    """Upload every .tif in *source_path* into an Earth Engine image collection.

    Args:
        user: Google account email. When given, images are staged through the
            Code Editor upload endpoint via a selenium-authenticated session;
            when None, they are staged through Google Cloud Storage instead.
        source_path: local folder containing the GeoTIFFs.
        destination_path: full EE collection path (e.g. users/me/mycollection).
        manifest: key selecting one of the manifest templates in manifest_lib.
        metadata_path: CSV keyed by "id_no" whose columns become asset properties.
        nodata_value: value recorded as missing data in the asset request.
        bucket_name: GCS bucket used when user is None.

    Side effects: prompts for a password (user mode), creates the collection if
    needed, writes data.json next to this module, shells out to the
    `earthengine` CLI per image, and logs failures to failed_upload.csv.
    """
    # NOTE(review): nothing below ever adds to this dict, so the final
    # failed-task check iterates an empty mapping — confirm intent.
    submitted_tasks_id = {}
    __verify_path_for_upload(destination_path)
    path = os.path.join(os.path.expanduser(source_path), "*.tif")
    all_images_paths = glob.glob(path)
    if len(all_images_paths) == 0:
        # NOTE(review): print() does not do %-interpolation; the path is
        # printed as a second tuple element here.
        print("%s does not contain any tif images.", path)
        sys.exit(1)
    metadata = load_metadata_from_csv(metadata_path) if metadata_path else None
    if user is not None:
        # Interactive password prompt; session cookies come from selenium.
        password = getpass.getpass()
        google_session = __get_google_auth_session(user, password)
    else:
        # GCS staging path: relies on application-default credentials.
        storage_client = storage.Client()
    __create_image_collection(destination_path)
    # Skip anything already present in the destination collection.
    images_for_upload_path = __find_remaining_assets_for_upload(
        all_images_paths, destination_path
    )
    no_images = len(images_for_upload_path)
    if no_images == 0:
        print("No images found that match %s. Exiting...", path)
        sys.exit(1)
    failed_asset_writer = FailedAssetsWriter()
    for current_image_no, image_path in enumerate(images_for_upload_path):
        print(
            "Processing image "
            + str(current_image_no + 1)
            + " out of "
            + str(no_images)
            + ": "
            + str(image_path)
        )
        filename = __get_filename_from_path(path=image_path)
        asset_full_path = destination_path + "/" + filename
        if metadata and not filename in metadata:
            print(
                "No metadata exists for image "
                + str(filename)
                + " : it will not be ingested"
            )
            failed_asset_writer.writerow([filename, 0, "Missing metadata"])
            continue
        properties = metadata[filename] if metadata else None
        # Select the manifest template matching the requested product type.
        if manifest == "PSO":
            data = manifest_lib.data_pso
        elif manifest == "PSO_DN":
            data = manifest_lib.data_psodn
        elif manifest == "PSO_V":
            data = manifest_lib.data_psov
        elif manifest == "PS4B":
            data = manifest_lib.data_ps4b
        elif manifest == "PS4B_SR":
            data = manifest_lib.data_ps4bsr
        elif manifest == "PS4B_DN":
            data = manifest_lib.data_ps4bdn
        elif manifest == "PS3B":
            data = manifest_lib.data_ps3b
        elif manifest == "PS3B_DN":
            data = manifest_lib.data_ps3bdn
        elif manifest == "PS3B_V":
            data = manifest_lib.data_ps3bv
        elif manifest == "REO":
            data = manifest_lib.data_reo
        elif manifest == "REO_V":
            data = manifest_lib.data_reov
        else:
            print("No Manifest Provided")
            sys.exit()
        try:
            # Stage the file and obtain its gs:// id.
            if user is not None:
                gsid = __upload_file_gee(session=google_session, file_path=image_path)
            else:
                gsid = __upload_file_gcs(storage_client, bucket_name, image_path)
            # NOTE(review): asset_request is built but never used below —
            # ingestion goes through the CLI manifest instead.
            asset_request = __create_asset_request(
                asset_full_path, gsid, properties, nodata_value
            )
            # Column dtypes decide how each CSV cell is coerced into the manifest.
            df = pd.read_csv(metadata_path)
            stringcol = list(df.select_dtypes(include=["object"]).columns)
            intcol = list(df.select_dtypes(include=["int64"]).columns)
            floatcol = list(df.select_dtypes(include=["float64"]).columns)
            with open(metadata_path, "r") as f:
                reader = csv.DictReader(f, delimiter=",")
                for i, line in enumerate(reader):
                    # Match the CSV row whose id_no equals the file stem.
                    if line["id_no"] == os.path.basename(image_path).split(".")[0]:
                        # NOTE(review): key/value are unused; the three inner
                        # loops re-run once per template property key.
                        for key, value in data["properties"].items():
                            for integer in intcol:
                                try:
                                    data["properties"][integer] = int(line[integer])
                                except Exception as e:
                                    print(e)
                            for s in stringcol:
                                try:
                                    data["properties"][s] = str(line[s])
                                except Exception as e:
                                    print(e)
                            # NOTE(review): this loop variable shadows the
                            # open file handle `f` above.
                            for f in floatcol:
                                try:
                                    data["properties"][f] = float(line[f])
                                except Exception as e:
                                    print(e)
                        data["id"] = destination_path + "/" + line["id_no"]
                        data["tilesets"][0]["sources"][0]["primaryPath"] = gsid
                        json_data = json.dumps(data)
                        # Write the per-image manifest, then hand it to the CLI.
                        with open(os.path.join(lp, "data.json"), "w") as outfile:
                            json.dump(data, outfile)
                        subprocess.call(
                            "earthengine --no-use_cloud_api upload image --manifest "
                            + '"'
                            + os.path.join(lp, "data.json")
                            + '"',
                            shell=True,
                        )
        except Exception as e:
            # Best-effort: record the failure and continue with the next image.
            print("Upload of " + str(filename) + " has failed.")
            failed_asset_writer.writerow([filename, 0, str(e)])
    __check_for_failed_tasks_and_report(
        tasks=submitted_tasks_id, writer=failed_asset_writer
    )
    failed_asset_writer.close()
def __create_asset_request(asset_full_path, gsid, properties, nodata_value):
    """Build the Earth Engine ingestion request dict for one staged image."""
    request = {}
    request["id"] = asset_full_path
    request["tilesets"] = [{"sources": [{"primaryPath": gsid, "additionalPaths": []}]}]
    request["bands"] = []
    request["properties"] = properties
    request["missingData"] = {"value": nodata_value}
    return request
def __verify_path_for_upload(path):
    """Exit with status 1 unless the parent folder of *path* exists in EE."""
    parent_folder = path[: path.rfind("/")]
    if not ee.data.getInfo(parent_folder):
        message = (
            str(path)
            + " is not a valid destination. Make sure full path is provided e.g. users/user/nameofcollection "
            "or projects/myproject/myfolder/newcollection and that you have write access there."
        )
        print(message)
        sys.exit(1)
def __find_remaining_assets_for_upload(path_to_local_assets, path_remote):
    """Drop local assets whose names are already present in the remote collection.

    Exits when the collection already holds every candidate asset; returns the
    full input list when the collection is missing or empty.
    """
    local_names = [__get_filename_from_path(p) for p in path_to_local_assets]
    if not __collection_exist(path_remote):
        return path_to_local_assets
    remote_names = __get_asset_names_from_collection(path_remote)
    if len(remote_names) == 0:
        return path_to_local_assets
    names_left = set(local_names) - set(remote_names)
    if len(names_left) == 0:
        print(
            "Collection already exists and contains all assets provided for upload. Exiting ..."
        )
        sys.exit(1)
    print(
        "Collection already exists. "
        + str(len(names_left))
        + " assets left for upload to "
        + str(path_remote)
    )
    return [
        p
        for p in path_to_local_assets
        if __get_filename_from_path(p) in names_left
    ]
def retry_if_ee_error(exception):
    """Predicate for `retrying`: retry only on Earth Engine API errors."""
    should_retry = isinstance(exception, ee.EEException)
    return should_retry
@retrying.retry(
    retry_on_exception=retry_if_ee_error,
    wait_exponential_multiplier=1000,
    wait_exponential_max=4000,
    stop_max_attempt_number=3,
)
def __start_ingestion_task(asset_request):
    """Start a server-side ingestion for *asset_request*; return its task id."""
    new_task_id = ee.data.newTaskId(1)[0]
    ee.data.startIngestion(new_task_id, asset_request)
    return new_task_id
def __validate_metadata(path_for_upload, metadata_path):
    """Check that every .tif in *path_for_upload* has a metadata key.

    Prints the names missing from the metadata CSV and, when validation
    reported errors, asks the operator whether to continue (default: abort).
    """
    validation_result = validate_metadata_from_csv(metadata_path)
    # NOTE(review): `result.keys` is read as an attribute, not called —
    # assumes the validator yields objects with a `keys` field; confirm.
    keys_in_metadata = {result.keys for result in validation_result}
    images_paths = glob.glob(os.path.join(path_for_upload, "*.tif*"))
    keys_in_data = {__get_filename_from_path(path) for path in images_paths}
    missing_keys = keys_in_data - keys_in_metadata
    if missing_keys:
        # BUG FIX: the original computed len(missing_keys) + "<message>"
        # inside str(), which raises TypeError (int + str). Convert the
        # count to a string before concatenating.
        print(
            str(len(missing_keys))
            + " images does not have a corresponding key in metadata"
        )
        print("\n".join(e for e in missing_keys))
    else:
        print("All images have metadata available")
    if not validation_result.success:
        print('Validation finished with errors. Type "y" to continue, default NO: ')
        choice = input().lower()
        if choice not in ["y", "yes"]:
            print("Application will terminate")
            exit(1)
def __extract_metadata_for_image(filename, metadata):
    """Return metadata[filename], or None (with a notice) when absent."""
    try:
        return metadata[filename]
    except KeyError:
        print("Metadata for " + str(filename) + " not found")
        return None
@retrying.retry(
    retry_on_exception=retry_if_ee_error,
    wait_exponential_multiplier=1000,
    wait_exponential_max=4000,
    stop_max_attempt_number=3,
)
def __get_google_auth_session(username, password):
    """Log into the Earth Engine Code Editor with headless Firefox and return
    a requests.Session carrying the resulting Google auth cookies.

    The fixed sleeps pace the scripted login; the element lookups depend on
    Google's sign-in page structure and will break if that page changes.
    NOTE(review): the find_element_by_* API was removed in Selenium 4 —
    this code assumes Selenium 3; confirm the pinned version.
    """
    ee.Initialize()
    options = Options()
    options.add_argument("-headless")
    authorization_url = "https://code.earthengine.google.com"
    uname = str(username)
    passw = str(password)
    # geckodriver binary is shipped next to this module; pick per-OS name.
    # NOTE(review): on any os.name other than "nt"/"posix", `driver` is
    # never bound and the code below would raise NameError.
    if os.name == "nt":
        driver = Firefox(
            executable_path=os.path.join(lp, "geckodriver.exe"), firefox_options=options
        )
    elif os.name == "posix":
        driver = Firefox(
            executable_path=os.path.join(lp, "geckodriver"), firefox_options=options
        )
    driver.get(authorization_url)
    time.sleep(5)
    # Step 1: email page.
    username = driver.find_element_by_xpath('//*[@id="identifierId"]')
    username.send_keys(uname)
    driver.find_element_by_id("identifierNext").click()
    time.sleep(5)
    # print('username')
    # Step 2: password page (send_keys returns None; passw is just rebound).
    passw = driver.find_element_by_name("password").send_keys(passw)
    driver.find_element_by_id("passwordNext").click()
    time.sleep(5)
    # print('password')
    # Step 3: optional consent screens — absent on already-authorized
    # accounts, hence the blanket pass.
    try:
        driver.find_element_by_xpath(
            "//div[@id='view_container']/form/div[2]/div/div/div/ul/li/div/div[2]/p"
        ).click()
        time.sleep(5)
        driver.find_element_by_xpath(
            "//div[@id='submit_approve_access']/content/span"
        ).click()
        time.sleep(5)
    except Exception as e:
        pass
    # Transplant the browser cookies into a plain requests session.
    cookies = driver.get_cookies()
    session = requests.Session()
    for cookie in cookies:
        session.cookies.set(cookie["name"], cookie["value"])
    driver.close()
    return session
def __get_upload_url(session):
    """Ask the Code Editor for a one-time upload URL; None on parse failure."""
    response = session.get("https://code.earthengine.google.com/assets/upload/geturl")
    try:
        # The endpoint answers with a Python-literal dict; parse it safely.
        parsed = ast.literal_eval(response.text)
        return parsed["url"]
    except Exception as err:
        print(err)
@retrying.retry(
    retry_on_exception=retry_if_ee_error,
    wait_exponential_multiplier=1000,
    wait_exponential_max=4000,
    stop_max_attempt_number=3,
)
def __upload_file_gee(session, file_path):
    """Stream *file_path* to the Earth Engine upload endpoint.

    Returns the gs:// id reported by the endpoint, or None on failure
    (the error is printed, matching the module's best-effort style).
    """
    with open(file_path, "rb") as f:
        file_name = os.path.basename(file_path)
        upload_url = __get_upload_url(session)
        # Removed an unused `files = {"file": f}` local from the original;
        # the MultipartEncoder below is what actually carries the payload.
        m = MultipartEncoder(fields={"image_file": (file_name, f)})
        try:
            resp = session.post(
                upload_url, data=m, headers={"Content-Type": m.content_type}
            )
            gsid = resp.json()[0]
            return gsid
        except Exception as e:
            print(e)
@retrying.retry(
    retry_on_exception=retry_if_ee_error,
    wait_exponential_multiplier=1000,
    wait_exponential_max=4000,
    stop_max_attempt_number=3,
)
def __upload_file_gcs(storage_client, bucket_name, image_path):
    """Copy *image_path* into the GCS bucket; return the resulting gs:// URL."""
    target_bucket = storage_client.get_bucket(bucket_name)
    blob_name = __get_filename_from_path(path=image_path)
    target_bucket.blob(blob_name).upload_from_filename(image_path)
    return "gs://" + bucket_name + "/" + blob_name
def __periodic_check(current_image, period, tasks, writer):
    """Every *period* images: report failed tasks, then throttle on task count."""
    at_checkpoint = (current_image + 1) % period == 0
    if not at_checkpoint:
        return
    print("Periodic check")
    __check_for_failed_tasks_and_report(tasks=tasks, writer=writer)
    # Time to check how many tasks are running!
    __wait_for_tasks_to_complete(waiting_time=10, no_allowed_tasks_running=20)
def __check_for_failed_tasks_and_report(tasks, writer):
    """Write every FAILED task from *tasks* to *writer*, then clear the map."""
    if not tasks:
        return
    for status in ee.data.getTaskStatus(tasks.keys()):
        if status["state"] != "FAILED":
            continue
        task_id = status["id"]
        filename = tasks[task_id]
        error_message = status["error_message"]
        writer.writerow([filename, task_id, error_message])
        print(
            "Ingestion of image "
            + str(filename)
            + " has failed with message "
            + str(error_message)
        )
    tasks.clear()
def __get_filename_from_path(path):
    """Return the base name of *path* without directories or extension."""
    base = os.path.basename(os.path.normpath(path))
    stem, _extension = os.path.splitext(base)
    return stem
def __get_number_of_running_tasks():
    """Count Earth Engine tasks currently in the RUNNING state."""
    return sum(1 for task in ee.data.getTaskList() if task["state"] == "RUNNING")
def __wait_for_tasks_to_complete(waiting_time, no_allowed_tasks_running):
    """Poll until at most *no_allowed_tasks_running* tasks remain RUNNING."""
    while True:
        tasks_running = __get_number_of_running_tasks()
        if tasks_running <= no_allowed_tasks_running:
            return
        logging.info(
            "Number of running tasks is %d. Sleeping for %d s until it goes down to %d",
            tasks_running,
            waiting_time,
            no_allowed_tasks_running,
        )
        time.sleep(waiting_time)
def __collection_exist(path):
    """True when an Earth Engine asset already exists at *path*."""
    return bool(ee.data.getInfo(path))
def __create_image_collection(full_path_to_collection):
    """Create the destination ImageCollection unless it already exists."""
    if __collection_exist(full_path_to_collection):
        print("Collection " + str(full_path_to_collection) + " already exists")
        return
    ee.data.createAsset(
        {"type": ee.data.ASSET_TYPE_IMAGE_COLL}, full_path_to_collection
    )
    print("New collection " + str(full_path_to_collection) + " created")
def __get_asset_names_from_collection(collection_path):
    """List the bare asset names (no path prefix) inside *collection_path*."""
    listing = ee.data.getList(params={"id": collection_path})
    return [os.path.basename(entry["id"]) for entry in listing]
class FailedAssetsWriter(object):
    """Lazily-created CSV log of assets that failed to upload or ingest.

    The file ("failed_upload.csv" in the working directory) is only created
    on the first writerow() call, so no empty log is left behind when every
    asset succeeds.
    """

    def __init__(self):
        # File handle and csv writer are created lazily in writerow().
        self.initialized = False

    def writerow(self, row):
        """Append one [filename, task_id, error_msg] record, creating the
        log file with a header row on first use."""
        if not self.initialized:
            if sys.version_info > (3, 0):
                # newline="" is required by the csv module on Python 3;
                # without it each record gains an extra blank line on
                # Windows (csv.writer emits its own \r\n terminators).
                self.failed_upload_file = open("failed_upload.csv", "w", newline="")
            else:
                self.failed_upload_file = open("failed_upload.csv", "wb")
            self.failed_upload_writer = csv.writer(self.failed_upload_file)
            self.failed_upload_writer.writerow(["filename", "task_id", "error_msg"])
            self.initialized = True
        self.failed_upload_writer.writerow(row)

    def close(self):
        """Close the log file if it was ever opened; safe to call repeatedly."""
        if self.initialized:
            self.failed_upload_file.close()
            self.initialized = False
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from xlwt import Workbook,easyxf
# Top row (0-based) of the first timetable cell. Do NOT set lower than 3!
start_pos = 9
# Rotated, bold, heavily-bordered cells holding the day names in column 0.
day_style = easyxf(
    'font: bold on;'
    'align: vertical center, horizontal center, rotation 90;'
    'borders: left 2, right 2, top 2, bottom 2;'
)
# Bold centered cells for the period (pair) numerals in column 1.
pair_style = easyxf(
    'font: bold on;'
    'align: vertical center, horizontal center;'
    'borders: left 2, right 2, top 2, bottom 2;'
)
# Upper half of a subject cell (open bottom border so the pair reads as one box).
sybject_style1 = easyxf(
    'font: height 220;'
    'align: horizontal center;'
    'borders: left 1, right 1, top 1;'
)
# Lower half of a subject cell (audience/room line, open top border).
sybject_style2 = easyxf(
    'font: height 220;'
    'align: vertical top, horizontal center;'
    'borders: left 1, right 1, bottom 1;'
)
# Header cells with the group names.
groups_style = easyxf(
    'font: bold on;'
    'align: vertical center, horizontal center;'
    'borders: left 2, right 2, top 2, bottom 2;'
)
# Small labels next to the group/pair header rows.
group_pair_style = easyxf(
    'font: height 160;'
    'borders: left 1, right 1, top 1, bottom 1;'
)
def SetDays():
    """Lay out the day/period grid: five 24-row day blocks, each split into
    six periods of four rows, writing labels into columns 0 and 1 of `ws`."""
    day_row = start_pos
    pair_row = start_pos
    days = [u'Понеділок', u'Вівторок', u'Середа', u'Четвер', u'Пятниця']
    pairs = [u'I', u'II', u'III', u'IV', u'V', u'VI']
    ws.col(0).width = 0x470
    ws.col(1).width = 0x470
    for day_name in days:
        ws.write_merge(day_row, day_row + 23, 0, 0, day_name, day_style)
        for pair_name in pairs:
            ws.write_merge(pair_row, pair_row + 3, 1, 1, pair_name, pair_style)
            pair_row += 4
        day_row += 24
def SetGroups(groups):
    """Write the group-name header row into `ws` and size each group column."""
    header_row = start_pos
    column = 2
    ws.write(header_row - 2, 1, u'групи', group_pair_style)
    ws.write(header_row - 1, 1, u'пари', group_pair_style)
    for group_name in groups:
        ws.write_merge(header_row - 2, header_row - 1, column, column, unicode(group_name), groups_style)
        ws.col(column).width = 0x1950
        column += 1
def SetSubject(day, pair, group, pairtype, sybject, audience):
    """Write one subject + audience entry into the timetable sheet `ws`.

    day: 0-4 (Mon-Fri); pair: 0-5 (period); group: worksheet column index.
    pairtype 1 = upper-week half, 2 = lower-week half, 3 = full merged pair.
    """
    start_day_pos = start_pos + day * 24
    start_pair_pos = pair * 4
    # BUG FIX: the original tested `pairtype == (1 or 3)`, which evaluates to
    # `pairtype == 1` because `(1 or 3)` is just 1. The intended offset is 0
    # for types 1 and 3 (the value is only consumed on the 1/2 branch, so the
    # defect was latent for type 3). Also dropped a dead duplicate assignment.
    start_subj_pos = 0 if pairtype in (1, 3) else 2
    if pairtype == 1 or pairtype == 2:
        ws.write(start_day_pos + start_pair_pos + start_subj_pos, group, sybject, sybject_style1)
        ws.write(start_day_pos + start_pair_pos + start_subj_pos + 1, group, audience, sybject_style2)
    elif pairtype == 3:
        ws.write_merge(start_day_pos + start_pair_pos, start_day_pos + start_pair_pos + 1, group, group, sybject, sybject_style1)
        ws.write_merge(start_day_pos + start_pair_pos + 2, start_day_pos + start_pair_pos + 3, group, group, audience, sybject_style2)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.base_user import AbstractBaseUser
from .managers import UserManager
class User(AbstractBaseUser, PermissionsMixin):
    """Custom auth user identified by academic email — no username field."""

    email = models.EmailField(
        unique=True,
        help_text=("The user's academic email.")
    )
    # Custom manager providing create_user/create_superuser for this model.
    objects = UserManager()
    # Authenticate with the email address instead of a username.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = 'user'
        verbose_name_plural = 'users'

    # NOTE: fields declared after Meta are still regular model fields.
    date_joined = models.DateTimeField(
        auto_now_add=True,
        help_text=("The date the user joined")
    )
    is_active = models.BooleanField(
        default=True,
        help_text=("The user active state")
    )
    # Gender is stored as a signed int; 0 (UNDEFINED) is the default.
    MALE = -1
    UNDEFINED = 0
    FEMALE = 1
    GENDER_CHOICES = (
        (MALE, 'Male'),
        (UNDEFINED, 'Undefined'),
        (FEMALE, 'Female')
    )
    # NOTE(review): no on_delete argument — this only works on Django < 2.0;
    # confirm the project's pinned Django version.
    school = models.ForeignKey('unimeet.School')
    gender = models.IntegerField(
        default=0,
        choices=GENDER_CHOICES,
        help_text=("The user's gender, by default UNDEFINED, unless otherwise "
                   "explicitly specified by the user.")
    )
    interestedInGender = models.IntegerField(
        default=0,
        choices=GENDER_CHOICES,
        help_text=("The gender that the user is interested in talking to, by "
                   "default UNDEFINED.")
    )
    interestedInSchools = models.ManyToManyField('unimeet.School', related_name='user_interested_schools')
    token = models.CharField(
        default='',
        max_length=40,
        db_index=True,
        help_text=("The user's authentication token for unimeet")
    )
    welcomeToken = models.CharField(
        default='',
        max_length=255,
        db_index=True,
        help_text=('The one-time authentication token used in welcome link.')
    )
    registrationIP = models.GenericIPAddressField(
        default='127.0.0.1',
        db_index=True,
        help_text=('The IP that the user used for registration.')
    )
|
# Word counting exercise
# Pass a string as the parameter; the function returns its length (word count).
#a = 'python is too hard'
#print(a.count(''))
def word_count(word):
    """Return the number of whitespace-separated words in *word*.

    Uses split() with no argument so runs of spaces do not create empty
    "words" and the empty string counts as 0 — split(" ") would return
    [''] and miscount it as 1.
    """
    return len(word.split())
# Demo: prints the word count of the sample sentence.
print(word_count('python is too hard'))
##search
def search(string, word):
    """Return True when *word* occurs as a word/element of *string*.

    Accepts a str (split on single spaces), or a tuple/list (searched
    element-wise). Any other input type returns False.

    Fixes over the original:
    - tuples/lists were passed to .split(), raising AttributeError;
    - the str branch returned None and the list branch returned True
      unconditionally; *word* was never actually consulted.
    """
    if isinstance(string, str):
        tokens = string.split(" ")
    elif isinstance(string, (tuple, list)):
        tokens = list(string)
    else:
        return False
    return word in tokens
string = 'i wanna something to eat'
word = ({'A':'a','B':'b','C':'c'})
# BUG FIX: the original called search('A') with a single argument, which
# raises TypeError (search takes two). Pass the sample string plus the
# token to look for. NOTE(review): the intended second argument is
# ambiguous in the original — confirm what was meant to be searched.
print(search(string, 'A'))
|
# p4.py
#
# Project Euler, problem 4
#
# A palindromic number reads the same both ways. The largest palindrome made
# from the product of two 2-digit numbers is 9009 = 91*99.
#
# Find the largest palindrome made from the product of two 3-digit numbers.
# ALGORITHM
# Just brute force here. Go through all of the products of 3 digit numbers,
# check if it's bigger than the current biggest palindrome. If it is, check
# if it itself is a palindrome, and if so, replace the biggest palindrome.
# Some time is saved if the remaning products in a given loop will be less
# than the biggest palindrome.
def isPalindrome(n):
    '''Takes any single argument. Returns True or False, depending on whether
    it is a palindrome in its string-state.'''
    # Compare against the slice-reversed text (avoids shadowing the
    # builtin `reversed` the original used as a variable name).
    text = str(n)
    return text == text[::-1]
def biggestPalindromeFromProduct(min, max):
    '''Returns biggest numeric palindrome that is a product i*j with
    min <= j <= i <= max. Returns 0 if there are no palindromes.
    Raises ValueError when min > max or either bound is below 1.'''
    # FIX: the Python-2-only `raise ValueError, "..."` statement syntax is
    # replaced with the call form, which is valid on both Python 2 and 3.
    if min > max:
        raise ValueError("min must be equal to or less than max.")
    if max < 1 or min < 1:
        raise ValueError("Arguments must be 1 or greater")
    biggestPalindrome = 0
    for i in range(max, min - 1, -1):
        # No product in this row can beat the best found so far.
        if i**2 < biggestPalindrome:
            break
        for j in range(i, min - 1, -1):  # j <= i: visit each pair once
            product = i*j
            if product <= biggestPalindrome:
                break
            # The cheap integer comparison above already guarantees
            # product > biggestPalindrome here (redundant re-check removed).
            if isPalindrome(product):
                biggestPalindrome = product
    return biggestPalindrome
if __name__ == '__main__':
    # FIX: the Python-2 print statement is rewritten as a parenthesized
    # call, which behaves identically on Python 2 (one parenthesized
    # expression) and Python 3 (a print() call).
    print("The largest palindrome from products between "
          "%d and %d is %d" % (100, 999, biggestPalindromeFromProduct(100, 999)))
import csv
import os, sys
#import datetime
from django.utils import timezone
# Full path to your django project directory
VarDB_home = "/Users/User/Desktop/ITModule/VarDB/"
# name of your csv file
csv_file = "BRCA1_Django_test_data.csv"
# (FIX: the original assigned VarDB_home twice with the same value; the
# duplicate assignment was removed.)
os.chdir(VarDB_home)
# Settings must be configured and django.setup() run BEFORE any model import.
os.environ['DJANGO_SETTINGS_MODULE'] = 'mysite.settings'
import django
django.setup()
from django.db import models
from VarDB.models import * # imports the models
# Load each CSV row as a chain of related records. The save order matters:
# every child row references the record saved just before it.
with open(csv_file) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        # Patient: "Name" column is assumed to be exactly two tokens
        # (forename surname) — TODO confirm; a middle name would be dropped
        # and a single token would raise IndexError.
        p = Patient()
        p.forename = row['Name'].split()[0]
        p.surname = row['Name'].split()[1]
        p.registration_date = timezone.now()
        p.save()
        # Occurrence -> Patient. NOTE(review): the FK field appears to be
        # named after its own model (o.occurrence = p) — verify against
        # VarDB.models.
        o = Occurrence()
        o.occurrence = p
        o.stage = row['Stage']
        o.description = row['Description']
        o.age_occurrence = row['Age']
        o.save()
        # Investigation -> Occurrence.
        i = Investigation()
        i.investigation = o
        i.platform = row['Sequencer']
        i.investigation_date = timezone.now()
        i.save()
        # Gene -> Investigation.
        g = Gene()
        g.gene = i
        g.symbol = row['Gene']
        g.save()
        # Refseq transcript -> Gene.
        r = Refseq()
        r.refseq = g
        r.reference = row['Transcript']
        r.save()
        # Variant -> Refseq. NOTE: "patogenicity" spelling presumably
        # matches the model field definition — do not "fix" it here alone.
        v = Variant()
        v.variant = r
        v.cDNA = row['Variant cDNA']
        v.protein = row['Variant Protein']
        v.genome = row['Variant Genome']
        v.patogenicity = row['Pathogenicity']
        v.date_classified = timezone.now()
        v.save()
import os
from Config.api import getWorkDir
# Module-level caches filled by init(); exposed via getFiles()/getDirs().
files = []
dirs = []
def init():
    """Rescan the working directory tree, refilling the module-level caches.

    Clears `files` and `dirs` first so repeated calls do not accumulate
    duplicate entries.
    """
    files.clear()
    dirs.clear()
    for root, ds, fs in os.walk(getWorkDir()):
        for name in fs:
            # FIX: os.path.join instead of the original hard-coded '\\'
            # separator, so the paths are correct on non-Windows platforms
            # too (identical output on Windows).
            files.append(os.path.join(root, name))
        for name in ds:
            dirs.append(os.path.join(root, name))
def getFiles(initFlag=True):
    """Return the cached file list; rescan first unless initFlag is falsy."""
    if not initFlag:
        return files
    init()
    return files
def getDirs(initFlag=True):
    """Return the cached directory list; rescan first unless initFlag is falsy."""
    if not initFlag:
        return dirs
    init()
    return dirs
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.