text stringlengths 38 1.54M |
|---|
"""Export the stock Keras VGG16 architecture (JSON) and weights (HDF5) to disk."""
from keras.applications.vgg16 import VGG16

model = VGG16()
# Architecture and weights are saved separately so they can be reloaded
# independently later (model_from_json + load_weights).
with open("vgg16.json", "w") as json_file:
    json_file.write(model.to_json())
model.save_weights("vgg16.h5")
|
#
# -------------------------------------------------------------------------
# Copyright (c) 2018 Intel Corporation Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
import copy
import unittest
import mock
import yaml
from conductor.solver.optimizer.constraints import hpa
from conductor.solver.utils import constraint_engine_interface as cei
class TestHPA(unittest.TestCase):
    """Unit tests for the HPA solver constraint (ONAP Conductor)."""

    def setUp(self):
        # Fixture files: candidate VM list and an HPA constraint definition.
        req_json_file = './conductor/tests/unit/solver/candidate_list.json'
        hpa_json_file = './conductor/tests/unit/solver/hpa_constraints.json'
        hpa_json = yaml.safe_load(open(hpa_json_file).read())
        req_json = yaml.safe_load(open(req_json_file).read())
        # First (and only) constraint entry: {constraint_id: constraint_info}.
        (constraint_id, constraint_info) = \
            list(hpa_json["conductor_solver"]["constraints"][0].items())[0]
        c_property = constraint_info['properties']
        # NOTE(review): this also reads 'properties', same as c_property above --
        # presumably it should read the constraint's type field; confirm against
        # the fixture schema and hpa.HPA's constructor.
        constraint_type = constraint_info['properties']
        constraint_demands = list()
        parsed_demands = constraint_info['demands']
        if isinstance(parsed_demands, list):
            for d in parsed_demands:
                constraint_demands.append(d)
        self.hpa = hpa.HPA(constraint_id,
                           constraint_type,
                           constraint_demands,
                           _properties=c_property)
        self.candidate_list = req_json['candidate_list']

    def tearDown(self):
        pass

    @mock.patch.object(hpa.LOG, 'error')
    @mock.patch.object(hpa.LOG, 'info')
    @mock.patch.object(cei.LOG, 'debug')
    def test_solve(self, debug_mock, info_mock, error_mock):
        """solve() returns None when the engine yields nothing, otherwise the
        last per-demand candidate list returned by the engine client."""
        flavor_infos = [{"flavor_label_1": {"flavor-id": "vim-flavor-id1",
                                            "flavor-name": "vim-flavor-1"}},
                        {"flavor_label_2": {"flavor-id": "vim-flavor-id2",
                                            "flavor-name": "vim-flavor-2"}}]
        self.maxDiff = None
        hpa_candidate_list_1 = copy.deepcopy(self.candidate_list)
        hpa_candidate_list_1[1]['flavor_map'] = {}
        hpa_candidate_list_1[1]['flavor_map'].update(flavor_infos[0])
        hpa_candidate_list_2 = copy.deepcopy(hpa_candidate_list_1)
        hpa_candidate_list_2[1]['flavor_map'].update(flavor_infos[1])
        mock_decision_path = mock.MagicMock()
        mock_decision_path.current_demand.name = 'vG'
        request_mock = mock.MagicMock()
        client_mock = mock.MagicMock()
        client_mock.call.return_value = None
        request_mock.cei = cei.ConstraintEngineInterface(client_mock)
        # Engine client returns None -> solve() yields None.
        self.assertEqual(None, self.hpa.solve(mock_decision_path,
                                              self.candidate_list,
                                              request_mock))
        # Engine client returns candidates per call -> last result propagated.
        client_mock.call.side_effect = [hpa_candidate_list_1,
                                        hpa_candidate_list_2]
        self.assertEqual(hpa_candidate_list_2,
                         self.hpa.solve(mock_decision_path,
                                        self.candidate_list, request_mock))


if __name__ == "__main__":
    unittest.main()
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
    """Top-down merge sort with verbose tracing prints."""

    def merge_sort(self, list):
        """Sort *list* with recursive merge sort and return the sorted copy."""
        # Base case: zero or one element is already sorted.
        if len(list) <= 1:
            print("one or zero length - at bottom ", end=': ')
            print(list)
            return list
        # Ceiling split point: (n + 1) // 2 == n // 2 + n % 2.
        mid_index = (len(list) + 1) // 2
        left, right = list[:mid_index], list[mid_index:]
        print("list", list, "left", left, "right", right, end=': ')
        print("")
        # Divide both halves, then conquer by merging the sorted results.
        return self.merge(self.merge_sort(left), self.merge_sort(right))

    def merge(self, left, right):
        """Merge two sorted lists; consumes both inputs from the front."""
        print("in merge ")
        merged = []
        while left and right:
            # Stable: ties taken from the left list first.
            smaller = left if left[0] <= right[0] else right
            merged.append(smaller.pop(0))
        # At this point one of the two lists is empty; append the leftovers.
        return merged + left + right

    # test just the splitting up part
    def split_list(self, l):
        """Return (left, right) halves, the extra element going to the left."""
        cut = (len(l) + 1) // 2
        return l[:cut], l[cut:]
|
#!/usr/bin/env python
"""Smoke-test a fan wired to GPIO 21: run it for 20 seconds, then shut down."""
# Fixed: the original had a stray trailing backslash after print("Loading...")
# which glued the statement to the import line and made the file a SyntaxError.
print("Loading...")

from gpiozero import DigitalOutputDevice
from time import sleep

pin = DigitalOutputDevice(21)  # fan control pin
print("Turning fan on for 20 seconds, turn potentiometer to test.")
pin.on()
sleep(20)
print("Turning fan off")
pin.off()
print("Exiting")
pin.close()  # release the GPIO line before exiting
exit()
|
import numpy as np
import scipy.io
from scipy.optimize import least_squares
import scipy.stats

# Camera calibration: build the intrinsic matrix K from the stored focal
# length, pixel size and principal point (loaded from a MATLAB .mat file).
cam_data = scipy.io.loadmat('cameraParameters.mat')
f = cam_data['focal']
pixelSize = cam_data['pixelSize']
pp = cam_data['pp']
K = np.array([[f[0][0]/pixelSize[0][0], 0, pp[0][0]], [ 0 ,f[0][0]/pixelSize[0][0], pp[0][1]], [0, 0, 1]], dtype=np.float32)
# Canonical vanishing directions (x, y, z world axes) plus a zero row,
# expressed so a 3x4 rotation can be applied directly.
vp_dir = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0]], dtype=np.float32)
# Prior mixture weights: three vanishing-point edge models + outlier model.
P_m_prior = [0.13, 0.24, 0.13, 0.5]
# Gaussian noise model (std / mean) for angle errors.
sig = 0.5
mu = 0.0
def remove_polarity(x):
    '''
    :param x: the angle differences between the predicted normal direction and the gradient direction of a pixel.
              x is in shape [3,] which represent the normal direction with respect to the three edge models.
    :return: the minimal absolute value among x, x + pi and x - pi (polarity-free angle error)
    '''
    # Stack the three polarity candidates and keep the smallest magnitude.
    candidates = np.stack([x, x + np.pi, x - np.pi], axis=0)
    return np.min(np.abs(candidates), axis=0)
def homo2img_coord(x):
    """Convert a homogeneous point [x, y, w] to pixel coordinates [u, v]."""
    # Divide through by the homogeneous coordinate.
    return np.array([x[0] / x[2], x[1] / x[2]])
def angle2matrix(a, b, g):
    '''
    :param a: the rotation angle around z axis
    :param b: the rotation angle around y axis
    :param g: the rotation angle around x axis
    :return: 3x4 rotation matrix (last column is zero)
    '''
    # Precompute the trigonometric terms once for readability.
    ca, sa = np.cos(a), np.sin(a)
    cb, sb = np.cos(b), np.sin(b)
    cg, sg = np.cos(g), np.sin(g)
    return np.array([[ca * cb, -sa * cg + ca * sb * sg, sa * sg + ca * sb * cg, 0],
                     [sa * cb, ca * cg + sa * sb * sg, -ca * sg + sa * sb * cg, 0],
                     [-sb, -cb * sg, cb * cg, 0]],
                    dtype=np.float32)
def vector2matrix(S):
    '''
    :param S: the Cayley-Gibbs-Rodrigues representation (length-3 vector)
    :return: 3x4 rotation matrix R (last column is zero)
    '''
    # Work with S as a column vector.
    col = np.expand_dims(S, axis=1)
    sq_norm = np.dot(col.T, col)
    # Cayley transform: ((1 - |S|^2) I + 2 skew(S) + 2 S S^T) / (1 + |S|^2)
    rot = ((1 - sq_norm) * np.eye(3) + 2 * skew(col) + 2 * np.dot(col, col.T)) / (1 + sq_norm)
    # Append a zero translation column to make the matrix 3x4.
    return np.hstack([rot, np.zeros([3, 1], dtype=np.float32)])
def skew(a):
    """Return the 3x3 skew-symmetric (cross-product) matrix of column vector *a*."""
    ax, ay, az = a[0, 0], a[1, 0], a[2, 0]
    return np.array([[0, -az, ay],
                     [az, 0, -ax],
                     [-ay, ax, 0]])
def matrix2quaternion(T):
    """Convert the rotation part of *T* (top-left 3x3) to a unit quaternion [w, x, y, z]."""
    R = T[:3, :3]
    # The skew-symmetric part of R encodes sin(theta) * axis.
    skew_part = R - R.T
    axis = np.array([-skew_part[1, 2], skew_part[0, 2], -skew_part[0, 1]])
    axis_norm = np.linalg.norm(axis)
    sin_theta = axis_norm / 2
    cos_theta = (np.trace(R) - 1) / 2
    theta = np.arctan2(sin_theta, cos_theta)
    # eps keeps the division finite when theta == 0 (axis is then irrelevant).
    unit_axis = axis / (axis_norm + np.finfo(np.float32).eps)
    q = np.zeros(4)
    q[0] = np.cos(theta / 2)
    q[1:] = unit_axis * np.sin(theta / 2)
    return q
def matrix2vector(R):
    '''
    :param R: the camera rotation matrix
    :return: the Cayley-Gibbs-Rodrigues representation (quaternion vector part / scalar part)
    '''
    q = matrix2quaternion(R)
    return q[1:] / q[0]
def vp2dir(K, R, u):
    '''
    :param K: camera intrinsic matrix
    :param R: camera rotation matrix
    :param u: pixel location represented in homogeneous coordinate [x, y, 1]
    :return: the estimated normal direction for edge that pass through pixel u
    '''
    # Project the canonical vanishing directions (module-level vp_dir) into
    # the image.  Fixed: the original computed this identical line twice.
    vp_trans = K.dot(R).dot(vp_dir)
    # Homogeneous line through u and each vanishing point.
    edges = np.cross(vp_trans.transpose(), u)
    # Orientation of each edge line's normal.
    thetas_es = np.arctan2(edges[:, 1], edges[:, 0])
    return thetas_es
def down_sample(Gmag_, Gdir_):
    '''
    :param Gmag_: gradient magtitude of the original image
    :param Gdir_: gradient direction of the original image
    :return: subsampled gradient directions and the indices of the 2000
             strongest-gradient pixels to use in the EM algorithm
    '''
    # Keep every 5th pixel in both axes (offset 4).
    Gmag = Gmag_[4::5, 4::5]
    Gdir = Gdir_[4::5, 4::5]
    # Threshold at the 2001st-largest magnitude -> strictly-greater pixels win.
    sorted_mags = np.sort(Gmag.ravel())
    idx = np.argwhere(Gmag > sorted_mags[-2001])
    return Gdir, idx
def pro_mixture(a, b, g, idx, Gdir):
    '''
    :param a, b, g: camera rotation angles (z, y, x axes)
    :param idx: pixel indices selected by down_sample (on the 5x-subsampled grid)
    :param Gdir: subsampled gradient directions
    :return: p_image, the log-evidence of the image under the mixture model
    '''
    R = angle2matrix(a, b, g) # convert the angles into rotation matrix
    p_image = 0.0 # initialise posterior setting to zero
    # Per-pixel likelihoods: columns 0-2 are the three vanishing-point edge
    # models, column 3 is the uniform outlier model.
    P_ang=np.zeros(shape=(idx.shape[0],4))
    for i in range(idx.shape[0]):
        # pixel Location in original-image coordinates (idx is on the 5x grid)
        p=np.array([idx[i,1]*5+4,idx[i,0]*5+4,1],dtype=float)
        # theta and phi (pixel gradient)
        Theta=vp2dir(K, R, p)
        phi=Gdir[idx[i,0],idx[i,1]]
        error=remove_polarity(phi-Theta)
        # P ang: Gaussian likelihood of the angle error under each edge model
        P_ang[i,:3]=np.nan_to_num(scipy.stats.norm(mu, sig).pdf(error))
        # Uniform outlier density over [0, 2*pi)
        P_ang[i,3]=1/(2.0*np.pi)
    # evidence: sum of per-pixel log mixture probabilities, weighted by the
    # module-level prior P_m_prior (nan_to_num guards log(0)).
    p_image = np.sum(np.nan_to_num(np.log(P_ang.dot(np.array(P_m_prior)))))
    return p_image
def E_step(S, idx, Gdir):
    '''
    :param S : the Cayley-Gibbs-Rodrigu representation of camera rotation parameters
    :param idx: pixel indices selected by down_sample
    :param Gdir: subsampled gradient directions
    :return: w_pm, per-pixel responsibilities over the 3 edge models + outlier model
    '''
    R = vector2matrix(S) # Note that the 'S' is just for optimization, it has to be converted to R during computation
    w_pm = np.zeros([idx.shape[0], 4], dtype=np.float32)
    P_ang=np.zeros(shape=(idx.shape[0],4))
    # the E-step to compute the weights for each vanishing point at each pixel
    for i in range(idx.shape[0]):
        # pixel Location in original-image coordinates (idx is on the 5x grid)
        p=np.array([idx[i,1]*5+4,idx[i,0]*5+4,1],dtype=float)
        # theta and phi (pixel gradient)
        Theta=vp2dir(K, R, p)
        phi=Gdir[idx[i,0],idx[i,1]]
        error=remove_polarity(phi-Theta)
        # P ang: Gaussian likelihood under each edge model (cols 0-2)
        P_ang[i,:3]=np.nan_to_num(scipy.stats.norm(mu, sig).pdf(error))
        # Uniform outlier model (col 3)
        P_ang[i,3]=1/(2.0*np.pi)
    # Weight by the prior, then normalize each row to sum to 1.
    P_ang=P_ang*(np.array(P_m_prior))
    w_pm=P_ang
    Z_p=np.sum(w_pm,axis=1)
    Z_p=np.repeat(np.array((1./Z_p)), repeats=4).reshape((idx.shape[0],4))
    w_pm=w_pm*Z_p
    return w_pm
def M_step(S0, w_pm, idx, Gdir):
    '''
    :param S0 : the camera rotation parameters from the previous step
    :param w_pm : weights from E-step
    :return: S_m : the scipy least_squares result; the optimized rotation
             vector is in S_m.x (NOTE(review): docstring originally claimed a
             rotation matrix is returned -- callers should use S_m.x)
    '''
    def error_fun(S, w_pm):
        '''
        :param S : the rotation vector we are optimizing over
        :param w_pm : weights from E-step
        :return: error : the weighted squared angle error to minimize
        '''
        error = 0.0 # initial error setting to zero
        R = vector2matrix(S) # Note that the 'S' is just for optimization, it has to be converted to R during computation
        Weighted_Least_Sq=np.zeros(shape=(idx.shape[0],))
        for i in range(idx.shape[0]):
            # pixel Location in original-image coordinates
            p=np.array([idx[i,1]*5+4,idx[i,0]*5+4,1],dtype=float)
            # theta and phi (pixel gradient)
            Theta=vp2dir(K, R, p)
            phi=Gdir[idx[i,0],idx[i,1]]
            error=remove_polarity(phi-Theta)
            # Squared error, wrapped again to remove polarity.
            error=remove_polarity(error**2)
            w_pm_temp=w_pm[i,:3]
            # weighted Least Square: responsibilities weight the squared errors
            Weighted_Least_Sq[i]=w_pm_temp.dot(error)
        error=np.sum(Weighted_Least_Sq)
        return error
    S_m = least_squares(error_fun, S0, args= (w_pm,))
    return S_m
|
# Python 2 script: scrape the PGA Tour field page for a tournament, diff it
# against the previously stored field to detect withdrawals, and publish both
# lists to S3.
import requests
import pandas as pd
from bs4 import BeautifulSoup
import json
from boto.s3.connection import S3Connection
from boto.s3.key import Key

tournament = 'RBC Heritage'
tournament_link = 'rbc-heritage'
year = 2015

# create connection to bucket
# SECURITY NOTE(review): AWS credentials are hard-coded here -- they should be
# rotated immediately and loaded from the environment or an IAM role instead.
c = S3Connection('AKIAIQQ36BOSTXH3YEBA','cXNBbLttQnB9NB3wiEzOWLF13Xw8jKujvoFxmv3L')
# create connection to bucket
b = c.get_bucket('public.tenthtee')

# get current field list (the field as stored before this run)
k1 = Key(b)
k1.key = 'field/' + str(year) + '/' + tournament + '/field'
old_field = k1.get_contents_as_string()
old_field = json.loads(old_field)

# scrape the live field list from pgatour.com
link = 'http://www.pgatour.com/tournaments/' + tournament_link + '/field.html'
field = []
r = requests.get(link)
soup = BeautifulSoup(r.text)
player_table = soup.find(class_='field-table-content')
players = player_table.find_all("p")
for player in players:
    raw_name = player.text
    # names arrive as "Last, First" -- convert to "First Last"
    clean_name = raw_name.split(',')
    clean_name = clean_name[1][1:] + ' ' + clean_name[0]
    field.append(clean_name)

# check if withdrawals file exists
withdrawals_key = 'withdrawals'
k2 = Key(b)
k2.key = withdrawals_key
withdrawals = k2.get_contents_as_string()
withdrawals = json.loads(withdrawals)
print withdrawals
# start a fresh withdrawals record when the stored one is for another tournament
if withdrawals['tournament'] != tournament:
    withdrawals = {}
    withdrawals['tournament'] = tournament
    withdrawals['players'] = []
# anyone in the stored field but missing from the live field has withdrawn
for player in old_field:
    if player not in field:
        withdrawals['players'].append(player)
print withdrawals

# publish withdrawals: once at the stable key, once archived by year/tournament
withdrawals = json.dumps(withdrawals)
k = Key(b)
k.key = 'withdrawals'
k.set_contents_from_string(withdrawals)
k.make_public()
k1 = Key(b)
k1.key = 'withdrawals/' + str(year) + '/' + tournament + '/withdrawals'
k1.set_contents_from_string(withdrawals)
# publish the current field list
field = json.dumps(field)
k = Key(b)
k.key = 'field'
k.set_contents_from_string(field)
k.make_public()
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# -- Function Print and Export Object
# ----------------------------------------------------------------------------
# -- (c) David Muñoz Bernal
# ----------------------------------------------------------------------------
# -- This function set the object to yhe print position and export the object.
import os
import sys
import FreeCAD
import FreeCADGui
import PySide
from PySide import QtCore, QtGui
def print_export(objSelect):
    """Rotate a known workbench part into its 3D-print orientation and report it.

    :param objSelect: the selected FreeCAD document object.  Known parts
        (any '*motorholder*' object, 'idler_tensioner', 'tensioner_holder',
        'filter_holder') are rotated about the origin into the orientation
        used for printing; any other object is rejected with a console message.
    """
    show_message = True
    nema = 'motorholder'
    if nema in objSelect.Name:
        # NEMA motor holders print upside down: 180 deg about Y.
        pos = objSelect.Placement.Base
        rot = FreeCAD.Rotation(FreeCAD.Vector(0, 1, 0), 180)
        centre = FreeCAD.Vector(0, 0, 0)
        objSelect.Placement = FreeCAD.Placement(pos, rot, centre)
    elif objSelect.Name == 'idler_tensioner':
        pos = objSelect.Placement.Base
        rot = FreeCAD.Rotation(FreeCAD.Vector(0, 1, 0), 90)
        centre = FreeCAD.Vector(0, 0, 0)
        objSelect.Placement = FreeCAD.Placement(pos, rot, centre)
    elif objSelect.Name == 'tensioner_holder':
        pos = objSelect.Placement.Base
        rot = FreeCAD.Rotation(FreeCAD.Vector(1, 0, 0), -90)
        centre = FreeCAD.Vector(0, 0, 0)
        objSelect.Placement = FreeCAD.Placement(pos, rot, centre)
    elif objSelect.Name == 'filter_holder':
        # Already in print orientation: identity rotation.
        pos = objSelect.Placement.Base
        rot = FreeCAD.Rotation(FreeCAD.Vector(0, 0, 0), 0)
        centre = FreeCAD.Vector(0, 0, 0)
        objSelect.Placement = FreeCAD.Placement(pos, rot, centre)
    else:
        FreeCAD.Console.PrintMessage('This object is not a workbench object.\n')
        show_message = False
    if show_message:
        # Fixed: original message read "You select<Name>" (missing space/tense).
        FreeCAD.Console.PrintMessage("You selected " + objSelect.Name +
                                     " to change to print position and export.\n")
        FreeCADGui.activeDocument().activeView().viewAxonometric()
        FreeCADGui.SendMsgToActiveView("ViewFit")
|
# Copyright (c) 2019 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from mock import Mock
from os import environ, path
from unittest import skipUnless
from . import AnsibleTestBase
from .. import AnsiblePlaybookFromFile
class AnsibleSDKTest(AnsibleTestBase):
    """Unit tests for the AnsiblePlaybookFromFile wrapper.

    Relies on the playbook_path / hosts_path fixtures provided by
    AnsibleTestBase.
    """

    def test_that_tests_can_run_correctly(self):
        """Check that these tests can actually run."""
        self.assertTrue(path.isfile(self.playbook_path))
        self.assertTrue(path.isfile(self.hosts_path))
        self.assertIn(
            self.hosts_path,
            AnsiblePlaybookFromFile(
                self.playbook_path,
                self.hosts_path, logger=getLogger('testLogger')).sources)
        self.assertIn(
            self.playbook_path,
            AnsiblePlaybookFromFile(
                self.playbook_path,
                self.hosts_path, logger=getLogger('testLogger')).playbook)

    @skipUnless(
        environ.get('TEST_ZPLAYS', False),
        reason='This test requires you to run "vagrant up". '
               'And export TEST_ZPLAYS=true')
    def test_zplays(self):
        """Run an actual Ansible playbook from a file."""
        AnsiblePlaybookFromFile(
            self.playbook_path,
            self.hosts_path,
            logger=getLogger('testLogger')
        ).execute()

    def test_env(self):
        """Extra environment variables are merged over a copy of os.environ."""
        test_env = environ.copy()
        new = {'foo': 'bar'}
        p = AnsiblePlaybookFromFile(
            self.playbook_path,
            self.hosts_path,
            environment_variables=new,
            logger=getLogger('testLogger')
        )
        test_env.update(new)
        self.assertEqual(p.env, test_env)

    def test_verbosity(self):
        """verbosity=5 is rendered as the '-vvvvv' CLI flag."""
        p = AnsiblePlaybookFromFile(
            self.playbook_path,
            self.hosts_path,
            verbosity=5,
            logger=getLogger('testLogger')
        )
        self.assertEqual(p.verbosity, '-vvvvv')

    def test_options(self):
        """options_config entries become quoted --key='value' options."""
        p = AnsiblePlaybookFromFile(
            self.playbook_path,
            self.hosts_path,
            run_data={'taco': 'foo'},
            options_config={'foo': 'bar'},
            logger=getLogger('testLogger')
        )
        self.assertIn('--foo=\'bar\'', p.options)
        if 'extra-vars' in p.options:
            # run_data is passed via an @file reference in extra-vars
            self.assertIn('@', p.options)

    def test_command(self):
        """process_args has the expected verbosity/inventory/extra-vars layout."""
        p = AnsiblePlaybookFromFile(
            self.playbook_path,
            self.hosts_path,
            logger=getLogger('testLogger')
        )
        self.assertEqual(p.process_args[0], '-vv')
        self.assertEqual(p.process_args[1], '-i {0}'.format(self.hosts_path))
        self.assertIn('--extra-vars', p.process_args[2])
        self.assertEqual('', p.process_args[-2])
        self.assertIn('ansible-examples/lamp_simple/site.yml',
                      p.process_args[3])

    def test_execute(self):
        """execute() propagates the return value of the injected process func."""
        p = AnsiblePlaybookFromFile(
            self.playbook_path,
            self.hosts_path,
            logger=getLogger('testLogger')
        )
        dummy_mock = Mock('Dummy process execution func')
        dummy_mock.return_value = None
        result = p.execute(dummy_mock)
        self.assertIsNone(result)
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

from deeplearning.helpers import L_layer_model, L_model_forward, normalize_input

# I/O locations and global plotting switch for the analysis helpers.
DATA_DIR = './data/'
EXPORT_DIR = './exports/'
PLOT = True
def main():
    """Train the net on Titanic data, report train/cross-val accuracy and
    write a Kaggle submission CSV for the test set."""
    # analyze_data()
    X_train, X_test, Y_train, cache_test = basic_prepare_data()
    assert X_train.shape[1] == Y_train.shape[1] # sample size check
    # Deterministic shuffle of the training columns (samples).
    np.random.seed(1)
    p = np.random.permutation(X_train.shape[1]) # we shuffle inputs
    X_train = X_train.T[p].T
    Y_train = Y_train.T[p].T
    # let s split our training data into 2 sets (80% train / 20% cross-val)
    train_ratio = 0.8
    train_size = int(X_train.shape[1] * train_ratio)
    X_cross_val = X_train[:, train_size:]
    X_train = X_train[:, :train_size]
    Y_cross_val = Y_train[:, train_size:]
    Y_train = Y_train[:, :train_size]
    # train our model
    params, accuracy = basic_train_model(X_train, Y_train)
    print("train accuracy:", accuracy)
    # Evaluate on the held-out cross-validation split.
    y_cross_val_hat, _ = L_model_forward(X_cross_val, params)
    cross_val_predictions = y_cross_val_hat > 0.5
    accuracy = np.sum(cross_val_predictions == Y_cross_val) / X_cross_val.shape[1]
    print("cross val accuracy:", accuracy)
    # Predict the Kaggle test set and export PassengerId/Survived pairs.
    y_test_hat, _ = L_model_forward(X_test, params)
    test_predictions = np.int8(y_test_hat > 0.5)
    res = pd.DataFrame({'PassengerId': cache_test.values.reshape(-1, ), 'Survived': test_predictions.T.reshape(-1, )})
    res.to_csv(EXPORT_DIR + "/submit.csv", index=False)
def basic_train_model(X_train, Y_train):
    """Train an L-layer fully-connected net; return (trained params, train accuracy)."""
    # hyperparameters choices
    nb_of_hidden_layouts = 2
    nb_of_units_per_hidden_layouts = 10
    hidden_lay_dims = [nb_of_units_per_hidden_layouts] * nb_of_hidden_layouts
    # Layer sizes: input features, hidden layers, single sigmoid output.
    layers_dims = (X_train.shape[0], *hidden_lay_dims, 1)
    print('X_train shape', X_train.shape)
    print('Y_train shape', Y_train.shape)
    # the below gives a 77% accuracy on test set submited on kaggle (~ok accuracy, a bit less than avg submit)
    opti_params = L_layer_model(X_train, Y_train, layers_dims, learning_rate=0.5, num_iterations=8001, lambda_reg=0.2,
                                print_cost=True)
    # Threshold the sigmoid outputs at 0.5 to get class predictions.
    y_train_hat, cache = L_model_forward(X_train, opti_params)
    train_predictions = y_train_hat > 0.5
    accuracy = np.sum(train_predictions == Y_train) / X_train.shape[1]
    return opti_params, accuracy
def basic_fill_nan_values(df, display=True):
    """Fill every NaN with the most frequent value of its column.

    Mutates and returns *df*.  When *display* is true, prints the per-column
    NaN counts and what each column was filled with.
    """
    null_counts = df.isnull().sum()
    if display:
        print("count of NaN data per column")
        print(null_counts)
        print()
    for column in list(df):
        if not null_counts[column]:
            continue  # nothing missing in this column
        mode_value = df[column].value_counts().idxmax()
        df[column] = df[column].fillna(mode_value)
        if display:
            print(column, ' -> ', null_counts[column], '(NaN values) filled with ->',
                  mode_value)
    if display:
        print(df.info())
    return df
def basic_prepare_data():
    """Load the Titanic CSVs, engineer basic features and return model inputs.

    Returns:
        X_train, X_test: (features, samples) arrays, standardized jointly.
        Y_train: (1, samples) label row vector.
        cache_test: the test-set PassengerId series, kept for the submission.
    """
    train = pd.read_csv(DATA_DIR + "train.csv")
    test = pd.read_csv(DATA_DIR + "test.csv")
    # dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)
    # Fill empty and NaNs values with NaN
    train = train.fillna(np.nan)
    test = test.fillna(np.nan)
    cache_test = test["PassengerId"] # need it later on to export results
    # Combine train+test so feature engineering and scaling are consistent.
    # NOTE(review): positional axis argument to drop() is deprecated in newer
    # pandas; works on the version this was written for.
    combine = pd.concat([train.drop('Survived', 1), test])
    combine['cabin_known'] = combine['Cabin'].isnull() == False
    combine['is_female'] = combine['Sex'] == 'female'
    # remove all non-quantitative data
    combine = combine.drop(['PassengerId', 'Cabin', 'Embarked', 'Name', 'Ticket', 'Sex'], 1)
    combine = basic_fill_nan_values(combine, display=False)
    y_train = train['Survived']
    Y_train = y_train.values.T.reshape(1, y_train.shape[0])
    # Standardize all features, then split back into train/test by position.
    standardized_combine, input_mean, input_var = normalize_input(combine.values.T)
    X_train = standardized_combine[:, :len(train)]
    X_test = standardized_combine[:, len(train):]
    return X_train, X_test, Y_train, cache_test
""" data analysis
some lines are commented but can be interesting to uncomment to see more graphs
This analysis is not directly used in data treatment, but ome decisions have been made considering it"""
def analyze_data():
train = pd.read_csv(DATA_DIR + "train.csv")
test = pd.read_csv(DATA_DIR + "test.csv")
dataset = pd.concat(objs=[train, test], axis=0).reset_index(drop=True)
# Fill empty and NaNs values with NaN
dataset = dataset.fillna(np.nan)
train = train.fillna(np.nan)
test = test.fillna(np.nan)
# print(train.head(8)) # show the 8 first lines of the data
# print(train.describe()) # provides statistics, column by column (quartiles, min, max, std)
# print(train.isnull().sum()) # gives the number of null values per column
# print(test.info()) # provides global info on the test_set
# Correlation matrix between numerical values (SibSp Parch Age and Fare values) and Survived
# g = sns.heatmap(train[["Survived", "SibSp", "Parch", "Age", "Fare"]].corr(), annot=True, fmt=".2f", cmap="coolwarm")
# if PLOT: plt.show()
surv = train[train['Survived'] == 1]
nosurv = train[train['Survived'] == 0]
surv_col = "blue"
nosurv_col = "red"
print("Survived: %i (%.1f percent), Not Survived: %i (%.1f percent), Total: %i"
% (len(surv), 1. * len(surv) / len(train) * 100.0,
len(nosurv), 1. * len(nosurv) / len(train) * 100.0, len(train)))
# plt.figure(figsize=[12, 10])
# plt.subplot(331)
# sns.distplot(surv['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color=surv_col)
# sns.distplot(nosurv['Age'].dropna().values, bins=range(0, 81, 1), kde=False, color=nosurv_col,
# axlabel='Age')
# plt.subplot(332)
# sns.barplot('Sex', 'Survived', data=train)
# plt.subplot(333)
# sns.barplot('Pclass', 'Survived', data=train)
# plt.subplot(334)
# sns.barplot('Embarked', 'Survived', data=train)
# plt.subplot(335)
# sns.barplot('SibSp', 'Survived', data=train)
# plt.subplot(336)
# sns.barplot('Parch', 'Survived', data=train)
# # have to find a prettier way to display Fare
# plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,
# wspace=0.35)
# if PLOT: plt.show()
# AGE analysis
g = sns.FacetGrid(train, col='Survived')
g = g.map(sns.distplot, "Age")
if PLOT: plt.show()
# interesting -> young people survive more, old people survive less
# FARE analysis
# Fill Fare missing values with the median value (only one example of missing value here, so let s say it s fine)
dataset["Fare"] = dataset["Fare"].fillna(dataset["Fare"].median())
# g = sns.distplot(dataset["Fare"], color="m", label="Skewness : %.2f" % (dataset["Fare"].skew()))
# g = g.legend(loc="best")
# plt.show()
# Fare distribution is very skewed. This can lead to overweigth very high values in the model, even if it is scaled
# it is better to transform it with the log function to reduce this skew.
# Apply log to Fare to reduce skewness distribution (rpil: any other fct such as power( ,1/4) could do well)
# dataset["Fare"] = dataset["Fare"].map(lambda i: np.power(i, 1./4) if i > 0 else 0)
dataset["Fare"] = dataset["Fare"].map(lambda i: np.log(i) if i > 0 else 0)
g = sns.distplot(dataset["Fare"], color="b", label="Skewness : %.2f" % (dataset["Fare"].skew()))
g = g.legend(loc="best")
if PLOT: plt.show()
# GENDER analysis
g = sns.barplot(x="Sex", y="Survived", data=train)
g = g.set_ylabel("Survival Probability")
if PLOT: plt.show()
print(train[["Sex", "Survived"]].groupby('Sex').mean())
# PCLASS analysis
plt.figure()
plt.subplot(211)
sns.barplot(x="Pclass", y="Survived", data=train, palette='muted')
plt.subplot(212)
sns.barplot(x="Pclass", y="Survived", data=train, hue='Sex', palette='muted')
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25, wspace=0.35)
if PLOT: plt.show()
# Embarked analysis
dataset["Embarked"].isnull().sum() # 2
dataset["Embarked"] = dataset["Embarked"].fillna("S")
print(train.isnull().sum())
train['Embarked'] = train["Embarked"].fillna("S")
print(train.isnull().sum())
g = sns.factorplot(x="Embarked", y="Survived", data=train,
size=6, kind="bar", palette="muted")
g.despine(left=True)
g = g.set_ylabels("survival probability")
# Explore Pclass vs Embarked
g = sns.factorplot("Pclass", col="Embarked", data=train,
size=6, kind="count", palette="muted")
g.despine(left=True)
g = g.set_ylabels("Count")
if PLOT: plt.show()
if __name__ == '__main__':
main()
|
import tweepy
import os
import json
from decimal import *
from json.decoder import JSONDecodeError
import TravelDealDB as tddb
import TwitterHelper
from Airports import *
def add_defaults(body):
    """Merge the standard API-Gateway response fields (status + CORS headers)
    into *body* and return the combined response dict.

    Note: on key collisions the defaults win, matching {**body, **default}.
    """
    default = {
        "statusCode": 200,
        "headers": {
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": True,
            "Content-Type": "application/json"
        }
    }
    response = dict(body)
    response.update(default)
    return response
def convert_tweet_id(tweet):
    """Stringify tweet_id in place (keeps 64-bit ids JSON-safe) and return the tweet."""
    raw_id = tweet['tweet_id']
    tweet['tweet_id'] = str(raw_id)
    return tweet
def get_airports(event, context):
    """Lambda handler: return the static airport dictionary as a JSON response."""
    payload = json.dumps(airport_dict)
    return add_defaults({"body": payload})
def get_twitter_list_members(event, context):
    """Lambda handler: return the configured Twitter list's members as JSON."""
    members = TwitterHelper.get_list_members()
    return add_defaults({"body": json.dumps(members)})
def get_airport_tweets(event, context):
    """Lambda handler: return stored deal tweets, optionally filtered by
    airport city; falls back to the latest tweets on a malformed request."""
    try:
        request_body = json.loads(event['body'])
    except (JSONDecodeError, KeyError, TypeError) as err:
        print("%s will return latest results" % type(err).__name__)
        request_body = {}
    if 'airport_city' in request_body:
        tweets = tddb.get_tweets_by_airport_city(request_body['airport_city'])
    else:
        tweets = tddb.get_any_tweet()
    # tweet_ids are stringified so they survive JSON serialization intact.
    serializable = [convert_tweet_id(tweet) for tweet in tweets]
    return add_defaults({"body": json.dumps(serializable)})
# raw_input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most Google Code Jam problems.
# Python 2 solution: count the prefix flips needed to turn a '+'/'-' pancake
# string into all '+' (GCJ "Revenge of the Pancakes" style).
t = int(raw_input()) # read a line with a single integer
for c in xrange(1, t + 1):
    p = raw_input()
    n = 0  # flip counter for this case
    # NOTE(review): 't' is reused below as a scratch string; this is safe only
    # because xrange(1, t + 1) was evaluated once, but a distinct name would
    # be clearer.
    while '-' in p:
        # scan from the right for the last '-' so the prefix up to it is flipped
        for i in range(len(p), 0, -1):
            if p[i-1] == '-':
                t = p[0:i]
                if t[0] == '+':
                    # first flip just the leading run of '+' so it becomes '-'
                    k = 1
                    while k < len(t) and t[k] == '+':
                        k += 1
                    t = k * '-' + t[k::]
                    n += 1
                # invert the whole prefix: reverse, then swap '+'/'-' via
                # placeholder characters
                t = t[::-1]
                t = t.replace('+','*')
                t = t.replace('-','$')
                t = t.replace('*','-')
                t = t.replace('$','+')
                p = t + p[i::]
                n += 1
    print "Case #{}: {}".format(c, n)
    # check out .format's specification for more formatting options
|
# -*- coding: utf-8 -*-
# Python 2 script: type each search word into google.ch with Selenium and
# print the autocomplete suggestions scraped from the page source.
from selenium import webdriver
from bs4 import BeautifulSoup
import time

searchWords = ['hallo','how to kill','my teacher is','what is switzerland']

driver = webdriver.Firefox()
driver.get("https://www.google.ch")
for word in searchWords:
    print "searching for: " + word
    # clear any previous query, then type the new one
    driver.find_element_by_id("lst-ib").clear()
    driver.find_element_by_id("lst-ib").send_keys(word)
    time.sleep(3)  # give the suggestion box time to populate
    source = driver.page_source
    soup = BeautifulSoup(source)
    #for tag in soup.findAll("div", { "class" : "sbqs_c" }):
    # sbqs_c is Google's CSS class for each suggestion entry
    for tag in soup.findAll("div", { "class" : "sbqs_c" }):
        print tag.text
    time.sleep(5)
print 'closing browser'
time.sleep(5)
driver.close()
import sys

from pyspark.sql import SparkSession
from pyspark.sql.functions import lit, format_string

# Count parking violations per violation code with Spark SQL and write
# "code<TAB>count" text lines to task2-sql.out (argv[1] is the input CSV).
spark = SparkSession \
    .builder \
    .appName("Python Spark SQL basic example") \
    .config("spark.some.config.option", "some-value") \
    .getOrCreate()
parking = spark.read.format('csv').options(header='true',inferschema='true').load(sys.argv[1])
#result = parking.groupBy("violation_code").count()
#result.select(format_string("%d",result.count)).show()
parking.createOrReplaceTempView("parking")
result = spark.sql("select violation_code, count(*) as ctr from parking group by violation_code")
# NOTE(review): "%d" assumes violation_code was inferred as a numeric column --
# confirm against the CSV schema.
result.select(format_string("%d\t%d",result.violation_code, result.ctr)).write.save("task2-sql.out", format="text")
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# ***** BEGIN LICENSE BLOCK *****
# This file is part of EV3WebController.
# Copyright (c) 2014-2015 Cédric Bonhomme.
# All rights reserved.
#
#
#
# ***** END LICENSE BLOCK *****
import os
from flask import Flask
from ev3.ev3dev import Motor
#from ev3.lego import LargeMotor
from ev3.lego import TouchSensor
from ev3.lego import ColorSensor
from ev3.lego import InfraredSensor
import conf
# Create Flask application
app = Flask(__name__)
app.debug = True
# Create a random secret key so we can use sessions
app.config['SECRET_KEY'] = os.urandom(12)

from ev3.ev3dev import Tone
alarm = Tone()

# EV3 hardware handles; None until initialisation below succeeds.
#head = None#Motor(port=Motor.PORT.A)
right_wheel = None
left_wheel = None
button = None
ir_sensor = None
color_sensor = None
try:
    right_wheel = Motor(port=Motor.PORT.B)
    left_wheel = Motor(port=Motor.PORT.C)
    button = TouchSensor()
    #ir_sensor = InfraredSensor()
    color_sensor = ColorSensor()
    alarm.play(200)  # single beep: hardware initialised OK
except Exception as e:
    # double beep signals a hardware initialisation failure, then abort import
    alarm.play(200)
    alarm.play(200)
    raise e

# Zero out odometry before handing control to the web views.
right_wheel.position = 0
left_wheel.position = 0
right_wheel.reset()
left_wheel.reset()

from web import views
|
from django.contrib import admin

from apps.forbidden_words.models import ForbiddenWord


class ForbiddenWordAdmin(admin.ModelAdmin):
    """Admin list view for ForbiddenWord: show the id and word columns."""
    list_display = ('id', 'word')


admin.site.register(ForbiddenWord, ForbiddenWordAdmin)
|
from roman import *

# Project Euler 89 style: for each Roman numeral in roman.txt, count how many
# characters are saved by rewriting it in minimal form.
# (dec() parses a numeral and roman() re-encodes it minimally -- both come
# from the local `roman` module; TODO confirm their exact signatures.)
s = 0
# Fixed: use a context manager (no leaked handle) and stop shadowing the
# `file` builtin.
with open('roman.txt', 'r') as fh:
    content = fh.read()
for u in content.split('\n'):
    s += len(u) - len(roman(dec(u)))
print(s)
# -*- coding: utf-8 -*-
import os
from setuptools import setup
from setuptools import find_packages
def read(*rnames):
    """Return the contents of a file addressed relative to this setup.py."""
    return open(os.path.join(os.path.dirname(__file__), *rnames)).read()


name = 'fhnw.office2plone'

# Long description shown on PyPI: README + changelog + contributor list.
long_description = (
    read('README.rst')
    + '\n' +
    read('CHANGES.rst')
    + '\n' +
    'Contributors\n'
    '============\n'
    + '\n' +
    read('CONTRIBUTORS.txt'))

setup(
    name=name,
    version='0.1.dev0',
    description='Upload MS-Office documents to Plone',
    long_description = long_description,
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Framework :: Plone :: 4.3",
    ],
    author='FHNW',
    author_email='weboffice@fhnw.ch',
    url="http://websvn.fhnw.ch/eggs/" + name,
    license='BSD',
    # Sources live under src/ (the "src layout").
    packages=find_packages('src', exclude=['ez_setup']),
    package_dir={'': 'src'},
    keywords="FHNW Plone",
    test_suite = "fhnw.office2plone.tests",
    install_requires=[
        'setuptools',
        'atreal.massloader',
    ],
    extras_require={
        'test': [
            'plone.app.testing[robot]',
            'flake8',
        ],
        'development': [
            'zest.releaser',
            'check-manifest',
            'i18ndude',
        ],
    },
    entry_points="""
    # -*- Entry points: -*-
    [z3c.autoinclude.plugin]
    target = plone
    """,
    include_package_data=True,
    zip_safe=False,
)
|
from time import sleep
import time
import datetime
from firebase import firebase
import urllib2, urllib, httplib
import json
import os
import Adafruit_DHT
import RPi.GPIO as GPIO

# Firebase database URL (placeholder -- fill in the real project URL).
URL_Firebase ='https://'
# Firebase client object (no authentication).
firebase = firebase.FirebaseApplication(URL_Firebase, None)
# local datalogger writer
def grava_datalog(dado):
    """Append one reading to datalog.txt.

    *dado* is a (timestamp-string, temperature, humidity) tuple; the three
    fields are written space-separated on a single line.
    """
    line = '%s %s %s\n' % (dado[0], dado[1], dado[2])
    with open('datalog.txt', 'a') as file:
        file.write(line)
# Build the reading record, log it locally, then push it to Firebase.
def envia_info(temp,umid):
    """Timestamp a (temperature, humidity) reading, append it to the local
    datalog and post it to the /sensor node in Firebase."""
    global firebase
    data_hora = datetime.datetime.now()
    data = data_hora.strftime("%d/%m/%Y %H:%M")
    dado = data,temp,umid
    dados_firebase = {"temperatura": temp,"umidade": umid,"Data_Hora": data}
    grava_datalog(dado)
    firebase.post('/sensor',dados_firebase)
    return
def main():
    """Read the DHT22 sensor in a loop and upload each good sample.

    Runs forever: one reading attempt every 10 minutes.
    """
    # Reset any GPIO state left over from a previous run.
    GPIO.setwarnings(False)
    GPIO.cleanup()
    # DHT22 sensor driver object.
    sensor = Adafruit_DHT.DHT22
    GPIO.setmode(GPIO.BOARD)
    # Physical board pin wired to the sensor's data line.
    pino_sensor = 22
    while True:
        # Retry-read humidity and temperature from the sensor.
        umid, temp = Adafruit_DHT.read_retry(sensor, pino_sensor)
        if umid is not None and temp is not None:
            # FIX: the original wrote `print ("...").format(temp, umid)`,
            # which only parses as intended under the Python 2 print
            # statement; formatting inside the call works on 2 and 3.
            print("Temp = {0:0.1f} Umidade = {1:0.1f}".format(temp, umid))
            envia_info(temp, umid)
            print("Dados enviados\n")
        else:
            # Read failure: report and wait for the next cycle.
            print("Falha ao ler dados do sensor !!!")
        # One sample every 10 minutes.
        time.sleep(600)

if __name__ == '__main__':
    main()
|
# Formations AI for Mount & Blade by Motomataru
# rel. 01/03/11
# This function attaches AI_triggers only to mission "lead_charge"
# For other missions, add to end of triggers list like so: " ] + AI_triggers "
# Make sure to comment out competing AI triggers in the mission templates modified
# For example, for M&B 1.011 "lead_charge"
# #AI Triggers
# (0, 0, ti_once, [
# (store_mission_timer_a,":mission_time"),(ge,":mission_time",2),
# ],
# [(call_script, "script_select_battle_tactic"),
# (call_script, "script_battle_tactic_init")]),
# (5, 0, 0, [
# (store_mission_timer_a,":mission_time"),(ge,":mission_time",3),
# (call_script, "script_battle_tactic_apply"),
# ], []),
from header_common import *
from header_operations import *
from module_constants import *
from header_mission_templates import *
#AI triggers v3 by motomataru
# Trigger set attached to the "lead_charge" mission template by
# modmerge_formAI_mission_templates() below. Battle phases advance
# BP_Setup -> BP_Jockey -> BP_Fight; "$ranged_clock" counts one-second
# ticks to pace the periodic tactics reassessment.
AI_triggers = [
    # Before spawn: reset every battle-AI global and per-team state slot.
    (ti_before_mission_start, 0, 0, [], [
        (assign, "$cur_casualties", 0),
        (assign, "$prev_casualties", 0),
        (assign, "$ranged_clock", 1),
        (assign, "$battle_phase", BP_Setup),
        (assign, "$clock_reset", 0),
        (assign, "$team0_default_formation", formation_default),
        (assign, "$team1_default_formation", formation_default),
        (assign, "$team2_default_formation", formation_default),
        (assign, "$team3_default_formation", formation_default),
        (init_position, Team0_Cavalry_Destination),
        (init_position, Team1_Cavalry_Destination),
        (init_position, Team2_Cavalry_Destination),
        (init_position, Team3_Cavalry_Destination),
        (assign, "$team0_reinforcement_stage", 0),
        (assign, "$team1_reinforcement_stage", 0),
    ]),
    # Once, shortly after spawn: record each team's start position and
    # pick the opening tactics.
    (0, AI_Delay_For_Spawn, ti_once, [], [
        (set_fixed_point_multiplier, 100),
        (call_script, "script_battlegroup_get_position", Team0_Starting_Point, 0, grc_everyone),
        (call_script, "script_battlegroup_get_position", Team1_Starting_Point, 1, grc_everyone),
        (call_script, "script_battlegroup_get_position", Team2_Starting_Point, 2, grc_everyone),
        (call_script, "script_battlegroup_get_position", Team3_Starting_Point, 3, grc_everyone),
        (call_script, "script_field_tactics", 1)
    ]),
    # Main once-per-second AI tick.
    (1, .5, 0, [], [ #delay to offset half a second from formations trigger
        (try_begin),
            (call_script, "script_cf_count_casualties"),
            (assign, "$cur_casualties", reg0),
            (assign, "$battle_phase", BP_Fight),
        (try_end),
        (set_fixed_point_multiplier, 100),
        (call_script, "script_store_battlegroup_data"),
        (try_begin), #reassess ranged position when fighting starts
            (ge, "$battle_phase", BP_Fight),
            (eq, "$clock_reset", 0),
            (call_script, "script_field_tactics", 1),
            (assign, "$ranged_clock", 0),
            (assign, "$clock_reset", 1),
        (else_try), #reassess ranged position every five seconds after setup
            (ge, "$battle_phase", BP_Jockey),
            (store_mod, reg0, "$ranged_clock", 5),
            (eq, reg0, 0),
            (call_script, "script_field_tactics", 1),
            (assign, "$team0_reinforcement_stage", "$defender_reinforcement_stage"),
            (assign, "$team1_reinforcement_stage", "$attacker_reinforcement_stage"),
        (else_try),
            (call_script, "script_field_tactics", 0),
        (try_end),
        # During setup: advance to BP_Jockey once every non-player team's
        # archers are within 5m (500 fixed-point) of their ordered spot.
        (try_begin),
            (eq, "$battle_phase", BP_Setup),
            (assign, ":not_in_setup_position", 0),
            (try_for_range, ":bgteam", 0, 4),
                (neq, ":bgteam", "$fplayer_team_no"),
                (call_script, "script_battlegroup_get_size", ":bgteam", grc_everyone),
                (gt, reg0, 0),
                (call_script, "script_battlegroup_get_position", pos1, ":bgteam", grc_archers),
                (team_get_order_position, pos0, ":bgteam", grc_archers),
                (get_distance_between_positions, reg0, pos0, pos1),
                (gt, reg0, 500),
                (assign, ":not_in_setup_position", 1),
            (try_end),
            (eq, ":not_in_setup_position", 0), #all AI reached setup position?
            (assign, "$battle_phase", BP_Jockey),
        (try_end),
        (val_add, "$ranged_clock", 1),
    ]),
]
def modmerge_formAI_mission_templates(orig_mission_templates):
    """Append the formation-AI triggers to the "lead_charge" template.

    Mutates orig_mission_templates in place: the trigger list (slot 5)
    of the "lead_charge" entry grows by AI_triggers.
    """
    template_index = find_object(orig_mission_templates, "lead_charge")
    trigger_list = orig_mission_templates[template_index][5]
    trigger_list.extend(AI_triggers)
|
"""
connection.py
"""
import re
import socket
import select
class Connection(object):
    """A line-buffered, non-blocking client connection to a remote server.

    Attributes:
        buffer: Accumulated display text (HTML-ish markup) for the UI.
        line_buffer: Partial line received but not yet newline-terminated.
        socket: The underlying socket object, or None when disconnected.
    """
    buffer = None
    line_buffer = None
    socket = None

    def __init__(self, address):
        """Open a non-blocking connection to "host" or "host:port".

        The port defaults to 8888 when omitted. On failure the error is
        appended to the display buffer and socket stays None.
        """
        self.line_buffer = ""
        self.buffer = "<b>Attempting to connect to '%s' ...</b>\n" % address
        self.socket = socket.socket()
        # FIXME: Properly validate this?
        address_data = address.split(":")
        address = address_data[0]
        if len(address_data) == 1:
            port = 8888
        else:
            port = int(address_data[1])
        try:
            self.socket.connect((address, port))
        # gaierror is a subclass of socket.error; the original had two
        # identical handlers, merged here.
        except socket.error as error:
            self.buffer += "<b>Failed to connect: %s</b>" % error
            self.socket = None
            return
        self.socket.setblocking(False)

    def send(self, text):
        """Send text to the connected server, newline-terminated.

        A socket error silently drops the connection (socket -> None).
        """
        if self.is_connected() is True:
            try:
                self.socket.send(text + "\n")
            except socket.error:
                self.socket = None

    def is_connected(self):
        """Return True while this object holds a live socket."""
        return self.socket is not None

    def disconnect(self):
        """Politely QUIT and close the connection; no-op when disconnected."""
        if self.is_connected() is False:
            return
        # FIXME: Configurable
        self.send("QUIT")
        self.socket.close()
        self.socket = None

    def update(self):
        """Poll the socket and return complete lines received this call.

        Returns a list of lines (without trailing newline); a partial
        trailing line is kept in line_buffer for the next call.
        FIX: the original returned None when the peer closed the
        connection, violating its own documented list contract -- an
        empty list is returned instead.
        """
        if self.is_connected() is False:
            return []
        new_lines = []
        ready_read, ready_write, exception_list = select.select((self.socket,), (), (), 0)
        if ready_read:
            # NOTE(review): 8-byte reads are tiny; kept to preserve the
            # original per-call pacing -- confirm if a larger chunk is OK.
            data = self.socket.recv(8)
            if len(data) == 0:
                # Zero-length read means the remote end closed the socket.
                self.socket = None
                return []
            self.line_buffer += data
            # (The original re-tested with find() != -1 -- redundant.)
            if "\n" in self.line_buffer:
                lines = self.line_buffer.split("\n")
                new_lines = lines[0:len(lines) - 1]
                self.line_buffer = lines.pop()
        return new_lines

    def acknowledge_lines(self, lines):
        """Append lines (joined with newlines) to the display buffer.

        Intended so callers can post-process newly received lines (e.g.
        pattern formatters) before they enter the buffer.
        """
        self.buffer += "\n".join(lines)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-11-19 12:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Add Post.published_at (defaults to now; gates when a post is shown).

    help_text / verbose_name are user-facing Portuguese strings and are
    intentionally left untouched.
    """
    dependencies = [
        ('posts', '0004_auto_20161112_1610'),
    ]
    operations = [
        migrations.AddField(
            model_name='post',
            name='published_at',
            field=models.DateTimeField(default=django.utils.timezone.now, help_text='A postagem será exibida somente após essa data.', verbose_name='Publicação'),
        ),
    ]
|
# SNMP view name provisioned on the managed device.
DEFAULT_VIEW_NAME = "Quali"
# Path of the SNMP daemon configuration file on the target Linux host.
SNMP_CONF_PATH = "/etc/snmp/snmpd.conf"
# Debian-style network interfaces configuration file.
IFACE_CONF_PATH = "/etc/network/interfaces"
# System service restarted after the SNMP configuration changes.
SNMP_SERVICE_NAME = "snmpd"
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('fmExtractors')
import rospy
from fmMsgs.msg import *
from math import *
class matrix:
    """Minimal dense-matrix helper (list-of-rows of floats).

    Implements exactly what the Kalman filter below needs: +, -, *,
    transpose, and inverse via Cholesky factorization.
    FIX: the Python-2-only ``raise ValueError, "msg"`` and ``print x``
    statements were converted to call syntax, which behaves identically
    on Python 2 and is required on Python 3; all numerics are unchanged.
    """

    def __init__(self, value):
        # value is a list of rows; [[]] denotes the empty matrix.
        self.value = value
        self.dimx = len(value)
        self.dimy = len(value[0])
        if value == [[]]:
            self.dimx = 0

    def zero(self, dimx, dimy):
        """Resize to dimx x dimy and fill with zeros."""
        if dimx < 1 or dimy < 1:
            raise ValueError("Invalid size of matrix")
        self.dimx = dimx
        self.dimy = dimy
        self.value = [[0 for row in range(dimy)] for col in range(dimx)]

    def identity(self, dim):
        """Resize to the dim x dim identity matrix."""
        if dim < 1:
            raise ValueError("Invalid size of matrix")
        self.dimx = dim
        self.dimy = dim
        self.value = [[0 for row in range(dim)] for col in range(dim)]
        for i in range(dim):
            self.value[i][i] = 1

    def show(self):
        """Print one row per line, followed by a separator line."""
        for i in range(self.dimx):
            print(self.value[i])
        print(' ')

    def __add__(self, other):
        """Element-wise sum; dimensions must match."""
        if self.dimx != other.dimx or self.dimy != other.dimy:
            raise ValueError("Matrices must be of equal dimensions to add")
        res = matrix([[]])
        res.zero(self.dimx, self.dimy)
        for i in range(self.dimx):
            for j in range(self.dimy):
                res.value[i][j] = self.value[i][j] + other.value[i][j]
        return res

    def __sub__(self, other):
        """Element-wise difference; dimensions must match."""
        if self.dimx != other.dimx or self.dimy != other.dimy:
            raise ValueError("Matrices must be of equal dimensions to subtract")
        res = matrix([[]])
        res.zero(self.dimx, self.dimy)
        for i in range(self.dimx):
            for j in range(self.dimy):
                res.value[i][j] = self.value[i][j] - other.value[i][j]
        return res

    def __mul__(self, other):
        """Matrix product; requires self.dimy == other.dimx."""
        if self.dimy != other.dimx:
            raise ValueError("Matrices must be m*n and n*p to multiply")
        res = matrix([[]])
        res.zero(self.dimx, other.dimy)
        for i in range(self.dimx):
            for j in range(other.dimy):
                for k in range(self.dimy):
                    res.value[i][j] += self.value[i][k] * other.value[k][j]
        return res

    def transpose(self):
        """Return a new matrix that is the transpose of this one."""
        res = matrix([[]])
        res.zero(self.dimy, self.dimx)
        for i in range(self.dimx):
            for j in range(self.dimy):
                res.value[j][i] = self.value[i][j]
        return res

    # Thanks to Ernesto P. Adorio for use of Cholesky and CholeskyInverse functions
    def Cholesky(self, ztol=1.0e-5):
        """Upper-triangular Cholesky factor of a positive-definite matrix.

        Entries whose magnitude falls below ztol are clamped to zero.
        """
        res = matrix([[]])
        res.zero(self.dimx, self.dimx)
        for i in range(self.dimx):
            S = sum([(res.value[k][i]) ** 2 for k in range(i)])
            d = self.value[i][i] - S
            if abs(d) < ztol:
                res.value[i][i] = 0.0
            else:
                if d < 0.0:
                    raise ValueError("Matrix not positive-definite")
                res.value[i][i] = sqrt(d)
            for j in range(i + 1, self.dimx):
                # Rows below i in res are still zero, so summing over the
                # full range is equivalent to summing k < i here.
                S = sum([res.value[k][i] * res.value[k][j] for k in range(self.dimx)])
                if abs(S) < ztol:
                    S = 0.0
                res.value[i][j] = (self.value[i][j] - S) / res.value[i][i]
        return res

    def CholeskyInverse(self):
        """Inverse of the original matrix, given self = its upper Cholesky factor."""
        res = matrix([[]])
        res.zero(self.dimx, self.dimx)
        # Backward step for inverse: bottom-right to top-left.
        for j in reversed(range(self.dimx)):
            tjj = self.value[j][j]
            S = sum([self.value[j][k] * res.value[j][k] for k in range(j + 1, self.dimx)])
            res.value[j][j] = 1.0 / tjj ** 2 - S / tjj
            for i in reversed(range(j)):
                res.value[j][i] = res.value[i][j] = -sum([self.value[i][k] * res.value[k][j] for k in range(i + 1, self.dimx)]) / self.value[i][i]
        return res

    def inverse(self):
        """Return the inverse via Cholesky (requires symmetric positive-definite)."""
        aux = self.Cholesky()
        res = aux.CholeskyInverse()
        return res

    def __repr__(self):
        return repr(self.value)
# Latest sensor samples, refreshed by the ROS subscriber callbacks below.
acc_data = [0.,0.,0.]
gyro_data = [0.,0.,0.]
comp_data = [0.,0.,0.]
# Filter period in seconds; overwritten from the ~dt ROS parameter in kalman_main().
# NOTE(review): B is built from this initial value and is NOT rebuilt when
# kalman_main() overrides dt -- confirm that is intended.
dt = 10;
F = matrix([[1., 0.], [0, 1.]]) # next state function
B = matrix([[dt, 0.], [0, dt]]) # control-input-coeff.
x = matrix([[0.], [0.]]) # initial state (location and velocity)
P = matrix([[1000., 0.], [0., 1000.]]) # initial uncertainty
u = matrix([[0.], [0.]]) # external motion
H = matrix([[1., 0.], [0., 1.]]) # measurement function
R = matrix([[0.1, 0.], [0., 0.1]]) # measurement uncertainty
I = matrix([[1., 0.], [0., 1.]]) # identity matrix
def filter():
    """Run one Kalman measurement-update + prediction step over the globals.

    Shadows the builtin filter(); the name is kept because
    timer_callback calls it. FIX: Python-2-only print statements were
    converted to call syntax, which works identically on 2 and 3.
    """
    global F, B, x, P, u, H, R, I, comp_data, acc_data, gyro_data, dt
    print('Start filter:')
    # Control input: forward acceleration and yaw rate.
    u = matrix([[acc_data[0]], [gyro_data[2]]])
    # Measurement row vector; transposed below into a column.
    Z = matrix([[0., comp_data[2]]])
    y = Z.transpose() - (H * x)          # innovation
    S = H * P * H.transpose() + R        # innovation covariance
    K = P * H.transpose() * S.inverse()  # Kalman gain
    x = x + (K * y)
    P = (I - K * H) * P
    # prediction
    x = F * x + B * u
    # NOTE(review): no process-noise term Q is added here -- confirm intended.
    P = F * P * F.transpose()
    print('x= ')
    x.show()
    print('P= ')
    P.show()
def acc_callback(data):
    """Cache the newest accelerometer sample as [x, y, z]."""
    global acc_data
    acc_data = [getattr(data, axis) for axis in ('x', 'y', 'z')]

def gyro_callback(data):
    """Cache the newest gyroscope sample as [x, y, z]."""
    global gyro_data
    gyro_data = [getattr(data, axis) for axis in ('x', 'y', 'z')]

def comp_callback(data):
    """Cache the newest magnetometer sample as [x, y, z]."""
    global comp_data
    comp_data = [getattr(data, axis) for axis in ('x', 'y', 'z')]
def timer_callback(event):
    """ROS timer hook: run one Kalman filter step every dt seconds."""
    rospy.logwarn("Timer times!")
    filter()
def kalman_main():
    """ROS node entry point: subscribe to IMU topics and run the filter on a timer."""
    global dt
    rospy.init_node('kalman_main')
    # Topic names are configurable via private (~) ROS parameters.
    sub_accelerometer_topic_id = rospy.get_param('~sub_accelerometer_topic_id' , "/default/Accelerometer")
    sub_gyroscope_topic_id = rospy.get_param('~sub_gyroscope_topic_id' , "/default/Gyroscope")
    sub_magnetometer_topic_id = rospy.get_param('~sub_magnetometer_topic_id' , "/default/Magnetometer")
    # Filter/timer period in seconds.
    dt = float(rospy.get_param('~dt', 1.0))
    rospy.Subscriber(sub_accelerometer_topic_id, accelerometer, acc_callback)
    rospy.Subscriber(sub_gyroscope_topic_id, gyroscope, gyro_callback)
    rospy.Subscriber(sub_magnetometer_topic_id, magnetometer, comp_callback)
    # Fire timer_callback (one filter step) every dt seconds until shutdown.
    rospy.Timer(rospy.rostime.Duration(dt), timer_callback)
    rospy.spin()
if __name__ == '__main__':
    try:
        kalman_main()
    except rospy.ROSInterruptException: pass
# -*- coding: utf-8 -*-
import datetime
import pytest
from django.core.exceptions import ValidationError
from apps.merchandise.goods.factories import GoodFactory, ShopFactory
from apps.merchandise.goods.models import Good
pytestmark = pytest.mark.django_db
class TestGoods:
    """Validation tests for the Good model and its factory."""

    def test_date_validation(self):
        """available_until earlier than available_from must be rejected."""
        with pytest.raises(ValidationError) as execinfo:
            GoodFactory(
                available_from=datetime.date.today(),
                available_until=datetime.date.today() - datetime.timedelta(days=1),
            )
        message = u'The "Available Until" date is earlier than "Available From".'
        # FIX: pytest's ExceptionInfo API is exconly(), not execonly();
        # the old spelling raised AttributeError instead of checking the
        # validation message.
        assert message in execinfo.exconly()

    def test_source_validation(self):
        """A Good with neither a shop nor an event source must be rejected."""
        with pytest.raises(ValidationError) as execinfo:
            GoodFactory()
        message = u'Goods must either originate from a shop or an event.'
        assert message in execinfo.exconly()

    def test_factory(self):
        """A fully-specified factory call yields a Good instance."""
        factory = GoodFactory(
            name='test-good', romanized_name='test-good', category='lphoto',
            available_from=datetime.date.today() - datetime.timedelta(days=1),
            available_until=datetime.date.today(),
            shop=ShopFactory(),
        )
        assert isinstance(factory, Good)
|
#
# @lc app=leetcode.cn id=200 lang=python3
#
# [200] 岛屿数量
#
# @lc code=start
class Solution:
    def numIslands(self, grid: "List[List[str]]") -> int:
        """Count 4-connected groups of '1' cells (islands) in the grid.

        FIX: the annotation is quoted because this file never imports
        typing.List -- the bare name raised NameError the moment the
        class was defined. The grid is modified in place (visited '1'
        cells are flooded to '0'); recursion depth is O(island size).
        """
        if not grid:
            return 0
        row, col = len(grid), len(grid[0])
        count = 0

        def dfs(i, j):
            # Sink the current cell, then flood its 4 neighbours.
            grid[i][j] = '0'
            for x, y in [[-1, 0], [1, 0], [0, -1], [0, 1]]:
                tmp_i = i + x
                tmp_j = j + y
                if 0 <= tmp_i < row and 0 <= tmp_j < col and grid[tmp_i][tmp_j] == '1':
                    dfs(tmp_i, tmp_j)

        for i in range(row):
            for j in range(col):
                if grid[i][j] == '1':
                    dfs(i, j)
                    count += 1
        return count
# @lc code=end
|
'''
项目名称:
创建时间:
'''
__Author__ = "Shliang"
__Email__ = "shliang0603@gmail.com"
# !/usr/bin/env python
# -*- coding:utf-8 -*-
from PIL import Image
import os
import sys
#IMAGES_PATH = './JPEGImages' # hard-coded image-set directory (kept for reference)
IMAGES_PATH = sys.argv[1] # image-set directory, taken from the command line
IMAGES_FORMAT = ['.jpg', '.JPG'] # accepted image extensions
IMAGE_SIZE = 200 # edge length of each resized tile, in pixels
IMAGE_ROW = None # number of rows in the final mosaic (computed below)
IMAGE_COLUMN = 10 # number of columns in the final mosaic
# Collect every file in IMAGES_PATH whose extension matches IMAGES_FORMAT.
image_names = [name for name in os.listdir(IMAGES_PATH) for item in IMAGES_FORMAT if
               os.path.splitext(name)[1] == item]
print(image_names)
# NOTE(review): integer division floors, so up to IMAGE_COLUMN - 1 trailing
# images are silently dropped when the count is not an exact multiple.
IMAGE_ROW = int(len(image_names) / IMAGE_COLUMN)
# Sanity check between parameters and the actual image count
# (disabled by the original author):
# if len(image_names) != IMAGE_ROW * IMAGE_COLUMN:
#     raise ValueError("合成图片的参数和要求的数量不能匹配!")
# 定义图像拼接函数
def image_compose():
to_image = Image.new('RGB', (IMAGE_COLUMN * IMAGE_SIZE, IMAGE_ROW * IMAGE_SIZE)) # 创建一个新图
# 循环遍历,把每张图片按顺序粘贴到对应位置上
for y in range(1, IMAGE_ROW + 1):
for x in range(1, IMAGE_COLUMN + 1):
from_image = Image.open(IMAGES_PATH + '/' + image_names[IMAGE_COLUMN * (y - 1) + x - 1]).resize(
(IMAGE_SIZE, IMAGE_SIZE), Image.ANTIALIAS)
to_image.paste(from_image, ((x - 1) * IMAGE_SIZE, (y - 1) * IMAGE_SIZE))
return to_image.save("result_trash.jpg") # 保存新图
if __name__ == "__main__":
image_compose() # 调用函数
|
# file: a_simple_sub.py
from mqtt import MQTTClient
import pycom
import sys
import time
import ufun
# WLAN credentials for the board.
wifi_ssid = "AndroidAP"
wifi_passwd = "stalin1986"
# IP address of the MQTT broker on the local network.
broker_addr = "192.168.43.113"
#MYDEVID = "iot_10"
# MQTT client identifier presented to the broker.
dev_id = 'test'
def settimeout(duration):
    """Timeout hook required by the MQTT client API; intentionally a no-op."""

def on_message(topic, msg):
    """Print every message delivered on a subscribed topic."""
    parts = ("Received msg: ", str(msg), "with topic: ", str(topic))
    print(*parts)
### if __name__ == "__main__":
# Join the configured WLAN, then connect to the broker and poll forever.
ufun.connect_to_wifi(wifi_ssid, wifi_passwd)
client = MQTTClient(dev_id, broker_addr, 1883)
client.set_callback(on_message)
print ("Connecting to broker: " + broker_addr)
try:
    client.connect()
except OSError:
    # Broker unreachable: nothing useful to do on a headless board.
    print ("Cannot connect to broker: " + broker_addr)
    sys.exit()
print ("Connected to broker: " + broker_addr)
# Wildcard subscription to every battery subtopic of device iot_10.
client.subscribe('iot_10/battery/#')
print('Waiting messages...')
while 1:
    # Non-blocking poll; on_message fires for each received publish.
    client.check_msg()
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import pandas as pd
import numpy as np  # FIX: np.nan is used below but numpy was never imported
# Column data for a small demo DataFrame.
names = ['Bob','Jessica','Mary','John','Mel']
grades = [76,95,77,78,99]
bsdegress = [1,1,0,0,1]
msdegrees = [2,1,0,0,0]
phddegrees = [0,1,0,0,0]
# zip() pairs the columns row-wise; pandas consumes the iterator directly.
GradeList = zip(names,grades,bsdegress,msdegrees,phddegrees)
df = pd.DataFrame(data = GradeList, columns=['Names','Grades','BS','MS','PhD'])
df
# In[ ]:
# We can drop a column by simply adding:
# (drop() returns a copy without 'PhD'; df itself is unchanged, no inplace=True)
df.drop('PhD', axis=1)
# In[ ]:
# We can add a column filled with 0's by setting the new column name equal to a 0.
df['HighSchool']=0
# In[ ]:
# If you want to set the new columns to equal null values, you can do that too.
# (This is the line that needs numpy.)
df['PreSchool'] = np.nan
# In[ ]:
d = ([0,1,0,1,0])
s = pd.Series(d, index= df.index)
df['DriversLicense'] = s
df
|
## Demonstrates passing arguments into a function.
def another_Introduction(name, city):
    """Print a two-line self-introduction for name living in city."""
    print(f"hello my name is {name}")
    print(f"I live in  {city}")

another_Introduction("Maria", "Cork")
another_Introduction("Almir", "Paris")
def num_time(string, times):
    """Print string on its own line, times times."""
    for _ in range(times):
        print(string)

num_time("I love Python", 3)
def number(a, b):
    """Print which of a and b is larger (b wins ties, as before)."""
    if a > b:
        print("higher number is", a)
        return
    print("Higher number is ", b)

number(67, 90)
|
from helpers.sql_queries import SqlQueries
from helpers.create_tables import CreateTables
from helpers.check_queries import CheckQueries
# Public surface of the helpers package; star-imports pick up exactly these.
__all__ = [
    'SqlQueries',
    'CreateTables',
    'CheckQueries'
]
'''
The response is a standard aiohttp Response object.
A PostgreSQL database is used.
500000 rows are fetched from the database per request.
All data is captured, processed and sent in a single pass, which blocks other work.
The connection is closed when the transaction completes.
'''
from aiopg.sa import create_engine
from aiohttp import web
import sqlalchemy as sa
from settings import user_name, database_name, host_name, query
async def handle(request):
    """Fetch every row of the configured query and return them as text.

    One line per row, formatted "object_id:<first column>". A fresh
    engine and connection are created per request and disposed when the
    context managers exit.
    """
    async with create_engine(user=user_name, database=database_name, host=host_name) as engine:
        # (Dropped the unused sa.MetaData() object the original built.)
        async with engine.acquire() as conn:
            data = await conn.execute(query)
            fetch_data = await data.fetchall()
        # Join once instead of quadratic string += in a 500k-row loop.
        lines = ['object_id:{} \n'.format(fetch[0]) for fetch in fetch_data]
        return web.Response(text=''.join(lines))
|
import pandas as pd

def _load_datatran(year):
    """Read one yearly datatran CSV, dropping NaNs and duplicate rows."""
    path = 'datasets/datatran{}.csv'.format(year)
    frame = pd.read_csv(path, sep=';', encoding='ISO-8859-1', header=0)
    return frame.dropna().drop_duplicates()

dataset_2017 = _load_datatran(2017)
dataset_2018 = _load_datatran(2018)
dataset_2019 = _load_datatran(2019)
dataset_2020 = _load_datatran(2020)
# 2017-2019 form the training set; 2020 is held out for testing.
dataset_train = pd.concat([dataset_2017, dataset_2018, dataset_2019])
dataset_test = dataset_2020
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 24 23:01:47 2021
@author: yinxiaoru
"""
import os,sys
# Directories come from the command line:
#   argv[1] = directory holding the Kaggle facial-keypoints inputs
#   argv[2] = directory to write Submission.csv into
input_dir = os.path.abspath(sys.argv[1])
result_dir = os.path.abspath(sys.argv[2])
# List every file under input_dir (debugging aid).
for dirname, _, filenames in os.walk(input_dir):
    for file in filenames:
        print(os.path.join(os.path.abspath(dirname),file))
# Start python import
import math, time, random, datetime
# Data Manipulation
import numpy as np
import pandas as pd
# Visualization
import matplotlib.pyplot as plt
#import missingno
import seaborn as sns
#plt.style.use('seaborn-whitergrid')
# Preprocessing
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, label_binarize
#Machine learning
#import catboost
from sklearn.model_selection import train_test_split
from sklearn import model_selection,tree,preprocessing,metrics,linear_model
from sklearn.svm import LinearSVC
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LinearRegression, LogisticRegression, SGDClassifier
from sklearn.tree import DecisionTreeClassifier
#from catboost import CatBoostClassifier,Pool,cv
from sklearn.preprocessing import StandardScaler
from keras.layers.advanced_activations import ReLU
from keras.models import Sequential, Model
from keras.layers import Activation, Convolution2D, MaxPooling2D, BatchNormalization, Flatten, Dense, Dropout, Conv2D, MaxPool2D,ZeroPadding2D
#Let's be rebels and warnings for now
import warnings
warnings.filterwarnings('ignore')
# data append
train_data = pd.read_csv(os.path.join(input_dir,'training.zip'),compression='zip',header=0, sep=',',quotechar='"')
test_data = pd.read_csv(os.path.join(input_dir,'test.zip'),compression='zip', header = 0,sep = ',', quotechar='"')
IdLookupTable = pd.read_csv(os.path.join(input_dir,'IdLookupTable.csv'), header=0,sep = ',',quotechar='"')
SampleSubmission = pd.read_csv(os.path.join(input_dir,'SampleSubmission.csv'),header = 0,sep=',',quotechar='"')
train_data.head()
train_data.head().T.tail()
len(train_data['Image'][2])
train_data.info()
test_data.head()
test_data.info()
IdLookupTable.head()
IdLookupTable.info()
SampleSubmission.head()
# check missing data
## train
null_sum=train_data.isnull().sum()
null_sum.plot(kind='bar',color='pink')
train_data.fillna(method='ffill',inplace=True)
train_data.isnull().sum().plot(kind='bar',color='pink')
test_data.isnull().sum().plot(kind='bar',color='pink',title='Missing Data')
# IDLookupTable
display(IdLookupTable.isnull().sum())
# Visualize Data
vis = []
for i in range(len(train_data)):
vis.append(train_data['Image'][i].split(' '))
## prepare data x train
array_float = np.array(vis, dtype='float')
X_train = array_float.reshape(-1,96,96,1)
## show photo
photo_visualize = array_float[1].reshape(96,96)
plt.imshow(photo_visualize,cmap='pink')
plt.title('viasualize Image')
plt.show()
## Facial Keypoints
facial_pnts_float = train_data.drop(['Image'],axis=1).values
## prepare data y train
training_data = train_data.drop('Image',axis=1)
y_train = training_data.values
## show photo image eith facial points
photo_visualize_pnts = facial_pnts_float[0]
plt.imshow(photo_visualize,cmap = 'gray')
plt.scatter(photo_visualize_pnts[0::2],photo_visualize_pnts[1::2],color = 'Pink', marker= '*')
plt.title("Image eith Facial Keypoints")
plt.show()
# prepare and split data
train_data.shape
# Build model
## keras CNN
model = Sequential()
### layer 1
model.add(Convolution2D(32, (3,3), activation = 'relu', padding='same', use_bias=False, input_shape=(96,96,1)))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
### layer 2
model.add(Convolution2D(32,(3,3),activation = 'relu', padding = 'same', use_bias = False))
model.add(MaxPool2D(pool_size=(2,2)))
### layer 3
model.add(Convolution2D(64,(3,3),activation= 'relu',padding = 'same', use_bias=False))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2,2)))
### layer4
model.add(Convolution2D(128, (3,3), activation = 'relu', padding='same', use_bias=False))
model.add(BatchNormalization())
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256,activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(30))
model.summary()
model.compile(optimizer='adam',loss='mean_squared_error',metrics=['accuracy'])
# train data
model.fit(X_train,y_train,epochs = 5,batch_size = 32,validation_split = 0.2)
# prepare data test
# Parse the test images the same way as the training set.
test_images = []
for i in range(len(test_data)):
    item = test_data['Image'][i].split(' ')
    test_images.append(item)
array_float_test = np.array(test_images, dtype='float')
X_test = array_float_test.reshape(-1, 96, 96, 1)
# predict
predict = model.predict(X_test)
# submission
# from IdLookupTable
FeatureName = list(IdLookupTable['FeatureName'])
ImageId = list(IdLookupTable['ImageId'] - 1)
RowId = list(IdLookupTable['RowId'])
# predict results
predict = list(predict)
# Map every feature name to its first-occurrence position in the lookup
# table. FIX: the original called FeatureName.index(name) inside a loop
# over all rows -- O(n^2) for the whole table; a first-occurrence dict
# yields the identical positions in O(n).
# NOTE(review): using that position as a column index into the 30-wide
# prediction assumes the first rows of the table enumerate features in
# training-column order -- confirm against the data.
first_occurrence = {}
for position, feature in enumerate(FeatureName):
    first_occurrence.setdefault(feature, position)
Data = [first_occurrence[feature] for feature in FeatureName]
Data_pre = []
for x, y in zip(ImageId, Data):
    Data_pre.append(predict[x][y])
RowId = pd.Series(RowId, name='RowId')
Location = pd.Series(Data_pre, name='Location')
submission = pd.concat([RowId, Location], axis=1)
submission.to_csv(os.path.join(result_dir, 'Submission.csv'), index=False)
|
from django.conf import settings
from django.contrib import messages
from django.core import serializers
from django.core.mail import send_mail
from django.http import HttpResponse, Http404
from django.shortcuts import render

from ecom.models import User, Item, Basket, BasketItem
appname = 'ecom'
def loggedin(f):
    """Decorator: run the view only when a user is logged in.

    Falls back to the login page otherwise. The wrapper now forwards
    *args/**kwargs, so it also works on views that take URL parameters
    (the original accepted only request, breaking any such view).
    """
    def test(request, *args, **kwargs):
        if 'username' in request.session:
            return f(request, *args, **kwargs)
        else:
            return render(request, 'ecom/loginpage.html', {})
    return test
def index(request):
    """Landing page: main view when logged in, login page otherwise.

    FIX: the original called loggedin() -- the decorator factory -- with
    no arguments, which raises TypeError on every request. The intended
    check is whether the session carries a username.
    """
    if 'username' in request.session:
        return render(request, 'ecom/main.html')
    else:
        return render(request, 'ecom/loginpage.html')
def basket(request):
    """Render the shopping-basket page."""
    return render(request, 'ecom/basket.html')
def signup(request):
    """Render the account sign-up form."""
    return render(request, 'ecom/signup.html',)
def loginpage(request):
    """Render the login form."""
    return render(request, 'ecom/loginpage.html')
def main(request):
    """Render the storefront/main page."""
    return render(request, 'ecom/main.html')
#@loggedin
def myaccount(request):
    """Show the logged-in user's profile details.

    NOTE(review): the @loggedin guard is commented out, so an anonymous
    request raises KeyError on the session lookup -- confirm whether the
    decorator should be re-enabled.
    """
    pk = request.session['username']
    user = User.objects.get(pk=pk)
    context = {
        "fname": user.firstname,
        "uname": user.username,
        "email": user.email,
        "address": user.address,
        "city": user.city,
        "postcode": user.postcode,
        'loggedin': True
    }
    return render(request, 'ecom/myaccount.html', context)
def changeQuantity(request):
    """Adjust the quantity of one basket item by a signed delta.

    Expects GET parameters article_id (which product) and value (the
    signed change). The GET lookups are hoisted out of the loop: they
    are loop-invariant, and the original re-read them per basket item.
    """
    username = request.session['username']
    user = User.objects.get(pk=username)
    basket = user.basket
    basketItems = basket.basketitem_set.all()
    article_id = request.GET.get('article_id')
    raw_value = request.GET.get('value')
    for x in basketItems:
        if x.product.article_id == article_id:
            x.quantity = x.quantity + int(raw_value)
            x.save()
    return HttpResponse(200)
def getProductsInbasket(request):
    """Return the products in the current user's basket, serialized as JSON."""
    username = request.session['username']
    user = User.objects.get(pk=username)
    items = user.basket.basketitem_set.all()
    products = [entry.product for entry in items]
    return HttpResponse(serializers.serialize("json", products))
def getProducts(request):
    """Return the whole item catalogue, serialized as JSON."""
    payload = serializers.serialize("json", Item.objects.all())
    return HttpResponse(payload)
def buyProduct(request):
    """Mark an item sold, add it to the user's basket, and email a confirmation.

    FIX: the original read setting.EMAIL_HOST_USER -- the name "setting"
    never existed (and django.conf.settings was not imported), so every
    purchase crashed with NameError at the email step. Both references
    now use the imported settings object.
    """
    username = request.session['username']
    user = User.objects.get(pk=username)
    pk = request.GET.get("article_id")
    item = Item.objects.get(pk=pk)
    item.sold = True
    basketItem = BasketItem(basket=user.basket, product=item)
    basketItem.save()
    item.save()
    user.save()
    subject = 'Thank you for your purchase'
    message = 'This is confirmation your order has been placed'
    from_email = settings.EMAIL_HOST_USER
    to_list = [user.email, settings.EMAIL_HOST_USER]
    send_mail(subject, message, from_email, to_list, fail_silently = True)
    return HttpResponse(200)
def register(request):
    """Create a new user plus an empty basket from the sign-up POST data.

    NOTE(review): the password is stored in plain text (password=p) --
    it should be hashed before storage.
    """
    u = request.POST.get('username')
    fname = request.POST.get('fname')
    sname = request.POST.get('sname')
    date = request.POST.get('dob')
    phonenum = request.POST.get('phonenum')
    email = request.POST.get('email')
    address = request.POST.get('address')
    city = request.POST.get('city')
    postcode = request.POST.get('postcode')
    p = request.POST.get('password')
    # Every user gets a fresh, empty basket.
    basket = Basket()
    basket.save()
    user = User(username=u,
                firstname=fname,
                surname=sname,
                dob=date,
                phone=phonenum,
                email=email,
                address=address,
                city=city,
                postcode=postcode,
                password=p,
                basket= basket
                )
    user.save()
    return render(request, 'ecom/loginpage.html')
def login(request):
    """Authenticate against the User table and open a session.

    Credentials arrive as GET parameters; on success the username,
    password and an empty basket marker are stored in the session and
    the main page is rendered.
    """
    if 'username' not in request.GET:
        return render(request, 'ecom/loginpage.html',
                      {'errorText': "Please enter a username"})
    usrn = request.GET['username']
    pwd = request.GET['password']
    try:
        user = User.objects.get(pk=usrn)
    except User.DoesNotExist:
        return render(request, 'ecom/loginpage.html',
                      {'errorText': "The username you entered does not exist"})
    if pwd != user.password:
        return render(request, 'ecom/loginpage.html',
                      {'errorText': "Incorrect password"})
    request.session['username'] = usrn
    request.session['password'] = pwd
    request.session['basket'] = None
    return render(request, 'ecom/main.html', {
        'appname': appname,
        'username': usrn,
        'loggedin': True}
    )
#@loggedin
def logout(request):
    """Clear the session and show the login page; 404 when not logged in.

    FIX: Http404 was never imported in this module, so raising it
    crashed with NameError instead of producing a 404. It is now part of
    the django.http import at the top of the file.
    """
    if 'username' in request.session:
        request.session.flush()
        return render(request, 'ecom/loginpage.html')
    else:
        raise Http404("Can't logout, you are not logged in")
|
from Phone import app, api
from Phone.Control import global_controller
from Phone_Config_Boundary.Config_Sample_Boundary \
import Config_Sample_Boundary
from Phone_Config_Boundary.Config_Logger_Boundary \
import Config_Logger_Boundary
#
# Get the version of the API
#
version = global_controller.get_value('version')
# Versioned REST routes: each config boundary is mounted under /<version>/config/.
api.add_resource(Config_Sample_Boundary,
                 '/{0}/config/sample'.format(version))
#
# Place config boundaries here
#
# End config boundaries here
#
api.add_resource(Config_Logger_Boundary,
                 '/{0}/config/logger'.format(version))
|
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Album(models.Model):
id_album = models.FloatField(primary_key=True)
nombre = models.CharField(max_length=50, blank=True, null=True)
fecha = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'album'
class AlbumCancion(models.Model):
id_album_cancion = models.FloatField(primary_key=True)
album = models.CharField(max_length=50, blank=True, null=True)
cancion = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'album_cancion'
class Artista(models.Model):
id_artista = models.FloatField(primary_key=True)
nombre = models.CharField(max_length=50, blank=True, null=True)
pais = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'artista'
class Cancion(models.Model):
id_cancion = models.FloatField(primary_key=True)
nombre = models.CharField(max_length=50, blank=True, null=True)
fecha_lanzamiento = models.CharField(max_length=50, blank=True, null=True)
path = models.CharField(max_length=100, blank=True, null=True)
class Meta:
managed = False
db_table = 'cancion'
class CancionLista(models.Model):
id_cancion_lista = models.FloatField(primary_key=True)
cancion = models.CharField(max_length=50, blank=True, null=True)
lista = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'cancion_lista'
class Conteo(models.Model):
id_conteo = models.FloatField(primary_key=True)
class Meta:
managed = False
db_table = 'conteo'
class Genero(models.Model):
id_genero = models.FloatField(primary_key=True)
genero = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'genero'
class Lista(models.Model):
id_lista = models.FloatField(primary_key=True)
nombre = models.CharField(max_length=50, blank=True, null=True)
class Meta:
managed = False
db_table = 'lista'
class Usuario(models.Model):
id_usuario = models.FloatField(primary_key=True)
usuario = models.CharField(max_length=50)
correo = models.CharField(max_length=50)
foto = models.CharField(max_length=50, blank=True, null=True)
nombre = models.CharField(max_length=50, blank=True, null=True)
apellido = models.CharField(max_length=50, blank=True, null=True)
tipo = models.FloatField(blank=True, null=True)
clave = models.CharField(max_length=20, blank=True, null=True)
fecha_registro = models.DateField(blank=True, null=True)
class Meta:
managed = False
db_table = 'usuario'
unique_together = (('correo', 'usuario'),)
class UsuarioArtista(models.Model):
    """Unmanaged join table linking users to followed artists ('usuario_artista')."""
    id_usuario_artista = models.FloatField(primary_key=True)
    usuario = models.CharField(max_length=50)
    artista = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'usuario_artista'
class UsuarioLista(models.Model):
    """Unmanaged join table linking users to playlists ('usuario_lista')."""
    id_usuario_lista = models.FloatField(primary_key=True)
    usuario = models.CharField(max_length=50, blank=True, null=True)
    lista = models.CharField(max_length=50, blank=True, null=True)
    class Meta:
        managed = False
        db_table = 'usuario_lista'
|
#coding:utf-8
import csv
import hashlib
import json
import random
import re
import string
import time
from random import Random
from googletrans import Translator
from ali1688.helper import str_replace_new
import os
import sys
from random import Random
from hashlib import md5
root_path = os.path.abspath('.')
def fnmd5(text):
    """Return the hex MD5 digest of *text* encoded as UTF-8.

    The parameter was originally named ``str``, shadowing the builtin;
    renamed (all in-file call sites pass it positionally).
    """
    digest = hashlib.md5()
    digest.update(text.encode("utf8"))
    return digest.hexdigest()
def salt():
    """Return an 8-character random alphanumeric string (no repeated characters)."""
    alphabet = string.ascii_letters + string.digits
    picked = random.sample(alphabet, 8)
    return ''.join(picked)
def start():
    """Convert a translated 1688 product CSV into an iOffer bulk-upload CSV.

    Reads csv_org/data_baita_01_trans_01.csv, builds one fixed-layout row per
    product and appends the rows to csv_org/baita_ioffer.csv.
    """
    new_img_path = root_path+'/csv_org/'
    lines = []
    #r = csv.reader(open(new_img_path+'data_hyg_trans.csv' ,'r' ,encoding='gbk' ))
    # NOTE(review): the reader's file handle is never closed
    r = csv.reader(open(new_img_path+'data_baita_01_trans_01.csv' ,'r' ))
    #r = csv.reader(open('F:\laragon\www\python\image\data.csv','r'))
    for index,row in enumerate(r):
        # Strip stray mis-encoded characters from the title column.
        # NOTE(review): u'\0xb4' is NUL + literal 'xb4' -- presumably u'\xb4'
        # was intended; confirm against the source data.
        title = row[8].replace(u'\0xb4',u' ')
        title = title.replace(u'\0x81',u' ')
        #title.encode('utf-8').decode('gbk','ignore').encode('utf-8','ignore')
        #title.decode('gbk').encode('utf-8')
        #if index > 0 and len(row) > 0 and row is not None :
        print(index)
        # The product id is embedded in the offer URL: .../offer/<id>.html
        reg = r'offer/(.*?).html'
        pattern = re.compile(reg)
        out = re.findall(pattern, row[7])
        filename = time.strftime("%Y%m%d-%H%M%S", time.localtime()) + '-' +out[0]
        str = salt() + '-' + filename  # NOTE(review): shadows builtin str and is unused below
        #print(fnmd5( str ))
        #print( fnmd5('hellsdfsdfo') )
        # Assemble one output row; column order matches the header written below.
        temp_line = []
        temp_line.append('1010292')  # category
        temp_line.append(title)  # title
        temp_line.append('Woman dress')  # product_type
        temp_line.append('')  # brand
        temp_line.append('new')  # condition
        temp_line.append(title)  # description
        temp_line.append('9999')  # quantity
        temp_line.append(row[11])  # price
        temp_line.append('5')  # shipping
        temp_line.append('https://res.cloudinary.com/dzf6pxzir/image/upload/%s/800/1.jpg' % row[2])  # image1
        temp_line.append('https://res.cloudinary.com/dzf6pxzir/image/upload/%s/800/2.jpg' % row[2])  # image2
        temp_line.append('https://res.cloudinary.com/dzf6pxzir/image/upload/%s/800/3.jpg' % row[2])  # image3
        temp_line.append('')  # country_shipping
        temp_line.append('Color:'+row[9] +';Size:'+row[5]+';' + 'sku:'+ row[3] )  # item_specifics
        temp_line.append('Woman Dress')  # keyword1
        temp_line.append('Summer Dress')  # keyword2
        temp_line.append('Big size dress')  # keyword3
        temp_line.append('Hotsale dress')  # keyword4
        lines.append(temp_line)
    #new_csv = open('F:\laragon\www\python\image\data_trans.csv','w',newline='')
    # NOTE(review): append mode means the header row is re-written on every run
    new_csv = open(new_img_path+'/baita_ioffer.csv', 'a', newline='' )
    writer = csv.writer(new_csv)
    writer.writerow(( 'category' ,'title','product_type','brand','condition',
        'description','quantity','price','shipping','image1','image2','image3',
        'country_shipping','item_specifics','keyword1','keyword2','keyword3','keyword4'))
    writer.writerows(lines)
    new_csv.close()
    print('=========本次共有%s个产品==========================='%len(lines))
    print(lines)
start()
# /usr/bin/python
# -*- coding:utf-8 -*-
|
import bisect
import functools
class Solution(object):
    def searchMatrix(self, matrix, target):
        """
        Search a row-major sorted matrix (each row sorted, each row's first
        element greater than the previous row's last) for *target*.

        :type matrix: List[List[int]]
        :type target: int
        :rtype: bool
        """
        # Flatten with a comprehension instead of
        # functools.reduce(list.__add__, ...), whose repeated list
        # concatenation is O(n^2) in the total number of elements.
        flat = [value for row in matrix for value in row]
        # The flattened list is fully sorted, so binary search applies.
        pos = bisect.bisect_left(flat, target)
        return pos < len(flat) and flat[pos] == target
|
# Uses python3
# Problem Description
# Task. The goal in this problem is to find the minimum number of coins needed to change the input value
# (an integer) into coins with denominations 1, 5, and 10.
# Input Format. The input consists of a single integer m.
# Constraints. 1 ≤ m ≤ 10**3 .
# Output Format. Output the minimum number of coins with denominations 1, 5, 10 that changes m.
# Example :
# 24 <--- input
# 6 <--- output
# 26 <-- input
#4 <-- output
def get_change(m):
    """Return the minimum number of coins of denominations 1, 5, 10 that sum to m.

    Greedy is optimal here because each denomination divides the next larger
    one. The original loop used strict comparisons (``m > 10``, ``m > 5``),
    so exact multiples fell through to smaller coins -- e.g. get_change(10)
    returned 6 instead of 1. Floor division fixes that and avoids the loop.
    """
    tens, remainder = divmod(m, 10)
    fives, ones = divmod(remainder, 5)
    return tens + fives + ones
def main():
    # Read a single integer m from stdin and print the minimum coin count.
    m = int(input())
    print(get_change(m))
main() |
"""
Defines a Turing machine implementation.
"""
import automata.dfa as dfa
import automata.packs as pk
import automata.state as st
class TuringMachine(dfa.DFA):
    """
    A Turing machine implementation.

    It is inherited from DFA because it shares the same interface
    but adds directly on it. DPDA is not suitable for inheritance
    here because we don't need a stack here.
    Could've maybe produced Tape inherited from a Stack?

    This is not a standard Turing machine because it also
    allows for explicitly rejected states (the machine is halted
    if it encounters an accepted or rejected state).
    All other states are inconclusive and do not stop the machine.
    The machine also stops if there are no other possible transitions.
    """
    def __init__(self, states, inputs, alphabet, start_state, empty_cell_symbol, head_index = 0):
        """
        :param states: machine states
        :param inputs: input symbols (must be a proper subset of *alphabet*)
        :param alphabet: full tape alphabet
        :param start_state: initial state
        :param empty_cell_symbol: symbol for an empty tape cell (not an input)
        :param int head_index: initial tape head position
        """
        # don't move this after super().__init__ because it's used in structure check
        self.alphabet = alphabet
        self.empty_cell_symbol = empty_cell_symbol
        super().__init__(states, inputs, start_state)
        self._head_index = head_index
        self.tape = pk.Tape()
        self.tape._index = head_index

    def __repr__(self):
        def wrap_in_braces(string, last_brace_newline=False):
            """
            Wraps up a string in {}.

            :param str string: string to be wrapped
            :param bool last_brace_newline: defines if newline will be put after braces
            :return str: wrapped string
            """
            return '{' + string + ('\n}' if last_brace_newline else '}')

        def tab(string):
            """
            Puts tabs in front of all lines in a string.

            :param str string: input string
            :return str: tabbed strings
            """
            return '\t' + string.replace('\n', '\n\t')

        def newline(*lines):
            """
            Returns string composed of all line arguments with newline added between them.

            :param str lines: lines of text that need to be newlined.
            :return: full string composed of individual lines concatenated with newline in-between
            """
            res = '\n'
            for line in lines:
                res += line + '\n'
            return res[:-1]

        states = ''
        final = ''
        for state, state_object in sorted(self.states.items(), key=lambda t: t[0]):
            states += str(state) + ','
            if state_object.value:
                final += str(state.name) + ','
        final = 'F=' + wrap_in_braces(final[:-1])
        states = 'Q=' + wrap_in_braces(states[:-1])
        inputs = ''
        for inp in sorted(self.inputs):
            inputs += str(inp) + ','
        alphabet = ''
        for sym in sorted(self.alphabet):
            alphabet += str(sym) + ','
        inputs = u'\u03A3=' + wrap_in_braces(inputs[:-1])
        alphabet = u'\u03B3=' + wrap_in_braces(alphabet[:-1])
        funcs = u'\u03B4=' + wrap_in_braces(self.functions)
        # Sanity check (the original duplicated this try/assert block verbatim;
        # one check is sufficient).
        try:
            assert isinstance(self.start_state, st.State)
        except AssertionError as error:
            print("Start state is not a state, it's {}".format(type(self.start_state)), error)
            raise error
        start = 'q0=' + str(self.start_state.name)
        return '{} '.format(type(self).__name__) + wrap_in_braces(tab(
            newline(states, inputs, alphabet, funcs, start, final)
        ), True)

    def _check_structure(self):
        """Validate inputs vs. alphabet and the empty-cell symbol.

        :raises ValueError: if inputs are not a proper subset of the alphabet,
            or the empty cell symbol is missing from the alphabet.
        :return bool: True when the structure is valid
        """
        err = ValueError('Set of all inputs should be a proper subset of an alphabet')
        err2 = ValueError('Empty cell symbol should NOT be in inputs')
        for item in self.inputs:
            if item not in self.alphabet:
                raise ValueError('Item {} is not in alphabet {} and it should be.'.format(
                    item, self.alphabet
                ))
        if len(self.inputs) >= len(self.alphabet):
            raise err
        if self.empty_cell_symbol not in self.alphabet:
            raise err2
        return True

    @property
    def functions(self) -> str:
        """
        Returns functions for repr() function.

        :return str: string representation of transition functions
        """
        result = ''
        for state in sorted(self.states.values()):
            for event, pack in state.transitions.items():
                # extremely bad code, but it's a part of an interface
                result += '{},{}->'.format(self._get_alias(state.name), event)
                end, symbol, movement = list(pack)[0].unpack
                # BUG FIX: the original tested the constant
                # `pk.TuringOutputPack.LEFT` (always truthy), so every
                # transition rendered as 'L'. Compare the movement itself.
                movement = 'L' if movement == pk.TuringOutputPack.LEFT else 'R'
                result += '{},{},{}'.format(self._get_alias(end.name), symbol, movement)
            result = result + '\n'
        return result.strip()

    def reset(self):
        """Reset machine state, clear the tape and restore the head position."""
        super().reset()
        self.tape.clear()
        # BUG FIX: the original line `self._head_index = self._head_index` was
        # a no-op; restore the tape head to the configured starting position,
        # mirroring what __init__ does.
        self.tape._index = self._head_index

    @property
    def rejected_states(self):
        """
        Returns all rejected states.

        A state is explicitly rejected (in this context)
        if it contains value -1.
        A state that is not accepted is NOT explicitly rejected.

        :return set: a set of all rejected states.
        """
        final = set()
        for state in self.states.values():
            if state.value == -1:
                final.add(state)
        return final

    @property
    def accepted(self):
        # Accepted iff the current state carries a positive value.
        return self.current.value > 0

    def enter(self, *entry):
        """Write *entry* onto the tape, then delegate to the DFA entry logic."""
        self.tape.add(*entry)
        return super().enter(*entry)

    def _access(self, value):
        raise NotImplementedError('Access method is not needed in a Turing machine.')

    def _process(self, *entry):
        """
        Processes the entry arguments, running the machine until it halts.

        Halts when an accepted/rejected state is reached, when the tape has
        no symbol under the head, or when no transition exists.

        :param entry: entries that have to be handled.
        """
        records = pk.Records()
        records.add_record(pk.RecordPack(self.current, bool(self.accepted)))
        while True:
            if self.current in self.accepted_states | self.rejected_states:
                break
            read = self.tape.consume
            if read is None:
                # Nothing under the head: step back and halt.
                # NOTE(review): the original author also did not understand why
                # this works ("i absolutely don't have any idea why this solves
                # everything") -- verify against the Tape implementation.
                self.tape.move_left()
                break
            output = self.current.clean_forward(read)
            if output == set():
                # No transition available: put the symbol back and halt.
                self.tape.add(read)
                break
            end, symbol, movement = output.unpack
            if movement == pk.TuringOutputPack.LEFT:
                movement = self.tape.move_left
            elif movement == pk.TuringOutputPack.RIGHT:
                movement = self.tape.move_right
            else:
                raise ValueError('Invalid movement value.')
            movement(symbol)
            self.current = self.states[self._get_alias(end.name)]
        self.records.add_record(records)

    @staticmethod
    def factory(input_text, lexer):
        """Build a TuringMachine by scanning *input_text* with *lexer*."""
        lexer.scan(input_text)
        return __class__(lexer.states, lexer.inputs, lexer.alphabet, lexer.start_state, lexer.empty_cell_symbol, lexer.head_index)
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import toml
from argparse import ArgumentParser
from executor.args import create_executor_config
from transpiler.bai_knowledge import create_job_yaml_spec
from uuid import uuid4
def main(argv=None):
    """Transpile a TOML descriptor into a job YAML spec and print or save it.

    This method is only called when using the transpiler as a module of its
    own, which is getting deprecated very soon.
    """
    transpiler_config = create_executor_config(argv)
    # Renamed from `input`, which shadowed the builtin of the same name.
    args = get_input_args(argv)
    descriptor_data = toml.load(args.descriptor)

    # TODO: Pass this as an argument
    fetched_data_sources = descriptor_data.get("data", {}).get("sources", [])

    yaml_string = create_job_yaml_spec(descriptor_data, transpiler_config, fetched_data_sources, str(uuid4()))

    if args.filename:
        current_dir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(current_dir, args.filename), "w") as f:
            f.write(yaml_string)
    else:
        print(yaml_string)
def get_input_args(argv):
    """Parse the known command-line arguments for the transpiler entry point."""
    parser = ArgumentParser()
    parser.add_argument(
        "--descriptor",
        required=True,
        help="Relative path to descriptor file",
    )
    parser.add_argument(
        "-f",
        "--filename",
        default=None,
        help="Output to file. If not specified, output to stdout",
    )
    known_args, _unused = parser.parse_known_args(argv)
    return known_args
# CLI entry point when run as a script.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
import time
import socket
import re
import xml.etree.ElementTree as ET
from actions import exec_cmd
import json
import os
def readjson(jfile):
    """Load and return the JSON data stored in the file *jfile*.

    The original implementation never closed the file handle; ``with``
    guarantees it is released even if parsing raises.
    """
    with open(jfile, 'r') as f:
        return json.load(f)
def setenv(jsondata):
    """Export the configured ALSA device name into the environment.

    Reads OsEnv.AlsaDev from *jsondata*, sets ${ALSADEV}, then verifies the
    assignment round-trips; exits the process if it did not.
    """
    # Pull this group's parameters out of the config dict
    groupdict = jsondata["OsEnv"]
    alsadev = groupdict["AlsaDev"]
    os.environ['ALSADEV'] = alsadev
    # Verify the result of the assignment
    if os.getenv('ALSADEV') != alsadev:
        # BUG FIX: the original called sys.exit() without importing sys,
        # which raised NameError instead of exiting; import locally so the
        # intended abort actually happens.
        import sys
        print('[FAILED]--- : ${ALSADEV}を'+alsadev+'に変更することができませんでした。')
        sys.exit()
    print('[SUCCESS]-- : ${ALSADEV}を'+alsadev+'に変更しました。')
def speech2text(jsondata, callback=None):
    """Stream recognition results from a Julius server and trigger voice commands.

    Connects to the Julius speech-recognition server configured in *jsondata*,
    parses each </RECOGOUT> segment from the XML stream, accumulates the
    recognized keywords, and runs the matching exec_cmd action
    (ping / current time / IP address / repeat last).

    :param jsondata: parsed settings.json dict (Sounds / Julius / Actions groups)
    :param callback: unused in the visible code -- presumably a hook for callers
    """
    # Get the response sound-effect file
    groupdict = jsondata["Sounds"]
    responsesoundfile = groupdict["ResponseSoundFile"]
    print("[DEBUG]---- : Sounds :")
    print("[DEBUG]---- : responsesoundfile :", responsesoundfile)
    # Get the Julius server connection settings
    groupdict = jsondata["Julius"]
    serverip = groupdict["ServerIp"]
    serverport = groupdict["ServerPort"]
    threshold = groupdict["Threshold"]  # NOTE(review): read and printed but never used below
    print("[DEBUG]---- : Julius :")
    print("[DEBUG]---- : serverip :", serverip)
    print("[DEBUG]---- : serverport :", serverport)
    print("[DEBUG]---- : threshold :", threshold)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((serverip, int(serverport)))
    # Load parameters for the command actions
    groupdict = jsondata["Actions"]
    pingtarget = groupdict["PingTarget"]
    pingcount = groupdict["PingCount"]
    pinginterval = groupdict["PingInterval"]
    print("[DEBUG]---- : Actions :")
    print("[DEBUG]---- : PingTarget :", pingtarget)
    print("[DEBUG]---- : PingCount :", pingcount)
    print("[DEBUG]---- : PingInterval :", pinginterval)
    # Process the streamed responses from the Julius server
    data = ''
    while True:
        new_data = client.recv(1024).decode().replace('\n','')
        print('[DEBUG]---- : Client is recieved following DATA at this time : \n', \
              new_data.replace('>','>\n'))
        # A closing </RECOGOUT> tag marks the end of one recognition segment
        if '</RECOGOUT>' in new_data :
            print('[DEBUG]---- : new raw data is from here ---:\n', \
                  new_data.replace('>','>\n'))
            print('[DEBUG]---- : end tag is found !!!\n')
            key = ''
            data = data + new_data
            start_number = data.find('<RECOGOUT>')
            end_number = data.find('</RECOGOUT>')+11  # 11 == len('</RECOGOUT>')
            print('[DEBUG]---- : data list is updated !!!!')
            print('[DEBUG]---- : total raw is from here ------:\n', \
                  data.replace('>','>\n') + new_data.replace('>','>\n'))
            print('[DEBUG]---- : data.find is from here ------:\n', \
                  data[start_number:end_number].replace('>','>\n'))
            print('[DEBUG]---- : RECOGOUT tag length is -: ', \
                  len(data[start_number:end_number]))
            # Check that the segment actually contains a parsable body
            if len(data[start_number:end_number]) <= 1:
                print('[DEBUG]---- : length of RECOGOUT tag is not enouth to parse')
            else:
                print('[DEBUG]---- : data.find is picked up collectory and good to trase ---:\n', \
                      data[start_number:end_number].replace('>','>\n'))
                # Shape into a complete XML document and parse the parameters out of it
                root = ET.fromstring('<?xml version="1.0"?>\n' + data[start_number:end_number].replace('.', ''))
                for shypo in root.findall('SHYPO'):
                    score = shypo.get('SCORE')
                    print('!!!!SCORE :', score)
                for whypo in root.findall('./SHYPO/WHYPO'):
                    word = whypo.get('WORD')
                    score = float(whypo.get('CM'))
                    print('!!!!WORD :', word, 'CM :', score)
                    # Keyword spotting with a per-word confidence threshold
                    # (CM appears to be scaled by 1000 -- confirm against Julius output)
                    if 'Ping' in word and score/1000.0 >= 0.5:
                        key = key + word
                    elif '打って' in word and score/1000.0 >= 0.25:
                        key = key + word
                    elif '今' in word and score/1000.0 >= 0.3:
                        key = key + word
                    elif '何時' in word and score/1000.0 >= 0.3:
                        key = key + word
                    elif 'アドレス' in word and score/1000.0 >= 0.3:
                        key = key + word
                    elif '教えて' in word and score/1000.0 >= 0.3:
                        key = key + word
                    elif ( 'もう一度' in word or 'もう一回' in word) and score/1000.0 >= 0.3:
                        key = key + word
                    elif '言って' in word and score/1000.0 >= 0.3:
                        key = key + word
            # Act on the accumulated keyword string
            print('[DEBUG]---- : input key is ', key)
            if 'Ping' in key:
                exec_cmd.response(word=key,wav=responsesoundfile)
                exec_cmd.ping(target=pingtarget, count=pingcount, interval=pinginterval, read='on')
            elif '何時' in key:
                exec_cmd.response(word=key,wav=responsesoundfile)
                exec_cmd.date(read='on')
            elif 'アドレス' in key:
                exec_cmd.response(word=key,wav=responsesoundfile)
                exec_cmd.getaddress(read='on')
            elif ('もう一度' in key or 'もう一回' in key):
                exec_cmd.response(word=key,wav=responsesoundfile)
                exec_cmd.recall(read='on')
            print('[DEBUG]---- : data list is initialized ')
            data = ''
        else:
            print('[DEBUG]---- : NO tag is found !! recieved data is :\n', \
                  new_data.replace('>','>\n'))
            data = data + new_data
if __name__ == '__main__':
    # Load the settings JSON file
    jfile = 'setting.json'
    jsondata = readjson(jfile)
    # Pre-configure the environment (ALSA device)
    setenv(jsondata)
    try:
        speech2text(jsondata)
    except KeyboardInterrupt:
        print('keyboard interrupt')
|
# deseasonlize and normalize the summertime MDA8O3 from 2014 to 2021 in BTH, YRD and PRD regions
import numpy as np
from netCDF4 import Dataset
def moving_avg(data):
    """Deseasonalize a daily series: each output value is the observation 21
    days in minus the trailing 21-day mean that precedes it.

    The output is fixed at 92 entries (one summer season of days), matching
    the caller's array layout; entries beyond len(data) - 21 stay zero.
    """
    window = 21
    anomalies = np.zeros(92)
    for offset in range(len(data) - window):
        trailing_mean = np.nanmean(data[offset:offset + window])
        anomalies[offset] = data[offset + window] - trailing_mean
    return anomalies
def main():
    """Deseasonalize and normalize summertime MDA8 O3 (2014-2021) for the
    BTH, YRD and PRD station groups, then write the result to NetCDF."""
    file_o3 = '/home/lss/data/O3_meteo/data_extract/vc_sta_std/CNEMC/CNEMC_MDA8_O3.nc'
    dataset = Dataset(file_o3)
    o3_bth = dataset.variables['O3_bth'][:]
    o3_yrd = dataset.variables['O3_yrd'][:]
    o3_prd = dataset.variables['O3_prd'][:]
    dataset.close()
    # Concatenate the three regions along the station axis
    O3 = np.append(o3_bth,o3_yrd,axis=1)
    O3 = np.append(O3,o3_prd,axis=1)
    # 8 years x stations x 92 summer days
    O3_avg = np.zeros((8,len(O3[0]),92))
    O3_de = np.zeros((8,len(O3[0]),92))
    for j in range(0,len(O3[0])):
        for i in range(0,len(O3)):
            O3_avg[i,j,:] = moving_avg(O3[i,j,:])
        # Normalize the first 4 years and the last 4 years separately by
        # their own standard deviation
        O3_de[0:4,j] = O3_avg[0:4,j]/np.nanstd(O3_avg[0:4,j])
        index = np.where(O3_de[0:4,j]>np.nanpercentile(O3_de[0:4,j],90))  # NOTE(review): computed but never used
        O3_de[4:,j] = O3_avg[4:,j]/np.nanstd(O3_avg[4:,j])
        index = np.where(O3_de[4:,j]>np.nanpercentile(O3_de[4:,j],90))  # NOTE(review): computed but never used
    # Write the normalized/deseasonalized series, split back per region
    file_output = '/home/lss/data/O3_meteo/data_extract/vc_sta_std/CNEMC/CNEMC_ND_MDA8_O3.nc'
    f_w = Dataset(file_output,'w',format='NETCDF4')
    f_w.createDimension('year',8)
    f_w.createDimension('days',92)
    f_w.createDimension('BTH',len(o3_bth[0]))
    f_w.createDimension('YRD',len(o3_yrd[0]))
    f_w.createDimension('PRD',len(o3_prd[0]))
    f_w.createVariable('O3_bth',np.float32,('year','BTH','days'))
    f_w.createVariable('O3_yrd',np.float32,('year','YRD','days'))
    f_w.createVariable('O3_prd',np.float32,('year','PRD','days'))
    f_w.variables['O3_bth'][:] = O3_de[:,0:len(o3_bth[0]),:]
    f_w.variables['O3_yrd'][:] = O3_de[:,len(o3_bth[0]):len(o3_bth[0])+len(o3_yrd[0]),:]
    f_w.variables['O3_prd'][:] = O3_de[:,len(o3_bth[0])+len(o3_yrd[0]):len(o3_bth[0])+len(o3_yrd[0])+len(o3_prd[0]),:]
    f_w.close()
# Script entry point.
if __name__=='__main__':
    main()
|
#Connor Oh & Benjamin Avrahami -- TEAM Socks
#SoftDev2 -- pd9
#K11 -- Ay Mon Go Git It From Yer Flask
#2020-03-19
from flask import Flask, render_template, request, session, redirect, url_for
from utl import movies
import os
import pymongo, json
from bson.json_util import loads
# Connect to the local MongoDB instance and get the movies collection.
client = pymongo.MongoClient('localhost', 27017) # port 27017
db = client['Socks']
films = db['movies']
# Seed the collection from the bundled JSON dump on first run only.
# NOTE(review): Collection.count() and insert() are deprecated in modern
# pymongo (count_documents({}) / insert_many) -- confirm the driver version.
if films.count() == 0:
    with open('utl/movies.json','r') as jsonfile:
        data = jsonfile.read()
        content = loads(data)
        films.insert(content)
app = Flask(__name__) #create instance of class Flask
app.secret_key = os.urandom(32) #generates a secret key for session to start
@app.route("/", methods=["GET","POST"]) #assign following fxn to run when root route requested
def movie():
    """Serve the landing page."""
    template = 'index.html'
    return render_template(template)
@app.route("/year", methods=["POST"])
def year():
    """Render movies released between the posted start and end years."""
    first = int(request.form['start'])
    last = int(request.form['end'])
    matches = movies.moviesFromTo(first, last)
    return render_template('index.html', yearsmovie = matches)
@app.route("/name", methods=["POST"])
def name():
    """Render movies featuring the posted performer."""
    performer = request.form['name']
    matches = movies.moviesThisPerformerIn(performer)
    return render_template('index.html', namemovie = matches)
@app.route("/genre", methods=["POST"])
def genre():
    """Render movies belonging to the posted genre."""
    wanted = request.form['genre']
    matches = movies.moviesInThisGenre(wanted)
    return render_template('index.html', genremovie = matches)
if __name__ == "__main__":
    # Development server only -- debug=True must not be used in production.
    app.debug = True
    app.run(host='0.0.0.0')
|
"""
Aggregation functions that can be used as arguments to the Field.apply method
"""
#from numba import jit
import numpy as np
def mask(values, above=None, below=None):
    """Mask values not between above and below, in other words
    mask values below above and above below, its confusing but makes sense!

    Returns a masked array; when neither bound is given, an unmasked
    masked_array view of *values* is returned.
    """
    # `is not None` instead of the original `!= None`: equality comparison
    # against None on an array bound triggers elementwise comparison and a
    # FutureWarning in NumPy; identity is the correct check.
    if above is None and below is None:
        return np.ma.masked_array(values)
    if above is not None:
        values = np.ma.masked_less(values, float(above))
    if below is not None:
        values = np.ma.masked_greater(values, float(below))
    return values
def generic(func, values, axis=0, above=None, below=None):
    """Apply *func* along *axis* after masking values outside [above, below]."""
    masked_values = mask(values, above=above, below=below)
    return func(masked_values, axis=axis)
# np.ma.sum sets masked values to 0 which isn't what we want, calculate the mean and multiple by the number elements
def total(values, **kwargs):
    """Masked total: mean of unmasked values times the unmasked count."""
    # dict.get with a default replaces the original if/else boilerplate.
    axis = kwargs.get('axis', 0)
    return generic(np.ma.mean, values, **kwargs) * np.ma.count(values, axis=axis)
# np.ma.mean doesn't support tuples for axis so we convert to non masked array first
def mean(values, **kwargs):
    """Masked mean (bounds via above=/below= keyword arguments)."""
    return generic(np.ma.mean, values, **kwargs)
def max(values, **kwargs):
    # NOTE: intentionally shadows builtin max -- this module acts as an aggregation namespace.
    return generic(np.ma.max, values, **kwargs)
def min(values, **kwargs):
    # NOTE: intentionally shadows builtin min (see comment on max above is not assumed; same rationale).
    return generic(np.ma.min, values, **kwargs)
def count(values, **kwargs):
    """Count of unmasked values along the chosen axis."""
    return generic(np.ma.count, values, **kwargs)
def maxrun(values, axis=0, above=None, below=None, **kwargs):
    """
    Maximum sequence length of non-masked values
    """
    # Mask values outside the bounds
    values = mask(values, above=above, below=below)
    # Inverse of mask as 1s and 0s
    ones = (~np.ma.getmaskarray(values)).astype(np.int8)
    # Cumulative run lengths along axis, reset at every occurrence of zero
    runs = np.zeros(ones.shape, dtype=np.int32)
    # Setup base slices
    s1 = [slice(0, None)] * ones.ndim
    s2 = [slice(0, None)] * ones.ndim
    # Set axis dimension slice to zero
    s1[axis] = 0
    # BUG FIX: index with tuple(...) -- indexing arrays with a *list* of
    # slices was deprecated in NumPy 1.15 and removed in current releases.
    runs[tuple(s1)] = ones[tuple(s1)]
    for i in range(1, ones.shape[axis]):
        # Set axis dimension slice indices
        s1[axis] = i - 1
        s2[axis] = i
        # Calculate the cumulative sum but multiply by the value,
        # this resets the sum to zero for every occurance of zero in ones
        runs[tuple(s2)] = (runs[tuple(s1)] + ones[tuple(s2)]) * ones[tuple(s2)]
    # Return the maximum run value
    return runs.max(axis=axis)
|
from Individual import *
from Problem import *
from copy import deepcopy
from Operators import *
class DifferentialEvolution:
    """Differential evolution optimizer with binomial crossover over a
    population of Individuals."""

    def __init__(self, problem, NP, operator, max_iteration=1000, F=0.9, CR=0.2):
        """
        :param problem: problem exposing `dimension` and `objective_function`
        :param NP: population size
        :param operator: mutation operator with get_candidate(solution)
        :param max_iteration: number of generations to run
        :param F: differential weight (stored; used by the operator, not here)
        :param CR: crossover probability
        """
        self.NP = NP
        self.F = F
        self.CR = CR
        self.problem = problem
        self.individuals = [Individual(problem) for _ in range(NP)]
        self.max_iter = max_iteration
        self.iter = 0
        self.operator = operator
        self.global_best = Individual(problem)
        self.convergence = []

    def memorize(self):
        """Record the generation's best and update the global best if improved."""
        best_solution = min(self.individuals, key=lambda b: b.cost)
        if best_solution.cost < self.global_best.cost:
            self.global_best = deepcopy(best_solution)
        self.convergence.append((self.iter, best_solution.cost))
        print(self.global_best.cost)

    def Run(self):
        """Main DE loop: mutate, binomial crossover, greedy selection."""
        while self.iter < self.max_iter:
            for ind, individual in enumerate(self.individuals):
                # Mutate
                v = np.copy(individual.solution)
                v = self.operator.get_candidate(v)
                # Crossover: d1 is the dimension forced to take the donor
                # value so the trial always differs from the parent.
                # BUG FIX: np.random.randint's upper bound is exclusive, so
                # the original (0, dimension - 1) could never pick the last
                # dimension; use the full range.
                d1 = np.random.randint(0, self.problem.dimension)
                u = np.copy(individual.solution)
                for i in range(self.problem.dimension):
                    if i == d1 or np.random.rand() < self.CR:
                        u[i] = v[i]
                new_cost = self.problem.objective_function(u)
                # Greedy selection: keep the trial only if it improves
                if new_cost < individual.cost:
                    individual.solution = np.copy(u)
                    individual.cost = new_cost
            self.memorize()
            self.iter += 1
|
import sys
INF = sys.maxsize

def dijkstra(start, goal, n, graph):
    """O(n^2) Dijkstra over an n x n adjacency matrix (INF = no edge).

    Returns the shortest distance from *start* to *goal* (INF if unreachable).
    """
    dist = [INF] * n
    dist[start] = 0
    done = [False] * n
    while True:
        # Pick the closest unfinished vertex
        nearest = -1
        best = INF
        for v in range(n):
            if not done[v] and dist[v] < best:
                best = dist[v]
                nearest = v
        if best == INF:
            # All reachable vertices are finalized
            break
        done[nearest] = True
        # Relax edges leaving the chosen vertex
        for v in range(n):
            if done[v]:
                continue
            candidate = dist[nearest] + graph[nearest][v]
            if candidate < dist[v]:
                dist[v] = candidate
    return dist[goal]
# Read vertex/edge counts and build the undirected weighted adjacency matrix.
n, e = map(int, input().split())
matrix = [[INF]*n for _ in range(n)]
for _ in range(e):
    a, b, c = map(int, input().split())
    # 1-indexed input -> 0-indexed matrix, symmetric for an undirected graph
    matrix[a-1][b-1] = c
    matrix[b-1][a-1] = c
mid_1, mid_2 = map(int, input().split())  # NOTE(review): read but never used in the visible code -- the routine consuming these waypoints is not in this chunk
|
from neo4j import GraphDatabase
# Connection settings for the local Neo4j instance.
uri = "neo4j://localhost:7687"
user = "neo4j"
password = "password"  # NOTE(review): hard-coded credentials -- move to config/env for real deployments
def pupulate():
    # NOTE(review): name is presumably a typo for "populate"; kept as-is since
    # external callers may rely on it.
    # Only builds the driver; no session/queries yet and the driver is never
    # closed in the visible code -- looks like an unfinished stub.
    driver = GraphDatabase.driver(uri, auth=(user, password ))
|
# Generated by Django 3.1.5 on 2021-01-08 13:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: give nobat.amount and nobat.name non-null defaults."""
    dependencies = [
        ('nobat', '0004_auto_20210108_1703'),
    ]
    operations = [
        migrations.AlterField(
            model_name='nobat',
            name='amount',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='nobat',
            name='name',
            field=models.CharField(default='nothing', max_length=100),
        ),
    ]
|
# -*- coding:utf-8 -*-
"""
@Time:2018/5/15 20:09
@Author:yuhongchao
"""
import turtle
import random
import numpy as np
from math import *
class Dot:
    """A colored point that can paint itself as a small dot on a turtle canvas."""

    def __init__(self, xcoord, ycoord, color):
        self.xcoord = xcoord
        self.ycoord = ycoord
        self.color = color

    def __repr__(self):
        return f"Dot({self.xcoord!r}, {self.ycoord!r}, {self.color!r})"

    def draw(self, turtle):
        """Move the given pen to this dot's position and stamp a 5px dot."""
        turtle.goto(self.xcoord, self.ycoord)
        turtle.dot(5, self.color)
def main():
    """Draw a grid of red dots, pick 10 at random, mark them blue and trace
    the polygon returned by graham() with the turtle pen."""
    turtle.setup(width=0.5, height=0.5, startx=0, starty=0)
    # turtle.setpos(60, 30)
    turtle.setworldcoordinates(50, 50, 500, 500)
    turtle.penup()
    turtle.speed(10)
    # Build an 8x8 lattice of candidate points
    points = []
    for i in range(100, 500, 50):
        for j in range(100, 500, 50):
            points.append([i, j])
    for i in range(len(points)):
        dot = Dot(points[i][0], points[i][1], 'red')
        dot.draw(turtle)
    slice = random.sample(points, 10)  # NOTE(review): shadows builtin slice
    slice.sort(key=lambda s: (s[0], s[1]))  # sort by x ascending, ties broken by y
    print(slice)
    # Highlight the sampled points in blue
    for i in slice:
        turtle.penup()
        turtle.setposition(i[0], i[1])
        turtle.dot(8, "blue")
    tu = graham(slice)
    # turtle.penup()
    # Trace the resulting polygon and close it back to the start point
    turtle.setposition(tu[0][0], tu[0][1])
    turtle.pendown()
    for i in range(1, len(tu)):
        turtle.goto(tu[i][0], tu[i][1])
    turtle.goto(tu[0][0], tu[0][1])
    turtle.getscreen()._root.mainloop()
# Compute the hull polygon (Graham-scan-like ordering by angle)
def graham(li):
    """Order the points of *li* by angle around the first point and hand the
    ordering to scan() to filter down to boundary vertices.

    NOTE(review): the angle formula below measures the vector from each point
    back to li[0] against a fixed downward reference -- verify it matches a
    textbook Graham scan; a zero-length vector (duplicate of li[0]) would
    divide by zero.
    """
    begin = li[0]
    biaozhun = [li[0][0], -1]  # reference direction (pointing "down")
    jiaodus = {}  # point index -> angle to the reference
    points = []
    points.append(li[0])
    for i in range(1, len(li)):
        cha = [begin[0] - li[i][0], begin[1] - li[i][1]]  # vector from point i to begin
        jiaodu = acos((cha[1] - biaozhun[1]) / sqrt(pow(cha[1] - biaozhun[1], 2) + pow(cha[0] - biaozhun[0], 2)))
        jiaodus[i] = jiaodu
    jiaos = sorted(jiaodus.items(), key=lambda item: item[1])  # sort indices by angle
    print(jiaos)
    shunxus = [i[0] for i in jiaos]  # visiting order of point indices
    shunxus.insert(0, 0)
    # points.append(li[shunxus[1]])
    return scan(shunxus, li, points)
def scan(shunxu, li, points):
    """Walk the angle-ordered indices and keep each point for which fenge()
    reports that all points lie on one side of the edge from the last kept
    point -- i.e. the edge is on the boundary."""
    print(shunxu)
    for i in range(len(shunxu) - 1):
        if fenge(li[shunxu[i + 1]], points[-1], li):
            points.append(li[shunxu[i + 1]])
    print(li)
    print(points)
    return points
def fenge(af, be, all):
    """True when every point in *all* has the same orientation relative to the
    segment be->af as the first point does (i.e. all lie on one side)."""
    # Parameter `all` shadows the builtin, so the builtin all() is unusable here;
    # the name is kept because it is part of the public signature.
    reference = calTri(be, af, all[0])
    for point in all[1:]:
        if calTri(be, af, point) != reference:
            return False
    return True
def calTri(p1, p2, p3):
    """Orientation predicate: True when the signed (doubled) area of the
    triangle p1, p2, p3 is non-negative (counter-clockwise or collinear)."""
    cross = (p1[0] * p2[1] + p2[0] * p3[1] + p3[0] * p1[1]
             - p3[0] * p2[1] - p2[0] * p1[1] - p1[0] * p3[1])
    return cross >= 0
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 13:20:38 2021
@author: reetb
"""
import os
os.chdir('C:/Users/reetb/Desktop/LouvainInfMax/InitialExpts/Plots')  # NOTE(review): hard-coded user path; the PNGs below are written here
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import statistics
import numpy as np
# Expected-influence results: five independent trials per seed-set size
# (10..100 seeds). th_* = cit-HepTh graph, ph_* = cit-HepPh graph;
# *_imm_* = plain IMM, *_louvain_imm_* = Louvain-IMM-Seq.
th_imm_10 = [13547, 13550, 13666, 13605, 13726]
th_imm_15 = [13831, 14156, 13947, 13904, 13761]
th_imm_20 = [14328, 14285, 14051, 14035, 14193]
th_imm_25 = [14348, 14055, 14551, 14446, 14194]
th_imm_50 = [14905, 15152, 14945, 14666, 14868]
th_imm_75 = [15145, 15419, 15369, 15324, 15197]
th_imm_100 = [15749, 15827, 15602, 15604, 15750]
th_louvain_imm_10 = [12969, 12623, 12851, 12912, 13062]
th_louvain_imm_15 = [13274, 13477, 13172, 13274, 13290]
th_louvain_imm_20 = [13666, 13476, 13625, 13512, 13477]
th_louvain_imm_25 = [13630, 13621, 13851, 13577, 13472]
th_louvain_imm_50 = [13440, 13596, 13600, 13583, 13690]
th_louvain_imm_75 = [13737, 13639, 13845, 13568, 13260]
th_louvain_imm_100 = [13801, 13808, 13577, 13581, 13776]
ph_imm_10 = [17526, 17742, 17749, 17798, 17588]
ph_imm_15 = [17904, 17996, 17886, 18143, 18207]
ph_imm_20 = [18387, 18183, 18377, 18458, 18381]
ph_imm_25 = [18586, 18725, 18446, 18524, 18438]
ph_imm_50 = [19155, 19160, 19422, 19332, 19295]
ph_imm_75 = [19790, 19873, 19730, 19745, 19587]
ph_imm_100 = [20152, 20149, 20108, 20268, 19993]
ph_louvain_imm_10 = [16923, 17210, 17395, 17429, 17122]
ph_louvain_imm_15 = [17504, 17409, 17408, 17682, 17379]
ph_louvain_imm_20 = [17716, 17283, 17466, 17747, 17640]
ph_louvain_imm_25 = [17628, 17340, 17405, 17855, 17658]
ph_louvain_imm_50 = [17470, 17660, 17593, 17583, 17376]
ph_louvain_imm_75 = [17294, 17059, 17603, 17449, 17819]
ph_louvain_imm_100 = [18688, 18604, 18822, 18636, 18397]
# cit-HepTh figure: mean +/- stdev of expected influence per seed-set size.
# The 14 repeated mean/stdev append pairs are collapsed into comprehensions.
fig, ax = plt.subplots()
th_imm_series = [th_imm_10, th_imm_15, th_imm_20, th_imm_25,
                 th_imm_50, th_imm_75, th_imm_100]
th_louvain_series = [th_louvain_imm_10, th_louvain_imm_15, th_louvain_imm_20,
                     th_louvain_imm_25, th_louvain_imm_50, th_louvain_imm_75,
                     th_louvain_imm_100]
y1 = [statistics.mean(s) for s in th_imm_series]
e1 = [statistics.stdev(s) for s in th_imm_series]
y2 = [statistics.mean(s) for s in th_louvain_series]
e2 = [statistics.stdev(s) for s in th_louvain_series]
x = [10, 15, 20, 25, 50, 75, 100]
ax.errorbar(x, y1, e1, marker='^', label='Imm')
ax.errorbar(x, y2, e2, marker='o', label='Louvain-Imm-Seq')
ax.legend()
ax.set_xlabel('No. Seeds')
ax.set_ylabel('Expected influence')
ax.set_title('cit-HepTh: n = 27,770, m = 352,807')
plt.savefig('cit-HepTh_ExpectedInfluence.png', dpi = 500)
# Figure 2: same comparison as Figure 1, on the cit-HepPh citation graph.
# Each point is the mean over repeated runs; error bars are sample stdev.
# (The repeated mean/stdev append blocks are collapsed into loops.)
fig, ax = plt.subplots()
ph_imm_runs = [ph_imm_10, ph_imm_15, ph_imm_20, ph_imm_25,
               ph_imm_50, ph_imm_75, ph_imm_100]
ph_louvain_runs = [ph_louvain_imm_10, ph_louvain_imm_15, ph_louvain_imm_20,
                   ph_louvain_imm_25, ph_louvain_imm_50, ph_louvain_imm_75,
                   ph_louvain_imm_100]
y1 = [statistics.mean(runs) for runs in ph_imm_runs]
e1 = [statistics.stdev(runs) for runs in ph_imm_runs]
y2 = [statistics.mean(runs) for runs in ph_louvain_runs]
e2 = [statistics.stdev(runs) for runs in ph_louvain_runs]
#labels = ['10', '15', '20', '25', '50', '75', '100']
#x = np.arange(len(labels))
x = [10, 15, 20, 25, 50, 75, 100]
ax.errorbar(x, y1, e1, marker='^', label='Imm')
ax.errorbar(x, y2, e2, marker='o', label='Louvain-Imm-Seq')
ax.legend()
ax.set_xlabel('No. Seeds')
ax.set_ylabel('Expected influence')
ax.set_title('cit-HepPh: n = 34,546, m = 421,578')
plt.savefig('cit-HepPh_ExpectedInfluence.png', dpi = 500)
#ax.set_xticklabels(labels)
|
#https://www.hackerrank.com/challenges/pangrams
# Enter your code here. Read input from STDIN. Print output to STDOUT
def isPangram(text):
    """Return True when *text* contains every letter a-z in either case.

    Checks each lowercase code point 97..122 ('a'..'z'); a letter counts if
    it appears in lower or upper case.  (The original parameter shadowed the
    builtin `str`; renamed to `text`.)
    """
    for code in range(97, 123):
        if chr(code) not in text and chr(code - 32) not in text:
            return False
    return True


if __name__ == '__main__':
    # Script entry (HackerRank "pangrams"): read one line and report.
    # Moved under a __main__ guard so importing the module does not block
    # on stdin; print() form works on both Python 2 and 3.
    string = raw_input()
    print(("not " if not isPangram(string) else "") + "pangram")
from django.shortcuts import render, redirect
from django.core.mail import EmailMessage
from django.template import Context
from django.template.loader import get_template
import datetime
from . import forms, models
# Create your views here.
def index(request):
    """Render the home page; on POST, validate and store a contact message.

    Context passed to personal/home.html:
        form    -- the contact form; bound on an invalid POST so the
                   template can display validation errors (the original
                   always passed the form *class*, losing the errors)
        success -- True only after a submission was validated and saved
    """
    form = forms.ContactForm()
    is_contact_sent = False
    if request.method == 'POST':
        form = forms.ContactForm(data=request.POST)
        if form.is_valid():
            name = request.POST.get('name', '')
            email = request.POST.get('email', '')
            message = request.POST.get('message', '')
            contact = models.Contact(name=name, email=email, message=message,
                                     date=datetime.datetime.now())
            # performs check before saving
            contact.check_save()
            is_contact_sent = True
            # Present a fresh, empty form again after a successful send.
            form = forms.ContactForm()
            # Email the profile with the contact information
            """
            TODO: store this in a log somewhere later
            template = get_template('contact_template.txt')
            context = Context({
                'name': name,
                'email': email,
                'message': message,
            })
            content = template.render(context)
            email = EmailMessage(
                "New contact form submission",
                content,
                "Your website" + '',
                ['youremail@gmail.com'],
                headers={'Reply-To': email}
            )
            email.send()
            """
    return render(request, 'personal/home.html', {'form': form, 'success': is_contact_sent})
def demos(request):
    """Render the static demos page."""
    template_name = 'personal/demos.html'
    return render(request, template_name)
|
"""hackerrank_Sam_And_Substrings
https://www.hackerrank.com/challenges/sam-and-substrings
"""
def solve(n):
    """Print the sum of the integer values of every contiguous substring
    of the digit string *n*, modulo 10**9 + 7.

    `state` holds the (mod) sum of the values of all substrings that end at
    the current position: appending digit d to every previous such substring
    multiplies each by 10, and d itself starts (i + 1) new substrings.
    The original packed this into one hard-to-read conditional expression
    and used the Python-2-only `print acc` statement.
    """
    MOD = 10 ** 9 + 7
    acc = 0
    state = 0
    for i, char in enumerate(n):
        state = (state * 10 + int(char) * (i + 1)) % MOD
        acc = (acc + state) % MOD
    print(acc)
def main():
    """Read the digit string from stdin and solve it.

    Tolerates both interpreters: `raw_input` exists only on Python 2, so
    fall back to `input` (which reads a plain string on Python 3).
    """
    try:
        line = raw_input()
    except NameError:  # Python 3: raw_input was removed
        line = input()
    solve(line)


if __name__ == '__main__':
    main()
|
import webkit, gtk
import os
# Smart-mirror horoscope applet (Python 2 / PyGTK / pywebkitgtk stack):
# shows a local HTML page in a small WebKit window and speaks a greeting
# before entering the GTK main loop.
window = gtk.Window()
browser = webkit.WebView()
window.add(browser)
window.set_default_size(370,170)
window.show()
browser.set_size_request(360,170)
browser.show()
# Load the horoscope page from the local filesystem.
browser.load_uri("file:///home/pi/smartmirror/apps/horoscope/url.html")
window.connect("delete-event",gtk.main_quit)
# NOTE(review): title reads "Horoscop" -- possibly a typo for "Horoscope";
# confirm intent before changing the user-visible string.
window.set_title("Horoscop")
# os.system blocks until speech.sh finishes, so the greeting plays before
# the GTK main loop starts; return code is kept but unused.
retvalue = os.system("/home/pi/smartmirror/speech/speech.sh Smarty Horoscope. Do you feel lucky today\?")
gtk.main()
|
# -*- coding: UTF-8 -*-
'''
Update your function so that when the user calls the function with wrong type
as a parameter it says "One or both of your parameters are wrong type!"
(Don't remove the ZeroDivision exception!)
'''
def division(x, y):
    """Return x / y as a float, or an error string when y is zero.

    Per the exercise, ZeroDivisionError is handled here; wrong-type input
    is caught by the caller as ValueError from int() conversion.
    """
    try:
        return x / float(y)
    except ZeroDivisionError:
        return "ZeroDivisionError, cannot divide by 0"


if __name__ == '__main__':
    # Prompts moved under a __main__ guard so importing this module does not
    # block on stdin; print() form is valid on both Python 2 and 3.
    yks = raw_input("Annappa yks: ")
    kaks = raw_input("Annappa toenen: ")
    try:
        print(division(int(yks), int(kaks)))
    except ValueError:
        print("One or both of your parameters are wrong type!")
|
## Exercise 1
## Write a function using recursion to calculate the greatest common divisor of two numbers
## Helpful link:
## https://www.khanacademy.org/computing/computer-science/cryptography/modarithmetic/a/the-euclidean-algorithm
def gcd(x, y):
    """Greatest common divisor of x and y via the recursive Euclidean algorithm.

    Adds the mathematical base case gcd(x, 0) == x, which the original
    crashed on with ZeroDivisionError (it computed x % y before checking y).
    """
    if y == 0:
        return x
    # Recurse on (y, x mod y); the remainder strictly shrinks, so this
    # terminates at the y == 0 base case.
    return gcd(y, x % y)
## Exercise 2
## Write a function using recursion that returns prime numbers less than 121
def find_primes(me=121, primes=None):
    """Return the primes <= *me* in descending order, built recursively.

    Fix: the original used the mutable default `primes=[]`, which is shared
    across calls, so every call after the first kept appending to (and
    returning) the same ever-growing list.
    """
    if primes is None:
        primes = []
    if me == 2:
        primes.append(2)
        return primes
    # Trial division; the for/else appends `me` only when no divisor is found.
    for i in range(2, me):
        if me % i == 0:
            break
    else:
        primes.append(me)
    return find_primes(me - 1, primes)
|
import os
from random import shuffle, choice
from collections import namedtuple
class Juego:
    """Simple card duel: two players get 5 cards each from a CSV deck and
    battle for a number of turns, alternating the attacker.

    Fixes over the original:
      * card stats are stored as ints (they came straight from the CSV as
        strings, so `atacar` compared them lexicographically: '9' > '10');
      * on player 2's turn, `atacar` was called with player 1's cards from
        the previous turn (copy-paste bug), so the wrong cards were judged.
    """

    def __init__(self, turnos):
        self.mazo = []       # remaining deck
        self.cartas_j1 = []  # player 1's hand
        self.cartas_j2 = []  # player 2's hand
        self.read_file()
        self.repartir_cartas()
        self.comenzar_juego(turnos)

    def read_file(self):
        """Load the deck from cards.csv, skipping the header row."""
        Crear_tupla = namedtuple('Crear_carta', ['nombre', 'ataque', 'defensa'])
        path_cards = os.path.join('contenidos', 'semana-01',
                                  'ejercicios_propuestos', 'cards.csv')
        with open(path_cards, 'rt') as archivo:
            lineas = archivo.readlines()
        for linea in lineas[1:]:
            nombre, ataque, defensa = linea.strip().split(',')
            # Store the stats as ints so comparisons are numeric.
            self.mazo.append(Crear_tupla(nombre, int(ataque), int(defensa)))

    def repartir_cartas(self):
        """Shuffle the deck and deal 5 cards alternately to each player."""
        shuffle(self.mazo)
        for _ in range(5):
            self.cartas_j1.append(self.mazo.pop(0))
            self.cartas_j2.append(self.mazo.pop(0))

    def atacar(self, atacante, defen):
        """Return True when the defender survives (defensa > ataque);
        attacker wins ties (returns False)."""
        if atacante.ataque >= defen.defensa:
            return False
        else:
            return True

    def comenzar_juego(self, turnos):
        """Play up to `turnos` turns; the loser of each turn loses a card.
        Declares a winner early when a hand empties."""
        for i in range(1, turnos + 1):
            print(f"Turno número {i}")
            if self.cartas_j1 != [] and self.cartas_j2 != []:
                if i % 2:
                    # Ataca el jugador 1
                    ataque1 = choice(self.cartas_j1)
                    defensa2 = choice(self.cartas_j2)
                    if self.atacar(ataque1, defensa2) == True:
                        self.cartas_j1.remove(ataque1)
                        print(f'J2 gana el turno {i}')
                    else:
                        self.cartas_j2.remove(defensa2)
                        print(f'J1 gana el turno {i}')
                else:
                    # Ataca el jugador 2
                    ataque2 = choice(self.cartas_j2)
                    defensa1 = choice(self.cartas_j1)
                    # BUG FIX: the original evaluated atacar(ataque1, defensa2),
                    # reusing player 1's cards from the previous turn.
                    if self.atacar(ataque2, defensa1) == True:
                        self.cartas_j2.remove(ataque2)
                        print(f'J1 gana el turno {i}')
                    else:
                        self.cartas_j1.remove(defensa1)
                        print(f'J2 gana el turno {i}')
            else:
                if self.cartas_j2 == []:
                    print('Gana J1')
                else:
                    print('Gana J2')
                break
juego = Juego(10) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 12:35:12 2018
@author: claypooldj
"""
####Load packages
import csv
import os
import random
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import math
import pandas as pd
import seaborn as sns
"""
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#Inputs
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
gtfFileName="/home/claypooldj/genomes/gencode_hg38.longestTranscript.csv" #The intermediate gtf longest splice variant csv file (generated from a separate script) to use for annotation. {Type - String}
csvDir="/home/claypooldj/myPythonScripts/testBedHM/hInputs/" #The directory containing the cluster csv files to analyze. All csv files in this directory will be tested (so make sure there aren't other csv files in the directory or it will crash!) {Type - String}
outputDir="/home/claypooldj/myPythonScripts/testBedHM/output/" #The directory where you want to generate all output files. {Type - String}
gtu=[] #The list of genes which you wish to consider. Just put an empty list '[]' if you want to consider all genes. {Type - List[String] or empty List}
clustThreshold=1 #The minimum number of clusters a gene must have to be considered {Type - Int}
boundF="URC" #The continuous field in the clusters file to use to apply filtering bounds (both upper and lower) {Type - String}
lBound=0 #The percentage of clusters to filter based on the lowest values of the bounding continuous variable (boundF) {Type - Num}
uBound=0 #The percentage of clusters to filter based on the largest values of the bounding continuous variable (boundF) {Type - Num}
wValuesByRC=False #Whether or not the program should weight the impact of each indivudal gene on the overall metagene average linearly with the number of clusters that align to that gene {Type - Bool}
#randStates=[7211995,541995,3131994,111,222,333,444,555,888,999] #The random states to use in the binning algorithm, which randomly deals with rounding error and then averages the results. Can be any length. {Type- List[Int]}
randStates=[999]
dpi=250 #The resolution of the output graphs in dpi {Type - Int}
imgFrmt="pdf" #Which format to save the output graphs to. Two options, either "pdf" which saves as pdf otherwise it saves to png format. {Type - String}
wtGE="/Volumes/Untitled/Output/explorador/RNA_Seq_Background/moreWTExpressions/geneExpressionMatrix_TPM_withGeneNames.csv" #A csv file containing the wild type gene expression levels from RNA-Seq(s) of the cell line. Takes average value. If this string is empty, we will not weigh values by WT gene expression.
wtGE=""
mainChromosomes=True #Whether we should only consider the main chromsomes (the autosomal and X/Y) in the analysis and not the strange chromosome constructs used for.
theBED_GTF="/home/claypooldj/genomes/gencode.v30.annotation.gtf"
theBEDAnotScript="/home/claypooldj/clip_metagene/bedannotator.sh"
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
"""
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
##Define objects and their various methods
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
class anotRow:
    """
    Annotation Row Object: one row of the annotation (GTF-derived) file.

    Properties:
        gene (str) - Gene name this row maps to.
        ty (str) - Row type (exon/intron/FUTR/TUTR/CDS, etc.).
        start (int) - First nucleotide index covered.
        stop (int) - Last nucleotide index covered.
        chrom (str) - Chromosome name.
        orientation (str) - Strand, '+' or '-'.

    Methods:
        __init__ - Initializer.
        __str__ - Human-readable summary.
        coversIndex - Membership test for a nucleotide index.
    """

    def __init__(self, gene, ty, start, stop, chrom, orientation):
        """Store the row fields verbatim."""
        self.gene = gene
        self.ty = ty
        self.start = start
        self.stop = stop
        self.chrom = chrom
        self.orientation = orientation

    def __str__(self):
        """Return a tab-separated one-line description of the row."""
        parts = ("{Annotaion File Row Object} Start: " + str(self.start),
                 "Stop: " + str(self.stop),
                 "Gene: " + self.gene,
                 "Chromosome: " + self.chrom,
                 "Type: " + self.ty)
        return "\t".join(parts)

    def coversIndex(self, index):
        """Return True when *index* lies within [start, stop] inclusive."""
        return self.start <= index <= self.stop
class anotGene:
    """
    Annotation Gene Object: the collection of anotRow objects that together
    represent one gene at its longest-transcript representation.

    Properties:
        gene (str) - Gene name these rows map to.
        chrom (str) - Chromosome these rows map to.
        start (int) - Smallest starting index over all rows (set by getStart).
        stop (int) - Placeholder end index (initialised to 1; not maintained here).
        lstOfAnotRows (list[anotRow]) - All anotRow objects in this gene.
        exonIndexStarts/exonIndexStops (list[int]) - Exon-half boundary positions
            relative to the gene start (populated by getStartandStopIndices).
        intronIndexStarts/intronIndexStops (list[int]) - Intron boundary positions
            relative to the gene start.

    Methods:
        __init__ - Initializer.
        __str__ - Human-readable summary.
        repairGaps - Fill gaps between exons with introns.
        getStart - Smallest starting index of all rows.
        sideOfAdjacentIntron - Which side(s) of an exon border an intron.
        getExonAt - Exon row covering a given position.
        exonPositionIntronAdjacent - Is an exon position in a half adjacent to an intron.
        addAR - Add an anotRow to this gene.
        getStartandStopIndices - Populate the relative start/stop boundary lists.
        getType - Annotation category (exon/intron/FUTR/TUTR/CDS/NC) at an index.
    """
    def __init__(self,gene,chrom):
        self.gene=gene
        self.chrom=chrom
        self.start=0
        self.stop=1
        self.lstOfAnotRows=[]
        self.exonIndexStarts=[]
        self.exonIndexStops=[]
        self.intronIndexStarts=[]
        self.intronIndexStops=[]
    def __str__(self):
        """Return a one-line description of this gene and its row count."""
        toPrint="{Annotaion Gene Object} Gene: "+self.gene+"\tChromosome: "+self.chrom+"\tNumber of Annotation Row Objects: "+str(len(self.lstOfAnotRows))
        return(toPrint)
    def repairGaps(self):
        """
        Searches for gaps between exons and, if any are found, fills them
        with introns (either by extending the following intron or by
        inserting a new intron row).
        """
        nList=[]
        #Sort rows by genomic start (int() cast: starts may be stored as strings)
        sortedList=self.lstOfAnotRows
        sortedList.sort(key=lambda x: int(x.start))
        #Loop over each exon...
        for i in range(0,len(sortedList)):
            ro=sortedList[i]
            if ro.ty=="exon":
                #Check to make sure that it is immediately followed by an intron (that is, check to be sure there is no gap)
                if i+1<len(sortedList):
                    theNext=sortedList[i+1]
                    # NOTE(review): `(ro.stop)+1` has no int() cast here, unlike
                    # the casts below — assumes ro.stop is already an int; confirm.
                    if int(theNext.start)!=(ro.stop)+1:
                        #If it is going into an intron simply expand the intron
                        if theNext.ty=="intron":
                            theNext.start=int(ro.stop)+1
                        #Otherwise create a new row element
                        if theNext.ty!="intron":
                            #Create a new intron spanning the gap
                            nIntron=anotRow(ro.gene,"intron",int(ro.stop)+1,int(theNext.start)-1,ro.chrom,ro.orientation)
                            if nIntron not in sortedList:
                                nList.append(nIntron)
        # Append the (possibly modified) original rows after any new introns.
        for ro in sortedList:
            nList.append(ro)
        self.lstOfAnotRows=nList
    def getStart(self):
        """
        Finds the smallest starting index of the rows in the anotGene.
        Returns:
            int - The first nucleotide position in the anotGene, or
            False when the gene has no rows.
        """
        possibleVals=[]
        for row in self.lstOfAnotRows:
            possibleVals.append(row.start)
        if (len(possibleVals)==0):
            return(False)
        theMin = min(possibleVals)
        self.start=theMin
        return(theMin)
    def sideOfAdjacentIntron(self,exon):
        """
        Checks if an exon is bordered by introns on the Left (L), Right (R),
        Both (B) sides, or Neither (N).
        Inputs:
            exon (anotRow) - The exon to check for bordering introns.
        Returns:
            str - "B", "L", "R", or "N".
        """
        intronBefore=False
        intronAfter=False
        exonIndex=0
        #Get the position of the exon within the list of anot rows
        for i in range(0,len(self.lstOfAnotRows)):
            ro = self.lstOfAnotRows[i]
            #If it is the same as the exon
            if ro.start==exon.start and ro.stop==exon.stop and ro.ty=="exon":
                exonIndex=i
        #Check if it has any introns before it
        if (exonIndex>0):
            j=exonIndex-1
            # NOTE(review): `while j!=0` never examines index 0, so an intron
            # in the first row is missed — verify whether that is intended.
            while j!=0:
                prevRo=self.lstOfAnotRows[j]
                if (prevRo.ty=="exon"):
                    break
                if (prevRo.ty=="intron"):
                    intronBefore=True
                j=j-1
        #Check if it has any introns after it
        if (exonIndex!=len(self.lstOfAnotRows)):
            for k in range(exonIndex+1,len(self.lstOfAnotRows)):
                nxtRo = self.lstOfAnotRows[k]
                if (nxtRo.ty=="exon"):
                    break
                if (nxtRo.ty=="intron"):
                    intronAfter=True
        #Now figure out what to return
        if intronBefore==True and intronAfter==True:
            return("B")
        if intronBefore==True:
            return("L")
        if intronAfter==True:
            return("R")
        return("N")
    def getExonAt(self,position):
        """
        Returns the exon row at a particular position.
        Input:
            position (int) - The position to check for an exon.
        Returns:
            anotRow - The exon covering this position, or False when none does.
        """
        for ro in self.lstOfAnotRows:
            #If it is exon
            if ro.ty=="exon":
                if (ro.stop>=position and ro.start<=position):
                    return(ro)
        return(False)
    def exonPositionIntronAdjacent(self,position):
        """
        Checks if a position within an exon falls in the half of that exon
        adjacent to an intron.
        Input:
            position (int) - The index to check.
        Returns:
            bool - True when the position is in an intron-adjacent exon half.
        """
        #Create the bounds that represent the first and second half of the exon
        exon = self.getExonAt(position)
        #Get the length
        totLength = exon.stop-exon.start
        #Divide it by two and round down to an even number
        byTwo = int(totLength/2)
        endOfFirstHalf=exon.start+byTwo
        startOfSecondHalf=endOfFirstHalf+1
        #If the exon is not adjacent to anything then we know this index is no good
        if(self.sideOfAdjacentIntron(exon)=="N"):
            return(False)
        #If it is flanked on both sides by introns then we know any index in the exon will be good
        if(self.sideOfAdjacentIntron(exon)=="B"):
            return(True)
        #If an intronic region flanks on the left then the index will have to be in the first half of the exon
        if (self.sideOfAdjacentIntron(exon)=="L"):
            if position<=endOfFirstHalf and position>=exon.start:
                return(True)
            else:
                return(False)
        #If an intronic region flanks on the right then the index will have to be in the second half of the exon
        if (self.sideOfAdjacentIntron(exon)=="R"):
            if position>=startOfSecondHalf and position<=exon.stop:
                return(True)
            else:
                return(False)
    def addAR(self,toAdd):
        """
        Add an anotRow object to this anotGene.
        Input:
            toAdd (anotRow) - The row object to add.
        """
        self.lstOfAnotRows.append(toAdd)
    def getStartandStopIndices(self):
        """
        Populates the exon/intron start and stop lists, expressed relative to
        the gene's first nucleotide.  Exons are split into halves; for the
        first exon, rounding of the half boundary is chosen at random (up or
        down) to average out rounding error over repeated runs.
        """
        gStart=int(self.getStart())
        # Reset any previously populated boundary lists before refilling.
        if (len(self.intronIndexStops)>0 or len(self.intronIndexStarts)>0 or len(self.exonIndexStarts)>0 or len(self.exonIndexStops)>0):
            self.intronIndexStops=[]
            self.intronIndexStarts=[]
            self.exonIndexStarts=[]
            self.exonIndexStops=[]
        firstExon=True
        for ro in self.lstOfAnotRows:
            if ro.ty=="intron":
                #Get the starting value
                fullVal = int(ro.start)
                #Get the value in context of the overall
                nVal = fullVal-gStart
                if nVal not in self.intronIndexStarts:
                    self.intronIndexStarts.append(nVal)
                #Get the stopping value
                fullVal2=int(ro.stop)
                sVal=fullVal2-gStart
                if sVal not in self.intronIndexStops:
                    self.intronIndexStops.append(sVal)
            if ro.ty=="exon":
                #If this is the first exon we just need to add one Start and stop
                if firstExon==True:
                    #Get the start
                    fullVal = int(ro.start)
                    #Get the original start in context of the overall
                    nVal = fullVal-gStart
                    #Get the stop
                    fullVal2=int(ro.stop)
                    sVal=fullVal2-gStart
                    #The first start value - halfway in between (Take the difference and divide by two, rounding down)
                    theDif = sVal-nVal
                    ##RANDOMLY CHOOSE BETWEEN ROUNDING UP AND ROUNDING DOWN
                    rdru=random.randint(0,1)
                    #Round down
                    if rdru==0:
                        nStart = nVal + int(theDif/2)
                    #Round up
                    elif rdru==1:
                        nStart = nVal + math.ceil(theDif/2)
                    if nStart not in self.exonIndexStarts:
                        self.exonIndexStarts.append(nStart)
                    #The first stop value - the end of the exon
                    if sVal not in self.exonIndexStops:
                        self.exonIndexStops.append(sVal)
                    #Reset the first exon flag
                    firstExon=False
                #Otherwise we need to add two starts and stops (one per exon half)
                elif firstExon==False:
                    #Get the start
                    fullVal = int(ro.start)
                    #Get the value in context of the overall
                    nVal = fullVal-gStart
                    #Get the stop
                    fullVal2=int(ro.stop)
                    sVal=fullVal2-gStart
                    #Add start 1
                    if nVal not in self.exonIndexStarts:
                        self.exonIndexStarts.append(nVal)
                    #Add stop 1 (midpoint of the exon, rounded down)
                    theDif = sVal-nVal
                    stop1 = nVal + int(theDif/2)
                    if stop1 not in self.exonIndexStops:
                        self.exonIndexStops.append(stop1)
                    #Add start 2
                    start2=stop1+1
                    if start2 not in self.exonIndexStarts:
                        self.exonIndexStarts.append(start2)
                    #Add stop 2
                    if sVal not in self.exonIndexStops:
                        self.exonIndexStops.append(sVal)
        #Remove the last start and stops
        self.exonIndexStarts.pop()
        self.exonIndexStops.pop()
    def getType(self,index):
        """
        Returns the annotation classification at a particular index.
        Input:
            index (int) - The nucleotide index to classify.
        Returns:
            str - "exon", "intron", "FUTR" (5' UTR), "TUTR" (3' UTR),
            "CDS", or "NC" (no category).  Intron beats exon beats the
            UTR/CDS categories when several rows cover the index.
        """
        possibleOptions=[]
        #Go through each row of the anotGene
        for r in range(0,len(self.lstOfAnotRows)):
            ro = self.lstOfAnotRows[r]
            #If it is contained
            if ro.coversIndex(index)==True:
                possibleOptions.append(ro.ty)
        #Now pick which one to return (priority order)
        if "intron" in possibleOptions:
            return("intron")
        if "exon" in possibleOptions:
            return("exon")
        if "FUTR" in possibleOptions:
            return("FUTR")
        if "TUTR" in possibleOptions:
            return("TUTR")
        if "CDS" in possibleOptions:
            return("CDS")
        return("NC")
class chromesome:
    """
    Chromosome Object: binds a chromosome ID to the annotation elements
    found on it.  The element list is intentionally untyped so the same
    container can hold anotRow or anotGene objects.

    Properties:
        ID (str) - Name of the chromosome container.
        lstOfAnotElements (list) - Annotation elements on this chromosome.

    Methods:
        __init__ - Initializer.
        __str__ - Human-readable summary.
    """

    def __init__(self, ID):
        """Create an empty container for the given chromosome name."""
        self.ID = ID
        self.lstOfAnotElements = []

    def __str__(self):
        """Return a one-line description of the container."""
        return "{Chromosome Object} ID: " + self.ID
class WTGeneExpression:
    """
    WTGeneExpression Object: wild-type gene expression profile loaded
    from a CSV with (at least) 'gene_symbol' and 'Average' columns.

    Properties:
        fileName (str) - Path of the CSV file.
        DF (pandas.DataFrame) - The loaded expression table.
        geDict (dict) - Gene symbol -> average expression (float).

    Methods:
        __init__ - Initializer (reads the CSV).
        __str__ - Human-readable summary.
        populateDict - Build the symbol -> average-expression dictionary.
    """
    def __init__(self,fileName):
        self.fileName=fileName
        self.DF=pd.read_csv(fileName)
        self.geDict=self.populateDict()
    def __str__(self):
        # BUG FIX: the original referenced self.ID, which is never set,
        # so str() raised AttributeError.  Report the file name instead.
        toPrint="{WTGeneExpression Object} ID: "+self.fileName
        return(toPrint)
    def populateDict(self):
        """
        Uses the stored DataFrame to build a dictionary mapping gene symbol
        to average wild-type expression.
        Returns:
            dict - str (gene symbol) -> float (average expression).
        """
        toRet={}
        #Loop over the rows
        for index, row in self.DF.iterrows():
            theKey=str(row["gene_symbol"])
            theValue=float(row["Average"])
            toRet[theKey]=theValue
        return(toRet)
class anotFile:
    """
    anotFile Object: contains every annotation row/gene from the GTF-derived
    longest-transcript CSV, bucketed per chromosome.

    Properties:
        lstOfChromosomeNames (list[str]) - Unique chromosome names considered.
        lstOfChromosomesRows (list[chromesome]) - Per-chromosome buckets of anotRow objects.
        lstOfChromosomesGenes (list[chromesome]) - Per-chromosome buckets of anotGene objects.

    Methods:
        __init__ - Initializer.
        __str__ - Human-readable summary.
        getNumRows - Count stored anotRow objects.
        getNumGenes - Count stored anotGene objects.
        addAR - File an anotRow under its chromosome bucket.
        addAG - File an anotGene under its chromosome bucket.
        populateCh - Group stored rows into anotGene objects.
    """
    def __init__(self,chromosomeNames):
        self.lstOfChromosomeNames=chromosomeNames
        self.lstOfChromosomesRows=[]
        self.lstOfChromosomesGenes=[]
        for indivN in chromosomeNames:
            self.lstOfChromosomesRows.append(chromesome(indivN))
            self.lstOfChromosomesGenes.append(chromesome(indivN))
    def __str__(self):
        toPrint="{Annotaion File Object} Number of Annotation Rows: "+str(self.getNumRows())+"\tNumber of Annotation Genes: "+str(self.getNumGenes())
        return(toPrint)
    def getNumRows(self):
        """
        Counts the anotRow objects stored across all chromosome buckets.
        Returns:
            int
        """
        toReturn=0
        for ch in self.lstOfChromosomesRows:
            toReturn=toReturn+len(ch.lstOfAnotElements)
        return(toReturn)
    def getNumGenes(self):
        """
        Counts the anotGene objects stored across all chromosome buckets.
        (Docstring fixed: it previously described addAG by mistake.)
        Returns:
            int
        """
        toReturn=0
        for ch in self.lstOfChromosomesGenes:
            toReturn=toReturn+len(ch.lstOfAnotElements)
        return(toReturn)
    def addAR(self,rowObj):
        """
        Adds an anotRow to the chromosome bucket matching its chrom field.
        Input:
            rowObj (anotRow) - The row to add.
        """
        curID = rowObj.chrom
        for chrom in self.lstOfChromosomesRows:
            if (chrom.ID==curID):
                chrom.lstOfAnotElements.append(rowObj)
        return()
    def addAG(self,anotGeneObj):
        """
        Adds an anotGene to the chromosome bucket matching its chrom field.
        Input:
            anotGeneObj (anotGene) - Annotation gene object to store.
        """
        curID = anotGeneObj.chrom
        # BUG FIX: the original iterated self.lstOfAnotGenes, an attribute
        # that is never created, so every call raised AttributeError.  The
        # gene buckets live in self.lstOfChromosomesGenes.
        for chrom in self.lstOfChromosomesGenes:
            if(chrom.ID==curID):
                chrom.lstOfAnotElements.append(anotGeneObj)
        return()
    def populateCh(self):
        """
        Groups the stored rows into anotGene objects, chromosome by chromosome.
        """
        #for each row chromosome
        for i in range(0,len(self.lstOfChromosomesRows)):
            rChr = self.lstOfChromosomesRows[i]
            gChr= self.lstOfChromosomesGenes[i]
            #For each row in the chromosome
            for indivRow in rChr.lstOfAnotElements:
                counter=0
                #Check if this row's gene already has an anotGene bucket
                for indivGene in gChr.lstOfAnotElements:
                    if (indivGene.gene==indivRow.gene):
                        indivGene.addAR(indivRow)
                        counter=1
                        break
                if counter==0:
                    #Make a new anotGene
                    nAnotGene=anotGene(indivRow.gene,indivRow.chrom)
                    nAnotGene.addAR(indivRow)
                    gChr.lstOfAnotElements.append(nAnotGene)
class cluster:
    """
    Cluster Object: one PAR-CLIP binding cluster (a CSV row).

    Properties (stored as passed, typically strings from the CSV):
        chrom - Chromosome; strand - Strand; start/end - Cluster bounds;
        gene - Gene PARalyzer aligned this cluster to (if any);
        CS - Conversion specificity; T2C - T-to-C fraction;
        URC - Unique read count; RC - Total read count.

    Methods:
        __init__ - Initializer.
        __str__ - Human-readable summary.
    """
    def __init__(self,chrom,strand,start,end,gene,CS,T2C,URC,RC):
        self.chrom=chrom
        self.strand=strand
        self.start=start
        self.end=end
        self.gene=gene
        self.CS=CS
        # BUG FIX: the original assigned self.T2C=CS, silently storing the
        # conversion specificity as the T-to-C fraction as well.
        self.T2C=T2C
        self.URC=URC
        self.RC=RC
    def __str__(self):
        toPrint="{Cluster Object} Gene: "+self.gene+"\tStart: "+str(self.start)+"\tEnd: "+str(self.end)+"\tChromosome: "+str(self.chrom)
        return(toPrint)
class clusterGene:
"""
ClusterGene Object: A group of clusters of the same gene
Properties:
gene(string) - The name of the gene around which all contained clusters are aligned
chrom(string) - The name of the chromosome this gene falls on
start(int) - The start location of this clustergene (ie the first start value among contained clusters)
stop(int) - The stop location of this clustergene (ie the last stop value among contained clusters)
lstOfClusters (list[cluster]) - A list of all of the cluster objects aligned with this gene
hitListsIntron (List[List[Int]]) - A list of lists, where each inner list represents the hit list for an intronin this gene. That is to say, the inner list is the same length as the intron and says whether each index overlaps with a binding cluster (1) or does not (0).
hitListsExon (List[List[Int]]) - A list of lists, where each inner list represents the hit list for an exon in this gene. That is to say, the inner list is the same length as the exon and says whether each index overlaps with a binding cluster (1) or does not (0).
hitSums(List[int]) - The sum of hits in all intron/exon splice junctions of this gene.
Methods:
init - Initializer
str - Conversion to string form for printing
addAC - Add a cluster object to this clusterGene
getTotalNumReads - Return the total number of reads in clusters aligned to this gene.
doesGeneHaveNClust - Check if at least N clusters fall within this gene.
populateNumClust - Populates the number of clusters that overlap the intronic and exonic regions of the cluster gene.
containsIndex - Checks if a cluster in this clusterGene contains a given index.
removeGene - Flag this gene so that it is no longer considered for downstream analyses.
getMatchingMeta - Finds the metaGene in an anotFile object which has the same name as this clusterGene.
populateHLTotals - Determines the total number of hits in each EIE junction, which is used to calculate the percentage of the total each overlapped nucleotide represents.
populateHitLists - Populates the hit lists for this clusterGene object (a list which states whether each nucleotide in a region is overlapped with a cluster or not).
checkSumTo100 - Confirms that the percentage lists in this cluster gene indeed sum to 100%.
populateIntronBinsOfPercents - Re-express cluster overlap in the intronic region as percentages in a fixed number of bins.
populateExonBinsOfPercents - Re-express cluster overlap in the exonic region as percentages in a fixed number of bins.
populateGeneDistributions - Averages the percentage lists for each EIE in this gene.
"""
def __init__(self,gene,chrom):
self.gene=gene
self.chrom=chrom
self.start=0
self.stop=1
self.lstOfClusters=[]
self.hitListsIntron=[]
self.hitListsExon=[]
self.hitSums=[]
self.lstOfExonBinLists=[]
self.lstOfIntronBinLists=[]
self.percentDE=[]
self.percentDI=[]
self.coverageLength=1
self.nucPercent=1
self.firstHalfExClustCount=0
self.secondHalfExClustCount=0
self.intronClustCount=0
self.goodCount=0
self.badCount=0
self.removed=False
self.junctionSums=[]
def __str__(self):
"""
Scripted type conversion to string.
Returns:
String representation of this object.
"""
toPrint="{ClusterGene Object} Gene: "+self.gene+"\tChromosome: "+self.chrom+"\tNumber of Clusters: "+str(len(self.lstOfClusters))
return(toPrint)
def addAC(self,toAdd):
"""
Add a cluster object to this clusterGene
Inputs:
toAdd(cluster) - Cluster object to add to the ClusterGene
"""
self.lstOfClusters.append(toAdd)
def getTotalNumReads(self):
"""
Calculates the total number of reads aligned to all clusters aligned to this gene.
Returns:
int (number of reads)
"""
toRet=0
for indivClust in self.lstOfClusters:
toRet=toRet+indivClust.RC
return(toRet)
def doesGeneHaveNClust(self,nClust):
"""
Checks if this gene has at least certain number of clusters. Used for filtering processes.
Input:
nClust(int) - The minimum number of clusters that must be aligned to this gene in order for this method to return true.
Returns:
bool (are there at least this number of clusters aligned to this particular gene?)
"""
numC = len(self.lstOfClusters)
if numC>=nClust:
return(True)
return(False)
    def populateNumClust(self,anotFile):
        """
        Count the clusters overlapping the 5' exon half, the 3' exon half, and
        the intronic region of this gene, storing the three counts on
        firstHalfExClustCount, secondHalfExClustCount and intronClustCount.
        A cluster is counted at most once per region, even when several of its
        nucleotides fall inside that region.
        Input:
            anotFile(anotFile) - Annotation file used to map the clusters to a genome.
        """
        # Per-cluster flags: does the current cluster touch each region?
        firstHalfT=False
        secondHalfT=False
        intronT=False
        # Running totals across all clusters of this gene
        firstHalf=0
        secondHalf=0
        intronC=0
        matchedMG = self.getMatchingMeta(anotFile)
        # Nothing to count when the gene has no annotation match (or no name)
        if matchedMG==False or self.gene=="":
            return()
        #Loop over each cluster in this gene
        for clust in self.lstOfClusters:
            #Establish the half value (anything greater than this is part of the 3'Exon half and anything equal or less is part of the 5'exon half)
            #Find the start of the region in question (the start of the exonic cluster that contains the cluster in question)
            regStart=0
            regStop=1
            # NOTE(review): when several exon rows cover the cluster's ends,
            # the LAST matching row wins here — confirm that is intended.
            for anotRow in matchedMG.lstOfAnotRows:
                if anotRow.coversIndex(int(clust.start))==True or anotRow.coversIndex(int(clust.end))==True:
                    if anotRow.ty=="exon":
                        regStart=int(anotRow.start)
                        regStop=int(anotRow.stop)
            #Find the end of the region in question
            halfValue = round((regStop-regStart)/2)+regStart
            parseStart=int(clust.start)
            parseStop=int(clust.end)
            # Reverse-strand clusters: mirror the coordinates onto the region
            if clust.strand=="-":
                parseStart=regStop-int(clust.end)
                parseStop=(int(clust.end)-int(clust.start))+parseStart
            #Go through the indices of the cluster
            for i in range(parseStart,parseStop):
                #Check if it is exonic
                if matchedMG.getType(i)=="exon":
                    #Check if it is in the first half or the second half
                    if i<=halfValue:
                        firstHalfT=True
                    if i>halfValue:
                        secondHalfT=True
                if matchedMG.getType(i)=="intron":
                    intronT=True
            # Fold this cluster's region flags into the totals, then reset them
            if firstHalfT==True:
                firstHalfT=False
                firstHalf=firstHalf+1
            if secondHalfT==True:
                secondHalfT=False
                secondHalf=secondHalf+1
            if intronT==True:
                intronT=False
                intronC=intronC+1
        self.firstHalfExClustCount=firstHalf
        self.secondHalfExClustCount=secondHalf
        self.intronClustCount=intronC
def containsIndex(self,index):
"""
Checks if a nucleotide index is within one of the clusters of the clusterGene object
Inputs:
index(int) - Index to check
Returns:
bool - Is that index within one of the clusters in this object?
"""
#For each cluster
for clust in self.lstOfClusters:
#Check if this index is within the bounds
if (index>=int(clust.start) and int(clust.end)>=index):
return(True)
return(False)
def removeGene(self):
"""
Tag a gene for removal from consideration - ie, it will no longer be considered by the program going forward.
"""
self.removed=True
def getMatchingMeta(self,anotFile):
"""
This function finds the matching gene from a anotFile that coresponds with this clusterGene (returned)
Inputs:
anotFile(anotFile) - An anotFile object containing the genes to check
Returns:
anotGene - The anotGene in the annotation file which matches this clusterGene object.
"""
#Get the chromosome bin to search
chromBin = self.chrom
for curChrom in anotFile.lstOfChromosomesGenes:
if curChrom.ID==chromBin:
#Search this bin for the corresponding gene
for mg in curChrom.lstOfAnotElements:
if (mg.gene==self.gene):
return(mg)
return(False)
def populateHLTotals(self):
"""
Populate the list of values which represent the total number of hits in a given intron / exon pairing. This value will be used to normalize the values of all E/I/E junctions within a given gene so that they can be averaged to create gene-level average distributions (which are then averaged for the metagene)
"""
for i in range(0,len(self.hitListsExon)):
su1 = sum(self.hitListsExon[i])
su2 = sum(self.hitListsIntron[i])
theSum = su1+su2
self.hitSums.append(theSum)
def populateHitLists(self,anotFile):
"""
Populate the hit lists of the clusterGene (each list corresponds to a EIE hit list, either intronic or exonic). In otherwords, pass stepwise over each nucleotide in this gene and see if it is overlapped by a binding cluster (asign a value of 1) or not overlapped (assign a value of 0).
This is the foundation for all downstream analysis. Populates the hit list properties of the gene object.
Input:
anotFile(anotFile) - An anotFile object containing the genes to check
"""
#Get the matching metagene
matchedMG = self.getMatchingMeta(anotFile)
if matchedMG==False or self.gene=="":
return()
matchedMG.getStartandStopIndices()
if matchedMG==False and self.gene!="" :
print("Cant find: ",self.gene)
theCatagories=["exon","intron"]
for gp in theCatagories:
if (matchedMG!=False):
#Establish the start and stop of the specific region
if gp=="intron":
endVal = len(matchedMG.intronIndexStarts)
if gp=="exon":
endVal = len(matchedMG.exonIndexStarts)
if (len(matchedMG.intronIndexStarts)==0 or len(matchedMG.exonIndexStarts)==0):
break
#!!!!!!!!!
if (2*len(matchedMG.intronIndexStarts)!=len(matchedMG.exonIndexStarts)):
print("Repairing for matching error")
print("Matched Gene: ",matchedMG.gene)
matchedMG.repairGaps()
matchedMG.getStartandStopIndices()
if (2*len(matchedMG.intronIndexStarts)!=len(matchedMG.exonIndexStarts)):
print("-----------GTF Matching error-----------")
print("Matched Gene: ",matchedMG.gene)
print(matchedMG.intronIndexStarts)
print(matchedMG.exonIndexStarts)
break
secondExon=False
#POPULATE EACH INDIVIDUAL EIE - taking two start and stops for the exons and and one for the introns
for i in range(0,endVal):
hitListIntron=[]
hitListExon=[]
#----------------------------
self.start=matchedMG.getStart()
if (gp=="exon"):
theStart=matchedMG.exonIndexStarts[i] + self.start
theStop=matchedMG.exonIndexStops[i] + self.start+1
if (gp=="intron"):
theStart=matchedMG.intronIndexStarts[i] + self.start
theStop=matchedMG.intronIndexStops[i] + self.start+1
for i in range(int(theStart),theStop):
#We need to check if there is a cluster there
putAZero=True
if self.containsIndex(i)==True:
#Now that we know there is a cluster, we need to check to see if it is identified as a specific coding region
if (matchedMG.getType(i)==gp):
#Also check that the exon is adjacent to an intron at this point
#ALSO CHECKS TO ENSURE THAT IT IS ADJACENT TO AN INTRON
if(gp=="exon" and matchedMG.exonPositionIntronAdjacent(i)==True):
hitListExon.append(1)
putAZero=False
if(gp=="intron"):
hitListIntron.append(1)
putAZero=False
if(putAZero==True):
if(gp=="exon"):
hitListExon.append(0)
if(gp=="intron"):
hitListIntron.append(0)
#If this is an intron we are going to add it as a seperate hit list to the list of hit lists
if gp=="intron":
self.hitListsIntron.append(hitListIntron)
hitListIntron=[]
#If this is an exon we add it or update the counter
if gp=="exon":
if secondExon==True:
self.hitListsExon.append(hitListExon)
hitListExon=[]
secondExon=False
elif secondExon==False:
secondExon=True
if (len(self.hitListsIntron)!=len(self.hitListsExon)):
self.hitListIntron=[]
self.hitListExon=[]
def checkSumTo100(self):
"""
Checks whether every exon/intron bin list combination (each indivdual exon/intron/exon junction) adds up to either 0 (not considered) or 100 (considered). Reports outliers by printing a warning message to console.
"""
for i in range(0,len(self.lstOfExonBinLists)):
#Get the sum for the intron
theIntronSum=sum(self.lstOfIntronBinLists[i])
#Get the sum for the exon
theExonSum=sum(self.lstOfExonBinLists[i])
total=theIntronSum+theExonSum
if (round(total)!=100 and len(self.lstOfIntronBinLists[i])>1 and len(self.lstOfExonBinLists[i])>1):
print("NOT REACHING 100: ",self.gene)
intSum=0
exSum=0
for i in range(0,len(self.hitListsExon)):
intSum=intSum+sum(self.hitListsIntron[i])
exSum=exSum+sum(self.hitListsExon[i])
def populateIntronBinsOfPercents(self,anotFile,numberOfBins,randomStatesList):
"""
This function uses the hit lists to populate the list of lists that contain the percent distribution for each intron hit list seperated into a given number of bins.
This is a critical normalization step that takes the discussion from the nucelotide level (which does not allow ready comparison between genes because of the vast differences in gene length) to a percentage discussion where all lists are the same length and can be readily compared.
Results are set to the percentile properties of this object.
Input:
anotFile(anotFile) - An anotFile object containing the genes to check
numberOfBins (int) - The number of bins into which the hit list should be spread. To choose this number, consider the average difference in length between introns and exons. Selecting a bin number that matches this difference will create a smoother curve across junctions and make comparison easier.
randomStatesList (List[int]) - The list of random states to use in the random step where rounding must be taken into account when building the bins. The longer this list, the more times the random step will be performed before averaging.
"""
matchedGene = self.getMatchingMeta(anotFile)
if matchedGene==False or self.gene=="":
return()
if (matchedGene==False):
return(False)
matchedGene.getStartandStopIndices()
for h in range(0,len(self.hitListsIntron)):
if self.hitSums[h]==0:
self.lstOfIntronBinLists.append([0])
continue
#Apply loop to stabilize the heuristic algorithm and ensure an end result that is indicative of the biology.
indivPDList=[]
for indivSeed in randomStatesList:
#Determine the percent value of each indivual hit
nucPerc = 100.0/self.hitSums[h]
#Get the hit list
theHitList=self.hitListsIntron[h]
theBinList=[]
#Establish the lengths to parse of the hit list
totLength = len(theHitList)
if totLength==0:
self.lstOfIntronBinLists.append([0])
if totLength<numberOfBins:
#Binsize
lBin=numberOfBins//totLength
#Figure out how much is left over
leftOver = numberOfBins-lBin*totLength
randIndices = getRandLstBounded(leftOver,totLength,indivSeed)
#Now we need to parse over the indices of the cluster
addOne=False
indexInHitsOld=0
indexInHits=0
while indexInHits<totLength:
#If this index is in the list [its +1 from the bin size]
if indexInHitsOld in randIndices:
addOne=True
val = theHitList[indexInHits]
#Add the values
for i in range(0,lBin):
theBinList.append(val*nucPerc)
if (addOne==True):
theBinList.append(val*nucPerc)
indexInHits=indexInHits+1
indexInHitsOld=indexInHitsOld+1
addOne=False
#Now we need to adjust for the added amount
#What it should equal
shouldEqual=1
shouldEqual=sum(theHitList)*nucPerc
#What it currently equals
curEquals=1
curEquals=sum(theBinList)
#Find the adjusting factor
if curEquals!=0:
adjFact = shouldEqual/curEquals
for i in range(0,len(theBinList)):
newEle = theBinList[i]*adjFact
theBinList[i]=newEle
if totLength>=numberOfBins:
#Determine the lengths
#Length of each bin before random addition
bLength= totLength//numberOfBins
#Remainder
rLen = totLength-(numberOfBins*bLength)
#Create the random numbers needed
randIndices = getRandLstBounded(rLen,numberOfBins,indivSeed)
#Now we need to parse over the indices of the cluster
nBins=0
indexInHits=0
while nBins!=numberOfBins:
#If this index is in the list [its +1 from the bin size]
if nBins in randIndices:
upperCount=1+bLength
else:
upperCount=bLength
#Get the total val which represents the number of hits in this region (how many nucleotides of each type fall in the region)
totVal=0
for w in range(indexInHits,indexInHits+upperCount):
totVal = totVal + theHitList[w]
#Get the percent value (multiply by the percent represented by each in idividual nucleotide)
theBinList.append(totVal*nucPerc)
nBins=nBins+1
indexInHits=indexInHits+upperCount
#If we are dealing with a reverse transcript we will need to inverse the percentile terms
tester = self.lstOfClusters[0]
orient2=tester.strand
if orient2=="-":
#Flip
theBinList.reverse()
#Add thisindividual calculation to
indivPDList.append(theBinList)
#Average the loop values for the list
DFI = pd.DataFrame(indivPDList)
self.lstOfIntronBinLists.append(list(DFI.mean(axis = 0)))
def populateExonBinsOfPercents(self,anotFile,numberOfBins,randomStatesList):
"""
This function uses the hit lists to populate the list of lists that contain the percent distribution for each exon hit list seperated into a given number of bins.
This is a critical normalization step that takes the discussion from the nucelotide level (which does not allow ready comparison between genes because of the vast differences in gene length) to a percentage discussion where all lists are the same length and can be readily compared.
Results are set to the percentile properties of this object.
Input:
anotFile(anotFile) - An anotFile object containing the genes to check
numberOfBins (int) - The number of bins into which the hit list should be spread. To choose this number, consider the average difference in length between introns and exons. Selecting a bin number that matches this difference will create a smoother curve across junctions and make comparison easier.
randomStatesList (List[int]) - The list of random states to use in the random step where rounding must be taken into account when building the bins. The longer this list, the more times the random step will be performed before averaging.
"""
matchedGene = self.getMatchingMeta(anotFile)
if matchedGene==False or self.gene=="":
return()
if (matchedGene==False):
return(False)
matchedGene.getStartandStopIndices()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
for h in range(0,len(self.hitListsExon)):
if self.hitSums[h]==0:
self.lstOfExonBinLists.append([0])
continue
indivPDList=[]
for indivSeed in randomStatesList:
#Determine the percent value of each indivual hit
nucPerc = 100.0/self.hitSums[h]
#Get the hit list
theHitList=self.hitListsExon[h]
theBinList=[]
#Establish the lengths to parse of the hit list
totLength = len(theHitList)
if totLength<numberOfBins:
#Binsize
#OLD
lBin=numberOfBins//totLength
#NEW
#Figure out how much is left over
leftOver = numberOfBins-lBin*totLength
randIndices = getRandLstBounded(leftOver,totLength,indivSeed)
#Now we need to parse over the indices of the cluster
addOne=False
indexInHitsOld=0
indexInHits=0
while indexInHits<totLength:
#If this index is in the list [its +1 from the bin size]
if indexInHitsOld in randIndices:
addOne=True
val = theHitList[indexInHits]
#Add the values
#NEW
for i in range(0,lBin):
theBinList.append(val*nucPerc)
if (addOne==True):
theBinList.append(val*nucPerc)
addOne=False
#Now we need to adjust for the added amount
#What it should equal
shouldEqual=sum(theHitList)*nucPerc
#What it currently equals
curEquals=sum(theBinList)
#Find the adjusting factor
if curEquals!=0:
adjFact = shouldEqual/curEquals
for i in range(0,len(theBinList)):
newEle = theBinList[i]*adjFact
theBinList[i]=newEle
indexInHits=indexInHits+1
indexInHitsOld=indexInHitsOld+1
if totLength>=numberOfBins:
#Determine the lengths
rawLength=float(totLength)/float(numberOfBins)
#Length of each bin before random addition
bLength= totLength//numberOfBins
#Remainder
rLen = round((rawLength-bLength)*numberOfBins)
#Create the random numbers needed
randIndices = getRandLstBounded(rLen,numberOfBins,indivSeed)
#Now we need to parse over the indices of the cluster
nBins=0
indexInHits=0
while nBins!=numberOfBins:
#If this index is in the list [its +1 from the bin size]
if nBins in randIndices:
upperCount=1+bLength
else:
upperCount=bLength
#Get the total val which represents the number of hits in this region (how many nucleotides of each type fall in the region)
totVal=0
for w in range(indexInHits,indexInHits+upperCount):
totVal = totVal + theHitList[w]
#file2.write("Value at hit list index: "+str(theHitList[w])+"\n")
#Get the percent value (multiply by the percent represented by each in idividual nucleotide)
theBinList.append(totVal*nucPerc)
nBins=nBins+1
indexInHits=indexInHits+upperCount
#If we are dealing with a reverse transcript we will need to inverse the percentile terms
tester = self.lstOfClusters[0]
orient2=tester.strand
if orient2=="-":
#Flip
theBinList.reverse()
indivPDList.append(theBinList)
#Average the loop values for the list
DFE = pd.DataFrame(indivPDList)
self.lstOfExonBinLists.append(DFE.mean(axis = 0))
def populateGeneDistributions(self,NIntronBins,NExonBins):
"""
Averages the percentage lists for each EIE in this gene in order to get the average intron/exon distribution of clusters across this gene.
It ONLY considers those binned lists which are greater than 1 (smaller values indicate errors that were set to 1 earlier in the process)
Sets results to percent properties of the gene object.
Input:
NIntronBins (Int) - The number of intronic bins into which binding density should be allocated.
NExonBins (Int) - The number of intronic bins into which binding density should be allocated.
"""
#If the hit list sum for this gene is 0 (there is no overlap) simply set the distributions to [0] and exit
if sum(self.hitSums)==0:
self.percentDE=[0]
self.percentDI=[0]
return
intronicBinsUsed=[]
exonicBinsUsed=[]
#INTRONS
#Loop over each bin
for i in range(0,NIntronBins):
IntronEntries = []
#For each EIE
for binList in self.lstOfIntronBinLists:
#If this list is not of the prescribed length, ditch it
if len(binList)!=NIntronBins:
continue
else:
if self.gene=="STRIP1" and binList not in intronicBinsUsed:
intronicBinsUsed.append(binList)
#Otherwise add this to the entry list
IntronEntries.append(binList[i])
#Average the values from all of the genes at this bin locationF
theVal = Average(IntronEntries)
#Add this average to the gene object's distribution list
self.percentDI.append(theVal)
#Exons
#Loop over each bin
for i in range(0,NExonBins):
ExonEntries = []
#For each EIE
for binList in self.lstOfExonBinLists:
#If this list is not of the prescribed length, ditch it
if len(binList)!=NExonBins:
continue
else:
if self.gene=="STRIP1" and list(binList) not in exonicBinsUsed:
exonicBinsUsed.append(list(binList))
#Otherwise add this to the entry list
ExonEntries.append(binList[i])
#Average the values from all of the genes at this bin location
theVal = Average(ExonEntries)
#Add this average to the gene object's distribution list
self.percentDE.append(theVal)
if sum(self.percentDI)==0:
self.percentDI=[0]
if sum(self.percentDE)==0:
self.percentDE=[0]
class parclip:
"""
    Parclip Object: This object represents the output from an individual parclip clusters.csv file.
Properties:
filename(String) - The name of the csv file.
lstOfChromosomesClusters(List[Chromosomes]) - A list of the clusters in a parclip, sorted by chromosome.
lstOfChromosomesClustersGenes (List[Chromosomes]) - A list of the cluster genes in a parclip, sorted by chromosome.
iDistribution (List[float]) - The average distribution of binding density across the intronic region of all genes in the parclip.
eDistribution (List[float]) - The average distribution of binding density across the exonic regions of all genes in the parclip.
eFHDistribution (List[float]) - The average distribution of binding density across the first (5') half of all exonic regions of all genes in the parclip.
eSHDistribution (List[float]) - The average distribution of binding density across the second (3') half of all exonic regions of all genes in the parclip.
allCluster (List[cluster]) - A simple list of all of the clusters in this parclip object (unsorted).
Methods:
init - Initializer
str - Conversion to string form for printing.
addClust - Add a cluster to the parclip object (used when reading in the object).
getNumClusters - Determines how many clusters are present in a parclip object.
getNumGenes - Determines how many genes are present in a parclip object.
        removeClust - Given a cluster, this method goes through the lstOfChromosomesClusters property and removes the given cluster.
        applyBounds - Impose bounds on the clusters included - throwing out extremes (based on the given property) of a specified % at the minimum and maximum ends of the spectrum.
getNumExonGenesH1 - Determines the number of genes that have cluster overlap within the 5' half of exons in this PAR-CLIP.
getNumExonGenesH2 - Determines the number of genes that have cluster overlap within the 3' half of exons in this PAR-CLIP.
getNumExonClustersH1 - Determines the number of clusters that fall within the 5' half of the exons in genes of this PAR-CLIP.
getNumExonClustersH2 - Determines the number of clusters that fall within the 3' half of the exons in genes of this PAR-CLIP.
getNumIntronGenes - Determines the number of genes that have cluster overlap within intronic regions in this PAR-CLIP.
populateCh - Populate the chromosome lists of this object with the clusters it holds (a sorted storage step).
populateAllHits - Populates the hit lists (and percentages of cluster distribution) for all genes in this object.
populateExonHalves - populates the exon half lists for use later in graphing.
"""
def __init__(self,filename,lstOfUniqueChromosomes):
self.filename=filename
theChromsomeList=[]
for chrN in lstOfUniqueChromosomes:
theChromsomeList.append(chromesome(chrN))
theChromsomeList2=[]
for chrN in lstOfUniqueChromosomes:
theChromsomeList2.append(chromesome(chrN))
self.lstOfChromosomesClusters=theChromsomeList
self.lstOfChromosomesClustersGenes=theChromsomeList2
self.iDistribution=[]
self.eDistribution=[]
self.eFHDistribution=[]
self.eSHDistribution=[]
def __str__(self):
toPrint="{Parclip Object} Name: "+self.filename+"\tNumber of Clusters: "+str(self.getNumClusters())+"\tNumber of Cluster Genes: "+str(self.getNumGenes())
return(toPrint)
def removeClust(self, cluster):
"""
Removes a given cluster from the lstOfChromosomesClusters value.
Inputs:
cluster(cluster) - The cluster to remove from the parclip object.
"""
#Get the appropriate chromosome of this cluster object
toRemChrom=cluster.chrom
for chrom in self.lstOfChromosomesClusters:
if (chrom.ID==toRemChrom):
for clustInChrom in chrom.lstOfAnotElements:
if clustInChrom==cluster:
chrom.lstOfAnotElements.remove(cluster)
return()
def addClust (self,cluster):
"""
Adds a given cluster to the parclip object.
Inputs:
cluster(cluster) - The cluster to add to the parclip object.
"""
#Get the correct chromosome
curID = cluster.chrom
for chrom in self.lstOfChromosomesClusters:
if (chrom.ID==curID):
chrom.lstOfAnotElements.append(cluster)
return()
def getNumClusters(self):
"""
Returns the number of clusters within this parclip object.
Returns:
int - The total count of clusters contained in this parclip object.
"""
toReturn=0
for ch in self.lstOfChromosomesClusters:
toReturn=toReturn+len(ch.lstOfAnotElements)
return(toReturn)
def getNumGenes(self):
"""
Returns the number of genes that are overlapped by clusters within this parclip object.
Returns:
int - The total count of genes that are overlapped by clusters in this parclip object.
"""
toReturn=0
for ch in self.lstOfChromosomesClustersGenes:
toReturn=toReturn+len(ch.lstOfAnotElements)
return(toReturn)
def getNumExonGenesH1(self):
"""
Determines the number of genes in this PAR-CLIP that have cluster overlap in their 5' half of thier exons.
Returns:
int - The number of genes.
"""
toReturn=0
for ch in self.lstOfChromosomesClustersGenes:
for gene in ch.lstOfAnotElements:
if gene.removed==True:
continue
#Get the first half of the exon
endInd=round(len(gene.percentDE)/2)
fh=gene.percentDE[0:int(endInd)]
if sum(fh) >0:
toReturn=toReturn+1
return(toReturn)
def getNumExonGenesH2(self):
"""
Determines the number of genes in this PAR-CLIP that have cluster overlap in their 3' half of thier exons.
Returns:
int - The number of genes.
"""
toReturn=0
for ch in self.lstOfChromosomesClustersGenes:
for gene in ch.lstOfAnotElements:
if gene.removed==True:
continue
#Get the first half of the exon
startInd=round(len(gene.percentDE)/2)
sh=gene.percentDE[int(startInd):len(gene.percentDE)]
if sum(sh) >0:
toReturn=toReturn+1
return(toReturn)
def getNumExonClustersH1(self):
"""
Determines the number of clusters that fall within the 5' half of the exons in genes of this PAR-CLIP.
Returns:
int - The number of clusters.
"""
toRet=0
for ch in self.lstOfChromosomesClustersGenes:
for gene in ch.lstOfAnotElements:
if gene.removed==True:
continue
toRet = toRet+ gene.firstHalfExClustCount
return(toRet)
def getNumExonClustersH2(self):
"""
Determines the number of clusters that fall within the 3' half of the exons in genes of this PAR-CLIP.
Returns:
int - The number of clusters.
"""
toRet=0
for ch in self.lstOfChromosomesClustersGenes:
for gene in ch.lstOfAnotElements:
if gene.removed==True:
continue
toRet = toRet+ gene.secondHalfExClustCount
return(toRet)
def getNumIntronGenes(self):
"""
Determines the number of genes in this PAR-CLIP with cluster density in their intronic regions.
Returns:
int - The number of clusters.
"""
toReturn=0
for ch in self.lstOfChromosomesClustersGenes:
for gene in ch.lstOfAnotElements:
if len(gene.percentDI)==0:
continue
if sum(gene.percentDI) >0:
toReturn=toReturn+1
return(toReturn)
def getNumIntronClusters(self):
"""
Determines the number of clusters that fall within intronic regions of genes in this PAR-CLIP.
Returns:
int - The number of clusters.
"""
toRet=0
for ch in self.lstOfChromosomesClustersGenes:
for gene in ch.lstOfAnotElements:
if gene.removed==True:
continue
toRet = toRet+ gene.intronClustCount
return(toRet)
def populateCh(self):
"""
Populate the chromosome lists with the clusters and clustergenes. You can think of this as a sorting/storage step designed to hasten downstream comparisons.
"""
#for each cluster
for i in range(0,len(self.lstOfChromosomesClusters)):
cChr = self.lstOfChromosomesClusters[i]
gChr= self.lstOfChromosomesClustersGenes[i]
#For each row in the chromosome
for indivClust in cChr.lstOfAnotElements:
counter=0
#Check if this is in the corresponding chromosome for the gene list
for indivGene in gChr.lstOfAnotElements:
if (indivGene.gene==indivClust.gene):
indivGene.addAC(indivClust)
counter=1
break
if counter==0:
#Make a new anotGene
nClustGene=clusterGene(indivClust.gene,indivClust.chrom)
nClustGene.addAC(indivClust)
gChr.lstOfAnotElements.append(nClustGene)
def populateAllHits(self,anotF,NIntronBins,NExonBins,randomStatesList):
"""
Populate the hit lists for the intronic and exonic regions of all genes in this PAR-CLIP. In other words, this part of the program determines the nucleotide-level overlap with binding clusters before re-expressing these lists are binned for comparison later. This binning sometiems require dealing with uneven numbers. When this occurs, random bins are selected to be 1 larger than the average. This is done multiple times and the results averaged.
Input:
anotF (anotFile) - Annotation file that contains the information needed to construct the metagene.
NIntronBins (Int) - THe number of bins to distribute the intronic hit lists into.
NExonBins (Int) - THe number of bins to distribute the exonic hit lists into.
randomStatesList (List[int]) - The list of numbers to use as random seeds in random number generation. The more random states provided, the more times the binning algorithm will execute before the averaging step.
"""
#For each chromosome
for chrom in self.lstOfChromosomesClustersGenes:
#Get each gene
for cGene in chrom.lstOfAnotElements:
#Populate
cGene.populateHitLists(anotF)
#indivHL = cGene.checkHitList()
cGene.populateHLTotals()
cGene.populateIntronBinsOfPercents(anotF,NIntronBins,randomStatesList)
cGene.populateExonBinsOfPercents(anotF,NExonBins,randomStatesList)
cGene.populateNumClust(anotF)
cGene.checkSumTo100()
cGene.populateGeneDistributions(NIntronBins,NExonBins)
    def getParclipPercentageDistributions(self,NIntronBins,NExonBins,clusterThreshold,weighValuesByRC,wtGE):
        """
        Populate the average distribution of all of the percentage hits in this
        parclip object: average the per-gene distribution lists of every gene,
        appending the results to the iDistribution / eDistribution properties.
        Input:
            NIntronBins (Int) - The number of intronic bins the hits were divided into.
            NExonBins (Int) - The number of exonic bins the hits were divided into.
            clusterThreshold (Int) - The minimum number of clusters a gene must have to be used.
            weighValuesByRC (bool) - Whether or not we want to weight the genes with more aligned reads to be more impactful than those with fewer.
            wtGE (WTGeneExpression or False) - Whether to weight the impact of each gene on the metagene by the expression level of that gene in the wild type cell line.
        """
        #Remove all of those lists which do not sum to 100 with their partner and ensure that those with hits in only the exonic or intronic regions are still counted.
        for chrom in self.lstOfChromosomesClustersGenes:
            #Get each gene
            for cGene in chrom.lstOfAnotElements:
                #IF the gene has fewer than the specified number of clusters set its lists equal to zero
                if cGene.doesGeneHaveNClust(clusterThreshold)==False:
                    cGene.percentDI=[]
                    cGene.percentDE=[]
                    cGene.removeGene()
                #IF the gene has only introns or only exons fix it so that it is also considered
                if len(cGene.percentDI)!=NIntronBins and len(cGene.percentDE)==NExonBins:
                    cGene.percentDI=[0]*NIntronBins
                if len(cGene.percentDI)==NIntronBins and len(cGene.percentDE)!=NExonBins:
                    cGene.percentDE=[0]*NExonBins
                # Anything still failing the 0-or-100 sanity check is reset to
                # placeholder lists so the averaging below skips it
                if (round(sum(cGene.percentDI)+sum(cGene.percentDE))) != 100:
                    if (round(sum(cGene.percentDI)+sum(cGene.percentDE))) !=0 and len(cGene.percentDI)>0:
                        print("Not reaching 100 or 0: ",cGene.gene)
                    cGene.percentDI=[0]
                    cGene.percentDE=[0]
        #INTRONS
        #Loop over each bin
        for i in range(0,NIntronBins):
            IntronEntries = []
            #For each gene
            for chrom in self.lstOfChromosomesClustersGenes:
                #Get each gene
                for cGene in chrom.lstOfAnotElements:
                    # Only genes whose distributions jointly sum to 100 count
                    if round((sum(cGene.percentDI)+sum(cGene.percentDE))) != 100:
                        continue
                    #If that gene has an intron distribution list equal to the number of bins...
                    if len(cGene.percentDI)==NIntronBins:
                        # NOTE(review): read-count weighting appends one copy of
                        # the value per aligned read, while the wtGE branch
                        # appends a single value scaled by reads/TPM — two
                        # different weighting schemes; confirm both are intended.
                        #If we are weighing by RC
                        if weighValuesByRC==True and wtGE==False:
                            for k in range(0,cGene.getTotalNumReads()):
                                IntronEntries.append(cGene.percentDI[i])
                        if weighValuesByRC==False and wtGE==False:
                            IntronEntries.append(cGene.percentDI[i])
                        #If we are weighing by BOTH wild type gene expression AND read count
                        if wtGE!=False:
                            #Get the value for the TPM expression
                            if cGene.gene in wtGE.geDict:
                                #Skip the zero expression ones
                                if wtGE.geDict[cGene.gene]!=0:
                                    countUp=int(round(wtGE.geDict[cGene.gene]))
                                    if countUp!=0:
                                        #Get the factor to multiply each by (#CLR/TPM)
                                        multFact=cGene.getTotalNumReads()/countUp
                                        IntronEntries.append(cGene.percentDI[i]*multFact)
            #Get the average value for this bin across all qualifying genes
            avgVal = Average(IntronEntries)
            self.iDistribution.append(avgVal)
        #EXONS
        #Loop over each bin
        for i in range(0,NExonBins):
            ExonEntries = []
            #For each gene
            for chrom in self.lstOfChromosomesClustersGenes:
                #Get each gene
                for cGene in chrom.lstOfAnotElements:
                    if round((sum(cGene.percentDI)+sum(cGene.percentDE))) != 100:
                        continue
                    #If that gene has an exon distribution list equal to the number of bins
                    if len(cGene.percentDE)==NExonBins:
                        #Add the value to the options
                        if weighValuesByRC==True and wtGE==False:
                            for k in range(0,cGene.getTotalNumReads()):
                                ExonEntries.append(cGene.percentDE[i])
                        if weighValuesByRC==False and wtGE==False:
                            ExonEntries.append(cGene.percentDE[i])
                        #If we are weighing by both wild type gene expression AND read count
                        if wtGE!=False:
                            #Get the value for the TPM expression
                            if cGene.gene in wtGE.geDict:
                                #Skip the zero expression ones
                                if wtGE.geDict[cGene.gene]!=0:
                                    countUp=int(round(wtGE.geDict[cGene.gene]))
                                    if countUp!=0:
                                        #Get the factor to multiply each by (#CLR/TPM)
                                        multFact=cGene.getTotalNumReads()/countUp
                                        ExonEntries.append(cGene.percentDE[i]*multFact)
            #Get the average
            avgVal = Average(ExonEntries)
            self.eDistribution.append(avgVal)
        # Diagnostic readout of the final metagene distributions
        print("---------------- Final Intron Distribution: ",self.iDistribution)
        print("---------------- Final IExon Distribution: ",self.eDistribution)
        print("Length of intron final distribution: ",len(self.iDistribution))
        print("Length of exon final distribution: ",len(self.eDistribution))
        print("AVG of Exon",Average(self.eDistribution))
        print("AVG of Intron",Average(self.iDistribution))
        print("Total sum: ",sum(self.eDistribution)+sum(self.iDistribution))
def populateExonHalves(self,NExonBins):
"""
Populates the two half lists of the exonic distribution in order to allow them to be split for graphing later.
Input:
NExonBins (int) - The number of bins the exonic hit lists were compressed or stretched into.
"""
if len(self.eDistribution)!=0:
#Strip the first value
self.eDistribution.pop(0)
self.eDistribution.pop(0)
#Strip the last value
self.eDistribution.pop()
self.eDistribution.pop()
#Get the value of half of the number of bins
hnb=(NExonBins-4)//2
#Initiate the lists
firstHalf = []
secondHalf =[]
for i in range(0,hnb):
toAdd=self.eDistribution[i]
firstHalf.append(toAdd)
for i in range(hnb,(NExonBins-4)):
toAdd=self.eDistribution[i]
secondHalf.append(toAdd)
self.eFHDistribution=firstHalf
self.eSHDistribution=secondHalf
print("-----5' Exon Half: ",self.eFHDistribution)
print("-----3' Exon Half: ",self.eSHDistribution)
def applyBounds(self,lowerBound,upperBound,field):
"""
This method removes clusters from the PAR-CLIP object that are at either % end of the sorted spectrum of all clusters for an inputed property (read count, cross linked reads, etc). Rounds down through Int() typecasting at the final stage.
Inputs:
lowerBound (Int) - The percentage of clusters on the lower end of the value spectrum to remove.
uppBound (Int) - The percentage of clusters on the upper end of the value spectrum to remove.
field (Str) - The cluster.csv field to use as the sorting parameter.
"""
#Populate the allCluster property of this parclip object
allClusters=[]
for indivChrom in self.lstOfChromosomesClusters:
for clust in indivChrom.lstOfAnotElements:
allClusters.append(clust)
if (lowerBound+upperBound)==0 or field=="":
print("No bounds and/or filter category entered to filter by. All clusters used.")
return()
#Readout for filtering results
cCount=len(allClusters)
print("Number of clusters before: ",cCount)
#Sort all of the clusters by the field indicated
acceptableFields=["start","end","CS","URC","RC"]
if field not in acceptableFields:
print("Unable to apply filtering operation because the inputed field is invalid. It must be one of:")
print(acceptableFields)
return()
if field=="start":
print("Sorting by start location for filter...")
sortedList=sorted(allClusters, key=lambda x: int(x.start))
if field=="end":
print("Sorting by end location for filter.")
sortedList=sorted(allClusters, key=lambda x: int(x.end))
if field=="CS":
print("Sorting by CS (conversion specificity) for filter...")
sortedList=sorted(allClusters, key=lambda x: int(x.CS))
if field=="URC":
print("Sorting by URC (unique read count) for filter...")
sortedList=sorted(allClusters, key=lambda x: int(x.URC))
if field=="RC":
print("Sorting by RC (read count) for filter...")
sortedList=sorted(allClusters, key=lambda x: int(x.RC))
#Figure out how many clusters constitue a percent (round down).
clustersInAPercent=len(sortedList)/100.0
#Initialize an empty list to add the clusters to remove into
clustersToRem=[]
#Now we need to figure out how many clusters to remove for the bounds.
#Remove for the lowest bound
#How many clusters constitute the lower bound?
lowerBoundCount=int(clustersInAPercent*lowerBound)
#Loop to remove
for i in range(0,lowerBoundCount):
clustersToRem.append(sortedList[i])
#Remove for the upper bound
#How many clusters constitue the upper bound?
upperBoundCount=int(clustersInAPercent*upperBound)
for j in range(len(sortedList)-1,len(sortedList)-upperBoundCount-1,-1):
clustersToRem.append(sortedList[j])
#Finally, remove these clusters from the chromosome properties (The stoarge mechanism used for calculations).
for indivClust in clustersToRem:
self.removeClust(indivClust)
#Readout for filtering results
cCount2=0
for indivChr in self.lstOfChromosomesClusters:
cCount2=cCount2+len(indivChr.lstOfAnotElements)
print("Number of clusters after filtering by bounds: ",cCount2)
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
##Functions
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def isfloat(value):
    """
    Report whether *value* can be converted to a float.
    Input:
        value - Any object (commonly a string pulled from a csv row dict).
    Returns:
        bool - True if float(value) succeeds, False otherwise.
    """
    try:
        float(value)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-numeric objects such as None (e.g. a missing
        # csv field returned by dict.get); the original ValueError-only
        # handler let that exception propagate and crash the caller.
        return False
def isint(value):
    """
    Report whether *value* can be converted to an int.
    Input:
        value - Any object (commonly a string pulled from a csv row dict).
    Returns:
        bool - True if int(value) succeeds, False otherwise.
    """
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        # TypeError covers non-numeric objects such as None (e.g. a missing
        # csv field returned by dict.get); the original ValueError-only
        # handler let that exception propagate and crash the caller.
        return False
def dictionaryToCluster(cDict):
    """
    Build a cluster object from a dictionary of cluster values, using
    whichever fields are available.
    Input:
        cDict (Dictionary) - Dictionary of cluster values.
    Returns:
        cluster - The assembled cluster object.
    """
    # Mandatory fields.
    theChr = cDict.get("Chr")
    theStrand = cDict.get("Strand")
    theStart = cDict.get("Start")
    theEnd = cDict.get("End")
    # Gene name is needed downstream; warn when it is absent.
    if "GeneName" in cDict:
        theGene = cDict.get("GeneName")
    else:
        theGene = ""
        print("Gene not found. Fetching gene...!")
    # Optional float fields: None when missing or unparseable.
    theCS = None
    if "ConversionSpecificity" in cDict and isfloat(cDict.get("ConversionSpecificity")):
        theCS = float(cDict.get("ConversionSpecificity"))
    T2Cf = None
    if "T2Cfraction" in cDict and isfloat(cDict.get("T2Cfraction")):
        T2Cf = float(cDict.get("T2Cfraction"))
    # Optional count fields: None when missing, 1 when present but unparseable.
    if "UniqueReads" in cDict:
        UR = int(cDict.get("UniqueReads")) if isint(cDict.get("UniqueReads")) else 1
    else:
        UR = None
    if "ReadCount" in cDict:
        RC = int(cDict.get("ReadCount")) if isint(cDict.get("ReadCount")) else 1
    else:
        RC = None
    return(cluster(theChr,theStrand,theStart,theEnd,theGene,theCS,T2Cf,UR,RC))
def createLstOfParclips (myDir,genesToUse,lowerBound,upperBound,boundFilter,gtfObj,bedGTF,anoterScript,outDir):
    """
    Creates a list of parclip objects based on an input directory of cluster.csv/bed files
    (or a single such file).
    Input:
        myDir(String) - Path to a directory of cluster.csv/.bed files, or the path of a single .csv/.bed file.
        genesToUse(List[String]) - Names of genes to keep in subsequent calculations. Empty list keeps all genes.
        lowerBound(Int) - Percentage of clusters at the low end of the boundFilter spectrum to drop (csv inputs only).
        upperBound(Int) - Percentage of clusters at the high end of the boundFilter spectrum to drop (csv inputs only).
        boundFilter(Str) - Cluster field used for the bounds filtering (see parclip.applyBounds).
        gtfObj (gtfObj) - GTF object used to analyze the parclip clusters.
        bedGTF (str) - GTF file used to annotate bed files.
        anoterScript (str) - Path to the external annotation script applied to un-annotated bed files.
        outDir (str) - Directory to output files into.
    Returns:
        List[parclip] - A list of parclip objects representing the files read.
    """
    # When given a directory, make it the working directory for the reads below.
    if ".csv" not in myDir and ".bed" not in myDir: #If a directory
        os.chdir(myDir)
    listOfParclipObjects = []
    # The files to be analyzed - either a single file or a directory listing.
    filesToAnalyze=[]
    if ".csv" in myDir or ".bed" in myDir:
        filesToAnalyze.append(myDir)
    else:
        filesToAnalyze=os.listdir(myDir)
    # For each candidate file
    for i in range(0,len(filesToAnalyze)):
        # Skip anything that is neither a csv nor a bed file.
        filename= filesToAnalyze[i]
        if ".csv" not in filename and ".bed" not in filename:
            continue
        # Determine the type of the file.
        fileType=""
        if ".bed" in filename:
            fileType="bed"
        if ".csv" in filename:
            fileType="csv"
        # Build the path to read: prefix with the directory unless myDir was itself a file.
        if ".csv" not in myDir and ".bed" not in myDir:
            toReadForCSV = myDir+"/"+filename
        if ".csv" in myDir or ".bed" in myDir:
            toReadForCSV=filename
        # Read the file into a list of row dictionaries.
        currentList = getFile(toReadForCSV,gtfObj,bedGTF,anoterScript,outDir)
        # Name the parclip after the file, minus its extension.
        title = filename
        nTitle=""
        if fileType=="csv":
            nTitle = title.replace('.csv', '')
        if fileType=="bed":
            nTitle = title.replace('.bed', '')
        # Identify the unique chromosome names needed, restricted to those the gtf knows.
        uniqueChromosomes=[]
        for rDict in currentList:
            curChr = rDict.get("Chr")
            if curChr not in uniqueChromosomes:
                if curChr in gtfObj.lstOfChromosomeNames:
                    uniqueChromosomes.append(str(curChr))
        newPC = parclip(nTitle,uniqueChromosomes)
        # Convert each kept row into a cluster object.
        lstOfClustersOrig = []
        for cDict in currentList:
            # Keep the cluster if its gene is requested, or if there are no gene restrictions.
            if (cDict.get("GeneName") in genesToUse) or (len(genesToUse))==0:
                nClust = dictionaryToCluster(cDict)
                lstOfClustersOrig.append(nClust)
        # NOTE(review): this inner loop reuses the outer loop variable `i`;
        # harmless here because the outer `for` reassigns `i` each iteration,
        # but worth renaming if this is ever refactored.
        for i in range(0,len(lstOfClustersOrig)):
            clusterToAdd = lstOfClustersOrig[i]
            newPC.addClust(clusterToAdd)
        # Blank the per-chromosome gene lists before repopulating.
        for crClustGen in newPC.lstOfChromosomesClustersGenes:
            crClustGen.lstOfAnotElements=[]
        # Apply bounds filtering only for csv inputs (bed rows lack the needed fields).
        if fileType=="csv":
            newPC.applyBounds(lowerBound,upperBound,boundFilter)
        newPC.populateCh()
        listOfParclipObjects.append(newPC)
    return(listOfParclipObjects)
def getFile (myFile,anotObj,bedGTF,anoterScript,outDir):
    """
    Read a cluster file (.csv or .bed) into a list of dictionaries, one per row.
    Bed files lacking a GeneName column are first run through an external
    annotation script. Dictionary keys correspond to columns.
    Input:
        myFile(String) - Name of the csv or bed file.
        anotObj (anotObj) - Annotation object (currently unused in this function body).
        bedGTF (Str) - GTF file to be used to annotate beds.
        anoterScript (Str) - Path to the external annotation script for un-annotated beds.
        outDir(Str) - Output directory to deposit the corrected annotated bed.
    Returns:
        List[Dictionary] - A list of dictionaries, each representing a row of the file.
        Returns None implicitly if the filename contains neither ".csv" nor ".bed".
    """
    # Case 1: csv cluster file.
    if ".csv" in myFile:
        # NOTE(review): file handles opened inline here are never closed
        # explicitly; they are released only at garbage collection.
        readCSV = csv.DictReader(open(myFile))
        master_list = []
        for line in readCSV:
            # Rows using PARALUS column names are remapped to the expected keys.
            if "chromosome" in line:
                nLine={}
                nLine["Chr"]=line["chromosome"]
                nLine["Start"]=line["start"]
                nLine["End"]=line["end"]
                nLine["GeneName"]=line["gene_name"]
                nLine["Strand"]=line["strand"]
                nLine["ClusterSequence"]=line["sequence"]
                nLine["Aligned to"]=line["annotation"]
                nLine["UniqueReads"]=line["crosslinked_reads"]
                nLine["T2Cfraction"]=line["fraction"]
                nLine["ReadCount"]=line["total_reads"]
                master_list.append(nLine)
            else:
                master_list.append(line)
        return(master_list)
    # Case 2: bed file.
    elif ".bed" in myFile:
        # Peek at the columns to see whether the bed is already annotated.
        testBed= pd.read_csv(open(myFile),sep='\t')
        if "GeneName" in testBed.columns:
            readCSV = csv.DictReader(open(myFile),delimiter='\t')
            master_list = []
            for line in readCSV:
                master_list.append(line)
            return(master_list)
        # Otherwise annotate the bed with the external script first.
        else:
            print("Running annotation program on bed file...")
            # NOTE(review): command built by string concatenation and run via
            # os.system — paths with spaces or shell metacharacters will break
            # or be interpreted by the shell; subprocess.run with a list would
            # be safer.
            anotCom=anoterScript+" -i "+myFile+" -G "+bedGTF
            os.system(anotCom)
            # Derived file names: raw annotated, with header, and space-corrected.
            nFile=myFile.replace(".bed", ".annotated.bed", 1)
            nFile2=myFile.replace(".bed", ".annotated_withHeader.bed", 1)
            nFile3=myFile.replace(".bed", ".annotated_withHeader_spaceCorrected.bed", 1)
            # Write the header row for the annotated file.
            f = open(nFile2, "w")
            writer = csv.DictWriter(f, fieldnames=["Chr", "Start","End","GeneName","AlignedTo","Strand"],delimiter='\t')
            writer.writeheader()
            f.close()
            # Append the annotated rows beneath the header.
            open(nFile2, "a").writelines([l for l in open(nFile).readlines()])
            # Replace the first five runs of spaces on each line with tabs.
            fin = open(nFile2, "rt")
            fout = open(nFile3, "wt")
            for line in fin:
                fout.write(line.replace(' ', '\t',5))
            fin.close()
            fout.close()
            readCSV = csv.DictReader(open(nFile3),delimiter='\t')
            master_list = []
            for line in readCSV:
                # Work around the annotation script sometimes swapping the
                # "AlignedTo" value into the "GeneName" column: if GeneName
                # holds an annotation keyword, rotate the three fields back.
                alignValues=["CDS","UTR","gene","transcript"]
                for av in alignValues:
                    if av in line["GeneName"]:
                        nGeneName=line["Strand"]
                        nAlignedTo=line["GeneName"]
                        nStrand=line["AlignedTo"]
                        line["GeneName"]=nGeneName
                        line["AlignedTo"]=nAlignedTo
                        line["Strand"]=nStrand
                master_list.append(line)
            # Clean up the intermediates and move the final file to the output directory.
            moveCommand1="rm "+nFile+" "
            os.system(moveCommand1)
            moveCommand="rm "+nFile2+" "
            os.system(moveCommand)
            moveCommand="mv "+nFile3+" "+outDir
            os.system(moveCommand)
            return(master_list)
def getCSV (myFile):
    """
    Read a csv file into a list of dictionaries, one per row (keys are the
    column headers).
    Input:
        myFile(String) - Name of the csv file.
    Returns:
        List[Dictionary] - A list of dictionaries, each representing a row
        from the csv read.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original left the handle open until garbage collection.
    with open(myFile) as csvHandle:
        readCSV = csv.DictReader(csvHandle)
        # Materialize all rows while the handle is still open (DictReader is lazy).
        master_list = [line for line in readCSV]
    return(master_list)
def readInGTFFile (gtfFile,onlyMain):
    """
    Read in the gtf intermediate csv file, returning an anotFile object.
    Input:
        gtfFile(String) - Name of the csv file.
        onlyMain (bool) - Whether only the main chromosomes (short names, no
            mitochondrial "M") should be considered.
    Returns:
        anotFile - An annotation file object that contains the information
        from the annotation csv file.
    """
    # Import the raw rows.
    mData = getCSV(gtfFile)
    # If requested, build the list of the main chromosomes.
    mainChromosomes=[]
    if onlyMain==True:
        # First establish whether the chromosome names carry a "chr" prefix.
        containC=False
        uniqueChromosomes=[]
        for rDict in mData:
            curChr = rDict.get("Chromosome")
            if "chr" in curChr:
                containC=True
                break
        # A "main" chromosome has a short name (<= 2 chars after any "chr"
        # prefix) and is not mitochondrial ("M").
        for rDict in mData:
            curChr = rDict.get("Chromosome")
            if containC==True:
                testChr=curChr[3:len(curChr)]
            else:
                testChr=curChr
            if len(testChr)<=2:
                if curChr not in mainChromosomes and "M" not in curChr:
                    mainChromosomes.append(curChr)
    # Identify the unique chromosome names needed.
    # NOTE(review): names here keep whatever prefix the file used, while the
    # rows added below are normalized to a "chr" prefix — confirm downstream
    # lookups expect this asymmetry.
    uniqueChromosomes=[]
    for rDict in mData:
        curChr = rDict.get("Chromosome")
        if curChr not in uniqueChromosomes:
            if onlyMain==True:
                if curChr in mainChromosomes:
                    uniqueChromosomes.append(str(curChr))
            else:
                uniqueChromosomes.append(str(curChr))
    print("Unique Chromsomes for the gtf file: ", uniqueChromosomes)
    # Instantiate the annotation file object.
    myAFileObject = anotFile(uniqueChromosomes)
    # Convert each row dictionary into an anotRow and add it.
    for rDict in mData:
        # Normalize the chromosome name to carry a "chr" prefix.
        curChr = rDict.get("Chromosome")
        if "chr" not in curChr:
            curChr="chr"+curChr
        nAnotR = anotRow(rDict.get("Gene"),rDict.get("Type"),int(rDict.get("Start")),int(rDict.get("Stop")),curChr,rDict.get("Orientation"))
        myAFileObject.addAR(nAnotR)
    myAFileObject.populateCh()
    return(myAFileObject)
def getRandLst(num,seed):
    """
    Generate a list of *num* unique random integers between 0 and 99 inclusive.
    Input:
        num(int) - The number of unique random numbers to generate (at most 100).
        seed(num) - Random seed to set, for reproducible output.
    Returns:
        List[int] - num unique random integers in [0, 99].
    Raises:
        ValueError - If num exceeds the 100 distinct values available.
    """
    # Only 100 distinct values exist; asking for more would spin forever in
    # the rejection loop below (the original had no guard).
    if num > 100:
        raise ValueError("Cannot generate more than 100 unique values in [0, 99].")
    random.seed(seed)
    lstOfRandNums = []
    # Rejection sampling preserves the original seed-for-seed output sequence.
    while len(lstOfRandNums)!=num:
        nNum = random.randint(0,99)
        if nNum not in lstOfRandNums:
            lstOfRandNums.append(nNum)
    return(lstOfRandNums)
def getRandLstBounded(num,bound,seed):
    """
    Generate a list of *num* unique random integers between 0 and bound-1.
    Input:
        num(int) - The number of unique random numbers to generate (at most bound).
        bound(int) - The exclusive upper bound of possible value generation.
        seed (num) - Random seed to set, for reproducible output.
    Returns:
        List[int] - num unique random integers in [0, bound-1].
    Raises:
        ValueError - If num exceeds the bound distinct values available.
    """
    # Only `bound` distinct values exist; asking for more would spin forever
    # in the rejection loop below (the original had no guard).
    if num > bound:
        raise ValueError("Cannot generate more unique values than the bound allows.")
    random.seed(seed)
    lstOfRandNums = []
    # Rejection sampling preserves the original seed-for-seed output sequence.
    while len(lstOfRandNums)!=num:
        nNum = random.randint(0,bound-1)
        if nNum not in lstOfRandNums:
            lstOfRandNums.append(nNum)
    return(lstOfRandNums)
def createExportTable(lstOfPCs,myDir,genesToUse):
    """
    Build a table holding the distribution rows (5' exon half, intron,
    3' exon half) for each parclip object, write it out as a csv, and
    return it.
    Inputs:
        lstOfPCs (List[parclip]) - The parclip objects whose distribution lists
            will be pulled and exported.
        myDir (Str) - The directory into which the table csv will be written.
        genesToUse (List[Str]) - Gene names considered upstream; used only to
            build the output filename. Empty list means all genes.
    Returns:
        List[List[]] - A list of lists representing the spreadsheet; row 0 is
        the "Percentile" header, then three labeled rows per parclip.
    """
    toRetLst=[]
    # Filename prefix used when a gene subset is in play.
    genesStrToAdd=""
    for item in genesToUse:
        genesStrToAdd=genesStrToAdd+item+"_"
    # Header row: "Percentile" followed by 1..100.
    perc = ["Percentile"] + list(range(1, 101))
    toRetLst.append(perc)
    # Three labeled rows (5' exon half, intron, 3' exon half) per parclip.
    for pc in lstOfPCs:
        labelEH1 = pc.filename+": "+"5' Exon Half"
        labelI = pc.filename+": "+"Intron "
        labelEH2 = pc.filename+": "+"3' Exon Half "
        # BUG FIX: the original inserted each label directly into the
        # parclip's own distribution list (aliasing), leaving a stray string
        # inside the numeric data any later consumer (e.g. the heat-map
        # functions) reads. Build fresh rows from copies instead.
        DEH1 = [labelEH1] + list(pc.eFHDistribution)
        toRetLst.append(DEH1)
        DI = [labelI] + list(pc.iDistribution)
        toRetLst.append(DI)
        DEH2 = [labelEH2] + list(pc.eSHDistribution)
        toRetLst.append(DEH2)
    # Export as csv --------
    csvfile = myDir
    if len(genesToUse)==0:
        csvfile=csvfile+"/IntronExonTable.csv"
    else:
        csvfile=csvfile+"/"+genesStrToAdd+"Intron_Exon_Table.csv"
    # Context manager closes the output file deterministically.
    with open(csvfile, "w") as output:
        writer = csv.writer(output, lineterminator='\n')
        writer.writerows(toRetLst)
    return(toRetLst)
def beforeColon(myString):
    """
    Return the substring of all characters before the first colon in a string.
    Input:
        myString(Str) - The string from which the prefix is pulled.
    Returns:
        Str - Everything before the first ":" (the whole string if there is
        no colon).
    """
    # str.partition returns the full string as the head when ":" is absent;
    # the original manual scan incorrectly dropped the last character in
    # that case. (Its docstring was also a copy-paste from the random-list
    # helpers; fixed above.)
    return(myString.partition(":")[0])
def indicesToCheck(exportTable):
    """
    Return the list of row indices to check in the sub plot function.
    Input:
        exportTable(List[List[int]]) - Export table (a list of lists) created
            via the createExportTable function.
    Returns:
        List[int] - Indices of the first row of each parclip's three-row
        group (1, 4, 7, ...).
    """
    # Each parclip contributes three consecutive rows after the single
    # header row, so triples start at indices 1, 4, 7, ...
    numEntries = int((len(exportTable) - 1) / 3)
    return([1 + 3 * k for k in range(numEntries)])
def areAnotRowsOverlapping(row1,row2):
    """
    Check whether two annotation rows overlap (inclusive [start, stop]
    coordinates).
    Input:
        row1 (anotRow) - First annotation row.
        row2 (anotRow) - Second annotation row.
    Returns:
        bool - True if the two intervals intersect, False otherwise.
    """
    # The original looped over range(0,1), so its "swap the rows" branch
    # (i==1) was dead code; its three one-directional checks nevertheless
    # covered every overlap case, and this symmetric interval test is the
    # equivalent direct form.
    return(row1.start <= row2.stop and row2.start <= row1.stop)
def getHorizontalRange(maxVal):
    """
    Given the maximum plotted value, return the five y values used to draw
    the dashed separator lines in the distribution plots (marking the breaks
    between the 5' exon half, intron, and 3' exon half regions).
    Input:
        maxVal(int) - Maximum y value to be used in the distribution plots.
    Returns:
        List[int] - Five evenly spaced y values from 0 up to 4/5 of the
        padded maximum.
    """
    # Pad the top by a quarter of the maximum, then take five even steps.
    step = (maxVal + maxVal / 4) / 5
    return([w * step for w in range(0, 5)])
def createSubPlots(exportTable,outputDir,NIntronBins,NExonBins,genesToUse,lstOfPCs,inpDPI,imgFormat):
    """
    Graphs the binding distribution (across the intronic/exonic regions) of each parclip object in a line. These individual plots are stacked together and written to a single file.
    Input:
        exportTable(List[List]) - Representation of all parclip's binding distributions as created by the createExportTable function.
            NOTE(review): the label cell of each row is deleted in place below, so this function mutates exportTable (and, via aliasing, the parclip lists createExportTable inserted labels into).
        outputDir(str) - Directory in which to create the sub plots.
        NIntronBins(Int) - The number of bins the intronic regions were divided into.
        NExonBins(Int) - The number of bins the exonic regions were divided into.
        genesToUse(List[str]) - List of genes to consider (identified by name). If empty, all genes considered.
        lstOfPCs(List[parclip]) - The list of parclip's whose binding properties are being visualized.
        inpDPI(Int) - The resolution of the graph outputed.
        imgFormat(Str) - The format to encode the image as. If "pdf" is used, then the file will be exported as a pdf. Otherwise, it will be exported as a png.
    """
    # Filename prefix used when a gene subset is in play.
    genesStrToAdd=""
    for item in genesToUse:
        genesStrToAdd=genesStrToAdd+item+"_"
    # Shared x values for every stacked sub plot (one per remaining bin).
    theDomain = []
    for x in range(1,NIntronBins+NExonBins-3):
        theDomain.append(x)
    fig=plt.figure()
    # indic tracks which parclip the current three-row group belongs to.
    indic=0
    for w in indicesToCheck(exportTable):
        # Gene/cluster counts shown as annotations on the plot.
        numInt=lstOfPCs[indic].getNumIntronGenes()
        numIntDisp="Genes: "+str(numInt)
        numExo1 = lstOfPCs[indic].getNumExonGenesH1()
        numExo1Disp="Genes: "+str(numExo1)
        numExo2 = lstOfPCs[indic].getNumExonGenesH2()
        numExo2Disp="Genes: "+str(numExo2)
        numExo1Clust=lstOfPCs[indic].getNumExonClustersH1()
        numExo1ClustDisp="Clusters: "+str(numExo1Clust)
        numExo2Clust=lstOfPCs[indic].getNumExonClustersH2()
        numExo2ClustDisp="Clusters: "+str(numExo2Clust)
        numIntClust=lstOfPCs[indic].getNumIntronClusters()
        numIntClustDisp="Clusters: " +str(numIntClust)
        # The range is the concatenation of the group's three rows.
        # First row: 5' exon half.
        lst1 = exportTable[w]
        # Its label cell ("file: section") provides the y-axis title.
        sectionTitle= beforeColon(lst1[0])
        # Strip any leading file path from the title (keep text after the last "/").
        safeTitle=""
        for c in reversed(sectionTitle):
            if c=="/":
                break
            else:
                safeTitle=safeTitle+c
        sectionTitle=safeTitle[::-1]
        # Drop the label cells in place, leaving pure numeric rows.
        del lst1[0]
        # Second row: intron.
        lst2 = exportTable[w+1]
        del lst2[0]
        # Third row: 3' exon half.
        lst3 = exportTable[w+2]
        del lst3[0]
        theRange=lst1+lst2+lst3
        # Leave 20% headroom above the tallest value.
        mVal = max(theRange)*1.2
        # Stack each parclip's axes 0.4 figure-units above the previous one.
        val=.1+.4*(indic)
        nAx = fig.add_axes([.1,val,.8,.4], ylim=(0, mVal))
        # Dashed separator lines at the exon/intron boundaries.
        line1Val = NExonBins//2+0.5
        line2Val = line1Val + NIntronBins
        vertVals = getHorizontalRange(mVal)
        nAx.plot([line1Val,line1Val,line1Val,line1Val,line1Val],vertVals,linestyle='dashed',color='lightskyblue')
        nAx.plot([line2Val,line2Val,line2Val,line2Val,line2Val],vertVals,linestyle='dashed',color='lightskyblue')
        # Annotate the gene and cluster counts for each region.
        nAx.text(line1Val+.5*NIntronBins, mVal*.926, numIntDisp,fontsize=7,color='m')
        nAx.text(0, mVal*.94, numExo1Disp,fontsize=7,color='m')
        nAx.text(line2Val, mVal*.94, numExo2Disp,fontsize=7,color='m')
        nAx.text(0,mVal*.88,numExo1ClustDisp,fontsize=7,color='m')
        nAx.text(line2Val,mVal*.88,numExo2ClustDisp,fontsize=7,color='m')
        nAx.text(line1Val+.5*NIntronBins,mVal*.88,numIntClustDisp,fontsize=7,color='m')
        # Plot the distribution itself.
        nAx.plot(theDomain, theRange, color='g',linewidth=0.5)
        nAx.set_ylabel(sectionTitle, color='g',fontsize=13)
        nAx.set_title("Distribution of Intron / Exon Sites",y=1.1,fontsize=15)
        nAx.axes.get_xaxis().set_visible(False)
        indic=indic+1
        # On the topmost axes only, add the region name labels above the plot.
        if (indic==len(indicesToCheck(exportTable))):
            nAx.axes.text(-7, mVal*1.05, "5' Exon Half", fontsize=10,color='b')
            nAx.axes.text(line1Val+.5*NIntronBins, mVal*1.04, "Intron", fontsize=10,color='b')
            nAx.axes.text(line2Val+.15*NExonBins//2, mVal*1.04, "3' Exon Half", fontsize=10,color='b')
    # Save into the output directory (note: changes the process working directory).
    os.chdir(outputDir)
    if len(genesToUse)==0:
        if imgFormat=="pdf":
            toSave='DistributionPlot_Intron_Exon.pdf'
        if imgFormat!="pdf":
            toSave='DistributionPlot_Intron_Exon.png'
    elif len(genesToUse)!=0:
        if imgFormat=="pdf":
            toSave=genesStrToAdd+"DistributionPlot_Intron_Exon.pdf"
        if imgFormat!="pdf":
            toSave=genesStrToAdd+"DistributionPlot_Intron_Exon.png"
    fig.savefig(toSave, bbox_inches='tight',dpi=inpDPI)
def Average(lst):
    """
    Return the arithmetic mean of a list of numbers.
    Input:
        lst(List[Number]) - A list of numbers.
    Returns:
        float - The average of the numbers in the input list, or 0 when the
        list is empty.
    """
    # Guard against division by zero on an empty list.
    return(sum(lst) / len(lst) if lst else 0)
def heatmap(data, row_labels, col_labels, ax=None,
            cbar_kw=None, cbarlabel="", **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.
    Arguments:
        data : A 2D numpy array of shape (N,M)
        row_labels : A list or array of length N with the labels
                     for the rows
        col_labels : A list or array of length M with the labels
                     for the columns
    Optional arguments:
        ax : A matplotlib.axes.Axes instance to which the heatmap
             is plotted.  If not provided, use current axes or
             create a new one.
        cbar_kw : A dictionary with arguments to
                  :meth:`matplotlib.Figure.colorbar`.  Defaults to no
                  extra arguments.
        cbarlabel : The label for the colorbar
    All other arguments are directly passed on to the imshow call.
    Returns:
        (im, cbar) - The AxesImage and its Colorbar.
    """
    # Avoid the shared-mutable-default pitfall: the original signature used
    # cbar_kw={}, a single dict object shared across every call.
    if cbar_kw is None:
        cbar_kw = {}
    if not ax:
        ax = plt.gca()
    # Plot the heatmap
    im = ax.imshow(data, **kwargs)
    # Create colorbar
    cbar = ax.figure.colorbar(im, ax=ax, fraction=0.026, pad=0.01, **cbar_kw)
    cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom",fontsize="13")
    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    ax.set_aspect(10) # X scale matches Y scale
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels,fontsize=8) #Was 15
    ax.set_yticklabels(row_labels,fontsize=16)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-90, ha="center",
             rotation_mode="default")
    # Turn spines off and create white grid.
    for edge, spine in ax.spines.items():
        spine.set_visible(False)
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3,axis='y')
    ax.tick_params(which="minor", bottom=False, left=False)
    ax.set_title("Exon/Intron Distribution Heat Map",fontsize=25)
    return im, cbar
def scaleList(inpList):
    """
    Scale a list of numbers so every value becomes a fraction of the
    largest value in the list (the maximum maps to 1).
    Input:
        inpList (List[Number]) - A list of numbers.
    Returns:
        List[Number] - The scaled list, relative to the largest value.
    """
    # Multiply each entry by the reciprocal of the maximum.
    factor = 1 / max(inpList)
    return([value * factor for value in inpList])
def s_heatmap(pdDF,outDir,xAxisLab,genesToUse,inpDPI,imgFrmt):
    """
    Generate and save a clustered heatmap using seaborn's clustermap
    (dendrogram) visualization.
    Inputs:
        pdDF (Pandas DataFrame) - The dataframe of the metagene results to visualize. Row indices are the name of the metagene.
        outDir (Str) - Output directory in which the heatmap is saved.
            NOTE(review): not actually used below — the file is saved to the
            current working directory; confirm callers chdir beforehand.
        xAxisLab (List[Str]) - A list of the values that constitute the x axis labels of the heatmap.
        genesToUse(List[str]) - List of genes to consider (identified by name). If empty, all genes considered.
        inpDPI(Int) - The resolution of the graph outputed.
        imgFrmt(Str) - "pdf" or "png"; any other value leaves toSave unbound
            and the final savefig raises NameError (pre-existing behavior).
    """
    # Filename prefix used when a gene subset is in play.
    genesStrToAdd=""
    for item in genesToUse:
        genesStrToAdd=genesStrToAdd+item+"_"
    sns.set(font_scale=1.6)
    # Scale figure height with the number of rows.
    height=len(pdDF.index.values)*0.6
    # Initialize the clustered heat map (rows clustered, columns fixed).
    g = sns.clustermap(pdDF, col_cluster=False,cmap="coolwarm",xticklabels=xAxisLab,yticklabels=pdDF.index.values,figsize=(60, height),metric="euclidean",method="centroid")
    # Remove the labels on the axes.
    ax = g.ax_heatmap
    ax.set_xlabel("")
    ax.set_ylabel("")
    cbar = ax.collections[0].colorbar
    cbar.set_ticks([0,1])
    # Add separating white lines between the rows.
    for i in range(0,len(pdDF.index.values)):
        insAt=i+1
        ax.axhline(insAt, 0, insAt, linewidth=3, c='w')
    # Rotate the x-axis tick marks.
    plt.setp(g.ax_heatmap.get_xticklabels(), rotation=90)
    # Set the title.
    plt.title("Intron/Exon' Cluster Distribution", fontsize = 50, loc='center')
    # Choose the output filename.
    if len(genesToUse)==0:
        if imgFrmt=="pdf":
            toSave='DistributionHeatMap_Intron_Exon.pdf'
        elif imgFrmt=="png":
            toSave='DistributionHeatMap_Intron_Exon.png'
    elif len(genesToUse)!=0:
        if imgFrmt=="pdf":
            toSave=genesStrToAdd+"DistributionHeatMap_Intron_Exon.pdf"
        elif imgFrmt=="png":
            # BUG FIX: the png branch dropped the gene-subset prefix, so a
            # gene-filtered png overwrote the unfiltered one (the pdf branch
            # above has the prefix).
            toSave=genesStrToAdd+'DistributionHeatMap_Intron_Exon.png'
    g.savefig(toSave, bbox_inches='tight',dpi=inpDPI)
def createHeatMapSingleton(lstOfParClips,outputDir,numIntronBins,numExonBins,genesToUse,inpDPI,imgFrmt):
    """
    Create and save the heat map when no clustering is possible (when there
    is only one input file).
    Input:
        lstOfParClips(List[parclip]) - The list of parclip's whose binding properties are being visualized.
        outputDir (Str) - Output directory in which the heatmap is saved.
        numIntronBins(Int) - The number of bins the intronic regions were divided into.
        numExonBins(Int) - The number of bins the exonic regions were divided into.
        genesToUse(List[str]) - List of genes to consider (identified by name). If empty, all genes considered.
        inpDPI(Int) - The resolution of the graph outputed.
        imgFrmt(Str) - "pdf" or "png"; any other value leaves toSave unbound
            and the final savefig raises NameError (pre-existing behavior).
    """
    # Filename prefix used when a gene subset is in play.
    genesStrToAdd=""
    for item in genesToUse:
        genesStrToAdd=genesStrToAdd+item+"_"
    # X-axis labels: blank everywhere except the two exon/intron boundaries.
    xAxisLabels=[]
    for x in range(0,numIntronBins+numExonBins):
        if x==numExonBins//2:
            xAxisLabels.append("-Intron Start")
        elif x==numExonBins//2+numIntronBins:
            xAxisLabels.append("-Intron Stop")
        else:
            xAxisLabels.append("")
    # Y labels: each parclip's filename with any directory path stripped.
    yAxisLabels=[]
    for pc in lstOfParClips:
        toAdd=""
        for c in reversed(pc.filename):
            if c=="/":
                break
            toAdd=toAdd+c
        yAxisLabels.append(toAdd[::-1])
    # One normalized distribution row per parclip.
    valueMatrix=[]
    for pc in lstOfParClips:
        ro=pc.eFHDistribution+pc.iDistribution+pc.eSHDistribution
        nRo = scaleList(ro)
        valueMatrix.append(nRo)
    valueArray = np.array(valueMatrix)
    # Create the plot via the shared heatmap helper.
    fig, ax = plt.subplots()
    im, cbar = heatmap(valueArray, yAxisLabels, xAxisLabels, ax=ax,cmap="coolwarm", cbarlabel="Coverage")
    fig.tight_layout()
    # Scale figure height with the number of rows.
    height=len(lstOfParClips)*0.3
    os.chdir(outputDir)
    fig.set_size_inches(60, height)
    # Choose the output filename.
    if len(genesToUse)==0:
        if imgFrmt=="pdf":
            toSave='DistributionHeatMap_Intron_Exon.pdf'
        elif imgFrmt=="png":
            toSave='DistributionHeatMap_Intron_Exon.png'
    elif len(genesToUse)!=0:
        if imgFrmt=="pdf":
            toSave=genesStrToAdd+"DistributionHeatMap_Intron_Exon.pdf"
        elif imgFrmt=="png":
            # BUG FIX: the png branch dropped the gene-subset prefix, so a
            # gene-filtered png overwrote the unfiltered one (the pdf branch
            # above has the prefix).
            toSave=genesStrToAdd+'DistributionHeatMap_Intron_Exon.png'
    fig.savefig(toSave, bbox_inches='tight',dpi=inpDPI)
def createHeatMap(lstOfParClips,numIntronBins,numExonBins,outputDir,genesToUse,inpDPI,imgFrmt):
    """
    Create a clustered heat map of metagene distribution data for all
    parclips considered, with each row scaled relative to its own greatest
    intensity value.
    Input:
        lstOfParClips(List[parclip]) - A list of parclip objects to consider.
        numIntronBins(Int) - The number of bins the intronic regions were divided into.
        numExonBins(Int) - The number of bins the exonic regions were divided into.
        outputDir (Str) - Output directory in which the heatmap is saved.
        genesToUse(List[str]) - List of genes to consider (identified by name). If empty, all genes considered.
        inpDPI(Int) - The resolution of the graph outputed.
        imgFrmt(Str) - "pdf" for pdf output, otherwise png.
    """
    # X-axis labels: blank everywhere except the two exon/intron boundaries.
    startTick = numExonBins // 2
    stopTick = startTick + numIntronBins
    xAxisLab = []
    for pos in range(0, numIntronBins + numExonBins):
        if pos == startTick:
            xAxisLab.append("-Intron Start")
        elif pos == stopTick:
            xAxisLab.append("-Intron Stop")
        else:
            xAxisLab.append("")
    # One row per parclip, labeled by filename and scaled to its own maximum.
    rowLabels = [pc.filename for pc in lstOfParClips]
    valueMatrix = [scaleList(pc.eFHDistribution + pc.iDistribution + pc.eSHDistribution)
                   for pc in lstOfParClips]
    # Hand the assembled dataframe to the seaborn clustering helper.
    valueDF = pd.DataFrame(valueMatrix, index=rowLabels)
    s_heatmap(valueDF, outputDir, xAxisLab, genesToUse, inpDPI, imgFrmt)
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def run(gtfFileName,csvDir,outputDir,NIntronBins,NExonBins,genesToUse,clusterThreshold,lowerBound,upperBound,boundFilter,weighValuesByRC,randomStatesLst,dpi,imgFormat,wildTypeGE,mainChromosomesOnly,bedAnotGTF,anoterScript):
    """
    Calculate and visualize the distribution of binding clusters across the intronic and exonic regions of any number of parclip experiments.
    Input:
        gtfFileName(Str) - The name of the gtf intermediate file (generated with a separate python program) to use as a reference for annotation purposes.
        csvDir(Str) - The directory containing all of the cluster csv files (of the correct format) to search.
        outputDir (Str) - Output directory in which the heatmap is saved.
        NIntronBins(Int) - The number of bins the intronic regions were divided into.
        NExonBins(Int) - The number of bins the exonic regions were divided into.
        genesToUse(List[str]) - List of genes to consider (identified by name). If empty, all genes considered.
        clusterThreshold (Int) - The minimum number of clusters that must align to a gene in order for it to be considered in the metagene calculations.
        lowerBound (num) - The lower percentage of clusters to throw out based on the boundFilter property.
        upperBound (num) - The upper percentage of clusters to throw out based on the boundFilter property.
        boundFilter(Str) - The property used to sort the clusters in a data set before the bounds are applied to remove clusters based on this property.
            Options: "start" (start coordinate of cluster),"end" (end coordinate of cluster),"CS" (Conversion Specificity of cluster),"URC" (unique read count of the cluster),"RC" (read count of the cluster)
        weighValuesByRC (Bool) - Apply a weight to each cluster percentage distribution based on the number of reads aligned to the cluster?
        randomStatesLst (List[int]) - The list of numbers to use as random seeds in random number generation. The more random states provided, the more times the binning algorithm will execute before the averaging step.
        dpi(int) - The dpi to save images to.
        imgFormat (str) - "pdf" or "png". The format to save images into.
        wildTypeGE (str) - Name of a wild type gene expression file which contain "Average" and "Gene Symbol" columns.
        mainChromosomesOnly (bool) - Whether the program should only look at the "main" chromosomes (autosomal plus x/y) for analysis.
        bedAnotGTF (Str) - GTF File to be used for the bed annotation if need be.
        anoterScript (str) - The bed annotation script to use on bed files.
    """
    # Optional wild-type gene-expression weighting; wtGE_File stays False when no file is given.
    if (len(wildTypeGE)>0):
        print("Loading wild type gene expression file...")
        wtGE_File=WTGeneExpression(wildTypeGE)
        print("...loaded.")
    if len(wildTypeGE)==0:
        wtGE_File=False
    print("Loading gtf longest transcripts intermediate file...")
    myGTFObj = readInGTFFile(gtfFileName,mainChromosomesOnly)
    print("...loaded.")
    print("Loading the list of parclip cluster .csv files from: "+csvDir+"...")
    lstOfParclips = createLstOfParclips(csvDir,genesToUse,lowerBound,upperBound,boundFilter,myGTFObj,bedAnotGTF,anoterScript,outputDir)
    print("...loaded.")
    print("Populating hits and overlap values for all genes in all parclip cluster files...")
    # Each parclip is binned (once per random state), converted to percentage
    # distributions, then split into exon halves for plotting.
    for exPC in lstOfParclips:
        exPC.populateAllHits(myGTFObj,NIntronBins,NExonBins,randomStatesLst)
        exPC.getParclipPercentageDistributions(NIntronBins,NExonBins,clusterThreshold,weighValuesByRC,wtGE_File)
        exPC.populateExonHalves(NExonBins)
    print("...populated.")
    print("Creating the export table...")
    exportTable = createExportTable(lstOfParclips,outputDir,genesToUse)
    print("...export table created")
    ###EIlengths = outputAvgLengths(lstOfParclips,outputDir)
    print("Creating the binding distribution curves for all parclips in the directory...")
    createSubPlots(exportTable,outputDir,NIntronBins,NExonBins,genesToUse,lstOfParclips,dpi,imgFormat)
    print("...binding distribution curves generated and merged.")
    print("Generating the heat map...")
    # A single parclip gets the singleton variant of the heat map.
    if len(lstOfParclips)>1:
        createHeatMap(lstOfParclips,NIntronBins,NExonBins,outputDir,genesToUse,dpi,imgFormat)
    else:
        createHeatMapSingleton(lstOfParclips,outputDir,NIntronBins,NExonBins,genesToUse,dpi,imgFormat)
    print("... heat map generated.")
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
##Run
##------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#run(gtfFileName,csvDir,outputDir,1250,100,gtu,clustThreshold,lBound,uBound,boundF,wValuesByRC,randStates,dpi,imgFrmt,wtGE,mainChromosomes,theBED_GTF,theBEDAnotScript) |
from django.shortcuts import render
from django.views.generic import (View, TemplateView, DetailView, ListView, CreateView, DeleteView, UpdateView)
from . import models
from django.urls import reverse_lazy
class SchoolListView(ListView):
    """List all School records (template context name: ``schools``)."""
    context_object_name = 'schools'
    model = models.School
class SchoolDetailView(DetailView):
    """Show one School (template context name: ``school_detail``)."""
    context_object_name = 'school_detail'
    model = models.School
    template_name = 'basic_app/school_detail.html'
class SchoolCreateView(CreateView):
    """Create a School from its name, principal and location fields."""
    fields = ('name', 'principal', 'location')
    model = models.School
class SchoolUpdateView(UpdateView):
    """Edit an existing School's name and principal."""
    fields = ('name', 'principal') # 'location' deliberately excluded: we don't want it to be changed
    model = models.School
class SchoolDeleteView(DeleteView):
    """Delete a School, then redirect back to the school list."""
    model = models.School
    success_url = reverse_lazy("basic_app:list")
"""
STUDENT OPERATIONS
"""
class StudentListView(ListView):
    """List all Student records (template context name: ``students``)."""
    context_object_name = 'students'
    model = models.Student
class StudentDetailView(DetailView):
    """Show one Student (template context name: ``student_detail``)."""
    context_object_name = 'student_detail'
    model = models.Student
    template_name = 'basic_app/student_detail.html'
class StudentCreateView(CreateView):
    """Create a Student from its name, age and school fields."""
    fields = ('name', 'age', 'school')
    model = models.Student
class StudentUpdateView(UpdateView):
    """Edit an existing Student; all three fields are editable."""
    fields = ('name', 'age', 'school')
    model = models.Student
class StudentDeleteView(DeleteView):
    """Delete a Student, then redirect back to the student list."""
    model = models.Student
    success_url = reverse_lazy("basic_app:student_list")
class IndexView(TemplateView):
    """Render the static index.html template."""
    template_name = 'index.html'
|
def encrypt(st, key):
    """Caesar-encrypt *st* by *key* positions and append the result to the
    module-global output file ``file2``.

    Letters are shifted within their own case, wrapping around past
    'Z'/'z'.  Non-alphabetic characters are written through unchanged.
    (The original wrote a stale shift value for non-letters — garbage
    output, or a TypeError on a leading non-letter.)
    """
    shift = key % 26
    for ch in st:
        if ch.isupper():
            code = ord(ch) + shift
            if code > ord('Z'):
                code -= 26
            file2.write(chr(code))
        elif ch.islower():
            code = ord(ch) + shift
            if code > ord('z'):
                code -= 26
            file2.write(chr(code))
        else:
            # Spaces, digits and punctuation pass through untouched.
            file2.write(ch)
# Read one line of the form "<name> <address> <key>" from the input file and
# append the Caesar-encrypted name+address to the output file.  Context
# managers guarantee both files are closed even if encrypt() raises
# (the original leaked both handles on any error).
with open("aprajita2.txt", "r") as file1, open("chhawi2.txt", "a") as file2:
    s, address, key = file1.readline().split(" ")
    encrypt(s + address, int(key))
|
'''
$ python download_and_convert_data.py \
--dataset_name=imagedata \
--dataset_dir=.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets import download_and_convert_imagedata
FLAGS = tf.compat.v1.app.flags.FLAGS
# --dataset_name selects the converter; main() below only dispatches "imagedata".
tf.compat.v1.app.flags.DEFINE_string(
    'dataset_name',
    None,
    'The name of the dataset to convert, one of "data"'
)
# --dataset_dir is where the generated TFRecords (and temp files) land.
tf.compat.v1.app.flags.DEFINE_string(
    'dataset_dir',
    None,
    'The directory where the output TFRecords and temporary files are saved.')
# The two flags below are only meaningful for --dataset_name=visualwakewords,
# which this script's main() does not dispatch to.
tf.compat.v1.flags.DEFINE_float(
    'small_object_area_threshold', 0.005,
    'For --dataset_name=visualwakewords only. Threshold of fraction of image '
    'area below which small objects are filtered')
tf.compat.v1.flags.DEFINE_string(
    'foreground_class_of_interest', 'person',
    'For --dataset_name=visualwakewords only. Build a binary classifier based '
    'on the presence or absence of this object in the image.')
def main(_):
    """Validate the required flags, then hand off to the imagedata converter.

    Raises ValueError when a required flag is missing or the dataset name
    is not recognized.
    """
    if not FLAGS.dataset_name:
        raise ValueError('You must supply the dataset name with --dataset_name')
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')
    # Guard-clause form: reject unknown names first, then run the converter.
    if FLAGS.dataset_name != 'imagedata':
        raise ValueError(
            'dataset_name [%s] was not recognized.' % FLAGS.dataset_name)
    download_and_convert_imagedata.run(FLAGS.dataset_dir)
if __name__ == '__main__':
    # app.run() parses the command-line flags and then calls main(argv).
    tf.compat.v1.app.run()
|
# pylint: disable=missing-module-docstring
import os # pylint: disable=unused-import
import sys # pylint: disable=unused-import
# NOTE(review): this appears to be a lint fixture — each construct exists to
# exercise its inline "# pylint: disable" pragma, so the "bad" patterns
# (exec/eval, broad except, unused names) are intentional; do not "fix" them.
def func(name): # pylint: disable=missing-function-docstring,unused-argument
    try:
        exec("1 + 1") # pylint: disable=exec-used
        val = eval("2 + 2") # pylint: disable=eval-used,unused-variable
    except BaseException: # pylint: disable=broad-except
        pass
global VAR # pylint: disable=global-variable-not-assigned
|
import turtle
# Initialize one turtle per drawing element (outline, eyes, nose, eyeballs, "tee").
tee = turtle.Turtle()
outline = turtle.Turtle()
eyes = turtle.Turtle()
nose = turtle.Turtle()
eye_ball = turtle.Turtle()
# Adjust the attributes: speed(0) = fastest drawing, thicker pen for the eyeballs.
eye_ball.speed(0)
outline.speed(0)
eyes.speed(0)
eye_ball.pensize(8)
# Design for the outline: a closed black path filled with yellow.
# NOTE: the exact sequence of moves below IS the drawing — order matters.
outline.color("black","yellow")
outline.begin_fill()
outline.left(96)
outline.forward(150)
# Jagged/serrated edge: 8 small zig-zag segments.
for i in range(8):
    outline.right(140)
    outline.forward(23)
    outline.left(120)
    outline.forward(23)
    outline.left(17)
outline.right(160)
outline.forward(95)
outline.penup()
outline.forward(53)
outline.pendown()
outline.forward(70)
outline.right(55)
outline.circle(-120,66)
# Remember this position; used later to fill a yellow patch.
point_2 = (outline.xcor(),outline.ycor())
outline.penup()
outline.goto(5,-1)
outline.pendown()
outline.circle(10,270)
outline.circle(10,-50)
outline.right(100)
outline.forward(70)
outline.left(45)
outline.circle(60,75)
outline.left(45)
# Remember this (rounded) position; the "tee" shape starts here.
co_ord = tuple(map(round,(outline.xcor(),outline.ycor())))
outline.forward(10)
outline.right(70)
outline.circle(10,120)
point_3 = (outline.xcor(),outline.ycor())
outline.end_fill()
outline.hideturtle()
# Fill the triangle between the saved points with yellow (patches the outline fill).
outline.color("yellow","yellow")
outline.penup()
outline.begin_fill()
outline.goto(5,-1)
outline.goto(point_2[0],point_2[1])
outline.goto(point_3[0],point_3[1])
outline.end_fill()
# Design for the eyes: two white-filled circles with black borders.
eyes.color("black","white")
eyes.begin_fill()
eyes.penup()
eyes.goto(119,45)
eyes.pendown()
eyes.left(90)
eyes.circle(33)
eyes.circle(-33)
eyes.end_fill()
eyes.hideturtle()
# Design for the nose: two strokes joined by a small half circle.
nose.penup()
nose.goto(115,0)
nose.pendown()
nose.forward(26)
nose.circle(-10,180)
nose.forward(26)
nose.hideturtle()
# Design for the eyeballs: two tiny thick-penned circles, one per eye.
eye_ball.penup()
eye_ball.goto(75,45)
eye_ball.pendown()
eye_ball.circle(1)
eye_ball.penup()
eye_ball.goto(155,45)
eye_ball.pendown()
eye_ball.circle(1)
eye_ball.hideturtle()
# Design for the "tee": a red-filled shape starting at the saved co_ord point.
tee.penup()
tee.goto(co_ord[0],co_ord[1])
tee.pendown()
tee.color("black","red")
tee.begin_fill()
tee.circle(-12,90)
tee.left(13)
tee.forward(60)
tee.left(40)
tee.circle(-90,35)
tee.right(85)
tee.circle(-200,50)
tee.right(60)
tee.circle(-80,60)
tee.left(54)
tee.forward(45)
tee.right(130)
tee.circle(60,75)
tee.end_fill()
# Keep the window open until the user closes it.
turtle.done()
|
import global land.land
import test_util
def _test_yaml1:
    # Round-trip a single scalar document: save, reload, compare.
    LandYaml *y = land_yaml_new("test1.yaml")
    land_yaml_add_scalar(y, "a b c")
    land_yaml_save(y)
    land_yaml_destroy(y)
    LandYaml *y2 = land_yaml_load("test1.yaml")
    assert_string(land_yaml_get_scalar(y2.root), "a b c")
def _test_yaml2:
    # Round-trip a sequence of 20 scalar entries.
    LandYaml *y = land_yaml_new("test2.yaml")
    land_yaml_add_sequence(y)
    for int i in range(20):
        land_yaml_add_scalar_f(y, "entry %d", i)
    land_yaml_done(y)
    land_yaml_save(y)
    LandYaml *y2 = land_yaml_load("test2.yaml")
    assert_length(land_yaml_get_sequence(y2.root), 20)
def _test_yaml3:
    # Round-trip a mapping with 20 key/value pairs (scalars are added in
    # key, value order).
    LandYaml *y = land_yaml_new("test3.yaml")
    land_yaml_add_mapping(y)
    for int i in range(20):
        land_yaml_add_scalar_f(y, "key %d", i)
        land_yaml_add_scalar_f(y, "value %d", i)
    land_yaml_done(y)
    land_yaml_save(y)
    LandYaml *y2 = land_yaml_load("test3.yaml")
    #land_yaml_dump(y2)
    assert_entries(land_yaml_get_mapping(y2.root), 20)
def _test_yaml4:
    # Round-trip a nested structure: a mapping of 3 keys, each mapped to a
    # sequence of 5 items.
    LandYaml *y = land_yaml_new("test4.yaml")
    land_yaml_add_mapping(y)
    for int i in range(3):
        land_yaml_add_scalar_f(y, "key %d", i)
        land_yaml_add_sequence(y)
        for int j in range(5):
            land_yaml_add_scalar_f(y, "item %d", j)
        land_yaml_done(y)
    land_yaml_done(y)
    land_yaml_save(y)
    LandYaml *y2 = land_yaml_load("test4.yaml")
    #land_yaml_dump(y2)
    assert_entries(land_yaml_get_mapping(y2.root), 3)
def _test_yaml5:
    # XML -> yaml -> XML -> yaml round trip; the two yaml outputs must match.
    # read XML into our internal structure
    LandYaml *y = land_yaml_load_xml("../../data/test.xml")
    # write it out as .yaml
    land_yaml_rename(y, "test5.yaml")
    land_yaml_save(y)
    # and write it out as .xml
    land_yaml_rename(y, "test5.yaml.xml")
    land_yaml_save_xml(y)
    # load that xml and write it again
    LandYaml *y2 = land_yaml_load_xml("test5.yaml.xml")
    land_yaml_rename(y2, "test5b.yaml")
    land_yaml_save(y2)
    # Bug fix: compare the round-tripped file against the original output —
    # the original asserted "test5.yaml" against itself, which always passes.
    assert_files_identical("test5.yaml", "test5b.yaml")
def _test_yaml6:
    # Build an XML document (html > body > table of 3x3 cells) with the
    # tag/content/end API and write it out.
    LandYaml *y = land_yaml_new("test6.xml")
    land_yaml_xml_tag(y, "html")
    land_yaml_xml_tag(y, "body")
    land_yaml_xml_tag(y, "table")
    for int i in range(3):
        land_yaml_xml_tag(y, "tr")
        for int j in range(3):
            land_yaml_xml_tag(y, "td")
            char s[1024]
            sprintf(s, "%dx%d", i, j)
            land_yaml_xml_content(y, s)
            land_yaml_xml_end(y)
        land_yaml_xml_end(y)
    land_yaml_xml_end(y) # table
    land_yaml_xml_end(y) # body
    land_yaml_xml_end(y) # html
    land_yaml_save_xml(y)
def test_yml:
    # Register/run all yaml test cases above (test() resolves _test_yamlN).
    test(yaml1)
    test(yaml2)
    test(yaml3)
    test(yaml4)
    test(yaml5)
    test(yaml6)
|
import cv2
import time
from predict_image import *
def show_webcam(mirror=False):
    """Stream webcam frames, overlay the ball-detection mask, display live.

    Loads the model architecture from nn_struct.json and its weights from
    best_weights_256.hdf5, then runs find_ball() on every captured frame;
    pixels where the mask exceeds 220 are painted white.  Press ESC to quit.

    mirror (bool) - flip each frame horizontally before processing.
    """
    # 'with' guarantees the JSON file is closed (original leaked it on a
    # read/parse error).
    with open('nn_struct.json', 'r') as json_file:
        model = model_from_json(json_file.read())
    model.load_weights('best_weights_256.hdf5')
    cam = cv2.VideoCapture(0)
    while True:
        ret_val, img = cam.read()
        if mirror:
            img = cv2.flip(img, 1)
        mask = find_ball(img, model)
        img[mask > 220] = 255
        cv2.imshow("g", img)
        if cv2.waitKey(1) == 27:
            break  # esc to quit
    # Release the camera handle (missing in the original) before closing windows.
    cam.release()
    cv2.destroyAllWindows()
def main():
    """Run the webcam loop, mirrored so the preview behaves like a mirror."""
    show_webcam(mirror=True)
if __name__ == '__main__':
    main()
|
import tensorflow as tf
class BiLSTMSelfAttention(object):
    """Bi-directional LSTM encoder with multi-hop self-attention and a dense
    classification head, built with TF1-style graph construction.

    Expected ``params`` keys (read in __call__): ``num_hidden_units``,
    ``attention_dimension`` (da), ``number_attention_hop`` (r),
    ``masking_attention`` (bool), ``num_hidden_layers``,
    ``num_dense_units``, ``num_classes``.
    """
    def __init__(self, params):
        # params: dict of hyper-parameters; see class docstring for the keys used.
        self.params = params
    def __call__(self, inputs, targets=None):
        """Build the graph and return (logits, A).

        inputs: dict with 'input_embeddings' (assumed batch x n x emb —
        batch-major, as required by bidirectional_dynamic_rnn's default)
        and 'sequence_length' (batch,).  targets is accepted but unused.
        Returns classification logits plus the attention matrix A
        (batch x r x n) so callers can inspect or regularize it.
        """
        sequence_length = inputs['sequence_length']
        input_embeddings = inputs['input_embeddings']
        with tf.variable_scope('bilstm'):
            cell_fw = tf.nn.rnn_cell.LSTMCell(self.params['num_hidden_units'])
            cell_bw = tf.nn.rnn_cell.LSTMCell(self.params['num_hidden_units'])
            outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, input_embeddings, sequence_length, dtype=tf.float32)
            # for things above sequence length the outputs will be full of 0
            hidden = tf.concat(outputs,2) # batch_size x n x 2*num_hidden_units
        with tf.variable_scope('attention'):
            # Two learned projections compute r attention distributions over
            # the n timesteps: A = softmax(WS2 tanh(WS1 H^T)).
            WS1 = tf.get_variable('WS1',
                                  shape=(self.params['attention_dimension'],2*self.params['num_hidden_units']),
                                  dtype=tf.float32)
            WS2 = tf.get_variable('WS2',
                                  shape=(self.params['number_attention_hop'],self.params['attention_dimension']),
                                  dtype=tf.float32)
            WS1_HT = tf.transpose(tf.tensordot(WS1,hidden,axes=[[1],[2]]),[1,0,2]) # batch_size x da x n
            RAW_A = tf.transpose(tf.tensordot(WS2, tf.nn.tanh(WS1_HT),axes=[[1],[1]]),[1,0,2]) # batch_size x r x n
            A = tf.nn.softmax(RAW_A) # batch_size x r x n
            if self.params['masking_attention']:
                # Zero attention beyond each sequence's true length, then
                # renormalize each hop so the weights still sum to 1.
                sequence_mask = tf.expand_dims(tf.sequence_mask(sequence_length),1) # batch_size x 1 x n
                sequence_mask_value = tf.cast(sequence_mask,tf.float32)
                A_masked = A * sequence_mask_value # batch_size x r x n
                A_masked = A_masked / tf.reduce_sum(A_masked, axis=2, keepdims=True)
                A = A_masked
            M = tf.matmul(A,hidden) # batch_size x r x 2*num_hidden_units
            flatten_weighted_vectors = tf.layers.flatten(M) # batch_size x r*2*num_hidden_units
        with tf.variable_scope('dense'):
            # ReLU hidden stack followed by a linear output layer.
            out = flatten_weighted_vectors
            for i in range(self.params['num_hidden_layers']):
                out = tf.layers.dense(out, self.params['num_dense_units'], activation=tf.nn.relu)
            logits = tf.layers.dense(out, self.params['num_classes'])
        return logits, A
# A tuple is an immutable list.  Tuples are written with (); elements are
# comma-separated, any data type is allowed.  Also called a "read-only list".
# Emperors of the Qing dynasty (Nurhaci, Hong Taiji, Shunzhi, Kangxi, Yongzheng, Qianlong)
huang = ("努尔哈赤", "皇太极", "顺治", "康熙", "雍正", "乾隆","嘉庆","道光","光绪","咸丰")
# huang[1] = "朱元璋"  # error: tuples do not support item assignment
print(huang)
#print((8+3)*7)  # parentheses can also mean precedence, not just a tuple
tu = (1)
print(type(tu)) # a single element in () is just precedence -> int, so add a comma
tu = (1,)
print(type(tu))
tu = tuple() # empty tuple (a list could be written directly as [])
# Tuples support indexing and slicing, exactly like lists and strings.
tu = ("iphone","nokia","砸核桃","Lenovo","HTC","Honor")
print(tu[2])
print(tu[1:3])
print(tu[1:5:2])
# "Immutable" only applies to the first level of elements.
tu = (1,2,5,["胡辣汤","猪蹄子","酱猪肘","米饭","炸鸡"])
# tu[3] = "娃哈哈"  # error: the first level cannot change; whether deeper
#                  # levels can change depends on their data type
# print(tu)
tu[3][0] = "科比"
print(tu)
tu[3].append("锅包肉")
print(tu)
# Tuples only have count() and index(); there is no sort().
# count() tallies how many times an element occurs.
print(tu.count(1)) # how many 1-elements are there
print(tu.index(2)) # at which index does element 2 sit
# sort: tuples cannot be sorted in place
# tu.sort()
# print(tu)
tu = ("孙悟空","白骨精","哪吒","二师兄")
for el in tu: # el = element
    print(el)
|
# Copyright (c) 2018 CEA
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import numpy as np
from neuroglancer_scripts.transform import (
nifti_to_neuroglancer_transform,
matrix_as_compact_urlsafe_json,
)
def test_matrix_as_compact_urlsafe_json():
    # A mixed int/float matrix exercises both integer and decimal formatting;
    # underscores replace the JSON separators in the URL-safe form.
    matrix = np.array([[1, 1.5],
                       [2, 3],
                       [0, -1]])
    expected = "[[1_1.5]_[2_3]_[0_-1]]"
    assert matrix_as_compact_urlsafe_json(matrix) == expected
def test_nifti_to_neuroglancer_transform():
    # An identity NIfTI transform with unit voxels should only pick up the
    # half-voxel translation that Neuroglancer's corner-based convention adds.
    identity = np.eye(4, dtype=int)
    voxel_size = (1.0, 1.0, 1.0)
    expected = np.array([
        [1, 0, 0, -0.5],
        [0, 1, 0, -0.5],
        [0, 0, 1, -0.5],
        [0, 0, 0, 1],
    ])
    result = nifti_to_neuroglancer_transform(identity, voxel_size)
    assert np.array_equal(result, expected)
|
import time
import csv
import requests
from selenium import webdriver
import lxml
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.options import Options
from bs4 import BeautifulSoup
# Launch headless Firefox; the geckodriver path is machine-specific.
options = Options()
options.set_headless(headless=True) # NOTE(review): Selenium-3 API, removed in Selenium 4 — confirm the pinned selenium version
browser = webdriver.Firefox(firefox_options=options, executable_path=r"C:\Users\kunda\OneDrive\Documents\Python Code\geckodriver.exe")
# City/state whose population we look up via a Google search.
city = "gaithersburg"
state = "md"
url = "https://www.google.com/"
browser.get(url)
# Type the query into Google's search box and submit it.
search = browser.find_element_by_name('q')
search.send_keys("population of " + city + " " + state)
search.send_keys(Keys.RETURN)
time.sleep(5) # crude fixed wait for the results page to render
new_url = browser.current_url
browser.get(new_url) # reload the (already current) results URL before scraping
# Grab the text of whatever element sits at screen position (250, 250) —
# presumably Google's answer box; brittle if the page layout changes.
answer = browser.execute_script( "return document.elementFromPoint(arguments[0], arguments[1]);",250, 250).text
answer = answer.split('(')[0] # drop any trailing "(...)" qualifier
print(answer)
browser.quit()
|
##### APP ENDPOINTS #####
from flask import Blueprint, Flask, jsonify, redirect, render_template, request, send_file
from flask_assets import Environment
from flask_cors import CORS, cross_origin
from base64 import b64encode
from boto3.dynamodb.conditions import Key, Attr
from decimal import Decimal
from operator import itemgetter
from io import BytesIO
from PIL import Image
from urlparse import urlparse, urlunparse
import boto3
import calendar
import datetime
import decimal
import json
import importlib
import random
import string
import time
# Import local
import ht_references
import ht_lib_admin
# RECALL THE APP SETTINGS
bp_app_settings = Blueprint('bp_app_settings', __name__)
@bp_app_settings.route('/app/settings', methods=['POST'])
def app_settings():
    """Return the app settings from the version-specific util module."""
    print("ROUTE: SETTINGS")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        # Replace the periods in the version name to underscores (to match the package name)
        app_version_mod = body['app_version'].replace('.', '_')
        # import the module needed from the package that matches the version name
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        response['settings'] = util.app_settings()
        response['response'] = 'success'
    return jsonify(response)
# LOG IN A USER
bp_app_login = Blueprint('bp_app_login', __name__)
@bp_app_login.route('/app/login', methods=['POST'])
def app_login():
    """Log a user in via the version-specific util module; echoes its login data."""
    print("ROUTE: LOGIN")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        # Replace the periods in the version name to underscores (to match the package name)
        app_version_mod = body['app_version'].replace('.', '_')
        # import the module needed from the package that matches the version name
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        login_result = util.app_login(body)
        response['login_data'] = login_result
        if login_result['result'] == 'success':
            response['response'] = 'success'
    return jsonify(response)
# CREATE A RANDOM ID
bp_app_random_id = Blueprint('bp_app_random_id', __name__)
@bp_app_random_id.route('/app/randomid', methods=['POST'])
def app_random_id():
    """Generate a random id via the version-specific util module."""
    print("ROUTE: RANDOM ID")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        # Replace the periods in the version name to underscores (to match the package name)
        app_version_mod = body['app_version'].replace('.', '_')
        # import the module needed from the package that matches the version name
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        response['random_id'] = util.app_random_id(body)
        response['response'] = 'success'
    return jsonify(response)
# CHECK A USER EXISTENCE
bp_app_user_check = Blueprint('bp_app_user_check', __name__)
@bp_app_user_check.route('/app/user/check', methods=['POST'])
def app_user_check():
    """Check whether a user exists (looked up by Facebook id)."""
    print("ROUTE: USER CHECK")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        response['user_exists'] = util.user_check_by_fbid(body)
        response['response'] = 'success'
    return jsonify(response)
# UPDATE A USER DATA
bp_app_user_update = Blueprint('bp_app_user_update', __name__)
@bp_app_user_update.route('/app/user/update', methods=['POST'])
def app_user_update():
    """Update a user's data via the version-specific util module."""
    print("ROUTE: USER UPDATE")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        if util.user_update(body) == 'success':
            response['response'] = 'success'
    return jsonify(response)
# RETURN ALL ACTIVE USERS
bp_app_user_query_active = Blueprint('bp_app_user_query_active', __name__)
@bp_app_user_query_active.route('/app/user/query/active', methods=['POST'])
def app_user_query_active():
    """Return all active users."""
    print("ROUTE: USER QUERY ACTIVE")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        response['users'] = util.user_query_active(body)
        response['response'] = 'success'
    return jsonify(response)
# RETURN ALL USER CONNECTIONS FOR THE PASSED USER
bp_app_user_connection_query = Blueprint('bp_app_user_connection_query', __name__)
@bp_app_user_connection_query.route('/app/user/connection/query', methods=['POST'])
def app_user_connection_query():
    """Return all user connections for the passed user."""
    print("ROUTE: USER CONNECTION QUERY")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        response['user_connections'] = util.user_connection_query(body)
        response['response'] = 'success'
    return jsonify(response)
# UPDATE / PUT A USER CONNECTION
bp_app_user_connection_put = Blueprint('bp_app_user_connection_put', __name__)
@bp_app_user_connection_put.route('/app/user/connection/put', methods=['POST'])
def app_user_connection_put():
    """Create or update a user connection."""
    print("ROUTE: USER CONNECTION PUT")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        if util.user_connection_put(body) == 'success':
            response['response'] = 'success'
    return jsonify(response)
# RETURN ALL SKILLS FOR THE PASSED USER
bp_app_skill_query = Blueprint('bp_app_skill_query', __name__)
@bp_app_skill_query.route('/app/skill/query', methods=['POST'])
def app_skill_query():
    """Return all skills for the passed user."""
    print("ROUTE: SKILL QUERY")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        response['skills'] = util.skill_query(body)
        response['response'] = 'success'
    return jsonify(response)
# UPDATE / PUT A USER SKILL
bp_app_skill_put = Blueprint('bp_app_skill_put', __name__)
@bp_app_skill_put.route('/app/skill/put', methods=['POST'])
def app_skill_put():
    """Create or update a user skill."""
    print("ROUTE: SKILL PUT")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        skill_put_response = util.skill_put(body)
        if skill_put_response['response'] == 'success':
            response['response'] = 'success'
    return jsonify(response)
# RETURN ALL STRUCTURES FOR THE PASSED USER
bp_app_structure_query = Blueprint('bp_app_structure_query', __name__)
@bp_app_structure_query.route('/app/structure/query', methods=['POST'])
def app_structure_query():
    """Return all structures for the passed user, plus the repair settings."""
    print("ROUTE: STRUCTURE QUERY")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        query_response = util.structure_query(body)
        if query_response['result'] == 'success':
            response['structures'] = query_response['structures']
            response['repair_settings'] = ht_references.repair_settings
            response['response'] = 'success'
    return jsonify(response)
# UPDATE / PUT A STRUCTURE
bp_app_structure_put = Blueprint('bp_app_structure_put', __name__)
@bp_app_structure_put.route('/app/structure/put', methods=['POST'])
def app_structure_put():
    """Create or update a structure."""
    print("ROUTE: STRUCTURE PUT")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        structure_put_response = util.structure_put(body)
        if structure_put_response['response'] == 'success':
            response['response'] = 'success'
    return jsonify(response)
# DELETE A STRUCTURE
bp_app_structure_delete = Blueprint('bp_app_structure_delete', __name__)
@bp_app_structure_delete.route('/app/structure/delete', methods=['POST'])
def app_structure_delete():
    """Delete a structure."""
    print("ROUTE: STRUCTURE DELETE")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        structure_delete_response = util.structure_delete(body)
        if structure_delete_response['response'] == 'success':
            response['response'] = 'success'
    return jsonify(response)
# RETURN ALL STRUCTURE USERS FOR THE PASSED STRUCTURE OR USER, WHICHEVER WAS PASSED
bp_app_structure_user_query = Blueprint('bp_app_structure_user_query', __name__)
@bp_app_structure_user_query.route('/app/structure-user/query', methods=['POST'])
def app_structure_user_query():
    """Return all structure-user links for the passed structure or user."""
    print("ROUTE: STRUCTURE-USER QUERY")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        response['structure_users'] = util.structure_user_query(body)
        response['response'] = 'success'
    return jsonify(response)
# UPDATE / PUT A STRUCTURE-USER
bp_app_structure_user_put = Blueprint('bp_app_structure_user_put', __name__)
@bp_app_structure_user_put.route('/app/structure-user/put', methods=['POST'])
def app_structure_user_put():
    """Create or update a structure-user link."""
    print("ROUTE: STRUCTURE-USER PUT")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        structure_user_put_response = util.structure_user_put(body)
        if structure_user_put_response['response'] == 'success':
            response['response'] = 'success'
    return jsonify(response)
# RETURN ALL REPAIRS FOR THE PASSED STRUCTURE ID
bp_app_repair_query = Blueprint('bp_app_repair_query', __name__)
@bp_app_repair_query.route('/app/repair/query', methods=['POST'])
def app_repair_query():
    """Return all repairs for the passed structure id, plus the repair settings."""
    print("ROUTE: REPAIR QUERY")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        query_response = util.repair_query(body)
        if query_response['result'] == 'success':
            response['repairs'] = query_response['repairs']
            response['repair_settings'] = ht_references.repair_settings
            response['response'] = 'success'
    return jsonify(response)
# UPDATE / PUT A REPAIR
bp_app_repair_put = Blueprint('bp_app_repair_put', __name__)
@bp_app_repair_put.route('/app/repair/put', methods=['POST'])
def app_repair_put():
    """Create or update a repair record via the version-specific util module."""
    print("ROUTE: REPAIR PUT")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        util = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_util', 'app.subpkg')
        # Renamed from 'structure_user_put_response' (copy-paste leftover from
        # the structure-user route) to match what is actually being called.
        repair_put_response = util.repair_put(body)
        if repair_put_response['response'] == 'success':
            response['response'] = 'success'
    return jsonify(response)
# RECALL THE SPOT DATA
bp_app_spot_query_active = Blueprint('bp_app_spot_query_active', __name__)
@bp_app_spot_query_active.route('/app/spot/query/active', methods=['POST'])
def app_spot_query_active():
    """Return the active spot and spot-request data (from the mapdata module)."""
    print("ROUTE: SPOT QUERY ACTIVE")
    # Retrieve the POST json parameters
    body = request.get_json(force=True)
    # Prep the response and fire the appropriate version of the function
    response = {'response' : 'failure'}
    if 'app_version' in body:
        app_version_mod = body['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + app_version_mod + '.ht_app_lib_mapdata', 'app.subpkg')
        response['spot'] = mapdata.spot_query_active(body)
        response['spot_request'] = mapdata.spot_request_query_active(body)
        response['response'] = 'success'
    return jsonify(response)
# PUT SPOT DATA
bp_app_spot_put = Blueprint('bp_app_spot_put', __name__)
@bp_app_spot_put.route('/app/spot/put', methods=['POST'])
def app_spot_put():
    """Store spot data; the library's own status string becomes the response."""
    print("ROUTE: SPOT PUT")
    payload = request.get_json(force=True)
    result = {'response': 'failure'}
    if 'app_version' in payload:
        version_tag = payload['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + version_tag + '.ht_app_lib_mapdata', 'app.subpkg')
        # Unlike the other handlers, the library's return value is forwarded as-is.
        result['response'] = mapdata.spot_put(payload)
    return jsonify(result)
# UPDATE SPOT CONTENT STATUS
bp_app_spot_content_status_update = Blueprint('bp_app_spot_content_status_update', __name__)
@bp_app_spot_content_status_update.route('/app/spot/spotcontent/statusupdate', methods=['POST'])
def app_spot_content_status_update():
    """Update the status of a piece of spot content."""
    print("ROUTE: SPOT CONTENT STATUS UPDATE")
    payload = request.get_json(force=True)
    result = {'response': 'failure'}
    if 'app_version' in payload:
        version_tag = payload['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + version_tag + '.ht_app_lib_mapdata', 'app.subpkg')
        if mapdata.spot_content_status_update(payload) == 'success':
            result['response'] = 'success'
    return jsonify(result)
# PUT SPOT REQUEST DATA
bp_app_spot_request_put = Blueprint('bp_app_spot_request_put', __name__)
@bp_app_spot_request_put.route('/app/spot/spotrequest/put', methods=['POST'])
def app_spot_request_put():
    """Store a spot request via the version-specific map-data library."""
    print("ROUTE: SPOT REQUEST PUT")
    payload = request.get_json(force=True)
    result = {'response': 'failure'}
    if 'app_version' in payload:
        version_tag = payload['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + version_tag + '.ht_app_lib_mapdata', 'app.subpkg')
        if mapdata.spot_request_put(payload) == 'success':
            result['response'] = 'success'
    return jsonify(result)
# RECALL THE HAZARD DATA
bp_app_hazard_query_active = Blueprint('bp_app_hazard_query_active', __name__)
@bp_app_hazard_query_active.route('/app/hazard/query/active', methods=['POST'])
def app_hazard_query_active():
    """Return the currently active hazard map data."""
    print("ROUTE: HAZARD QUERY ACTIVE")
    payload = request.get_json(force=True)
    result = {'response': 'failure'}
    if 'app_version' in payload:
        version_tag = payload['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + version_tag + '.ht_app_lib_mapdata', 'app.subpkg')
        result['hazard'] = mapdata.hazard_query_active(payload)
        result['response'] = 'success'
    return jsonify(result)
# PUT HAZARD DATA
bp_app_hazard_put = Blueprint('bp_app_hazard_put', __name__)
@bp_app_hazard_put.route('/app/hazard/put', methods=['POST'])
def app_hazard_put():
    """Store hazard data via the version-specific map-data library."""
    print("ROUTE: HAZARD PUT")
    payload = request.get_json(force=True)
    result = {'response': 'failure'}
    if 'app_version' in payload:
        version_tag = payload['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + version_tag + '.ht_app_lib_mapdata', 'app.subpkg')
        if mapdata.hazard_put(payload) == 'success':
            result['response'] = 'success'
    return jsonify(result)
# RECALL THE SHELTER DATA
bp_app_shelter_query_active = Blueprint('bp_app_shelter_query_active', __name__)
@bp_app_shelter_query_active.route('/app/shelter/query/active', methods=['POST'])
def app_shelter_query_active():
    """Return the currently active shelter map data."""
    print("ROUTE: SHELTER QUERY ACTIVE")
    payload = request.get_json(force=True)
    result = {'response': 'failure'}
    if 'app_version' in payload:
        version_tag = payload['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + version_tag + '.ht_app_lib_mapdata', 'app.subpkg')
        result['shelter'] = mapdata.shelter_query_active(payload)
        result['response'] = 'success'
    return jsonify(result)
# RECALL THE HYDRO DATA
bp_app_hydro_query_active = Blueprint('bp_app_hydro_query_active', __name__)
@bp_app_hydro_query_active.route('/app/hydro/query/active', methods=['POST'])
def app_hydro_query_active():
    """Return the currently active hydro map data."""
    print("ROUTE: HYDRO QUERY ACTIVE")
    payload = request.get_json(force=True)
    result = {'response': 'failure'}
    if 'app_version' in payload:
        version_tag = payload['app_version'].replace('.', '_')
        mapdata = importlib.import_module('..v' + version_tag + '.ht_app_lib_mapdata', 'app.subpkg')
        result['hydro'] = mapdata.hydro_query_active(payload)
        result['response'] = 'success'
    return jsonify(result)
# TEST ENDPOINT - APP
bp_app_test = Blueprint('bp_app_test', __name__)
@bp_app_test.route('/app/test', methods=['GET'])
def app_test():
    """Developer scratch endpoint: fetches the active hazard rows directly
    from DynamoDB using federated login credentials and returns them.

    SECURITY NOTE(review): the identity id and Facebook login token below are
    hard-coded in source. They should come from the request body or secure
    configuration; rotate the token and remove these before shipping.
    """
    print("ROUTE: TEST APP API - MOVED")
    # Retrieve the POST json parameters
    # print(request.get_json(force=True))
    # body = request.get_json(force=True)
    identity_id = "us-east-1:3332d565-36ca-458c-80db-58fe85ef876f" #body['identity_id']
    login_provider = "graph.facebook.com" #body['login_provider']
    login_token = "EAACH6QOvruMBAKZCiPGZBMQpnoLZA77AkITLuEE2SvZAPGB504Bkr71TMAsm6xtZAoCmrCC7ZCkB2mvjI0ffHkTAjJiWAEqGN9gEXKGzOHa8i1EL5xmadbepB5nIfhQRo09HgAwr1cv6yzA7DIgJdXaEw7fl3SrAYZAtHb49rUeOPBFBv3cAm1oxWy1HUUUmJBPcnxPgmpSEhawcSZAN9RfrJL1GsRvOFToZD" #body['login_token']
    # session = ht_lib_admin.get_session_with_credentials(identity_id, login_provider, login_token, 'dynamodb')
    # Exchange the social login token for temporary AWS credentials and get a
    # DynamoDB resource in us-east-1.
    resource = ht_lib_admin.get_resource_with_credentials(identity_id, login_provider, login_token, 'dynamodb', 'us-east-1')
    # print(session.get_available_services())
    # Retrieve the Hazard data
    # hazard_response = dynamo.query(
    #     TableName=ht_references.table_hazard_name
    #     , IndexName=ht_references.table_hazard_index
    #     , KeyConditions={
    #         'status': {
    #             'AttributeValueList': [{
    #                 'S': 'active',
    #             }]
    #             , 'ComparisonOperator': 'EQ'
    #         }
    #     }
    # )
    # Query the status-timestamp GSI for rows whose status is 'active'.
    # NOTE(review): `Key` is presumably boto3.dynamodb.conditions.Key imported
    # earlier in this file - confirm.
    table_hazard_name = 'Harvey-Hazard'
    table_hazard = resource.Table(table_hazard_name)
    table_hazard_index = 'status-timestamp-index'
    hazard_response = table_hazard.query(
        TableName=table_hazard_name
        , IndexName=table_hazard_index
        , KeyConditionExpression=Key('status').eq('active')
    )
    # json_conv = json_util.loads(hazard_response['Items'])
    # db_dict = dict(hazard_response['Items'])
    # print("HAZARD RESPONSE DICT:")
    # print(db_dict)
    # db_json = json.dumps(hazard_response['Items'])
    # print("HAZARD RESPONSE DUMPS:")
    # print(db_json)
    # json_conv = json.dumps(hazard_response['Items'])
    # print("HAZARD JSON CONV:")
    # print(json_conv)
    # return json.dumps(hazard_response)
    # return json.dumps({'hazard' : json_conv})
    return jsonify({'hazard' : hazard_response['Items']}) # Working in virtual env, not reg python? (harveyvenv)
# TEST ENDPOINT
bp_test = Blueprint('bp_test', __name__)
@bp_test.route('/test', methods=['GET'])
def test():
    """Smoke test: verify the versioned util module can be imported."""
    print("ROUTE: TEST API")
    importlib.import_module('..' + 'v2_0_0' + '.ht_app_lib_util', 'app.subpkg')
    return 'TEST'
|
'''
Created on Dec 21, 2019
@author: Florin
'''
from resources.variables import DIVIDE_FACTOR
class NumberGenerator():
    """Endless multiplicative-congruential number sequence.

    Each call to ``next()`` multiplies the current value by the configured
    factor and reduces it modulo ``DIVIDE_FACTOR`` (imported from
    ``resources.variables``).
    """

    def __init__(self, start_value, multiply_factor):
        # current_value: last value produced (seeded with start_value).
        self.current_value = start_value
        # _multiply_factor: constant multiplier applied on every step.
        self._multiply_factor = multiply_factor

    def __iter__(self):
        # BUG FIX: the class defined __next__ without __iter__, so instances
        # could not be used in for-loops or passed to iter(). Returning self
        # completes the iterator protocol; existing next(gen) callers are
        # unaffected.
        return self

    def __next__(self):
        self.current_value = (self.current_value * self._multiply_factor) % DIVIDE_FACTOR
        return self.current_value
|
"""This example composes several key steps into a pipeline that can estimate
the Essential matrix between a pair of images, given an estimate of the
intrinsics of the camera (that took those images.) The estimated essential
matrix is used to triangulate a set of seed points (to create a sparse 3D
point cloud,) and to derive a suitable pair of camera matrices (P0,P1) to
perform a rectification step.
There are three main steps and 2 post-processing steps:
1. compute key points for finding correspondences
2. estimate tentative correspondences between image pair
3. robustly estimate the essential matrix between image pair
4. triangulate a set of sparse points deemed inliers to create a sparse 3D
point cloud
5. use the essential matrix to derive camera matrices and use those to
rectify the image pair
This scripts showcases the outputs of each of these distinct steps.
"""
import os
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import collections as mc
from util import imread, Timer
from spectavi.feature import nn_bruteforcel1k2,nn_cascading_hash
from spectavi.feature import normalize_to_ubyte_and_multiple_16_dim
from spectavi.feature import sift_filter, sift_filter_batch, sift_filter_striped
from spectavi.mvg import ransac_fitter, dlt_triangulate, image_pair_rectification
import argparse
import multiprocessing
def write_ply(plyfile, data, rgb=None):
    """Write a basic ASCII PLY file from an (N, 3) array of 3D points.

    When ``rgb`` is given it must provide one (r, g, b) uchar triple per
    point, written after the coordinates of each vertex.
    """
    header = ['ply',
              'format ascii 1.0',
              'element vertex %d' % data.shape[0],
              'property float x',
              'property float y',
              'property float z']
    if rgb is not None:
        header += ['property uchar red',
                   'property uchar green',
                   'property uchar blue']
    header.append('end_header')
    with open(plyfile, 'w') as out:
        out.write('\n'.join(header) + '\n')
        if rgb is None:
            for p in data:
                out.write('%f %f %f\n' % (p[0], p[1], p[2]))
        else:
            for p, c in zip(data, rgb):
                out.write('%f %f %f %d %d %d\n' %
                          (p[0], p[1], p[2], c[0], c[1], c[2]))
def homogeneous(x):
    """Append a column of ones, lifting (N, d) points to homogeneous coords."""
    ones_col = np.ones((x.shape[0], 1))
    return np.concatenate((x, ones_col), axis=1)
def step1_sift_detect(args):
    """Run SIFT key-point detection and descriptors on images.

    Loads the two input images as grayscale float32, computes SIFT
    keypoints/descriptors (striped multi-threaded variant when
    --use_sift_striped is set), plots the keypoints over the side-by-side
    pair, and returns the per-image keypoint arrays.
    """
    ims = [imread(image_filename, dtype='float32',
                  force_grayscale=True)
           for image_filename in args.images]
    with Timer('step1-computation'):
        if args.use_sift_striped:
            siftkps = [ sift_filter_striped(im,
                        nthread=args.cpu_count) for im in ims ]
        else:
            siftkps = sift_filter_batch(ims)
    print ('sift 1 #: ', siftkps[0].shape[0] )
    print ('sift 2 #: ', siftkps[1].shape[0] )
    # Begin Visualize
    c_im = np.hstack(ims)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(c_im, cmap='gray', interpolation='nearest')
    # First two descriptor columns hold the (x, y) image coordinates.
    x0, y0 = siftkps[0][:, :2].T
    x1, y1 = siftkps[1][:, :2].T
    # Second image is drawn to the right of the first, so shift its x coords.
    shift = ims[0].shape[1]
    ax.plot(x0, y0, 'rx', markersize=1)
    ax.plot(x1 + shift, y1, 'bx', markersize=1)
    ax.autoscale()
    ax.set_title('Step1: SIFT Keypoints Detected')
    # End Visualize
    return siftkps
def step2_match_keypoints(args, step1_out):
    """Using output of step1, find likely matches.

    Runs nearest-neighbour matching (brute force or cascading hash per
    --matching_method), keeps matches passing the distance-ratio test, plots
    a random subset of them, and returns the matched keypoint rows (xd, yd).
    """
    x, y = step1_out
    _x = normalize_to_ubyte_and_multiple_16_dim(x)
    _y = normalize_to_ubyte_and_multiple_16_dim(y)
    with Timer('step2-computation'):
        if args.matching_method == 'bruteforce':
            nn_idx, nn_dist = nn_bruteforcel1k2(
                (_x+128).astype('uint8'),
                (_y+128).astype('uint8'),
                nthreads=args.cpu_count)
        elif args.matching_method == 'cascading-hash':
            nn_idx, nn_dist = nn_cascading_hash(_x, _y)
    # Ratio test: second-best over best distance must clear --min_ratio.
    ratio = nn_dist[:, 1] / nn_dist[:, 0].astype('float64')
    pass_idx = ratio >= args.min_ratio
    idx0, _ = nn_idx.T
    xd = x[idx0[pass_idx]]
    yd = y[pass_idx]
    # Begin Visualize
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    im0, im1 = imread(args.images[0]), imread(args.images[1])
    c_im = np.hstack([im0, im1])
    ax.imshow(c_im, cmap='gray', interpolation='nearest')
    x0, y0 = xd[:, :2].T
    x1, y1 = yd[:, :2].T
    shift = im0.shape[1]
    x1 = x1.copy() + shift
    # plot points
    ax.plot(x0, y0, 'rx', markersize=3)
    ax.plot(x1, y1, 'bx', markersize=3)
    # BUG FIX: zip() returns an iterator in Python 3, so np.asarray(zip(...))
    # built a 0-d object array and lines.shape[0] raised. Materialize the
    # pairs as a list before converting to an ndarray.
    lines = np.asarray(list(zip(zip(x0, y0), zip(x1, y1))))
    # randomize line colors
    rand_idx = np.random.randint(lines.shape[0], size=int(
        lines.shape[0] * args.percent_to_show))
    lines = lines[rand_idx]
    lc = mc.LineCollection(lines, cmap=plt.cm.gist_ncar, linewidths=1)
    lc.set_array(np.random.random(lines.shape[0]))
    ax.add_collection(lc)
    ax.autoscale()
    ax.set_title('Step2: Match SIFT Keypoints')
    # End Visualize
    return xd, yd
def step3_estimate_essential_matrix(args, step2_out):
    """Estimate an essential matrix using a robust algorithm (RANSAC) with
    matched keypoints.

    Returns the ransac result dict plus the normalized (x0, x1) and raw
    (xd, yd) correspondences for the later triangulation step.
    """
    xd, yd = step2_out
    # Normalize pixel coordinates by the inverse intrinsics so the fit runs
    # in calibrated camera coordinates.
    K = np.loadtxt(fname=args.K)
    iK = np.linalg.inv(K)
    x0 = np.dot(homogeneous(xd[..., :2]), iK.T)
    x1 = np.dot(homogeneous(yd[..., :2]), iK.T)
    with Timer('step3-computation'):
        # Map the --ransac_quality preset onto a required inlier fraction.
        ransac_quality = {'low': .6, 'medium': .7,
                          'high': .75, 'ultra': .8, 'uber': .9}
        ransac_options = {'required_percent_inliers':
                          ransac_quality[args.ransac_quality],
                          'reprojection_error_allowed': 3.35e-4,
                          'maximum_tries': 10000000,
                          'find_best_even_in_failure': False,
                          'singular_value_ratio_allowed': 1e-3,
                          'progressbar': False}
        ransac = ransac_fitter(x0, x1, options=ransac_options)
    # assert ransac['success']
    rE = ransac['essential']
    print (' Number of keypoints: ', xd.shape[0])
    print (' Percent of inliers: ', ransac['inlier_percent'])
    # Scale so the largest singular value is 1 before reporting the
    # singular-value diagnostics.
    _, s, _ = np.linalg.svd(rE)
    rE = rE / s[0]
    print (' Fundamental Matrix Singular Values: ', s)
    print (' Singular Values ratio score: ',
           np.abs(s[0] - s[1]) / np.abs(s[0] + s[1]))
    return ransac, x0, x1, xd, yd
def step4_triangulate_points(args, step3_out):
    """Triangulate the points detected as inliers from the previous step.

    Writes the resulting sparse cloud (with grayscale-derived colors) to
    <outdir>/sparse_inliers.ply and returns (points, ransac).
    """
    ransac, x0, x1, xd, yd = step3_out
    idx = ransac['inlier_idx']
    # Canonical camera pair: P0 = [I | 0]; P1 comes from the RANSAC fit.
    P1 = ransac['camera']
    P0 = np.hstack((np.eye(3), np.zeros((3, 1))))
    with Timer('step4-computation'):
        RX = dlt_triangulate(P0, P1, x0[idx], x1[idx])
    # De-homogenize the triangulated points.
    RX = RX[..., :] / RX[..., -1].reshape(-1, 1)
    # Color each 3D point with the average intensity of its two image pixels.
    xy0 = xd[idx, :2].astype('int32')
    xy1 = yd[idx, :2].astype('int32')
    im0, im1 = imread(args.images[0]), imread(args.images[1])
    im0v = im0[xy0[:, 1], xy0[:, 0]]
    im1v = im1[xy1[:, 1], xy1[:, 0]]
    rgb = np.round(255*(im0v + im1v)/2.).astype('uint8')
    write_ply(os.path.join(args.outdir, "sparse_inliers.ply"), RX, rgb=rgb)
    return RX, ransac
def step5_rectify_images(args, step4_out):
    """Rectify images based on RANSAC fit of essential matrix.

    Saves the rectified images as PNG/JPG via matplotlib and the raw
    rectification index maps as flat '.bin' files in args.outdir.
    """
    _, ransac = step4_out
    P1 = ransac['camera']
    P0 = np.hstack((np.eye(3), np.zeros((3, 1))))
    # Move the canonical cameras back to pixel coordinates via the intrinsics.
    K = np.loadtxt(fname=args.K)
    P1 = np.dot(K, P1)
    P0 = np.dot(K, P0)
    im0, im1 = imread(args.images[0]), imread(args.images[1])
    with Timer('step5-computation'):
        r0, r1, ri0, ri1 = image_pair_rectification(
            P0, P1, im0, im1, sampling_factor=args.rsf)
    plt.imsave(os.path.join(args.outdir, "rect-" +
                            os.path.basename(args.images[0])), r0)
    plt.imsave(os.path.join(args.outdir, "rect-" +
                            os.path.basename(args.images[1])), r1)
    # NOTE: split('.')[0] strips everything after the first dot in the whole
    # path, not just the extension - paths containing dots in directory names
    # would be truncated; confirm inputs before relying on this.
    ri0.tofile(os.path.join(args.outdir, "rect-idx-" +
                            os.path.basename(args.images[0])).split('.')[0]
               + '.bin')
    ri1.tofile(os.path.join(args.outdir, "rect-idx-" +
                            os.path.basename(args.images[1])).split('.')[0]
               + '.bin')
def try_open3d_viz(args):
    """Try to visualize sparse 3d point cloud using open3d.

    Loads <outdir>/sparse_inliers.ply and opens an interactive viewer.
    Falls back to a console hint when the optional `open3d` package is not
    installed; never raises on a missing dependency.
    """
    try:
        from open3d import visualization as viz
        from open3d import io
        ply_file = os.path.join(args.outdir, "sparse_inliers.ply")
        pc = io.read_point_cloud(ply_file)
        viz.draw_geometries([pc])
    except ImportError:
        # Cleanup: the exception was previously bound to an unused `err`.
        print ("Failed to import `open3d` package, can not visualize"
               " point-cloud, try installing open3d or use meshlab to visualize"
               " ply file.")
def load_cache(args):
    """Return the cached (xd, yd) match arrays, or None when no cache exists."""
    cache_path = os.path.join(args.outdir, 'cache.npz')
    if not os.path.exists(cache_path):
        return None
    archive = np.load(cache_path)
    return archive['xd'], archive['yd']
def save_cache(args, step2_out):
    """Persist the step-2 match arrays as <outdir>/cache.npz."""
    xd, yd = step2_out
    cache_path = os.path.join(args.outdir, 'cache.npz')
    np.savez_compressed(cache_path, xd=xd, yd=yd)
def run(args):
    """Drive the full pipeline: detect, match, fit, triangulate, rectify.

    When --cache is set and a cached match set exists, steps 1-2 are skipped
    and the cached (xd, yd) arrays feed step 3 directly.
    """
    if not os.path.exists(args.outdir):
        os.mkdir(args.outdir)
    if args.cache:
        cache = load_cache(args)
    else:
        cache = None
    if cache is None:
        step1_out = step1_sift_detect(args)
        step2_out = step2_match_keypoints(args, step1_out)
    else:
        step2_out = cache
    # Only write the cache when it was freshly computed this session.
    if cache is None and args.cache:
        save_cache(args, step2_out)
    step3_out = step3_estimate_essential_matrix(args, step2_out)
    step4_out = step4_triangulate_points(args, step3_out)
    step5_rectify_images(args, step4_out)
    # Show all accumulated matplotlib figures, then try the 3D viewer.
    plt.show(block=True)
    try_open3d_viz(args)
example_text = '''example:
python ex01_essential_estimation.py ../data/castle/01.jpg ../data/castle/02.jpg ../data/castle/K.txt
'''

if __name__ == '__main__':
    # parse arguments
    parser = argparse.ArgumentParser(description='Pipeline to estimate essential matrix'
                                     ' between image pair; later perform'
                                     ' triangulation & rectification',
                                     epilog=example_text,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('images', metavar='IM', type=str, nargs=2,
                        help='images to estimate essential matrix')
    parser.add_argument('K', metavar='K', type=str,
                        help='intrinsics for camera (assumption is one camera taking two images')
    parser.add_argument('--min_ratio', default=1.75, type=float, action='store',
                        help='min-ratio of second min distance to min distance that is accepted (default=1.75)')
    parser.add_argument('--percent_to_show', default=.1, type=float, action='store',
                        help='percent of matches to show (for legibility) (default=.1)')
    parser.add_argument('--ransac_quality', default='ultra', choices=['low', 'medium', 'high', 'ultra', 'uber'], action='store',
                        help='quality of ransac fit to perform (default=ultra)')
    parser.add_argument('--matching_method', default='cascading-hash', choices=['bruteforce', 'cascading-hash'], action='store',
                        help='which method to use, bruteforce = brute force matching, cascading-hash = variant'+\
                        ' on cascading hash method (default=cascading-hash)')
    parser.add_argument('--outdir', default='ex01_out', type=str,
                        help='output is placed in this directory (default="ex01_out")')
    parser.add_argument('--rsf', default=1., type=float, action='store',
                        help='resampling factor (along epipolar lines) when performing rectification (default=1.)')
    parser.add_argument('--cache', action='store_true',
                        help='cache the keypoint matches per session, if a cached output exists, execution starts at step 3 (default=False)')
    parser.add_argument('--use_sift_striped', action='store_true',
                        help='use striped version of SIFT keypoint computation, may result in slightly different results, but is more efficient (default=False)')
    parser.add_argument('--cpu_count', default=8, type=int, action='store',
                        help='number of cpus to use for multi-threaded code (default=8)')
    _args = parser.parse_args()
    run(_args)
|
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
import matplotlib.pyplot as plt
from skimage import data, util
from skimage.draw import ellipse
from skimage.measure import label, regionprops
from skimage.transform import rotate
import matplotlib.pyplot as plt
from scipy.stats import skew
from scipy.stats import kurtosis
import math
# Ask the user to pick an image file via a (hidden) Tk file dialog, then
# tear the Tk root window down again.
root = Tk()
root.withdraw()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
#img = cv.imread(root.filename,0)
root.destroy()
# Write Python code here
# import the necessary packages
import cv2
import argparse
# now let's initialize the list of reference point
# ref_point holds the two (x, y) drag corners; crop flags an active crop.
ref_point = []
crop = False
def shape_selection(event, x, y, flags, param):
    """Mouse callback: record the drag start/end corners and draw the
    selected ellipse onto the module-level `image` in place.
    """
    # grab references to the global variables
    global ref_point, crop
    # if the left mouse button was clicked, record the starting
    # (x, y) coordinates and indicate that cropping is being performed
    if event == cv2.EVENT_LBUTTONDOWN:
        ref_point = [(x, y)]
    # check to see if the left mouse button was released
    elif event == cv2.EVENT_LBUTTONUP:
        # center = (x + w//2, y + h//2)
        # cv2.ellipse(img, center, (w//2, h//2), 0, 0, 360,(100, 7, 55), 2)
        # record the ending (x, y) coordinates and indicate that
        # the cropping operation is finished
        ref_point.append((x, y))
        #frame = cv2.ellipse(image, center, (w//2, h//2), 0, 0, 360,(100, 7, 55), 2)
        # Ellipse center is the midpoint of the dragged rectangle
        # (Punto_antes = start corner, Punto_actual = end corner).
        Punto_antes = ref_point[0]
        Punto_actual = ref_point[1]
        center = (Punto_antes[0]+(Punto_actual[0]-Punto_antes[0])//2,Punto_antes[1]+(Punto_actual[1]-Punto_antes[1])//2)
        #frame = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),1)
        cv2.ellipse(image, center, ((Punto_actual[0]-Punto_antes[0])//2,(Punto_actual[1]-Punto_antes[1])//2), 0, 0, 360,(100, 7, 55), 2)
        # draw a rectangle around the region of interest
        #cv2.rectangle(image, ref_point[0], ref_point[1], (0, 255, 0), 2)
        cv2.imshow("image", image)
# load the image, clone it, and setup the mouse callback function
image = cv2.imread(root.filename)
clone = image.copy()
cv2.namedWindow("image")
cv2.setMouseCallback("image", shape_selection)
# keep looping until the 'q' key is pressed
while True:
    # display the image and wait for a keypress
    cv2.imshow("image", image)
    key = cv2.waitKey(1) & 0xFF
    # press 'r' to reset the window
    if key == ord("r"):
        image = clone.copy()
    # if the 'c' key is pressed, break from the loop
    elif key == ord("c"):
        break
# Rectangular crop between the two drag corners (used for the histograms).
if len(ref_point) == 2:
    crop_img2 = clone[ref_point[0][1]:ref_point[1][1], ref_point[0][0]:
                      ref_point[1][0]]
height = image.shape[0]
width = image.shape[1]
Punto_antes = ref_point[0]
Punto_actual = ref_point[1]
center = (Punto_antes[0]+(Punto_actual[0]-Punto_antes[0])//2,Punto_antes[1]+(Punto_actual[1]-Punto_antes[1])//2)
# create a mask image of the same shape as input image, filled with 0s (black color)
mask = np.zeros_like(clone)
rows, cols,_ = mask.shape
# create a white filled ellipse
mask = cv2.ellipse(mask, center, ((Punto_actual[0]-Punto_antes[0])//2,(Punto_actual[1]-Punto_antes[1])//2), 0, 0, 360,(255, 255, 255), -1)
# Bitwise AND operation to black out regions outside the mask
crop_img = np.bitwise_and(clone,mask)
# Convert from BGR to RGB for displaying correctly in matplotlib
# Note that you needn't do this for displaying using OpenCV's imshow()
(h, w) = crop_img2.shape[:2] # size of the rectangular crop
sumCols = []
sumFils=[]
plt.figure(u'Histograma horizontal y vertical')
# First-order statistics over the flattened rectangular crop.
newarray=np.array(crop_img2.flatten())
meanArr=np.mean(newarray)
varArr=np.var(newarray)
skeArr=skew(newarray)
kurArr=kurtosis(newarray)
# Vertical histogram: per-column intensity sums over the crop.
for j in range(w):
    col = crop_img2[10:h, j:j+1] # y1:y2, x1:x2
    sumCols.append(np.sum(col))
plt.subplot(211)
plt.plot(sumCols)
plt.title(u'Histogramas vertical y horizontal')
plt.xlabel(u'Número de columnas')
plt.ylabel(u'Nivel de intensidad')
# Horizontal histogram: per-row intensity sums over the crop.
for j in range(h):
    cole = crop_img2[j:j+1, 10:w ] # y1:y2, x1:x2
    sumFils.append(np.sum(cole))
plt.subplot(212)
plt.plot(sumFils)
#plt.title(u'Histograma horizontal')
plt.xlabel(u'Número de filas')
plt.ylabel(u'Nivel de intensidad')
# First-order statistics of the row (H) and column (V) projection profiles.
meanH=np.mean(sumFils)
varH= np.var(sumFils)
stdH = np.std(sumFils)
skeH=skew(sumFils)
kurH=kurtosis(sumFils)
meanV=np.mean(sumCols)
varV= np.var(sumCols)
stdV = np.std(sumCols)
skeV=skew(sumCols)
kurV=kurtosis(sumCols)
# Structuring elements for the morphological clean-up below (only `kernel`
# is actually used; the others appear to be leftovers from experiments).
kernelCross = cv2.getStructuringElement(cv2.MORPH_CROSS,(9,9))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))
#kernel9 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(,5))
kerneldiamond = np.array([[0, 0, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [1, 1, 1, 1, 1],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 0, 0]], np.uint8)
kernel1 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(9,9))
# close all open windows
# Inverse-threshold the elliptical crop, erode, and find the outer contours
# of both the foreground and the inverted image.
crop_img =cv2.cvtColor(crop_img,cv2.COLOR_RGB2GRAY)
ret,thresh = cv.threshold(crop_img,130,255,cv.THRESH_BINARY_INV)
thresh = cv2.erode(thresh, kernel, iterations=6)
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
contours2, hierarchy = cv2.findContours(255-thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)
cnt = contours[0]
cnt2=contours2[0]
# Centroid of the inverted-image contour from its image moments.
M = cv.moments(cnt2)
# calculate x,y coordinate of center
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# put text and highlight the center
cv.circle(crop_img, (cX, cY), 3, (0, 0, 155), -1)
cv.putText(crop_img, "C", (cX - 1, cY - 10),cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 155), 2)
#MAx, MAy = int(0.5 * ellipseMajorAxisx*math.sin(ellipseAngle)), int(0.5 * ellipseMajorAxisy*math.cos(ellipseAngle))
altura=crop_img2.shape[0]
ancho=crop_img2.shape[1]
#area = altura*ancho
#area = cv.contourArea(cnt)
# Fit ellipses to both contours; area uses the semi-axes of the second fit.
(x,y),(MA,ma),angle = cv.fitEllipse(cnt)
(x2,y2),(MA2,ma2),angle2 = cv.fitEllipse(cnt2)
area=(int(MA2)//2)*(int(ma2)//2)*(math.pi)
# Axis end points around the centroid.
# NOTE(review): fitEllipse returns `angle` in degrees while np.sin/np.cos
# expect radians - confirm whether this mismatch is intentional.
xMinor=cX + int((MA/2)*np.cos(angle))
x2Minor=cX - int((MA/2)*np.cos(angle))
yMinor=cY + int((MA/2)*np.sin(angle))
y2Minor=cY - int((MA/2)*np.sin(angle))
xMajor=cX + int((ma/2)*np.sin(angle))
x2Major=cX - int((ma/2)*np.sin(angle))
yMajor=cY -int((ma/2)*np.cos(angle))
y2Major=cY +int((ma/2)*np.cos(angle))
cv.line(crop_img, (xMinor, yMinor),(x2Minor,y2Minor), (0, 0, 155), 1)
cv.line(crop_img, (xMajor, yMajor),(x2Major,y2Major), (0, 0, 155), 1)
#cv.line(im, (xMinor, yMinor),(cX, cY), (0, 255, 0), 1)
#cv.line(im, (xMajor, yMajor),(cX, cY), (0, 255,0), 1)
#cv.putText(crop_img, "A: "+str(int(area)), (cX - int(0.42*height), cY + int(0.42*width)),cv.FONT_HERSHEY_SIMPLEX, 0.42, (255, 55, 0),1)
cv.putText(crop_img, "D.Ma: "+str(int(ma2)), (int(0.1*height), cY-int(0.4*width)),cv.FONT_HERSHEY_SIMPLEX, 0.42, (255, 55, 0),1)
cv.putText(crop_img, "D.Me: "+str(int(MA2)), (int(0.1*height),cY - int(0.5*width)),cv.FONT_HERSHEY_SIMPLEX, 0.42, (255, 55, 0),1 )
# Report all computed statistics (user-facing messages kept in Spanish).
print("LOS VALORES OBTENIDOS SON :")
print("El área es "+str(area))
print("La media es: " + str(meanArr) +" Varianza " + str(varArr) +" Oblicuidad " + str(skeArr) +" Kurtosis "+ str(kurArr))
print("MEDIA HORIZONTAL " +str(meanH) + " Vrianza horizonral "+ str(varH) + " Desviación horizontal " + str(stdH))
print("MEDIA VERTICAL " + str(meanV) +" Vrianza VERTICAL " + str(varV) +" Desviación VERTICAL " + str(stdV))
print("Oblicuidad HORIZONTAL " +str(skeH) + " Oblicuidad VERTICAL "+ str(skeV) )
print("kurtosis HORIZONTAL " +str(kurH) + " kurtosis VERTICAL "+ str(kurV) )
cv.drawContours(crop_img,contours,-1,255,2)
#cv.drawContours(im,[cnt],0,(255,0,0),-1)
cv2.imshow('Fitting an Ellipse ',crop_img)
cv2.imshow('Fitting an ',255-thresh)
#cv2.imshow("cropeada",crop_img2)
plt.show()
cv2.waitKey(0)
# close all open windows
cv2.destroyAllWindows() |
# Import modules
import os
# Read input
def _read_groups():
    """Read input.txt (next to this script) and split it into per-group chunks."""
    location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # `with` closes the handle; the original left the file open.
    with open(os.path.join(location, 'input.txt'), 'r') as f:
        return f.read().split("\n\n")


def count_any_yes(groups):
    """PART 1: number of questions answered yes by ANYONE, summed over groups."""
    # Joining a group's lines and taking the set counts each question once.
    return sum(len(set(group.replace('\n', ''))) for group in groups)


def count_all_yes(groups):
    """PART 2: number of questions answered yes by EVERYONE, summed over groups."""
    total = 0
    for group in groups:
        member_sets = [set(answer) for answer in group.splitlines()]
        total += len(set.intersection(*member_sets))
    return total


if __name__ == '__main__':
    # Main guard added so importing this module no longer performs file I/O;
    # also removes the shadowing of the builtin `input`.
    groups = _read_groups()
    print('Part 1: ', count_any_yes(groups))
    print('Part 2: ', count_all_yes(groups))
import numpy as np
import os
import mdv
import re
from bert_serving.client import BertClient
from termcolor import colored
from imgcat import imgcat
# config like this:
mdv.term_columns = 60  # render markdown at 60 columns
topk = 5  # number of similar questions to display per query
prefix_q = '##### **Q:** '  # markdown prefix marking a question line in the README
stop_prefix = '<h2 align="center">:zap: Benchmark</h2>'  # stop parsing at this heading
start = False  # set once the first question has been seen while parsing
questions = []  # parsed FAQ questions, parallel to `answers`
answers = []  # accumulated markdown answer text per question
def start_client(port=5555, port_out=5556):
    """Interactive QA loop against a running bert-as-service server.

    Encodes all parsed questions once, then for each typed query: embeds it,
    ranks the FAQ questions by dot product, renders the best answer, and
    lists the top-k similar questions. Runs until Ctrl-C.
    """
    # port for pushing data from client to server
    # port_out for publishing results from server to client
    with BertClient(port=port, port_out=port_out) as bc:
        doc_vecs = bc.encode(questions)
        try:
            while True:
                query = input(colored('your question: ', 'green'))
                query_vec = bc.encode([query])[0]
                # compute simple dot product as score
                score = np.sum(query_vec * doc_vecs, axis=1)
                # Indices of the top-k scores, best first.
                topk_idx = np.argsort(score)[::-1][:topk]
                idx = topk_idx[0]
                print('> %s\t%s' % (colored('%.1f' %
                      score[idx], 'cyan'), colored(questions[idx], 'yellow')))
                format_answer(answers[idx])
                print('top %d questions similar to "%s"' %
                      (topk, colored(query, 'green')))
                for idx in topk_idx:
                    print('> %s\t%s' % (colored('%.1f' %
                          score[idx], 'cyan'), colored(questions[idx], 'yellow')))
        except KeyboardInterrupt:
            print("Goodbye...")
            pass
def format_answer(answer):
    """Render one markdown answer to the terminal.

    Lines containing an <img src="..."> reference are shown inline via
    imgcat; every other line is rendered as markdown through mdv.
    """
    for line in answer.splitlines():
        match = re.search('(?<=src=")[^"\?]+', line)
        if match:
            # Image paths in the README are relative to the repo checkout.
            imgcat(open("bert-as-service/" + match.group(0)))
        else:
            # BUG FIX: the original passed the Ellipsis literal (c_theme=...)
            # as the mdv color theme, which is not a valid theme value.
            # Omit the argument so mdv falls back to its default theme.
            formatted = mdv.main(md=line)
            print(formatted)
# Parse the bert-as-service README into the parallel `questions`/`answers`
# lists consumed by start_client().
with open('bert-as-service/README.md') as fp:
    for v in fp:
        v_strip = v.strip()
        # Stop at the benchmark heading; its line is folded into the last answer.
        if v_strip.startswith(stop_prefix):
            print('Done processing')
            index = len(questions)
            answers[index-1] += v_strip + "\n"
            break
        # A question line starts a new Q/A pair.
        if v_strip.startswith(prefix_q):
            v_line = v_strip.replace(prefix_q, '').strip()
            start = True
            questions.append(v_line)
            continue
        # After the first question, non-empty lines accumulate into the
        # answer of the most recent question.
        if start:
            if v_strip:
                index = len(questions)
                if len(answers) < index:
                    answers.append(v_strip)
                else:
                    answers[index-1] += v_strip + "\n"
start_client()
|
# Copyright (c) 2018 Andrew R. Kozlik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
This implements the high-level functions for SLIP-39, also called "Shamir Backup".
See https://github.com/satoshilabs/slips/blob/master/slip-0039.md.
"""
import hmac
from collections import defaultdict
from hashlib import pbkdf2_hmac
from typing import Dict, Iterable, List, Optional, Set, Tuple
from .i18n import _
from .mnemonic import Wordlist
Indices = Tuple[int, ...]
MnemonicGroups = Dict[int, Tuple[int, Set[Tuple[int, bytes]]]]
"""
## Simple helpers
"""
_RADIX_BITS = 10
"""The length of the radix in bits."""
def _bits_to_bytes(n: int) -> int:
return (n + 7) // 8
def _bits_to_words(n: int) -> int:
return (n + _RADIX_BITS - 1) // _RADIX_BITS
def _xor(a: bytes, b: bytes) -> bytes:
return bytes(x ^ y for x, y in zip(a, b))
"""
## Constants
"""
_ID_LENGTH_BITS = 15
"""The length of the random identifier in bits."""
_ITERATION_EXP_LENGTH_BITS = 5
"""The length of the iteration exponent in bits."""
_ID_EXP_LENGTH_WORDS = _bits_to_words(_ID_LENGTH_BITS + _ITERATION_EXP_LENGTH_BITS)
"""The length of the random identifier and iteration exponent in words."""
_CHECKSUM_LENGTH_WORDS = 3
"""The length of the RS1024 checksum in words."""
_DIGEST_LENGTH_BYTES = 4
"""The length of the digest of the shared secret in bytes."""
_CUSTOMIZATION_STRING = b"shamir"
"""The customization string used in the RS1024 checksum and in the PBKDF2 salt."""
_GROUP_PREFIX_LENGTH_WORDS = _ID_EXP_LENGTH_WORDS + 1
"""The length of the prefix of the mnemonic that is common to a share group."""
_METADATA_LENGTH_WORDS = _ID_EXP_LENGTH_WORDS + 2 + _CHECKSUM_LENGTH_WORDS
"""The length of the mnemonic in words without the share value."""
_MIN_STRENGTH_BITS = 128
"""The minimum allowed entropy of the master secret."""
_MIN_MNEMONIC_LENGTH_WORDS = _METADATA_LENGTH_WORDS + _bits_to_words(_MIN_STRENGTH_BITS)
"""The minimum allowed length of the mnemonic in words."""
_BASE_ITERATION_COUNT = 10000
"""The minimum number of iterations to use in PBKDF2."""
_ROUND_COUNT = 4
"""The number of rounds to use in the Feistel cipher."""
_SECRET_INDEX = 255
"""The index of the share containing the shared secret."""
_DIGEST_INDEX = 254
"""The index of the share containing the digest of the shared secret."""
"""
# External API
"""
class Slip39Error(RuntimeError):
    """Raised for any failure while decoding, combining or decrypting SLIP-39 shares."""
    pass
class Share:
    """
    Parsed form of a single SLIP-39 mnemonic share.

    Holds the share's identifying metadata together with its raw share
    value.  The ``index`` attribute (position in the user's input) is
    assigned by callers after construction.
    """
    def __init__(
        self,
        identifier: int,
        iteration_exponent: int,
        group_index: int,
        group_threshold: int,
        group_count: int,
        member_index: int,
        member_threshold: int,
        share_value: bytes,
    ):
        # Position of this mnemonic in the entered list; filled in externally.
        self.index = None
        # Set-wide parameters.
        self.identifier = identifier
        self.iteration_exponent = iteration_exponent
        # Group-level metadata.
        self.group_index = group_index
        self.group_count = group_count
        self.group_threshold = group_threshold
        # Member-level metadata and the share payload itself.
        self.member_index = member_index
        self.member_threshold = member_threshold
        self.share_value = share_value

    def common_parameters(self) -> tuple:
        """Return the values every share of one matching set must agree on."""
        return (self.identifier, self.iteration_exponent,
                self.group_threshold, self.group_count)
class EncryptedSeed:
    """
    Represents the encrypted master seed for BIP-32.
    """
    def __init__(self, identifier: int, iteration_exponent: int, encrypted_master_secret: bytes):
        # identifier and iteration_exponent parameterize the PBKDF2 salt and
        # iteration count used by decrypt().
        self.identifier = identifier
        self.iteration_exponent = iteration_exponent
        self.encrypted_master_secret = encrypted_master_secret
    def decrypt(self, passphrase: str) -> bytes:
        """
        Converts the Encrypted Master Secret to a Master Secret by applying the passphrase.
        This is analogous to BIP-39 passphrase derivation. We do not use the term "derive"
        here, because passphrase function is symmetric in SLIP-39. We are using the terms
        "encrypt" and "decrypt" instead.

        :param passphrase: user passphrase; None/empty is treated as ''.
        :return: the decrypted master secret as bytes.
        """
        passphrase = (passphrase or '').encode('utf-8')
        # Split the encrypted master secret into the two Feistel halves.
        ems_len = len(self.encrypted_master_secret)
        l = self.encrypted_master_secret[: ems_len // 2]
        r = self.encrypted_master_secret[ems_len // 2 :]
        salt = _get_salt(self.identifier)
        # Run the Feistel rounds in reverse order to invert encryption.
        for i in reversed(range(_ROUND_COUNT)):
            (l, r) = (
                r,
                _xor(l, _round_function(i, passphrase, self.iteration_exponent, salt, r)),
            )
        # The halves are swapped back on output.
        return r + l
def recover_ems(mnemonics: List[str]) -> EncryptedSeed:
    """
    Combines mnemonic shares to obtain the encrypted master secret which was previously
    split using Shamir's secret sharing scheme.
    Returns identifier, iteration exponent and the encrypted master secret.

    :raises Slip39Error: if the list is empty, the shares are inconsistent,
        or too few complete groups were provided.
    """
    if not mnemonics:
        raise Slip39Error("The list of mnemonics is empty.")
    (
        identifier,
        iteration_exponent,
        group_threshold,
        group_count,
        groups,
    ) = _decode_mnemonics(mnemonics)
    # Use only groups that have at least the threshold number of shares.
    groups = {group_index: group for group_index, group in groups.items() if len(group[1]) >= group[0]}
    if len(groups) < group_threshold:
        raise Slip39Error(
            "Insufficient number of mnemonic groups. Expected {} full groups, but {} were provided.".format(
                group_threshold, len(groups)
            )
        )
    # Two-level recovery: first each group's secret from its member shares,
    # then the master-level secret from the group secrets.
    group_shares = [
        (group_index, _recover_secret(group[0], list(group[1])))
        for group_index, group in groups.items()
    ]
    encrypted_master_secret = _recover_secret(group_threshold, group_shares)
    return EncryptedSeed(identifier, iteration_exponent, encrypted_master_secret)
def decode_mnemonic(mnemonic: str) -> Share:
    """Converts a share mnemonic to share data.

    :raises Slip39Error: on bad length, checksum, padding or metadata.
    """
    mnemonic_data = tuple(_mnemonic_to_indices(mnemonic))
    if len(mnemonic_data) < _MIN_MNEMONIC_LENGTH_WORDS:
        raise Slip39Error(_('Too short.'))
    # Number of unused bits in the share-value words; more than 8 would mean
    # a whole wasted byte, which no valid encoding produces.
    padding_len = (_RADIX_BITS * (len(mnemonic_data) - _METADATA_LENGTH_WORDS)) % 16
    if padding_len > 8:
        raise Slip39Error(_('Invalid length.'))
    if not _rs1024_verify_checksum(mnemonic_data):
        raise Slip39Error(_('Invalid mnemonic checksum.'))
    # The leading words pack the identifier and the iteration exponent.
    id_exp_int = _int_from_indices(mnemonic_data[:_ID_EXP_LENGTH_WORDS])
    identifier = id_exp_int >> _ITERATION_EXP_LENGTH_BITS
    iteration_exponent = id_exp_int & ((1 << _ITERATION_EXP_LENGTH_BITS) - 1)
    # The next two words hold five 4-bit fields of group/member metadata.
    tmp = _int_from_indices(
        mnemonic_data[_ID_EXP_LENGTH_WORDS : _ID_EXP_LENGTH_WORDS + 2]
    )
    (
        group_index,
        group_threshold,
        group_count,
        member_index,
        member_threshold,
    ) = _int_to_indices(tmp, 5, 4)
    value_data = mnemonic_data[_ID_EXP_LENGTH_WORDS + 2 : -_CHECKSUM_LENGTH_WORDS]
    if group_count < group_threshold:
        raise Slip39Error(_('Invalid mnemonic group threshold.'))
    value_byte_count = _bits_to_bytes(_RADIX_BITS * len(value_data) - padding_len)
    value_int = _int_from_indices(value_data)
    # The padding bits at the start of the share value must all be zero.
    if value_data[0] >= 1 << (_RADIX_BITS - padding_len):
        raise Slip39Error(_('Invalid mnemonic padding.'))
    value = value_int.to_bytes(value_byte_count, "big")
    # Thresholds and counts are stored off-by-one (0 encodes 1), hence the +1
    # here — the inverse of the -1 applied in _make_group_prefix.
    return Share(
        identifier,
        iteration_exponent,
        group_index,
        group_threshold + 1,
        group_count + 1,
        member_index,
        member_threshold + 1,
        value,
    )
def get_wordlist() -> Wordlist:
    """Load the SLIP-39 wordlist and sanity-check that it has 2**_RADIX_BITS entries."""
    words = Wordlist.from_file('slip39.txt')
    required_words = 2**_RADIX_BITS
    actual = len(words)
    if actual != required_words:
        raise Slip39Error(
            f"The wordlist should contain {required_words} words, but it contains {actual} words."
        )
    return words
def process_mnemonics(mnemonics: List[str]) -> Tuple[Optional[EncryptedSeed], str]:
    """
    Validate the entered mnemonics and report recovery progress.

    :return: ``(encrypted_seed, status_html)`` — the seed stays None until a
        complete and consistent set of shares has been supplied; the status
        string is an HTML progress/error message for display.
    """
    # Collect valid shares.
    shares = []
    for i, mnemonic in enumerate(mnemonics):
        try:
            share = decode_mnemonic(mnemonic)
            share.index = i + 1
            shares.append(share)
        except Slip39Error:
            # Undecodable mnemonics are skipped here; they are surfaced later
            # only if they block completion (see the length comparison below).
            pass
    if not shares:
        return None, _('No valid shares.')
    # Sort shares into groups.
    groups: Dict[int, Set[Share]] = defaultdict(set)  # group idx : shares
    common_params = shares[0].common_parameters()
    for share in shares:
        # Every share must agree on identifier/exponent/threshold/count.
        if share.common_parameters() != common_params:
            error_text = _("Share #{} is not part of the current set.").format(share.index)
            return None, _ERROR_STYLE % error_text
        # Reject duplicate member indices within the same group.
        for other in groups[share.group_index]:
            if share.member_index == other.member_index:
                error_text = _("Share #{} is a duplicate of share #{}.").format(share.index, other.index)
                return None, _ERROR_STYLE % error_text
        groups[share.group_index].add(share)
    # Compile information about groups.
    groups_completed = 0
    for i, group in groups.items():
        if group:
            member_threshold = next(iter(group)).member_threshold
            if len(group) >= member_threshold:
                groups_completed += 1
    identifier = shares[0].identifier
    iteration_exponent = shares[0].iteration_exponent
    group_threshold = shares[0].group_threshold
    group_count = shares[0].group_count
    status = ''
    # Multi-group sets get a per-group progress breakdown.
    if group_count > 1:
        status += _('Completed {} of {} groups needed').format(f"<b>{groups_completed}</b>", f"<b>{group_threshold}</b>")
        status += ":<br/>"
        for group_index in range(group_count):
            group_prefix = _make_group_prefix(identifier, iteration_exponent, group_index, group_threshold, group_count)
            status += _group_status(groups[group_index], group_prefix)
    if groups_completed >= group_threshold:
        if len(mnemonics) > len(shares):
            # Enough groups, but some of the entered mnemonics failed to decode.
            status += _ERROR_STYLE % _('Some shares are invalid.')
        else:
            try:
                encrypted_seed = recover_ems(mnemonics)
                status += '<b>' + _('The set is complete!') + '</b>'
            except Slip39Error as e:
                encrypted_seed = None
                status = _ERROR_STYLE % str(e)
            return encrypted_seed, status
    return None, status
"""
## Group status helpers
"""
# HTML fragments used to render share-group completion status.
_FINISHED = '<span style="color:green;">✔</span>'
_EMPTY = '<span style="color:red;">✕</span>'
_INPROGRESS = '<span style="color:orange;">⚫</span>'
# %-template wrapping an error message in red bold, prefixed with "Error:".
_ERROR_STYLE = '<span style="color:red; font-weight:bold;">' + _('Error') + ': %s</span>'
def _make_group_prefix(identifier, iteration_exponent, group_index, group_threshold, group_count):
    """Return the mnemonic-word prefix shared by all shares of one group."""
    wordlist = get_wordlist()
    # Pack identifier | exponent | group index | (threshold-1) | (count-1)
    # into one integer, mirroring the share encoding (thresholds and counts
    # are stored minus one — see the +1 in decode_mnemonic).
    val = identifier
    val <<= _ITERATION_EXP_LENGTH_BITS
    val += iteration_exponent
    val <<= 4
    val += group_index
    val <<= 4
    val += group_threshold - 1
    val <<= 4
    val += group_count - 1
    # Drop the low bits that don't fill a whole word of the prefix.
    val >>= 2
    prefix = ' '.join(wordlist[idx] for idx in _int_to_indices(val, _GROUP_PREFIX_LENGTH_WORDS, _RADIX_BITS))
    return prefix
def _group_status(group: Set[Share], group_prefix) -> str:
    """
    Render one line of HTML describing how complete a share group is.

    :param group: Shares collected so far for this group (may be empty).
    :param group_prefix: Mnemonic-word prefix identifying the group.
    :return: HTML fragment with a status icon and share counts.
    """
    # (A stray no-op statement ``len(group)`` was removed here.)
    if not group:
        return _EMPTY + _('{} shares from group {}').format('<b>0</b> ', f'<b>{group_prefix}</b>') + f'.<br/>'
    else:
        share = next(iter(group))
        icon = _FINISHED if len(group) >= share.member_threshold else _INPROGRESS
        return icon + _('{} of {} shares needed from group {}').format(f'<b>{len(group)}</b>', f'<b>{share.member_threshold}</b>', f'<b>{group_prefix}</b>') + f'.<br/>'
"""
## Convert mnemonics or integers to indices and back
"""
def _int_from_indices(indices: Indices) -> int:
    """Converts a list of base 1024 indices in big endian order to an integer value."""
    total = 0
    for idx in indices:
        total = (total << _RADIX_BITS) + idx
    return total
def _int_to_indices(value: int, output_length: int, bits: int) -> Iterable[int]:
"""Converts an integer value to indices in big endian order."""
mask = (1 << bits) - 1
return ((value >> (i * bits)) & mask for i in reversed(range(output_length)))
def _mnemonic_to_indices(mnemonic: str) -> List[int]:
    """Translate a mnemonic sentence into a list of wordlist indices.

    :raises Slip39Error: if any word is not in the SLIP-39 wordlist.
    """
    wordlist = get_wordlist()
    indices = []
    for word in mnemonic.split():
        try:
            indices.append(wordlist.index(word.lower()))
        except ValueError:
            # Truncate very long unknown words so the error message stays short.
            if len(word) > 8:
                word = word[:8] + '...'
            raise Slip39Error(_('Invalid mnemonic word') + ' "%s".' % word) from None
    return indices
"""
## Checksum functions
"""
def _rs1024_polymod(values: Indices) -> int:
GEN = (
0xE0E040,
0x1C1C080,
0x3838100,
0x7070200,
0xE0E0009,
0x1C0C2412,
0x38086C24,
0x3090FC48,
0x21B1F890,
0x3F3F120,
)
chk = 1
for v in values:
b = chk >> 20
chk = (chk & 0xFFFFF) << 10 ^ v
for i in range(10):
chk ^= GEN[i] if ((b >> i) & 1) else 0
return chk
def _rs1024_verify_checksum(data: Indices) -> bool:
    """
    Verifies a checksum of the given mnemonic, which was already parsed into Indices.
    """
    # The customization string is prepended before running the polymod.
    padded = tuple(_CUSTOMIZATION_STRING) + data
    return _rs1024_polymod(padded) == 1
"""
## Internal functions
"""
def _precompute_exp_log() -> Tuple[List[int], List[int]]:
exp = [0 for i in range(255)]
log = [0 for i in range(256)]
poly = 1
for i in range(255):
exp[i] = poly
log[poly] = i
# Multiply poly by the polynomial x + 1.
poly = (poly << 1) ^ poly
# Reduce poly by x^8 + x^4 + x^3 + x + 1.
if poly & 0x100:
poly ^= 0x11B
return exp, log
_EXP_TABLE, _LOG_TABLE = _precompute_exp_log()
def _interpolate(shares, x) -> bytes:
    """
    Returns f(x) given the Shamir shares (x_1, f(x_1)), ... , (x_k, f(x_k)).
    :param shares: The Shamir shares.
    :type shares: A list of pairs (x_i, y_i), where x_i is an integer and y_i is an array of
    bytes representing the evaluations of the polynomials in x_i.
    :param int x: The x coordinate of the result.
    :return: Evaluations of the polynomials in x.
    :rtype: Array of bytes.
    :raises Slip39Error: on duplicate x coordinates or mismatched lengths.
    """
    x_coordinates = set(share[0] for share in shares)
    if len(x_coordinates) != len(shares):
        raise Slip39Error("Invalid set of shares. Share indices must be unique.")
    share_value_lengths = set(len(share[1]) for share in shares)
    if len(share_value_lengths) != 1:
        raise Slip39Error(
            "Invalid set of shares. All share values must have the same length."
        )
    # If x is already one of the given points, no interpolation is needed.
    if x in x_coordinates:
        for share in shares:
            if share[0] == x:
                return share[1]
    # Lagrange interpolation in GF(256): multiplications become additions of
    # logarithms via _LOG_TABLE/_EXP_TABLE; '^' is the field subtraction.
    # Logarithm of the product of (x_i - x) for i = 1, ... , k.
    log_prod = sum(_LOG_TABLE[share[0] ^ x] for share in shares)
    result = bytes(share_value_lengths.pop())
    for share in shares:
        # The logarithm of the Lagrange basis polynomial evaluated at x.
        log_basis_eval = (
            log_prod
            - _LOG_TABLE[share[0] ^ x]
            - sum(_LOG_TABLE[share[0] ^ other[0]] for other in shares)
        ) % 255
        # Accumulate share_value * basis into the running XOR sum, byte-wise.
        result = bytes(
            intermediate_sum
            ^ (
                _EXP_TABLE[(_LOG_TABLE[share_val] + log_basis_eval) % 255]
                if share_val != 0
                else 0
            )
            for share_val, intermediate_sum in zip(share[1], result)
        )
    return result
def _round_function(i: int, passphrase: bytes, e: int, salt: bytes, r: bytes) -> bytes:
    """The round function used internally by the Feistel cipher.

    PBKDF2-HMAC-SHA256 keyed by the round index and passphrase; the iteration
    budget (_BASE_ITERATION_COUNT << e) is split evenly across the rounds.
    """
    iterations = (_BASE_ITERATION_COUNT << e) // _ROUND_COUNT
    key = bytes([i]) + passphrase
    return pbkdf2_hmac("sha256", key, salt + r, iterations, dklen=len(r))
def _get_salt(identifier: int) -> bytes:
    """PBKDF2 salt: the customization string followed by the big-endian identifier."""
    id_bytes = identifier.to_bytes(_bits_to_bytes(_ID_LENGTH_BITS), "big")
    return _CUSTOMIZATION_STRING + id_bytes
def _create_digest(random_data: bytes, shared_secret: bytes) -> bytes:
    """First _DIGEST_LENGTH_BYTES of HMAC-SHA256(key=random_data, msg=shared_secret)."""
    mac = hmac.new(random_data, shared_secret, "sha256").digest()
    return mac[:_DIGEST_LENGTH_BYTES]
def _recover_secret(threshold: int, shares: List[Tuple[int, bytes]]) -> bytes:
    """Recover the shared secret from ``threshold``-of-n Shamir shares.

    :raises Slip39Error: if the embedded digest check fails, meaning the
        supplied shares are inconsistent or insufficient.
    """
    # If the threshold is 1, then the digest of the shared secret is not used.
    if threshold == 1:
        return shares[0][1]
    # The secret and its digest live at two reserved x-coordinates.
    shared_secret = _interpolate(shares, _SECRET_INDEX)
    digest_share = _interpolate(shares, _DIGEST_INDEX)
    digest = digest_share[:_DIGEST_LENGTH_BYTES]
    random_part = digest_share[_DIGEST_LENGTH_BYTES:]
    # Integrity check: the digest share must equal HMAC(random_part, secret).
    if digest != _create_digest(random_part, shared_secret):
        raise Slip39Error("Invalid digest of the shared secret.")
    return shared_secret
def _decode_mnemonics(
    mnemonics: List[str],
) -> Tuple[int, int, int, int, MnemonicGroups]:
    """Decode a set of mnemonics and cross-validate their shared metadata.

    :return: ``(identifier, iteration_exponent, group_threshold, group_count,
        groups)`` where ``groups`` maps a group index to
        ``(member_threshold, {(member_index, share_value), ...})``.
    :raises Slip39Error: if the mnemonics are mutually inconsistent.
    """
    identifiers = set()
    iteration_exponents = set()
    group_thresholds = set()
    group_counts = set()
    # { group_index : [threshold, set_of_member_shares] }
    groups = {}  # type: MnemonicGroups
    for mnemonic in mnemonics:
        share = decode_mnemonic(mnemonic)
        identifiers.add(share.identifier)
        iteration_exponents.add(share.iteration_exponent)
        group_thresholds.add(share.group_threshold)
        group_counts.add(share.group_count)
        group = groups.setdefault(share.group_index, (share.member_threshold, set()))
        if group[0] != share.member_threshold:
            raise Slip39Error(
                "Invalid set of mnemonics. All mnemonics in a group must have the same member threshold."
            )
        group[1].add((share.member_index, share.share_value))
    # Each of these sets must have collapsed to a single agreed value.
    if len(identifiers) != 1 or len(iteration_exponents) != 1:
        raise Slip39Error(
            "Invalid set of mnemonics. All mnemonics must begin with the same {} words.".format(
                _ID_EXP_LENGTH_WORDS
            )
        )
    if len(group_thresholds) != 1:
        raise Slip39Error(
            "Invalid set of mnemonics. All mnemonics must have the same group threshold."
        )
    if len(group_counts) != 1:
        raise Slip39Error(
            "Invalid set of mnemonics. All mnemonics must have the same group count."
        )
    # Member indices within each group must be unique (duplicate values with
    # different payloads would otherwise silently collide).
    for group_index, group in groups.items():
        if len(set(share[0] for share in group[1])) != len(group[1]):
            raise Slip39Error(
                "Invalid set of shares. Member indices in each group must be unique."
            )
    return (
        identifiers.pop(),
        iteration_exponents.pop(),
        group_thresholds.pop(),
        group_counts.pop(),
        groups,
    )
|
# from rest_framework import serializers
# from api.models import Board
#
# class BoardSerializer(serializers.Serializer):
# id = serializers.IntegerField(read_only=True)
# title = serializers.CharField(required=False, allow_blank=True, max_length=100)
# type = serializers.CharField(max_length=20, default='public')
#
# def create(self, validated_data):
# return Board.objects.create(**validated_data)
#
# def update(self, instance, validated_data):
# instance.title = validated_data.get('title', instance.title)
#
# instance.type = validated_data.get('type', instance.type)
# instance.save()
# return instance
from rest_framework import serializers
from django.utils import timezone
from api.models import Board, TaskList, Card
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializes a Django auth User together with the ids of its boards."""
    # Related boards exposed as a writable list of Board primary keys.
    boards = serializers.PrimaryKeyRelatedField(many=True, queryset=Board.objects.all())
    class Meta:
        model = User
        fields = ('id', 'username', 'boards')
class CardSerializer(serializers.ModelSerializer):
    """Serializes a Card: its name, description and owning task list."""
    class Meta:
        model = Card
        fields = ('name','description','task_list')
class TaskListSerializer(serializers.ModelSerializer):
    """Serializes a TaskList with its nested, read-only cards."""
    # Nested cards are read-only; card mutations go through CardSerializer.
    card = CardSerializer(read_only=True, many=True)
    class Meta:
        model = TaskList
        fields = ('id','board','name','card')
class BoardSerializer(serializers.ModelSerializer):
    """Serializes a Board with its nested task lists and owner username."""
    # Nested task lists are read-only; mutations go through TaskListSerializer.
    # (Removed a leftover debug comment that referenced an undefined name.)
    task_list = TaskListSerializer(read_only=True, many=True)
    # Owner is exposed as a read-only username derived from the relation.
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Board
        fields = ('id', 'name', 'pub_date', 'task_list', 'owner')
|
import pandas
import numpy as np  # moved to the top: imports belong at the start of the script

## reading using read_csv in pandas
df = pandas.read_csv('hrdata.txt')
print(df)
print(type(df['Salary'][0]))
print(type(df['Hire Date'][0]))
# Use the Name field as the index.
df1 = pandas.read_csv('hrdata.txt', index_col='Name')
print(df1)
# Parse 'Hire Date' into real datetime values instead of plain strings.
df2 = pandas.read_csv('hrdata.txt', index_col='Name', parse_dates=['Hire Date'])
print(df2)
print(type(df2['Hire Date'][0]))
# Override the header row with user-defined column names.
df3 = pandas.read_csv('hrdata.txt', index_col='Employee', parse_dates=['Date'], header=0,
                      names=['Employee', 'Date', 'Salary', 'Days'])
print(df3)
## writing using to_csv in pandas
df2.to_csv('hrdata_write.csv')
hrdata = np.array(df2)
print(hrdata)
|
from tkinter import *
from tkinter import messagebox  # fixed: bare `import messagebox` fails on Python 3
import tkinter

top = tkinter.Tk()

def p_h():
    """Callback fired whenever a condiment checkbutton is toggled."""
    print("Hello")

# Menubutton that drops down a menu of checkbuttons.
mb = Menubutton(top, text="condiments")
mb.grid()
mb.menu = Menu(mb, tearoff=0)
mb["menu"] = mb.menu

# One IntVar per checkbutton tracks its checked state (0/1).
mayoVar = IntVar()
ketchVar = IntVar()
mb.menu.add_checkbutton(label="mayo", variable=mayoVar, command=p_h)
mb.menu.add_checkbutton(label="ketchup", variable=ketchVar, command=p_h)
top.mainloop()
# Rename Tool: renames every selected object to "<name>_<index>".
def RenamerTool(name):
    """Rename all currently selected Maya objects to name_0, name_1, ...

    :param name: Base name to which a running index is appended.
    """
    # Fixed: the original contained a MEL-style `string ...` declaration
    # (invalid Python) and unpacked enumerate() in the wrong order while
    # referencing `sels` before it was defined.
    sels = cmds.ls(sl=True)
    for i, sel in enumerate(sels):
        newName = "%s_%i" % (name, i)
        cmds.rename(sel, newName)
# ---- Above is the actual function, and Below is the UI wrapper
class RenamerUI():
    """Simple Maya window with a text field for entering the new base name."""

    def __init__(self):  # fixed: original had `def__init__` (missing space)
        self.mWindow = "RenamerWindow"

    def create(self):
        """(Re)build the window, deleting any previous instance first."""
        self.delete()
        self.mWindow = cmds.window(self.mWindow, title="Renamer")
        self.mCol = cmds.columnLayout(parent=self.mWindow, adjustableColumn=True)
        self.nameField = cmds.textField(placeholderText="Enter New Name")
        # TODO(review): wire the field to rename_objects, e.g.
        # cmds.textField(self.nameField, e=True,
        #                enterCommand=lambda *_: self.rename_objects(
        #                    cmds.textField(self.nameField, q=True, text=True)))
        cmds.showWindow(self.mWindow)

    def delete(self):
        """Delete the window if it already exists."""
        if cmds.window(self.mWindow, q=True, exists=True):
            cmds.deleteUI(self.mWindow)

    def rename_objects(self, name):
        """Rename every selected object to name_0, name_1, ...

        Fixed the same invalid `string` declaration and reversed
        enumerate() unpacking as in RenamerTool.
        """
        sels = cmds.ls(sl=True)
        for i, sel in enumerate(sels):
            newName = "%s_%i" % (name, i)
            cmds.rename(sel, newName)
// NOTE(review): this section was MEL source shattered across lines by a bad
// paste; reconstructed below into syntactically valid MEL. Verify in Maya.

// Rename Tool window procedure
global proc RenamerTool()
{
    string $renameWind = "RenamerScript";
    if (`window -exists $renameWind`)
        deleteUI $renameWind;
    $renameWind = `window -title "Renamer Tool" $renameWind`;

    // Main column layout
    string $renameWindCol = `columnLayout -parent $renameWind
        -columnAttach "both" 20 -rowSpacing 10 -columnWidth 318`;

    // Row 1: the new name of the item
    string $newNameRow = `rowLayout -parent $renameWindCol -numberOfColumns 2`;
    text -parent $newNameRow -label "New Name" -width 75;
    string $renameText = `textField -parent $newNameRow
        -placeholderText "name_##_drb_drb_drb" -width 200`;

    // Row 2: the first number in the sequence
    string $newNumRow = `rowLayout -parent $renameWindCol -numberOfColumns 2`;
    text -parent $newNumRow -label "First Number" -width 75;
    string $firstNumber = `intField -parent $newNumRow -value 1 -width 200`;

    string $renameButton = `button -parent $renameWindCol -label "Generate"
        -command ("Renamer(\"" + $renameText + "\", \"" + $firstNumber + "\")")`;
    text -parent $renameWindCol -label "";

    showWindow $renameWind;
}

// Gets an int value from a window intField
global proc int GetIntVal(string $control)
{
    int $value = `intField -q -v $control`;
    return $value;
}

// Gets a string value from a window textField
global proc string GetTextVal(string $control)
{
    string $value = `textField -q -text $control`;
    return $value;
}

// Rename Tool function: replaces a run of '#' in the pattern with a
// zero-padded running number for each selected object.
global proc Renamer(string $longString, string $startNum)
{
    string $longStringValue = GetTextVal($longString);
    int $itemNumInt = GetIntVal($startNum);

    string $Sels[] = `ls -selection`;           // selected objects
    string $longName[];                          // parts before/after the hashes
    tokenize $longStringValue "#" $longName;     // split pattern on '#'

    int $num1 = size($longName[0]);              // length of the first part
    int $num2 = size($longName[1]);              // length of the second part
    int $num3 = size($longStringValue);          // length of the whole pattern
    int $hashes = ($num3 - ($num2 + $num1));     // number of '#' padding slots

    for ($objs in $Sels) {
        string $itemNumString = (string($itemNumInt));
        int $num4 = size($itemNumString);
        int $padding = $hashes - $num4;          // zeros needed for this item
        string $zeros = "";
        for ($x = 0; $x < $padding; $x++) {
            $zeros = $zeros + "0";
        }
        string $newName = $longName[0] + $zeros + $itemNumInt + $longName[1];
        rename $objs $newName;
        $itemNumInt++;
    }
}
|
import pickle
from string import punctuation
import nltk
import numpy as np
import pandas as pd
import torch
import torch.utils.data as tdata
from gensim.models import Doc2Vec
from nltk.corpus import stopwords
from sklearn import preprocessing
from tqdm import tqdm
def load_torch_data(dataset: tdata.Dataset, ratio: float, bs: int):
    """Split a torch dataset into shuffled training and validation loaders.

    Args:
        dataset (torch.utils.data.Dataset): loaded dataset
        ratio (float): fraction of samples assigned to validation
        bs (int): batch size

    Returns:
        ``(train_loader, val_loader, (train_size, val_size))``
    """
    n_samples = len(dataset)
    # Shuffle all indices once, then carve off the validation slice.
    shuffled = np.arange(n_samples)
    np.random.shuffle(shuffled)
    cut = int(np.floor(ratio * n_samples))
    val_idx = shuffled[:cut]
    train_idx = shuffled[cut:]
    # One sampler + loader per split over the same underlying dataset.
    loaders = []
    for subset in (train_idx, val_idx):
        sampler = tdata.SubsetRandomSampler(subset)
        loaders.append(tdata.DataLoader(dataset, batch_size=bs, sampler=sampler))
    train_loader, val_loader = loaders
    return train_loader, val_loader, (len(train_idx), len(val_idx))
def load_statistical_learning_data(path, model: Doc2Vec):
    """
    Load data for statistical learning
    Args:
        path (str): path to tagged dataset
        model (Doc2Vec): gensim doc2vec pretrained model
    Return:
        A tuple of train set and test set
    """
    # load dataset
    # NOTE(review): the pickle is expected to contain a (train, test) pair of
    # tagged-document collections — confirm against whatever wrote the file.
    with open(path, 'rb') as f:
        train_tagged, test_tagged = pickle.load(f)
    # Turn each split into {'features': ..., 'label': ...} dictionaries.
    print('Preprocess training data')
    train_data = tagged_data_preprocessor(model, train_tagged)
    print('Preprocess testing data')
    test_data = tagged_data_preprocessor(model, test_tagged)
    return train_data, test_data
def tagged_data_preprocessor(model, tagged_doc):
    """
    Preprocess tagged doc
    Args:
        model: pretrained doc2vec model
        tagged_doc: tagged document dataset
    Return:
        Dictionary containing features and labels
    """
    # NOTE(review): tagged_doc is expected to be a pandas Series of gensim
    # TaggedDocument entries (it exposes .values; items have .tags/.words) —
    # confirm against the caller.
    sents = tagged_doc.values
    # Infer one vector per document; doc.tags[0] doubles as the class label.
    labels, features = zip(*[(doc.tags[0], model.infer_vector(doc.words, steps=20)) for doc in tqdm(sents)])
    features = np.asarray(features)
    # Labels are cast to int and reshaped to a (n, 1) column for training.
    labels = np.asarray(list(map(int, labels))).reshape(-1, 1)
    return {'features': features, 'label': labels}
def elite_preprocessor(df: pd.DataFrame):
    """Balance and min-max scale the elite-net data frame.

    Args:
        df (pd.DataFrame): input data frame (mutated: 'user_id' is dropped)

    Return:
        Class-balanced, shuffled, [0, 1]-scaled data frame
    """
    # Drop the identifier column — it carries no learning signal.
    df.drop(columns='user_id', inplace=True)
    # Down-sample the negative class to the size of the positive class.
    positives = df.loc[df.elite == 1]
    negatives = df.loc[df.elite == 0].sample(positives.shape[0])
    # Stack both classes and shuffle the rows.
    balanced = pd.concat([positives, negatives]).sample(frac=1)
    # Free the intermediate frames before scaling.
    del positives, negatives
    # Rescale every column into [0, 1].
    scaler = preprocessing.MinMaxScaler()
    return pd.DataFrame(scaler.fit_transform(balanced))
def multimodal_classification_preprocessor(df: pd.DataFrame):
    """
    Balance and partially scale the multimodal-classifier data frame.

    Args:
        df (pd.DataFrame): input data frame read from pandas
            (mutated: 'user_id' is dropped)

    Return:
        Class-balanced, shuffled frame with the first 13 columns scaled to [0, 1]
    """
    # Drop the identifier column — it carries no learning signal.
    df.drop(columns='user_id', inplace=True)
    # Down-sample the negative class to the size of the positive class.
    positives = df.loc[df.usefulness == 1]
    negatives = df.loc[df.usefulness == 0].sample(positives.shape[0])
    # Stack both classes and shuffle the rows.
    balanced = pd.concat([positives, negatives]).sample(frac=1)
    del positives, negatives
    # Only the first 13 columns are numeric features to rescale; the rest
    # (labels/embeddings) are left untouched.
    scaler = preprocessing.MinMaxScaler()
    balanced.iloc[:, :13] = scaler.fit_transform(balanced.iloc[:, :13])
    return balanced
def map_sentence_to_int(word_list, mapping):
    """Map each word to its integer id, falling back to the 'unk' id for unknowns."""
    return [mapping[word] if word in mapping else mapping['unk'] for word in word_list]
def text_preprocessor(df: pd.DataFrame, word2int_mapping):
    """Convert review texts into fixed-length (200) integer token sequences.

    Args:
        df (pd.DataFrame): frame with 'text' and 'usefulness' columns
        word2int_mapping: dict mapping word -> integer id ('unk' for unknown)

    Return:
        numpy array whose rows are 200 token ids followed by the usefulness label
    """
    def text2int_vec(text: str):
        # Tokenize, lowercase, and drop punctuation/stopwords before mapping.
        tokens = nltk.word_tokenize(text)
        tokens = list(filter(lambda x: x not in punctuation and x not in stop_words, map(str.lower, tokens)))
        int_vec = map_sentence_to_int(tokens, word2int_mapping)
        # Truncate long reviews; left-pad short ones with zeros to length 200.
        if len(int_vec) > 200:
            int_vec = int_vec[: 200]
        else:
            int_vec = list(np.zeros(200 - len(int_vec))) + int_vec
        return pd.Series(int_vec)
    stop_words = set(stopwords.words('english'))
    vectors = df.text.apply(text2int_vec)
    # Append the label as the final column of each row.
    return np.concatenate(
        (vectors.values, df.usefulness.values.reshape(-1, 1)),
        axis=1
    )
def text_lstm_dataloader_factory(x_dir: str, y_dir: str, bs: int):
    """
    Data loader factory for the text LSTM.

    Args:
        x_dir (str): path of the saved feature array (.npy)
        y_dir (str): path of the saved label array (.npy), shape (n, 1)
        bs (int): batch size

    Return:
        Tuple of (shuffling data loader, dataset size)
    """
    # Load arrays from disk; labels arrive as a column vector, flatten them.
    features = np.load(x_dir)
    labels = np.load(y_dir).squeeze(1)
    n_rows = features.shape[0]
    # Wrap as tensors; token ids must be int64 for embedding lookups.
    tensor_ds = tdata.TensorDataset(
        torch.from_numpy(features).to(torch.int64),
        torch.from_numpy(labels).to(torch.long),
    )
    # make sure to SHUFFLE your data
    loader = tdata.DataLoader(tensor_ds, shuffle=True, batch_size=bs)
    return loader, n_rows
|
from django.conf.urls import patterns, url
from questions.rest_views import QuestionCatalogueList, QuestionCatalogueDetail, QuestionCatalogueSeevcam, \
QuestionDetails, QuestionListSeevcam, QuestionList
from dashboard.views import DashboardView as EmptyView
# NOTE(review): django.conf.urls.patterns() was removed in Django 1.10 —
# presumably this project pins an older Django; confirm before upgrading.
# REST API endpoints for question catalogues and their questions.
rest_patterns = patterns('',
    url(r'^catalogue/?$', QuestionCatalogueList.as_view()),
    # Seevcam Scope
    url(r'^catalogue/seevcam/?$', QuestionCatalogueSeevcam.as_view()),
    url(r'^catalogue/seevcam/(?P<question_catalogue>[0-9]+)/list/?$',
        QuestionListSeevcam.as_view()),
    # Private Scope
    url(r'^catalogue/(?P<pk>[0-9]+)/?$', QuestionCatalogueDetail.as_view()),
    url(r'^catalogue/(?P<question_catalogue>[0-9]+)/list/?$', QuestionList.as_view()),
    url(r'^catalogue/(?P<question_catalogue>[0-9]+)/list/(?P<pk>[0-9]+)/?$',
        QuestionDetails.as_view()))
# HTML views: both routes render the same dashboard shell (client-side routing).
html_patterns = patterns('',
    url(r'^$', EmptyView.as_view(), name='questions'),
    url(r'(?P<pk>[0-9]+)/?$', EmptyView.as_view(), name='openCatalogue'))
urlpatterns = rest_patterns + html_patterns
import re
#Question1
def get_middle_chars(given_str):
size = len(given_str)
sep = (size - 3)//2
given_str = given_str[sep:size-sep]
return given_str
#Question2
def append_in_the_middle(s1, s2):
middle = len(s1) // 2
print("{}{}{}".format(s1[:middle], s2, s1[middle:]))
#Question3
def strings(s1, s2):
m1 = len(s1) // 2
m2 = len(s2) // 2
s3 = s1[0] + s2[0] + s1[m1] + s2[m2] + s1[-1] + s2[-1]
return s3
#Question4
def lower_upper(given_str):
lower = []
upper = []
for i in given_str:
if i.isupper():
upper.append(i)
else:
lower.append(i)
return_string = ''.join(lower + upper)
print(return_string)
#Question5
def counting(given_str):
digits = []
lower = []
upper = []
special_symbols = []
for char in given_str:
if char.isdigit():
digits.append(char)
elif char.isupper():
upper.append(char)
elif char.islower():
lower.append(char)
else:
special_symbols.append(char)
print("Chars = {}\nUpper = {}\nLower = {}\nDigits = {}\nSymbols = {}"
.format(len(lower + upper), len(upper), len(lower), len(digits), len(special_symbols)))
#Question6
def mixed_string(s1, s2):
s2 = s2[::-1]
lengthS1 = len(s1)
lengthS2 = len(s2)
length = lengthS1 if lengthS1 > lengthS2 else lengthS2
resultString = ""
for i in range(length):
if (i < lengthS1):
resultString = resultString + s1[i]
if (i < lengthS2):
resultString = resultString + s2[i]
print(resultString)
#Question7
def string_balance_check(s1, s2):
result = True
for i in s1:
if i in s2:
continue
else:
result = False
return result
#Question8
def finding(str, substr):
count = str.lower().count(substr.lower())
print("The {} count is {}:\t".format(substr, count))
#Question9
def digits():
inputStr = "English = 78 Science = 83 Math = 68 History = 65"
markList = [int(num) for num in re.findall(r'\b\d+\b', inputStr)]
totalMarks = 0
for mark in markList:
totalMarks += mark
percentage = totalMarks / len(markList)
print("Total Marks is:", totalMarks, "Percentage is ", percentage)
#Question10
def count_chars(given_str):
count_dict = dict()
for i in given_str:
count = given_str.count(i)
count_dict[i] = count
print(count_dict)
#Question11
def reverse_string(given_string):
print(given_string[::-1])
#Question12
def pos_of_substr():
str1 = "Emma is a data scientist who knows Python. Emma works at google."
print("Original String is:", str1)
index = str1.rfind("Emma")
print("Last occurrence of Emma starts at", index)
#Question13
def split_to_substr():
str1 = "Emma-is-a-data-scientist"
print("Original String is:", str1)
substrings = str1.split("-")
print("Displaying each substring")
for sub in substrings:
print(sub)
#Question14
def removing_empty_strings():
str_list = ["Emma", "Jon", "", "Kelly", None, "Eric", ""]
print("Original list of sting")
print(str_list)
new_str_list = list(filter(None, str_list))
print("After removing empty strings")
print(new_str_list)
#Question15
def without_symbols():
str1 = "/*Jon is @developer & musician"
print("Original string is:\t ", str1)
str2 = re.sub(r'[^\w\s]', '', str1)
print("New string is:\t", str2)
#Question16
def only_digits():
str1 = 'I am 25 years and 10 months old'
print("Original string is", str1)
res = "".join([item for item in str1 if item.isdigit()])
print(res)
#Question17
def alphabets_numbers():
pass
#Question18
def replacing():
pass
# str1 = '/*Jon is @developer & musician!!'
# print("The original string is : ", str1)
# replace_char = '#'
# for char in punctuation:
# str1 = str1.replace(char, replace_char)
# print("The strings after replacement : ", str1)
if __name__ == "__main__":
    # Interactive menu: run one exercise per loop iteration until the user quits.
    while True:
        try:
            case = int(input("\nEnter the number of question which you want to run 1/2/3/.../15, 0 to QUIT\t"))
        except ValueError:
            # Non-numeric input used to crash the menu; treat it as a bad choice.
            print("Please enter the correct question number")
            continue
        if case == 0:
            break
        elif case == 1:
            print(get_middle_chars('JhonDipPeta'))
        elif case == 2:
            append_in_the_middle("Ault", "Kelly")
        elif case == 3:
            s1 = input("Enter s1:\t")
            s2 = input("Enter s2:\t")
            print(strings(s1, s2))
        elif case == 4:
            # Renamed local from `str` — shadowing the builtin broke str() below.
            line = input("Enter your line:\t")
            lower_upper(line)
        elif case == 5:
            line = input("Enter your line:\t")
            counting(line)
        elif case == 6:
            s1 = input("Enter s1:\t")
            s2 = input("Enter s2:\t")
            mixed_string(s1, s2)
        elif case == 7:
            s1 = input("Enter s1:\t")
            s2 = input("Enter s2:\t")
            print(string_balance_check(s1, s2))
        elif case == 8:
            s = input("Input your line:\t")
            sub = input("Input what do you want to find:\t")
            finding(s, sub)
        elif case == 9:
            digits()
        elif case == 10:
            line = input("Enter your line:\t")
            count_chars(line)
        elif case == 11:
            line = input("Enter your line:\t")
            reverse_string(line)
        elif case == 12:
            pos_of_substr()
        elif case == 13:
            split_to_substr()
        elif case == 14:
            removing_empty_strings()
        elif case == 15:
            without_symbols()
        elif case == 16:
            only_digits()
        elif case == 17:
            alphabets_numbers()
        elif case == 18:
            replacing()
        else:
            print("Please enter the correct question number")
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Functional tests for :module:`flocker.node._docker`.
"""
from __future__ import absolute_import
import time
from functools import partial
from docker.errors import APIError
from docker import Client
# Docker-py uses 1.16 API by default, which isn't supported by docker, so force
# the use of the 1.15 API until we upgrade docker in flocker-dev
Client = partial(Client, version="1.15")
from twisted.trial.unittest import TestCase
from twisted.python.filepath import FilePath
from twisted.internet.defer import succeed, gatherResults
from twisted.internet.error import ConnectionRefusedError
from twisted.web.client import ResponseNeverReceived
from treq import request, content
from ...testtools import (
loop_until, find_free_port, DockerImageBuilder, assertContainsAll,
random_name)
from ..test.test_docker import make_idockerclient_tests
from .._docker import (
DockerClient, PortMap, Environment, NamespacedDockerClient,
BASE_NAMESPACE, Volume)
from ...control._model import RestartNever, RestartAlways, RestartOnFailure
from ..testtools import if_docker_configured, wait_for_unit_state
def namespace_for_test(test_case):
    """Build a per-test Docker namespace of the form ``ns-<random>``."""
    suffix = random_name(test_case)
    return u"ns-" + suffix
class IDockerClientTests(make_idockerclient_tests(
        lambda test_case: DockerClient(
            namespace=namespace_for_test(test_case)
        )
)):
    """
    ``IDockerClient`` tests for ``DockerClient``.
    """
    # The actual test methods are generated by make_idockerclient_tests;
    # setUp only ensures Docker is reachable before they run.
    @if_docker_configured
    def setUp(self):
        pass
class IDockerClientNamespacedTests(make_idockerclient_tests(
        lambda test_case: NamespacedDockerClient(
            namespace=namespace_for_test(test_case)
        )
)):
    """
    ``IDockerClient`` tests for ``NamespacedDockerClient``.
    """
    # Same generated interface tests as above, run against the namespaced
    # client wrapper.
    @if_docker_configured
    def setUp(self):
        pass
class GenericDockerClientTests(TestCase):
    """
    Functional tests for ``DockerClient`` and other clients that talk to
    real Docker.
    """
    clientException = APIError
    @if_docker_configured
    def setUp(self):
        self.namespacing_prefix = namespace_for_test(self)
    def make_client(self):
        # Overridden by NamespacedDockerClientTests so the same suite also
        # exercises the namespaced client implementation.
        return DockerClient(namespace=self.namespacing_prefix)
    def create_container(self, client, name, image):
        """
        Create (but don't start) a container via the supplied client.
        :param DockerClient client: The Docker API client.
        :param unicode name: The container name.
        :param unicode image: The image name.
        """
        container_name = client._to_container_name(name)
        client._client.create_container(
            name=container_name, image=image)
    def start_container(self, unit_name,
                        image_name=u"openshift/busybox-http-app",
                        ports=None, expected_states=(u'active',),
                        environment=None, volumes=(),
                        mem_limit=None, cpu_shares=None,
                        restart_policy=RestartNever(),
                        command_line=None):
        """
        Start a unit and wait until it reaches the `active` state or one of
        the supplied `expected_states`.
        :param unicode unit_name: See ``IDockerClient.add``.
        :param unicode image_name: See ``IDockerClient.add``.
        :param list ports: See ``IDockerClient.add``.
        :param expected_states: A sequence of activation states to wait for.
        :param environment: See ``IDockerClient.add``.
        :param volumes: See ``IDockerClient.add``.
        :param mem_limit: See ``IDockerClient.add``.
        :param cpu_shares: See ``IDockerClient.add``.
        :param restart_policy: See ``IDockerClient.add``.
        :param command_line: See ``IDockerClient.add``.
        :return: ``Deferred`` that fires with the ``DockerClient`` when
            the unit reaches the expected state.
        """
        client = self.make_client()
        d = client.add(
            unit_name=unit_name,
            image_name=image_name,
            ports=ports,
            environment=environment,
            volumes=volumes,
            mem_limit=mem_limit,
            cpu_shares=cpu_shares,
            restart_policy=restart_policy,
            command_line=command_line,
        )
        self.addCleanup(client.remove, unit_name)
        d.addCallback(lambda _: wait_for_unit_state(client, unit_name,
                                                    expected_states))
        d.addCallback(lambda _: client)
        return d
    def test_default_base_url(self):
        """
        ``DockerClient`` instantiated with a default base URL for a socket
        connection has a client HTTP url after the connection is made.
        """
        client = DockerClient()
        self.assertEqual(client._client.base_url,
                         u'http+unix://var/run/docker.sock')
    def test_custom_base_url_tcp_http(self):
        """
        ``DockerClient`` instantiated with a custom base URL for a TCP
        connection has a client HTTP url after the connection is made.
        """
        client = DockerClient(base_url=b"tcp://127.0.0.1:2375")
        self.assertEqual(client._client.base_url, b"http://127.0.0.1:2375")
    def test_add_starts_container(self):
        """
        ``DockerClient.add`` starts the container.
        """
        name = random_name(self)
        return self.start_container(name)
    def test_correct_image_used(self):
        """
        ``DockerClient.add`` creates a container with the specified image.
        """
        name = random_name(self)
        d = self.start_container(name)
        def started(_):
            docker = Client()
            data = docker.inspect_container(self.namespacing_prefix + name)
            self.assertEqual(data[u"Config"][u"Image"],
                             u"openshift/busybox-http-app")
        d.addCallback(started)
        return d
    def test_add_error(self):
        """
        ``DockerClient.add`` returns a ``Deferred`` that errbacks with
        ``APIError`` if response code is not a success response code.
        """
        client = self.make_client()
        # add() calls exists(), and we don't want exists() to be the one
        # failing since that's not the code path we're testing, so bypass
        # it:
        client.exists = lambda _: succeed(False)
        # Illegal container name should make Docker complain when we try to
        # install the container:
        d = client.add(u"!!!###!!!", u"busybox:latest")
        return self.assertFailure(d, self.clientException)
    def test_dead_is_listed(self):
        """
        ``DockerClient.list()`` includes dead units.
        We use a `busybox` image here, because it will exit immediately and
        reach an `inactive` substate of `dead`.
        There are no assertions in this test, because it will fail with a
        timeout if the unit with that expected state is never listed or if that
        unit never reaches that state.
        """
        name = random_name(self)
        d = self.start_container(unit_name=name, image_name="busybox:latest",
                                 expected_states=(u'inactive',))
        return d
    def test_list_with_missing_image(self):
        """
        ``DockerClient.list()`` can list containers whose image is missing.
        The resulting output may be inaccurate, but that's OK: this only
        happens for non-running containers, who at worst we're going to
        restart anyway.
        """
        path = FilePath(self.mktemp())
        path.makedirs()
        path.child(b"Dockerfile.in").setContent(
            b"FROM busybox\nCMD /bin/true\n")
        image_name = DockerImageBuilder(test=self, source_dir=path,
                                        cleanup=False).build()
        name = random_name(self)
        d = self.start_container(unit_name=name, image_name=image_name,
                                 expected_states=(u'inactive',))
        def stopped_container_exists(_):
            # Remove the image:
            docker_client = Client()
            docker_client.remove_image(image_name, force=True)
            # Should be able to still list the container:
            client = self.make_client()
            listed = client.list()
            listed.addCallback(lambda results: self.assertIn(
                (name, "inactive"),
                [(unit.name, unit.activation_state) for unit in results]))
            return listed
        d.addCallback(stopped_container_exists)
        return d
    def test_dead_is_removed(self):
        """
        ``DockerClient.remove()`` removes dead units without error.
        We use a `busybox` image here, because it will exit immediately and
        reach an `inactive` substate of `dead`.
        """
        name = random_name(self)
        d = self.start_container(unit_name=name, image_name="busybox:latest",
                                 expected_states=(u'inactive',))
        def remove_container(client):
            client.remove(name)
        d.addCallback(remove_container)
        return d
    def request_until_response(self, port):
        """
        Resend a test HTTP request until a response is received.
        The container may have started, but the webserver inside may take a
        little while to start serving requests.
        :param int port: The localhost port to which an HTTP request will be
            sent.
        :return: A ``Deferred`` which fires with the result of the first
            successful HTTP request.
        """
        def send_request():
            """
            Send an HTTP request in a loop until the request is answered.
            """
            response = request(
                b"GET", b"http://127.0.0.1:%d" % (port,),
                persistent=False)
            def check_error(failure):
                """
                Catch ConnectionRefused errors and response timeouts and return
                False so that loop_until repeats the request.
                Other error conditions will be passed down the errback chain.
                """
                failure.trap(ConnectionRefusedError, ResponseNeverReceived)
                return False
            response.addErrback(check_error)
            return response
        return loop_until(send_request)
    def test_add_with_port(self):
        """
        ``DockerClient.add`` accepts a ports argument which is passed to
        Docker to expose those ports on the unit.
        Assert that the busybox-http-app returns the expected "Hello world!"
        response.
        XXX: We should use a stable internal container instead. See
        https://clusterhq.atlassian.net/browse/FLOC-120
        XXX: The busybox-http-app returns headers in the body of its response,
        hence this over complicated custom assertion. See
        https://github.com/openshift/geard/issues/213
        """
        expected_response = b'Hello world!\n'
        external_port = find_free_port()[1]
        name = random_name(self)
        d = self.start_container(
            name, ports=[PortMap(internal_port=8080,
                                 external_port=external_port)])
        d.addCallback(
            lambda ignored: self.request_until_response(external_port))
        def started(response):
            d = content(response)
            d.addCallback(lambda body: self.assertIn(expected_response, body))
            return d
        d.addCallback(started)
        return d
    def build_slow_shutdown_image(self):
        """
        Create a Docker image that takes a while to shut down.
        This should really use Python instead of shell:
        https://clusterhq.atlassian.net/browse/FLOC-719
        :return: The name of created Docker image.
        """
        path = FilePath(self.mktemp())
        path.makedirs()
        path.child(b"Dockerfile.in").setContent("""\
FROM busybox
CMD sh -c "trap \"\" 2; sleep 3"
""")
        image = DockerImageBuilder(test=self, source_dir=path)
        return image.build()
    def test_add_with_environment(self):
        """
        ``DockerClient.add`` accepts an environment object whose ID and
        variables are used when starting a docker image.
        """
        docker_dir = FilePath(self.mktemp())
        docker_dir.makedirs()
        docker_dir.child(b"Dockerfile").setContent(
            b'FROM busybox\n'
            b'CMD ["/bin/sh",  "-c", '
            b'"while true; do env && echo WOOT && sleep 1; done"]'
        )
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        image_name = image.build()
        unit_name = random_name(self)
        expected_variables = frozenset({
            'key1': 'value1',
            'key2': 'value2',
        }.items())
        d = self.start_container(
            unit_name=unit_name,
            image_name=image_name,
            environment=Environment(variables=expected_variables),
        )
        def started(_):
            # Accumulate logs until the container has printed the sentinel
            # ("WOOT"), which guarantees the env dump is complete.
            output = ""
            while True:
                output += Client().logs(self.namespacing_prefix + unit_name)
                if "WOOT" in output:
                    break
            assertContainsAll(
                output, test_case=self,
                needles=['{}={}\n'.format(k, v)
                         for k, v in expected_variables],
            )
        d.addCallback(started)
        return d
    def test_pull_image_if_necessary(self):
        """
        The Docker image is pulled if it is unavailable locally.
        """
        # Use an image that isn't likely to be in use by anything, since
        # it's old, and isn't used by other tests:
        image = u"busybox:ubuntu-12.04"
        # Make sure image is gone:
        docker = Client()
        try:
            docker.remove_image(image, force=True)
        except APIError as e:
            if e.response.status_code != 404:
                raise
        name = random_name(self)
        client = self.make_client()
        self.addCleanup(client.remove, name)
        d = client.add(name, image)
        d.addCallback(lambda _: self.assertTrue(docker.inspect_image(image)))
        return d
    def test_namespacing(self):
        """
        Containers are created with a namespace prefixed to their container
        name.
        """
        docker = Client()
        name = random_name(self)
        client = self.make_client()
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        def added(_):
            self.assertTrue(
                docker.inspect_container(self.namespacing_prefix + name))
        d.addCallback(added)
        return d
    def test_null_environment(self):
        """
        A container that does not include any environment variables contains
        an empty ``environment`` in the return ``Unit``.
        """
        docker_dir = FilePath(self.mktemp())
        docker_dir.makedirs()
        docker_dir.child(b"Dockerfile").setContent(
            b'FROM scratch\n'
            b'MAINTAINER info@clusterhq.com\n'
            b'CMD ["/bin/doesnotexist"]'
        )
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        image_name = image.build()
        client = self.make_client()
        name = random_name(self)
        self.create_container(client, name, image_name)
        self.addCleanup(client.remove, name)
        d = client.list()
        def got_list(units):
            unit = [unit for unit in units if unit.name == name][0]
            self.assertIsNone(unit.environment)
        d.addCallback(got_list)
        return d
    def test_container_name(self):
        """
        The container name stored on returned ``Unit`` instances matches the
        expected container name.
        """
        client = self.make_client()
        name = random_name(self)
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        d.addCallback(lambda _: client.list())
        def got_list(units):
            unit = [unit for unit in units if unit.name == name][0]
            self.assertEqual(unit.container_name,
                             self.namespacing_prefix + name)
        d.addCallback(got_list)
        return d
    def test_empty_environment(self):
        """
        When a container with no custom environment variables is launched via
        ``DockerClient.add`` the environment in the resulting ``Unit`` returned
        from ``DockerClient.list`` will ignore the default HOME and PATH
        environment variables, leaving the ``Unit`` with an Environment of
        None.
        """
        name = random_name(self)
        d = self.start_container(name)
        def started(client):
            deferred_units = client.list()
            def check_units(units):
                unit = [unit for unit in units if unit.name == name][0]
                self.assertIsNone(unit.environment)
            deferred_units.addCallback(check_units)
        d.addCallback(started)
        return d
    def test_list_only_custom_environment(self):
        """
        When a container containing custom environment variables is launched
        and the image used also injects environment variables, only the custom
        variables we injected are returned by ``DockerClient.list``, whereas
        variables set by the image are discarded.
        All Docker containers have a PATH environment variable. In addition,
        the openshift/busybox-http-app image contains an STI_SCRIPTS_URL
        environment variable. These are therefore the variables
        disregarded in this test, whereas our custom environment is listed in
        the returned Units.
        https://registry.hub.docker.com/u/openshift/busybox-http/dockerfile/
        """
        name = random_name(self)
        environment = {
            'my_variable': 'some value',
            'another_variable': '12345'
        }
        environment = frozenset(environment.items())
        d = self.start_container(
            name,
            environment=Environment(variables=environment)
        )
        def started(client):
            deferred_units = client.list()
            def check_units(units):
                unit = [unit for unit in units if unit.name == name][0]
                expected = Environment(variables=environment)
                self.assertEqual(unit.environment, expected)
            deferred_units.addCallback(check_units)
        d.addCallback(started)
        return d
    def test_add_with_volumes(self):
        """
        ``DockerClient.add`` accepts a list of ``Volume`` instances which are
        mounted within the container.
        """
        docker_dir = FilePath(self.mktemp())
        docker_dir.makedirs()
        docker_dir.child(b"Dockerfile").setContent(
            b'FROM busybox\n'
            b'CMD ["/bin/sh",  "-c", '
            b'"touch /mnt1/a; touch /mnt2/b"]'
        )
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        image_name = image.build()
        unit_name = random_name(self)
        path1 = FilePath(self.mktemp())
        path1.makedirs()
        path2 = FilePath(self.mktemp())
        path2.makedirs()
        d = self.start_container(
            unit_name=unit_name,
            image_name=image_name,
            volumes=[
                Volume(node_path=path1, container_path=FilePath(b"/mnt1")),
                Volume(node_path=path2, container_path=FilePath(b"/mnt2"))],
            expected_states=(u'inactive',),
        )
        def started(_):
            expected1 = path1.child(b"a")
            expected2 = path2.child(b"b")
            # Poll for up to ~10 seconds for the container to have touched
            # both files through the mounted volumes.
            for i in range(100):
                if expected1.exists() and expected2.exists():
                    return
                else:
                    time.sleep(0.1)
            self.fail("Files never created.")
        d.addCallback(started)
        return d
    def test_add_with_memory_limit(self):
        """
        ``DockerClient.add`` accepts an integer mem_limit parameter which is
        passed to Docker when creating a container as the maximum amount of RAM
        available to that container.
        """
        MEMORY_100MB = 100000000
        name = random_name(self)
        d = self.start_container(name, mem_limit=MEMORY_100MB)
        def started(_):
            docker = Client()
            data = docker.inspect_container(self.namespacing_prefix + name)
            self.assertEqual(data[u"Config"][u"Memory"],
                             MEMORY_100MB)
        d.addCallback(started)
        return d
    def test_add_with_cpu_shares(self):
        """
        ``DockerClient.add`` accepts an integer cpu_shares parameter which is
        passed to Docker when creating a container as the CPU shares weight
        for that container. This is a relative weight for CPU time versus other
        containers and does not directly constrain CPU usage, i.e. a CPU share
        constrained container can still use 100% CPU if other containers are
        idle. Default shares when unspecified is 1024.
        """
        name = random_name(self)
        d = self.start_container(name, cpu_shares=512)
        def started(_):
            docker = Client()
            data = docker.inspect_container(self.namespacing_prefix + name)
            self.assertEqual(data[u"Config"][u"CpuShares"], 512)
        d.addCallback(started)
        return d
    def test_add_without_cpu_or_mem_limits(self):
        """
        ``DockerClient.add`` when creating a container with no mem_limit or
        cpu_shares specified will create a container without these resource
        limits, returning integer 0 as the values for Memory and CpuShares from
        its API when inspecting such a container.
        """
        name = random_name(self)
        d = self.start_container(name)
        def started(_):
            docker = Client()
            data = docker.inspect_container(self.namespacing_prefix + name)
            self.assertEqual(data[u"Config"][u"Memory"], 0)
            self.assertEqual(data[u"Config"][u"CpuShares"], 0)
        d.addCallback(started)
        return d
    def start_restart_policy_container(self, mode, restart_policy):
        """
        Start a container for testing restart policies.
        :param unicode mode: Mode of container. One of
            - ``"failure"``: The container will always exit with a failure.
            - ``"success-then-sleep"``: The container will exit with success
              once, then sleep forever.
            - ``"failure-then-success"``: The container will exit with failure
              once, then with success.
        :param IRestartPolicy restart_policy: The restart policy to use for
            the container.
        :returns Deferred: A deferred that fires with the number of times the
            container was started.
        """
        docker_dir = FilePath(__file__).sibling('retry-docker')
        image = DockerImageBuilder(test=self, source_dir=docker_dir)
        image_name = image.build()
        name = random_name(self)
        data = FilePath(self.mktemp())
        data.makedirs()
        # The container increments this counter file on each start, so its
        # content is the observed number of starts.
        count = data.child('count')
        count.setContent("0")
        marker = data.child('marker')
        if mode == u"success-then-sleep":
            expected_states = (u'active',)
        else:
            expected_states = (u'inactive',)
        d = self.start_container(
            name, image_name=image_name,
            restart_policy=restart_policy,
            environment=Environment(variables={u'mode': mode}),
            volumes=[
                Volume(node_path=data, container_path=FilePath(b"/data"))],
            expected_states=expected_states)
        if mode == u"success-then-sleep":
            def wait_for_marker(_):
                while not marker.exists():
                    time.sleep(0.01)
            d.addCallback(wait_for_marker)
        d.addCallback(lambda ignored: count.getContent())
        return d
    def test_restart_policy_never(self):
        """
        A container with a restart policy of never isn't restarted
        after it exits.
        """
        d = self.start_restart_policy_container(
            mode=u"failure", restart_policy=RestartNever())
        d.addCallback(self.assertEqual, "1")
        return d
    def test_restart_policy_always(self):
        """
        A container with a restart policy of always is restarted
        after it exits.
        """
        d = self.start_restart_policy_container(
            mode=u"success-then-sleep", restart_policy=RestartAlways())
        d.addCallback(self.assertEqual, "2")
        return d
    def test_restart_policy_on_failure(self):
        """
        A container with a restart policy of on-failure is restarted
        after it exits with a non-zero result.
        """
        d = self.start_restart_policy_container(
            mode=u"failure-then-success", restart_policy=RestartOnFailure())
        d.addCallback(self.assertEqual, "2")
        return d
    def test_restart_policy_on_failure_maximum_count(self):
        """
        A container with a restart policy of on-failure and a maximum
        retry count is not restarted if it fails as many times as the
        specified maximum.
        """
        d = self.start_restart_policy_container(
            mode=u"failure",
            restart_policy=RestartOnFailure(maximum_retry_count=5))
        # A Docker change e721ed9b5319e8e7c1daf87c34690f8a4e62c9e3 means that
        # this value depends on the version of Docker.
        d.addCallback(self.assertIn, ("5", "6"))
        return d
    def test_command_line(self):
        """
        A container with custom command line is run with those arguments.
        """
        external_port = find_free_port()[1]
        name = random_name(self)
        d = self.start_container(
            name, image_name=u"busybox",
            command_line=[u"sh", u"-c", u"""\
echo -n '#!/bin/sh
echo -n "HTTP/1.1 200 OK\r\n\r\nhi"
' > /tmp/script.sh;
chmod +x /tmp/script.sh;
nc -ll -p 8080 -e /tmp/script.sh
"""],
            ports=[PortMap(internal_port=8080,
                           external_port=external_port)])
        d.addCallback(
            lambda ignored: self.request_until_response(external_port))
        def started(response):
            d = content(response)
            d.addCallback(lambda body: self.assertEqual(b"hi", body))
            return d
        d.addCallback(started)
        return d
class DockerClientTests(TestCase):
    """
    Tests for ``DockerClient`` specifically.
    """
    @if_docker_configured
    def setUp(self):
        pass
    def test_default_namespace(self):
        """
        The default namespace is `u"flocker--"`.
        """
        docker = Client()
        name = random_name(self)
        client = DockerClient()
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        d.addCallback(lambda _: self.assertTrue(
            docker.inspect_container(u"flocker--" + name)))
        return d
    def test_list_removed_containers(self):
        """
        ``DockerClient.list`` does not list containers which are removed,
        during its operation, from another thread.
        """
        namespace = namespace_for_test(self)
        flocker_docker_client = DockerClient(namespace=namespace)
        name1 = random_name(self)
        adding_unit1 = flocker_docker_client.add(
            name1, u'openshift/busybox-http-app')
        self.addCleanup(flocker_docker_client.remove, name1)
        name2 = random_name(self)
        adding_unit2 = flocker_docker_client.add(
            name2, u'openshift/busybox-http-app')
        self.addCleanup(flocker_docker_client.remove, name2)
        docker_client = flocker_docker_client._client
        docker_client_containers = docker_client.containers
        def simulate_missing_containers(*args, **kwargs):
            """
            Remove a container before returning the original list.
            """
            containers = docker_client_containers(*args, **kwargs)
            container_name1 = flocker_docker_client._to_container_name(name1)
            docker_client.remove_container(
                container=container_name1, force=True)
            return containers
        adding_units = gatherResults([adding_unit1, adding_unit2])
        patches = []
        def get_list(ignored):
            # Patch `containers` so a container disappears between listing
            # and inspection, simulating a concurrent removal.
            patch = self.patch(
                docker_client,
                'containers',
                simulate_missing_containers
            )
            patches.append(patch)
            return flocker_docker_client.list()
        listing_units = adding_units.addCallback(get_list)
        def check_list(units):
            for patch in patches:
                patch.restore()
            self.assertEqual(
                [name2], sorted([unit.name for unit in units])
            )
        running_assertions = listing_units.addCallback(check_list)
        return running_assertions
    def error_passthrough_test(self, method_name):
        """
        If the given method name on the underlying ``Docker`` client has a
        non-404 error, that gets passed through to ``Docker.list()``.
        :param str method_name: Method of a docker ``Client``.
        :return: ``Deferred`` firing on test success.
        """
        name = random_name(self)
        client = DockerClient()
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        class Response(object):
            status_code = 500
            content = ""
        def error(name):
            raise APIError("", Response())
        def added(_):
            # Monkeypatch because triggering non-404 errors from
            # inspect_container is hard.
            self.patch(client._client, method_name, error)
            return client.list()
        d.addCallback(added)
        return self.assertFailure(d, APIError)
    def test_list_error_inspecting_container(self):
        """
        If an error occurs inspecting a container it is passed through.
        """
        return self.error_passthrough_test("inspect_container")
    def test_list_error_inspecting_image(self):
        """
        If an error occurs inspecting an image it is passed through.
        """
        return self.error_passthrough_test("inspect_image")
class NamespacedDockerClientTests(GenericDockerClientTests):
    """
    Functional tests for ``NamespacedDockerClient``.

    Re-runs the whole generic suite with the namespaced client by
    overriding ``make_client`` and ``create_container``.
    """
    @if_docker_configured
    def setUp(self):
        self.namespace = namespace_for_test(self)
        self.namespacing_prefix = BASE_NAMESPACE + self.namespace + u"--"
    def make_client(self):
        return NamespacedDockerClient(self.namespace)
    def create_container(self, client, name, image):
        """
        Create (but don't start) a container via the supplied client.
        :param DockerClient client: The Docker API client.
        :param unicode name: The container name.
        :param unicode image: The image name.
        """
        # NamespacedDockerClient wraps a DockerClient, hence the extra
        # ._client hop compared to the base class implementation.
        container_name = client._client._to_container_name(name)
        client._client._client.create_container(
            name=container_name, image=image)
    def test_isolated_namespaces(self):
        """
        Containers in one namespace are not visible in another namespace.
        """
        client = NamespacedDockerClient(namespace=namespace_for_test(self))
        client2 = NamespacedDockerClient(namespace=namespace_for_test(self))
        name = random_name(self)
        self.addCleanup(client.remove, name)
        d = client.add(name, u"busybox:latest")
        d.addCallback(lambda _: client2.list())
        d.addCallback(self.assertEqual, set())
        return d
|
def convertSeconds(y):
    """Convert a number of seconds to a 'days:hours:minutes:seconds' string.

    :param y: total number of seconds (non-negative integer)
    :return: string of the form 'd:h:m:s' with integer components
    """
    # divmod keeps the arithmetic purely integral. The previous version used
    # float division (y/86400 etc.) and cast with int(), which loses
    # precision for very large inputs; this version is exact for any int.
    day, remainder = divmod(y, 86400)
    uren, remainder = divmod(remainder, 3600)
    minuten, seconden = divmod(remainder, 60)
    return '{0}:{1}:{2}:{3}'.format(day, uren, minuten, seconden)
# Prompt for a number of seconds and print it formatted as d:h:m:s.
totaal = int(input('Geef het aantal seconden op.'))
print(convertSeconds(totaal))
import pygame
from constants import *
class Runner:
    """A runner piece rendered as a small 15x15 square on the board grid."""

    def __init__(self, row, col, color, name):
        self.row = row
        self.col = col
        self.name = name
        self.color = color
        self.x = 0
        self.y = 0
        self.calc_pos()

    def move(self, row, col):
        """Relocate to grid cell (row, col) and refresh pixel coordinates."""
        self.row = row
        self.col = col
        self.calc_pos()

    def calc_pos(self):
        """Translate the grid cell into pixel coordinates near the square
        centre (offset by the piece size)."""
        half = SQUARE_SIZE // 2
        self.x = SQUARE_SIZE * self.col + half - 15
        self.y = SQUARE_SIZE * self.row + half - 15

    def draw(self, window):
        """Draw the piece on ``window`` and refresh the display."""
        shape = pygame.Rect(self.x, self.y, 15, 15)
        pygame.draw.rect(window, self.color, shape)
        pygame.display.flip()
class Chaser:
    """A chaser piece; mirrors Runner's geometry but is its own type."""

    def __init__(self, row, col, color, name):
        self.row, self.col = row, col
        self.name, self.color = name, color
        self.x, self.y = 0, 0
        self.calc_pos()

    def move(self, row, col):
        """Move to the given grid cell and recompute the pixel position."""
        self.row, self.col = row, col
        self.calc_pos()

    def calc_pos(self):
        """Derive on-screen (x, y) from the grid cell, centred minus the
        piece size."""
        self.x = (SQUARE_SIZE * self.col) + (SQUARE_SIZE // 2) - 15
        self.y = (SQUARE_SIZE * self.row) + (SQUARE_SIZE // 2) - 15

    def draw(self, window):
        """Draw the 15x15 piece and flip the display buffer."""
        pygame.draw.rect(window, self.color,
                         pygame.Rect(self.x, self.y, 15, 15))
        pygame.display.flip()
|
from flask import Flask
from flask import render_template
# Single Flask application instance; all routes below register against it.
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Root route: return a plain greeting."""
    return "hello!"
@app.route('/nba')
def show_nba_stuff():
    """NBA landing page; placeholder body text for now."""
    return "Placeholder for nba text Placeholder for nba text Placeholder for nba text Placeholder for nba text"
@app.route('/soccer')
def show_soccer_stuff():
    """Soccer landing page; placeholder body text for now."""
    return "Placeholder soccer text Placeholder soccer text Placeholder soccer text Placeholder soccer text Placeholder soccer text Placeholder soccer text"
@app.route('/soccer/players/<playerid>')
def showAPlayer(playerid):
    """Show info for one player; ``playerid`` arrives from the URL as a string."""
    return f"here is info about player number {playerid}"
# repeat/2/potatoes
@app.route('/repeat/<num>/<word>')
def repeatsomestuff(num, word):
    """Render yahoo.html with the URL count, word and a list of players.

    ``num`` and ``word`` arrive from the URL as strings; the template is
    responsible for any repetition logic.
    """
    thegreatestPlayers = ["Lebron", "Kobe", "Allen Iverson", "Lucka Doncic", "Jordan", "Shaq"]
    # Fix: render_template was called without being imported (NameError at
    # request time) — the import is added at the top of the file. The two
    # statements that previously followed this return were unreachable dead
    # code and have been removed.
    return render_template("yahoo.html", var1=num, var2=word, var3=thegreatestPlayers)
if __name__ == "__main__":
    # debug=True enables the auto-reloader and interactive debugger
    # (development only — never run with debug in production).
    app.run(debug=True)
# https: / www.espn.com/soccer/team/_/id/363/chealsea
# https: / www.espn.com/soccer/team/_/id/359/arsenal
|
from nose.tools import assert_true
import requests
def test_request_response():
    """The todos endpoint should answer with a successful status code."""
    resp = requests.get('http://jsonplaceholder.typicode.com/todos')
    # A plain assert works under pytest and drops the dependency on the
    # unmaintained nose package (assert_true is just a wrapped assert).
    assert resp.ok
|
# Generated by Django 2.1 on 2018-09-03 23:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: relaxes several Events fields so they
    # may be empty (blank/null) in forms and in the database.
    dependencies = [
        ('events', '0005_auto_20180903_1724'),
    ]
    operations = [
        migrations.AlterField(
            model_name='events',
            name='address',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='events',
            name='time',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='events',
            name='title',
            field=models.CharField(max_length=200, null=True),
        ),
    ]
|
class Monostate(object):
    """Borg/monostate pattern: every instance shares one attribute namespace.

    An attribute set through any instance is immediately visible through all
    others; the shared mapping is pre-seeded with ``A`` and ``B``.
    """
    # Name-mangled class-level mapping that backs every instance's __dict__.
    __shared_state = {'A': 5, 'B': 6}

    def __init__(self):
        # Point this instance's attribute dict at the class-wide mapping.
        self.__dict__ = self.__shared_state
class Monostatev2(object):
    """Borg variant that shares state via ``__new__`` instead of ``__init__``."""
    _internal_state = {}

    def __new__(cls, *args, **kwargs):
        # Bug fix: object.__new__ must not receive extra positional/keyword
        # arguments in Python 3 (it raises TypeError when __init__ is not
        # overridden), so forward only ``cls``. Extra args are still accepted
        # by this signature for subclass compatibility.
        obj = super(Monostatev2, cls).__new__(cls)
        # Every instance's __dict__ aliases the single class-level mapping.
        obj.__dict__ = cls._internal_state
        return obj
# Demo: attributes set through one instance are visible through the other,
# because both instances share a single __dict__.
A = Monostate()
B = Monostate()
A.x = 10
print(B.x)
B.x = 99
print(A.x)
A = Monostatev2()
B = Monostatev2()
A.y = 100
print(B.y)
|
"""
Created By : <Auto generated code>
Created On :
Reviewed By :
Reviewed On :
Version :
"""
import json
from django.http import HttpRequest
from examsystemapp.api.base_controller import BaseController
from examsystemapp.models.student import StudentModel
from examsystemapp.services.student_service import StudentService
from examsystemapp.utils.constants.constants import DataTypes, HttpMethodType, AppConstants
from examsystemapp.utils.helpers.general_helper import IntHelper, FloatHelper
from examsystemapp.utils.helpers.request_helper import RequestConfig, ParamsObject
class Student(BaseController):
def __init__(self, request):
BaseController.__init__(self, request)
def add(self, request: HttpRequest):
student_json = json.loads(request.POST.get("student_json"))
student_object: StudentModel = StudentModel()
student_object.collegeid = student_json.get("collegeid")
student_object.branchid = student_json.get("branchid")
student_object.currentsemester = student_json.get("currentsemester")
student_object.name = student_json.get("name")
student_object.rollno = student_json.get("rollno")
student_object.add1 = student_json.get("add1")
student_object.add2 = student_json.get("add2")
student_object.add3 = student_json.get("add3")
student_object.cityid = student_json.get("cityid")
student_object.stateid = student_json.get("stateid")
student_object.pin = student_json.get("pin")
student_object.phonenum = student_json.get("phonenum")
student_object.email = student_json.get("email")
student_object.profilepic = student_json.get("profilepic")
student_object.loginid = student_json.get("loginid")
student_object.passwd = student_json.get("passwd")
student_service: StudentService = StudentService()
student_object = student_service.add(student_object)
return self.send_response(student_object)
def update(self, request: HttpRequest):
student_json = json.loads(request.POST.get("student_json"))
student_object: StudentModel = StudentModel()
student_object.studentid = student_json.get("studentid")
student_object.collegeid = student_json.get("collegeid")
student_object.branchid = student_json.get("branchid")
student_object.currentsemester = student_json.get("currentsemester")
student_object.name = student_json.get("name")
student_object.rollno = student_json.get("rollno")
student_object.add1 = student_json.get("add1")
student_object.add2 = student_json.get("add2")
student_object.add3 = student_json.get("add3")
student_object.cityid = student_json.get("cityid")
student_object.stateid = student_json.get("stateid")
student_object.pin = student_json.get("pin")
student_object.phonenum = student_json.get("phonenum")
student_object.email = student_json.get("email")
student_object.profilepic = student_json.get("profilepic")
student_object.loginid = student_json.get("loginid")
student_object.passwd = student_json.get("passwd")
student_service: StudentService = StudentService()
student_object = student_service.update(student_object)
return self.send_response(student_object)
def delete(self, request: HttpRequest):
student_json = json.loads(request.POST.get("student_json"))
student_object: StudentModel = StudentModel()
student_object.studentid = student_json.get("studentid")
student_service: StudentService = StudentService()
student_object = student_service.delete(student_object)
return self.send_response(student_object)
def get(self, request: HttpRequest):
params = [
{"id": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.INT)}
]
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
student_service: StudentService = StudentService()
data = student_service.get(params)
return self.send_response(data)
def get_list(self, request: HttpRequest):
params = [
{"ids": RequestConfig(from_session=False, nullable=False, datatype=DataTypes.STRING, default='')}
]
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
student_service: StudentService = StudentService()
data = student_service.get_list(params)
return self.send_response(data)
def get_object(self, request: HttpRequest):
params = []
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
student_service: StudentService = StudentService()
data = student_service.get_object(params)
return self.send_response(data)
def get_list_object(self, request: HttpRequest):
params = []
params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
student_service: StudentService = StudentService()
data = student_service.get_list_object(params)
return self.send_response(data)
def get_list_object_page(self, request: HttpRequest):
    """Return a paginated, filtered list of student objects.

    All filter parameters (college, branch, semester, name, roll number) are
    optional; ``page_Num``/``page_Size`` control pagination and default to
    page 1 with 10 rows per page.
    """
    params = [
        {"collegeID": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
        {"branchID": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=None)},
        {"currentSemester": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.STRING, default=None)},
        {"studentName": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.STRING, default=None)},
        {"rollNo": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.STRING, default=None)},
        {"page_Num": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=1)},
        # BUG FIX: page_Size is numeric (integer default 10) but was declared
        # DataTypes.STRING; declare it INT for consistency with page_Num.
        {"page_Size": RequestConfig(from_session=False, nullable=True, datatype=DataTypes.INT, default=10)}
    ]
    params: ParamsObject = self.convert_params(request, HttpMethodType.get, params)
    student_service: StudentService = StudentService()
    data = student_service.get_list_object_paginated(params)
    return self.send_response(data)
|
#!/usr/bin/env python3
"""Train a small dense network on MNIST: 784 -> 100 (sigmoid) -> 10 (softmax)."""
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.datasets import mnist
from keras.utils import to_categorical

# Load MNIST and flatten the 28x28 images into 784-dim vectors.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# BUG FIX: the raw uint8 pixels (0..255) were fed straight into a sigmoid
# layer, which saturates the activations and stalls SGD; scale to [0, 1].
x_train = np.reshape(x_train, [-1, 784]).astype("float32") / 255.0
x_test = np.reshape(x_test, [-1, 784]).astype("float32") / 255.0
# One-hot encode the digit labels for categorical cross-entropy.
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)

model = Sequential()
# Declare the input shape explicitly so the model is built eagerly.
model.add(Dense(100, activation='sigmoid', input_shape=(784,)))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=200, epochs=10, validation_split=0.2)
# Prints [test_loss, test_accuracy].
print(model.evaluate(x_test, y_test))
|
"""
Making diagrams easily.
Need to have graphviz (pip install graphviz), but also need the backend of this python binder:
Mac: brew install graphviz
Linux: sudo apt-get install graphviz
Windows: google it
"""
import re
from typing import Optional
import json
from collections import defaultdict
from functools import wraps
from graphviz import Digraph, Source
from types import MethodType
# Note: The docstring below is not used anywhere in the module anymore, but is kept for reference.
"""Get a `re.Pattern` instance (as given by re.compile()) with control over defaults of it's methods.
Useful to reduce if/else boilerplate when handling the output of search functions (match, search, etc.)
See [regex_search_hack.md](https://gist.github.com/thorwhalen/6c913e9be35873cea6efaf6b962fde07) for more explanations of the
use case.
Example:
>>> dflt_result = type('dflt_search_result', (), {'groupdict': lambda x: {}})()
>>> p = re_compile('.*(?P<president>obama|bush|clinton)', search=dflt_result, match=dflt_result)
>>>
>>> p.search('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
'bush'
>>>
>>> # if not match is found, will return 'Not found', as requested
>>> p.search('This does not contain a president').groupdict().get('president', 'Not found')
'Not found'
>>>
>>> # see that other non-wrapped re.Pattern methods still work
>>> p.findall('I am beating around the bush, am I?')
['bush']
"""
import re
from functools import wraps
def add_dflt(func, dflt_if_none):
    """Wrap ``func`` so a ``None`` result is replaced by ``dflt_if_none``.

    Any non-``None`` return value passes through unchanged; the wrapper keeps
    the wrapped function's metadata via ``functools.wraps``.
    """
    @wraps(func)
    def with_default(*args, **kwargs):
        out = func(*args, **kwargs)
        return dflt_if_none if out is None else out
    return with_default
def re_compile(pattern, flags=0, **dflt_if_none):
    """Get a ``re.Pattern``-like object with default results for chosen methods.

    Behaves like ``re.compile(pattern, flags)``, except that every method named
    in ``dflt_if_none`` returns the supplied default instead of ``None`` when
    nothing matches.  Useful to reduce if/else boilerplate when handling the
    output of search functions (match, search, etc.).

    >>> dflt_result = type('dflt_search_result', (), {'groupdict': lambda x: {}})()
    >>> p = re_compile('.*(?P<president>obama|bush|clinton)', search=dflt_result, match=dflt_result)
    >>> p.search('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
    'bush'
    >>> p.match('I am beating around the bush, am I?').groupdict().get('president', 'Not found')
    'bush'
    >>> # if no match is found, will return 'Not found', as requested
    >>> p.search('This does not contain a president').groupdict().get('president', 'Not found')
    'Not found'
    >>> # see that other non-wrapped re.Pattern methods still work
    >>> p.findall('I am beating around the bush, am I?')
    ['bush']
    """
    compiled = re.compile(pattern, flags=flags)
    wrapper = type('MyRegexCompilation', (object,), {})()
    # Intercepted methods get the None-fallback treatment.
    for method_name, default in dflt_if_none.items():
        setattr(wrapper, method_name, add_dflt(getattr(compiled, method_name), default))
    # Everything else is forwarded untouched (dunders and intercepted names skipped).
    for attr in dir(compiled):
        if attr.startswith('__') or attr in dflt_if_none:
            continue
        setattr(wrapper, attr, getattr(compiled, attr))
    return wrapper
class rx:
    """Pre-compiled regular expressions shared by the diagram mini-language."""
    name = re.compile('^:(\w+)')  # leading ":graphname" directive at line start
    lines = re.compile('\n|\r|\n\r|\r\n')  # split on any newline convention
    comments = re.compile('#.+$')  # trailing '#' comment on a line
    non_space = re.compile('\S')  # does the text contain anything at all?
    nout_nin = re.compile(r'(\w+)\W+(\w+)')  # loose "out in" word pair (an edge)
    arrow = re.compile(r'\s*->\s*')  # edge separator between pipeline stages
    instruction = re.compile(r'(\w+):\s+(.+)')  # single "key: value" instruction
    node_def = re.compile(r'([\w\s,]+):\s+(.+)')  # "node[, node...]: specs"
    wsc = re.compile(r'[\w\s,]+')  # a run of word chars, whitespace, commas
    csv = re.compile(r'[\s,]+')  # separator for comma/space-delimited lists
    pref_name_suff = re.compile(r'(\W*)(\w+)(\W*)')  # decoration around a node name
class DDigraph(Digraph):
    # NOTE(review): this subclass looks unfinished -- ``__init__`` parses a
    # leading ":name" header from the first positional argument but never uses
    # the extracted ``name``, never handles the non-':' case, and never calls
    # ``super().__init__``, so instances are not actually initialized.
    @wraps(Digraph.__init__)
    def __init__(self, *args, **kwargs):
        if args:
            first_arg = args[0]
            # A first argument starting with ':' is treated as a spec string
            # whose first line carries the graph name (e.g. ":mygraph").
            if first_arg.startswith(':'):
                lines = rx.lines.split(first_arg)
                first_line = lines[0]
                # assumes the header matches rx.name -- raises AttributeError
                # (NoneType has no .group) otherwise; TODO confirm intended
                name = rx.name.search(first_line).group(1)
class ModifiedDot:
    """Parser/interpreter pair for the "modified dot" mini-language of ``dgdisp``.

    The language supports plain ``a -> b`` edge chains (with comma-separated
    fan-in/fan-out), ``node: specs`` attribute lines, ``--key: value`` dynamic
    defaults, ``#`` comments, and raw graphviz source pass-through.
    """

    class rx:
        # Pre-compiled regexes used to tokenize mini-language statements.
        lines = re.compile('\n|\r|\n\r|\r\n')
        comments = re.compile('#.+$')
        non_space = re.compile('\S')
        nout_nin = re.compile(r'(\w+)\W+(\w+)')
        arrow = re.compile(r'\s*->\s*')
        instruction = re.compile(r'(\w+):\s+(.+)')
        node_def = re.compile(r'([\w\s,]+):\s+(.+)')
        wsc = re.compile(r'[\w\s,]+')
        csv = re.compile(r'[\s,]+')
        pref_name_suff = re.compile(r'(\W*)(\w+)(\W*)')

    @staticmethod
    def loose_edges(s):
        """Extract (out, in) word pairs from every line of ``s`` containing two words."""
        return list(
            map(
                lambda x: x.groups(),
                filter(
                    None,
                    map(ModifiedDot.rx.nout_nin.search, ModifiedDot.rx.lines.split(s),),
                ),
            )
        )

    # Shorthand (prefix, suffix) decorations -> graphviz node shapes.
    # https://www.graphviz.org/doc/info/shapes.html#polygon
    shape_for_chars = {
        ('[', ']'): 'box',
        ('(', ')'): 'circle',
        (']', '['): 'square',
        ('/', '/'): 'parallelogram',
        ('<', '>'): 'diamond',
        ('([', '])'): 'cylinder',
        ('[[', ']]'): 'box3d',
        ('((', '))'): 'doublecircle',
        ('/', '\\'): 'triangle',
        ('\\', '/'): 'invtriangle',
        ('|/', '\\|'): 'house',
        ('|\\', '/|'): 'invhouse',
        ('/-', '-\\'): 'trapezium',
        ('-\\', '-/'): 'invtrapezium',
    }

    @staticmethod
    def _modified_dot_gen(s, dflt_node_attr='shape', **dflt_specs):
        """Yield ('edge'|'node'|'source', arg1, arg2) commands parsed from ``s``.

        :param s: Mini-language text.
        :param dflt_node_attr: Attribute name a bare spec string is assigned to.
        :param dflt_specs: Default attributes merged into every node's specs.
        """
        csv_items = lambda x: ModifiedDot.rx.csv.split(x.strip())
        pipeline_items = lambda s: list(map(csv_items, s))
        for line in ModifiedDot.rx.lines.split(s):
            line = ModifiedDot.rx.comments.sub('', line)  # strip trailing comments
            statements = ModifiedDot.rx.arrow.split(line)
            if len(statements) > 1:
                # "a, b -> c -> d": emit every edge of each consecutive hop
                # (many-to-many per hop).
                pipeline = pipeline_items(statements)
                for nouts, nins in zip(pipeline[:-1], pipeline[1:]):
                    for nout in nouts:
                        for nin in nins:
                            yield 'edge', nout, nin
            else:
                statement = statements[0].strip()
                if ':' in statement:
                    if statement.startswith(
                        '--'
                    ):  # it's a special instruction (typically, overriding a default)
                        statement = statement[2:]
                        # BUG FIX: ``search`` returns a Match object, which the
                        # original unpacked directly (a TypeError at runtime);
                        # unpack its groups instead.
                        instruction, specs = ModifiedDot.rx.node_def.search(
                            statement
                        ).groups()
                        if instruction == 'dflt_node_attr':
                            dflt_node_attr = specs.strip()
                        else:
                            dflt_specs[instruction] = specs.strip()
                    else:  # it's a node definition (or just some stuff to ignore)
                        if statement.startswith('#'):
                            continue  # ignore, it's just a comment
                        g = ModifiedDot.rx.node_def.search(statement)
                        if g is None:
                            continue
                        nodes, specs = g.groups()
                        nodes = csv_items(nodes)
                        if specs.startswith('{'):
                            specs = json.loads(specs)  # explicit JSON attribute dict
                        else:
                            specs = {dflt_node_attr: specs}
                        for node in nodes:
                            assert isinstance(
                                specs, dict
                            ), f'specs for {node} be a dict at this point: {specs}'
                            yield 'node', node, dict(dflt_specs, **specs)
                elif ModifiedDot.rx.non_space.search(statement):
                    # any other non-empty text is passed through as raw graphviz source
                    yield 'source', statement, None

    @staticmethod
    def parser(s, **dflt_specs):
        """Parse mini-language text into a list of (kind, arg1, arg2) commands."""
        return list(ModifiedDot._modified_dot_gen(s, **dflt_specs))

    @staticmethod
    def interpreter(commands, node_shapes, attrs_for_node, engine, **digraph_kwargs):
        """Build a ``Digraph`` from parsed commands.

        :param commands: Iterable of (kind, arg1, arg2) tuples from ``parser``.
        :param node_shapes: Maps (prefix, suffix) decorations to shape names.
        :param attrs_for_node: Extra per-node attribute dicts, applied first.
        :param engine: Graphviz layout engine name (or None for the default).
        :param digraph_kwargs: Forwarded to ``graphviz.Digraph``.
        """
        _edges = list()
        _nodes = defaultdict(dict)
        _sources = list()
        for kind, arg1, arg2 in commands:
            if kind == 'edge':
                from_node, to_node = arg1, arg2
                _edge = list()
                for node in (from_node, to_node):
                    pref, name, suff = ModifiedDot.rx.pref_name_suff.search(
                        node
                    ).groups()
                    if (
                        pref,
                        suff,
                    ) in node_shapes and name not in _nodes:  # only the first decoration (pref/suff) of a node counts
                        _nodes[name].update(shape=node_shapes[(pref, suff)])
                        _edge.append(name)
                    else:
                        _edge.append(name)
                _edges.append(_edge)
            elif kind == 'node':
                node, specs = arg1, arg2
                _nodes[node].update(**specs)
            elif kind == 'source':
                _sources.append(arg1)
        # Raw source statements are appended to the Digraph body verbatim.
        digraph_kwargs['body'] = digraph_kwargs.get('body', []) + _sources
        d = Digraph(engine=engine, **digraph_kwargs)
        d.edges(_edges)
        for node, attrs in attrs_for_node.items():
            d.node(name=node, **attrs)
        for node, attrs in _nodes.items():
            d.node(name=node, **attrs)
        return d
def dgdisp(
    commands,
    node_shapes: Optional[dict] = None,
    attrs_for_node=None,
    minilang=ModifiedDot,
    engine=None,
    **digraph_kwargs,
):
    """
    Make a Dag image flexibly.
    Quick links:
    - attributes: https://www.graphviz.org/doc/info/attrs.html
    - shapes: https://www.graphviz.org/doc/info/shapes.html#polygon
    Has a mini-language by default (called `ModifiedDot`).
    Example:
    ```
    dgdisp(\"\"\"
    key, wf: circle
    chk: doublecircle
    fv: {"shape": "plaintext", "fontcolor": "blue"}
    key -> wf tag
    wf -> [chunker] -> chk -> /featurizer/ -> fv
    fv tag -> ([model])
    \"\"\"
    )
    ```
    ```
    d = dgdisp(\"\"\"
    group_tags, orig_tags -> [mapping] -> tags # many-to-1 and path (chain) example
    predicted_tags, \\tags/ -> /confusion_matrix/ # you can format the shape of nodes inplace
    predict_proba, tag_list -> [[predict]] -> /predicted_tags\\
    group_tags: {"fillcolor": "red", "fontcolor": "red"} # you can specify node attributes as json
    orig_tags [fontsize=30 fontcolor=blue] # you can write graphviz lines as is
    # tag_list [shape=invhouse fontcolor=green] # you can comment out lines
    \"\"\", format='svg')
    d.render('svg')
    ```
    With ModifiedDot you can:
    - specify a bunch of edges at once in a path. For example, a line such as this:
    ```node1 -> node2 -> node3 -> node4```
    will result in these edges
    ```
    [('node1', 'node2'), ('node2', 'node3'), ('node3', 'node4')]
    ```
    - specify 1-to-many, many-to-1 (stars) and many-to-many (bipartite) edges in bulk like this:
    ```
    node1, node2 -> node3, node4, node5 # bipartite graph
    ```
    - use shorthands to shape nodes (see `ModifiedDot.shape_for_chars` for the shape minilanguage, and
    know that you can specify your own additions/modifications)
    - specify node properties in bulk like this:
    ```
    node1, node2, node3 : node_attrs
    ```
    - specify defaults dynamically, within the statements:
    ```
    --fillcolor: red
    --shape: square
    ...
    ```
    :param commands: The commands (edge and node specifications)
    :param node_shapes: Extra tuple-to-shape mappings.
        Used to add to, or override the existing defaults (see them here: `dgdisp.shape_for_chars`).
        This dict constitutes the mini-language used to give shapes to nodes on the fly.
    :param attrs_for_node: See https://www.graphviz.org/doc/info/attrs.html
    :param minilang: Object that populates the graph. Needs a parser and an interpreter method.
    :param engine:
        dot - "hierarchical" or layered drawings of directed graphs.
            This is the default tool to use if edges have directionality.
        neato - "spring model'' layouts. This is the default tool to use if the graph is not too large (about 100 nodes)
            and you don't know anything else about it. Neato attempts to minimize a global energy function,
            which is equivalent to statistical multi-dimensional scaling.
        fdp - "spring model'' layouts similar to those of neato, but does this by reducing forces rather than
            working with energy.
        sfdp - multiscale version of fdp for the layout of large graphs.
        twopi - radial layouts, after Graham Wills 97. Nodes are placed on concentric circles depending their distance
            from a given root node.
        circo - circular layout, after Six and Tollis 99, Kauffman and Wiese 02.
            This is suitable for certain diagrams of multiple cyclic structures, such as certain telecommunications networks.
    :param digraph_kwargs: Other kwargs for graphviz.Digraph(**kwargs)
    :return:
    """
    attrs_for_node = attrs_for_node or {}
    # node_shapes=False disables the (prefix, suffix) shape mini-language
    # entirely; True (or a dict, or None) merges user entries over the
    # built-in defaults from the minilang.
    if node_shapes is False:
        node_shapes = {}
    else:
        if node_shapes is True:
            node_shapes = {}
        node_shapes = dict(minilang.shape_for_chars, **(node_shapes or {}))
    # A string is parsed with the mini-language; an already-parsed command
    # list is handed straight to the interpreter.
    if isinstance(commands, str):
        commands = minilang.parser(commands)
    d = minilang.interpreter(
        commands, node_shapes, attrs_for_node, engine=engine, **digraph_kwargs
    )
    return d
# Expose the shape mini-language table on the function itself for discoverability.
dgdisp.shape_for_chars = ModifiedDot.shape_for_chars


@wraps(dgdisp)
def horizontal_dgdisp(*args, **kwargs):
    # Same as dgdisp, but prepends rankdir="LR" so the graph lays out
    # left-to-right instead of top-down.
    command, *_args = args
    return dgdisp('rankdir="LR"\n' + command, *_args, **kwargs)


# Shortcut: dgdisp.h(...) renders horizontally.
dgdisp.h = horizontal_dgdisp
class Struct:
    """A bare attribute bag: ``Struct(a=1).a == 1``."""

    def __init__(self, **kwargs):
        # Install every keyword argument as an instance attribute.
        self.__dict__.update(kwargs)
# Attribute-style access to the valid engine names (e.g. dgdisp.engines.dot),
# so callers can tab-complete instead of typing raw strings.
dgdisp.engines = Struct(
    **{x: x for x in ['dot', 'neato', 'fdp', 'sfdp', 'twopi', 'circo']}
)
# Alias kept for backward compatibility.
dagdisp = dgdisp
def dot_to_ascii(dot: str, fancy: bool = True):
    """Convert a dot string to an ascii rendering of the diagram.

    Needs a connection to the internet to work (uses the
    dot-to-ascii.ggerganov.com web service).

    :param dot: Graph description in dot syntax; if it does not already start
        with ``graph`` or ``digraph`` it is wrapped in ``graph { ... }``.
    :param fancy: Use nice box-drawing characters instead of ``+``, ``|``, ``-``.
    :return: The ascii rendering as a string.
    :raises SyntaxError: If the service returns an empty response
        (i.e. the DOT input was malformed).

    >>> graph_ascii = dot_to_ascii('graph { rankdir=LR; 0 -- 1 }')  # doctest: +SKIP
    >>> print(graph_ascii)  # doctest: +SKIP
    """
    import requests

    url = 'https://dot-to-ascii.ggerganov.com/dot-to-ascii.php'
    # use nice box drawing char instead of + , | , -
    boxart = 1 if fancy else 0
    # Wrap bare edge statements in a graph block so the service accepts them.
    if not dot.strip().startswith(('graph', 'digraph')):
        dot = 'graph {\n' + dot + '\n}'
    response = requests.get(url, params={'boxart': boxart, 'src': dot}).text
    if not response:
        raise SyntaxError('DOT string is not formatted correctly')
    return response
|
"""
Spike train synchrony plots
---------------------------
.. autosummary::
:toctree: toctree/spike_train_synchrony
plot_spike_contrast
"""
# Copyright 2017-2023 by the Viziphant team, see `doc/authors.rst`.
# License: Modified BSD, see LICENSE.txt for details.
import matplotlib.pyplot as plt
import numpy as np
from viziphant.rasterplot import rasterplot
def plot_spike_contrast(trace, spiketrains=None, title=None, lw=1.0,
                        xscale='log', **kwargs):
    """
    Plot Spike-contrast synchrony measure :cite:`Ciba18_136`.
    Parameters
    ----------
    trace : SpikeContrastTrace
        The trace output from
        :func:`elephant.spike_train_synchrony.spike_contrast` function.
    spiketrains : list of neo.SpikeTrain or None
        Input spike trains, optional. If provided, the raster plot will be
        shown at the bottom.
        Default: None
    title : str or None.
        The plot title. If None, an automatic description will be set.
        Default: None
    lw : float, optional
        The curves line width.
        Default: 1.0
    xscale : str, optional
        X axis scale.
        Default: 'log'
    **kwargs
        Additional arguments, passed in :func:`viziphant.rasterplot.rasterplot`
    Returns
    -------
    axes : matplotlib.Axes.axes
    Examples
    --------
    Spike-contrast synchrony of homogenous Poisson processes.
    .. plot::
        :include-source:
        import numpy as np
        import quantities as pq
        from elephant.spike_train_generation import homogeneous_poisson_process
        from elephant.spike_train_synchrony import spike_contrast
        import viziphant
        np.random.seed(24)
        spiketrains = [homogeneous_poisson_process(rate=20 * pq.Hz,
                       t_stop=10 * pq.s) for _ in range(10)]
        synchrony, trace = spike_contrast(spiketrains, return_trace=True)
        viziphant.spike_train_synchrony.plot_spike_contrast(trace,
            spiketrains=spiketrains, c='gray', s=1)
        plt.show()
    """
    # One panel for the synchrony curves; an optional second panel for the raster.
    nrows = 2 if spiketrains is not None else 1
    fig, axes = plt.subplots(nrows=nrows)
    axes = np.atleast_1d(axes)  # uniform indexing even when there is a single axis
    units = trace.bin_size.units
    bin_sizes = trace.bin_size.magnitude
    # The three traces that make up the spike-contrast measure, as functions
    # of the bin size Delta.
    axes[0].plot(bin_sizes, trace.contrast, lw=lw, label=r'Contrast($\Delta$)',
                 linestyle='dashed', color='limegreen')
    axes[0].plot(bin_sizes, trace.active_spiketrains, lw=lw,
                 label=r'ActiveST($\Delta$)',
                 linestyle='dashdot', color='dodgerblue')
    axes[0].plot(bin_sizes, trace.synchrony, lw=lw,
                 label=r'Synchrony($\Delta$)', color='black')
    # Mark the maximum of the synchrony curve -- the reported synchrony value S.
    bin_id_max = np.argmax(trace.synchrony)
    synchrony_loc = bin_sizes[bin_id_max], trace.synchrony[bin_id_max]
    axes[0].scatter(*synchrony_loc, s=20, c='red', marker='x')
    axes[0].annotate('S', synchrony_loc, color='red', va='bottom', ha='left')
    axes[0].legend()
    axes[0].set_xscale(xscale)
    axes[0].set_xlabel(fr"Bin size $\Delta$ ({units.dimensionality})")
    if title is None:
        title = "Spike-contrast synchrony measure"
    axes[0].set_title(title)
    if spiketrains is not None:
        # Raster plot of the input spike trains below the synchrony curves.
        rasterplot(spiketrains, axes=axes[1], **kwargs)
        axes[1].set_ylabel('neuron')
        axes[1].yaxis.set_label_coords(-0.01, 0.5)
    plt.tight_layout()
    return axes
|
# Demonstration of basic list operations: append, index, pop, reverse, emptiness.
names = []
print(len(names))
names.append("William")
print("The list is empty" if not names else "The list contains sth")
names.append("John")
names.append("Amanda")
print(len(names))
print(names[2])
# Plain iteration over the elements.
for person in names:
    print(person)
# Numbered listing, starting at 1.
for position, person in enumerate(names, start=1):
    print(str(position) + ". " + person)
# Remove the second entry, then print the remaining names in reverse order.
names.pop(1)
names.reverse()
for person in names:
    print(person)
# Reset and show the empty-list branch.
names = []
print("The list is empty" if not names else "The list contains sth")
from locast import get_model
from locast.api import *
def get_comments(request, object, comment_id=None):
    """Return one comment or a paginated list of comments for ``object``.

    :param request: HTTP request; pagination parameters are read from request.GET.
    :param object: The model instance whose comments are queried.
    :param comment_id: If given, return just that comment (validated to belong
        to ``object`` by ``check_comment``); otherwise return a page of comments.
    """
    comment_model = get_model('comment')
    if comment_id:
        comment = check_comment(object, comment_id)
        return APIResponseOK(content=api_serialize(comment))
    comments = comment_model.objects.get_comments(object)
    comments, total, pg = paginate(comments, request.GET)
    comment_arr = [api_serialize(c) for c in comments]
    # BUG FIX: report the total count returned by paginate(), not the length
    # of the current page (which made every page claim total == page size).
    return APIResponseOK(content=comment_arr, total=total, pg=pg)
def post_comment(request, object):
    """Create a comment on ``object`` from the JSON request body.

    Expects a JSON payload with a 'content' field; returns the serialized,
    newly-created comment.
    """
    payload = get_json(request.raw_post_data)
    content = get_param(payload, 'content')
    if content:
        comment = object.comment(request.user, content)
        # BUG FIX: the original serialized a variable that was always None
        # instead of the comment that was just created.
        return APIResponseCreated(content=api_serialize(comment))
    # NOTE(review): a missing/empty 'content' falls through and returns None
    # (as in the original) -- confirm whether an APIBadRequest is preferable.
def check_comment(object, comment_id):
    """Return the comment with ``comment_id`` if it belongs to ``object``.

    :raises exceptions.APIBadRequest: if no such comment exists on ``object``.
    """
    try:
        return object.comments.get(id=comment_id)
    # BUG FIX: ``comment_model`` was referenced without being defined in this
    # scope, so a missing comment raised NameError instead of APIBadRequest;
    # resolve the model the same way get_comments() does.
    except get_model('comment').DoesNotExist:
        raise exceptions.APIBadRequest('Comment is not part of this object')
|
# What is the first term in the Fibonacci sequence to contain 1000 digits?
# (Project Euler 25.)  Uses the closed form F(n) ~ phi**n / sqrt(5): the first
# n with F(n) >= 10**(digits-1) is ceil((digits - 1 + log10(sqrt(5))) / log10(phi)).
from math import ceil, log10, sqrt


def main(digits=1000):
    """Return the index of the first Fibonacci number with ``digits`` digits."""
    phi = (1 + sqrt(5)) / 2
    return ceil((digits - 1 + log10(sqrt(5))) / log10(phi))


# BUG FIX: ``print main()`` is Python 2 syntax and a SyntaxError on Python 3.
print(main())
|
# Align BraTS 2020 radiomic features with the survival-evaluation patient list,
# then write both the raw and the standardized (z-scored) feature tables.
import numpy as np
import pandas as pd

# Hard-coded local paths -- adjust per machine.
rad_feat = pd.read_csv('/media/bmi/Windows/MICCAI CHALLENGE 2020/BraTs 2020/validation_radiomic_features_full.csv')
rad_feat = rad_feat.sort_values(by = ['Brats20ID'])
surv_data = pd.read_csv('/media/bmi/Windows/MICCAI CHALLENGE 2020/BraTs 2020/survival_evaluation.csv')
# print(rad_feat.shape, surv_data.shape)

# Collect the radiomics row for each patient listed in the survival sheet.
# The radiomics IDs carry a '.nii.gz' suffix that the survival sheet lacks.
feat = []
for pat_id in surv_data['BraTS20ID']:
    try:
        feat.append(rad_feat[rad_feat['Brats20ID'] == pat_id+'.nii.gz'].values[0])
    except:
        # Patient present in the survival sheet but missing from radiomics.
        print(pat_id)
df = pd.DataFrame(feat, columns = rad_feat.columns )
# df = df.sort_values(by = ['Brats20ID'])
# df['Survival_days'] = surv_data['Survival_days']
print(df.columns)
df.to_csv('../valid_test.csv',index = False)

# Build the standardized table; the ID column is copied with the suffix stripped.
df_std = pd.DataFrame()
df_std[df.columns[0]] = [i[:-7] for i in df[df.columns[0]]]  # drop '.nii.gz'
for column in df.columns[1:]:
    print(column)
    if column == 'Brats20ID':
        df_std[column] = df[column]
    # NOTE(review): the next `if` is not `elif`, so for 'Brats20ID' the z-score
    # branch below also runs; it fails on the string column and is swallowed by
    # the bare except, leaving the copied column intact -- confirm intended.
    if column == 'Survival_days':
        print(np.max(df[column].values))
        # Scale survival days to [0, 1] by the maximum.
        df_std[column] = (df[column].values)/np.max(df[column].values)
    else:
        # print(df[column].mean)
        # print(np.mean(df[column].values),np.std(df[column].values))
        try:
            # z-score standardization per feature column.
            df_std[column] = (df[column].values - np.mean(df[column].values))/np.std(df[column].values)
        except:
            # Non-numeric or zero-variance columns are silently skipped.
            pass
# df_std['Survival_days'] = surv_data['Survival_days']
# df_std.dropna()
df_std.to_csv('../valid_radiomics_feat_seg_map_std.csv',index = False)
"""
188.
Hard
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time
(ie, you must sell the stock before you buy again).
Example 1:
Input: [2,4,1], k = 2
Output: 2
Explanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4), profit = 4-2 = 2.
Example 2:
Input: [3,2,6,5,0,3], k = 2
Output: 7
Explanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4.
Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
History:
2019.07.12
"""
from typing import List
class Solution:
    """LeetCode 188: maximum profit from at most k stock transactions."""

    def maxProfit(self, k: int, prices: List[int]) -> int:
        """DP over (transactions used, day); O(k*n) time, O(k*n) space.

        profits[j][d] = best profit using at most j transactions through day d.
        """
        days = len(prices)
        profits = [[0 for _ in range(days)] for _ in range(k+1)]
        # A transaction needs a buy day and a sell day, so at most days//2
        # transactions are useful; with k this large the limit is not binding
        # and the greedy "collect every rise" answer is optimal.
        if k > 2*days:
            return self.helper(prices)
        if k == 0 or days == 0:
            return 0
        for j in range(1, k+1):
            # max_diff = max over earlier days d' of profits[j-1][d'] - prices[d'],
            # i.e. the best "balance after buying" carried into day d.
            max_diff = 0
            for d in range(1, days):
                if d == 1:
                    max_diff = profits[j - 1][0] - prices[0]
                else:
                    max_diff = max(max_diff, profits[j - 1][d - 1] - prices[d-1])
                # Either skip trading on day d, or sell today the best earlier buy.
                profits[j][d] = max(profits[j][d-1], max_diff+prices[d])
        return profits[-1][-1]

    def helper(self, prices):
        """Unlimited-transaction profit: sum every day-over-day price rise."""
        res = 0
        for i in range(len(prices) - 1):
            if prices[i] < prices[i + 1]:
                res += prices[i + 1] - prices[i]
        return res

    def maxProfit_solution(self, k: int, prices: List[int]) -> int:
        """Alternative O(k) space solution tracking best buy/sell balance per transaction."""
        if k >= int(len(prices) / 2):
            # k is effectively unlimited: greedy sum of all rises.
            res = 0
            for i in range(1, len(prices)):
                if prices[i] > prices[i - 1]:
                    res += prices[i] - prices[i - 1]
            return res
        if len(prices) == 0 or k == 0:
            return 0
        # buy[i]: best balance after the (i+1)-th buy; sell[i]: after its sell.
        buy = [-prices[0] for i in range(k)]
        sell = [0 for i in range(k)]
        for p in prices:
            for i in range(k):
                if i - 1 >= 0:
                    buy[i] = max(buy[i], sell[i - 1] - p)
                else:
                    buy[i] = max(buy[i], 0 - p)
                sell[i] = max(sell[i], buy[i] + p)
        return sell[-1]

    def maxProfit_dp(self, k: int, prices: List[int]) -> int:
        """Local/global DP variant (local[i][j] = best with a sale on day j)."""
        if not k or not prices:
            return 0
        if k >= len(prices) / 2:
            return self.helper(prices)
        local = [[0 for _ in range(len(prices))] for _ in range(k + 1)]
        dp = [[0 for _ in range(len(prices))] for _ in range(k + 1)]
        for i in range(1, k + 1):
            for j in range(1, len(prices)):
                # Extend yesterday's sale, or start a fresh transaction ending today.
                local[i][j] = max(dp[i - 1][j - 1], local[i][j - 1]) + prices[j] - prices[j - 1]
                dp[i][j] = max(dp[i][j - 1], local[i][j])
        return dp[k][len(prices) - 1]
if __name__ == "__main__":
sol = Solution()
method = sol.maxProfit
cases = [
# (method, (1, [1,2]), 1),
# (method, (2, [2,4,1]), 2),
# (method, (2, [3,2,6,5,0,3]), 7),
(method, (2, [6,1,3,2,4,7]), 7),
]
for i, (func, case, expected) in enumerate(cases):
ans = func(*case)
if ans == expected:
print("Case {:d} Passed".format(i + 1))
else:
print("Case {:d} Failed; Expected {:s} != {:s}".format(i + 1, str(expected), str(ans))) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.