text stringlengths 38 1.54M |
|---|
import sys
import time
# Extend the module search path BEFORE importing libopenrave_interface, which
# lives in /usr/local/lib. (The original wrapped this in a `print`, which only
# printed the None return value of append by accident.)
sys.path.append("/usr/local/lib")
from libopenrave_interface import Environment, v_string, v_double, v2_double
def test_collision(env):
    """Smoke-test continuous collision checking of the robot against the environment.

    NOTE(review): start and goal joint configurations are identical
    ([0.2, 0.0, 0.0]), so only the degenerate start==goal sweep is exercised.
    Blocks for 100 s at the end so the viewer stays open.
    """
    robot = env.getRobot()
    collision_manager = env.getCollisionManager()
    joint_angles_v = v_double()
    joint_angles_v_2 = v_double()
    joint_angles_v[:] = [0.2, 0.0, 0.0]
    joint_angles_v_2[:] = [0.2, 0.0, 0.0]
    # No per-particle values are supplied; empty containers are passed twice.
    particle_joint_values = v2_double()
    env.updateRobotValues(joint_angles_v,
                          joint_angles_v,
                          particle_joint_values,
                          particle_joint_values)
    robot_collision_objects_start = robot.createRobotCollisionObjects(joint_angles_v)
    robot_collision_objects_goal = robot.createRobotCollisionObjects(joint_angles_v_2)
    # Check each start/goal collision-object pair for a continuous collision.
    for i in xrange(len(robot_collision_objects_start)):
        in_collision = collision_manager.inCollisionContinuousEnvironment([robot_collision_objects_start[i],
                                                                           robot_collision_objects_goal[i]])
        print in_collision
    time.sleep(100)
def propagate(env, sensor_name):
    """Continuously step the robot dynamics and react to ground contacts.

    WARNING: this is an endless `while True` loop — it never returns, so any
    code after the call site is unreachable.
    State layout: first half of `current_state` are DOF positions, second
    half velocities (both halves sized len(robot_dof_values)).
    """
    current_state = v_double()
    control_input = v_double()
    control_error = v_double()
    simulation_step_size = 0.0001
    duration = 0.003
    result = v_double()
    robot_dof_values = v_double()
    robot_dof_values_start = v_double()
    env.getRobotDOFValues(robot_dof_values)
    print "len robot_dof_values " + str(len(robot_dof_values))
    #cs = [0.0 for i in xrange(len(robot_dof_values) * 2)]
    cv = [0.0 for i in xrange(len(robot_dof_values))]
    '''cv[0] = 0.3
    cv[1] = -0.3
    cv[4] = -0.3
    cv[5] = 0.3
    cv[6] = 0.3
    cv[7] = -0.3
    cv[8] = 0.3
    cv[9] = -0.3
    cv[10] = 0.3
    cv[11] = -0.3
    cv[12] = -1.5
    cv[13] = 1.5
    cv[14] = -1.5
    cv[15] = 1.5
    cv[16] = -1.5
    cv[17] = 1.5'''
    # Full state = positions (cv) followed by zero velocities.
    cs = [cv[i] for i in xrange(len(cv))]
    cs.extend([0.0 for i in xrange(len(robot_dof_values))])
    #cv[2] = 0.0
    current_state[:] = cs
    control_input[:] = [0.0 for i in xrange(len(robot_dof_values))]
    control_error[:] = [0.0 for i in xrange(len(robot_dof_values))]
    robot = env.getRobot()
    #control_input[1] = 1.0
    robot_dof_values[:] = cv
    robot_dof_values_start[:] = cv
    env.setRobotDOFValues(robot_dof_values)
    while True:
        #print "propagating"
        # Position half of the state before this step (py2 int division).
        robot_dof_values_start[:] = [current_state[i] for i in xrange(len(current_state) / 2)]
        robot.propagate(current_state,
                        control_input,
                        control_error,
                        simulation_step_size,
                        duration,
                        result)
        robot_dof_values[:] = [result[i] for i in xrange(len(result) / 2)]
        t0 = time.time()
        collides = env.robotCollidesContinuous(robot_dof_values_start, robot_dof_values)
        t1 = time.time() - t0  # NOTE(review): timing is computed but never used
        if collides.in_collision:
            # Re-propagate with a ground-contact constraint when specific
            # front-left bodies touch (contact normal = +z at body origin).
            if collides.contact_body_name == "front_left_end_effector" or collides.contact_body_name == "front_left_tibia":
                #control_input[1] = -50.0
                body_point = v_double()
                body_name = "front_left_end_effector"
                world_normal = v_double()
                body_point[:] = [0.0, 0.0, 0.0]
                world_normal[:] = [0.0, 0.0, 1.0]
                robot.propagate_constraints(current_state,
                                            control_input,
                                            control_error,
                                            simulation_step_size,
                                            duration,
                                            body_name,
                                            body_point,
                                            world_normal,
                                            result)
                print [result[i] for i in xrange(len(result))]
                print "prop"
        current_state[:] = [result[i] for i in xrange(len(result))]
        robot_dof_values[:] = [result[i] for i in xrange(len(result) / 2)]
        env.setRobotDOFValues(robot_dof_values)
        #print "result_vec " + str(result_vec)
        #print "propagated"
        time.sleep(0.05)
def prog(joint_angles, sensor_name):
    """Pose the robot at *joint_angles* and pulse the named sensor once.

    NOTE(review): uses the module-global `env` (not a parameter) — confirm
    this is intentional. Velocities are zeroed; sleeps pace the viewer.
    """
    joint_angles_v = v_double()
    joint_velocities = v_double()
    particle_joint_values = v2_double()
    joint_angles_v[:] = [joint_angles[i] for i in xrange(len(joint_angles))]
    joint_velocities[:] = [0.0, 0.0, 0.0]
    env.getRobot().setState(joint_angles_v, joint_velocities)
    env.transformSensorSensorLink(joint_angles_v, sensor_name)
    env.updateRobotValues(joint_angles_v,
                          joint_angles_v,
                          particle_joint_values,
                          particle_joint_values)
    time.sleep(1)
    # Activate -> draw -> deactivate cycle for one sensing pass.
    env.getSensorManager().activateSensor(sensor_name)
    time.sleep(1)
    env.drawBoxes()
    time.sleep(1)
    env.getSensorManager().deactivateSensor(sensor_name)
    time.sleep(1.0)
sensor_file = "sensor_BaseFlashLidar3D.xml"
sensor_name = "FlashLidar3D"
env = Environment()
env.setupEnvironment("env_3dof.xml")
sensors = v_string()
sensors[:] = [sensor_file]
env.loadSensors(sensors)
env.showViewer()
env.getSensorManager()
env.loadRobotFromURDF("model/block_model.urdf")
#dof_values = v_double()
#dof_values[:] = [1.5778636567,-3.28698057487e-06,4.93129297073e-06,-0.028272851672]
#env.setRobotDOFValues(dof_values)
#time.sleep(100)
env.getRobot().setGravityConstant(9.81)
#env.transformSensorToSensorLink(sensor_name)
env.initOctree(0.1)
robot_dof_values = v_double()
env.getRobotDOFValues(robot_dof_values)
robot_dof_values_arr = [robot_dof_values[i] for i in xrange(len(robot_dof_values))]
propagate(env, sensor_name)
time.sleep(100)
new_robot_dof_values[:] = robot_dof_values_arr
env.setRobotDOFValues(new_robot_dof_values)
time.sleep(3)
robot_trans = v_double()
robot_rot = v_double()
robot_trans[:] = [0.0, 0.0, 0.0]
robot_rot[:] = [0.4, 0.4, 0.4]
env.setRobotTransform(robot_trans, robot_rot)
time.sleep(3)
env.transformSensorToSensorLink(sensor_name)
print "activate"
time.sleep(50)
joint_angles = [0.0, 0.0, 0.0]
prog(joint_angles, sensor_name)
joint_angles[0] = 0.1
prog(joint_angles, sensor_name)
joint_angles[0] = -0.1
prog(joint_angles, sensor_name)
joint_angles[0] = 0.0
joint_angles[1] = 0.1
prog(joint_angles, sensor_name)
joint_angles[0] = 0.3
joint_angles[1] = -0.1
prog(joint_angles, sensor_name)
joint_angles[0] = -0.3
prog(joint_angles, sensor_name)
joint_angles[0] = 0.7
joint_angles[1] = 0.2
joint_angles[2] = -0.2
prog(joint_angles, sensor_name)
time.sleep(20)
|
#! /usr/bin/python3
import time
import json
import iota.harness.api as api
import iota.test.apulu.utils.pdsctl as pdsctl
import iota.test.apulu.utils.misc as misc_utils
def GetBgpNbrEntries(json_out, entry_type):
    """Return the PeerAddr of every BGP neighbor entry whose Afi matches
    *entry_type* in the pdsctl JSON output; empty list on parse failure."""
    peers = []
    try:
        data = json.loads(json_out)
    except Exception as e:
        api.Logger.error("No valid {0} entries found in {1}".format(entry_type,
                                                                    json_out))
        api.Logger.error(str(e))
        return peers
    # Output is either wrapped in a "spec" list or is a single object.
    objects = data['spec'] if "spec" in data else [data]
    for obj in objects:
        for entry in obj:
            if entry['Spec']['Afi'] == entry_type:
                api.Logger.info("PeerAddr: %s" % (entry['Spec']['PeerAddr']))
                peers.append(entry['Spec']['PeerAddr'])
    return peers
def ValidateBGPPeerNbrStatus(json_out, nbr_list):
    """Return True when every neighbor in *nbr_list* appears ESTABLISHED in
    the pdsctl "bgp peers" JSON output (netagent mode accepts a single match)."""
    try:
        data = json.loads(json_out)
    except Exception as e:
        api.Logger.error("No valid BGP Nbr's found in %s" % (json_out))
        api.Logger.error(str(e))
        return False
    objects = data['spec'] if "spec" in data else [data]
    matched = 0
    for obj in objects:
        for entry in obj:
            for nbr in nbr_list:
                if (entry['Spec']['PeerAddr'] == nbr and
                        entry['Status']['Status'] == "ESTABLISHED"):
                    matched += 1
                    api.Logger.info("PeerAddr: %s, Status: %s" % ( \
                        entry['Spec']['PeerAddr'], entry['Status']['Status']))
    # check for total entries in established state
    if matched != len(nbr_list):
        # In netagent-IOTA mode, there is only one EVPN Peering with N9K-RR
        if api.GlobalOptions.netagent and matched == 1:
            return True
        api.Logger.error("Not all BGP Nbr's in Established state, "
                         "total_entry_found: %s, total peer entries: %s" % ( \
                         matched, len(nbr_list)))
        return False
    return True
def ValidateBGPOverlayNeighborship(node):
    """Validate overlay (L2VPN/EVPN) BGP neighborship on one node.

    Collects L2VPN peer addresses from "bgp peers-af" and verifies each is
    ESTABLISHED per "bgp peers". Returns True on success or dryrun.
    NOTE(review): sibling ValidateBGPUnderlayNeighborship uses api.IsDryrun()
    instead of api.GlobalOptions.dryrun — confirm which is canonical.
    """
    if api.GlobalOptions.dryrun:
        return True
    status_ok, json_output = pdsctl.ExecutePdsctlShowCommand(node,
                                                             "bgp peers-af",
                                                             "--json",
                                                             yaml=False)
    if not status_ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers-af failed")
        return False
    api.Logger.info("pdstcl show output: %s" % (json_output))
    retList = GetBgpNbrEntries(json_output, "L2VPN")
    if not len(retList):
        api.Logger.error(" - ERROR: No L2VPN entries found in "
                         "show bgp peers-af")
        return False
    api.Logger.info("L2VPN Neighbors : %s" % (retList))
    status_ok, json_output = pdsctl.ExecutePdsctlShowCommand(node,
                                                             "bgp peers",
                                                             "--json",
                                                             yaml=False)
    if not status_ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers failed")
        return False
    api.Logger.info("pdstcl show output: %s" % (json_output))
    if not ValidateBGPPeerNbrStatus(json_output, retList):
        api.Logger.error(" - ERROR: Mismatch in BGP Peer status")
        return False
    return True
def ValidateBGPOverlayNeighborshipInfo():
    """Validate overlay BGP neighborship on every Naples node; True on success."""
    if api.IsDryrun():
        return True
    for node in api.GetNaplesHostnames():
        if not ValidateBGPOverlayNeighborship(node):
            api.Logger.error("Failed in BGP Neighborship validation"
                             " for node: %s" % (node))
            return False
    return True
def ValidateBGPUnderlayNeighborship(node):
    """Validate underlay (IPV4) BGP neighborship on one node.

    Mirrors ValidateBGPOverlayNeighborship but filters for IPV4 address
    family and logs at verbose level. Returns True on success or dryrun.
    """
    if api.IsDryrun():
        return True
    status_ok, json_output = pdsctl.ExecutePdsctlShowCommand(node,
                                                             "bgp peers-af",
                                                             "--json",
                                                             yaml=False)
    if not status_ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers-af failed")
        return False
    api.Logger.verbose("pdstcl show output: %s" % (json_output))
    bgp_peers = GetBgpNbrEntries(json_output, "IPV4")
    if not len(bgp_peers):
        api.Logger.error(" - ERROR: No BGP peer entries found in "
                         "show bgp peers-af")
        return False
    api.Logger.info("BGP peer Neighbors : %s" % (bgp_peers))
    status_ok, json_output = pdsctl.ExecutePdsctlShowCommand(node,
                                                             "bgp peers",
                                                             "--json",
                                                             yaml=False)
    if not status_ok:
        api.Logger.error(" - ERROR: pdstcl show bgp peers failed")
        return False
    api.Logger.verbose("pdstcl show output: %s" % (json_output))
    if not ValidateBGPPeerNbrStatus(json_output, bgp_peers):
        api.Logger.error(" - ERROR: Validating BGP Peer Underlay status")
        return False
    return True
def ValidateBGPUnderlayNeighborshipInfo():
    """Validate underlay BGP neighborship on every Naples node; True on success."""
    if api.IsDryrun():
        return True
    for node in api.GetNaplesHostnames():
        if not ValidateBGPUnderlayNeighborship(node):
            api.Logger.error("Failed in BGP Underlay Neighborship validation "
                             "for node: %s" % (node))
            return False
    return True
def check_underlay_bgp_peer_connectivity(sleep_time=0, timeout_val=0):
    """Poll underlay BGP validation until it succeeds or the timeout elapses.

    Args:
        sleep_time: seconds to wait between retries (0 = no pause).
        timeout_val: overall deadline in seconds; 0 means exactly one attempt.
    Returns:
        api.types.status.SUCCESS on validation, FAILURE otherwise.
    """
    api.Logger.info("Starting BGP underlay validation ...")
    timeout = timeout_val # [seconds]
    timeout_start = time.time()
    retry_count = 1
    while True:
        if ValidateBGPUnderlayNeighborshipInfo():
            return api.types.status.SUCCESS
        # Single-shot when timeout_val == 0; otherwise retry until deadline.
        if timeout_val == 0 or time.time() >= timeout_start + timeout:
            break
        retry_count += 1
        api.Logger.verbose("BGP underlay is still not up, will do retry({0}) "
                           "after {1} sec...".format(retry_count, sleep_time))
        if sleep_time > 0:
            misc_utils.Sleep(sleep_time)
    api.Logger.error("BGP underlay validation failed ...")
    return api.types.status.FAILURE
|
#Import necessary libraries
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
import sklearn.utils
#Load Iris dataset
# NOTE(review): hard-coded absolute Windows path — confirm/parameterize.
df= pd.read_csv("C:\\Users\\rohit.a\\Downloads\\Iris.csv")
cols=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
epoch=10000
n_nodes_h1=100
n_nodes_h2=100
n_classes=3
#One hot encoding target variables
# NOTE(review): chained assignment (df.Species[i] = ...) triggers pandas
# SettingWithCopyWarning and stores numpy arrays in an object column;
# df.loc[i, 'Species'] or pd.get_dummies would be the safe form.
for i in range(0,len(df)):
    if df.Species[i]=='Iris-setosa':
        df.Species[i]=np.asarray([1,0,0])
    elif df.Species[i]=='Iris-versicolor':
        df.Species[i]=np.asarray([0,1,0])
    else:
        df.Species[i]=np.asarray([0,0,1])
# Shuffle rows so train/test splits are class-balanced.
df = sklearn.utils.shuffle(df)
df = df.reset_index(drop=True)
x1=df[cols]
y1=df.Species
xtrain,xtest,ytrain,ytest=train_test_split(x1,y1,test_size=0.2,random_state=20)
# TF1-style graph: placeholders for 4 features and 3 one-hot classes.
x= tf.placeholder(tf.float32, shape=[None,4])
y_=tf.placeholder(tf.float32, shape=[None,3])
#Initializing weights and biases of hidden layers and output layer
hidden_layer1={'weights':tf.Variable(tf.random_normal([4,100])),'bias':tf.Variable(tf.random_normal([100]))}
hidden_layer2={'weights':tf.Variable(tf.random_normal([100,100])),'bias':tf.Variable(tf.random_normal([100]))}
output_layer= {'weights':tf.Variable(tf.random_normal([100,3])),'bias':tf.Variable(tf.random_normal([3]))}
#Activation function of hidden layers and output layer
# NOTE(review): softmax on hidden layers is unusual (sigmoid/relu is typical)
# and taking log(softmax) manually can underflow; tf's
# softmax_cross_entropy_with_logits would be numerically safer.
l1=tf.add(tf.matmul(x,hidden_layer1['weights']),hidden_layer1['bias'])
l1=tf.nn.softmax(l1)
l2=tf.add(tf.matmul(l1,hidden_layer2['weights']),hidden_layer2['bias'])
l2=tf.nn.softmax(l2)
l3=tf.add(tf.matmul(l2,output_layer['weights']),output_layer['bias'])
y=tf.nn.softmax(l3)
#cost function
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
#optimiser
train_step = tf.train.AdamOptimizer(0.01).minimize(cross_entropy)
#calculating accuracy of our model
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess = tf.InteractiveSession()
#initialising variables
init = tf.global_variables_initializer()
sess.run(init)
# Full-batch training; Series.as_matrix() is deprecated (use .to_numpy()).
for step in range(0,epoch):
    _,c=sess.run([train_step,cross_entropy], feed_dict={x: xtrain, y_:[t for t in ytrain.as_matrix()]})
    if step% 500==0 :
        print("Loss at step %d: %f" %(step,c))
        print("Accuracy",sess.run(accuracy,feed_dict={x: xtest, y_:[t for t in ytest.as_matrix()]}))
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the poll app's UserCount model (auto PK + small positive count)."""
    dependencies = [
        ('poll', '0003_auto_20190127_2335'),
    ]
    operations = [
        migrations.CreateModel(
            name='UserCount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('user_count', models.PositiveSmallIntegerField(default=0)),
            ],
        ),
    ]
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""An entry point for invoking remote function inside a job."""
from __future__ import absolute_import
import argparse
import sys
import json
import os
import boto3
from sagemaker.experiments.run import Run
from sagemaker.remote_function.job import (
KEY_EXPERIMENT_NAME,
KEY_RUN_NAME,
)
from sagemaker.session import Session
from sagemaker.remote_function.errors import handle_error
from sagemaker.remote_function import logging_config
SUCCESS_EXIT_CODE = 0
def _parse_agrs():
"""Parses CLI arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--region", type=str, required=True)
parser.add_argument("--s3_base_uri", type=str, required=True)
parser.add_argument("--s3_kms_key", type=str)
parser.add_argument("--run_in_context", type=str)
args, _ = parser.parse_known_args()
return args
def _get_sagemaker_session(region):
    """Get sagemaker session for interacting with AWS or Sagemaker services

    Builds a boto3 session pinned to *region* and wraps it in a
    sagemaker.session.Session.
    """
    boto_session = boto3.session.Session(region_name=region)
    return Session(boto_session=boto_session)
def _load_run_object(run_in_context: str, sagemaker_session: Session) -> Run:
    """Deserialize the JSON-encoded run context into an experiments Run."""
    run_dict = json.loads(run_in_context)
    experiment_name = run_dict.get(KEY_EXPERIMENT_NAME)
    run_name = run_dict.get(KEY_RUN_NAME)
    return Run(
        experiment_name=experiment_name,
        run_name=run_name,
        sagemaker_session=sagemaker_session,
    )
def _execute_remote_function(sagemaker_session, s3_base_uri, s3_kms_key, run_in_context, hmac_key):
    """Execute stored remote function

    Loads the serialized function from *s3_base_uri* (verified with
    *hmac_key*, decrypted with *s3_kms_key* if set) and invokes it,
    optionally inside the experiments Run described by *run_in_context*.
    """
    # Imported lazily so module import stays cheap for the CLI parser path.
    from sagemaker.remote_function.core.stored_function import StoredFunction

    stored_function = StoredFunction(
        sagemaker_session=sagemaker_session,
        s3_base_uri=s3_base_uri,
        s3_kms_key=s3_kms_key,
        hmac_key=hmac_key,
    )
    if run_in_context:
        run_obj = _load_run_object(run_in_context, sagemaker_session)
        # Execute within the Run context so metrics/params attach to it.
        with run_obj:
            stored_function.load_and_invoke()
    else:
        stored_function.load_and_invoke()
def main():
    """Entry point for invoke function script.

    Parses CLI args, runs the stored remote function, and exits with
    SUCCESS_EXIT_CODE or the code produced by handle_error on failure.
    """
    logger = logging_config.get_logger()
    exit_code = SUCCESS_EXIT_CODE
    # BUG FIX: pre-bind everything the except-branch references so an early
    # failure (e.g. in argument parsing) reports the real error via
    # handle_error instead of raising NameError on unbound locals.
    sagemaker_session = None
    s3_base_uri = None
    s3_kms_key = None
    hmac_key = None
    try:
        args = _parse_agrs()
        region = args.region
        s3_base_uri = args.s3_base_uri
        s3_kms_key = args.s3_kms_key
        run_in_context = args.run_in_context
        # Shared secret used to authenticate the serialized function payload.
        hmac_key = os.getenv("REMOTE_FUNCTION_SECRET_KEY")
        sagemaker_session = _get_sagemaker_session(region)
        _execute_remote_function(
            sagemaker_session=sagemaker_session,
            s3_base_uri=s3_base_uri,
            s3_kms_key=s3_kms_key,
            run_in_context=run_in_context,
            hmac_key=hmac_key,
        )
    except Exception as e:  # pylint: disable=broad-except
        logger.exception("Error encountered while invoking the remote function.")
        exit_code = handle_error(
            error=e,
            sagemaker_session=sagemaker_session,
            s3_base_uri=s3_base_uri,
            s3_kms_key=s3_kms_key,
            hmac_key=hmac_key,
        )
    finally:
        sys.exit(exit_code)


if __name__ == "__main__":
    main()
|
import sys
from string import ascii_lowercase as al
myDict = {}
anagramDict = {}
def enumdict(alpha):
    """Fill the module-level myDict with letter -> position and return it."""
    for index, letter in enumerate(alpha):
        myDict[letter] = index
    return myDict
def sort_insertion(my_list):
    """Sort *my_list* in place with insertion sort and return the same list."""
    for idx in range(1, len(my_list)):
        current = my_list[idx]
        slot = idx
        # Shift larger sorted elements right until current's slot is found.
        while slot > 0 and my_list[slot - 1] > current:
            my_list[slot] = my_list[slot - 1]
            slot -= 1
        if slot != idx:
            my_list[slot] = current
    return my_list
def create_Signature(word):
    """Compute *word*'s anagram signature and register the word under it.

    The signature is the word's letters sorted alphabetically (non-letter
    characters are ignored); all words sharing a signature are anagrams and
    end up in the same anagramDict bucket.
    """
    word_num_list = []
    for char in word:
        if char in al:
            word_num_list.append(myDict[char])
        # skip special characters (digits, punctuation, newline)
    # Sort the letter indices so anagrams yield identical signatures.
    sort_insertion(word_num_list)
    # BUG FIX: the old code looped over anagramDict.items() and compared the
    # (key, value) tuple against a string — that could never match — and it
    # rebuilt the signature by scanning myDict in dict-iteration order, which
    # discarded the sorted order. Map indices straight back through `al`.
    n_word = ''.join(al[num] for num in word_num_list)
    # setdefault covers both the first and subsequent words for a signature.
    anagramDict.setdefault(n_word, []).append(word)
def main():
enumdict(list(al))
# words = open('/usr/share/dict/american-english','r')
words = open('/home/zerobyte/Desktop/Files/words.txt','r')
for word in words:
word = word.replace("'", "").lower()
create_Signature(word)
print 'Signature created :',word
print anagramDict
main()
|
import copy
import time
import climate
import numpy as np
import theanets
import utils
from RawData import RawData
from TrainingTimeSeries import TrainingTimeSeries
climate.enable_default_logging()
def test_RawData_read_csv_multiple_features(algo, layers, train, valid, test):
    """Train one theanets Regressor per horizon chunk and evaluate on test data.

    Returns (original, result, [tm, vm], num_params, i) where tm/vm are the
    monitors from the LAST itertrain iteration of the LAST network.
    NOTE(review): `net` is built but only used for num_params; `data` is the
    module-global list of TrainingTimeSeries — `data.deNormalizeValue(...)`
    looks like it should be `data[j].deNormalizeValue(...)`; confirm.
    """
    net = theanets.Regressor(layers=layers, loss='mse')
    assert len(train) == len(test) == len(valid)
    networks = []
    for i in range(len(train)):
        networks.append(theanets.Regressor(layers=layers, loss='mse'))
        # Iterate training to exhaustion (patience/max_updates stop it);
        # per-step monitors are intentionally discarded.
        for j, (tm, vm) in enumerate(networks[i].itertrain(train=train[i],
                valid=valid[i],
                algo=algo, patience=10, max_updates=5000, learning_rate=0.02,
                min_improvement=0.001, momentum=0.9)):
            pass
    result = []
    original = []
    # Predict each test sample with every per-chunk network and de-normalize.
    for i in range(len(test[0][0])):
        orig = []
        res = []
        training = np.reshape(test[0][0][i], (1, len(test[0][0][i])))
        for j in range(len(test)):
            orig.append(data.deNormalizeValue(test[j][1][i]))
            res.append(data.deNormalizeValue(networks[j].predict(training)[0]))
        original.append(np.asarray(orig).flatten())
        result.append(np.asarray(res).flatten())
    result = np.asarray(result)
    original = np.asarray(original)
    return original, result, [tm, vm], net.num_params, i
# --- Benchmark driver: sweep lag configurations and hidden-layer sizes. ---
algos = ['nag']
# Each entry is a tuple of lag windows fed to TrainingTimeSeries.
lags = [([1, 6],), ([1, 24],), ([1, 48],),
        ([1, 24], [48, 72]),
        ([1, 24], [168, 168 + 6]),
        ([1, 24], [168, 168 + 24]),
        ([1, 48], [168, 168 + 12]),
        ([1, 48], [168, 168 + 24]),
        ([1, 24], 48, 168)]
rawData = RawData('../../DataManipulation/files/load_weather_jan.csv')
timeSeries = rawData.readAllValuesCSV(targetCol=2)
k = 1
futures = 119
outputCount = 24
# Python 2 integer division: (119 + 1) / 24 == 5 models of 24 outputs each.
modelCount = (futures + 1) / outputCount
for lag in lags:
    # Deep-copy so each lag configuration starts from the pristine series.
    ts = copy.deepcopy(timeSeries)
    data = []
    train = []
    valid = []
    test = []
    for i in range(modelCount):
        j = i * 24
        data.append(TrainingTimeSeries(ts, lags=lag, futures=(j, j + 24)))
        train.append([data[i].getTrainingTrain(), data[i].getTrainingTarget()[:, j:(j + 24)]])
        valid.append([data[i].getValidationTrain(), data[i].getValidationTarget()[:, j:(j + 24)]])
        test.append([data[i].getTestTrain(), data[i].getTestTarget()[:, j:(j + 24)]])
    inLayer = theanets.layers.Input(data[0].trainLength, name='inputLayer')
    for algo in algos:
        for hiddenNeuron in range(70, 126, 5):
            hiddenLayer = theanets.layers.Feedforward(hiddenNeuron, inputs=inLayer.size, activation='sigmoid',
                                                      name='hiddenLayer')
            outLayer = theanets.layers.Feedforward(outputCount, inputs=hiddenLayer.size, activation='linear')
            layers = [inLayer, hiddenLayer, outLayer]
            start_time = time.time()
            orig, result, error, n_params, iterations = test_RawData_read_csv_multiple_features(algo, layers, train,
                                                                                                valid, test)
            trainTime = time.time() - start_time
            rmse = utils.calculateRMSE(orig, result)
            mape = utils.calculateMAPE(orig, result)
            smape = utils.calculateSMAPE(orig, result)
            # Start the plotting
            title = 'algo:%s, lags:%s, hidden neurons:%s, testSample:%s TrainTime:%.2f sec' % (
                algo, lag, hiddenNeuron, len(result), trainTime)
            utils.plotFigures(orig, result, title, k, locationToSaveImages='../results/multiple_model/')
            k += 1
            utils.benchmark(str(lag).replace(',', ':'), inLayer.size, hiddenNeuron, outLayer.size, error[1][0]['err'],
                            error[1][1]['err'],
                            n_params, rmse, mape, smape, trainTime, iterations,
                            fileName='../performance/neuralNetBenchmark_m_m.csv')
|
#Filereading
import os as operatingsys  # NOTE(review): imported but unused here

filename = "foo.txt"
targetone = "foo"
targettwo = "Foo"
myarray = []
yescount = 0
# BUG FIX: the handle was named `file`, shadowing the builtin; a context
# manager also guarantees the file is closed.
with open(filename) as infile:
    content = infile.readlines()
for l in content:
    myarray.append(l)
# Count lines containing either target (case-variant), once per line.
for ll in myarray:
    if targetone in ll:
        yescount += 1
    elif targettwo in ll:
        yescount += 1
print(myarray)
#print(targettwo+" "+"Count "+str(yescount))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
LOCAL_DEBUG = True
import logging
import re
import sys
import os
import socket
import threading
import time
from lib.common import *
#FLAG = "#d9Fa0j#"
from lib.fast_request import fast_request
from lib.ex_httplib2 import *
import Queue
sys_path = lambda relativePath: "%s/%s"%(os.getcwd(), relativePath)
filename = "CgiCheckScript.py"
path_vul_file = sys_path("/vuls_db/vuldb_5")
#path_vul_file = "C:\\svn\\nvs_phase2\\scripts\\vuls_db_repo\\vuldb_5"
len_dic = {}
def del_continuous(data, continuous_id, max_continuous):
    """Drop runs of results whose ids are consecutive (likely false positives).

    A run is removed when it contains more than ``max_continuous`` gaps of 1
    between successive ids in ``continuous_id``. Both ``data`` and
    ``continuous_id`` are mutated in place.

    Returns a 3-tuple ``(ifcontinuous, data, continuous_id)`` where
    ``ifcontinuous`` says whether anything was removed.
    """
    ifcontinuous = False
    if max_continuous < 1:
        # BUG FIX: this early exit returned a 2-tuple while every caller
        # unpacks three values; return the full 3-tuple shape.
        return ifcontinuous, data, continuous_id
    ret = data
    count = 0
    to_dels = []
    to_del = []
    if len(continuous_id) >= max_continuous:
        # Walk successive id pairs, accumulating indices of consecutive runs.
        for i in range(1, len(continuous_id)):
            if continuous_id[i] - continuous_id[i - 1] == 1:
                count = count + 1
                if i - 1 not in to_del:
                    to_del.append(i - 1)
                if i not in to_del:
                    to_del.append(i)
            else:
                # Run broken: keep it only if it was long enough.
                if count >= max_continuous:
                    to_dels.append(to_del)
                to_del = []
                count = 0
        # Flush a run that reaches the end of the list.
        if count >= max_continuous:
            to_dels.append(to_del)
    if len(to_dels) > 0:
        ifcontinuous = True
        # Remove flagged entries from data (list.remove drops the first
        # equal element, matching the original behavior).
        to_del_data = []
        for td in to_dels:
            for d in td:
                to_del_data.append(ret[d])
        for d in to_del_data:
            ret.remove(d)
        # Remove the corresponding ids as well.
        to_del_data = []
        for td in to_dels:
            for d in td:
                to_del_data.append(continuous_id[d])
        for d in to_del_data:
            continuous_id.remove(d)
    return ifcontinuous, ret, continuous_id
class check_page_thread(threading.Thread):
    """Worker thread that probes a web host for known vulnerable CGI paths.

    Each thread takes a slice of the vulnerability DB (``vuls``, lines of
    FLAG-separated fields), requests each candidate path via HEAD/GET, and
    records hits in ``self.ret`` as {'vul': record, 'len': content_length}.
    Runs of consecutive hits are pruned via del_continuous() as they are
    usually a catch-all error page, not real findings.
    """
    def __init__(self, ob, domain, basedir, vuls, exp_queue):
        """Capture scan options from ``ob`` and prepare the HTTP client."""
        threading.Thread.__init__(self)
        self.ob = ob
        self.domain = domain
        self.connect_domain = domain
        # Strip the trailing slash from the base directory.
        self.basedir = basedir[0:len(basedir) - 1]
        self.vuls = vuls
        self.req_method = ""
        self.rec = ob["rec"]  # NOTE(review): original comment asked what this variable means — presumably a request/error recorder from lib.common; confirm
        self.isForce = ob['isForce']
        self.web_speed = ob["web_speed"]
        self.web_minute_package_count = ob["web_minute_package_count"]
        self.ssl_enable = False
        if ob['scheme'] == 'https':
            self.ssl_enable = True
        #end if
        self.ret = []
        self.total_check = 0
        self.continuous_id = []
        self.max_request = 50
        self.no_code_count = 0
        self.exp_queue = exp_queue
        # Response lengths treated as "error page" (refined later).
        self.error_len_range = range(-2, 0)
        self.http = ex_httplib2(ob['rec'], ob['cookie'])
        self.http.httlib2_set_follow_redirects(False)
        self.http.httlib2_set_timout(ob['web_timeout'])
    #end def
    def check_if_support_head(self):
        """Return True when the server answers a HEAD request for "/" sanely."""
        fr = fast_request(self.domain, self.basedir, self.rec, self.ob, self.ssl_enable)
        fr.connect()
        fr.req_url("/", "HEAD", 512)
        fr.close()
        if fr.code == "200" or fr.code == "301" or fr.code == "302" or fr.code == "403":
            return True
        #end if
        return False
    #end if
    def get_error_page_len(self):
        """Estimate the length range of the server's not-found/error page.

        Requests one long bogus path and one short one; when both return 200
        the lengths bound self.error_len_range (soft-404 detection).
        """
        fr = fast_request(self.domain, self.basedir, self.rec, self.ob, self.ssl_enable)
        fr.connect()
        fr.req_url("/ffffffffffffffffffffffff12121212121212121222222222222222222ffffffffffffffffffffffffffffffffffffff", self.req_method, 512)
        long_len = fr.ret_len
        fr.req_url("/zz", self.req_method, 512)
        short_len = fr.ret_len
        if fr.code == "200":
            if long_len == short_len:
                self.error_len_range = range(short_len, short_len + 1)
            #end if
            if long_len > short_len:
                self.error_len_range = range(short_len, long_len + 1)
            else:
                self.error_len_range = range(long_len, short_len + 1)
            #end if
        #end if
    #end def
    def handle_result(self, url, factor, detail, request_data, response_data, len = 0):
        """Record one hit; return True when it completed a consecutive run.

        NOTE(review): the ``len`` parameter shadows the builtin. len_dic is a
        module-global shared by all threads without locking.
        """
        #self.ret.append(getRecord(self.ob, url, factor, detail, request_data, response_data))
        self.ret.append({'vul':getRecord(self.ob, url, factor, detail, request_data, response_data),'len':len})
        if len_dic.has_key(len):
            len_dic[len] += 1
        else:
            len_dic[len] = 1
        #end if
        self.continuous_id.append(self.total_check)
        # Prune runs of >3 consecutive hits (soft-404 style false positives).
        continuous,ret,continuous_id = del_continuous(self.ret, self.continuous_id, 3)
        if continuous:
            self.ret = ret
            self.continuous_id = continuous_id
        #end if
        return continuous
    #end def
    def run(self):
        """Scan this thread's slice of the vulnerability DB against the host."""
        global FLAG #"#d9Fa0j#"
        # Prefer HEAD when supported; fall back to GET.
        if self.check_if_support_head():
            self.req_method = "HEAD"
        else:
            self.req_method = "GET"
        #end if
        # Bail out when the server answers 200 for clearly-bogus extensions.
        if checkErrorFileStatus(self.ob['scheme'],self.ob['domain'],self.ob['base_path'],".cgi",self.req_method) == False:
            return
        #end if
        if checkErrorFileStatus(self.ob['scheme'],self.ob['domain'],self.ob['base_path'],".jsp",self.req_method) == False:
            return
        #end if
        self.get_error_page_len()
        exec_time = time.time()
        # Entries whose expected status is not 200/403 need a body check later.
        need_check_content = []
        if self.req_method == "HEAD":
            fr = fast_request(self.domain, self.basedir, self.rec, self.ob, self.ssl_enable)
            count = 0
            for v in self.vuls:
                # Too many hits means we are matching an error page; abort.
                if len(self.ret) > 10:
                    self.ret = []
                    logging.getLogger().error("File:%s, too many match urls, task id:%s, domain:%s" % (filename,self.ob['task_id'],str(self.domain)))
                    return
                #end if
                #if self.rec.err_out() and not self.isForce:
                    #return
                #end if
                try:
                    '''
                    if fr.connectOK == False:
                        fr.connect()
                    #end if
                    '''
                    # Fields: [?, factor, ?, path, ?, expected_status, detail, ?]
                    vs = v.strip().split(FLAG)
                    if len(vs) != 8:
                        continue
                    #end if
                    if vs[5] != "200" and vs[5] != "403":
                        need_check_content.append(v)
                        continue
                    #end if
                    # Skip query-only and path-traversal style entries.
                    if vs[3][0:2] == "/?" or vs[3].find("../") != -1 or vs[3].find("..\\") != -1:
                        continue
                    #end if
                    if count >= self.max_request:
                        #fr.close()
                        #fr.connect()
                        count = 0
                    #end if
                    count = count + 1
                    vs[3] = vs[3].replace("@CGIDIRS", "/cgi-bin/")
                    self.total_check = self.total_check + 1
                    url = getFullUrl(self.ob['scheme'],self.ob['domain'],self.ob['base_path'],vs[3])
                    prev =time.time()
                    # res, content = self.http.request(url)
                    res, content = yx_httplib2_request(self.http,url)
                    if res and res.has_key("status") and res['status'] == vs[5]:
                        request = getRequest(url)
                        response = getResponse(res)
                        content_len = 0
                        if res and res.has_key('content-length'):
                            content_len = res['content-length']
                        #end if
                        if self.handle_result(url, vs[1], vs[6], request, response, content_len) == True:
                            return
                        #end if
                    #end if
                    # Throttle to the configured bandwidth/packet budget.
                    if flowControl(self,time.time()-prev,self.rec,self.isForce,self.web_speed,self.web_minute_package_count,False):
                        return
                    '''
                    recv_data = fr.req_url(vs[3], self.req_method, 512)
                    if fr.code == "":
                        self.no_code_count = self.no_code_count + 1
                        if self.no_code_count > 50:
                            self.max_request = 1
                        #end if
                        fr.close()
                        fr.connect()
                        recv_data = fr.req_url(vs[3], self.req_method, 512)
                    #end if
                    if fr.ret_len == 0:
                        continue
                    #end if
                    if fr.code.find(vs[5]) != -1:
                        if fr.ret_len not in self.error_len_range:
                            if self.handle_result(fr.url, vs[1], vs[6], fr.request_data, fr.response_data) == True:
                                return
                            #end if
                        #end if
                    #end if
                    '''
                except Exception, e:
                    logging.getLogger().error("File:" + filename + ", check_page_thread::run function :" + str(e) + ",task id:" + self.ob['task_id'] + ", check_path: " + vs[3] + ",domain:" + str(self.domain))
                #end try
            #end for
            #fr.close()
        elif self.req_method == "GET":
            fr = fast_request(self.domain, self.basedir, self.rec, self.ob, self.ssl_enable)
            for v in self.vuls:
                if len(self.ret) > 10:
                    self.ret = []
                    logging.getLogger().error("File:%s, too many match urls, task id:%s, domain:%s" % (filename,self.ob['task_id'],str(self.domain)))
                    return
                #end if
                #if self.rec.err_out() and not self.isForce:
                    #return
                #end if
                try:
                    if fr.connectOK == False:
                        fr.connect()
                    #end if
                    vs = v.strip().split(FLAG)
                    if len(vs) != 8:
                        continue
                    #end if
                    if vs[5] != "200" and vs[5] != "403":
                        need_check_content.append(v)
                        continue
                    #end if
                    if vs[3][0:2] == "/?" or vs[3].find("../") != -1 or vs[3].find("..\\") != -1:
                        continue
                    #end if
                    vs[3] = vs[3].replace("@CGIDIRS", "/cgi-bin/")
                    request_data = '%s %s HTTP/1.1\r\nHost: %s\r\n\r\n' % (self.req_method, self.basedir + vs[3], self.domain)
                    prev =time.time()
                    recv_data = fr.req_url(vs[3], self.req_method, 512)
                    self.total_check = self.total_check + 1
                    if fr.ret_len == 0:
                        continue
                    #end if
                    # Exclude "safedog" WAF interception pages from matches.
                    if fr.code.find(vs[5]) != -1 and fr.code.find('http://www.safedog.cn/') < 0: # or fr.code.find("403") != -1:
                        """
                        print "-------------------------------------------------"
                        print "http://" + self.domain + self.basedir + vs[3]
                        print "-------------------------------------------------"
                        print vs[6]
                        print "-------------------------------------------------"
                        print fr.request_data
                        print "-------------------------------------------------"
                        print fr.response_data
                        print "-------------------------------------------------"
                        """
                        if fr.ret_len not in self.error_len_range:
                            if self.handle_result(fr.url, vs[1], vs[6], fr.request_data, fr.response_data, fr.ret_len) == True:
                                return
                            #end if
                        #end if
                    #end if
                    fr.close()
                    if flowControl(self,time.time()-prev,self.rec,self.isForce,self.web_speed,self.web_minute_package_count,False):
                        return
                    #end if
                except Exception, e:
                    logging.getLogger().error("File:" + filename + ", check_page_thread::run function :" + str(e) + ",task id:" + self.ob['task_id'] + ", check_path: " + vs[3] + ",domain:" + str(self.domain))
                #end try
            #end for
        #end if
        # Second pass: entries that must be confirmed by response-body content.
        for v in need_check_content:
            try:
                if len(self.ret) > 10:
                    self.ret = []
                    logging.getLogger().error("File:%s, too many match urls, task id:%s, domain:%s" % (filename,self.ob['task_id'],str(self.domain)))
                    return
                #end if
                #if self.rec.err_out() and not self.isForce:
                    #return
                #end if
                if fr.connectOK == False:
                    fr.connect()
                #end if
                vs = v.strip().split("#d9Fa0j#")
                if len(vs) != 8:
                    continue
                #end if
                vs[3] = vs[3].replace("@CGIDIRS", "/cgi-bin/")
                prev =time.time()
                recv_data = fr.req_url(vs[3], "GET", 2048)
                self.total_check = self.total_check + 1
                if fr.ret_len == 0:
                    continue
                #end if
                # Here vs[5] is a content marker, not a status code.
                if fr.code == "200" and recv_data.find(vs[5]) != -1 and recv_data.find('http://www.safedog.cn/') < 0:
                    """
                    print "-------------------------------------------------"
                    print "http://" + self.domain + self.basedir + vs[3]
                    print "-------------------------------------------------"
                    print vs[6]
                    print "-------------------------------------------------"
                    print request_data
                    print "-------------------------------------------------"
                    print response_data
                    print "-------------------------------------------------"
                    """
                    if fr.ret_len not in self.error_len_range:
                        if self.handle_result(fr.url, vs[1], vs[6], fr.request_data, fr.response_data, fr.ret_len) == True:
                            return
                        #end if
                #end if
                fr.close()
                if flowControl(self,time.time()-prev,self.rec,self.isForce,self.web_speed,self.web_minute_package_count,False):
                    return
                #end if
            except Exception, e:
                #print e
                logging.getLogger().error("File:" + filename + ", check_page_thread::run function :" + str(e) + ",task id:" + self.ob['task_id'] + ", check_path: " + vs[3] + ",domain:" + str(self.domain))
            #end try
        #end for
        exec_time = time.time() - exec_time
    #end def
def path_check_mgr(ob, domain, basedir, max_thread):
    """Split the vulnerability DB across worker threads and merge their hits.

    Reads path_vul_file, partitions its lines over at most 4
    check_page_thread workers, runs them to completion and concatenates
    their result lists. Returns [] on any error.
    NOTE(review): the DB file handle is never closed, and thread_vul_num
    uses Python 2 integer division.
    """
    try:
        f = open(path_vul_file, "r")
        lines = f.readlines()
        lines_num = len(lines)
        # Small DBs are not worth parallelizing.
        if lines_num < max_thread or lines_num < 200:
            max_thread = 1
        #end if
        thread_data = []
        if max_thread > 4:
            max_thread = 4
        #end if
        #max_thread = 1
        thread_vul_num = lines_num / max_thread
        for i in range(0, max_thread):
            tmp = []
            for c in range(0, thread_vul_num):
                tmp.append(lines.pop())
            #end for
            thread_data.append(tmp)
        #end for
        # Remainder lines go to the last worker.
        thread_data[max_thread - 1] = thread_data[max_thread - 1] + lines
        threads = []
        exp_queue = Queue.Queue()
        for i in thread_data:
            threads.append(check_page_thread(ob, domain, basedir, i, exp_queue))
        #end for
        for t in threads:
            t.start()
        #end for
        for t in threads:
            t.join()
        #end for
        total_check = 0
        for t in threads:
            total_check = total_check + t.total_check
        #end for
        ret = []
        for t in threads:
            ret = t.ret + ret
        #end for
        #print total_check
        #print "total find:",len(ret)
        return ret
    except Exception,e:
        logging.getLogger().error("File:" + filename + ", path_check_mgr function :" + str(e) + ",task id:" + ob['task_id'] + ", domain:" + ob['domain'])
        return []
    #end try
#end def
def run_domain(http,ob):
    # Run a full path scan for one domain, then suppress findings whose
    # response length matches a known "soft 404" page length.
    # :param http: unused here — NOTE(review): kept for the caller's interface
    # :param ob: task context dict (task_id, domain, base_path, len_404, ...)
    # :return: list of vulnerability entries, or [] on error/false-positive storm
    t = time.time()
    try:
        ret = path_check_mgr(ob, ob["domain"], ob["base_path"], ob["max_thread"])
        # Grow ob['len_404'] (capped around 10 entries) with response lengths
        # that occurred more than once — those are assumed soft-404 pages.
        # NOTE(review): `len_dic` is a module-level counter defined elsewhere.
        if len(ob['len_404']) > 10:
            pass
        else:
            for k in len_dic.keys():
                if len_dic[k] > 1:
                    if k in ob['len_404']:
                        pass
                    else:
                        ob['len_404'].append(k)
                    #end if
                #end if
            #end for
        #end if
        list = []  # NOTE(review): shadows the builtin `list`
        for row in ret:
            if row['len'] not in ob['len_404']:
                list.append(row['vul'])
            #end if
        #end for
        # More than 5 hits is treated as a false-positive storm: drop all.
        if list and len(list) > 5:
            return []
        else:
            return list
        #end if
        #return ret
    except Exception,e:
        logging.getLogger().error("File:" + filename + ", run_domain function :" + str(e) + ",task id:" + ob['task_id'] + ", domain: " + ob['domain'])
        write_scan_log(ob['task_id'],ob['domain_id'],"File:" + filename + ", run_domain function :" + str(e))
        return []
    #end try
#end def
if __name__ == '__main__':
    # Ad-hoc manual test: scan a single hard-coded host and print elapsed time.
    t = time.time()
    #www.vooioov.com
    #192.168.9.176
    path_check_mgr(None, "119.147.51.146", "/", 10)
    print time.time() - t
|
#
# This file is subject to the terms and conditions defined in the
# file 'LICENSE', which is part of this source code package.
#
from collections import OrderedDict
from datetime import date, datetime
import importlib
import re
from flask import abort
from flask_restplus import Resource
from sqlalchemy.inspection import inspect
from sqlalchemy.exc import IntegrityError
from rdr_server.dao.base_dao import BaseDao
from rdr_server.dao.exceptions import RecordNotFoundError
from rdr_server.model.base_model import ModelMixin
def response_handler(func):
    """
    Decorator mapping data-layer exceptions raised by *func* to HTTP errors.

    :param func: view function/method to wrap
    :return: wrapped callable that aborts with 409/404/500 on failure
    """
    from functools import wraps

    # FIX: without functools.wraps the wrapper hides the view's name and
    # docstring from flask-restplus introspection/Swagger docs.
    @wraps(func)
    def f(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        # TODO: Log exceptions here, handle more exception types.
        except IntegrityError:
            abort(409, 'duplicate')
        except RecordNotFoundError:
            abort(404, 'not found')
        except Exception:
            abort(500, 'server error')
    return f
def user_auth(f):
    """Checks whether user is logged in or raises error 401.

    NOTE(review): the original guard was ``if True is False:`` — a stub that
    can never fire, so authentication is effectively disabled. Behavior is
    preserved (never aborts) but written explicitly; wire in a real session
    check before production use.
    """
    from functools import wraps

    @wraps(f)  # keep the wrapped view's identity for flask-restplus
    def decorator(*args, **kwargs):
        user_is_authenticated = True  # TODO: replace with a real login check
        if not user_is_authenticated:
            abort(401)
        return f(*args, **kwargs)
    return decorator
class BaseDaoApi(Resource):
    """
    Generic Api Class with support for DAO objects.

    Provides helpers to convert SQLAlchemy model/result objects into plain
    python structures and to convert request payloads back into model-ready
    values.
    """
    # DAO used by the concrete endpoint subclasses; each subclass sets it.
    dao: BaseDao = None

    def to_dict(self, data=None):
        """
        Convert sqlalchemy model/result objects into a python dict.
        :param data: a Model, a Result row, or a list of either
        :return: dict for a single record, list of dicts for a list
        :raises ValueError: when data is not a recognized type
        """
        if not data or (isinstance(data, list) and len(data) == 0):
            return dict()
        # handle a list of objects
        if isinstance(data, list):
            results = list()
            obj = data[0]
            # Plain Result rows expose a '_fields' attribute.
            if hasattr(obj, '_fields'):
                for result in data:
                    results.append(self.result_to_dict(result))
                return results
            # Model objects know how to serialize themselves.
            if isinstance(obj, ModelMixin):
                for rec in data:
                    results.append(self.model_to_dict(rec))
                return results
        # handle a single Result record
        if hasattr(data, '_fields'):
            return self.result_to_dict(data)
        # handle a single Model record
        if isinstance(data, ModelMixin):
            return self.model_to_dict(data)
        raise ValueError('invalid data, unable to convert to dict()')

    def result_to_dict(self, result):
        """
        Convert a Result (row data from a custom query) into an OrderedDict.
        Dates become ISO strings; enum-like values use their name.
        :param result: Result object
        :return: OrderedDict
        """
        od = OrderedDict()
        for key in getattr(result, '_fields'):
            value = getattr(result, key)
            if isinstance(value, (datetime, date)):
                od[key] = value.isoformat()
            # Check for ModelEnum and set value name string
            elif hasattr(value, 'name') and hasattr(value, 'value'):
                od[key] = value.name
            else:
                od[key] = value
        return od

    def model_to_dict(self, model):
        """
        Converts a model to a dict.
        :param model: SqlAlchemy Model
        :return: dict
        """
        # to_dict() already handles converting dates and enums
        return model.to_dict()

    def dict_to_model(self, data):
        """
        Take a dict from an API request and convert it into model-ready
        values (enum name strings become enum members).
        :param data: request payload data dict
        :return: OrderedDict of column name -> converted value
        :raises ValueError: when an enum column's type cannot be determined
        """
        od = OrderedDict()
        info = inspect(self.dao.model())
        mod = None  # enums module, imported lazily on first enum column
        for key, value in data.items():
            # Find out what class types are associated with this column.
            class_str = str(info.mapper.columns[key].base_columns)
            # If ModelEnum is found, convert string value to Enum value.
            if 'ModelEnum' in class_str:
                match = re.match(r'.*?, ModelEnum\((.*?)\).*', class_str)
                if match is None:
                    raise ValueError('unable to determine enum for column {0}'.format(key))
                enum_name = match.groups()[0]
                if mod is None:
                    mod = importlib.import_module('rdr_server.common.enums')
                # SECURITY/IDIOM FIX: attribute lookup instead of eval() —
                # never evaluate strings assembled at runtime.
                enum = getattr(mod, enum_name)
                od[key] = enum[value]
            else:
                # We don't worry about Date or DateTime values, the
                # UTCDateTime decorator takes care of it.
                od[key] = value
        return od
class BaseApiCount(BaseDaoApi):
    """
    Endpoint returning the number of records in the table.
    """
    def get(self):
        """
        Return the count of all records in the table.
        :return: ({'count': n}, 200)
        """
        record_count = self.dao.count()
        return {'count': record_count}, 200
class BaseApiList(BaseDaoApi):
    """
    Endpoint returning every record in the model.
    """
    def get(self):
        """
        Fetch all rows from the table and serialize them.
        :return: list of record dicts
        """
        return self.to_dict(self.dao.list())
class BaseApiSync(BaseDaoApi):
    """
    Endpoint returning only the base model fields for every record.
    """
    def get(self):
        """
        Return the basic record information for all records.
        :return: list of record dicts
        """
        return self.to_dict(self.dao.base_fields())
class BaseApiPut(BaseDaoApi):
    # Placeholder endpoint for PUT (update) support; no handler implemented yet.
    pass
class BaseApiPost(BaseDaoApi):
    # Placeholder endpoint for POST (create) support; no handler implemented yet.
    pass
class BaseApiGetId(BaseDaoApi):
    """
    Handle GET operations using the primary key id.
    """
    def get(self, pk_id):
        """Fetch one record by primary key; raises RecordNotFoundError (404) otherwise."""
        record = self.dao.get_by_id(pk_id)
        if record:
            return self.to_dict(record), 200
        raise RecordNotFoundError()
class BaseApiDeleteId(BaseDaoApi):
    """
    Handle DELETE operations using the primary key id.
    """
    def delete(self, pk_id):
        """Delete one record by primary key; raises RecordNotFoundError when unknown."""
        if not self.dao.get_by_id(pk_id):
            raise RecordNotFoundError()
        self.dao.delete_by_id(pk_id)
        return {'pkId': pk_id}, 200
|
# f = c*1.8 + 32
# Train tiny Keras models to learn the Celsius -> Fahrenheit conversion.
import tensorflow as tf
import numpy as np
import logging

# Silence TF info/warning chatter.
# FIX: setLevel() returns None, so its result must not be kept as a "logger".
tf.get_logger().setLevel(logging.ERROR)

celsius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
fahrenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
for i, c in enumerate(celsius_q):
    # FIX: the original passed values as extra print() arguments instead of
    # formatting them into the string (and misspelled "Fahrenheit").
    print("Celsius {} is Fahrenheit {}".format(c, fahrenheit_a[i]))

# Build the model: one dense unit is exactly the linear map f = w*c + b.
layer = tf.keras.layers.Dense(units=1, input_shape=[1])
# FIX: Sequential expects a list of layers, not a bare layer.
model = tf.keras.Sequential([layer])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))

# Train the model
history = model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
print("The history of training model: {}".format(history.history))  # 500 loss values
print("The model of training model: {}".format(history.model))
print("There are the layer variables: {}\n\n".format(layer.get_weights()))

# Plot the loss over training epoch
import matplotlib.pyplot as plt
plt.xlabel('Epoch')
plt.ylabel('Loss Magnitude')
plt.plot(history.history['loss'])
plt.show()  # FIX: without show() the figure is never displayed from a script

# prediction
print("10 celsius is {} Fahrenheit.\n\n".format(model.predict(np.array([10]))))

# Experiment, Train neural model with 3 layers
l0 = tf.keras.layers.Dense(units=4, input_shape=[1])
l1 = tf.keras.layers.Dense(units=4)
l2 = tf.keras.layers.Dense(units=1)
model = tf.keras.Sequential([l0, l1, l2])
model.compile(loss='mean_squared_error', optimizer=tf.keras.optimizers.Adam(0.1))
model.fit(celsius_q, fahrenheit_a, epochs=500, verbose=False)
# FIX: predict() expects a proper ndarray, matching the first model's usage.
print("In new model predict {}".format(model.predict(np.array([10]))))
print("L0 variables: {}".format(l0.weights))
print("L1 variables: {}".format(l1.weights))
print("L2 variables: {}".format(l2.weights))
|
# TODO:
# A. PLACEHOLDER
# Library Imports
import random
# Local Imports
import tkinter_gui_app
import pygame_gui_app
import pyopengl_app
import graphics_engine
import shape_generator
import point_cloud
import nsvt_config as config
class PhysicsEngine():
    """Per-player physics state plus a back-reference to the owning Wrapper."""

    def __init__(self, wrapper_):
        # Handle to the coordinating Wrapper instance.
        self.wrapper = wrapper_
        # Player starts at the origin.
        self.playerX, self.playerY = 0, 0
class Wrapper():
    # Top-level coordinator: owns the shape generator, graphics/physics
    # engines, the point-cloud generator and (optionally) the GUI app.
    def __init__(self):
        # RANDOM SEEDING
        # Fixed seed so every run produces the same generated content.
        random.seed(333)
        # MEMBER VARIABLE DECLARATION
        # ShapeGenerator instance
        self.shapeGen = shape_generator.ShapeGenerator()
        # GraphicsEngine list
        self.gEngines = []
        # PhysicsEngine list
        self.pEngines = []
        # SETUP ACTIONS
        # Create a GraphicsEngine instance and a PhysicsEngine instance
        self.gEngines.append(graphics_engine.GraphicsEngine(self))
        self.pEngines.append(PhysicsEngine(self))
        # Create a PointCloudGenerator instance
        self.pcGen = point_cloud.PointCloudGenerator(self)
        # POST-SETUP ACTIONS
        if config.RUN_GUI_APP:
            # Create and run the App instance.
            # Backend chosen by config flags: 3D (PyOpenGL) takes priority,
            # then pygame vs tkinter for the 2D case.
            if config.APP_TYPE_3D:
                self.app = pyopengl_app.PyopenglApp(self)
            else:
                if config.APP_TYPE_PYGAME:
                    self.app = pygame_gui_app.PygameGuiApp(self)
                else:
                    self.app = tkinter_gui_app.TkinterGuiApp(self)
            # NOTE(review): mainloop() blocks, so __init__ does not return
            # while the GUI runs — presumably intended; confirm.
            self.app.mainloop()
def main():
    """Script entry point: construct the Wrapper, which runs the whole app."""
    wr = Wrapper()

if __name__ == "__main__":
    # FIX: guard the entry point so importing this module does not launch
    # the GUI as a side effect.
    main()
# -*- coding: utf-8 -*-
from django import forms
from products.models import Product
class CartUpdateForm(forms.Form):
    """Form for setting the quantity of one product in the cart."""
    quantity = forms.IntegerField(initial=1, widget=forms.TextInput(attrs={'class': 'input-small'}))
    product = forms.ModelChoiceField(queryset=Product.objects.all(), widget=forms.HiddenInput)
    replace = forms.BooleanField(required=False, initial=True)

    def clean_quantity(self):
        """Reject non-positive quantities."""
        qty = self.cleaned_data['quantity']
        if qty > 0:
            return qty
        raise forms.ValidationError(u'Неправильное число товаров')
class CartDeleteForm(forms.Form):
    # Form carrying only the hidden product reference to remove from the cart.
    product = forms.ModelChoiceField(queryset=Product.objects.all(), widget=forms.HiddenInput)
class ProductSignForm(forms.Form):
    # Form for attaching a free-text inscription to a product.
    product = forms.ModelChoiceField(queryset=Product.objects.all(), widget=forms.HiddenInput)
    text = forms.CharField(label=u'Подпись', widget=forms.Textarea)
|
# Simple Trie Implementation to get a feel for things
# uses reference counters to track where strings end and if there are multiple copies of the same string
class Node:
    """One trie node: a child map plus a reference count of strings passing through."""

    def __init__(self):
        self.children = dict()  # single character -> child Node
        self.count = 0          # number of inserted strings traversing this node

    def incrementReferenceCount(self):
        self.count += 1

    def decrementReferenceCount(self):
        self.count -= 1

    def addChild(self, value):
        self.children[value] = Node()

    def getChild(self, value):
        return self.children[value]

    def hasChild(self, value):
        return value in self.children

    def deleteChild(self, value):
        del self.children[value]

    def isTerminal(self):
        """True when at least one inserted string ends exactly at this node.

        A string ends here when this node's count exceeds the total count
        carried onward by its children.
        """
        carried = sum(child.count for child in self.children.values())
        return carried < self.count
class Trie:
    """Reference-counted trie supporting duplicate inserts and deletes."""

    def __init__(self):
        self.root = Node()

    def has(self, value):
        """Return True when at least one copy of *value* is stored."""
        node = self.root
        for ch in value:
            if not node.hasChild(ch):
                return False
            node = node.getChild(ch)
        return node.isTerminal()

    def insert(self, value):
        """Add one occurrence of *value*, bumping counts along its path."""
        node = self.root
        node.incrementReferenceCount()
        for ch in value:
            if not node.hasChild(ch):
                node.addChild(ch)
            node = node.getChild(ch)
            node.incrementReferenceCount()

    def delete(self, value):
        """Remove one occurrence of *value*; a no-op when its path is absent."""
        path = []
        node = self.root
        for ch in value:
            if not node.hasChild(ch):
                return
            node = node.getChild(ch)
            path.append((ch, node))
        # Walk back up, decrementing counts and pruning any child that
        # just dropped to zero references.
        below = None
        for ch, node in reversed(path):
            node.decrementReferenceCount()
            if below is not None and below[1].count == 0:
                node.deleteChild(below[0])
            below = (ch, node)
        self.root.decrementReferenceCount()
def assertTrue(expression, message):
    """Soft assertion: print *message* when *expression* is falsy (never raises)."""
    if expression:
        return
    print(message)
if __name__ == "__main__":
n = Node()
n.addChild('a')
if n.hasChild('a'):
print("node initialized correctly")
t = Trie()
t.insert("hello")
assertTrue(not t.has("not hello"), "Failed to not find not hello")
assertTrue(t.has("hello"), "Failed to find hello")
t.insert("hell on wheels")
assertTrue(t.has("hell on wheels"), "Failed to find hell on wheels")
assertTrue(not t.has("hell"), "Failed to not find hell")
t.delete("hello")
assertTrue(t.has("hell on wheels"), "Failed to find hell on wheels after deleting hello")
assertTrue(not t.has("hello"), "Failed to not find hello after deleting hello")
t.insert("hello")
t.insert("hello")
assertTrue(t.has("hell on wheels"), "Failed to find hell on wheels after reinserting two hellos")
assertTrue(t.has("hello"), "Failed to find hello after reinserting hello")
t.delete("hello")
assertTrue(t.has("hell on wheels"), "Failed to find hell on wheels after reinsert and delete one hellos")
assertTrue(t.has("hello"), "Failed to find hello after reinsert and delete one hello")
t.delete("hello")
assertTrue(t.has("hell on wheels"), "Failed to find hell on wheels after deleting both hello")
assertTrue(not t.has("hello"), "Failed to not find hello after deleting both hello")
t.delete("hell on wheels")
assertTrue(not t.has("hell on wheels"), "Failed to not find hell on wheels after deleting all")
assertTrue(not t.has("hello"), "Failed to not find hello after deleting all")
|
import pandas as pd
def load_plataforma(perfiles, num_of_students):
    """Attach a 'plataforma' column: total (rounded) platform usage per student.

    Reads one CSV per student (ids 1..num_of_students), rounds every cell,
    drops the leading index column, and sums the remaining matrix into one
    integer per student.
    """
    totals = []
    for student in range(1, num_of_students + 1):
        csv_path = "data/apartadolibros/separacion_libros_totales_{}.csv".format(student)
        frame = pd.read_csv(csv_path)
        # Round, strip the index column (first column), sum the whole matrix.
        total = int(frame.round(0).to_numpy()[:, 1:].sum())
        totals.append(total)
    perfiles['plataforma'] = pd.Series(totals)
    return perfiles
|
# encoding: utf-8
# module gi.repository.LibvirtGConfig
# from /usr/lib64/girepository-1.0/LibvirtGConfig-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
# Variables with simple values
_namespace = 'LibvirtGConfig'  # GIR namespace this generated stub wraps
_version = '1.0'  # typelib version
__weakref__ = None  # placeholder; real value supplied by gi at runtime
# functions
def init(argv=None): # real signature unknown; restored from __doc__
    """ init(argv:list=None) -> argv:list """
    # Generated stub: the real implementation is bound dynamically from the
    # typelib by gi at runtime; do not hand-edit.
    pass
def init_check(argv=None): # real signature unknown; restored from __doc__
    """ init_check(argv:list=None) -> bool, argv:list """
    # Generated stub: placeholder return value only; the real implementation
    # is bound dynamically from the typelib by gi at runtime.
    return False
# Auto-generated dunder stubs for the dynamic typelib module (see the
# "by generator 1.147" header). Signatures are unknown to the generator;
# do not hand-edit behavior here — regenerate instead.
def __delattr__(*args, **kwargs): # real signature unknown
    """ Implement delattr(self, name). """
    pass

def __dir__(*args, **kwargs): # real signature unknown
    pass

def __eq__(*args, **kwargs): # real signature unknown
    """ Return self==value. """
    pass

def __format__(*args, **kwargs): # real signature unknown
    """ Default object formatter. """
    pass

def __getattribute__(*args, **kwargs): # real signature unknown
    """ Return getattr(self, name). """
    pass

def __getattr__(*args, **kwargs): # real signature unknown
    pass

def __ge__(*args, **kwargs): # real signature unknown
    """ Return self>=value. """
    pass

def __gt__(*args, **kwargs): # real signature unknown
    """ Return self>value. """
    pass

def __hash__(*args, **kwargs): # real signature unknown
    """ Return hash(self). """
    pass

def __init_subclass__(*args, **kwargs): # real signature unknown
    """
    This method is called when a class is subclassed.

    The default implementation does nothing. It may be
    overridden to extend subclasses.
    """
    pass

def __init__(*args, **kwargs): # real signature unknown
    """ Might raise gi._gi.RepositoryError """
    pass

def __le__(*args, **kwargs): # real signature unknown
    """ Return self<=value. """
    pass

def __lt__(*args, **kwargs): # real signature unknown
    """ Return self<value. """
    pass

@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
    """ Create and return a new object. See help(type) for accurate signature. """
    pass

def __ne__(*args, **kwargs): # real signature unknown
    """ Return self!=value. """
    pass

def __reduce_ex__(*args, **kwargs): # real signature unknown
    """ Helper for pickle. """
    pass

def __reduce__(*args, **kwargs): # real signature unknown
    """ Helper for pickle. """
    pass

def __repr__(*args, **kwargs): # real signature unknown
    pass

def __setattr__(*args, **kwargs): # real signature unknown
    """ Implement setattr(self, name, value). """
    pass

def __sizeof__(*args, **kwargs): # real signature unknown
    """ Size of object in memory, in bytes. """
    pass

def __str__(*args, **kwargs): # real signature unknown
    """ Return str(self). """
    pass

def __subclasshook__(*args, **kwargs): # real signature unknown
    """
    Abstract classes can override this to customize issubclass().

    This is invoked early on by abc.ABCMeta.__subclasscheck__().
    It should return True, False or NotImplemented. If it returns
    NotImplemented, the normal algorithm is used. Otherwise, it
    overrides the normal algorithm (and the outcome is cached).
    """
    pass
# classes
from .Object import Object
from .Capabilities import Capabilities
from .CapabilitiesClass import CapabilitiesClass
from .CapabilitiesCpu import CapabilitiesCpu
from .CapabilitiesCpuClass import CapabilitiesCpuClass
from .CapabilitiesCpuFeature import CapabilitiesCpuFeature
from .CapabilitiesCpuFeatureClass import CapabilitiesCpuFeatureClass
from .CapabilitiesCpuFeaturePrivate import CapabilitiesCpuFeaturePrivate
from .CapabilitiesCpuModel import CapabilitiesCpuModel
from .CapabilitiesCpuModelClass import CapabilitiesCpuModelClass
from .CapabilitiesCpuModelPrivate import CapabilitiesCpuModelPrivate
from .CapabilitiesCpuPrivate import CapabilitiesCpuPrivate
from .CapabilitiesCpuTopology import CapabilitiesCpuTopology
from .CapabilitiesCpuTopologyClass import CapabilitiesCpuTopologyClass
from .CapabilitiesCpuTopologyPrivate import CapabilitiesCpuTopologyPrivate
from .CapabilitiesGuest import CapabilitiesGuest
from .CapabilitiesGuestArch import CapabilitiesGuestArch
from .CapabilitiesGuestArchClass import CapabilitiesGuestArchClass
from .CapabilitiesGuestArchPrivate import CapabilitiesGuestArchPrivate
from .CapabilitiesGuestClass import CapabilitiesGuestClass
from .CapabilitiesGuestDomain import CapabilitiesGuestDomain
from .CapabilitiesGuestDomainClass import CapabilitiesGuestDomainClass
from .CapabilitiesGuestDomainPrivate import CapabilitiesGuestDomainPrivate
from .CapabilitiesGuestFeature import CapabilitiesGuestFeature
from .CapabilitiesGuestFeatureClass import CapabilitiesGuestFeatureClass
from .CapabilitiesGuestFeaturePrivate import CapabilitiesGuestFeaturePrivate
from .CapabilitiesGuestPrivate import CapabilitiesGuestPrivate
from .CapabilitiesHost import CapabilitiesHost
from .CapabilitiesHostClass import CapabilitiesHostClass
from .CapabilitiesHostPrivate import CapabilitiesHostPrivate
from .CapabilitiesHostSecModel import CapabilitiesHostSecModel
from .CapabilitiesHostSecModelClass import CapabilitiesHostSecModelClass
from .CapabilitiesHostSecModelPrivate import CapabilitiesHostSecModelPrivate
from .CapabilitiesPrivate import CapabilitiesPrivate
from .Domain import Domain
from .DomainAddress import DomainAddress
from .DomainAddressClass import DomainAddressClass
from .DomainAddressPci import DomainAddressPci
from .DomainAddressPciClass import DomainAddressPciClass
from .DomainAddressPciPrivate import DomainAddressPciPrivate
from .DomainAddressPrivate import DomainAddressPrivate
from .DomainAddressUsb import DomainAddressUsb
from .DomainAddressUsbClass import DomainAddressUsbClass
from .DomainAddressUsbPrivate import DomainAddressUsbPrivate
from .DomainCapabilities import DomainCapabilities
from .DomainCapabilitiesClass import DomainCapabilitiesClass
from .DomainCapabilitiesOs import DomainCapabilitiesOs
from .DomainCapabilitiesOsClass import DomainCapabilitiesOsClass
from .DomainCapabilitiesOsPrivate import DomainCapabilitiesOsPrivate
from .DomainCapabilitiesPrivate import DomainCapabilitiesPrivate
from .DomainDevice import DomainDevice
from .DomainChardev import DomainChardev
from .DomainChannel import DomainChannel
from .DomainChannelClass import DomainChannelClass
from .DomainChannelPrivate import DomainChannelPrivate
from .DomainChannelTargetType import DomainChannelTargetType
from .DomainChardevClass import DomainChardevClass
from .DomainChardevPrivate import DomainChardevPrivate
from .DomainChardevSource import DomainChardevSource
from .DomainChardevSourceClass import DomainChardevSourceClass
from .DomainChardevSourcePrivate import DomainChardevSourcePrivate
from .DomainChardevSourcePty import DomainChardevSourcePty
from .DomainChardevSourcePtyClass import DomainChardevSourcePtyClass
from .DomainChardevSourcePtyPrivate import DomainChardevSourcePtyPrivate
from .DomainChardevSourceSpicePort import DomainChardevSourceSpicePort
from .DomainChardevSourceSpicePortClass import DomainChardevSourceSpicePortClass
from .DomainChardevSourceSpicePortPrivate import DomainChardevSourceSpicePortPrivate
from .DomainChardevSourceSpiceVmc import DomainChardevSourceSpiceVmc
from .DomainChardevSourceSpiceVmcClass import DomainChardevSourceSpiceVmcClass
from .DomainChardevSourceSpiceVmcPrivate import DomainChardevSourceSpiceVmcPrivate
from .DomainChardevSourceUnix import DomainChardevSourceUnix
from .DomainChardevSourceUnixClass import DomainChardevSourceUnixClass
from .DomainChardevSourceUnixPrivate import DomainChardevSourceUnixPrivate
from .DomainClass import DomainClass
from .DomainClock import DomainClock
from .DomainClockClass import DomainClockClass
from .DomainClockOffset import DomainClockOffset
from .DomainClockPrivate import DomainClockPrivate
from .DomainConsole import DomainConsole
from .DomainConsoleClass import DomainConsoleClass
from .DomainConsolePrivate import DomainConsolePrivate
from .DomainConsoleTargetType import DomainConsoleTargetType
from .DomainController import DomainController
from .DomainControllerClass import DomainControllerClass
from .DomainControllerPrivate import DomainControllerPrivate
from .DomainControllerUsb import DomainControllerUsb
from .DomainControllerUsbClass import DomainControllerUsbClass
from .DomainControllerUsbModel import DomainControllerUsbModel
from .DomainControllerUsbPrivate import DomainControllerUsbPrivate
from .DomainCpu import DomainCpu
from .DomainCpuClass import DomainCpuClass
from .DomainCpuFeature import DomainCpuFeature
from .DomainCpuFeatureClass import DomainCpuFeatureClass
from .DomainCpuFeaturePolicy import DomainCpuFeaturePolicy
from .DomainCpuFeaturePrivate import DomainCpuFeaturePrivate
from .DomainCpuMatchPolicy import DomainCpuMatchPolicy
from .DomainCpuMode import DomainCpuMode
from .DomainCpuModel import DomainCpuModel
from .DomainCpuModelClass import DomainCpuModelClass
from .DomainCpuModelPrivate import DomainCpuModelPrivate
from .DomainCpuPrivate import DomainCpuPrivate
from .DomainDeviceClass import DomainDeviceClass
from .DomainDevicePrivate import DomainDevicePrivate
from .DomainDisk import DomainDisk
from .DomainDiskBus import DomainDiskBus
from .DomainDiskCacheType import DomainDiskCacheType
from .DomainDiskClass import DomainDiskClass
from .DomainDiskDriver import DomainDiskDriver
from .DomainDiskDriverClass import DomainDiskDriverClass
from .DomainDiskDriverDiscard import DomainDiskDriverDiscard
from .DomainDiskDriverErrorPolicy import DomainDiskDriverErrorPolicy
from .DomainDiskDriverIoPolicy import DomainDiskDriverIoPolicy
from .DomainDiskDriverPrivate import DomainDiskDriverPrivate
from .DomainDiskFormat import DomainDiskFormat
from .DomainDiskGuestDeviceType import DomainDiskGuestDeviceType
from .DomainDiskPrivate import DomainDiskPrivate
from .DomainDiskSnapshotType import DomainDiskSnapshotType
from .DomainDiskStartupPolicy import DomainDiskStartupPolicy
from .DomainDiskType import DomainDiskType
from .DomainFilesys import DomainFilesys
from .DomainFilesysAccessType import DomainFilesysAccessType
from .DomainFilesysClass import DomainFilesysClass
from .DomainFilesysDriverType import DomainFilesysDriverType
from .DomainFilesysPrivate import DomainFilesysPrivate
from .DomainFilesysType import DomainFilesysType
from .DomainGraphics import DomainGraphics
from .DomainGraphicsClass import DomainGraphicsClass
from .DomainGraphicsDesktop import DomainGraphicsDesktop
from .DomainGraphicsDesktopClass import DomainGraphicsDesktopClass
from .DomainGraphicsDesktopPrivate import DomainGraphicsDesktopPrivate
from .DomainGraphicsPrivate import DomainGraphicsPrivate
from .DomainGraphicsRdp import DomainGraphicsRdp
from .DomainGraphicsRdpClass import DomainGraphicsRdpClass
from .DomainGraphicsRdpPrivate import DomainGraphicsRdpPrivate
from .DomainGraphicsSdl import DomainGraphicsSdl
from .DomainGraphicsSdlClass import DomainGraphicsSdlClass
from .DomainGraphicsSdlPrivate import DomainGraphicsSdlPrivate
from .DomainGraphicsSpice import DomainGraphicsSpice
from .DomainGraphicsSpiceClass import DomainGraphicsSpiceClass
from .DomainGraphicsSpiceImageCompression import DomainGraphicsSpiceImageCompression
from .DomainGraphicsSpicePrivate import DomainGraphicsSpicePrivate
from .DomainGraphicsVnc import DomainGraphicsVnc
from .DomainGraphicsVncClass import DomainGraphicsVncClass
from .DomainGraphicsVncPrivate import DomainGraphicsVncPrivate
from .DomainHostdev import DomainHostdev
from .DomainHostdevClass import DomainHostdevClass
from .DomainHostdevPci import DomainHostdevPci
from .DomainHostdevPciClass import DomainHostdevPciClass
from .DomainHostdevPciPrivate import DomainHostdevPciPrivate
from .DomainHostdevPrivate import DomainHostdevPrivate
from .DomainInput import DomainInput
from .DomainInputBus import DomainInputBus
from .DomainInputClass import DomainInputClass
from .DomainInputDeviceType import DomainInputDeviceType
from .DomainInputPrivate import DomainInputPrivate
from .DomainInterface import DomainInterface
from .DomainInterfaceBridge import DomainInterfaceBridge
from .DomainInterfaceBridgeClass import DomainInterfaceBridgeClass
from .DomainInterfaceBridgePrivate import DomainInterfaceBridgePrivate
from .DomainInterfaceClass import DomainInterfaceClass
from .DomainInterfaceFilterref import DomainInterfaceFilterref
from .DomainInterfaceFilterrefClass import DomainInterfaceFilterrefClass
from .DomainInterfaceFilterrefParameter import DomainInterfaceFilterrefParameter
from .DomainInterfaceFilterrefParameterClass import DomainInterfaceFilterrefParameterClass
from .DomainInterfaceFilterrefParameterPrivate import DomainInterfaceFilterrefParameterPrivate
from .DomainInterfaceFilterrefPrivate import DomainInterfaceFilterrefPrivate
from .DomainInterfaceLinkState import DomainInterfaceLinkState
from .DomainInterfaceNetwork import DomainInterfaceNetwork
from .DomainInterfaceNetworkClass import DomainInterfaceNetworkClass
from .DomainInterfaceNetworkPrivate import DomainInterfaceNetworkPrivate
from .DomainInterfacePrivate import DomainInterfacePrivate
from .DomainInterfaceUser import DomainInterfaceUser
from .DomainInterfaceUserClass import DomainInterfaceUserClass
from .DomainInterfaceUserPrivate import DomainInterfaceUserPrivate
from .DomainLifecycleAction import DomainLifecycleAction
from .DomainLifecycleEvent import DomainLifecycleEvent
from .DomainMemballoon import DomainMemballoon
from .DomainMemballoonClass import DomainMemballoonClass
from .DomainMemballoonModel import DomainMemballoonModel
from .DomainMemballoonPrivate import DomainMemballoonPrivate
from .DomainOs import DomainOs
from .DomainOsBootDevice import DomainOsBootDevice
from .DomainOsClass import DomainOsClass
from .DomainOsFirmware import DomainOsFirmware
from .DomainOsPrivate import DomainOsPrivate
from .DomainOsSmBiosMode import DomainOsSmBiosMode
from .DomainOsType import DomainOsType
from .DomainParallel import DomainParallel
from .DomainParallelClass import DomainParallelClass
from .DomainParallelPrivate import DomainParallelPrivate
from .DomainPowerManagement import DomainPowerManagement
from .DomainPowerManagementClass import DomainPowerManagementClass
from .DomainPowerManagementPrivate import DomainPowerManagementPrivate
from .DomainPrivate import DomainPrivate
from .DomainRedirdev import DomainRedirdev
from .DomainRedirdevBus import DomainRedirdevBus
from .DomainRedirdevClass import DomainRedirdevClass
from .DomainRedirdevPrivate import DomainRedirdevPrivate
from .DomainSeclabel import DomainSeclabel
from .DomainSeclabelClass import DomainSeclabelClass
from .DomainSeclabelPrivate import DomainSeclabelPrivate
from .DomainSeclabelType import DomainSeclabelType
from .DomainSerial import DomainSerial
from .DomainSerialClass import DomainSerialClass
from .DomainSerialPrivate import DomainSerialPrivate
from .DomainSmartcard import DomainSmartcard
from .DomainSmartcardClass import DomainSmartcardClass
from .DomainSmartcardHost import DomainSmartcardHost
from .DomainSmartcardHostCertificates import DomainSmartcardHostCertificates
from .DomainSmartcardHostCertificatesClass import DomainSmartcardHostCertificatesClass
from .DomainSmartcardHostCertificatesPrivate import DomainSmartcardHostCertificatesPrivate
from .DomainSmartcardHostClass import DomainSmartcardHostClass
from .DomainSmartcardHostPrivate import DomainSmartcardHostPrivate
from .DomainSmartcardPassthrough import DomainSmartcardPassthrough
from .DomainSmartcardPassthroughClass import DomainSmartcardPassthroughClass
from .DomainSmartcardPassthroughPrivate import DomainSmartcardPassthroughPrivate
from .DomainSmartcardPrivate import DomainSmartcardPrivate
from .DomainSnapshot import DomainSnapshot
from .DomainSnapshotClass import DomainSnapshotClass
from .DomainSnapshotDisk import DomainSnapshotDisk
from .DomainSnapshotDiskClass import DomainSnapshotDiskClass
from .DomainSnapshotDiskPrivate import DomainSnapshotDiskPrivate
from .DomainSnapshotDomainState import DomainSnapshotDomainState
from .DomainSnapshotMemoryState import DomainSnapshotMemoryState
from .DomainSnapshotPrivate import DomainSnapshotPrivate
from .DomainSound import DomainSound
from .DomainSoundClass import DomainSoundClass
from .DomainSoundModel import DomainSoundModel
from .DomainSoundPrivate import DomainSoundPrivate
from .DomainTimer import DomainTimer
from .DomainTimerClass import DomainTimerClass
from .DomainTimerHpet import DomainTimerHpet
from .DomainTimerHpetClass import DomainTimerHpetClass
from .DomainTimerHpetPrivate import DomainTimerHpetPrivate
from .DomainTimerPit import DomainTimerPit
from .DomainTimerPitClass import DomainTimerPitClass
from .DomainTimerPitPrivate import DomainTimerPitPrivate
from .DomainTimerPrivate import DomainTimerPrivate
from .DomainTimerRtc import DomainTimerRtc
from .DomainTimerRtcClass import DomainTimerRtcClass
from .DomainTimerRtcPrivate import DomainTimerRtcPrivate
from .DomainTimerTickPolicy import DomainTimerTickPolicy
from .DomainVideo import DomainVideo
from .DomainVideoClass import DomainVideoClass
from .DomainVideoModel import DomainVideoModel
from .DomainVideoPrivate import DomainVideoPrivate
from .DomainVirtType import DomainVirtType
from .Interface import Interface
from .InterfaceClass import InterfaceClass
from .InterfacePrivate import InterfacePrivate
from .Network import Network
from .NetworkClass import NetworkClass
from .NetworkFilter import NetworkFilter
from .NetworkFilterClass import NetworkFilterClass
from .NetworkFilterPrivate import NetworkFilterPrivate
from .NetworkPrivate import NetworkPrivate
from .NodeDevice import NodeDevice
from .NodeDeviceClass import NodeDeviceClass
from .NodeDevicePrivate import NodeDevicePrivate
from .ObjectClass import ObjectClass
from .ObjectPrivate import ObjectPrivate
from .Secret import Secret
from .SecretClass import SecretClass
from .SecretPrivate import SecretPrivate
from .StoragePermissions import StoragePermissions
from .StoragePermissionsClass import StoragePermissionsClass
from .StoragePermissionsPrivate import StoragePermissionsPrivate
from .StoragePool import StoragePool
from .StoragePoolClass import StoragePoolClass
from .StoragePoolPrivate import StoragePoolPrivate
from .StoragePoolSource import StoragePoolSource
from .StoragePoolSourceClass import StoragePoolSourceClass
from .StoragePoolSourcePrivate import StoragePoolSourcePrivate
from .StoragePoolTarget import StoragePoolTarget
from .StoragePoolTargetClass import StoragePoolTargetClass
from .StoragePoolTargetPrivate import StoragePoolTargetPrivate
from .StoragePoolType import StoragePoolType
from .StorageVol import StorageVol
from .StorageVolBackingStore import StorageVolBackingStore
from .StorageVolBackingStoreClass import StorageVolBackingStoreClass
from .StorageVolBackingStorePrivate import StorageVolBackingStorePrivate
from .StorageVolClass import StorageVolClass
from .StorageVolPrivate import StorageVolPrivate
from .StorageVolTarget import StorageVolTarget
from .StorageVolTargetClass import StorageVolTargetClass
from .StorageVolTargetFeatures import StorageVolTargetFeatures
from .StorageVolTargetPrivate import StorageVolTargetPrivate
from .__class__ import __class__
# variables with complex values
__loader__ = None # (!) real value is '<gi.importer.DynamicImporter object at 0x7fa8c01e7d00>'
__path__ = [
'/usr/lib64/girepository-1.0/LibvirtGConfig-1.0.typelib',
]
__spec__ = None # (!) real value is "ModuleSpec(name='gi.repository.LibvirtGConfig', loader=<gi.importer.DynamicImporter object at 0x7fa8c01e7d00>)"
|
# Submit a Datadog service-check status for one host.
from datadog import initialize, api
from datadog.api.constants import CheckStatus
# NOTE(review): live-looking API/app keys are hard-coded here -- they should
# be rotated and loaded from the environment, not committed to source.
options = {
    'api_key': '9775a026f1ca7d1c6c5af9d94d9595a4',
    'app_key': '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'
}
initialize(**options)
# Report the 'app.ok' check as OK for host 'app1'.
check = 'app.ok'
host = 'app1'
status = CheckStatus.OK # equals 0
api.ServiceCheck.check(check=check, host_name=host, status=status, message='Response: 200 OK')
|
from pytube import YouTube # pip install pytube or pytube3
from pytube import Playlist
import os, re
def Download(yt):
    """Download the first available stream of *yt* into the Tracks/ folder."""
    print("Downloading....")
    available_streams = yt.streams.filter()
    first_stream = available_streams[0]
    first_stream.download(r"Tracks/")
def main(c, playlist):
    """Download every track of *playlist*, numbering console output from *c*."""
    # Work around pytube's playlist-page parsing by overriding its URL regex.
    playlist._video_regex = re.compile(r"\"url\":\"(/watch\?v=[\w-]*)")
    track_urls = playlist.video_urls
    print("Number of tracks: ", len(track_urls))
    for track_url in track_urls:
        video = YouTube(track_url)
        _filename = video.title
        print(c, ". ", _filename)
        Download(video)
        c = c + 1
if __name__ == '__main__':
    # Entry point: download every video of this public playlist, numbered from 1.
    playlist = Playlist("https://www.youtube.com/playlist?list=PL8A83A276F0D85E70")
    main(1, playlist)
|
# Iterate while input not valid, then return input
def choose(text, output, options):
    """Prompt repeatedly via *output* until input() yields a string in *options*.

    Parameters
    ----------
    text : str           prompt text passed to *output* on each attempt
    output : callable    printer function (e.g. concatenate)
    options : container  the set of accepted answers

    Returns the accepted string.  Ctrl-C terminates the program.
    """
    char = None
    while True:
        output(text)
        try:
            char = str(input())
        except KeyboardInterrupt:
            exit()
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/GeneratorExit; catch only real input errors
            # (e.g. EOFError) and re-prompt.
            continue
        if char in options:
            break
    return char
# Define concatenated print
def concatenate(text):
    """Print *text* inline, suppressing print's default trailing newline."""
    terminator = ''
    print(text, end=terminator)
# Return a subset of the list (chosen by the user)
def pickFrom(options):
    """Interactively ask the user to mark columns; return selected indexes.

    For each option the user types 'x' (select) or just presses enter
    (skip); the growing prompt visualises previous answers under the
    option names.
    """
    confirm = 'x'
    skip = ''
    prompt = 'Please mark x under desired selected columns then press enter (only press enter otherwise):\n' + ' '.join(options) + '\n'
    chosen_indexes = []
    for index, option in enumerate(options):
        print()
        answer = choose(prompt, concatenate, [confirm, skip])
        if answer == confirm:
            # chosen_indexes.append(option)  # <--- to return string list
            chosen_indexes.append(index)  # <--- to return indexes list
            prompt += confirm * len(option) + ' '
        else:
            prompt += '_' * len(option) + ' '
    print()
    print(prompt)
    print()
    return chosen_indexes
|
import random
import tensorflow as tf
import tensorflow.layers
import numpy as np
import tflib as lib
import tflib.nn.conv2d
import tflib.nn.linear
from tflib.nn.rmspropgraves import RmsPropGraves
class Agent():
    """DQN agent on TensorFlow 1.x graphs.

    Builds an online Q-network (scope 'dqn') and a frozen copy (scope
    'target_network') over stacked screen frames, an op to sync the copy,
    and an RMSProp training op with a Huber loss.  When config.double_q is
    set, targets are computed Double-DQN style (online net selects the
    action, target net evaluates it).
    """
    def __init__(self, sess, config, num_actions=18, action_interval=4):
        """Build the full TF graph.

        Args:
            sess: live tf.Session used for all run/eval calls.
            config: hyperparameter holder (discount, history_length, screen
                sizes, learning-rate schedule, cnn_format, double_q).
            num_actions: size of the discrete action space.
            action_interval: unused in this class -- presumably frame skip;
                TODO confirm against the caller.
        """
        self.sess = sess
        self.config = config
        self.num_actions = num_actions
        self.discount = self.config.discount
        self.history_length = self.config.history_length
        self.screen_width = self.config.screen_width
        self.screen_height = self.config.screen_height
        self.learning_rate_minimum = self.config.learning_rate_minimum
        self.learning_rate = self.config.learning_rate
        self.learning_rate_decay_step = self.config.learning_rate_decay_step
        self.learning_rate_decay = self.config.learning_rate_decay
        self.data_format = self.config.cnn_format
        self.double_q = self.config.double_q
        # State placeholders, declared channels-first (batch, history, W, H).
        # NOTE(review): "s_t_plas_1" is presumably a typo for "plus" (s_{t+1});
        # kept as-is because callers reference it.
        self.s_t = tf.placeholder(tf.float32, shape=(
            None, self.history_length, self.screen_width, self.screen_height))
        self.s_t_plas_1 = tf.placeholder(tf.float32, shape=(
            None, self.history_length, self.screen_width, self.screen_height))
        # Transpose to channels-last when the conv stack expects NHWC.
        if self.data_format == 'NHWC':
            self.s_t = tf.transpose(
                self.s_t, (0, 2, 3, 1), name='NCHW_to_NHWC')
            self.s_t_plas_1 = tf.transpose(
                self.s_t_plas_1, (0, 2, 3, 1), name='NCHW_to_NHWC')
        # Two structurally identical networks: online and target.
        with tf.variable_scope('dqn'):
            self.q_value, self.q_action = self.build_model(self.s_t)
        with tf.variable_scope('target_network'):
            self.target_q_value, _ = self.build_model(self.s_t_plas_1)
        if self.double_q == True:
            # Double DQN: gather target-net Q-values at externally supplied
            # [batch_index, action] index pairs.
            self.target_q_index = tf.placeholder(
                'int32', [None, None], 'outputs_idx')
            self.target_q_with_index = tf.gather_nd(
                self.target_q_value, self.target_q_index)
        with tf.name_scope('update_target_q_network'):
            self.update_target_q_network_op = self.copy_weight()
        with tf.name_scope('dqn_op'):
            self.dqn_op, self.loss, self.dqn_summary = self.build_training_op()
    def get_action(self, state):
        """Return the greedy action(s) (argmax of online Q) for *state*."""
        action = self.sess.run(self.q_action, feed_dict={self.s_t: state})
        return action
    def get_q_value(self, state):
        """Return online-network Q-values for *state*."""
        q_value = self.sess.run(
            self.q_value, feed_dict={self.s_t: state})
        return q_value
    def copy_weight(self):
        """Build assign ops copying each 'dqn' variable onto 'target_network'.

        Relies on both scopes listing trainable variables in the same order,
        which holds because both were built by identical build_model calls.
        """
        dqn_weights = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='dqn')
        target_q_network_weights = tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='target_network')
        update_target_q_network_op = [target_q_network_weights[i].assign(
            dqn_weights[i]) for i in range(len(dqn_weights))]
        return update_target_q_network_op
    def updated_target_q_network(self):
        """Run the sync ops: copy online weights into the target network."""
        self.sess.run(self.update_target_q_network_op)
    def train(self, state, action, reward, next_state, terminal, step):
        """Run one gradient step on a transition batch.

        Target is reward + discount * max_a' Q_target(s', a') for
        non-terminal transitions; Double-DQN picks a' with the online net.
        Returns (q_value, loss, dqn_summary).
        """
        if self.double_q == True:
            # Online net selects actions, target net evaluates them.
            predicted_action = self.get_action(next_state)
            max_q_t_plus_1 = self.target_q_with_index.eval({
                self.s_t_plas_1: next_state,
                self.target_q_index: [[idx, pred_a]
                                      for idx, pred_a in enumerate(predicted_action)]
            })
        else:
            max_q_t_plus_1 = np.max(
                self.target_q_value.eval({self.s_t_plas_1: next_state}), axis=1)
        # Cast terminal flags to float: terminal states get target = reward.
        terminal = np.array(terminal) + 0.
        target_q_t = (1. - terminal) * self.discount * max_q_t_plus_1 + reward
        _, q_value, loss, dqn_summary = self.sess.run([self.dqn_op, self.q_value, self.loss, self.dqn_summary], feed_dict={
            self.s_t: state,
            self.action: action,
            self.target_q_t: target_q_t,
            self.learning_rate_step: step})
        return q_value, loss, dqn_summary
    def build_training_op(self):
        """Create the Huber-loss RMSProp training op.

        Returns (train_op, loss_tensor, loss_summary).
        """
        self.target_q_t = tf.placeholder(
            dtype=tf.float32, shape=[None], name='target_q_t')
        self.action = tf.placeholder(
            dtype=tf.int64, shape=[None], name='action')
        # Select Q(s, a) for the taken actions via a one-hot mask.
        action_one_hot = tf.one_hot(
            self.action, self.num_actions, 1.0, 0.0, name='action_one_hot')
        q_acted = tf.reduce_sum(
            self.q_value * action_one_hot, axis=1, name='q_acted')
        delta = self.target_q_t - q_acted
        def clipped_error(x):
            # Huber loss
            return tf.where(tf.abs(x) < 1.0, 0.5 * tf.square(x), tf.abs(x) - 0.5)
        # If you use RMSpropGraves, this code is tf.reduce_sum(). But it is not Implemented.
        loss = tf.reduce_mean(clipped_error(delta), name='loss')
        dqn_summary = tf.summary.scalar('dqn_loss', loss)
        # Exponentially decayed learning rate with a floor.
        self.learning_rate_step = tf.placeholder(
            tf.int64, None, name='learning_rate_step')
        learning_rate_op = tf.maximum(self.learning_rate_minimum,
                                      tf.train.exponential_decay(
                                          self.learning_rate,
                                          self.learning_rate_step,
                                          self.learning_rate_decay_step,
                                          self.learning_rate_decay,
                                          staircase=True))
        dqn_op = tf.train.RMSPropOptimizer(
            learning_rate_op, decay=0.90, momentum=0.95, epsilon=0.01).minimize(loss)
        return dqn_op, loss, dqn_summary
    def build_model(self, state):
        """Build the conv/dense Q-network over *state*.

        Returns (q_value, q_action): per-action Q-values and their argmax.
        The inline shape comments assume 84x84 inputs -- TODO confirm.
        """
        initializer = tf.truncated_normal_initializer(0.0, 0.02)
        # initializer = None
        output = lib.nn.conv2d.Conv2D(
            'Conv1', self.history_length, 32, 8, state, initializer=initializer, stride=4, padding='VALID', data_format=self.data_format)
        output = tf.nn.relu(output, name='Relu1')
        # (None, 20, 20, 32)
        output = lib.nn.conv2d.Conv2D(
            'Conv2', 32, 32*2, 4, output, initializer=initializer, stride=2, padding='VALID', data_format=self.data_format)
        output = tf.nn.relu(output, name='Relu2')
        # (None, 9, 9, 64)
        output = lib.nn.conv2d.Conv2D(
            'Conv3', 32*2, 32*2, 3, output, initializer=initializer, stride=1, padding='VALID', data_format=self.data_format)
        output = tf.nn.relu(output, name='Relu3')
        # (None, 7, 7, 64)
        output = tf.layers.flatten(output)
        # (None, 3136)
        dence_initializer = tf.truncated_normal_initializer(stddev=0.02)
        # dence_initializer = None
        output = lib.nn.linear.Linear(
            'Dence1', 3136, 512, output, initializer=dence_initializer)
        output = tf.nn.relu(output, name='Relu4')
        # (None, 512)
        q_value = lib.nn.linear.Linear(
            'Dence2', 512, self.num_actions, output, initializer=dence_initializer)
        # (None, num_actions)
        q_action = tf.argmax(q_value, axis=1)
        return q_value, q_action
|
# Login Form: drive the the-internet.herokuapp.com demo login page.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# BUG FIX: raw string literal -- in a plain literal "\D" and "\c" are
# invalid escape sequences (DeprecationWarning today, an error in future
# Python versions).  The byte value of the path is unchanged.
chrome_driver_path = r"C:\Development\chromedriver.exe"
driver = webdriver.Chrome(chrome_driver_path)
driver.get("https://the-internet.herokuapp.com")
# Navigate to the "Form Authentication" example.
form_auth = driver.find_element_by_xpath("//a[contains(text(),'Form Authentication')]")
form_auth.click()
# Fill in the demo credentials (public test site, not real secrets).
user_input = driver.find_element_by_id("username")
user_input.send_keys("tomsmith")
time.sleep(5)
pass_input = driver.find_element_by_id("password")
pass_input.send_keys("SuperSecretPassword!")
time.sleep(5)
# Submit the form via the button's icon element.
login_btn = driver.find_element_by_xpath("//form[@id='login']/button/i").click()
driver.close()
class Solution:
    def plusOne(self, digits):
        """
        Increment the integer represented by a list of decimal digits.

        :type digits: List[int]
        :rtype: List[int]

        BUG FIX: the original reversed *digits* in place, mutating the
        caller's list, and round-tripped through int(...)/str(...).  This
        version propagates the carry directly and leaves the input intact.
        """
        result = list(digits)  # work on a copy; never mutate the argument
        # Walk from the least-significant digit, carrying while digits hit 10.
        for i in range(len(result) - 1, -1, -1):
            result[i] += 1
            if result[i] < 10:
                return result
            result[i] = 0
        # Every digit was 9 (e.g. [9, 9] -> [1, 0, 0]).
        return [1] + result
|
# import libraries
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.externals import joblib
import model_functions as mf
from model_functions import KeywordExtractor
import argparse
#parsing for terminal input
parser = argparse.ArgumentParser()
parser.add_argument('--database', type = str, help = 'input target database', default = './data/DisasterResponse.db')
parser.add_argument('--model', type = str, help = 'input target model to save', default = 'classifier.pkl')
args = parser.parse_args()
# argparse defaults guarantee values, so these guards are redundant but harmless.
if args.database:
    database = args.database
if args.model:
    model = args.model
#TODO: modularize code, create docstring
# Load the cleaned disaster-response messages from the SQLite database.
database_engine = 'sqlite:///' + database
engine = create_engine(database_engine)
df = pd.read_sql_table('disaster_response', engine)
# Features: raw message text; targets: every column after the first four
# (presumably the category indicator columns -- confirm against the table schema).
X = df['message']
y = df[df.columns[4:]]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 42, test_size = 0.2)
# Hyperparameter grid searched over the NLP pipeline and the classifier.
parameters = {
    'features__nlp_pipeline__vect__ngram_range': ((1, 1), (1, 2)),
    'features__nlp_pipeline__vect__max_df': (0.5, 0.75, 1.0),
    'features__nlp_pipeline__vect__max_features': (None, 5000, 10000),
    'features__nlp_pipeline__tfidf__use_idf': (True, False),
    'clf__estimator__n_estimators': [50, 100, 200]
}
pipeline = mf.build_pipeline()
cv, y_pred_df = mf.grid_search(pipeline, X_train, X_test, y_train, y_test, parameters)
# Per-category precision/recall/F1 report.
for col in y_test.columns:
    print(col,'\n', classification_report(y_test[col], y_pred_df[col]))
# Persist the fitted grid-search object.
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# the standalone `joblib` package is the replacement.
joblib.dump(cv, model)
|
import numpy
import matplotlib.pyplot as plot
import sys
import math
import scipy.io.wavfile
class Oscillator:
    """Sample-based waveform generator (sine, and square via additive synthesis).

    Derived state (set in __init__):
      - phase_shift: converted from degrees to a fraction of one cycle
      - phase: number of samples per waveform period
      - xn: the sample buffer, filled by generate_waveform()
    """
    def __init__(self, waveform, frequency, phase_shift, sampling_rate, duration, harmonics=1):
        self.waveform = waveform
        self.harmonics = harmonics
        self.frequency = frequency
        self.phase_shift = phase_shift / 360  # degrees -> fraction of a cycle
        self.sampling_rate = sampling_rate
        self.duration = duration
        self.period = 1 / frequency
        self.samples = (duration * sampling_rate) + 1
        self.phase = sampling_rate * self.period  # how many samples per period
        self.xn = [None] * int(self.samples)

    def sine_wave(self, i, harmonic):
        """Return sample *i* of the given *harmonic*, amplitude-scaled by 1/harmonic."""
        val = (1 / harmonic) * math.sin(harmonic * 2 * math.pi * ((i - (self.sampling_rate * self.phase_shift)) / self.phase))
        return val

    def generate_waveform(self):
        """Fill self.xn with the configured waveform, then plot it.

        BUG FIX: the original called self.SineWave(...), which does not
        exist (the method is named sine_wave) and raised AttributeError in
        both the "sin" and "square" branches.
        """
        print("Num of samples : {}".format(self.samples))
        # Zero out all indices before accumulating.
        for i in range(0, int(self.samples)):
            self.xn[i] = 0.0
        # Check what waveform to generate.
        if self.waveform == "sin":
            for i in range(0, int(self.samples)):
                self.xn[i] = self.sine_wave(i, self.harmonics)
                print(self.xn[i])
        elif self.waveform == "square":
            # Square approximation: sum odd harmonics 1, 3, 5, ...
            for i in range(0, int(self.samples)):
                for j in range(self.harmonics + 1):
                    self.xn[i] = self.xn[i] + self.sine_wave(i, (2 * j) + 1)
        # TODO: "saw" and "triangle" are not implemented yet; any other
        # waveform string currently falls through with a silent buffer.
        plot.ylim(-1, 1)
        plot.plot(numpy.arange(0, self.samples) / self.sampling_rate, self.xn, marker='o')
        plot.show(block=True)

    def write_to_wav(self, file):
        """Write the generated samples to *file* as a WAV (float64 data)."""
        data = numpy.asarray(self.xn)
        scipy.io.wavfile.write(file, self.sampling_rate, data)
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 30 15:26:47 2021
@author: M Shoaib
"""
def countMin(string):
    """Return the minimum number of single-character insertions (equivalently
    deletions) needed to turn *string* into a palindrome.

    Interval dynamic programming: app[i][j] is the cost for string[i..j].
    Matching ends cost nothing beyond the inner substring; mismatched ends
    cost 1 plus the cheaper of dropping either end.
    """
    l = len(string)
    # BUG FIX: the original indexed app[0][-1] unconditionally and raised
    # IndexError on the empty string; length 0/1 is already a palindrome.
    if l < 2:
        return 0
    app = [[0] * l for _ in range(l)]
    # Fill by increasing substring length (dif = j - i).  For dif == 1 the
    # referenced inner cells are still 0, so both recurrences below also
    # cover the two-character base case.
    for dif in range(1, l):
        for i in range(l - dif):
            j = i + dif
            if string[i] == string[j]:
                app[i][j] = app[i + 1][j - 1]
            else:
                app[i][j] = 1 + min(app[i][j - 1], app[i + 1][j])
    return app[0][-1]
#print(countMin('abcd'))
|
from dao.seat_table import Seat
from dao.flight_table import Flight
def check_is_seated(seat_code, flight_id):
    """Return True iff *seat_code* exists on the plane serving *flight_id*.

    Resolves the flight to its plane, then checks for a matching Seat row.
    NOTE(review): assumes the flight id is valid -- `flight` is dereferenced
    without a None check, as in the original.
    """
    flight = Flight.query.filter_by(id=flight_id).first()
    plane_id = flight.plane_id
    seat = Seat.query.filter_by(seat_code=seat_code,
                                plane_id=plane_id).first()
    # BUG FIX: the original tested `if seat():`, which *calls* the Seat
    # model instance (TypeError) instead of testing whether a row was found.
    return seat is not None
|
import sounddevice
import pydub
import time
import numpy
import queue
class audio():
    """Small playback helper wrapping pydub (decoding) and sounddevice (output)."""
    def __init__(self):
        self.samples = None          # lazily-built numpy int16 sample buffer
        self.now = {}                # state of the currently-open file
        self.now.update(place=0)
        self.now.update(path="")
    def openfile(self, filepath):
        """Decode *filepath* into a pydub segment; reset state on a new file."""
        if ".mp3" in filepath:
            self.segment = pydub.AudioSegment.from_file(filepath, codec="mp3")
        elif ".wav" in filepath:
            self.segment = pydub.AudioSegment.from_file(filepath, codec="wav")
        else:
            # .mp4 and anything else: let pydub/ffmpeg autodetect the format.
            self.segment = pydub.AudioSegment.from_file(filepath)
        if self.now["path"] != filepath:
            self.samples = None
            self.now.update(place=0)
        self.now.update(path=filepath)
    def play(self, place=0):
        """Start playback; the first call decodes samples, later calls seek."""
        if self.samples is None:
            # Stereo segments become an (n, 2) array, mono stays flat.
            if self.segment.channels != 1:
                self.samples = numpy.array(
                    self.segment.get_array_of_samples().tolist(),
                    dtype="int16").reshape(-1, 2)
            else:
                self.samples = numpy.array(
                    self.segment.get_array_of_samples().tolist(),
                    dtype="int16")
            sounddevice.play(self.samples, self.segment.frame_rate)
        else:
            self.goto(place)
        self.long = len(self.samples)
        self.player = sounddevice.get_stream()
    def stop(self):
        """Stop any current playback."""
        sounddevice.stop()
    def goto_seconds(self, place):
        """Seek to *place* (seconds) and restart playback.

        BUG FIX: the original defined goto() twice; this seconds-based
        variant was silently shadowed by the frame-based one below.  It is
        preserved under a new name so both behaviours remain available.
        """
        sounddevice.stop()
        self.player = sounddevice.play(
            self.samples[place * self.segment.frame_rate:-1],
            self.segment.frame_rate)
    def goto(self, place):
        """Seek to *place* (frame index) and restart playback -- the variant
        that was effective in the original class."""
        sounddevice.stop()
        self.player = sounddevice.play(self.samples[place:-1],
                                       self.segment.frame_rate)
    def get_file_info(self, key=None):
        """Return one metadata field, or a dict of all of them for key=None.

        BUG FIX: the original returned ``{}.update([...])`` for the
        all-fields case, which is always None (dict.update returns None).
        NOTE(review): pydub's attribute is ``duration_seconds``; the
        original's ``duration_second`` is kept -- confirm against the
        pydub version in use.
        """
        if key is None:
            return {"channel": self.segment.channels,
                    "frame_rate": self.segment.frame_rate,
                    "duration": self.segment.duration_second}
        elif key == "channel":
            return self.segment.channels
        elif key == "frame_rate":
            return self.segment.frame_rate
        elif key == "duration_second":
            return self.segment.duration_second
        elif key == "duration_frame":
            return self.long
        else:
            return None
    def get_player(self):
        # BUG FIX: this and the accessors below were missing ``self`` and
        # raised TypeError when called on an instance; get_status was also
        # defined twice (the duplicate is removed).
        return self.player
    def get_status(self):
        return sounddevice.get_status()
    def get_devicelist(self):
        return sounddevice.query_devices()
    def get_apilist(self):
        return sounddevice.query_hostapis()
|
import sys
import os
from base64 import b64encode
from json import load, dump
from Crypto.PublicKey import RSA
import requests
from client import Client
SERVER_URI = 'http://0.0.0.0:9000'
# Require the id of the file to fetch on the command line.
if len(sys.argv) < 2:
    print('Usage: python download.py <fileid>')
    exit(0)
# Handshake with the server, then download and persist the requested file.
client = Client()
client.get_uuid()
client.get_nonce()
client.init_kex()  # key exchange -- presumably RSA-based given the import; see client.py
client.download(sys.argv[1])
client.save()
|
def soup(matrix, word):
    # Word-search entry point: scan every cell for word[0], then try to
    # trace the remaining letters with checksoup.  On success returns a
    # coordinate string (row letter + column index, e.g. "C3"); otherwise
    # falls through and implicitly returns None.
    # NOTE(review): the scan tests matrix[fila-1][letra-1], so index 0 maps
    # to the *last* row/column via Python negative indexing -- presumably an
    # unintentional off-by-one; confirm against the expected demo output.
    for fila in range(len(matrix)):
        for letra in range(len(matrix[fila])):
            if matrix[fila-1][letra-1] == word[0]:
                if checksoup(matrix,fila-1,letra-1,word[1:]):
                    return "{0}{1}".format(chr(ord("A")+fila-1),letra)
def checksoup(matrix, fila, letra, word):
    # Recursively match the rest of *word* from the 3x3 neighbourhood of
    # (fila, letra) (including the cell itself).  Returns True on success;
    # when every direction fails it falls through to an implicit None (falsy).
    if word == "":
        return True
    else:
        for r in range(fila-1,fila+2):
            for l in range(letra-1,letra+2):
                # NOTE(review): the column bound uses len(matrix), not
                # len(matrix[r]) -- only correct for square grids; confirm.
                if r>=0 and l>=0 and r<len(matrix) and l<len(matrix) and word[0] == matrix[r][l]:
                    # NOTE(review): '==' here is a no-op comparison; it was
                    # presumably meant to blank the visited cell ('=') so a
                    # letter cannot be reused.  The commented demo below
                    # passes a tuple of tuples, which would make an
                    # assignment raise -- intent needs confirming.
                    matrix[r][l] == ""
                    # NOTE(review): the successful branch recurses twice
                    # with identical arguments (wasted work).
                    if checksoup(matrix,r,l,word[1:]):
                        return checksoup(matrix,r,l,word[1:])
                    else:
                        continue
#print(soup((('X', 'A', 'B', 'N', 'T', 'O'),
#('Y', 'T', 'N', 'R', 'I', 'T'),
#('U', 'P', 'O', 'M', 'D', 'S'),
#('I', 'O', 'H', 'U', 'O', 'O'),
#('R', 'T', 'E', 'L', 'Q', 'X'),
#('I', 'W', 'J', 'K', 'P', 'Z')), 'PORTO')) |
import ants
import importlib_resources
import pandas as pd
from ants.core.ants_image import ANTsImage
SUPPORTED_CONTRASTS = ["t1", "t2"]
def get_mni(contrast: str, bet: bool) -> ANTsImage:
    """Load the MNI ICBM152 09c template matching *contrast* and BET status.

    Args:
        contrast (str): MRI contrast, "t1" or "t2"
        bet (bool): whether to load the brain-extracted variant

    Returns:
        ANTsImage: the requested MNI template
    """
    assert contrast in SUPPORTED_CONTRASTS
    bet_suffix = "bet" if bet else ""
    template_name = f"mni_icbm152_{contrast}{bet_suffix}_tal_nlin_sym_09c.nii"
    package_dir = importlib_resources.files("roiloc")
    template_path = str(package_dir / "MNI" / "icbm152" / template_name)
    return ants.image_read(template_path, pixeltype="float", reorient="LPI")
def get_roi_indices(roi: str) -> list:
    """Look up the right and left CerebrA label indices for *roi*.

    Args:
        roi (str): ROI name (title-cased before the lookup)

    Returns:
        list: [right-hemisphere label, left-hemisphere label]
    """
    package_dir = importlib_resources.files("roiloc")
    csv_path = str(package_dir / "MNI" / "cerebra" / "CerebrA_LabelDetails.csv")
    label_table = pd.read_csv(csv_path, index_col="Label Name")
    roi_name = roi.title()
    return [label_table.loc[roi_name, "RH Label"],
            label_table.loc[roi_name, "LH Labels"]]
def get_atlas() -> ANTsImage:
    """Load the CerebrA atlas volume shipped with the roiloc package.

    Returns:
        ANTsImage: CerebrA atlas
    """
    package_dir = importlib_resources.files("roiloc")
    atlas_path = str(package_dir / "MNI" / "cerebra" /
                     "mni_icbm152_CerebrA_tal_nlin_sym_09c.nii")
    return ants.image_read(atlas_path, pixeltype="unsigned int", reorient="LPI")
|
"""
serializer.py is designed to be used iff you did not properly serialize
all of the information about your experiment when you ran it in the
first place. You should edit this file to properly describe the
experiment you ran, and then run
>> python serializer.py LOG_FILE_NAME
where LOG_FILE_NAME is the name of the log file generated by Keras
containing the information about the results of your training.
"""
from experiment_pb2 import *
import sys, os
import unittest
class SerializerTestCase(unittest.TestCase):
    """Tests for parse_log_file / create_epochs_from_data /
    add_epochs_to_experiment, run against a synthetic 5-epoch Keras log."""
    def setUp(self):
        # Work inside a throwaway ./test directory with a known log file.
        os.mkdir("test")
        os.chdir("test")
        test_file = open("test.log", "w")
        test_file.write("Epoch 1/200\n"
                        "21s - loss: 0.5758 - acc: 0.6995\n"
                        "Epoch 2/200\n"
                        "21s - loss: 0.5649 - acc: 0.7091\n"
                        "Epoch 3/200\n"
                        "21s - loss: 0.5605 - acc: 0.7132\n"
                        "Epoch 4/200\n"
                        "21s - loss: 0.5579 - acc: 0.7157\n"
                        "Epoch 5/200\n"
                        "21s - loss: 0.5560 - acc: 0.7170\n")
        test_file.close()
        # Expected values mirroring the log written above.
        self.losses = [0.5758,
                       0.5649,
                       0.5605,
                       0.5579,
                       0.5560]
        self.times = [21,
                      21,
                      21,
                      21,
                      21]
        self.accuracies = [0.6995,
                           0.7091,
                           0.7132,
                           0.7157,
                           0.7170]
    def test_parse_log_file(self):
        # The parser should recover exactly the numbers from setUp.
        self.assertEqual(parse_log_file("test.log"),
                         (self.times, self.losses, self.accuracies))
    def test_create_epochs_from_data(self):
        # Smoke test: construction must not raise.
        create_epochs_from_data(self.times, self.losses, self.accuracies)
    def test_add_epochs_to_experiment(self):
        exp = Experiment()
        exp = add_epochs_to_experiment(exp, self.times, self.losses,
                                       self.accuracies)
        epochs = create_epochs_from_data(self.times, self.losses,
                                         self.accuracies)
        self.assertEqual(exp.results[:], epochs)  # need [:] because
        # the type of exp.results is google.protobuf.internal.
        # containers.RepeatedCompositeFieldContainer while the type of
        # epochs is list. [:] gives exp.results in list form so equality
        # can be tested
    def tearDown(self):
        # Leave the scratch directory and remove it entirely.
        import shutil
        os.chdir("..")
        shutil.rmtree("test")
def parse_log_file(log_file_name):
    """Parse a Keras-formatted log file into per-epoch metric lists.

    Parameters
    ----------
    log_file_name : name of a log file formatted by Keras.  If the file
        cannot be opened, an error is printed and the process exits.

    Returns
    -------
    times : list of epoch durations (seconds, as floats)
    losses : list of loss-function values
    accuracies : list of training accuracies

    Notes
    -----
    Element 0 of each list corresponds to the first epoch.  The metric
    line is expected to follow each "Epoch" header in the form
    "21s - loss: 0.5758 - acc: 0.6995".
    """
    # Check that the provided log file is a real, readable file.
    try:
        f = open(log_file_name, "r")
    except IOError:
        print ("ERROR -- " + log_file_name + " is not a valid file")
        exit(-1)
    times = []
    losses = []
    accuracies = []
    # BUG FIX: the original never closed the handle; `with` guarantees it.
    with f:
        while True:
            line = f.readline()
            if line == "":
                break  # EOF
            if "Epoch" in line:
                # The line after an "Epoch" header carries the metrics.
                data = f.readline().strip().split(" - ")
                time = data[0].split("s")[0]
                times.append(float(time))
                loss = data[1].split("loss: ")[1]
                losses.append(float(loss))
                accuracy = data[2].split("acc: ")[1]
                accuracies.append(float(accuracy))
    return (times, losses, accuracies)
def create_epochs_from_data(times, losses, train_accuracies,
                            test_accuracies=None):
    """Build a list of experiment_pb2.Epoch messages from raw metric lists.

    Parameters
    ----------
    times : list of epoch durations in seconds
    losses : list of loss-function values per epoch
    train_accuracies : list of training accuracies per epoch
    test_accuracies : optional list of test accuracies (currently unused,
        kept for a future Keras hook)

    Returns
    -------
    epochs : list of experiment_pb2.Epoch() objects, one per entry
    """
    # The three lists are assumed to be equally long; iterate by index so a
    # mismatch surfaces loudly rather than being silently truncated.
    epoch_count = len(times)
    epoch_list = []
    for idx in range(epoch_count):
        epoch = Epoch()
        epoch.num_seconds = times[idx]
        epoch.loss = losses[idx]
        epoch.train_accuracy = train_accuracies[idx]
        epoch_list.append(epoch)
    return epoch_list
def add_epochs_to_experiment(experiment, times, losses,
                             train_accuracies):
    """Append one result Epoch per metric entry to *experiment*; return it.

    The three lists are assumed to be the same length (one entry per
    training epoch), so indexing over len(times) is arbitrary but safe.
    """
    for idx in range(len(times)):
        epoch = experiment.results.add()
        epoch.num_seconds = times[idx]
        epoch.loss = losses[idx]
        epoch.train_accuracy = train_accuracies[idx]
    return experiment
if __name__ == "__main__":
    # uncomment the next 2 lines to run the tests
    # suite = unittest.TestLoader().loadTestsFromTestCase(SerializerTestCase)
    # unittest.TextTestRunner(verbosity=2).run(suite)
    # Validate the command line before touching sys.argv[1].
    if len(sys.argv) != 2:
        print ("ERROR -- Usage:" + sys.argv[0] + " LOG_FILE_NAME")
        # BUG FIX: the original fell through after printing the usage
        # message and crashed on sys.argv[1]; exit explicitly instead.
        exit(-1)
    log_file_path = sys.argv[1]
    (times, losses, accuracies) = parse_log_file(log_file_path)
    exp = Experiment()
    exp = add_epochs_to_experiment(exp, times, losses, accuracies)
    # Derive "<logname>.experiment" from the log file's base name.
    log_file_name_extension = os.path.split(log_file_path)[1]
    log_file_name = log_file_name_extension.split(".")[0]
    output_file_name = ("%s.experiment"
                        % log_file_name)
    # BUG FIX: `print output_file_name` was Python 2 syntax and is a
    # SyntaxError under Python 3, which the rest of this file targets.
    print(output_file_name)
    os.chdir("../data/OSU_TTBAR/")
    # BUG FIX: use a context manager so the output handle is always closed.
    with open(output_file_name, "wb") as f:
        f.write(exp.SerializeToString())
|
#!/usr/bin/env python
# coding: utf-8
# ## Machine Learning
# ### Logistic Regression - Titanic
# Layout of this notebook
# -------------------------------------------------------------------------------------
# Step 1 - Frame the problem and look at the big picture<br><br>
# Step 2 - Setup<br>
# 2.1 - Common imports <br>
# 2.2 - Define standard function used in this notebook<br><br>
# Step 3 - Get the data<br>
# 3.1 - Combine train and test data <br><br>
# Step 4 - Explore and process data<br>
# 4.1 - Check basic info and stats<br>
# 4.2 - Fill missing values ['Embarked', 'Fare']<br>
# 4.3 - Rename categorical data<br>
# 4.4 - Modify Cabin data<br>
# 4.5 - Create columns for no. of pass. on same ticket and individual Fare<br>
# 4.6 - Create a new column of family size<br>
# 4.7 - Modify 'SibSp' and 'Parch' data<br>
# 4.8 - Create 'Title' Column<br>
# 4.9 - Create 'Length of Name' Column<br>
# 4.10 - Create prefix of 'Ticket' as new Column<br>
# 4.11 - Process data<br>
# 4.12 - Find and delete rows with outlier data<br>
# 4.13 - Create a new column of Fare Group <br>
# 4.14 - Analyze missing age data in detail<br>
# 4.15 - Fill missing age data<br>
# 4.16 - Create a new column of Age Category<br>
# 4.17 - Visualize numerical features<br>
# 4.18 - Fix skewness and normalize<br>
# 4.19 - Analyze survival data - Visualizing outcome across independent variables<br><br>
# Step 5 - Prepare the data for Machine Learning algorithms<br>
# 5.1 - Process further and create train, target and test dataset <br>
# 5.2 - Visualize how Machine Learning models works on classification with just 2 numerical features<br><br>
# Step 6 - Select and train a model <br>
# 6.1 - Visualize Machine Learning models<br>
# 6.2 - Comparing Classification Machine Learning models with all independent features.<br>
# 6.3 - Extensive GridSearch with valuable parameters / score in a dataframe<br>
# 6.4 - Find best estimators<br>
# 6.5 - Plot learning curves<br>
# 6.6 - Feature Importance<br><br>
# Step 7 - Fine-tune your model<br>
# 7.1 - Create voting classifier<br><br>
# Step 8 - Predict and present your solution<br><br>
# Step 9 - Final words!
# #### **Step 1 - Frame the problem and look at the big picture**
# 1.1 - Define the objective in business terms.<br>
# ans - Complete the analysis of what sorts of people were likely to survive in the unfortunate event of sinking of Titanic. <br>In particular, it is asked to apply the tools of machine learning to predict which passengers survived the tragedy.<br>
# <br>1.2 - How should you frame this problem? (supervised/unsupervised, online/offline, etc.)?<br>
# ans - This is supervised learning task because we know output for a set of passengers' data.<br>
# This is also a logistic regression (classification) task, since you are asked to predict a binary outcome. <br>
# <br>1.3 - How should performance be measured?<br>
# ans - Metric - Your score is the percentage of passengers you correctly predict. <br>This is known simply as "accuracy". I will use other performance measures as well, such as K-fold cross validation, <br>f1_score (combination of precision and recall) etc.
# #### **Step 2 - Setup**
# **Common imports**
# In[ ]:
# Start time for script
import time
start = time.time()  # wall-clock start, used to report total runtime later
# pandas / numpy etc
import pandas as pd
import numpy as np
import scipy.stats as ss
from scipy.special import boxcox1p
# To plot pretty figures
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
get_ipython().magic(u'matplotlib inline')  # notebook-only: render plots inline
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
import seaborn as sns
sns.set_style('dark', {'axes.facecolor' : 'lightgray'})
# for seaborn issue:
import warnings
warnings.filterwarnings('ignore')
# machine learning [Classification]
from sklearn.model_selection import (train_test_split, cross_val_score, StratifiedKFold, learning_curve, GridSearchCV)
from sklearn.preprocessing import (StandardScaler)
from sklearn.metrics import (accuracy_score, f1_score, log_loss, confusion_matrix)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier)
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from xgboost import XGBClassifier
# 5-fold stratified cross-validation and a fixed seed for reproducibility.
kfold = StratifiedKFold(n_splits=5)
rand_st =42
# **Define standard function used in this notebook**
# **Function 1 - outliers_iqr [Function to find and delete outliers]**
# In[ ]:
# Function to find and delete outliers. [I have to run a few steps outside the function to be on the safer side]
# It uses multiples of IQR (Inter Quartile Range) to detect outliers in specified columns
def outliers_iqr(df, columns_for_outliers, iqr_factor):
    """Flag IQR-based outliers in *df*, updating its 'Outlier' column in place.

    A row is flagged (Outlier = 1) when its value in any listed column falls
    outside [q25 - iqr_factor*IQR, q75 + iqr_factor*IQR].  Rows without a
    'Survived' value (test rows) are always reset to 0 so that only train
    rows are candidates for deletion.  Prints the flagged-row count.
    """
    for col in columns_for_outliers:
        # Lazily create the marker column on the first processed column.
        if 'Outlier' not in df.columns:
            df['Outlier'] = 0
        q_hi, q_lo = np.nanpercentile(df[col], [75, 25])
        margin = (q_hi - q_lo) * iqr_factor
        lower_bound = q_lo - margin
        upper_bound = q_hi + margin
        outside = (df[col] > upper_bound) | (df[col] < lower_bound)
        df['Outlier'] = np.where(outside, 1, df['Outlier'])
    # Extra safety: only rows with a known Survived label may stay flagged.
    df['Outlier'] = np.where(df.Survived.notnull(), df['Outlier'], 0)
    total_rows_del = df.Outlier.sum()
    print('Total ', total_rows_del, ' rows with outliers from comb_data can be deleted')
# **Function 2 - plot_class_models_two_num_features [visualize classification Machine Learning models with two numerical features]**
# In[ ]:
# Create function to visualize how different Machine Learning models look, by using just 2 numerical features.
# [It is not possible to plot if more than 2 features are used] [use 'X_num' and 'y_train']
# Define set of classifiers
# Map of model label -> pre-configured estimator; iterated by the plotting
# helper below to compare decision boundaries and scores across models.
clf_dict = {"clf_Log_reg" : LogisticRegression(random_state=rand_st),
            "clf_Lin_SVC" : SVC(kernel="linear", C=0.1, cache_size=5000, probability=True, random_state=rand_st),
            "clf_Poly_SVC" : SVC(kernel="poly", degree=2, random_state=rand_st),
            "clf_Ker_SVC" : SVC(kernel="rbf", C=50, cache_size=5000, gamma=0.001, probability=True, random_state=rand_st),
            "clf_KNN" : KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
                                             metric_params=None, n_jobs=-1, n_neighbors=13, p=2, weights='uniform'),
            "clf_GNB" : GaussianNB(),
            "clf_MLP" : MLPClassifier(alpha=0.0001, learning_rate_init=0.05, shuffle=True, random_state=rand_st),
            "clf_Dec_tr" : DecisionTreeClassifier(criterion='entropy', max_depth=8, min_samples_leaf=1, min_samples_split=2,
                                                  splitter='best', random_state=rand_st),
            "clf_Gauss" : GaussianProcessClassifier(random_state=rand_st),
            "clf_RF" : RandomForestClassifier(criterion='gini', max_depth=6, n_estimators = 350, n_jobs=-1,
                                              random_state=rand_st),
            "clf_AdaBoost" : AdaBoostClassifier(algorithm='SAMME.R', base_estimator=DecisionTreeClassifier(criterion='entropy', max_depth=8,
                                                max_features=12, min_samples_leaf=1, min_samples_split=2, random_state=42, splitter='best'), learning_rate=0.2, n_estimators=2, random_state=rand_st),
            "clf_GrBoost" : GradientBoostingClassifier(learning_rate=0.1, loss='deviance', max_depth=4, n_estimators=1500,
                                                       max_features=12, min_samples_leaf=100, min_samples_split=200, subsample=0.8, random_state=rand_st),
            "clf_ExTree" : ExtraTreesClassifier(criterion='gini', max_depth=4, min_samples_leaf=2, min_samples_split=2,
                                                n_estimators=200, n_jobs=-1, random_state=rand_st),
            "clf_XGBoost" : XGBClassifier(colsample_bytree=0.6, gamma=0, learning_rate=0.05, max_depth=5, n_estimators=3000,
                                          n_jobs=-1, reg_alpha=0.01, subsample=0.8, random_state=rand_st)}
# Create a function to plot different classification models
# Define fixed inputs
h = .02 # step size in the mesh
# Define function
def plot_class_models_two_num_features(X, y, clf_dict, title):
    """Fit every classifier in clf_dict on 2-feature data and plot each
    decision surface side by side with its test-set accuracy.

    X        : array-like of shape (n_samples, 2) -- exactly two numeric features
    y        : binary target array
    clf_dict : mapping of display name -> unfitted sklearn classifier
    title    : title for the first (raw data) subplot

    Uses the module-level mesh step `h`. Shows the figure; returns None.
    """
    import warnings
    warnings.filterwarnings('ignore')
    figure = plt.figure(figsize=(27, 10))
    # preprocess dataset, split into training and test part
    dataset = (X, y)
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=42)
    # mesh bounds padded by 0.5 around the (scaled) data
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    # NOTE(review): len(dataset) is 2 (the (X, y) tuple), so the grid has 2 rows
    ax = plt.subplot(len(dataset), len(clf_dict) + 1, 1)
    ax.set_title(title)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors='k')
    # and testing points
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i=2
    # iterate over classifiers
    for clf_name, clf in clf_dict.items():
        ax = plt.subplot(len(dataset), len(clf_dict) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            # fall back to the probability of the positive class
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright, edgecolors='k')
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, edgecolors='k', alpha=0.6)
        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(clf_name)
        # test accuracy shown in the lower-right corner of each panel
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'), size=15, horizontalalignment='right', verticalalignment = 'top', color='black', bbox=dict(facecolor='yellow', alpha=0.5))
        i += 1
    plt.tight_layout()
    plt.show()
# **Function 3 - clf_cross_val_score_and_metrics [Function to evaluate various classification models]**
# In[ ]:
# Function to evaluate various classification models [Metrics and cross_val-score]
# Cross validate model with Kfold stratified cross val
def clf_cross_val_score_and_metrics(X, y, clf_dict, CVS_scoring, CVS_CV):
    """Evaluate every classifier in clf_dict on a hold-out split and with
    cross-validation, print the metrics table and draw a bar plot of the
    mean CV accuracy (with SD error bars).

    X, y        : full feature matrix / target vector
    clf_dict    : mapping of display name -> unfitted sklearn classifier
    CVS_scoring : scoring string passed to cross_val_score
    CVS_CV      : CV splitter or fold count passed to cross_val_score
    """
    # Train and Validation set split by model_selection
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25, random_state=rand_st)
    metric_cols = ['clf_name', 'Score', 'Accu_Preds', 'F1_Score', 'Log_Loss', 'CVS_Best', 'CVS_Mean', 'CVS_SD']
    metric_dict = []
    # iterate over classifiers
    for clf_name, clf in clf_dict.items():
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_val)
        Score = "{:.3f}".format(clf.score(X_val, y_val))
        # normalize=False -> raw count of correct predictions, not a ratio
        Accu_Preds = accuracy_score(y_val, y_pred, normalize=False)
        F1_Score = "{:.3f}".format(f1_score(y_val, y_pred))
        # NOTE(review): log_loss is computed on hard 0/1 predictions here rather
        # than predict_proba output, so it is a coarse measure -- confirm intent.
        Log_Loss = "{:.3f}".format(log_loss(y_val, y_pred))
        CVS_values = cross_val_score(estimator = clf, X = X, y = y, scoring = CVS_scoring, cv = CVS_CV, n_jobs=-1)
        CVS_Best = "{:.3f}".format(CVS_values.max())
        CVS_Mean = "{:.3f}".format(CVS_values.mean())
        CVS_SD = "{:.3f}".format(CVS_values.std())
        metric_values = [clf_name, Score, Accu_Preds, F1_Score, Log_Loss, CVS_Best, CVS_Mean, CVS_SD]
        metric_dict.append(dict(zip(metric_cols, metric_values)))
    # FIX: build the frame directly from the collected row dicts --
    # DataFrame.append was deprecated in pandas 1.4 and removed in pandas 2.0.
    clf_metrics = pd.DataFrame(metric_dict, columns=metric_cols)
    # Change to float data type (metrics were formatted as strings above)
    for column_name in clf_metrics.drop('clf_name', axis=1).columns:
        clf_metrics[column_name] = clf_metrics[column_name].astype('float')
    clf_metrics.sort_values('CVS_Mean', ascending=False, na_position='last', inplace=True)
    print(clf_metrics)
    clf_bp = sns.barplot(x='CVS_Mean', y='clf_name', data = clf_metrics, palette='inferno',orient = "h",**{'xerr':clf_metrics.CVS_SD})
    clf_bp.set_xlabel("Mean Accuracy")
    clf_bp.set_ylabel("Classification Models")
    clf_bp.set_title("Cross Validation Scores")
# **Function 4 - clf_GridSearchCV_results [Function to conduct extensive GridSearch and return valuable parameters / score in a dataframe]**
# In[ ]:
# Define estimator and parameters grid for Grid Search CV
# Each pair below is an unfitted estimator plus the parameter grid searched for it.
clf_GP_gs = GaussianProcessClassifier(random_state=rand_st)
clf_GP_pg = [{'n_restarts_optimizer': [0],
              'warm_start': [True],
              'max_iter_predict': [200]}]
clf_RF_gs = RandomForestClassifier(random_state=rand_st)
clf_RF_pg = [{'max_depth': [7, 8, 9],
              'max_features': ['auto', 12],
              'criterion': ['gini'],
              'n_estimators': [200],
              "min_samples_split": [10],
              "min_samples_leaf": [3]}]
clf_MLP_gs = MLPClassifier(random_state=rand_st)
clf_MLP_pg = [{'activation': ['relu'],
               'solver': ['adam'],
               'learning_rate': ['adaptive'],
               'max_iter': [30],
               'alpha': [0.01],
               'shuffle': [True, False],
               'learning_rate_init': [0.01]}]
clf_Dec_tr = DecisionTreeClassifier(random_state=rand_st)
clf_AdaBoost_gs = AdaBoostClassifier(clf_Dec_tr, random_state=rand_st)
# base_estimator__* keys tune the wrapped DecisionTreeClassifier
clf_AdaBoost_pg = {"base_estimator__criterion" : ["entropy"],
                   "base_estimator__splitter" : ["best"],
                   "algorithm" : ["SAMME.R"],
                   "n_estimators" :[500],
                   "learning_rate": [0.1]}
clf_Ex_tr_gs = ExtraTreesClassifier(random_state=rand_st)
clf_Ex_tr_pg = {"max_depth": [None, 8],
                "max_features": ['auto', 10],
                "min_samples_split": [5],
                "min_samples_leaf": [3],
                "n_estimators" :[200],
                "criterion": ["gini"]}
clf_XGB_gs = XGBClassifier(random_state=rand_st)
clf_XGB_pg = {'learning_rate': [0.01],
              'max_depth': [5],
              'subsample': [0.8],
              'colsample_bytree': [0.6],
              'n_estimators': [3000],
              'reg_alpha': [0.05]}
clf_GB_gs = GradientBoostingClassifier(random_state=rand_st)
clf_GB_pg = {'min_samples_split' : [100],
             'n_estimators' : [3000],
             'learning_rate': [0.1],
             'max_depth': [4],
             'subsample': [0.8],
             'min_samples_leaf': [100],
             'max_features': ['auto', 10]}
clf_SVC_gs = SVC(random_state=rand_st)
clf_SVC_pg = [{'C': [1],
               'kernel': ['linear'],
               'gamma': [0.5]}]
# Parallel lists consumed by clf_GridSearchCV_results (order must stay aligned).
clf_models_gs = [clf_GP_gs, clf_RF_gs, clf_MLP_gs, clf_AdaBoost_gs, clf_Ex_tr_gs, clf_XGB_gs, clf_GB_gs, clf_SVC_gs]
clf_models_gs_name = ['clf_GP_gs', 'clf_RF_gs', 'clf_MLP_gs', 'clf_AdaBoost_gs', 'clf_Ex_tr_gs', 'clf_XGB_gs', 'clf_GB_gs', 'clf_SVC_gs']
clf_params_gs = [clf_GP_pg, clf_RF_pg, clf_MLP_pg, clf_AdaBoost_pg, clf_Ex_tr_pg, clf_XGB_pg, clf_GB_pg, clf_SVC_pg]
gs_metric_cols = ['clf_name', 'Best_Score', 'Mean_Train_Score', 'Mean_Test_Score', 'Mean_Test_SD', 'Best_Estimator', 'Best_Params']
gs_metrics = pd.DataFrame(columns = gs_metric_cols)
# Define function to conduct extensive GridSearch and return valuable parameters / score in a dataframe
def clf_GridSearchCV_results(gs_metrics, X_train, y_train, GS_scoring, GS_CV):
    """Run GridSearchCV for every (name, estimator, grid) triple in the
    module-level lists clf_models_gs_name / clf_models_gs / clf_params_gs
    and append one summary row per estimator to gs_metrics.

    gs_metrics : DataFrame with columns gs_metric_cols (rows appended)
    X_train, y_train : training data passed to GridSearchCV.fit
    GS_scoring : scoring string for GridSearchCV
    GS_CV      : CV splitter or fold count for GridSearchCV
    Returns the extended DataFrame.
    """
    gs_metric_dict = []
    # iterate over classifiers and param grids
    for clf_gs_name, clf_gs, params_gs in zip(clf_models_gs_name, clf_models_gs, clf_params_gs):
        # FIX: return_train_score=True is required for 'mean_train_score' to be
        # present in cv_results_ (sklearn defaults it to False since 0.21, which
        # made the lookup below raise KeyError).
        clf_gs = GridSearchCV(clf_gs, param_grid = params_gs, cv=GS_CV, scoring=GS_scoring,
                              n_jobs= -1, verbose = 1, return_train_score=True)
        clf_gs.fit(X_train,y_train)
        Best_Score = clf_gs.best_score_
        # means taken over all parameter combinations in the grid
        Mean_Train_Score = np.mean(clf_gs.cv_results_['mean_train_score'])
        Mean_Test_Score = np.mean(clf_gs.cv_results_['mean_test_score'])
        Mean_Test_SD = np.mean(clf_gs.cv_results_['std_test_score'])
        Best_Estimator = clf_gs.best_estimator_
        Best_Params = clf_gs.best_params_
        gs_metric_values = [clf_gs_name, Best_Score, Mean_Train_Score, Mean_Test_Score, Mean_Test_SD, Best_Estimator, Best_Params]
        gs_metric_dict.append(dict(zip(gs_metric_cols, gs_metric_values)))
    # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    gs_metrics = pd.concat([gs_metrics, pd.DataFrame(gs_metric_dict, columns=gs_metric_cols)], ignore_index=True)
    return gs_metrics
# **Function 5 - plot_learning_curve [Function to plot learning curves]**
# In[ ]:
# Define function to plot learning curves
def plot_learning_curve(estimator, title, X_train, y_train, ylim=None, cv=None,
                        n_jobs=-1, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot training and cross-validation score versus training-set size.

    estimator   : unfitted sklearn estimator
    title       : figure title
    ylim        : optional (low, high) y-axis limits
    cv, n_jobs, train_sizes : forwarded to sklearn's learning_curve
    Returns the pyplot module so the caller can further tweak/show the figure.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    sizes, tr_scores, te_scores = learning_curve(
        estimator, X_train, y_train, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # per-size mean and standard deviation across CV folds
    tr_mean = np.mean(tr_scores, axis=1)
    tr_std = np.std(tr_scores, axis=1)
    te_mean = np.mean(te_scores, axis=1)
    te_std = np.std(te_scores, axis=1)
    plt.grid()
    # shaded +/- one-SD band around each curve
    plt.fill_between(sizes, tr_mean - tr_std, tr_mean + tr_std, alpha=0.1, color="r")
    plt.fill_between(sizes, te_mean - te_std, te_mean + te_std, alpha=0.1, color="g")
    plt.plot(sizes, tr_mean, 'o-', color="r", label="Training score")
    plt.plot(sizes, te_mean, 'o-', color="g", label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# #### **Step 3 - Get the data**
# In[ ]:
# Manual method
train_data = pd.read_csv('../input/train.csv')
test_data = pd.read_csv('../input/test.csv')
# Combine dataset to create one dataframe for exploration and pre-processing purpose
comb_data = pd.concat([train_data, test_data])
# Mark 'train' and 'test' dataset -- test rows are those with no 'Survived' value
comb_data['DataType'] = np.where(comb_data[['Survived']].isnull().all(1), 'test', 'train')
comb_data.head()
# *For exploration and pre-processing, I combined data which can be later split in train and test set.*
# #### **Step 4 - Explore and process data**
# **Check basic info and stats**
# In[ ]:
# Check basic stats [Numerical features]
comb_data.describe().transpose()
# *It seems Fare, Parch and SibSp features have outliers*
# In[ ]:
# Check basic stats [Categorical features]
comb_data.describe(include=['object', 'category']).transpose()
# In[ ]:
# Check basic info
print("-----------------------Train Data-----------------------------")
comb_data[comb_data.DataType == 'train'].info()
print("-----------------------Test Data-----------------------------")
comb_data[comb_data.DataType == 'test'].info()
print("-----------------------Combined Data-----------------------------")
comb_data.info()
# *Notice that many categorical attributes are treated as numerical or object data type. There are missing values too.*
# In[ ]:
# Check null values in each column
print(comb_data.isnull().sum())
# *Inferences:*<br>
# *1. 'Age' and 'Fare' are two numerical variables.<br>
# *2. 'Cabin' column has 77.4% missing values. This column can be transformed to indicate 'No Cabin'(null values) and 'Deck' level.<br>
# *3. 'Embarked' column has only 2 missing values, so it can be easily filled with the most frequent category.<br>
# *4. 'Fare' column has only 1 missing values. It can be filled with median value.<br>
# *5. 'Age' column, which might be important for modeling, has 20% missing values. <br> We will handle it separately with some exploration*<br>
# *6. It looks like 'Parch' and 'SibSp' columns have subcategories which can be grouped into one.*
# **Fill missing values ['Embarked', 'Fare']**
# In[ ]:
# Fill with the most frequent value [only columns in which missing values are very few].
# NOTE(review): value_counts().index[0] is the MODE, not the median -- for
# 'Fare' this differs from the markdown above, which suggests the median.
for column_name in ['Embarked', 'Fare']:
    comb_data[column_name].fillna(comb_data[column_name].value_counts().index[0], inplace=True)
# **Rename categorical data**
# In[ ]:
# Change categorical values to more meaningful values [For visulaization only, I will convert to numerical type before running ML models]
comb_data['Pclass'] = np.where(comb_data['Pclass']==1, 'UpperClass', np.where(comb_data['Pclass']==2, 'MiddleClass', 'LowerClass'))
comb_data['Embarked'].replace(['C','Q', 'S'],['Cherbourg','Queenstown', 'Southampton'], inplace=True)
comb_data['Sex'] = np.where(comb_data['Sex']=='male', 'Male', 'Female')
comb_data['Survived'].replace([0,1],['No','Yes'], inplace=True)
# **Modify Cabin data**
# In[ ]:
# Recreate 'Cabin' with first character which represents deck and 'N' for null values.
comb_data['Cabin'] = np.where(comb_data[['Cabin']].isnull().all(1), 'N', comb_data.Cabin.str[0])
comb_data['Cabin'].value_counts().sort_values(ascending=False)
# **Create columns for no. of pass. on same ticket and individual Fare**
# In[ ]:
# There are many passengers travelling on same ticket and 'Fare' is total fare for all passengers on the same ticket.
comb_data[['Ticket', 'Fare']].groupby(['Ticket'], as_index=False).count().sort_values(by='Fare', ascending=False).head()
# In[ ]:
# Create 'PassCountTicket' column to show no. of passengers on same ticket
# Let us explore 11 passengers travelling on same ticket
comb_data['PassCountTicket'] = comb_data['Ticket'].map(comb_data['Ticket'].value_counts())
comb_data[comb_data.Ticket=='CA. 2343']
# In[ ]:
# Create 'IndFare' column by dividing 'Fare' by no. of passengers on same ticket
comb_data['IndFare'] = comb_data.Fare / comb_data.PassCountTicket
# Check 'IndFare' data
comb_data[comb_data.Ticket=='CA. 2343']
# In[ ]:
# Check how many passengers are in each unique 'PassCountTicket' value
comb_data['PassCountTicket'].value_counts().sort_values(ascending=False)
# **Create a new column of family size**
# In[ ]:
# A person with zero 'SibSp' and 'Parch' is travelling alone; the +1 counts the passenger themselves
comb_data['FamSize'] = comb_data['SibSp'] + comb_data['Parch'] + 1
print(comb_data['FamSize'].value_counts().sort_values(ascending=False))
# Visulize 'FamSize' data across 'Fare' and 'Survived'
v0 = sns.violinplot(data=comb_data[comb_data.DataType=='train'], x='FamSize', y='Fare', hue='Survived', scale='count', split=True, inner="stick")
v0.set_title('Survival across Family Size & Age', fontsize = 15)
plt.show()
# In[ ]:
# Create 'Single', 'Small' and 'Large' Category [1 -> Single, 2-4 -> Small, 5+ -> Large]
comb_data['FamSize'] = np.where(comb_data['FamSize']<2, 'Single', np.where(comb_data['FamSize']<5, 'Small', 'Large'))
comb_data['FamSize'] = comb_data['FamSize'].astype('category')
# **Modify 'SibSp' and 'Parch' data**
# In[ ]:
# Check the count of unique nos. in 'SibSp' and 'Parch' columns
print(comb_data['Parch'].value_counts().sort_values())
print(comb_data['SibSp'].value_counts().sort_values())
# In[ ]:
# Reduce the number of categories in Parch and SibSp as more than 4 are insignificant (cap both at 4).
comb_data['Parch'].replace([5, 6, 9],[4, 4, 4], inplace=True)
comb_data['SibSp'].replace([5, 8],[4, 4], inplace=True)
# **Create 'Title' Column**
# In[ ]:
# Create Title columns from 'Name' -- the word ending in '.' (e.g. 'Mr.', 'Miss.')
comb_data['Title'] = comb_data.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
print(comb_data['Title'].value_counts().sort_values(ascending=False))
# In[ ]:
# Clean up Title column categories [merge synonyms; group infrequent titles as 'Rare']
comb_data['Title'] = comb_data['Title'].replace('Mlle', 'Miss')
comb_data['Title'] = comb_data['Title'].replace('Ms', 'Miss')
comb_data['Title'] = comb_data['Title'].replace('Mme', 'Mrs')
comb_data['Title'] = comb_data['Title'].replace(['Lady', 'Countess','Capt', 'Col','Don', 'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
print(comb_data['Title'].value_counts().sort_values(ascending=False))
# **Create 'Length of Name' Column**
# In[ ]:
# Create feature for length of name
# The .apply method generates a new series
comb_data['NameLength'] = comb_data['Name'].apply(lambda x: len(x))
# **Create prefix of 'Ticket' as new Column**
# In[ ]:
# Create new column from 'Ticket' by extracting the ticket prefix. When there is no prefix it returns "N".
TicketTrim = []
for i in list(comb_data.Ticket):
    if not i.isdigit() :
        TicketTrim.append(i.replace(".","").replace("/","").strip().split(' ')[0]) #Take prefix
    else:
        # all-digit ticket numbers have no prefix
        TicketTrim.append("N")
comb_data["TicketTrim"] = TicketTrim
comb_data["TicketTrim"].value_counts().sort_values(ascending=False).head(10)
# **Process data**
# In[ ]:
# Drop columns not needed further
comb_data = comb_data.drop(labels = ['Name', 'Ticket', 'PassCountTicket'],axis = 1)
comb_data.head()
# In[ ]:
# Check data types of columns
comb_data.dtypes
# In[ ]:
# Change PassengerId data type so that it does not show up in plots. (change back to integer before applying ML models)
comb_data["PassengerId"] = comb_data["PassengerId"].astype(str)
# Change categorical columns to category data type
for column_name in ['Cabin', 'Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked', 'FamSize', 'Title', 'TicketTrim']:
    comb_data[column_name] = comb_data[column_name].astype('category')
comb_data.dtypes
# **Find and delete rows with outlier data**
# In[ ]:
# Drop rows with extreme outlier data [iqr_factor=10, normally this is equal to 1.5]
# List columns for outliers processing
columns_for_outliers = ['Age', 'Fare', 'NameLength']
# Run function
# NOTE(review): outliers_iqr is assumed to be defined earlier in the file and to
# flag outlier rows via an 'Outlier' column (==1) -- confirm.
outliers_iqr(comb_data, columns_for_outliers, 10)
# Delete rows with outlier
comb_data = comb_data[comb_data.Outlier != 1]
# Drop temp. column
comb_data = comb_data.drop(['Outlier'], axis=1)
# In[ ]:
# Check remaining no. of rows
comb_data.shape
# In[ ]:
# Let us check stats once again.
print("----------Stats of numerical columns---------------")
print(comb_data.describe().transpose())
print("----------Stats of categorical columns---------------")
print(comb_data.describe(include=['category']).transpose())
print("-------------Count of null values------------------")
print(comb_data.isnull().sum())
print("-------Count of values in each category------------")
for column_name in comb_data.select_dtypes(include=['category']).columns:
    print(comb_data[column_name].value_counts().sort_values(ascending=False))
# **Create a new column of Fare Group**
# In[ ]:
# Bin 'Fare' into four tiers -- cut points presumably chosen from quartiles; verify
comb_data['FareGroup'] = np.where(comb_data['Fare']<7.73, 'Tier1', np.where(comb_data['Fare']<10.5, 'Tier2', np.where(comb_data['Fare']<52.5, 'Tier3', 'Tier4')))
comb_data['FareGroup'] = comb_data['FareGroup'].astype('category')
# **I will analyze missing age data in detail to check if missing data is totally random or skewed.<br>
# The goal is to find features which have most no. of missing data and have most variation in age.,<br>
# Then we will use Random Forest Regressor to fill missing age data**
# In[ ]:
# Create a new column to mark rows with missing 'Age' ('No' = Age is missing)
comb_data['AgeData'] = np.where(comb_data[['Age']].isnull().all(1), 'No', 'Yes')
# In[ ]:
# Find columns which have proportionally more missing age data
f, axes = plt.subplots(3,4, figsize = (28, 21), sharey=True)
for i, col_name in enumerate(comb_data.select_dtypes(include=['category']).columns):
    # place each categorical column in a 3x4 grid of subplots
    row = i // 4
    col = i % 4
    ax_curr = axes[row, col]
    # overlay percentage bars: blue = Age present, orange = Age missing
    ax1 = sns.barplot(x=col_name, y='Fare', data=comb_data[comb_data.AgeData == 'Yes'], color='blue', alpha = 0.5, estimator=lambda col_name: len(col_name) / len(comb_data[comb_data.AgeData == 'Yes']) * 100, ax = ax_curr)
    ax2 = sns.barplot(x=col_name, y='Fare', data=comb_data[comb_data.AgeData == 'No'], color='orange', alpha = 0.5, estimator=lambda col_name: len(col_name) / len(comb_data[comb_data.AgeData == 'No']) * 100, ax = ax_curr)
    ax1.set_ylabel('Percentage')
plt.show()
# *What we can infer is that missing age data is fairly random.
# <br>However, it seems to be more in case of Queenstown, LowerClass, Survived=0, SibSP=0, Single family size, Cabin = 'N' (null values) and Parch=0.*
# In[ ]:
# Find columns which have more variation in age [Categorical features]
f, axes = plt.subplots(3,4, figsize = (28, 21), sharey=True)
for i, col_name in enumerate(comb_data.select_dtypes(include=['category']).columns):
    row = i // 4
    col = i % 4
    ax_curr = axes[row, col]
    sns.barplot(x=col_name, y='Age', data=comb_data[comb_data.AgeData == 'Yes'], ax = ax_curr)
plt.show()
# In[ ]:
# Find if a column can be explained by other column, i.e. highly dependent (correlated)
# Drawing correlation matrix - Standard Pearson coefficients
# Compute the correlation matrix [rows where Age is present only]
corr_mat = comb_data[comb_data.AgeData == 'Yes'].corr()
# Generate a mask for the upper triangle
# FIX: use the builtin bool -- the np.bool alias was deprecated in NumPy 1.20
# and removed in NumPy 1.24.
mask = np.zeros_like(corr_mat, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(6, 6))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(240, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr_mat, mask=mask, cmap=cmap, vmax=.8, center=0, square=True, annot=True, linecolor='black', linewidths=0, cbar_kws={"shrink": .4}, fmt='.2f')
plt.show()
# *We conclude that 'Fare', 'Sex', 'Embarked', 'Namelength', 'FareGroup' etc. do not explain variation in Age.<br>
# Therefore, we can exclude these columns when computing missing Age data*
# **Fill missing age data**
# In[ ]:
# Predict missing values in age using Random Forest
AgeData = comb_data[['Age', 'Parch', 'SibSp', 'TicketTrim', 'Title','Pclass','FamSize', 'Cabin']]
# Transform categorical features to dummy variables
cat_col_names = AgeData.select_dtypes(include=['category']).columns
AgeData = pd.get_dummies(AgeData, columns=cat_col_names, prefix=cat_col_names)
# Split sets into train (Age known) and test (Age missing)
train_Age = AgeData.loc[(AgeData.Age.notnull())]
test_Age = AgeData.loc[(AgeData.Age.isnull())]
# Create target and feature set ('Age' is column 0 after the selection above)
X_train_Age = train_Age.values[:, 1::]
y_train_Age = train_Age.values[:, 0]
X_test_Age = test_Age.values[:, 1::]
# Create and fit a model
regr = RandomForestRegressor(max_depth = 8, n_estimators=2000, n_jobs=-1)
regr.fit(X_train_Age, y_train_Age)
# Use the fitted model to predict the missing values
Age_pred = regr.predict(X_test_Age)
# Assign those predictions to the full data set
comb_data.loc[(comb_data.Age.isnull()), 'Age'] = Age_pred
# Check null values in each column
print(comb_data.isnull().sum())
# In[ ]:
# Check how missing age data distribution looks like after imputation
f, axes = plt.subplots(3,4, figsize = (28, 21), sharey=True)
for i, col_name in enumerate(comb_data.select_dtypes(include=['category']).columns):
    row = i // 4
    col = i % 4
    ax_curr = axes[row, col]
    sns.barplot(x=col_name, y='Age', data=comb_data[comb_data.AgeData == 'No'], ax = ax_curr)
plt.show()
# *Distribution of missing Age after computation looks similar. So, we are good.*
# **Create a new column of Age Category**
# In[ ]:
# Bin 'Age': <9 Child, 9-19 Young, 20-59 Adult, 60+ Senior
comb_data['AgeCat'] = np.where(comb_data['Age']<9, 'Child', np.where(comb_data['Age']<20, 'Young', np.where(comb_data['Age']<60, 'Adult', 'Senior')))
comb_data['AgeCat'] = comb_data['AgeCat'].astype('category')
# **Visualize numerical features**
# In[ ]:
# A quick way to get a feel of numerical data is to plot histograms for numerical variables
comb_data.hist(bins=80, figsize=(27,6))
plt.show()
# **Fix skewness and normalize**
# In[ ]:
# Check the skewness of all numerical features (ss is assumed to be scipy.stats)
num_cols = comb_data.select_dtypes(include=['float', 'int64']).columns
skewed_cols = comb_data[num_cols].apply(lambda x: ss.skew(x.dropna())).sort_values(ascending=False)
skewness = pd.DataFrame({'Skew' :skewed_cols})
# keep only features with |skew| > 0.75 (others become NaN, then dropped)
skewness = skewness[abs(skewness) > 0.75]
skewness = skewness.dropna()
print(skewness)
skewed_cols = skewness.index
print("There are {} skewed [skewness > 0.75] numerical features in comb_data to fix".format(skewness.shape[0]))
# In[ ]:
# Fix skewness
# Use boxcox1p method
'''lam = 0.5
comb_data['Fare'] = boxcox1p(comb_data['Fare'], lam)
comb_data['Fare'] = boxcox1p(comb_data['NameLength'], lam)'''
# Use log1p method
comb_data[['Fare']] = np.log1p(comb_data[['Fare']])
comb_data[['NameLength']] = np.log1p(comb_data[['NameLength']])
comb_data[['IndFare']] = np.log1p(comb_data[['IndFare']])
# *log1p method gave better result than boxcox1p method. I learned that method to deal with skewness will affect accuracy of the model.*
# In[ ]:
# Find if a column can be explained by other column, i.e. highly dependent (correlated)
# Drawing correlation matrix - Standard Pearson coefficients
# Compute the correlation matrix (whole combined dataset this time)
corr_mat = comb_data.corr()
# Generate a mask for the upper triangle
# FIX: use the builtin bool -- the np.bool alias was deprecated in NumPy 1.20
# and removed in NumPy 1.24.
mask = np.zeros_like(corr_mat, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(6, 6))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(240, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr_mat, mask=mask, cmap=cmap, vmax=.8, center=0, square=True, annot=True, linecolor='black', linewidths=0, cbar_kws={"shrink": .4}, fmt='.2f')
plt.show()
# In[ ]:
# Visualizing numerical data along x and y axis
comb_data.plot(kind = "scatter", x = "Fare", y = "Age", figsize=(24, 6), color = 'green')
plt.show()
# In[ ]:
# Now look at the stats once again.
print("-------------Basic info of columns-----------------")
print(comb_data.info())
print("----------Stats of numerical columns---------------")
print(comb_data.describe().transpose())
print("-------------Count of null values------------------")
print(comb_data.isnull().sum())
print("-------Count of values in each category------------")
for column_name in comb_data.select_dtypes(include=['category']).columns:
    print(comb_data[column_name].value_counts())
# **Analyze survival data - Visualizing outcome across independent variables**
# In[ ]:
# Create a subset that has survival outcome of all passengers
# FIX: take an explicit copy -- 'surv' is mutated in place further down
# (Survived is remapped to 0/1); mutating a chained-indexing slice raises
# SettingWithCopyWarning and its effect is not guaranteed.
surv = comb_data[comb_data.DataType == 'train'].copy()
surv.head()
# In[ ]:
print("------------------------Count & %age---------- ----------------")
print("Survived: %i (%.1f percent), Not Survived: %i (%.1f percent), Total: %i" %(len(surv[surv.Survived == 'Yes']), 1.*len(surv[surv.Survived == 'Yes']) /len(surv)*100.0,len(surv[surv.Survived == 'No']), 1.*len(surv[surv.Survived == 'No'])/len(surv)*100.0, len(surv)))
print("------------------------Mean Age-------------------------------------")
print("Mean age survivors: %.1f, Mean age non-survivers: %.1f" %(np.mean(surv[surv.Survived == 'Yes'].Age), np.mean(surv[surv.Survived == 'No'].Age)))
print("------------------------Median Fare-------------------------------------")
print("Median Fare survivors: %.1f, Median Fare non-survivers: %.1f" %(np.median(surv[surv.Survived == 'Yes'].Fare), np.median(surv[surv.Survived == 'No'].Fare)))
# In[ ]:
# Visulaizing survival data against numerical columns
# Create distribution plot to compare against numerical columns.
f, axes = plt.subplots(ncols=4, figsize = (28, 6))
d1 = sns.distplot(surv[surv.Survived == 'Yes']['Age'].dropna().values, color='Green', ax = axes[0], label = 'Survived')
d2 = sns.distplot(surv[surv.Survived == 'No']['Age'].dropna().values, color='Red', ax = axes[0], label = 'Not Survived')
d1.set_title('Survived vs. Age', fontsize = 12)
d1.legend(loc='best')
d1.set(xlabel="Age", ylabel="No. of Passengers")
d3 = sns.distplot(surv[surv.Survived == 'Yes']['Fare'].dropna().values, color='Green', ax = axes[1], label = 'Survived')
d4 = sns.distplot(surv[surv.Survived == 'No']['Fare'].dropna().values, color='Red', ax = axes[1], label = 'Not Survived')
d3.set_title('Survived vs. Fare', fontsize = 12)
d3.legend(loc='best')
d3.set(xlabel="Fare", ylabel="No. of Passengers")
d5 = sns.distplot(surv[surv.Survived == 'Yes']['IndFare'].dropna().values, color='Green', ax = axes[2], label = 'Survived')
d6 = sns.distplot(surv[surv.Survived == 'No']['IndFare'].dropna().values, color='Red', ax = axes[2], label = 'Not Survived')
d5.set_title('Survived vs. Ind. Fare', fontsize = 12)
d5.legend(loc='best')
d5.set(xlabel="IndFare", ylabel="No. of Passengers")
d7 = sns.distplot(surv[surv.Survived == 'Yes']['NameLength'].dropna().values, color='Green', ax = axes[3], label = 'Survived')
d8 = sns.distplot(surv[surv.Survived == 'No']['NameLength'].dropna().values, color='Red', ax = axes[3], label = 'Not Survived')
d7.set_title('Survived vs. Name Length', fontsize = 12)
d7.legend(loc='best')
d7.set(xlabel="NameLength", ylabel="No. of Passengers")
# FIX: plt.show() was previously called before the fourth panel (NameLength)
# was drawn, so that panel could render empty; all four axes are now drawn
# first and the figure is shown once.
plt.show()
# *Above plots show more detail in variation.*
# In[ ]:
# Calculate association between 2 columns - Cramer's V score [Categorical Features]
# Change Survived data type so that it does not mess up calculation below. (change back to integer before applying ML models)
comb_data["Survived"] = comb_data["Survived"].astype(str)
for i in comb_data.select_dtypes(include=['category']).columns:
    col_1 = i
    for j in comb_data.select_dtypes(include=['category']).columns:
        col_2 = j
        # visit each unordered pair once: stop the inner loop at the diagonal
        if col_1 == col_2:
            break
        confusion_matrix = pd.crosstab(comb_data[col_1], comb_data[col_2])
        chi2 = ss.chi2_contingency(confusion_matrix)[0]
        n = confusion_matrix.sum().sum()
        phi2 = chi2/n
        r,k = confusion_matrix.shape
        # bias-corrected Cramer's V (Bergsma correction)
        phi2corr = max(0, phi2 - ((k-1)*(r-1))/(n-1))
        rcorr = r - ((r-1)**2)/(n-1)
        kcorr = k - ((k-1)**2)/(n-1)
        Cramer_V = np.sqrt(phi2corr / min( (kcorr-1), (rcorr-1)))
        # only report strongly associated pairs
        if Cramer_V > 0.5:
            print("The Cramer's V score bettween ", col_1, " and ", col_2, " is : ", (Cramer_V))
        result = Cramer_V
# *These values shows correlation among categorical features.<br>
# Clearly Title and Sex is very highly correlated. We have to pick one to avoid Multicollinearity.*
# In[ ]:
# Visulaizing survival data across categorical columns, using Fare numerical column on Y-axis.
f, axes = plt.subplots(4,3, figsize = (28, 21), sharey=True)
for i, col_name in enumerate(comb_data.select_dtypes(include=['category']).columns):
    # place each categorical column in a 4x3 grid of subplots
    row = i // 3
    col = i % 3
    ax_curr = axes[row, col]
    sns.violinplot(data=surv, x=col_name, y='Fare', hue='Survived', ax = ax_curr)
plt.show()
# In[ ]:
# Creating bar plots.
# I will convert Survived values to number type so that it can be used for bar plots.
surv['Survived'].replace(['No','Yes'],[0,1], inplace=True)
f, axes = plt.subplots(3,4, figsize = (28, 16), sharey=True)
for i, col_name in enumerate(comb_data.select_dtypes(include=['category']).columns):
    row = i // 4
    col = i % 4
    ax_curr = axes[row, col]
    sns.barplot(x=col_name, y='Survived', data=surv, ax = ax_curr)
plt.show()
# In[ ]:
# Find columns which have proportionally more death counts
f, axes = plt.subplots(3,4, figsize = (28, 21), sharey=True)
for i, col_name in enumerate(comb_data.select_dtypes(include=['category']).columns):
    row = i // 4
    col = i % 4
    ax_curr = axes[row, col]
    # overlay percentage bars: orange = died, blue = survived
    ax1 = sns.barplot(x=col_name, y='Fare', data=surv[surv.Survived == 0], color='orange', alpha = 0.5, estimator=lambda col_name: len(col_name) / len(surv[surv.Survived == 0]) * 100, ax = ax_curr)
    ax2 = sns.barplot(x=col_name, y='Fare', data=surv[surv.Survived == 1], color='blue', alpha = 0.5, estimator=lambda col_name: len(col_name) / len(surv[surv.Survived == 1]) * 100, ax = ax_curr)
    ax1.set_ylabel('Percentage')
plt.show()
# In[ ]:
# In last set of charts, I will narrow down data by applying filters and combination of categories to visualize where exactly most no. of dead passengers count is.
# Factorplots are good to see counts across categorical columns, as shown below.
f1 = sns.factorplot(x='FamSize', data=surv, hue='Survived', kind='count', col='Sex', size=6)
plt.show()
# In[ ]:
# Filtered passengers count data across categories
f2 = sns.factorplot(x='Embarked', data=surv, hue='Survived', kind='count', col='Sex', size=6)
plt.show()
# In[ ]:
# Filtered passengers count data across categories
f8 = sns.factorplot(x='FamSize', data=surv[(surv['Title'] == 'Mr') & (surv['Cabin'] == 'N')], hue='Survived', kind='count', col='Embarked', size=6)
plt.show()
# #### **Step 5 - Prepare the data for Machine Learning algorithms**
# **Process further and create train, target and test dataset**
# In[ ]:
# Check combined data
comb_data.head()
# In[ ]:
# Save processed comb_data [for sanity check before splitting train and test data]
'''comb_data.to_csv('comb_data_Titanic.csv')'''
# Check data types
comb_data.dtypes
# In[ ]:
# Convert binary categorical columns to integer 0/1
comb_data['Survived'].replace(['No','Yes'],[0,1], inplace=True)
comb_data['Sex'].replace(['Male','Female'],[0,1], inplace=True)
comb_data["Sex"] = comb_data["Sex"].astype(int)
# Change back PassengerId to integer before applying ML models
comb_data["PassengerId"] = comb_data["PassengerId"].astype(int)
# Drop columns to avoid overfitting
# comb_data = comb_data.drop(labels = ["Age", "Sex", "Parch", "SibSp", "IndFare", FareGroup", "AgeCat", "IndFare", "AgeData"],axis = 1)
# comb_data = comb_data.drop(labels = ["Sex", "FareGroup", "AgeCat", "IndFare", "AgeData"],axis = 1)
comb_data = comb_data.drop(labels = ["Sex", "SibSp", "FareGroup", "AgeCat", "AgeData"],axis = 1)
# Transform categorical features in to dummy variables
comb_data["DataType"] = comb_data["DataType"].astype(str) # to exclude from dummy function
# Get the list of category columns
cat_col_names = comb_data.select_dtypes(include=['category']).columns
comb_data = pd.get_dummies(comb_data, columns=cat_col_names, prefix=cat_col_names)
comb_data.head()
# *I tried multiple combination of features and kept the one set which gave me best accuracy.*
# In[ ]:
# Create train and test subset from survival column
print(comb_data.shape)
train = comb_data[comb_data.DataType == 'train']
print(train.shape)
test = comb_data[comb_data.DataType == 'test']
print(test.shape)
train_id = train["PassengerId"]
test_id = test["PassengerId"]
train["Survived"] = train["Survived"].astype(int)
y_train = train["Survived"]
print(y_train.shape)
print(y_train.head())
X_train = train.drop(labels = ["Survived", "PassengerId", "DataType"],axis = 1)
print(X_train.shape)
print(X_train.head())
X_test = test.drop(labels = ["Survived", "PassengerId", "DataType"],axis = 1)
print(X_test.shape)
print(X_test.head())
# Make sure there is no null values in train and test data and also no. of categories in categorical values are equal
print("-------------Null values in Train set------------------")
print(X_train.isnull().values.any())
print("-------------Null values in Test set------------------")
print(X_test.isnull().values.any())
# In[ ]:
# Check number of columns and name of columns match between X_train and X_test
print(X_train.shape)
print(X_test.shape)
print(set(X_train.columns) == set(X_test.columns))
print('--------columns present in X_train but not in X_test-------')
missing_col_tt = [i for i in list(X_train) if i not in list(X_test)]
print(missing_col_tt)
print('--------columns present in X_test but not in X_train-------')
missing_col_tr = [i for i in list(X_test) if i not in list(X_train)]
print(missing_col_tr)
# Drop these columns and test again
X_train = X_train.drop(missing_col_tt, axis=1)
X_test = X_test.drop(missing_col_tr, axis=1)
print(X_train.shape)
print(X_test.shape)
print(set(X_train.columns) == set(X_test.columns))
print('--------columns present in X_train but not in X_test-------')
missing_col_tt = [i for i in list(X_train) if i not in list(X_test)]
print(missing_col_tt)
print('--------columns present in X_test but not in X_train-------')
missing_col_tr = [i for i in list(X_test) if i not in list(X_train)]
print(missing_col_tr)
# *I am not sure if above step was necessary. I assume order and no. columns in train and test data should be same.*
# #### **Step 6 - Select and train a model**
# **Visualize how Machine Learning models works on classification with just 2 numerical features**
# In[ ]:
# Extract numerical columns from train dataseet
X_num = X_train.iloc[:, [0, 1]]
print(X_num.head())
# *'X_num' and 'y_train' will be used to visualize Machine Learning classification models with two numerical independent features*
# In[ ]:
# Run function to plot graphs
plt_start = time.time()
# Plot decision boundaries for a subset of the classifiers using only the two
# numerical features selected above.
plot_class_models_two_num_features(X = X_num, y = y_train, clf_dict = {k: clf_dict[k] for k in clf_dict.keys() & {'clf_Log_reg', 'clf_KNN', 'clf_RF', 'clf_ExTree', 'clf_XGBoost', 'clf_MLP'}}, title = 'Age & Fare')
plt_end = time.time()
# *As expected, using numerical columns only (fare and age) do not give good accuracy.<br>
# However, nice way to see how algorithms work on classification.*
# **Comparing Classification Machine Learning models with all independent features. [Use 'X_train' and 'y_train']**
# In[ ]:
# Run function to evaluate various classification models [Metrics and cross_val-score]
clf_cross_val_score_and_metrics(X=X_train, y=y_train, clf_dict=clf_dict, CVS_scoring = "accuracy", CVS_CV=kfold)
# **Extensive GridSearch with valuable parameters / score in a dataframe**
# In[ ]:
# Get GridSearch results in a dataframe
gs_start = time.time()
gs_metrics = clf_GridSearchCV_results(gs_metrics, X_train=X_train, y_train=y_train, GS_scoring = "accuracy", GS_CV=kfold)
gs_end = time.time()
# In[ ]:
# Check GridSearch metric data
'''gs_metrics.to_csv('Titanic_GS_Result.csv')'''
gs_metrics
# **Find best estimators**
# In[ ]:
# Extract best estimators
# NOTE(review): estimators are selected by hard-coded row/column positions in
# gs_metrics — fragile if the GridSearch result ordering changes. TODO confirm.
Best_Estimator_RF = gs_metrics.iloc[1, 5]
Best_Estimator_XGB = gs_metrics.iloc[5, 5]
Best_Estimator_MLP = gs_metrics.iloc[2, 5]
Best_Estimator_ExT = gs_metrics.iloc[4, 5]
Best_Estimator_GB = gs_metrics.iloc[6, 5]
Best_Estimator_SVC = gs_metrics.iloc[7, 5]
# AdaBoost for feature importance plot
Best_Estimator_AdaB = gs_metrics.iloc[3, 5]
# **Plot learning curves**
# In[ ]:
# Run function to plot learning curves for top 4 models
plot_learning_curve(Best_Estimator_RF,"RF mearning curves",X_train,y_train,cv=kfold)
plot_learning_curve(Best_Estimator_ExT,"ExtraTrees learning curves",X_train,y_train,cv=kfold)
plot_learning_curve(Best_Estimator_XGB,"XGB learning curves",X_train,y_train,cv=kfold)
plot_learning_curve(Best_Estimator_MLP,"MLP learning curves",X_train,y_train,cv=kfold)
# **Feature Importance**
# In[ ]:
# 2x2 grid of horizontal bar charts, one per tree-based model.
nrows = ncols = 2
fig, axes = plt.subplots(nrows = nrows, ncols = ncols, sharex="all", figsize=(25,20))
names_clf = [("ExtraTrees",Best_Estimator_ExT),("RandomForest",Best_Estimator_RF),("XGBoosting",Best_Estimator_XGB), ("AdaBoosting", Best_Estimator_AdaB)]
nclf = 0
for row in range(nrows):
    for col in range(ncols):
        name = names_clf[nclf][0]
        clf = names_clf[nclf][1]
        # Plot feature importance
        feature_importance = clf.feature_importances_
        # make importances relative to max importance
        feature_importance = 100.0 * (feature_importance / feature_importance.max())
        # Keep only the 32 most important features for readability.
        sorted_idx = np.argsort(feature_importance)[::-1][:32]
        pos = feature_importance[sorted_idx][:32]
        g = sns.barplot(y=X_train.columns[sorted_idx][:32],x = pos, orient='h', palette='inferno', ax=axes[row][col])
        g.set_xlabel("Relative importance",fontsize=12)
        g.set_ylabel("Features",fontsize=12)
        g.tick_params(labelsize=9)
        g.set_title(name + " feature importance")
        nclf += 1
# #### **Step 7 - Fine-tune your model**
# In[ ]:
# Soft-voting ensemble of the three best tree-based estimators (equal weights).
vote_clf = VotingClassifier(estimators=[('rfc', Best_Estimator_RF),
                                        ('ext', Best_Estimator_ExT), ('xgb',Best_Estimator_XGB)], voting='soft', weights=[1, 1, 1], n_jobs=-1)
# #### **Step 8 - Predict and present your solution**
# In[ ]:
# Fitting / Predicting using Voting Classifier
# vote_clf.fit(X_train, y_train)
# y_pred = vote_clf.predict(X_test)
# Fitting / Predicting using Random Forest classifier
# rfc = RandomForestClassifier(max_depth=8, n_estimators = 500, n_jobs=-1, random_state=rand_st)
# rfc.fit(X_train, y_train)
# y_pred = rfc.predict(X_test)
# Fitting / Predicting using Extra Tree classifier
Best_Estimator_ExT.fit(X_train, y_train)
y_pred = Best_Estimator_ExT.predict(X_test)
print(y_pred)
# *Extra tree gave me better result than voting classifier or Random Forest Classifier*
# In[ ]:
# Combine PassengerId and prediction
Titanic_prediction = np.vstack((test_id, y_pred))
# In[ ]:
# Create output file
# Transpose so each row is one (PassengerId, Survived) pair for Kaggle upload.
np.savetxt('Titanic_Kaggle_Result.csv', np.transpose(Titanic_prediction), delimiter=',', fmt="%s")
# #### **Step 9 - Final words!**
# 1- My first attempt (version 1) was without looking into any solution online.
# 2- This version is modified version after looking at works of others.
#    e.g. - 'Titanic Top 4% with ensemble modeling' kernel by Yassine Ghouzam, PhD
# 3- I underestimated feature engineering before.
# 4- Got slightly better result of 0.80332 [Top 8%]
# 5- I am more interested in approach and doing all the steps correctly along with necessary checks.
# 6- Let me know if I missed anything critical
# In[ ]:
# Report timing of the expensive stages (plotting, grid search, whole script).
# Bug fix: the "e.g. - ..." line above was missing its leading '#', which made
# the whole script a SyntaxError.
end = time.time()
print('Time taken to plot ML models : ' + str("{:.2f}".format((plt_end - plt_start)/60)) + ' minutes')
print('Time taken to perform Grid Search : ' + str("{:.2f}".format((gs_end - gs_start)/60)) + ' minutes')
print('Total running time of the script : ' + str("{:.2f}".format((end - start)/60)) + ' minutes')
|
#!/usr/bin/env python3
import bs4, requests
# res = requests.get('https://www.amazon.co.uk/PS4-PRO-Red-Dead-Redemption/dp/B07HKV4TR4/ref=sr_1_1?s=videogames&ie=UTF8&qid=1541109558&sr=1-1&keywords=ps4+pro')
# print(res.raise_for_status)
# soup = bs4.BeautifulSoup(res.text, 'html.parser') # Returns a soup object.
# elems = soup.select('#priceblock_dealprice')
# print(elems)
# print(elems[0].text.strip())
def getAmazonPrice(productUrl):
    """Fetch an Amazon product page and return its displayed price string.

    Raises requests.HTTPError when the download fails, and IndexError when
    the price element ('#priceblock_ourprice') is absent from the page.
    """
    res = requests.get(productUrl)
    res.raise_for_status()  # check for an exception if there is a problem downloading
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    elems = soup.select('#priceblock_ourprice')
    # Bug fix: the original comment promised whitespace removal but the code
    # returned the raw text; strip() makes the comment true.
    return elems[0].text.strip()
# NOTE(review): performs a live network request at import time — confirm this
# is intended rather than being guarded by `if __name__ == "__main__":`.
price = getAmazonPrice('https://www.amazon.co.uk/Nintendo-Switch-Super-Ultimate-Download/dp/B07HFQ46LS/ref=sr_1_7?s=videogames&ie=UTF8&qid=1541112710&sr=1-7&keywords=nintendo+switch')
print('The price of this product is ' + price + '.')
|
#instance methods
#If we are using atleast one instance variable ---->instance methods.
class Student:
    """Simple student record demonstrating instance methods."""

    def __init__(self, name, marks):
        self.name = name
        self.marks = marks

    def display(self):
        """Instance method (first argument is self): print name and marks."""
        print('Hi', self.name)
        # Bug fix: corrected the 'markks' typo in the output message.
        print('Your marks are', self.marks)

    def grade(self):
        """Print the grade band for this student's marks."""
        # Bug fix: corrected the 'Grdae'/'Fialed' typos in the messages below.
        if self.marks >= 60:
            print('First Grade')
        elif self.marks >= 50:
            print('Second Grade')
        elif self.marks >= 35:
            print('You got Third Grade')
        else:
            print('You are Failed')
# Driver: read N students from stdin and report each one's grade.
n=int(input('Enter Number of Students:'))
for i in range(n):
    name=input('Enter Name:')
    marks=int(input('Enter Marks:'))
    s=Student(name,marks)
    s.display() # Calling instance method
    s.grade() # Calling instance method
    print('*'*20)
# setter and getter methods--------------------------------------------------
# setter method:- to set the data to the object.
# getter method:- to get the data from the object.
#setter Syntax:---
def setVariableName(self, variableName):
    """Setter template: store the given value on the object.

    Bug fix: the original assigned to a misspelled attribute name
    ('varaibleName'), so any code reading 'variableName' would fail.
    """
    self.variableName = variableName
def setMarks(self, marks):
    """Setter: attach the given marks to the object."""
    self.marks = marks
# getter Syntax:--
def getMarks(self):
    """Getter: return the marks stored on the object (validation hook)."""
    # //Validation stuff
    return self.marks
#---------------------------------------------------------------------------------------------
#print(s.name) // direct access no validations
#print(s.getName()) // Hiding data behind method --->Encapuslation
#
#s.name='randhir' // not good programming
#
#-----------------Examples-----------------------------------------------------------------------------
class Student:
    # Demonstrates encapsulation via setter/getter methods instead of direct
    # attribute access (see the notes above this class).
    def setName(self,name):
        # Setter: store the student's name on the object.
        self.name=name
    def getName(self):
        # Getter: return the stored name.
        return self.name
    def setMarks(self,marks):
        # Setter: store the student's marks.
        self.marks=marks
    def getMarks(self):
        # Getter: return the stored marks.
        return self.marks
# Driver: read N students from stdin, populate via setters, read via getters.
n=int(input('Enter Number of Students:'))
for i in range(n):
    name=input('Enter Name:')
    marks=int(input('Enter Marks:'))
    s=Student()
    s.setName(name)
    s.setMarks(marks)
    print('Hi',s.getName())
    print('Your Marks are',s.getMarks())
    print('*'*20)
#
|
__author__ = 'liushuo'
from scrapy.cmdline import execute
# Running this module is equivalent to running scrapy from the command line
# (original note: alternatively configure "crawl ZhihuSpider" as the script
# params of the IDE run configuration).
cmd = 'scrapy crawl ZhihuSpider'
# Bug fix: execute() with no arguments falls back to sys.argv, which left the
# prepared `cmd` silently unused; pass the command explicitly.
execute(cmd.split())
import json
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
def splitData(data):
    """Flatten a 2-D grid into parallel coordinate lists.

    Given data[y][x] == z, returns (xs, ys, zs) where index i of each list
    holds one (x, y, z) triple, in row-major order.
    """
    xs, ys, zs = [], [], []
    for row_idx, row in enumerate(data):
        for col_idx, value in enumerate(row):
            xs.append(col_idx)
            ys.append(row_idx)
            zs.append(value)
    return (xs, ys, zs)
# Load the recorded sequence data and render the "incoming" grid as 3-D bars.
# Improvement: use a context manager + json.load instead of manual
# open/read/close, so the file handle is released even if parsing fails.
with open('seqs/protocols/HTTPRSS') as f:
    data = json.load(f)
xs, ys, zs = splitData(data['incoming'])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.bar3d(xs, ys, zs, 1, 1, 1)
#ax=Axes3D(fig)
#ax.hist((xs, ys, zs))
plt.show()
#plt.savefig('HTTPRSS-incoming.pdf')
|
import threading
from time import sleep
e = threading.Event()
# With the flag already set, wait() returns immediately (the set flag
# "breaks through" the blocking wait).
e.set()
event = e.wait()
# Bug fix: `print event` is Python 2 statement syntax and a SyntaxError on
# Python 3; the rest of this script already uses the print() function.
print(event)
e.clear()
# Flag cleared: wait(2) now blocks for the full 2 second timeout and
# returns False.
event = e.wait(2)
print('timeout:', event)
|
from turtle import Screen
from paddle import Paddle
from ball import Ball
import time
from scoreboard import ScoreBoard
# Window / scene setup for the Pong clone.
screen = Screen()
screen.setup(width=800, height=600)
screen.bgcolor("black")
screen.title("Pong")
screen.tracer(n=0)  # disable auto-refresh; screen.update() is called manually
right_pad = Paddle((350, 0))
left_pad = Paddle((-350, 0))
ball = Ball()
score = ScoreBoard()
# Key bindings: arrow keys drive the right paddle, w/s the left one.
screen.listen()
screen.onkey(right_pad.up, "Up")
screen.onkey(left_pad.up, "w")
screen.onkey(right_pad.down, "Down")
screen.onkey(left_pad.down, "s")
# Main loop: advance the ball, then resolve wall / paddle / miss events.
while not score.game_over():
    screen.update()
    time.sleep(ball.move_speed)
    ball.move()
    if ball.ycor() > 280 or ball.ycor() < -280:
        # Top/bottom wall: bounce.
        ball.wall_collision()
    elif (ball.distance(right_pad) < 50 and ball.xcor() > 350) or (ball.distance(left_pad) < 50 and ball.xcor() < -350):
        # Close enough to a paddle: rebound toward the other side.
        ball.paddle_collision()
    elif ball.xcor() > 350 or ball.xcor() < -350:
        # Ball passed a paddle: award the point and reset the ball.
        score.update_score(ball.xcor())
        ball.miss()
screen.exitonclick()
|
def lcSubstring(S1, S2, n, m):
    """Length of the longest common contiguous substring of S1 and S2.

    n and m are len(S1) and len(S2).  Classic O(n*m) dynamic programme:
    dp[i][j] is the length of the common suffix of S1[:i] and S2[:j].
    """
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    best = 0
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if S1[i - 1] != S2[j - 1]:
                dp[i][j] = 0
            else:
                dp[i][j] = dp[i - 1][j - 1] + 1
                best = max(best, dp[i][j])
    return best
class Solution:
    """LeetCode 718 adapter: maximum length of a subarray common to both inputs."""

    def findLength(self, nums1: "List[int]", nums2: "List[int]") -> int:
        # Bug fix: the annotations previously referenced `List` directly, but
        # `typing.List` is never imported, so defining the class raised
        # NameError; quoting the annotations keeps the documentation without
        # the runtime dependency.
        m = len(nums1)
        n = len(nums2)
        # lcSubstring expects (S1, S2, len(S1), len(S2)); here m == len(nums1).
        answer = lcSubstring(nums1, nums2, m, n)
        return answer
|
# File Open
# File Handling
# open(filename, mode)
"""
"r" - Read - Default value. Opens a file for reading,
error if the file does not exist
"a" - Append - Opens a file for appending,
creates the file if it does not exist
"w" - Write - Opens a file for writing, creates the file if it does not exist
"x" - Create - Creates the specified file, returns an error if the file exists
"t" - Text - Default value. Text mode
"b" - Binary - Binary mode (e.g. images)
"""
# open a file
# NOTE(review): the first handle is shadowed without being closed; "rt" is the
# default mode, so both calls open the same file the same way.
f = open("demofile.txt")
f = open("demofile.txt", "rt")
###############################################################################
|
import pytest
from pizza_orders.models import FoodItem, FoodImage
@pytest.fixture(scope="function")
def add_food_item():
def _add_food_item(name: str, item_type: str, price: float, img_file=None):
food_item = FoodItem.objects.create(
name=name, item_type=item_type, price=price
)
if img_file is not None:
food_image = FoodImage(food_id=food_item, img_file=img_file)
food_image.save()
return food_item
return _add_food_item
@pytest.fixture(scope="function")
def add_multiple_items():
def _add_multiple_items():
food_item_1 = FoodItem.objects.create(
name="Test Pizza 1", item_type="pizza", price=16.99
)
food_item_2 = FoodItem.objects.create(
name="Test Extra 1", item_type="extra", price=2.0
)
food_item_3 = FoodItem.objects.create(
name="Test Extra 2", item_type="extra", price=1.0
)
return (food_item_1, food_item_2, food_item_3)
return _add_multiple_items
|
# Generated by Django 2.1.4 on 2018-12-29 20:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an auto-updating `updated`
    # timestamp column to the Cart model.

    dependencies = [
        ('carts', '0002_cart_subtotal'),
    ]

    operations = [
        migrations.AddField(
            model_name='cart',
            name='updated',
            field=models.DateTimeField(auto_now=True),
        ),
    ]
|
from poutyne.framework.callbacks import Callback
import torch
from torch.nn.functional import mse_loss
class MseMetaTest(Callback):
    """
    Callback object that estimates the MSE loss on the meta test set after each batch or epoch.
    """
    def __init__(self, meta_test, filename, periodicity='epoch'):
        """
        Parameters
        ----------
        meta_test: meta dataset.
            The meta-dataset used for testing.
        filename: str
            The name of the csv file to populate.
        periodicity: str, {'batch', 'epoch'}
            The periodicity of the test.
        """
        assert periodicity in {'batch', 'epoch'}, "Periodicity should be either 'batch' or 'epoch'."
        super(MseMetaTest, self).__init__()
        self.meta_test = meta_test
        self.periodicity = periodicity
        self.filename = filename
        self.writer = None  # CSV file handle, opened lazily in on_train_begin

    def on_train_begin(self, logs):
        # Open the CSV in append mode and write a header matching the
        # configured periodicity.
        self.writer = open(self.filename, 'a')
        if self.periodicity == 'batch':
            self.writer.write('epoch, batch, mse\n')
        else:
            self.writer.write('epoch, mse\n')

    def get_mse(self):
        """
        Gets the mean squared error on the meta test set.
        Returns
        -------
        mse: torch.Tensor
            1D tensor representing the mean squared error on the meta test set.
        """
        predictions = []
        targets = []
        # Evaluation is truncated (at most ~41 batches of ~21 episodes each)
        # to keep the periodic test cheap.
        for i, batch in enumerate(self.meta_test):
            for j, (episode, y_test) in enumerate(zip(*batch)):
                if self.model.model.return_var:
                    # Model returns (prediction, variance); only the
                    # prediction is scored.
                    y_pred, _ = self.model.make_prediction([episode])[0]
                else:
                    y_pred = self.model.make_prediction([episode])[0]
                y_pred = y_pred.detach()  # drop the autograd graph
                predictions.append(y_pred)
                targets.append(y_test)
                if j > 20:
                    break
            if i > 40:
                break
        predictions = torch.cat(tuple(predictions))
        targets = torch.cat(tuple(targets))
        mse = mse_loss(predictions, targets)
        return mse

    def on_batch_end(self, batch, logs):
        if self.periodicity == 'batch':
            mse = self.get_mse()
            epoch = logs['epoch']
            self.writer.write(f'{epoch}, {batch}, {mse}\n')

    def on_epoch_end(self, epoch, logs):
        if self.periodicity == 'epoch':
            mse = self.get_mse()
            self.writer.write(f'{epoch}, {mse}\n')

    def on_train_end(self, logs):
        # Closing the writer flushes the collected results to disk.
        self.writer.close()
|
'''
today's tasks
task1: 筛选出运算符
task2: 根据task1中的运算符分割输入字串
task3: 打印运算结果
'''
import re
def subtract(c, d):
    """Return the difference c - d."""
    return c - d
def multiply(c, d):
    """Return the product c * d."""
    return c * d
def divide(c, d):
    """Return the true-division quotient c / d (raises ZeroDivisionError for d == 0)."""
    return c / d
def add(c, d):
    """Return the sum c + d."""
    return c + d
def calculator1():
    """Interactive calculator: read "a<op>b" expressions and print results.

    Supports a single +, -, * or / per expression; loops forever reading
    from stdin.
    """
    operators = ["+", "-", "*", "/"]
    dispatch = {"+": add, "-": subtract, "*": multiply, "/": divide}
    while True:
        expr = input("请输入:")
        # Try each operator in turn; an expression splits into exactly two
        # pieces only around the operator it actually contains.
        for op in operators:
            pieces = expr.split(op, 2)
            if len(pieces) == 2:
                left = float(pieces[0])
                right = float(pieces[1])
                print(dispatch[op](left, right))
#进阶版 使用正则操作
def calculator():
while 1:
s=input("请输入:")
arg=re.findall(r'[+\-*/%]',s)
if not arg.__len__()==1:
print("目前只支持单个运算符")
continue
arg=arg[0]
arg1 = s.split(arg, 2)
a = float(arg1[0])
b = float(arg1[1])
if arg == "+":
print(add(a, b))
elif arg == "-":
print(subtract(a, b))
elif arg == "*":
print(multiply(a, b))
elif arg == "/":
print(divide(a, b))
elif arg == "%":
print(a % b)
calculator()
|
from django.core.urlresolvers import reverse
from django.test import TestCase
class CustomTestCase(TestCase):
    """TestCase with helpers for asserting on a view's HTTP status code.

    Improvement: the mutable default arguments ([] / {}) were replaced with
    None sentinels — a single shared list/dict object across all calls is the
    classic Python pitfall, and a caller's mutation would leak into later
    calls.  Behaviour is otherwise unchanged (Django's reverse() treats
    args=None/kwargs=None the same as empty).
    """

    # TODO: Is there a way to have a single argument turn into a list of one
    # when passed to a method?
    def assertResponseStatus(self, success_codes, view, args=None, kwargs=None):
        self.assertResponseStatusSuccess(view, args, kwargs, success_codes)

    def assertResponseStatusNot(
            self, failure_codes, view, args=None, kwargs=None):
        self.assertResponseStatusNotFailure(view, args, kwargs, failure_codes)

    def assertResponseStatusSuccess(
            self, view, args=None, kwargs=None, success_codes=None):
        """Fail unless the view responds with one of success_codes (default [200])."""
        if success_codes is None:
            success_codes = [200]
        response_status_code = self.getStatusCode(view, args, kwargs)
        if response_status_code not in success_codes:
            self.fail('%s not in %s' % (response_status_code, success_codes))

    def assertResponseStatusNotFailure(
            self, view, args=None, kwargs=None, failure_codes=None):
        """Fail if the view responds with one of failure_codes (default [404, 500])."""
        if failure_codes is None:
            failure_codes = [404, 500]
        response_status_code = self.getStatusCode(view, args, kwargs)
        if response_status_code in failure_codes:
            self.fail('%s in %s' % (response_status_code, failure_codes))

    def getStatusCode(self, view, args=None, kwargs=None):
        response = self.getResponseFromView(view, args, kwargs)
        return response.status_code

    def getResponseFromView(self, view, args=None, kwargs=None):
        """Resolve `view` to a URL and GET it with the test client."""
        view_url = reverse(view, args=args, kwargs=kwargs)
        return self.client.get(view_url)

    def tryLogin(self, username, password=None):
        """Log the test client in; the password defaults to the username."""
        if not password:
            login = self.client.login(username=username, password=username)
        else:
            login = self.client.login(username=username, password=password)
        if not login:
            raise CouldNotLoginException('Could not login as %s' % username)
class CouldNotLoginException(Exception):
    """Raised when the test client fails to authenticate a user."""

    def __init__(self, value):
        # Keep the offending value around for inspection by the handler.
        self.value = value

    def __str__(self):
        return '%r' % (self.value,)
'''
Given two words (beginWord and endWord), and a dictionary, find the length of shortest transformation sequence from beginWord to endWord, such that:
Only one letter can be changed at a time
Each intermediate word must exist in the dictionary
For example,
Given:
start = "hit"
end = "cog"
dict = ["hot","dot","dog","lot","log"]
As one shortest transformation is "hit" -> "hot" -> "dot" -> "dog" -> "cog",
return its length 5.
Note:
Return 0 if there is no such transformation sequence.
All words have the same length.
All words contain only lowercase alphabetic characters.
'''
class Solution:
    # @param {string} beginWord
    # @param {string} endWord
    # @param {set<string>} wordDict
    # @return {integer}
    def ladderLength(self, beginWord, endWord, wordDict):
        """BFS over single-letter transformations; returns the number of words
        in the shortest beginWord -> endWord ladder, or 0 if none exists."""
        if not beginWord or not endWord or not wordDict:
            return 0
        # word : path_count (number of words in the ladder ending at `word`)
        table = {beginWord: 1}
        alphabets = set("abcdefghijklmnopqrstuvwxyz")
        while table:
            temp_table = table.copy()  # avoid: RuntimeError: dictionary changed size during iteration
            # Fix: dict.viewitems() is Python 2 only; items() behaves the same
            # here and also works on Python 3.
            for word, count in temp_table.items():
                del table[word]  # we need to delete this in order to move forward
                # If the current word is one letter from the end, we're done.
                if self.lastWord(word, endWord):
                    return count + 1
                else:
                    # Otherwise expand to every dictionary word one letter away.
                    for i in range(len(word)):
                        for c in alphabets:
                            # Replace the i-th character with each alphabet letter.
                            next_word = word[:i] + c + word[i+1:]
                            if next_word in wordDict:
                                table[next_word] = count + 1
                                wordDict.remove(next_word)  # cannot use one word twice
        return 0

    def lastWord(self, word, end):
        """Return True iff `word` and `end` differ in exactly one position.

        Bug fix: the original returned from inside the comparison loop, so it
        judged the words on a prefix of the positions instead of counting all
        mismatches (and fell off the loop returning None for equal words).
        """
        diff = 0
        for i in range(len(word)):
            if word[i] != end[i]:
                diff += 1
                if diff > 1:
                    return False  # early exit: more than one mismatch
        return diff == 1
# Demo: expect 5 ("hit" -> "hot" -> "dot" -> "dog" -> "cog").
s = Solution()
# Bug fix: wrapped the print in parentheses so the line is valid on Python 3
# as well as Python 2.
print(s.ladderLength("hit", "cog", ["hot","dot","dog","lot","log"]))
|
# Generated by Django 2.2 on 2019-09-06 19:57
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration for the geo app: a base Geography
    # model, a CensusGeography multi-table-inheritance child carrying census
    # metadata, and one concrete model per census geographic level.

    initial = True

    dependencies = [
    ]

    operations = [
        # Base model: a named multipolygon with an optional level tag.
        migrations.CreateModel(
            name='Geography',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True)),
                ('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
                ('geo_level', models.CharField(blank=True, choices=[('NONE', 'N/A'), ('BLOCKGROUP', 'Block group'), ('TRACT', 'Tract'), ('COUNTY_SUBDIVISION', 'County Subdivision'), ('COUNTY', 'County'), ('PLACE', 'Place'), ('PUMA', 'PUMA'), ('STATE_HOUSE', 'State House'), ('STATE_SENATE', 'State Senate'), ('SCHOOL_DISTRICT', 'School District')], default='NONE', max_length=30)),
            ],
        ),
        # Child of Geography adding common census identifiers/areas.
        migrations.CreateModel(
            name='CensusGeography',
            fields=[
                ('geography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='geo.Geography')),
                ('affgeoid', models.CharField(max_length=21, unique=True)),
                ('lsad', models.CharField(max_length=2)),
                ('aland', models.BigIntegerField(verbose_name='Area (land)')),
                ('awater', models.BigIntegerField(verbose_name='Area (water)')),
            ],
            bases=('geo.geography',),
        ),
        # Concrete census levels, each keyed by its GEOID.
        migrations.CreateModel(
            name='BlockGroup',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('countyfp', models.CharField(max_length=3)),
                ('tractce', models.CharField(max_length=6)),
                ('blkgrpce', models.CharField(max_length=1)),
            ],
            options={
                'verbose_name_plural': 'Block Groups',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='County',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('countyfp', models.CharField(max_length=5)),
                ('countyns', models.CharField(max_length=8)),
            ],
            options={
                'verbose_name_plural': 'Counties',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='CountySubdivision',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('countyfp', models.CharField(max_length=3)),
                ('cousubfp', models.CharField(max_length=5)),
                ('cousubns', models.CharField(max_length=8)),
            ],
            options={
                'verbose_name_plural': 'County Subdivisions',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='Place',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('placefp', models.CharField(max_length=5)),
                ('placens', models.CharField(max_length=8)),
            ],
            options={
                'verbose_name_plural': 'Places',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='Puma',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('pumace', models.CharField(max_length=5)),
            ],
            options={
                'verbose_name': 'PUMA',
                'verbose_name_plural': 'PUMAS',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='SchoolDistrict',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('unsdlea', models.CharField(max_length=5)),
                ('placens', models.CharField(max_length=8)),
            ],
            options={
                'verbose_name_plural': 'School Districts',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='StateHouse',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('sldlst', models.CharField(max_length=5)),
                ('lsy', models.CharField(max_length=4)),
            ],
            options={
                'verbose_name': 'State House District',
                'verbose_name_plural': 'State House Districts',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='StateSenate',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('sldust', models.CharField(max_length=5)),
                ('lsy', models.CharField(max_length=4)),
            ],
            options={
                'verbose_name': 'State Senate District',
                'verbose_name_plural': 'State Senate Districts',
            },
            bases=('geo.censusgeography',),
        ),
        migrations.CreateModel(
            name='Tract',
            fields=[
                ('censusgeography_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, to='geo.CensusGeography')),
                ('geoid', models.CharField(max_length=12, primary_key=True, serialize=False)),
                ('statefp', models.CharField(max_length=2)),
                ('countyfp', models.CharField(max_length=3)),
                ('tractce', models.CharField(max_length=6)),
            ],
            options={
                'verbose_name_plural': 'Tracts',
            },
            bases=('geo.censusgeography',),
        ),
    ]
|
from django.urls import path
from . import views
from accounts.views import registration_view
# ApiListView
from rest_framework.authtoken.views import obtain_auth_token
app_name = 'accounts'
# URL routes for the accounts app: HTML auth views plus REST API endpoints
# (registration, token-based login, password reset).
urlpatterns = [
    path('login/',views.login,name='login'),
    path('register/',views.register,name='register'),
    path('logout/',views.logout,name='logout'),
    path('api/reg/',registration_view,name='apireg'),
    path('api/login',obtain_auth_token,name='apilogin'),
    path('api/password-reset/',views.passwordReset,name='passwordReset'),
    # path('list/',ApiListView.as_view(),name='listpai')
]
|
import datetime
import urlfetch
import copy
from mint.tags import *
from mint.utils import *
class TransactionSet(object):
    """Lazy, filterable view over a Mint account's transactions.

    Two filter flavours: `filter` builds a Mint-side query string, while
    `pyfilter`/`pyexclude` add client-side predicates applied during
    iteration.
    """

    def __init__(self, mint, query_string='', pyfilters=None):
        self.mint = mint
        self.query_string = query_string
        # Fix: a mutable default list would be shared across all instances.
        self.pyfilters = [] if pyfilters is None else pyfilters

    def filter(self, query=None, description=None, tag=None, category=None):
        """Return a new set narrowed by a Mint-side query string.

        NOTE(review): the new set is built without self.pyfilters — confirm
        whether server-side filtering is meant to reset client-side
        predicates.
        """
        queries = []
        if self.query_string:
            queries.append(self.query_string)
        if query:
            queries.append(query)
        if description:
            queries.append('description:"%s"' % description)
        if tag:
            queries.append('tag:"%s"' % tag.name)
        if category:
            queries.append('category:"%s"' % category)
        query = " ".join(queries)
        return TransactionSet(self.mint, query)

    @staticmethod
    def get_filter_fn(fn=None, **kwargs):
        """Build a predicate AND-combining `fn` with the keyword filters.

        Recognised keywords: tag, tags__len, description, category.
        Raises ValueError for any other keyword.
        """
        fns = [fn] if fn else []
        for key, value in kwargs.items():
            # Bug fix: the lambdas previously closed over the loop variable
            # `value` directly; Python closures are late-binding, so with more
            # than one keyword every predicate saw the *last* value.  Binding
            # it as a default argument freezes the current value.
            if key == 'tag':
                fns.append(lambda tx, value=value: value in tx.tags)
            elif key == 'tags__len':
                fns.append(lambda tx, value=value: len(tx.tags) == value)
            elif key == 'description':
                fns.append(lambda tx, value=value: tx.merchant.find(value) != -1)
            elif key == 'category':
                fns.append(lambda tx, value=value: tx.category.find(value) != -1)
            else:
                raise ValueError("filter '%s' not recognized" % key)
        return lambda tx: all(fn(tx) for fn in fns)

    def pyfilter(self, **kwargs):
        """Return a new set with an additional client-side predicate."""
        fn = TransactionSet.get_filter_fn(**kwargs)
        return TransactionSet(self.mint, self.query_string, self.pyfilters + [fn])

    def pyexclude(self, **kwargs):
        """Return a new set excluding transactions matching the predicate."""
        fn_ = TransactionSet.get_filter_fn(**kwargs)
        fn = lambda tx: not fn_(tx)
        return TransactionSet(self.mint, self.query_string, self.pyfilters + [fn])

    def __iter__(self):
        # Stream transactions from Mint, applying every client-side predicate.
        for transaction in self.mint.get_transactions(query=self.query_string):
            if all(fn(transaction) for fn in self.pyfilters):
                yield transaction

    def add_tag(self, tag):
        """Add `tag` to every transaction in the set (not yet committed)."""
        for tx in self:
            tx.tags.add(tag)

    def remove_tag(self, tag):
        """Remove `tag` from every transaction in the set (not yet committed)."""
        for tx in self:
            tx.tags.remove(tag)

    def commit(self):
        """Push pending edits on every transaction back to Mint."""
        for tx in self:
            tx.commit()
# Maps Mint CSV-export column headers to Transaction attribute names.
MAPPING = {
    'Description' : 'description',
    'Original Description' : 'original_description',
    'Amount' : 'amount',
    'Transaction Type' : 'transaction_type',
    'Category' : 'category',
    'Account Name' : 'account_name',
    'Notes' : 'notes',
}
# Keys copied verbatim from Mint's JSON payload onto Transaction instances.
JSON_KEYS = ['amount', 'id', 'note', 'merchant', 'omerchant', 'categoryId', 'category', 'fi', 'account']
import weakref
class Transaction(Flyweight):
    # One Mint transaction.  Flyweight pooling means constructing with the
    # same key can return an existing instance — hence the update() path in
    # __init__ and the class-wide commit_dirty() helper.

    @classmethod
    def commit_dirty(cls):
        # Commit every pooled transaction (commit() itself is a no-op when
        # nothing changed).
        for v in cls._pool[cls].values():
            v.commit()

    @staticmethod
    def from_json(data, mint, year=None):
        # Build a Transaction from Mint's JSON payload; `year` disambiguates
        # dates that omit it (defaults to the current year).
        d = {'mint' : mint}
        d['date'] = parse_date(data['date'], year or datetime.datetime.now().year)
        d['tags'] = TagSet.from_json(data['labels'], name_key='name', mint=mint)
        for key in JSON_KEYS:
            d[key] = data[key]
        return Transaction(**d)

    def __init__(self, **kwargs):
        # NOTE(review): when the Flyweight pool hands back an existing dirty
        # instance, diff() is truthy and we delegate to update() (currently a
        # no-op) instead of clobbering pending edits.
        if self.diff():
            return self.update(**kwargs)
        self.mint = kwargs.pop('mint', None)
        # Snapshot of the original attribute values, used by diff().
        self.originals = {}
        for key, value in kwargs.items():
            setattr(self, key, value)
            self.originals[key] = copy.copy(value)

    def update(self, **kwargs):
        return

    def diff(self):
        # Return {attr: (original, current)} for every changed attribute;
        # 'tags' is special-cased into {'added': ..., 'removed': ...}.
        if not getattr(self, 'originals', None):
            return {}
        k = object()  # sentinel distinct from every real attribute value
        diffs = {}
        for key, value in self.originals.items():
            if value == getattr(self, key, k):
                continue
            diffs[key] = (value, getattr(self, key, None))
        if 'tags' in diffs:
            orig, new = diffs['tags']
            added, removed = orig.diff(new)
            diffs['tags'] = {'added' : added, 'removed' : removed}
        return diffs

    def __repr__(self):
        # NOTE(review): `unicode` is Python 2 only; this repr would raise
        # NameError on Python 3.
        return '<Transaction: %s>' % unicode(self)

    def __unicode__(self):
        return "%s %s" % (self.merchant, self.date.strftime("%m-%d-%Y"))

    def commit(self, force=False):
        # Push local edits to Mint's txnedit endpoint; skipped when nothing
        # changed unless force=True.
        diff = self.diff()
        if not diff and not force:
            logger.debug("Not committing %s: %s" % (self, diff))
            return False
        d = {'task' : 'txnedit', 'txnId' : "%s:false" % self.id, 'token' : self.mint.token}
        tags = diff.pop('tags', {})
        added, removed = tags.get('added', []), tags.get('removed', [])
        for tag in self.mint.tags:
            # Mint tag flags: 2 = add, 0 = remove, 1 = leave unchanged.
            value = 2 if tag in added else (0 if tag in removed else 1)
            d['tag%d' % tag.id] = value
        for key, value in diff.items():
            d[key] = value[1]
        logger.warning("Committing %s" % self)
        logger.debug("Commit data: %s" % d)
        response = self.mint.post_url('updateTransaction.xevent', d)
        return response
|
from __future__ import unicode_literals, print_function
import contextlib
import timeit
import traceback
from ..compat import collections_abc, PY2
from .strings import *
from .algorithms import *
from .paths import *
def is_collection(x):
    """Return True for iterables that are not strings/bytes (py2/py3 aware)."""
    if not isinstance(x, collections_abc.Iterable):
        return False
    string_types = basestring if PY2 else (str, bytes)
    return not isinstance(x, string_types)
def method_decorator(decorator):
    """Adapt a plain-function decorator so it can wrap instance methods.

    The method is first bound to ``self`` via functools.partial, so the
    wrapped decorator sees a callable taking only the non-self arguments.
    """
    def new_decorator(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            # update_wrapper returns its first argument, so this both copies
            # the metadata onto the partial and yields the bound callable.
            bound = functools.update_wrapper(functools.partial(method, self), method)
            return decorator(bound)(*args, **kwargs)
        return wrapper
    return new_decorator
class _Timer(object):
def __init__(self):
self.elapsed = 0
self.start_time = 0
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def start(self):
self.start_time = timeit.default_timer()
def stop(self):
if self.start_time == 0:
raise Exception('Timer was not started')
self.elapsed += timeit.default_timer() - self.start_time
self.start_time = 0
# Public, friendlier alias for the _Timer class.
timer = _Timer
@contextlib.contextmanager
def suppress_exceptions():
    """Swallow any Exception raised in the managed block.

    The traceback is printed instead of propagating; BaseExceptions
    (KeyboardInterrupt, SystemExit) still escape.
    """
    try:
        yield
    except Exception:
        traceback.print_exc()
class Assembler(object):
    """Two-pass assembler for a simple accumulator machine.

    Pass 1 records label locations; pass 2 translates each line (MRI / RRI /
    IOI mnemonics, or DEC / HEX literals) into a 16-bit binary string keyed
    by its 12-bit binary address in self.__bin.
    """

    def __init__(self, asmpath='', mripath='', rripath='', ioipath='') -> None:
        """
        Assembler class constructor.

        Initializes the important properties of the Assembler class:
        - self.__address_symbol_table (dict): labels (scanned in the first
          pass, stored *including* their trailing comma) mapped to their
          locations as 12-bit binary strings.
        - self.__bin (dict): locations (addresses) mapped to the binary
          representations of the instructions at those locations (job of the
          second pass).
        - self.__asmfile (str): file name of the assembly code file; set in
          the read_code() method.
        - self.__asm (list): list of lists, one outer list per line of
          assembly code, the inner list holding the symbols of that line.
          For example:
              ORG 100
              CLE
          will yield __asm = [['org', '100'] , ['cle']].
          Note that all symbols in self.__asm are lower case.
        - self.__mri_table / self.__rri_table / self.__ioi_table (dict):
          memory-reference, register-reference and input-output instructions
          mapped to their binary representations.

        The constructor receives four optional arguments:
        - asmpath (str): path to the assembly code file.
        - mripath / rripath / ioipath (str): paths to text files containing
          the MRI / RRI / IOI instructions respectively. Each file holds one
          instruction and its binary representation per line, separated by a
          space, with no empty lines.
        """
        super().__init__()
        # Address symbol table dict -> {symbol: location}
        self.__address_symbol_table = {}
        # Assembled machine code dict -> {location: binary representation}
        self.__bin = {}
        # Load assembly code if the asmpath argument was provided.
        if asmpath:
            self.read_code(asmpath)
        # memory-reference instructions
        self.__mri_table = self.__load_table(mripath) if mripath else {}
        # register-reference instructions
        self.__rri_table = self.__load_table(rripath) if rripath else {}
        # input-output instructions
        self.__ioi_table = self.__load_table(ioipath) if ioipath else {}

    def read_code(self, path:str):
        """
        Opens the .asm file found at *path*, stores its basename in
        self.__asmfile, and tokenizes its contents into self.__asm.
        Returns None.
        """
        assert path.endswith('.asm') or path.endswith('.S'), \
            'file provided does not end with .asm or .S'
        self.__asmfile = path.split('/')[-1]  # on unix-like systems
        with open(path, 'r') as f:
            # remove '\n' from each line, convert it to lower case, and split
            # it by the whitespaces between the symbols in that line.
            self.__asm = [s.rstrip().lower().split() for s in f.readlines()]

    def assemble(self, inp='') -> dict:
        """Run both passes and return {location: binary} for the program.

        NOTE(review): if no file was ever loaded, self.__asm does not exist
        and the first assert raises AttributeError rather than
        AssertionError — confirm intended.
        """
        assert self.__asm or inp, 'no assembly file provided'
        if inp:
            assert inp.endswith('.asm') or inp.endswith('.S'), \
                'file provided does not end with .asm or .S'
        # if assembly file was not loaded, load it.
        if not self.__asm:
            self.read_code(inp)
        # remove comments from loaded assembly code.
        self.__rm_comments()
        # do first pass.
        self.__first_pass()
        # do second pass.
        self.__second_pass()
        # The previous two calls should store the assembled binary
        # code inside self.__bin. So the final step is to return
        # self.__bin
        return self.__bin

    # PRIVATE METHODS

    def __load_table(self, path) -> dict:
        """
        Loads any of the ISA tables (MRI, RRI, IOI) as {mnemonic: binary}.
        """
        with open(path, 'r') as f:
            t = [s.rstrip().lower().split() for s in f.readlines()]
        return {opcode:binary for opcode,binary in t}

    def __islabel(self, string) -> bool:
        """
        Returns True if string is a label (ends with ,) otherwise False.
        """
        return string.endswith(',')

    def __rm_comments(self) -> None:
        """
        Remove comments (everything from a '/'-prefixed token on) from code.
        NOTE(review): a whole-line comment leaves an empty token list behind;
        the passes index i[0] unconditionally — confirm such lines cannot
        reach them.
        """
        for i in range(len(self.__asm)):
            for j in range(len(self.__asm[i])):
                if self.__asm[i][j].startswith('/'):
                    del self.__asm[i][j:]
                    break

    def __format2bin(self, num:str, numformat:str, format_bits:int) -> str:
        """
        Converts num from numformat (hex or dec) to a binary representation
        of max format_bits. If the number after conversion is less than
        format_bits long, the result is left-padded with zeros.

        Arguments:
            num (str): the number to be formatted as binary. It can be in
                either decimal or hexadecimal format.
            numformat (str): the format of num; either 'hex' or 'dec'.
            format_bits (int): the number of bits num is padded to.
        """
        if numformat == 'dec':
            return '{:b}'.format(int(num)).zfill(format_bits)
        elif numformat == 'hex':
            return '{:b}'.format(int(num, 16)).zfill(format_bits)
        else:
            raise Exception('format2bin: not supported format provided.')

    def __first_pass(self) -> None:
        """
        Runs the first pass over the assembly code in self.__asm.
        Searches for labels and stores them alongside their locations in
        self.__address_symbol_table; the location is a 12-bit binary string.
        Labels are stored with their trailing comma (i[0] is not stripped),
        and the second pass looks them up in the same form.
        Returns None.
        """
        num = 0
        for i in self.__asm:
            if i[0] == "org":
                # ORG sets the location counter (operand is hexadecimal).
                num = int(i[1] , 16)
                continue
            elif i[0] == "end":
                break
            elif i[0][-1] == "," :
                self.__address_symbol_table[i[0]] = self.__format2bin(str(num), "dec" , 12 )
            num += 1

    def __second_pass(self) -> None:
        """
        Runs the second pass on the code in self.__asm.
        Translates every instruction into its binary representation using
        self.__mri_table, self.__rri_table and self.__ioi_table, and stores
        it alongside its 12-bit binary location in self.__bin.
        """
        num = 0
        for i in self.__asm:
            # A leading label is dropped; the rest of the line is processed.
            if i[0][-1] == ",":
                i = i[1:]
            if i[0] == "org":
                num = int(i[1] , 16)
                continue
            elif i[0] == "end":
                break
            elif i[0] == "dec" or i[0] =="hex":
                # Data literal: 16-bit value in the stated base.
                x = self.__format2bin(i[1] , i[0] , 16 )
                self.__bin[self.__format2bin(str(num),"dec" , 12 )] = x
            elif i[0] in self.__mri_table.keys():
                # MRI: [indirect bit][3-bit opcode][12-bit operand address].
                y = self.__mri_table[i[0]]
                # Labels were stored with a trailing comma in the first pass.
                x = self.__address_symbol_table[i[1] + ","]
                # A trailing "i" token marks indirect addressing.
                if "i" in i :
                    F = "1"+y+x
                else:
                    F = "0"+ y + x
                self.__bin[self.__format2bin(str(num), "dec", 12)] = F
            else:
                if i[0] in self.__rri_table.keys():
                    self.__bin[self.__format2bin(str(num), "dec", 12)] = self.__rri_table[i[0]]
                elif i[0] in self.__ioi_table.keys():
                    self.__bin[self.__format2bin(str(num), "dec", 12)] = self.__ioi_table[i[0]]
                else:
                    raise Exception("Not Valid")
            num += 1
|
from typing import AnyStr
import hashlib
from functools import wraps
import os
def str_to_byte(func):
    """
    decorator adapted from https://forum.kodi.tv/showthread.php?tid=330975

    Encodes every ``str`` positional and keyword argument to ``bytes``
    (UTF-8 default) before delegating to *func*; non-string arguments
    pass through unchanged.

    :param func: func taking string as arg
    :return: wrapped func
    """
    def _as_bytes(value):
        return value.encode() if isinstance(value, str) else value

    @wraps(func)
    def wrapped(*args, **kwargs):
        return func(*(_as_bytes(a) for a in args),
                    **{k: _as_bytes(v) for k, v in kwargs.items()})
    return wrapped
def get_csci_salt(keyword="CSCI_SALT", convert_to_bytes="yes") -> bytes:
    """Return the appropriate salt for CSCI E-29, read from the environment.

    :param keyword: name of the environment variable holding the hex salt
    :param convert_to_bytes: "yes" to decode the hex string into bytes;
        any other value returns the raw hex string
    :return: the salt as bytes (or str when conversion is disabled)
    """
    salt_hex = os.getenv(keyword)
    return bytes.fromhex(salt_hex) if convert_to_bytes == "yes" else salt_hex
@str_to_byte
def hash_str(some_val: AnyStr, salt: AnyStr = ""):
    """Converts strings to hash digest

    See: https://en.wikipedia.org/wiki/Salt_(cryptography)

    The decorator converts str arguments to bytes, so sha256 always
    receives bytes: salt first, then the value.

    :param some_val: thing to hash
    :param salt: Add randomness to the hashing
    :rtype: bytes
    """
    hasher = hashlib.sha256()
    for chunk in (salt, some_val):
        hasher.update(chunk)
    return hasher.digest()
def get_user_id(username: str) -> str:
    """Return the first 8 hex chars of the salted hash of the lowercased username."""
    digest = hash_str(username.lower(), salt=get_csci_salt())
    return digest.hex()[:8]
|
# -*- coding: utf-8 -*-
from imagekit.models.fields import ProcessedImageField
from imagekit.processors import ResizeToFit, ResizeToFill, Adjust
from django.db import models
from productsapp.models import TechnicalSolutions
# Create your models here.
class News(models.Model):
    """News item model with a processed (resized/cropped) cover image."""
    name = models.CharField(verbose_name='название', max_length=256, unique=True)
    slug = models.SlugField(verbose_name='слаг', max_length=128, unique=True)
    description = models.TextField(verbose_name='описание', blank=True)
    # image = models.ImageField(upload_to='news_avatars', blank=True)
    # Cover image is cropped/resized to 840x470 and re-encoded as JPEG q90.
    image = ProcessedImageField(upload_to='news_avatars', processors = [ResizeToFill(840, 470)], format="JPEG", options={'quality': 90}, blank=True)
    creation_date = models.DateTimeField(verbose_name='создан', auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(verbose_name='обновлен', auto_now=True)

    def get_absolute_url(self):
        # NOTE(review): `reverse` is not imported in this module's visible
        # imports — confirm django.urls.reverse is available here, otherwise
        # this raises NameError at call time.
        return reverse('news:news_detail', args=[str(self.id)])

    class Meta:
        # Newest (most recently updated) first.
        ordering = ('-updated',)
        verbose_name = 'Новость'
        verbose_name_plural = 'Новости'

    def get_products(self):
        # Related NewsHasTechnicalSolutions rows (related_name="solutions").
        return self.solutions.select_related()

    def __unicode__(self):
        # NOTE(review): __unicode__ only matters on Python 2 / old Django;
        # confirm whether __str__ is needed instead.
        return self.name
class NewsHasTechnicalSolutions(models.Model):
    """Join model linking technical solutions used in a news item, with their work volume."""
    name = models.CharField(verbose_name='название конструкции или участка', max_length=256, blank=True, null=True)
    news = models.ForeignKey(News, verbose_name='Новость', related_name="solutions",
                             on_delete=models.CASCADE)
    techsol = models.ForeignKey(TechnicalSolutions, verbose_name='Техническое решение', related_name='news',
                                on_delete=models.CASCADE)
    # Work volume, up to 18 digits with 2 decimal places.
    value = models.DecimalField(verbose_name='Объем работ', max_digits=18, decimal_places=2)
    is_active = models.BooleanField(verbose_name='Показывать', default=True)
    created = models.DateTimeField(auto_now_add=True, auto_now=False)
    updated = models.DateTimeField(auto_now_add=False, auto_now=True)

    class Meta:
        verbose_name = 'Тех решение проекта'
        verbose_name_plural = 'Тех решения проекта'
|
from subprocess import Popen, PIPE
def get_param_list(params):
    """ transform params from dict to list.

    Yields str(key), str(value) for each entry; raises ValueError when
    *params* is not a dict (on first iteration, since this is a generator).
    """
    if not isinstance(params, dict):
        raise ValueError("job params can only be dict")
    for key, value in params.items():
        yield str(key)
        yield str(value)
def get_tfjob_cmd(params):
    """ get tfjob command for local execution

    Empty-string and None values are dropped; everything else becomes
    a --key=value flag.
    """
    cmd = ['python', '-m', 'gdmix.gdmix']
    cmd.extend(f"--{k}={v}"
               for param in params
               for k, v in param.items()
               if v != "" and v is not None)
    return cmd
def get_sparkjob_cmd(class_name, params, jar='gdmix-data-all_2.11.jar'):
    """ get spark command for local execution """
    base = [
        'spark-submit',
        '--class', class_name,
        '--master', 'local[*]',
        '--num-executors', '1',
        '--driver-memory', '1G',
        '--executor-memory', '1G',
        '--conf', 'spark.sql.avro.compression.codec=deflate',
        '--conf', 'spark.hadoop.mapreduce.fileoutputcommitter.marksuccessfuljobs=false',
        jar,
    ]
    return base + list(get_param_list(params))
def run_cmd(cmd):
    """ run gdmix job locally.
    Params:
        cmd: shell command, e.g. ['spark-submit', '--class', ...]
    Raises RuntimeError (with the command and stderr) on a nonzero exit.
    """
    process = Popen(cmd, stdout=PIPE, stderr=PIPE)
    out, err = process.communicate()  # wait for the process to terminate
    print(out.decode("utf-8"))
    if not process.returncode:
        print(err.decode("utf-8"))
    else:
        raise RuntimeError(f"ERROR in executing command: {str(' '.join(cmd))}\n\nError message:\n{err.decode('utf-8')}")
|
# -*- coding:utf-8 -*-
__author__ = 'yyp'
__date__ = '2018-5-27 18:09'
class Solution:
    """
    Time: O(n)
    Space:O(1)
    """

    def findAnagrams(self, s, p):
        """
        Sliding-window anagram search: keep letter counts for the current
        window of len(p) characters and compare against p's counts.

        :type s: str
        :type p: str
        :rtype: List[int]
        """
        result = []
        lo, hi = 0, len(p) - 1
        window = [0] * 26
        for ch in s[:hi + 1]:
            window[ord(ch) - ord('a')] += 1
        target = [0] * 26
        for ch in p:
            target[ord(ch) - ord('a')] += 1
        while hi < len(s):
            if window == target:
                result.append(lo)
            if hi + 1 < len(s):
                # Slide the window one position right.
                window[ord(s[lo]) - ord('a')] -= 1
                window[ord(s[hi + 1]) - ord('a')] += 1
                lo += 1
            hi += 1
        return result
if __name__ == "__main__":
    # Smoke test: anagram start indices of "abc" in "cbaebabacd" -> [0, 6].
    print(Solution().findAnagrams("cbaebabacd", "abc"))
|
# describe()-like summary that also reports the percentage of missing values
def func_df_describe_all(df):
    """function similar to describe() with missing value

    Keyword arguments: df (dataframe); Return: df_summary
    """
    summary = df.describe(include='all').T
    missing_pct = df.isnull().sum() / df.shape[0] * 100
    summary['miss_perc'] = missing_pct.values
    return summary
|
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
from bs4 import BeautifulSoup
from time import sleep
from random import randint
import os.path
import pandas as pd
def simple_get(url):
    """
    Attempts to get the content at `url` by making an HTTP GET request.
    If the content-type of response is some kind of HTML/XML, return the
    text content, otherwise return None
    """
    try:
        with closing(get(url, stream=True)) as resp:
            return resp.content if is_good_response(resp) else None
    except RequestException as e:
        log_error('Error during requests to {0} : {1}'.format(url, str(e)))
        return None
def is_good_response(resp):
    """
    Returns true if the response seems to be HTML, false otherwise
    """
    content_type = resp.headers['Content-Type'].lower()
    if resp.status_code != 200:
        return False
    return content_type is not None and 'html' in content_type
def log_error(e):
    """Minimal error "logger": prints the error to stdout.

    Swap the body for real logging if needed; callers only rely on the
    side effect, not a return value.
    """
    print(e)
def test_simple_get():
    # Network-dependent smoke test: requires outbound HTTPS access.
    assert simple_get('https://www.google.com/') is not None
def test_log_error():
    """Check that log_error prints without raising and returns None.

    The original asserted ``log_error(...) == print(...)``; both sides are
    always None, so the comparison could never fail (and printed twice).
    """
    assert log_error('This is a sentence') is None
def scrape_articles():
    """Scrapes text from the articles that correspond to the article
    links generated by 'get_article_links.py'

    Returns
    -------
    text files
        For every article that has text (e.g., isn't simply just an
        image) a new .txt file is created. All of the text from that
        article is dumped into that text file. The text file is named
        with the same naming convention The Dartmouth uses for their
        articles URLs.
    """
    # need two different ways to set directory to pass
    # Travis CI (because .travis file is called from
    # different directory)
    current_file = os.getcwd().split('/')[-1]
    if current_file == 'scrape_training_data':
        base_dir = os.getcwd()
    else:
        base_dir = os.path.join(
            os.getcwd(), 'topic_modeling', 'scrape_training_data')
    # make directory to hold articles
    article_directory = os.path.join(base_dir, 'training_data')
    if not os.path.exists(article_directory):
        os.makedirs(article_directory)
    # read in article links
    links_pandas = pd.read_csv(
        os.path.join(base_dir, 'article_links.txt'), header=None)
    links = links_pandas[0]
    for link in links:
        article_text = []
        # NOTE(review): simple_get may return None (bad response/exception);
        # BeautifulSoup(None, ...) below would then raise — confirm handling.
        raw_html = simple_get(link)
        title = link.split('/')[-1]
        if not os.path.exists(os.path.join(
                base_dir, 'training_data', '{0}.txt'.format(title))):
            html = BeautifulSoup(raw_html, 'html.parser')
            html = html.findAll('p', attrs={'class': None})
            for line_num, line_text in enumerate(html):
                article_text.append(line_text.text)
            paragraphs = article_text[2:]  # get rid of header info
            s = ' '.join(paragraphs)
            s = s.split('\n', 1)[0]
            # NOTE(review): existence check above uses the full title but the
            # file is written as title[:199] — very long titles are re-scraped
            # on every run. Confirm intended.
            with open(
                    os.path.join(
                        base_dir, 'training_data',
                        '{0}.txt'.format(title[:199])
                    ), "wb") as text_file:
                text_file.write(s.encode('utf8'))
        # Pause the loop (politeness delay between requests)
        sleep(randint(8, 15))
if __name__ == '__main__':
    # Script entry point: scrape every linked article into training_data/.
    scrape_articles()
|
import sys
try:
import os
import sqlite3
from platform import system
from termcolor import colored
from pyfiglet import figlet_format
from prettytable import PrettyTable
except ModuleNotFoundError as error:
print(colored(error, color="red"))
input(colored("[!!]Press Any Key To Exit...", color="red"))
sys.exit()
# Clear the terminal in a platform-appropriate way.
clear = lambda: os.system("cls") if system() == "Windows" else os.system("clear")
# NOTE(review): the database path is hard-coded to one user's Windows
# desktop — confirm and make configurable before distributing this script.
db = sqlite3.connect(r"C:\Users\Dell\Desktop\password_safe.db")
cr = db.cursor()
# Ensure the credentials table exists before the menu loop starts.
cr.execute("CREATE TABLE IF NOT EXISTS safe_password(id INTEGER PRIMARY KEY AUTOINCREMENT, website_name TEXT, "
           "username TEXT NOT NULL, "
           "password TEXT NOT NULL)")
db.commit()
def addData(website_name: str, username: str, password: str) -> None:
    """Insert one credential row and commit immediately."""
    row = (website_name, username, password)
    cr.execute(
        "INSERT INTO safe_password (website_name, username, password) VALUES (?, ?, ?)",
        row,
    )
    db.commit()
def showData() -> None:
    """Print every stored credential as a colored table, or a NO DATA notice."""
    cr.execute("SELECT * FROM safe_password ORDER BY id")
    rows = cr.fetchall()
    if not rows:
        print(colored("[!!]NO DATA", color="red"))
        return
    table = PrettyTable(["ID", "Website", "Username", "Password"])
    print(colored(f"You Have {len(rows)} Skill.", color="blue"))
    print("\n")
    for row in rows:
        table.add_row(row)
    print(colored(table, color="blue"))
def _display_rows(data) -> None:
    """Render query results as a colored table, or a NO DATA notice."""
    if len(data) == 0:
        print(colored("[!!]NO DATA", color="red"))
    else:
        table = PrettyTable(["ID", "Website", "Username", "Password"])
        print(colored(f"You Have {len(data)} Skill.", color="blue"))
        print("\n")
        for row in data:
            table.add_row(row)
        print(colored(table, color="blue"))


def search(website=None, username=None) -> None:
    """Search saved credentials by website and/or username and print matches.

    Both filters are optional; an omitted filter matches everything.
    Uses parameterized SQL — the original interpolated raw user input into
    the query string (SQL injection) and duplicated the display logic in
    four near-identical branches.
    """
    query = "SELECT * FROM safe_password"
    clauses, params = [], []
    if website is not None:
        clauses.append("website_name=?")
        params.append(website)
    if username is not None:
        clauses.append("username=?")
        params.append(username)
    if clauses:
        query += " WHERE " + " AND ".join(clauses)
    query += " ORDER BY id"
    cr.execute(query, params)
    _display_rows(cr.fetchall())
def deleteData(id_: int) -> None:
    """Delete the row with primary key *id_* and commit.

    Uses a parameterized query; the original interpolated the id into the
    SQL string.
    """
    cr.execute("DELETE FROM safe_password WHERE id=?", (id_,))
    db.commit()
def updateData(new_password: str, id_: int) -> None:
    """Set a new password on the row with primary key *id_* and commit.

    Parameterized to prevent SQL injection; the original interpolated the
    raw password string directly into the SQL.
    """
    cr.execute("UPDATE safe_password SET password=? WHERE id=?", (new_password, id_))
    db.commit()
def Exit() -> None:
    # Flush any pending writes, close the connection, then terminate.
    db.commit()
    db.close()
    sys.exit()
def main() -> None:
    """Interactive menu loop dispatching on the user's numeric choice.

    Any exception (e.g. non-numeric input) is caught, printed in red,
    and ends the loop.
    """
    try:
        print(colored("""
    What do you want to do?
    [1]Show All Data
    [2]Add Date
    [3]Delete Data
    [4]Update Data
    [5]Search About Data
    [0]Quit App
    """, color="blue"))
        while True:
            print("")
            option = int(input(colored("[++]Choose Option: ", color="blue")))
            print("\n")
            if option == 1:
                showData()
            elif option == 2:
                websiteName = str(input(colored("[+]WebSite Name: ", color="blue")))
                Username = str(input(colored("[+]Username: ", color="blue")))
                Password = str(input(colored("[+]Password: ", color="blue")))
                addData(website_name=websiteName, username=Username, password=Password)
                print("\n")
                print(colored(f"[++]===================== Add IS DONE =====================[++]", color="blue"))
            elif option == 3:
                ID = int(input(colored("[+]ID: ", color="blue")))
                deleteData(id_=ID)
                print("\n")
                print(colored(f"[++]===================== DELETE IS DONE =====================[++]", color="blue"))
            elif option == 4:
                ID = int(input(colored("[+]ID: ", color="blue")))
                newPassword = str(input(colored("[+]New Password: ", color="blue")))
                updateData(newPassword, id_=ID)
            elif option == 5:
                # Empty input means "do not filter on this field".
                websiteName = str(input(colored("[+]WebSite Name: ", color="blue")))
                Username = str(input(colored("[+]Username: ", color="blue")))
                if websiteName == "" and Username == "":
                    search()
                elif websiteName == "":
                    search(username=Username)
                elif Username == "":
                    search(website=websiteName)
                else:
                    search(website=websiteName, username=Username)
            elif option == 0:
                Exit()
            else:
                print(colored(f"[!!]Invalid Option", color="red"))
    except Exception as er:
        print(colored(er, color="red"))
if __name__ == '__main__':
    # Entry point: clear the screen, show the ASCII banner, run the menu loop.
    clear()
    print()
    print(colored(figlet_format("Safe Password App"), color="blue"))
    print("\n")
    main()
|
import torch
from torch import nn, optim
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
import src.notebook.assets.h.helper
class Network(nn.Module):
    """Five-layer fully connected classifier (784 -> 10) with dropout.

    Returns log-probabilities (log_softmax), so pair it with NLLLoss.
    """

    def __init__(self):
        super().__init__()
        self.layer1 = nn.Linear(784, 512)
        self.layer2 = nn.Linear(512, 256)
        self.layer3 = nn.Linear(256, 128)
        self.layer4 = nn.Linear(128, 64)
        self.layer5 = nn.Linear(64, 10)
        # Dropout module with 0.2 drop probability, shared by all hidden layers.
        self.dropout = nn.Dropout(p=0.2)

    def forward(self, x):
        # Flatten everything but the batch dimension.
        x = x.view(x.shape[0], -1)
        for hidden in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = self.dropout(F.relu(hidden(x)))
        # Output layer: log-probabilities, no dropout.
        return F.log_softmax(self.layer5(x), dim=1)
# --- Data pipeline ----------------------------------------------------------
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train_set = datasets.mnist.FashionMNIST('~/.pytorch/F_MNIST_data/',
                                        download=True,
                                        train=True, transform=transform)
test_set = datasets.mnist.FashionMNIST('~/.pytorch/F_MNIST_data/',
                                       download=True,
                                       train=False, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)
# BUG FIX: the test loader previously wrapped train_set, so all "test"
# metrics were really training-set metrics.
test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=True)

model = Network()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)

epochs = 5
train_loss, test_loss = [], []
for e in range(epochs):
    running_loss = 0
    for images, labels in train_loader:
        optimizer.zero_grad()
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # Evaluate once per epoch with dropout disabled and autograd off.
    testing_loss = 0
    accuracy = 0
    with torch.no_grad():
        model.eval()
        for images, labels in test_loader:
            test_ps = model(images)
            # .item() keeps these as plain floats instead of accumulating tensors.
            testing_loss += criterion(test_ps, labels).item()
            ps = torch.exp(test_ps)
            top_p, top_class = ps.topk(1, dim=1)
            # BUG FIX: this line was `equals = top_class = labels.view(...)`
            # (a chained assignment), which clobbered top_class and made the
            # reported accuracy a constant 1.0.
            equals = top_class == labels.view(*top_class.shape)
            accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
    # back to train mode (re-enable dropout)
    model.train()
    train_loss.append(running_loss / len(train_loader))
    test_loss.append(testing_loss / len(test_loader))
    # (the original printed this block twice; once is enough)
    print("Epoch: {}/{}.. ".format(e + 1, epochs),
          "Training Loss: {:.3f}.. ".format(train_loss[-1]),
          "Test Loss: {:.3f}.. ".format(test_loss[-1]),
          "Test Accuracy: {:.3f}".format(accuracy / len(test_loader)))
|
# coding:utf-8
from bs4 import BeautifulSoup
import requests
import os
# Fetch one search-result page of stock photos (Python 2 script: note the
# print statements).
r = requests.get("http://699pic.com/sousuo-218808-13-1.html")
fengjing = r.content
soup = BeautifulSoup(fengjing, "html.parser")
# Find all lazily-loaded image tags.
images = soup.find_all(class_="lazy")
print images # a list of matching tags
# Download loop kept for reference (disabled):
# for i in images:
# jpg_rl = i["data-original"]
# title = i["title"]
# print title
# print jpg_rl
# print ""
# # requests.get opens the image URL;
# # .content is the raw byte stream, written straight to disk
# with open(os.getcwd()+"\\img\\"+title+'.jpg', "wb") as f:
# f.write(requests.get(jpg_rl).content)
import numpy as np
#import os
#import math
#from collections import defaultdict
class bModel:
    """Builds Kalman-filter matrices for an order-regN regression model.

    The state vector has 2*regN+1 entries: a leading constant term, regN
    noise/innovation terms (from self.Ep) and regN past observations.
    NOTE(review): the magic length 4212 for Ep/w/v looks like a dataset-
    specific time-series length — confirm.
    """
    def __init__(self,regN):
        # Current state estimate, column vector of size (2*regN+1, 1).
        self.state_vector_curr = np.empty([(2*regN+1),1])
        # Innovation sequence samples.
        self.Ep = np.random.normal(0.1, 0.1, 4212)
        # State-transition matrix F, process-noise map G, observation row H.
        self.F = np.zeros([(2*regN+1),(2*regN+1)])
        self.V = np.zeros([(2*regN+1),1])
        self.G = np.identity((2*regN+1))
        self.H = np.zeros([1,(2*regN+1)])
        self.SHAI = np.empty((regN))
        self.PHI = np.empty((regN))
        # Regression coefficients: bias vector B and coupling matrix C.
        self.B = np.random.normal(0.1, 0.1, regN)
        self.C = np.random.normal(0.1, 0.1, (regN,regN))
        # Process (w) and measurement (v) noise sample sequences.
        self.w = np.random.normal(0,0.1,4212)
        self.v = np.random.normal(0,0.1,4212)
        self.Q = 0.00
        self.R = 0.00
        # Mark the newest noise term and newest observation slots.
        self.V[regN] = 1
        self.V[2*regN] = 1
        # Observation picks the last state component.
        self.H[0][2*regN] = 1

    def createModel(self,fObj,vphi,len_pred_nodes,state_inp,path_no,n,time_KF,curr_node,it):
        """Populate state_vector_curr, F, SHAI and PHI for the current path.

        Walks predecessor links (it.pi_v) back from curr_node to collect the
        last n edge costs into X; path_no distinguishes the first path (zeros
        for missing predecessors) from later ones (values carried over from
        state_inp).
        """
        X = np.zeros((n))
        sumC = 0
        pred = 0
        if (path_no>1):
            cn = curr_node
            w = n-1
            r =0
            while (w>=0):
                pred = it.pi_v[cn]
                if (pred == 0):
                    # No predecessor: reuse the corresponding prior state entry.
                    X[w]=state_inp[(2*n)-r]
                    r=r+1
                    w=w-1
                else:
                    X[w] =fObj.edge_cost[pred][cn]
                    cn = pred
                    w=w-1
                #end if
            #end while
            constant_miu = np.mean(X)
            self.state_vector_curr[0]= 1
            t=n
            # Fill entries 1..n with the most recent n innovation samples.
            for m in range(1,(n+1)):
                self.state_vector_curr[m] = self.Ep[(time_KF-t)]
                t=t-1
            #end for
            # Entries n+1..2n hold the collected observations X.
            for o in range((n+1), ((2*n)+1)):
                self.state_vector_curr[o] = X[o-(n+1)]
        elif (path_no ==1):
            cn = curr_node
            w = n-1
            while (w>=0):
                pred = it.pi_v[cn]
                if (pred == 0):
                    # First path: no history available, pad with zero.
                    X[w] = 0
                    w = w-1
                else :
                    X[w] = fObj.edge_cost[pred][cn]
                    cn = pred
                    w =w -1
                #end if
            #end while
            constant_miu = np.mean(X)
            self.state_vector_curr[0]= 1
            t=n
            for m in range(1,(n+1)):
                self.state_vector_curr[m] = self.Ep[(time_KF-t)]
                t=t-1
            #end for
            for o in range((n+1), ((2*n)+1)):
                self.state_vector_curr[o] = X[o-(n+1)]
            #end for
        # AR-style coefficients: shai[z] = B[z] + sum_p C[z][p]*X[n-1-p].
        shai = np.ones(n)*10
        phi = np.ones(n)*10
        for z in range(0,len(shai)):
            for p in range(0,len(shai)):
                sumC = sumC + self.C[z][p]*X[(n-1)-p]
            #end for
            shai[z]=self.B[z]+sumC
            sumC =0
            phi[z] = vphi
        #end for
        # Build the shift structure of F: index encodes where each state
        # component moves (positive -> noise block, negative -> observation
        # block shifted by n).
        a = np.ones((n,), dtype=np.int)*n
        b = range(0,(n))
        c = a-b
        d = c*(-1)
        s = [0]
        index = np.concatenate((s,c, d), axis=0)
        r = 1
        for w in range(1,(2*n+1)):
            if (index[w]> 0):
                row = n - abs(index[w])
            else:
                row = n - abs(index[w])
                if row>0:
                    row=row+n
                #end if
            if row >0:
                self.F[row][w] =1
            #end if
            # Last row of F carries the regression: shai terms for the noise
            # block, -phi terms for the observation block.
            if w <=n:
                self.F[(2*n+1)-1][w] = shai[n-w]
            elif w<=(2*n+1) and w > n:
                self.F[(2*n+1)-1][w] = (-1)*phi[w-(2*r)]
                r = r+1
            #end if
        # end for
        self.F[0][0]=1
        self.F[(2*n+1)-1][0] = constant_miu
        self.SHAI = shai
        self.PHI = phi

    def formAutoCov(self,time_KF):
        """Set scalar noise covariances Q and R from the sample at time_KF.

        NOTE(review): np.cov of a scalar with itself degenerates — confirm
        this is the intended covariance estimate.
        """
        self.Q = np.cov(self.w[time_KF],self.w[time_KF])
        self.R = np.cov(self.v[time_KF],self.v[time_KF])
|
import time
import numpy as np
def standard(all_elements, subset_elements):
    """
    Standard way to find the intersection of two sets
    :param all_elements:
    :param subset_elements:
    :return:
    """
    start = time.time()
    verified_elements = [element for element in subset_elements
                         if element in all_elements]
    print(len(verified_elements))
    print('Duration: {} seconds'.format(time.time() - start))
def numpy(all_elements, subset_elements):
    """
    Intersection of two sets using built-in functions of numpy
    :param all_elements:
    :param subset_elements:
    :return:
    """
    start = time.time()
    matches = np.intersect1d(np.array(subset_elements), np.array(all_elements))
    print(len(matches))
    print('Duration: {} seconds'.format(time.time() - start))
def usingsets(all_elements, subset_elements):
    """
    Intersection of two sets using built-in set data structure of Python
    :param all_elements:
    :param subset_elements:
    :return:
    """
    start = time.time()
    common = set(subset_elements).intersection(all_elements)
    print(len(common))
    print('Duration: {} seconds'.format(time.time() - start))
# Benchmark the three membership-test strategies on newline-separated
# element files (one element per line).
with open('subset_elements.txt') as f:
    subset_elements = f.read().split('\n')
with open('all_elements.txt') as f:
    all_elements = f.read().split('\n')
print(standard.__doc__)
standard(all_elements, subset_elements)
print(numpy.__doc__)
numpy(all_elements, subset_elements)
print(usingsets.__doc__)
usingsets(all_elements, subset_elements)
|
def get_input():
    """Read 'low-high' from day4input.txt and return (low, high) as ints."""
    with open('day4input.txt') as f:
        low, high = f.read().split('-')
    return int(low), int(high)
def part1_condition(n):
    """True when n's digits never decrease and some digit run has length >= 2."""
    prev = 0
    run = 1
    has_pair = False
    for ch in str(n):
        d = int(ch)
        if d < prev:
            return False
        if d == prev:
            run += 1
        else:
            has_pair = has_pair or run >= 2
            run = 1
        prev = d
    return has_pair or run >= 2
def part1():
    """Count the values in the input range satisfying the part-1 rule."""
    low, high = get_input()
    return sum(1 for i in range(low, high + 1) if part1_condition(i))
def part2_condition(n):
    """True when n's digits never decrease and some digit run has length
    exactly 2 (longer runs do not count toward the pair rule).

    (The original also tracked an unused `repeated` flag, removed here.)
    """
    prev = 0
    n_repeats = 1
    has_exact_pair = False
    for c in str(n):
        digit = int(c)
        if digit < prev:
            return False
        if digit == prev:
            n_repeats += 1
        else:
            # Run ended: record whether it was exactly a pair.
            if n_repeats == 2:
                has_exact_pair = True
            n_repeats = 1
        prev = digit
    # Account for a run that extends to the last digit.
    if n_repeats == 2:
        has_exact_pair = True
    return has_exact_pair
def part2():
    """Count the values in the input range satisfying the part-2 rule."""
    low, high = get_input()
    return sum(1 for i in range(low, high + 1) if part2_condition(i))
# Solve and print both puzzle parts.
print(part1())
print(part2())
# coding: utf-8
# Josephus-style elimination: for each of NC test cases, remove every k-th
# person from a circle of n and print the survivor.
# inputs:
NC = int(input())
i, n, k, remover = 0, 0, 0, 0
l = []
while (i < NC):
    remover = 0
    i += 1
    n, k = [int(a) for a in input().split(" ")]
    # computation
    l = list(range(1, (n+1)))
    while (len(l) > 1):
        # Advance k-1 positions, wrapping around the shrinking circle.
        remover += (k - 1)
        while (remover >= n):
            remover -= n
        del l[remover]
        n -= 1
    # output
    print('Case %d: %d' % (i, l.pop(0)))
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2019, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from py2neo import Node, Relationship
from py2neo.internal.compat import long
def test_single_node_creation(graph):
    """Create one node and verify its label, property and server identity."""
    alice = Node("Person", name="Alice")
    assert alice.labels == {"Person"}
    assert alice["name"] == "Alice"
    graph.create(alice)
    assert isinstance(alice.identity, int)
    assert graph.exists(alice)
def test_can_create_local_node(graph):
    """A purely local (unbound) node keeps its labels and properties."""
    a = Node("Person", name="Alice", age=33)
    assert set(a.labels) == {"Person"}
    assert dict(a) == {"name": "Alice", "age": 33}
def test_can_create_remote_node(graph):
    """Labels and properties are unchanged after graph.create binds the node."""
    a = Node("Person", name="Alice", age=33)
    graph.create(a)
    assert set(a.labels) == {"Person"}
    assert dict(a) == {"name": "Alice", "age": 33}
def test_bound_node_equals_unbound_node_with_same_properties(graph):
    """A bound and an unbound node with the same data have equal labels/properties."""
    alice_1 = Node(name="Alice")
    alice_1.graph = graph
    alice_1.identity = 999
    alice_2 = Node(name="Alice")
    assert set(alice_1.labels) == set(alice_2.labels)
    assert dict(alice_1) == dict(alice_2)
def test_bound_node_equality(graph):
    """Two node objects bound to the same graph and identity compare equal."""
    alice_1 = Node(name="Alice")
    alice_1.graph = graph
    alice_1.identity = 999
    # Clear the cache so alice_2 is a genuinely distinct object.
    graph.node_cache.clear()
    alice_2 = Node(name="Alice")
    alice_2.graph = alice_1.graph
    alice_2.identity = alice_1.identity
    assert alice_1 == alice_2
def test_unbound_node_equality(graph):
    """Two unbound nodes with the same labels and properties have equal data."""
    alice_1 = Node("Person", name="Alice")
    alice_2 = Node("Person", name="Alice")
    assert set(alice_1.labels) == set(alice_2.labels)
    assert dict(alice_1) == dict(alice_2)
def test_can_merge_unsaved_changes_when_querying_node(graph):
    """Local (unsaved) property changes survive running a match query on the node."""
    a = Node("Person", name="Alice")
    b = Node()
    graph.create(a | b | Relationship(a, "KNOWS", b))
    assert dict(a) == {"name": "Alice"}
    a["age"] = 33
    assert dict(a) == {"name": "Alice", "age": 33}
    _ = list(graph.match((a, None), "KNOWS"))
    assert dict(a) == {"name": "Alice", "age": 33}
def test_pull_node_labels_if_stale(graph):
    """A label removed locally reappears after the labels are marked stale (re-pulled)."""
    a = Node("Thing")
    graph.create(a)
    a.remove_label("Thing")
    a._stale.add("labels")
    labels = a.labels
    assert set(labels) == {"Thing"}
def test_pull_node_property_if_stale(graph):
    """A property cleared locally reappears after properties are marked stale (re-pulled)."""
    a = Node(foo="bar")
    graph.create(a)
    a["foo"] = None
    a._stale.add("properties")
    assert a["foo"] == "bar"
def test_can_create_concrete_node(graph):
    """Node.cast builds a Node from a plain dict of properties."""
    alice = Node.cast({"name": "Alice", "age": 34})
    graph.create(alice)
    assert isinstance(alice, Node)
    assert alice["name"] == "Alice"
    assert alice["age"] == 34
def test_all_property_types(graph):
    """Every supported property type round-trips unchanged through create."""
    data = {
        "nun": None,
        "yes": True,
        "no": False,
        "int": 42,
        "float": 3.141592653589,
        "long": long("9223372036854775807"),
        "str": "hello, world",
        "unicode": u"hello, world",
        "boolean_list": [True, False, True, True, False],
        "int_list": [1, 1, 2, 3, 5, 8, 13, 21, 35],
        "str_list": ["red", "orange", "yellow", "green", "blue", "indigo", "violet"]
    }
    foo = Node.cast(data)
    graph.create(foo)
    for key, value in data.items():
        assert foo[key] == value
def test_node_hashes(graph):
    """Distinct node objects bound to the same graph/identity hash identically."""
    node_1 = Node("Person", name="Alice")
    node_1.graph = graph
    node_1.identity = 999
    node_2 = Node("Person", name="Alice")
    node_2.graph = node_1.graph
    node_2.identity = node_1.identity
    assert node_1 is not node_2
    assert hash(node_1) == hash(node_2)
def test_cannot_delete_uncreated_node(graph):
    """Deleting a node that was never created does not raise."""
    a = Node()
    graph.delete(a)
def test_node_exists(graph):
    """graph.exists is True for a created node."""
    a = Node()
    graph.create(a)
    assert graph.exists(a)
def test_node_does_not_exist(graph):
    """graph.exists is False for an uncreated node."""
    a = Node()
    assert not graph.exists(a)
def test_can_name_using_name_property(graph):
    """__name__ falls back to the 'name' property."""
    a = Node(name="Alice")
    assert a.__name__ == "Alice"
def test_can_name_using_magic_name_property(graph):
    """An explicit '__name__' property sets __name__ directly."""
    a = Node(__name__="Alice")
    assert a.__name__ == "Alice"
|
"""
OBJECTIVE: Given a list, sort it from low to high using the SELECTION SORT algorithm
The selection sort algorithm sorts an array by repeatedly finding the minimum element (considering ascending order)
from unsorted part and putting it at the beginning. The algorithm maintains two subarrays in a given array.
1) The subarray which is already sorted.
2) Remaining subarray which is unsorted.
In every iteration of selection sort, the minimum element (considering ascending order) from the unsorted subarray
is picked and moved to the sorted subarray.
https://www.geeksforgeeks.org/selection-sort/
"""
def selection_sort(array: list) -> list:
    """Sort *array* in ascending order, in place, using selection sort.

    Fix: the original was an unimplemented stub that returned [].

    In each pass the minimum of the unsorted suffix ``array[i:]`` is
    selected and swapped to position ``i``, growing the sorted prefix.
    O(n^2) comparisons, O(1) extra space.  Returns the sorted list.
    """
    for i in range(len(array)):
        # Locate the smallest element of the unsorted suffix.
        min_idx = i
        for j in range(i + 1, len(array)):
            if array[j] < array[min_idx]:
                min_idx = j
        # Move it to the front of the unsorted part.
        array[i], array[min_idx] = array[min_idx], array[i]
    return array
|
from flask.ext.restful import Resource
from flask import send_file
class Documentation(Resource):
    """REST resource that serves the pre-built HTML documentation page."""
    def get(self):
        # GET -> stream documentation.html (path resolved by Flask's send_file).
        return send_file('documentation.html')
|
def reverse(string, low, way, array):
    """Depth-first search collecting into *array* every way of splitting
    string[low:] so the whole string becomes exactly three palindromes.

    *way* holds the palindromic pieces chosen so far.
    """
    n = len(string)
    if low == n:
        # Whole string consumed: record the split only if it has 3 pieces.
        if len(way) == 3:
            array.append(way)
        return
    for stop in range(low + 1, n + 1):
        piece = string[low:stop]
        if piece != piece[::-1]:
            continue  # not a palindrome — skip this cut
        reverse(string, stop, way + [piece], array)
def palindrome_string(string):
    """Return every split of *string* into exactly three palindromic pieces."""
    found = []
    reverse(string, 0, [], found)
    return found
# Read the input string and print one decomposition into three palindromes,
# or "Impossible" when no such decomposition exists.
# Fixes: the original gate was `len(string+1) % 3 != 0`, which raises
# TypeError (str + int) on every run; and `arr[0]` could raise IndexError
# when no decomposition exists.  Decide directly from the search result.
string = input()
arr = palindrome_string(string)
if not arr:
    print("Impossible")
else:
    for part in arr[0]:
        print(part)
|
1. Which of the following are constant with regard to time complexity?
a. variable assignments
b. accessing an element in an array by index
c. searching for an element in a linked list
d. in a loop where there are only arithmetic operations inside the loop
e. arithmetic operations
f. searching for an element in an array
2. How could you simplify the big O expression of each of the following?
a. O(n + 10)
b. O(2n)
c. O(1000n + 500n)
d. O(5n^2 + 100n + 25)
e. O(1000)
f. O(n^2 + n^3)
3. What is the time complexity of the following:
a. for i in range(5000):
print(i + i)
b. for i in range(n):
print(i + i)
c. for i in range(n):
for j in range(n):
return i * j
4. Place the following big O expressions in order from fast to slow:
a. O(log n)
b. O(n)
c. O(1)
d. O(n^2)
|
from typing import List, Dict, Any
from mapnlp.alg.chunking import Chunker
from mapnlp.data.chunk import Chunk
from mapnlp.data.morpheme import Morpheme
@Chunker.registry
class IndependentRuleChunker(Chunker):
    """
    Simple Rule-based Chunker
    where each chunk has only one independent word and some adjunct/other words
    Reference: http://www.nltk.org/book-jp/ch12.html#id56
    """
    ALGORITHM_NAME = "independent-rule"

    def __init__(self):
        pass

    def run(self, morphs: List[Morpheme]) -> List[Chunk]:
        """Group *morphs* into chunks, starting a new chunk at each
        independent word; dependent words attach to the current chunk.

        Fix: guard against an empty morpheme list — the original indexed
        morphs[0] unconditionally and raised IndexError on empty input.
        """
        if not morphs:
            return []
        # NOTE(review): the first morpheme is assumed independent (see the
        # commented-out assert in the original) — dependent leading morphemes
        # simply join the first chunk.
        chunks = []
        morph_buff = [morphs[0]]
        for m in morphs[1:]:
            if m.is_independent():  # independent word starts a new chunk
                chunks.append(Chunk(morph_buff))
                morph_buff = [m]
            else:
                morph_buff.append(m)
        chunks.append(Chunk(morph_buff))
        return chunks

    @classmethod
    def build(cls, config: Dict[str, Any]):
        # Configuration is currently unused by this chunker.
        return cls()
|
#Colt Bradley
#2.25.16
#Homework 12
# import modules
import numpy as n
# define variables (SI units)
m = 14      # mass [kg]
l = 1.2     # length [m]
g = 9.8     # gravitational acceleration [m/s^2]
# Fix: numpy trig functions expect RADIANS; the original passed 35 (degrees)
# straight to n.cos/n.sin, producing wrong forces.
theta = n.radians(35)
# Define the matrices for the statics force balance
big = n.matrix([[1, 0, -n.cos(theta)], [0, 1, n.sin(theta)],
                [0, -l/2., n.sin(theta)*l/2]])
col = n.matrix([[0], [m*g], [0]])
# use linear algebra package, print result
# (parenthesized single-argument print works under Python 2 and 3)
print(n.linalg.solve(big, col))
############################################################################
#Part 2
############################################################################
# define variables, emf in Volts and r in ohms
emf_1 = 12
emf_2 = 9
r_1 = 100
r_2 = 130
r_3 = 65
# define the circuit matrices (Kirchhoff loop/junction equations)
res = n.matrix([[r_1, 0, r_3], [0, r_2, r_3], [1, 1, -1]])
emfs = n.matrix([[emf_1], [emf_2], [0]])
# use linear algebra package, print result
print(n.linalg.solve(res, emfs))
# transformer.py
#
# Copyright(c) Exequiel Ceasar Navarrete <esnavarrete1@up.edu.ph>
# Licensed under MIT
# Version 2.0.0
import os
import re
from PIL import Image
from app.cards.card import Card, SHAPES, FACE_VALUES
from app.cards.error import TransformerError
from app.blackjack.game.error import GameError
# Pixel constants for the card sprite sheet.
# NOTE(review): MOVE_X and MOVE_Y are not referenced in this module's
# visible code — presumably used by importers; confirm before removing.
MOVE_X = 78
MOVE_Y = 120
# (left, upper, right, lower) box — appears to locate the blank card; confirm.
BLANK_COORDS = (158, 492, 237, 615)
class Transformer(object):
    """Base (marker) class for the card transformers in this module."""
    pass
class CardToTextTransformer(Transformer):
    """Transformer class for cards to text for sending over the network"""

    def __init__(self, card=None):
        """Optionally seed the transformer with *card*."""
        Transformer.__init__(self)
        # Initialize card to None
        self.card = None
        # Fix: compare to None with `is not` (PEP 8), not `!=`.
        if card is not None:
            self.set_card(card)

    def set_card(self, card: Card):
        self.card = card

    def transform(self):
        """Serialize the stored card as "<face> of <shape>".

        Raises TransformerError when no card has been set.
        """
        if self.card is None:
            raise TransformerError("Set a card first before transforming it")
        return "%s of %s" % (self.card.get_face_value(), self.card.get_shape())
class TextToCardTransformer(Transformer):
    """Transformer class for deserializing text back to card instances"""

    def __init__(self, text=None):
        """Optionally seed the transformer with serialized *text*."""
        Transformer.__init__(self)
        # Initialize text to None (the original comment said "card" — copy-paste slip)
        self.text = None
        # Fix: compare to None with `is not` (PEP 8), not `!=`.
        if text is not None:
            self.set_text(text)

    def set_text(self, text):
        self.text = text

    def transform(self):
        """Parse "<face> of <shape>" back into a Card.

        Raises TransformerError when no text is set or it cannot be parsed.
        """
        if self.text is None or self.text == '':
            raise TransformerError("Set a serialized text of the card first before transforming it")
        matches = re.search('([0-9JQKA]+) of (' + '|'.join(SHAPES) + ')', self.text)
        if not matches:
            raise TransformerError("Cant deserialize card: %s" % self.text)
        # Card(shape, face): group 2 is the shape, group 1 the face value.
        return Card(matches.group(2), matches.group(1))
class CardImagePositionToCardTransformer(Transformer):
    """Transformer class for x and y coordinates back to Card Instance"""

    def __init__(self, position=None):
        """Optionally seed the transformer with a sprite-sheet *position*."""
        Transformer.__init__(self)
        # Initialize coordinates to None
        self.position = None
        # Fix: compare to None with `is not` (PEP 8), not `!=`.
        if position is not None:
            self.set_position(position)

    def set_position(self, position):
        self.position = position

    def transform(self):
        """Return the Card whose sprite-sheet box matches the stored position.

        Raises TransformerError when no position has been set.
        """
        if self.position is None:
            raise TransformerError("Set coordinates first before transforming it.")
        card_attrs = get_card_attrs(self.position)
        return Card(card_attrs['shape'], card_attrs['face'])
class CardToCardImagePositionTransformer(Transformer):
    """Transformer class for cards to be converted to x and y coordinates"""

    def __init__(self, card=None):
        """Optionally seed the transformer with *card*."""
        Transformer.__init__(self)
        # Initialize card to None
        self.card = None
        # Fix: compare to None with `is not` (PEP 8), not `!=`.
        if card is not None:
            self.set_card(card)

    def set_card(self, card: Card):
        self.card = card

    def transform(self):
        """Return the (left, top, right, bottom) sprite box for the stored card.

        Raises TransformerError when no card has been set.
        """
        if self.card is None:
            raise TransformerError("Set a card first before transforming it")
        return get_card_coords(self.card.get_face_value(), self.card.get_shape())
# [Image Position Resolution] ::start
card_img_path = os.path.join(os.getcwd(), "resources/images/cards.png")
if not os.path.exists(card_img_path):
    raise GameError("Card Faces sprite does not exist!")
# Open the sprite sheet only long enough to read its dimensions.
# Fix: use a context manager — the original closed the file manually and
# leaked the handle if anything raised while building the grid.
with Image.open(card_img_path) as card_img:
    # destructure the (width, height) size tuple
    width, height = card_img.size
# The sheet is divided into 13 columns (face values) x 5 rows.
CARD_WIDTH = int(width / 13)
CARD_HEIGHT = int(height / 5)
top = 0
right = CARD_WIDTH
bottom = CARD_HEIGHT
left = 0
# tmp coordinates act as a cursor walking across the sprite grid
tmp_top = top
tmp_right = right
tmp_bottom = bottom
tmp_left = left
# Map "FACE of SHAPE" -> (left, top, right, bottom) box in the sprite sheet.
resolved_cards = {}
for shape_idx in range(0, len(SHAPES)):
    for face_idx in range(0, len(FACE_VALUES)):
        # assemble dictionary key
        key = "%s of %s" % (FACE_VALUES[face_idx], SHAPES[shape_idx])
        # store the position
        resolved_cards[key] = (tmp_left, tmp_top, tmp_right, tmp_bottom)
        # advance one card to the right
        tmp_left += right
        tmp_right += right
    # wrap back to the first column ...
    tmp_left = 0
    tmp_right = right
    # ... and move down one row
    tmp_top += bottom
    tmp_bottom += bottom
# [Image Position Resolution] ::end
# utility functions
def get_card_attrs(coords):
    """Return {'face': ..., 'shape': ...} for the card whose sprite box equals *coords*.

    Raises TransformerError when no card occupies that position.
    """
    found_card = None
    for card, card_coords in resolved_cards.items():
        # Fix: compare as ordered sequences.  The original compared
        # set(coords) == set(card_coords), which ignores element order and
        # duplicates, so different boxes sharing the same coordinate values
        # could match the wrong card.
        if tuple(coords) == tuple(card_coords):
            found_card = card
            break
    if found_card is None:
        raise TransformerError("Cant transform to card by its position")
    # convert to card instance to extract face and shape
    transformed_card = TextToCardTransformer(found_card).transform()
    return {
        'face': transformed_card.get_face_value(),
        'shape': transformed_card.get_shape()
    }
def get_card_coords(face_value, shape):
    """Return the (left, top, right, bottom) sprite box for *face_value*/*shape*.

    Raises TransformerError for an invalid face, shape, or missing card.
    """
    # Idiom fix: `x not in y` instead of `not x in y` (PEP 8).
    if face_value not in FACE_VALUES:
        raise TransformerError("Face Value: %s is not a valid face." % face_value)
    if shape not in SHAPES:
        raise TransformerError("Shape: %s is not a valid card shape." % shape)
    # assemble dictionary key
    card_key = "%s of %s" % (face_value, shape)
    # throw error when card_key does not exist
    if card_key not in resolved_cards:
        raise TransformerError("Card coordinates not found.")
    return resolved_cards[card_key]
|
#Tic-Tac-Toe
#top-L, top-M, top-R
#mid-L, mid-M, mid-R
#low-L, low-M, low-R
import pprint
import time
# Board state: keys are '<row>-<col>' (rows top/mid/low, columns L/M/R);
# values are 'X', 'O', or ' ' for an empty square.
theBoard = {'top-L':' ','top-M':' ','top-R':' ',
            'mid-L':' ','mid-M':' ','mid-R':' ',
            'low-L':' ','low-M':' ','low-R':' ',}
# Pristine empty board used as the template when a round ends.
reset = {'top-L':' ','top-M':' ','top-R':' ',
         'mid-L':' ','mid-M':' ','mid-R':' ',
         'low-L':' ','low-M':' ','low-R':' ',}
# Running win tally per player.
score = {'X':0, 'O':0}
def printBoard(board):
    """Render the 3x3 board to stdout, rows separated by '-+-+-' lines."""
    rendered_rows = []
    for row in ('top', 'mid', 'low'):
        rendered_rows.append(
            board[row + '-L'] + '|' + board[row + '-M'] + '|' + board[row + '-R'])
    print('\n-+-+-\n'.join(rendered_rows))
def solutions(board):
    """Return True when any row, column, or diagonal holds three identical,
    non-blank marks; otherwise False."""
    winning_lines = (
        # horizontal
        ('top-L', 'top-M', 'top-R'),
        ('mid-L', 'mid-M', 'mid-R'),
        ('low-L', 'low-M', 'low-R'),
        # vertical
        ('top-L', 'mid-L', 'low-L'),
        ('top-M', 'mid-M', 'low-M'),
        ('top-R', 'mid-R', 'low-R'),
        # diagonal
        ('top-L', 'mid-M', 'low-R'),
        ('top-R', 'mid-M', 'low-L'),
    )
    for a, b, c in winning_lines:
        if board[a] != ' ' and board[a] == board[b] == board[c]:
            return True
    return False
# Main game loop: empty input quits, 'score' shows the tally, anything else
# is treated as a board position.
turn = 'X'
while True:
    printBoard(theBoard)
    print('Turn for ' + turn + '. Move on which space?')
    move = input()
    if move == '':
        break
    elif move == 'score':
        print('The current scores are:')
        print('X: ' + str(score['X']) + ' to O: ' + str(score['O']))
    else:
        try:
            if theBoard[move] != ' ':
                print('That position is already taken, try another!')
            else:
                theBoard[move] = turn
                if solutions(theBoard):
                    print('Congratulations ' + turn + ', you won!')
                    # Fix: keep scores as ints.  The original stored
                    # str(score[turn] + 1), so the player's SECOND win
                    # raised TypeError (str + int).
                    score[turn] = score[turn] + 1
                    print('The current scores are:')
                    print('X: ' + str(score['X']) + ' to O: ' + str(score['O']))
                    print()
                    print()
                    print()
                    print('play again!')
                    # Fix: start the next round from a COPY of the template.
                    # The original aliased `reset`, so marks from later
                    # rounds leaked into it and resets stopped working.
                    theBoard = dict(reset)
                if turn == 'X':
                    turn = 'O'
                else:
                    turn = 'X'
        except KeyError:
            print()
            print('that is not a valid position, sorry! Try again!')
            print('the options for rows are: top, mid, low')
            print('the options for columns are: L, M, R')
            print('and are separated with a dash. eg. top-L')
            print()
            print('Your go again!')
            time.sleep(1)
            continue
printBoard(theBoard)
|
from django.shortcuts import render
# Create your views here.
import time
from pool import SQLPoll
from django.shortcuts import render, redirect
from django.core.paginator import Paginator
# Create your views here.
def index(request):
    """Paginated listing of all b_yysxj records, ordered by date (rq)."""
    request.encoding = 'utf-8'
    # Requested page number; fall back to the first page.
    page_no = int(request.GET.get('pag') or 1)
    sql = 'SELECT id,xlqk,gzcl,xjr,shr,rq,bz FROM b_yysxj ORDER BY rq'
    with SQLPoll() as db:
        rows = db.fetch_all(sql, None)
    paginator = Paginator(rows, 10)
    return render(request, 'yunyingshang/index.html',
                  {
                      'students': paginator.get_page(page_no),
                      'wz': "/yunyingshang/?pag=",
                      'page_num': paginator.page_range,
                      'rq2': ''
                  }
                  )
def find(request):
    """Filter records by date (?rq1=YYYY-MM-DD), paginated; redirect to the
    index when no date was supplied."""
    request.encoding = 'utf-8'
    if 'rq1' in request.GET and request.GET['rq1']:
        rq1 = request.GET['rq1']
        # Keep the original ISO form for building the pagination links.
        rq2 = request.GET['rq1']
        # Fix: use .get() — the original did request.GET['pag'] and raised
        # KeyError whenever the page parameter was absent from the URL.
        pag = request.GET.get('pag')
        if pag:
            pag = int(pag)
        else:
            pag = 1
        # The database stores dates as YYYY.MM.DD.
        rq1 = time.strftime("%Y.%m.%d", time.strptime(rq1, "%Y-%m-%d"))
        sql = 'SELECT id,xlqk,gzcl,xjr,shr,rq,bz FROM b_yysxj where rq =%s ORDER BY rq'
        with SQLPoll() as db:
            students = db.fetch_all(sql, rq1)
        p = Paginator(students, 10)
        students = p.get_page(pag)
        wz = "/yunyingshang/find?rq1=" + rq2 + "&pag="
        page_num = p.page_range
        return render(request, 'yunyingshang/index.html',
                      {
                          'students': students,
                          'wz': wz,
                          'page_num': page_num,
                          'rq2': rq2
                      }
                      )
    else:
        return redirect('../')
# Record-creation handler (GET renders the form, POST inserts a row)
def add(request):
    """Show the add form on GET; insert the submitted record on POST."""
    if request.method == 'GET':
        return render(request, 'yunyingshang/add.html')
    fields = ('xlqk', 'gzcl', 'xjr', 'shr', 'rq', 'bz')
    args = tuple(request.POST.get(field, '') for field in fields)
    sql = "INSERT INTO b_yysxj (xlqk,gzcl,xjr,shr,rq,bz) values (%s,%s,%s,%s,%s,%s)"
    with SQLPoll() as db:
        db.execute(sql, args)
    return redirect('../')
# Record-update handler (GET loads the row into the form, POST saves it)
def edit(request):
    """Show the edit form for one record on GET; apply the update on POST."""
    if request.method == 'GET':
        record_id = request.GET.get("id")
        sql = "SELECT id,xlqk,gzcl,xjr,shr,rq,bz FROM b_yysxj where id =%s"
        with SQLPoll() as db:
            student = db.fetch_one(sql, record_id)
        return render(request, 'yunyingshang/edit.html', {'student': student})
    # POST: collect the edited fields, with the row id last for the WHERE clause.
    fields = ('xlqk', 'gzcl', 'xjr', 'shr', 'rq', 'bz', 'id')
    args = tuple(request.POST.get(field, '') for field in fields)
    sql = "UPDATE b_yysxj set xlqk=%s,gzcl=%s,xjr=%s,shr=%s,rq=%s,bz=%s where id =%s"
    with SQLPoll() as db:
        db.execute(sql, args)
    return redirect('../')
# Record-deletion handler
def delete(request):
    """Delete the row whose id is given in the query string, then go home."""
    record_id = request.GET.get("id")
    sql = "DELETE FROM b_yysxj WHERE id =%s"
    with SQLPoll() as db:
        db.execute(sql, record_id)
    return redirect('../')
from django.shortcuts import render, redirect
from .models import Course
# Create your views here.
def index(request):
    """Render the course list page with every Course in the database."""
    context = {
        "courses": Course.objects.all()
    }
    # Debug trace left by the author.  Fix: parenthesized single-argument
    # print works under both Python 2 and Python 3; the original bare
    # print statement was Python-2-only syntax.
    print("hello i am the index page return statement")
    return render(request, 'coursesapp/index.html', context)
def addcourse(request):
    """Create a Course from the submitted form data, then return to the index."""
    Course.objects.create(
        name=request.POST['name'],
        description=request.POST['description'],
    )
    return redirect('/')
def confirmdel(request, id):
    """Show the delete-confirmation page for course *id*."""
    course_to_delete = Course.objects.get(id=id)
    return render(request, 'coursesapp/confirm.html', {"course": course_to_delete})
def delete(request, id):
    """Delete course *id* (called after the user has confirmed), then go home."""
    # Fetch the row by primary key and remove it.
    course_to_delete = Course.objects.get(id=id)
    course_to_delete.delete()
    return redirect('/')
#create database
#make appropriate references to it from this code.
#dont forget to migrate.
|
from django.urls import path, re_path
from django.views.generic import TemplateView
from workouts import views
from workouts import api_views
app_name='workouts'
# Route table for the workouts app: classic Django views first, then the
# Django REST Framework API endpoints.
urlpatterns = [
    ############################
    # normal django view urls
    ############################
    # workouts index view
    path('', TemplateView.as_view(template_name='workouts/workout_index.html'), name='WorkoutIndex'),
    # sessions (CRUD by numeric primary key)
    path('sessions/', views.SessionList.as_view(), name='SessionList'),
    path('sessions/new/', views.SessionCreate.as_view(), name='SessionCreate'),
    re_path(r'^sessions/(?P<pk>[0-9]+)/$', views.SessionDetail.as_view(), name='SessionDetail'),
    re_path(r'^sessions/(?P<pk>[0-9]+)/update/$', views.SessionUpdate.as_view(), name='SessionUpdate'),
    re_path(r'^sessions/(?P<pk>[0-9]+)/delete/$', views.SessionDelete.as_view(), name='SessionDelete'),
    # sessions - sets
    re_path(r'^sessions/(?P<pk>[0-9]+)/sets/$', views.SetListBySession.as_view(), name='SetListBySession'),
    re_path(r'^sessions/(?P<pk>[0-9]+)/sets/new$', views.SetCreate.as_view(), name='SetCreate'),
    # exercises (addressed by slug rather than pk)
    path('exercises/', views.ExerciseList.as_view(), name='ExerciseList'),
    path('exercises/new/', views.ExerciseCreate.as_view(), name='ExerciseCreate'),
    re_path(r'^exercises/(?P<slug>[-\w]*)/$', views.ExerciseDetail.as_view(), name='ExerciseDetail'),
    re_path(r'^exercises/(?P<slug>[-\w]*)/update/$', views.ExerciseUpdate.as_view(), name='ExerciseUpdate'),
    re_path(r'^exercises/(?P<slug>[-\w]*)/delete/$', views.ExerciseDelete.as_view(), name='ExerciseDelete'),
    # exercises - sets
    re_path(r'^exercises/(?P<slug>[-\w]*)/sets/$', views.SetListByExercise.as_view(), name='SetListByExercise'),
    ############################
    # django rest framework urls
    ############################
    # sessions
    path('api/sessions/', api_views.SessionList.as_view(), name='SessionListAPI'),
    re_path(r'^api/sessions/(?P<pk>[0-9]+)/$', api_views.SessionDetail.as_view(), name='SessionDetailAPI'),
    #sets
    path('api/sets/', api_views.SetList.as_view(), name='SetListAPI'),
    re_path(r'^api/sets/(?P<pk>[0-9]+)/$', api_views.SetDetail.as_view(), name='SetDetailAPI'),
    # exercises (the API addresses exercises by pk, unlike the HTML views)
    path('api/exercises/', api_views.ExerciseList.as_view(), name='ExerciseListAPI'),
    re_path(r'^api/exercises/(?P<pk>[0-9]+)/$', api_views.ExerciseDetail.as_view(), name='ExerciseDetailAPI'),
    ############################
    # graphql urls ?
    ############################
]
|
"""
Generates sample directories for FID scoring
"""
import argparse
import logging
import pickle
from pathlib import Path
import imageio
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from model.vae import VAE
from model.hm import HM
from dataset.celeba import build_datasets, IM_DIMS
def _build_parser():
parser = argparse.ArgumentParser(description="Train various models")
parser.add_argument('save_path', type=Path,
help='Path to saved model')
parser.add_argument('im_path', type=Path,
help='Path to training images')
# TODO: save model type with .pt files
parser.add_argument('--model', type=str, choices=['vae', 'hm'], default='vae',
help='Type of model to train, either vae for variational autoencoder or hm for helmholtz machine. ' +\
'Defaults to vae')
parser.add_argument('--samples', type=int, default=10000,
help='number of samples to generate. Defaults to 10000')
parser.add_argument('--batch', type=int, default=1000,
help='number of samples to hold in memory at one time. Defaults to 1000.')
parser.add_argument('--out', type=Path, default='out/',
help='output directory for samples and true images. Defaults to out/')
return parser
# TODO: debug FID with small dataset size
# TODO: doesn't work with GPU
def _sample_images(model, batch_size, num_images, dataset, out_dir, workers=4):
    """Write up to *num_images* (reconstruction, original) image pairs to
    out_dir/sample and out_dir/image as numbered PNGs.

    Fixes: removed the unused ``total`` counter; replaced the racy
    exists()/mkdir() pairs with mkdir(exist_ok=True); closed the progress
    bar when the dataset is exhausted early (it was only closed on the
    num_images path); removed the stray space in ``. numpy()``.
    """
    batch_size = min(batch_size, num_images)
    samp_dir = out_dir / 'sample'
    im_dir = out_dir / 'image'
    samp_dir.mkdir(exist_ok=True)
    im_dir.mkdir(exist_ok=True)
    pbar = tqdm(total=num_images)
    with torch.no_grad():
        loader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=workers,
                            pin_memory=torch.cuda.is_available())
        for images in loader:
            samp = model.reconstruct(images)
            for i, reco in enumerate(samp):
                # pbar.n doubles as the running output index.
                idx = pbar.n
                samp_path = samp_dir / ('%d.png' % idx)
                im_path = im_dir / ('%d.png' % idx)
                real_im = images[i].reshape(*IM_DIMS, 3).numpy()
                reco = reco.reshape(*IM_DIMS, 3).numpy()
                imageio.imwrite(samp_path, _to_rgb(reco))
                imageio.imwrite(im_path, _to_rgb(real_im))
                pbar.update(1)
                if pbar.n == pbar.total:
                    pbar.close()
                    return
    # Dataset exhausted before num_images samples were produced.
    pbar.close()
# def _sample_images(model, batch_size, num_images, dataset, out_dir):
# batch_size = min(batch_size, num_images)
# total = 0
# samp_dir = out_dir / 'sample'
# im_dir = out_dir / 'image'
# if not samp_dir.exists():
# samp_dir.mkdir()
# if not im_dir.exists():
# im_dir.mkdir()
# pbar = tqdm(total=num_images)
# with torch.no_grad():
# while total < num_images:
# samp = model.sample(batch_size).reshape(batch_size, *(IM_DIMS), 3)
# samp = samp.numpy()
# for i, image in enumerate(samp):
# idx = total + i
# samp_path = samp_dir / ('%d.png' % idx)
# im_path = im_dir / ('%d.png' % idx)
# real_im = dataset[idx].reshape(*(IM_DIMS), 3)
# real_im = real_im.numpy()
# imageio.imwrite(samp_path, _to_rgb(image))
# imageio.imwrite(im_path, _to_rgb(real_im))
# pbar.update(1)
# total += batch_size
# batch_size = min(batch_size, num_images - total)
# pbar.close()
def _to_rgb(im):
return np.uint8(im * 255)
def main():
    """Entry point: load a trained model checkpoint and dump sample/real image pairs."""
    args = _build_parser().parse_args()
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
    # Prefer the GPU when one is available.
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    if args.model == 'vae':
        model = VAE().double().to(device)
    elif args.model == 'hm':
        model = HM().double().to(device)
    else:
        logging.critical('model unimplemented: %s' % args.model)
        return
    if not args.out.exists():
        args.out.mkdir(parents=True)
    # train_test_split=1 puts the whole dataset into the test split.
    _, test_ds = build_datasets(args.im_path, train_test_split=1)
    ckpt = torch.load(args.save_path, map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    model.eval()
    _sample_images(model, args.batch, args.samples, test_ds, args.out)
if __name__ == '__main__':
    main()
import socket
import threading
import queue
import sys
from my_AES import AESCipher
from my_RSA import RSAClass
class ClientCom:
    """TCP client that performs an RSA key exchange, then exchanges
    AES-encrypted messages; received data is pushed onto *msg_q* by a
    background thread started from switch_keys().

    Fix: the bare ``except:`` clauses (which also swallowed
    KeyboardInterrupt and SystemExit) were narrowed to ``except Exception``.
    """

    def __init__(self, server_ip, port, msg_q):
        self.running = False        # True while the socket is believed healthy
        self.my_socket = socket.socket()
        self.server = server_ip
        self.port = port
        self.msg_q = msg_q          # queue receiving decoded incoming messages
        self.key = None             # AES session key, set by switch_keys()
        self.cry = None             # AESCipher for the session, set by switch_keys()
        self.connect()

    def sendTo(self, msg):
        """Send *msg* as plain UTF-8 bytes; mark the connection dead on failure."""
        try:
            # NOTE(review): send() may transmit fewer bytes than given;
            # sendall() would be safer — confirm the protocol tolerates this.
            self.my_socket.send(msg.encode())
        except Exception:
            self.running = False

    def sendEnc(self, msg):
        """Encrypt *msg* with the session cipher and send it."""
        try:
            msg = self.cry.encrypt(msg)
            self.my_socket.send(msg)
        except Exception as e:
            print(2222, str(e))
            self.running = False

    def connect(self):
        """Attempt the TCP connection; leave running=False when it fails."""
        try:
            self.my_socket.connect((self.server, self.port))
            self.running = True
        except Exception:
            pass

    def recv(self):
        """Receive loop (runs on a worker thread): push decoded data to msg_q."""
        while self.running:
            try:
                data = self.my_socket.recv(1024).decode()
            except Exception:
                self.running = False
            else:
                self.msg_q.put(data)

    def server_status(self):
        """Return whether the connection is currently considered alive."""
        return self.running

    def switch_keys(self):
        """Send our RSA public key, receive the AES session key encrypted
        with it, build the cipher, then start the receive thread."""
        try:
            myRsa = RSAClass()
            public_key = myRsa.get_public_key_pem()
            self.my_socket.send(public_key)
            key = self.my_socket.recv(1024)
            self.key = myRsa.decrypt_msg(key).decode()
            self.cry = AESCipher(self.key)
        except Exception as e:
            print(str(e))
            self.running = False
        threading.Thread(target=self.recv).start()
|
"""
This module should be used to test the parameter and return types of your
functions. Before submitting your assignment, run this type-checker. This
typechecker expects to find files twitterverse_functions.py, small_data.txt,
and typecheck_query.txt in the same folder.
If errors occur when you run this typechecker, fix them before you submit
your assignment.
If no errors occur when you run this typechecker, then the type checks passed.
This means that the function parameters and return types match the assignment
specification, but it does not mean that your code works correctly in all
situations. Be sure to test your code thoroughly before submitting.
"""
import builtins
# Check for use of functions print, input and open.
# Keep references to the real built-ins so this script can still use them
# itself (and restore them at the end) after they are disabled below.
our_print = print
our_input = input
our_open = open
def disable_print(*args):
    # Replacement for print: any call from the student module is an error.
    raise Exception("You must not call built-in function print!")
def disable_input(*args):
    # Replacement for input: any call from the student module is an error.
    raise Exception("You must not call built-in function input!")
def disable_open(*args):
    # Replacement for open: any call from the student module is an error.
    raise Exception("You must not call built-in function open!")
# Disable the built-ins BEFORE importing the student module below.
builtins.print = disable_print
builtins.input = disable_input
builtins.open = disable_open
import twitterverse_functions
# typecheck the twitterverse_functions.py functions
# Type check twitterverse_functions.process_data
# Expected shape: {username: {field: str-or-list}}
open_data_file = our_open('small_data.txt')
result = twitterverse_functions.process_data(open_data_file)
open_data_file.close()
assert isinstance(result, dict), \
    '''process_data should return a dict, but returned {0}''' \
    .format(type(result))
for item in result:
    assert isinstance(item, str), \
        'process_data should return a dict with str keys, ' \
        'but returned a dict with {0} keys'\
        .format(type(item))
    assert isinstance(result[item], dict), \
        'process_data should return a dict with dict values, but returned ' \
        'a dict with {0} values'\
        .format(type(result[item]))
    for key in result[item]:
        assert isinstance(key, str), \
            'process_data should return an inner dict with str keys, ' \
            'but returned an inner dict with {0} keys'\
            .format(type(key))
        assert isinstance(result[item][key], str) or \
            isinstance(result[item][key], list), \
            'process_data should return an inner dict with str or list '\
            'values, but returned an inner dict with {0} values'\
            .format(type(item))
# Type check twitterverse_functions.process_query
# Expected shape: {'search': {...}, 'filter': {...}, 'present': {...}}
open_query_file = our_open('typecheck_query.txt')
result = twitterverse_functions.process_query(open_query_file)
open_query_file.close()
assert isinstance(result, dict), \
    '''process_query should return a dict, but returned {0}''' \
    .format(type(result))
# Query dictionary
assert 'search' in result, '''key 'search' missing from query dictionary'''
assert 'filter' in result, '''key 'filter' missing from query dictionary'''
assert 'present' in result, '''key 'present' missing from query dictionary'''
assert len(result) == 3, '''query dictionary has incorrect length'''
# Search specification
assert len(result['search']) == 2, \
    '''search spec dictionary has incorrect length'''
assert 'username' in result['search'], \
    '''key 'username' missing from search specification dictionary'''
assert isinstance(result['search']['username'], str), \
    "key 'username' should have value of type str, " \
    "but has value of type {0}"\
    .format(type(result['search']['username']))
assert 'operations' in result['search'], \
    '''key 'operations' missing from search specification dictionary'''
assert isinstance(result['search']['operations'], list), \
    "key 'operations' should have value of type list, " \
    "but has value of type {0}"\
    .format(type(result['search']['operations']))
# Filter specification
assert len(result['filter']) == 4, \
    '''filter spec dictionary has incorrect length'''
for item in result['filter']:
    assert item in ['following', 'follower', 'name-includes', \
                    'location-includes'], \
        '''invalid key {0} in filter specification dictionary'''\
        .format(item)
    assert isinstance(result['filter'][item], str), \
        'values in filter specification dictionary should have type str, ' \
        'but has type {0}'\
        .format(type(result['filter'][item]))
# Presentation specification
assert len(result['present']) == 2, \
    '''present spec dictionary has incorrect length'''
assert 'sort-by' in result['present'], \
    '''key 'sort-by' missing from present specification dictionary'''
assert isinstance(result['present']['sort-by'], str), \
    "key 'sort-by' should have value of type str, " \
    "but has value of type {0}"\
    .format(type(result['present']['sort-by']))
assert 'format' in result['present'], \
    '''key 'format' missing from present specification dictionary'''
assert isinstance(result['present']['format'], str), \
    "key 'format' should have value of type str, " \
    "but has value of type {0}"\
    .format(type(result['present']['format']))
# Type check twitterverse_functions.get_search_results
# (the same two-user sample dataset is rebuilt for each of the remaining checks)
twitter_data = {'tomCruise': {'following': ['katieH'], 'name': 'Tom Cruise',
                              'bio': 'Official TomCruise.com crew tweets. ' + \
                                     'We love you guys!\nVisit us at Facebook!',
                              'web': 'http://www.tomcruise.com',
                              'location': 'Los Angeles, CA'},
                'katieH': {'following': [], 'name': 'Katie Holmes',
                           'bio': '', 'web': 'www.tomkat.com', 'location': ''}}
search_spec = {'operations': ['following'], 'username': 'tomCruise'}
result = twitterverse_functions.get_search_results(twitter_data, search_spec)
assert isinstance(result, list), \
    '''get_search_results should return a list, but returned {0}''' \
    .format(type(result))
for item in result:
    assert isinstance(item, str), \
        'get_search_results should return a list of str, ' \
        'but returned a list of {0}'\
        .format(type(item))
# Type check twitterverse_functions.get_filter_results
twitter_data = {'tomCruise': {'following': ['katieH'], 'name': 'Tom Cruise',
                              'bio': 'Official TomCruise.com crew tweets. ' + \
                                     'We love you guys!\nVisit us at Facebook!',
                              'web': 'http://www.tomcruise.com',
                              'location': 'Los Angeles, CA'},
                'katieH': {'following': [], 'name': 'Katie Holmes',
                           'bio': '', 'web': 'www.tomkat.com', 'location': ''}}
filter_spec = {}
result = twitterverse_functions.get_filter_results(twitter_data, ['katieH'], \
                                                   filter_spec)
assert isinstance(result, list), \
    '''get_filter_results should return a list, but returned {0}''' \
    .format(type(result))
for item in result:
    assert isinstance(item, str), \
        'get_filter_results should return a list of str, '\
        'but returned a list of {0}'\
        .format(type(item))
# Type check twitterverse_functions.get_present_string with long format
twitter_data = {'tomCruise': {'following': ['katieH'], 'name': 'Tom Cruise',
                              'bio': 'Official TomCruise.com crew tweets. ' + \
                                     'We love you guys!\nVisit us at Facebook!',
                              'web': 'http://www.tomcruise.com',
                              'location': 'Los Angeles, CA'},
                'katieH': {'following': [], 'name': 'Katie Holmes',
                           'bio': '', 'web': 'www.tomkat.com', 'location': ''}}
present_spec = {'sort-by': 'username', 'format': 'long'}
result = twitterverse_functions.get_present_string(twitter_data, \
                                                   ['katieH', 'tomCruise'], \
                                                   present_spec)
assert isinstance(result, str), \
    '''get_present_string should return a str, but returned {0}''' \
    .format(type(result))
# The exact expected long-format rendering (content checked verbatim below).
long_result = """----------
katieH
name: Katie Holmes
location:
website: www.tomkat.com
bio:
following: []
----------
tomCruise
name: Tom Cruise
location: Los Angeles, CA
website: http://www.tomcruise.com
bio:
Official TomCruise.com crew tweets. We love you guys!
Visit us at Facebook!
following: ['katieH']
----------
"""
assert result == long_result, \
    '''incorrect formatting of presentation string, expected {0}\n \
got {1}\n'''.format(long_result, result)
# Type check twitterverse_functions.get_present_string with short format
twitter_data = {'tomCruise': {'following': ['katieH'], 'name': 'Tom Cruise',
                              'bio': 'Official TomCruise.com crew tweets. ' + \
                                     'We love you guys!\nVisit us at Facebook!',
                              'web': 'http://www.tomcruise.com',
                              'location': 'Los Angeles, CA'},
                'katieH': {'following': [], 'name': 'Katie Holmes',
                           'bio': '', 'web': 'www.tomkat.com', 'location': ''}}
present_spec = {'sort-by': 'username', 'format': 'short'}
result = twitterverse_functions.get_present_string(twitter_data, ['katieH'], \
                                                   present_spec)
assert isinstance(result, str), \
    '''get_present_string should return a str, but returned {0}''' \
    .format(type(result))
short_result = "['katieH']"
assert result == short_result, \
    '''incorrect formatting of presentation string, expected {0}\n \
got {1}\n'''.format(short_result, result)
# Type check and simple test of twitterverse_functions.all_followers
# Dataset: 'a' and 'b' both follow 'c'; 'c' follows nobody.
twitter_data = {'a':{'name':'', 'location':'', 'web':'', \
                     'bio':'', 'following':['c']}, \
                'b':{'name':'', 'location':'', 'web':'', \
                     'bio':'', 'following':['c']}, \
                'c':{'name':'', 'location':'', 'web':'', \
                     'bio':'', 'following':[]}}
result = twitterverse_functions.all_followers(twitter_data, 'c')
assert isinstance(result, list), \
    '''all_followers should return a list, but returned {0}'''.\
    format(type(result))
assert 'a' in result and 'b' in result and len(result) == 2, \
    '''all_followers should return ['a', 'b'] but returned {0}'''\
    .format(result)
our_print("""
Yippee! The type checker program completed without error.
This means that the functions in twitterverse_functions.py:
- are named correctly,
- take the correct number of arguments, and
- return the correct types
This does NOT mean that the functions are correct!
Be sure to thoroughly test your functions yourself before submitting.""")
# Restore functions.
builtins.print = our_print
builtins.input = our_input
builtins.open = our_open
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import unittest.mock
from datetime import datetime
import pytest
from airflow import DAG
from airflow.models import DagBag
from airflow.models.expandinput import EXPAND_INPUT_EMPTY
from airflow.models.serialized_dag import SerializedDagModel
from airflow.operators.empty import EmptyOperator
from airflow.security import permissions
from tests.test_utils.api_connexion_utils import assert_401, create_user, delete_user
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
@pytest.fixture(scope="module")
def configured_app(minimal_app_for_api):
    """Module-scoped app fixture with two API test users.

    Creates "test" (read access to DAGs, DAG runs and task instances) and
    "test_no_permissions" (no permissions at all); both are deleted again
    after the module's tests finish.
    """
    app = minimal_app_for_api
    create_user(
        app,  # type: ignore
        username="test",
        role_name="Test",
        permissions=[
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
            (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        ],
    )
    create_user(app, username="test_no_permissions", role_name="TestNoPermissions")  # type: ignore
    yield app
    # Teardown: remove both users created above.
    delete_user(app, username="test")  # type: ignore
    delete_user(app, username="test_no_permissions")  # type: ignore
class TestTaskEndpoint:
    """Shared constants and fixtures for the /dags/{dag_id}/tasks API tests."""

    dag_id = "test_dag"
    mapped_dag_id = "test_mapped_task"
    task_id = "op1"
    task_id2 = "op2"
    task_id3 = "op3"
    mapped_task_id = "mapped_task"
    task1_start_date = datetime(2020, 6, 15)
    task2_start_date = datetime(2020, 6, 16)

    @pytest.fixture(scope="class")
    def setup_dag(self, configured_app):
        """Build one plain DAG (op1 >> op2) and one DAG with a mapped task,
        then install both in a fresh DagBag on the app."""
        with DAG(self.dag_id, start_date=self.task1_start_date, doc_md="details") as dag:
            task1 = EmptyOperator(task_id=self.task_id, params={"foo": "bar"})
            task2 = EmptyOperator(task_id=self.task_id2, start_date=self.task2_start_date)

        with DAG(self.mapped_dag_id, start_date=self.task1_start_date) as mapped_dag:
            EmptyOperator(task_id=self.task_id3)
            # Use the private _expand() method to avoid the empty kwargs check.
            # We don't care about how the operator runs here, only its presence.
            EmptyOperator.partial(task_id=self.mapped_task_id)._expand(EXPAND_INPUT_EMPTY, strict=False)

        task1 >> task2
        # os.devnull keeps DagBag from scanning any real dags folder.
        dag_bag = DagBag(os.devnull, include_examples=False)
        dag_bag.dags = {dag.dag_id: dag, mapped_dag.dag_id: mapped_dag}
        configured_app.dag_bag = dag_bag  # type:ignore

    @staticmethod
    def clean_db():
        # Start each test from an empty runs/dags/serialized-dags state.
        clear_db_runs()
        clear_db_dags()
        clear_db_serialized_dags()

    @pytest.fixture(autouse=True)
    def setup_attrs(self, configured_app, setup_dag) -> None:
        """Per-test setup: clean DB and expose the app plus a test client."""
        self.clean_db()
        self.app = configured_app
        self.client = self.app.test_client()  # type:ignore

    def teardown_method(self) -> None:
        self.clean_db()
class TestGetTask(TestTaskEndpoint):
    """Tests for GET /dags/{dag_id}/tasks/{task_id}."""

    def test_should_respond_200(self):
        # Full expected serialization of the plain task `op1`.
        expected = {
            "class_ref": {
                "class_name": "EmptyOperator",
                "module_path": "airflow.operators.empty",
            },
            "depends_on_past": False,
            "downstream_task_ids": [self.task_id2],
            "end_date": None,
            "execution_timeout": None,
            "extra_links": [],
            "operator_name": "EmptyOperator",
            "owner": "airflow",
            "params": {
                "foo": {
                    "__class": "airflow.models.param.Param",
                    "value": "bar",
                    "description": None,
                    "schema": {},
                }
            },
            "pool": "default_pool",
            "pool_slots": 1.0,
            "priority_weight": 1.0,
            "queue": "default",
            "retries": 0.0,
            "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
            "retry_exponential_backoff": False,
            "start_date": "2020-06-15T00:00:00+00:00",
            "task_id": "op1",
            "template_fields": [],
            "trigger_rule": "all_success",
            "ui_color": "#e8f7e4",
            "ui_fgcolor": "#000",
            "wait_for_downstream": False,
            "weight_rule": "downstream",
            "is_mapped": False,
        }
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={"REMOTE_USER": "test"}
        )
        assert response.status_code == 200
        assert response.json == expected

    def test_mapped_task(self):
        # A mapped task is reported with is_mapped=True and empty params.
        expected = {
            "class_ref": {"class_name": "EmptyOperator", "module_path": "airflow.operators.empty"},
            "depends_on_past": False,
            "downstream_task_ids": [],
            "end_date": None,
            "execution_timeout": None,
            "extra_links": [],
            "is_mapped": True,
            "operator_name": "EmptyOperator",
            "owner": "airflow",
            "params": {},
            "pool": "default_pool",
            "pool_slots": 1.0,
            "priority_weight": 1.0,
            "queue": "default",
            "retries": 0.0,
            "retry_delay": {"__type": "TimeDelta", "days": 0, "microseconds": 0, "seconds": 300},
            "retry_exponential_backoff": False,
            "start_date": "2020-06-15T00:00:00+00:00",
            "task_id": "mapped_task",
            "template_fields": [],
            "trigger_rule": "all_success",
            "ui_color": "#e8f7e4",
            "ui_fgcolor": "#000",
            "wait_for_downstream": False,
            "weight_rule": "downstream",
        }
        response = self.client.get(
            f"/api/v1/dags/{self.mapped_dag_id}/tasks/{self.mapped_task_id}",
            environ_overrides={"REMOTE_USER": "test"},
        )
        assert response.status_code == 200
        assert response.json == expected

    def test_should_respond_200_serialized(self):
        # Same task, but served from the serialized-DAG representation:
        # write the DAG to the DB, then patch the app's dag_bag with a
        # read-from-db DagBag for the duration of the request.
        # Get the dag out of the dagbag before we patch it to an empty one
        SerializedDagModel.write_dag(self.app.dag_bag.get_dag(self.dag_id))
        dag_bag = DagBag(os.devnull, include_examples=False, read_dags_from_db=True)
        patcher = unittest.mock.patch.object(self.app, "dag_bag", dag_bag)
        patcher.start()
        expected = {
            "class_ref": {
                "class_name": "EmptyOperator",
                "module_path": "airflow.operators.empty",
            },
            "depends_on_past": False,
            "downstream_task_ids": [self.task_id2],
            "end_date": None,
            "execution_timeout": None,
            "extra_links": [],
            "operator_name": "EmptyOperator",
            "owner": "airflow",
            "params": {
                "foo": {
                    "__class": "airflow.models.param.Param",
                    "value": "bar",
                    "description": None,
                    "schema": {},
                }
            },
            "pool": "default_pool",
            "pool_slots": 1.0,
            "priority_weight": 1.0,
            "queue": "default",
            "retries": 0.0,
            "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
            "retry_exponential_backoff": False,
            "start_date": "2020-06-15T00:00:00+00:00",
            "task_id": "op1",
            "template_fields": [],
            "trigger_rule": "all_success",
            "ui_color": "#e8f7e4",
            "ui_fgcolor": "#000",
            "wait_for_downstream": False,
            "weight_rule": "downstream",
            "is_mapped": False,
        }
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}", environ_overrides={"REMOTE_USER": "test"}
        )
        assert response.status_code == 200
        assert response.json == expected
        patcher.stop()

    def test_should_respond_404(self):
        task_id = "xxxx_not_existing"
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks/{task_id}", environ_overrides={"REMOTE_USER": "test"}
        )
        assert response.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks/{self.task_id}")
        assert_401(response)

    def test_should_raise_403_forbidden(self):
        # NOTE(review): this hits the /tasks collection URL rather than
        # /tasks/{task_id}; the 403 outcome is the same, but confirm which
        # endpoint was intended.
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={"REMOTE_USER": "test_no_permissions"}
        )
        assert response.status_code == 403
class TestGetTasks(TestTaskEndpoint):
    """Tests for GET /dags/{dag_id}/tasks (the task collection endpoint)."""

    def test_should_respond_200(self):
        # Both tasks of the plain DAG, in default order, plus total_entries.
        expected = {
            "tasks": [
                {
                    "class_ref": {
                        "class_name": "EmptyOperator",
                        "module_path": "airflow.operators.empty",
                    },
                    "depends_on_past": False,
                    "downstream_task_ids": [self.task_id2],
                    "end_date": None,
                    "execution_timeout": None,
                    "extra_links": [],
                    "operator_name": "EmptyOperator",
                    "owner": "airflow",
                    "params": {
                        "foo": {
                            "__class": "airflow.models.param.Param",
                            "value": "bar",
                            "description": None,
                            "schema": {},
                        }
                    },
                    "pool": "default_pool",
                    "pool_slots": 1.0,
                    "priority_weight": 1.0,
                    "queue": "default",
                    "retries": 0.0,
                    "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
                    "retry_exponential_backoff": False,
                    "start_date": "2020-06-15T00:00:00+00:00",
                    "task_id": "op1",
                    "template_fields": [],
                    "trigger_rule": "all_success",
                    "ui_color": "#e8f7e4",
                    "ui_fgcolor": "#000",
                    "wait_for_downstream": False,
                    "weight_rule": "downstream",
                    "is_mapped": False,
                },
                {
                    "class_ref": {
                        "class_name": "EmptyOperator",
                        "module_path": "airflow.operators.empty",
                    },
                    "depends_on_past": False,
                    "downstream_task_ids": [],
                    "end_date": None,
                    "execution_timeout": None,
                    "extra_links": [],
                    "operator_name": "EmptyOperator",
                    "owner": "airflow",
                    "params": {},
                    "pool": "default_pool",
                    "pool_slots": 1.0,
                    "priority_weight": 1.0,
                    "queue": "default",
                    "retries": 0.0,
                    "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
                    "retry_exponential_backoff": False,
                    "start_date": "2020-06-16T00:00:00+00:00",
                    "task_id": self.task_id2,
                    "template_fields": [],
                    "trigger_rule": "all_success",
                    "ui_color": "#e8f7e4",
                    "ui_fgcolor": "#000",
                    "wait_for_downstream": False,
                    "weight_rule": "downstream",
                    "is_mapped": False,
                },
            ],
            "total_entries": 2,
        }
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks", environ_overrides={"REMOTE_USER": "test"}
        )
        assert response.status_code == 200
        assert response.json == expected

    def test_get_tasks_mapped(self):
        # DAG with one mapped task (is_mapped=True) and one plain task.
        expected = {
            "tasks": [
                {
                    "class_ref": {"class_name": "EmptyOperator", "module_path": "airflow.operators.empty"},
                    "depends_on_past": False,
                    "downstream_task_ids": [],
                    "end_date": None,
                    "execution_timeout": None,
                    "extra_links": [],
                    "is_mapped": True,
                    "operator_name": "EmptyOperator",
                    "owner": "airflow",
                    "params": {},
                    "pool": "default_pool",
                    "pool_slots": 1.0,
                    "priority_weight": 1.0,
                    "queue": "default",
                    "retries": 0.0,
                    "retry_delay": {"__type": "TimeDelta", "days": 0, "microseconds": 0, "seconds": 300},
                    "retry_exponential_backoff": False,
                    "start_date": "2020-06-15T00:00:00+00:00",
                    "task_id": "mapped_task",
                    "template_fields": [],
                    "trigger_rule": "all_success",
                    "ui_color": "#e8f7e4",
                    "ui_fgcolor": "#000",
                    "wait_for_downstream": False,
                    "weight_rule": "downstream",
                },
                {
                    "class_ref": {
                        "class_name": "EmptyOperator",
                        "module_path": "airflow.operators.empty",
                    },
                    "depends_on_past": False,
                    "downstream_task_ids": [],
                    "end_date": None,
                    "execution_timeout": None,
                    "extra_links": [],
                    "operator_name": "EmptyOperator",
                    "owner": "airflow",
                    "params": {},
                    "pool": "default_pool",
                    "pool_slots": 1.0,
                    "priority_weight": 1.0,
                    "queue": "default",
                    "retries": 0.0,
                    "retry_delay": {"__type": "TimeDelta", "days": 0, "seconds": 300, "microseconds": 0},
                    "retry_exponential_backoff": False,
                    "start_date": "2020-06-15T00:00:00+00:00",
                    "task_id": self.task_id3,
                    "template_fields": [],
                    "trigger_rule": "all_success",
                    "ui_color": "#e8f7e4",
                    "ui_fgcolor": "#000",
                    "wait_for_downstream": False,
                    "weight_rule": "downstream",
                    "is_mapped": False,
                },
            ],
            "total_entries": 2,
        }
        response = self.client.get(
            f"/api/v1/dags/{self.mapped_dag_id}/tasks", environ_overrides={"REMOTE_USER": "test"}
        )
        assert response.status_code == 200
        assert response.json == expected

    def test_should_respond_200_ascending_order_by_start_date(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks?order_by=start_date",
            environ_overrides={"REMOTE_USER": "test"},
        )
        assert response.status_code == 200
        # Sanity check that the fixture dates really are ascending.
        assert self.task1_start_date < self.task2_start_date
        assert response.json["tasks"][0]["task_id"] == self.task_id
        assert response.json["tasks"][1]["task_id"] == self.task_id2

    def test_should_respond_200_descending_order_by_start_date(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks?order_by=-start_date",
            environ_overrides={"REMOTE_USER": "test"},
        )
        assert response.status_code == 200
        # - means is descending
        assert self.task1_start_date < self.task2_start_date
        assert response.json["tasks"][0]["task_id"] == self.task_id2
        assert response.json["tasks"][1]["task_id"] == self.task_id

    def test_should_raise_400_for_invalid_order_by_name(self):
        response = self.client.get(
            f"/api/v1/dags/{self.dag_id}/tasks?order_by=invalid_task_colume_name",
            environ_overrides={"REMOTE_USER": "test"},
        )
        assert response.status_code == 400
        assert response.json["detail"] == "'EmptyOperator' object has no attribute 'invalid_task_colume_name'"

    def test_should_respond_404(self):
        dag_id = "xxxx_not_existing"
        response = self.client.get(f"/api/v1/dags/{dag_id}/tasks", environ_overrides={"REMOTE_USER": "test"})
        assert response.status_code == 404

    def test_should_raises_401_unauthenticated(self):
        response = self.client.get(f"/api/v1/dags/{self.dag_id}/tasks")
        assert_401(response)
|
# Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Tests that recipes are on their best behavior.
Checks that recipes only import modules from a whitelist. Imports are
generally not safe in recipes if they depend on the platform, since
e.g. you can run a recipe simulation for a Windows recipe on Linux.
"""
# TODO(luqui): Implement lint for recipe modules also.
from __future__ import absolute_import
import re
import types
# Regex patterns (matched with re.match, i.e. anchored at the start only)
# for module names recipes are allowed to import. Platform-dependent
# modules are deliberately absent -- see the module docstring.
MODULES_WHITELIST = [
    r'ast',
    r'base64',
    r'collections',
    r'contextlib',
    r'copy',
    r'datetime',
    r'functools',
    r'google\.protobuf',
    r'hashlib',
    r'itertools',
    r'json',
    r'math',
    r're',
    r'urlparse',
    r'zlib',
]
def ImportsTest(recipe_path, recipe_name, whitelist, universe_view):
    """Tests that recipe_name only uses allowed imports.

    Args:
      recipe_path: path of the recipe (used only in error messages).
      recipe_name: name under which universe_view can load the recipe.
      whitelist: iterable of compiled regex patterns of allowed module names.
      universe_view: loader object exposing load_recipe().

    Yields error-message strings; yields nothing if all imports are allowed.
    """
    recipe = universe_view.load_recipe(recipe_name)
    # .items() instead of the Python-2-only .iteritems(): sorted() gives the
    # same deterministic ordering, and the function stays importable on
    # Python 3 as well.
    for _, val in sorted(recipe.globals.items()):
        if isinstance(val, types.ModuleType):
            module_name = val.__name__
            for pattern in whitelist:
                # NOTE(review): re.match anchors only at the start, so e.g.
                # the whitelist pattern r're' also admits 'requests'. Kept
                # as-is to preserve behavior; consider anchoring with '$'.
                if pattern.match(module_name):
                    break
            else:
                yield ('In %s:\n'
                       ' Non-whitelisted import of %s' %
                       (recipe_path, module_name))
def add_subparser(parser):
    """Register the 'lint' subcommand on the given argparse subparsers object."""
    # TODO(iannucci): merge this with the test command, doesn't need to be top
    # level.
    helpstr = 'Check recipes for stylistic and hygenic issues.'
    subparser = parser.add_parser('lint', help=helpstr, description=helpstr)
    subparser.add_argument(
        '--whitelist', '-w', action='append', default=[],
        help='A regexp matching module names to add to the default whitelist. '
             'Use multiple times to add multiple patterns,')
    subparser.set_defaults(func=main)
def main(package_deps, args):
    """Entry point of the 'lint' command; returns a process exit code.

    Lints every recipe in the universe against the default whitelist plus
    any extra patterns supplied via --whitelist. Returns 1 if any recipe
    imports a non-whitelisted module, 0 otherwise.
    """
    from . import loader

    universe = loader.RecipeUniverse(package_deps, args.package)
    universe_view = loader.UniverseView(universe, package_deps.root_package)
    # Combined default + user-supplied whitelist, compiled once up front.
    whitelist = map(re.compile, MODULES_WHITELIST + args.whitelist)
    errors = []
    for recipe_path, recipe_name in universe_view.loop_over_recipes():
        errors.extend(
            ImportsTest(recipe_path, recipe_name, whitelist, universe_view))
    if errors:
        for line in map(str, errors):
            print line  # Python 2 print statement: this module is Python 2 only.
        return 1
    return 0
|
from datetime import datetime, timedelta
from typing import List
from entities.GpsFix import GpsFix
class LiveStayPoint(object):
    """A stay point: a place where the user lingered between two timestamps.

    Together with Visit, it captures the full spatio-temporal relevance of
    user mobility.

    Attributes:
        latitude: latitude coordinate of the stay point
        longitude: longitude coordinate of the stay point
        arrival_time: arrival time at the stay point
        departure_time: departure time from the stay point
        visit_count: visit count of the stay point
        amount_of_fixes: number of GpsFixes used to compute this stay point
    """

    def __init__(self, latitude, longitude, arrival_time: datetime, departure_time: datetime,
                 visit_count=0,
                 amount_of_fixes=0):
        """Create a new stay point.

        :param latitude: latitude to assign to the stay point
        :param longitude: longitude to assign to the stay point
        :param arrival_time: arrival time at the stay point
        :param departure_time: departure time from the stay point
        :param visit_count: visit count to assign (if applicable)
        :param amount_of_fixes: number of GpsFixes used to compute this stay point
        """
        self.latitude = latitude
        self.longitude = longitude
        self.arrival_time = arrival_time
        self.departure_time = departure_time
        self.visit_count = visit_count
        self.amount_of_fixes = amount_of_fixes

    @staticmethod
    def create_from_sublist(gps_fixes: List[GpsFix], start_index, end_index) -> 'LiveStayPoint':
        """Build a stay point from fixes[start_index..end_index] (inclusive).

        The stay point is centred on the mean coordinates of the sub-list;
        arrival/departure are taken from the first/last fix.

        :param gps_fixes: a list of GpsFix
        :param start_index: first index of the sub-list
        :param end_index: last index of the sub-list (inclusive)
        :return: a LiveStayPoint object
        """
        portion_size = end_index - start_index + 1
        indices = range(start_index, end_index + 1)
        mean_latitude = sum(gps_fixes[i].latitude for i in indices) / portion_size
        mean_longitude = sum(gps_fixes[i].longitude for i in indices) / portion_size
        return LiveStayPoint(mean_latitude, mean_longitude,
                             gps_fixes[start_index].timestamp,
                             gps_fixes[end_index].timestamp,
                             0, portion_size)

    @staticmethod
    def create_from_list(gps_fixes: List[GpsFix]) -> 'LiveStayPoint':
        """Build a stay point covering the whole list of fixes."""
        return LiveStayPoint.create_from_sublist(gps_fixes, 0, len(gps_fixes) - 1)

    def distance_to(self, other: 'LiveStayPoint'):
        """Distance to another LiveStayPoint, in meters.

        Delegates to GpsFix.distance_to by wrapping both stay points in
        throwaway GpsFix objects.
        """
        here = GpsFix(self.latitude, self.longitude, datetime.now(), altitude=0, speed=0, battery_level=0,
                      detected_activity=0)
        there = GpsFix(other.latitude, other.longitude, datetime.now(), altitude=0, speed=0, battery_level=0,
                       detected_activity=0)
        return here.distance_to(there)

    def __str__(self, *args, **kwargs):
        # CSV-ish: lat,lon,arrival,departure,amount_of_fixes
        date_format = '%Y-%m-%d %H:%M:%S'
        arrived = self.arrival_time.strftime(date_format)
        departed = self.departure_time.strftime(date_format)
        return ','.join(str(part) for part in
                        (self.latitude, self.longitude, arrived, departed, self.amount_of_fixes))

    def __eq__(self, other):
        # Equality on coordinates and both timestamps; visit_count and
        # amount_of_fixes are deliberately ignored.
        if not isinstance(other, LiveStayPoint):
            return False
        zero = timedelta(0)
        return (self.latitude == other.latitude
                and self.longitude == other.longitude
                and (self.arrival_time - other.arrival_time) == zero
                and (self.departure_time - other.departure_time) == zero)
|
import json
import csv
import os
import sys
import pandas
from pandas.io.json import json_normalize
import pandas.io.json
# Merge every JSON-array file found in `arg1` into 22combine.json, then
# flatten the combined records into 22result.csv.
arg1 = "/home/urvi/Downloads/samplenestedjson/"
file_list = []
if os.path.exists(arg1):
    for each_file in os.listdir(arg1):
        if each_file.endswith(".json"):
            print("Iteration")
            file_list.append(each_file)
else:
    print("Not found")
    sys.exit()

head = []
with open("22combine.json", "w") as outfile:
    for f in file_list:
        # Fix: os.listdir() returns bare file names, so the path must be
        # joined with the source directory -- opening `f` directly only
        # worked when the CWD happened to be arg1.
        with open(os.path.join(arg1, f), 'rb') as infile:
            file_data = json.load(infile)
        head += file_data
    json.dump(head, outfile)
# The `with` blocks close the files; explicit close() calls are not needed.

with open("22combine.json", 'r') as file1:
    data1 = json.load(file1)
data2 = json_normalize(data1)
data2.to_csv("22result.csv", mode="a", index=False)
|
import json
import graphene
import uuid
from pprint import pprint
class User(graphene.ObjectType):
    """GraphQL user type."""
    id = graphene.ID()
    username = graphene.String()
    n_posts = graphene.Int(required=False)  # number of posts; optional field
# In-memory fixture data standing in for a user database.
users = [
    User(id=uuid.uuid4(), username="Jack", n_posts=0),
    User(id=uuid.uuid4(), username="Bob", n_posts=10),
    User(id=uuid.uuid4(), username="Alice", n_posts=100)
]
class Query(graphene.ObjectType):
    """Root query type: a greeting and a post-count-filtered user list."""
    hello = graphene.String(name=graphene.String())
    users = graphene.List(User, post_threshold=graphene.Int())

    def resolve_hello(self, info, name):
        # Fix: include a separating space so name "Jack" yields "Hello Jack"
        # (the original "Hello" + name produced "HelloJack").
        return "Hello " + name

    def resolve_users(self, info, post_threshold):
        """Return all users with at least post_threshold posts."""
        return [user for user in users if user.n_posts >= post_threshold]
class CreateUser(graphene.Mutation):
    """Mutation that creates a user (in memory only; nothing is persisted)."""

    class Arguments:
        username = graphene.String()

    user = graphene.Field(User)

    def mutate(self, info, username):
        # A fresh UUID is generated for every created user.
        user = User(id=uuid.uuid4(), username=username)
        return CreateUser(user=user)
class Mutations(graphene.ObjectType):
    """Root mutation type."""
    create_user = CreateUser.Field()


schema = graphene.Schema(query=Query, mutation=Mutations)

# Demo: run a createUser mutation and print the raw ExecutionResult.
result = schema.execute('''
mutation {
    createUser(username: "Jack"){
        user {
            username
            id
        }
    }
}
''')
print(result)
""" Binary Search """
def binary_search(arr, item):
arr.sort()
first = 0
last = len(arr)
found = False
while first<=last and not found:
middle = (first+last)//2
if arr[middle] == item:
found = True
else:
if item < arr[middle]:
last = middle-1
else:
first = middle+1
if found:
return found, arr.index(item)
else:
return found
""" Running some tests """
def test():
print(binary_search([1, 2, 3, 45, 84, 100], 100))
print(binary_search([54, 23, 12, 100, 93, 82, 73, 1, 43, 22, 76], 12))
print(binary_search([12, 43, 54, 65, 1, 2, 57, 90, 43, 22], 65))
def main():
    """Script entry point: run the smoke tests."""
    test()


if __name__ == "__main__":
    main()
#!/usr/bin/env python
#Author: Dale Housler
#Date: 10-10-2013
#OS: UNIX
#Program Description: This file should run the os commands
#Last updated: 13-04-2014
#Open each PDB directory of interest (manually)
#Make sure ('export LD_LIBRARY_PATH=/usr/local/lib') is typed at the command
#prompt if running python3.3 and openbabel (applies to chain separate program)
#In this directory:
#run the Chain separate program and store it in this directory
#run the proACT2.py program and save the files in the proACT_run directory
#run the contacts.py program and store the number of polar apolar bonds
import os
from os import listdir
from os.path import isfile,isdir
import shutil #allows files to be copied or moved between dirs
import sys # Allows exit for error messages
#os.system('python3.3 /root/Scorpio2_PDB_Files/SetLibPath.py') # runs the external script to set the library and allow openbabel to run in python3.3 env.
start_directory = os.getcwd()  # remembered so helpers can chdir back here


### Removes the PDB file needed by chimera if option 2 selected
def removePDBRemnants():
    """Delete leftover 8-character *.pdb files from /root/proCLic_package/.

    The 8-character length matches files named like XXXX.pdb (4-char PDB id
    plus the '.pdb' extension) that option 2 copies there for chimera.
    Restores the original working directory afterwards.
    """
    # Fix: os.chdir() returns None, so the original
    # `PDB_dir = os.chdir(...)` bound None and then relied on
    # os.listdir(None) listing the CWD by accident. chdir first, then
    # list the current directory explicitly.
    os.chdir('/root/proCLic_package/')
    pdb_files = [f for f in os.listdir('.') if f.endswith(".pdb")]
    for name in pdb_files:
        if len(name) == 8:
            os.remove(name)
    os.chdir(start_directory)
###
def CheckMultiLigDirs():
    """If multiple ligand directories exist, offer manual vs automatic looping.

    Exits the process (sys.exit) for manual handling or invalid input;
    otherwise may launch the NavigateLigandDirs helper script.
    """
    start_directory = os.getcwd()
    # NOT-set of extensions: entries ending in these are files, so whatever
    # remains is assumed to be a (ligand) directory.
    notFile = ("pdb", "mol2", "csv", "txt", "py","kin","log","rsa") # this is the NOT set only want directories NOT files remove by file type .XXX
    #Get the ligand directories
    directories = [f for f in os.listdir(start_directory) if not f.endswith(notFile)]
    dirLength = len(directories)
    if dirLength > 0:
        for i in range(0,dirLength):
            # NOTE(review): this tests the length of each directory NAME,
            # not the number of directories; it looks like `dirLength > 1`
            # was intended -- confirm before changing.
            if len(directories[i]) > 1:
                print("There are multiple ligand directories, You can either: ")
                print("1. Change dir (cd) to the ligand of interest manually, or")
                print("2. Loop through each ligand directory automatically.")
                choice = input()
                if choice == "1":
                    sys.exit(1)
                if choice == "2":
                    os.system('python3.3 /root/proCLic_package/NavigateLigandDirs.py')
                else:
                    print("Invalid Seletion.")
                    sys.exit(1)
# Interactive top-level menu. Each option shells out to a helper script via
# python3.3; all paths are hard-coded to this machine's layout.
loop = False
if (loop == False):
    ###PROGRAM MENU###
    print("\nPROGRAM MENU")
    print("1. Run Chain Separate on the PDB file.\n2. Run Chimera.\n3. Run Directory Manager.\n4. Generate pro_CLic counts - Active Site.\n5. Generate pro_CLic counts - Change Unbound to Bound.\n")
    print("MISCELLANEOUS PROGRAMS")
    print("I. Count Residues\nII. Mol2 Fixer\nIII. Create Conformation Directories\n")
    print("q to quit")
    menu = input()
    path = os.getcwd()  # directory the user started in; restored before each action
    ###Run chain separate program###
    if (menu == "1"):
        removePDBRemnants()
        os.chdir(path)
        os.system('python3.3 /root/proCLic_package/ChainSeparate2PDB_PtnLigUNIX_dir.py')
    #else:
    # loop = True
    ###Run Chimera###
    if (menu == "2"):
        #This runs chimera
        ###Go into the pdb dir copy the original pdb file, dump it in the pro_CLic packages folder
        PDB_dir = os.getcwd()
        print(PDB_dir)
        os.chdir(path)
        # Copy every 8-character *.pdb (XXXX.pdb) into the package folder
        # so chimera's importPDB.py can find it.
        PDBfile = [f for f in os.listdir(PDB_dir) if f.endswith(".pdb")]
        i = 0
        for i in range(len(PDBfile)):
            if (len(PDBfile[i])) == 8:
                shutil.copy(PDBfile[i],'/root/proCLic_package/')
            i += 1
        #RUN CHIMERA
        os.chdir ('/root/chimera/bin/')
        os.system('./chimera --nogui /root/proCLic_package/importPDB.py')
        #remove the pdb file from the proCLic_Package folder
    ###Run Directory Manager###
    if (menu == "3"):
        removePDBRemnants()
        os.chdir(path)
        CheckMultiLigDirs()
        os.system ('python3.3 /root/proCLic_package/DirectoryManagerTypeIII.py')
    ### Run the binding waters program to get the Polar Apolar bond counts, the HOH binding water counts and generate the log file ###
    if (menu == "4"):
        removePDBRemnants()
        os.chdir(path)
        CheckMultiLigDirs()
        os.system ('python3.3 /root/proCLic_package/pro_CLic_runDirectories.py')
    if (menu == "5"):
        os.chdir(path)
        os.system('python3.3 /root/proCLic_package/Phase2/ChangeInContactsLog.py')
    ####################
    ###MISCELLANEOUS####
    if (menu == "i") or (menu == "I") :
        os.chdir(path)
        os.system('python3.3 /root/proCLic_package/Misc/CountResidues.py')
    if (menu == "ii") or (menu == "II"):
        os.chdir(path)
        os.system('python3.3 /root/proCLic_package/Misc/Mol2Fixer.py')
    if (menu == "iii") or (menu == "III"):
        os.chdir(path)
        os.system('python3.3 /root/proCLic_package/Misc/conformationFolders.py')
    ####################
    else:
        # NOTE(review): this else pairs with the "iii" test just above, so
        # any other input (including "q") only sets loop = True; there is no
        # enclosing while-loop, so the script simply ends here. Confirm
        # whether a re-prompt loop was intended.
        loop = True
|
#
# calculator for cross-sectional properties
#
# x is the axial direction and y,z and the cross-sectional axes
#
import numpy as np
# ===================================================================
# solid circle
# ===================================================================
# Hollow circular cross-section properties (Python 2 script).
density = 2700 # SI [kg/m3] Aluminum
length = 1 # SI [m]
outerDia = 2*7.89206e-2 #10 * 1.e-2 # SI [m]
innerDia = 0.85*outerDia # SI [m]
#innerDia = 5 * 1.e-2 # SI [m]
r2 = 0.5*outerDia  # outer radius
r1 = 0.5*innerDia  # inner radius
area = np.pi*(r2*r2-r1*r1)  # annulus area [m2]
# Polar second moment about the axial (x) axis; Iy = Iz = Ix/2 for a circle.
Ix = np.pi/2*(r2**4 - r1**4)
Iy = 0.5*Ix
Iz = Iy
J = np.pi/2*(r2**4 - r1**4)  # torsion constant, equals Ix for a circular section
# NOTE(review): the labels below say 'kg-m2' for Iy/Iz/J, but these are
# area moments of inertia with units m^4 -- the label text looks wrong.
print ' - Density %10.4e kg/m3' % density
print ' - Length %10.4e m' % length
print ' - Outer diamater %10.4e m' % outerDia
print ' - Inner diamater %10.4e m' % innerDia
print ' - Area %10.4e m2' % area
print ' - Iy %10.4e kg-m2' % Iy
print ' - Iz %10.4e kg-m2' % Iz
#print ' - Ix %10.4e kg-m2' % Ix
print ' - J %10.4e kg-m2' % J
print ' - Max y dimension %10.4e m' % r2
print ' - Max z dimension %10.4e m' % r2
|
#! /usr/local/bin/python
# runs extract.py and compiles and runs nn-nlp.c
# Anthony Pasqualoni
# Independent Study: Neural Networks and Pattern Recognition
# Adviser: Dr. Hrvoje Podnar, SCSU
# June 27, 2006
import os
import random
import sys
# amount of runs: first CLI argument, default 1
if (len(sys.argv) > 1):
    runs = sys.argv[1]
else:
    runs = 1
runs = int(runs)
# Fixed seed so the "random" parameter sweeps are reproducible run-to-run.
random.seed(1)
arg = [0,0,0,0,0,0,0,0]
for i in range(runs):
    # random parameters for threshold values in extract.py:
    # (used only if use_rand > 0 in extract.py)
    # Pairs of (threshold, weight?) ranges -- see extract.py for semantics.
    arg[0] = (random.uniform(1.2,8.0))
    arg[1] = (random.uniform(0.2,0.7))
    arg[2] = (random.uniform(3.0,8.0))
    arg[3] = (random.uniform(0.2,0.7))
    arg[4] = (random.uniform(4.0,10.0))
    arg[5] = (random.uniform(0.2,0.7))
    arg[6] = (random.uniform(4.0,10.0))
    arg[7] = (random.uniform(0.2,0.7))
    # print random parameters to stdout:
    if (len(sys.argv) > 1):
        out = "parameters: \n"
        for j in range(len(arg)):
            out += str(arg[j])
            # Two values per line: space after even index, newline after odd.
            if not (j%2):
                out += " "
            else:
                out += "\n"
        os.system("echo -n '" + out + "'")
    # run feature extraction script:
    cmd = "python extract.py "
    for j in range(len(arg)):
        cmd += str(arg[j]) + " "
    cmd += " > extract.out"
    print cmd
    os.system(cmd)
    # exit(0)
    # compile and run neural network:
    os.system("gcc nn-nlp.c -lm")
    os.system("./a.out")
|
import feedparser
from sys import argv
# Fetch three Spanish news RSS feeds and keep the first `w` entries of each
# (w = first CLI argument).
abc=feedparser.parse('https://www.abc.es/rss/feeds/abc_EspanaEspana.xml')
veintem=feedparser.parse('https://www.20minutos.es/rss/')
rtve=feedparser.parse('http://api2.rtve.es/rss/temas_espana.xml')
w=int(argv[1])
noticias_abc = abc['entries'][:w]
noticias_veintem = veintem['entries'][:w]
noticias_rtve = rtve['entries'][:w]
i=0
# Prompt for an outlet up to... NOTE(review): `i` is only incremented on
# invalid input; the `i+=1` lines after each `break` below are unreachable,
# so valid selections never advance `i` and the loop re-prompts forever
# until an empty line ("Saliendo") or 3 invalid inputs. Confirm intent.
while i<3:
    entry=input("Elige un noticiero: ")
    entry2=entry.lower()
    if entry2!="":
        if entry2=="abc" or entry2=="rtve" or entry2=="20m" or entry2=="20minutos" or entry2=="20min":
            if entry2=="abc":
                n=0
                while n <= w:
                    print(" --- " + noticias_abc[n].title + " --- ")
                    if n == (w-1):
                        break
                        i+=1  # unreachable (after break)
                    else:
                        n+=1
            else:
                if entry2=="rtve":
                    o=0
                    while o <= w:
                        print(" --- " + noticias_rtve[o].title + " --- ")
                        if o == (w-1):
                            break
                            i+=1  # unreachable (after break)
                        else:
                            o+=1
                else:
                    if entry2=="20m" or entry2=="20min" or entry2=="20minutos":
                        k=0
                        while k <= w:
                            print(" --- " + noticias_veintem[k].title + " --- ")
                            if k == (w-1):
                                break
                                i+=1  # unreachable (after break)
                            else:
                                k+=1
        else:
            print("No ha elegido un noticiero valido.")
            i+=1
    else:
        # Empty input exits the program.
        print("Saliendo... ")
        break
from analysis.blasting import blast_record_set
__author__ = 'GG'
class GenomeSet(object):
    """Store data relative to a whole genome or set of contigs."""

    def main(self):
        # Simple self-identification hook (Python 2 print statement).
        print "This is a GenomeSet object."

    def __init__(self, in_file, name):
        self.in_file = in_file    # path to the source sequence file
        self.name = name          # identifier for this genome / contig set
        self.match_sets = []      # one {'run_id', 'matches'} dict per blast run

    def run_blast(self, blast_run):
        """Blast this genome with blast_run's query and record the matches."""
        query_segs = blast_run.query.query_segs
        blast_prefs = blast_run.parameters
        matches_dict = blast_record_set(self.name, query_segs, blast_prefs)
        self.match_sets.append({'run_id': blast_run.run_id,
                                'matches': matches_dict})
class FisherQuery(object):
    """Store data relative to a query used to blast a genome set."""

    def main(self):
        # Simple self-identification hook (Python 2 print statement).
        print "This is a FisherQuery object."

    def __init__(self, query_id, query_segs, query_file):
        self.query_id = query_id      # identifier of the query
        self.query_segs = query_segs  # query segments passed to blast
        self.query_file = query_file  # path to the query sequence file
class BlastRun(object):
    """Store data relative to a blast run."""

    def main(self):
        # Simple self-identification hook (Python 2 print statement).
        print "This is a BlastRun object."

    def __init__(self, run_id, query, parameters):
        self.run_id = run_id          # identifier of this run
        self.query = query            # FisherQuery used for the run
        self.parameters = parameters  # blast preference/parameter set
class GenomicStrip(object):
    """Store data from a single DNA entity."""

    def main(self):
        # Simple self-identification hook (Python 2 print statement).
        print "This is a GenomicStrip object."

    def __init__(self, index, name, seqfile, offset, ordinal):
        self.index = index        # positional index of the strip
        self.name = name          # strip name
        self.seqfile = seqfile    # path to the sequence file
        self.offset = offset      # coordinate offset of this strip
        self.ordinal = ordinal    # ordering key among strips
        self.seg_sets = []        # segment sets attached to this strip

    def load_seqfile_contents(self):
        """Load the strip's sequence file as a FASTA SeqRecord and return it."""
        # get sequence file contents
        # Local import keeps the heavy dependency out of module import time.
        from methods.analytics.sequence_file_processes import ensure_fasta,\
            load_fasta
        self.fasta_f = ensure_fasta(self.seqfile)
        self.seqrecord = load_fasta(self.fasta_f)
        return self.seqrecord

    def add_segments_set(self, segment_set):
        """Attach segment data from a coordinates file; returns self (fluent)."""
        # store segment data from coordinates file
        self.segments = segment_set
        return self
from oldowan.mtconvert.seq2sites import seq2sites
from oldowan.polymorphism import Polymorphism
def test_normal_polyC():
    """Normal Poly C stretch at end of HVR3.

    Seq:   CAAAGACACCCCCCACA
    rCRS:  CAAAGACACCCCCCACA
    Sites: <None>

    The query matches rCRS exactly, so no polymorphisms are expected.
    """
    seq = 'CAAAGACACCCCCCACA'
    result = seq2sites(seq)
    assert len(result) == 0
def test_expanded_polyC():
    """Expanded Poly C stretch at end of HVR3.

    The query carries six extra C's relative to the rCRS poly-C run, so
    seq2sites should report the insertions 573.1C through 573.6C.

    NOTE(review): the original docstring showed a 3-C example
    (573.1C-573.3C) with a shorter sequence; the code below actually tests
    six inserted C's -- docstring corrected to match the code.
    """
    a = Polymorphism(573, 1, 'C')
    b = Polymorphism(573, 2, 'C')
    c = Polymorphism(573, 3, 'C')
    d = Polymorphism(573, 4, 'C')
    e = Polymorphism(573, 5, 'C')
    f = Polymorphism(573, 6, 'C')
    seq = 'ACCCCATACCCCGAACCAACCAAACCCCAAAGACACCCCCCCCCCCCACA'
    result = seq2sites(seq)
    assert len(result) == 6
    assert a in result
    assert b in result
    assert c in result
    assert d in result
    assert e in result
    assert f in result
|
import os
import json
from hotbox_designer.reader import HotboxWidget
from hotbox_designer.data import load_templates, load_json
from hotbox_designer.manager import (
launch_manager, initialize, show, hide, switch, load_hotboxes) |
"""
UTILITIES FOR SALES TREND ANALYSIS
CLASSES:
-- ImportSalesData(product_id)
-- SalesTrendsDF(ts, period_wks, end_date=None, MA_params=None,
exp_smooth_params=None, normed=True)
-- RankProductsPlacesPlaces(product_stats_df, N_results=None)
MAJOR FUNCTIONS:
-- SalesStatsDF(product_IDs, period_wks, end_date=None, MA_params=None,
exp_smooth_params=None, normed=True, compute_on_sales=True)
-- CompTrendsDF(product_IDs, period_wks, end_date=None, MA_param=None,
exp_smooth_param=None, shifted=False, normed=False,
compute_on_sales=True)
A LA CARTE FUNCTIONS:
-- compute_rolling_avg(df, window_wks, data_col='ttl_sales')
-- slice_timeseries(data, period_wks, end_date=None)
-- norm_Series(ts)
-- trend_AUC(ts, normalize=False)
-- add_rolling_avg_col(df, window_wks, data_col='ttl_sales')
"""
from datetime import datetime
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from collections import OrderedDict
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
from id_dict import (strain_dict, names_formatted, locations_dict,
product_name_from_ID, locations_name_from_ID)
class ImportSalesData(object):
    """
    Query weekly aggregated sales data from postgres and import to pandas data objects.
    Initialize with any one of the following filter options:
    A. ONE product ID (int) or product name (string)
    B. COMBINATION OF ONE product (ID or name) AND NO MORE THAN ONE location (ID or name)
       OR city (str) OR zipcode (5-digit int)
    C. OMIT product AND SPECIFY NO MORE THAN ONE location OR city OR zipcode
    D. DEFAULT (all filters = None), imports sales data aggregated statewide for all products
    To retain data at original weekly frequency (versus daily), initialize with
    upsample=False
    Then run main() method to populate attributes.
    ATTRIBUTES:
    -- product_df: pandas time series (DataFrame) with daily sales in dollars and units
    -- sales: pandas time series (Series) of total daily sales
    -- units_sold: pandas time series (Series) of total daily units sold
    -- product_id (int or None)
    -- product_name (string or None)
    -- location_id (int or None)
    -- location_name (string or None)
    -- ts_start, ts_end (Datetime) start and end dates for time series, to assist
       testing for continuity and synchronization among comparative products
    NOTE: DataFrame and Series title strings with product name and ID may be accessed
    via DataFrame.name and Series.name attributes
    """
    def __init__(self, product=None, location=None, city=None, zipcode=None, upsample=True):
        self.product = product
        self.location = location
        self.city = city
        self.zipcode = zipcode
        self.upsample = upsample
        self._connection_str = 'postgresql:///uplift'
        # populated via main() method
        self.product_id = None
        self.product_name = None
        self.location_id = None
        self.location_name = None
        self._query = None
        self._conn = None
        self.product_df = None
        self.sales = None
        self.units_sold = None
        self.ts_start, self.ts_end = None, None
        # elements for SQL query and for pd.DataFrame and pd.Series names
        # '--' comments OUT an unused WHERE/AND clause in the SQL template;
        # '' (empty string) activates the clause
        self.toggles = {'toggle_1': '--', 'filter_1': '', 'value_1': '',
                        'filter_2': '', 'value_2': '',
                        'toggle_2': '--'}
        # city is upper-cased and wrapped in single quotes for direct
        # insertion into the SQL text
        self.city_formatted = (
            "\'" + self.city.upper() + "\'" if self.city is not None else None
        )
        self.df_name = None
    def main(self):
        """Run the full import pipeline: resolve IDs, build the SQL query,
        connect, and populate the pandas attributes."""
        self._retrieve_IDs()
        self._set_query_toggles()
        self._compose_df_name()
        self._query_sales()
        self._connect_to_postgres()
        self._SQL2pandasdf()
    def _retrieve_IDs(self):
        """Resolve the product/location arguments into both name and ID.
        Each argument may be a name (str) or an ID (int). Unknown names/IDs
        silently abort resolution (the corresponding attributes stay None)."""
        if self.product is not None:
            if type(self.product) == str:
                key = self.product.lower()
                try:
                    self.product_id = strain_dict[key]
                except KeyError:
                    return
                else:
                    self.product_name = names_formatted[self.product.lower()]
            else:
                self.product_id = self.product
                try:
                    self.product_name = names_formatted[product_name_from_ID(self.product)]
                except KeyError:
                    return
        if self.location is not None:
            if type(self.location) == str:
                self.location_name = self.location.upper()
                key = self.location_name
                try:
                    self.location_id = locations_dict[key]
                except KeyError:
                    return
            else:
                self.location_name = locations_name_from_ID(self.location)
                self.location_id = self.location
    def _set_query_toggles(self):
        """Convert initialization parameters into query terms placed in a dictionary"""
        arg_list = [self.product_id, self.location_id, self.city_formatted, self.zipcode]
        fields = ['strain_id', 'location_id', 'city', 'zip']
        mask = lambda x: x is not None
        bool_list = map(mask, arg_list)  # Python 2: map returns a list here
        if bool_list.count(True) != 0:
            idx_1 = bool_list.index(True)
            if bool_list.count(True) == 1:  # if only one filter specified...
                self.toggles['toggle_1'] = ''
                self.toggles['filter_1'] = fields[idx_1]
                self.toggles['value_1'] = arg_list[idx_1]
            if bool_list.count(True) == 2:  # if two filters specified...
                self.toggles['toggle_1'] = ''
                self.toggles['filter_1'] = fields[0]  # Toggle on product filter
                self.toggles['value_1'] = arg_list[0]
                idx_2 = bool_list[1:].index(True) + 1  # Grab index of second filter
                self.toggles['toggle_2'] = ''
                self.toggles['filter_2'] = fields[idx_2]
                self.toggles['value_2'] = arg_list[idx_2]
    def _compose_df_name(self):
        """Compose a human-readable title reflecting the active filters; stored
        in df_name and later assigned to DataFrame/Series .name attributes."""
        # Statewide data for all products
        if self.toggles['toggle_1'] == '--':
            self.df_name = 'All Cannabis Products, Statewide'
        # If only ONE product or geographic filter is specified
        if self.toggles['toggle_1'] == '' and self.toggles['toggle_2'] == '--':
            if self.toggles['filter_1'] == 'strain_id':
                self.df_name = '{} (ID: {}) Statewide'.format(self.product_name,
                                                              self.product_id)
            elif self.toggles['filter_1'] == 'location_id':
                self.df_name = 'Location: {} (ID: {})'.format(self.location_name,
                                                              self.location_id)
            elif self.toggles['filter_1'] == 'city':
                self.df_name = 'City: {}'.format(self.city.upper())
            else:
                self.df_name = 'Zipcode: {}'.format(self.zipcode)
        # If one product and one geographic filter are specified
        if self.toggles['toggle_1'] == '' and self.toggles['toggle_2'] == '':
            A = '{} (ID: {})'.format(self.product_name, self.product_id)
            if self.toggles['filter_2'] == 'location_id':
                B = ', Location: {} (ID: {})'.format(self.location_name, self.location_id)
            elif self.toggles['filter_2'] == 'city':
                B = ', City: {}'.format(self.city.upper())
            else:
                B = ', Zipcode: {}'.format(self.zipcode)
            self.df_name = A + B
    def _query_sales(self):
        """Compose the SQL statement; the '--' toggles comment out unused
        filter clauses.
        NOTE(review): filter values are interpolated directly into the SQL
        text rather than parameterized -- safe only for trusted inputs."""
        self._query = ("""
        SELECT CAST(DATE_TRUNC('day', week_beginning) AS DATE) as week_beg
         , ROUND(SUM(retail_price)) as ttl_sales
         , ROUND(SUM(retail_units)) as ttl_units_sold
        FROM weekly_sales
        {0:}WHERE {1:} = {2:}
        {3:}AND {4:} = {5:}
        GROUP BY week_beg
        ORDER BY week_beg;
        """).format(self.toggles['toggle_1'],
                    self.toggles['filter_1'],
                    self.toggles['value_1'],
                    self.toggles['toggle_2'],
                    self.toggles['filter_2'],
                    self.toggles['value_2']
                    )
    def _connect_to_postgres(self):
        """Create the SQLAlchemy engine for the fixed connection string."""
        self._conn = create_engine(self._connection_str)
    def _SQL2pandasdf(self):
        """Run the query and populate product_df, sales, and units_sold.
        With upsample=True weekly totals are divided by 7 and forward-filled
        onto a continuous daily index; otherwise weekly frequency is kept."""
        stage_1 = pd.read_sql_query(self._query, self._conn)
        stage_2 = pd.DataFrame(stage_1[['ttl_sales', 'ttl_units_sold']])
        stage_2.index = pd.DatetimeIndex(stage_1['week_beg'])
        try:
            # Construct continuous time series even if data is discontinuous
            self.ts_start, self.ts_end = stage_2.index[0], stage_2.index[-1]
        except IndexError:
            print "\nERROR: NO SALES DATA FOR THE FILTERS SPECIFIED"
            return None
        else:
            if self.upsample:  # Construct daily DF from weekly aggregated data
                main_idx = pd.date_range(start=self.ts_start, end=self.ts_end, freq='W-MON')
                stage_3 = pd.DataFrame(index=main_idx)
                stage_3['ttl_sales'] = stage_2['ttl_sales']
                stage_3['ttl_units_sold'] = stage_2['ttl_units_sold']
                stage_4 = stage_3 / 7  # convert weekly values to daily values
                stage_5 = stage_4.resample('D').asfreq()  # upsample from weeks to days
                extended_idx = pd.DatetimeIndex(
                    start=stage_5.index[0], end=stage_5.index[-1] + 6, freq='D'
                )  # extend upsampled index to include days from last week of data
                self.product_df = stage_5.reindex(extended_idx).ffill(limit=6)
                # forward-fill available daily values, retaining NaNs over those weeks
                # where NaNs appear in original weekly data
                self.product_df.name = self.df_name
                self.sales = self.product_df['ttl_sales']
                self.sales.name = self.df_name + ' -- Daily Sales'
                self.units_sold = self.product_df['ttl_units_sold']
                self.units_sold.name = self.df_name + ' -- Daily Units Sold'
            else:  # preserve data in original weekly aggregated form
                main_idx = pd.date_range(start=self.ts_start, end=self.ts_end, freq='W-MON')
                self.product_df = pd.DataFrame(index=main_idx)
                self.product_df['ttl_sales'] = stage_2['ttl_sales']
                self.product_df['ttl_units_sold'] = stage_2['ttl_units_sold']
                self.product_df.name = self.df_name
                self.sales = self.product_df['ttl_sales']
                self.sales.name = self.df_name + ' -- Weekly Sales'
                self.units_sold = self.product_df['ttl_units_sold']
                self.units_sold.name = self.df_name + ' -- Weekly Units Sold'
class SalesTrendsDF(object):
    """Convert raw time series sales or unit-sales data for a single product into
    engineered trend data, including rolling averages and exponentially smoothed
    trends for both absolute and normalized (rescaled) values.
    INPUT:
    -- ts: ImportSalesData.sales or .units_sold object (pandas Series)
    -- period_wks: (int) date span of sampling period in weeks measured back
       from most recent datum or from user-supplied end_date
    -- end_date: (date string of form: '07/15/2016', default=None) alternative
       end-date for sampling period; default uses most recent datum
    -- MA_params: (list of ints, default=None) one or more rolling "boxcar"
       windows, in weeks, by which to generate distinct columns of moving-
       average data
    -- exp_smooth_params: (list of floats, default=None) one or more alpha
       smoothing factors (0 < alpha < 1) by which to generate distinct columns
       of exponentially smoothed data
    -- normed: (bool, default=True) add a column for each moving-average or
       exponentially smoothed column that computes on data rescaled (-1, 1)
       and then shifted per baseline parameter.
    -- baseline: (str, default='t_zero') baseline for shifing data; values:
       * 't_zero' -- shift data by value at t0
       * 'mean' -- shift data by the mean
       * 'median' -- shift data by the median
    -- NaN_filler: (float or None, default=0.0) fillna value for raw data, allows marking
       converted NaNs by using tag value such as 0.0001. Set to None to generate
       a trendsDF with only raw sample data, NaNs in place. Note: All computations
       and statistical aggregation on smoothed trends use fillna = 0.0.
    ATTRIBUTES:
    -- trendsDF: (pandas DataFrame)
    -- trend_stats: (OrderedDict) aggregate statistics, single record for insertion
       into comparison DF
    -- product_name: (str) extracted from ts.name
    -- product_ID: (int) extracted from ts.name
    -- sales_col_name: (str) either 'daily sales' or 'daily units sold', extracted
       from ts.name
    -- NaNs_ratio: (float) ratio of NaNs to total days in ts sample
    METHODS:
    -- main(): run after initialization to populate trendsDF
    -- aggregate_stats(): populates trend_stats containing record for product
       aggregated from trendsDF object
    -- norm_Series(col): rescales (-1, 1) and shifts selected data column
    -- trend_AUC(ts): computes area under curve for time series
    -- compute_aggr_slope(ts): returns slope of line describing avg growth rate
       over selected time series data
    """
    def __init__(self, ts, period_wks, end_date=None, MA_params=None,
                 exp_smooth_params=None, normed=True, baseline='t_zero',
                 NaN_filler=0.0):
        self.ts = ts
        self.raw_df = None
        self.period_wks = period_wks
        self._period_days = period_wks * 7
        self.end_date = end_date
        self.MA_params = MA_params
        self.exp_smooth_params = exp_smooth_params
        self.normed = normed
        self.baseline = baseline
        self.NaN_filler = NaN_filler
        self.product_name = None
        self.product_ID = None
        self.place_name = None
        self.place_ID = None
        self.ts_sample = None
        self.trendsDF = None
        self.trend_stats = OrderedDict()
        self.NaNs_ratio = None
        # Derive the data-type label ('Sales' / 'Units Sold') from the Series
        # title written by ImportSalesData; tolerate a ts without a .name.
        try:
            self.sales_col_name = self.ts.name.split(' -- Daily ')[-1]
        except AttributeError:
            pass
    def main(self):
        """Populate trendsDF (and trend_stats when NaN_filler is set)."""
        if self.ts is not None:
            self._constuct_basic_trendsDF()
            if self.NaN_filler is not None:
                if self.MA_params:
                    self._compute_rolling_averages()
                self.aggregate_stats()
        else:
            print "\nERROR: Initialization ts is NoneType."
    def _constuct_basic_trendsDF(self):
        """DF with sales over period"""
        # populate ts_sample attribute
        self._slice_timeseries()
        # compute ratio of NaNs in ts_sample
        self.NaNs_ratio = (
            self.ts_sample.isnull().sum() / float(len(self.ts_sample))
        )
        # contruct base trendsDF object
        self.trendsDF = pd.DataFrame(
            data=self.ts_sample.values,
            columns=[self.sales_col_name.lower()],
            index=self.ts_sample.index
        )
        if self.NaN_filler is not None:
            self.trendsDF.fillna(self.NaN_filler, inplace=True)
        self.trendsDF.name = self._trendsDF_name()
        # Only add columns for shifted and normed trends on NaN-filled data
        if self.NaN_filler is not None:
            if self.baseline == 't_zero':
                # At this point trendsDF has a single column, so iloc[:,-1]
                # is the raw data column; [0] grabs its value at t0.
                self.trendsDF['SHIFTED to t0=0'] = \
                    self.trendsDF.iloc[:,0] - self.trendsDF.iloc[:,-1][0]
            if self.baseline == 'mean':
                # NOTE(review): iloc[:-1] drops the last ROW (it is a row
                # slice, not a column selection); this looks like a typo for
                # iloc[:,-1].mean() or iloc[:,0].mean() -- confirm, since the
                # column/row mismatch misaligns the subtraction.
                self.trendsDF['SHIFTED to mean=0'] = \
                    self.trendsDF.iloc[:,0] - self.trendsDF.iloc[:-1].mean()
            if self.baseline == 'median':
                self.trendsDF['SHIFTED to median=0'] = \
                    self.trendsDF.iloc[:,0] - np.median(self.trendsDF.iloc[:,-1])
            if self.normed:
                self.trendsDF['NORMD'] = \
                    self.norm_Series(self.trendsDF.iloc[:,0])
    def _compute_rolling_averages(self):
        """Add one moving-average column per window in MA_params, plus a
        baseline-shifted column and (optionally) a rescaled/normed column."""
        self.raw_df = pd.DataFrame(self.ts)
        rounder = (lambda x: round(x, 0))
        for wk_window in self.MA_params:
            boxcar = wk_window * 7  # window in days
            col_name = '{}wk MA'.format(wk_window)
            # Rolling mean is computed on the FULL raw series (NaNs -> 0)
            # then sampled down to the trendsDF date range.
            self.raw_df[col_name] = \
                self.ts.fillna(0).rolling(window=boxcar).mean()
            self.trendsDF[col_name] = \
                self.raw_df[col_name][self.trendsDF.index].apply(rounder)
            # Shift moving averages to baseline
            if self.baseline == 't_zero':
                self.trendsDF[col_name + ' SHIFTED to t0=0'] = \
                    self.trendsDF[col_name] - self.trendsDF[col_name][0]
            if self.baseline == 'mean':
                self.trendsDF[col_name + ' SHIFTED to mean=0'] = \
                    self.trendsDF[col_name] - self.trendsDF[col_name].mean()
            if self.baseline == 'median':
                self.trendsDF[col_name + ' SHIFTED to median=0'] = \
                    self.trendsDF[col_name] - np.median(self.trendsDF[col_name])
            if self.normed:
                normed_col_name = '{}wk MA NORMD'.format(wk_window)
                self.trendsDF[normed_col_name] = \
                    self.norm_Series(self.trendsDF[col_name])
    def aggregate_stats(self):
        """Compute statistics on each data column and output trend_stats attribute
        (OrderedDict)"""
        self.trend_stats['product_name'] = self.product_name
        self.trend_stats['product_id'] = self.product_ID
        self.trend_stats['place_name'] = self.place_name
        self.trend_stats['place_id'] = self.place_ID
        self.trend_stats['avg weekly ' + self.sales_col_name.lower()] = \
            round(self.trendsDF.iloc[:,0].sum() / self.period_wks, 0)
        if 'units' in self.sales_col_name.lower():
            sales_or_units = ' (units)'
        else:
            sales_or_units = ' ($)'
        # Daily slopes are multiplied by 7 to express weekly rates.
        for column in self.trendsDF.columns[1:]:
            if 'NORMD' in column:
                self.trend_stats[column + ' growth rate'] = \
                    (7 * self.compute_aggr_slope(self.trendsDF[column]))
            if 'SHIFTED' in column:
                self.trend_stats[column + ' avg weekly gain' + sales_or_units] = \
                    round(7 * self.compute_aggr_slope(self.trendsDF[column]), 0)
    def _slice_timeseries(self):
        """Construct ts_sample attribute"""
        offset = pd.DateOffset(self._period_days - 1)
        if self.end_date:
            idx_end = pd.to_datetime(self.end_date)
            sample_idx = pd.date_range(
                start=idx_end - offset, end=idx_end
            )
            sample_df = pd.DataFrame(index=sample_idx)
            sample_df['vals'] = self.ts[sample_idx]
            self.ts_sample = sample_df.iloc[:,0]
            self.ts_sample.name = self.ts.name
        else:  # else use most recent date available
            idx_end = self.ts.index[-1]
            sample_idx = pd.date_range(
                start=idx_end - offset, end=idx_end
            )
            sample_df = pd.DataFrame(index=sample_idx)
            sample_df['vals'] = self.ts[sample_idx]
            self.ts_sample = sample_df.iloc[:,0]
            self.ts_sample.name = self.ts.name
    def _trendsDF_name(self):
        """Construct string for trendsDF pandas DataFrame name attribute"""
        # Parses product / place identity back out of the title string built
        # by ImportSalesData._compose_df_name (string-format coupling).
        A = ''
        B = 'Statewide '
        first_parse = self.ts.name.split(' -- Daily ')
        if 'ID' in first_parse[0] and 'Location' not in first_parse[0]:
            self.product_name = first_parse[0].split(' (ID: ')[0]
            self.product_ID = int(first_parse[0].split(' (ID: ')[1].split(')')[0])
            A = '{} (ID: {}), '.format(self.product_name, self.product_ID)
        if 'Location' in first_parse[0] and first_parse[0].count('ID') == 1:
            # [:-4] strips the trailing ' (ID' fragment from the split piece
            self.place_name = first_parse[0].split(': ')[-2][:-4]
            self.place_ID = int(
                first_parse[0].split(' (ID: ')[-1].split(')')[0]
            )
            B = 'Location: {} (ID: {}), '.format(self.place_name, self.place_ID)
        if first_parse[0].count('ID') == 2:
            self.product_name = first_parse[0].split(' (ID: ')[0]
            self.product_ID = int(first_parse[0].split(' (ID: ')[1].split(')')[0])
            A = '{} (ID: {}), '.format(self.product_name, self.product_ID)
            self.place_name = first_parse[0].split(': ')[-2][:-4]
            self.place_ID = int(
                first_parse[0].split(' (ID: ')[-1].split(')')[0]
            )
            B = 'Location: {} (ID: {}), '.format(self.place_name, self.place_ID)
        if 'City' in first_parse[0]:
            self.place_name = first_parse[0].split('City: ')[-1]
            B = 'City: {}, '.format(self.place_name)
        if 'Zipcode' in first_parse[0]:
            self.place_name = first_parse[0].split('Zipcode: ')[-1]
            B = 'Zipcode: {}, '.format(self.place_name)
        if not self.end_date:
            ending = self.ts.index[-1].strftime('%m/%d/%Y')
        else:
            ending = self.end_date
        DF_name = (A + B + 'Trends in {} over {} Weeks Ending {}').format(
            self.sales_col_name,
            self.period_wks,
            ending
        )
        return DF_name
    def norm_Series(self, col):
        """Return time series rescaled then shifted to baseline.
        """
        values = col.fillna(0).values
        values = values.reshape(-1,1)
        # NOTE(review): feature_range here is (-50, 50), although the class
        # docstring describes rescaling to (-1, 1) -- confirm which is intended.
        scaler = MinMaxScaler(feature_range=(-50,50))
        scaler = scaler.fit(values)
        scaled_vals = scaler.transform(values).flatten()
        if self.baseline == 't_zero':
            normed_trend = pd.Series(scaled_vals - scaled_vals[0], index=col.index)
            return normed_trend
        if self.baseline == 'mean':
            normed_trend = pd.Series(scaled_vals - scaled_vals.mean(), index=col.index)
            return normed_trend
        if self.baseline == 'median':
            normed_trend = pd.Series(scaled_vals - np.median(scaled_vals), index=col.index)
            return normed_trend
    def trend_AUC(self, ts, log_scaled=False, sqrt_scaled=False):
        """Compute trend AUC or (optionally) log-scaled AUC or sqrt_scaled AUC
        for column in trendsDF
        """
        # Negative AUCs are scaled on their magnitude and the sign restored,
        # since log/sqrt are undefined for negative values.
        if log_scaled:
            if np.trapz(ts.values) < 0:
                return -1 * np.log(-1 * np.trapz(ts.fillna(0)))
            else:
                return np.log(np.trapz(ts.fillna(0)))
        elif sqrt_scaled:
            if np.trapz(ts.values) < 0:
                return -1 * np.sqrt(-1 * np.trapz(ts.fillna(0)))
            else:
                return np.sqrt(np.trapz(ts.fillna(0)))
        else:
            return np.trapz(ts.fillna(0))
    def compute_aggr_slope(self, ts):
        """Redistribute AUC under straight line and return slope of line. For
        raw figures, units represent avg sales (or units sold) gained/lost per day"""
        # Triangle with the same area: area = slope * len^2 / 2.
        AUC = self.trend_AUC(ts)
        return (2 * AUC) / (len(ts)**2)
def SalesStatsDF(period_wks, end_date, products=[None], locations=[None],
                 cities=[None], zipcodes=[None], MA_params=[5], normed=True,
                 baseline='t_zero', compute_on_sales=True, NaN_allowance=5,
                 print_rejects=False, return_rejects=False):
    """Construct DataFrame showing comparative sales stats among multiple products
    or places. See output DataFrame.name attribute for title.
    ARGUMENTS:
    -- period_wks: (int) sampling period in weeks
    -- end_date: (date string of form: '07/15/2016') date string defining
       end of sampling period for comparison
    PROVIDE ONE OF THE BELOW OR A COMBINATION OF TWO ARGUMENTS with ONE OF THE
    TWO CONTAINING ONLY ONE VALUE IN ITS LIST
    -- products: (list of ints or strings) list of product names and/or IDs for
       filtering or statistical comparison
    -- locations: (list of ints or strings) list of retail store names and/or
       IDs for filtering or statistical comparison
    -- cities: (list of strings) list of cities for filtering or statistical
       comparison
    -- zipcodes: (list of 5-digit zipcodes as ints) list of zipcodes for filtering
       or statistical comparison
    OPTIONAL:
    -- MA_params: (list of ints, default=5) one or more rolling "boxcar"
       windows, in weeks, by which to compute moving averages
    -- normed: (bool, default=True) add a column for each rolling average or expon.
       smoothed column that computes on data that has been rescaled (-1, 1)
       and then shifted to baseline.
    -- baseline: (str, default='t_zero') baseline for shifing data; values:
       * 't_zero' -- shift data by value at t0
       * 'mean' -- shift data by the mean
       * 'median' -- shift data by the median
    -- compute_on_sales: (bool, default=True) computes on sales data; if False,
       computes on units-sold data
    -- NaN_allowance: (int or float from 0 to 100, default=5) max allowable
       percentage of NaNs in product ts samples for statistical aggregation;
       products exceeding allowance are discarded from output DataFrame and
       reported in rejection dictionary
    -- print_rejects: (bool, default=False) If True, print any products rejected
       for excess null values in sample with their corresponding ratio of nulls
       present in the dataset
    -- return_rejects: (bool, default=False) If True, returns dictionary of
       of products rejected for excess nulls along with main output dataframe.
    """
    data = []
    rejected = {}
    counter = 0
    df_name = None
    # import_type: 'A' statewide, 'B' single filter, 'C' iterate products,
    # 'D' iterate places, 'E' conflicting arguments.
    product_place_args = [products, locations, cities, zipcodes]
    import_type, var_index = select_import_params(product_place_args)
    if import_type == 'E':
        print (
            '\nERROR: CONFLICTING VALUES ENTERED AMONG PRODUCTS, LOCATIONS, CITIES, '
            'AND/OR ZIPCODES ARGUMENTS.\n'
            'ONLY ONE OF THOSE FOUR LIST-ARGUMENTS MAY CONTAIN MORE THAN ONE VALUE.\n'
        )
        return
    if import_type == 'A':  # Statewide data for all products
        stats, NaN_ratio, name = import_ala_params(period_wks, end_date,
            MA_params=MA_params, normed=normed, baseline=baseline,
            compute_on_sales=compute_on_sales, NaN_allowance=NaN_allowance)
        data.append(stats)
        df_name = name
    if import_type == 'B':  # Single product or place specified
        stats, NaN_ratio, name = import_ala_params(period_wks, end_date,
            product=products[0], location=locations[0], city=cities[0],
            zipcode=zipcodes[0], MA_params=MA_params, normed=normed, baseline=baseline,
            compute_on_sales=compute_on_sales, NaN_allowance=NaN_allowance)
        if stats is not None:
            # If null vals in sample exceed allowance threshold, dump product or place
            # into rejected dict and exclude from output DF
            if NaN_ratio > NaN_allowance / 100.:
                if products[0] is not None:
                    rejected[stats['product_name']] = NaN_ratio
                else:
                    rejected[stats['place_name']] = NaN_ratio
            else:
                data.append(stats)
                df_name = name
        else:
            return
    if import_type == 'C':  # Iterate on products
        for prod in products:
            stats, NaN_ratio, name = import_ala_params(period_wks, end_date,
                product=prod, location=locations[0], city=cities[0],
                zipcode=zipcodes[0], MA_params=MA_params, normed=normed, baseline=baseline,
                compute_on_sales=compute_on_sales, NaN_allowance=NaN_allowance)
            if stats is None:
                continue
            else:
                if NaN_ratio > NaN_allowance / 100.:
                    rejected[stats['product_name']] = NaN_ratio
                else:
                    data.append(stats)
                if counter < 1:  # first loop, grab name for output DF
                    if locations[0] is not None:
                        df_name = ('Product Comparison, '
                                   + name.split(', ')[-1]
                                   + ', Business: {}'.format(locations[0].upper())
                                   )
                    elif cities[0] is not None:
                        df_name = ('Product Comparison, '
                                   + name.split(', ')[-1]
                                   + ', City: {}'.format(cities[0].upper())
                                   )
                    elif zipcodes[0] is not None:
                        df_name = ('Product Comparison, '
                                   + name.split(', ')[-1]
                                   + ', Zipcode: {}'.format(zipcodes[0])
                                   )
                    else:
                        df_name = 'Product Comparison, ' + name.split(', ')[-1]
                counter += 1
    if import_type == 'D':  # iterate on a place
        # var_index selects which place list (1=locations, 2=cities, 3=zipcodes)
        if var_index == 1:
            for loc in locations:
                stats, NaN_ratio, name = import_ala_params(period_wks, end_date,
                    product=products[0], location=loc, MA_params=MA_params,
                    normed=normed, baseline=baseline,
                    compute_on_sales=compute_on_sales, NaN_allowance=NaN_allowance)
                if stats is None:
                    continue
                else:
                    if NaN_ratio > NaN_allowance / 100.:
                        rejected[stats['place_name']] = NaN_ratio
                    else:
                        data.append(stats)
                    if counter < 1:
                        if products[0] is None:
                            df_name = 'Comparison by Business, ' + name.split(', ')[-1]
                        else:
                            df_name = (name.split(', ')[0]
                                       + ', Comparison by Business, '
                                       + name.split(', ')[-1]
                                       )
                    counter += 1
        if var_index == 2:
            for city in cities:
                stats, NaN_ratio, name = import_ala_params(period_wks, end_date,
                    product=products[0], city=city, MA_params=MA_params, normed=normed,
                    baseline=baseline, compute_on_sales=compute_on_sales,
                    NaN_allowance=NaN_allowance)
                if stats is None:
                    continue
                else:
                    if NaN_ratio > NaN_allowance / 100.:
                        rejected[stats['place_name']] = NaN_ratio
                    else:
                        data.append(stats)
                    if counter < 1:
                        if products[0] is None:
                            df_name = 'Comparison by City, ' + name.split(', ')[-1]
                        else:
                            df_name = (name.split(', ')[0]
                                       + ', Comparison by City, '
                                       + name.split(', ')[-1]
                                       )
                    counter += 1
        if var_index == 3:
            for zipcode in zipcodes:
                stats, NaN_ratio, name = import_ala_params(period_wks, end_date,
                    product=products[0], zipcode=zipcode, MA_params=MA_params,
                    normed=normed, baseline=baseline,
                    compute_on_sales=compute_on_sales, NaN_allowance=NaN_allowance)
                if stats is None:
                    continue
                else:
                    if NaN_ratio > NaN_allowance / 100.:
                        rejected[stats['place_name']] = NaN_ratio
                    else:
                        data.append(stats)
                    if counter < 1:
                        if products[0] is None:
                            df_name = 'Comparison by Zipcode, ' + name.split(', ')[-1]
                        else:
                            df_name = (name.split(', ')[0]
                                       + ', Comparison by Zipcode, '
                                       + name.split(', ')[-1]
                                       )
                    counter += 1
    try:
        product_stats_df = pd.DataFrame(data, columns=data[0].keys())
    except IndexError:
        print ('\nNO DATA AVAILABLE IN SPECIFIED PERIOD FOR PRODUCT AND/OR PLACE.\n'
               'Utilize PlotRawData function to view data availability over time.\n'
               )
    else:
        product_stats_df.name = df_name
        if print_rejects:
            if len(rejected) > 0:
                print('Data for the following product(s) and/or place(s) exceed allowance for '
                      'null values \nand are excluded from statistical aggregation '
                      'and/or ranking:\n')
                for k, v in rejected.iteritems():
                    print('{} -- Percent Null: {}%').format(k, round(v * 100, 2))
                print '\n'
        if return_rejects:
            return product_stats_df, rejected
        else:
            return product_stats_df
def CompTrendsDF(period_wks, end_date, products=[None], locations=[None],
                 cities=[None], zipcodes=[None], MA_param=None,
                 shifted=False, normed=False,
                 baseline='t_zero', compute_on_sales=True, NaN_filler=0.0):
    """Construct DataFrame with time series across multiple products or places.
    Default kwargs return a DataFrame with time series of raw sales data, NaNs
    filled with 0.0. Otherwise, assign value to MA_param=.
    Optionally may assign bool True to either shifted= or normed= arguments (NOT BOTH).
    To preserve discontinuities in data (i.e., NaNs) set NaN_filler to None or
    to a tag value close to zero such as 0.0001
    ARGUMENTS:
    -- period_wks: (int) sampling period in weeks
    -- end_date: (date string: '07/15/2016') date string defining
       end of sampling period.
    SPECIFICATIONS FOR COMPARISON AND FILTERING:
    Provide one argument for comparison, optionally add a second argument, either
    a single product or a single place, as a filter. More than two arguments or
    more than one argument that contains multiple values will produce an error.
    -- products: (list of ints or strings) list of product names and/or IDs
    -- locations: (list of ints or strings) list of business names and/or
       IDs
    -- cities: (list of strings) list of cities
    -- zipcodes: (list of 5-digit zipcodes as ints) list of zipcodes
    KEYWORD ARGUMENTS:
    -- MA_param: (int) return dataframe of moving averages; int defines "boxcar"
       window, in weeks, by which to compute moving average
       NOTE: requires value for NaN_filler (!= None)
    -- shifted: (bool, default=False) shift trend data to t0 = 0
       NOTE: requires a non-null value for NaN_filler
    -- normed: (bool, default=False) rescale data to feature range (-1, 1)
       then shift data such that t0 = 0.
       NOTE: requires a non-null value for NaN_filler
    -- baseline: (str, default='t_zero') baseline for shifing data; values:
       * 't_zero' -- shift data by value at t0
       * 'mean' -- shift data to mean = 0
       * 'median' -- shift data to median = 0
    -- compute_on_sales: (bool, default=True) computes on sales data; if False,
       computes on units-sold data
    -- NaN_filler: (float or None, default=0.0) fillna value for raw data, allows marking
       converted NaNs by using tag value such as 0.0001. Set to None to generate
       a CompTrendsDF with only raw, unsmoothed sample data, NaNs in place.
    """
    # Notify user of special arguments error.
    # BUGFIX: condition previously referenced an undefined name
    # `exp_smooth_param` (not a parameter of this function), raising a
    # NameError whenever NaN_filler was None.
    if NaN_filler is None and (MA_param or shifted or normed):
        print ("ERROR: MISSING NaN_filler VALUE\n"
               "Value (int or float) must be provided for NaN_filler with MA_param, shifted=True"
               " or normed=True arguments."
               )
        return None
    # Column number to grab specified trend-type from TrendsDF object
    col_index = column_sel(MA_param, shifted, normed)
    # Insert single MA_param into list for SalesTrendsDF class
    if MA_param is not None:
        MA_param = [MA_param]
    counter = 0
    # From user specifications, set variable for comparison and filter
    product_place_args = [products, locations, cities, zipcodes]
    import_type, var_index = select_import_params(product_place_args)
    # BUGFIX: `import_type == ('A' or 'E')` only ever compared against 'A';
    # the membership test below catches both invalid-argument codes up front.
    if import_type in ('A', 'E'):
        print (
            '\nERROR: CONFLICTING ARGUMENTS ENTERED (OR MISSING) AMONG PRODUCTS, LOCATIONS\n'
            ' CITIES, AND/OR ZIPCODES.\n\n'
            'Provide one argument for comparison and (optionally) add a second argument\n'
            'as a filter. The filter may be a single product or a single place.\n'
            'Check that NOT MORE THAN ONE ARGUMENT contains multiple values.\n'
        )
        return
    if import_type == 'B':  # Single product or place specified
        category = 'product' if products[0] is not None else 'place'
        t_df, col_category, filter_name, filter_ID = import_ala_params(
            period_wks, end_date, product=products[0], location=locations[0],
            city=cities[0], zipcode=zipcodes[0], MA_params=MA_param, normed=normed,
            baseline=baseline, compute_on_sales=compute_on_sales, NaN_allowance=100,
            return_trendsDF=True, var_type=category)
        comp_trends_df = build_seed_or_source_df(t_df, NaN_filler, col_category,
                                                 col_index)
        df_title = CompTrendsDF_title(t_df, col_index, var_index, baseline,
                                      MA_param, shifted, normed, filter_name, filter_ID)
        comp_trends_df.name = df_title
    if import_type == 'C':  # Iterate on multiple products
        for prod in products:
            t_df, col_category, filter_name, filter_ID = import_ala_params(
                period_wks, end_date, product=prod, location=locations[0],
                city=cities[0], zipcode=zipcodes[0], MA_params=MA_param,
                normed=normed, baseline=baseline, compute_on_sales=compute_on_sales,
                NaN_allowance=100, return_trendsDF=True, var_type='product')
            if counter < 1:  # Build the seed (base) df with first product
                comp_trends_df = build_seed_or_source_df(t_df, NaN_filler,
                    col_category, col_index)
                df_title = CompTrendsDF_title(t_df, col_index, var_index,
                    baseline, MA_param, shifted, normed, filter_name, filter_ID)
                comp_trends_df.name = df_title
            else:  # Add columns with subsequent products
                build_seed_or_source_df(t_df, NaN_filler, col_category,
                    col_index, constr_seed_df=False, seed_df=comp_trends_df)
            counter += 1
    if import_type == 'D':  # iterate on places
        # var_index selects which place list (1=locations, 2=cities, 3=zipcodes)
        if var_index == 1:
            for loc in locations:
                t_df, col_category, filter_name, filter_ID = import_ala_params(
                    period_wks, end_date, product=products[0], location=loc,
                    MA_params=MA_param, normed=normed, baseline=baseline,
                    compute_on_sales=compute_on_sales, NaN_allowance=100,
                    return_trendsDF=True, var_type='place')
                if counter < 1:  # Build the seed df with first place
                    comp_trends_df = build_seed_or_source_df(t_df, NaN_filler,
                        col_category, col_index)
                    df_title = CompTrendsDF_title(t_df, col_index, var_index,
                        baseline, MA_param, shifted, normed, filter_name,
                        filter_ID)
                    comp_trends_df.name = df_title
                else:  # Add columns with subsequent places
                    build_seed_or_source_df(t_df, NaN_filler, col_category,
                        col_index, constr_seed_df=False, seed_df=comp_trends_df)
                counter += 1
        if var_index == 2:
            for city in cities:
                t_df, col_category, filter_name, filter_ID = import_ala_params(
                    period_wks, end_date, product=products[0], city=city,
                    MA_params=MA_param, normed=normed, baseline=baseline,
                    compute_on_sales=compute_on_sales, NaN_allowance=100,
                    return_trendsDF=True, var_type='place')
                if counter < 1:
                    comp_trends_df = build_seed_or_source_df(t_df, NaN_filler,
                        col_category, col_index)
                    df_title = CompTrendsDF_title(t_df, col_index, var_index,
                        baseline, MA_param, shifted, normed, filter_name,
                        filter_ID)
                    comp_trends_df.name = df_title
                else:
                    build_seed_or_source_df(t_df, NaN_filler, col_category,
                        col_index, constr_seed_df=False, seed_df=comp_trends_df)
                counter += 1
        if var_index == 3:
            for zipcode in zipcodes:
                t_df, col_category, filter_name, filter_ID = import_ala_params(
                    period_wks, end_date, product=products[0], zipcode=zipcode,
                    MA_params=MA_param, normed=normed, baseline=baseline,
                    compute_on_sales=compute_on_sales, NaN_allowance=100,
                    return_trendsDF=True, var_type='place')
                if counter < 1:
                    comp_trends_df = build_seed_or_source_df(t_df, NaN_filler,
                        col_category, col_index)
                    df_title = CompTrendsDF_title(t_df, col_index, var_index,
                        baseline, MA_param, shifted, normed, filter_name,
                        filter_ID)
                    comp_trends_df.name = df_title
                else:
                    build_seed_or_source_df(t_df, NaN_filler, col_category,
                        col_index, constr_seed_df=False, seed_df=comp_trends_df)
                counter += 1
    # Defensive: if no branch above constructed the dataframe, report the
    # argument-conflict error rather than crashing.
    try:
        return comp_trends_df
    except UnboundLocalError:
        print (
            '\nERROR: CONFLICTING ARGUMENTS ENTERED (OR MISSING) AMONG PRODUCTS, LOCATIONS\n'
            ' CITIES, AND/OR ZIPCODES.\n\n'
            'Provide one argument for comparison and (optionally) add a second argument\n'
            'as a filter. The filter may be a single product or a single place.\n'
            'Check that NOT MORE THAN ONE ARGUMENT contains multiple values.\n'
        )
        return
def column_sel(MA_param=None, shifted=False, normed=False):
    """Return integer for DataFrame column selection.

    Columns 0-2 hold the raw-trend variants (raw, shifted, normed);
    columns 3-5 hold the same variants computed on smoothed data.
    `shifted` takes precedence when both flags are set.
    """
    # Smoothed variants live three columns to the right of their raw twins.
    offset = 3 if MA_param is not None else 0
    if shifted:
        return offset + 1
    if normed:
        return offset + 2
    return offset
def build_seed_or_source_df(t_df, NaN_filler, col_category, col_index,
                            constr_seed_df=True, seed_df=None):
    """Seed a comparison DataFrame from t_df, or append one column to it.

    In seed mode (constr_seed_df=True) returns a new single-column frame
    labeled `col_category`; in append mode writes that column onto seed_df
    and returns None. NOTE: mutates t_df in place when NaN_filler is given.
    """
    if NaN_filler is not None:
        t_df.fillna(NaN_filler, inplace=True)
    if constr_seed_df:
        # Create base dataframe from the selected column
        source_col = t_df.columns[col_index]
        seed = pd.DataFrame(t_df[source_col])
        seed.columns = [col_category]
        return seed
    # Add the selected column to the existing base dataframe
    seed_df[col_category] = t_df.iloc[:, col_index]
def CompTrendsDF_title(t_df, col_index, var_index, baseline, MA_param, shifted,
                       normed, filter_name, filter_ID):
    """Construct df.name for CompTrendsDF."""
    # Leading segment describes the filter (product or place), when present.
    if filter_name is None:
        prefix = ''
    elif var_index is not None:  # filter is a single PRODUCT
        prefix = '{} (ID: {}), '.format(filter_name, filter_ID)
    elif filter_ID is not None:  # filter is a business (place)
        prefix = 'Business: {} (ID: {}), '.format(filter_name, filter_ID)
    elif not filter_name.isdigit():  # filter is a city
        prefix = 'City: {}, '.format(filter_name)
    else:  # filter is a zipcode
        prefix = 'Zipcode: {}, '.format(filter_name)
    if MA_param is not None:
        ma_part = '{}-Week Moving Average of '.format(MA_param[0])
    bsln = 'T0 = 0' if baseline == 't_zero' else baseline.capitalize()
    shift_part = ', Data Shifted to {}'.format(bsln)
    norm_part = ', Data Rescaled (-50, 50) then Shifted to {}'.format(bsln)
    # Subject is whatever follows 'in ' in the source frame's name.
    subject = t_df.name.split('in ')[1]
    if col_index == 0:
        title = prefix + subject
    if col_index == 1:
        title = prefix + subject + shift_part
    if col_index == 2:
        title = prefix + subject + norm_part
    if col_index == 3 and MA_param and not shifted:
        title = prefix + ma_part + subject
    if col_index == 4 and MA_param and shifted:
        title = prefix + ma_part + subject + shift_part
    if col_index == 5 and MA_param and normed:
        title = prefix + ma_part + subject + norm_part
    return title
class RankProductsPlaces(object):
    """Initialize with SalesStatsDF object and (optionally) by number of top
    results desired; Rank products or places by user_selected statistic.
    METHOD:
    -- main(): Rank products and populate attributes using kwargs:
        * smoothed (bool, default = True) rank on statistics generated from
          trend lines smoothed via moving average or exponential alpha;
          False = rank on raw trend data
        * stat (str or Nonetype, default = 'sales')
          - 'sales' (default)= average weekly sales over period; NOTE:
          - 'gain' = uniform weekly gain or loss over period
          - 'rate' = growth rate index for products with data
            normalized (rescaled -100, 100) for sales volumes
          - None = prompts user for statistic from menu
    ATTRIBUTES:
    -- results: pandas DataFrame of products or places ranked by selected statistic
    -- ranked_products: numpy array of ranked product IDs
    -- ranked_places: numpy array of ranked places or place IDs
    -- ranked_df: same as RankProducts.results but including all other statistics
    """
    def __init__(self, sales_stats_df, N_results=None):
        # Source statistics table (a SalesStatsDF); its .name attribute is
        # reused below to title the ranked outputs.
        self.sales_stats_df = sales_stats_df
        # Cap on number of ranked rows returned; None = return all rows.
        self.N_results = N_results
        self.results = None
        self.ranked_products = None
        self.ranked_places = None
        self.ranked_df = None
    def main(self, smoothed=True, stat='sales'):
        """Rank N-top products by a user-selected statistic specified either by
        smoothed and stat keyword arguments, or manually by selection off of menu
        OUTPUT: class attributes -- results, ranked_IDs, ranked_df
        ARGUMENTS:
        * smoothed (bool, default = True) rank on statistics generated from
          trend lines smoothed via moving average or exponentially;
          False = rank on raw trend data
        * stat (str or NoneType, default = 'sales') rank products on ...
          - 'sales' (default) = average weekly sales over period
          - 'gain' = uniform weekly gain or loss over period
          - 'rate' = growth rate for products with trend data rescaled (-50, 50)
            to offset variation in overall sales volumes among products
          - None = prompts user for selection of ranking statistic from menu
        """
        # Grab the column with statistic for ranking.
        # Assumes a fixed SalesStatsDF column layout: index 4 = avg weekly
        # sales, 5/6 = raw gain/rate, 7/8 = smoothed gain/rate -- TODO confirm.
        if stat:
            cols = self.sales_stats_df.columns
            if stat == 'sales':
                stat_idx = 4
            if not smoothed and stat == 'gain':
                stat_idx = 5
            if not smoothed and stat == 'rate':
                stat_idx = 6
            if smoothed and stat == 'gain':
                stat_idx = 7
            if smoothed and stat == 'rate':
                stat_idx = 8
            stat_col = cols[stat_idx]
        else:
            # Interactive fallback: user picks the statistic from a menu.
            stat_idx = self._sel_rank_by()
            stat_col = self.sales_stats_df.columns[stat_idx]
        # Move the ranking statistic to position 4 for display purposes.
        output_cols = list(self.sales_stats_df.columns)
        output_cols.remove(stat_col)
        output_cols.insert(4, stat_col)
        # Sort descending on the statistic; re-index so index = rank (1-based).
        ranked = self.sales_stats_df.sort_values(by=stat_col, ascending=False)
        ranked.index = range(1, len(ranked.index) + 1)
        if self.N_results:
            # if ranking by product...
            # (more than one distinct product name means products vary)
            if len(self.sales_stats_df['product_name'].unique()) > 1:
                self.ranked_df = ranked[output_cols][:self.N_results]
                self.ranked_products = self.ranked_df['product_id'].values
                self.results = self.ranked_df.iloc[:,:5]
                self.results.drop(['product_id' ,'place_name' ,'place_id'],
                    axis=1, inplace=True)
            else: # if ranking by place...
                self.ranked_df = ranked[output_cols][:self.N_results]
                self.ranked_places = self.ranked_df['place_name'].values
                self.results = self.ranked_df.iloc[:,:5]
                self.results.drop(['product_name', 'product_id', 'place_id'],
                    axis=1, inplace=True)
        else:
            # if ranking by product...
            if len(self.sales_stats_df['product_name'].unique()) > 1:
                self.ranked_df = ranked[output_cols]
                self.ranked_products = self.ranked_df['product_id'].values
                self.results = self.ranked_df.iloc[:,:5]
                self.results.drop(['product_id' ,'place_name' ,'place_id'],
                    axis=1, inplace=True)
            else: # if ranking by place...
                self.ranked_df = ranked[output_cols]
                self.ranked_places = self.ranked_df['place_name'].values
                self.results = self.ranked_df.iloc[:,:5]
                self.results.drop(['product_name', 'product_id', 'place_id'],
                    axis=1, inplace=True)
        # Carry the source table's title over to the outputs.
        self.results.name = self.sales_stats_df.name
        self.ranked_df.name = self.sales_stats_df.name + \
            ', Ranked by {}'.format(stat_col)
    def _sel_rank_by(self):
        "Prompt user for column for ranking; return its index"
        # Menu lists the statistic columns (everything after the first four).
        cols = self.sales_stats_df.columns[4:]
        index = range(1, len(cols) + 1)
        menu = dict(zip(index, cols))
        # NOTE: .iteritems() and raw_input are Python-2-only constructs.
        for k, v in menu.iteritems():
            print(str(k) + ' -- ' + v)
        selection = int(raw_input('\nSelect statistic for ranking.'))
        # NOTE(review): menu entry k corresponds to columns[k + 3], but this
        # returns k + 1, so main() would select a column outside the menu.
        # Likely should be `selection + 3` -- confirm against SalesStatsDF.
        return selection + 1
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GENERATE DATA FOR BAR GRAPHS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
def HbarData(period_wks, end_date, products=[None], locations=[None],
             cities=[None], zipcodes=[None], MA=5,
             rank_on_sales=True, NaN_allowance=5, print_rejects=False,
             rank_by=['sales'], fixed_order=True):
    """Return a dataframe configured for custom plotting in HbarRanked function
    Called within HBarRanked in graph_trends.py. See documentation there for
    further details.
    ARGUMENTS
    -- period_wks: (int, default=10) sample period for time series in weeks
    -- end_date: (date string: '07/15/2016', default=None) date string defining
       end of sampling period. Default uses most recent date in dataset.
    PROVIDE ONE OF THE BELOW OR A COMBINATION OF TWO ARGUMENTS with ONE OF THE
    TWO CONTAINING ONLY ONE VALUE IN ITS LIST
    -- products: (list of ints or strings) list of product names and/or IDs for
       filtering or statistical comparison
    -- locations: (list of ints or strings) list of retail store names and/or
       IDs for filtering or statistical comparison
    -- cities: (list of strings) list of cities for filtering or statistical
       comparison
    -- zipcodes: (list of 5-digit zipcodes as ints) list of zipcodes for filtering
       or statistical comparison
    ADDITIONAL DATA KWARGS
    -- rank_on_sales: (bool, default=True) ranks on sales data; if False,
       ranks on units sold data
    -- MA_param: (int or NoneType) return dataframe of moving averages; int defines "boxcar"
       window, in weeks, by which to compute moving average; if None, computes
       on raw trend data.
    -- rank_by: (list of strings, default=['rate']) select statistic
       by which to rank products in the primary and optional secondary
       graphs in order of statistic. Values:
        * 'sales' = cumulative sales over period
        * 'gain' = uniform weekly gain or loss over period
        * 'rate' = growth rate index for products with data
          normalized (rescaled -100, 100) for sales volumes
    -- fixed_order: (bool, default=True) only rank products in the primary
       bar graph and maintain that rank-order in secondary graphs; if False,
       rank products in each bar graph.
    -- NaN_allowance: (int from 0 to 100, default=5) max allowable percentage of
       NaNs in product ts samples for statistical aggregation; products
       exceeding allowance are discarded from rankings.
    -- print_rejects: (bool, default=False) If True, print report of products
       rejected for excess null values in sample, with their corresponding
       percentage of nulls in sample."""
    # Wrap the single MA window in a list for SalesStatsDF's MA_params kwarg
    boxcar = [MA] if MA is not None else None
    product_place_args = [products, locations, cities, zipcodes]
    import_type, var_index = select_import_params(product_place_args)
    # NOTE(review): drop_cols/append_col are only bound for import types 'C'
    # and 'D'; other types would raise UnboundLocalError below -- confirm
    # callers always pass a valid product/place combination.
    if import_type == 'C': # ranking products
        drop_cols = ['product_id', 'place_name', 'place_id']
        append_col = 'product_name'
    if import_type == 'D': # ranking places
        drop_cols = ['product_id', 'product_name', 'place_id']
        append_col = 'place_name'
    prod_stats = SalesStatsDF(period_wks, end_date, products, locations,
        cities, zipcodes, MA_params=boxcar, compute_on_sales=rank_on_sales,
        NaN_allowance=NaN_allowance, print_rejects=print_rejects)
    if MA is not None:
        base_name = prod_stats.name + ' -- {}-Week Moving Average'.format(MA)
    else:
        base_name = prod_stats.name + ' -- '
    if len(rank_by) == 1 or fixed_order: # just need the RankProductsPlaces.results object
        if len(rank_by) == 1:
            rank_1 = RankProductsPlaces(prod_stats)
            # Rank on smoothed stats only when a moving-average window is set
            if MA is not None:
                rank_1.main(stat=rank_by[0])
            else:
                rank_1.main(smoothed=False, stat=rank_by[0])
            data = rank_1.ranked_df
            data.drop(drop_cols, axis=1, inplace=True)
        else:
            rank_1 = RankProductsPlaces(prod_stats)
            # `smoothed=MA` passes the window itself; main() only tests its
            # truthiness, so a non-None int behaves like True.
            rank_1.main(smoothed=MA, stat=rank_by[0])
            all_data = rank_1.ranked_df
            df_cols = all_data.columns  # NOTE(review): unused here -- confirm
            cols = []
            # Interleave the label column with each requested statistic column
            for rank_stat in rank_by:
                cols.append(append_col)
                cols.append(grab_column(stat=rank_stat, smoothed=MA))
            data = all_data[cols]
    if len(rank_by) > 1 and not fixed_order:
        # Re-rank independently for every requested statistic
        rank_1 = RankProductsPlaces(prod_stats)
        rank_1.main(smoothed=MA, stat=rank_by[0])
        data = rank_1.results
        for i, rank_stat in enumerate(rank_by[1:]):
            rank_next = RankProductsPlaces(prod_stats)
            rank_next.main(smoothed=MA, stat=rank_stat)
            next_ranked = rank_next.results
            # Append the secondary ranking's labels and statistic as columns
            data['Ranking By {}'.format(rank_stat)] = next_ranked.iloc[:,0].values
            data[next_ranked.columns[-1]] = next_ranked.iloc[:,-1].values
        # data.drop(drop_cols, axis=1, inplace=True)
    data = data[::-1] # reverse row order for matplotlib bar graphing
    data.name = base_name
    return data
def grab_column(stat, smoothed):
    """Return the column label used by HbarData's fixed_order bar graph.

    `smoothed` is the moving-average window (weeks) or a falsy value for
    raw-trend columns; 'sales' always maps to the same label.
    """
    if stat == 'sales':
        return 'avg weekly sales'
    raw_labels = {'gain': 'SHIFTED to t0=0 avg weekly gain ($)',
                  'rate': 'NORMD growth rate'}
    if stat in raw_labels:
        if smoothed:
            # Smoothed columns carry the window-size prefix.
            return '{}wk MA {}'.format(smoothed, raw_labels[stat])
        return raw_labels[stat]
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
GENERATE BEST-SELLER DATA
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
def BestSellerData(period_wks, end_date, products=[None], locations=[None],
                   cities=[None], zipcodes=[None], MA_param=None, NaN_allowance=5,
                   print_rejects=False, compute_on_sales=True, N_periods=10,
                   freq='7D', rank_by='rate'):
    """Return objects for graphing in graph_trends.PlotBestSellers():
    1) BestSellers_df: dataframe summarizing rankings for products over a series of
    N-identical-length periods spaced at equal intervals and
    2) labeler: dictionary containing label names and positions for plot
    ARGUMENTS:
    -- period_wks: (int, default=10) length of sampling periods in weeks
    -- end_date: (date string of form 'MM/DD/YYYY', default=None) end_date of most recent
       ranking period
    PROVIDE ONE OF THE BELOW OR A COMBINATION OF TWO ARGUMENTS with ONE OF THE
    TWO CONTAINING ONLY ONE VALUE IN ITS LIST
    -- products: (list of ints or strings) list of product names and/or IDs for
       filtering or statistical comparison
    -- locations: (list of ints or strings) list of retail store names and/or
       IDs for filtering or statistical comparison
    -- cities: (list of strings) list of cities for filtering or statistical
       comparison
    -- zipcodes: (list of 5-digit zipcodes as ints) list of zipcodes for filtering
       or statistical comparison
    OPTIONAL:
    -- MA_param: (int or NoneType, default=None) rolling "boxcar" window, in weeks, by which to
       compute moving averages; None: ranks on non-smoothed (raw) trend data
    -- NaN_allowance: (int or float from 0 to 100, default=5) max allowable
       percentage of NaNs in product ts samples for statistical aggregation;
    -- print_rejects: (bool, default=False) If True, print any products rejected
       for excess null values in sample with their corresponding ratio of nulls
       present in the dataset
    -- compute_on_sales: (bool, default=True) ranks on sales data; if False,
       ranks on units-sold data
    -- N_periods: (int, default=10) number of periods including latest for comparison
    -- freq: (str, default='7D') pandas date_range() argument; interval between
       periods for comparison. Other possible values: 'W' (Sunday-ending week),
       'M' (month), 'Y', '2W', etc. See documentation for Pandas base time-series
       frequencies.
    -- rank_by: (string, default='rate') statistic by which to rank products.
       Values:
        * 'rate' = growth rate index for products with data
          normalized (rescaled -100, 100) for sales volumes
        * 'gain' = uniform weekly gain or loss over period
        * 'sales' = cumulative sales over period
    """
    # Identify variable and filter for comparison
    product_place_args = [products, locations, cities, zipcodes]
    import_type, var_index = select_import_params(product_place_args)
    # Generate list of end_dates for multiple ranking periods
    end_dates = generate_dates(end_date, N_periods, freq)
    # Generate data consisting of product rankings over multiple periods
    data_A = OrderedDict()
    excess_null_error_msg = ('\nDATA FOR SOME PRODUCTS OR PLACES CONTAINED TOO MANY NULLS TO RANK.\n\n'
        '** For details, re-run function with print_rejects keyword argument'
        ' set to True.\n\n** To ignore null values and proceed with rankings'
        ' (substituting zero for\nnulls in computations),'
        ' re-run function and set keyword argument NaN_allowance to 100.'
        )
    # Generate rankings and add to data dictionary (data_A) where keys = specified
    # end_dates of the comparison periods; values = the product IDs ordered by rank
    for i, end_d in enumerate(end_dates):
        # On user command, executes diagnostic to reveal products w excess nulls
        if print_rejects:
            # BUGFIX: format the message before printing. The original chained
            # .format() onto print(...), which returns None under Python 3
            # (it only worked as a Python 2 print statement).
            print('EXCESS NULL VALUES FROM PERIOD ENDING {}:\n'.format(end_d))
        # Compute stats on products for one test period at a time
        if MA_param is not None:
            psdf, rej = SalesStatsDF(period_wks=period_wks, end_date=end_d,
                products=products, locations=locations, cities=cities,
                zipcodes=zipcodes,
                MA_params=[MA_param], NaN_allowance=NaN_allowance,
                print_rejects=print_rejects, return_rejects=True,
                normed=True, compute_on_sales=compute_on_sales
                )
        else: # if no moving-avg window specified . . .
            psdf, rej = SalesStatsDF(period_wks=period_wks, end_date=end_d,
                products=products, locations=locations, cities=cities,
                zipcodes=zipcodes,
                NaN_allowance=NaN_allowance,
                print_rejects=print_rejects, return_rejects=True,
                normed=True, compute_on_sales=compute_on_sales
                )
        # If undiagnosed excess nulls, print error message and exit from
        # function (check was duplicated in both branches above; consolidated)
        if len(rej) > 0 and not print_rejects:
            print(excess_null_error_msg)
            return None, None
        ranked = RankProductsPlaces(psdf)
        if MA_param is not None:
            ranked.main(smoothed=True, stat=rank_by)
        else:
            ranked.main(smoothed=False, stat=rank_by)
        if import_type == 'C':
            data_A[end_d] = ranked.ranked_products
        else:
            data_A[end_d] = ranked.ranked_places
    # Reconfigure data_A into a dictionary (data_B) of keys=products or places,
    # values=list of a product/place's rankings over the series of comparison periods
    data_B = OrderedDict()
    if import_type == 'C': # variable = products
        # create dictionary with keys as products and values as empty lists
        for prod in products:
            if type(prod) == str:
                data_B[prod.lower()] = []
            else:
                data_B[product_name_from_ID(prod)] = []
        # append rankings to list-values in dictionary
        # PORTABILITY FIX: .values() replaces Python-2-only .itervalues()
        for prod_arr in data_A.values():
            for i, prod in enumerate(prod_arr):
                if type(prod) == str:
                    data_B[prod.lower()].append(i+1) # i+1 represents product rank
                else:
                    data_B[product_name_from_ID(prod)].append(i+1)
    if import_type == 'D': # variable = places
        for place in product_place_args[var_index]:
            if type(place) == str:
                data_B[place.lower()] = []
            else:
                if var_index == 3: # variable = zipcodes
                    data_B[str(place)] = []
                else:
                    data_B[locations_name_from_ID(place)] = []
        for place_arr in data_A.values():
            for i, place in enumerate(place_arr):
                if type(place) == str:
                    data_B[place.lower()].append(i+1) # i+1 represents product rank
                else:
                    if var_index == 3: # variable = zipcodes
                        data_B[str(place)].append(i+1)
                    else: # variable is location by ID
                        data_B[locations_name_from_ID(place)].append(i+1)
    # Construct output dataframes
    mask = lambda x: datetime.strptime(x, '%m/%d/%Y')
    date_idx = [mask(dt) for dt in end_dates] # for DatetimeIndex of BestSellers_df
    title = best_seller_title(MA_param, compute_on_sales, N_periods,
                              period_wks, rank_by, freq)
    try: # Exits function if data contains excess null values
        if import_type == 'C':
            df_A = pd.DataFrame(data_A, index=range(1, len(products)+1))
        else:
            df_A = pd.DataFrame(data_A,
                index=range(1, len(product_place_args[var_index])+1)
                )
    except ValueError:
        if not print_rejects:
            print(excess_null_error_msg)
        return None, None
    else:
        # index of df_A represents rank levels 1 to N
        BestSellers_df = pd.DataFrame(data_B, index=date_idx)
        # Sort BestSellers_df columns by cumulative rankings
        sum_o_ranks = BestSellers_df.sum()
        foo = sum_o_ranks.sort_values(ascending=True)
        sorted_by_best = list(foo.index)
        BestSellers_df = BestSellers_df[sorted_by_best]
        df_A.name = title
        BestSellers_df.name = title
        # Create labeler dict with labels (keys) and their positions (vals)
        if import_type == 'C':
            labels = [names_formatted[product_name_from_ID(prod_ID)] \
                for prod_ID in df_A.iloc[:,-1]]
        if import_type == 'D':
            if var_index == 1 and type(locations[0]) != str:
                # if variable is location by ID number
                labels = [locations_name_from_ID(place_ID) \
                    for place_ID in df_A.iloc[:,-1]]
            elif var_index == 3: # variable is zipcode
                labels = [str(place_ID) for place_ID in df_A.iloc[:,-1]]
            else:
                labels = [place_ID for place_ID in df_A.iloc[:,-1]]
        labeler = {}
        for i, var in enumerate(labels):
            labeler[var] = i + 1
        return BestSellers_df, labeler
def generate_dates(end_date, N_periods=10, freq='7D'):
    """Make list of end_dates for ranking periods based on BestSellerData params."""
    # Anchor the range on the most recent period's end date and walk backward.
    anchor = datetime.strptime(end_date, '%m/%d/%Y')
    window = pd.date_range(end=anchor, periods=N_periods, freq=freq)
    return [stamp.strftime('%m/%d/%Y') for stamp in window]
def parse_freq(freq):
    """Translate a pandas frequency string into a human-readable phrase.

    Returns None for unrecognized frequency strings.
    """
    if freq in ('7D', 'W'):
        return 'at weekly intervals'
    if 'W' in freq and len(freq) > 1:
        # e.g. '2W' -> leading character is the multiplier
        return 'at {}-week intervals'.format(freq[0])
    if freq == 'M':
        return 'at monthly intervals'
    if 'M' in freq and len(freq) > 1:
        return 'at {}-month intervals'.format(freq[0])
    if freq == 'Y':
        return 'spaced annually'
def best_seller_title(MA_param, compute_on_sales, N_periods, period_wks,
                      rank_by, freq):
    "Construct title (pandas.DataFrame.name) for BestSellerData objects."
    # Human-readable name for the ranking statistic
    if rank_by == 'rate':
        stat_label = 'Relative Growth Rate'
    elif rank_by == 'gain':
        stat_label = 'Uniform Weekly Gain/Loss in Sales'
    elif rank_by == 'sales':
        stat_label = 'Average Weekly Sales'
    head = 'Successive Rankings on {}'.format(stat_label)
    span = 'Rankings over {} consecutive {}-week periods, '.format(N_periods,
                                                                   period_wks)
    spacing = 'spaced {}'.format(parse_freq(freq))
    metric = 'sales.' if compute_on_sales else 'units sold.'
    if MA_param:
        tail = '\nComputed on {}-week moving-average trends in daily {}\n'\
            .format(MA_param, metric)
    else:
        tail = '\nComputed on trends in daily {}'.format(metric)
    return head + ' -- ' + span + spacing + tail
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
SUPPORTING AND STAND-ALONE FUNCTIONS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
def biz_lookup(conn_str, cities=None, zipcodes=None):
    """
    Return a list of IDs (ints) for all businesses in either:
    -- A city (str) or list of cities, OR
    -- A zipcode (5-digit int) or list of zipcodes

    Prints an error and returns None if both cities and zipcodes are given.
    NOTE: values are interpolated directly into the SQL text; call only with
    trusted inputs (a parameterized query would be safer).
    """
    if cities is not None and zipcodes is not None:
        # Parenthesized so it works as both a Py2 statement and Py3 function
        print("\nERROR: Enter value(s) for city OR zip; NOT BOTH.")
        return
    conn = create_engine(conn_str)
    biz_list = []
    # Normalize scalar arguments to single-item lists so both paths iterate
    if type(cities) != list:
        cities = [cities]
    if type(zipcodes) != list:
        zipcodes = [zipcodes]
    if cities[0] is not None:
        A = """
            SELECT DISTINCT(locs.name)
            FROM locations locs
            JOIN weekly_sales ws
             ON locs.wa_location_id = ws.location_id
            WHERE locs.city = '{}'
            """.format(cities[0].upper())
        B = ''
        if len(cities) > 1:
            for city in cities[1:]:
                # BUGFIX: quote and upper-case additional cities to match the
                # seed WHERE clause; the unquoted form produced invalid SQL.
                B += "\nOR locs.city = '{}'".format(city.upper())
        C = ';'
        query_by_city = A + B + C
        results = conn.execute(query_by_city)
        for shop in results:
            # Map returned business names to IDs via module-level locations_dict
            biz_list.append(locations_dict[shop[0]])
    else:
        A = """
            SELECT DISTINCT(location_id)
            FROM weekly_sales
            WHERE zip = '{}'
            """.format(zipcodes[0])
        B = ''
        if len(zipcodes) > 1:
            for zipcode in zipcodes[1:]:
                # BUGFIX: quote additional zipcodes; the seed clause compares
                # zip to a quoted literal, so OR terms must match its type.
                B += "\nOR zip = '{}'".format(zipcode)
        C = ';'
        query_by_zip = A + B + C
        results = conn.execute(query_by_zip)
        for shop in results:
            biz_list.append(shop[0])
    return biz_list
def select_import_params(arg_list):
    """
    Return tuple (str, int or None) containing ImportSalesData initialization configs with
    iterable 'variable' and index of 'filter' as derived from user-inputs in parent function.
    Return error trigger if user arguments contain conflicting values.
    Types:
    A = Import sales data statewide for all products (no variable or filter)
    B = Import sales data for a single product OR a single location OR a single product
        in a single location
    C = Import sales data for multiple products (variable), optionally within a single
        location (filter)
    D = Import sales data for multiple locations (variable), optionally filtered by a
        single product (filter)
    E = ERROR_1: Conflicing values contained in arguments
    """
    # find variable; the product or place category for comparison via iteration
    # BUGFIX: the original used bare map(), whose Python 3 iterator has no
    # .count()/.index(); list comprehensions behave identically on Python 2.
    find_var = [len(arg) > 1 for arg in arg_list]
    # find arguments with user-specified values
    find_args = [arg[0] is not None for arg in arg_list]
    n_multi = find_var.count(True)
    if n_multi == 0:  # If no arguments contain more than one value...
        n_args = find_args.count(True)
        if n_args == 0:  # user did not specify variable or filter
            return 'A', None
        elif n_args > 2:  # user entered too many arguments
            return 'E', None
        else:  # User specified a single value in a single argument, a product or a place
            return 'B', None
    elif n_multi > 1:  # if user arguments exceed the limit of one variable
        return 'E', None
    else:  # If one argument contains multiple values (the 'variable')
        if find_var.index(True) == 0:  # Compare / iterate on product
            return 'C', None
        else:  # Compare / iterate on places
            return 'D', find_var.index(True)
def import_ala_params(period_wks, end_date, product=None, location=None, city=None, zipcode=None,
                      MA_params=None, normed=True, baseline='t_zero', compute_on_sales=True,
                      NaN_allowance=5, return_trendsDF=False, var_type='product'):
    """
    Import sales data per user params in parent function and return the following objects (tup):
    If return_trendsDF set to False (for SalesStatsDF):
    -- SalesTrends.trend_stats
    -- SalesTrends.NaNs_ratio
    -- SalesTrends.trendsDF.name
    Else (for CompTrendsDF):
    -- SalesTrends.trendsDF
    -- (str) SalesTrends.product_name (if var_type set to 'product')
       OR SalesTrends.place_name (if var_type != 'product')

    On failure (no sales data for the parameter combination), prints an
    error and returns a tuple of Nones shaped to the requested output arity.
    """
    raw_data = ImportSalesData(product, location, city, zipcode)
    raw_data.main()
    if raw_data.product_df is not None:
        # Choose dollars vs. units as the series to analyze
        if compute_on_sales:
            ts = raw_data.sales
        else:
            ts = raw_data.units_sold
        trends_data = SalesTrendsDF(ts, period_wks, end_date, MA_params, normed, baseline,
            NaN_filler=0.0)
        trends_data.main()
        if return_trendsDF:
            # The 'variable' is the dimension the caller iterates over; the
            # other dimension (when present) acts as the filter.
            if var_type == 'product':
                var_name = trends_data.product_name
                # NOTE(review): `None if x is None else x` is equivalent to
                # plain `x`; kept as written -- confirm no subtlety intended.
                filter_name = \
                    None if trends_data.place_name is None else trends_data.place_name
                filter_ID = \
                    None if trends_data.place_ID is None else trends_data.place_ID
            else:
                var_name = trends_data.place_name
                filter_name = \
                    None if trends_data.product_name is None else trends_data.product_name
                filter_ID = \
                    None if trends_data.product_ID is None else trends_data.product_ID
            return (trends_data.trendsDF,
                var_name,
                filter_name,
                filter_ID)
        else:
            return (trends_data.trend_stats,
                trends_data.NaNs_ratio,
                trends_data.trendsDF.name)
    else:
        # No data for this parameter combination: report and bail out with
        # None placeholders matching the requested return shape.
        params_lst = [product, location, city, zipcode]
        print ('ERROR: Invalid entry, no sales data associated with this combination\n'
            'of values: {}\n'.format(params_lst)
            )
        if not return_trendsDF:
            return None, None, None
        else:
            return None, None, None, None
def rank_products(product_stats_df, N_results=None):
    "Rank N-top products by user-selected statistic; output in pandas DataFrame"
    # Prompt the user for the ranking statistic, then sort descending on it.
    stat_col = product_stats_df.columns[sel_rank_by(product_stats_df)]
    ordered = product_stats_df.sort_values(by=stat_col, ascending=False)
    keep = ['product_name', 'product_id', stat_col]
    # Truncate to the top N only when a cap was requested.
    return ordered[keep][:N_results] if N_results else ordered[keep]
def sel_rank_by(product_stats_df):
    "Prompt user for column for ranking; return its index"
    # Statistic columns start after the two identifier columns.
    stat_options = product_stats_df.columns[2:]
    for pos, col_name in enumerate(stat_options, start=1):
        print(str(pos) + ' -- ' + col_name)
    # raw_input is Python 2; menu choice k maps back to columns[k + 1].
    choice = int(raw_input('\nSelect statistic for ranking.'))
    return choice + 1
def compute_rolling_avg(ts, window_wks):
    """
    INPUT: complete time series (Series) and moving 'boxcar' window in weeks
    OUTPUT: rolling average values
    """
    # The underlying data is daily, so convert the window from weeks to days.
    window_days = window_wks * 7
    return ts.rolling(window=window_days).mean()
def slice_timeseries(ts, period_wks, end_date=None):
    """Slice a daily time series to the last `period_wks` weeks.

    ARGUMENTS:
    -- ts: pandas Series indexed by a daily DatetimeIndex
    -- period_wks: (int) sample length in weeks
    -- end_date: (date string, e.g. '07/31/2017') optional end of sample;
       default slices from the tail of the series
    Returns the sliced Series.
    """
    days = period_wks * 7
    if end_date:
        # BUGFIX: the original computed `end_date - days` on a string, which
        # raises TypeError. Convert to a timestamp and slice by label; label
        # slicing is end-inclusive, so step back days-1 to yield `days` points.
        end = pd.to_datetime(end_date)
        start = end - pd.Timedelta(days=days - 1)
        return ts[start:end]
    else:
        return ts[-days:]
def norm_Series(ts):
    """Returns time series normalized then shifted such that t0 = 0
    NOTE: Due to shifting, some normed values may exceed the feature range (-1, 1)
    """
    # Rescale to (-1, 1) with sklearn's MinMaxScaler (expects a 2-D array).
    raw = ts.values.reshape(-1, 1)
    scaler = MinMaxScaler(feature_range=(-1, 1)).fit(raw)
    scaled = scaler.transform(raw).flatten()
    # Shift so the first observation sits at zero.
    shifted = pd.Series(scaled - scaled[0], index=ts.index)
    shifted.name = ts.name + ' NORMED'
    return shifted
def trend_AUC(ts, normalize=False):
    """
    INPUT: trend data in time series (pandas.Series)
    OUTPUT:
    -- default: area under curve (AUC) for shifted trend data
    -- normalize=True: AUC for normed then shifted trend data
    """
    # COMPAT FIX: np.trapz was removed in NumPy 2.0 in favor of np.trapezoid;
    # resolve whichever is available so the function works on both versions.
    _trapz = getattr(np, 'trapezoid', None) or getattr(np, 'trapz')
    # norm_Series already anchors t0 at 0, so the extra shift is a no-op
    # there, but it is what anchors the raw-data path.
    source = norm_Series(ts) if normalize else ts
    values = source.values
    return _trapz(values - values[0])
def add_rolling_avg_col(df, window_wks, data_col='ttl_sales'):
    """Add rolling average column to ImportSalesData.product_df object"""
    # Window arrives in weeks; the frame holds daily rows.
    window_days = 7 * window_wks
    df['rolling_{}wk'.format(window_wks)] = (
        df[data_col].rolling(window=window_days).mean()
    )
class ImportSalesDataOLD(object):
    """
    FOR USE IN "TOY DATABASE" CONSISTING OF DATA AGGREGATED TO DAILY FREQUENCY
    Query sales data from postgres and import to pandas data objects. Initialize
    with product ID (int) or name (string) then run main() method to populate attributes
    ATTRIBUTES:
    -- product_df: pandas time series (DataFrame) with daily sales in dollars and units
    -- sales: pandas time series (Series) of total daily sales
    -- units_sold: pandas time series (Series) of total daily units sold
    -- product_id (int)
    -- product_name (string)
    -- ts_start, ts_end (Datetime) start and end dates for time series, to assist
       testing for continuity and synchronization among comparative products
    NOTE: DataFrame and Series title strings with product name and ID may be accessed
    via DataFrame.name and Series.name attributes
    """
    def __init__(self, product):
        self.product = product  # ID (int) or name (str); resolved in _retrieve_ID
        self.product_id = None
        self.product_name = None
        self._query = None  # SQL text built by _query_product_sales
        self._connection_str = 'postgresql:///uplift'
        self._conn = None  # sqlalchemy engine, created in _connect_to_postgres
        self.product_df = None
        self.sales = None
        self.units_sold = None
        self.ts_start, self.ts_end = None, None
    def main(self):
        # Pipeline: resolve ID -> build SQL -> connect -> populate DataFrames
        self._retrieve_ID()
        self._query_product_sales()
        self._connect_to_postgres()
        self._SQL2pandasdf()
    def _retrieve_ID(self):
        # Names are looked up case-insensitively in the module-level
        # strain_dict; numeric IDs pass through unchanged.
        if type(self.product) == str:
            key = self.product.lower()
            self.product_id = strain_dict[key]
        else:
            self.product_id = self.product
    def _query_product_sales(self):
        # Daily dollar and unit totals for one product. NOTE(review):
        # product_id is interpolated into the SQL text; it is numeric by this
        # point, but a parameterized query would be safer.
        self._query = ("""
            SELECT CAST(DATE_TRUNC('day', date_of_sale) AS DATE) as date
            , strain_name as product_name
            , generic_strain_id as product_id
            , ROUND(SUM(retail_price)) as ttl_sales
            , ROUND(SUM(retail_units)) as ttl_units_sold
            FROM daily_sales
            WHERE generic_strain_id = {}
            GROUP BY date, strain_name, generic_strain_id
            ORDER BY date;
            """).format(self.product_id)
    def _connect_to_postgres(self):
        self._conn = create_engine(self._connection_str)
    def _SQL2pandasdf(self):
        # Pull raw rows, then re-index onto a gap-free daily DatetimeIndex so
        # days with no sales appear as NaN instead of being absent.
        stage_1 = pd.read_sql_query(self._query, self._conn)
        stage_2 = pd.DataFrame(stage_1[['ttl_sales', 'ttl_units_sold']])
        stage_2.index = pd.DatetimeIndex(stage_1['date'])
        # Construct continuous time series even if data is discontinuous
        self.ts_start, self.ts_end = stage_2.index[0], stage_2.index[-1]
        main_idx = pd.date_range(start=self.ts_start, end=self.ts_end)
        self.product_df = pd.DataFrame(index=main_idx)
        self.product_df['ttl_sales'] = stage_2['ttl_sales']
        self.product_df['ttl_units_sold'] = stage_2['ttl_units_sold']
        # Resolve the display name via module-level lookup tables.
        self.product_name = names_formatted[product_name_from_ID(self.product_id)]
        df_name = '{} (ID: {})'.format(self.product_name, self.product_id)
        self.product_df.name = df_name
        self.sales = self.product_df['ttl_sales']
        self.sales.name = df_name + ' -- Daily Sales'
        self.units_sold = self.product_df['ttl_units_sold']
        self.units_sold.name = df_name + ' -- Daily Units Sold'
if __name__=='__main__':
    # Smoke test of the import -> trends -> stats pipeline.
    """Set input variables"""
    products = range(1, 10) # list of product IDs
    MAs = [5] # list of moving average window(s) in weeks
    sample_period = 20 # in weeks
    """Run ImportSalesData method and access class attributes"""
    product_3 = ImportSalesData(3)
    product_3.main()
    raw_df_3 = product_3.product_df # DataFrame of daily sales and units for product
    sales_3 = product_3.sales # time series (pd.Series) of daily sales
    units_3 = product_3.units_sold # time Series of daily units sold
    """Run SalesTrendsDF method and access class attributes"""
    trends_3 = SalesTrendsDF(sales_3, sample_period, MA_params=MAs)
    trends_3.main()
    # NOTE(review): accesses the private _trendsDF attribute; code elsewhere
    # uses the public trendsDF -- confirm which is intended.
    trends_df_3 = trends_3._trendsDF # DataFrame with columns of transformed data
    stats_3 = trends_3.trend_stats # Single record (OrderedDict) of stats for product
    """Run SalesStatsDF function to generate comparative stats DF; Builds DF from
    individual records in the form of SalesTrendsDF.trend_stats objects"""
    # NOTE(review): argument order (products, sample_period) differs from the
    # SalesStatsDF(period_wks, end_date, ...) signature used earlier in this
    # file; possibly written against an older API -- confirm.
    comps_df = SalesStatsDF(products, sample_period, MA_params=MAs)
    """Print various attributes (names, DFs, Series) to test pipeline"""
    print(raw_df_3.name)
    print(raw_df_3.head(2))
    print('\n' + sales_3.name)
    print(sales_3.head(2))
    print('\n' + units_3.name)
    print(units_3.head(2))
    print('\n' + trends_df_3.name)
    print(trends_df_3.head(2))
    print('\n')
    print(stats_3)
    print('\n')
    print(comps_df)
|
from django.conf.urls import include, url
from django.contrib import admin
from datasets import views
urlpatterns = [
    # Dataset-submission-sheet header (DSS) routes for the `datasets` app.
    url(r"^dssh/", views.AddNewDSS.as_view(), name="addssh"),
    url(r"^uploadbranchcodes/$", views.UploadBranchCode.as_view(), name="uploadbranchcode"),
    url(r"^submission/new/dssh/", views.AddNewDSS.as_view(), name="addsshpost"),
    # NOTE(review): the two routes below share name="viewdssh"; Django's
    # reverse('viewdssh') resolves only the last registration -- confirm
    # whether distinct names were intended.
    url(r"^view/dsshe/", views.ViewDSS.as_view(), name="viewdssh"),
    url(r"^updating/dss/", views.ViewDSS.as_view(), name="viewdssh"),
    # NOTE(review): duplicate name="updatedssh" on both edit routes as well.
    url(r"^edit/dsshid/(?P<dsshID>\d+)/$", views.UpdateHeaderFiles.as_view(), name="updatedssh"),
    url(r"^upheaders/dsshupdatedversion/(?P<updatingID>\d+)/$", views.UpdateHeaderFiles.as_view(), name="updatedssh"),
]
|
import torch.nn as nn
import torch
from relogic.logickit.base.utils import log
from typing import Tuple
from relogic.logickit.modules.input_variational_dropout import InputVariationalDropout
from relogic.logickit.modules.bilinear_matrix_attention import BilinearMatrixAttention
import copy
import numpy
from relogic.logickit.utils.utils import get_range_vector, get_device_of, masked_log_softmax
from relogic.logickit.modules.chi_liu_edmonds import decode_mst
import torch.nn.functional as F
class BiaffineDepModule(nn.Module):
  """Biaffine attention module for dependency parsing.

  Scores every (head, child) word pair with a biaffine attention over
  arc representations and classifies each chosen arc with a bilinear
  tag scorer.  A learned sentinel vector is prepended to every sequence
  to act as the artificial ROOT token.  Greedy head selection is used
  during training; MST (Chu-Liu/Edmonds) decoding is used at inference
  to guarantee well-formed trees.
  """
  def __init__(self, config, task_name, n_classes):
    """Build projections and scorers.

    Args:
      config: model configuration; reads ``hidden_size``,
        ``dep_parsing_mlp_dim``, ``dropout`` and optionally
        ``sequence_labeling_use_cls``.
      task_name: identifier of the task this module serves.
      n_classes: number of dependency label types.
    """
    super(BiaffineDepModule, self).__init__()
    self.config = config
    self.task_name = task_name
    self.n_classes = n_classes
    # ``mul`` widens downstream dimensions when the CLS vector is
    # concatenated to token representations elsewhere in the pipeline.
    if hasattr(self.config, "sequence_labeling_use_cls") and self.config.sequence_labeling_use_cls:
      self.mul = 2
      log("Use CLS in dependency parsing")
    else:
      self.mul = 1
    encoder_dim = config.hidden_size
    arc_representation_dim = tag_representation_dim = config.dep_parsing_mlp_dim
    # self.pos_tag_embedding = nn.Embedding()
    # Learned embedding for the artificial ROOT token prepended in forward().
    self.head_sentinel = torch.nn.Parameter(torch.randn([1, 1, config.hidden_size]))
    # TODO: Need to check the dropout attribute.
    # TODO: How to design task specific parameter configuration
    self.dropout = InputVariationalDropout(config.dropout)
    # Separate MLPs for a word acting as a head vs. acting as a child.
    self.head_arc_feedforward = nn.Sequential(
      nn.Linear(encoder_dim, arc_representation_dim),
      nn.ELU())
    self.child_arc_feedforward = copy.deepcopy(self.head_arc_feedforward)
    self.head_tag_feedforward = nn.Sequential(
      nn.Linear(encoder_dim, tag_representation_dim),
      nn.ELU())
    self.child_tag_feedforward = copy.deepcopy(self.head_tag_feedforward)
    # Biaffine scorer over all (head, child) pairs.
    self.arc_attention = BilinearMatrixAttention(
      matrix_1_dim=arc_representation_dim,
      matrix_2_dim=arc_representation_dim,
      use_input_biases=True)
    # Bilinear label scorer applied to each selected arc.
    self.tag_bilinear = nn.modules.Bilinear(
      tag_representation_dim, tag_representation_dim, self.n_classes)
  def forward(self, *input, **kwargs):
    """Score arcs/labels and compute the parsing loss for one batch.

    Reads from ``kwargs``:
      features: (batch, seq_len, hidden) encoder output.
      input_head: tensor whose positions equal to 1 mark real words;
        used to build the padding mask.
      arcs_ids (optional): gold head index for every word.
      label_ids (optional): gold dependency label for every word.

    Returns a dict with ``heads``, ``head_tags``, ``arc_loss``,
    ``tag_loss``, ``loss`` (their sum) and ``mask`` (with the ROOT
    position prepended).
    """
    features = kwargs.pop("features")
    mask = (kwargs.pop("input_head") == 1).long()
    head_indices = kwargs.pop("arcs_ids", None)
    head_tags = kwargs.pop("label_ids", None)
    batch_size = features.size(0)
    encoding_dim = features.size(2)
    # Prepend the learned sentinel acting as the ROOT token, and extend
    # mask / gold tensors accordingly (ROOT's own head/label are 0).
    head_sentinel = self.head_sentinel.expand(batch_size, 1, encoding_dim)
    encoded_text = torch.cat([head_sentinel, features], dim=1)
    mask = torch.cat([mask.new_ones(batch_size, 1), mask], dim=1)
    float_mask = mask.float()
    if head_indices is not None:
      head_indices = torch.cat([head_indices.new_zeros(batch_size, 1), head_indices], 1)
    if head_tags is not None:
      head_tags = torch.cat([head_tags.new_zeros(batch_size, 1), head_tags], 1)
    encoded_text = self.dropout(encoded_text)
    head_arc_representation = self.dropout(self.head_arc_feedforward(encoded_text))
    child_arc_representation = self.dropout(self.child_arc_feedforward(encoded_text))
    head_tag_representation = self.dropout(self.head_tag_feedforward(encoded_text))
    child_tag_representation = self.dropout(self.child_tag_feedforward(encoded_text))
    attended_arcs = self.arc_attention(head_arc_representation, child_arc_representation)
    # shape (batch_size, sequence_length, sequence_length)
    # Push padded rows/columns towards -inf so softmax ignores them.
    minus_inf = -1e8
    minus_mask = (1 - float_mask) * minus_inf
    attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
    # Greedy decoding is cheap and sufficient while training; MST
    # decoding guarantees a valid tree at evaluation time.
    if self.training:
      predicted_heads, predicted_head_tags = self.greedy_decode(
        head_tag_representation, child_tag_representation, attended_arcs, mask)
    else:
      predicted_heads, predicted_head_tags = self.mst_decode(
        head_tag_representation, child_tag_representation, attended_arcs, mask
      )
    # With gold annotations the loss is supervised; otherwise the loss is
    # computed against the model's own predictions (monitoring only).
    if head_indices is not None and head_tags is not None:
      arc_nll, tag_nll = self.construct_loss(
        head_tag_representation=head_tag_representation,
        child_tag_representation=child_tag_representation,
        attended_arcs=attended_arcs,
        head_indices=head_indices,
        head_tags=head_tags,
        mask=mask,
      )
    else:
      arc_nll, tag_nll = self.construct_loss(
        head_tag_representation=head_tag_representation,
        child_tag_representation=child_tag_representation,
        attended_arcs=attended_arcs,
        head_indices=predicted_heads.long(),
        head_tags=predicted_head_tags.long(),
        mask=mask,
      )
    loss = arc_nll + tag_nll
    # if head_indices is not None and head_tags is not None:
    #   evaluation_mask = self._get_mask_for_eval(mask[:, 1:], pos_tags)
    #   # We calculate attatchment scores for the whole sentence
    #   # but excluding the symbolic ROOT token at the start,
    #   # which is why we start from the second element in the sequence.
    #   self._attachment_scores(
    #     predicted_heads[:, 1:],
    #     predicted_head_tags[:, 1:],
    #     head_indices,
    #     head_tags,
    #     evaluation_mask,
    #   )
    output_dict = {
      "heads": predicted_heads,
      "head_tags": predicted_head_tags,
      "arc_loss": arc_nll,
      "tag_loss": tag_nll,
      "loss": loss,
      "mask": mask}
    return output_dict
  def construct_loss(
      self,
      head_tag_representation: torch.Tensor,
      child_tag_representation: torch.Tensor,
      attended_arcs: torch.Tensor,
      head_indices: torch.Tensor,
      head_tags: torch.Tensor,
      mask: torch.Tensor,
  ) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Computes the arc and tag loss for a sequence given gold head indices and tags.
    Parameters
    ----------
    head_tag_representation : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    child_tag_representation : ``torch.Tensor``, required
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    attended_arcs : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
        a distribution over attachments of a given word to all other words.
    head_indices : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length).
        The indices of the heads for every word.
    head_tags : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length).
        The dependency labels of the heads for every word.
    mask : ``torch.Tensor``, required.
        A mask of shape (batch_size, sequence_length), denoting unpadded
        elements in the sequence.
    Returns
    -------
    arc_nll : ``torch.Tensor``, required.
        The negative log likelihood from the arc loss.
    tag_nll : ``torch.Tensor``, required.
        The negative log likelihood from the arc tag loss.
    """
    float_mask = mask.float()
    batch_size, sequence_length, _ = attended_arcs.size()
    # shape (batch_size, 1)
    range_vector = get_range_vector(batch_size, get_device_of(attended_arcs)).unsqueeze(1)
    # shape (batch_size, sequence_length, sequence_length)
    normalised_arc_logits = (
      masked_log_softmax(attended_arcs, mask)
      * float_mask.unsqueeze(2)
      * float_mask.unsqueeze(1)
    )
    # shape (batch_size, sequence_length, num_head_tags)
    head_tag_logits = self.get_head_tags(
      head_tag_representation, child_tag_representation, head_indices
    )
    normalised_head_tag_logits = masked_log_softmax(
      head_tag_logits, mask.unsqueeze(-1)
    ) * float_mask.unsqueeze(-1)
    # index matrix with shape (batch, sequence_length)
    timestep_index = get_range_vector(sequence_length, get_device_of(attended_arcs))
    child_index = (
      timestep_index.view(1, sequence_length).expand(batch_size, sequence_length).long()
    )
    # shape (batch_size, sequence_length)
    # Advanced indexing picks, per word, the log-prob of its gold head/tag.
    arc_loss = normalised_arc_logits[range_vector, child_index, head_indices]
    tag_loss = normalised_head_tag_logits[range_vector, child_index, head_tags]
    # We don't care about predictions for the symbolic ROOT token's head,
    # so we remove it from the loss.
    arc_loss = arc_loss[:, 1:]
    tag_loss = tag_loss[:, 1:]
    # The number of valid positions is equal to the number of unmasked elements minus
    # 1 per sequence in the batch, to account for the symbolic HEAD token.
    valid_positions = mask.sum() - batch_size
    arc_nll = -arc_loss.sum() / valid_positions.float()
    tag_nll = -tag_loss.sum() / valid_positions.float()
    return arc_nll, tag_nll
  def greedy_decode(
      self,
      head_tag_representation: torch.Tensor,
      child_tag_representation: torch.Tensor,
      attended_arcs: torch.Tensor,
      mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Decodes the head and head tag predictions by decoding the unlabeled arcs
    independently for each word and then again, predicting the head tags of
    these greedily chosen arcs independently. Note that this method of decoding
    is not guaranteed to produce trees (i.e. there maybe be multiple roots,
    or cycles when children are attached to their parents).
    Parameters
    ----------
    head_tag_representation : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    child_tag_representation : ``torch.Tensor``, required
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    attended_arcs : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
        a distribution over attachments of a given word to all other words.
    Returns
    -------
    heads : ``torch.Tensor``
        A tensor of shape (batch_size, sequence_length) representing the
        greedily decoded heads of each word.
    head_tags : ``torch.Tensor``
        A tensor of shape (batch_size, sequence_length) representing the
        dependency tags of the greedily decoded heads of each word.
    """
    # Forbid self-loops: a word may not be its own head.
    attended_arcs = attended_arcs + torch.diag(
      attended_arcs.new(mask.size(1)).fill_(-numpy.inf))
    # Compute the heads greedily.
    # shape (batch_size, sequence_length)
    _, heads = attended_arcs.max(dim=2)
    # Given the greedily predicted heads, decode their dependency tags.
    # shape (batch_size, sequence_length, num_head_tags)
    head_tag_logits = self.get_head_tags(
      head_tag_representation, child_tag_representation, heads
    )
    _, head_tags = head_tag_logits.max(dim=2)
    return heads, head_tags
  def get_head_tags(
      self,
      head_tag_representation: torch.Tensor,
      child_tag_representation: torch.Tensor,
      head_indices: torch.Tensor) -> torch.Tensor:
    """
    Decodes the head tags given the head and child tag representations
    and a tensor of head indices to compute tags for. Note that these are
    either gold or predicted heads, depending on whether this function is
    being called to compute the loss, or if it's being called during inference.
    Parameters
    ----------
    head_tag_representation : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    child_tag_representation : ``torch.Tensor``, required
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    head_indices : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length). The indices of the heads
        for every word.
    Returns
    -------
    head_tag_logits : ``torch.Tensor``
        A tensor of shape (batch_size, sequence_length, num_head_tags),
        representing logits for predicting a distribution over tags
        for each arc.
    """
    batch_size = head_tag_representation.size(0)
    # shape (batch_size,)
    range_vector = get_range_vector(
      batch_size, get_device_of(head_tag_representation)
    ).unsqueeze(1)
    # This next statement is quite a complex piece of indexing, which you really
    # need to read the docs to understand. See here:
    # https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html#advanced-indexing
    # In effect, we are selecting the indices corresponding to the heads of each word from the
    # sequence length dimension for each element in the batch.
    # shape (batch_size, sequence_length, tag_representation_dim)
    selected_head_tag_representations = head_tag_representation[range_vector, head_indices]
    selected_head_tag_representations = selected_head_tag_representations.contiguous()
    # shape (batch_size, sequence_length, num_head_tags)
    head_tag_logits = self.tag_bilinear(
      selected_head_tag_representations, child_tag_representation
    )
    return head_tag_logits
  def mst_decode(
      self,
      head_tag_representation: torch.Tensor,
      child_tag_representation: torch.Tensor,
      attended_arcs: torch.Tensor,
      mask: torch.Tensor,
  ) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Decodes the head and head tag predictions using the Edmonds' Algorithm
    for finding minimum spanning trees on directed graphs. Nodes in the
    graph are the words in the sentence, and between each pair of nodes,
    there is an edge in each direction, where the weight of the edge corresponds
    to the most likely dependency label probability for that arc. The MST is
    then generated from this directed graph.
    Parameters
    ----------
    head_tag_representation : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    child_tag_representation : ``torch.Tensor``, required
        A tensor of shape (batch_size, sequence_length, tag_representation_dim),
        which will be used to generate predictions for the dependency tags
        for the given arcs.
    attended_arcs : ``torch.Tensor``, required.
        A tensor of shape (batch_size, sequence_length, sequence_length) used to generate
        a distribution over attachments of a given word to all other words.
    Returns
    -------
    heads : ``torch.Tensor``
        A tensor of shape (batch_size, sequence_length) representing the
        greedily decoded heads of each word.
    head_tags : ``torch.Tensor``
        A tensor of shape (batch_size, sequence_length) representing the
        dependency tags of the optimally decoded heads of each word.
    """
    batch_size, sequence_length, tag_representation_dim = head_tag_representation.size()
    lengths = mask.data.sum(dim=1).long().cpu().numpy()
    # Expand both representations so every (head, child) pair can be
    # scored by the bilinear tag scorer in a single call.
    expanded_shape = [batch_size, sequence_length, sequence_length, tag_representation_dim]
    head_tag_representation = head_tag_representation.unsqueeze(2)
    head_tag_representation = head_tag_representation.expand(*expanded_shape).contiguous()
    child_tag_representation = child_tag_representation.unsqueeze(1)
    child_tag_representation = child_tag_representation.expand(*expanded_shape).contiguous()
    # Shape (batch_size, sequence_length, sequence_length, num_head_tags)
    pairwise_head_logits = self.tag_bilinear(head_tag_representation, child_tag_representation)
    # Note that this log_softmax is over the tag dimension, and we don't consider pairs
    # of tags which are invalid (e.g are a pair which includes a padded element) anyway below.
    # Shape (batch, num_labels,sequence_length, sequence_length)
    normalized_pairwise_head_logits = F.log_softmax(pairwise_head_logits, dim=3).permute(
      0, 3, 1, 2
    )
    # Mask padded tokens, because we only want to consider actual words as heads.
    minus_inf = -1e8
    minus_mask = (1 - mask.float()) * minus_inf
    attended_arcs = attended_arcs + minus_mask.unsqueeze(2) + minus_mask.unsqueeze(1)
    # Shape (batch_size, sequence_length, sequence_length)
    normalized_arc_logits = F.log_softmax(attended_arcs, dim=2).transpose(1, 2)
    # Shape (batch_size, num_head_tags, sequence_length, sequence_length)
    # This energy tensor expresses the following relation:
    # energy[i,j] = "Score that i is the head of j". In this
    # case, we have heads pointing to their children.
    batch_energy = torch.exp(
      normalized_arc_logits.unsqueeze(1) + normalized_pairwise_head_logits
    )
    return self._run_mst_decoding(batch_energy, lengths)
  @staticmethod
  def _run_mst_decoding(
      batch_energy: torch.Tensor, lengths: numpy.ndarray
  ) -> Tuple[torch.Tensor, torch.Tensor]:
    """Run MST decoding per sentence on CPU energies.

    ``lengths`` is the per-sentence unpadded length (numpy array, see
    ``mst_decode``).  Returns stacked head-index and head-tag tensors.
    """
    heads = []
    head_tags = []
    for energy, length in zip(batch_energy.detach().cpu(), lengths):
      # Collapse the tag dimension first: keep, per arc, its best tag.
      scores, tag_ids = energy.max(dim=0)
      # Although we need to include the root node so that the MST includes it,
      # we do not want any word to be the parent of the root node.
      # Here, we enforce this by setting the scores for all word -> ROOT edges
      # edges to be 0.
      scores[0, :] = 0
      # Decode the heads. Because we modify the scores to prevent
      # adding in word -> ROOT edges, we need to find the labels ourselves.
      instance_heads, _ = decode_mst(scores.numpy(), length, has_labels=False)
      # Find the labels which correspond to the edges in the max spanning tree.
      instance_head_tags = []
      for child, parent in enumerate(instance_heads):
        instance_head_tags.append(tag_ids[parent, child].item())
      # We don't care what the head or tag is for the root token, but by default it's
      # not necesarily the same in the batched vs unbatched case, which is annoying.
      # Here we'll just set them to zero.
      instance_heads[0] = 0
      instance_head_tags[0] = 0
      heads.append(instance_heads)
      head_tags.append(instance_head_tags)
    return torch.from_numpy(numpy.stack(heads)), torch.from_numpy(numpy.stack(head_tags))
  def _get_mask_for_eval(
      self, mask: torch.LongTensor, pos_tags: torch.LongTensor
  ) -> torch.LongTensor:
    """
    Dependency evaluation excludes words that are punctuation.
    Here, we create a new mask to exclude word indices which
    have a "punctuation-like" part of speech tag.

    NOTE(review): ``self._pos_to_ignore`` is never assigned in
    ``__init__`` — this helper (only referenced from commented-out code
    in ``forward``) would raise AttributeError if called as-is; confirm
    where the attribute is expected to come from.
    Parameters
    ----------
    mask : ``torch.LongTensor``, required.
        The original mask.
    pos_tags : ``torch.LongTensor``, required.
        The pos tags for the sequence.
    Returns
    -------
    A new mask, where any indices equal to labels
    we should be ignoring are masked.
    """
    new_mask = mask.detach()
    for label in self._pos_to_ignore:
      label_mask = pos_tags.eq(label).long()
      new_mask = new_mask * (1 - label_mask)
    return new_mask
|
def swap(first, second):  # argument names mandated by the assignment text
    """Exchange the contents of two lists in place, lengths included.

    After the call, ``first`` holds what ``second`` held and vice versa,
    regardless of which list was longer.
    """
    overlap = min(len(first), len(second))
    # Pairwise-swap the overlapping prefix.
    for idx in range(overlap):
        first[idx], second[idx] = second[idx], first[idx]
    # Move any surplus tail from the longer list onto the shorter one.
    if len(first) > overlap:
        second.extend(first[overlap:])
        del first[overlap:]
    elif len(second) > overlap:
        first.extend(second[overlap:])
        del second[overlap:]
# Demonstrate swap(): snapshot both lists, swap them, and verify each
# now equals the other's original contents.
first = [4, 5, 6, 7, 44, 23, 231231]
second = [1, 2, 3, 231, 23412, 1111, 22222, 423]
first_content = list(first)
second_content = list(second)
swap(first, second)
print(first, second_content, first == second_content)
print(second, first_content, second == first_content)
|
"""
calculator.py
Using our arithmetic.py file from Exercise02, create the
calculator program yourself in this file.
"""
from arithmetic import *
def calculator():
while True:
input = raw_input(">") #get input from user here
tokens = input.split(" ")
print tokens
if tokens[0] == "q":
break
else:
try:
arguments = []
for num in tokens[1:]:
arguments.append(float(num))
print arguments
print type(arguments[1])
num1 = float(tokens[1])
if tokens[0] == "+":
answer = add(arguments)
elif tokens[0] == "-":
answer = subtract(arguments)
elif tokens[0] == "*":
answer = multiply(arguments)
elif tokens[0] == "/":
answer = divide(arguments)
elif tokens[0] == "pow":
answer = power(num1, num2)
elif tokens[0] == "mod":
answer = mod(num1, num2)
elif tokens[0] == "square":
answer = square(num1)
elif tokens[0] == "cube":
answer = cube(num1)
else:
answer = "something something"
except (ValueError, NameError):
answer = "Error: non-integers entered."
print answer
calculator() |
# Prefer machine-specific settings when a local module is present;
# otherwise fall back to the checked-in base settings.
try:
    from ptop.settings.local import *
except ImportError:
    from ptop.settings.base import *
|
import numpy as np
# For each nu value, load column 5 of "nu<value>.dat" and record the
# field extreme plus its ratio to the nu=0 baseline.
# NOTE(review): despite the name, ``maxfield`` is the *minimum* of the
# column (min(field)) — confirm min() rather than max() is intended.
nulist = [0, 0.1, 0.2, 0.5, 1, 2, 3, 4, 5]
surffield = []
for nu in nulist:
    field = np.loadtxt("nu" + str(nu) + ".dat", usecols=(5,))
    maxfield = min(field)
    if nu == 0:
        # Baseline used to normalise every run (nu=0 comes first).
        zerofield = maxfield
    surffield.append([nu, maxfield, maxfield / zerofield])
np.savetxt("enhancement-factors.dat", surffield)
|
from rest_framework import permissions
class IsMemberOfChat(permissions.BasePermission):
    """Object-level permission: allow only users who belong to the chat."""

    def has_object_permission(self, request, view, chat):
        # Bug fix: removed leftover debug ``print(chat.members.all())``
        # that executed on every permission check.
        if request.user:
            return request.user in chat.members.all()
        return False
class IsAuthorOfChatMessage(permissions.BasePermission):
    """Object-level permission: allow only the message's author."""

    def has_object_permission(self, request, view, chat_message):
        user = request.user
        # Falsy user (no authentication) short-circuits to False.
        return bool(user) and chat_message.author == user
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.