text stringlengths 38 1.54M |
|---|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Denoise_RNN(nn.Module):
    """LSTM-based denoiser over batch-first sequences.

    Takes input of shape ``(batch, seq, inp_sz)`` and returns the LSTM
    output sequence permuted to ``(seq, batch, features)``.
    """

    def __init__(self, inp_sz, hidden_sz, num_layers, dropout, bidirectional):
        """
        Args:
            inp_sz (int): number of input features per timestep.
            hidden_sz (int): LSTM hidden size.
            num_layers (int): number of stacked LSTM layers.
            dropout (float): inter-layer dropout (only applies when num_layers > 1).
            bidirectional (bool): if True, output features double to 2 * hidden_sz.
        """
        super(Denoise_RNN, self).__init__()
        # Bug fixes: the keyword is ``batch_first`` (``Batch_first`` raises a
        # TypeError), and ``hidden_sz`` was previously ignored in favor of a
        # hard-coded hidden_size=2.
        self.LSTM = torch.nn.LSTM(
            input_size=inp_sz,
            hidden_size=hidden_sz,
            num_layers=num_layers,
            bias=True,
            batch_first=True,
            dropout=dropout,
            bidirectional=bidirectional,
        )

    def forward(self, inp):
        """Run the LSTM and return its output as (seq, batch, features)."""
        # nn.LSTM returns (output, (h_n, c_n)); the original tried to call
        # .permute on the tuple, which raises AttributeError.
        output, _ = self.LSTM(inp)
        # (batch, seq, features) -> (seq, batch, features)
        return output.permute(1, 0, 2)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# by:Adil
class SSK_SVC():
    """SVC classifier backed by the shogun subsequence string kernel (SSK).

    Works on SNP data or plain string data, e.g.:
        ['ACGTAGCTAGCTAC', 'ACGTAGCGATCGATC', ...]
        ['Are you ok', "Yes, I'm ok"]

    Attributes:
        clf: grid-searched sklearn SVC (kernel='precomputed').
        kernel: shogun SubsequenceStringKernel built on the training data.
        feats_train: shogun StringCharFeatures for the training data
            (an internal intermediate; callers need not touch it).
        isSNP: whether the SNP alphabet is used (vs. raw bytes).
    """

    def __init__(self):
        self.clf = None
        self.feats_train = None
        self.kernel = None
        self.isSNP = True

    def fit(self, data, label, maxlen, decay, isSNP=True):
        """Train on string data.

        Args:
            data: list of strings (preferred) or np.array convertible via tolist().
            label: training labels.
            maxlen: maximum subsequence length of the SSK kernel.
            decay: SSK decay factor.
            isSNP: True to use the SNP alphabet, False for raw bytes.
        """
        from sklearn.svm import SVC
        from sklearn.model_selection import GridSearchCV
        from shogun import SNP, RAWBYTE
        from shogun import StringCharFeatures
        from shogun import SubsequenceStringKernel
        param = {'C': [1, 2, 3, 3.5, 4, 4.5, 5, 6, 7, 8, 9, 10, 11, 15, 20],
                 'probability': [True, False],
                 'decision_function_shape': ['ovo', 'ovr'],
                 'degree': [1, 2, 3, 4, 5]}
        svc = SVC(kernel='precomputed')
        self.clf = GridSearchCV(estimator=svc,
                                param_grid=param,
                                scoring='f1_macro',
                                n_jobs=6,
                                cv=5)
        # Accept np.array input by converting it to a list; anything without
        # a .tolist() attribute is reported (message kept verbatim: "please
        # provide a correct data format, only list or np.array").
        if not isinstance(data, list):
            try:
                data = data.tolist()
            except AttributeError:
                print('请输入正确的data格式,只能是list或者np.array')
        self.isSNP = isSNP
        # Convert the input strings into shogun feature objects.
        if isSNP:
            self.feats_train = StringCharFeatures(data, SNP)
        else:
            self.feats_train = StringCharFeatures(data, RAWBYTE)
        # Build the string kernel; this may take a long time.
        print('训练核函数,可能会花费较长时间')
        self.kernel = SubsequenceStringKernel(self.feats_train,
                                              self.feats_train,
                                              maxlen,
                                              decay)
        # The precomputed kernel matrix is the SVC input.
        data = self.kernel.get_kernel_matrix()
        print('训练数据')
        self.clf.fit(data, label)
        print('f1_macro的结果是:' + str(self.clf.best_score_))

    def predict(self, data):
        """Predict labels for a list of strings; prints and returns them."""
        from shogun import SNP, RAWBYTE
        from shogun import StringCharFeatures
        if self.isSNP:
            feats_test = StringCharFeatures(data, SNP)
        else:
            feats_test = StringCharFeatures(data, RAWBYTE)
        # Re-initialize the kernel against the test strings and extract the
        # (train x test) kernel matrix; transpose so rows are test samples.
        self.kernel.init(self.feats_train, feats_test)
        feats_test = self.kernel.get_kernel_matrix()
        result = self.clf.predict(feats_test.T)
        print(' '.join(map(str, result)))
        return result

    def score(self, data, label):
        """Return (accuracy, macro F1) of predictions on ``data``."""
        from shogun import SNP, RAWBYTE
        from shogun import StringCharFeatures
        from sklearn.metrics import accuracy_score
        from sklearn.metrics import f1_score
        if self.isSNP:
            feats_test = StringCharFeatures(data, SNP)
        else:
            feats_test = StringCharFeatures(data, RAWBYTE)
        # Same kernel-matrix construction as predict().
        self.kernel.init(self.feats_train, feats_test)
        feats_test = self.kernel.get_kernel_matrix()
        result = self.clf.predict(feats_test.T)
        acc = accuracy_score(label, result)
        f1 = f1_score(label, result, average='macro')
        print('正确率是:' + str(acc), 'F1得分是:' + str(f1))
        return acc, f1
|
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Amazon SageMaker channel configurations for S3 data sources and file system data sources"""
from __future__ import absolute_import, print_function
# File system types accepted by FileSystemInput: Amazon FSx for Lustre and Amazon EFS.
FILE_SYSTEM_TYPES = ["FSxLustre", "EFS"]
# Access modes accepted by FileSystemInput: read-only ("ro") or read-write ("rw").
FILE_SYSTEM_ACCESS_MODES = ["ro", "rw"]
class s3_input(object):
    """Amazon SageMaker channel configurations for S3 data sources.

    Attributes:
        config (dict[str, dict]): A SageMaker ``DataSource`` referencing
            a SageMaker ``S3DataSource``.
    """

    def __init__(
        self,
        s3_data,
        distribution=None,
        compression=None,
        content_type=None,
        record_wrapping=None,
        s3_data_type="S3Prefix",
        input_mode=None,
        attribute_names=None,
        target_attribute_name=None,
        shuffle_config=None,
    ):
        """Create a definition for input data used by a SageMaker training job.

        See AWS documentation on the ``CreateTrainingJob`` API for more details
        on the parameters.

        Args:
            s3_data (str): Location of the S3 data to train on.
            distribution (str): 'FullyReplicated' or 'ShardedByS3Key'
                (default: 'FullyReplicated', unless ``target_attribute_name``
                is given, in which case no default is applied).
            compression (str): 'Gzip' or None (default: None); Pipe mode only.
            content_type (str): MIME type of the input data (default: None).
            record_wrapping (str): 'RecordIO' or None (default: None).
            s3_data_type (str): 'S3Prefix', 'ManifestFile', or
                'AugmentedManifestFile'. With 'S3Prefix', ``s3_data`` is a key
                prefix; with the manifest types it is a single (augmented)
                manifest file listing the S3 data to train on. See
                https://docs.aws.amazon.com/sagemaker/latest/dg/API_S3DataSource.html
            input_mode (str): Optional override of the estimator-level input
                mode: None (use the estimator's), 'File' (copy to a local
                directory), or 'Pipe' (stream via a Unix named pipe).
            attribute_names (list[str]): Attribute names to read from a
                specified AugmentedManifestFile.
            target_attribute_name (str): Attribute to predict (classify) in a
                SageMaker AutoML job; required for AutoML input.
            shuffle_config (ShuffleConfig): If given, enables shuffling on this
                channel. See
                https://docs.aws.amazon.com/sagemaker/latest/dg/API_ShuffleConfig.html
        """
        s3_source = {"S3DataType": s3_data_type, "S3Uri": s3_data}
        self.config = {"DataSource": {"S3DataSource": s3_source}}
        # Historical default: distribution falls back to FullyReplicated only
        # when no target attribute (AutoML) is specified.
        if not (target_attribute_name or distribution):
            distribution = "FullyReplicated"
        if distribution is not None:
            s3_source["S3DataDistributionType"] = distribution
        optional_fields = (
            ("CompressionType", compression),
            ("ContentType", content_type),
            ("RecordWrapperType", record_wrapping),
            ("InputMode", input_mode),
        )
        for key, value in optional_fields:
            if value is not None:
                self.config[key] = value
        if attribute_names is not None:
            s3_source["AttributeNames"] = attribute_names
        if target_attribute_name is not None:
            self.config["TargetAttributeName"] = target_attribute_name
        if shuffle_config is not None:
            self.config["ShuffleConfig"] = {"Seed": shuffle_config.seed}
class FileSystemInput(object):
    """Amazon SageMaker channel configurations for file system data sources.

    Attributes:
        config (dict[str, dict]): A Sagemaker File System ``DataSource``.
    """

    def __init__(
        self,
        file_system_id,
        file_system_type,
        directory_path,
        file_system_access_mode="ro",
        content_type=None,
    ):
        """Create a new file system input used by a SageMaker training job.

        Args:
            file_system_id (str): An Amazon file system ID starting with 'fs-'.
            file_system_type (str): The type of file system used for the input.
                Valid values: 'EFS', 'FSxLustre'.
            directory_path (str): Absolute or normalized path to the root
                directory (mount point) in the file system. Reference:
                https://docs.aws.amazon.com/efs/latest/ug/mounting-fs.html and
                https://docs.aws.amazon.com/fsx/latest/LustreGuide/mount-fs-auto-mount-onreboot.html
            file_system_access_mode (str): Permissions for read and write.
                Valid values: 'ro' or 'rw'. Defaults to 'ro'.
            content_type (str): MIME type of the input data (default: None).

        Raises:
            ValueError: If ``file_system_type`` or ``file_system_access_mode``
                is not one of its valid values.
        """
        # Validate both restricted-choice arguments with one shared error shape.
        restricted = (
            ("type", file_system_type, FILE_SYSTEM_TYPES),
            ("access mode", file_system_access_mode, FILE_SYSTEM_ACCESS_MODES),
        )
        for label, value, valid_values in restricted:
            if value not in valid_values:
                raise ValueError(
                    "Unrecognized file system %s: %s. Valid values: %s."
                    % (label, value, ", ".join(valid_values))
                )
        file_system_source = {
            "FileSystemId": file_system_id,
            "FileSystemType": file_system_type,
            "DirectoryPath": directory_path,
            "FileSystemAccessMode": file_system_access_mode,
        }
        self.config = {"DataSource": {"FileSystemDataSource": file_system_source}}
        if content_type:
            self.config["ContentType"] = content_type
|
#!/usr/bin/env python3
import os
import time
import subprocess
import yaml
import logging
import pprint
from poor_mans_mailer import PoorMansMailer
# Root logger emits INFO and above by default; per-instance level is adjusted
# in PoorMansNagios via enable_quiet()/enable_debug().
logging.basicConfig(level=logging.INFO)
# Pretty-printer used to dump the parsed configuration at startup.
pp = pprint.PrettyPrinter(indent=4)
# Defaults; each can be overridden by the YAML config (see build_configuration).
DEFAULT_SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_CONFIG_FILE = "%s/config.yaml" % DEFAULT_SCRIPT_DIR
DEFAULT_CHECK_INTERVAL = 5  # minutes between checks while healthy (see set_sleep_seconds)
DEFAULT_RETRY_INTERVAL = 1  # minutes between checks while failing
DEFAULT_FAILURE_THRESHOLD = 5  # consecutive failures before alerting
DEFAULT_NRPE_BINARY = "/usr/lib/nagios/plugins/check_nrpe"
class PoorMansNagios(object):
    """Minimal NRPE-based service monitor.

    Periodically runs a check_nrpe command against a host, tracks consecutive
    failures, and emails an alert (via PoorMansMailer) once the failure
    threshold is crossed; optionally emails again on recovery.
    """

    def __init__(self, config_file=None, args=None):
        """Load YAML config and apply overrides.

        Args:
            config_file (str): path to a YAML config file; defaults to
                config.yaml next to this script.
            args (dict): optional overrides merged on top of the file config.
        """
        self.config = self.parse_config(config_file or DEFAULT_CONFIG_FILE)
        # Bug fix: the original used a mutable default (args={}), shared
        # across calls; use None and substitute a fresh dict per call.
        self.config.update(args or {})
        self.quiet = 'quiet' in self.config and self.config['quiet']
        self.debug = 'debug' in self.config and self.config['debug']
        self.logger = logging.getLogger(self.__class__.__name__)
        self.log_level = self.logger.level
        if self.quiet:
            self.enable_quiet()
        if self.debug:
            self.enable_debug()
        # Log the parsed config at debug level; the original pretty-printed it
        # unconditionally, bypassing the quiet/debug settings.
        self.logger.debug('Config:\n%s', pp.pformat(self.config))
        self.build_configuration()
        self.mailer = PoorMansMailer(self.email_from, self.logger)
        self.reset_on_check_ok()

    def default_loglevel(self):
        """Restore the log level captured at construction time."""
        self.logger.setLevel(self.log_level)

    def enable_debug(self):
        """Lower the log level to DEBUG."""
        self.logger.setLevel(logging.DEBUG)

    def enable_quiet(self):
        """Raise the log level to ERROR, suppressing info/warning chatter."""
        self.logger.setLevel(logging.ERROR)

    def parse_config(self, config_file):
        """Parse the YAML config file.

        Raises:
            RuntimeError: if the file is not valid YAML.
        """
        with open(config_file, 'r') as stream:
            try:
                config = yaml.safe_load(stream)
            except yaml.YAMLError as err:
                raise RuntimeError("Could not load config file %s: %s" % (config_file, err))
        return config

    def build_configuration(self):
        """Copy settings from the parsed config onto attributes, with defaults.

        The 'email-from', nrpe 'host' and nrpe 'command' keys are required;
        everything else falls back to a module-level default.
        """
        pmn_config = self.config['poor-mans-nagios']
        nrpe_config = self.config['nrpe']
        # dict.get replaces the original's repetitive try/except KeyError blocks.
        self.nrpe_binary = pmn_config.get('nrpe-binary', DEFAULT_NRPE_BINARY)
        self.check_interval = pmn_config.get('check-interval', DEFAULT_CHECK_INTERVAL)
        self.retry_interval = pmn_config.get('retry-interval', DEFAULT_RETRY_INTERVAL)
        self.failure_threshold = pmn_config.get('failure-threshold', DEFAULT_FAILURE_THRESHOLD)
        self.alert_on_recovery = pmn_config.get('alert-on-recovery', True)
        self.alert_emails = pmn_config.get('alert-emails', [])
        self.email_from = pmn_config['email-from']   # required, no default
        self.checked_host = nrpe_config['host']      # required, no default
        self.check_command = nrpe_config['command']  # required, no default

    def set_sleep_seconds(self, minutes):
        """Convert a minute-based interval to the seconds used by monitor()."""
        self.sleep_seconds = minutes * 60

    def reset_on_check_ok(self):
        """Reset failure tracking after a successful check (or at startup)."""
        self.logger.info("Resetting tracking on recovery")
        self.fail_count = 0
        self.alert_sent = False
        self.set_sleep_seconds(self.check_interval)

    def check_failure_threshold(self):
        """Return True once consecutive failures have reached the threshold."""
        return self.fail_count >= self.failure_threshold

    def handle_failure(self):
        """Record a failed check; send one problem alert past the threshold."""
        self.fail_count += 1
        self.logger.debug("Current fail count: %d, failure threshold: %d" % (self.fail_count, self.failure_threshold))
        if self.check_failure_threshold():
            self.logger.warning("Check %s for host %s over failure threshold" % (self.check_command, self.checked_host))
            if self.alert_sent:
                self.logger.debug("Alerts already sent for this failure, skipping")
            else:
                result = self.send_problem_alert()
                if result:
                    # Only mark as sent when the mailer reports success, so a
                    # failed send is retried on the next failed check.
                    self.alert_sent = True

    def handle_recovery(self):
        """On a successful check, reset tracking and optionally alert recovery."""
        if self.alert_sent:
            self.logger.info("Service recovered")
            self.reset_on_check_ok()
            self.send_recovery_alert()

    def send_problem_alert(self):
        """Email a problem alert; returns the mailer's success result."""
        self.logger.warning("Sending problem alert to: %s" % ", ".join(self.alert_emails))
        return self.mailer.alert_problem(self.alert_emails, self.checked_host, self.check_command)

    def send_recovery_alert(self):
        """Email a recovery alert if alert-on-recovery is enabled."""
        if self.alert_on_recovery:
            self.logger.info("Sending recovery alert to: %s" % ", ".join(self.alert_emails))
            return self.mailer.alert_recovery(self.alert_emails, self.checked_host, self.check_command)

    def run_shell_command(self, command, capture_output=True):
        """Run ``command`` (an argv list); return (returncode, stdout, stderr).

        Any exception from Popen is reported as returncode 1 with the
        exception text in stderr instead of propagating.
        """
        kwargs = {}
        if capture_output:
            kwargs.update({
                'stdout': subprocess.PIPE,
                'stderr': subprocess.PIPE,
                'universal_newlines': True,
            })
        try:
            proc = subprocess.Popen(command, **kwargs)
            stdout, stderr = proc.communicate()
            returncode = proc.returncode
        except Exception as e:
            # str(e) replaces the Python 2-era ``e.message`` fallback.
            stdout = ''
            stderr = str(e)
            returncode = 1
        return returncode, stdout, stderr

    def build_command_args(self):
        """Build the check_nrpe argv from the ``nrpe`` config section.

        Each config key becomes ``--key``; a value of True marks a bare flag,
        any other value is appended as the option's argument.
        """
        args = [
            self.nrpe_binary,
        ]
        for arg, val in self.config['nrpe'].items():
            args.append("--%s" % arg)
            if val is not True:
                args.append(val)
        return args

    def execute_check(self):
        """Run one NRPE check and dispatch recovery/failure handling.

        Returns:
            bool: True when the check exits 0, else False.
        """
        command = self.build_command_args()
        self.logger.debug("Running check command: %s" % " ".join(command))
        returncode, stdout, stderr = self.run_shell_command(command)
        if returncode == 0:
            self.logger.debug("Check %s on host %s succeeded" % (self.check_command, self.checked_host))
            self.handle_recovery()
            return True
        self.logger.warning("Check %s on host %s failed, stdout: %s, stderr: %s" % (self.check_command, self.checked_host, stdout, stderr))
        self.handle_failure()
        return False

    def configure_next_action(self, success):
        """Pick the next sleep interval: normal on success, retry on failure."""
        if success:
            self.set_sleep_seconds(self.check_interval)
        else:
            self.set_sleep_seconds(self.retry_interval)
        self.logger.debug("Set interval to %d seconds" % self.sleep_seconds)

    def monitor(self):
        """Main loop: check, adjust the interval, sleep; Ctrl-C stops it."""
        self.logger.info("Starting poor-mans-nagios with check_interval: %d, retry_interval: %d, failure_threshold: %d" % (self.check_interval, self.retry_interval, self.failure_threshold))
        try:
            while True:
                success = self.execute_check()
                self.configure_next_action(success)
                self.logger.debug("Sleeping %d seconds" % self.sleep_seconds)
                time.sleep(self.sleep_seconds)
        except KeyboardInterrupt:
            self.logger.warning('Process interrupted')
|
import sys
import base64
def encode(name):
    """Base64-encode the first 32000 bytes of file ``name``.

    Prints the encoded ASCII string and also returns it.
    """
    # 32000 bytes keeps the result within common data-URI size limits.
    # Bug fixes: open in binary mode (b64encode needs bytes on Python 3)
    # and close the file via a context manager.
    with open(name, 'rb') as fp:
        data = fp.read(32000)
    encoded = base64.b64encode(data).decode('ascii')
    print(encoded)
    return encoded
if __name__ == '__main__':
    encode(sys.argv[-1])
|
from swarmtransport.representations import CityMap, BusRepresentation
from swarmtransport.publictransport import SimpleBus, DecisiveBus
import swarmtransport.passenger as ps
from vispy import app, scene
from vispy.ext.six import next
import vispy.io
from vispy.visuals.transforms.linear import AffineTransform
import numpy as np
# define a rotation transformation
def rotation_matrix(deg):
    """Return the 2x2 counter-clockwise rotation matrix for ``deg`` degrees.

    The original body was an empty stub (``pass``); this implements the
    rotation transformation the comment above announces.
    """
    rad = np.deg2rad(deg)
    c, s = np.cos(rad), np.sin(rad)
    return np.array([[c, -s], [s, c]])
# initialize a citymap and a traffic network
citymap = CityMap(size=(1000, 800))
traffic_network = ps.initialize_random_network(15, citymap.size)
busses = []               # bus model objects
bus_representations = []  # their on-canvas visuals
bus_positions = []        # per-bus drive iterators, consumed by update() below
n_buses = 15
# initialize the busses and their representations
for i in range(n_buses):
    # random start and destination inside the map bounds
    position = np.random.rand(2) * np.array(citymap.size)
    destination = np.random.rand(2) * np.array(citymap.size)
    bus = DecisiveBus(x_init=position, v_max=40, a_max=20)
    # NOTE(review): bus.drive presumably returns an iterator of (x, v) steps --
    # update() consumes it with next(); confirm against swarmtransport.
    bus_position = bus.drive(destination, dt=0.1)
    bus_color = citymap.color_from_position(bus.position)
    bus_repr = BusRepresentation(bus, canvas=citymap.canvas, color=bus_color)
    # heading phi is converted from radians to degrees for the representation
    bus_repr.translate_rotate(bus.position, bus.phi/2/np.pi * 360)
    busses.append(bus)
    bus_representations.append(bus_repr)
    bus_positions.append(bus_position)
n = 0  # frame counter (used by the commented-out movie export in update())
def update(event):
    """Timer callback: advance every bus one step and redraw the canvas.

    Args:
        event: vispy timer event (unused).
    """
    global n
    for i in range(n_buses):
        bus = busses[i]
        bus_repr = bus_representations[i]
        bus_position = bus_positions[i]
        try:
            # next step of the drive iterator: position x and velocity v
            x,v = next(bus_position)
            new_color = citymap.color_from_position(x)
            bus_repr.repr.color = new_color
            # adapt the direction and position of the bus
            # (phi converted from radians to degrees)
            bus_repr.translate_rotate((x[0], x[1]), bus.phi/2/np.pi * 360)
        except StopIteration:
            # destination reached: start driving to a new random destination
            random_x = np.random.random(2) * np.array(citymap.size)
            bus_positions[i] = bus.drive(random_x, dt=0.1)
            x = bus.position
    citymap.canvas.update()
    # optional frame export for making a movie (disabled)
    #image = citymap.canvas.render()
    #vispy.io.write_png('movie/shot{0}.png'.format(n), image)
    n += 1
# Fire update() roughly every millisecond to animate the busses.
timer = app.Timer(0.001, connect=update, start=True)
if __name__ == '__main__':
    citymap.canvas.app.run()
|
# -*- coding: utf-8 -*-
"""Test suite for models defined in pathshare_api.models."""
import pytest
from datetime import datetime
from uuid import uuid4 # Used to generate tokens
from pathshare_api.models import Ride, User
from pathshare_api.utilities import encrypt_password
def test_ride_model() -> None:
    """Test that the Ride schema model is initialized correctly."""
    # Create a ride, use a plain string departure location
    ride = dict(
        riders=[0, 1, 2, 3, 4, 5],
        departure_date=datetime.now(),
        departure_location="Lubbock",
        destination="Houston",
        price_per_seat=20,
        is_active=True,
    )
    # Initialize a ride_schema
    ride_schema = Ride()
    # Dump the ride data into the schema and extract the data
    result = ride_schema.dump(ride).data
    # Validate field types were created correctly.
    # Bug fix: the departure_location/destination branches previously asserted
    # type(item) -- the dict *key*, which is always str -- making those checks
    # vacuous; they now check the dumped value.
    for item in result:
        if item == "riders":
            assert type(result[item]) == list
        elif item == "departure_date":
            assert type(result[item]) == str
        elif item == "departure_location":
            assert type(result[item]) == str
        elif item == "destination":
            assert type(result[item]) == str
        elif item == "price_per_seat":
            assert type(result[item]) == int
        elif item == "is_active":
            assert type(result[item]) == bool
async def test_user_model() -> None:
    """Test that the User schema model is initialized correctly."""
    # NOTE(review): this is a coroutine test; it only runs under an
    # asyncio-capable pytest plugin (e.g. pytest-asyncio) -- confirm setup.
    # Create a user, give them an encrypted password
    password = await encrypt_password("mysecurepassword")
    user = dict(
        name="John Doe",
        major="Computer Science",
        # NOTE(review): age is passed as a string but asserted to dump as int
        # below -- presumably the schema coerces it; confirm.
        age="20",
        token=uuid4().hex,
        is_validated=False,
        username="jdoe",
        email="jdoe@ttu.edu",
        password=password
    )
    # Initialize a user_schema
    user_schema = User()
    # Dump the user data into the schema and extract the data
    result = user_schema.dump(user).data
    # Validate field types were created correctly
    for item in result:
        if item == "name":
            assert type(result[item]) == str
        elif item == "major":
            assert type(result[item]) == str
        elif item == "age":
            assert type(result[item]) == int
        elif item == "token":
            assert type(result[item]) == str
        elif item == "is_validated":
            assert type(result[item]) == bool
        elif item == "username":
            assert type(result[item]) == str
        elif item == "password":
            assert type(result[item]) == dict
def test_ride_02_model() -> None:
    """Test that the Ride schema dumps dict locations with string values."""
    # Create a departure location (typo fixed: was ``depature_location``)
    departure_location = dict(
        lat="-106.346231",
        long="35.000241 ",
    )
    # Create a destination location
    destination = dict(
        lat="-101.8754254",
        long="33.5873746"
    )
    # Create a ride, use the departure location
    ride = dict(
        riders=[0, 1, 2, 3, 4, 5],
        departure_date=datetime.now(),
        departure_location=departure_location,
        destination=destination,
        price_per_seat=20,
        is_active=True,
    )
    # Initialize a ride_schema
    ride_schema = Ride()
    # Dump the ride data into the schema and extract the data
    result = ride_schema.dump(ride).data
    # Validate field types were created correctly
    for item in result:
        if item == "riders":
            assert type(result[item]) == list
        elif item == "departure_date":
            assert type(result[item]) == str
        elif item in ("departure_location", "destination"):
            # Bug fix: iterating a dict yields its *keys* (always str), making
            # the original check vacuous; check the lat/long values instead.
            # (Assumes the schema dumps these as dicts -- TODO confirm.)
            # The destination branch was also missing entirely.
            for value in result[item].values():
                assert type(value) == str
        elif item == "price_per_seat":
            assert type(result[item]) == int
        elif item == "is_active":
            assert type(result[item]) == bool
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 28 18:44:35 2018
@author: Mohammad SAFEEA
"""
import math
from io import BytesIO
from iiwaaControler.Senders import Senders
from iiwaaControler.Getters import Getters
from iiwaaControler.check import check_size, check_scalar, check_non_zero
import numpy as np
def checkAcknowledgment(msg):
    """Echo the robot's reply and report whether it acknowledged.

    Returns True only for the literal acknowledgment string "done";
    empty replies, "nak", and anything else count as failure.
    """
    print(msg)
    return msg == "done"
class PTP:
    """Point-to-point (PTP) motion commands for the iiwa robot.

    Commands are serialized as underscore-separated ASCII byte strings
    (see createCommand), sent over ``mysoc``, and the robot acknowledges
    finished motions with "done" (see checkAcknowledgment).
    """
    def __init__(self, mysoc):
        """Wrap an open robot connection with sender/getter helpers."""
        self.mysoc = mysoc
        self.sender = Senders(mysoc)
        self.getter = Getters(mysoc)

    def send(self, data):
        """Send a newline-terminated command and return the immediate reply."""
        data = data + b"\n"
        self.mysoc.send(data)
        msg = self.mysoc.receive()
        return msg

    def createCommand(self, name, data):
        """Serialize ``name`` and ``data`` as b'name_data_' (trailing separator)."""
        command_list = [name] + [str(data)] + [""]
        command = "_".join(command_list).encode("ascii")
        return command

    def blockUntilAcknowledgment(self):
        """Poll the connection until the robot acknowledges with "done"."""
        while True:
            msg = self.mysoc.receive()
            if checkAcknowledgment(msg):
                break

    # --- arc motions ---
    def movePTPArc_AC(self, theta, c, k, vel):
        """Move the end-effector along an arc around center ``c``.

        Args:
            theta: arc angle (radians), non-zero.
            c: arc center [x, y, z].
            k: rotation-axis direction vector [x, y, z]; normalized here
               (mutated in place, as in the original).
            vel: relative velocity scalar.
        """
        check_size(3, "Center of circle", c)
        check_size(3, "Orientation vector", k)
        check_scalar("Angle", theta)
        check_scalar("Relative velocity", vel)
        pos = self.getter.getEEFPos()
        # radius = distance from the current EEF position to the arc center
        x = math.pow(c[0]-pos[0], 2) + math.pow(c[1]-pos[1], 2) + math.pow(c[2]-pos[2], 2)
        r = math.pow(x, 0.5)
        check_non_zero("Radius", r)
        check_non_zero("Angle", theta)
        # normalize the rotation axis k
        x = math.pow(k[0], 2) + math.pow(k[1], 2) + math.pow(k[2], 2)
        normK = math.pow(x, 0.5)
        check_non_zero("Norm with direction of vector k", normK)
        k[0] = k[0]/normK
        k[1] = k[1]/normK
        k[2] = k[2]/normK
        # s: unit vector from the center towards the current position
        s = [c[0]-pos[0], c[1]-pos[1], c[2]-pos[2]]
        s[0] = -s[0]/r
        s[1] = -s[1]/r
        s[2] = -s[2]/r
        # n = k x s completes the in-plane basis
        n = [(k[1]*s[2]-k[2]*s[1]), (k[2]*s[0]-k[0]*s[2]), (k[0]*s[1]-k[1]*s[0])]
        # intermediate (half-angle) and final way-points on the arc
        c1 = self.rotTheThing(theta/2, r, s, n, c)
        c2 = self.rotTheThing(theta, r, s, n, c)
        self.movePTPCirc1OrintationInter(c1, c2, vel)

    def rotTheThing(self, theta, r, s, n, c):
        """Return the point at angle ``theta`` on the circle of radius ``r``
        centered at ``c`` and spanned by the in-plane vectors ``s`` and ``n``."""
        c1 = [0, 0, 0]
        cos = math.cos(theta)
        sin = math.sin(theta)
        c1[0] = r*cos*s[0] + r*sin*n[0] + c[0]
        c1[1] = r*cos*s[1] + r*sin*n[1] + c[1]
        c1[2] = r*cos*s[2] + r*sin*n[2] + c[2]
        return c1

    def movePTPArcXY_AC(self, theta, c, vel):
        """Arc in the XY plane around center c=[x, y], keeping the current z."""
        check_size(2, "Center of circle", c)
        check_scalar("Angle", theta)
        check_scalar("Relative velocity", vel)
        k = [0, 0, 1]
        pos = self.getter.getEEFPos()
        # Bug fix: the original built [c, pos[2]] (a nested list of length 2),
        # which fails movePTPArc_AC's check_size(3, ...) validation.
        c1 = [c[0], c[1], pos[2]]
        self.movePTPArc_AC(theta, c1, k, vel)

    def movePTPArcXZ_AC(self, theta, c, vel):
        """Arc in the XZ plane around center c=[x, z], keeping the current y."""
        check_size(2, "Center of circle", c)
        check_scalar("Angle", theta)
        check_scalar("Relative velocity", vel)
        k = [0, 1, 0]
        pos = self.getter.getEEFPos()
        c1 = [c[0], pos[1], c[1]]
        self.movePTPArc_AC(theta, c1, k, vel)

    def movePTPArcYZ_AC(self, theta, c, vel):
        """Arc in the YZ plane around center c=[y, z], keeping the current x."""
        check_size(2, "Center of circle", c)
        check_scalar("Angle", theta)
        check_scalar("Relative velocity", vel)
        k = [1, 0, 0]
        pos = self.getter.getEEFPos()
        # Bug fix: the original indexed c[2] on a list that check_size has
        # just constrained to length 2 -- an unconditional IndexError.
        c1 = [pos[0], c[0], c[1]]
        self.movePTPArc_AC(theta, c1, k, vel)

    def movePTPCirc1OrintationInter(self, f1, f2, relVel):
        """Circular motion through two frames [x, y, z, alpha, beta, gamma]."""
        check_size(6, "First frame [x,y,z,alpha,beta,gamma]", f1)
        check_size(6, "Second frame [x,y,z,alpha,beta,gamma]", f2)
        check_scalar("Relative velocity", relVel)
        command = self.createCommand('jRelVel', relVel)
        self.send(command)
        self.sender.sendCirc1FramePos(f1)
        self.sender.sendCirc2FramePos(f2)
        theCommand = b'doPTPinCSCircle1_'
        self.send(theCommand)
        self.blockUntilAcknowledgment()

    def movePTPLineEEF(self, pos, vel):
        """Straight-line motion to the frame pos=[x, y, z, alpha, beta, gamma]."""
        check_size(6, "Position ", pos)
        check_scalar("Velocity", vel)
        command = self.createCommand("jRelVel", vel)
        self.send(command)
        self.sender.sendEEfPositions(pos)
        theCommand = b'doPTPinCS_'
        self.send(theCommand)
        self.blockUntilAcknowledgment()

    def movePTPLineEEFRelEEF(self, pos, vel):
        """Straight-line motion by offset pos=[dx, dy, dz] in the EEF frame."""
        check_size(3, "Position ", pos)
        check_scalar("Velocity", vel)
        command = self.createCommand("jRelVel", vel)
        self.send(command)
        # pad the 3-vector offset into the 6-element frame format
        newPos = [0, 0, 0, 0, 0, 0]
        newPos[0] = pos[0]
        newPos[1] = pos[1]
        newPos[2] = pos[2]
        self.sender.sendEEfPositions(newPos)
        theCommand = b'doPTPinCSRelEEF_'
        self.send(theCommand)
        self.blockUntilAcknowledgment()

    def movePTPLineEEFRelBase(self, pos, vel):
        """Straight-line motion by offset pos=[dx, dy, dz] in the base frame."""
        check_size(3, "Position ", pos)
        check_scalar("Velocity", vel)
        command = self.createCommand("jRelVel", vel)
        self.send(command)
        # NOTE(review): 7 elements here vs. 6 in movePTPLineEEFRelEEF, and the
        # command below lacks the trailing underscore other commands carry --
        # confirm against the robot-side protocol before changing either.
        newPos = [0, 0, 0, 0, 0, 0, 0]
        newPos[0] = pos[0]
        newPos[1] = pos[1]
        newPos[2] = pos[2]
        self.sender.sendEEfPositions(newPos)
        theCommand = b'doPTPinCSRelBase'
        self.send(theCommand)
        self.blockUntilAcknowledgment()

    # --- joint-space motions ---
    def movePTPJointSpace(self, jpos, relVel):
        """PTP motion to the 7-element joint configuration ``jpos``."""
        check_size(7, "Joints ", jpos)
        check_scalar("Relative Velocity", relVel)
        command = self.createCommand("jRelVel", relVel)
        self.send(command)
        self.sender.sendJointsPositions(jpos)
        # NOTE(review): movePTPHomeJointSpace sends b'doPTPinJS_' (with a
        # trailing underscore) -- confirm which form the robot expects.
        theCommand = b'doPTPinJS'
        self.send(theCommand)
        self.blockUntilAcknowledgment()

    def movePTPHomeJointSpace(self, relVel):
        """PTP motion to the all-zeros (home) joint configuration."""
        check_scalar("Relative Velocity", relVel)
        command = self.createCommand("jRelVel", relVel)
        self.send(command)
        jpos = [0, 0, 0, 0, 0, 0, 0]
        self.sender.sendJointsPositions(jpos)
        theCommand = b'doPTPinJS_'
        self.send(theCommand)
        self.blockUntilAcknowledgment()

    def movePTPTransportPositionJointSpace(self, relVel):
        """PTP motion to the transport pose (joints 4 and 6 raised)."""
        check_scalar("Relative Velocity", relVel)
        jpos = [0, 0, 0, 0, 0, 0, 0]
        jpos[3] = 25*math.pi/180
        jpos[5] = 90*math.pi/180
        self.movePTPJointSpace(jpos, relVel)
|
import os
import sys
sys.path.append('../')
import time
import torch
import random
import numpy as np
import pandas as pd
import collections
import pdb
from utils import get_logger, load_pkl
import argparse
from io_utils import get_prefix
from tile_env import *
from tile_env import neighbors as tile_neighbors
from tile_utils import tup_to_str
np.set_printoptions(precision=5)
'''
How does the monte carlo tree search interact with the environment and model?
mcts.model.forward: maybe force the model to have something else?
mcts.env.action_space.n
mcts.env.neighbors()
mcts.env.encode_inv(state)
'''
class MCTS(object):
    """Monte Carlo tree search over tile-puzzle states.

    States are permutation tuples; ``visits`` and ``values`` hold
    per-(state, action) statistics, and ``nbr_cache`` doubles as the set of
    expanded (non-leaf) nodes.
    """
    def __init__(self, root_grid, model, env, coeff=1):
        """
        root_grid: start state in grid form.
        model: object whose ``forward`` maps one-hot states to values.
        env: TileEnv instance (supplies action_space.n).
        coeff: float, exploration weight for the visit-count bonus.
        """
        self.root_grid = root_grid  # grid form of the root state
        self.root_tup = grid_to_tup(root_grid)  # tuple form of the root state
        self.model = model
        self.env = env
        self.coeff = coeff
        self.visits = collections.defaultdict(lambda: np.zeros(self.env.action_space.n))
        self.values = collections.defaultdict(lambda: np.zeros(self.env.action_space.n))
        # keyed by tuple state; the tree stays small enough that an unbounded
        # dict cache is fine
        self.nbr_cache = {}
        # NOTE(review): hard-coded cluster paths to the exact-distance tables;
        # parameterize before running elsewhere.
        self.true_dist_2 = self.load_true('/local/hopan/tile/tile2.txt')
        self.true_dist_3 = self.load_true('/local/hopan/tile/tile3.txt')

    def load_true(self, fname):
        """Load an exact distance table (perm string -> distance) as a DataFrame."""
        df = pd.read_csv(fname, header=None, dtype={0: str, 1: int})
        df.columns = ['perm', 'distance']
        df = df.set_index('perm')
        return df

    @property
    def nexplored(self):
        """Number of expanded (cached) nodes."""
        return len(self.nbr_cache)

    def neighbors(self, state):
        """Return the neighbor tuples of ``state``, one per TileEnv.MOVES action."""
        grid_leaf = tup_to_grid(state)
        if state in self.nbr_cache:
            return self.nbr_cache[state]
        else:
            # compute neighbors in grid form, then convert back to tuples
            grid_nbrs_dict = tile_neighbors(grid_leaf)
            nbrs = [grid_to_tup(grid_nbrs_dict[a]) for a in TileEnv.MOVES]
            return nbrs

    def search(self):
        """Run one search iteration.

        Returns the action path (ending with the solving move) if a solved
        state is adjacent to the selected leaf, otherwise None.
        """
        # leaf is a tuple state
        leaf, path_states, path_actions = self.search_leaf()
        leaf_nbrs = self.neighbors(leaf)
        self.nbr_cache[leaf] = leaf_nbrs  # mark the leaf as expanded
        # In the autodidactic-iteration paper the network outputs neighbor
        # probabilities; here the net only evaluates the leaf state's value.
        # Bug fix: removed a stray pdb.set_trace() debug leftover that halted
        # every search iteration.
        value = self.expand_leaves([leaf])
        self.backup_leaf(path_states, path_actions, value)
        is_solved = [TileEnv.is_solved_perm(s) for s in leaf_nbrs]
        if any(is_solved):
            max_actions = [idx for idx, solved in enumerate(is_solved) if solved]
            path_actions.append(random.choice(max_actions))
            return path_actions
        return None

    def search_leaf(self):
        """Walk from the root to an unexpanded (leaf) node.

        Returns (leaf_tuple, states_along_path, actions_along_path).
        """
        curr = self.root_tup
        path_states = []
        path_actions = []
        cnt = 0
        curr_traj = set([curr])
        while True:
            # a node absent from nbr_cache has never been expanded -> leaf
            next_states = self.nbr_cache.get(curr)
            if next_states is None:
                break
            # visit-count statistics for the current node
            sqrt_n = np.sqrt(np.sum(self.visits[curr]))
            act_cnts = self.visits[curr]
            if sqrt_n < 1e-5:  # no visits yet: pick uniformly at random
                act = random.randint(0, self.env.action_space.n - 1)
            else:
                # UCT-style score: exploration bonus u + value estimate q;
                # ties broken uniformly at random
                u = self.coeff * sqrt_n / (act_cnts + 1)
                q = self.values[curr]
                uq = u + q
                act = np.random.choice(np.flatnonzero(uq == uq.max()))
            # Bug fix: removed a pdb.set_trace() fired when cnt > 500 (debug
            # leftover); very long walks indicate the search is cycling --
            # see the disabled repeated-state handling below.
            curr = next_states[act]
            # random action if repeated state
            #if curr in curr_traj:
            #    act = random.randint(0, self.env.action_space.n - 1)
            #    curr = next_states[act]
            curr_traj.add(curr)
            path_actions.append(act)
            path_states.append(curr)
            cnt += 1
        return curr, path_states, path_actions

    def expand_leaves(self, leaves):
        '''
        leaves: list of perm-tuple states
        Returns the value of the leaves (max over the model's outputs)
        '''
        # Evaluate the model on the one-hot encoding of each leaf state.
        onehots = np.array([tup_to_onehot(t) for t in leaves])
        onehots_th = torch.from_numpy(onehots).float()
        val = self.model.forward(onehots_th).max().item()
        return val

    def backup_leaf(self, states, actions, value):
        """Propagate ``value`` along the visited path; bump visit counts."""
        for s, a in zip(states, actions):
            self.values[s][a] = max(value, self.values[s][a])
            self.visits[s][a] += 1

    def best_action(self, tup):
        """Return action indices whose neighbor has the max exact distance.

        NOTE(review): this maximizes distance-to-solved; for a greedy solver
        one would expect argmin -- confirm the intended semantics.
        """
        # pick the table matching the puzzle size (4-tuple -> 2x2)
        if len(tup) == 4:
            df = self.true_dist_2
        else:
            df = self.true_dist_3
        def get_val(action, nbrs_dict):
            # exact distance of the state reached by ``action``
            grid = nbrs_dict[action]
            tup = grid_to_tup(grid)
            str_rep = tup_to_str(tup)
            return df.loc[str_rep].distance
        grid = tup_to_grid(tup)
        nbrs = tile_neighbors(grid)
        dists = np.array([get_val(a, nbrs) for a in TileEnv.MOVES])
        return np.flatnonzero(dists == dists.max())
def solve(state, model, env, log, time_limit=None, max_steps=None, coeff=1):
    """Attempt to solve ``state`` with MCTS under step/time budgets.

    Args:
        state: cube state (currently a string/grid passed to MCTS).
        model: object implementing ``forward`` to compute values.
        env: TileEnv instance.
        log: logger instance.
        time_limit: int, seconds allowed to run (None = unlimited).
        max_steps: int, number of search iterations allowed (None = unlimited).
        coeff: float, exploration weight.

    Returns:
        (solution_path, tree) on success, (None, tree) when a budget runs out.
    """
    tree = MCTS(state, model, env, coeff)
    start_time = time.time()
    iters = 0
    while True:
        path = tree.search()
        if path:
            # solution found; caller may post-process (e.g. BFS-shorten) it
            return path, tree
        iters += 1
        over_steps = bool(max_steps) and iters > max_steps
        over_time = bool(time_limit) and (time.time() - start_time) > time_limit
        if over_steps or over_time:
            return None, tree
def main():
    """Parse CLI args, load the trained value model, and run MCTS solves."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--modelname', type=str, default='20k10k.pt')
    parser.add_argument('--dist', type=int, default=100)
    parser.add_argument('--max_steps', type=int, default=None)
    parser.add_argument('--time_limit', type=int, default=600)
    parser.add_argument('--ntest', type=int, default=20)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--tile_size', type=int, default=3)
    parser.add_argument('--coeff', type=float, default=1)
    args = parser.parse_args()
    random.seed(args.seed)
    logpath = None  # NOTE(review): None presumably means log-to-stdout; confirm in utils.get_logger
    model = torch.load('./models/{}'.format(args.modelname))
    log = get_logger(logpath)
    log.info('params: {}'.format(args))
    env = TileEnv(args.tile_size, one_hot=False)
    #cubes = [scramble_fixedcore(init_2cube(), n=args.dist) for _ in range(args.ntest)]
    # sample ntest random starting grids from the environment
    grid_puzzles = [env.reset() for _ in range(args.ntest)]
    # NOTE(review): unused -- solve() below is called with args.time_limit instead.
    time_limit = 30
    log.info('Starting to attempt solves')
    nsolved = 0
    solved = []
    notsolved = []
    for c in grid_puzzles:
        # c is a grid
        res, tree = solve(c, model, env, log, max_steps=args.max_steps, time_limit=args.time_limit, coeff=args.coeff)
        if res is None:
            log.info('Unable to solve: {} | total explored: {}'.format(c, tree.nexplored))
            notsolved.append(c)
        else:
            log.info('Solved: {} | len: {} | total explored: {}'.format(c, len(res), tree.nexplored))
            nsolved += 1
            solved.append(c)
    # summary: solve rate and the unsolved puzzles
    log.info('Solved: {} / {} | {:.3f}'.format(nsolved, len(grid_puzzles), nsolved / len(grid_puzzles)))
    log.info('unable to solve:')
    for c in notsolved:
        log.info(c)
if __name__ == '__main__':
    np.set_printoptions(precision=2)
    main()
|
import ee
from constant.gee_constant import DICT_COLLECTION
import argparse
from datetime import date, timedelta
import json
from utils.cloud_filters import filter_clouds
ee.Initialize()
# l
def eedate_2_string(date):
    """Format an ee.Date as a zero-padded 'year-month-day' string.

    Args:
        date: ee.Date

    Returns:
        str in "year-month-day" order.
    """
    # Each component requires a server round-trip via getInfo(); keep the
    # original day -> month -> year evaluation order.
    day = convert_int(str(date.get("day").format().getInfo()))
    month = convert_int(str(date.get("month").format().getInfo()))
    year = convert_int(str(date.get("year").format().getInfo()))
    return "{}-{}-{}".format(year, month, day)
def convert_int(str_value):
    """Zero-pad a date component to at least two characters.

    Args:
        str_value: int or str, a date component such as 7 or "7"

    Returns:
        str: the component as a string, left-padded with "0" when it is a
        single character (e.g. 7 -> "07", "12" -> "12", "2020" -> "2020")
    """
    # isinstance is the idiomatic type check (was: type(x) == type(1)).
    if isinstance(str_value, int):
        str_value = str(str_value)
    # Pad single-character values; longer values pass through unchanged.
    if len(str_value) == 1:
        return "0" + str_value
    return str_value
def string_2_datetime(str_date):
    """Parse a "year-month-day" string into a datetime.date.

    Args:
        str_date: string which describes the date as year-month-day

    Returns:
        datetime.date corresponding to the input string
    """
    year, month, day = (int(part) for part in str_date.split("-"))
    return date(year, month, day)
def next_string_date(str_date, i):
    """Shift a "year-month-day" date string by i days.

    Args:
        str_date: string, date formatted year-month-day
        i: int, number of days to add (may be negative)

    Returns:
        string: the input date shifted by i days, same format
    """
    shifted = string_2_datetime(str_date) + timedelta(days=i)
    return datetime_2_string(shifted)
def datetime_2_string(ex_date):
    """Format a datetime.date as a zero-padded "year-month-day" string.

    Args:
        ex_date: datetime.date (the original docstring wrongly said ee.Date)

    Returns:
        string: the date in "year-month-day" format, fields zero-padded
    """
    # {:02d} pads to a minimum of two digits, which matches what
    # convert_int() produced for int inputs (4-digit years are unchanged).
    return "{:02d}-{:02d}-{:02d}".format(ex_date.year, ex_date.month, ex_date.day)
def gjson_2_eegeom(path_geojson):
    """Load a single-feature GeoJSON Polygon file as an ee.Geometry.

    Args:
        path_geojson: path to a GeoJSON Polygon file (NOT MULTIPOLYGON)

    Returns:
        the ee.Geometry described in the geojson
    """
    with open(path_geojson) as f:
        data = json.load(f)
    features = data["features"]
    assert len(features) == 1, "More than one feature is stored {}".format(features)
    list_coords = features[0]["geometry"]["coordinates"]
    print(list_coords)
    return define_geometry(list_coords)
def define_geometry(list_coordinates):
    """Build a planar ee.Geometry.Polygon from raw coordinates.

    Args:
        list_coordinates: nested coordinate list, e.g. [[[x1, y1], ..., [xn, yn]]]

    Returns:
        the ee.Geometry.Polygon defined by the list of coordinates
        (proj=None, geodesic=False)
    """
    return ee.Geometry.Polygon(list_coordinates, None, False)
def get_filter_collection(begin_date, ending_date, zone, sent=1, opt_param=None, name_s2=None):
    """Build a Sentinel ImageCollection filtered by date, zone and options.

    Args:
        begin_date: ee.Date (or date string)
        ending_date: ee.Date (or date string)
        zone: ee.Geometry; returned images should cover part of this zone
        sent: int, 1 for Sentinel-1, 2 for Sentinel-2
        opt_param: dict of optional filters (see opt_filter), or None
        name_s2: string PRODUCT_ID selecting one S2 image, or None

    Returns:
        an ee.ImageCollection
    """
    if opt_param is None:
        opt_param = {}
    # ee.Date objects need .format().getInfo() before they can be printed.
    if type(begin_date) != type("u"):
        print("begin {} ending {}".format(begin_date.format().getInfo(), ending_date.format().getInfo()))
    collection = (ee.ImageCollection(DICT_COLLECTION[sent])
                  .filterDate(begin_date, ending_date)
                  .filterBounds(zone))
    print(type(collection))
    # Sentinel-1 goes through the generic optional-filter path.
    if sent != 2:
        return opt_filter(collection, opt_param, sent)
    # Sentinel-2: either select one product by id, or remove cloudy scenes.
    if name_s2 is not None:
        return collection.filter(ee.Filter.eq("PRODUCT_ID", name_s2))
    return filter_clouds(collection, zone)
def opt_filter(collection, opt_param, sent):
    """
    Args:
        collection: an ee.Image Collection
        opt_param: a dictionnary, contains optional filters to apply on S1 image
        sent: int, could be one or 2

    Returns:
        collection : an ee.ImageCollection, corresponds to the input ImageCollection on which we have applied different filters
    """
    # print("sent {}".format(sent))
    # An empty collection cannot be filtered further; return it untouched.
    if collection.toList(100).length().getInfo() == 0:
        return collection
    else:
        if sent == 1:
            if "sensorMode" in opt_param:
                # print("sensorMode {}".format(opt_param["sensorMode"]))
                collection = collection.filter(ee.Filter.eq('instrumentMode', opt_param["sensorMode"]))
            if "polarisation" in opt_param:
                # Comma-separated list, e.g. "VV,VH": one filter per band.
                for polar in opt_param["polarisation"].split(","):
                    # print("Filter by polarisation {}".format(polar))
                    collection = collection.filter(ee.Filter.listContains('transmitterReceiverPolarisation', polar))
            # NOTE(review): `if True:` unconditionally forces DESCENDING orbits.
            # The commented-out print suggests this once depended on
            # opt_param["orbitDirection"] -- confirm the hard-coding is intentional.
            if True:
                # print("Filter by orbit direction {}".format(opt_param["orbitDirection"].upper()))
                collection = collection.filter(
                    ee.Filter.eq('orbitProperties_pass', "DESCENDING"))  # ee.Filter.eq('orbitProperties_pass', "")) #
        else:
            # print("Sentinel 2 default mode are MSI and Level 1C !!! To change that change the constant parameters !!")
            # print(opt_param)
            assert "ccp" in opt_param, "Wrong param for sentinel 2 {} should only be the cloud coverage percentage".format(
                opt_param)
            # print("Filter values with less than {} percentage of cloud pixels type {}".format(opt_param["ccp"],type(opt_param["ccp"])))
            collection = collection.filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', opt_param["ccp"]))
            # print("Filter values with less than {} percentage of cloud pixels".format(opt_param["ccp"]))
        return collection
def _argparser():
    """Build and parse the command-line arguments for this module.

    Returns:
        argparse.Namespace with bd, ed, zone and c attributes
    """
    parser = argparse.ArgumentParser(description='Short sample app')
    parser.add_argument('--bd', type=str, help="begin date")
    # Bug fix: the help text previously said "begin date" here too.
    parser.add_argument('--ed', type=str, help="end date")
    parser.add_argument('--zone', type=str, help="path where the zone coordinates are stored ")
    parser.add_argument('--c', type=int, help="collection")
    return parser.parse_args()
def list_image_name(image_collection, sent):
    """List the image identifiers of every image in a collection.

    Args:
        image_collection: ee.ImageCollection
        sent: int, 1 (Sentinel-1) or 2 (Sentinel-2)

    Returns:
        list of string: "system:id" values for S1, "PRODUCT_ID" values for S2
    """
    print(type(image_collection))
    # The collection length is only known server-side; fetch it once.
    count = image_collection.toList(1000).length().getInfo()
    as_list = image_collection.toList(count)
    id_field = "system:id" if sent == 1 else "PRODUCT_ID"
    return [ee.Image(as_list.get(idx)).get(id_field).getInfo()
            for idx in range(count)]
def main(begin_date, ending_date, path_zone, sent):
    """Print the image ids available for a zone and a date range.

    Args:
        begin_date: start date (string or ee.Date)
        ending_date: end date (string or ee.Date)
        path_zone: path to the GeoJSON file describing the zone
        sent: int, 1 or 2 (Sentinel collection)
    """
    zone = gjson_2_eegeom(path_zone)
    collection = get_filter_collection(begin_date, ending_date, zone, sent)
    print("get the collection")
    print(list_image_name(collection, sent))
# Script entry point: parse CLI arguments and list matching image names.
if __name__ == '__main__':
    args = _argparser()
    main(args.bd, args.ed, args.zone, args.c)
|
# load flask sub-systems
from flask import Flask,render_template,url_for,request
# from flask.views import MethodView
# load application vars
# from public import app
from config.site import defaults #default title
from reddit_user_api import *
from graph_api import *
app = Flask(__name__)
# testing
@app.route('/')
def home():
    """Render the landing page using the site-wide default context."""
    context = defaults
    context['page'] = {'title': 'Home'}
    context['meta_description'] = 'Hello there! Welcome to Moodspace.'
    return render_template('home.html', data=context)
@app.route('/prediction_user/', methods=['POST', 'GET'])  # @app.route('/index',methods=['POST'])
def prediction_user():
    """Render sentiment statistics and graphs for a reddit user's posts.

    Reads the submitted ``username`` form field, classifies the user's
    posts and renders charts plus summary statistics.

    NOTE(review): only POST is handled; a plain GET falls through and
    returns None, which Flask rejects -- confirm whether GET should
    render an empty form instead.
    """
    if request.method == 'POST':
        username = request.form['username']
        user_obj = RedditUserAPI(username)
        # Fetch and classify every post for the user.
        clean_data = user_obj.get_clean_data()
        pred_df = user_obj.get_prediction(clean_data)
        # Aggregations consumed by the graphs below.
        month_cat_df = user_obj.get_df_by_month_and_cat(pred_df)
        month_df = user_obj.get_df_by_month(pred_df)
        # Summary statistics (class == 1 marks a negative post).
        total_post = pred_df.shape[0]
        num_neg_post = len(pred_df[pred_df['class'] == 1])
        neg_precent_int = int(num_neg_post / total_post * 100)
        if neg_precent_int > 50:
            sentiment = 'negative'
        else:
            sentiment = 'positive'
        # Bug fix: reuse the already-computed percentage instead of
        # recomputing int(num_neg_post/total_post * 100) a second time.
        neg_percent = str(neg_precent_int) + '%'
        # Build the graph payloads for the template.
        line_graph_json = CreateGraph().get_line_graph(month_df)
        bar_graph_json = CreateGraph().get_bar_graph(pred_df)
        stacked_bar_graph_json = CreateGraph().get_stacked_bar_graph(month_cat_df)
        return render_template('prediction_user.html',
                               line_graph_json=line_graph_json, stacked_bar_graph_json=stacked_bar_graph_json, bar_graph_json=bar_graph_json,
                               username=username, total_post=total_post, sentiment=sentiment, neg_percent=neg_percent)
@app.route('/prediction_solo/', methods=['POST', 'GET'])  # @app.route('/index',methods=['POST'])
def prediction_solo():
    """Classify a single free-form message submitted through the form."""
    if request.method == 'POST':
        author = request.form['name']
        message = request.form['message']
        # Wrap the single message in a one-row frame for the text pipeline.
        frame = pd.DataFrame({'text': [message]})
        prediction = TextAPI().get_clean_data(frame)
        return render_template('prediction_solo.html', msg_pred=prediction, msg=message, name=author)
# run application
# NOTE: debug=True enables the interactive debugger -- development only.
if __name__ == '__main__':
    app.run(debug=True)
"""
This file defines how to handle the MNIST dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import os
import csv
import preprocessing
from preprocessing import image_preprocessing
import configparser
import gzip
import pickle
this_folder = os.path.dirname(os.path.abspath(__file__)) + '/'
config = configparser.ConfigParser()
config.read(this_folder + 'dataset_paths.ini')
base_folder = config['PATHS'].get('MNIST')
##### Training ###########
def collect_train_data(num_samples_per_class=0):
    """Load the MNIST training split from the pickled gzip archive.

    Args:
        num_samples_per_class: unused; kept for interface compatibility
            with the other dataset modules.

    Returns:
        (train_data, train_labels): float32 images of shape (N, 28, 28, 1)
        and int32 labels.
    """
    # Context manager guarantees the archive is closed even on error.
    with gzip.open(base_folder + 'mnist.pkl.gz', 'rb') as f:
        train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    train_data = np.array(train_set[0], dtype='float32').reshape((-1,28,28,1))
    train_labels = np.array(train_set[1], dtype='int32')
    return train_data, train_labels
def collect_val_data():
    """Load the MNIST validation split from the pickled gzip archive.

    Returns:
        (val_data, val_labels): float32 images of shape (N, 28, 28, 1)
        and int32 labels.
    """
    print("Collecting validation data...")
    # Context manager guarantees the archive is closed even on error.
    with gzip.open(base_folder + 'mnist.pkl.gz', 'rb') as f:
        train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    val_data = np.array(valid_set[0], dtype='float32').reshape((-1,28,28,1))
    val_labels = np.array(valid_set[1], dtype='int32')
    return val_data, val_labels
# tf.data batch iterator for the training data
def train_BI(images,
             labels,
             batch_size,
             num_parallel_calls=100):
    """Create an initializable tf.data iterator over training batches.

    num_parallel_calls is accepted for interface parity but unused here.
    """
    pairs = tf.data.Dataset.from_tensor_slices((images, labels))
    batched = pairs.batch(batch_size, drop_remainder=False)
    return batched.make_initializable_iterator()
# tf.data batch iterator for the validation data
def val_BI(images,
           labels,
           batch_size,
           num_parallel_calls=100):
    """Create an initializable tf.data iterator over validation batches.

    num_parallel_calls is accepted for interface parity but unused here.
    """
    pairs = tf.data.Dataset.from_tensor_slices((images, labels))
    batched = pairs.batch(batch_size, drop_remainder=False)
    return batched.make_initializable_iterator()
# Additional tf.data batch iterator for the data that is used just for the propagation
# of a few images for visualization.
def img_BI(images,
           labels,
           batch_size,
           num_parallel_calls=100):
    """Create an initializable tf.data iterator for visualization images.

    num_parallel_calls is accepted for interface parity but unused here.
    """
    pairs = tf.data.Dataset.from_tensor_slices((images, labels))
    batched = pairs.batch(batch_size, drop_remainder=False)
    return batched.make_initializable_iterator()
def interpret_as_image(image):
    """Identity mapping: MNIST tensors are already displayable images."""
    return image
def num_train_samples():
    """Number of images in the MNIST training split."""
    return 50_000
def num_val_samples():
    """Number of validation samples consumed by the pipeline.

    NOTE(review): the MNIST pickle's validation split holds 10000 images;
    confirm whether limiting to 1000 here is intentional.
    """
    return 1_000
def bounds():
    """Scalar (min, max) pixel bounds.

    Foolbox only accepts scalar bounds rather than per-channel ones, so
    the worst-case range over all channels is returned.
    """
    return (0, 1)
# Per-channel pixel extrema (the scalar worst-case bounds are returned
# by bounds() above).
min_values = np.array([0.,0.,0.],np.float32)
max_values = np.array([1.,1.,1.],np.float32)
def image_range():
    """Valid [min, max] pixel value range of the dataset images."""
    return [0.0, 1.0]
import argparse
import collections
import os
from pathlib import Path
from matplotlib.pyplot import xticks
import numpy as np
import torch
import models
from base import get_optimizer
from dataloader import Dataloader
from parse_config import ConfigParser
from trainer import Trainer
from utils.loss import AvgPerplexity
from utils.metric import Accuracy
import seaborn as sns
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)
def main(config, model_path):
    """Interactively run a trained seq2seq model from the console.

    Loads the checkpoint at ``model_path``, rebuilds the model described
    by ``config['arch']``, then loops forever: read a line from stdin,
    predict the target sequence, and plot attention when available.
    """
    logger = config.get_logger('train')
    # Prefer the first GPU when available.
    if torch.cuda.is_available():
        device = torch.device('cuda:0')
    else:
        device = torch.device('cpu')
    tgt_preprocessing = None
    # Source sequences are reversed before prediction.
    def src_preprocessing(x): return x[::-1]
    model_args = config['arch']['args']
    state_dict=torch.load(model_path)['state_dict']
    # Vocabularies and special tokens are carried inside the checkpoint.
    model_args.update({
        'src_vocab': state_dict['src_vocab'],
        'tgt_vocab': state_dict['tgt_vocab'],
        'sos_tok': '<sos>',
        'eos_tok': '<eos>',
        'pad_tok': '<pad>',
        'device': device
    })
    model = getattr(models, config['arch']['type'])(**model_args)
    model.load_state_dict(state_dict)
    # Free the checkpoint memory before inference.
    del state_dict
    model.to(device)
    # Interactive REPL: one input sequence per line, character tokens.
    while True:
        seq_str = input(">")
        seq = list(seq_str.strip())
        print(seq)
        if src_preprocessing is not None:
            seq = src_preprocessing(seq)
        tgt_seq, attn_weights = model.predict(seq)
        print(tgt_seq)
        if attn_weights is not None:
            # Heatmap of attention between source and predicted tokens.
            fig = sns.heatmap(attn_weights, xticklabels=seq, yticklabels=tgt_seq)
            fig.xaxis.set_label_position('top')
            fig.figure.show()
# Script entry point: build the CLI, load the config and run the REPL.
if __name__ == '__main__':
    args = argparse.ArgumentParser()
    args.add_argument(
        '-c',
        '--config',
        default=None,
        type=str,
        help='config file path (default: None)')
    args.add_argument(
        '--model-path',
        type=str,
        required=True,
        # Bug fix: the help string was missing its closing parenthesis.
        help='path to model.pth to test (default: None)')
    args.add_argument(
        '-r',
        '--resume',
        default=None,
        type=str,
        help='path to latest checkpoint (default: None)')
    args.add_argument(
        '-d',
        '--device',
        default=None,
        type=str,
        help='indices of GPUs to enable (default: all)')
    # custom cli options to modify configuration from default values given in json file.
    # (CustomArgs is currently unused but kept for parity with the trainer CLI.)
    CustomArgs = collections.namedtuple('CustomArgs', 'flags type target help')
    config, args = ConfigParser.from_args(args)
    main(config, args.model_path)
|
#!/usr/bin/env python3
import hashlib
"""Hand-made cipher function. Expected to be pretty safe,
as long as HASHFUNC is know safe and BLOCK_SIZE long enough"""
HASHFUNC = hashlib.sha256
BLOCK_SIZE = 16
def _kiter(key:bytes):
    """Yield an endless keystream of ints derived from *key*.

    The key is repeatedly hashed with HASHFUNC; each digest supplies the
    next BLOCK_SIZE keystream bytes and the full digest becomes the key
    for the following round. A generator is used because the plaintext
    length is not known in advance.

    yields : int
    """
    while True:
        key = HASHFUNC(key).digest()
        for keystream_byte in key[:BLOCK_SIZE]:
            yield keystream_byte
def cipher(string:bytes, key:bytes):
    """Vernam-style mask: XOR the text with the keystream from _kiter.

    Returns the result as bytes; the output length equals len(string).
    """
    keystream = _kiter(key)
    return bytes(byte ^ next(keystream) for byte in string)


# XOR is symmetric, so deciphering is the same operation as ciphering.
decipher = cipher
|
"""
Runs TestHarness for BioModels. Creates:
*.png figure that plots relative errors
*.pcl file with data collected from run
*.log file with information about run
Common usage:
# Access information about command arguments
python SBstoat/mainTestHarness.py --help
# Process the BioModels 1-800, creating a new log file and data file
python SBstoat/mainTestHarness.py --firstModel 1 --numModel 800
# Process the BioModels 1-800, using the existing log and data files.
python SBstoat/mainTestHarness.py --firstModel 1 --numModel 800 --useExistingData --useExistingLog
# Create a plot from the existing data file
python SBstoat/mainTestHarness.py --plot
# Run analysis
@author: joseph-hellerstein
"""
from SBstoat._testHarness import TestHarness
from SBstoat.logs import Logger
import argparse
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
# Handle problem with module load
# Handle problem with module load
try:
    matplotlib.use('TkAgg')
except ImportError:
    pass
IGNORE_TEST = True
IS_PLOT = True
DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(os.path.dirname(DIR), "biomodels")
# Pattern for BioModels SBML files, filled in with the model number.
PATH_PAT = os.path.join(DATA_DIR, "BIOMD0000000%03d.xml")
LOG_LEVEL = 3
LOG_PATH = os.path.join(DIR, "mainTestHarness.log")
FIG_PATH = os.path.join(DIR, "mainTestHarness.png")
FIRST_MODEL = 210
NUM_MODEL = 2
PCL_FILE = "mainTestHarness.pcl"
FIT_MODEL = "fitModel"
BOOTSTRAP = "bootstrap"
NUM_NOERROR = "num_noerror"
# NOTE(review): this rebinding clobbers the NUM_MODEL = 2 defined above --
# confirm which definition is intended.
NUM_MODEL = "num_model"
LOGGER = "logger"
MAX_RELATIVE_ERROR = 1.0
# Context variables that are saved. Uses the following naming convention:
#    ends in "s" is a list: initialized to []
#    ends in "Dct" is a dict: initialized to {}
#    ends in "Path" is file path: initialized to None
#    begins with "is" is a bool: initialized to False
#    otherwise: int: initialized to 0
CONTEXT = [ "firstModel", "numModel", "numNoError", "fitModelRelerrors",
      "bootstrapRelerrors", "processedModels", "nonErroredModels", "erroredModels",
      "modelParameterDct", "pclPath", "figPath", "isPlot",
      "kwargDct"
      ]
############### FUNCTIONS ##################
def str2Bool(v):
    """Interpret *v* as a boolean for argparse.

    Accepts real bools plus common truthy/falsy spellings such as
    "yes"/"no", "t"/"f", "1"/"0" (case-insensitive).

    Raises:
        argparse.ArgumentTypeError: when the value is not recognized.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def remove(ffile):
    """Delete *ffile* if it exists; missing files are silently ignored."""
    if not os.path.isfile(ffile):
        return
    os.remove(ffile)
############### CLASSES ##################
class Runner(object):
    """Runs tests on biomodels."""

    def __init__(self, firstModel:int=210, numModel:int=2,
                 pclPath=PCL_FILE, figPath=FIG_PATH,
                 useExistingData:bool=False,
                 isPlot=IS_PLOT, **kwargDct):
        """
        Parameters
        ----------
        firstModel: first model to use
        numModel: number of models to use
        pclPath: file to which results are saved
        figPath: file to which the histogram figure is written
        useExistingData: use data in existing PCL file
        isPlot: whether to plot after the run completes
        kwargDct: forwarded to TestHarness; may carry a "logger" entry
        """
        # Only reuse saved state when the pickle file actually exists.
        self.useExistingData = useExistingData and os.path.isfile(pclPath)
        # Recover previously saved results if desired
        if self.useExistingData:
            self.restore(pclPath=pclPath)
        else:
            # Initialize based on type of context variable
            # (naming convention documented next to CONTEXT above)
            for name in CONTEXT:
                if name[-1:] == "s":
                    self.__setattr__(name, [])
                elif name[-3:] == "Dct":
                    self.__setattr__(name, {})
                elif name[-4:] == "Path":
                    self.__setattr__(name, None)
                elif name[0:2] == "is":
                    self.__setattr__(name, False)
                else:
                    self.__setattr__(name, 0)
            # Initialize to parameters for this instantiation
            self.firstModel = firstModel
            self.numModel = numModel
            self.pclPath = pclPath
            self.figPath = figPath
            self.kwargDct = kwargDct
            self.isPlot = isPlot
            self.useExistingData = useExistingData
        #
        # Adopt the caller's logger when provided; otherwise create one and
        # expose it to TestHarness through kwargDct.
        if LOGGER in kwargDct.keys():
            self.logger = kwargDct[LOGGER]
        else:
            self.logger = Logger()
            kwargDct[LOGGER] = self.logger
        self.save()

    def _isListSame(self, list1, list2):
        # True when both lists hold the same elements (order-insensitive).
        diff = set(list1).symmetric_difference(list2)
        return len(diff) == 0

    def equals(self, other):
        """Approximate equality: same attribute names, same list/scalar values.

        Attributes that are neither lists nor scalars (e.g. the logger)
        are ignored in the comparison.
        """
        selfKeys = list(self.__dict__.keys())
        otherKeys = list(other.__dict__.keys())
        if not self._isListSame(selfKeys, otherKeys):
            return False
        #
        for key, value in self.__dict__.items():
            if isinstance(value, list):
                isEqual = self._isListSame(value, other.__getattribute__(key))
                if not isEqual:
                    return False
            elif any([isinstance(value, t) for t in [int, str, float, bool]]):
                if self.__getattribute__(key) != other.__getattribute__(key):
                    return False
            else:
                # Non-comparable attribute types are skipped.
                pass
        #
        return True

    def run(self):
        """
        Runs the tests. Saves state after each tests.
        """
        # Processing models
        modelNums = self.firstModel + np.array(range(self.numModel))
        for modelNum in modelNums:
            # Skip models already handled by a previous (restored) run.
            if (modelNum in self.processedModels) and self.useExistingData:
                continue
            else:
                self.processedModels.append(modelNum)
            input_path = PATH_PAT % modelNum
            msg = "Model %s" % input_path
            self.logger.activity(msg)
            try:
                harness = TestHarness(input_path, **self.kwargDct)
                if len(harness.parametersToFit) == 0:
                    self.logger.result("No fitable parameters in model.")
                    self.save()
                    continue
                harness.evaluate(stdResiduals=1.0,
                      fractionParameterDeviation=1.0, relError=2.0)
            except Exception as err:
                # Record the failure and keep going with the next model.
                self.erroredModels.append(modelNum)
                self.logger.error("TestHarness failed", err)
                self.save()
                continue
            # Parameters for model
            self.modelParameterDct[modelNum] = \
                  list(harness.fitModelResult.parameterRelErrorDct.keys())
            # Relative error in initial fit
            values = [v for v in
                  harness.fitModelResult.parameterRelErrorDct.values()]
            self.fitModelRelerrors.extend(values)
            # Relative error in bootstrap
            values = [v for v in
                  harness.bootstrapResult.parameterRelErrorDct.values()]
            self.bootstrapRelerrors.extend(values)
            # Count models without exceptions
            self.nonErroredModels.append(modelNum)
            self.numNoError = len(self.nonErroredModels)
            self.save()
        # Check for plot
        if self.isPlot:
            self.plot()

    def save(self):
        """
        Saves state. Maintain in sync with self.restore().
        """
        if self.pclPath is not None:
            # Persist the CONTEXT attributes, in order, as one pickle.
            data = [self.__getattribute__(n) for n in CONTEXT]
            with (open(self.pclPath, "wb")) as fd:
                pickle.dump(data, fd)

    def restore(self, pclPath=None):
        """
        Restores state. Maintain in sync with self.save().
        """
        if pclPath is None:
            pclPath = self.pclPath
        if os.path.isfile(pclPath):
            with (open(pclPath, "rb")) as fd:
                data = pickle.load(fd)
            # Reassign attributes in the same order save() wrote them.
            [self.__setattr__(n, v) for n, v in zip(CONTEXT, data)]
        else:
            raise ValueError("***Restart file %s does not exist"
                  % self.pclPath)

    @staticmethod
    def _pruneRelativeErrors(relativeErrors, maxError=MAX_RELATIVE_ERROR):
        """
        Deletes Nans. Removes very large values.

        Parameters
        ----------
        relativeErrors: list of relative errors
        maxError: maximum relative error considered

        Returns
        -------
        list: pruned errors
        float: fraction pruned from non-nan values
        """
        noNanErrors = [v for v in relativeErrors if not np.isnan(v)]
        prunedErrors = [v for v in noNanErrors if v <= maxError]
        prunedFrc = 1 - len(prunedErrors) / len(noNanErrors)
        return prunedErrors, prunedFrc

    def plot(self):
        """
        Does all plots.
        """
        _, axes = plt.subplots(1, 2)
        prunedModelErrors, modelPrunedFrc = \
              self._pruneRelativeErrors(self.fitModelRelerrors)
        prunedBootstrapErrors, bootstrapPrunedFrc = \
              self._pruneRelativeErrors(self.bootstrapRelerrors)
        maxBin1 = self._plotRelativeErrors(axes[0], prunedModelErrors,
              FIT_MODEL, modelPrunedFrc)
        maxBin2 = self._plotRelativeErrors(axes[1], prunedBootstrapErrors,
              BOOTSTRAP, bootstrapPrunedFrc, isYLabel=False)
        # Use a common y-scale for both histograms.
        maxBin = max(maxBin1, maxBin2)
        if maxBin > 0:
            axes[0].set_ylim([0, maxBin])
            axes[1].set_ylim([0, maxBin])
        #
        if len(self.processedModels) == 0:
            frac = 0.0
        else:
            frac = 1.0*self.numNoError/len(self.processedModels)
        suptitle = "Models %d-%d. Fraction non-errored: %2.3f"
        lastModel = self.firstModel + len(self.processedModels) - 1
        suptitle = suptitle % (self.firstModel, lastModel, frac)
        plt.suptitle(suptitle)
        # NOTE(review): show() precedes savefig(); with some backends the
        # figure can be cleared before saving -- confirm the saved PNG.
        plt.show()
        plt.savefig(self.figPath)

    def _plotRelativeErrors(self, ax, relErrors, title, prunedFrc, isYLabel=True):
        """
        Plots histogram of relative errors.

        Parameters
        ----------
        ax: Matplotlib.axes
        relErrors: list-float
        title: str
        prunedFrc: float
        isYLabel: bool

        Returns
        -------
        float: maximum number in a bin
        """
        rr = ax.hist(relErrors)
        fullTitle = "%s. Frc Pruned: %2.2f" % (title, prunedFrc)
        ax.set_title(fullTitle)
        ax.set_xlabel("relative error")
        if isYLabel:
            ax.set_ylabel("number parameters")
        ax.set_xlim([0, 1])
        return max(rr[0])
# Script entry point: parse CLI flags and run the harness over BioModels.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='SBstoat tests for BioModels.')
    default = 1
    parser.add_argument('--firstModel', type=int,
          help='First BioModel to process (int); default: %d' % default,
          default=default)
    default = 0
    parser.add_argument('--numModel', type=int,
          help='Number of models to process (int); default = %d' % default,
          default=default)
    parser.add_argument('--logPath', type=str,
          help='Path for log file (str); default: %s' % LOG_PATH,
          default=LOG_PATH)
    parser.add_argument('--figPath', type=str,
          help='Path for figure (str); Default: %s' % FIG_PATH,
          default=FIG_PATH)
    parser.add_argument('--useExistingData', action='store_true',
          help="Use saved data from an previous run (flag).")
    parser.add_argument('--plot', action='store_true',
          help="Plot existing data (flag).")
    parser.add_argument('--useExistingLog', action='store_true',
          help="Append to the existing log file, if it exists (flag).")
    args = parser.parse_args()
    # Plotting implies reusing the existing log; reusing data additionally
    # requires that no new models were requested.
    useExistingLog = args.plot or args.useExistingLog
    useExistingData = (args.plot and (args.numModel == 0)) or args.useExistingData
    #
    if not useExistingLog:
        remove(args.logPath)
    runner = Runner(firstModel=args.firstModel,
          numModel=args.numModel,
          useExistingData=useExistingData,
          figPath=args.figPath,
          isPlot=args.plot,
          logger=Logger(toFile=args.logPath, logLevel=LOG_LEVEL))
    runner.run()
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import pandas as pd
import plotly.graph_objs as go
from Visualization import (
Overview,
Sankey,
SVA_Research,
utils
)
from utils import Header, get_header, get_menu, make_dash_table
def create_layout(app):
    """Build the research-team "about" page layout for the Dash app.

    Args:
        app: the dash.Dash application (used by Header for asset lookups)

    Returns:
        dash_html_components.Div containing the complete page.
    """
    return html.Div(
        [
            html.Div([Header(app)]),
            html.Br([]),
            html.Br([]),
            #Home Page
            html.Div(
                [
                    # html.Div(
                    #     [
                    #         html.Img(
                    #             src='https://studentveterans.org/media/com_workforce/employees/chriscate_1569001412.png',
                    #             className='Me_img'
                    #         ),
                    #
                    #     ],
                    #     className='row'
                    # ),
                    # html.Br([]),
                    # html.Br([]),
                    # Narrative paragraph describing the research team,
                    # with inline links to staff bios and prior projects.
                    html.Div(
                        [
                            html.P(
                                [
                                    "Led by ",
                                    html.A("Dr. Chris Cate", href='https://studentveterans.org/aboutus/sva-team-leadership-student-veterans-of-america?view=employee&id=4'),
                                    ", the Student Veterans of America Research Department is highly regarded"
                                    " within the industry. Frequently referenced in the United States Congress, SVA"
                                    " works to inform the public, stake holders, policy makers, and others on student"
                                    " veteran and military connected students topics and concerns through emperical research. "
                                    "In addition to the Life Cycle Atlas, SVA has completed research projects such as ",
                                    html.A("National Veteran Education Success Tracker (NVEST)", href='https://nvest.studentveterans.org/'),
                                    ', ',
                                    html.A("SVA Spotlight", href='https://studentveterans.org/aboutus/research/sva-spotlight'),
                                    ' ,',
                                    ' and ',
                                    html.A("SVA Census", href='https://studentveterans.org/images/pdf/2016-SVA-Census-Survey-Student-Veteran-General-Breakdowns-120716.pdf'),
                                    ", please take a look!",
                                    html.Br([]),
                                    html.Br([]),
                                    "Dr. Cate is supported by a tenacious team of researchers "
                                    "committed to impacting the lives of student veterans. ",
                                    html.A("Ryan Kinsey", href='https://studentveterans.org/aboutus/sva-team-leadership-student-veterans-of-america?view=employee&id=103'),
                                    " is SVA's Junior Data Scientist. With a "
                                    "Master's Degree in Data Science from The George Washington University, Ryan works"
                                    " to apply data science and data analytics techniques to higher education research. ",
                                    html.A('Madeline Clark', href='https://studentveterans.org/aboutus/sva-team-leadership-student-veterans-of-america?view=employee&id=115'),
                                    ' is a current graduate student studying Linguistics at Georgetown University. '
                                    'Her experience in qualitative and quantitative research as well as data '
                                    'visualization skills make her an invaluable asset to the SVA research team! ',
                                    html.A('Kameron Smith', href='https://studentveterans.org/aboutus/sva-team-leadership-student-veterans-of-america?view=employee&id=114'),
                                    ' served in the United States Air Force for over 7 years. He is'
                                    ' is currently pursuing a Journalism degree at American University. His firsthand '
                                    'experience with being a student veteran, combined with his data analytics skills'
                                    ' bring valuable insight to SVA!',
                                    html.Br([]),
                                    html.Br([]),
                                    " Each member of the team played a pivotal role in making this research available to the public! "
                                    "For inquires or to report issues with this application, please ",
                                    html.A("email",href='mailto:chris.cate@studentveterans.org'),
                                    " or visit our ",
                                    html.A('website', href='https://studentveterans.org/aboutus/research'),
                                    "."
                                ],
                                className='summary',
                                style={'font-size':'20px'}
                            )
                        ],
                        className='summary'
                    ),
                ],
                className="sub-page"
            ),
        ],
        className="page"
    )
from django.contrib import admin
from cocukcacim.apps.activities.models import Activity, EventActivity
class EventActivityInline(admin.StackedInline):
    # Edit related EventActivity rows inline on the parent change page.
    model = EventActivity
class EventAdmin(admin.ModelAdmin):
    # Columns shown in the admin changelist.
    list_display = ('title', 'created_at', 'updated_at')
    # Sidebar filters.
    list_filter = ('created_at', 'updated_at')
    inlines = [
        EventActivityInline,
    ]
# NOTE(review): EventAdmin is registered for the Activity model -- the
# class name suggests it may have been meant for an Event model; confirm.
admin.site.register(Activity, EventAdmin)
|
from discord.ext import commands
import discord
class help(commands.Cog):
    """Cog providing the bot's custom -help command."""

    def __init__(self , client):
        # Reference to the running bot, used for the embed thumbnail.
        self.client = client

    @commands.command()
    async def help(self ,ctx , name : str = None):
        """Send the command list, or detailed help for one command.

        name: optional command name; when omitted, an overview embed
        listing every command is sent instead.
        """
        # Per-command detail text shown when a name is supplied.
        help_dictionary = {
            "ping" : "Check bot latency \nAliases: Latency \nUsage: -ping",
            "quote" : "Get a motivational quote \nAliases: inspire \nUsage: -quote",
            "help" : "get a list of all commmands \nAliases: None \nUsage: -help",
            "rgbvibe" : "send the rgb vibe emote \nAliases: None \n Usage: -rgbvibe",
            "angryahh" : "send the angry ahh emote\nAliases: None\nUsage: -angryahh",
            "purge" : "delete messages\nAliases: clear\nUsage: -purge (number of messages)\nRequirements: Manage messages\nExample: -purge 10",
            "add" : "add two numbers\nAliases: None\nUsage: -add (number_1) (number2)\nExample: -add 1 2",
            "sub" : "subtract two numbers\nAliases: None\nUsage: -sub (number_1) (number_2)\nExample: -sub 2 1",
            "multiply" : "multiply two numbers\nAliases: into\nUsage: -multiply (number_1) (number_2)\nExample: -multiply 2 2",
            "divide" : "divide two numbers\nAliases: by\nUsage: -divide (number_1) (number_2)\nExample: -divide 4 2",
            "remainder" : "get remainder after division\nAliases: None\nUsage: -remainder (number_1) (number_2)\nExample: -remainder 5 2",
            "exponent" : "Raise a number to a power\nAliases: None\nUsage: -exponent (number_1) (number_2)\nExample: -exponent 5 2",
            "sqrt" : "Take square root of a number\nAliases: root\nUsage: -sqrt (number_1)\nExample: -sqrt 4",
        }
        if name is None:
            # Overview embed listing every command grouped by category.
            embed = discord.Embed(title="Help" , description = "**Misc** \n1.) help \n2.) ping \n3.) quote\n4.) purge \n \n**Maths** \n1.) add\n2.) sub\n3.) multipy\n4.) divide\n5.) remainder\n6.) exponent\n7.) sqrt\n \n**Emotes** \n1.) rgbvibe\n2.) angryahh\n------------------------" , color=discord.Color.red())
            # NOTE(review): discord Client/Bot objects expose no avatar_url
            # attribute -- this likely needs self.client.user.avatar_url; confirm.
            embed.set_thumbnail(url = self.client.avatar_url)
            embed.set_footer(text="do -help (command name) for detailed info")
            await ctx.send(embed = embed)
        else:
            if name in help_dictionary:
                detail_embed = discord.Embed(title=name , description =help_dictionary[name] , color=discord.Color.blue())
                detail_embed.set_footer(text=ctx.author.name , icon_url = ctx.author.avatar_url)
                await ctx.send(embed = detail_embed)
            else:
                # Unknown command name: point the user back at -help.
                error_embed = discord.Embed(title="error" , description = "Command not found" , color=discord.Color.red())
                error_embed.set_footer(text="do -help to see a list of all commands" , icon_url = ctx.author.avatar_url)
                await ctx.send(embed = error_embed)
def setup(client):
    # Extension entry point invoked by bot.load_extension().
    client.add_cog(help(client))
class Solution(object):
    def maxSubArray(self, nums):
        """Return the largest sum over all contiguous subarrays of nums.

        Kadane's algorithm: the best sum of a subarray ending at i is
        either nums[i] alone or nums[i] plus the best sum ending at i-1.
        O(n) time, O(1) space. (The original kept a full dp table and
        read dp[-1] on the first iteration, silently relying on the
        zero initialisation; it also raised ValueError on empty input,
        which now returns 0.)
        """
        if not nums:
            return 0
        best = current = nums[0]
        for value in nums[1:]:
            # Either extend the running subarray or restart at value.
            current = max(value, current + value)
            best = max(best, current)
        return best
if __name__ == '__main__':
    S = Solution()
    # Expected output: 6 (subarray [4, -1, 2, 1]).
    print(S.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))
|
import bpy
import os
import math
import random
# STL files are expected in an "STLs" folder next to the .blend file.
directory = os.path.dirname(bpy.data.filepath)
path = directory + "//STLs"
print(path)
# Grid layout parameters: cell size and number of columns/rows.
gridsize = 80
gridx = 5
gridy = 5
# Starting grid index passed to importStls below.
i = 0
def importStls(i):
    # Import every STL in `path`, placing each on its own grid cell with
    # a random rotation about Z. `i` is the starting grid index.
    for stl in os.listdir(path):
        stlpath = path+"//"+stl
        print(stlpath)
        xOffset = gridsize * (i%gridx)
        # NOTE(review): the row index divides by gridy while the column
        # wraps on gridx -- confirm this is intended when gridx != gridy.
        yOffset = gridsize * math.trunc(i / gridy)
        # NOTE(review): bpy.ops.transform.rotate's value is in radians,
        # but this range looks like degrees (-180..180) -- confirm.
        zRot = random.uniform(-180,180)
        bpy.ops.import_mesh.stl(filepath=stlpath)
        # Newly imported object is the active selection; move then rotate it.
        bpy.ops.transform.translate(value=(xOffset, yOffset, 0))
        bpy.ops.transform.rotate(value=zRot, orient_axis='Z')
        i += 1
#make all objects same mat
def setMat():
    """Assign the shared yellow material to every mesh except the table."""
    for obj in bpy.data.objects:
        if obj.type != "MESH" or obj.name == "Table":
            continue
        print(obj.name)
        obj.active_material = bpy.data.materials["CrispMatYellow"]
# Run the import and material assignment when the script executes.
importStls(i)
setMat()
|
num = int(input("enter any number="))
a = num
i = 0
while i<=num:
rem = num%10
sum = rem+num
num = num //10
i+=1
if a%sum==0:
print(a,"is a harshad number")
else:
print("not a harshad number")
# Print the Harshad status of every number from 1 to 1000.
# Bug fixes vs. the original: `a` was frozen at 1 before the inner loop,
# the digit sum added the whole number instead of its digits, and the
# inner loop variable doubled as the accumulator, corrupting both.
j = 1
while j <= 1000:
    digit_sum = 0
    i = j
    while i > 0:
        digit_sum += i % 10
        i = i // 10
    if j % digit_sum == 0:
        print(j, "is a harshad number")
    else:
        print("not a harshad number")
    j += 1
d ={"a":1,"b":2,"a":3}
print(d) |
import workstation as ws
class WorkstationFactory:
    """Creates, registers and looks up Workstation instances by name."""

    def __init__ (self):
        # Next id handed out by createWorkstation.
        self.current_id = 0
        # Registered workstations keyed by their name.
        self.map = dict()

    def createWorkstation(self):
        """Create a new Workstation carrying a fresh sequential id."""
        w = ws.Workstation(self.current_id)
        self.current_id = self.current_id + 1
        return w

    def addWorkstation(self, w):
        """Register *w* under its name; non-Workstation values are rejected."""
        # isinstance is the idiomatic check (also accepts subclasses).
        if isinstance(w, ws.Workstation):
            self.map[w.getName()] = w
        else:
            print ('ERROR: variable must be workstation type')

    def searchWorkstation(self, name):
        """Return the workstation registered under *name*, or None."""
        if name in self.map:
            return self.map[name]
        # Bug fix: the original message lacked the space before "is not".
        print('Error: ' + name + ' is not a defined workstation')
        return None
|
import os, pdb
def write_placemark( file_obj, place_mark_name, coords_kml, bubbledata, bubbleFields, geom_type ):
    """
    General function that creates the placemark tag. This works for polylines, polygons and points.

    file_obj        - open, writable file-like object the KML is streamed to.
    place_mark_name - value for the <name> tag.
    coords_kml      - pre-built coordinate string (polylines/polygons) or a
                      (lon, lat) pair for points.
    bubbledata      - attribute row values shown in the pop-up bubble.
    bubbleFields    - attribute column names, parallel to bubbledata.
    geom_type       - one of 'Point', 'Polyline', 'Polygon'.
    """
    # Write opening placemark tag and name. All geom_types share this
    file_obj.write(
        '<Placemark>\n'
        '<name> %s </name>\n' % place_mark_name +
        '<description>\n'
    )
    # Writing the desription node: pair each attribute column name with its
    # row value (zip replaces the original hand-maintained index counter).
    for field, data in zip(bubbleFields, bubbledata):
        # Writing html
        file_obj.write('<p> <b> %s: </b> %s </p>\n' % ( field, data ))
    # If it is a point then write the appropriate tags for coordinates and ending placemark tag
    if geom_type == 'Point':
        # Close the desription tag and write out the coordinates
        file_obj.write(
            '</description>\n'
            '<Point>\n'
            '<coordinates> %s, %s </coordinates>\n' % ( coords_kml[0], coords_kml[1] ) +
            '</Point>\n'
            '</Placemark>\n'
        )
    # If it is a polyline then write the appropriate tags for coordinates and ending placemark tag
    elif geom_type == 'Polyline':
        # Write the closing description tag and opening coordinate tag
        file_obj.write(
            '</description>'
            '<styleUrl>#msn_ylw-pushpin</styleUrl>\n'
            '<LineString>\n'
            '<tessellate>1</tessellate>\n'
            '<coordinates>%s' % coords_kml + '\n'
            '</coordinates>\n'
            '</LineString>\n'
            '</Placemark>\n'
        )
    # If it is a polygon then write the appropriate tags for coordinates and ending placemark tag
    elif geom_type == 'Polygon':
        # Write the closing description tag and opening coordinate tag
        file_obj.write(
            '</description>\n'
            '<Polygon>\n'
            # Fix: the KML element is <tessellate>; the original wrote the
            # misspelled <tesselate>, which Google Earth ignores.
            '<tessellate>1</tessellate>\n'
            '<outerBoundaryIs>\n'
            '<LinearRing>\n'
            '<coordinates> %s </coordinates>' % coords_kml +'\n'
            '</LinearRing>\n'
            '</outerBoundaryIs>\n'
            '</Polygon>\n'
            '</Placemark>\n'
        )
def make_kml( outFolderKml, kmlFileName, coordFileToList, geom_type, bubbleFields=None, **kwargs ):
    """
    Summary: Makes a kml file. This is used in the csvToKml class and
    shapefileToKml class. Writes the kml to file.
    'outFolderKml' - path to where the kml will be saved. For example:
    C:\\To\Some\directory
    'kmlFileName' - the name of the kml. For example:
    myKmlName.kml
    'bubbleFields' - A list of items that were the header column names, minus
    the latitude, longitude, name. For example,
    ['Id','name','description']
    'coordFileToList' - Nested lists of coordinates and row data. For example:
    [
    [[-81.2345,'31.22345'],['-81.45634','31.24355',['-81.32523','31.452'],['id_1','Tupelo','some description]],
    [[-81.4345,'31.92345'],['-81.731.24355',['-81.52523','31.452'],['id_2','some name','some description]],
    ]
    'kwargs' -
    'fromShapfile=True' - a keyword argument that directs the logic if the
    kml is being generated from a shapefile. For example:
    fromShapefile=True
    'UniqueLines=aList' - a keyword argument at has to be used if the kml
    is being generated from a csv file. The 'aList' is a list of unique
    features (i.e. polygons). For example:
    aList = ['Sapelo','Ogeechee','St.Mary']
    UniqueLines=aList

    NOTE: this function is Python 2 only (print statements, dict.has_key).
    """
    # Outpath to where the kml is saved
    kmlOutFolderPath = os.path.join( outFolderKml, kmlFileName)
    print 'Creating KML: ', kmlFileName
    print 'Saving KML to: ', kmlOutFolderPath
    # Open file for writing
    file_obj = open( kmlOutFolderPath , 'w' )
    # write to file. NOTE: all features are white because this is a generic xml header, that
    # doesn't actually conform to Google Earth standards. The features are white because
    # that is the default in Google Earth for mal formed kmls.
    file_obj.write('<?xml version="1.0" encoding="UTF-8"?>\n'
                   '<kml xmlns="http://www.opengis.net/kml/2.2">\n'
                   '<Document>\n'
                   '<name>%s</name>\n' % kmlFileName +
                   '<StyleMap id="msn_shaded_dot">\n'
                   '<Pair>\n'
                   '<key>normal</key>\n'
                   '<styleUrl>#sn_shaded_dot</styleUrl>\n'
                   '</Pair>\n'
                   '<Pair>\n'
                   '<key>highlight</key>\n'
                   '<styleUrl>#sh_shaded_dot</styleUrl>\n'
                   '</Pair>\n'
                   '</StyleMap>\n'
                   '<Style id="sh_shaded_dot">\n'
                   '<IconStyle>\n'
                   '<color>ff00ffaa</color>\n'
                   '<scale>0.945455</scale>\n'
                   '<Icon>\n'
                   '<href>http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png</href>\n'
                   '</Icon>\n'
                   '</IconStyle>\n'
                   '<ListStyle>\n'
                   '</ListStyle>\n'
                   '</Style>\n'
                   '<Style id="sn_shaded_dot">\n'
                   '<IconStyle>\n'
                   '<color>ff00ffaa</color>\n'
                   '<scale>0.8</scale>\n'
                   '<Icon>\n'
                   '<href>http://maps.google.com/mapfiles/kml/shapes/shaded_dot.png</href>\n'
                   '</Icon>\n'
                   '</IconStyle>\n'
                   '<ListStyle>\n'
                   '</ListStyle>\n'
                   '</Style>\n'
                   )
    # Checks to see if the data is coming from a parsed shapefile
    # (has_key is a Python 2 idiom; equivalent to 'fromShapefile' in kwargs)
    if kwargs.has_key('fromShapefile') and kwargs['fromShapefile'] == True:
        # A counter used to name the polygons and to synchronize the identifier
        # that ArcGIS uses (so you can compare the kml and the shapefile).
        FIDCounter = 0
        # for each polygon in the shapefile
        for coord in coordFileToList:
            # Checks to see if it was a polygon shapefile
            if geom_type == 'Polygon':
                # Build coordinates: join each vertex's components with ','
                # and vertices with ',0 ' (the 0 is the KML altitude).
                coords_kml = [ ',0 '.join(','.join( str(x) for x in nlst) for nlst in coord[:-1] ) ][0] + ',0'
                # Write placemark tag for placemark
                write_placemark(
                    file_obj,
                    FIDCounter,   # place_mark_name
                    coords_kml,   # coordinates
                    coord[-1:][0],    # bubbledata
                    bubbleFields, # bubbleFields
                    geom_type
                    )
            # Checks to see if it was a point shapefile
            elif geom_type == 'Point':
                # Write placemark tag for placemark
                write_placemark(
                    file_obj,
                    FIDCounter,   # place_mark_name
                    [ coord[0], coord[1] ],   # coordinates [ lat, long ]
                    coord[2:],    # bubbledata
                    bubbleFields, # bubbleFields
                    geom_type
                    )
            # add one to the counter
            FIDCounter += 1
    # Checks to see if the data is coming from a parsed csv file
    elif kwargs.has_key('parsed_csv'):
        # Checks to see if it was a csv file for points
        if geom_type == 'Point':
            # for each list(coord) we are going to write it to the pop up bubble in
            # Google Earth. This is what appears when you click on a placemark a
            # bubble opens up.
            for coord in coordFileToList:
                # Write placemark tag
                write_placemark(
                    file_obj,
                    coord[0],     # place_mark_name
                    [ coord[-2:][0], coord[-1:][0] ],     # coordinates [ lat, long ]
                    coord,        # bubbledata
                    bubbleFields, # bubbleFieds
                    geom_type
                    )
        # Checks to see if it was a csv file for polylines of polygons. These two share a
        # significant portion of logic to generate both
        elif geom_type == 'Polygon' or geom_type == 'Polyline':
            unique_features = kwargs['parsed_csv']
            # List that holds all the information related to a polyline, a list of lists
            unique_feature_list = []
            # For each in the unique list of polylines
            for feature in unique_features:
                # for each list in the every row of the csv file
                for coord in coordFileToList:
                    # if each is the same as coord[0], which is the index where the
                    # polyline name is located.
                    if feature in coord[0]:
                        # if they are equal to each other append it to unique_feature_list
                        unique_feature_list.append(coord)
                if geom_type == 'Polyline':
                    # Build coordinates
                    coords_kml = [ ',0 '.join( ','.join(coords[-2:]) for coords in unique_feature_list ) ][0]
                elif geom_type == 'Polygon':
                    # Build the coordinates (a polygon repeats its first
                    # vertex at the end to close the ring)
                    coords_kml = [ ',0 '.join( ','.join(coords[-2:]) for coords in unique_feature_list ) ][0] + ',0 ' + ','.join(unique_feature_list[0][-2:]) + ',0 '
                # Write placemark tag
                write_placemark(
                    file_obj,
                    feature,      # place_mark_name
                    coords_kml,   # coordinates
                    unique_feature_list[0][:-2],  # bubbledata minus lat and long coords
                    bubbleFields[:-2],    # bubbleFieds minus lat and long headers
                    geom_type
                    )
                # make the unique_feature_list list empty for the next polyline
                unique_feature_list = []
    # Close document tags
    file_obj.write('</Document>\n'
                   '</kml>')
    # close file
    file_obj.close()
    print 'Kml complete'
    print ''
|
# Demo script: Python's three numeric types and conversions between them.
x=1
y=1.0
z=5j
print(type(x))
print(type(y))
print(type(z))
# int works for arbitrarily large positive and negative values
x=102002
y=-22222
print(type(x))
print(type(y))
x=1.22222222222222222
y=-1.4454
print(type(x))
print(type(y))
# complex literals use the j suffix
x=53+24j
y=-35j
print(type(x))
print(type(y))
# explicit conversions between the numeric types
x=1
y=1.0
z=2j
a=int(y)
b=float(x)
c=complex(x)
print(a)
print(b)
print(c)
print(type(a))
print(type(b))
print(type(c))
# random.randrange(1, 10) yields an int in [1, 10)
import random
print(random.randrange(1,10))
print(random.randrange(1,10))
print(random.randrange(1,10))
print(random.randrange(1,10))
|
class ShareInterface:
    """Base type for shapes; draw() is a no-op placeholder for subclasses."""

    def draw(self):
        """Subclasses override this to render themselves."""
class Circle(ShareInterface):
    """Concrete shape; draw() reports the call on stdout."""
    def draw(self):
        print("Circle.draw")
class Square(ShareInterface):
    """Concrete shape; draw() reports the call on stdout."""
    def draw(self):
        print("Square.draw")
class ShareFactory:
    """Simple factory mapping a type string to a new shape instance."""

    @staticmethod
    def getShape(typ):
        """Return a new shape for `typ` ('circle' or 'square').

        Raises AssertionError for unknown types. The original used
        `assert 0, ...`, which is silently stripped under `python -O`,
        letting unknown types fall through and return None; raising the
        same exception explicitly keeps callers' except clauses working.
        """
        if typ == 'circle':
            return Circle()
        if typ == 'square':
            return Square()
        raise AssertionError("Could not find the shape " + typ)
|
# Cleaned up: the original lines carried leftover '+' diff markers (invalid
# syntax) and assigned `num` while reading `num1`, so num1 was undefined.
num1 = 21
num2 = int(input())
print(num1 - num2)
num2 = int(input())
print(num1 - num2)
num2 = int(input())
print(num1 - num2)
# Generated by Django 2.2 on 2019-04-17 15:50
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: sets default ordering by league_name
    # and a human-readable verbose name on the `standings` model's Meta.
    dependencies = [
        ('standings', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='standings',
            options={'ordering': ['league_name'], 'verbose_name': 'Standing'},
        ),
    ]
|
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from rest_framework import renderers
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, ModelViewSet
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import detail_route, list_route
from management.serializers import *
from management.models import *
from management.permissions import IsOwnerOrAdmin, IsProjectOwnerOrAdmin
# from django.shortcuts import render
@csrf_exempt
def test(request):
    """Render the static test page (CSRF-exempt for quick manual checks)."""
    return render(request, 'test.html')
class FormViewSet(ModelViewSet):
    """Base viewset adding an HTML `form` list endpoint that renders the
    viewset's serializer as a titled form."""

    name = 'Default'
    template_name = 'basic-form.html'

    @list_route(renderer_classes=[renderers.TemplateHTMLRenderer])
    def form(self, request, *args, **kwargs):
        """Render an empty serializer form; the page title is the serializer
        class name with its 'Serializer' suffix stripped."""
        serializer = self.get_serializer()
        name = type(serializer).__name__
        # Fix readability: the original sliced with a magic len(name) - 10;
        # name the suffix being removed instead.
        name = name[:-len('Serializer')]  # e.g. ProjectSerializer -> Project
        print(name)
        return Response({'serializer': serializer, 'title': name})
class ProjectViewSet(FormViewSet):
    """CRUD for Project; owners see only their own projects, staff see all."""
    queryset = Project.objects.all()
    serializer_class = ProjectSerializer
    permission_classes = (IsAuthenticated, IsOwnerOrAdmin)
    def perform_create(self, serializer):
        # Stamp the authenticated user as the project owner on create.
        return serializer.save(user=self.request.user)
    def get_queryset(self):
        # Staff get an unrestricted queryset; everyone else only their own.
        if self.request.user.is_staff:
            return Project.objects.all()
        return Project.objects.filter(user=self.request.user)
    # @list_route(methods=['get'])
    # def get_form
    @detail_route(methods=['get'])
    def samples(self, request, pk=None):
        """List the samples belonging to this project."""
        sample_list = Sample.objects.filter(project=self.get_object())
        serializer = SampleSerializer(sample_list, many=True)
        return Response(serializer.data)
class FlowcellViewSet(FormViewSet):
    """CRUD for Flowcell; non-staff only see flowcells reachable via their lanes."""
    queryset = Flowcell.objects.all()
    serializer_class = FlowcellSerializer
    # renderer_classes = [renderers.JSONRenderer, renderers.TemplateHTMLRenderer]
    # template_name = "test-form.html"
    def get_queryset(self):
        if self.request.user.is_staff:
            return Flowcell.objects.all()
        # NOTE(review): returns a plain list (not a QuerySet), which may
        # contain duplicate flowcells if a user owns several lanes on one —
        # confirm callers tolerate this.
        return [lane.flowcell for lane in
                self.request.user.lane_set.all().select_related('flowcell')]
    @detail_route(methods=['get'])
    def lanes(self, request, pk=None):
        """List the lanes on this flowcell."""
        lane_list = Lane.objects.filter(flowcell=self.get_object())
        serializer = LaneSerializer(lane_list, many=True)
        return Response(serializer.data)
class ManufacturerViewSet(FormViewSet):
    """Plain CRUD for Manufacturer (no extra access restrictions)."""
    queryset = Manufacturer.objects.all()
    serializer_class = ManufacturerSerializer
class KitViewSet(FormViewSet):
    """Plain CRUD for Kit (no extra access restrictions)."""
    queryset = Kit.objects.all()
    serializer_class = KitSerializer
class IndexViewSet(FormViewSet):
    """Plain CRUD for Index (no extra access restrictions)."""
    queryset = Index.objects.all()
    serializer_class = IndexSerializer
class SampleViewSet(FormViewSet):
    """CRUD for Sample; scoped to samples whose project the user owns."""
    queryset = Sample.objects.all()
    serializer_class = SampleSerializer
    permission_classes = (IsAuthenticated, IsProjectOwnerOrAdmin)
    def get_queryset(self):
        if self.request.user.is_staff:
            return Sample.objects.all()
        return Sample.objects.filter(project__user=self.request.user)
class LaneViewSet(FormViewSet):
    """CRUD for Lane; owners see only their own lanes, staff see all."""
    queryset = Lane.objects.all()
    serializer_class = LaneSerializer
    def perform_create(self, serializer):
        # Stamp the authenticated user as the lane owner on create.
        return serializer.save(user=self.request.user)
    def get_queryset(self):
        if self.request.user.is_staff:
            return Lane.objects.all()
        return Lane.objects.filter(user=self.request.user)
class UserViewSet(ReadOnlyModelViewSet):
    """Read-only listing/detail of Django auth users."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 22 02:46:16 2019
@author: VIVEK VISHAN
"""
#Tuples
coordinates = (3, 4, 5)
print(coordinates[1])
coordinates = (3, 4, 5)
# Tuples are immutable: the next line would raise TypeError if uncommented.
#coordinates[1] = 10
print(coordinates[1])
# A list of tuples, by contrast, can be reassigned/extended freely.
coordinates =[(3, 4),(6,7),(80,34)]
print(coordinates)
|
import sqlite3
class Mitarbeiter_Data():
    """Data-access layer for the `mitarbeiter` table in mitarbeiter.db."""

    def __init__(self):
        # One shared connection/cursor for the object's lifetime.
        self.conn = sqlite3.connect("mitarbeiter.db")
        self.c = self.conn.cursor()

    def neuer_mitarbeiter_speichern(self, vorname, nachname, status):
        """Insert a new employee row (the id is assigned by the database)."""
        self.c.execute(
            "INSERT INTO mitarbeiter VALUES (NULL, ?, ?, ?)",
            (vorname, nachname, status),
        )
        self.conn.commit()

    def alle_ma_abfragen(self):
        """Return all employee rows as a list of tuples."""
        return self.c.execute("SELECT * FROM mitarbeiter").fetchall()

    def ma_loeschen(self, ma_id):
        """Delete the employee with primary key `ma_id`."""
        self.c.execute("DELETE FROM mitarbeiter WHERE id = ?", (ma_id,))
        self.conn.commit()
"""
Django settings for spacesuite project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^t^s%3w@=+k_)*y98i^y83_-r@5fv1e59d@v7p8!g4dbtoh)bi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
SOCIAL_AUTH_FACEBOOK_KEY = '1909808875929144'
SOCIAL_AUTH_FACEBOOK_SECRET = '60e47322c1d39381de500da161216d76'
SOCIAL_AUTH_GITHUB_KEY = '41c47d472803e4a9f9ba'
SOCIAL_AUTH_GITHUB_SECRET = 'ed5652ef80e72b2ee6141e43972ba2e4919a9950'
SOCIAL_AUTH_TWITTER_KEY = 'M3uqIK0z971dgqYTgDvoVf3jx'
SOCIAL_AUTH_TWITTER_SECRET = 'rW1GD44y5gTzKFLsMPNS80d6glswIo9CXGOFNxHeiPWAFO0DFV'
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = '811893039464-cgqc42i1r5e0b7jaq69a13ie4gimunqu.apps.googleusercontent.com'
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = 'RdY2UkdkT0V6FMfrzvyv5qt3'
# Imgur Draceditor requirements
# Global draceditor settings
# Input: string boolean, `true/false`
DRACEDITOR_ENABLE_CONFIGS = {
'imgur': 'true', # to enable/disable imgur/custom uploader.
'mention': 'false', # to enable/disable mention
'jquery': 'true', # to include/revoke jquery (require for admin default django)
}
# Imgur API Keys
DRACEDITOR_IMGUR_CLIENT_ID = '59117600889b35e'
DRACEDITOR_IMGUR_API_KEY = 'c5f26349a9a1e32e49170f5a9bdfe0eeb4c8d44e'
# Safe Mode
DRACEDITOR_MARKDOWN_SAFE_MODE = True # default
# Markdownify
DRACEDITOR_MARKDOWNIFY_FUNCTION = 'draceditor.utils.markdownify' # default
DRACEDITOR_MARKDOWNIFY_URL = '/draceditor/markdownify/' # default
# Markdown extensions (default)
DRACEDITOR_MARKDOWN_EXTENSIONS = [
'markdown.extensions.extra',
'markdown.extensions.nl2br',
'markdown.extensions.smarty',
'markdown.extensions.fenced_code',
# Custom markdown extensions.
'draceditor.extensions.urlize',
'draceditor.extensions.del_ins', # ~~strikethrough~~ and ++underscores++
'draceditor.extensions.mention', # require for mention
'draceditor.extensions.emoji', # require for emoji
]
# Markdown Extensions Configs
DRACEDITOR_MARKDOWN_EXTENSION_CONFIGS = {}
# Markdown urls
DRACEDITOR_UPLOAD_URL = '/draceditor/uploader/' # default
DRACEDITOR_SEARCH_USERS_URL = '/draceditor/search-user/' # default
# Markdown Extensions
DRACEDITOR_MARKDOWN_BASE_EMOJI_URL = 'https://assets-cdn.github.com/images/icons/emoji/' # default
DRACEDITOR_MARKDOWN_BASE_MENTION_URL = 'https://forum.dracos-linux.org/profile/' # default (change this)
# Django allauth settings
LOGIN_URL = 'login/'
LOGOUT_URL = 'logout/'
LOGIN_REDIRECT_URL = '/'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'ramlalfff@gmail.com'#email of sender
EMAIL_HOST_PASSWORD = 'unbeaten1'# password of sender
EMAIL_PORT = 587
ACCOUNT_EMAIL_REQUIRED=True
ACCOUNT_AUTHENTICATION_METHOD ='username_email'
ACCOUNT_CONFIRM_EMAIL_ON_GET=True
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL='/accounts/login/'
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = '/'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1
ACCOUNT_EMAIL_CONFIRMATION_HMAC=True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_EMAIL_SUBJECT_PREFIX = 'asQ'
ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 5
ACCOUNT_LOGOUT_REDIRECT_URL = '/accounts/login/'
ACCOUNT_SIGNUP_FORM_CLASS = 'login.forms.UserRegisterForm'
ACCOUNT_USERNAME_MIN_LENGTH = 3
# Application definition
INSTALLED_APPS = [
'login.apps.LoginConfig',
'dal',
'dal_select2',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pagedown',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.twitter',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'markdownx',
'draceditor',
'crispy_forms',
'braces',
'notification_channels.apps.NotificationChannelsConfig',
]
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'spacesuite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
],
},
},
]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
WSGI_APPLICATION = 'spacesuite.wsgi.application'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
CRISPY_TEMPLATE_PACK = 'bootstrap3'
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
import glob
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
class CarDataset:
    """Holds vehicle / non-vehicle image paths and offers a quick preview."""

    def __init__(self):
        # Keep plain lists of file paths; images are only read on demand.
        self.cars = list(glob.glob('../data/vehicles/**/*.png'))
        self.notcars = list(glob.glob('../data/non-vehicles/**/*.png'))

    def preview(self):
        """Print dataset statistics and plot one random example per class."""
        data_info = self.data_look(self.cars, self.notcars)
        print('Your function returned a count of', data_info["n_cars"], ' cars and', data_info["n_notcars"], ' non-cars')
        print('of size: ', data_info["image_shape"], ' and data type:', data_info["data_type"])
        # Pick one random example of each class to display side by side.
        sample_car = mpimg.imread(self.cars[np.random.randint(0, len(self.cars))])
        sample_notcar = mpimg.imread(self.notcars[np.random.randint(0, len(self.notcars))])
        fig = plt.figure()
        plt.subplot(121)
        plt.imshow(sample_car)
        plt.title('Example Car Image')
        plt.subplot(122)
        plt.imshow(sample_notcar)
        plt.title('Example Not-car Image')

    def data_look(self, car_list, notcar_list):
        """Return counts plus the shape/dtype of a representative image."""
        example_img = mpimg.imread(car_list[0])
        return {
            "n_cars": len(car_list),
            "n_notcars": len(notcar_list),
            "image_shape": example_img.shape,
            "data_type": example_img.dtype,
        }
|
from typing import List, Optional
from src.graph import Graph
from src.treasure_map import TreasureMap
class TreasureFinder:
    """Path-finding over a treasure map graph, with dragon avoidance."""

    _graph: Graph
    _start: int
    _destination: int

    def __init__(self, treasure_map: TreasureMap):
        self._start = treasure_map.start
        self._destination = treasure_map.treasure
        self._graph = Graph()
        for road in treasure_map.roads:
            self._graph.add_edge(road[0], road[1])

    def get_shortest_path(self) -> List[int]:
        """
        For task 1, we simply return the shortest path from the start till the destination
        :return: return the shortest path if it exists
        """
        return self._graph.find_shortest_path(self._start, self._destination)

    def get_treasure_avoiding_dragons(self, dragons: List[int]) -> List[int]:
        """
        For task 2 we need to avoid dragons, but only when they sneeze i.e nodes 3, 6, 9 ... of the path should not
        have a dragon on them. We can circle around 2 previous nodes in order to cross the dragon after its finished
        sneezing. see method _circle_before_dragon
        :param dragons: nodes occupied by dragons
        :return: the shortest safe path, or [] if none exists
        """
        all_paths: List[List[int]] = self._graph.find_all_paths(self._start, self._destination)
        if len(all_paths) == 0:
            return []
        possible_paths = []
        for path in all_paths:
            if self._will_burn(path, dragons):
                p = self._circle_before_dragon(path, dragons)
                if p is not None:
                    possible_paths.append(p)
            else:
                possible_paths.append(path)
        return self._get_shortest_list(possible_paths)

    def get_path_avoiding_shortest_path_roads(self) -> List[int]:
        """
        For task 3 we need to avoid the rodes taken by the annoying neighbour. We also know that the neighbour will be
        taking the shortest path. So, we can simply find the shortest path, and then remove all the rodes it contains from
        the original graph. We then find the shortest path again, and if there exists a path, this is the solution.
        :return: Returns a path that avoids the rodes on the shortest path of the original map
        """
        shortest_path = self.get_shortest_path()
        road_segments = self._get_road_segments(shortest_path)
        self._graph.remove_roads(road_segments)
        return self._graph.find_shortest_path(self._start, self._destination)

    @staticmethod
    def _get_shortest_list(possible_paths: List[List[int]]) -> List[int]:
        """Return the shortest candidate path, or [] when there is none."""
        if len(possible_paths) > 0:
            possible_paths.sort(key=len)
            return possible_paths[0]
        else:
            return []

    @staticmethod
    def _will_burn(path, dragons):
        """True if a dragon sits on any sneeze position (every 3rd node)."""
        return any(path[i] in dragons for i in range(3, len(path), 3))

    @staticmethod
    def _get_road_segments(path: List[int]):
        """Decompose a node path into its consecutive (u, v) road segments."""
        return [(path[i - 1], path[i]) for i in range(1, len(path))]

    def _circle_before_dragon(self, path, dragons) -> Optional[List[int]]:
        """
        Helper method to circle around a dragon node.
        Since dragons sneeze every 15 mins, and it takes 5 mins to travel between nodes, we could cycle between 2 nodes
        to avoid a dragons sneeze.
        eg:
        you have nodes 0 -> 1 -> 2 -> 3 -> 4
        dragons = [3]
        start = 0
        treasure = 4
        shortestPath = [0,1,2,3,4]
        however dragon node is hit at minute 15, so this path would not work, we could do 0->1->2->1->2->3->4
        this would make us cross the dragon node at 25 (10 mins later) and avoid a sneeze
        :param path: candidate path, modified in place
        :return: a safe path, or None when no detour can make it safe

        Fixes over the original: the recursive call's result was discarded
        (the method then fell through and implicitly returned None even when
        a safe path existed), and path[check + 1/2] could raise IndexError
        near the end of the path.
        """
        check = 3
        while check < len(path):
            if path[check] in dragons:
                # If there are 3 consecutive dragons, then there is no possible solution
                if (check + 2 < len(path)
                        and path[check + 1] in dragons
                        and path[check + 2] in dragons):
                    path.clear()
                    return None
                # Detour: revisit the two previous nodes so the dragon node
                # is reached off the sneeze beat. The insertion shifts the
                # dragon to a non-multiple-of-3 position.
                path.insert(check, path[check - 2])
                path.insert(check + 1, path[check - 1])
            check += 3
        # Verify the adjusted path really avoids every sneeze.
        return path if not self._will_burn(path, dragons) else None
|
#!/usr/bin/python3
from partitioning import Partitioning
import logging
from matplotlib import pyplot as plt
def get_index_communities(n, n_comm):
    '''Returns SBM-style community blocks.

    Splits indices 0..n-1 into n_comm contiguous sets of n // n_comm
    members each (any remainder indices are dropped).'''
    width = n // n_comm
    blocks = []
    for block_idx in range(n_comm):
        lo = block_idx * width
        blocks.append(set(range(lo, lo + width)))
    return blocks
def infer_index_communities(P: Partitioning):
    '''Returns SBM-style community blocks after inferring agent and community count.'''
    # P.vertices() supplies the agent count and len(P.part) the community
    # count. NOTE(review): assumes Partitioning exposes both — confirm.
    return get_index_communities(P.vertices(), len(P.part))
def rmat_communities(n, minimum_size=1,
                     num_initial_division: "0 means the whole graph is the first 'community'" = 0):
    '''Build nested SBM-style communities, doubling the community count per
    level until blocks would shrink below minimum_size.'''
    communities = []
    count = 2 ** num_initial_division
    logging.debug('n = {}, min size = {}, initial division = {}'.format(n, minimum_size, num_initial_division))
    while minimum_size <= n // count:
        logging.debug("Enter loop, current community count = {}".format(count))
        new_blocks = get_index_communities(n, count)
        logging.debug("Adding {} new communities.".format(len(new_blocks)))
        communities.extend(new_blocks)
        count *= 2
    return communities
def write_communities(Cs, filename, separator=" ", width=1):
    '''Write one community per line, members left-justified to `width`.

    width = 0 infers width, which could be time consuming.
    filename = None prints the formatted text instead of writing a file.
    '''
    if not width:
        # Widest member across all communities, plus one space of padding.
        width = max(max(len(str(v)) for v in c) for c in Cs) + 1
    cell = "{{:<{}d}}".format(width)
    # str.join avoids the quadratic string concatenation of the original
    # `buff +=` loop.
    lines = [separator.join(cell.format(v) for v in c) + "\n" for c in Cs]
    buff = ''.join(lines)
    if filename is not None:
        with open(filename, 'w') as o:
            o.write(buff)
    else:
        print(buff)
def load_communities(filename, seperator=None, target_type=int):
    '''seperator=" " breaks on formatted multispaces. None provides
    standard behavior of split() on whitespace.'''
    communities = []
    with open(filename) as stream:
        for raw in stream:
            stripped = raw.strip()
            # Skip blank lines and '#' comment lines.
            if not stripped or stripped.startswith("#"):
                continue
            communities.append({target_type(tok) for tok in raw.split(seperator)})
    return communities
####################
## My graph generation scripts below.
## Old good graph functions
def draw_comm_count_test(df, back_style="--", col='score', outfile=None, annotate=True, **kwargs):
    """Draw a 2x3 grid of per-generator score plots for k = 2, 4, ..., 64.

    NOTE(review): relies on df_to_plot_over_replicate, which is defined
    elsewhere — confirm its signature matches this call.
    """
    plt.figure(figsize=(12, 8))
    for i in range(1, 7):
        plt.subplot(2, 3, i)
        # Community count doubles per panel: 2, 4, 8, 16, 32, 64.
        k = 2 ** i
        df_to_plot_over_replicate(
            df[df.generator == "simple-er-{}".format(k)],
            col,
            back_style=back_style,
            outfile=None, show=False,
            annotate=annotate,
            **kwargs)
    if outfile:
        plt.savefig(outfile)
def full_draw_code_test(df, back_style='--', col='score', **kwargs):
    """Draw one plot per (graph, community-base) combination.

    NOTE(review): GRAPHS and draw_code_test are defined elsewhere — confirm.
    The chained df[...][...] boolean filter works but is the pandas
    chained-indexing pattern; df[(df.generator == g) & (df.community_base
    == score)] would be the canonical form.
    """
    for g in GRAPHS:
        for score in ('all', 'top5000'):
            # The first graph is only plotted under the 'all' community base.
            if g == GRAPHS[0] and score == 'top5000':
                continue
            logging.info(
                "Considering graph {!r} under community {!r}".format(
                    g, score))
            draw_code_test(df[df.generator == g][df.community_base == score],
                           back_style=back_style, col=col,
                           title="{}; ground {}".format(g, score),
                           **kwargs)
|
"""
Geo operations.
v_color,
plot_coords,
plot_bounds,
plot_line,
create_geohash_df,
create_bin_geohash_df,
decode_geohash_to_latlon,
"""
from typing import Optional, Text, Tuple, Union
import geohash2 as gh
import numpy as np
from matplotlib.pyplot import axes
from numpy import ndarray
from pandas import DataFrame
from shapely.geometry import LineString, MultiLineString
from shapely.geometry.base import BaseGeometry
from pymove.utils.constants import (
BASE_32,
BIN_GEOHASH,
COLORS,
GEOHASH,
LATITUDE,
LATITUDE_DECODE,
LONGITUDE,
LONGITUDE_DECODE,
)
from pymove.utils.log import progress_bar
# 5-bit binary representation for each index into the BASE_32 geohash
# alphabet, e.g. index 3 -> array([0, 0, 0, 1, 1]).
BINARY = [
    np.asarray(
        list('{0:05b}'.format(x)), dtype=int
    ) for x in range(0, len(BASE_32))
]
# Maps each base-32 geohash character to its 5-bit binary array.
BASE_32_TO_BIN = dict(zip(BASE_32, BINARY))
def v_color(ob: BaseGeometry) -> Text:
    """
    Returns '#ffcc33' if object crosses otherwise it returns '#6699cc'.
    Parameters
    ----------
    ob : geometry object
        Any geometric object
    Return
    ------
    str
        Geometric object color
    """
    # is_simple is a bool, so this indexes COLORS[34] for simple geometries
    # and COLORS[33] for self-crossing ones. NOTE(review): whether those
    # entries are the hex values named in the docstring depends on the
    # COLORS constant — confirm.
    return COLORS[ob.is_simple + 33]
def plot_coords(ax: axes, ob: BaseGeometry, color: Optional[Text] = 'r'):
    """
    Plot each vertex of the geometry as a point marker on a 2D chart.
    Parameters
    ----------
    ax : axes
        Single axes object
    ob : geometry object
        Any geometric object
    color : str, optional
        Sets the geometric object color, by default 'r'
    """
    xs, ys = ob.xy
    ax.plot(xs, ys, 'o', color=color, zorder=1)
def plot_bounds(ax: axes, ob: Union[LineString, MultiLineString], color='b'):
    """
    Plot the boundary points of a line geometry, joined by a solid line.
    Parameters
    ----------
    ax : axes
        Single axes object
    ob : LineString or MultiLineString
        Geometric object formed by lines.
    color : str, optional
        Sets the geometric object color, by default 'b'
    """
    # Unzip the boundary points into parallel x and y sequences.
    coords = [(point.x, point.y) for point in ob.boundary]
    x, y = zip(*coords)
    ax.plot(x, y, '-', color=color, zorder=1)
def plot_line(
    ax: axes,
    ob: LineString,
    color: Optional[Text] = 'r',
    alpha: Optional[float] = 0.7,
    linewidth: Optional[float] = 3,
    solid_capstyle: Optional[Text] = 'round',
    zorder: Optional[float] = 2
):
    """
    Plot a LineString.
    Parameters
    ----------
    ax : axes
        Single axes object
    ob : LineString
        Sequence of points.
    color : str, optional
        Sets the line color, by default 'r'
    alpha : float, optional
        Defines the opacity of the line, by default 0.7
    linewidth : float, optional
        Defines the line thickness, by default 3
    solid_capstyle : str, optional
        Defines the style of the ends of the line, by default 'round'
    zorder : float, optional
        Determines the default drawing order for the axes, by default 2
    """
    xs, ys = ob.xy
    style = dict(
        color=color,
        alpha=alpha,
        linewidth=linewidth,
        solid_capstyle=solid_capstyle,
        zorder=zorder,
    )
    ax.plot(xs, ys, **style)
def _encode(lat: float, lon: float, precision: Optional[float] = 15) -> Text:
    """
    Encodes latitude/longitude to geohash.
    Either to specified precision or to automatically evaluated precision.
    Parameters
    ----------
    lat : float
        Latitude in degrees.
    lon : float
        Longitude in degrees.
    precision : float, optional
        Number of characters in resulting geohash, by default 15
    Return
    ------
    str
        Geohash of supplied latitude/longitude.
    """
    # Thin wrapper over the geohash2 library.
    return gh.encode(lat, lon, precision)
def _decode(geohash: Text) -> Tuple[float, float]:
    """
    Decode geohash to latitude/longitude.
    Location is approximate centre of geohash cell, to reasonable precision.
    Parameters
    ----------
    geohash : str
        Geohash str to be converted to latitude/longitude.
    Return
    ------
    (lat : float, lon : float)
        Geohashed location.
    """
    # Thin wrapper over the geohash2 library.
    return gh.decode(geohash)
def _bin_geohash(lat: float, lon: float, precision: Optional[float] = 15) -> ndarray:
    """
    Transforms a point's geohash into a binary array.
    Parameters
    ----------
    lat : float
        Latitude in degrees
    lon : float
        Longitude in degrees
    precision : float, optional
        Number of characters in resulting geohash, by default 15
    Return
    ------
    array
        Returns a binary geohash array (5 bits per geohash character)
    """
    hashed = _encode(lat, lon, precision)
    # Expand each base-32 character into its 5-bit representation.
    return np.concatenate([BASE_32_TO_BIN[x] for x in hashed])
def _reset_and_create_arrays_none(
data: DataFrame, reset_index: Optional[bool] = True
) -> Tuple[ndarray, ndarray, ndarray, ndarray]:
"""
Reset the df index and create arrays of none values.
Parameters
----------
data : dataframe
The input trajectories data
reset_index : boolean, optional
Condition to reset the df index, by default True
Return
------
arrays
Returns arrays of none values, of the size of the df.
"""
if reset_index:
data.reset_index(drop=True, inplace=True)
latitudes = np.full(
data.shape[0], None, dtype=np.float64
)
longitudes = np.full(
data.shape[0], None, dtype=np.float64
)
geohash = np.full(
data.shape[0], None, dtype='object_'
)
bin_geohash = np.full(
data.shape[0], None, dtype=np.ndarray
)
return latitudes, longitudes, geohash, bin_geohash
def create_geohash_df(data: DataFrame, precision: Optional[int] = 15):
    """
    Create geohash from geographic coordinates and integrate with df.

    The dataframe index is reset and a GEOHASH column is added in place.

    Parameters
    ----------
    data : dataframe
        The input trajectories data; must contain LATITUDE/LONGITUDE.
    precision : int, optional
        Number of characters in resulting geohash, by default 15.
        (Fixed annotation: a character count is an int, not a float.)
    """
    _, _, geohash, _ = _reset_and_create_arrays_none(data)
    for idx, row in progress_bar(
        data[[LATITUDE, LONGITUDE]].iterrows(), total=data.shape[0]
    ):
        geohash[idx] = _encode(row[LATITUDE], row[LONGITUDE], precision)
    data[GEOHASH] = geohash
def create_bin_geohash_df(data: DataFrame, precision: Optional[int] = 15):
    """
    Create trajectory geohash binaries and integrate with df.

    The dataframe index is reset and a BIN_GEOHASH column is added in place.

    Parameters
    ----------
    data : dataframe
        The input trajectories data; must contain LATITUDE/LONGITUDE.
    precision : int, optional
        Number of characters in resulting geohash, by default 15.
        (Fixed annotation: a character count is an int, not a float.)
    """
    _, _, _, bin_geohash = _reset_and_create_arrays_none(data)
    for idx, row in progress_bar(
        data[[LATITUDE, LONGITUDE]].iterrows(), total=data.shape[0]
    ):
        bin_geohash[idx] = _bin_geohash(row[LATITUDE], row[LONGITUDE], precision)
    data[BIN_GEOHASH] = bin_geohash
def decode_geohash_to_latlon(
    data: DataFrame,
    label_geohash: Optional[Text] = GEOHASH,
    reset_index: Optional[bool] = True
):
    """
    Decode feature with hash of trajectories back to geographic coordinates.

    Adds LATITUDE_DECODE and LONGITUDE_DECODE columns in place.

    Parameters
    ----------
    data : dataframe
        The input trajectories data
    label_geohash : str, optional
        The name of the feature with hashed trajectories, by default GEOHASH
    reset_index : boolean, optional
        Condition to reset the df index, by default True

    Raises
    ------
    ValueError
        If *label_geohash* is not a column of *data*.
    """
    if label_geohash not in data:
        raise ValueError('feature {} not in df'.format(label_geohash))
    latitudes, longitudes, _, _ = _reset_and_create_arrays_none(
        data, reset_index=reset_index
    )
    for idx, row in progress_bar(
        data[[label_geohash]].iterrows(), total=data.shape[0]
    ):
        decoded = _decode(row[label_geohash])
        latitudes[idx] = decoded[0]
        longitudes[idx] = decoded[1]
    data[LATITUDE_DECODE] = latitudes
    data[LONGITUDE_DECODE] = longitudes
|
#Author: Kai Huang
#Date: 2015.04.01
import sys
sys.path.append("../tools")
import time
import datetime
def SecsToDateString(secs):
    """Format a UTC epoch timestamp as YYYYMMDD; placeholder for negatives."""
    if secs < 0:
        return "0000.00.00"
    return time.strftime("%Y%m%d", time.gmtime(secs))
def SecsToYear(secs):
    """Return the 4-digit UTC year of an epoch timestamp; "0000" for negatives."""
    if secs < 0:
        return "0000"
    return time.strftime("%Y", time.gmtime(secs))
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 15 21:26:53 2017
@author: James
"""
import numpy as np
def pearson_r(x, y):
    """Compute Pearson correlation coefficient between two arrays."""
    # The off-diagonal entry of the 2x2 correlation matrix is the
    # correlation between x and y.
    return np.corrcoef(x, y)[0, 1]
|
import logging
logger = logging.getLogger(__name__)
class BaseStorageEngine(object):
    """Abstract interface for storage backends.

    Subclasses must override save/read/get_url; the base methods log an
    error and raise NotImplementedError.
    """
    def save(self, fp, location):
        # Persist file-like *fp* at *location*; must be overridden.
        logger.error("Unimplemented save method")
        raise NotImplementedError
    def read(self, location):
        # Read content stored at *location*; must be overridden.
        logger.error("Unimplemented read method")
        raise NotImplementedError
    def get_url(self, request, location):
        # Build a URL for *location* in the context of *request*;
        # must be overridden.
        logger.error("Unimplemented get_url method")
        raise NotImplementedError
|
import os
from pathlib import Path
import PIL
from PIL import Image
def create_sprites(imagepaths, spritefilepath, cssfilepath):
    """
    Creates a sprite image by combining the images in the imagepaths tuple into one image.
    This is saved to spritefilepath.
    Also creates a file of CSS classes saved to cssfilepath.
    The class names are the original image filenames without the filename extension.
    IOErrors are raised.
    """
    size = _calculate_size(imagepaths)
    # Bug fix: the original ignored spritefilepath/cssfilepath and wrote
    # hard-coded "sprites.png" / "spritestyles.css" instead.
    _create_sprite_image(imagepaths, size, spritefilepath)
    _create_styles(imagepaths, cssfilepath, spritefilepath)
def _calculate_size(imagepaths):
    """
    Creates a width/height tuple specifying the size of the image
    needed for the combined images: total width of all images and
    the maximum height among them.

    IOError from an unreadable image propagates to the caller.
    """
    # Removed a try/except that only re-raised; the exception already
    # propagates unchanged without it.
    totalwidth = 0
    maxheight = 0
    for imagepath in imagepaths:
        image = Image.open(imagepath)
        totalwidth += image.width
        maxheight = max(image.height, maxheight)
    return (totalwidth, maxheight)
def _create_sprite_image(imagepaths, size, spritefilepath):
    """
    Creates a new transparent RGBA image, pastes the original images
    into it side by side, then saves it to spritefilepath.

    IOError from an unreadable image or failed save propagates.
    """
    sprites = PIL.Image.new("RGBA", size, (255, 0, 0, 0))
    x = 0
    for imagepath in imagepaths:
        image = Image.open(imagepath)
        sprites.paste(image, (x, 0))
        x += image.width
    # Bug fix: save once after all pastes; the original called save()
    # inside the loop, rewriting the output file for every image.
    sprites.save(spritefilepath, compress_level=9)
def _create_styles(imagepaths, cssfilepath, spritefilepath):
    """
    Creates a set of CSS classes for the sprite images (one class per
    image, named after the image's filename stem, pointing at its
    horizontal offset in the sprite sheet) and saves it to cssfilepath.

    IOError from reading an image or writing the CSS file propagates.
    """
    styles = []
    x = 0
    for imagepath in imagepaths:
        image = Image.open(imagepath)
        classname = Path(imagepath).stem
        styles.append(
            f".{classname}\n"
            "{\n"
            f" background: url('{spritefilepath}') no-repeat;\n"
            f" width: {image.width}px;\n"
            f" height: {image.height}px;\n"
            " display: inline-block;\n"
            f" background-position: -{x}px 0px;\n"
            "}\n\n"
        )
        x += image.width
    # Bug fix: use a context manager so the file is closed even if the
    # write fails (the original left the handle open on error).
    with open(cssfilepath, "w+") as f:
        f.write("".join(styles))
|
#import urllib2
from bs4 import BeautifulSoup
import urllib.request
from flask import Flask,request,render_template,flash,url_for,redirect
import pymysql
# Flask app + MySQL connection used to store scraped links.
app=Flask(__name__)
# NOTE(review): empty DB password and autocommit on a root connection —
# confirm credentials/config are intentional for this environment.
db=pymysql.connect("localhost","root","","IPP", autocommit=True)
cursor=db.cursor()
app.secret_key='some_secret'
# Page listing NSAP beneficiary datasets to scrape.
url="https://india.gov.in/sanctioned-nsap-beneficiaries-old-aged-disability-and-widows-scheme-nsap-mis"
# NOTE(review): this reassignment shadows flask's `request` imported
# above — any later use of flask.request in this module would break.
request=urllib.request.Request(url)
response=urllib.request.urlopen(request)
# NOTE(review): the URL is fetched twice (urlopen above and here);
# only `page` is actually parsed.
page=urllib.request.urlopen(url)
soup=BeautifulSoup(page,"html.parser")
print(soup.title.text)
#for link in soup.findAll('a'):
#    print(link.text)
#print(soup.find('div',{"class":"region region-three-33-third"},'a').find('href'))
#for link in soup.findAll('div',{"class":"region region-three-33-third"}):
#    print(link.find('a').text)
#    q="""INSERT INTO news VALUES('%s')"""%(link.find('href'))
#    cursor.execute(q)
#print(soup.find('div',{"class":"view view-metadata view-id-metadata view-display-id-block_4 accordion-container view-dom-id-88dcff5681d13b63b6707a690f658253"}).find('a'))
#for link in soup.findAll('div',{"class":"views-field views-field-php-4"}):
#    for l in soup.findAll('link'):
#        if link.has_attr('href'):
#            print(link.attrs['href'])
#    print(link.find('a').text)
#    q="""INSERT INTO news VALUES('%s')"""%(link.find('a'))
#    cursor.execute(q)
# Extract each dataset link (text + href) and insert it into `news`.
pd=soup.findAll('div',attrs={'class':'views-field views-field-php-4'})
for d in pd:
    print(d.find('a').text)
    print('\t')
    print(d.find('a')['href'])
    print('\n')
    # SECURITY NOTE(review): SQL built by string interpolation from
    # scraped, untrusted text — SQL injection risk; should use
    # cursor.execute("INSERT INTO news VALUES(%s,%s)", (...)) instead.
    q="""INSERT INTO news VALUES('%s','%s')"""%(d.find('a').text,d.find('a')['href'])
    cursor.execute(q)
#print(soup.prettify())
#print(soup.title.string)
#print(soup.findAll('a'))
|
import json
import sys
import getpass
from account import Account
from authorization import Authorization
import pdb
class Operation:
    """Interactive console banking menu (legacy Python 2 script).

    Drives account open/login/deposit/withdraw/transfer flows via
    raw_input prompts. State: `authorized` flag plus the current
    `account_id`/`password` once set.
    """
    def __init__(self):
        # Not authorized until login or open_account succeeds.
        self.authorized = False
        self.start()
    def start(self):
        # Show the top-level menu, read a choice and dispatch it.
        self.login_prompt()
        response = raw_input()
        self.login(response)
    def login_prompt(self):
        # NOTE(review): prompt says "1 - 3" but four options are listed.
        print "Please choose a number (1 - 3) to select one of the operation below:"
        print " ( 1 ) Open a bank account"
        print " ( 2 ) Deposit to a bank account"
        print " ( 3 ) Login to your bank account"
        print " ( 4 ) Exit"
    def login(self, response):
        # Dispatch a top-level menu choice; re-prompt on anything else.
        if response == '1':
            self.open_account()
        elif response == '2':
            self.deposit()
        elif response == '3':
            self.authorize()
            self.select_operation()
        elif response == '4':
            print "Thank you for using our service."
            sys.exit()
        else:
            print "We don't recognize this operation."
            print "Please enter an integer in range 1 to 4."
            self.start()
    def authorize(self):
        # Prompt for credentials until Authorization accepts them;
        # allow_cancel lets the user back out between attempts.
        self.account_id = raw_input("Please enter your account id: ").lower()
        self.password = getpass.getpass()
        authorization = Authorization(self.account_id, self.password)
        while authorization.check_identity() == False:
            print "You may enter wrong account id or password. Please try again."
            self.allow_cancel()
            self.authorize()
        self.authorized = True
    def operation_prompt(self):
        # Menu shown once the user is authorized.
        print "Please choose a number (1 - 5) to select one of the operation below:"
        print " ( 1 ) Close your bank account"
        print " ( 2 ) Withdraw from your bank account"
        print " ( 3 ) Deposit into your bank account"
        print " ( 4 ) Transfer to another bank account"
        print " ( 5 ) Exit"
    def select_operation(self):
        # Read and dispatch an authorized-menu choice.
        self.operation_prompt()
        response = raw_input()
        self.start_operation(response)
    def start_operation(self, response):
        if response == '1':
            self.close_account()
        elif response == '2':
            self.withdraw()
        elif response == '3':
            self.deposit()
        elif response == '4':
            self.transfer()
        elif response == '5':
            print "Thank you for using our service."
            sys.exit()
        else:
            print "We don't recognize this operation."
            # NOTE(review): this menu has 5 options but the message
            # says "1 to 4" — confirm wording.
            print "Please enter an integer in range 1 to 4."
            self.select_operation()
    def open_account(self):
        # Create a new account; the user is auto-authorized afterwards.
        username = raw_input("Please enter your full name: ")
        password = ""
        while len(password) < 4:
            print "Please enter a password with length of at least 4"
            password = getpass.getpass()
        self.account_id = Account().create(username, password)
        print "Your acount is successfully created."
        print "Your account id is " + self.account_id
        self.authorized = True
        self.select_operation()
    def close_account(self):
        # Irreversible delete, gated behind an explicit y/n confirm.
        self.is_authorized()
        print "WARNING: This action is irreversible."
        res = raw_input("Are you sure you want to close this account? (y/n): ").lower()
        if res == "y":
            Account().delete(self.account_id)
            print "Your account is successfully closed"
            self.authorized = False
            self.start()
        else:
            self.select_operation()
    def is_authorized(self):
        # Force a login before privileged operations.
        if self.authorized == False:
            self.authorize()
    def deposit(self):
        # Deposit into own account (if logged in) or any account by id.
        if self.authorized == True:
            account_id = self.account_id
        else:
            account_id = raw_input("Please enter account id you want to deposit into: ").lower()
        authorization = Authorization(account_id, "")
        if authorization.check_account_id() == True:
            # NOTE(review): Py2 input() evaluates the typed text —
            # raw_input + float() would be safer.
            amt = input("How much do you want to deposit: $")
            # NOTE(review): these checks are joined with `and`, so for
            # any numeric amt the loop body never runs and negative
            # amounts pass through — presumably `or` was intended.
            while (
                isinstance(amt, int) == False
                and isinstance(amt, float) == False
                and (amt < 0)
            ):
                amt = float(raw_input("How much do you want to deposit: $"))
                print "You must enter a positive number. Please try again"
                self.allow_cancel()
            print "You have deposited $%d" % amt
            Account().update(account_id, amt)
            if self.authorized == True:
                self.select_operation()
            else:
                self.start()
        else:
            print "Account id does not exist. Please retry."
            self.allow_cancel()
            self.deposit()
    def withdraw(self):
        # Withdraw up to the current balance from the logged-in account.
        self.is_authorized()
        amt = input("How much do you want to withdraw: $")
        data = Account().records[self.account_id]
        while amt > data['balance'] or amt < 0:
            print "Your withdrawal exceeded your account balance. Your balance is currently $%d. Please try again." % data['balance']
            self.allow_cancel()
            amt = input("How much do you want to withdraw: $")
        print "You have withdrawed $%d" % amt
        # Negative delta applies the withdrawal via the same update API.
        amt = -amt
        Account().update(self.account_id, amt)
        self.select_operation()
    def transfer(self):
        # Move funds from the logged-in account to another account.
        self.is_authorized()
        transferred_id = raw_input("Please enter account id you want to transfer to: ").lower()
        authorization = Authorization(transferred_id, "")
        if authorization.check_account_id() == True:
            amt = input("How much do you want to transfer: $")
            # NOTE(review): same broken `and`-joined validation as in
            # deposit(); also the retry prompt says "deposit".
            while (
                isinstance(amt, int) == False
                and isinstance(amt, float) == False
                and (amt < 0)
            ):
                amt = float(raw_input("How much do you want to deposit: $"))
                print "You must enter a positive number. Please try again"
                self.allow_cancel()
            print "You have transferred ${} to account {}".format(amt, transferred_id)
            Account().update(transferred_id, amt)
            Account().update(self.account_id, -amt)
            self.select_operation()
        else:
            print "Account id does not exist. Please retry."
            self.allow_cancel()
            self.transfer()
    def allow_cancel(self):
        # Offer an escape hatch back to the appropriate menu.
        print("Do you want to continue (y/n):" )
        response = raw_input().lower()
        if response == 'n':
            if self.authorized == True:
                self.select_operation()
            else:
                self.start()
|
def is_anagram(s: str, t: str) -> bool:
    """Return True if t uses exactly the same letters as s, rearranged."""
    if len(s) != len(t):
        return False
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    for ch in t:
        remaining = counts.get(ch, 0)
        # A zero (or absent) count means t has more of ch than s does.
        if remaining == 0:
            return False
        counts[ch] = remaining - 1
    # Equal lengths guarantee no surplus left in counts at this point.
    return True
# -*- coding:utf-8 -*-
'''
Created on 2015年12月22日
@author: zhaojiangang
'''
from datetime import datetime, timedelta
import unittest
from biz.mock import patch
from entity.hallshare_test import share_conf
from entity.hallstore_test import clientIdMap, item_conf, products_conf, \
store_template_conf, store_default_conf
from entity.hallvip_test import vip_conf
from hall.entity import hallitem, hallvip, neituiguang, neituiguangtask, \
halltask, hallshare
from hall.entity.hallconf import HALL_GAMEID
from hall.servers.util.neituiguang_new_handler import NeiTuiGuangTcpHandler
from poker.entity.dao import userdata, gamedata
import poker.util.timestamp as pktimestamp
from test_base import HallTestMockContext
from hall.servers.util.rpc import neituiguang_remote
# Task configuration installed into the mocked configure store in setUp
# (key 'game:9999:tasks'): one new-user referral task unit with two
# pools, each containing a single "play 5 games" task rewarding 50 chips.
tasks_conf = {
    "taskUnits":[
        {
            "taskUnitId":"hall.task.neituiguang.newUser",
            "pools":[
                {
                    "tasks":[
                        {
                            "kindId":10001,
                            "typeId":"hall.task.simple",
                            "name":"游戏5局",
                            "desc":"测试任务",
                            "pic":"${http_download}/dizhu/medal/img/play_5.png",
                            "count":5,
                            "star":0,
                            "totalLimit":1,
                            "inspector":{
                                "typeId":"hall.item.open",
                                "conditions":[
                                    {
                                        "typeId":"hall.item.open.kindId",
                                        "kindIds":[1001]
                                    }
                                ]
                            },
                            "rewardContent":{
                                "typeId":"FixedContent",
                                "desc":"50金币",
                                "items":[
                                    {"itemId":"user:chip", "count":50}
                                ]
                            },
                            "rewardMail":"勋章奖励:\\${rewardContent}"
                        }
                    ]
                },
                {
                    "tasks":[
                        {
                            "kindId":10002,
                            "typeId":"hall.task.simple",
                            "name":"游戏5局",
                            "desc":"测试任务",
                            "pic":"${http_download}/dizhu/medal/img/play_5.png",
                            "count":5,
                            "star":0,
                            "totalLimit":1,
                            "inspector":{
                                "typeId":"hall.item.open",
                                "conditions":[
                                    {
                                        "typeId":"hall.item.open.kindId",
                                        "kindIds":[1001]
                                    }
                                ]
                            },
                            "rewardContent":{
                                "typeId":"FixedContent",
                                "desc":"50金币",
                                "items":[
                                    {"itemId":"user:chip", "count":50}
                                ]
                            },
                            "rewardMail":"勋章奖励:\\${rewardContent}"
                        }
                    ]
                }
            ]
        }
    ]
}
# Referral-prize (neituiguang) configuration installed in setUp
# (key 'game:9999:neituiguang2'): reward item, display strings and
# the share location used by the prize flow under test.
neituiguang2 = {
    "prizeDetail":"This is detail",
    "prizeImgUrl":"${http_download}/hall/item/imgs/coin.png",
    "prizeRewardItem":{
        "itemId":"user:chip",
        "count":1
    },
    "prizeRewardDesc":"每推荐一个人可获得\\${rewardContent}",
    "prizeNotGotRewardDesc":"还未获得奖励",
    "prizeGotTotalRewardDesc":"已经获得\\${totalRewardContent}",
    "prizeAvailableRewardDesc":"可领取奖励\\${availableRewardContent}",
    "prizeRewardTips":"恭喜您获得\\${rewardContent}",
    "shareLoc":"neituiguang",
    "taskDetail":"This is task detail"
}
class TestDailyCheckin(unittest.TestCase):
    """End-to-end test of the neituiguang (referral) prize flow.

    setUp freezes the clock, installs mock configs and initialises the
    hall modules; testGetPrize walks invite -> accept -> claim prize.
    """
    # Fixed inviter/invitee identities and client used throughout.
    userId = 10001
    inviteeUserId = 10002
    gameId = 9999
    clientId = 'IOS_3.6_momo'
    testContext = HallTestMockContext()
    # Task classes may only be registered once per process.
    regTaskClass = False
    def getCurrentTimestamp(self):
        # Patched in for poker.util.timestamp.getCurrentTimestamp so the
        # code under test sees a frozen clock.
        return self.timestamp
    def setUp(self):
        # Start mocks, freeze time, load all configs, init hall modules.
        self.testContext.startMock()
        self.timestamp = pktimestamp.getCurrentTimestamp()
        self.pktimestampPatcher = patch('poker.util.timestamp.getCurrentTimestamp', self.getCurrentTimestamp)
        self.pktimestampPatcher.start()
#         self.neituiguangRemotePatcher = mock._patch_multiple('hall.servers.util.rpc.neituiguang_remote',
#                                                              consumeAssets=self.userRemote.consumeAssets,
#                                                              addAssets=self.userRemote.addAssets,
#                                                              queryUserWeardItemKindIds=self.userRemote.queryUserWeardItemKindIds,
#                                                              presentItemByUnitsCount=self.userRemote.presentItemByUnitsCount,
#                                                              presentItem=self.userRemote.presentItem)
        self.testContext.configure.setJson('game:9999:map.clientid', clientIdMap, 0)
        self.testContext.configure.setJson('game:9999:item', item_conf, 0)
        self.testContext.configure.setJson('game:9999:products', products_conf, 0)
        self.testContext.configure.setJson('game:9999:store', store_template_conf, 0)
        self.testContext.configure.setJson('game:9999:store', store_default_conf, clientIdMap[self.clientId])
        self.testContext.configure.setJson('game:9999:vip', vip_conf, 0)
        self.testContext.configure.setJson('game:9999:tasks', tasks_conf, 0)
        self.testContext.configure.setJson('game:9999:share', share_conf, 0)
        self.testContext.configure.setJson('game:9999:neituiguang2', neituiguang2, 0)
        hallitem._initialize()
        hallvip._initialize()
        hallshare._initialize()
        # Guarded: _registerClasses must not run twice in one process.
        if not TestDailyCheckin.regTaskClass:
            TestDailyCheckin.regTaskClass=True
            halltask._registerClasses()
        neituiguang._initialize()
        neituiguangtask._initialize()
    def tearDown(self):
        # Stop mocks and restore the real clock.
        self.testContext.stopMock()
        self.pktimestampPatcher.stop()
#     def testGetStates(self):
#         handler = NeiTuiGuangTcpHandler()
#         createTime = datetime.now()# datetime.strptime('2015-12-22 18:20:00.0', '%Y-%m-%d %H:%M:%S.%f')
#         userdata.setAttr(self.userId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         userdata.setAttr(self.inviteeUserId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         print userdata.getAttr(self.userId, 'createTime')
#
#         status = neituiguang.loadStatus(self.userId, self.timestamp)
#         self.assertTrue(handler.translateState(status) == 0)
#         self.assertTrue(status.isNewUser, 'status.isNewUser must be True')
#         handler.doQueryState(HALL_GAMEID, 10001, 'IOS_3.70_360.360.0-hall6.360.day')
#
#     def testCheckCode(self):
#         handler = NeiTuiGuangTcpHandler()
#         createTime = datetime.now()# datetime.strptime('2015-12-22 18:20:00.0', '%Y-%m-%d %H:%M:%S.%f')
#         userdata.setAttr(self.userId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         userdata.setAttr(self.inviteeUserId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         createTime -= timedelta(days=8)
#         userdata.setAttr(self.userId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         handler.doCheckCode(HALL_GAMEID, self.inviteeUserId, self.clientId, self.userId)
#
#     def testCancelCheckCode(self):
#         handler = NeiTuiGuangTcpHandler()
#         createTime = datetime.now()# datetime.strptime('2015-12-22 18:20:00.0', '%Y-%m-%d %H:%M:%S.%f')
#         userdata.setAttr(self.userId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         userdata.setAttr(self.inviteeUserId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         createTime += timedelta(days=8)
#         userdata.setAttr(self.userId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
#         handler.doCancelCodeCheck(HALL_GAMEID, self.inviteeUserId, self.clientId)
#         status = neituiguang.loadStatus(self.inviteeUserId, self.timestamp)
#         self.assertTrue(status.inviter and status.inviter.userId == 0)
#
#         handler.doQueryTaskInfo(self.gameId, self.inviteeUserId)
#
    def testGetPrize(self):
        # Full referral flow: invitee enters the inviter's code, the
        # inviter is notified and then queries/claims the prize.
        handler = NeiTuiGuangTcpHandler()
        createTime = datetime.now()# datetime.strptime('2015-12-22 18:20:00.0', '%Y-%m-%d %H:%M:%S.%f')
        userdata.setAttr(self.userId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
        userdata.setAttr(self.inviteeUserId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
        # Make the inviter an "old" user (registered 8 days ago).
        createTime -= timedelta(days=8)
        userdata.setAttr(self.userId, 'createTime', createTime.strftime('%Y-%m-%d %H:%M:%S.%f'))
        # Clear any referral state left over from other tests.
        gamedata.delGameAttr(self.userId, self.gameId, 'neituiguang')
        gamedata.delGameAttr(self.inviteeUserId, self.gameId, 'neituiguang')
        handler.doQueryPrize(self.gameId, self.userId, self.clientId)
        timestamp = pktimestamp.getCurrentTimestamp()
        handler.doCheckCode(self.gameId, self.inviteeUserId, self.clientId, self.userId)
        neituiguang_remote.onInvitationAccepted(self.userId, self.inviteeUserId)
        status = neituiguang.loadStatus(self.userId, timestamp)
        neituiguang.onNotifyInviterOk(status)
        handler.doQueryPrize(self.gameId, self.userId, self.clientId)
        handler.doGetPrize(self.gameId, self.userId, self.clientId)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from celery.schedules import crontab
import os
# Django settings must be configured before django.conf is imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "scrum.settings")
from django.conf import settings
from celery import Celery
# NOTE(review): say_hi is imported but unused here; importing tasks at
# module level can also create a circular import with the app — confirm.
from .tasks import say_hi
# NOTE(review): the broker password contains '@' and '!' unescaped in
# the URL — verify it parses as intended or percent-encode it.
app = Celery('scrum', backend='amqp', broker='amqp://rabbit_admin:scrum@2016!@rabbitmq//?heartbeat=30')
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# Periodic task schedule for celery beat.
app.conf.beat_schedule = {
    # NOTE(review): key says "every-30-seconds" but the crontab fires
    # every 5 minutes — confirm which is intended.
    'add-every-30-seconds': {
        'task': 'scrum.tasks.say_hi',
        'schedule': crontab(minute='*/5')
    },
    'send-welcome-email-every-5min': {
        'task': 'scrum.tasks.send_welcome_email',
        'schedule': crontab(minute='0', hour="*"),
    },
}
from NE import NeuroEvolution
class SANE(NeuroEvolution):
    """SANE neuro-evolution variant; all lifecycle hooks are still stubs."""
    def initialize(self):
        """Set up the initial population; not implemented yet."""
        raise NotImplementedError()
    def terminate(self):
        """Decide whether evolution should stop; not implemented yet."""
        raise NotImplementedError()
    def compute_fitness(self):
        """Evaluate the population's fitness; not implemented yet."""
        raise NotImplementedError()
    def recombine(self):
        """Produce offspring from selected parents; not implemented yet."""
        raise NotImplementedError()
    def mutate(self):
        """Apply mutation to offspring; not implemented yet."""
        raise NotImplementedError()
|
#!/usr/bin/python3 If only on linex
# variables.py Dyer
#
def main():
    """Print an identifying message for this module."""
    print("This is the variables.py file.")


if __name__ == "__main__":
    main()
|
def test_select_leaf_root(setup_complete_tree):
    """An unexpanded root is itself returned as the selected leaf."""
    tree, nodes = setup_complete_tree
    nodes[0].is_expanded = False
    leaf = tree.select_leaf()
    assert leaf is nodes[0]
def test_select_leaf(setup_complete_tree):
    """Selection walks the expanded chain down to the deepest node."""
    tree, nodes = setup_complete_tree
    leaf = tree.select_leaf()
    assert leaf is nodes[2]
def test_backpropagation(setup_complete_tree):
    """Each ancestor backpropagates once with (child, score); the leaf does not."""
    tree, nodes = setup_complete_tree
    score = 1.5
    tree.backpropagate(nodes[2], score)
    nodes[0].backpropagate.assert_called_once_with(nodes[1], score)
    nodes[1].backpropagate.assert_called_once_with(nodes[2], score)
    nodes[2].backpropagate.assert_not_called()
def test_route_to_node(setup_complete_tree):
    """The route from the root to nodes[2] covers all three nodes in order."""
    tree, nodes = setup_complete_tree
    routes, route_nodes = tree.route_to_node(nodes[2])
    assert len(routes) == 2
    # Bug fix: assert on the returned route, not on the fixture list
    # (the original checked len(nodes), which is always 3 regardless
    # of what route_to_node returned).
    assert len(route_nodes) == 3
    assert nodes[0] == route_nodes[0]
    assert nodes[1] == route_nodes[1]
    assert nodes[2] == route_nodes[2]
def test_create_graph(setup_complete_tree):
    """The exported graph is the linear chain nodes[0] -> nodes[1] -> nodes[2]."""
    tree, nodes = setup_complete_tree
    graph = tree.graph()
    assert len(graph) == 3
    assert list(graph.successors(nodes[0])) == [nodes[1]]
    assert list(graph.successors(nodes[1])) == [nodes[2]]
|
from os import write
import argparse
import numpy as np
from ADTs.adt import ListNode, MatrixNode
class Graph:
    """Graph loaded from an edge-list file and serialisable to Graphviz .dot.

    Internally represented as Graph.AdjMatrix or Graph.AdjList. A file
    name starting with 'digrafo' marks the graph as directed.
    """
    def __init__(self):
        # Set to an AdjMatrix/AdjList instance by read_graph.
        self.graph_repr = None
    def __repr__(self) -> str:
        # Delegates .dot rendering to the chosen representation.
        return str(self.graph_repr)
    def read_graph(self, path: str, represent_as="matrix"):
        """Load V, E and the edge lines from *path* into the chosen
        representation ('matrix' or 'list')."""
        with open(path) as fp:
            self.V, self.E = self.__get_V_E(fp)
            lines = fp.readlines()
            self.isDigraph = (
                True if fp.name.split("/")[-1].split("_")[0] == "digrafo" else False
            )
            self.graph_repr = (
                self.AdjMatrix(self.V, self.isDigraph, lines)
                if represent_as == "matrix"
                else self.AdjList(self.V, self.isDigraph, lines)
            )
    def __get_V_E(self, fp):
        # Skip leading comment lines ('c ...') and parse the 'V E' header.
        # NOTE(review): a blank line would raise IndexError on strip()[0].
        while line := fp.readline():
            if line.strip()[0] != "c":
                return map(lambda x: int(x), line.split(" "))
    def write_file(self, path="data/output/"):
        """Write the .dot text to a conventionally named file:
        [di]grafo[v]_n_m.dot ('v' when the graph has weights)."""
        file_name = "di" if self.isDigraph else ""
        file_name = file_name + "grafo"
        file_name = file_name + "v" if self.graph_repr.isValued else file_name
        file_name = file_name + "_n_m.dot"
        with open(path+file_name, "w") as fp:
            fp.write(str(self))
    class AdjMatrix:
        """V x V matrix of MatrixNode(has_edge, weight)."""
        def __init__(self, V: int, isDigraph, lines: str):
            self.header = "digraph G" if isDigraph else "graph G"
            self.isDigraph = isDigraph
            # Becomes True as soon as any edge has a non-zero weight.
            self.isValued = False
            self.graph_repr = self.__get_adj_matrix(lines, V, isDigraph)
        def __get_adj_matrix(self, lines: str, V: int, isDigraph):
            # Edge lines are '<from> <to> <weight>' with 1-based vertices.
            adj_matrix = [[MatrixNode(0, None) for _ in range(V)] for _ in range(V)]
            for line in lines:
                if line.strip()[0] == "c":
                    continue
                row, column, weight = map(lambda x: int(x), line.strip().split(" "))
                (adj_matrix[row - 1][column - 1]).has_edge = 1
                (adj_matrix[row - 1][column - 1]).weight = weight
                if weight != 0:
                    self.isValued = True
                if not isDigraph:
                    # NOTE(review): mirroring of undirected edges is
                    # currently disabled — confirm this is intentional.
                    pass
                    # self.__add_simetric_edge(adj_matrix, row, column, weight)
            return adj_matrix
        def __add_simetric_edge(self, adj_matrix, row, column, weight):
            # Mirror an undirected edge (currently unused, see above).
            adj_matrix[column - 1][row - 1] = MatrixNode(1, weight)
        def __repr__(self) -> str:
            body = self.__get_formatted_graph()
            formated_string = self.header + "\n" + "{\n" + body + "}\n"
            return formated_string
        def __get_formatted_graph(self):
            # For undirected graphs only the upper triangle is emitted
            # so each edge appears once.
            separator = " -> " if self.isDigraph else " -- "
            get_weigth_str = (lambda weight: f" [label = {weight}]") if self.isValued else lambda _: ""
            get_col_range = lambda idx, matrix_dimension: range(0, matrix_dimension) if self.isDigraph else range(idx, matrix_dimension)
            body = ""
            matrix_dimension = len(self.graph_repr)
            for row_idx in range(matrix_dimension):
                for col_idx in get_col_range(row_idx, matrix_dimension):
                    if self.graph_repr[row_idx][col_idx].has_edge:
                        _from = str(row_idx + 1)
                        _to = str(col_idx + 1)
                        weight = self.graph_repr[row_idx][col_idx].weight
                        body = body + _from + separator + _to + get_weigth_str(weight) +";\n"
            return body
    class AdjList:
        """Per-vertex list of ListNode(links_to, weight) entries."""
        def __init__(self, V: int, isDigraph, lines: str):
            self.header = "digraph G" if isDigraph else "graph G"
            self.isDigraph = isDigraph
            # Becomes True as soon as any edge has a non-zero weight.
            self.isValued = False
            self.graph_repr = self.__get_adj_list(lines, V, isDigraph)
        def __get_adj_list(self, lines: str, V: int, isDigraph):
            # Edge lines are '<from> <to> <weight>' with 1-based vertices.
            adj_list = [[] for _ in range(V)]
            for line in lines:
                if line.strip()[0] == "c":
                    continue
                row, column, weight = map(lambda x: int(x), line.strip().split(" "))
                adj_list[row - 1].append(ListNode(column - 1, weight))
                if weight != 0:
                    self.isValued = True
                if not isDigraph:
                    # NOTE(review): mirroring of undirected edges is
                    # currently disabled — confirm this is intentional.
                    pass
                    # self.__add_simetric_edge(adj_list, row, column, weight)
            return adj_list
        def __add_simetric_edge(self, adj_list, row, column, weight):
            # Mirror an undirected edge (currently unused, see above).
            adj_list[column - 1].append(ListNode(row - 1, weight))
        def __get_formatted_graph(self):
            body = ""
            separator = " -> " if self.isDigraph else " -- "
            get_weigth_str = (lambda weight: f" [label = {weight}]") if self.isValued else lambda _: ""
            for from_vertix, to_list in enumerate(self.graph_repr):
                for to_vertix in to_list:
                    body = (
                        body
                        + str(from_vertix + 1)
                        + separator
                        + str(to_vertix.links_to + 1)
                        + get_weigth_str(to_vertix.weight)
                        + ";\n"
                    )
            return body
        def __repr__(self) -> str:
            body = self.__get_formatted_graph()
            formated_string = self.header + "\n" + "{\n" + body + "}\n"
            return formated_string
# CLI entry: -f input file (required), -r representation (matrix|list).
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', action='store', required=True)
    parser.add_argument('-r', choices=['matrix', 'list'], action='store')
    arg_dict = vars(parser.parse_args())
    file_path, represent_as = arg_dict['f'], arg_dict['r']
    my_graph = Graph()
    # NOTE(review): represent_as is parsed but never passed to
    # read_graph, so '-r list' is silently ignored — confirm intent.
    my_graph.read_graph(file_path)
    my_graph.write_file()
|
# Package version and startup banner (the banner embeds the version).
__version__ = "0.0.14"
__banner__ = \
"""
# multiplexor %s
# Author: Tamas Jos @skelsec (info@skelsecprojects.com)
""" % __version__
import os
import json
from flask import Flask, request
from subprocess import run
import requests
app = Flask(__name__)

# process_data
@app.route('/uflassist/process_question', methods=['GET'])
def process_pages():
    """
    Forward the 'question' query parameter to the local NLU service
    and return its JSON interpretation (or an error string if the
    service is unreachable/unsuccessful).
    """
    question = request.args.get('question')
    header = {'Content-Type': 'application/json'}
    payload = {"text": question}
    # Bug fix: the content-type dict must be sent as HTTP headers; the
    # original passed it as URL query parameters (params=header), so
    # the NLU service never saw the Content-Type header.
    annotation = requests.post(
        "http://0.0.0.0:5000/uflassist/nlu", data=json.dumps(payload), headers=header)
    if annotation.status_code == 200:
        nlu_interpretation = json.loads(annotation.text)
    else:
        return "Error calling nlu service. Did you run it?"
    #
    # (Translated) TODO: implement "what about?" (WA) extraction —
    # find the smallest set of terms identifying what the user is
    # asking about, and add it as nlu_interpretation['WA'].
    #
    return json.dumps(nlu_interpretation)
# Run the development server on all interfaces.
# NOTE(review): debug=True on 0.0.0.0 exposes the debugger externally —
# disable for anything beyond local development.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5003, debug=True)
|
from .abstract_result import AbstractResult, AbstractResultCodec
class ResultAsServer(AbstractResult):
    """Marker result type for the server role; inherits all behaviour."""
    ...
class ResultAsServerCodec(AbstractResultCodec[ResultAsServer]):
    """Codec specialisation for ResultAsServer; inherits all behaviour."""
    ...
|
# Pair each name in list1 with the next unused number from list2,
# consuming list2 as we go (it ends up empty), then print the mapping.
list1 = ["one", "two", "thee", "four", "five"]
list2 = [1, 2, 3, 4, 5, ]
dic = {}
for name in list1:
    dic[name] = list2.pop(0)
print(dic)
|
# Prompt for a filename and print the part after the last '.'.
f = input("enetr: ")
# Index of the last '.' found (0 if none).
x = 0
for i in range(len(f)):
    if f[i] == ".":
        x = i
print("the filename is: ", f)
# NOTE(review): if the name has no '.', x stays 0 and this prints the
# name minus its first character; f.rfind('.') would be the usual fix.
print("the extension of the file is: ", f[x+1: ])
|
import math
def mysqrt(a):
    '''
    estimate the square root of a using Newton's method
    :param a: non-negative number
    :return: the square root of a
    '''
    x = 3
    epsilon = 0.0000001
    while True:
        # print(x)
        y = (x + a / x) / 2
        # Converged once successive estimates agree within epsilon.
        # (Removed an unreachable `break` that followed this return.)
        if abs(x - y) < epsilon:
            return y
        x = y
def mysqrt_recrusive(a, x=3):
    '''
    estimate the square root of a by recursive Newton iteration
    :param a: non-negative number
    :param x: current estimate (defaults to 3)
    :return: the square root of a
    '''
    eplison = 0.0000001
    improved = (x + a / x) / 2
    # print(x)
    if abs(x - improved) < eplison:
        return improved
    return mysqrt_recrusive(a, improved)
def test_square_root():
    '''
    print a comparison table of mysqrt and math.sqrt for a = 1..9:
    a, mysqrt(a), math.sqrt(a), and the absolute difference between
    the two results
    '''
    print("a ", "mysqrt(a) ", "math.sqrt(a) ", "diff ")
    print("- ", "--------------- ", "---------------- ", "-----")
    i = 0.0
    while i < 9:
        i += 1
        sqrt_from_mysqrt = mysqrt(i)
        sqrt_from_math_sqrt = math.sqrt(i)
        # Bug fix: the original subtracted math.sqrt's result from
        # itself, so the printed difference was always 0.
        diff = abs(sqrt_from_mysqrt - sqrt_from_math_sqrt)
        print(i, ' ', sqrt_from_mysqrt, '', sqrt_from_math_sqrt, ' ', diff)
# print(mysqrt(1600))
# print(mysqrt_recrusive(1600))
# Print the comparison table whenever this module runs.
test_square_root()
|
"""
Definition of forms.
"""
from django import forms
from django.forms import ModelForm
from app.models import Ticket
from app.models import Tech_update
from django.contrib.auth.models import User
from django.forms import BaseModelFormSet
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy as _
class BootstrapAuthenticationForm(AuthenticationForm):
    """Authentication form which uses boostrap CSS."""
    # Both fields reuse Django's auth validation; the widgets only add
    # Bootstrap's form-control styling and placeholder text.
    username = forms.CharField(max_length=254,
                               widget=forms.TextInput({
                                   'class': 'form-control',
                                   'placeholder': 'User name'}))
    password = forms.CharField(label=_("Password"),
                               widget=forms.PasswordInput({
                                   'class': 'form-control',
                                   'placeholder':'Password'}))
class Ticket_form(forms.ModelForm):
    """Customer-facing form for opening a new support ticket."""
    class Meta:
        model = Ticket
        # Human-friendly labels for the rendered model fields.
        labels = {
            'custID':'Location:',
            'ticketID':'Problem:',
            'description':'Description:',
            'room_num':'Room:',
            'severity':'Urgency:',
            'aff_work':'Affecting Work?:',
            'aff_num':'Number Affected:',
            'contact_method':'Contact Method:',
        }
        fields = ['custID','ticketID','description','room_num','contact','contact_method','severity','aff_work','aff_num','document']
class DateInput(forms.DateInput):
    """DateInput variant rendered as an HTML5 date picker input."""
    input_type = 'date'
class Ticket_update(forms.ModelForm):
    """Staff form for updating a ticket's completion date, fix and status."""
    class Meta:
        model = Ticket
        labels = {
            "ticket_completed":"Ticket Updated:",
        }
        # Render the completion date with the HTML5 date picker.
        widgets = {
            'ticket_completed': DateInput()
        }
        fields = ['ticket_completed','ticket_fix','status']
class TechUpdate(forms.ModelForm):
    """Technician form for logging dated notes and status on a ticket."""
    class Meta:
        model = Tech_update
        labels = {
            "tech_date":"Ticket Date:",
        }
        # Render the tech date with the HTML5 date picker.
        widgets = {
            'tech_date': DateInput()
        }
        fields = ['tech_date','tech_notes','tech_status']
from tools.tools_module import *
import wolframalpha
# Wolfram|Alpha client setup.
# SECURITY NOTE(review): the App ID is hard-coded in source — move it
# to configuration or an environment variable.
appId = 'TKAPVA-Q2GP6VU62E'
client = wolframalpha.Client(appId)
# Module-level state: last question asked and its answer.
question = ""
answer =""
def ask_quesion(inputType,self):
    """Ask Wolfram|Alpha a question and speak the answer, looping until
    the user says close/exit.

    inputType 'userInput' uses a text dialog; anything else captures
    speech via takeCommand(self). NOTE(review): the name typo
    ('quesion') and the (inputType, self) parameter order are part of
    the public interface and left unchanged.
    """
    global answer
    speak('Thank you.... Now I am ready to give, your answer. Please ask me,')
    speak('I can answer to computational and geographical questions, So, what question do you want to ask now...')
    global question
    if inputType in ['userInput']:
        question = prompt(text='Give me Question', title='Say Something..' , default='').lower()
    else:
        question = takeCommand(self).lower()
    # NOTE(review): .lower() above would already raise if the input were
    # None, so this guard may never see None — confirm takeCommand's
    # failure behaviour.
    if question != None:
        speak("The question is,"+question)
        speak('Please wait....')
        res = client.query(question)
        try:
            answer = next(res.results).text
        except:
            # Bare except: any query/parse failure is treated as an
            # invalid question and the function returns.
            print('Invalid Question!')
            speak("Sorry, The given question is not valid. Please, try again!")
            return
    else:
        # Re-prompt when no question was captured.
        ask_quesion(inputType,self)
    if question in ['close edit mode' , 'close' , 'exit' , 'quit','return','back']:
        speak("Edit mode is successfully closed...")
    else:
        # print(answer)
        speak("The answer of this question is,"+answer)
        # Loop: keep answering until the user asks to exit.
        ask_quesion(inputType,self)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 06:15:46 2018
@author: Scott Warnock
"""
def cumulative_Data(period_DataFrame):
    """
    Add cumulative total-cost / planned-value / earned-value rows to a
    period-indexed EVM dataframe, print it and return it.

    Note: the rows are added in place on the dataframe that was passed
    in (the return value is the same object).
    """
    evm = period_DataFrame
    # Each cumulative row is the running sum of its period row.
    for target, source in (
        ('Cumulative Total Cost', 'Period Total Cost'),
        ('Cumulative Planned Value', 'Period Total Planned'),
        ('Cumulative Earned Value', 'Period Total Earned'),
    ):
        evm.loc[target] = evm.loc[source].cumsum()
    print(evm)
    return evm
# coding: utf8
from django.core.mail import send_mail
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render
from django.shortcuts import render_to_response
from django.template import Context
from django.template.loader import get_template
from models import Book
import datetime
# def search_form(request):
# return render_to_response('search_form.html')
def showbook(request):
    # Render the result snippet with fixed sample title/author data.
    return render_to_response('result_snippet.html', {'title': 'title123', 'author': '麦'})
def search(request):
    """Search books whose title contains the submitted query.

    Re-renders the form with error messages when the query is missing,
    empty, or longer than 20 characters.
    """
    errors = []
    if 'q' not in request.GET:
        return render_to_response('search.html', {'errors': errors})
    query = request.GET['q']
    if not query:
        errors.append('Enter a search term.')
    elif len(query) > 20:
        errors.append('Please enter at most 20 characters.')
    else:
        found = Book.objects.filter(title__icontains=query)
        return render_to_response('search.html',
                                  {'books': found, 'query': query})
    return render_to_response('search.html', {'errors': errors})
def hours_ahead(request, offset):
    """Show the time `offset` hours from now; 404 on a non-integer offset."""
    try:
        offset = int(offset)
    except ValueError:
        raise Http404()
    dt = datetime.datetime.now() + datetime.timedelta(hours=offset)
    # Removed a leftover debugging `assert False` that made this view
    # unconditionally raise before producing its response.
    html = "<html><body>In %s hour(s), it will be %s.</body></html>" % (offset, dt)
    return HttpResponse(html)
def latest_books(request):
    """List the ten most recently published books."""
    recent = Book.objects.order_by('-publication_date')[:10]
    return render_to_response('latest_books.html', {'book_list': recent})
# def current_datetime(request):
# now = datetime.datetime.now()
# html = "<html><body>It is now %s.</body></html>" % now
# return HttpResponse(html)
# def current_datetime(request):
# now = datetime.datetime.now()
# t = get_template('current_datetime.html')
# html = t.render(Context({'current_date': now}))
# return HttpResponse(html)
def current_datetime(request):
    """Render the current server time."""
    context = {'current_date': datetime.datetime.now()}
    return render_to_response('current_datetime.html', context)
def current_section(request):
    """Render mypage.html, passing all local variables as template context."""
    a = request.REQUEST.get('a', '0')  # NOTE(review): request.REQUEST is removed in modern Django
    title = 'mypage'
    current_section = 'mypage->nav.html'
    # The old try/except lookup of HTTP_USER_AGENT was dead code: its result
    # was immediately overwritten by the .get() below, which already supplies
    # the 'unknown' fallback.
    ua = request.META.get('HTTP_USER_AGENT', 'unknown')
    # return HttpResponse("Your browser is %s" % ua)
    return render_to_response('mypage.html', locals())
# def display_meta(request):
# values = request.META.items()
# values.sort()
# html = []
# for k, v in values:
# if str(v).startswith('<'):
# v = '<'+str(v)[1:-1]+'>'
# html.append('<tr><td>%s</td><td>%s</td></tr>' % (k, v))
# return HttpResponse('<table>%s</table>' % '\n'.join(html))
def display_meta(request):
    """Render all request.META key/value pairs as an HTML table."""
    meta_items = request.META.items()  # renamed: `re` shadowed the regex module
    meta_items.sort()  # Python 2: items() is a plain list
    s = '<table>'
    for k, v in meta_items:
        # Escape angle brackets so values render as text, not markup.
        # The previous chained replace() calls were garbled (they turned
        # every '<' into '>').
        v = str(v).replace('<', '&lt;').replace('>', '&gt;')
        s += ('<tr><td>%s</td><td>%s</td></tr>\n' % (k, v))
    s += '</table>'
    return HttpResponse(s)
def hello(request):
    """Trivial plain-text smoke-test view."""
    greeting = "Hello world"
    return HttpResponse(greeting)
import pymysql
import json
from app import app
from flask import jsonify,session
from flask import flash, request
from flask_restful import Resource, Api
from flaskext.mysql import MySQL
from datetime import date, timedelta
from time import mktime
mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = ''  # NOTE(review): empty root password -- confirm dev-only
app.config['MYSQL_DATABASE_DB'] = 'mtb_db'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
# Alternative remote credentials kept for reference:
# app.config['MYSQL_DATABASE_USER'] = 'ducvinh26091997'
# app.config['MYSQL_DATABASE_PASSWORD'] = 'ducvinh26091997'
# app.config['MYSQL_DATABASE_DB'] = 'mtb_admin'
# app.config['MYSQL_DATABASE_HOST'] = 'db4free.net'
mysql.init_app(app)
class apiLogin(Resource):
    """REST resource that authenticates a member by username and password."""

    def get(self, username, password):
        """Return the member's profile as JSON (200) or a JSON error (400).

        NOTE(review): credentials in the URL and what looks like plaintext
        password storage are both insecure -- confirm whether hashing and a
        POST endpoint are feasible here.
        """
        conn = mysql.connect()
        cursor = conn.cursor(pymysql.cursors.DictCursor)
        try:
            # Parameterized query: the previous string concatenation was an
            # SQL-injection hole.
            cursor.execute(
                "SELECT username, fullname, birthday, address, phone, email, gender"
                " from members where username=%s and password=%s",
                (username, password),
            )
            row = cursor.fetchone()
        finally:
            # The original leaked the cursor and connection on every request.
            cursor.close()
            conn.close()
        if row:
            resp = jsonify(row)
            resp.status_code = 200
        else:
            # Previously `resp` was never created on this path, so the
            # `resp.status_code = 400` line raised NameError.
            resp = jsonify({'message': 'invalid credentials'})
            resp.status_code = 400
        return resp
# Copyright (c) 2020 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from PyPowerFlex import base_client
from PyPowerFlex import exceptions
LOG = logging.getLogger(__name__)
class MediaType:
    """Acceleration pool media types."""

    # solid-state-drive backed pool
    ssd = 'SSD'
    # non-volatile DIMM backed pool
    nvdimm = 'NVDIMM'
class AccelerationPool(base_client.EntityRequest):
    """Client for PowerFlex acceleration pool entities."""

    def create(self,
               media_type,
               protection_domain_id,
               name=None,
               isRfcache=None):
        """Create PowerFlex acceleration pool.

        :param media_type: one of predefined attributes of MediaType
        :type media_type: str
        :type protection_domain_id: str
        :type name: str
        :type isRfcache: bool
        :rtype: dict
        """
        # SSD pools require the Rfcache flag to be provided explicitly.
        if media_type == MediaType.ssd and not isRfcache:
            raise exceptions.InvalidInput('isRfcache must be set for media_type SSD.')
        return self._create_entity({
            'mediaType': media_type,
            'protectionDomainId': protection_domain_id,
            'name': name,
            'isRfcache': isRfcache,
        })

    def delete(self, acceleration_pool_id):
        """Delete PowerFlex acceleration pool.

        :type acceleration_pool_id: str
        :rtype: None
        """
        return self._delete_entity(acceleration_pool_id)
|
import re
# import os
#
# path = "./data/openssl_openssl"
# files = os.listdir(path)
#
# for file in files:
# if os.path.getsize(os.fspath(path + '/' + file)) == 0: # 文件大小为0
# print(file + '是空文件,即将执行删除操作!')
# os.remove(os.fspath(path + '/' + file)) # 删除这个文件
# print(file)
# with open('./data/openssl/openssl_cve.txt', 'r') as f:
# content = f.read().split()
# for c in content:
# print(c)
# lst1 = [2, 1, 3, 4, 1]
# lst2 = list(set(lst1))
# print(lst2)
# str = "[['1.1.0'], ['1.1.0a'], ['1.1.0b'], ['1.1.0c']]"
# a = re.split('[\[\]\'\, ]', str)
# # print(re.split('[\[\]\'\, ]', str))
# for i in range(a.count('')):
# a.remove('')
# print('test' in 'test/asdasfasf')
print('excluding' in 'Up to (excluding)') |
#!/usr/bin/env python
import argparse
import utils
import os.path
import subprocess
import numpy as np
import random
import numpy.random
import tensorflow as tf
import keras
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
#random.seed(2018)
#np.random.seed(2018)
#tf.set_random_seed(2018)
#os.environ['PYTHONHASHSEED'] = '2018'
#config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#keras.backend.tensorflow_backend.set_session(tf.Session(config=config))
print(utils.PROJECT_ROOT)
parser = argparse.ArgumentParser(prog='embed_main', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-m', '--model', choices=['cca', "dcca", 'ccca', "sdcca", 'seg_lstm_cca', 's_dcca'], default='ccca')
parser.add_argument('-a', '--audio_path', default='vgg')
parser.add_argument('-af', '--audio_fusion_path', default='')  # audio_feat/audio_soundnet.
parser.add_argument('-v', '--rgb_path', default='inception')  # visual_feat/visual_i3d_joint.
parser.add_argument('-vf', '--rgb_fusion_path', default='')
parser.add_argument('-o', '--output_size', type=int)
parser.add_argument('-b', '--beta', type=float)
parser.add_argument('-p', '--padding', action='store_true', default=False)
parser.add_argument('-f', '--fold_path', default='folds')
args = parser.parse_args()
ROOT = "/home/dhzeng/AVIDEO/Data"
# Resolve dataset paths; an empty fusion path disables that fusion feature.
audio_path = os.path.join(ROOT, args.audio_path)
audio_fusion_path = os.path.join(ROOT, args.audio_fusion_path) if args.audio_fusion_path else ""
rgb_path = os.path.join(ROOT, args.rgb_path)
rgb_fusion_path = os.path.join(ROOT, args.rgb_fusion_path) if args.rgb_fusion_path else ""
fold_path = os.path.join(ROOT, args.fold_path)
print(args.model)
print(args.padding)
# Mode selection, deduplicated from three near-identical utils.run calls
# (the original also compared `args.padding == True` / `== False`):
# seg_lstm_cca always uses mode 2; otherwise padding -> 1, no padding -> 2.
if args.model == "seg_lstm_cca":
    mode = 2
elif args.padding:
    mode = 1
else:
    mode = 2
utils.run(audio_path, audio_fusion_path, rgb_path, rgb_fusion_path, fold_path,
          args.model, args.output_size, args.beta, mode, args.padding)
|
from django.conf.urls import url
from . import views
app_name = 'tracker'
# Route table for the book-tracker app: book_id is numeric, status is a slug.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^overview/$', views.overview, name='overview'),
    url(r'^(?P<book_id>[0-9]+)/change_read_status/(?P<status>[\w-]+)/$', views.change_read_status, name='change_read_status'),
    url(r'^listing/$', views.listing, name='listing'),
]
|
# Sorting Practice
def selectionSort(values=None):
    """Selection sort, in place, ascending; prints and returns the list.

    :param values: list to sort; defaults to the module-level ``aList``
        (kept for backward compatibility with the original global-based API).
    :return: the sorted list (new; the original returned None).
    """
    if values is None:
        values = aList
    n = len(values)
    for i in range(n):
        # Find the minimum of the unsorted tail and swap it into slot i.
        min_index = i
        for j in range(i + 1, n):
            if values[j] < values[min_index]:
                min_index = j
        values[i], values[min_index] = values[min_index], values[i]
    print(values)
    return values
def countingSort(values=None):
    """Counting sort for non-negative ints; prints and returns a new sorted list.

    Fixes two defects of the original: zeros were silently dropped (the
    ``if index > 0`` guard) and the count table was hard-coded to size 10,
    breaking for values >= 10.

    :param values: iterable of ints >= 0; defaults to the module-level ``bList``.
    :return: new sorted list (the input is not modified).
    """
    if values is None:
        values = bList
    counts = [0] * (max(values) + 1 if values else 0)
    for value in values:
        counts[value] += 1
    result = []
    for value, count in enumerate(counts):
        result.extend([value] * count)
    print(result)
    return result
def insertionSort(values=None):
    """Insertion sort, in place, ascending; prints and returns the list.

    Fixes an operand-order bug: the original tested
    ``cList[j-1] > cList[j] and j > 0``, so at j == 0 it first compared
    against cList[-1] (the last element) before checking the bound, which
    could trigger a bogus swap through negative indexing.

    :param values: list to sort; defaults to the module-level ``cList``.
    """
    if values is None:
        values = cList
    for i in range(1, len(values)):
        j = i
        # Bound check first, comparison second.
        while j > 0 and values[j - 1] > values[j]:
            values[j - 1], values[j] = values[j], values[j - 1]
            j -= 1
    print(values)
    return values
# Shared sample data: each sort gets its own copy so runs stay independent.
aList = [5, 8, 3, 4, 2, 9, 2]
bList = aList.copy()
cList = aList.copy()
selectionSort()
countingSort()
insertionSort()
|
from abc import ABC, abstractmethod
from copy import deepcopy
from dataclasses import dataclass, field
from textwrap import indent
from typing import Any, Callable, Dict, MutableMapping, Optional
import numpy as np
from local_migrator import REGISTER, class_to_str
from PartSegCore.algorithm_describe_base import (
AlgorithmDescribeBase,
AlgorithmProperty,
ROIExtractionProfile,
base_model_to_algorithm_property,
)
from PartSegCore.image_operations import RadiusType
from PartSegCore.project_info import AdditionalLayerDescription
from PartSegCore.roi_info import ROIInfo
from PartSegCore.utils import BaseModel, numpy_repr
from PartSegImage import Channel, Image
def calculate_operation_radius(radius, spacing, gauss_type):
    """Scale a radius per axis when voxel spacing is anisotropic.

    For a 2D gaussian applied to a 3D image the leading (z) spacing entry is
    ignored. Returns the scalar radius unchanged for isotropic spacing,
    otherwise a per-axis list of corrected radii.
    """
    if gauss_type == RadiusType.R2D and len(spacing) == 3:
        spacing = spacing[1:]
    smallest = min(spacing)
    if smallest == max(spacing):
        # Isotropic spacing -- no per-axis correction needed.
        return radius
    ratios = [axis / smallest for axis in spacing]
    return [radius / ratio for ratio in ratios]
def dict_repr(dkt: MutableMapping) -> str:
    """
    calculate dict representation which use :py:func:`numpy_repr` for numpy representation.

    Nested mappings are rendered recursively; every other value uses ``repr``.
    :param dict dkt: dict to be represented
    :return: string representation
    """

    def _fmt(key, value):
        if isinstance(value, MutableMapping):
            return f"{key}: {dict_repr(value)}"
        if isinstance(value, np.ndarray):
            return f"{key}: {numpy_repr(value)}"
        return f"{key}: {value!r}"

    return "{" + ", ".join(_fmt(k, v) for k, v in dkt.items()) + "}"
@dataclass(frozen=True, repr=False)
class ROIExtractionResult:
    """
    Class to store results of roi extraction process.
    :ivar np.ndarray roi: Region of Interest represented as numpy array.
    :ivar ROIExtractionProfile parameters: parameters of roi extraction process.
    :ivar Dict[str,AdditionalLayerDescription] ~.additional_layers: additional layers returned by algorithm.
    Could be previewer using napari viewer.
    :ivar dict roi_annotation: Annotation for ROI. Currently displayed as tooltip
    :ivar Dict[str,np.ndarray] alternative_representation: Arrays with alternative representations of ROI.
    :ivar Optional[str] ~.file_path: information on which file roi extraction was performed.
    :ivar ROIInfo ~.roi_info: ROIInfo for current roi.
    :ivar Optional[np.ndarray] ~.points: array of points.
    """
    # TODO add alternative representation using dict mapping.
    roi: np.ndarray
    parameters: ROIExtractionProfile
    additional_layers: Dict[str, AdditionalLayerDescription] = field(default_factory=dict)
    info_text: str = ""
    roi_annotation: Dict = field(default_factory=dict)
    alternative_representation: Dict[str, np.ndarray] = field(default_factory=dict)
    file_path: Optional[str] = None
    roi_info: Optional[ROIInfo] = None
    points: Optional[np.ndarray] = None
    def __post_init__(self):
        # "ROI" is reserved for the primary segmentation; an alternative
        # representation must not shadow it.
        if "ROI" in self.alternative_representation:
            raise ValueError("alternative_representation field cannot contain field with ROI key")
        # Backfill layer names from their dict keys when the algorithm left
        # them empty.
        for key, value in self.additional_layers.items():
            if not value.name:
                value.name = key
        # The dataclass is frozen, so the derived roi_info default has to be
        # injected via object.__setattr__.
        if self.roi_info is None:
            object.__setattr__(
                self,
                "roi_info",
                ROIInfo(roi=self.roi, annotations=self.roi_annotation, alternative=self.alternative_representation),
            )
    def __str__(self): # pragma: no cover
        return (
            f"ROIExtractionResult(roi=[shape: {self.roi.shape}, dtype: {self.roi.dtype},"
            f" max: {np.max(self.roi)}], parameters={self.parameters},"
            f" additional_layers={list(self.additional_layers.keys())}, info_text={self.info_text},"
            f" alternative={dict_repr(self.alternative_representation)},"
            f" roi_annotation={dict_repr(self.roi_annotation)}"
        )
    def __repr__(self): # pragma: no cover
        return (
            f"ROIExtractionResult(roi=[shape: {self.roi.shape}, dtype: {self.roi.dtype}, "
            f"max: {np.max(self.roi)}], parameters={self.parameters}, "
            f"additional_layers={list(self.additional_layers.keys())}, info_text={self.info_text},"
            f" alternative={dict_repr(self.alternative_representation)},"
            f" roi_annotation={dict_repr(self.roi_annotation)}"
        )
SegmentationResult = ROIExtractionResult
def report_empty_fun(_x, _y):  # pragma: no cover # skipcq: PTC-W0049
    """No-op progress callback used when the caller does not want reports."""
class AlgorithmInfo(BaseModel, arbitrary_types_allowed=True):
    """Bundle describing a segmentation run; attached to error reports."""
    # name under which the algorithm is registered
    algorithm_name: str
    # parameters the algorithm was called with (profile values or model)
    parameters: Any
    # image the algorithm was processing
    image: Image
class ROIExtractionAlgorithm(AlgorithmDescribeBase, ABC):
    """
    Base class for all segmentation algorithm.
    :ivar Image ~.image: Image to process
    :ivar numpy.ndarray ~.channel: selected channel
    :ivar numpy.ndarray ~.segmentation: final segmentation
    :ivar numpy.ndarray ~.mask: mask limiting segmentation area
    """
    def __init__(self):
        super().__init__()
        self.image: Optional[Image] = None
        self.channel = None
        self.segmentation = None
        self._mask: Optional[np.ndarray] = None
        self.new_parameters: Dict[str, Any] = {}
    def __repr__(self): # pragma: no cover
        # Summarize the mask without dumping whole arrays into the repr.
        if self.mask is None:
            mask_info = "mask=None"
        elif isinstance(self.mask, np.ndarray):
            mask_info = (
                f"mask_dtype={self.mask.dtype}, mask_shape={self.mask.shape}, mask_unique={np.unique(self.mask)}"
            )
        else:
            mask_info = f"mask={self.mask}"
        return (
            f"{self.__class__.__module__}.{self.__class__.__name__}(\n"
            + indent(f"image={self.image!r},\n", " " * 4)
            + indent(f"channel={numpy_repr(self.channel)},\n{mask_info},", " " * 4)
            + indent(f"\nvalue={self.get_segmentation_profile().values!r})", " " * 4)
        )
    def clean(self):
        """Drop references to image/segmentation/mask data (frees memory)."""
        self.image = None
        self.segmentation = None
        self.channel = None
        self.mask = None
    @property
    def mask(self) -> Optional[np.ndarray]:
        """Mask limiting the segmentation area.
        For algorithms without time support only the first time point is exposed.
        """
        if self._mask is not None and not self.support_time():
            return self.image.clip_array(self._mask, t=0)
        return self._mask
    @mask.setter
    def mask(self, val: Optional[np.ndarray]):
        # Fit the provided mask to the current image geometry; None clears it.
        if val is None:
            self._mask = None
            return
        self._mask = self.image.fit_mask_to_image(val)
    @classmethod
    @abstractmethod
    def support_time(cls):
        """Return True if the algorithm can process time-series data."""
        raise NotImplementedError
    @classmethod
    @abstractmethod
    def support_z(cls):
        """Return True if the algorithm can process 3D (z-stack) data."""
        raise NotImplementedError
    def set_mask(self, mask):
        """Set mask which will limit segmentation area"""
        self.mask = mask
    def calculation_run_wrap(self, report_fun: Callable[[str, int], None]) -> ROIExtractionResult:
        """Run :py:meth:`calculation_run`, wrapping unexpected failures.
        :py:class:`SegmentationLimitException` passes through untouched; any
        other exception is re-raised as :py:class:`SegmentationException`
        carrying algorithm name, parameters and image for diagnostics.
        """
        try:
            return self.calculation_run(report_fun)
        except SegmentationLimitException: # pragma: no cover
            raise
        except Exception as e: # pragma: no cover
            parameters = self.get_segmentation_profile()
            image = self.image
            raise SegmentationException(
                AlgorithmInfo(algorithm_name=self.get_name(), parameters=parameters, image=image)
            ) from e
    @abstractmethod
    def calculation_run(self, report_fun: Callable[[str, int], None]) -> ROIExtractionResult:
        """Perform the actual ROI extraction; implemented by subclasses."""
        raise NotImplementedError
    @abstractmethod
    def get_info_text(self):
        """Return a human-readable summary of the last run."""
        raise NotImplementedError
    def get_channel(self, channel_idx):
        """Return the data of the given channel, validating time support.
        :raises ValueError: when the image has a time axis but the algorithm
            does not support time data.
        :raises SegmentationException: when ``channel_idx`` is out of range
            (integer index) or unknown (channel name).
        """
        if self.support_time():
            return self.image.get_data_by_axis(c=channel_idx)
        if self.image.is_time:
            raise ValueError("This algorithm do not support time data")
        if isinstance(channel_idx, int) and self.image.channels <= channel_idx:
            raise SegmentationException(
                f"Image {self.image} has only {self.image.channels} when requested channel {channel_idx}"
            )
        if isinstance(channel_idx, str) and channel_idx not in self.image.channel_names:
            raise SegmentationException(
                f"Image {self.image} has only {self.image.channel_names} when requested channel '{channel_idx}'"
            )
        return self.image.get_data_by_axis(c=channel_idx, t=0)
    def set_image(self, image):
        """Set the image to process and reset channel and mask."""
        self.image = image
        self.channel = None
        self.mask = None
    def set_parameters(self, _params=None, **kwargs):
        """Set algorithm parameters from a model object, a dict, or kwargs."""
        # FIXME when drop python 3.7 use postional only argument
        if _params is not None:
            if isinstance(_params, dict):
                kwargs = _params
            else:
                self.new_parameters = _params
                return
        if self.__new_style__:
            # New-style algorithms: migrate raw values, then build the model.
            kwargs = REGISTER.migrate_data(class_to_str(self.__argument_class__), {}, kwargs)
            self.new_parameters = self.__argument_class__(**kwargs) # pylint: disable=not-callable
            return
        # Old-style algorithms: the kwargs must match the declared fields exactly.
        base_names = [x.name for x in self.get_fields() if isinstance(x, AlgorithmProperty)]
        if set(base_names) != set(kwargs.keys()):
            missed_arguments = ", ".join(set(base_names).difference(set(kwargs.keys())))
            additional_arguments = ", ".join(set(kwargs.keys()).difference(set(base_names)))
            raise ValueError(f"Missed arguments {missed_arguments}; Additional arguments: {additional_arguments}")
        self.new_parameters = deepcopy(kwargs)
    def get_segmentation_profile(self) -> ROIExtractionProfile:
        """Return current parameters wrapped in a :py:class:`ROIExtractionProfile`."""
        return ROIExtractionProfile(name="", algorithm=self.get_name(), values=deepcopy(self.new_parameters))
    @staticmethod
    def get_steps_num():
        """Return number of algorithm steps if your algorithm report progress, else should return 0"""
        return 0
    @classmethod
    def get_channel_parameter_name(cls):
        """Return the name of the first Channel-typed parameter.
        :raises ValueError: when the algorithm declares no channel parameter.
        """
        if cls.__new_style__:
            fields = base_model_to_algorithm_property(cls.__argument_class__)
        else:
            fields = cls.get_fields()
        for el in fields:
            if el.value_type == Channel:
                return el.name
        raise ValueError("No channel defined")
class SegmentationLimitException(Exception):
    """Raised when segmentation cannot proceed because of imposed limits."""
class SegmentationException(Exception):
    """Generic wrapper for unexpected failures during segmentation."""
SegmentationAlgorithm = ROIExtractionAlgorithm # rename backward compatibility
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 13 20:56:34 2018
@author: Rushi Varun
"""
from sklearn.datasets import fetch_20newsgroups
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix, classification_report
import numpy as np
from sklearn.pipeline import Pipeline
# Train a bag-of-words Naive Bayes classifier on 20newsgroups and
# categorize a user-supplied text.
stuff_to_train = fetch_20newsgroups(subset="train", shuffle=True)
stuff_to_test = fetch_20newsgroups(subset='test', shuffle=True)
text_clf = Pipeline([('vect', CountVectorizer()), ('clf', MultinomialNB())])
# BUG FIX: fit on the *training* split. The original fit on the test split,
# so the accuracy below measured performance on the training data itself.
text_clf = text_clf.fit(stuff_to_train.data, stuff_to_train.target)
predicted = text_clf.predict(stuff_to_test.data)
print(np.mean(predicted == stuff_to_test.target))
print(classification_report(stuff_to_test.target, predicted))
text = input("enter the text you want to categorise")  # custom input
predict_new = text_clf.predict([text])
targetNames = stuff_to_train.target_names
# predict() returns an array; take its single element explicitly.
print(targetNames[int(predict_new[0])])
mydict = {"text": targetNames[int(predict_new[0])]}
print(mydict)
|
def pasc_tri(row, col, _memo=None):
    """Return entry ``col`` of row ``row`` of Pascal's triangle (1-indexed).

    Entries outside the triangle evaluate to 0 (reaching row 0 anywhere but
    the left edge). Memoization turns the naive exponential recursion into
    O(row * col) time while keeping the original signature backward-compatible.

    :param _memo: internal cache; callers should not pass it.
    """
    if _memo is None:
        _memo = {}
    if row == 0:
        return 0  # fell off the top of the triangle
    if col == 1:
        return 1  # left edge is always 1
    key = (row, col)
    if key not in _memo:
        _memo[key] = (pasc_tri(row - 1, col - 1, _memo)
                      + pasc_tri(row - 1, col, _memo))
    return _memo[key]
# Demo: first entry of row 3 (expected 1); the comment below sketches the triangle.
print(pasc_tri(3,1))
# 1
# 1 1
# 1 2 1
# 1 3 3 1
# 1 4 6 4 1
# -*- coding: utf-8 -*-
import re
from nose.tools import eq_
from mkt.users.helpers import emaillink, user_data
from mkt.users.models import UserProfile
def test_emaillink():
    """emaillink() output must decode back to the original address.

    Checks the visible anchor variant, the js-hidden span variant, and the
    variant with a custom link title.
    """
    email = 'me@example.com'
    obfuscated = unicode(emaillink(email))
    # remove junk
    m = re.match(
        r'<a href="#"><span class="emaillink">(.*?)'
        '<span class="i">null</span>(.*)</span></a>'
        '<span class="emaillink js-hidden">(.*?)'
        '<span class="i">null</span>(.*)</span>', obfuscated)
    # The fragments are stored reversed; [::-1] restores the address.
    # NOTE(review): the .replace('@', '@').replace('.', '.') calls are no-ops;
    # they look like mojibake of HTML-entity decoding -- confirm in history.
    obfuscated = (''.join((m.group(1), m.group(2)))
                  .replace('@', '@').replace('.', '.'))[::-1]
    eq_(email, obfuscated)
    title = 'E-mail your question'
    obfuscated = unicode(emaillink(email, title))
    m = re.match(
        r'<a href="#">(.*)</a>'
        '<span class="emaillink js-hidden">(.*?)'
        '<span class="i">null</span>(.*)</span>', obfuscated)
    eq_(title, m.group(1))
    obfuscated = (''.join((m.group(2), m.group(3)))
                  .replace('@', '@').replace('.', '.'))[::-1]
    eq_(email, obfuscated)
def test_user_data():
    """user_data() marks a profile with a primary key as non-anonymous."""
    u = user_data(UserProfile(pk=1))
    eq_(u['anonymous'], False)
|
import datetime
from eppy.doc import EppRenewCommand
from registrobrepp.common.periodtype import PeriodType
class BrEppRenewDefRegCommand(EppRenewCommand):
    """EPP <renew> command for a .br defensive registration (defReg-1.0 extension)."""
    def __init__(self, roid: str, curexpdate: datetime.datetime, period: int, periodtype: PeriodType = PeriodType.YEAR):
        # Annotation fixed: `datetime` alone referred to the module, not the type.
        # Build the defReg:renew payload; curExpDate is serialized as YYYY-MM-DD.
        dct = {
            'epp': {
                'command': {
                    'renew': {
                        'defReg:renew': {
                            'roid': roid,
                            'curExpDate': curexpdate.strftime('%Y-%m-%d'),
                            'period': {'@unit': periodtype.value, '_text': period}
                        }
                    }
                }
            }
        }
        # Register the defReg namespace so the renew element serializes correctly.
        extra_nsmap = {'defReg': 'http://nic.br/epp/defReg-1.0'}
        super(BrEppRenewDefRegCommand, self).__init__(dct=self.annotate(dct), extra_nsmap=extra_nsmap)
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from unittest.mock import patch
from pytest import raises
from byceps.services.ticketing import (
event_service,
ticket_code_service,
ticket_creation_service,
ticket_service,
)
def test_create_ticket(admin_app, category, ticket_owner):
    """A freshly created ticket carries the expected initial state."""
    ticket = ticket_creation_service.create_ticket(
        category.party_id, category.id, ticket_owner.id
    )
    assert_created_ticket(ticket, category.id, ticket_owner.id)
    # Clean up.
    ticket_service.delete_ticket(ticket.id)
@patch(
    'byceps.services.ticketing.ticket_code_service._generate_ticket_code'
)
def test_create_ticket_with_existing_code(
    generate_ticket_code_mock, admin_app, category, ticket_owner
):
    """Creating a ticket whose generated code is already taken must fail."""
    # Force the code generator to always return the same value.
    generate_ticket_code_mock.return_value = 'TAKEN'
    existing_ticket = ticket_creation_service.create_ticket(
        category.party_id, category.id, ticket_owner.id
    )
    assert existing_ticket.code == 'TAKEN'
    with raises(ticket_creation_service.TicketCreationFailedWithConflict):
        ticket_creation_service.create_ticket(
            category.party_id, category.id, ticket_owner.id
        )
    # Clean up.
    ticket_service.delete_ticket(existing_ticket.id)
def test_create_tickets(admin_app, category, ticket_owner):
    """Bulk creation yields the requested number of well-formed tickets."""
    quantity = 3
    tickets = ticket_creation_service.create_tickets(
        category.party_id, category.id, ticket_owner.id, quantity
    )
    for ticket in tickets:
        assert_created_ticket(ticket, category.id, ticket_owner.id)
    # Clean up.
    for ticket in tickets:
        ticket_service.delete_ticket(ticket.id)
@patch(
    'byceps.services.ticketing.ticket_code_service._generate_ticket_code'
)
def test_create_tickets_with_clashing_generated_codes(
    generate_ticket_code_mock, admin_app, category, ticket_owner
):
    """Repeated code collisions surface as TicketCreationFailed wrapping the cause."""
    generate_ticket_code_mock.return_value = 'CLASH'
    quantity = 3
    with raises(ticket_creation_service.TicketCreationFailed) as excinfo:
        # NOTE: `tickets` is intentionally unused -- the call is expected to raise.
        tickets = ticket_creation_service.create_tickets(
            category.party_id, category.id, ticket_owner.id, quantity
        )
    # The service wraps the underlying code-generation failure as args[0].
    wrapped_exc = excinfo.value.args[0]
    assert type(wrapped_exc) is ticket_code_service.TicketCodeGenerationFailed
def assert_created_ticket(ticket, expected_category_id, expected_owner_id):
    """Assert the full initial state of a just-created, unused ticket."""
    assert ticket is not None
    assert ticket.created_at is not None
    assert ticket.code is not None
    assert ticket.bundle_id is None
    assert ticket.category_id == expected_category_id
    assert ticket.owned_by_id == expected_owner_id
    assert ticket.seat_managed_by_id is None
    assert ticket.user_managed_by_id is None
    assert ticket.occupied_seat_id is None
    assert ticket.used_by_id is None
    assert not ticket.revoked
    assert not ticket.user_checked_in
    # No ticket events should have been recorded yet.
    events = event_service.get_events_for_ticket(ticket.id)
    assert len(events) == 0
|
#!/usr/bin/env python
#we make the class for base tree of nodes here
class tree:
    """Binary tree node; left/right/parent are indices into the global TREE list."""

    def __init__(self, left, right, parent):
        self.parent = parent
        self.left = left
        self.right = right

    def __repr__(self):
        # Added so the diagnostic print loops show data instead of object ids.
        return "tree(left=%r, right=%r, parent=%r)" % (self.left, self.right, self.parent)
#we make the class alphabet here
class alphabet:
    """A symbol with its frequency and the index of its leaf node."""

    def __init__(self, char, probability, leaf):
        self.char = char
        self.probability = probability
        self.leaf = leaf

    def __repr__(self):
        # Added so the diagnostic print loops show data instead of object ids.
        return "alphabet(char=%r, probability=%r, leaf=%r)" % (self.char, self.probability, self.leaf)
#we make the class forest here
class forest:
    """A partial tree in the forest: its total weight and root node index."""

    def __init__(self, weight, root):
        self.weight = weight
        self.root = root

    def __repr__(self):
        # Added so the diagnostic print loops show data instead of object ids.
        return "forest(weight=%r, root=%r)" % (self.weight, self.root)
#make function takes a value for left tree and right tree and returns the counter
def make(ltree, rtree):
    """Join the roots of two forests under a new TREE node; return its index."""
    new_index = len(TREE)
    TREE.append(tree(0, 0, 0))
    node = TREE[new_index]
    node.left = FOREST[ltree].root
    node.right = FOREST[rtree].root
    node.parent = 0
    # Re-parent both joined roots to the freshly created node.
    TREE[FOREST[rtree].root].parent = new_index
    TREE[FOREST[ltree].root].parent = new_index
    return new_index
def lightones(one,two):
    """Return the indices of the two lightest forests as (lightest, second).

    The `one`/`two` parameters are only seeds; they are overwritten before use.
    """
    # Seed: `one` gets the lighter of the first two entries, `two` the heavier.
    if FOREST[0].weight > FOREST[1].weight:
        one =1
        two = 0
    else:
        one = 0
        two = 1
    for i in range(2,len(FOREST)):
        if FOREST[i].weight < FOREST[two].weight:
            two = i
        # A new overall minimum demotes the old one to second place.
        if FOREST[i].weight < FOREST[one].weight:
            two = one
            one = i
    return (one,two)
def huffman():
    """Build the Huffman tree in-place over the global FOREST/TREE lists."""
    temp1 = 0
    temp2 = 0
    counter_temp = len(FOREST)
    #We use the huffman algorithm to obtain the minimum frequency nodes out of the min heap and replace the lowest two by a new node with the lowest two nodes as daughter nodes and repeat this.
    while counter_temp > 1:
        temp1, temp2 = lightones(temp1,temp2)
        new = make(temp1,temp2)
        # Merge the second-lightest forest into the lightest one...
        FOREST[temp1].weight = FOREST[temp1].weight + FOREST[temp2].weight
        FOREST[temp1].root = new
        # ...then fill the freed slot with the last forest and shrink the list.
        FOREST[temp2] = FOREST[counter_temp-1]
        del FOREST[counter_temp-1]
        counter_temp = counter_temp - 1
    return
# Fixed demo data: six symbols with their frequencies, one leaf per symbol.
ALPHABET = [alphabet('a',7,0), alphabet('b',9,1), alphabet('c',12,2), alphabet('d',22,3), alphabet('e',23,4), alphabet('f',27,5)]
TREE = [tree(0,0,0), tree(0,0,0), tree(0,0,0), tree(0,0,0), tree(0,0,0), tree(0,0,0)]
FOREST = [forest(7,0), forest(9,1), forest(12,2), forest(22,3), forest(23,4), forest(27,5)]


def _print_section(title, items):
    """Print a section header, one line per item, then a blank separator line."""
    print(title)
    for item in items:
        print(item)
    print("\n")


# The original repeated this three-loop dump twice verbatim; the helper above
# replaces six near-identical while-loops with identical output.
print("For the first run")
_print_section("TREE would be equal:", TREE)
_print_section("ALPHABET would be equal:", ALPHABET)
_print_section("FOREST would be equal:", FOREST)

huffman()

print("Result would be:")
print("\n")
_print_section("TREE would be equal:", TREE)
_print_section("ALPHABET would be equal:", ALPHABET)
_print_section("FOREST would be equal:", FOREST)
|
#Confeccione un algoritmo en diagrama de flujo que,
# al leer el neto de una factura,
# calcule el I.V.A. y de cómo salida el total de la factura.
# Read the net amount of an invoice, compute the 19% VAT (I.V.A.) and
# print the invoice total.
IVA_RATE = 0.19

print("Ingrese el Valor NETO de la Factura")
neto = int(input())
iva = int(neto * IVA_RATE)
# Equivalent to int(neto * (1 + IVA_RATE)) for integer `neto`.
total = neto + iva
# The original printed the same message twice (an older comma-style print was
# left in below the f-string one); keep only the f-string version.
print(f"El Total de la Factura es {total}, siendo el valor del IVA: {iva}")
# -*- coding: utf-8 -*-
import re
from django import forms
from django.contrib.auth import forms as django_forms
from django.http.request import HttpRequest
from django.urls import reverse
from easy_select2 import select2_modelform, apply_select2
from captcha.fields import CaptchaField
from django.contrib import auth
from . import utils, dbutils
from . import models
class LoginForm(forms.Form):
    """Username/password login form protected by a captcha."""

    username = forms.CharField(max_length=254)
    password = forms.CharField(max_length=254)
    captcha = CaptchaField()

    # Set by clean(); stays None until authentication succeeds. Previously it
    # was only assigned inside clean(), so get_user() raised AttributeError
    # when validation failed before reaching the authenticate() call.
    user_cache = None

    def clean(self):
        """Authenticate the credentials; raise ValidationError on failure."""
        super(LoginForm, self).clean()
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            self.user_cache = auth.authenticate(username=username, password=password)
            if self.user_cache is None:
                msg = u"用户名或者密码错误"
                raise forms.ValidationError(msg)
        return self.cleaned_data

    def get_user(self):
        """Return the authenticated user, or None if clean() did not succeed."""
        return self.user_cache
class RegisterForm(forms.Form):
    """Registration form: phone number as username, gated by an invite code."""
    username = forms.CharField(max_length=254)  # mobile phone number
    name = forms.CharField(max_length=254)      # real name (Chinese characters)
    sex = forms.CharField(max_length=254)
    password = forms.CharField(max_length=254)
    password2 = forms.CharField(max_length=254)  # confirmation
    invite_code = forms.CharField(max_length=254)
    captcha = CaptchaField()
    def clean_username(self):
        """Require a well-formed mainland-China mobile number."""
        username = self.cleaned_data["username"]
        username = username.strip()
        pattern = re.compile(r"^(13[0-9]|14[579]|15[0-3,5-9]|16[6]|17[0135678]|18[0-9]|19[89])\d{8}$")
        if not pattern.match(username):
            msg = u"手机格式不对"
            raise forms.ValidationError(msg)
        return username
    def clean_name(self):
        """Require a name of at least two Chinese characters."""
        name = self.cleaned_data["name"]
        name = name.strip()
        pattern = re.compile(u'[\u4e00-\u9fa5]+')
        if not pattern.match(name) or len(name) < 2:
            msg = u"姓名必须为汉字"
            raise forms.ValidationError(msg)
        return name
    def clean_invite_code(self):
        """Validate the invite code and resolve the inviting (father) user.

        Accepted lengths: 6 or 32 (code) or 11 (inviter's phone number).
        """
        invite_code = self.cleaned_data["invite_code"]
        invite_code = invite_code.strip()
        lens = len(invite_code)
        if lens not in (6, 32, 11):
            msg = u"邀请码格式不对"
            raise forms.ValidationError(msg)
        is_phone = lens == 11
        self.father_user = utils.get_vuser_by_code(invite_code, is_phone)
        if not self.father_user:
            msg = u"邀请码不存在"
            raise forms.ValidationError(msg)
        return invite_code
    def clean(self):
        """Cross-field checks: password confirmation and username uniqueness."""
        super(RegisterForm, self).clean()
        password = self.cleaned_data.get('password')
        password2 = self.cleaned_data.get('password2')
        if password and password != password2:
            msg = u"两次密码不一致"
            raise forms.ValidationError(msg)
        username = self.cleaned_data.get("username")
        if not username:
            msg = u"手机格式不对"
            raise forms.ValidationError(msg)
        else:
            username = username.strip()
            user = utils.get_user_by_username(username)
            if user is not None:
                msg = u"该用户已被注册"
                raise forms.ValidationError(msg)
        return self.cleaned_data
    def get_father_user(self):
        """Return the inviting user resolved by clean_invite_code()."""
        return self.father_user
|
# Read `qtd` lines of "code grade" pairs; print the code of the highest grade
# when it reaches 8, otherwise a failure message.
maior = 0.0
codigo = 0
qtd = int(input())
for _ in range(qtd):
    cod, nota = input().split()
    nota_val = float(nota)
    if nota_val > maior:
        maior = nota_val
        codigo = cod
if maior >= 8:
    print(codigo)
else:
    print('Minimum note not reached')
|
import socket
import json
import sys
import time
# One representative log record; reused for every message in the benchmark.
datem = { "apid":551, "RequestNumber":300001, "Code": 9807, "CustId": 72752308, "TranCode": 7288, "IP1": 231, "IP2": 233, "IP3": 205, "IP4": 127, "gender": 2, "Eng_1": "G", "dele_time": "2018-06-26 17:47:32", "exec_time": "2018-06-26 17:47:34", "resp_time": "2018-06-26 17:47:36", "ClientIP": "231.233.205.127", "uid": "G200300001" }
HOST = '192.168.1.201'
PORT = 5000
msg = {'@message': datem, '@tags': ['python', 'test(1000, 300000)']}
lendatem=len(json.dumps(msg)+'\n')
print "len(message)="+str(lendatem)
# Number of 30000-message batches needed to push ~4 GiB in total.
# NOTE(review): Python 2 integer division -- any remainder bytes are dropped.
counter = 1 + 4*1024*1024*1024 / (30000 * lendatem)
#counter = 10
for loop in range(0,counter):
    then = time.time()
    # A fresh connection per batch; exit codes 1/2 distinguish the failure mode.
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    except socket.error, msg:
        sys.stderr.write("[ERROR] %s\n" % msg[1])
        sys.exit(1)
    try:
        sock.connect((HOST, PORT))
    except socket.error, msg:
        sys.stderr.write("[ERROR] %s\n" % msg[1])
        sys.exit(2)
    print "total records to send:"+str(counter)
    # Send one batch of 30000 newline-delimited JSON records, then report the rate.
    for count in range(0, 30000):
        msg = {'@message': datem, '@tags': ['python', 'test('+str(loop)+','+str(count)+')']}
        sock.send(json.dumps(msg)+'\n')
    sock.close()
    now = time.time()
    diff = now - then
    print "total counter="+str(loop)+'/'+str(counter)+", "+str(int(30000/diff))+" per second."
sys.exit(0)
|
# Generated by Django 3.0.2 on 2020-01-25 07:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine max_length on Event.location and Event.name."""
    dependencies = [
        ('gov', '0008_category_email'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='location',
            field=models.CharField(max_length=200),
        ),
        migrations.AlterField(
            model_name='event',
            name='name',
            field=models.CharField(max_length=100),
        ),
    ]
|
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from brokenapp.lib.base import BaseController, render
import os
log = logging.getLogger(__name__)
class LogtoobigController(BaseController):
    def index(self):
        """Emit 99 log lines, then render log.mako with the access.log size."""
        # Return a rendered template
        #return render('/logtoobig.mako')
        # or, return a string
        for i in range(1,100):
            log.info("log")
        # NOTE(review): relies on 'access.log' existing in the process CWD;
        # os.path.getsize raises OSError otherwise -- confirm intended.
        c.size = os.path.getsize('access.log')
        log.info(c.size)
        return render('/log.mako')
|
#!/usr/bin/env python3
# created by: Trinity Armstrong
# created on: October 2019
# This module contains constants for the "Number Guessing Game"
CORRECT_NUMBER = 5
|
from abc import ABC, abstractmethod
class Component(ABC):
    """Abstract base for components that can be wrapped by decorators."""

    @abstractmethod
    def operation(self):
        """Produce this component's string result."""
class ReturnComponent(Component):
    """Concrete component that returns a fixed greeting string."""

    def operation(self):
        return 'Меня зовут Компонент'
class Decorator(Component):
    """Base decorator: holds a component and forwards operation() to it."""

    _component = None

    def __init__(self, component: Component):
        self._component = component

    @property
    def component(self):
        """The wrapped component."""
        return self._component

    def operation(self):
        # Delegate via the property rather than the private attribute.
        return self.component.operation()
class DecoratorA(Decorator):
    """Concrete decorator A: labels the wrapped component's result."""

    def operation(self):
        return 'Декоратор А({})'.format(self.component.operation())
class DecoratorB(Decorator):
    """Concrete decorator B: labels the wrapped component's result."""

    def operation(self):
        return 'Декоратор Б({})'.format(self.component.operation())
def client_code(component: Component):
    """Print the result of any component, bare or decorated (no newline)."""
    print('РЕЗУЛЬТАТ: {}'.format(component.operation()), end='')
if __name__ == '__main__':
    # Demo: run a bare component, then the same component double-wrapped.
    plain = ReturnComponent()
    print('Клиент: У меня есть простой компонент:')
    client_code(plain)
    print('\n')
    wrapped = DecoratorB(DecoratorA(plain))
    print('Клиент: Теперь у меня есть обернутый компонент:')
    client_code(wrapped)
#Kinect v2 stereo calibration
import numpy as np
import cv2
import shelve
import os
import const
from glob import glob
def getImagePoints(imageNames):
    """Collect the stored 'img_points' entry from each shelve file.

    Args:
        imageNames: list of shelve file paths (without shelve's own suffix).

    Returns:
        List of the 'img_points' values, in the order of imageNames.
    """
    img_points = []
    for fileName in imageNames:
        # Fixed: the shelve handle was never closed (file handle leak when
        # calibrating many images). Shelf supports the context manager protocol.
        with shelve.open(fileName) as store:
            img_points.append(store['img_points'])
    return img_points
def stereocalibrate():
    """Stereo-calibrate the Kinect RGB camera against the IR camera.

    Loads the previously detected chessboard image points and the per-camera
    intrinsics (shelve files), runs cv2.stereoCalibrate, and stores the
    resulting rotation R and translation T under const.rgbToIR.
    """
    # load img_points produced by the per-camera detection step
    rgbImages = glob(const.rgbFolder + '*.dat')
    irImages = glob(const.irFolder + '*.dat')
    rgb_img_points = getImagePoints(rgbImages)
    ir_img_points = getImagePoints(irImages)
    # one identical chessboard object-point grid per image pair
    pattern_points = np.zeros((len(irImages), np.prod(const.pattern_size), 3), np.float32)
    pattern_points[:, :, :2] = np.indices(const.pattern_size).T.reshape(-1, 2)
    pattern_points *= const.square_size
    # load per-camera intrinsics; fixed: the shelves were never closed
    with shelve.open(const.rgbCameraIntrinsic + '/RGB') as rgbCamera, \
            shelve.open(const.irCameraIntrinsic + '/IR') as irCamera:
        retval, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, R, T, E, F = cv2.stereoCalibrate(
            pattern_points,
            ir_img_points,
            rgb_img_points,
            irCamera['camera_matrix'],
            irCamera['dist_coefs'],
            rgbCamera['camera_matrix'],
            rgbCamera['dist_coefs'],
            const.ir_image_size)
    print("error:" + str(retval))  # retval is the RMS re-projection error
    # save extrinsic calibration results ('n' always creates a fresh db)
    with shelve.open(const.rgbToIR + '/rgbToIR', 'n') as camera_file:
        camera_file['R'] = R
        camera_file['T'] = T
# Minimal hard-coded credential check (demo only — not real authentication).
name = input('usrname:')
pwd = input('password:')
is_valid = (name == 'alex') and (pwd == '123')
print('yes' if is_valid else 'no')
|
from scattertext.Scalers import dense_rank
from scattertext.termscoring.DeltaJSDivergence import DeltaJSDivergence
from scattertext.termcompaction.AssociationCompactor import JSDCompactor
from scattertext import SampleCorpora, whitespace_nlp_with_sentences, produce_frequency_explorer, RankDifference
from scattertext.CorpusFromPandas import CorpusFromPandas
# Load the 2012 US political convention speeches sample corpus.
convention_df = SampleCorpora.ConventionData2012.get_data()
# Build a unigram corpus compacted to the 1000 terms kept by the JSD compactor.
corpus = CorpusFromPandas(
    convention_df,
    category_col='party',
    text_col='text',
    nlp=whitespace_nlp_with_sentences
).build().get_unigram_corpus().compact(JSDCompactor(1000))
# Per-term metadata: dense ranks within each party plus a rank-difference score.
term_etc_df = corpus.get_term_freq_df('').assign(
    DemocraticRank=lambda df: dense_rank(df['democrat']),
    RepublicanRank=lambda df: dense_rank(df['republican']),
    RankDiff=lambda df: RankDifference().get_scores(df['democrat'], df['republican']),
)
# JavaScript snippet that renders the per-term hover preview from the metadata above.
get_custom_term_html = '(function(x) {return "Term: " + x.term + "<span class=topic_preview>"' + ' '.join(
    f''' + "<br>{name}: " + x.etc.{key}.toFixed(5)'''
    for name, key in
    [('Democratic Rank', 'DemocraticRank'),
     ('Republican Rank', 'RepublicanRank'),
     ('Rank Difference Score', 'RankDiff')]
) + '+ "</span>" ;})'
html = produce_frequency_explorer(
    corpus,
    category='democrat',
    category_name='Democratic',
    not_category_name='Republican',
    minimum_term_frequency=0,
    pmi_threshold_coefficient=0,
    width_in_pixels=1000,
    metadata=convention_df['speaker'],
    term_scorer=DeltaJSDivergence(),
    transform=dense_rank,
    term_metadata_df=term_etc_df,
    get_custom_term_html=get_custom_term_html,
    enable_term_category_description=False,
    header_names={'upper': 'Top Dem. RankDiff',
                  'lower': 'Top GOP RankDiff'},
    # JS comparators: 'upper' sorts descending, 'lower' ascending by RankDiff.
    header_sorting_algos={'upper': '(function(a, b) {return b.etc.RankDiff - a.etc.RankDiff})',
                          'lower': '(function(a, b) {return a.etc.RankDiff - b.etc.RankDiff})'}
)
# Write the self-contained interactive HTML next to this script.
open('./demo_JSDivergence.html', 'wb').write(html.encode('utf-8'))
print('Open ./demo_JSDivergence.html in Chrome or Firefox.')
|
import forecastio
from geopy.geocoders import Nominatim
from utils.mongo import Mongo
import re
from .baseprovider import BaseProvider
# Trigger words per language: a query that is just "weather"/"погода" uses the
# user's stored location instead of geocoding the query text.
local = {
    'ru': re.compile('(?i)погода'),
    'en': re.compile('(?i)weather')
}
class WeatherProvider(BaseProvider):
    """Answers weather queries, either for the user's stored location (query
    is just the trigger word "weather"/"погода") or for a geocoded place."""

    @staticmethod
    def get(query, config, params=None, lang='en'):
        """Return a forecast-summary dict, or {'content': 'nan'} on failure.

        Args:
            query: raw user query text.
            config: provider config; must contain 'forecastio_api_key'.
            params: optional request params; 'user_id' enables the
                stored-location path. Fixed: the default used to be a shared
                mutable dict ({}); None is equivalent for callers.
            lang: language key into the module-level `local` regex table.
        """
        params = params or {}
        if local[lang].match(query) and 'user_id' in params:
            # Bare "weather" query: look up the user's saved coordinates.
            mongo = Mongo('users')
            user_location = mongo.get_user_location(params['user_id'])
            lat, lng = user_location['lat'], user_location['long']
        else:
            # Otherwise treat the query text as a place name and geocode it.
            geolocator = Nominatim()
            location = geolocator.geocode(query)
            if location is None:
                return {
                    'content': 'nan'
                }
            lat, lng = location.latitude, location.longitude
        forecast = forecastio.manual('https://api.forecast.io/forecast/%s/%s,%s?units=%s&lang=%s' % (
            config['forecastio_api_key'], lat, lng, "auto", lang
        ))
        if forecast is None:
            return {
                'content': 'nan'
            }
        return {
            'type': 'text',
            'content': forecast.daily().summary
        }
|
# project euler problem 5
# smallest multiple divisible by 1 through 20
from math import gcd
# lcm is from python 3.9 and I've got 3.8.5 sadly
def lcm(x, y):
    """Return the least common multiple of two positive integers.

    Fixed: the original used float division, `int((x * y) / gcd(x, y))`,
    which silently loses precision once the product exceeds 2**53.
    Integer floor division keeps the result exact for arbitrarily large ints.
    """
    return x * y // gcd(x, y)
# Fold lcm over 1..20: the smallest number divisible by every value in range.
result = 1
for divisor in range(1, 21):
    result = lcm(result, divisor)
print(result)
import pygame
from towers import Tower
import os
import numpy as np
from time import time
class CannonTower(Tower):
    """Cannon tower: fires straight-line bullets at enemies in range."""

    def __init__(self, x, y):
        super().__init__(x, y)
        # Fixed: tower_imgs was initialized twice in the original; once suffices.
        self.tower_imgs = []
        self.archer_imgs = []
        self.archer_count = []
        self._load_offline_images()
        self.last_hit_timer = time()
        # attack animation parameters
        self.attack_time = 0.15  # seconds the shot animation lasts
        self.bullet_from_x = self.x - self.width // 3
        self.bullet_from_y = self.y + self.height // 4
        # Tower attributes, indexed by level (0-based)
        self.level_max = 3
        self._range = [100, 120, 150]
        self._damage = [5, 8, 12]
        self.attack_interval = 0.3  # second
        self._price = [100, 200, 300]
        self._sell_price = [50, 100, 150]

    def _load_offline_images(self):
        """Load and orient the tower sprites and the 20x20 bullet sprite."""
        img = super().load_img("cannon_2.png")
        img = pygame.transform.flip(img, True, False)
        self.tower_imgs.append(img)
        img = super().load_img("cannon_3.png")
        # NOTE(review): this sprite is flipped on both axes while cannon_2 is
        # only flipped horizontally — confirm that is intentional.
        img = pygame.transform.flip(img, True, True)
        self.tower_imgs.append(img)
        path_to_project = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
        attack_img = pygame.image.load(os.path.join(path_to_project, "assets", "towers", "bullet.png"))
        # pygame image [width, height]
        self.attack_img = pygame.transform.scale(attack_img, (20, 20))

    def draw(self, win):
        """
        Draw attack range as circle when the flag is true
        Args:
            win:
        Returns:
        """
        self._draw(win)

    def attack(self, enemies):
        """
        attacks an enemy in the enemy list, modifies the list
        Args:
            enemies:
        """
        self._attack(enemies)

    def draw_attack_annotation(self, win, interval):
        """Render the bullet flying along a straight trajectory."""
        self._draw_attack_annotation(win, interval, traj='straight')
import numpy as np
import argparse
import os
import os.path as osp
import torch
import torch.utils.data as data
import glob
import random
from TorchTools.DataTools.Prepro import aug_img, aug_img_np, crop_img, crop_img_np, downsample_tensor, rggb_prepro
from TorchTools.DataTools.FileTools import tensor2np
import torchvision.transforms.functional as TF
from PIL import Image
from datasets import unprocess, process
from datasets.unprocess import mosaic
from tqdm import tqdm
import h5py
import cv2
from scipy.io import loadmat, savemat
EXT = ['png', 'PNG', 'tiff', 'tif', 'TIFF', 'JPG', 'jgp', 'bmp', 'BMP']
def set_seed(seed=0):
    """Seed every RNG in use (random, numpy, torch CPU and CUDA) and force
    deterministic cuDNN so generated benchmarks are reproducible."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
    # Deterministic convolution algorithms; disable the autotuner.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
class LoadBenchmark(data.Dataset):
    """
    Benchmark dataset built from RGB ground-truth images.

    On first use it synthesizes the degraded inputs selected by `in_type`
    (substring flags: 'noisy', 'lr', 'raw'/'linrgb'/'rgb', 'lin') for every
    image under <data_dir>/gt, then caches the whole sample list in a single
    .pth file so later runs are fast and reproducible.
    """

    def __init__(self,
                 data_dir='data/benchmark',
                 downsampler='bic', scale=2,
                 in_type='noisy_lr_raw',
                 mid_type='raw',  # or None
                 out_type='rgb',
                 ext='png',
                 noise_model='gp',
                 sigma=10
                 ):
        """
        Args:
            data_dir: root folder; ground truth is read from <data_dir>/gt.
            downsampler: method name passed to downsample_tensor (e.g. 'bic').
            scale: super-resolution factor; images are cropped to multiples
                of scale*2 so the Bayer mosaic stays aligned after SR.
            in_type: degradation selector, parsed by substring membership.
            mid_type / out_type: stored for callers; not used internally here.
            ext: file extension of the ground-truth images.
            noise_model: a model containing 'p' uses the shot/read (Poisson-
                Gaussian) model; otherwise plain Gaussian with `sigma`.
            sigma: Gaussian noise level in [0, 255]; stored normalized to [0, 1].
        """
        super(LoadBenchmark, self).__init__()
        self.data_dir = data_dir
        self.scale = scale
        self.downsampler = downsampler
        self.noise_model = noise_model
        self.sigma = sigma / 255.
        self.in_type = in_type
        self.mid_type = mid_type
        self.out_type = out_type
        filename = f'x{scale}_{noise_model}' if 'p' in noise_model else f'x{scale}_{noise_model}x{sigma}'
        self.save_dir = osp.join(data_dir, filename)
        self.src_dir = osp.join(data_dir, 'gt')
        # Fixed: the cache path previously ignored the computed `filename`
        # ('(unknown).pth' placeholder), so every noise/scale configuration
        # would have shared a single cache file.
        self.data_path = osp.join(data_dir, f'{filename}.pth')
        os.makedirs(self.save_dir, exist_ok=True)
        self.ext = ext
        self.datas = []
        self.data_lists = []
        if not osp.exists(self.data_path):
            print('===> {} does not exist, generate now'.format(self.data_path))
            self.generate_benchmark()
        else:
            self.datas = torch.load(self.data_path)

    def generate_benchmark(self):
        """Synthesize inputs for every GT image and cache them to data_path."""
        # fixed seed so the generated benchmark is identical across machines
        set_seed(0)
        self.data_lists = sorted(
            glob.glob(osp.join(self.src_dir, '*' + self.ext))
        )
        for i in tqdm(range(len(self.data_lists))):
            self.datas.append(self.process_img(i))
        torch.save(self.datas, self.data_path)
        print('===> save benchmark dataset to {}'.format(self.data_path))

    def process_img(self, index):
        """Build the full sample dict (GT plus degraded variants) for one image."""
        rgb = Image.open(self.data_lists[index]).convert('RGB')
        rgb = TF.to_tensor(rgb)
        C, H, W = rgb.shape
        # crop so H and W are divisible by 2*scale (Bayer pattern + SR factor)
        if (H % (self.scale * 2) != 0) or (W % (self.scale * 2) != 0):
            H = H - H % (self.scale * 2)
            W = W - W % (self.scale * 2)
            rgb = rgb[:, :H, :W]
        data = {'rgb': rgb}
        if 'lr' in self.in_type:
            lr_rgb = downsample_tensor(rgb, scale=self.scale, downsampler=self.downsampler)
            lr_rgb = torch.clamp(lr_rgb, 0., 1.)
            data.update({'lr_rgb': lr_rgb})
        # ---------------------------------
        # unprocess step
        # If 'raw' or 'lin' is requested we need to unprocess (sRGB -> camera
        # space); with a 'p' noise model the noise is defined on the raw
        # signal, so unprocessing is required there too.
        if ('raw' in self.in_type and 'p' in self.noise_model) or 'lin' in self.in_type:
            rgb2cam = unprocess.random_ccm()
            cam2rgb = torch.inverse(rgb2cam)
            rgb_gain, red_gain, blue_gain = unprocess.random_gains()
            metadata = {
                'ccm': cam2rgb,
                'rgb_gain': rgb_gain,
                'red_gain': red_gain,
                'blue_gain': blue_gain,
            }
            raw, linrgb = unprocess.unprocess(rgb, rgb2cam, rgb_gain, red_gain, blue_gain)
            data.update({'metadata': metadata})
            data.update({'raw': raw, 'linrgb': linrgb})
            if 'lr' in self.in_type:
                lr_raw, lr_linrgb = unprocess.unprocess(lr_rgb, rgb2cam, rgb_gain, red_gain, blue_gain)
                data.update({'lr_raw': lr_raw, 'lr_linrgb': lr_linrgb})
        if ('raw' in self.in_type and 'p' not in self.noise_model) or 'lin' in self.in_type:
            # plain Gaussian model: mosaic directly, no camera-space unprocess
            raw = mosaic(rgb.clone())
            data.update({'raw': raw})
            if 'lr' in self.in_type:
                lr_raw = mosaic(lr_rgb.clone())
                data.update({'lr_raw': lr_raw})
        if 'noisy' in self.in_type and 'p' in self.noise_model:
            shot_noise, read_noise = unprocess.random_noise_levels()
            if 'raw' in self.in_type:  # add noise to the bayer raw image and denoise it
                if 'lr' in self.in_type:
                    # Approximation of variance is calculated using the noisy
                    # image (rather than the clean image), since that is what
                    # will be available during evaluation.
                    noisy_lr_raw = unprocess.add_noise(lr_raw, shot_noise, read_noise)
                    data.update({'noisy_lr_raw': noisy_lr_raw})
                else:
                    noisy_raw = unprocess.add_noise(raw, shot_noise, read_noise)
                    data.update({'noisy_raw': noisy_raw})
            elif 'linrgb' in self.in_type:  # also add noise on raw but denoise on RGB.
                if 'lr' in self.in_type:
                    noisy_lr_linrgb = unprocess.add_noise(lr_linrgb, shot_noise, read_noise)
                    data.update({'noisy_lr_linrgb': noisy_lr_linrgb})
                else:
                    noisy_linrgb = unprocess.add_noise(linrgb, shot_noise, read_noise)
                    data.update({'noisy_linrgb': noisy_linrgb})
            elif 'rgb' in self.in_type:
                if 'lr' in self.in_type:
                    noisy_lr_rgb = unprocess.add_noise(lr_rgb, shot_noise, read_noise)
                    data.update({'noisy_lr_rgb': noisy_lr_rgb})
                else:
                    noisy_rgb = unprocess.add_noise(rgb, shot_noise, read_noise)
                    # Fixed: this noisy sRGB image was stored under the wrong
                    # key 'noisy_linrgb'; 'noisy_rgb' matches the Gaussian
                    # branch below and the 'lr' case above.
                    data.update({'noisy_rgb': noisy_rgb})
            noise = {
                'read_noise': read_noise,
                'shot_noise': shot_noise,
            }
            data.update({'noise': noise})
        if 'noisy' in self.in_type and 'p' not in self.noise_model:
            if 'raw' in self.in_type:  # add noise to the bayer raw image and denoise it
                if 'lr' in self.in_type:
                    # Approximation of variance is calculated using the noisy
                    # image (rather than the clean image), since that is what
                    # will be available during evaluation.
                    noisy_lr_raw = lr_raw + torch.randn(lr_raw.size()).mul_(self.sigma)
                    variance = torch.ones(lr_raw[0:1].size()).mul_(self.sigma)
                    data.update({'noisy_lr_raw': noisy_lr_raw.clone(), 'variance': variance.clone()})
                else:
                    noisy_raw = raw + torch.randn(raw.size()).mul_(self.sigma)
                    variance = torch.ones(raw[0:1].size()).mul_(self.sigma)
                    data.update({'noisy_raw': noisy_raw.clone(), 'variance': variance.clone()})
            elif 'rgb' in self.in_type:
                if 'lr' in self.in_type:
                    noisy_lr_rgb = lr_rgb + torch.randn(lr_rgb.size()).mul_(self.sigma)
                    variance = torch.ones(lr_rgb[0:1].size()).mul_(self.sigma)
                    data.update({'noisy_lr_rgb': noisy_lr_rgb.clone(), 'variance': variance.clone()})
                else:
                    noisy_rgb = rgb + torch.randn(rgb.size()).mul_(self.sigma)
                    variance = torch.ones(rgb[0:1].size()).mul_(self.sigma)
                    data.update({'noisy_rgb': noisy_rgb.clone(), 'variance': variance.clone()})
        # save the noisy input for matlab-based baselines
        if 'noisy_lr_raw' in data.keys():
            filename = osp.basename(self.data_lists[index].split('.')[0] + '.mat')
            savemat(osp.join(self.save_dir, filename), {'noisy_lr_raw': data['noisy_lr_raw'].permute(1, 2, 0).numpy()})
        return data

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        # samples are fully precomputed; just index the cache
        return self.datas[index]
class LoadBenchmarkPixelShift(data.Dataset):
    """Benchmark dataset for PixelShift200 raw (RGGB) captures stored as .mat.

    Loads each .mat file, normalizes by sensor bit depth, rebuilds the linear
    RGB and sRGB ground truth, synthesizes the requested degraded inputs, and
    caches everything in one .pt file for reproducible evaluation.
    """

    def __init__(self,
                 data_path,
                 downsampler='bic', scale=2,
                 in_type='noisy_lr_raw',
                 mid_type='raw',  # or None
                 out_type='linrgb',
                 src_dir='data/benchmark/pixelshift/mat',
                 ext='mat',
                 bit=14,
                 save_rgb=True,
                 ):
        """
        Args mirror LoadBenchmark; additionally:
            src_dir: folder containing the source .mat captures.
            bit: sensor bit depth used to normalize raw values into [0, 1].
            save_rgb: when True (default, the original behavior) a PNG preview
                of each ground-truth sRGB image is written to <src_dir>/../gt.
                New backward-compatible parameter — fixes the TypeError raised
                when the __main__ driver passed save_rgb=...
        """
        super(LoadBenchmarkPixelShift, self).__init__()
        self.data_path = osp.join(data_path, f'pixelshift_{in_type}_{out_type}_x{scale}.pt')
        self.scale = scale
        self.downsampler = downsampler
        self.in_type = in_type
        self.mid_type = mid_type
        self.out_type = out_type
        self.src_dir = src_dir
        self.gt_dir = osp.join(osp.dirname(src_dir), 'gt')
        self.ext = ext
        self.bit = bit
        self.save_rgb = save_rgb
        self.datas = []
        self.data_lists = sorted(glob.glob(osp.join(self.src_dir, '*')))
        if not osp.exists(self.data_path):
            print('===> {} does not exist, generate now'.format(self.data_path))
            self.generate_benchmark()
        else:
            self.datas = torch.load(self.data_path)

    def generate_benchmark(self):
        """Process every source capture and cache the samples to data_path."""
        # fixed seed so the generated benchmark is reproducible
        set_seed(0)
        for i in tqdm(range(len(self.data_lists))):
            self.datas.append(self.process_img(i))
        torch.save(self.datas, self.data_path)
        print('===> save benchmark dataset to {}'.format(self.data_path))

    def process_img(self, index):
        """Build the sample dict for one capture."""
        img_path = self.data_lists[index]
        data = {}
        try:
            with h5py.File(img_path, 'r') as matfile:
                # be careful: mat files saved as v7.3 (HDF5) store matrices
                # transposed relative to MATLAB's layout.
                rggb = np.asarray(matfile['raw']).astype(np.float32) / (2 ** self.bit - 1)
                rggb = np.transpose(rggb, (2, 1, 0))
                matainfo = matfile['metadata']
                matainfo = {'colormatrix': np.transpose(matainfo['colormatrix']),
                            'red_gain': matainfo['red_gain'],
                            'blue_gain': matainfo['blue_gain']
                            }
                ccm, red_g, blue_g = process.metadata2tensor(matainfo)
                metadata = {'ccm': ccm, 'red_gain': red_g, 'blue_gain': blue_g}
                data.update({'metadata': metadata})
        except Exception:
            # Fallback for pre-v7.3 mat files that h5py cannot open.
            # Fixed: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt.
            # NOTE(review): this path never sets `metadata`, so the rgb2srgb
            # call below would raise NameError — confirm whether old-format
            # files actually occur in this dataset.
            matfile = loadmat(img_path)
            rggb = np.asarray(matfile['raw']).astype(np.float32) / (2 ** self.bit - 1)
        # Collapse the two green channels of the RGGB stack into one (R, G, B).
        linrgb = np.stack((rggb[:, :, 0], np.mean(rggb[:, :, 1:3], axis=-1), rggb[:, :, 3]), axis=2)
        linrgb = TF.to_tensor(linrgb)
        linrgb = torch.clamp(linrgb, 0., 1.)
        data.update({'linrgb': linrgb})
        # get the rgb gt for the pixelshift200 dataset.
        rgb = process.rgb2srgb(linrgb.unsqueeze(0), metadata['red_gain'], metadata['blue_gain'], metadata['ccm']).squeeze(0)
        if self.save_rgb:
            save_name = osp.basename(img_path).split('.')[0] + '.png'
            save_path = osp.join(self.gt_dir, save_name)
            ndarr = rgb.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
            cv2.imwrite(save_path, cv2.cvtColor(ndarr, cv2.COLOR_RGB2BGR), [cv2.IMWRITE_PNG_COMPRESSION, 0])
        data.update({'rgb': rgb})
        if 'lr' in self.in_type:
            lr_linrgb = downsample_tensor(linrgb, scale=self.scale, downsampler=self.downsampler)
            lr_linrgb = torch.clamp(lr_linrgb, 0., 1.)
            data.update({'lr_linrgb': lr_linrgb})
        # ---------------------------------
        # unprocess step: mosaic the linear RGB into Bayer raw when requested
        if 'raw' in self.in_type:
            raw = unprocess.mosaic(linrgb)
            data.update({'raw': raw})
            if 'lr' in self.in_type:
                lr_raw = unprocess.mosaic(lr_linrgb)
                data.update({'lr_raw': lr_raw})
        if 'noisy' in self.in_type:
            shot_noise, read_noise = unprocess.random_noise_levels()
            if 'raw' in self.in_type:  # add noise to the bayer raw image and denoise it
                if 'lr' in self.in_type:
                    # Approximation of variance is calculated using the noisy
                    # image (rather than the clean image), since that is what
                    # will be available during evaluation.
                    noisy_lr_raw = unprocess.add_noise(lr_raw, shot_noise, read_noise)
                    data.update({'noisy_lr_raw': noisy_lr_raw})
                else:
                    noisy_raw = unprocess.add_noise(raw, shot_noise, read_noise)
                    data.update({'noisy_raw': noisy_raw})
            elif 'linrgb' in self.in_type:  # also add noise on raw but denoise on RGB.
                if 'lr' in self.in_type:
                    noisy_lr_linrgb = unprocess.add_noise(lr_linrgb, shot_noise, read_noise)
                    data.update({'noisy_lr_linrgb': noisy_lr_linrgb})
                else:
                    noisy_linrgb = unprocess.add_noise(linrgb, shot_noise, read_noise)
                    data.update({'noisy_linrgb': noisy_linrgb})
            noise = {
                'read_noise': read_noise,
                'shot_noise': shot_noise,
            }
            data.update({'noise': noise})
        return data

    def __len__(self):
        return len(self.datas)

    def __getitem__(self, index):
        # samples are fully precomputed; just index the cache
        return self.datas[index]
if __name__ == '__main__':
    """
    Given a batch of images (RGB in PNG or RGGB in MAT),
    load all of them into .pth file, and use them as evaluation datasets.
    """
    parser = argparse.ArgumentParser(description='Evaluation Data preparation')
    parser.add_argument('--in_type', type=str, default='noisy_lr_raw')
    parser.add_argument('--mid_type', type=str, default='raw')
    parser.add_argument('--out_type', type=str, default='linrgb')
    parser.add_argument('--ext', type=str, default='png')
    parser.add_argument('--scale', type=int, default=2,
                        help='default scale ratio for the SR')
    parser.add_argument('--save_rgb', action='store_true',
                        help='save rgb images of the desired dataset for preview')
    parser.add_argument('--src_dir',
                        default='data/benchmark/pixelshift200/gt',
                        help='path to the original data')
    args = parser.parse_args()
    # Resolve src_dir relative to the project root (parent of this file's dir).
    project_dir = osp.dirname(osp.dirname(osp.realpath(__file__)))
    src_dir = osp.join(project_dir, args.src_dir)
    if 'pixelshift' in args.src_dir.lower():
        # NOTE(review): verify this keyword set matches
        # LoadBenchmarkPixelShift.__init__ (save_rgb in particular) and that
        # passing None as data_path is handled there.
        benchmark_data = LoadBenchmarkPixelShift(None,
                                                 in_type=args.in_type,
                                                 mid_type=args.mid_type,
                                                 out_type=args.out_type,
                                                 scale=args.scale,
                                                 src_dir=src_dir,
                                                 save_rgb=args.save_rgb,
                                                 )
    else:
        # NOTE(review): verify this keyword set matches LoadBenchmark.__init__
        # (it has no src_dir parameter) and that data_dir=None is handled.
        benchmark_data = LoadBenchmark(None,
                                       in_type=args.in_type,
                                       mid_type=args.mid_type,
                                       out_type=args.out_type,
                                       scale=args.scale,
                                       src_dir=src_dir,
                                       ext=args.ext)
    # # Code for debug the PixelShift dataset
    # from datasets import process
    # def vis_numpy(x):
    #     import matplotlib.pyplot as plt
    #     plt.imshow(x, cmap='gray')
    #     plt.show()
    #
    #
    # def vis_tensor(tensor):
    #     from torchvision import utils
    #     import matplotlib.pyplot as plt
    #
    #     grid = utils.make_grid(tensor)
    #     ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
    #     # vis_numpy(ndarr)
    #     plt.imshow(ndarr, cmap='gray')
    #     plt.show()
    #
    #
    #
    # def raw_unpack(input):
    #     import torch.nn as nn
    #     demo = nn.PixelShuffle(2)
    #     return demo(input)
    # # show the rgb img:
    # # vis_gray(rgb)
    # vis_gray(linrgb.permute(1,2,0))
    # vis_gray(lr_linrgb.permute(1,2,0))
    #
    # # show the gt raw:
    # vis_tensor(raw_unpack(raw.unsqueeze(0)))
    #
    # # show the noisy raw:
    # vis_gray(raw_unpack(noisy_lr_raw.unsqueeze(0)).squeeze())
|
# remoteStorage OAuth endpoint for the 5apps-hosted account 'cyroxx'.
ENDPOINT = 'https://5apps.com/rs/oauth/cyroxx'
#ENDPOINT = 'https://heahdk.net/authorizations/new?login=cyroxx'
CLIENT = 'Vault'
# Requested scope: read/write access to the 'vault' category.
SCOPES = ['vault:rw']
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.views import View
import random
import datetime
from core import models
class IndexPageView(View):
    """Landing page: current time plus all books and all categories."""

    def get(self, request, *args, **kwargs):
        now = datetime.datetime.now()
        context = {
            'hour': now.hour,
            'minute': now.minute,
            'books': models.Item.objects.all(),
            'categories': models.Categories.objects.all(),
        }
        return render(request, 'index.html', context)
class Basket(View):
    """Shows the basket rows belonging to the session's user."""

    def get(self, request):
        # Each Basket row links one user to one item.
        user_items = models.Basket.objects.filter(user_id=request.session.get('user_id'))
        context = {
            'user_basket': user_items,
            'categories': models.Categories.objects.all(),
        }
        return render(request, 'basket.html', context)
class AddBasket(View):
    """Adds a single item to the current user's basket, then returns home."""

    def get(self, request, item_id):
        item = models.Item.objects.get(id=item_id)
        user = models.User.objects.get(id=request.session.get('user_id'))
        models.Basket(user_id=user, item_id=item).save()
        # redirect back to the main page
        return redirect('/')
class RemoveFromBasket(View):
    """Removes one basket row for the current user and item.

    Fixed: .first() can return None (stale link / double click), which
    previously crashed with AttributeError on .delete(); that case is now
    a harmless no-op.
    """

    def get(self, request, item_id):
        item = models.Item.objects.get(id=item_id)
        user = models.User.objects.get(id=request.session.get('user_id'))
        entry = models.Basket.objects.filter(user_id=user, item_id=item).first()
        if entry is not None:
            entry.delete()
        return redirect('/basket')
class ConfirmOrder(View):
    """'Checkout': logs the basket contents, then empties the basket."""

    def get(self, request):
        basket_items = models.Basket.objects.filter(user_id=request.session.get('user_id'))
        for entry in basket_items:
            print(f'{entry.item_id.name} , id: {entry.item_id.id}')
        basket_items.delete()
        return redirect('/basket')
class Delivery(View):
    """Static delivery-hours page (plain text, no template)."""

    def get(self, request):
        # Triple-quoted response body reproduced exactly as displayed.
        return HttpResponse('''
delivery works 8.00 - 23.00
monday - friday
''')
class Discounts (View):
    """Static placeholder page for discounted books."""

    def get(self, request):
        return HttpResponse('there are some books with discounts')
class AboutMe(View):
    """About page, with the category menu available in context."""

    def get(self, request):
        return render(request, 'about_me.html', {
            'categories': models.Categories.objects.all(),
        })
class Payment(View):
    """Static placeholder page listing payment methods."""

    def get (self, request):
        # Runtime string kept as-is (Ukrainian: "payment methods").
        return HttpResponse('способи оплати')
class Item(View):
    """Placeholder item page: greets by the item name from the URL."""

    def get(self, request, name):
        return HttpResponse(f'hi {name}')
class Bonuses(View):
    """Static placeholder page for the bonus program."""

    def get (self, request):
        return HttpResponse('bonuses')
class SignUp(View):
    """User registration: GET renders the form, POST creates the user."""

    def get (self, request):
        # NOTE(review): 'not_exithtml' looks like a placeholder template
        # name — confirm the intended template.
        return render(request, 'not_exithtml')

    def post(self, request):
        print(request.POST)  # e.g. {'email': ..., 'password': ..., 'csrf': ...}
        login = request.POST['email']
        password = request.POST['password']
        # SECURITY: the password is stored in plaintext — it should be hashed
        # (e.g. django.contrib.auth.hashers) before this ships; LogIn's
        # plaintext comparison would need the matching change.
        new_user = models.User(login=login, password=password)
        new_user.save()
        return redirect('/')
class LogIn(View):
    """Session-based login.

    Fixed: wrong credentials made User.objects.get raise DoesNotExist,
    returning a 500 error; the login template is now re-rendered instead.
    """

    def get(self, request):
        if request.session.get('user_id'):  # already authenticated
            return redirect('/')
        return render(request, 'not_exithtml')

    def post(self, request):
        input_login = request.POST['email']
        input_password = request.POST['password']
        # NOTE(review): credentials are compared in plaintext — see SignUp.
        try:
            user = models.User.objects.get(login=input_login, password=input_password)
        except models.User.DoesNotExist:
            # invalid credentials: show the login form again
            return render(request, 'not_exithtml')
        request.session['user_id'] = user.id  # mark the session authenticated
        return redirect('/')
class Category(View):
    """Index page filtered down to one category's books."""

    def get(self, request, category_id):
        return render(request, 'index.html', {
            'books': models.Item.objects.filter(categories_id=category_id),
            'categories': models.Categories.objects.all(),
        })
|
def FIBO(number):
    """Return the number-th Fibonacci number (FIBO(0)=0, FIBO(1)=1).

    Iterative rewrite: the original double recursion was O(2^n) in time;
    this runs in O(n) and returns identical values for all inputs >= 0.
    """
    previous, current = 0, 1
    for _ in range(number):
        previous, current = current, previous + current
    return previous
# Read n, reject negatives, otherwise print the n-th Fibonacci number.
a = int(input("enter the no."))
if a >= 0:
    print(FIBO(a))
else:
    print("error:the no. entered is -ve")
|
# from django.db.models import query
# from django.db.models.query_utils import Q
#from django.shortcuts import render
from django.views.generic import ListView
from .models import User_Package_Detail
from django.db.models import Q
# search result class...
class SearchClass(ListView):
    """Search-results view: filters User_Package_Detail by tracker substring.

    Fixed: a request without the 'q' parameter passed None into
    tracker__icontains and raised; it now falls back to an empty string
    (which matches every row).
    """
    model = User_Package_Detail
    template_name = 'result.html'

    def get_queryset(self):
        """Return the rows whose tracker contains the 'q' query string."""
        query = self.request.GET.get('q') or ''
        return User_Package_Detail.objects.filter(Q(tracker__icontains=query))
|
import sys
sys.path.append('../includes')
from db import DB
from common_functions import parse_company_prices, parse_ftse_prices, get_seconds_from_date,get_date_after,parse_currency
import datetime
import urllib.request
import requests
import ssl
import re
# Disable TLS verification for the scraped endpoints.
# NOTE(review): unverified SSL is insecure — acceptable only for this scraper.
context = ssl._create_unverified_context()
# Yahoo Finance history page: {symbol}, {period1 seconds}, {period2 seconds}.
url_to_complete = "https://uk.finance.yahoo.com/quote/{}/history?period1={}&period2={}&interval=1d&filter=history&frequency=1d"
url_ftse_history = "https://uk.investing.com/instruments/HistoricalDataAjax"
url_currency_history = "https://www.investing.com/instruments/HistoricalDataAjax"
db = DB()  # shared DB handle used by all extract_* helpers below
month_gap = 4  # window size (months) for each Yahoo history request
def extract_company_history_prices(company_code, t0, t1):
    """Fetch one company's Yahoo price-history page covering [t0, t1] and
    parse the rows into the shared db."""
    start_seconds = get_seconds_from_date(t0)
    end_seconds = get_seconds_from_date(t1)
    url = url_to_complete.format(company_code, start_seconds, end_seconds)
    with urllib.request.urlopen(url, context=context) as url_file:
        parse_company_prices(company_code, url_file.read(), db)
def extract_ftse_history_data(url):
    """POST the historical-data AJAX request for the FTSE 100 (curr_id 27)
    to uk.investing.com and feed the HTML response to parse_ftse_prices.

    The headers (including the session Cookie) are replayed from a recorded
    browser request; the endpoint rejects requests without them.
    """
    ftse_data = {
        "action": "historical_data",
        "curr_id": 27,
        "st_date": "01/01/2010",
        "end_date": "25/05/2017",
        "interval_sec": "Daily"
    }
    # NOTE(review): the hard-coded PHPSESSID/GA cookies are session-bound and
    # will expire — confirm they are refreshed before reuse.
    headers = {
        "Host": "uk.investing.com",
        "Connection": "keep-alive",
        "Content-Length": "99",
        "Origin": "https://uk.investing.com",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36",
        "Content-Type": "application/x-www-form-urlencoded",
        "Accept": "text/plain, */*; q=0.01",
        "X-Requested-With": "XMLHttpRequest",
        "Referer": "https://uk.investing.com/indices/uk-100-historical-data",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "en-GB,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2",
        "Cookie": "PHPSESSID=2hmbcb2a0u8hqatud9tigoltc7; adBlockerNewUserDomains=1495716248; \
gtmFired=OK; StickySession=id.2285927456.600uk.investing.com; \
optimizelyEndUserId=oeu1495716249369r0.9393955994124044; __qca=P0-439101783-1495716250288; \
__gads=ID=d8a4f97b263a533d:T=1495716250:S=ALNI_MZTjNmD4ydW5i2CNdF8xDdikCbNLQ; \
cookieConsent=was-set; geoC=GB; SideBlockUser=a%3A2%3A%7Bs%3A10%3A%22stack_size%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Bi%3A8%3B%7Ds%3A6%3A%22stacks%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Ba%3A1%3A%7Bi%3A0%3Ba%3A3%3A%7Bs%3A7%3A%22pair_ID%22%3Bs%3A2%3A%2227%22%3Bs%3A10%3A%22pair_title%22%3Bs%3A0%3A%22%22%3Bs%3A9%3A%22pair_link%22%3Bs%3A15%3A%22%2Findices%2Fuk-100%22%3B%7D%7D%7D%7D; optimizelySegments=%7B%224225444387%22%3A%22gc%22%2C%224226973206%22%3A%22direct%22%2C%224232593061%22%3A%22false%22%2C%225010352657%22%3A%22none%22%7D; optimizelyBuckets=%7B%7D; billboardCounter_51=1; nyxDorf=ODVhNGUtZTozZWFzZzM4Pzd4Yzk1NjY3; _ga=GA1.2.310379084.1495716250; _gid=GA1.2.1956065754.1495745779"
    }
    html = requests.post(url, data= ftse_data, headers= headers).content
    parse_ftse_prices(html, db)
# NOTE(review): function name is misspelled ('extrace'); renaming would break
# the (currently commented-out) caller below, so it is kept as-is.
def extrace_currency_history_data(url, currency):
    """POST the historical-data AJAX request for a currency pair (curr_id 37
    = EUR/NOK) to www.investing.com and feed the response to parse_currency.

    Args:
        url: the HistoricalDataAjax endpoint.
        currency: label (e.g. "EUR_NOK") passed through to parse_currency.
    """
    currency_data = {
        "action": "historical_data",
        "curr_id": "37",
        "st_date": "01/01/2014",
        "end_date": "05/26/2017",
        "interval_sec": "Daily"
    }
    # Recorded browser headers; the endpoint rejects requests without them.
    # NOTE(review): session cookies are hard-coded and will expire.
    headers = {
        "Host": "www.investing.com",
        "Connection": "keep-alive",
        "Content-Length": "99",
        "Origin": "https://www.investing.com",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) \
Chrome/58.0.3029.110 Safari/537.36",
        "Accept": "text/plain, */*; q=0.01",
        "X-Requested-With": "XMLHttpRequest",
        "Referer": "https://www.investing.com/currencies/eur-nok-historical-data",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "en-GB,en-US;q=0.8,en;q=0.6,zh-CN;q=0.4,zh-TW;q=0.2",
        "Cookie": "adBlockerNewUserDomains=1495716248; optimizelyEndUserId=oeu1495716249369r0.9393955994124044; \
__qca=P0-439101783-1495716250288; __gads=ID=d8a4f97b263a533d:T=1495716250:S=ALNI_MZTjNmD4ydW5i2CNdF8xDdikCbNLQ; \
PHPSESSID=e7qif9lociv41eic4r0ohse5h1; geoC=GB; gtmFired=OK; StickySession=id.80697086445.433www.investing.com; \
editionPostpone=1495790778437; \
SideBlockUser=a%3A2%3A%7Bs%3A10%3A%22stack_size%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Bi%3A8%3B%7Ds%3A6%3A\
%22stacks%22%3Ba%3A1%3A%7Bs%3A11%3A%22last_quotes%22%3Ba%3A1%3A%7Bi%3A0%3Ba%3A3%3A%7Bs%3A7%3A%22pair_ID%22%3Bs%3A2%3A\
%2237%22%3Bs%3A10%3A%22pair_title%22%3Bs%3A20%3A%22Euro+Norwegian+Krone%22%3Bs%3A9%3A%22pair_link%22%3Bs%3A19%3A%22%\
2Fcurrencies%2Feur-nok%22%3B%7D%7D%7D%7D; optimizelySegments=%7B%224225444387%22%3A%22gc%22%2C%224226973206%22%3A%22direct\
%22%2C%224232593061%22%3A%22false%22%2C%225010352657%22%3A%22none%22%7D; optimizelyBuckets=%7B%7D; billboardCounter_1=2;\
nyxDorf=NTg1Z2EpNWg%2FbWlkMn83NzVlM243Ljo6NzdjaA%3D%3D; _gat=1; _gat_allSitesTracker=1; _ga=GA1.2.310379084.1495716250; \
_gid=GA1.2.1374619376.1495797462"
    }
    html = requests.post(url, data= currency_data, headers= headers).content
    parse_currency(html, db, currency)
# Pull price history in month_gap-sized windows for every OSEAX-listed company.
db.cur.execute('''select company_code from tomorrow_external_data.index_company WHERE category ="OSEAX"; ''')
for row in db.cur.fetchall():
    company_code = row["company_code"]
    t0 = datetime.datetime(2014, 1, 1)
    t1 = get_date_after(t0, month_gap)
    # Slide the [t0, t1) window forward until it reaches the present.
    while(t0 < datetime.datetime.now()):
        extract_company_history_prices(company_code, t0, t1)
        t0 = t1
        t1 = get_date_after(t0, month_gap)
# retrieve ftse 100 data
# extract_ftse_history_data(url_ftse_history)
# retrieve eur_nok currency data
# extrace_currency_history_data(url_currency_history, "EUR_NOK")
db.close_connection()
#Emily Murphy
#2017-10-04
#betterAdditionGameDemo.py - asks addition problem until user gets 5 right
from random import randint
# Quiz loop: keep asking random signed-addition questions until 5 are right.
correct_so_far = 0
while correct_so_far < 5:
    left = randint(-10, 10)
    right = randint(-10, 10)
    expected = left + right
    prompt = 'What is {} + {}?'.format(left, right)
    if int(input(prompt)) == expected:
        print('Correct')
        correct_so_far += 1
    else:
        print('The answer was', expected)
print('Good job! You are not stupid!')
|
# Copyright (c) Alibaba, Inc. and its affiliates.
import math
import os
from collections import namedtuple
from typing import Dict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from modelscope.metainfo import Models
from modelscope.models.builder import MODELS
from modelscope.models.nlp.structbert import SbertConfig, SbertModel
from modelscope.models.nlp.task_models.task_model import BaseTaskModel
from modelscope.utils.config import Config, ConfigFields
from modelscope.utils.constant import ModelFile, Tasks
# Activation functions selectable by name in LinearProjection.
activations = {
    'relu': F.relu,
    'tanh': torch.tanh,
    'linear': lambda x: x,
}
# Per-activation gain used to scale the weight-init std (He-style for relu).
activation_coeffs = {
    'relu': math.sqrt(2),
    'tanh': 5 / 3,
    'linear': 1.,
}
class LinearProjection(nn.Module):
    """Weight-normalized linear layer followed by a configurable activation.

    Weights are drawn from N(0, 1/in_features) scaled by the activation's
    gain; biases (when present) start at zero.
    """

    def __init__(self,
                 in_features,
                 out_features,
                 activation='linear',
                 bias=True):
        super().__init__()
        self.activation = activations[activation]
        gain = activation_coeffs[activation]
        layer = nn.Linear(in_features, out_features, bias=bias)
        nn.init.normal_(layer.weight, std=math.sqrt(1. / in_features) * gain)
        if bias:
            nn.init.zeros_(layer.bias)
        self.model = nn.utils.weight_norm(layer)

    def forward(self, x):
        return self.activation(self.model(x))
class RelationModule(nn.Module):
    """Relation-network scorer: maps (query, prototype) feature pairs to a
    scalar matching score via a small MLP."""

    def __init__(self, args):
        super(RelationModule, self).__init__()
        pair_dim = args.proj_hidden_size * 4
        self.prediction = torch.nn.Sequential(
            LinearProjection(
                pair_dim, args.proj_hidden_size * 4, activation='relu'),
            nn.Dropout(args.dropout),
            LinearProjection(args.proj_hidden_size * 4, 1))

    def forward(self, query, protos):
        """query: [n_query, dim]; protos: [n_cls, dim] -> [n_query, n_cls]."""
        n_cls = protos.shape[0]
        n_query = query.shape[0]
        # Tile so every query row is paired with every class prototype.
        tiled_protos = protos.unsqueeze(0).repeat(n_query, 1, 1)
        tiled_query = query.unsqueeze(1).repeat(1, n_cls, 1)
        # Standard relation features: both vectors, |diff|, and product.
        pair_feat = torch.cat(
            [tiled_query, tiled_protos, (tiled_protos - tiled_query).abs(),
             tiled_query * tiled_protos], dim=-1)
        scores = self.prediction(pair_feat)  # [n_query, n_cls, 1]
        return scores.squeeze(-1)
class MetricsLayer(nn.Module):
    """Similarity head: scaled cosine similarity or a learned relation
    network, selected by args.metrics."""

    def __init__(self, args):
        super(MetricsLayer, self).__init__()
        self.args = args
        assert args.metrics in ('relation', 'cosine')
        if args.metrics == 'relation':
            self.relation_net = RelationModule(args)

    @property
    def name(self):
        return self.args.metrics

    def forward(self, query, protos):
        """ query : [bsz, n_query, dim]
            support : [bsz, n_query, n_cls, dim] | [bsz, n_cls, dim]
        """
        if self.args.metrics == 'cosine':
            dists = self.cosine_similarity(query, protos)
            if self.training:
                # temperature scaling applied only while training
                dists = dists * 5
        elif self.args.metrics in ('relation', ):
            dists = self.relation_net(query, protos)
        else:
            raise NotImplementedError
        return dists

    def cosine_similarity(self, x, y):
        """x: [n_query, dim]; y: [n_cls, dim] -> [n_query, n_cls]."""
        n_query = x.shape[0]
        n_cls = y.shape[0]
        dim = x.shape[-1]
        expanded_x = x.unsqueeze(1).expand([n_query, n_cls, dim])
        expanded_y = y.unsqueeze(0).expand([n_query, n_cls, dim])
        return F.cosine_similarity(expanded_x, expanded_y, -1)
class AveragePooling(nn.Module):
    """Masked mean over one dimension: sum(x * mask) / sum(mask)."""

    def forward(self, x, mask, dim=1):
        weights = mask.float()
        return torch.sum(x * weights, dim=dim) / torch.sum(weights, dim=dim)
class AttnPooling(nn.Module):
    """Attention pooling: a learned scalar logit per position, softmax-weighted sum."""

    def __init__(self, input_size, hidden_size=None, output_size=None):
        super().__init__()
        self.input_proj = nn.Sequential(
            LinearProjection(input_size, hidden_size), nn.Tanh(),
            LinearProjection(hidden_size, 1, bias=False))
        # Identity when no output size is requested.
        self.output_proj = (LinearProjection(input_size, output_size)
                            if output_size else (lambda x: x))

    def forward(self, x, mask):
        logits = self.input_proj(x)
        mask = mask.float()
        # Push padded positions to a large negative logit before the softmax.
        logits = logits * mask + -1e4 * (1. - mask)
        weights = F.softmax(logits, dim=1)
        projected = self.output_proj(x)
        return torch.matmul(weights.transpose(1, 2), projected).squeeze(1)
class PoolingLayer(nn.Module):
    """Dispatches to attention ('attn') or masked-average ('avg') pooling."""

    def __init__(self, args):
        super(PoolingLayer, self).__init__()
        if args.pooling == 'attn':
            self.pooling = AttnPooling(args.proj_hidden_size,
                                       args.proj_hidden_size,
                                       args.proj_hidden_size)
        elif args.pooling == 'avg':
            self.pooling = AveragePooling()
        else:
            raise NotImplementedError(args.pooling)

    def forward(self, x, mask):
        return self.pooling(x, mask)
@MODELS.register_module(
    Tasks.faq_question_answering, module_name=Models.structbert)
class SbertForFaqQuestionAnswering(BaseTaskModel):
    """Few-shot FAQ question answering on a StructBERT backbone.

    Query and support sentences are embedded with the same encoder; per-class
    prototypes are averaged from the support embeddings and the query is
    scored against them by the configured metric ('cosine' or 'relation').
    """
    _backbone_prefix = ''

    @classmethod
    def _instantiate(cls, **kwargs):
        # Build from a local model directory, then load its stored weights.
        model = cls(kwargs.get('model_dir'))
        model.load_checkpoint(kwargs.get('model_dir'))
        return model

    def __init__(self, model_dir, *args, **kwargs):
        super().__init__(model_dir, *args, **kwargs)
        backbone_cfg = SbertConfig.from_pretrained(model_dir)
        self.bert = SbertModel(backbone_cfg)
        # Metric and pooling choices come from the model's configuration file;
        # defaults are cosine similarity and average pooling.
        model_config = Config.from_file(
            os.path.join(model_dir,
                         ModelFile.CONFIGURATION)).get(ConfigFields.model, {})
        metric = model_config.get('metric', 'cosine')
        pooling_method = model_config.get('pooling', 'avg')
        # Lightweight namespace consumed by MetricsLayer / PoolingLayer.
        Arg = namedtuple('args', [
            'metrics', 'proj_hidden_size', 'hidden_size', 'dropout', 'pooling'
        ])
        args = Arg(
            metrics=metric,
            proj_hidden_size=self.bert.config.hidden_size,
            hidden_size=self.bert.config.hidden_size,
            dropout=0.0,
            pooling=pooling_method)
        self.metrics_layer = MetricsLayer(args)
        self.pooling = PoolingLayer(args)

    def forward(self, input: Dict[str, Tensor]) -> Dict[str, Tensor]:
        """
        Args:
            input (Dict[str, Tensor]): the preprocessed data, it contains the following keys:
                query(:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
                    The query to be predicted.
                support(:obj:`torch.LongTensor` of shape :obj:`(support_size, sequence_length)`):
                    The support set.
                support_label(:obj:`torch.LongTensor` of shape :obj:`(support_size, )`):
                    The labels of support set.

        Returns:
            Dict[str, Tensor]: result, it contains the following key:
                scores(:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_cls)`):
                    Predicted scores of all classes for each query.

        Examples:
            >>> from modelscope.hub.snapshot_download import snapshot_download
            >>> from modelscope.preprocessors import FaqQuestionAnsweringPreprocessor
            >>> from modelscope.models.nlp import SbertForFaqQuestionAnswering
            >>> cache_path = snapshot_download('damo/nlp_structbert_faq-question-answering_chinese-base')
            >>> preprocessor = FaqQuestionAnsweringPreprocessor.from_pretrained(cache_path)
            >>> model = SbertForFaqQuestionAnswering.from_pretrained(cache_path)
            >>> param = {
            >>>     'query_set': ['如何使用优惠券', '在哪里领券', '在哪里领券'],
            >>>     'support_set': [{
            >>>         'text': '卖品代金券怎么用',
            >>>         'label': '6527856'
            >>>     }, {
            >>>         'text': '怎么使用优惠券',
            >>>         'label': '6527856'
            >>>     }, {
            >>>         'text': '这个可以一起领吗',
            >>>         'label': '1000012000'
            >>>     }, {
            >>>         'text': '付款时送的优惠券哪里领',
            >>>         'label': '1000012000'
            >>>     }, {
            >>>         'text': '购物等级怎么长',
            >>>         'label': '13421097'
            >>>     }, {
            >>>         'text': '购物等级二心',
            >>>         'label': '13421097'
            >>>     }]
            >>> }
            >>> result = model(preprocessor(param))
        """
        # Inference-only path: prototypes are rebuilt from the support set on
        # every call.
        assert not self.training
        query = input['query']
        support = input['support']
        if isinstance(query, list):
            query = torch.stack(query)
        if isinstance(support, list):
            support = torch.stack(support)
        n_query = query.shape[0]
        n_support = support.shape[0]
        # Token id 0 is treated as padding when building attention masks.
        query_mask = torch.ne(query, 0).view([n_query, -1])
        support_mask = torch.ne(support, 0).view([n_support, -1])
        support_labels = input['support_labels']
        num_cls = torch.max(support_labels) + 1
        onehot_labels = self._get_onehot_labels(support_labels, n_support,
                                                num_cls)
        # Encode query and support in one batch through the shared encoder.
        input_ids = torch.cat([query, support])
        input_mask = torch.cat([query_mask, support_mask], dim=0)
        pooled_representation = self.forward_sentence_embedding({
            'input_ids':
            input_ids,
            'attention_mask':
            input_mask
        })
        z_query = pooled_representation[:n_query]
        z_support = pooled_representation[n_query:]
        # Class prototypes: per-label mean of support embeddings (the epsilon
        # guards against division by zero for labels with no members).
        cls_n_support = torch.sum(onehot_labels, dim=-2) + 1e-5
        protos = torch.matmul(onehot_labels.transpose(0, 1),
                              z_support) / cls_n_support.unsqueeze(-1)
        scores = self.metrics_layer(z_query, protos).view([n_query, num_cls])
        if self.metrics_layer.name == 'relation':
            # Relation-net scores are unbounded; squash to (0, 1).
            scores = torch.sigmoid(scores)
        return {'scores': scores}

    def _get_onehot_labels(self, labels, support_size, num_cls):
        """Converts int labels [support_size] to one-hot [support_size, num_cls]."""
        labels_ = labels.view(support_size, 1)
        target_oh = torch.zeros(support_size, num_cls).to(labels)
        target_oh.scatter_(dim=1, index=labels_, value=1)
        return target_oh.view(support_size, num_cls).float()

    def forward_sentence_embedding(self, inputs: Dict[str, Tensor]):
        """Encodes token ids with the backbone and pools one vector per sequence."""
        input_ids = inputs['input_ids']
        input_mask = inputs['attention_mask']
        if not isinstance(input_ids, Tensor):
            input_ids = torch.IntTensor(input_ids)
        if not isinstance(input_mask, Tensor):
            input_mask = torch.IntTensor(input_mask)
        rst = self.bert(input_ids, input_mask)
        last_hidden_states = rst.last_hidden_state
        # Pooling layers expect a broadcastable [batch, seq, 1] mask.
        if len(input_mask.shape) == 2:
            input_mask = input_mask.unsqueeze(-1)
        pooled_representation = self.pooling(last_hidden_states, input_mask)
        return pooled_representation
|
#!/usr/bin/python
# Copyright Justin Buist
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import contextlib
import pango
import pygtk
import gtk
import gobject
import pyaudio
import wave
import threading
import os
import sys
# PyGTK requires an explicit version pin before any gtk import is used.
pygtk.require('2.0')
# The name the user is practicing to type.
TARGET_TEXT = 'HARRIET'
# Use in a 'with' block to suppress OS-level stderr (fd 2) for the duration
# of the block, e.g. to hide ALSA/PortAudio warnings.
@contextlib.contextmanager
def ignore_stderr():
    null_fd = os.open(os.devnull, os.O_WRONLY)
    saved_fd = os.dup(2)
    sys.stderr.flush()
    os.dup2(null_fd, 2)
    os.close(null_fd)
    try:
        yield
    finally:
        # Restore the original stderr even if the block raised.
        os.dup2(saved_fd, 2)
        os.close(saved_fd)
def playSound(filename):
    """Play a WAV file synchronously through the default audio device.

    OS-level stderr is suppressed for the duration so PortAudio/ALSA
    warnings do not clutter the console.
    """
    with ignore_stderr():
        chunk = 1024
        f = wave.open(filename, "rb")
        p = pyaudio.PyAudio()
        try:
            stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
                            channels=f.getnchannels(),
                            rate=f.getframerate(),
                            output=True)
            data = f.readframes(chunk)
            while data:
                stream.write(data)
                data = f.readframes(chunk)
            stream.stop_stream()
            stream.close()
        finally:
            # Always release the PortAudio handle and close the wave file,
            # even if playback fails mid-stream (the original leaked both on
            # error and never closed the wave file at all).
            p.terminate()
            f.close()
def playFinish():
    """Play the success sound on a background thread so the UI stays responsive."""
    threading.Thread(target=playSound, args=['applause.wav']).start()
def playReset():
    """Play the mistake sound on a background thread so the UI stays responsive."""
    threading.Thread(target=playSound, args=['boing.wav']).start()
# BEGIN GTK Event Ops
def delete_event(widget, event, data=None):
    """Window close request handler; returning False lets the destroy proceed."""
    return False
def destroy(widget, data=None):
    """Quit the GTK main loop once the window has been destroyed."""
    gtk.main_quit()
def name_onKeyPress(widget):
    """Validate the entry text against TARGET_TEXT on every change event."""
    typed = widget.get_text()
    expected_prefix = TARGET_TEXT[:len(typed)]
    if typed.lower() == expected_prefix.lower():
        if len(typed) == len(TARGET_TEXT):
            # Every character matched: celebrate, then clear for another round.
            playFinish()
            widget.set_text('')
    else:
        # A character was wrong: buzz and make the user start over.
        playReset()
        widget.set_text('')
# END GTK Event Ops
# If this isn't called then the main GTK method will get all manner
# of hosed up if you try and create other threads.
gobject.threads_init()

window = gtk.Window(gtk.WINDOW_TOPLEVEL)
window.set_border_width(10)
window.connect("delete_event", delete_event)
window.connect("destroy", destroy)

vbox = gtk.VBox()

# Large monospace font so the target text and the entry line up visually.
nameFont = pango.FontDescription("Monospace 24")

nameLabel = gtk.Label()
nameLabel.set_justify(gtk.JUSTIFY_LEFT)
nameLabel.modify_font(nameFont)
nameLabel.set_text(TARGET_TEXT)

nameInput = gtk.Entry()
nameInput.modify_font(nameFont)
# 'changed' fires on every edit, so the input is validated per keystroke.
nameInput.connect('changed', name_onKeyPress)

vbox.add(nameLabel)
vbox.add(nameInput)
window.add(vbox)
window.set_size_request(300, -1)
window.show_all()
window.show()
gtk.main()
|
from rest_framework import serializers, exceptions
class QuizIDSerializer(serializers.Serializer):
    """Validates a request payload containing a single integer ``quiz_id``."""
    quiz_id = serializers.IntegerField()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-06 22:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial auto-generated schema: Estudiantes and Seccion tables plus the
    foreign key from Estudiantes to Seccion. Do not edit by hand except to
    squash or to fix a deployment problem."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Estudiantes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('cedula', models.CharField(max_length=50, unique=True, verbose_name='N\xfamero de Identificaci\xf3n')),
                ('apellido', models.CharField(max_length=100)),
                ('nombre', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'estudiantes',
                'verbose_name_plural': 'estudiantes',
            },
        ),
        migrations.CreateModel(
            name='Seccion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=100)),
                ('asignatura', models.CharField(max_length=100)),
                ('codigo', models.CharField(blank=True, max_length=10, null=True)),
            ],
            options={
                'db_table': 'secciones',
                'verbose_name_plural': 'secciones',
            },
        ),
        migrations.AddField(
            model_name='estudiantes',
            name='seccion',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='estudiantes.Seccion'),
        ),
    ]
|
from serif.model.tokenizer_model import TokenizerModel
class PlainWhiteSpaceTokenizer(TokenizerModel):
    '''Does nothing but return a tokenization according to whitespace (doesn't attempt to split off final punctuation)'''

    def __init__(self, **kwargs):
        super(PlainWhiteSpaceTokenizer, self).__init__(**kwargs)

    def add_tokens_to_sentence(self, sentence):
        tokens = []
        consumed = 0  # characters of the sentence text accounted for so far
        for chunk in sentence.text.split(" "):
            stripped = chunk.strip()
            # Offset within the chunk handles non-space whitespace that
            # strip() removed from the front.
            start = sentence.start_char + consumed + chunk.find(stripped)
            end = start + len(stripped) - 1
            consumed += len(chunk) + 1  # +1 for the space split() removed
            # Skip empty chunks produced by consecutive spaces.
            if stripped:
                tokens.extend(
                    TokenizerModel.add_new_token(sentence.token_sequence,
                                                 stripped, start, end))
        return tokens
|
"""
test_Lx(z)
Author: Jacob Jost
Affiliation: University of Colorado at Boulder
Created on: Thu June 4 09:00:00 MDT 2015
--- = spacing between different sections of a code
$$$ = spacing between different parts of codes
The redshift can be set so the same redshift is used for each plot or can be set
for each individual plot. You either need to keep the top z for the overall or
hash it out and unhash the other z's to set individually.
"""
import ares
import numpy as np
import matplotlib.pyplot as pl
from scipy import integrate
# Hard-band LDDE1 luminosity function at a few integer redshifts.
a15 = ares.util.read_lit('aird2015')
z = np.arange(0, 3)
L = np.logspace(41, 46)
fig1 = pl.figure(1); ax1 = fig1.add_subplot(111)
colors = 'k', 'b', 'g'
for i, redshift in enumerate(z):
    lf_ldde1 = a15.LuminosityFunction(L, redshift)
    ax1.loglog(L, lf_ldde1, color=colors[i], ls='-',
               label=r'$z={}$'.format(redshift))
ax1.set_xlabel(r'$L_X$')
ax1.set_ylabel(r'$\phi(L_X)$')
ax1.legend(loc='best')
pl.show()
# NOTE(review): the script terminates here; everything below is preserved
# scratch work that never executes.
import sys
sys.exit()
# --- Unreachable scratch work below (sys.exit() above) -----------------------
z = 1.0
#------------------------------------------------------------
#z = 5.0
L = np.logspace(41, 47, 100)
fig1 = pl.figure(1); ax1 = fig1.add_subplot(111)
models = []
for p, Lx in enumerate(L):
    model = a15.LuminosityFunction(Lx, z)
    models.append(model)
models = np.array(models)
pl.loglog(L, models, color = 'k', label = r'LDDE1-Hard Band')
#pl.title('2-7 KeV LDDE1 at z ~ {:.1f}'.format(z))
#pl.ylim(10**-9.1, 10**-2)
#ax1.set_xlabel(r'$L_X$')
#ax1.set_ylabel(r'$\phi(L_X)$')
#ax1.legend(loc='best')
#pl.show()
#------------------------------------------------------------
#z = 5.0
# NOTE(review): if this section is ever re-enabled, `models` is an ndarray at
# this point, so `models.append` would raise AttributeError — reset
# `models = []` first.
for p, Lx in enumerate(L):
    model = a15.LuminosityFunction(Lx, z)
    models.append(model)
models = np.array(models)
pl.loglog(L, models, color = 'g', label = r'LDDE2-Hard Band')
#pl.title('2-7 KeV LDDE2 at z ~ {:.1f}'.format(z))
#pl.ylim(10**-9.1, 10**-2)
#ax1.set_xlabel(r'$L_X$')
#ax1.set_ylabel(r'$\phi(L_X)$')
#ax1.legend(loc='best')
#pl.show()
import sys
sys.exit()
#------------------------------------------------------------
#z = 5.0
L = np.logspace(41, 47, 100)
fig1 = pl.figure(1); ax1 = fig1.add_subplot(111)
models = []
for p, Lx in enumerate(L):
    model = a15.LuminosityFunction(Lx, z, LDDE1 = True, **a15.qsolf_LDDE1_softpars)
    models.append(model)
models = np.array(models)
pl.loglog(L, models, color = 'r', label = r'LDDE1-Soft Band')
#pl.title('0.5-2 KeV LDDE1 at z ~ {:.1f}'.format(z))
#pl.ylim(10**-9.1, 10**-2)
#ax1.set_xlabel(r'$L_X$')
#ax1.set_ylabel(r'$\phi(L_X)$')
#ax1.legend(loc='best')
#pl.show()
#------------------------------------------------------------
#z = 5.0
L = np.logspace(41, 47, 100)
fig1 = pl.figure(1); ax1 = fig1.add_subplot(111)
models = []
for p, Lx in enumerate(L):
    model = a15.LuminosityFunction(Lx, z, LDDE2 = True, **a15.qsolf_LDDE2_softpars)
    models.append(model)
models = np.array(models)
pl.loglog(L, models, color = 'b', label = r'LDDE2-Soft Band')
pl.title(r'Different models for $\phi(L_X)$ for soft and hard bands at $z$ ~ ${:.1f}$'.format(z))
pl.ylim(10**-9.1, 10**-2)
ax1.set_xlabel(r'$L_X$')
ax1.set_ylabel(r'$\phi(L_X)$')
ax1.legend(loc='best')
pl.show()
import sys
sys.exit()
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
"""
# -*- coding: utf-8 -*-
test_Lx(z)
Author: Jacob Jost
Affiliation: University of Colorado at Boulder (Undergraduate)
Created on: Thu June 12 11:00:00 MDT 2015
The redshift can be set so the same redshift is used for each plot or can be set
for each individual plot. You either need to keep the top z for the overall or
hash it out and unhash the other z's to set individually.
Models = # of models you want to run
NOTE:
This has not been vectorized so any more than 50 models will take quite some
time to run.
If you want to look at a particular model, just use triple quotes to take the
section you dont need out.
To converte from the 2-10 KeV band to the 0.5-8 Kev Band divide integrand1 by 1.33.
--- = spacing between different sections of code
# of steps for random samples needs to match the steps of the redshift
Need to combine the parameters dictionary and the err dictionarty to use the randomsamples function
"""
#------------------------------------------------------------
# Integrated luminosity function vs. redshift for randomly sampled parameter
# sets (one semi-transparent curve per sample). Still unreachable scratch work.
#a15 = ares.util.read_lit('aird2015')
Legend = ['Green = LDDE1 softband', 'Red = LDDE1 hardband', \
          'Blue = LDDE2 softband', 'Black = LDDE2 hardband']
z = np.linspace(0, 5, 100)
fig2 = pl.figure(2); ax2 = fig2.add_subplot(111)
#------------------------------------------------------------
#z = np.linspace(0, 5, 100)
#fig2 = pl.figure(2); ax2 = fig2.add_subplot(111)
#a15 = ares.util.read_lit('aird2015')
hardpars = a15.qsolf_LDDE1_hardpars #parameters dictionary
harderr = a15.qsolf_LDDE1_harderr #parameters error dictionary
hardall = hardpars.copy() #copying parameters dictionary
hardall.update(harderr) #combining parameters dictionary & parameters error dictionary
hardsamples = a15.randomsamples(100, **hardall)
models = 50
Legend1 = ['Red = LDDE1 hardband']
integrand1=[]
for i in range(models):
    integrand = []
    for j in range(len(z)):
        x = lambda Lx: a15.LuminosityFunction(Lx, z[j], LDDE1 = True,\
            **hardsamples[i])
        p, err = integrate.quad(x, 10**41, 10**46)
        integrand.append(p)
    integrand1.append(integrand)
#HEADS UP: this takes a while to run, use caution.
for i in range(len(integrand1)):
    pl.semilogy(z, integrand1[i], alpha = 0.25, color = 'r')
#ax2.set_ylabel(r'$L_X(z)$')
#ax2.set_xlabel(r'$z$')
#pl.legend((Legend1), loc='best')
#pl.show()
#------------------------------------------------------------
#z = np.linspace(0, 5, 100)
#fig2 = pl.figure(2); ax2 = fig2.add_subplot(111)
#a15 = ares.util.read_lit('aird2015')
softpars = a15.qsolf_LDDE1_softpars
softerr = a15.qsolf_LDDE1_softerr
softall = softpars.copy()
softall.update(softerr)
softsamples = a15.randomsamples(100, **softall)
models = 50
Legend2 = ['Green = LDDE1 Softband']
integrand1=[]
for i in range(models):
    integrand = []
    for j in range(len(z)):
        x = lambda Lx: a15.LuminosityFunction(Lx, z[j], LDDE1 = True,\
            **softsamples[i])
        p, err = integrate.quad(x, 10**41, 10**46)
        integrand.append(p)
    integrand1.append(integrand)
#HEADS UP: this takes a while to run, use caution.
for i in range(len(integrand1)):
    pl.semilogy(z, integrand1[i], alpha = 0.25, color = 'g')
#ax2.set_ylabel(r'$L_X(z)$')
#ax2.set_xlabel(r'$z$')
#pl.legend((Legend2), loc='best')
#pl.show()
#------------------------------------------------------------
#z = np.linspace(0, 5, 100)
#fig2 = pl.figure(2); ax2 = fig2.add_subplot(111)
#a15 = ares.util.read_lit('aird2015')
hardpars = a15.qsolf_LDDE2_hardpars
harderr = a15.qsolf_LDDE2_harderr
hardall = hardpars.copy()
hardall.update(harderr)
hardsamples = a15.randomsamples(100, **hardall)
models = 50
Legend3 = ['Black = LDDE2 hardband']
integrand1=[]
for i in range(models):
    integrand = []
    for j in range(len(z)):
        x = lambda Lx: a15.LuminosityFunction(Lx, z[j], LDDE2 = True,\
            **hardsamples[i])
        p, err = integrate.quad(x, 10**41, 10**46)
        integrand.append(p)
    integrand1.append(integrand)
#HEADS UP: this takes a while to run, use caution.
for i in range(len(integrand1)):
    pl.semilogy(z, integrand1[i], alpha = 0.25, color = 'k')
#fig2 = pl.figure(2); ax2 = fig2.add_subplot(111)
#ax2.set_ylabel(r'$L_X(z)$')
#ax2.set_xlabel(r'$z$')
#pl.legend((Legend3), loc='best')
#pl.show()
#------------------------------------------------------------
#z = np.linspace(0, 5, 100)
#fig2 = pl.figure(2); ax2 = fig2.add_subplot(111)
#a15 = ares.util.read_lit('aird2015')
softpars = a15.qsolf_LDDE2_softpars
softerr = a15.qsolf_LDDE2_softerr
softall = softpars.copy()
softall.update(softerr)
softsamples = a15.randomsamples(100, **softall)
models = 50
Legend4 = ['Blue = LDDE2 Softband']
integrand1=[]
for i in range(models):
    integrand = []
    for j in range(len(z)):
        x = lambda Lx: a15.LuminosityFunction(Lx, z[j], LDDE2 = True,\
            **softsamples[i])
        p, err = integrate.quad(x, 10**41, 10**46)
        integrand.append(p)
    integrand1.append(integrand)
#HEADS UP: this takes a while to run, use caution.
for i in range(len(integrand1)):
    pl.semilogy(z, integrand1[i], alpha = 0.25, color = 'b')
ax2.set_ylabel(r'$L_X(z)$')
ax2.set_xlabel(r'$z$')
#pl.legend((Legend4), loc='best')
pl.legend((Legend), loc='best')
pl.show()
#---------------------------------------------------------
"""
#z = np.linspace(0, 5, 50)
models = 50
Legend5 = ['Black = 0.5-8 KeV Band (LDDE2)']
integrand1=[]
for i in range(models):
    integrand = []
    for j in range(len(z)):
        x = lambda Lx: a15._LuminosityFunction_LDDE2_integrate(Lx, z[j],\
            list(a15.qsolf_LDDE2_hardpars_integration.values())[3], \
            list(a15.qsolf_LDDE2_hardpars_integration.values())[-2][i], \
            list(a15.qsolf_LDDE2_hardpars_integration.values())[2][i],\
            list(a15.qsolf_LDDE2_hardpars_integration.values())[1][i], \
            list(a15.qsolf_LDDE2_hardpars_integration.values())[9][i], \
            list(a15.qsolf_LDDE2_hardpars_integration.values())[6][i], \
            list(a15.qsolf_LDDE2_hardpars_integration.values())[7][i], \
            list(a15.qsolf_LDDE2_hardpars_integration.values())[-1][i], \
            list(a15.qsolf_LDDE2_hardpars_integration.values())[-3][i],\
            list(a15.qsolf_LDDE2_hardpars_integration.values())[-6][i],\
            list(a15.qsolf_LDDE2_hardpars_integration.values())[5][i],\
            list(a15.qsolf_LDDE2_hardpars_integration.values())[-4][i],\
            list(a15.qsolf_LDDE2_hardpars_integration.values())[4][i],\
            list(a15.qsolf_LDDE2_hardpars_integration.values())[0][i])
        p, err = integrate.quad(x, 10**41, 10**46)
        integrand.append(p)
    integrand1.append(integrand)
integrand1 = np.array(integrand1)
#HEADS UP: this takes a while to run, use caution.
for i in range(len(integrand1)):
    pl.semilogy(z, integrand1[i], alpha = 0.25, color = 'k')
ax2.set_ylabel(r'$L_X(z)$')
ax2.set_xlabel(r'$z$')
pl.legend((Legend), loc='best')
pl.show()
"""
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
"""
test_Lx(z)
Author: Jacob Jost
Affiliation: University of Colorado at Boulder (Undergraduate)
Created on: Thu June 4 09:00:00 MDT 2015
--- = spacing between different sections of code
The redshift can be set so the same redshift is used for each plot or can be set
for each individual plot. You either need to keep the top z for the overall or
hash it out and unhash the other z's to set individually."""
# Luminosity-function curves at a fixed redshift for m randomly sampled
# parameter sets per band/model. Still unreachable scratch work.
#a15 = ares.util.read_lit('aird2015')
fig3 = pl.figure(3); ax3 = fig3.add_subplot(111)
Legend = ['Green = LDDE1 softband', 'Red = LDDE1 hardband', 'Blue = LDDE2 softband', 'Black = LDDE2 hardband']
z = 5.0
L = np.logspace(41, 47, 100)
m = 1000
#------------------------------------------------------------
#z = 5.0
#L = np.logspace(41, 47, 100)
#m = 100
#Legend1 = ['Red = LDDE1 hardband']
#fig3 = pl.figure(3); ax3 = fig3.add_subplot(111)
hardpars = a15.qsolf_LDDE1_hardpars
harderr = a15.qsolf_LDDE1_harderr
hardall = hardpars.copy()
hardall.update(harderr)
hardsamples = a15.randomsamples(1000, **hardall)
models = []
for t in range(m):
    model = []
    models.append(model)
    for Lx in L:
        model1 = a15.LuminosityFunction(Lx, z, LDDE1 = True, **hardsamples[t])
        model.append(model1)
for i, j in enumerate(models):
    pl.loglog(L, models[i], color = 'r', alpha = 0.1)
#pl.title('2-7 KeV LDDE1 at z ~ {:.1f}'.format(z))
#pl.ylim(10**-9.1, 10**-2)
#ax3.set_xlabel(r'$L_X$')
#ax3.set_ylabel(r'$\phi(L_X)$')
#ax3.legend((Legend1), loc='best')
#pl.show()
#------------------------------------------------------------
#z = 5.0
#L = np.logspace(41, 47, 100)
#m = 100
#Legend2 = ['Green = LDDE1 softband']
#fig3 = pl.figure(3); ax3 = fig3.add_subplot(111)
softpars = a15.qsolf_LDDE1_softpars
softerr = a15.qsolf_LDDE1_softerr
softall = softpars.copy()
softall.update(softerr)
softsamples = a15.randomsamples(1000, **softall)
models = []
for t in range(m):
    model = []
    models.append(model)
    for Lx in L:
        model1 = a15.LuminosityFunction(Lx, z, LDDE1 = True, **softsamples[t])
        model.append(model1)
for i, j in enumerate(models):
    pl.loglog(L, models[i], color = 'g', alpha = 0.1)
#pl.title('2-7 KeV LDDE1 at z ~ {:.1f}'.format(z))
#pl.ylim(10**-9.1, 10**-2)
#ax3.set_xlabel(r'$L_X$')
#ax3.set_ylabel(r'$\phi(L_X)$')
#ax3.legend((Legend2), loc='best')
#pl.show()
#------------------------------------------------------------
#z = 5.0
#L = np.logspace(41, 47, 1000)
#m = 100
#Legend3 = ['Black = LDDE2 hardband']
#fig3 = pl.figure(3); ax3 = fig3.add_subplot(111)
hardpars = a15.qsolf_LDDE2_hardpars
harderr = a15.qsolf_LDDE2_harderr
hardall = hardpars.copy()
hardall.update(harderr)
hardsamples = a15.randomsamples(1000, **hardall)
models = []
for t in range(m):
    model = []
    models.append(model)
    for Lx in L:
        model1 = a15.LuminosityFunction(Lx, z, LDDE2 = True, **hardsamples[t])
        model.append(model1)
for i, j in enumerate(models):
    pl.loglog(L, models[i], color = 'k', alpha = 0.1)
#pl.title('0.5-2 KeV LDDE1 at z ~ {:.1f}'.format(z))
#pl.ylim(10**-9.1, 10**-2)
#ax3.set_xlabel(r'$L_X$')
#ax3.set_ylabel(r'$\phi(L_X)$')
#ax3.legend((Legend3), loc='best')
#pl.show()
#------------------------------------------------------------
#z = 5.0
#L = np.logspace(41, 47, 100)
#m = 100
#Legend4 = ['Blue = LDDE2 softband']
#fig3 = pl.figure(3); ax3 = fig3.add_subplot(111)
softpars = a15.qsolf_LDDE2_softpars
softerr = a15.qsolf_LDDE2_softerr
softall = softpars.copy()
softall.update(softerr)
softsamples = a15.randomsamples(1000, **softall)
models = []
for t in range(m):
    model = []
    models.append(model)
    for Lx in L:
        model1 = a15.LuminosityFunction(Lx, z, LDDE2 = True, **softsamples[t])
        model.append(model1)
for i, j in enumerate(models):
    pl.loglog(L, models[i], color = 'b', alpha = 0.1)
pl.title(r'Different models for $\phi(L_X)$ for soft and hard bands at $z$ ~ ${:.1f}$'.format(z))
pl.ylim(10**-9.1, 10**-2)
ax3.set_xlabel(r'$L_X$')
ax3.set_ylabel(r'$\phi(L_X)$')
#ax3.legend((Legend4), loc='best')
ax3.legend((Legend), loc='best')
pl.show()
#------------------------------------------------------------
|
from PyQt5.QtCore import QTimer,Qt,QDateTime,QUrl,pyqtProperty
from PyQt5.QtWidgets import QApplication,QWidget,QGridLayout,QListWidget,QLabel,\
QVBoxLayout,QHBoxLayout,QPushButton
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtWebChannel import QWebChannel
from MySharedObject import MySharedObject
import sys
# Embedded JavaScript: PyQt5 <-> JavaScript interaction via QWebChannel.
class TimerDemo(QWidget):
    """Hosts a QWebEngineView and exposes a Python object to the page's JS."""

    def __init__(self, parent=None):
        super(TimerDemo, self).__init__(parent)
        self.initUI()

    def initUI(self):
        layout = QVBoxLayout()
        self.browser = QWebEngineView()
        self.browser.load(QUrl('index.html'))
        # Create the shared object bridge. The channel and the registered
        # object MUST be kept as instance attributes: QWebChannel does not
        # take ownership, so storing them in locals (as the original did)
        # lets Python garbage-collect them when initUI returns and the JS
        # bridge silently stops working.
        self.channel = QWebChannel()
        self.myobj = MySharedObject()
        self.channel.registerObject("bridge", self.myobj)
        self.browser.page().setWebChannel(self.channel)
        layout.addWidget(self.browser)
        self.setLayout(layout)
if __name__=='__main__':
    # Standard Qt bootstrap: create the application, show the demo window,
    # and hand control to the Qt event loop until the window closes.
    app=QApplication(sys.argv)
    demo=TimerDemo()
    demo.show()
    sys.exit(app.exec_())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the helpers for command line tools."""
import io
import os
import unittest
try:
import win32console
except ImportError:
win32console = None
from dfvfs.lib import definitions
from dfvfs.helpers import command_line
from dfvfs.path import factory as path_spec_factory
from dfvfs.volume import apfs_volume_system
from dfvfs.volume import gpt_volume_system
from dfvfs.volume import lvm_volume_system
from dfvfs.volume import tsk_volume_system
from dfvfs.volume import vshadow_volume_system
from tests import test_lib as shared_test_lib
class CLIInputReaderTest(shared_test_lib.BaseTestCase):
    """Tests for the command line interface input reader interface."""

    def testInitialize(self):
        """Tests the __init__ function."""
        reader = command_line.CLIInputReader()
        self.assertIsNotNone(reader)
class FileObjectInputReaderTest(shared_test_lib.BaseTestCase):
    """Tests for the file object command line interface input reader."""

    # Two ASCII lines, one UTF-8 line with non-ASCII bytes and one
    # BOM-prefixed UTF-16 line, so decode-error handling can be exercised.
    _TEST_DATA = (
        b'A first string\n'
        b'A 2nd string\n'
        b'\xc3\xberi\xc3\xb0ja string\n'
        b'\xff\xfef\x00j\x00\xf3\x00r\x00\xf0\x00a\x00 \x00b\x00a\x00n\x00d\x00')

    def testReadAscii(self):
        """Tests the Read function with ASCII encoding."""
        test_reader = command_line.FileObjectInputReader(
            io.BytesIO(self._TEST_DATA), encoding='ascii')

        self.assertEqual(test_reader.Read(), 'A first string\n')
        self.assertEqual(test_reader.Read(), 'A 2nd string\n')

        # Non-ASCII bytes are replaced with U+FFFD.
        self.assertEqual(
            test_reader.Read(), '\ufffd\ufffdri\ufffd\ufffdja string\n')

        expected_string = (
            '\ufffd\ufffdf\x00j\x00\ufffd\x00r\x00\ufffd\x00a\x00 '
            '\x00b\x00a\x00n\x00d\x00')
        self.assertEqual(test_reader.Read(), expected_string)

    def testReadUtf8(self):
        """Tests the Read function with UTF-8 encoding."""
        test_reader = command_line.FileObjectInputReader(
            io.BytesIO(self._TEST_DATA))

        self.assertEqual(test_reader.Read(), 'A first string\n')
        self.assertEqual(test_reader.Read(), 'A 2nd string\n')

        # Valid UTF-8 bytes decode cleanly.
        self.assertEqual(test_reader.Read(), 'þriðja string\n')

        # The UTF-16 bytes are not valid UTF-8; invalid bytes are replaced.
        expected_string = (
            '\ufffd\ufffdf\x00j\x00\ufffd\x00r\x00\ufffd\x00a\x00 '
            '\x00b\x00a\x00n\x00d\x00')
        self.assertEqual(test_reader.Read(), expected_string)
class StdinInputReaderTest(shared_test_lib.BaseTestCase):
    """Tests for the stdin command line interface input reader."""

    def testInitialize(self):
        """Tests the __init__ function."""
        self.assertIsNotNone(command_line.StdinInputReader())
class CLIOutputWriter(shared_test_lib.BaseTestCase):
    """Tests for the command line interface output writer interface."""

    def testInitialize(self):
        """Tests the __init__ function."""
        self.assertIsNotNone(command_line.CLIOutputWriter())
class FileObjectOutputWriterTest(shared_test_lib.BaseTestCase):
    """Tests for the file object command line interface output writer."""

    def _ReadOutput(self, file_object):
        """Reads all output added since the last call to ReadOutput.

        Args:
          file_object (file): file-like object.

        Returns:
          str: output data.
        """
        file_object.seek(0, os.SEEK_SET)
        output_data = file_object.read()

        # Reset the buffer so the next read only sees fresh output.
        file_object.seek(0, os.SEEK_SET)
        file_object.truncate(0)
        return output_data

    def testWriteAscii(self):
        """Tests the Write function with ASCII encoding."""
        buffer_object = io.BytesIO()
        output_writer = command_line.FileObjectOutputWriter(
            buffer_object, encoding='ascii')

        output_writer.Write('A first string\n')
        self.assertEqual(self._ReadOutput(buffer_object), b'A first string\n')

        # Non-ASCII characters cannot be encoded and are replaced with '?'.
        output_writer.Write('þriðja string\n')
        self.assertEqual(self._ReadOutput(buffer_object), b'?ri?ja string\n')

    def testWriteUtf8(self):
        """Tests the Write function with UTF-8 encoding."""
        buffer_object = io.BytesIO()
        output_writer = command_line.FileObjectOutputWriter(buffer_object)

        output_writer.Write('A first string\n')
        self.assertEqual(self._ReadOutput(buffer_object), b'A first string\n')

        # Non-ASCII characters are UTF-8 encoded.
        output_writer.Write('þriðja string\n')
        self.assertEqual(
            self._ReadOutput(buffer_object), b'\xc3\xberi\xc3\xb0ja string\n')
class StdoutOutputWriterTest(shared_test_lib.BaseTestCase):
    """Tests for the stdout command line interface output writer."""

    def testWriteAscii(self):
        """Tests the Write function with ASCII encoding."""
        output_writer = command_line.StdoutOutputWriter(encoding='ascii')
        output_writer.Write('A first string\n')
class CLITabularTableViewTest(shared_test_lib.BaseTestCase):
    """Tests for the command line tabular table view class."""

    # Placeholder: no assertions implemented for this class yet.
    # TODO: add tests for _WriteRow
    # TODO: add tests for AddRow
    # TODO: add tests for Write
class CLIVolumeScannerMediatorTest(shared_test_lib.BaseTestCase):
  """Tests for the command line volume scanner mediator.

  The mediator is exercised with scripted input/output: a BytesIO-backed
  input reader simulates what a user would type and a BytesIO-backed
  output writer captures what would be printed to the console.
  """

  # pylint: disable=protected-access

  def testFormatHumanReadableSize(self):
    """Tests the _FormatHumanReadableSize function."""
    test_mediator = command_line.CLIVolumeScannerMediator()

    expected_size_string = '1000 B'
    size_string = test_mediator._FormatHumanReadableSize(1000)
    self.assertEqual(size_string, expected_size_string)

    expected_size_string = '1.0KiB / 1.0kB (1024 B)'
    size_string = test_mediator._FormatHumanReadableSize(1024)
    self.assertEqual(size_string, expected_size_string)

    expected_size_string = '976.6KiB / 1.0MB (1000000 B)'
    size_string = test_mediator._FormatHumanReadableSize(1000000)
    self.assertEqual(size_string, expected_size_string)

    expected_size_string = '1.0MiB / 1.0MB (1048576 B)'
    size_string = test_mediator._FormatHumanReadableSize(1048576)
    self.assertEqual(size_string, expected_size_string)

  def testParseVolumeIdentifiersString(self):
    """Tests the _ParseVolumeIdentifiersString function."""
    test_mediator = command_line.CLIVolumeScannerMediator()

    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('')
    self.assertEqual(volume_identifiers, [])

    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('all')
    self.assertEqual(volume_identifiers, ['all'])

    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('v1')
    self.assertEqual(volume_identifiers, ['v1'])

    # Bare numbers are normalized to prefixed identifiers.
    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('1')
    self.assertEqual(volume_identifiers, ['v1'])

    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('1,3')
    self.assertEqual(volume_identifiers, ['v1', 'v3'])

    # Ranges expressed as "first..last" are expanded inclusively.
    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('1..3')
    self.assertEqual(volume_identifiers, ['v1', 'v2', 'v3'])

    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('v1..v3')
    self.assertEqual(volume_identifiers, ['v1', 'v2', 'v3'])

    volume_identifiers = test_mediator._ParseVolumeIdentifiersString('1..3,5')
    self.assertEqual(volume_identifiers, ['v1', 'v2', 'v3', 'v5'])

    with self.assertRaises(ValueError):
      test_mediator._ParseVolumeIdentifiersString('bogus')

    with self.assertRaises(ValueError):
      test_mediator._ParseVolumeIdentifiersString('1..bogus')

  def testPrintAPFSVolumeIdentifiersOverview(self):
    """Tests the _PrintAPFSVolumeIdentifiersOverview function."""
    test_path = self._GetTestFilePath(['apfs.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/',
        parent=test_raw_path_spec)

    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(test_apfs_container_path_spec)

    file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        output_writer=test_output_writer)

    test_mediator._PrintAPFSVolumeIdentifiersOverview(volume_system, ['apfs1'])

    file_object.seek(0, os.SEEK_SET)
    output_data = file_object.read()

    expected_output_data = [
        b'The following Apple File System (APFS) volumes were found:',
        b'',
        b'Identifier Name',
        b'apfs1 apfs_test',
        b'']

    if not win32console:
      # Without win32console the header row is emitted with ANSI bold codes.
      # Using join here since Python 3 does not support format of bytes.
      expected_output_data[2] = b''.join([
          b'\x1b[1m', expected_output_data[2], b'\x1b[0m'])

    self.assertEqual(output_data.split(b'\n'), expected_output_data)

  def testPrintLVMVolumeIdentifiersOverview(self):
    """Tests the _PrintLVMVolumeIdentifiersOverview function."""
    test_path = self._GetTestFilePath(['lvm.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_lvm_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_LVM, location='/', parent=test_raw_path_spec)

    volume_system = lvm_volume_system.LVMVolumeSystem()
    volume_system.Open(test_lvm_container_path_spec)

    file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        output_writer=test_output_writer)

    test_mediator._PrintLVMVolumeIdentifiersOverview(
        volume_system, ['lvm1'])

    file_object.seek(0, os.SEEK_SET)
    output_data = file_object.read()

    expected_output_data = [
        b'The following Logical Volume Manager (LVM) volumes were found:',
        b'',
        b'Identifier',
        b'lvm1',
        b'']

    if not win32console:
      # Without win32console the header row is emitted with ANSI bold codes.
      # Using join here since Python 3 does not support format of bytes.
      expected_output_data[2] = b''.join([
          b'\x1b[1m', expected_output_data[2], b'\x1b[0m'])

    self.assertEqual(output_data.split(b'\n'), expected_output_data)

  def testPrintPartitionIdentifiersOverviewGPT(self):
    """Tests the _PrintPartitionIdentifiersOverview function on GPT back-end."""
    test_path = self._GetTestFilePath(['gpt.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_gpt_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_GPT, location='/', parent=test_raw_path_spec)

    volume_system = gpt_volume_system.GPTVolumeSystem()
    volume_system.Open(test_gpt_container_path_spec)

    file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        output_writer=test_output_writer)

    test_mediator._PrintPartitionIdentifiersOverview(
        volume_system, ['p1', 'p2'])

    file_object.seek(0, os.SEEK_SET)
    output_data = file_object.read()

    expected_output_data = [
        b'The following partitions were found:',
        b'',
        b'Identifier Offset (in bytes) Size (in bytes)',
        b'p1 1048576 (0x00100000) 64.0KiB / 65.5kB (65536 B)',
        b'p2 2097152 (0x00200000) 64.0KiB / 65.5kB (65536 B)',
        b'']

    if not win32console:
      # Without win32console the header row is emitted with ANSI bold codes.
      # Using join here since Python 3 does not support format of bytes.
      expected_output_data[2] = b''.join([
          b'\x1b[1m', expected_output_data[2], b'\x1b[0m'])

    self.assertEqual(output_data.split(b'\n'), expected_output_data)

  def testPrintPartitionIdentifiersOverviewGPTNonSequential(self):
    """Tests the _PrintPartitionIdentifiersOverview function on GPT back-end."""
    test_path = self._GetTestFilePath(['gpt_non_sequential.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_gpt_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_GPT, location='/', parent=test_raw_path_spec)

    volume_system = gpt_volume_system.GPTVolumeSystem()
    volume_system.Open(test_gpt_container_path_spec)

    file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        output_writer=test_output_writer)

    test_mediator._PrintPartitionIdentifiersOverview(
        volume_system, ['p1', 'p3'])

    file_object.seek(0, os.SEEK_SET)
    output_data = file_object.read()

    expected_output_data = [
        b'The following partitions were found:',
        b'',
        b'Identifier Offset (in bytes) Size (in bytes)',
        b'p1 1048576 (0x00100000) 64.0KiB / 65.5kB (65536 B)',
        b'p3 3145728 (0x00300000) 64.0KiB / 65.5kB (65536 B)',
        b'']

    if not win32console:
      # Without win32console the header row is emitted with ANSI bold codes.
      # Using join here since Python 3 does not support format of bytes.
      expected_output_data[2] = b''.join([
          b'\x1b[1m', expected_output_data[2], b'\x1b[0m'])

    self.assertEqual(output_data.split(b'\n'), expected_output_data)

  def testPrintPartitionIdentifiersOverviewTSK(self):
    """Tests the _PrintPartitionIdentifiersOverview function on TSK back-end."""
    test_path = self._GetTestFilePath(['mbr.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_tsk_partition_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK_PARTITION, parent=test_raw_path_spec)

    volume_system = tsk_volume_system.TSKVolumeSystem()
    volume_system.Open(test_tsk_partition_path_spec)

    file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        output_writer=test_output_writer)

    test_mediator._PrintPartitionIdentifiersOverview(
        volume_system, ['p1', 'p2'])

    file_object.seek(0, os.SEEK_SET)
    output_data = file_object.read()

    expected_output_data = [
        b'The following partitions were found:',
        b'',
        b'Identifier Offset (in bytes) Size (in bytes)',
        b'p1 512 (0x00000200) 64.5KiB / 66.0kB (66048 B)',
        b'p2 67072 (0x00010600) 64.5KiB / 66.0kB (66048 B)',
        b'']

    if not win32console:
      # Without win32console the header row is emitted with ANSI bold codes.
      # Using join here since Python 3 does not support format of bytes.
      expected_output_data[2] = b''.join([
          b'\x1b[1m', expected_output_data[2], b'\x1b[0m'])

    self.assertEqual(output_data.split(b'\n'), expected_output_data)

  def testPrintVSSStoreIdentifiersOverview(self):
    """Tests the _PrintVSSStoreIdentifiersOverview function."""
    test_path = self._GetTestFilePath(['vss.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_vss_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, parent=test_raw_path_spec)

    volume_system = vshadow_volume_system.VShadowVolumeSystem()
    volume_system.Open(test_vss_path_spec)

    file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        output_writer=test_output_writer)

    test_mediator._PrintVSSStoreIdentifiersOverview(
        volume_system, ['vss1', 'vss2'])

    file_object.seek(0, os.SEEK_SET)
    output_data = file_object.read()

    expected_output_data = [
        b'The following Volume Shadow Snapshots (VSS) were found:',
        b'',
        b'Identifier Creation Time',
        b'vss1 2021-05-01 17:40:03.2230304',
        b'vss2 2021-05-01 17:41:28.2249863',
        b'']

    if not win32console:
      # Without win32console the header row is emitted with ANSI bold codes.
      # Using join here since Python 3 does not support format of bytes.
      expected_output_data[2] = b''.join([
          b'\x1b[1m', expected_output_data[2], b'\x1b[0m'])

    self.assertEqual(output_data.split(b'\n'), expected_output_data)

  def testReadSelectedVolumesAPFS(self):
    """Tests the _ReadSelectedVolumes function on APFS back-end."""
    test_path = self._GetTestFilePath(['apfs.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/',
        parent=test_raw_path_spec)

    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(test_apfs_container_path_spec)

    # Simulate the user selecting all volumes.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader)

    selected_volumes = test_mediator._ReadSelectedVolumes(
        volume_system, prefix='apfs')
    self.assertEqual(selected_volumes, ['apfs1'])

  def testReadSelectedVolumesGPT(self):
    """Tests the _ReadSelectedVolumes function on GPT back-end."""
    test_path = self._GetTestFilePath(['gpt.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_gpt_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_GPT, location='/', parent=test_raw_path_spec)

    volume_system = gpt_volume_system.GPTVolumeSystem()
    volume_system.Open(test_gpt_container_path_spec)

    # Simulate the user selecting all partitions.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader)

    selected_volumes = test_mediator._ReadSelectedVolumes(
        volume_system, prefix='p')
    self.assertEqual(selected_volumes, ['p1', 'p2'])

  def testReadSelectedVolumesGPTNonSequential(self):
    """Tests the _ReadSelectedVolumes function on GPT back-end."""
    test_path = self._GetTestFilePath(['gpt_non_sequential.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_gpt_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_GPT, location='/', parent=test_raw_path_spec)

    volume_system = gpt_volume_system.GPTVolumeSystem()
    volume_system.Open(test_gpt_container_path_spec)

    # Simulate the user selecting all partitions.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader)

    selected_volumes = test_mediator._ReadSelectedVolumes(
        volume_system, prefix='p')
    self.assertEqual(selected_volumes, ['p1', 'p3'])

  def testReadSelectedVolumesLVM(self):
    """Tests the _ReadSelectedVolumes function on LVM back-end."""
    test_path = self._GetTestFilePath(['lvm.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_lvm_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_LVM, location='/', parent=test_raw_path_spec)

    volume_system = lvm_volume_system.LVMVolumeSystem()
    volume_system.Open(test_lvm_container_path_spec)

    # Simulate the user selecting all volumes.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader)

    selected_volumes = test_mediator._ReadSelectedVolumes(
        volume_system, prefix='lvm')
    self.assertEqual(selected_volumes, ['lvm1', 'lvm2'])

  def testReadSelectedVolumesTSK(self):
    """Tests the _ReadSelectedVolumes function on TSK back-end."""
    test_path = self._GetTestFilePath(['mbr.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_tsk_partition_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK_PARTITION, parent=test_raw_path_spec)

    volume_system = tsk_volume_system.TSKVolumeSystem()
    volume_system.Open(test_tsk_partition_path_spec)

    # Simulate the user selecting all partitions.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader)

    selected_volumes = test_mediator._ReadSelectedVolumes(
        volume_system, prefix='p')
    self.assertEqual(selected_volumes, ['p1', 'p2'])

  def testReadSelectedVolumesVShadow(self):
    """Tests the _ReadSelectedVolumes function on VShadow back-end."""
    test_path = self._GetTestFilePath(['vss.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_vss_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, parent=test_raw_path_spec)

    volume_system = vshadow_volume_system.VShadowVolumeSystem()
    volume_system.Open(test_vss_path_spec)

    # Simulate the user selecting all stores.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader)

    selected_volumes = test_mediator._ReadSelectedVolumes(
        volume_system, prefix='vss')
    self.assertEqual(selected_volumes, ['vss1', 'vss2'])

  def testGetAPFSVolumeIdentifiers(self):
    """Tests the GetAPFSVolumeIdentifiers function."""
    test_path = self._GetTestFilePath(['apfs.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_apfs_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_APFS_CONTAINER, location='/',
        parent=test_raw_path_spec)

    volume_system = apfs_volume_system.APFSVolumeSystem()
    volume_system.Open(test_apfs_container_path_spec)

    # Test selection of single volume.
    input_file_object = io.BytesIO(b'1\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers(
        volume_system, ['apfs1'])
    self.assertEqual(volume_identifiers, ['apfs1'])

    # Test selection of single volume.
    input_file_object = io.BytesIO(b'apfs1\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers(
        volume_system, ['apfs1'])
    self.assertEqual(volume_identifiers, ['apfs1'])

    # Test selection of single volume with invalid input on first attempt.
    input_file_object = io.BytesIO(b'bogus\napfs1\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers(
        volume_system, ['apfs1'])
    self.assertEqual(volume_identifiers, ['apfs1'])

    # Test selection of all volumes.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers(
        volume_system, ['apfs1'])
    self.assertEqual(volume_identifiers, ['apfs1'])

    # Test selection of no volumes.
    input_file_object = io.BytesIO(b'\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetAPFSVolumeIdentifiers(
        volume_system, ['apfs1'])
    self.assertEqual(volume_identifiers, [])

  def testGetLVMVolumeIdentifiers(self):
    """Tests the GetLVMVolumeIdentifiers function."""
    test_path = self._GetTestFilePath(['lvm.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_lvm_container_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_LVM, location='/', parent=test_raw_path_spec)

    volume_system = lvm_volume_system.LVMVolumeSystem()
    volume_system.Open(test_lvm_container_path_spec)

    # Test selection of single volume.
    input_file_object = io.BytesIO(b'1\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetLVMVolumeIdentifiers(
        volume_system, ['lvm1'])
    self.assertEqual(volume_identifiers, ['lvm1'])

    # Test selection of single volume.
    input_file_object = io.BytesIO(b'lvm1\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetLVMVolumeIdentifiers(
        volume_system, ['lvm1'])
    self.assertEqual(volume_identifiers, ['lvm1'])

    # Test selection of single volume with invalid input on first attempt.
    input_file_object = io.BytesIO(b'bogus\nlvm1\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetLVMVolumeIdentifiers(
        volume_system, ['lvm1'])
    self.assertEqual(volume_identifiers, ['lvm1'])

    # Test selection of all volumes.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetLVMVolumeIdentifiers(
        volume_system, ['lvm1', 'lvm2'])
    self.assertEqual(volume_identifiers, ['lvm1', 'lvm2'])

    # Test selection of no volumes.
    input_file_object = io.BytesIO(b'\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetLVMVolumeIdentifiers(
        volume_system, ['lvm1'])
    self.assertEqual(volume_identifiers, [])

  def testGetPartitionIdentifiers(self):
    """Tests the GetPartitionIdentifiers function."""
    test_path = self._GetTestFilePath(['mbr.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_tsk_partition_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_TSK_PARTITION, parent=test_raw_path_spec)

    volume_system = tsk_volume_system.TSKVolumeSystem()
    volume_system.Open(test_tsk_partition_path_spec)

    # Test selection of single partition.
    input_file_object = io.BytesIO(b'2\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetPartitionIdentifiers(
        volume_system, ['p1', 'p2'])
    self.assertEqual(volume_identifiers, ['p2'])

    # Test selection of single partition.
    input_file_object = io.BytesIO(b'p2\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetPartitionIdentifiers(
        volume_system, ['p1', 'p2'])
    self.assertEqual(volume_identifiers, ['p2'])

    # Test selection of single partition with invalid input on first attempt.
    input_file_object = io.BytesIO(b'bogus\np2\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetPartitionIdentifiers(
        volume_system, ['p1', 'p2'])
    self.assertEqual(volume_identifiers, ['p2'])

    # Test selection of all partitions.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetPartitionIdentifiers(
        volume_system, ['p1', 'p2'])
    self.assertEqual(volume_identifiers, ['p1', 'p2'])

    # Test selection of no partitions.
    input_file_object = io.BytesIO(b'\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetPartitionIdentifiers(
        volume_system, ['p1', 'p2'])
    self.assertEqual(volume_identifiers, [])

  def testGetVSSStoreIdentifiers(self):
    """Tests the GetVSSStoreIdentifiers function."""
    test_path = self._GetTestFilePath(['vss.raw'])
    self._SkipIfPathNotExists(test_path)

    test_os_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_OS, location=test_path)
    test_raw_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_RAW, parent=test_os_path_spec)
    test_vss_path_spec = path_spec_factory.Factory.NewPathSpec(
        definitions.TYPE_INDICATOR_VSHADOW, parent=test_raw_path_spec)

    volume_system = vshadow_volume_system.VShadowVolumeSystem()
    volume_system.Open(test_vss_path_spec)

    # Test selection of single store.
    input_file_object = io.BytesIO(b'2\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetVSSStoreIdentifiers(
        volume_system, ['vss1', 'vss2'])
    self.assertEqual(volume_identifiers, ['vss2'])

    # Test selection of single store.
    input_file_object = io.BytesIO(b'vss2\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetVSSStoreIdentifiers(
        volume_system, ['vss1', 'vss2'])
    self.assertEqual(volume_identifiers, ['vss2'])

    # Test selection of single store with invalid input on first attempt.
    input_file_object = io.BytesIO(b'bogus\nvss2\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetVSSStoreIdentifiers(
        volume_system, ['vss1', 'vss2'])
    self.assertEqual(volume_identifiers, ['vss2'])

    # Test selection of all stores.
    input_file_object = io.BytesIO(b'all\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetVSSStoreIdentifiers(
        volume_system, ['vss1', 'vss2'])
    self.assertEqual(volume_identifiers, ['vss1', 'vss2'])

    # Test selection of no stores.
    input_file_object = io.BytesIO(b'\n')
    test_input_reader = command_line.FileObjectInputReader(input_file_object)

    output_file_object = io.BytesIO()
    test_output_writer = command_line.FileObjectOutputWriter(output_file_object)

    test_mediator = command_line.CLIVolumeScannerMediator(
        input_reader=test_input_reader, output_writer=test_output_writer)

    volume_identifiers = test_mediator.GetVSSStoreIdentifiers(
        volume_system, ['vss1', 'vss2'])
    self.assertEqual(volume_identifiers, [])

  # TODO: add tests for UnlockEncryptedVolume
# Run the module's tests when executed directly.
if __name__ == '__main__':
  unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.