text stringlengths 38 1.54M |
|---|
from pyspark import SparkConf
from pyspark.sql import SparkSession, Window
from pyspark.sql.types import ArrayType, StructField, StructType, StringType, IntegerType, DecimalType, FloatType
from pyspark.sql.functions import udf, collect_list, struct, explode, pandas_udf, PandasUDFType, col
from decimal import Decimal
import random
import pandas as pd
import numpy as np

# Demo script: pandas (Arrow) UDFs — scalar, grouped-map, grouped-agg, window.
appName = "Python Example - UDF with Apache Arrow (Pandas UDF)"  # NOTE(review): appName is never passed to the session builder — confirm it should be
master = 'local'

# Create Spark session
conf = SparkConf().setMaster(master)
spark = SparkSession.builder.config(conf=conf) \
    .getOrCreate()

# Enable Arrow optimization and fallback if there is no Arrow installed
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")

# Construct the data frame directly (without reading from HDFS):
# (CustomerID, TransactionID, signed random Amount) for 10 customers x 100 txns.
cust_count = 10
txn_count = 100
data = [(i, j, i * j * random.random() * random.choice((-1, 1)))
        for j in range(txn_count) for i in range(cust_count)]

# Create a schema for the dataframe
schema = StructType([
    StructField('CustomerID', IntegerType(), False),
    StructField('TransactionID', IntegerType(), False),
    StructField('Amount', FloatType(), True)
])

# Create the data frame
df = spark.createDataFrame(data, schema=schema)
# Function 1 - Scalar function - dervice a new column with value as Credit or Debit.
def calc_credit_debit_func(amount):
    """Scalar pandas-UDF body: label each amount as "Credit" or "Debit".

    Amounts below zero map to "Debit"; zero or positive map to "Credit".
    Accepts any iterable of numbers and returns a pandas Series of labels.
    """
    labels = ["Debit" if value < 0 else "Credit" for value in amount]
    return pd.Series(labels)
# Wrap the plain function as a scalar pandas UDF producing a string column,
# then derive CreditOrDebit from Amount and display the result.
fn_credit_debit = pandas_udf(calc_credit_debit_func, returnType=StringType())
df = df.withColumn("CreditOrDebit", fn_credit_debit(df.Amount))
df.show()
# Function 2 - Group map function - calculate the difference from mean
# Output schema for the grouped-map UDF below: the input columns plus Diff.
attributes = [
    StructField('CustomerID', IntegerType(), False),
    StructField('TransactionID', IntegerType(), False),
    StructField('Amount', FloatType(), False),
    StructField('CreditOrDebit', StringType(), False),
    StructField('Diff', FloatType(), False)
]
# NOTE(review): attribute_names is computed but never used afterwards — confirm.
attribute_names = [a.name for a in attributes]
@pandas_udf(StructType(attributes), PandasUDFType.GROUPED_MAP)
def fn_calc_diff_from_mean(txn):
    """Grouped-map UDF: append a Diff column = Amount minus the group mean.

    Receives one customer's transactions as a pandas DataFrame and returns
    the same frame with the extra Diff column (schema: `attributes`).
    """
    group_mean = txn.Amount.mean()
    return txn.assign(Diff=txn.Amount - group_mean)
# Apply the grouped-map UDF per customer and show (up to) 100 rows.
df_map = df.groupby("CustomerID").apply(fn_calc_diff_from_mean)
df_map.show(100)
# Function 3 - Group aggregate function - calculate mean only
@pandas_udf(FloatType(), PandasUDFType.GROUPED_AGG)
def mean_udf(amount):
    """Grouped-aggregate UDF: mean of the Amount values within one group."""
    return np.mean(amount)
# Grouped aggregation: one Mean row per customer.
df_agg = df.groupby("CustomerID").agg(mean_udf(df['Amount']).alias("Mean"))
df_agg.show()

# Function 4 - the same aggregate UDF used as a window function: every row of
# a customer carries that customer's overall mean (unbounded window).
w = Window \
    .partitionBy('CustomerID') \
    .rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
df.withColumn('Mean', mean_udf(df['Amount']).over(w)).show()
|
class Human:
    """An agent moving along path segments, with progress and path bookkeeping.

    `start`/`end` delimit the current segment, `finish` is the distance already
    covered on it, and `isCompute` flags that the agent needs a new segment.
    """

    def __init__(self, id, speed, origin):
        self.id = id
        self.speed = speed
        self.origin = origin
        # Movement state for the current segment.
        self.isCompute = 1
        self.finish = 0
        self.start = origin
        self.end = origin
        self.arrive = 0
        self.time = 0
        self.length = 0
        self.output = 0
        # Path/cost bookkeeping containers.
        self.history = []
        self.allPath = []
        self.path = []
        self.allCosts = []
        self.initPaths = []
        self.costs = []
        self.Paths = []
        self.Costs = []

    def get_isCompute(self):
        """Return the recompute flag (accessor kept for API compatibility)."""
        return self.isCompute

    def get_end(self):
        """Return the endpoint of the current segment."""
        return self.end

    def set_end(self, end):
        """Set the endpoint of the current segment."""
        self.end = end

    def is_compute(self):
        """Update and return isCompute: 1 if the next step overruns the segment."""
        overruns = self.speed + self.finish > self.length
        self.isCompute = 1 if overruns else 0
        return self.isCompute

    def update_start_end(self, start, end):
        """Move onto a new segment and clear the recompute flag."""
        self.start = start
        self.end = end
        self.isCompute = 0

    def set_finish(self, finish):
        """Set the distance already covered on the current segment."""
        self.finish = finish
def solution(new_id):
    """Normalise a user id (KakaoTalk "new id recommendation" rules).

    Steps: (1) lowercase; (2) drop everything except a-z, 0-9, '-', '_', '.';
    (3) collapse runs of '.' to a single '.'; (4) trim a leading/trailing '.';
    (5) empty id becomes 'a'; (6) cut to 15 chars, dropping a trailing '.'
    left by the cut; (7) repeat the last character until length >= 3.

    Returns the recommended id string.
    """
    import re

    # Step 1: lowercase everything.
    new_id = new_id.lower()
    # Step 2: keep only lowercase letters, digits, '-', '_' and '.'.
    # (raw string fixes the invalid-escape DeprecationWarning of the original)
    new_id = re.sub(r'[^a-z0-9\-._]', '', new_id)
    # Step 3: collapse two or more consecutive dots into one.
    new_id = re.sub(r'\.{2,}', '.', new_id)
    # Step 4: remove a dot at the very start or very end.
    new_id = re.sub(r'^\.|\.$', '', new_id)
    # Step 5: an empty id defaults to "a".
    if not new_id:
        new_id = 'a'
    # Step 6: cut to 15 characters; the cut can expose at most one trailing
    # dot (step 3 removed consecutive dots), which must also be dropped.
    if len(new_id) > 15:
        new_id = new_id[:15].rstrip('.')
    # Step 7: pad with the last character until the id is at least 3 long.
    while len(new_id) < 3:
        new_id += new_id[-1]
    return new_id
import random,time,mcpi.minecraft as minecraft  # mcpi.minecraft renamed to `minecraft` for ease of use
mc=minecraft.Minecraft.create()  # connect to a running Minecraft game
time.sleep(3)  # wait 3 seconds before the "rain" starts
gravel=13  # Minecraft block ID for gravel; NOTE(review): unused below — setBlock hard-codes randint(12,13) (12 = sand)
while True:  # loop forever
    x,y,z=mc.player.getPos()  # current player position
    # Place a random sand/gravel block 50 above the player within a 10-block
    # radius; the blocks fall, so it looks like it is raining sand.
    mc.setBlock(float(x+random.randint(-10,10)),float(y+50),float(z+random.randint(-10,10)),random.randint(12,13))
    time.sleep(0.2)  # throttle the loop so it does not hog the CPU
|
"""
What are you doing here? GET OUT OF HERE. >:P
"""
import random # imports random library
for _ in range(10): #repeats 10 times
print(random.randint(0, 200) # prints a random int between 0 and 200
print("done") # prints "done" once the for loop is done
|
import numpy as np
import matplotlib.pyplot as plt
import sys
def liner_regression_gradient_descent(x, y, alpha=0.0005, initial_theta=None, iter_num=1000, stream=sys.stdout):
    """Fit a linear model with batch gradient descent.

    Args:
        x: ndarray of shape (n,) or (n, d); a bias column is prepended.
        y: targets, shape (n,).
        alpha: initial learning rate, decayed linearly to ~0 over iter_num.
        initial_theta: starting parameters (bias first); all ones if None.
        iter_num: number of gradient steps.
        stream: file-like object the per-iteration cost history is written
            to, or None to skip writing.

    Returns:
        (theta, last_cost): fitted parameters and the final training cost.
    """
    # Promote a 1-D feature vector to a single-column design matrix.
    if x.ndim == 1:  # fixed: `is 1` compared object identity, not value
        x = x.reshape(1, -1).transpose()
    assert len(x) == len(y)  # fixed: `is` -> `==`
    data_num = len(x)
    new_x = np.hstack((np.ones(data_num).reshape(-1, 1), x))
    trans_new_x = new_x.transpose()
    initial_theta = np.ones(len(x[0]) + 1) if initial_theta is None else initial_theta
    d_alpha = alpha / iter_num  # linear learning-rate decay per step
    cost_saver = []
    for _ in range(iter_num):
        loss = np.dot(new_x, initial_theta) - y
        cost = np.sum(loss ** 2) / (2 * data_num)
        # NOTE: the gradient is deliberately not divided by data_num; the
        # default alpha is tuned to that scale.
        gradient = np.dot(trans_new_x, loss)
        initial_theta = initial_theta - alpha * gradient
        alpha -= d_alpha
        cost_saver.append(str(cost) + '\n')
    if stream is not None:
        for line in cost_saver:
            stream.write(line)
    return initial_theta, float(cost_saver[-1])
def linear_regression_ordinary_least_squares(x, y):
    """Closed-form ordinary least squares fit.

    Args:
        x: ndarray of shape (n,) or (n, d); a bias column is prepended.
        y: targets, shape (n,).

    Returns:
        (theta, r_squared): theta[0] is the intercept. The pseudo-inverse is
        used, so rank-deficient designs do not raise.
    """
    if x.ndim == 1:  # fixed: `is 1` compared object identity, not value
        x = x.reshape(-1, 1)
    assert len(x) == len(y)  # fixed: `is` -> `==`
    x = np.hstack((np.ones(len(x)).reshape(-1, 1), x))
    trans_x = np.transpose(x)
    y = y.reshape(-1, 1)
    trans_y = np.transpose(y)
    m, n = x.shape
    # theta = (X'X)^+ X' y
    middle_ = np.dot(np.linalg.pinv(np.dot(trans_x, x)), trans_x)
    thetha = np.dot(middle_, y).flatten()
    # R^2 computed via the hat matrix P = X (X'X)^+ X' and the centering
    # matrix L = I - 11'/m:  R^2 = (y'P'LPy) / (y'Ly).
    p = np.dot(x, middle_)
    trans_p = np.transpose(p)
    l = np.eye(m) - np.ones((m, m)) / m
    upper = np.linalg.multi_dot((trans_y, trans_p, l, p, y))
    lower = np.linalg.multi_dot((trans_y, l, y))
    r_2 = (upper / lower)[0][0]
    return thetha, r_2
def linear_regression_generalized_least_squares(x, y, cov_matrix=None):
    """Closed-form generalized least squares: theta = (X'V^-1 X)^+ X'V^-1 y.

    Args:
        x: ndarray of shape (n,) or (n, d); a bias column is prepended.
        y: targets, shape (n,).
        cov_matrix: error covariance V (n x n); identity (=> plain OLS)
            when None.

    Returns:
        Flattened parameter vector, intercept first.
    """
    if x.ndim == 1:  # fixed: `is 1` compared object identity, not value
        x = x.reshape(-1, 1)
    assert len(x) == len(y)  # fixed: `is` -> `==`
    y = y.reshape(-1, 1)
    x = np.hstack((np.ones(len(x)).reshape(-1, 1), x))
    trans_x = np.transpose(x)
    if cov_matrix is None:
        cov_matrix = np.eye(len(x))
    inv_v = np.linalg.pinv(cov_matrix)
    # fixed: the normal-equation matrix must be X'V^-1 (the original used
    # X'V, which is only correct when V is the identity)
    middle_ = np.dot(trans_x, inv_v)
    theta = np.linalg.multi_dot((np.linalg.pinv(np.dot(middle_, x)), trans_x, inv_v, y))
    return theta.flatten()
def test_linear_regression():
    """Placeholder: should exercise the regression functions above (unimplemented)."""
    pass

def visualize_data():
    """Placeholder: plotting helper (unimplemented; matplotlib is imported above)."""
    pass

def make_data():
    """Placeholder: synthetic data generator (unimplemented)."""
    pass
|
# English -> Spanish fruit name lookup, printed one pair per line.
fruits = {'apple': 'manzana', 'orange': 'naranja', 'grape': 'uva'}
for english, spanish in fruits.items():
    print(english + ' is ' + spanish + ' in Spanish')
import tensorflow as tf
import numpy as np
from tokenizers import Tokenizer
import random
from train import config, model_str

# Sampling configuration pulled from the training config.
INPUT_LEN = config["input_len"]
DIM = config["dim"]  # fixed: this assignment was duplicated in the original
OUTPUT_LEN = 40  # number of wordpieces to generate

# Load the trained model and the BPE tokenizer it was trained with.
model_filename = model_str(config)
model = tf.keras.models.load_model('./saved_models/' + model_filename)
tokenizer = Tokenizer.from_file("bpe-fi.tokenizer.json")
vocab = tokenizer.get_vocab()
inv_vocab = {value: key for key, value in vocab.items()}  # token id -> wordpiece
indices = list(inv_vocab.keys())

# Seed prompt, encoded and clipped to the model's fixed input window.
x0 = "moi moi mitä sinulle kullu väinö:"
x0 = np.array([tokenizer.encode(x0).ids])[:,-INPUT_LEN:]
print("X0 shape", x0.shape)
def probs(x, temp=1.0):
    """Softmax over logits `x` after scaling by `temp`.

    Subtracting the row maximum before exponentiating makes the computation
    numerically stable: the original overflowed to inf (and then nan) for
    large logits. The returned probabilities are mathematically identical.
    """
    x_prime = x * temp
    x_prime = np.exp(x_prime - np.max(x_prime))
    return x_prime / np.sum(x_prime)
# Autoregressive sampling: print the current last wordpiece, ask the model for
# the next-token distribution, sample one token, and slide the input window.
output = []
for i in range(OUTPUT_LEN):
    new_wordpiece = inv_vocab[int(x0[0,-1])]
    output.append(new_wordpiece)
    print(new_wordpiece)
    yi = model(x0)
    print("yi", yi.shape)
    p = probs(yi, temp=1.0)[0]
    x_new = np.array([random.choices(indices, weights=p)])
    # Drop the oldest token and append the freshly sampled one.
    # (fixed: removed the unused `x_old` temporary from the original)
    x0 = tf.concat([x0[:,1:], x_new], axis=1)
    print("x0", x0.shape)

print(" ".join(output))
output = "".join(output)
output = output.replace("▁", " ")  # SentencePiece word-boundary marker -> space
print(output)
|
from argparse import ArgumentParser

# CLI that renders a cover-letter template with company/hirer substituted.
parser = ArgumentParser()
parser.add_argument("-c", "--company", help="Help text")
parser.add_argument("-i", "--hirer", default="whom it may concern", help="Help text")
parser.add_argument("-p", action="store_true")
a = parser.parse_args()

# -p selects the PDF-oriented variant of the template.
if a.p:
    filePath = "./templates/general_dev_pdf.md"
else:
    filePath = "./templates/general_dev.md"

with open(filePath) as f:
    # fixed: read the file directly instead of joining readlines()
    fStr = f.read()

formatted = fStr.format(company=a.company, hirer=a.hirer)
print(formatted)
|
from flask import Flask, redirect, url_for, render_template, flash
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager, UserMixin, login_user, logout_user,\
current_user
from oauth import OAuthSignIn |
# Generated by Django 3.0.8 on 2021-05-14 17:42
from django.db import migrations
class Migration(migrations.Migration):
    """Drops the Covid_image model (auto-generated migration; do not hand-edit)."""

    # Must run after the previous covid19 migration.
    dependencies = [
        ('covid19', '0003_auto_20210511_2302'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Covid_image',
        ),
    ]
|
import csv
import json
import unittest
from io import BytesIO, StringIO
from unittest import mock
import requests
from bonobo.util.testing import BufferingNodeExecutionContext
from django.contrib.gis.geos import GEOSGeometry
from django.test import override_settings
from geostore.models import Feature, Layer
from terra_bonobo_nodes import common
class Test_TestCommon_CsvDictReader(unittest.TestCase):
def test_csvdirectreader(self):
csvfile = StringIO()
fieldnames = ["Test1"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
tableau = [
{"Test1": "test1"},
{"Test1": "iklojlk"},
]
for i in tableau:
writer.writerow(i)
csvfile.seek(0)
reader = csvfile.read()
csvdictreader = common.CsvDictReader()
tableau_rendu_csvdictreader = [row for row in csvdictreader(reader)]
self.assertSequenceEqual(tableau_rendu_csvdictreader, tableau)
def test_csvdirectreader_vide(self):
csvfile = BytesIO()
csvfile.seek(0)
reader = csvfile.read()
csvdictreader = common.CsvDictReader()
tableau_rendu_csvdictreader = [row for row in csvdictreader(reader)]
self.assertSequenceEqual(tableau_rendu_csvdictreader, [])
def test_dialect(self):
dialecte_expected = {
"delimiter": ":",
"quotechar": '"',
"escapechar": "True",
"doublequote": "False",
"skipinitialspace": "True",
"lineterminator": "\n",
"quoting": 0,
}
csvdictreader_dialect = common.CsvDictReader(**dialecte_expected)
self.assertDictEqual(
dialecte_expected, csvdictreader_dialect.get_dialect_kwargs()
)
class Test_TestCommon_GeojsonReader(unittest.TestCase):
def setUp(self):
self.dict_crs = {
"type": "EPSG",
"properties": {
"code": 4326,
"coordinate_order": [1, 0],
"name": "name_to_allow",
},
}
self.dict_raw_geojson_str = {
"type": "FeatureCollection",
"crs": self.dict_crs,
"features": [],
}
self.raw_geojson_str = json.dumps(self.dict_raw_geojson_str)
def test_geojsonreader_error(self):
geojsonreader = common.GeojsonReader(geom="geom")
with self.assertRaises(ValueError):
next(geojsonreader(self.raw_geojson_str))
def test_geojsonreader(self):
dic_geometry_1 = {
"type": "LineString",
"coordinates": [[102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]],
}
dict_feature_1 = {
"type": "feature",
"id": "id0",
"geometry": dic_geometry_1,
"properties": {"prop0": "value0", "prop1": "value1"},
}
self.dict_crs.get("properties").update(
{"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}
)
self.dict_crs.update({"properties": self.dict_crs.get("properties")})
self.dict_raw_geojson_str.update({"features": [dict_feature_1]})
self.dict_raw_geojson_str.update({"crs": self.dict_crs})
raw_geojson_str = json.dumps(self.dict_raw_geojson_str)
geojsonreader = common.GeojsonReader(geom="geom")
result_geo_array = [row.get("geom") for row in geojsonreader(raw_geojson_str)]
expected_array = [
GEOSGeometry(json.dumps(row.get("geometry")))
for row in self.dict_raw_geojson_str.get("features")
]
self.assertSequenceEqual(expected_array, result_geo_array)
def test_geojsonreader_empty(self):
name_allowed = self.dict_crs.get("properties").get("name")
geojsonreader = common.GeojsonReader(
geom="geom", allowed_projection=[name_allowed]
)
result_array = [row for row in geojsonreader(self.raw_geojson_str)]
array_expected = []
self.assertSequenceEqual(result_array, array_expected)
class Test_TestCommon_IdentifierFromProperty(unittest.TestCase):
def test_identifierfromproperty(self):
id_property = "id_prop"
identifierproperty = common.IdentifierFromProperty(property=id_property)
record_original = {
"test": "identifierproperty",
id_property: "property",
"other": "try",
}
identifier, record = identifierproperty(record_original)
self.assertIn(id_property, record)
self.assertEqual(identifier, record_original[id_property])
class Test_TestCommon_GenerateIdentifier(unittest.TestCase):
def setUp(self):
self.arguments = ("voici", "les", "arguments", "de", "tests")
def test_generateidentifier_empty(self):
generate_identifier = common.GenerateIdentifier()
res = generate_identifier(self.arguments)
self.assertEqual(2, len(res))
self.assertEqual(self.arguments, res[1])
self.assertTrue(isinstance(res[0], common.uuid.UUID))
def test_generateidentifier_error(self):
generate_identifier = common.GenerateIdentifier(generator=3)
with self.assertRaises(ValueError):
next(generate_identifier(self.arguments))
def test_generateidentifier(self):
generate_identifier = common.GenerateIdentifier(generator=sorted)
array_res = generate_identifier(self.arguments)
self.assertEqual(2, len(array_res))
self.assertEqual(self.arguments, array_res[1])
def test_generateidentifier_error_arguments(self):
generate_identifier = common.GenerateIdentifier(generator=abs)
with self.assertRaises(ValueError):
next(generate_identifier(self.arguments))
class Test_TestCommon_ExcludeAttributes(unittest.TestCase):
def test_excludeattributes(self):
list_to_exclude = ["member_to_exclude_1", "member_to_exclude_2"]
exclude_attributes = common.ExcludeAttributes(excluded=list_to_exclude)
identifier = "id"
record = {
"member_to_exclude_1": "exclusion",
"member_to_exclude_2": "exclusion2",
"member_to_stay": "stay",
}
array_res = [row for row in exclude_attributes(identifier, record)]
record_keys = list(array_res[0][1].keys())
self.assertNotEqual(list_to_exclude, record_keys)
self.assertEqual(identifier, array_res[0][0])
self.assertEqual(2, len(array_res[0]))
class Test_TestCommon_FilterAttributes(unittest.TestCase):
def test_filterattributes(self):
list_to_filter = ["member_to_filter_1", "member_to_filter_2"]
filterattributes = common.FilterAttributes(included=list_to_filter)
identifier = "id"
record = {
"member_to_filter_1": "filter",
"member_to_filter_2": "filter2",
"member_to_exclude": "exclusion",
}
result = [row for row in filterattributes(identifier, record)]
record_keys = list(result[0][1].keys())
self.assertEqual(list_to_filter, record_keys)
self.assertEqual(identifier, result[0][0])
self.assertEqual(2, len(result[0]))
class Test_TestCommon_FilterByProperties(unittest.TestCase):
    """FilterByProperties: yield (identifier, record) only when the predicate keeps it."""

    def setUp(self):
        self.identifier = "id"
        self.record = {"key_1": "value_1", "key_2": "value_2"}

    def test_filterbyproperties_false(self):
        # An always-False predicate must make the node yield nothing.
        keep_eval_function = (
            lambda identfier, record: False
        )  # NOQA -> TODO: fix flake8 error
        filterbyproperties = common.FilterByProperties(
            keep_eval_function=keep_eval_function
        )
        # fixed: removed a redundant extra invocation whose (generator) result
        # was discarded without being consumed
        result = [row for row in filterbyproperties(self.identifier, self.record)]
        expected_result = []
        self.assertSequenceEqual(result, expected_result)

    def test_filterbyproperties_true(self):
        # An always-True predicate must pass (identifier, record) through unchanged.
        keep_eval_function = (
            lambda identfier, record: True
        )  # NOQA -> TODO: fix flake8 error
        filterbyproperties = common.FilterByProperties(
            keep_eval_function=keep_eval_function
        )
        # fixed: removed a redundant extra invocation whose (generator) result
        # was discarded without being consumed
        result = next(filterbyproperties(self.identifier, self.record))
        self.assertDictEqual(self.record, result[1])
        self.assertEqual(self.identifier, result[0])
        self.assertEqual(2, len(result))
class Test_TestCommon_CollectAndSum(unittest.TestCase):
def test_collectandsum(self):
geom = "geom"
identifier = "identifier"
layercollectandsum = Layer.objects.create(name="layercollectandsum")
Feature.objects.create(geom=common.Point(2, 4), layer=layercollectandsum)
features = Feature.objects.all()
collectandsum = common.CollectAndSum(geom=geom)
id_result, features_result = next(
collectandsum(identifier=identifier, features=features)
)
self.assertEqual(id_result, identifier)
self.assertIn("ids", features_result)
self.assertIn(geom, features_result)
self.assertIsInstance(features_result, dict)
class Test_TestCommon_MapProperties(unittest.TestCase):
def setUp(self):
self.identifier = "id"
self.record = {"key_1": "value_1", "key_2": "value_2"}
def test_mapproperties(self):
map_function = sorted
mapproperties = common.MapProperties(map_function=map_function)
result = next(mapproperties(self.identifier, self.record))
result_expected = map_function(self.record)
self.assertEqual(result_expected, result[1])
self.assertEqual(self.identifier, result[0])
self.assertEqual(2, len(result))
class Test_TestCommon_AttributeToGeometry(unittest.TestCase):
def setUp(self):
self.identifier = "id"
self.geom = "geom"
self.asso_attribute_1 = {
"type": "Polygon",
"coordinates": [
[
[3.55, 51.08],
[4.36, 50.73],
[4.84, 50.85],
[4.45, 51.30],
[3.55, 51.08],
]
],
}
self.asso_attribute_2 = {
"type": "LineString",
"coordinates": [[100.0, 0.0], [101.0, 1.0]],
}
self.attribute_1 = "attribute_1"
self.attribute_2 = "attribute_2"
self.record = {
self.attribute_1: json.dumps(self.asso_attribute_1),
self.attribute_2: json.dumps(self.asso_attribute_2),
}
def test_get_geosgeometry(self):
attribute_to_geometry = common.AttributeToGeometry(
attribute=self.attribute_1, geom=self.geom
)
result = attribute_to_geometry.get_geosgeometry(
json.dumps(self.asso_attribute_1)
)
srid_expected = 4326
self.assertEqual(result.srid, srid_expected)
self.assertEqual(result.geom_type, self.asso_attribute_1.get("type"))
def test_attributetogeometry(self):
attribute_to_geometry = common.AttributeToGeometry(
attribute=self.attribute_1, geom=self.geom
)
result = next(attribute_to_geometry(self.identifier, self.record))
result_expected_geom = self.asso_attribute_1.get("type")
result_geom = result[1].get(self.geom).geom_type
self.assertNotIn(self.attribute_1, self.record)
self.assertEqual(result_expected_geom, result_geom)
self.assertEqual(self.identifier, result[0])
self.assertEqual(2, len(result))
def test_attributetogeometry_linestring(self):
attribute_to_geometry = common.AttributeToGeometry(
attribute=self.attribute_2, geom=self.geom
)
result = next(attribute_to_geometry(self.identifier, self.record))
result_expected_geom = self.asso_attribute_2.get("type")
result_geom = result[1].get(self.geom).geom_type
self.assertNotIn(self.attribute_2, self.record)
self.assertEqual(result_expected_geom, result_geom)
self.assertEqual(self.identifier, result[0])
self.assertEqual(2, len(result))
class Test_TestCommon_AttributesToGeometry(unittest.TestCase):
def setUp(self):
self.attribute_1 = "1"
self.attribute_2 = "2"
self.identifier = "id"
self.geom = "geom"
self.srid = 4326
self.y = "Key_2"
self.x = "Key_1"
def test_attributestogeometry(self):
attributestopointgeometry = common.AttributesToPointGeometry(
x=self.x, y=self.y, geom=self.geom, srid=self.srid
)
original_record = {"Key_1": self.attribute_1, "Key_2": self.attribute_2}
identifier, record = attributestopointgeometry(self.identifier, original_record)
point_result = record.get(self.geom)
self.assertEqual("Point", point_result.geom_type)
self.assertEqual(self.identifier, identifier)
self.assertEqual(float(self.attribute_1), point_result[0])
self.assertEqual(float(self.attribute_2), point_result[1])
def test_attributestogeometry_error(self):
attributestopointgeometry = common.AttributesToPointGeometry(
x=self.x, y=self.y, geom=self.geom, srid=self.srid
)
record = {"Key_1": "attribute_1", "Key_2": self.attribute_2}
with self.assertRaises(ValueError):
next(attributestopointgeometry(self.identifier, record))
class Test_TestCommon_GeometryToJson(unittest.TestCase):
def test_geometrytojson(self):
source = "source"
simplify = 0.0
destination = "destination"
identifier = "id"
example_geo = common.Point(0, 1)
geometrytojson = common.GeometryToJson(
source=source, destination=destination, simplify=simplify
)
properties = {source: example_geo}
r_identifier, record = geometrytojson(identifier, properties)
self.assertEqual(identifier, r_identifier)
self.assertEqual(
record.get(destination).get("type"), record.get(source).geom_type
)
class Test_TestCommon_GeometryToCentroid(unittest.TestCase):
def test_geometrytocentroid(self):
example_geo = common.Polygon(((0, 0), (0, 1), (1, 1), (0, 0)))
geom = "geom"
geom_dest = "geom_destination"
identifier = "id"
properties = {geom: example_geo}
geometrytocentroid = common.GeometryToCentroid(geom=geom, geom_dest=geom_dest)
r_identifier, record = geometrytocentroid(identifier, properties)
self.assertEqual(identifier, r_identifier)
self.assertEqual("Point", record.get(geom_dest).geom_type)
class Test_TestCommon_Geometry3Dto2D(unittest.TestCase):
def test_geometry3dto2d(self):
geom = "geom"
geom_dest = "geom_destination"
identifier = "id"
example_geo = common.Point(1, 1, 1)
properties = {geom: example_geo}
geometry3dto2d = common.Geometry3Dto2D(geom=geom, geom_dest=geom_dest)
result = next(geometry3dto2d(identifier, properties))
result_geom_3d = result[1].get(geom)
result_geom_2d = result[1].get(geom_dest)
self.assertEqual(identifier, result[0])
self.assertEqual(2, len(result))
self.assertEqual(3, len(result_geom_3d.coords))
self.assertEqual(2, len(result_geom_2d.coords))
class Test_TestCommon_CopyOnPipelineSplit(unittest.TestCase):
def test_copyonpipelinesplit(self):
geom = "geom"
example_geo = common.Point(1, 1, 1)
properties = {geom: example_geo}
identifier = "id"
copyonpipelinesplit = common.CopyOnPipelineSplit()
result = next(copyonpipelinesplit(identifier, properties))
self.assertEqual(identifier, result[0])
self.assertEqual(2, len(result))
self.assertEqual(properties, result[1])
class Test_TestCommon_DropIdentifier(unittest.TestCase):
    """DropIdentifier should yield only the record, discarding the identifier."""

    def test_dropidentifier(self):
        geom = "geom"
        example_geo = common.Point(1, 1, 1)
        properties = {geom: example_geo}
        identifier = "id"
        dropidentifier = common.DropIdentifier()
        result = next(dropidentifier(identifier, properties))
        # The yielded value is the single-key record itself, not an
        # (identifier, record) pair.
        self.assertEqual(1, len(result))
        self.assertEqual(properties, result)
class Test_TestCommon_DjangoLog(unittest.TestCase):
def test_djangolog(self):
log_level = 10
identifier = "id"
geom_example = common.Point(4, 6)
geom = "geom"
record = {geom: geom_example, "Key2": "Value2"}
djangolog = common.DjangoLog(log_level=log_level)
with self.assertLogs(level=log_level) as cm:
djangolog(identifier, record)
self.assertEqual(cm.records[1].msg, f'{record["geom"].ewkt}')
self.assertEqual(cm.records[1].levelno, log_level)
self.assertEqual(cm.records[0].msg, f"{identifier}: {record}")
self.assertEqual(cm.records[0].levelno, log_level)
self.assertEqual(len(cm), 2)
class Test_TestCommon_IsochroneCalculation(unittest.TestCase):
def setUp(self):
self.body = {
"polygons": [
{
"type": "Feature",
"properties": {"name": "Research Triangle", "area": 252},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[-78.93, 36.00],
[-78.67, 35.78],
[-79.04, 35.90],
[-78.93, 36.00],
]
],
},
}
]
}
@override_settings(GRAPHHOPPER="http://graphopper/", DEBUG=True)
def test_isochronecalculation_valid(self):
request = requests.Session()
with mock.patch.object(
request, "get", return_value=mock.Mock(ok=True)
) as mock_get:
mock_get.return_value.json.return_value = self.body
isochronecalculation = common.IsochroneCalculation()
identifier = "id"
geom_example = common.Point(4, 6)
properties = {"geom": geom_example}
result = next(isochronecalculation(identifier, properties, request))
self.assertEqual(identifier, result[0])
self.assertEqual(2, len(result))
@override_settings(GRAPHHOPPER="http://graphopper/", DEBUG=True)
def test_isochronecalculation_non_valid(self):
request = requests.Session()
with mock.patch.object(
request, "get", return_value=mock.Mock(ok=True)
) as mock_get:
mock_get.return_value.json = mock.MagicMock(
side_effect=json.JSONDecodeError("test", "test2", 123)
)
isochronecalculation = common.IsochroneCalculation()
identifier = "id"
geom_example = common.Point(4, 6)
properties = {"geom": geom_example}
try:
with self.assertLogs():
next(isochronecalculation(identifier, properties, request))
except StopIteration:
pass
class Test_TestCommon_UnionOnProperty(unittest.TestCase):
def test_uniononproperty(self):
atr = "atr"
properties = [
{"geom": common.Point(1, 0), atr: 32},
{"geom": common.Point(2, 0), atr: 33},
{"geom": common.Point(3, 0), atr: 32},
{"geom": common.LineString([(0, 0), (1, 0)]), atr: 32},
{"geom": common.Point(4, 0), atr: 32},
]
with BufferingNodeExecutionContext(
common.UnionOnProperty(property=atr)
) as context:
for row in properties:
context.write_sync(("id", row))
result = dict(context.get_buffer())
for row in properties:
select_result = result.get(row.get(atr))
select_result_geom = select_result.get("geom")
self.assertEqual(row.get(atr), select_result.get("level"))
self.assertTrue(select_result_geom.intersects(row.get("geom")))
if __name__ == "__main__":
unittest.main()
|
import sys
import socket
import cv2
import imagezmq

# GStreamer pipeline for a Jetson CSI camera, downscaled to 640x480 BGR.
camSet = "nvarguscamerasrc sensor-id=0 ! video/x-raw(memory:NVMM), width=1640, height=1232, framerate=30/1, format=NV12 ! nvvidconv flip-method=0 ! video/x-raw, width=640, height=480, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink"
cap = cv2.VideoCapture(camSet)
port = 5555
# PUB/SUB mode (REQ_REP=False): frames are broadcast without waiting for replies.
sender = imagezmq.ImageSender("tcp://*:{}".format(port), REQ_REP=False)
print("Input stream opened")
jpeg_quality = 95
rpi_name = socket.gethostname()  # sender identity shown to subscribers

try:
    while True:
        ok, frame = cap.read()
        if not ok:
            # fixed: the original ignored the success flag and crashed in
            # imencode when a frame grab failed; skip bad frames instead.
            continue
        ret_code, jpg_buffer = cv2.imencode(
            ".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), jpeg_quality])
        sender.send_jpg(rpi_name, jpg_buffer)
except (KeyboardInterrupt, SystemExit):
    print('Exit due to keyboard interrupt')
    sys.exit()
finally:
    # fixed: release the capture device on every exit path, not only on
    # keyboard interrupt.
    cap.release()
|
import unittest
from typing import List, Tuple
import pathlib
from dffml.base import (
BaseDataFlowFacilitatorObject,
config,
field,
list_action,
BaseDataFlowFacilitatorObjectContext,
)
from dffml.feature.feature import Feature, Features
from dffml.source.source import BaseSource
from dffml.source.csv import CSVSource
from dffml.source.json import JSONSource
from dffml.util.entrypoint import entrypoint, base_entry_point
from dffml.util.cli.arg import Arg
from dffml.util.cli.cmd import parse_unknown
from dffml.util.asynctestcase import AsyncTestCase
@config
class FakeTestingConfig:
num: float
files: List[str]
features: Features
nums: Tuple[int]
name: str = field("Name of FakeTesting")
label: str = "unlabeled"
readonly: bool = False
source: BaseSource = JSONSource
@base_entry_point("dffml.test", "test")
class BaseTesting(BaseDataFlowFacilitatorObject):
pass # pragma: no cov
@entrypoint("fake")
class FakeTesting(BaseTesting):
CONFIG = FakeTestingConfig
class TestAutoArgsConfig(AsyncTestCase):
def test_00_args(self):
self.maxDiff = 99999
self.assertEqual(
FakeTesting.args({}),
{
"test": {
"plugin": None,
"config": {
"fake": {
"plugin": None,
"config": {
"num": {
"plugin": Arg(type=float),
"config": {},
},
"files": {
"plugin": Arg(type=str, nargs="+"),
"config": {},
},
"features": {
"plugin": Arg(
type=Feature,
nargs="+",
action=list_action(Features),
),
"config": {},
},
"nums": {
"plugin": Arg(type=int, nargs="+"),
"config": {},
},
"name": {
"plugin": Arg(
type=str, help="Name of FakeTesting"
),
"config": {},
},
"readonly": {
"plugin": Arg(
action="store_true", default=False,
),
"config": {},
},
"label": {
"plugin": Arg(
type=str, default="unlabeled"
),
"config": {},
},
"source": {
"plugin": Arg(
type=BaseSource.load,
default=JSONSource,
),
"config": {},
},
},
}
},
}
},
)
async def test_config_defaults(self):
config = FakeTesting.config(
await parse_unknown(
"--test-fake-name",
"feedface",
"--test-num",
"-4.2",
"--test-files",
"a",
"b",
"c",
"--test-source-filename",
"file.json",
"--test-features",
"Year:int:1",
"Commits:int:10",
"--test-fake-nums",
"100",
)
)
self.assertEqual(config.num, -4.2)
self.assertEqual(config.files, ["a", "b", "c"])
self.assertEqual(config.name, "feedface")
self.assertEqual(config.label, "unlabeled")
self.assertFalse(config.readonly)
self.assertTrue(isinstance(config.source, JSONSource))
self.assertEqual(
config.source.config.filename, pathlib.Path("file.json")
)
self.assertEqual(
config.features,
Features(Feature("Year", int, 1), Feature("Commits", int, 10)),
)
self.assertEqual(config.nums, (100,))
async def test_config_set(self):
config = FakeTesting.config(
await parse_unknown(
"--test-fake-name",
"feedface",
"--test-num",
"-4.2",
"--test-fake-label",
"default-label",
"--test-fake-readonly",
"--test-files",
"a",
"b",
"c",
"--test-fake-source",
"csv",
"--test-source-filename",
"file.csv",
"--test-features",
"Year:int:1",
"Commits:int:10",
"--test-fake-nums",
"100",
"42",
)
)
self.assertEqual(config.num, -4.2)
self.assertEqual(config.files, ["a", "b", "c"])
self.assertEqual(config.name, "feedface")
self.assertEqual(config.label, "default-label")
self.assertTrue(config.readonly)
self.assertTrue(isinstance(config.source, CSVSource))
self.assertEqual(
config.source.config.filename, pathlib.Path("file.csv")
)
self.assertEqual(
config.features,
Features(Feature("Year", int, 1), Feature("Commits", int, 10)),
)
self.assertEqual(config.nums, (100, 42))
class FakeTestingContext(BaseDataFlowFacilitatorObjectContext):
"""
Fake Testing Context
"""
@config
class FakeTestingConfig2:
name: str = field("Name of FakeTesting2")
num: float
features: Features = Features(
Feature("default", int, 1), Feature("features", int, 10)
)
label: str = "unlabeled"
@entrypoint("fake2")
class FakeTesting2(BaseTesting):
CONTEXT = FakeTestingContext
CONFIG = FakeTestingConfig2
@config
class FakeTestingConfig3:
label: str = "unlabeled"
@entrypoint("fake3")
class FakeTesting3(BaseTesting):
CONTEXT = FakeTestingContext
CONFIG = FakeTestingConfig3
class TestCONFIG(unittest.TestCase):
    """Construction behaviour of plugins depending on their CONFIG's fields."""

    def test_CONFIG(self):
        # FakeTesting2's config declares required fields, so constructing with
        # no arguments must fail (presumably with TypeError from the config
        # dataclass — behaviour defined in dffml.base, not visible here).
        with self.assertRaises(TypeError):
            config = FakeTesting2()
        # FakeTesting3's config has only defaulted fields, so this succeeds.
        config = FakeTesting3()
|
# Price calculator: açaí is sold by weight (per kilogram), snacks per unit.
qtdg=float(input("quantidade de acai em gramas:"))  # açaí amount in grams
qtds=int(input("quantidade de salgados:"))  # number of snacks
valor=float(input("valor pago:"))  # amount the customer paid
qtdg1= 24  # açaí price per kilogram
sal= 3  # price per snack
x= qtdg/1000  # convert grams to kilograms
tot= sal * qtds + x * qtdg1  # total owed
print(round(tot, 2))
# Print "Sim" if the payment covers the total, otherwise "Nao".
# NOTE(review): paying *exactly* the total prints "Nao" — confirm whether
# `>=` was intended here.
if valor>tot:
    print("Sim")
else:
    print("Nao")
# Guess-the-number game: the player has 6 tries to guess a number from 1 to 22.
import random  # random number generation
import time  # pacing delays

print("Hello! What is your name?")
name = input()
print("Hello there, " + name + "! " + "I am thinking of a number")
print("berween 1 and 22. Think you can guess it!?")  # NOTE(review): "berween" typo in the user-facing text
time.sleep(1)  # short pause before the first prompt
t = True  # the game keeps running while this flag is True
while(t):
    randomNum = random.randint(1, 22)  # secret number for this round
    for guessesTaken in range(1, 7):  # up to 6 guesses; guessesTaken counts attempts
        print("What is your guess? You have " + str((7 - guessesTaken)) + " guesses")
        userGuess = input()
        if int(userGuess) > randomNum:
            print("Your guess to too high. Please guess again")
        elif int(userGuess) < randomNum:
            print("Your guess is too low. Please guess again")
        else:
            break  # correct guess: leave the loop early
    if int(userGuess) == randomNum:  # player won the round
        print("You got it! You guessed the correct number in " + str(guessesTaken) + " guess(es)!")
        print("Press y to play again")
        yes = input()
        if yes == "y":
            t = True  # play another round
        else:
            print("Goodbye!")
            t = False  # end the game
    else:  # all 6 guesses used without success
        print("Sorry! Maximum number of guesses allowed. The number was " + str(randomNum))
        print("Press y to try again")
        yes = input()
        if yes == "y":
            t = True  # play another round
        else:
            print("Goodbye!")
            t = False  # end the game
|
import os
from .quantlabapp import QuantLabApp
# JupyterHub integration: subclass the single-user notebook server so
# QuantLab can run under a Hub-managed deployment.
try:
    from jupyterhub.singleuser import SingleUserNotebookApp
except ImportError:
    # NOTE(review): this assignment is dead code -- the raise directly below
    # makes the module unimportable without jupyterhub, so the None value is
    # never observable by callers. Confirm which behavior was intended.
    SingleUserQuantLabApp = None
    raise ImportError('You must have jupyterhub installed for this to work.')
else:
    class SingleUserQuantLabApp(SingleUserNotebookApp, QuantLabApp):
        """Single-user QuantLab server that exposes Hub details to the page config."""

        def init_webapp(self, *args, **kwargs):
            super().init_webapp(*args, **kwargs)
            settings = self.web_app.settings
            if 'page_config_data' not in settings:
                settings['page_config_data'] = {}
            # Surface Hub routing and user identity to the client-side config.
            settings['page_config_data']['hub_prefix'] = self.hub_prefix
            settings['page_config_data']['hub_host'] = self.hub_host
            settings['page_config_data']['hub_user'] = self.user
            api_token = os.getenv('JUPYTERHUB_API_TOKEN')
            if not api_token:
                api_token = ''
            if not self.token:
                # self.token may be a read-only traitlet on some versions.
                try:
                    self.token = api_token
                except AttributeError:
                    self.log.error("Can't set self.token")
            settings['page_config_data']['token'] = api_token


def main(argv=None):
    """Entry point: launch the single-user QuantLab application."""
    return SingleUserQuantLabApp.launch_instance(argv)


if __name__ == "__main__":
    main()
|
import sys
# Abrindo arquivo kdmer
def abrirArquivo():
    """Read the (k,d)-mer file named on the command line.

    The filename must look like ``k<k>d<d>mer.txt``; k and d are parsed out
    of the name itself (assumes exactly one 'd' in the path). Returns
    {'k': int, 'd': int, 'sequencia': list[str]} with the cleaned reads.
    Exits the process with a message if the argument or file is missing.
    """
    try:
        caminho = sys.argv[1]
    except IndexError:  # was a bare except; only a missing argv entry applies
        print("Passe o arquivo como arguemento na chamada do programa!" )
        exit()
    try:
        # 'with' guarantees the handle is closed (the original leaked it).
        with open(caminho, 'r') as f:
            x = f.read()
    except OSError:  # missing or unreadable file
        print("Arquivo não encontrado!!")
        exit()
    # e.g. "k3d1mer.txt" -> k=3, d=1
    k, d = caminho.split("d")
    k = int(k.split('k')[1])
    d = int(d.split("mer.txt")[0])
    # Strip the "['a|b', 'c|d']" list formatting down to plain tokens.
    ls = x.replace('[', "").replace(']', "").replace('\'', "").replace(" ", "").split(',')
    return {'k': k, 'd': d, 'sequencia': ls}
def prefixo(i):
    """Return the (k-1)-prefixes of both halves of a paired read "a|b"."""
    left, right = i.split('|')
    return (left[:-1], right[:-1])
def sufixo(i):
    """Return the (k-1)-suffixes of both halves of a paired read "a|b"."""
    left, right = i.split('|')
    return (left[1:], right[1:])
def geraAdjLista(composicao):
    """Build the paired de Bruijn graph as adjacency lists.

    Returns [grafo, saida, entrada, rotulo]: adjacency dict (prefix node ->
    list of suffix nodes), out-degree, in-degree, and the original read that
    labels each prefix node.
    """
    rotulo = {}
    grafo = {}
    saida = {}
    entrada = {}
    # First pass: register every node with zeroed degrees and empty adjacency.
    for leitura in composicao['sequencia']:
        pre = prefixo(leitura)
        suf = sufixo(leitura)
        rotulo[pre] = leitura
        grafo[pre] = []
        saida[pre] = 0
        saida[suf] = 0
        entrada[suf] = 0
        entrada[pre] = 0
    # Second pass: add one edge prefix -> suffix per read and count degrees.
    for leitura in composicao['sequencia']:
        pre = prefixo(leitura)
        suf = sufixo(leitura)
        grafo[pre].append(suf)
        saida[pre] += 1
        entrada[suf] += 1
    return [grafo, saida, entrada, rotulo]
def encontraInicio(entrada, saida):
    """Pick the Eulerian-path start node.

    Chooses the node whose (in-degree - out-degree) is most negative;
    on ties the last such node in iteration order wins (matching the
    original's `<=` comparison).
    """
    melhor = 0
    inicio = list(entrada)[0]
    for no in entrada:
        diff = entrada[no] - saida[no]
        if diff <= melhor:
            melhor = diff
            inicio = no
    return inicio
def acha_caminho(grafo, entrada, saida, chave):
    """Walk an Eulerian path starting at `chave` (Hierholzer-style).

    Consumes edges by mutating grafo/entrada/saida in place and returns the
    node sequence of the path from start to end.
    """
    caminho = []
    pilha = []
    while True:
        if not pilha and saida[chave] == 0:
            # Dead end with nothing left to backtrack to: path is complete.
            caminho.append(chave)
            break
        if saida[chave] == 0:
            # Dead end: emit this node and backtrack to the previous one.
            caminho.append(chave)
            chave = pilha.pop()
        else:
            # Consume one outgoing edge and advance along it.
            pilha.append(chave)
            vizinho = grafo[chave].pop()
            saida[chave] -= 1
            entrada[vizinho] -= 1
            chave = vizinho
    # Nodes were emitted end-first; reverse to get start -> end order.
    return caminho[::-1]
def remonta( d, k, caminho, rotulo):
    # Reassemble the genome string from the Eulerian path of (k,d)-mer pairs.
    # NOTE(review): the `-4` offsets below look tuned to a specific k/path
    # length; cannot confirm generality from here -- verify before reusing.
    sequencia = ""
    # Spell out the first halves of the node pairs along the path:
    # the first node contributes its whole prefix, later ones one char each.
    for i in caminho[:-1]:
        ini = i[0]
        if sequencia == "":
            sequencia += ini
        else:
            sequencia += ini[-1]
    tam = len(caminho)
    # Append the bridging characters taken from the second halves.
    for i in range(d + 1):
        sequencia += caminho[tam-d + i -4][1][1]
        #print( caminho[tam-d + i -4][1][1])
    # Close with the tail of the second halves of the last two nodes.
    return sequencia + caminho[-2][1][0] + caminho[-1][1]
# Driver: read the (k,d)-mer file, build the graph, walk the Eulerian path,
# reassemble the sequence, and append the result to resposta.fasta.
composicao = abrirArquivo()
grafo, saida, entrada, rotulo = geraAdjLista(composicao)
chave_inicio = encontraInicio(entrada, saida)
cami = acha_caminho(grafo, entrada, saida, chave_inicio)
sequencia = remonta(composicao['d'],composicao['k'],cami, rotulo)
# Append mode: repeated runs accumulate records in the same FASTA file.
arq = open('resposta.fasta', 'a')
sequencia = '>k={}d={}\n'.format(composicao['k'],composicao['d']) + sequencia + '\n'
arq.write(sequencia)
arq.close()
print("arquivo resposta.fasta gerado com a sequencia.")
|
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.datasets import load_boston
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
# %%
# NOTE(review): load_boston was removed in scikit-learn 1.2 -- these cells
# only run on older versions; confirm the pinned sklearn before reuse.
boston = load_boston()
data_boston = pd.DataFrame(boston.data, columns=boston.feature_names)
# %%
# Multiple linear regression of PRICE on all 13 features (full dataset).
data_boston['PRICE'] = boston.target
lr_multi = LinearRegression()
x_column_list_for_multi = [
    'CRIM',
    'ZN',
    'INDUS',
    'CHAS',
    'NOX',
    'RM',
    'AGE',
    'DIS',
    'RAD',
    'TAX',
    'PTRATIO',
    'B',
    'LSTAT'
]
y_column_list_for_multi = ['PRICE']
lr_multi.fit(data_boston[x_column_list_for_multi],
             data_boston[y_column_list_for_multi])
print(lr_multi.coef_)
print(lr_multi.intercept_)
# %%
# Hold out 30% of the rows and evaluate with mean absolute error.
X_train, X_test, y_train, y_test = train_test_split(
    data_boston[x_column_list_for_multi],
    data_boston[y_column_list_for_multi],
    test_size=0.3
)
lr_multi2 = LinearRegression()
lr_multi2.fit(X_train, y_train)
print(lr_multi2.coef_)
print(lr_multi2.intercept_)
y_pred = lr_multi2.predict(X_test)
print(mean_absolute_error(y_pred, y_test))
# %%
# Lasso regression
# NOTE(review): the normalize= argument was removed from Lasso/Ridge in
# scikit-learn 1.2; modern versions need a StandardScaler pipeline instead.
lasso = Lasso(alpha=0.01, normalize=True)
lasso.fit(X_train, y_train)
print(lasso.coef_)
print(lasso.intercept_)
# %%
y_pred_lasso = lasso.predict(X_test)
mean_absolute_error(y_pred_lasso, y_test)
# %%
# Ridge regression
ridge = Ridge(alpha=0.01, normalize=True)
ridge.fit(X_train, y_train)
print(ridge.coef_)
print(ridge.intercept_)
# %%
y_pred = ridge.predict(X_test)
mean_absolute_error(y_pred, y_test)
# %%
|
import cv2
import sys
import boardcv
import vidio
import game_tree as gt
BOARD_SIZE = 19
def main():
    """Track a Go board in a video stream and maintain the game tree.

    Usage: python <script> <video-source>. Shows the tracker's debug frame
    per frame and prints board-state changes until 'q' is pressed.
    """
    video_source = sys.argv[1]
    video_cap = vidio.get_video_cap(video_source)
    num_frames = 0  # frame counter
    frame_debug = None
    tracker = boardcv.BoardTracker()
    game_tree = gt.GameTree(BOARD_SIZE)
    while True:
        ret, frame = video_cap.read()
        if not ret:
            # FIX: end of stream / failed grab -- the original ignored `ret`
            # and would hand a None frame to the tracker.
            break
        # Detect the board and get a debug visualization back.
        frame_debug = tracker.update(frame)
        s = tracker.get_board_state_estimate()
        state_changed, game_node = game_tree.update(s)
        if state_changed:
            print(s)
            print(game_node.difference_from_parent())
            print(game_tree.sgf_game.serialise())
        cv2.imshow("Program Output", frame_debug)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        num_frames += 1
    # After the loop release the cap object
    video_cap.release()
    # Destroy all the windows
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn import metrics
import joblib
# Train a decision-tree fraud classifier on a few transaction aggregates,
# report recall on a held-out split, and persist the fitted model.
df = pd.read_csv('transaction_dataset.csv')
feature_cols = ['Avg min between sent tnx', 'Avg min between received tnx',
                'avg val received', 'avg val sent']
df = df[feature_cols + ['FLAG']].dropna()

X = df[feature_cols]
y = df['FLAG']
# Fixed random_state keeps the split reproducible for testing.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=4)

dt = DecisionTreeClassifier()
dt.fit(X_train, y_train)

# Recall on the fraud label for the held-out 30%.
y_pred = dt.predict(X_test)
dt_rec = metrics.recall_score(y_test, y_pred)
print("Recall:", dt_rec)

joblib.dump(dt, 'model.pkl')
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from dsxspark import exceptions
def handle_error(stdout, stderr, return_code):
    """Inspect failed playbook output and raise a specific exception.

    Accepts str or bytes for stdout (subprocess pipes yield bytes unless
    text mode is enabled). Falls through silently when the output does not
    match a known error signature, letting the caller raise generically.
    """
    if isinstance(stdout, bytes):
        # FIX: the original did a str-in-bytes containment test, which raises
        # TypeError on Python 3; decode defensively first.
        stdout = stdout.decode('utf-8', errors='replace')
    instance_error_msg = '"msg": "Error in creating instance'
    if instance_error_msg in stdout:
        raise exceptions.InstanceCreateException()
def run_playbook_subprocess(playbook, extra_vars=None, inventory=None):
    """Run an ansible playbook and raise PlaybookFailure on a non-zero exit.

    :param playbook: path to the playbook file.
    :param extra_vars: optional dict rendered as ``key='value'`` pairs.
    :param inventory: optional inventory path passed via ``-i``.
    :raises exceptions.InstanceCreateException: via handle_error on a known
        instance-creation failure signature.
    :raises exceptions.PlaybookFailure: for any other non-zero exit.
    """
    cmd = ['ansible-playbook', playbook]
    if inventory:
        cmd.extend(['-i', inventory])
    if extra_vars:
        # Render as: key1='v1' key2='v2'
        pairs = ["%s='%s'" % (var, extra_vars[var]) for var in extra_vars]
        cmd.extend(['--extra-vars', ' '.join(pairs)])
    cmd.extend(['--timeout', '25'])
    # FIX: text-mode pipes so handle_error and the error print receive str;
    # the original passed bytes into a str containment check.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    stdout, stderr = proc.communicate()
    if proc.returncode > 0:
        handle_error(stdout, stderr, proc.returncode)
        print("ERROR: Playbook %s failed with:\n\tstderr:\n\t\t%s\n"
              "\tstdout:\n\t\t%s" % (playbook, stderr, stdout))
        raise exceptions.PlaybookFailure
|
from jira import JIRA
from MongoCRUD import MongoCRUD
from FastTraveler import FastTraveler
from dotenv import load_dotenv
import os, urllib3
# Pull "Fast Traveler" issues from Jira and mirror them into MongoDB,
# one record per coordinate, skipping issues already present.
try:
    load_dotenv()  # pull JIRA_* / SERVER_ADDRESS settings from a .env file
    # ignore warning from invalid certificate, allan needs to fix
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    jira = JIRA(basic_auth=(os.getenv('JIRA_USERNAME'), os.getenv('JIRA_PASSWORD')),
                options={'server': os.getenv('SERVER_ADDRESS'), 'verify': False})
except Exception:
    # FIX: re-raise instead of swallowing -- the original continued and then
    # crashed later with a NameError on `jira`, hiding the real cause.
    print("jira connection error occurred, please verify env variables")
    raise

x = 0  # running count of processed issues
mongo = MongoCRUD('ft_data')
for issue in jira.search_issues('issuetype = "Fast Traveler"', maxResults=2000):
    x += 1
    fast_traveler = FastTraveler(issue.key)
    if mongo.check_exists(fast_traveler.key):
        print("skipping " + str(fast_traveler.key))
    else:
        # One Mongo record per coordinate attached to the issue.
        for coordinates in fast_traveler.locations:
            key = fast_traveler.key
            user = fast_traveler.reporter
            ft_type = "Fast Traveler"
            date = fast_traveler.created_date
            description = fast_traveler.description
            print (str(x) + "\tAdded: " + str(user) + "\t" + str(key) + "\t" + str(date) + "\t" + str(coordinates))
            mongo.write_ft(user, key, ft_type, date, description, coordinates)
mongo.close()
print (x)
# You need to install pyaudio to run this example
# pip install pyaudio
# In this example, the websocket connection is opened with a text
# passed in the request. When the service responds with the synthesized
# audio, the pyaudio would play it in a blocking mode
from __future__ import print_function
from ibm_watson import TextToSpeechV1
from ibm_watson.websocket import SynthesizeCallback
import pyaudio
import json
# If service instance provides API key authentication
# Credentials are read from a local keys.json: {"watson_url": ..., "iam_apikey": ...}
with open('keys.json') as json_file:
    data = json.load(json_file)
    to_url = data['watson_url']
    apikey = data['iam_apikey']

service = TextToSpeechV1(
    ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    url=to_url,
    iam_apikey=apikey)
# service = TextToSpeechV1(
# ## url is optional, and defaults to the URL below. Use the correct URL for your region.
# # url='https://stream.watsonplatform.net/text-to-speech/api,
# username='YOUR SERVICE USERNAME',
# password='YOUR SERVICE PASSWORD')
class Play(object):
    """
    Blocking PCM playback helper around a PyAudio output stream.

    Configured for 16-bit mono audio at 22.05 kHz, written in
    chunk-sized buffers.
    """

    def __init__(self):
        self.format = pyaudio.paInt16
        self.channels = 1
        self.rate = 22050
        self.chunk = 1024
        self.pyaudio = None  # PyAudio engine, created lazily
        self.stream = None   # output stream, created lazily

    def start_streaming(self):
        """Create the PyAudio engine and begin an output stream."""
        self.pyaudio = pyaudio.PyAudio()
        self.stream = self._open_stream()
        self._start_stream()

    def _open_stream(self):
        """Open (but do not start) an output stream with this player's format."""
        return self.pyaudio.open(
            format=self.format,
            channels=self.channels,
            rate=self.rate,
            output=True,
            frames_per_buffer=self.chunk,
            start=False,
        )

    def _start_stream(self):
        self.stream.start_stream()

    def write_stream(self, audio_stream):
        """Write raw audio bytes to the device (blocks until consumed)."""
        self.stream.write(audio_stream)

    def complete_playing(self):
        """Stop and close the stream, then tear down the PyAudio engine."""
        self.stream.stop_stream()
        self.stream.close()
        self.pyaudio.terminate()
class MySynthesizeCallback(SynthesizeCallback):
    """Websocket callback that streams synthesized audio straight to Play."""

    def __init__(self):
        SynthesizeCallback.__init__(self)
        self.play = Play()

    def on_connected(self):
        # Websocket opened: prepare the audio output stream.
        print('Opening stream to play')
        self.play.start_streaming()

    def on_error(self, error):
        print('Error received: {}'.format(error))

    def on_timing_information(self, timing_information):
        print(timing_information)

    def on_audio_stream(self, audio_stream):
        # Each audio chunk from the service is played as it arrives (blocking).
        self.play.write_stream(audio_stream)

    def on_close(self):
        # Synthesis finished: flush and release the audio device.
        print('Completed synthesizing')
        self.play.complete_playing()
# Shared callback instance used by speak() below.
test_callback = MySynthesizeCallback()

# An example SSML text
SSML_sorry_text = """<speak version=\"1.0\">
<emphasis> I am sorry, I know how it feels.</emphasis>
</speak>"""

# Another example of SSML text
SSML_text = """
Good morning sir how may i help?
"""

def speak(text):
    # Synthesize `text` over the websocket API; audio is played back
    # through test_callback as chunks arrive.
    service.synthesize_using_websocket(text,
                                       test_callback,
                                       accept='audio/wav',
                                       voice="en-US_MichaelVoice"
                                       )
|
# cite: https://classes.engineering.wustl.edu/ese205/core/index.php?title=Serial_Communication_between_Raspberry_Pi_%26_Arduino
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import serial

# Stream lines from an Arduino over USB serial at 9600 baud.
ser = serial.Serial('/dev/ttyUSB0', 9600)
while True:
    # Only read once a reasonable amount of data has buffered.
    if ser.in_waiting > 100:
        print(ser.readline())
|
# Library imports
import webapp2
import jinja2
import os
import time
import datetime
import calendar
import unittest
from google.appengine.ext import ndb
from google.appengine.ext import testbed
# Project imports
from user import *
from util import *
class InstructorCenter(webapp2.RequestHandler):
    """Instructor dashboard: questions and students across the instructor's lectures."""

    def get(self):
        """Render insc.html with all questions and de-duplicated student names."""
        uNm = getAccount(self.request.cookies.get('CurrentUser'))
        instructor = User.query(User.Name == uNm.Name).get()
        # Collect every question from every lecture the instructor teaches.
        QL = []
        print("\t\t" + str(type(instructor.lectures)))
        for lec in instructor.lectures:
            for Q in lec.QL:
                QL.append(Q)
        # Collect student usernames, de-duplicated across lectures.
        SL = []
        for lec in instructor.lectures:
            for username in lec.userNames:
                # FIX: Python lists have no .contains(); the original raised
                # AttributeError here. Use the `in` operator instead.
                if username not in SL:
                    SL.append(username)
        template = JINJA_ENVIRONMENT.get_template('Html/insc.html')
        template_values = {
            "CurrentUser": uNm.userName,
            'QL': QL,
            'SL': SL
        }
        self.response.write(template.render(template_values))

    def post(self):
        """Create a Question entity from the submitted form and redirect back."""
        q = Question()
        q.time = datetime.datetime.now()
        q.owner = self.request.get('student')
        q.topic = self.request.get('topic')
        q.content = self.request.get('content')
        q.answered = False
        q.lec = self.request.get('class')
        q.put()
        self.redirect('/insc')

    def goToChat(self):
        # NOTE(review): template_values is a set literal containing the string
        # 'user' and the method renders nothing -- looks unfinished; confirm
        # the intended dict payload before relying on this handler.
        id = self.request.get('Quest')
        template_values = {
            'user'
        }
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      # This is the target that all Cobalt modules should depend on if they
      # wish to use Skia. It augments skia_library (Skia's core code) with
      # Cobalt specific code defined in source files from skia_cobalt.gypi,
      # such as platform-specific implementations of certain functions.
      'target_name': 'skia',
      'type': 'static_library',
      'dependencies': [
        'skia_library',
      ],
      'includes': [
        'skia_cobalt.gypi',
        'skia_common.gypi',
      ],
      # Dependents of 'skia' also inherit skia_library's settings.
      'export_dependent_settings': [
        'skia_library',
      ],
    },
    {
      # Skia's core code from the Skia repository.
      'target_name': 'skia_library',
      'type': 'static_library',
      'includes': [
        'skia_common.gypi',
        'skia_library.gypi',
        'skia_sksl.gypi',
      ],
    },
    {
      # A small program imported from Chromium that tests Skia with fuzzed
      # filters.
      'target_name': 'filter_fuzz_stub',
      'type': 'executable',
      'dependencies': [
        '<(DEPTH)/base/base.gyp:base',
        'skia',
      ],
      'sources': [
        'test/filter_fuzz_stub/filter_fuzz_stub.cc',
      ],
    },
    {
      # Packaging-only target: deploys the filter_fuzz_stub executable via
      # the shared Starboard deploy rules.
      'target_name': 'filter_fuzz_stub_deploy',
      'type': 'none',
      'dependencies': [
        'filter_fuzz_stub',
      ],
      'variables': {
        'executable_name': 'filter_fuzz_stub',
      },
      'includes': [ '<(DEPTH)/starboard/build/deploy.gypi' ],
    },
  ],
}
|
from Tkinter import *
from ttk import *
from Tkinter import Tk, Text, BOTH, W, N, E, S
from PIL import Image, ImageTk
class UI:
    """Tkinter GUI (Python 2) showing a captured image, its processed version,
    and the verified licence plate in three notebook tabs, with close /
    emergency-open buttons underneath."""

    def __init__(self, original_image_path, processed_image_path, verified_plate_image_path, ip):
        # ip: camera address shown in the footer label.
        self.ip = ip
        self.root = Tk()
        self.frame = Frame(self.root)
        # Pre-load and resize all three images; PhotoImage refs are kept on
        # self so Tkinter does not garbage-collect them.
        original_file = Image.open(original_image_path).resize((800, 500),Image.ANTIALIAS)
        self.original_image = ImageTk.PhotoImage(original_file)
        processed_img = Image.open(processed_image_path).resize((800, 500),Image.ANTIALIAS)
        self.processed_image = ImageTk.PhotoImage(processed_img)
        verified_plate_img = Image.open(verified_plate_image_path).resize((200, 100),Image.ANTIALIAS)
        self.verified_plate_image = ImageTk.PhotoImage(verified_plate_img)
        self.w = self.root.winfo_screenwidth()
        self.h = self.root.winfo_screenheight()
        self.create_frame()
        self.root.title("SCP MiddleWare GUI")
        self.root.geometry("%dx%d+0+0" % (900, self.h-100))
        self.root.mainloop()  # blocks until the window is closed

    def create_frame(self):
        # Two stacked frames: tabs on top, info labels + buttons below.
        frame1 = Frame(self.root,width=500, height=500)
        frame2 = Frame(self.root,width=800, height=576)
        frame1.pack()
        frame2.pack( fill=BOTH, expand=1)
        frame1.place(x=0,y=0)
        frame2.place(x=0,y=560)
        note = Notebook(frame1)
        frame1.style = Style()
        frame1.style.theme_use("clam")
        frame2.style = Style()
        frame2.style.theme_use("clam")
        tab1 = Frame(note)
        tab2 = Frame(note)
        tab3 = Frame(note)
        Button(tab1, text='Exit').pack(padx=self.w-120, pady=self.h-100)
        # One tab per image stage.
        note.add(tab1, text = "Imagen Recibida",compound=TOP)
        note.add(tab2, text = "Imagen Procesada")
        note.add(tab3, text = "Placa Procesada")
        label_original_image = Label(tab1, image=self.original_image)
        label_original_image.place(x=5, y=20)
        label_original_image = Label(frame2,foreground="blue",text="Cam: "+self.ip, font=("Times",16),width=20)
        label_original_image.pack()
        label_processed_image = Label(tab2, image=self.processed_image)
        label_processed_image.place(x=5, y=20)
        label_processed_image = Label(frame2, width=20)
        label_processed_image.pack()
        label_plate_image = Label(tab3, image=self.verified_plate_image)
        label_plate_image.place(x=350, y=200)
        label_plate_image = Label(frame2, width=20)
        label_plate_image.pack()
        # Footer buttons wired to the two handlers below.
        cbtn = Button(frame2, text="Cerrar",width=49)
        cbtn['command'] = self.btn_close
        cbtn.pack(side=LEFT)
        obtn = Button(frame2, text="Apertura Emergencia",width=49)
        obtn['command'] = self.btn_open_signal
        obtn.pack(side=LEFT)
        note.pack()

    def btn_close(self):
        # Ends the mainloop started in __init__.
        self.root.quit()

    def btn_open_signal(self):
        # Emergency-open signal; currently only reports True.
        print True
        return True
# Launch the GUI with the captured, processed and verified plate images.
# FIX: removed three cv2.imread() calls -- cv2 was never imported, so the
# script crashed with a NameError before the UI opened, and the loaded
# images (img/img2/img3) were never used anyway.
ui = UI("pa.jpg", "verificada.png", "recorte.png","192.168.0.40")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/5/22 17:51
# @Author : leyton
# @Site :
# @File : zhihu__login_requests.py
# @Software: PyCharm
import requests
try:
import cookielib
except:
import http.cookiejar as cookielib
import re
# Browser-like headers so zhihu.com does not reject the requests.
agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0"
header = {
    'HOST': 'www.zhihu.com',
    'Referer': 'https://www.zhihu.com',
    'User-Agent': agent
}
# Session with a persistent LWP cookie jar so the login survives restarts.
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='cookies.txt')
def get_xsrf():
    # Hit the signup page just to obtain the anti-CSRF token from the cookies.
    response = session.post('https://www.zhihu.com/signup?next=%2F', headers=header)
    return response.cookies['_xsrf']
def zhihu_login(account, password):
    """Log in to Zhihu with a phone number and password.

    Posts to the oauth sign-in endpoint (only when `account` looks like an
    11-digit CN mobile number) and persists the session cookies to disk.
    """
    # FIX: raw string for the regex -- '\d' in a plain literal is a
    # deprecated invalid escape on Python 3.
    if re.match(r'^1\d{10}', account):
        print('手机号登陆')
        post_url = "https://www.zhihu.com/api/v3/oauth/sign_in"
        post_data = {
            "X-Xsrftoken": get_xsrf(),
            'phone_num': account,
            'password': password
        }
        response_text = session.post(post_url, data=post_data, headers=header)
        session.cookies.save()


# NOTE(review): real-looking credentials are hard-coded and committed here;
# they should be rotated and loaded from env/config instead.
zhihu_login('15132788746', 'My..,962464')
###########################################
# Let's Have Some Fun
# File Name: 659.py
# Author: Weilin Liu
# Mail: liuweilin17@qq.com
# Created Time: Sun Dec 1 11:23:51 2019
###########################################
#coding=utf-8
#!/usr/bin/python
class Solution:
    """LeetCode 659: split a sorted array into consecutive subsequences of
    length >= 3, decided greedily with two counters."""

    def isPossible(self, nums: 'List[int]') -> bool:
        """Return True if nums (sorted) can be partitioned into runs of at
        least three consecutive integers.

        count[v]  -- occurrences of v not yet placed.
        target[v] -- number of existing subsequences that need v next.
        FIX: the snippet had no imports, so both the List annotation and
        collections raised NameError; the annotation is now a string and
        collections is imported locally.
        """
        import collections
        count = collections.Counter(nums)
        target = collections.defaultdict(int)
        for num in nums:
            if count[num] == 0:
                # Already consumed by an earlier placement.
                continue
            elif target[num] > 0:
                # Extend an existing subsequence that ends at num - 1.
                target[num] -= 1
                target[num + 1] += 1
                count[num] -= 1
            elif count[num + 1] > 0 and count[num + 2] > 0:
                # Start a brand-new subsequence: num, num+1, num+2.
                count[num] -= 1
                count[num + 1] -= 1
                count[num + 2] -= 1
                target[num + 3] += 1
            else:
                # num can neither extend nor start a valid run.
                return False
        return True
|
from random_word import RandomWords
import random
from sklearn.model_selection import train_test_split
import nltk
r = RandomWords()
def generate_dataset():
    """Build up to 10 (word-list, label) samples with random binary labels.

    Samples where the random-word API returns nothing are skipped, so the
    result may hold fewer than 10 entries.
    """
    samples = []
    for _ in range(10):
        label = random.randint(0, 1)
        words = r.get_random_words(
            minLength=3, maxLength=15, limit=random.randint(5, 15))
        if not words:
            # The API occasionally returns None/empty; drop that sample.
            continue
        samples.append((words, label))
    return samples
def generate_features(text):
    """Map a word list to simple features: last word, word count, joined length."""
    joined = " ".join(text)
    return {
        "last-word": text[-1],
        "Amount-of-words": len(text),
        "Length of text": len(joined),
    }
# Build feature sets, hold out the first five samples, and compare a
# decision tree against naive Bayes on classification accuracy.
labeled_features = generate_dataset()
featuresets = [(generate_features(text), label)
               for (text, label) in labeled_features]
print("Training :)")
train_set, test_set = featuresets[5:], featuresets[:5]
dt_classifier = nltk.DecisionTreeClassifier.train(train_set)
nb_classifier = nltk.NaiveBayesClassifier.train(train_set)
print(nltk.classify.accuracy(dt_classifier, test_set))
print(nltk.classify.accuracy(nb_classifier, test_set))
|
from rest_framework import serializers
import pytz
class DriverSerializer(serializers.Serializer):
    """
    Structure containing the data for a driver (names, licence, contacts).
    """
    id = serializers.IntegerField(label="id",
                                  help_text="Идентификатор водителя")
    fname = serializers.CharField(label="fname",
                                  help_text="Имя водителя",
                                  max_length=250)
    mname = serializers.CharField(label="mname",
                                  help_text="Отчество водителя",
                                  max_length=250)
    lname = serializers.CharField(label="lname",
                                  help_text="Фамилия водителя",
                                  max_length=250)
    licenceNr = serializers.CharField(label="licenceNr",
                                      help_text="Номер лицензии",
                                      max_length=250)
    phone = serializers.CharField(label="phone",
                                  help_text="Телефон",
                                  max_length=250)
    category = serializers.CharField(label="category",
                                     help_text="Тип лицензии",
                                     max_length=250)
    internalNr = serializers.CharField(label="internalNr",
                                       help_text="Внутренний номер",
                                       max_length=250)
    driverCat = serializers.CharField(label="driverCat",
                                      help_text="Категория прав",
                                      max_length=250)

    class Meta:
        fields = (
            'id',
            'fname',
            'mname',
            'lname',
            'licenceNr',
            'phone',
            'category',
            'internalNr',
            'driverCat',
        )
class DeviceSerializer(serializers.Serializer):
    """
    Structure containing the data for a vehicle / tracking device.
    """
    id = serializers.IntegerField(label="id",
                                  help_text="Идентификатор устройства слежения")
    name = serializers.CharField(label="name",
                                 help_text="Текстовое наименование устройства (ТС)",
                                 max_length=250)
    regNumber = serializers.CharField(label="regNumber",
                                      help_text="Государственный номер",
                                      max_length=250)
    serialNumber = serializers.CharField(label="serialNumber",
                                         help_text="Серийный номер",
                                         max_length=250)
    garageNumber = serializers.CharField(label="garageNumber",
                                         help_text="Гаражный номер",
                                         max_length=250)
    phone = serializers.CharField(label="phone",
                                  help_text="Телефон",
                                  max_length=250)
    simNumber = serializers.CharField(label="simNumber",
                                      help_text="Номер SIM-карты",
                                      max_length=250)
    fuelSort = serializers.CharField(label="fuelSort",
                                     help_text="Тип топлива",
                                     max_length=250)
    brand = serializers.CharField(label="brand",
                                  help_text="Марка автомобиля",
                                  max_length=250)
    description = serializers.CharField(label="description",
                                        help_text="Текстовое описание устройства (ТС)",
                                        max_length=250)
    # FIX: DRF's ListField accepts no positional arguments -- the child
    # serializer must be passed as the child= keyword; the original
    # positional form raises TypeError when the module is imported.
    groupIds = serializers.ListField(child=serializers.IntegerField(label="groupId",
                                                                    help_text="ID группы"),
                                     label="groupIds",
                                     allow_empty=True,
                                     help_text="Список ID групп (клиентов), к которым относится ТС")

    class Meta:
        fields = (
            'id',
            'name',
            'regNumber',
            'serialNumber',
            'garageNumber',
            'phone',
            'simNumber',
            'fuelSort',
            'brand',
            'description',
            'groupIds',
        )
class DeviceGroupSerializer(serializers.Serializer):
    """
    Structure containing the data for a group (client), optionally nested
    under a parent group via parentId.
    """
    id = serializers.IntegerField(label="id",
                                  help_text="Уникальный идентификатор группы")
    name = serializers.CharField(label="name",
                                 help_text="Имя группы",
                                 max_length=250)
    description = serializers.CharField(label="description",
                                        help_text="Описание группы",
                                        max_length=250)
    # Optional: omitted/null for top-level groups.
    parentId = serializers.IntegerField(label="parentId",
                                        help_text="id родительской группы (необязательный параметр)",
                                        allow_null=True,
                                        required=False)

    class Meta:
        fields = (
            'id',
            'name',
            'description',
            'parentId',
        )
class PointSerializer(serializers.Serializer):
    """
    A single latitude/longitude coordinate.
    """
    lat = serializers.FloatField(label="lat",
                                 help_text="Широта")
    lon = serializers.FloatField(label="lon",
                                 help_text="Долгота")

    class Meta:
        fields = (
            "lat",
            "lon",
        )
class GeoZoneSerializer(serializers.Serializer):
    """
    Structure containing the data for a geozone: id, name, and the polygon
    vertices as a list of points.
    """
    id = serializers.IntegerField(label="id",
                                  help_text="Идентификатор геозоны")
    name = serializers.CharField(label="name",
                                 max_length=250,
                                 allow_blank=True,
                                 help_text="Текстовое описание")
    # Nested list of lat/lon vertices forming the zone polygon.
    points = PointSerializer(label="points",
                             many=True,
                             help_text="Координаты полигона геозоны")

    class Meta:
        fields = (
            "id",
            "name",
            "points",
        )
class RouteControlPointSerializer(serializers.Serializer):
    """
    Structure describing the control points (checkpoints) of a route.
    """
    geoZoneId = serializers.IntegerField(label="geoZoneId",
                                         help_text="Уникальный идентификатор контрольной точки (геозоны) маршрута")
    # Named 'ffrom' because 'from' is a Python keyword; source='from' maps it
    # to the 'from' attribute, and to_representation renames the output key.
    ffrom = serializers.DateTimeField(label='from',
                                      source='from',
                                      help_text="Дата и время планового въезда в контрольной точки",
                                      format='%Y-%m-%dT%H:%M:%S',
                                      input_formats=['%Y-%m-%dT%H:%M:%S'],
                                      default_timezone=pytz.utc)
    to = serializers.DateTimeField(label='to',
                                   help_text="Дата и время планового выезда из контрольной точки",
                                   format='%Y-%m-%dT%H:%M:%S',
                                   input_formats=['%Y-%m-%dT%H:%M:%S'],
                                   default_timezone=pytz.utc)
    description = serializers.CharField(label="description",
                                        help_text="Описание контрольной точки маршрута",
                                        max_length=250)

    def to_representation(self, instance):
        # Expose the planned-entry time under its wire name 'from'.
        rep = super().to_representation(instance)
        rep['from'] = rep.pop('ffrom')
        return rep

    class Meta:
        fields = ("geoZoneId",
                  'from',
                  'to',
                  'description',
                  )
# class RouteCriteriumSerializer(serializers.Serializer):
# class Meta:
# fields = (
# 'routeCriteriumType',
# 'routeCriteriumValues',
# 'isStatusEffect'
# )
class RouteSerializer(serializers.Serializer):
    """
    Structure describing a route: identity, actual and planned time windows,
    the device/driver assigned, and the list of control points.
    """
    id = serializers.IntegerField(label="id",
                                  help_text="Уникальный идентификатор маршрута")
    name = serializers.CharField(label="name",
                                 max_length=250,
                                 allow_blank=True,
                                 help_text="Имя и описание маршрута")
    # 'ffrom' avoids the Python keyword 'from'; to_representation renames it.
    ffrom = serializers.DateTimeField(label='from',
                                      source='from',
                                      help_text="Дата и время начала маршрута",
                                      format='%Y-%m-%dT%H:%M:%S',
                                      input_formats=['%Y-%m-%dT%H:%M:%S'],
                                      default_timezone=pytz.utc)
    to = serializers.DateTimeField(label='to',
                                   help_text="Дата и время окончания маршрута",
                                   format='%Y-%m-%dT%H:%M:%S',
                                   input_formats=['%Y-%m-%dT%H:%M:%S'],
                                   default_timezone=pytz.utc)
    planBegin = serializers.DateTimeField(label='planBegin',
                                          help_text="Плановое время начала маршрута",
                                          format='%Y-%m-%dT%H:%M:%S',
                                          input_formats=['%Y-%m-%dT%H:%M:%S'],
                                          default_timezone=pytz.utc)
    planEnd = serializers.DateTimeField(label='planEnd',
                                        help_text="Плановое время окончания маршрута",
                                        format='%Y-%m-%dT%H:%M:%S',
                                        input_formats=['%Y-%m-%dT%H:%M:%S'],
                                        default_timezone=pytz.utc)
    deviceId = serializers.IntegerField(label="deviceId",
                                        help_text='Идентификатор устройства "выполняющего" маршрут')
    driverId = serializers.IntegerField(label="driverId",
                                        help_text='Идентификатор водителя выполняющего маршрут')
    routeControlPoints = RouteControlPointSerializer(label='routeControlPoints',
                                                     help_text="Cписок структур контрольных точек маршрута",
                                                     many=True)
    # routeCriteriums = RouteCriteriumSerializer(label='routeCriteriums',
    #                                            help_text="Список структур критериев оценки прохождения маршрута",
    #                                            many=True)

    def to_representation(self, instance):
        # Expose the start time under its wire name 'from'.
        rep = super().to_representation(instance)
        rep['from'] = rep.pop('ffrom')
        return rep

    class Meta:
        # NOTE(review): planBegin/planEnd are not listed here -- Meta.fields
        # is not consulted by a plain Serializer, but confirm before moving
        # this to a ModelSerializer.
        fields = ("id",
                  'name',
                  'from',
                  'to',
                  'deviceId',
                  'driverId',
                  'routeControlPoints',
                  # 'routeCriteriums',
                  )
class GetAllRoutestRequestSerializer(serializers.Serializer):
    """Request body for listing routes within a [from, to] time window.

    NOTE(review): class name spells "Routest" -- kept as-is since renaming
    would break existing references.
    """
    # 'ffrom' avoids the Python keyword 'from'; to_representation renames it.
    ffrom = serializers.DateTimeField(label='from',
                                      source='from',
                                      help_text="Дата и время начала маршрута",
                                      format='%Y-%m-%dT%H:%M:%S',
                                      input_formats=['%Y-%m-%dT%H:%M:%S'],
                                      default_timezone=pytz.utc)
    to = serializers.DateTimeField(label='to',
                                   help_text="Дата и время окончания маршрута",
                                   format='%Y-%m-%dT%H:%M:%S',
                                   input_formats=['%Y-%m-%dT%H:%M:%S'],
                                   default_timezone=pytz.utc)

    def to_representation(self, instance):
        rep = super().to_representation(instance)
        rep['from'] = rep.pop('ffrom')
        return rep

    class Meta:
        fields = ('from', 'to')
class GetRouteStatusesRequestSerializer(serializers.Serializer):
    """Request body: the non-empty list of route ids to query statuses for."""
    routeIds = serializers.ListField(label="routeIDs",
                                     help_text='Cписок идентификаторов маршрутов',
                                     allow_empty=False,
                                     child=serializers.IntegerField())

    class Meta:
        fields = (
            'routeIds',
        )
class ControlPointStatusSerializer(serializers.Serializer):
    """
    Status of passing a single control point (geozone) of a route.

    Note: control-point statuses reuse the RouteStatusValue enumeration
    used for whole-route statuses.
    """
    controlPointID = serializers.IntegerField(label="controlPointID",
                                              help_text="Порядковый номер контрольной точки в маршруте")
    controlPointStatusValue = serializers.ChoiceField(label='controlPointStatusValue',
                                                      choices=[
                                                          'Executed', 'NotExecuted', 'ExecutedPartially', 'Performed'],
                                                      help_text='Значение статуса прохождения контрольной точки: выполнен (Executed), не выполнен (NotExecuted), частично выполнен(ExecutedPartially), выполняется(Performed)')
    enterFact = serializers.DateTimeField(label='enterFact',
                                          help_text="Фактическое время входа в контрольную точку",
                                          format='%Y-%m-%dT%H:%M:%S',
                                          input_formats=['%Y-%m-%dT%H:%M:%S'],
                                          default_timezone=pytz.utc)
    exitFact = serializers.DateTimeField(label='exitFact',
                                         help_text="Фактическое время выхода из контрольной точки",
                                         format='%Y-%m-%dT%H:%M:%S',
                                         input_formats=['%Y-%m-%dT%H:%M:%S'],
                                         default_timezone=pytz.utc)

    class Meta:
        fields = (
            # FIX: was 'controlPointId' -- case did not match the declared
            # field controlPointID.
            'controlPointID',
            'controlPointStatusValue',
            'enterFact',
            'exitFact',
        )
class RouteStatusSerializer(serializers.Serializer):
    """Status of passing a whole route.

    Note: the route status value uses the RouteStatusValue enumeration;
    per-control-point statuses use the ControlPointStatus structure.
    """

    routeId = serializers.IntegerField(label="routeId",
                                       help_text="Уникальный идентификатор маршрута, для которого приводится описание статуса")
    routeStatusValue = serializers.ChoiceField(label='routeStatusValue',
                                               choices=[
                                                   'Executed', 'NotExecuted', 'ExecutedPartially', 'Performed'],
                                               help_text='Значение статуса прохождения маршрута: выполнен (Executed), не выполнен (NotExecuted), частично выполнен(ExecutedPartially), выполняется(Performed)')
    routePercentage = serializers.IntegerField(label="routePercentage",
                                               help_text="Процент прохождения маршрута")
    fromFact = serializers.DateTimeField(label='fromFact',
                                         help_text="Фактическое время начала маршрута",
                                         format='%Y-%m-%dT%H:%M:%S',
                                         input_formats=['%Y-%m-%dT%H:%M:%S'],
                                         default_timezone=pytz.utc)
    toFact = serializers.DateTimeField(label='toFact',
                                       help_text="фактическое время окончания маршрута",
                                       format='%Y-%m-%dT%H:%M:%S',
                                       input_formats=['%Y-%m-%dT%H:%M:%S'],
                                       default_timezone=pytz.utc)
    mileage = serializers.FloatField(label="mileage",
                                     help_text="Пробег по маршруту")
    controlPointStatuses = ControlPointStatusSerializer(label='controlPointStatuses',
                                                        help_text='Статусы прохождения отдельных точек маршрута',
                                                        many=True)

    class Meta:
        # Fixed: was 'tofact', which did not match the declared 'toFact'
        # field. (Meta.fields is only honoured by ModelSerializer; kept here
        # for documentation/consistency.)
        fields = (
            'routeId',
            'routeStatusValue',
            'routePercentage',
            'fromFact',
            'toFact',
            'mileage',
            'controlPointStatuses'
        )
class GetChannelDescriptorsRequestSerializer(serializers.Serializer):
    """Request payload: the vehicle whose channel descriptors are requested."""

    # Vehicle identifier.
    device = serializers.IntegerField(label="device",
                                      help_text='Идентификатор автомобиля',
                                      )

    class Meta:
        fields = (
            'device',
        )
class ChannelDescriptorSerializer(serializers.Serializer):
    """Structure describing a single telemetry channel."""

    id = serializers.IntegerField(label="id",
                                  help_text="Идентификатор канала")
    name = serializers.CharField(label="name",
                                 max_length=250,
                                 allow_blank=True,
                                 help_text="Имя канала")
    # Value type reported by the channel.
    type = serializers.ChoiceField(label='type',
                                   choices=[
                                       'Float', 'Boolean', 'Long', 'Datetime', 'String', 'Point', 'LongSeq'],
                                   help_text='Типы значений каналов')

    class Meta:
        fields = (
            'id',
            'name',
            'type',
        )
class GetPositionRequestSerializer(serializers.Serializer):
    """Request payload: vehicle position at (or near) a given UTC timestamp."""

    device = serializers.IntegerField(label="device",
                                      help_text="Идентификатор транспортного средства")
    datetime = serializers.DateTimeField(label='datetime',
                                         help_text="Дата в формате YYYY-MM-DDTHH:MM:SS",
                                         format='%Y-%m-%dT%H:%M:%S',
                                         input_formats=['%Y-%m-%dT%H:%M:%S'],
                                         default_timezone=pytz.utc)
    # Tolerance in seconds; the value is looked up on
    # [datetime - threshold; datetime + threshold].
    threshold = serializers.IntegerField(label='threshold',
                                         help_text='Погрешность в секундах. Значение вычисляется на отрезке [datetime-threshold; datetime+threshold]',
                                         default=0)

    class Meta:
        fields = (
            'device',
            'datetime',
            'threshold'
        )
class GetCurrentRoutesRequestSerializer(serializers.Serializer):
    """Request payload: UTC time window for selecting current routes."""

    time_in = serializers.DateTimeField(label='time_in',
                                        help_text="Дата и время начала маршрута",
                                        format='%Y-%m-%dT%H:%M:%S',
                                        input_formats=['%Y-%m-%dT%H:%M:%S'],
                                        default_timezone=pytz.utc)
    time_out = serializers.DateTimeField(label='time_out',
                                         help_text="Дата и время окончания маршрута",
                                         format='%Y-%m-%dT%H:%M:%S',
                                         input_formats=['%Y-%m-%dT%H:%M:%S'],
                                         default_timezone=pytz.utc)

    class Meta:
        fields = (
            'time_in',
            'time_out'
        )
class MtGeoZoneSerializer(serializers.Serializer):
    """A visited geozone (platform) with its enter/exit times.

    NOTE(review): in_time/out_time are plain strings, not DateTimeFields —
    presumably pre-formatted timestamps; confirm against the producer.
    """

    description = serializers.CharField(label="description",
                                        help_text="Описание площадки",
                                        max_length=250)
    nav_id = serializers.IntegerField(label="nav_id",
                                      help_text="Идентификатор канала")
    mt_id = serializers.IntegerField(label="mt_id",
                                     help_text="Идентификатор водителя")
    in_time = serializers.CharField(label="in_time",
                                    help_text="Время входа",
                                    max_length=250)
    out_time = serializers.CharField(label="out_time",
                                     help_text="Время выхода",
                                     max_length=250)

    class Meta:
        fields = (
            'description',
            'nav_id',
            'in_time',
            'out_time',
            'mt_id',
        )
class CurrentRoutesSerializer(serializers.Serializer):
    """A current route together with the MT geozones (platforms) it visits."""

    id = serializers.IntegerField(label="id",
                                  help_text="Идентификатор маршрута")
    device = serializers.IntegerField(label="device",
                                      help_text="Идентификатор канала")
    # Possibly empty list of visited platforms, each a MtGeoZone structure.
    mtIds = serializers.ListField(child=MtGeoZoneSerializer(label="mtId",
                                                            help_text="МТ ID площадки"),
                                  label="mtIds",
                                  allow_empty=True,
                                  help_text="Список МТ ID площадок")

    class Meta:
        fields = (
            'id',
            'device',
            'mtIds',
        )
class GetRouteUnloadsRequestSerializer(serializers.Serializer):
    """Request payload: route ids, a UTC time window and (optionally) the
    platform statuses to filter unloads by."""

    ids = serializers.ListField(child=serializers.IntegerField(label="id",
                                                               help_text="Идентификатор маршрута"),
                                help_text="Идентификаторы маршрутов",
                                label='ids')
    time_in = serializers.DateTimeField(label='time_in',
                                        help_text="Дата и время начала маршрута",
                                        format='%Y-%m-%dT%H:%M:%S',
                                        input_formats=['%Y-%m-%dT%H:%M:%S'],
                                        default_timezone=pytz.utc)
    time_out = serializers.DateTimeField(label='time_out',
                                         help_text="Дата и время окончания маршрута",
                                         format='%Y-%m-%dT%H:%M:%S',
                                         input_formats=['%Y-%m-%dT%H:%M:%S'],
                                         default_timezone=pytz.utc)
    # Optional; when present it must be a non-empty list of RouteStatusValue
    # enumeration members.
    statuses = serializers.ListField(label='statuses',
                                     help_text="Состояния площадок",
                                     required=False,
                                     allow_empty=False,
                                     child=serializers.ChoiceField(label='statuses',
                                                                   required=False,
                                                                   allow_blank=False,
                                                                   choices=[
                                                                       'Executed', 'NotExecuted', 'ExecutedPartially', 'Performed'],
                                                                   help_text='Значение статуса прохождения контрольной точки: выполнен (Executed), не выполнен (NotExecuted), частично выполнен (ExecutedPartially), выполняется (Performed)'))

    class Meta:
        fields = (
            'ids',
            'time_in',
            'time_out',
            'statuses',
        )
class RouteUnloadsSerializer(serializers.Serializer):
    """An unloaded platform: like MtGeoZoneSerializer plus a 'state' field."""

    description = serializers.CharField(label="description",
                                        help_text="Описание площадки",
                                        max_length=250)
    nav_id = serializers.IntegerField(label="nav_id",
                                      help_text="Идентификатор канала")
    mt_id = serializers.IntegerField(label="mt_id",
                                     help_text="Идентификатор водителя")
    in_time = serializers.CharField(label="in_time",
                                    help_text="Время входа",
                                    max_length=250)
    out_time = serializers.CharField(label="out_time",
                                     help_text="Время выхода",
                                     max_length=250)
    # Free-form status string; presumably one of the RouteStatusValue
    # members used elsewhere — confirm with the producer.
    state = serializers.CharField(label="state",
                                  help_text="Статус",
                                  max_length=250)

    class Meta:
        fields = (
            'description',
            'nav_id',
            'in_time',
            'out_time',
            'mt_id',
            'state',
        )
class RouteUnloadsSerializerQwe(serializers.Serializer):
    """A route id together with its list of unloaded platforms.

    NOTE(review): the 'Qwe' suffix looks like a placeholder name; renaming
    would break callers, so it is only flagged here.
    """

    id = serializers.IntegerField(label="id",
                                  help_text="Идентификатор маршрута")
    unloaded_platforms = serializers.ListField(label='unloaded_platforms',
                                               help_text="Состояния площадок",
                                               child=RouteUnloadsSerializer(label='unloaded_platform',
                                                                            help_text='Отгруженная площадка'))

    class Meta:
        fields = (
            'id',
            'unloaded_platforms',
        )
|
from optparse import OptionParser

# Program-level option flags. OptionParser consumes these from the command
# line; whatever remains is handed to ApplicationArgsParser below, which
# interprets it as task names and key=value task arguments.
flags_parser = OptionParser()
flags_parser.add_option('-f', '--snakefile', dest='filename', metavar='FILE',
                        help="Use FILE as the Snakefile")
flags_parser.add_option('-t', '--trace', dest='trace', action='store_true',
                        help="Turn on verbose backtraces")
flags_parser.add_option('-T', '--tasks', dest='show_tasks', action='store_true',
                        help="Display the tasks with descriptions and exit")
flags_parser.add_option('--version', dest='version', action='store_true',
                        help="Display the version information and exit")
class ApplicationArgsParser(object):
    """Parses the arguments used in the command line. Snake uses a combination
    of option flags, positional, and keyword arguments. In order to properly
    parse the arguments, a combination of OptionParser and custom parsing is
    used. The option parser eats up all of the program options and the rest
    is left as positional arguments to be parsed as task names and keyword
    arguments for tasks.
    """

    @classmethod
    def parse(cls, tokens):
        """
        Parses the command line and returns the list of tasks to execute, the
        keyword arguments to use, and the program arguments passed in.

        :return: tuple of tasks, keyword arguments, and program options
        """
        opts, remaining = flags_parser.parse_args(tokens)
        tasks, args = cls()._parse_positional_args(remaining)
        return tasks, args, opts

    def __init__(self):
        self._tasks = []  # positional task names, in command-line order
        self._args = {}   # key=value keyword arguments

    def _parse_positional_args(self, tokens):
        """Split *tokens* into task names and key=value keyword arguments."""
        for token in tokens:
            if '=' in token:
                self._parse_arg(token)
            else:
                self._parse_task(token)
        return self._tasks, self._args

    def _parse_task(self, token):
        """Record *token* as a task name to execute."""
        self._tasks.append(token)

    def _parse_arg(self, token):
        """
        Arguments come from the command line in the form of key=value.
        """
        # Split on the first '=' only, so values may themselves contain '='
        # (e.g. msg=a=b). The original split('=') raised ValueError there.
        key, value = token.split('=', 1)
        self._args[key] = value
|
#!/bin/python
import picamera
import argparse
from datetime import datetime, date
import socket
import time
import os
def get_default_filename():
    """Return an 'Atmos_<YYYYMMDD>_<HHMMSS>' base name from the current local time."""
    # A single strftime call mixes literal text with date/time directives,
    # producing exactly the same string as joining the parts separately.
    return datetime.now().strftime("Atmos_%Y%m%d_%H%M%S")
def get_default_video_filename():
    """Default timestamped filename for an H.264 video capture."""
    return "{}.h264".format(get_default_filename())
def get_default_image_filename():
    """Default timestamped filename for a JPEG still capture."""
    return get_default_filename() + ".jpg"


# Backward-compatible alias: the original public name contained a typo
# ("defailt"); keep it so existing callers continue to work.
get_defailt_image_filename = get_default_image_filename
def record_to_file(camera, filename, duration):
    """Record H.264 video from *camera* into *filename* for *duration* seconds."""
    # 'with' guarantees the file is closed even if recording raises;
    # the original leaked the handle on error.
    with open(filename, "wb") as stream:
        record(camera, stream, duration)
def record_to_sock(camera, port, duration):
    """Accept one TCP connection on *port* and stream H.264 video to it.

    Blocks until a client connects, records for *duration* seconds, then
    closes both the connection and the listening socket (the original
    leaked both).
    """
    sock = socket.socket()
    try:
        sock.bind(("0.0.0.0", port))
        sock.listen(0)
        connection = sock.accept()[0].makefile('wb')
        try:
            record(camera, connection, duration)
        finally:
            connection.close()
    finally:
        sock.close()
def record(camera, stream, duration):
    """Record H.264 video (quality 23) from *camera* into *stream* for *duration* seconds."""
    # NOTE(review): the preview is started but never stopped here — confirm
    # callers expect the preview to stay on after recording.
    camera.start_preview()
    camera.start_recording(stream, format='h264', quality=23)
    camera.wait_recording(duration)
    camera.stop_recording()
def timelapse(camera, period):
    """Capture a still every *period* seconds into a dated 'Atmos_Timelapse_*' dir.

    NOTE(review): this loops forever (capture_continuous is an endless
    generator) and changes the process working directory as a side effect.
    """
    today = date.today()
    dir_name = '_'.join(['Atmos', 'Timelapse', today.strftime('%Y%m%d')])
    if not os.path.isdir(dir_name):
        os.mkdir(dir_name)
    os.chdir(dir_name)
    camera.start_preview()
    # Filenames are HHMM.jpg, expanded by picamera's {timestamp:...} template.
    for filename in camera.capture_continuous('{timestamp:%H%M}.jpg'):
        time.sleep(period)
def main():
    """CLI entry point: parse arguments, configure the camera, run the action."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--action", default="record-to-file")
    parser.add_argument("-o", "--output", default=get_default_video_filename())
    parser.add_argument("-d", "--duration", default=10, type=int)
    parser.add_argument("-p", "--port", default=8000, type=int)
    args = parser.parse_args()

    camera = picamera.PiCamera()
    camera.resolution = (640, 480)
    camera.awb_mode = 'auto'

    # Dispatch table instead of an if/elif chain. An unrecognised action is a
    # silent no-op, matching the original behaviour.
    actions = {
        "record-to-file": lambda: record_to_file(camera, args.output, args.duration),
        "record-to-socket": lambda: record_to_sock(camera, args.port, args.duration),
        "timelapse": lambda: timelapse(camera, 60),
    }
    handler = actions.get(args.action)
    if handler is not None:
        handler()


if __name__ == '__main__':
    main()
|
# Price list: a flat tuple of alternating (product name, unit price) pairs.
listagem = ("lápis", 1.10, 'borracha', 0.50, 'caderno', 15, 'mochila', 150, 'caneta', 1)

print("-=" * 15)
print("LOJÃO DO PABLO")
print("-=" * 15)
# Walk the flat tuple two items at a time (name, price) instead of indexing
# with range(len(...)) and testing the index's parity.
for produto, preco in zip(listagem[::2], listagem[1::2]):
    # Name padded with dots to 30 columns, price right beside it.
    print(f"{produto:.<30}", end="")
    print(f'R${preco:< 7.2f}')
# -*- coding: utf-8 -*-
"""Tests related to drawing new points from the pool."""
import numpy as np
import pytest
from unittest.mock import MagicMock
from nessai.proposal import FlowProposal
def test_draw_populated(proposal):
    """Drawing from an already-populated proposal consumes the last index."""
    proposal.populated = True
    proposal.samples = np.arange(3)
    proposal.indices = [0, 1, 2]
    drawn = FlowProposal.draw(proposal, None)
    assert drawn == proposal.samples[2]
    assert proposal.indices == [0, 1]
def test_draw_populated_last_sample(proposal):
    """Consuming the final remaining index flips the proposal to unpopulated."""
    proposal.populated = True
    proposal.samples = np.arange(3)
    proposal.indices = [0]
    drawn = FlowProposal.draw(proposal, None)
    assert drawn == proposal.samples[0]
    assert proposal.indices == []
    assert proposal.populated is False
@pytest.mark.parametrize("update", [False, True])
def test_draw_not_populated(proposal, update, wait):
    """Drawing from an unpopulated proposal triggers population first."""
    import datetime

    proposal.populated = False
    proposal.poolsize = 100
    proposal.population_time = datetime.timedelta()
    proposal.samples = None
    proposal.indices = []
    proposal.update_poolsize = update
    proposal.update_poolsize_scale = MagicMock()
    proposal.ns_acceptance = 0.5

    def fake_populate(*args, **kwargs):
        # Simulate a populate() call that takes measurable wall time.
        wait()
        proposal.populated = True
        proposal.samples = np.arange(3)
        proposal.indices = [0, 1, 2]

    proposal.populate = MagicMock(side_effect=fake_populate)

    drawn = FlowProposal.draw(proposal, 1.0)

    assert drawn == 2
    assert proposal.populated is True
    assert proposal.population_time.total_seconds() > 0.0
    proposal.populate.assert_called_once_with(1.0, N=100)
    assert proposal.update_poolsize_scale.called == update
|
#! /usr/bin/python2.7
#----------------------------------------------------------------------------------------
# Name:
# pltWaterIsot.py
#
# Purpose:
# Plot time series of multiple species retrieved with FTIR columns/VMRs
# Note: See below for inputs
#
# Notes:
#
#
# Version History:
# Created, May, 2016 Ivan Ortega (iortega@ucar.edu)
#----------------------------------------------------------------------------------------
#-------------------------#
# Import Standard modules #
#-------------------------#
from scipy.io import netcdf
import os
import datetime as dt
import numpy as np
import numpy.ma as ma
import sys
import glob
from scipy import interpolate
import matplotlib.dates as md
import matplotlib.dates as md
from matplotlib.dates import DateFormatter, MonthLocator, YearLocator, DayLocator, WeekdayLocator, MONDAY
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter, MultipleLocator,AutoMinorLocator,ScalarFormatter
from matplotlib.backends.backend_pdf import PdfPages #to save multiple pages in 1 pdf...
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.cm as mplcm
import matplotlib.colors as colors
import matplotlib.gridspec as gridspec
import matplotlib.gridspec as gridspec
from itertools import izip
from numpy import *
import myfunctions as mf
import dataOutClass as dc
from collections import OrderedDict
import PltClass as mp
#-------------------------#
# Define helper functions #
#-------------------------#
def ckDir(dirName,logFlg=False,exit=False):
    '''Check that directory *dirName* exists.

    Returns True when it exists, False otherwise. Optionally logs the failure
    via *logFlg* (a logger or False) and/or terminates the process when
    *exit* is True. (Python 2 source — print statements kept as-is.)
    '''
    if not os.path.exists( dirName ):
        print 'Input Directory %s does not exist' % (dirName)
        if logFlg: logFlg.error('Directory %s does not exist' % dirName)
        if exit: sys.exit()
        return False
    else:
        return True
def ckFile(fName,logFlg=False,exit=False):
    '''Check if a file exists.

    Returns True when *fName* is a regular file, False otherwise. Optionally
    logs the failure via *logFlg* and/or terminates the process when *exit*
    is True.
    '''
    if not os.path.isfile(fName):
        print 'File %s does not exist' % (fName)
        if logFlg: logFlg.error('Unable to find file: %s' % fName)
        if exit: sys.exit()
        return False
    else:
        return True
def closeFig(self):
    # NOTE(review): module-level function written like a method — expects an
    # object exposing a ``pdfsav`` attribute (a PdfPages instance). It is not
    # called anywhere in this file while saveFlg is False; confirm intended use.
    self.pdfsav.close()
#----------------------------#
# #
# --- Main--- #
# #
#----------------------------#
def main():
    """Read H2O/HDO FTIR retrievals, filter them, and plot mean VMR and dD profiles.

    All configuration (site, gas/version pairs, filter thresholds, date range)
    is hard-coded in the initialization section below. Python 2 source.
    """
    #-----------------------------------------------------------------------------------------
    # Initializations for FTIR
    #-----------------------------------------------------------------------------------------
    loc = 'mlo'
    gasName = ['h2o', 'hdo']
    ver = ['Current_ERA', 'Current']
    ctlF = ['sfit4.ctl', 'sfit4.ctl']
    saveFlg = False
    pltFile = '/data/iortega/results/'+loc.lower()+'/fig/'+loc.upper()+'_Water_Isot.pdf'
    #------
    # Flags - 2
    #------
    errorFlg = True        # Flag to process error data
    fltrFlg = True         # Flag to filter the data
    byYrFlg = False        # Flag to create plots for each individual year in date range
    szaFlg = True          # Flag to filter based on min and max SZA
    dofFlg = True          # Flag to filter based on min DOFs
    pcNegFlg = False       # Flag to filter profiles with negative partial columns
    tcNegFlg = False       # Flag to filter profiles with negative total columns
    tcMMFlg = False        # Flag to filter based on min and max total column amount
    cnvrgFlg = True        # Flag to filter profiles that did not converge
    rmsFlg = True          # Flag to filter based on max RMS
    chiFlg = False         # Flag to filter based on max CHI_2_Y
    mnthFlg = False        # Flag to filter based on month
    mnths = [6,7,8]        # Months to filter on (these are the months to include data)
    maxRMS = [3.0, 3.0]    # Max Fit RMS to filter data. Data is filtered according to <= maxrms
    minDOF = [1.0, 1.0]    # Min DOFs for filtering
    minSZA = 0.0           # Min SZA for filtering
    maxSZA = 90.0          # Max SZA for filtering
    maxCHI = 2.0           # Max CHI_y_2 value
    maxTC = 5.0E24         # Max Total column amount for filtering
    minTC = 0.0            # Min Total column amount for filtering
    sclfct = 1.0E6         # Scale factor to apply to vmr plots (ppmv=1.0E6, ppbv=1.0E9, etc)
    sclfctName = 'ppm'     # Name of scale factor for labeling plots
    pCols = [ [1.6, 8.0] ] #--ALTITUDE TO CALCULATE PARTIAL COLUMNS AND WEIGHTED VMR
    #----------------------
    # Date range to process
    #----------------------
    iyear = 2016
    imnth = 1
    iday = 1
    fyear = 2016
    fmnth = 3
    fday = 30
    #----------------------------#
    #        --- START ---       #
    #----------------------------#
    retDir = [ '/data1/ebaumer/'+loc.lower()+'/'+g.lower()+'/'+v+'/' for g,v in izip(gasName,ver)]
    ctlFile = ['/data1/ebaumer/'+loc.lower()+'/'+g.lower()+'/'+'x.'+g.lower()+'/'+ ctlF[i] for i, g in enumerate(gasName)]
    #---------------------------
    # Check file and directories
    #---------------------------
    for d in retDir: ckDir(d,exit=True)
    for c in ctlFile: ckFile(c,exit=True)
    ckDir(os.path.dirname(os.path.realpath(pltFile)),exit=True)
    #-------------------------------------
    # Create instance of output data class
    #-------------------------------------
    statDataCl = OrderedDict()
    for i,gas in enumerate(gasName):
        statDataCl[gas+'_'+ver[i]] = dc.ReadOutputData(retDir[i],'',ctlFile[i],iyear,imnth,iday,fyear,fmnth,fday)
    #--------------
    # Read profiles
    #--------------
    # The first entry (H2O) also reads the HDO profile, needed later for dD.
    # retapFlg presumably toggles retrieved vs a priori profiles — TODO confirm.
    for i, gasVer in enumerate(statDataCl):
        if i == 0:
            statDataCl[gasVer].readprfs([statDataCl[gasVer].PrimaryGas, 'HDO'], retapFlg=1)
            statDataCl[gasVer].readprfs([statDataCl[gasVer].PrimaryGas, 'HDO'], retapFlg=0)
        else:
            statDataCl[gasVer].readprfs([statDataCl[gasVer].PrimaryGas], retapFlg=1)
            statDataCl[gasVer].readprfs([statDataCl[gasVer].PrimaryGas], retapFlg=0)
        if statDataCl[gasVer].empty:
            print 'No retreivals found for {}. Exiting.......'.format(gasVer)
            sys.exit()
    # Per-gas/version containers, keyed by '<gas>_<version>'.
    rPrfVMR = OrderedDict()
    rPrfMol = OrderedDict()
    dates = OrderedDict()
    alt = OrderedDict()
    Airmass = OrderedDict()
    waterVMR = OrderedDict()
    waterMol = OrderedDict()
    totClmn = OrderedDict()
    TCdryAir = OrderedDict()
    TCdry = OrderedDict()
    rPrfDry = OrderedDict()
    rms = OrderedDict()
    dofsAvg = OrderedDict()
    dofsAvg_cs = OrderedDict()
    vmrP = OrderedDict()
    TCp = OrderedDict()
    avkSCF = OrderedDict()
    avkVMR = OrderedDict()
    avkSCFav = OrderedDict()
    avkVMRav = OrderedDict()
    aPrfVMR = OrderedDict()
    aPrfMol = OrderedDict()
    for j, gasVer in enumerate(statDataCl):
        rPrfVMR[gasVer] = np.asarray(statDataCl[gasVer].rprfs[statDataCl[gasVer].PrimaryGas]) * sclfct
        rPrfMol[gasVer] = np.asarray(statDataCl[gasVer].rprfs[statDataCl[gasVer].PrimaryGas] * np.asarray(statDataCl[gasVer].rprfs['AIRMASS']))
        dates[gasVer] = statDataCl[gasVer].rprfs['date']
        alt[gasVer] = np.asarray(statDataCl[gasVer].rprfs['Z'][0,:])
        Airmass[gasVer] = np.asarray(statDataCl[gasVer].rprfs['AIRMASS'])
        waterVMR[gasVer] = np.asarray(statDataCl[gasVer].aprfs['H2O'])
        waterMol[gasVer] = np.asarray(statDataCl[gasVer].aprfs['H2O'] * Airmass[gasVer])
        totClmn[gasVer] = np.sum(rPrfMol[gasVer],axis=1)
        TCdryAir[gasVer] = np.sum(Airmass[gasVer],axis=1) - np.sum(waterMol[gasVer],axis=1)
        TCdry[gasVer] = (totClmn[gasVer] / TCdryAir[gasVer]) * sclfct
        # HDO retrieved profile is only carried by the first (H2O) dataset.
        if j == 0: rPrfVMRHDO = np.asarray(statDataCl[gasVer].rprfs['HDO']) * sclfct
        aPrfVMR[gasVer] = np.asarray(statDataCl[gasVer].aprfs[statDataCl[gasVer].PrimaryGas]) * sclfct
        aPrfMol[gasVer] = np.asarray(statDataCl[gasVer].aprfs[statDataCl[gasVer].PrimaryGas] * np.asarray(statDataCl[gasVer].aprfs['AIRMASS']))
        #----------------------------------------
        # This is the mixing ratio for DRY AIR!!!
        #----------------------------------------
        rPrfDry[gasVer] = np.asarray(statDataCl[gasVer].rprfs[statDataCl[gasVer].PrimaryGas]) / (1.0 - waterVMR[gasVer]) * sclfct
        #----------------------------------
        # Read Summary data (For filtering)
        #----------------------------------
        statDataCl[gasVer].readsummary()
        rms[gasVer] = np.asarray(statDataCl[gasVer].summary[statDataCl[gasVer].PrimaryGas+'_FITRMS'])
        #--------------------
        # Call to filter data
        #--------------------
        if fltrFlg: statDataCl[gasVer].fltrData(statDataCl[gasVer].PrimaryGas,mxrms=maxRMS[j],rmsFlg=rmsFlg, minDOF=minDOF[j], dofFlg=dofFlg,
                                                tcFlg=tcNegFlg, pcFlg=pcNegFlg , cnvrgFlg=True)
        else: statDataCl[gasVer].inds = np.array([])
        #--------------------------------------------
        # Read Error data to get AVK and profile DOFs
        #-------------------------------------------------
        # Determine if AVK has been created via sfit4 core
        # code or via python error analysis
        #-------------------------------------------------
        if errorFlg: # Read AVK from error output
            statDataCl[gasVer].readError(totFlg=False,sysFlg=False,randFlg=False,vmrFlg=True,avkFlg=True,KbFlg=False)
            #---------------------
            # Get averaging kernel
            #---------------------
            avkSCF[gasVer] = np.delete(np.asarray(statDataCl[gasVer].error['AVK_scale_factor']),statDataCl[gasVer].inds,axis=0)
            avkVMR[gasVer] = np.delete(np.asarray(statDataCl[gasVer].error['AVK_vmr']),statDataCl[gasVer].inds,axis=0)
            dofs = np.diagonal(avkSCF[gasVer],axis1=1,axis2=2)
            avkSCFav[gasVer] = np.mean(avkSCF[gasVer],axis=0)
            avkVMRav[gasVer] = np.mean(avkVMR[gasVer],axis=0)
            dofsAvg[gasVer] = np.diag(avkSCFav[gasVer])
            dofsAvg_cs[gasVer] = np.cumsum(np.diag(avkSCFav[gasVer])[::-1])[::-1]
        else: # Read AVK from sfit4 output (only contains scaled AVK)
            avkSCFi = []
            for d in statDataCl[gasVer].dirLst:
                lines = dc.tryopen( d + statDataCl[gasVer].ctl['file.out.ak_matrix'][0])
                if not lines: continue
                avkSCFi.append(np.array( [ [ float(x) for x in line.strip().split() ] for line in lines[2:] ] ))
            if not statDataCl[gasVer].readPrfFlgApr[statDataCl[gasVer].PrimaryGas]: statDataCl[gasVer].readprfs([statDataCl[gasVer].PrimaryGas],retapFlg=0) # Apriori Profiles
            avkSCF[gasVer] = np.asarray(avkSCFi)
            nobs = np.shape(avkSCF[gasVer])[0]
            n_layer = np.shape(avkSCF[gasVer])[1]
            avkVMR[gasVer] = np.zeros((nobs,n_layer,n_layer))
            # Convert scaled AVK to VMR AVK: A_vmr = diag(xa) A diag(1/xa).
            for obs in range(0,nobs):
                Iapriori = np.zeros((n_layer,n_layer))
                IaprioriInv = np.zeros((n_layer,n_layer))
                np.fill_diagonal(Iapriori,statDataCl[gasVer].aprfs[statDataCl[gasVer].PrimaryGas.upper()][obs])
                np.fill_diagonal(IaprioriInv, 1.0 / (statDataCl[gasVer].aprfs[statDataCl[gasVer].PrimaryGas.upper()][obs]))
                avkVMR[gasVer][obs,:,:] = np.dot(np.dot(Iapriori,np.squeeze(avkSCF[gasVer][obs,:,:])),IaprioriInv)
            avkSCF[gasVer] = np.delete(avkSCF[gasVer],statDataCl[gasVer].inds,axis=0)
            avkVMR[gasVer] = np.delete(avkVMR[gasVer],statDataCl[gasVer].inds,axis=0)
            dofs = np.diagonal(avkSCF[gasVer],axis1=1,axis2=2)
            avkSCFav[gasVer] = np.mean(avkSCF[gasVer],axis=0)
            avkVMRav[gasVer] = np.mean(avkVMR[gasVer],axis=0)
            dofsAvg[gasVer] = np.diag(avkSCFav[gasVer])
            dofsAvg_cs[gasVer] = np.cumsum(np.diag(avkSCFav[gasVer])[::-1])[::-1]
        #--------------------------------------
        # Remove retrieval data based on filter
        #--------------------------------------
        nfltr = len(statDataCl[gasVer].inds)
        rms[gasVer] = np.delete(rms[gasVer],statDataCl[gasVer].inds)
        ntot = len(rms[gasVer])
        dates[gasVer] = np.delete(dates[gasVer],statDataCl[gasVer].inds)
        totClmn[gasVer] = np.delete(totClmn[gasVer],statDataCl[gasVer].inds)
        rPrfVMR[gasVer] = np.delete(rPrfVMR[gasVer],statDataCl[gasVer].inds,axis=0)
        rPrfMol[gasVer] = np.delete(rPrfMol[gasVer],statDataCl[gasVer].inds,axis=0)
        rPrfDry[gasVer] = np.delete(rPrfDry[gasVer],statDataCl[gasVer].inds,axis=0)
        Airmass[gasVer] = np.delete(Airmass[gasVer],statDataCl[gasVer].inds,axis=0)
        TCdry[gasVer] = np.delete(TCdry[gasVer],statDataCl[gasVer].inds)
        aPrfVMR[gasVer] = np.delete(aPrfVMR[gasVer],statDataCl[gasVer].inds,axis=0)
        aPrfMol[gasVer] = np.delete(aPrfMol[gasVer],statDataCl[gasVer].inds,axis=0)
        if j == 0:
            rPrfVMRHDO = np.delete(rPrfVMRHDO,statDataCl[gasVer].inds,axis=0)
            rPrfdD1 = rPrfVMRHDO / rPrfVMR[gasVer]
        #-------------------------------------------------
        # Calculate partial columns and weighted VMR
        #-------------------------------------------------
        for pcol in pCols:
            ind1 = mf.nearestind(pcol[0], alt[gasVer])
            ind2 = mf.nearestind(pcol[1], alt[gasVer])
            vmrP[gasVer] = np.average(rPrfVMR[gasVer][:,ind2:ind1],axis=1,weights=Airmass[gasVer][:,ind2:ind1])
            TCp[gasVer] = np.sum(rPrfMol[gasVer][:,ind2:ind1],axis=1)
    #-------------------------------------------------------------------------------------------------------------------------------------------------
    #-------------------------------------------------------------------------------------------------------------------------------------------------
    # dD from the two separately retrieved gases (HDO/H2O vs SMOW-like ratio).
    rPrfdD2 = (np.divide(3.107e-4*rPrfVMR[gasName[1]+'_'+ver[1]], rPrfVMR[gasName[0]+'_'+ver[0]])/3.1152e-4 - 1) *1000.
    for j, gasVer in enumerate(statDataCl):
        rPrfVMRMean = np.mean(rPrfVMR[gasVer],axis=0)
        prfSTD = np.std(rPrfVMR[gasVer], axis=0)
        fig, ax1 = plt.subplots(1)
        ax1.plot(rPrfVMRMean,alt[gasVer], color='k')
        ax1.fill_betweenx(alt[gasVer],rPrfVMRMean-prfSTD,rPrfVMRMean+prfSTD,alpha=0.5,color='0.75')
        ax1.grid(True,which='both')
        ax1.set_ylabel('Altitude [km]')
        ax1.set_xlabel('VMR ['+sclfctName+']')
        ax1.set_ylim(0, 20)
        ax1.set_xlim(xmin=0)
        plt.title(gasName[j].upper(), fontsize=18)
        fig, ax1 = plt.subplots(1)
        rPrfdD2Mean = np.mean(rPrfdD2,axis=0)
        ax1.plot(rPrfdD2Mean,alt[gasName[0]+'_'+ver[0]], color='k')
        ax1.grid(True,which='both')
        ax1.set_ylabel('Altitude [km]')
        ax1.set_xlabel('VMR ['+sclfctName+']')
        ax1.set_ylim(0, 20)
        ax1.set_xlim(xmin=0)
        plt.title(gasName[j].upper(), fontsize=18)
    # NOTE(review): ``pl`` is not defined anywhere in this file; with
    # saveFlg = False this branch is dead, but it would raise NameError if
    # saving were enabled — confirm intended object (a PdfPages wrapper?).
    if saveFlg: pl.closeFig()
    else:
        plt.show(block=False)
        user_input = raw_input('Press any key to exit >>> ')
        sys.exit()

if __name__ == "__main__":
    main()
import os
import sys
import json
import shutil
import ase.io
import logging
import argparse
import numpy as np
import logging
from pprint import pprint
from cStringIO import StringIO
from ase.optimize.sciopt import SciPyFminCG
from ase.optimize import BFGS, FIRE, LBFGS, MDMin, QuasiNewton
from ase.constraints import UnitCellFilter, StrainFilter
try:
from quippy.io import write, AtomsWriter
from quippy import Atoms, Potential, frange, set_fortran_indexing
except ImportError:
pass
from imeall import app
set_fortran_indexing(False)
def relax_gb(gb_file='file_name', traj_steps=120, total_steps=1200, force_tol = 0.05):
"""Method to relax a grain_boundary bicrystal structure. Requires a .json
file with at a minimum the 'param_file' variable specified.
Args:
gb_file(str): gbid.
traj_steps(int): number of steps between print trajectories.
total_steps(int): total number of force relaxation steps.
force_tol(float): Force relaxation criterion in ev/A.
Returns:
:class:`ase.Atoms` Object
"""
def converged(grain, smax, fmax):
maxstress = max(grain.get_stress().ravel())
rmsforces = np.sum(grain.get_forces()**2, axis=1)**0.5
maxforce = max(rmsforces)
if maxforce < fmax and maxstress < smax:
return True
return False
with open('subgb.json', 'r') as outfile:
j_dict = json.load(outfile)
try:
POT_DIR = os.path.join(app.root_path, 'potentials')
except KeyError:
sys.exit("Please set POTDIR in os environment. `export POTDIR='path/to/potfiles/`")
try:
param_file = j_dict['param_file']
if param_file == 'iron_mish.xml':
eam_pot = os.path.join(POT_DIR, 'iron_mish.xml')
r_scale = 1.0129007626
elif param_file == 'Fe_Mendelev.xml':
eam_pot = os.path.join(POT_DIR, 'Fe_Mendelev.xml')
r_scale = 1.00894848312
elif param_file == 'PotBH.xml':
eam_pot = os.path.join(POT_DIR, 'PotBH.xml')
r_scale = 1.00894848312
elif param_file == 'Fe_Ackland.xml':
eam_pot = os.path.join(POT_DIR,'Fe_Ackland.xml')
r_scale = 1.00894185389
elif param_file == 'Fe_Dudarev.xml':
eam_pot = os.path.join(POT_DIR,'Fe_Dudarev.xml')
r_scale = 1.01279093417
elif param_file == 'gp33b.xml':
eam_pot = os.path.join(POT_DIR,'gp33b.xml')
sparse_file = 'gp33b.xml.sparseX.GAP_2016_10_3_60_19_29_10_8911'
eam_pot_sparse = os.path.join(POT_DIR, sparse_file)
shutil.copy(eam_pot, './')
shutil.copy(eam_pot_sparse, './')
else:
print 'No paramfile found!'
sys.exit()
except KeyError:
print 'No EAM potential file with that name. Relax failed.'
sys.exit()
print 'Using: ', eam_pot
pot_file = eam_pot.split('/')[-1]
print '{0}.xyz'.format(gb_file)
print os.getcwd()
grain = io.read('{0}.xyz'.format(gb_file), index='-1')
if param_file != 'gp33b.xml':
pot = Potential('IP EAM_ErcolAd do_rescale_r=T r_scale={0}'.format(r_scale), param_filename=eam_pot)
else:
pot = Potential('IP GAP', param_filename=eam_pot)
grain.set_calculator(pot)
grain.info['adsorbate_info'] = None
E_gb_init = grain.get_potential_energy()
traj_file = gb_file
if 'traj' in traj_file:
out = AtomsWriter('{0}'.format('{0}.xyz'.format(traj_file)))
else:
out = AtomsWriter('{0}'.format('{0}_traj.xyz'.format(traj_file)))
strain_mask = [0,0,1,0,0,0]
ucf = UnitCellFilter(grain, strain_mask)
opt = FIRE(ucf)
cell = grain.get_cell()
A = cell[0][0]*cell[1][1]
H = cell[2][2]
#Calculation dumps total energyenergy and grainboundary area data to json file.
with open('subgb.json','r') as f:
gb_dict = json.load(f)
#Write an initial dict so we know if the system has been initialized but the calculation is not finished.
with open('subgb.json', 'w') as outfile:
for key, value in gb_dict.items():
j_dict[key] = value
json.dump(j_dict, outfile, indent=2)
CONVERGED = False
FORCE_TOL = force_tol
#default to 5 if traj_steps = 120, otherwise increases
num_iters = int(float(total_steps)/float(traj_steps))
logging.debug('num_iters: {}'.format(num_iters))
for i in range(num_iters):
opt.run(fmax=FORCE_TOL, steps=traj_steps)
out.write(grain)
force_array = grain.get_forces()
max_force_II = max([max(f) for f in force_array])
max_forces = [np.sqrt(fx**2+fy**2+fz**2) for fx, fy, fz in zip(grain.properties['force'][0],
grain.properties['force'][1], grain.properties['force'][2])]
if max(max_forces) <= FORCE_TOL:
CONVERGED = True
break
out.close()
gb_dict['converged'] = CONVERGED
E_gb = grain.get_potential_energy()
gb_dict['E_gb'] = E_gb
gb_dict['E_gb_init'] = E_gb_init
gb_dict['area'] = A
with open('subgb.json', 'w') as outfile:
for key, value in gb_dict.items():
j_dict[key] = value
json.dump(j_dict, outfile, indent=2)
if param_file == 'gp33b.xml':
os.remove(param_file)
os.remove(sparse_file)
else:
pass
return grain
if __name__ == '__main__':
    #Command line tool for relaxing grainboundary structure
    parser = argparse.ArgumentParser()
    parser.add_argument('-inp', '--input_file', help='name of input structure file')
    # NOTE(review): default=1200 here, while relax_gb() defaults traj_steps to
    # 120, and total_steps (1200) is not exposed on the CLI — with -ts left at
    # its default only a single minimisation cycle runs. Confirm intended.
    parser.add_argument('-ts', '--traj_steps', help='Number of steps to write trajectory to file', type=int, default=1200)
    parser.add_argument('-f', '--force_tol', help='Force tolerance for minimization', type=float, default=0.05)
    args = parser.parse_args()
    input_file = args.input_file
    relax_gb(gb_file=input_file, traj_steps=args.traj_steps, force_tol=args.force_tol)
|
import datetime
import logging
import sqlite3
import time
import schedule
from current_ministry_of_finance import (
get_current_articles_from_legislacja, get_current_articles_from_projects,
get_current_articles_on_ministry_of_finance,
get_current_articles_on_website_podatki_gov_pl)
from shorten_url import shorten_link
from mail_credentials import constants
from quickstart import main
logging.basicConfig(filename='financial_newsletter_log', level=logging.INFO)
def make_database():
    """(Re)create the Articles(Title, Link) table in articles.db.

    The table is dropped first, so titles stored by previous runs are lost.
    NOTE(review): that means the dedup in insert_articles_to_database only
    spans a single run — confirm this is intended.
    """
    database_connection = sqlite3.connect("articles.db")
    try:
        database_connection.execute("DROP TABLE IF EXISTS Articles")
        database_connection.commit()
        try:
            # 'with' commits the CREATE TABLE on success.
            with database_connection:
                database_connection.execute(
                    """CREATE TABLE Articles(
                    Title TEXT,
                    Link TEXT
                    )"""
                )
        except sqlite3.OperationalError as error:
            logging.warning(error)
    finally:
        # The original leaked the connection; always release it.
        database_connection.close()
    logging.info("Database has been created.")
def insert_articles_to_database(articles_list):
    """Store previously unseen articles and e-mail them to every recipient.

    Each article is a dict with 'title' and 'url' keys. Titles already present
    in the Articles table are skipped, so only new articles are mailed.
    """
    database_connection = sqlite3.connect("articles.db")
    try:
        cursor = database_connection.cursor()
        for article in articles_list:
            cursor.execute("SELECT Title from Articles WHERE Title=?",
                           (article["title"],))
            result = cursor.fetchone()
            if not result:
                cursor.execute(
                    "INSERT INTO Articles VALUES (?,?)", (article["title"],
                                                          article["url"]))
                current_time = datetime.datetime.now()
                current_time = current_time.strftime("%b %d %Y %H:%M")
                logging.info(f"Article has been added to database at {current_time}")
                article["url"] = shorten_link(article["url"])
                for recipent in constants.get('recipients', ''):
                    main(article, recipent)
                # Throttle outgoing mail. NOTE(review): 120 s per new article
                # blocks the whole scrape loop — confirm acceptable.
                time.sleep(120)
                # send_mail(article)
        database_connection.commit()
    finally:
        # The original never closed the connection; always release it.
        database_connection.close()
def get_archive_and_make_database():
    """Rebuild the articles database; always reports success with True."""
    make_database()
    logging.info("I have made a database function")
    return True
def main_job_get_current_articles():
    """Scrape every configured news source and store the new articles.

    The original body repeated the same log/scrape/insert sequence four
    times; it is now driven by a (source name, scraper) table, which keeps
    the log messages and ordering consistent across sources.
    """
    sources = (
        ("ministry of finance", get_current_articles_on_ministry_of_finance),
        ("podatki.gov.pl", get_current_articles_on_website_podatki_gov_pl),
        ("legislacja", get_current_articles_from_legislacja),
        ("projects", get_current_articles_from_projects),
    )
    for source_name, scrape in sources:
        logging.info("I'm starting getting current articles from %s", source_name)
        articles = scrape()
        logging.info("I have finished getting articles from %s", source_name)
        insert_articles_to_database(articles)
def job():
    """One scheduled cycle: rebuild the database, then scrape all sources."""
    get_archive_and_make_database()
    main_job_get_current_articles()
if __name__ == "__main__":
    # Run one full scrape immediately at startup...
    get_archive_and_make_database()
    main_job_get_current_articles()
    # ...then repeat every 15 minutes. NOTE(review): each job() run drops and
    # recreates the Articles table, so dedup state is lost between cycles and
    # articles may be re-sent -- confirm this is intended.
    schedule.every(15).minutes.do(job)
    while True:
        schedule.run_pending()
        time.sleep(1)  # poll the scheduler once per second
|
from bot import BotObtainer
from web import HandlerApi
from aiohttp import web
import settings
import logging
import asyncio
import pathlib
import json
import jwt
from aiohttp import web
from loguru import logger
def decode_token(jwt_token, user_token):
    """Decode *jwt_token* using *user_token* as the HS256 secret.

    Returns the decoded payload dict, or '' (falsy) when decoding fails
    for any reason.
    """
    logger.debug(jwt_token)
    logger.debug(user_token)
    decoded = ''
    try:
        decoded = jwt.decode(jwt_token, user_token, algorithms=['HS256'])
    except Exception:
        # fix: was a bare `except:` (also caught SystemExit/KeyboardInterrupt)
        # combined with `finally: return`, which silently swallowed them too
        logger.debug(f"trying to login with falsy token: {jwt_token} and user: {user_token}")
    return decoded
def validate_token(jwtoken):
    """Validate *jwtoken* against the login bot's secret.

    Returns the decoded payload (truthy) on success, '' on failure.
    """
    login_bot = BotObtainer.get_current().get_bot_for_login()
    payload = decode_token(jwtoken, login_bot['token'])
    logger.debug(payload)
    return payload
@web.middleware
async def check_auth(request, handler):
    """aiohttp middleware: only forward requests with a valid auth-token header.

    Requests without a valid token get a JSON status payload instead
    (HTTP 200 -- presumably intentional; confirm clients don't expect 401).
    """
    user_token = request.headers.get("auth-token")
    if user_token and validate_token(user_token):
        return await handler(request)
    # fix: corrected typo "unautheticated" in the client-visible message
    return web.json_response({'status': "unauthenticated user"})
# Configure logging
logging.basicConfig(level=logging.INFO)
if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    app = web.Application()
    bot_pool = BotObtainer(settings.BOT_WEBHOOK_PATH, settings.BOT_WEBHOOK_URL)
    # SECURITY(review): Telegram bot tokens are hard-coded here. They should
    # be moved to settings/environment variables and these tokens revoked.
    bot_pool.add_bot("bot_1", "697083959:AAEMcQW2EwsXV267zmypRvP6frvREmf9dKo")
    bot_pool.add_bot("bot_2", "631844699:AAEVFt1lUrpQGaDiDZ7NpbunNRWezY8nXn0")
    bot_pool.set_bot_for_login("bot_1")
    bot_pool.configure_app(app)
    bot_pool.load_handlers()
    #for name, cur_dp in bot_pool.get_all_bots().items():
    #    loop.run_until_complete(bot_pool.set_webhook(name))
    # Public routes
    handler_api = HandlerApi()
    app.router.add_route('post', '/approve_user', handler_api.approve_user)
    app.router.add_route('get', "/", handler_api.show_site)
    app.router.add_static('/static', "dist/static", show_index=True)
    # Admin sub-app: every route below requires a valid auth-token header
    # (see the check_auth middleware).
    admin_app = web.Application(middlewares=[check_auth])
    admin_app.router.add_route('post', '/ping', handler_api.ping)
    admin_app.router.add_route('post', '/get_all_bots', handler_api.get_bot_all)
    admin_app.router.add_route('post', '/get_bot', handler_api.get_bot_by_name_token)
    app.add_subapp('/admin/', admin_app)
    for i in app.router.routes():
        print(i)  # dump the final routing table at startup
    web.run_app(app, host=settings.WEBAPP_HOST, port=settings.WEBAPP_PORT)
|
# -*- coding: utf-8
from django.contrib import admin
from whipturk.models import WhipReport
@admin.register(WhipReport)
class WhipReportAdmin(admin.ModelAdmin):
    """Admin configuration for WhipReport; raw-id widgets for the FK fields."""
    raw_id_fields = ('bill', 'target', 'user')
|
#!/usr/bin/python3
"""[summary]
"""
from api.v1.views import app_views
from models import storage
from models.engine.file_storage import classes
@app_views.route('/status')
def status():
    """Report API health."""
    payload = {'status': 'OK'}
    return payload
@app_views.route('/stats')
def countdown():
    """Return the stored-object count for each model class."""
    labels = (('amenities', 'Amenity'), ('cities', 'City'),
              ('places', 'Place'), ('reviews', 'Review'),
              ('states', 'State'), ('users', 'User'))
    counts = {}
    for label, cls_name in labels:
        counts[label] = storage.count(classes[cls_name])
    return counts
|
from django.shortcuts import render
from .models import Book, Review
# Create your views here.
def book_list(request):
    """Render the base template with the book catalogue.

    NOTE(review): the original queried books, printed each book's reviews to
    stdout (debug leftovers), computed total_books, and then discarded all of
    it -- render() received no context. The data is now passed to the
    template; base.html can ignore the extra variables if unused.
    """
    books = Book.objects.all()
    context = {
        'books': books,
        'total_books': books.count(),
    }
    return render(request, 'base.html', context)
|
"""
Acl roles
=========
"""
# Cerberus/Eve schema for an ACL role document.
_schema = {'name': {'type': 'string',
                    # fix: was the string 'true'; the schema's own 'group'
                    # field uses a real boolean, and Cerberus expects one
                    'required': True,
                    },
           'description': {'type': 'string'},
           'ref': {'type': 'string',
                   'unique': True},
           'group': {
               'type': 'objectid',
               'required': True,
               'data_relation': {
                   'resource': 'acl/groups',
                   'field': '_id',
                   'embeddable': True,
               },
           },
           }
# Eve resource definition for the ACL roles endpoint.
definition = {
    'item_title': 'acl/roles',
    'url': 'acl/roles',
    # only superadmins may create or modify roles
    'allowed_write_roles': ['superadmin'],
    'allowed_item_write_roles': ['superadmin'],
    'datasource': {'source': 'acl_roles',
                   },
    'internal_resource': False,
    # concurrency_check requires clients to send an If-Match etag
    'concurrency_check': True,
    'resource_methods': ['GET', 'POST'],
    'item_methods': ['GET','PATCH'],
    'versioning': False,
    'schema': _schema,
}
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.hashers import make_password
from activity.models import Activity, ActivityStatistics
# Scraping tools
import random
import json
from django.contrib.auth.models import User
from faker import Factory
fake = Factory.create()
activities = ['Swimming', 'Cooking', 'Sleeping', 'Hiking', 'Bird Watching', 'Play Soccer', 'Running', 'Cycling']
class Command(BaseCommand):
    """Management command that seeds the DB with fake users, activities, and stats."""
    # fix: the help text was copy-pasted from the Django tutorial
    # ("Closes the specified poll for voting") and did not match this command
    help = 'Seeds the database with fake users, activities and activity statistics'

    def add_arguments(self, parser):
        """Accept the number of fake users to create."""
        parser.add_argument('number', nargs='+', type=int)

    def handle(self, *args, **options):
        """Create users, one Activity per user per activity name, then 10 rounds of stats."""
        number = int(options['number'][0])
        print(options['number'])
        for i in range(number):
            print("Creating user {} of {}".format(i + 1, number))
            user = User()
            user.username = self.get_username()
            user.first_name = self.get_username().capitalize()
            user.last_name = self.get_username().capitalize()
            user.password = make_password("password")
            user.email = fake.email()
            user.save()
        # fix: report the requested total; the old "{}".format(i+1) is
        # undefined when number == 0
        print("Done. Created {} users".format(number))
        print("==================================UserActivities=========================")
        for user in User.objects.all():
            for activity_name in activities:
                activity = Activity()
                activity.owner = user
                activity.activity_name = activity_name
                activity.save()
        print("=================Statistics======================")
        user_activities = Activity.objects.all()
        for round_no in range(10):
            print("Activities round {}".format(round_no))
            for activity in user_activities:
                print("User {}".format(activity.owner.username))
                activity_stat = ActivityStatistics()
                activity_stat.activity = activity
                activity_stat.owner = activity.owner
                activity_stat.value = random.randint(3, 20)
                activity_stat.statistics_date = fake.date()
                activity_stat.save()

    def get_username(self):
        """Return a fake surname not yet used as a username (retries recursively)."""
        name = fake.name().split()[-1].lower()
        # fix: .exists() avoids fetching rows just to test membership
        if User.objects.filter(username=name).exists():
            return self.get_username()
        return name
|
import cv2
import numpy as np
import tensorflow as tf
import h5py
import os
from keras.backend.tensorflow_backend import set_session
from keras.models import Sequential
from keras.layers import Convolution2D, Flatten, Dense, MaxPooling2D, Dropout
from keras.utils.np_utils import to_categorical
from keras import losses, optimizers, regularizers
from keras.models import load_model
from styx_msgs.msg import TrafficLight
cwd = os.path.dirname(os.path.realpath(__file__))
class TLClassifier(object):
    """Traffic-light color classifier backed by a pre-trained Keras CNN."""

    def __init__(self):
        # model path is relative to this file, hence the chdir
        os.chdir(cwd)
        self.model = load_model('tl_classify_sim.h5')
        # This trick makes Keras happy - Thanks to Eric Lavigne for the tip
        self.graph = tf.get_default_graph()

    def get_classification(self, image):
        """Determines the color of the traffic light in the image

        Args:
            image (cv::Mat): image containing the traffic light (BGR order)

        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        light_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # fix: dropped stray semicolon
        resized_image = cv2.resize(light_image, (32, 64))
        processed_image = resized_image / 255.
        # Keras trick part 2 - Thanks again Eric
        with self.graph.as_default():
            y_hat = self.model.predict(processed_image.reshape((1, 64, 32, 3)))
        # fix: np.argmax replaces tolist().index(np.max(...)); same result
        # (first index of the maximum), cast back to a plain int
        return int(np.argmax(y_hat[0]))
|
import os
import nibabel as nib
## ######### ART PARAMETERS (edit to desired values) ############
global_mean=1 # global mean type (1: Standard 2: User-defined Mask)
motion_file_type=0 # motion file type (0: SPM .txt file 1: FSL .par file 2:Siemens .txt file)
use_diff_motion=1 # 1: uses scan-to-scan motion to determine outliers; 0: uses absolute motion
use_diff_global=1 # 1: uses scan-to-scan global signal change to determine outliers; 0: uses absolute global signal values
use_norms=0 # 1: uses composite motion measure (largest voxel movement) to determine outliers; 0: uses raw motion measures (translation/rotation parameters)
#mask_file=[] # set to user-defined mask file(s) for global signal estimation (if global_mean is set to 2)
##################################################################
# Inputs/outputs are injected by Snakemake via the global `snakemake` object.
subject = snakemake.wildcards.sub
func = snakemake.input.swutFunc  # functional image path(s)
rp = snakemake.input.rpTxt  # realignment-parameter (motion) file path(s)
#SPM = snakemake.input.SPM
cfgPath = snakemake.output.artCfg  # art config file to write
# A single session arrives as a plain string; multiple sessions arrive as a
# list-like object that is stringified and split on spaces below.
if type(func) is str:
    nSes = 1
    imgPath = func
    rpPath = rp
else:
    # NOTE(review): splitting str(func) on spaces breaks if any path
    # contains a space -- confirm paths are space-free.
    func = str(func).split(" ")
    nSes = len(func)
    imgPath = func[0]
    rpPath = rp[0]
#write config file
with open(cfgPath,'w') as cfg:
    cfg.write('# Automatic script created through snakemake\n')
    cfg.write('# art config scripts can be edited and ran manually through\n')
    cfg.write('# the art toolbox in matlab with the following syntax:\n')
    cfg.write("# art('sess_file',cfgPath)\n\n")
    cfg.write(f'sessions: {nSes}\n')
    cfg.write(f'global_mean: {global_mean}\n')
    cfg.write(f'motion_file_type: {motion_file_type}\n')
    cfg.write('motion_fname_from_image_fname: 0\n')
    cfg.write(f'use_diff_motion: {use_diff_motion}\n')
    cfg.write(f'use_diff_global: {use_diff_global}\n')
    cfg.write(f'use_norms: {use_norms}\n')
    cfg.write(f'output_dir: {os.path.dirname(imgPath)}\n\n')
    #cfg.write(f'spm_file: {SPM}\n')
    cfg.write('end\n')
    # Session-specific image lists: one "session N image ..." line per session
    for i in range(nSes):
        if type(func) is str:
            imgPath = func
            rpPath = rp
        else:
            imgPath = func[i]
            rpPath = rp[i]
        img = nib.load(imgPath)  # get nVols from the 4th dimension
        nVols = img.shape[3]
        sess = ''
        for j in range(1, nVols + 1):  # matlab indices start at 1, not 0
            sess += f'session {i+1} image {imgPath+","+str(j)+" "}'
        cfg.write(sess + '\n\n')
    # Session-specific motion files
    for i in range(nSes):
        # fix: rpPath is recomputed per session here; previously the stale
        # value left over from the image loop (the LAST session's motion
        # file) was written for every session
        rpPath = rp if type(func) is str else rp[i]
        cfg.write(f'session {i+1} motion {rpPath}\n')
    cfg.write('end\n')
|
#Imports for MailChimpETL
import json
import datetime
import pandas as pd
from dateutil.relativedelta import relativedelta
from sqlalchemy import create_engine
import flat_table
from mailchimp3 import MailChimp
#Create the class for MailChimp scrapper
class MailChimp_ETL():
    """An instance of this class sets up the connection to the Mailchimp API and
    retrieves the data in a json format to then transform it into a pandas dataframe
    and load it into a SQL database"""

    def __init__(self, mc_api, mc_user, db_url):
        """Instantiate the class and create internal attributes.

        mc_api  -- Mailchimp API key
        mc_user -- Mailchimp account user name
        db_url  -- SQLAlchemy URL of the target SQL database
        """
        # Credentials
        self.client = MailChimp(mc_api=mc_api, mc_user=mc_user)
        self.engine = create_engine(db_url)
        self.extracted_data = {}     # raw API data, keyed by table name
        self.extracted_data_df = {}  # raw data as DataFrames, keyed by table name
        self.transformed_data = {}   # cleaned DataFrames, keyed by table name

    def __str__(self):
        return 'This is a template for the MailChimp ETL class'

    #====================================================Extraction functions====================================================
    def extract_data(self, data_list = None):
        '''
        Function for extracting data from Mail Chimp. Input a list of tables
        to be extracted. If no tables are specified, extract all tables.
        Outputs a dictionary with the table name as the key. Also saves it to
        attribute self.extracted_data
        Example output: {'users': extracted_users_data}
        Tables are: campaigns, users, opens, clicks
        Campaigns will always be extracted, as it is needed to extract the other tables
        '''
        #dictionary of functions to extract each table
        self.extract_func = {
            'campaigns': self.__extract_campaigns,
            'users': self.__extract_users,
            'opens': self.__extract_opens,
            'clicks': self.__extract_clicks
        }
        #if the tables to be extracted is not specified, extract all tables
        if data_list is None:  # fix: was `== None`
            data_list = list(self.extract_func.keys())
        #campaigns needs to be extracted first to extract the rest
        data_list.append('campaigns')
        data_list = list(set(data_list))
        data_list.remove('campaigns')
        #create a list of data to extract
        extract_list = []
        for val in data_list:
            if val in self.extract_func:
                extract_list.append(val)
            else:
                print(f'{val} is not an extractable object')
        print('Extracting...')
        extracted_data = {}
        #extract campaigns first
        print('Extracting campaigns...')
        self.campaigns_data = self.__extract_campaigns()
        extracted_data['campaigns'] = self.campaigns_data
        print('Finished extracting campaigns')
        #extract each item in the extract list
        for data in extract_list:
            print(f'Extracting {data}...')
            #run the extraction function for each table
            extracted_data[data] = self.extract_func[data]()
            print(f'Finished extracting {data}')
        print('Finished extracting...')
        self.extracted_data = extracted_data
        return extracted_data

    #----------------------------------------------------Campaigns----------------------------------------------------
    def __extract_campaigns(self):
        """
        This method extracts all the campaigns sent by the company into a list
        of dictionaries to be further processed
        returns -> list of dictionaries
        """
        campaigns = self.client.campaigns.all(get_all=True)['campaigns']
        return list(map(self.__campaign_mapper, campaigns))

    def __campaign_mapper(self, campaign):
        """Helper to unwrap the campaign object from the API
        returns -> dict"""
        return {
            'id': campaign['id'],
            'open_rate': campaign.get('report_summary', {'open_rate': 0})['open_rate'],
            'click_rate': campaign.get('report_summary', {'click_rate': 0})['click_rate'],
            'send_time': campaign['send_time'],
            'status': campaign['status'],
            'title': campaign['settings']['title'],
            'subject_line': campaign['settings'].get('subject_line', "")
        }

    #----------------------------------------------------Users----------------------------------------------------
    def __extract_users(self):
        """
        This method extracts the lists along with all their members and
        returns a list of dictionaries to be processed
        returns -> list of dictionaries
        """
        all_lists = self.client.lists.all(get_all=True)
        row_lists = []
        for lst in all_lists['lists']:
            lst_id = lst['id']
            lst_name = lst['name']
            members = self.client.lists.members.all(lst_id, get_all=True)
            for member in members['members']:
                # fix: all fields now get per-member defaults; previously a
                # missing key could leak the PREVIOUS member's id/email/status
                # (or raise NameError on the very first member)
                member_id = ''
                email = ''
                status = ''
                first_name = ''
                last_name = ''
                try:
                    member_id = member['id']
                    email = member['email_address']
                    status = member['status']
                    first_name = member['merge_fields']['FNAME']
                    last_name = member['merge_fields']['LNAME']
                except KeyError:  # fix: was a bare except
                    pass
                row_lists.append({
                    'email_id': member_id,
                    'status': status,
                    'list_name': lst_name,
                    'list_id': lst_id,
                    'email': email,
                    'first_name': first_name,
                    'last_name': last_name
                })
        return row_lists

    #----------------------------------------------------Opens----------------------------------------------------
    def __extract_opens(self):
        """This method uses the connection to query the opens or the registers of
        the interaction of each user with the campaigns sent
        returns -> list of dictionaries"""
        open_members = []
        for campaign in self.campaigns_data:
            campaign_id = campaign['id']
            open_report = self.client.reports.open_details.all(campaign_id, get_all=True)['members']
            open_members += list(map(self.__open_report_mapper, open_report))
        return open_members

    def __open_report_mapper(self, member):
        """Helper to unwrap the opens object from the API
        returns -> dict"""
        opens = member.get('opens', [])
        # the last entry (if any) is the most recent open event
        last_open = opens[-1] if opens else None
        return {
            'email_id' : member['email_id'],
            'campaign_id' : member['campaign_id'],
            'opens' : opens,
            'opens_count' : member.get('opens_count', 0),
            'last_open' : last_open
        }

    #----------------------------------------------------Clicks----------------------------------------------------
    def __extract_clicks(self):
        """This method returns all user interactions with the urls contained within the
        campaigns that were sent by the company
        returns -> list of dictionaries"""
        # get all click details for all campaigns
        click_members = []
        for campaign in self.campaigns_data:
            campaign_id = campaign['id']
            click_report = self.client.reports.click_details.all(campaign_id, get_all=True)
            # get all url details for all click reports
            for url in click_report['urls_clicked']:
                url_params = [url['id'], url['url']]
                # if there are clicks, grab its detail and append
                if url['total_clicks'] > 0:
                    click_details_report = self.client.reports.click_details.members.all(campaign_id, url['id'], get_all=True)['members']
                    click_members += [self.__click_detail_mapper(click_detail, url_params) for click_detail in click_details_report]
        return click_members

    def __click_detail_mapper(self, member, params):
        """Helper function to unwrap user click information received from the API
        returns -> dict"""
        return {
            'campaign_id' : member['campaign_id'],
            'clicks' : member['clicks'],
            'email_id' : member['email_id'],
            'list_id' : member['list_id'],
            'url_id': params[0],
            'url': params[1],
        }

    #====================================================Transformation functions====================================================
    def transform_data(self, extracted_data, data_list = None):
        '''
        Function for transforming previously extracted data. Input a dict of
        extracted data with table names for keys (see extract_data above), and a list of tables
        to be transformed. If no tables are specified, transform all tables.
        Outputs dictionary of table names and transformed data, similar to extracted data.
        Also saves it to attribute self.transformed_data
        After converting the extracted data into a dataframe, that dataframe is
        saved in self.extracted_data_df in the same dictionary format. This data
        can be saved in a csv and loaded into this function inplace of the
        extracted data. This is to save on having to extract every time.
        Tables are: campaigns, users, lists, opens, clicks
        Lists and users are both taken from the users data. If not specified,
        both will be transformed
        '''
        #dictionary of functions to transform each table
        self.transform_func = {
            'campaigns': self.__transform_campaigns,
            'lists': self.__transform_lists,
            'users': self.__transform_users,
            'opens': self.__transform_opens,
            # fix: method name typo (__transformt_clicks) corrected
            'clicks': self.__transform_clicks
        }
        #if the tables to be transformed is not specified, transform all tables
        if data_list is None:  # fix: was `== None`
            data_list = list(extracted_data.keys())
        #if users is in the extracted data, add lists to the list as well
        if 'users' in extracted_data.keys():
            data_list.append('lists')
        #remove duplicate values
        data_list = list(set(data_list))
        #remove all invalid values
        transform_list = []
        for val in data_list:
            if val in self.transform_func:
                transform_list.append(val)
            else:
                print(f'{val} is not an transformable object')
        transformed_data = {}
        #transform the data
        print('Transforming..')
        for data in transform_list:
            print(f'Transforming {data}...')
            #if lists is being transformed, set the extracted data to lists
            if data == 'lists':
                _extracted_data = extracted_data['users']
            else:
                _extracted_data = extracted_data[data]
            transformed_data[data] = self.transform_func[data](_extracted_data)
            print(f'Finished transforming {data}')
        print('Finished transforming')
        self.transformed_data = transformed_data
        return transformed_data

    #====================================================Transformation helpers======================================================
    def __transform_campaigns(self, extracted_data):
        """Clean the campaigns data: strip text, cast rates to float,
        parse send_time, and deduplicate on id (the primary key)."""
        # 1. Transform campaigns_dict -> df
        df = pd.DataFrame(extracted_data)
        self.extracted_data_df['campaigns'] = df
        ## Strip leading/trailing whitespaces
        df['subject_line'] = df['subject_line'].str.strip()
        df['title'] = df['title'].str.strip()
        col = ['click_rate', 'id', 'open_rate', 'send_time','status', 'subject_line', 'title']
        df = df[col]
        df['click_rate'] = df['click_rate'].astype(float)
        df['open_rate'] = df['open_rate'].astype(float)
        df['send_time'] = pd.to_datetime(df['send_time'])
        #id is the primary key. get rid of duplicates
        df = (df.drop_duplicates(subset='id')).reset_index(drop = True)
        return df

    #list and user data are both extracted from users
    def __transform_lists(self, extracted_data):
        """Return the list-membership columns: list_id, list_name, email_id."""
        return self.__transform_user_lists(extracted_data)[0]

    def __transform_users(self, extracted_data):
        """Return the user columns, deduplicated on email_id (the primary key)."""
        df = self.__transform_user_lists(extracted_data)[1]
        df = (df.drop_duplicates(subset='email_id')).reset_index(drop = True)
        return df

    #user and lists are stored in the same extracted data
    def __transform_user_lists(self, extracted_data):
        """Split the raw users data into [list_df, users_df]."""
        # 2. Transform users_dict -> df
        df = pd.DataFrame(extracted_data)
        self.extracted_data_df['users'] = df
        df['list_name'] = df['list_name'].str.strip()
        df['first_name'] = df['first_name'].str.strip()
        df['last_name'] = df['last_name'].str.strip()
        ## (a) Split off users_df by the list attributes
        col_list = ['list_id', 'list_name', 'email_id']
        list_df = df[col_list]
        ## (b) Drop the list attributes from users_df
        col_users = ['email', 'email_id', 'first_name', 'last_name', 'status']
        users_df = df[col_users]
        return [list_df, users_df]

    def __transform_opens(self, extracted_data):
        """Clean the opens data: parse last_open, stringify the opens list,
        cast opens_count to int."""
        # 3. Transform opens_dict -> df
        df = pd.DataFrame(extracted_data)
        self.extracted_data_df['opens'] = df
        # df['last_open'] = df['last_open'].apply(lambda x: x['timestamp'])
        col = ['campaign_id', 'email_id', 'last_open', 'opens', 'opens_count']
        df = df[col]
        df['last_open'] = pd.to_datetime(df['last_open'])
        df['opens'] = df['opens'].astype(str)
        df['opens_count'] = df['opens_count'].astype(int)
        return df

    def __transform_clicks(self, extracted_data):
        """Clean the clicks data: strip urls and cast clicks to int."""
        # 4. Transform clicks_dict into dataframe
        df = pd.DataFrame(extracted_data)
        self.extracted_data_df['clicks'] = df
        col = ['campaign_id', 'clicks', 'email_id', 'list_id', 'url', 'url_id']
        df = df[col]
        df['url'] = df['url'].str.strip()
        df['clicks'] = df['clicks'].astype(int)
        return df

    #====================================================Loading functions======================================================
    def load_data(self, transformed_data, data_list= None):
        '''
        Function for loading transformed data. Input a dict of transformed data
        with table names for keys (see extract_data above), and a list of tables
        to be loaded. If no tables are specified, load all tables.
        Data is loaded row by row. This is so that loading doesn't all fail should
        an error occur. A progress report is given every 50 rows.
        Make sure to load campaigns and users first, as there are foreign key dependencies.
        Tables are: campaigns, users, lists, opens, clicks
        '''
        #if the tables to be loaded is not specified, load all data in the input
        if data_list is None:  # fix: was `== None`
            data_list = transformed_data.keys()
        #load the data by table
        for data in data_list:
            print(f'Loading: {data}...')
            #variables to keep track of rows loaded and errors encountered
            error_count = 0
            load_count = 0
            _transformed_data = transformed_data[data]
            #data is loaded row by row
            for i in range(len(_transformed_data)):
                try:
                    _transformed_data.iloc[i:i+1].to_sql(data, self.engine, if_exists='append', index = False)
                except Exception as e:
                    print(e)
                    error_count += 1
                else:
                    load_count += 1
                #give a progress update every 50 rows. feel free to change this number
                if (load_count + error_count) % 50 == 0:
                    print(f'Loaded {load_count} / {_transformed_data.shape[0]} entries')
                    print(f'Encountered {error_count} errors')
            print(f'Loaded {load_count} / {_transformed_data.shape[0]} entries')
            print(f'Encountered {error_count} errors')
#Main Function
if __name__ == '__main__':
    import os
    # fix: MailChimp_ETL() was called with no arguments although __init__
    # requires credentials (TypeError at import time). Credentials are now
    # read from the environment, and the ETL only runs as a script, not on
    # import.
    scrapper = MailChimp_ETL(
        mc_api=os.environ['MAILCHIMP_API_KEY'],
        mc_user=os.environ['MAILCHIMP_USER'],
        db_url=os.environ['DATABASE_URL'],
    )
    extracted_data = scrapper.extract_data()
    transformed_data = scrapper.transform_data(extracted_data)
    # campaigns and users must load first (foreign-key dependencies)
    scrapper.load_data(transformed_data, ['campaigns','users'])
    scrapper.load_data(transformed_data, ['lists','opens','clicks'])
|
# -*- coding: utf-8 -*-
import itertools
import warnings
from typing import List, Optional, NamedTuple
import numpy as np
import skfmm as fmm
from scipy.interpolate import RegularGridInterpolator
from scipy.spatial.distance import euclidean
from ._base import mpe_module, PointType, InitialInfo, PathInfo, PathInfoResult, logger
from ._parameters import Parameters, default_parameters
from ._exceptions import ComputeTravelTimeError, PathExtractionError, EndPointNotReachedError
def make_interpolator(coords, values, fill_value: float = 0.0):
    """Build a linear grid interpolator returning *fill_value* outside the grid."""
    interpolator = RegularGridInterpolator(
        coords,
        values,
        method='linear',
        bounds_error=False,
        fill_value=fill_value,
    )
    return interpolator
@mpe_module
class PathExtractionResult(NamedTuple):
    """The named tuple with info about extracted path

    Notes
    -----
    The instance of the class is returned from :func:`MinimalPathExtractor.__call__`.

    .. py:attribute:: path_points

        The extracted path points in the list

    .. py:attribute:: path_integrate_times

        The list of integrate times for every path point

    .. py:attribute:: path_travel_times

        The list of travel time values for every path point

    .. py:attribute:: step_count

        The number of integration steps

    .. py:attribute:: func_eval_count

        The number of evaluations of the right hand function
    """
    path_points: List[PointType]          # extracted path points
    path_integrate_times: List[float]     # ODE integration time per point
    path_travel_times: List[float]        # travel-time value per point
    step_count: int                       # number of solver steps taken
    func_eval_count: int                  # right-hand-function evaluations
@mpe_module
class MinimalPathExtractor:
    """Minimal path extractor

    Minimal path extractor based on the fast marching method and ODE solver.

    Parameters
    ----------
    speed_data : np.ndarray
        The speed data (n-d numpy array)
    end_point : Sequence[int]
        The ending point (a.k.a. "source point")
    parameters : class:`Parameters`
        The parameters

    Examples
    --------
    .. code-block:: python

        from skmpe import MinimalPathExtractor

        # some function for computing speed data
        speed_data_2d = compute_speed_data_2d()

        mpe = MinimalPathExtractor(speed_data_2d, end_point=(10, 25))
        path = mpe((123, 34))

    Raises
    ------
    ComputeTravelTimeError : Computing travel time has failed
    """

    def __init__(self, speed_data: np.ndarray, end_point: PointType,
                 parameters: Optional[Parameters] = None) -> None:
        if parameters is None:  # pragma: no cover
            parameters = default_parameters()

        # Solve the Eikonal equation once; the travel-time gradients drive
        # the ODE-based back-tracking in __call__.
        travel_time, phi = self._compute_travel_time(speed_data, end_point, parameters)
        gradients = np.gradient(travel_time, parameters.travel_time_spacing)
        grad_interpolants, tt_interpolant, phi_interpolant = self._compute_interpolants(gradients, travel_time, phi)

        self._travel_time = travel_time
        self._phi = phi
        self._end_point = end_point

        self._travel_time_interpolant = tt_interpolant
        self._phi_interpolant = phi_interpolant
        self._gradient_interpolants = grad_interpolants

        self._parameters = parameters

        # the output when computing ODE solution is finished
        self._path_points = []
        self._path_integrate_times = []
        self._path_travel_times = []
        self._step_count = 0
        self._func_eval_count = 0

    @property
    def travel_time(self) -> np.ndarray:
        """Returns the computed travel time for given speed data
        """
        return self._travel_time

    @property
    def phi(self) -> np.ndarray:  # pragma: no cover
        """Returns the computed phi (zero contour) for given source point
        """
        return self._phi

    @property
    def parameters(self) -> Parameters:
        """Returns the parameters
        """
        return self._parameters

    @staticmethod
    def _compute_travel_time(speed_data: np.ndarray,
                             source_point: PointType,
                             parameters: Parameters):
        # define the zero contour and set the wave source
        phi = np.ones_like(speed_data)
        phi[source_point] = -1

        try:
            travel_time = fmm.travel_time(phi, speed_data,
                                          dx=parameters.travel_time_spacing,
                                          order=parameters.travel_time_order)
        except Exception as err:  # pragma: no cover
            raise ComputeTravelTimeError from err

        return travel_time, phi

    @staticmethod
    def _compute_interpolants(gradients, travel_time, phi):
        # Interpolants make the discrete grids continuous so the ODE solver
        # can evaluate them at arbitrary (non-integer) points.
        grid_coords = [np.arange(n) for n in travel_time.shape]

        gradient_interpolants = []

        for gradient in gradients:
            interpolant = make_interpolator(grid_coords, gradient, fill_value=0.0)
            gradient_interpolants.append(interpolant)

        tt_interpolant = make_interpolator(grid_coords, travel_time, fill_value=0.0)
        phi_interpolant = make_interpolator(grid_coords, phi, fill_value=1.0)

        return gradient_interpolants, tt_interpolant, phi_interpolant

    def __call__(self, start_point: PointType) -> PathExtractionResult:
        """Extract path from start point to source point (ending point)

        Parameters
        ----------
        start_point : Sequence[int]
            The starting point

        Returns
        -------
        path_extraction_result : :class:`PathExtractionResult`
            The path extraction result

        Raises
        ------
        PathExtractionError : Extracting path has failed
        EndPointNotReachedError : The extracted path is not reached the ending point
        """
        gradient_interpolants = self._gradient_interpolants
        travel_time_interpolant = self._travel_time_interpolant

        def right_hand_func(time: float, point: np.ndarray) -> np.ndarray:  # noqa
            # Steepest descent on travel time: follow the negative
            # normalized gradient.
            velocity = np.array([gi(point).item() for gi in gradient_interpolants])

            if np.any(np.isclose(velocity, 0.0)):
                # zero-velocity most often means masked travel time data
                return velocity

            return -velocity / np.linalg.norm(velocity)

        solver_cls = self.parameters.ode_solver_method.solver
        logger.debug("ODE solver '%s' will be used.", solver_cls.__name__)

        with warnings.catch_warnings():
            # filter warn "extraneous arguments"
            warnings.simplefilter('ignore', category=UserWarning)
            solver = solver_cls(
                right_hand_func,
                t0=0.0,
                t_bound=self.parameters.integrate_time_bound,
                y0=start_point,
                min_step=self.parameters.integrate_min_step,
                max_step=self.parameters.integrate_max_step,
                first_step=None,
            )

        # reset the per-extraction output state
        self._path_points = []
        self._path_integrate_times = []
        self._path_travel_times = []
        self._step_count = 0

        end_point = self._end_point
        dist_tol = self.parameters.dist_tol

        y = None
        y_old = start_point
        small_dist_steps_left = self.parameters.max_small_dist_steps

        while True:
            self._step_count += 1
            message = solver.step()

            if solver.status == 'failed':  # pragma: no cover
                raise PathExtractionError(
                    f"ODE solver '{solver_cls.__name__}' has failed: {message}",
                    travel_time=self.travel_time, start_point=start_point, end_point=end_point)

            if y is not None:
                y_old = y

            y = solver.y
            t = solver.t
            tt = travel_time_interpolant(y).item()

            add_point = True
            y_dist = euclidean(y, y_old)

            if y_dist < dist_tol:
                # Near-stationary step: do not record the point; after
                # max_small_dist_steps of these in a row we give up below.
                logger.warning('step: %d, the distance between old and current extracted point (%f) is '
                               'too small (less than dist_tol=%f)', self._step_count, y_dist, dist_tol)
                add_point = False
                small_dist_steps_left -= 1

            if add_point:
                small_dist_steps_left = self.parameters.max_small_dist_steps
                self._path_points.append(y)
                self._path_integrate_times.append(t)
                self._path_travel_times.append(tt)

            self._func_eval_count = solver.nfev
            step_size = solver.step_size
            dist_to_end = euclidean(y, end_point)

            logger.debug('step: %d, time: %.2f, point: %s, step_size: %.2f, nfev: %d, dist: %.2f, message: "%s"',
                         self._step_count, t, y, step_size, solver.nfev, dist_to_end, message)

            # Success: we are within one step of the source point.
            if dist_to_end < step_size:
                logger.debug(
                    'The minimal path has been extracted (time: %.2f, _step_count: %d, nfev: %d, dist_to_end: %.2f)',
                    t, self._step_count, solver.nfev, dist_to_end)
                break

            if solver.status == 'finished' or small_dist_steps_left == 0:
                if small_dist_steps_left == 0:
                    reason = f'the distance between old and current point stay too small ' \
                             f'for {self.parameters.max_small_dist_steps} _step_count'
                else:
                    reason = f'time bound {self.parameters.integrate_time_bound} is reached, solver was finished.'

                err_msg = (
                    f'The extracted path from the start point {start_point} '
                    f'did not reach the end point {end_point} in {t} time and {self._step_count} _step_count '
                    f'with distance {dist_to_end:.2f} to the end point. Reason: {reason}'
                )

                raise EndPointNotReachedError(
                    err_msg,
                    travel_time=self.travel_time,
                    start_point=start_point,
                    end_point=end_point,
                    extracted_points=self._path_points,
                    last_distance=dist_to_end,
                    reason=reason,
                )

        return PathExtractionResult(
            path_points=self._path_points,
            path_integrate_times=self._path_integrate_times,
            path_travel_times=self._path_travel_times,
            step_count=self._step_count,
            func_eval_count=self._func_eval_count,
        )
def extract_path_without_way_points(init_info: InitialInfo,
                                    parameters: Parameters) -> PathInfoResult:
    """Extract a single minimal path from start to end (no way points)."""
    extractor = MinimalPathExtractor(init_info.speed_data, init_info.end_point, parameters)
    extraction = extractor(init_info.start_point)
    piece = PathInfo(
        path=np.asarray(extraction.path_points),
        start_point=init_info.start_point,
        end_point=init_info.end_point,
        travel_time=extractor.travel_time,
        extraction_result=extraction,
        reversed=False,
    )
    # The whole path is the single extracted piece.
    return PathInfoResult(path=piece.path, pieces=(piece,))
def make_whole_path_from_pieces(path_pieces_info: List[PathInfo]) -> PathInfoResult:
    """Stitch piece paths into one array; reversed pieces (except the first) are flipped."""
    segments = []
    for index, piece_info in enumerate(path_pieces_info):
        segment = piece_info.path
        # The first piece is taken as-is; later reversed pieces are flipped
        # so all segments run in the same direction before stacking.
        if index > 0 and piece_info.reversed:
            segment = np.flipud(segment)
        segments.append(segment)
    return PathInfoResult(
        path=np.vstack(segments),
        pieces=tuple(path_pieces_info),
    )
def extract_path_with_way_points(init_info: InitialInfo,
                                 parameters: Parameters) -> PathInfoResult:
    """Extract a path that passes through intermediate way points.

    Each consecutive point interval is extracted separately and the pieces
    are stitched by make_whole_path_from_pieces.  With
    ``parameters.travel_time_cache`` enabled, one extractor (anchored at a
    shared end point) is reused for two consecutive intervals: the second
    interval is traversed with start/end swapped and marked ``reversed``,
    so the already-computed travel-time solution is reused.
    """
    speed_data = init_info.speed_data
    path_pieces_info = []
    if parameters.travel_time_cache:
        # Alternate: compute a fresh travel time, then reuse it reversed.
        compute_ttime = [True, False]
        last_extractor = None
        for (start_point, end_point), compute_tt in zip(
                init_info.point_intervals(), itertools.cycle(compute_ttime)):
            if compute_tt:
                extractor = MinimalPathExtractor(speed_data, end_point, parameters)
                last_extractor = extractor
                is_reversed = False
            else:
                # Reuse the previous extractor; swap ends and flag the piece
                # so the stitcher can flip it back.
                extractor = last_extractor
                start_point, end_point = end_point, start_point
                is_reversed = True
            result = extractor(start_point)
            path_pieces_info.append(PathInfo(
                path=np.asarray(result.path_points),
                start_point=start_point,
                end_point=end_point,
                travel_time=extractor.travel_time,
                extraction_result=result,
                reversed=is_reversed
            ))
    else:
        # No caching: one extractor per interval, never reversed.
        for start_point, end_point in init_info.point_intervals():
            extractor = MinimalPathExtractor(speed_data, end_point, parameters)
            result = extractor(start_point)
            path_piece_info = PathInfo(
                path=np.asarray(result.path_points),
                start_point=start_point,
                end_point=end_point,
                travel_time=extractor.travel_time,
                extraction_result=result,
                reversed=False
            )
            path_pieces_info.append(path_piece_info)
    return make_whole_path_from_pieces(path_pieces_info)
def extract_path(init_info: InitialInfo,
                 parameters: Optional[Parameters] = None) -> PathInfoResult:
    """Top-level entry point: dispatch on the presence of way points."""
    params = default_parameters() if parameters is None else parameters
    if not init_info.way_points:
        return extract_path_without_way_points(init_info, params)
    return extract_path_with_way_points(init_info, params)
|
#!/usr/bin/env python
# coding: utf-8

# Notebook export: clean the COMPAS dataset and write a categorically
# encoded copy for downstream modelling.

# In[155]:

import pandas as pd
import numpy as np

# In[156]:

# Parse DateOfBirth as a datetime so an age can be derived below.
df = pd.read_csv('../data/compas.data',
                 parse_dates = ['DateOfBirth'])

# In[157]:

# Identifier / free-text / raw-score columns not used as features.
removed_columns = [
    'Person_ID',
    'AssessmentID',
    'Case_ID',
    'LastName',
    'FirstName',
    'MiddleName',
    'Screening_Date',
    'RecSupervisionLevelText',
    'RawScore',
    'DecileScore',
    'IsCompleted',
    'IsDeleted'
]
df.drop(removed_columns, axis=1, inplace=True)

# In[158]:

import datetime
import numpy as np

# Age in whole years at run time.
# NOTE(review): casting a timedelta to 'timedelta64[Y]' is deprecated in
# recent pandas, and assigning np.nan into an int series (two lines down)
# relies on version-dependent upcasting - confirm against the pinned pandas.
age = (datetime.datetime.now() - df.DateOfBirth).astype('timedelta64[Y]')
age = age.astype('int')
age[age<0] = np.nan
df['age_'] = age

# In[159]:

# Drop rows with missing score/age and unknown marital status.
df.drop(df[df['ScoreText'].isnull()].index, inplace=True)
df.drop(df[df['age_'].isnull()].index, inplace=True)
df.drop(df[df['MaritalStatus']=='Unknown'].index, inplace=True)

# In[160]:

# Bucket age into two groups: (0, 30] and (30, 100], encoded as 0/1.
age_bins = [0, 30, 100]
age_groups = pd.cut(df['age_'], bins=age_bins)
df['Age'] = age_groups
num_groups = len(df['Age'].cat.categories)
df['Age'] = df['Age'].cat.rename_categories(range(num_groups))

# In[161]:

# Integer-encode the categorical demographic/status columns in place.
df.Sex_Code_Text = pd.Categorical(df.Sex_Code_Text)
df['Sex_Code_Text'] = df.Sex_Code_Text.cat.codes
df.Ethnic_Code_Text = pd.Categorical(df.Ethnic_Code_Text)
df['Ethnic_Code_Text'] = df.Ethnic_Code_Text.cat.codes
df.MaritalStatus = pd.Categorical(df.MaritalStatus)
df['MaritalStatus'] = df.MaritalStatus.cat.codes
df.CustodyStatus = pd.Categorical(df.CustodyStatus)
df['CustodyStatus'] = df.CustodyStatus.cat.codes
df.LegalStatus = pd.Categorical(df.LegalStatus)
df['LegalStatus'] = df.LegalStatus.cat.codes
# Binary target: 'High' score -> 0, everything else -> 1.
df['ScoreText_'] = -1
mask = df['ScoreText']=='High'
df.loc[mask, 'ScoreText_'] = 0
df.loc[~mask, 'ScoreText_'] = 1

# In[162]:

# Drop the raw text/meta columns now that encoded versions exist.
df.drop(['DisplayText','ScoreText','Agency_Text', 'AssessmentType', 'ScaleSet_ID', 'ScaleSet', 'AssessmentReason', 'Language'], axis=1, inplace=True)
df.drop(['DateOfBirth', 'age_', 'Scale_ID'], axis=1, inplace=True)

# In[163]:

df

# In[164]:

df.to_csv('./compass_categorized.data')

# In[ ]:
|
import sys


def solve(n, big):
    """Trace ``big`` back down to ``n`` by undoing "double" and "append digit 1".

    Going backwards: halve an even number, or strip a trailing 1 digit.
    Returns the descending chain from ``big`` to ``n``, or None when ``n``
    is unreachable.  (Ported from Python 2: raw_input and '/' division.)
    """
    chain = [big]
    while big > n:
        if big % 2 == 0:
            big //= 2
        elif big % 10 == 1:
            # Stripping the last digit undoes "multiply by 10 and add 1".
            big //= 10
        else:
            # Neither inverse move applies: dead end.
            break
        chain.append(big)
    return chain if chain[-1] == n else None


def _main():
    n, big = map(int, input().split())
    chain = solve(n, big)
    if chain is not None:
        # Output the chain in ascending order (n first).
        sys.stdout.write('YES\n%d\n' % len(chain) + ' '.join(map(str, chain[::-1])) + '\n')
    else:
        sys.stdout.write('NO\n')


if __name__ == "__main__":
    _main()
|
import sys
from fractions import gcd
import numpy as np
def primesfrom3to(n):
    # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
    """ Returns an array of primes, p < n.

    Sieve over odd numbers only: index i represents the odd number 2*i + 1.
    Ported from Python 2: '/' became '//' (float indices break the sieve on
    Python 3), xrange -> range, and the removed np.bool alias -> builtin bool.
    """
    assert n >= 2
    sieve = np.ones(n // 2, dtype=bool)
    for i in range(3, int(n ** 0.5) + 1, 2):
        if sieve[i // 2]:
            sieve[i * i // 2::i] = False
    # Prepend 2, then map surviving odd indices back to their values.
    return np.r_[2, 2 * np.nonzero(sieve)[0][1::] + 1]
# Primes below 100 and their product (Python 3 port: 1L/long removed,
# xrange -> range, print statement -> function).
primes = primesfrom3to(100)
prod_primes = 1
for p in primes:
    prod_primes *= int(p)


def nontrivial_divisor(n):
    """Return the smallest prime (< 100) dividing n, or 1 if none does."""
    for p in primes:
        if n % p == 0:
            return int(p)
    return 1


# Input file: first line T, then per case a count line and a values line.
lines = open(sys.argv[1]).readlines()
T = int(lines[0])
let = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for casenum in range(1, T + 1):
    N = int(lines[2 * casenum - 1])
    vals = lines[2 * casenum].split()
    vals = [int(x) for x in vals]
    ans = ''
    M = max(vals)
    while M > 0:
        # Always pick (one of) the current maxima; when exactly two share
        # the maximum, emit them as a pair without a separating space.
        first = vals.index(M)
        ct = vals.count(M)
        ans += ' ' + let[first]
        vals[first] -= 1
        if ct == 2:
            first = vals.index(M)
            ans += let[first]
            vals[first] -= 1
        M = max(vals)
    print('case #' + str(casenum) + ":" + ans)
|
from django.contrib.auth.forms import AuthenticationForm
from django import forms  # fix: `forms` was referenced below but never imported (NameError)


class LoginForm(AuthenticationForm):
    """Login form whose username/password widgets carry Bootstrap's form-control class."""
    username = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control'}))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'form-control'}))
import random

# Character pools the password is drawn from (same characters, same order,
# as the original hand-written lists).
lower_letters = list('abcdefghijklmnopqrstuvwxyz')
upper_letters = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
numbers = list('0123456789')
symbols = list('!#$%&()*+')

print("Welcome to the py_password_generator!")

# Ask the user how the password should be composed.
n_letters = int(input("How many letters would you like in your password?\n"))
choice_upper = input(
    "Letters mixed with both upper case and lower case? Y/N \n").lower()
upper_case = choice_upper == 'y'
n_symbols = int(input("How many symbols would you like?\n"))
n_numbers = int(input("How many numbers would you like? \n"))

# Draw letters (mixed-case if requested), then symbols, then digits.
letter_pool = lower_letters + upper_letters if upper_case else lower_letters
g_password = [random.choice(letter_pool) for _ in range(n_letters)]
g_password += [random.choice(symbols) for _ in range(n_symbols)]
g_password += [random.choice(numbers) for _ in range(n_numbers)]

# Shuffle so the character classes are interleaved.
random.shuffle(g_password)
print("Here is your password: " + ''.join(g_password))
|
import logging
import json
import patcherex.patches
l = logging.getLogger("patcherex.techniques.ManualPatcher")
class ManualPatcher:
    """Technique that loads a list of patch specs from a JSON file.

    Each JSON entry must look like
    ``{"patch_type": <class name in patcherex.patches>, "data": {<kwargs>}}``.
    """

    def __init__(self, binary_fname, backend, patch_file):
        with open(patch_file, "rb") as patch_file_obj:
            self.patches = json.load(patch_file_obj)
        self.binary_fname = binary_fname
        self.backend = backend

    def get_patches(self):
        """Instantiate and return the patch objects described by the JSON.

        Raises:
            ValueError: if a spec names a type patcherex.patches does not define.
        """
        patches = []
        for patch in self.patches:
            # Fix: getattr() without a default raises AttributeError on a
            # missing name, so the `is None` check below was unreachable;
            # supply None so the intended ValueError is actually raised.
            patcher = getattr(patcherex.patches, patch["patch_type"], None)
            if patcher is None:
                raise ValueError("Got unknown patch type %s" % patch["patch_type"])
            patches.append(patcher(**patch["data"]))
        return patches
def init_technique(program_name, backend, options):
    # Factory entry point used by the technique loader; ``options`` must
    # contain the keyword arguments of ManualPatcher.__init__ (``patch_file``).
    return ManualPatcher(program_name, backend, **options)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import pandas as pd

# Demo of common pandas Index methods (delete/insert/reindex/drop).
# The data are city price indices; the Chinese keys mean:
# '城市' city, '环比' month-over-month, '同比' year-over-year, '定基' fixed-base.
dl = {'城市':['北京', '上海', '广州', '深圳', '沈阳'], \
      '环比':['101.5', '101.2', '101.3', '102.0', '100.1'], \
      '同比':['120.7', '127.3', '119.4', '140.9', '101.4'], \
      '定基':['121.4', '127.8', '120.0', '145.5', '101.6']}
d = pd.DataFrame(dl, index=['c1', 'c2', 'c3', 'c4', 'c5'])
# nc: column index with position 2 removed; ni: row index with 'c0' appended.
nc = d.columns.delete(2)
ni = d.index.insert(5, 'c0')
print("d:\n{0}".format(d))
print("nc:\n{0}".format(nc))
print("ni:\n{0}".format(ni))
# Reindex to the new labels; 'ffill' fills the new 'c0' row from the last row.
nd = d.reindex(index=ni, columns=nc, method='ffill')
print("nd:\n{0}".format(nd))
a = pd.Series([9, 8, 7, 6], index=['a', 'b', 'c', 'd'])
print("a:\n{0}".format(a))
# Series.drop removes entries by label.
ad =a.drop(['b', 'c'])
print("ad:\n{0}".format(ad))
# DataFrame.drop: rows by default, columns with axis=1.
nd_drop = nd.drop('c0')
print("nd_drop:\n{0}".format(nd_drop))
nd_drop2 = nd_drop.drop('同比', axis=1)
print("nd_drop2:\n{0}".format(nd_drop2))
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>

# <codecell>

import os
from glob import glob

import numpy as np
# Fix: `rec_append_fields` and `figure` were used below but never imported.
from matplotlib.mlab import csv2rec, rec_append_fields
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D

# <codecell>

directory = '/Users/alex/Documents/PTV/test/res/'

# <codecell>

list_ptv_is_files = glob(os.path.join(directory, 'ptv_is.*'))
# print(list_ptv_is_files)

# <codecell>

# Reading the ptv_is.* files; each row holds the links to the previous/next
# frame (p, n) and the particle position (x, y, z).
frames = []
for counter, ptv_is_file in enumerate(list_ptv_is_files):
    frame = csv2rec(ptv_is_file, skiprows=1, delimiter=' ', names=['p', 'n', 'x', 'y', 'z'])
    # Tag every particle with its frame index `t` and a not-yet-assigned id (-999).
    # (np.int was removed from NumPy; the builtin int is the correct dtype spec.)
    frame = rec_append_fields(frame, ['t', 'id'],
                              [np.zeros_like(frame.x) + counter, np.zeros_like(frame.x) - 999],
                              dtypes=[int, int])
    frames.append(frame)

# <codecell>

# Adding trajectory id = linking.  (Renamed from `id`, which shadowed the builtin.)
traj_id = 0
for i, f in enumerate(frames):
    for j, l in enumerate(f):
        if l.p == -1 and l.n != -2:
            # No predecessor but a successor: a new trajectory starts here.
            l.id = traj_id
            traj_id += 1
        elif l.p != -1:
            # Continuation: inherit the id of the linked particle one frame back.
            l.id = frames[i - 1].id[l.p]

# <codecell>

# Drop particles that never received a trajectory id.
for i, f in enumerate(frames):
    ind = f.id == -999
    frames[i] = f[~ind]

# <codecell>

# Group particles by trajectory id.
last_traj = max(frames[-1].id)
traj = [[] for k in range(last_traj + 1)]
for f in frames:
    for p in f:
        traj[p.id].append(p)

# <codecell>

# Fix: `figure` was undefined; use the imported pyplot interface.
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(1, 1, 1, projection='3d')
for t in traj:
    x = [p.x for p in t]
    y = [p.y for p in t]
    z = [p.z for p in t]
    ax.plot(x, y, z)
|
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split, validation_curve
def get_data():
    """Load the iris dataset and return an 80/20 shuffled train/test split."""
    iris = datasets.load_iris()
    features, labels = iris.data, iris.target
    # train_test_split returns (x_train, x_test, y_train, y_test).
    return train_test_split(features, labels, test_size=0.2, shuffle=True)
# Validation curve: conveniently vary one model parameter and observe performance.
if __name__ == "__main__":
    x_train, x_test, y_train, y_test = get_data()
    """
    penalty: regularization type (default: l2)
    dual: use False when n_samples > n_features (the default)
    C: inverse of regularization strength; smaller means stronger regularization
    n_jobs: number of parallel jobs
    random_state: random number generator seed
    fit_intercept: whether to fit a constant (intercept) term
    """
    model = LogisticRegression(penalty='l2', dual=False, tol=0.0001, C=1.0,
                               fit_intercept=True, intercept_scaling=1, class_weight=None,
                               random_state=None, solver='liblinear', max_iter=100, multi_class='ovr',
                               verbose=0, warm_start=False, n_jobs=1)
    """Parameters
    ---
    model: estimator implementing fit and predict
    X, y: training features and labels
    param_name: name of the parameter to vary
    param_range: values the parameter takes
    cv: k-fold
    Returns
    ---
    train_score: training-set scores (array)
    test_score: validation-set scores (array)
    """
    # Fix: modern scikit-learn requires param_name/param_range as keyword
    # arguments; passing them positionally was deprecated and removed.
    train_score, test_score = validation_curve(model, x_train, y_train,
                                               param_name='tol',
                                               param_range=[0.000001, 10],
                                               cv=None, scoring=None, n_jobs=1)
    print(f'train score: {train_score}')
    print(f'test score: {test_score}')
|
# -*- coding: utf-8 -*-
''' Trade a paperclip up to a piano (Dijkstra's algorithm with dicts).
    Ported from Python 2: print statements -> print(); dict.values() is a
    view in Python 3 and has no .sort(), so the cheapest node is found
    without sorting in place.
'''
CLIP, POSTER, CD, GUITAR, DRUM, PIANO = ('clip', 'poster', 'cd', 'guitar',
                                         'drum', 'piano')
infinity = float('inf')
# graph[u][v] is the cost of trading u for v.
graph = {CLIP: {POSTER: 0, CD: 5},
         POSTER: {GUITAR: 30, DRUM: 35},
         CD: {GUITAR: 15, DRUM: 20},
         GUITAR: {PIANO: 20},
         DRUM: {PIANO: 10},
         PIANO: {}}

start, end = CLIP, PIANO
father_nodes = {POSTER: None, CD: None, GUITAR: None, DRUM: None, PIANO: None}
costs = {POSTER: infinity, CD: infinity, GUITAR: infinity, DRUM: infinity,
         PIANO: infinity}
# The start node is known, so seed its direct neighbours.
father_nodes[POSTER], costs[POSTER] = CLIP, 0
father_nodes[CD], costs[CD] = CLIP, 5
candidates = {POSTER, CD, GUITAR, DRUM}


# - find the cheapest unprocessed node
def find_lowest(candidates):
    """Return the candidate node with the smallest known cost."""
    candidates_dict = {k: v for k, v in costs.items() if k in candidates}
    lowest = min(candidates_dict.values())
    print(sorted(candidates_dict.values()))
    for k, v in candidates_dict.items():
        if v == lowest:
            print(k, v)
            return k


# - relax the edges leaving `lowest`
def refresh(lowest, candidates):
    """Update neighbour costs/parents if going through `lowest` is cheaper."""
    for neighbor in graph[lowest]:
        if costs[neighbor] > costs[lowest] + graph[lowest][neighbor]:
            costs[neighbor] = costs[lowest] + graph[lowest][neighbor]
            father_nodes[neighbor] = lowest


while candidates:
    lowest = find_lowest(candidates)
    candidates.remove(lowest)
    refresh(lowest, candidates)

# minimal cost:
print(costs[PIANO])
# reconstruct the whole route by following parents back from the end:
routine = [end]
while routine[0] != start:
    routine.insert(0, father_nodes[routine[0]])
print(' --> '.join(routine))
|
"""
ボタン機能追加
"""
import pygame
from pygame.locals import *
import sys
import Helper
from Helper import BattleHelper
import Base2 as Base
class ObjectClass(Base.ObjectClass):
    """Clickable button object.

    Each frame, Update() syncs mouse state from the shared Helper, draws the
    button, and runs the button-state logic.  The actual click callback is
    configurable via the "btnFunc" option (defaults to self.OnClick, which is
    presumably inherited from Base.ObjectClass - confirm).
    """

    def __init__(self, MainClass, kwargs):
        # Cache references to the shared services owned by the main object.
        self.MainClass = MainClass
        self.BattleHelper = MainClass.BattleHelper
        self.Helper = MainClass.Helper
        self.screen = MainClass.screen
        self.OneCommandAnimation = MainClass.OneCommandAnimation
        self.SetOptions(kwargs)
        self.SetParameter()
        self.SetButton()
        self.OnInstanceFunc()
        self.SetAction()

    def SetOptions(self, kwargs):
        # Default options; any caller-supplied kwargs override them.
        self.options = {"name" : "名無し",
                        "picturepath" : "../../../pictures/mon_016.bmp",
                        "position" : pygame.math.Vector2(),
                        "scale" : pygame.math.Vector2(100, 100),
                        "btnFunc" : self.OnClick}
        if kwargs != None:
            self.options.update(kwargs)

    def SetParameter(self):
        # Copy position/scale so later mutation does not alias the options dict.
        self.position = pygame.math.Vector2(self.options["position"])
        self.scale = pygame.math.Vector2(self.options["scale"])

    def SetButton(self):
        # Wire the press-detection helper and the click callback.
        self.BtnAction = Helper.ButtonAction(self)
        self.BoolAction = self.BtnAction.IsOnDown
        self.BtnFunc = self.options["btnFunc"]

    def OnInstanceFunc(self):
        # Register with the main loop so Update() gets called every frame.
        self.MainClass.AllObjectList.append(self)

    def SetAction(self):
        self.Action = self.Update

    def Update(self):
        # Per-frame tick: sync input, draw, then evaluate button state.
        self.HelperUpdate()
        self.Draw()
        self.BtnUpdate()

    def HelperUpdate(self):
        # Mirror the shared mouse state into the button-action helper.
        self.BtnAction.mousePos = self.Helper.mousePos
        self.BtnAction.mousePressed = self.Helper.mousePressed
        self.BtnAction.previousPressed = self.Helper.previousPressed

    def AnimationUpdate(self):
        # Advance the current animation callback, if one is set.
        if self.Animation != None:
            self.Animation()
|
# Open a text file and print it line by line.
# Fix: use a context manager so the file is closed even if printing raises.
with open('frutas.txt', 'r', encoding='utf-8') as archivo:
    for linea in archivo:
        # Strip the trailing newline before printing.
        linea = linea.replace('\n', '')
        print(linea)
|
import pytorch_wrapper as pw
import torch
import os
import uuid
from torch import nn
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
from torch.optim import AdamW
from itertools import product
from ...utils.loss_wrappers import PassThroughLossWrapper
from .model import UDRNNModel
from .dataset import UDRNNDataset
class UDRNNSystemWrapper:
    """Convenience wrapper around a UDRNNModel + pytorch_wrapper System.

    Handles device selection, training with early stopping, evaluation with
    a standard set of token-labeling metrics, and grid-search tuning.
    """

    def __init__(self, embeddings, w2i, c2i, model_params):
        # Word/character-to-index vocabularies used to build datasets.
        self._w2i = w2i
        self._c2i = c2i
        model = UDRNNModel(embeddings=embeddings, **model_params)
        # Prefer GPU when available; Softmax over the last axis converts
        # the model's logits to per-token class probabilities.
        if torch.cuda.is_available():
            self._system = pw.System(model, last_activation=nn.Softmax(dim=-1), device=torch.device('cuda'))
        else:
            self._system = pw.System(model, last_activation=nn.Softmax(dim=-1), device=torch.device('cpu'))

    def train(self,
              train_dataset_file,
              val_dataset_file,
              lr,
              batch_size,
              grad_accumulation_steps,
              run_on_multi_gpus,
              verbose=True,
              seed=0):
        """Load datasets from files and train (seeded for reproducibility)."""
        torch.manual_seed(seed)

        train_dataset = UDRNNDataset(train_dataset_file, self._w2i, self._c2i)
        val_dataset = UDRNNDataset(val_dataset_file, self._w2i, self._c2i)

        self._train_impl(
            train_dataset,
            val_dataset,
            lr,
            batch_size,
            grad_accumulation_steps,
            run_on_multi_gpus,
            verbose
        )

    def _train_impl(self,
                    train_dataset,
                    val_dataset,
                    lr,
                    batch_size,
                    grad_accumulation_steps,
                    run_on_multi_gpus,
                    verbose=True):
        """Train on already-constructed datasets with early stopping on val macro-F1."""
        # Shuffle training batches; keep validation deterministic.
        train_dataloader = DataLoader(
            train_dataset,
            sampler=RandomSampler(train_dataset),
            batch_size=batch_size,
            collate_fn=UDRNNDataset.collate_fn
        )

        val_dataloader = DataLoader(
            val_dataset,
            sampler=SequentialSampler(val_dataset),
            batch_size=batch_size,
            collate_fn=UDRNNDataset.collate_fn
        )

        # The model computes its own loss; the wrapper just passes it through.
        loss_wrapper = PassThroughLossWrapper()
        optimizer = AdamW(self._system.model.parameters(), lr=lr)

        # Unique temp dir for the early-stopping checkpoint of this run.
        base_es_path = f'/tmp/{uuid.uuid4().hex[:30]}/'
        os.makedirs(base_es_path, exist_ok=True)

        train_method = self._system.train_on_multi_gpus if run_on_multi_gpus else self._system.train

        _ = train_method(
            loss_wrapper,
            optimizer,
            train_data_loader=train_dataloader,
            evaluation_data_loaders={'val': val_dataloader},
            evaluators={
                # The 4 appears to be the batch index of the padding/label
                # mask used by TokenLabelingEvaluatorWrapper - confirm
                # against UDRNNDataset.collate_fn.
                'macro-f1': pw.evaluators.TokenLabelingEvaluatorWrapper(
                    pw.evaluators.MultiClassF1Evaluator(average='macro'),
                    4
                )
            },
            gradient_accumulation_steps=grad_accumulation_steps,
            callbacks=[
                # Stop after 3 evaluations without val macro-F1 improvement.
                pw.training_callbacks.EarlyStoppingCriterionCallback(
                    patience=3,
                    evaluation_data_loader_key='val',
                    evaluator_key='macro-f1',
                    tmp_best_state_filepath=f'{base_es_path}/temp.es.weights'
                )
            ],
            verbose=verbose
        )

    def evaluate(self, eval_dataset_file, batch_size, run_on_multi_gpus, verbose=True):
        """Load an evaluation dataset from file and compute all metrics."""
        eval_dataset = UDRNNDataset(eval_dataset_file, self._w2i, self._c2i)
        return self._evaluate_impl(eval_dataset, batch_size, run_on_multi_gpus, verbose)

    def _evaluate_impl(self, eval_dataset, batch_size, run_on_multi_gpus, verbose=True):
        """Evaluate accuracy plus macro/micro precision, recall and F1."""
        eval_dataloader = DataLoader(
            eval_dataset,
            sampler=SequentialSampler(eval_dataset),
            batch_size=batch_size,
            collate_fn=UDRNNDataset.collate_fn
        )

        evaluators = {
            'acc': pw.evaluators.TokenLabelingEvaluatorWrapper(
                pw.evaluators.MultiClassAccuracyEvaluator(),
                4
            ),
            'macro-prec': pw.evaluators.TokenLabelingEvaluatorWrapper(
                pw.evaluators.MultiClassPrecisionEvaluator(average='macro'),
                4
            ),
            'macro-rec': pw.evaluators.TokenLabelingEvaluatorWrapper(
                pw.evaluators.MultiClassRecallEvaluator(average='macro'),
                4
            ),
            'macro-f1': pw.evaluators.TokenLabelingEvaluatorWrapper(
                pw.evaluators.MultiClassF1Evaluator(average='macro'),
                4
            ),
            'micro-prec': pw.evaluators.TokenLabelingEvaluatorWrapper(
                pw.evaluators.MultiClassPrecisionEvaluator(average='micro'),
                4
            ),
            'micro-rec': pw.evaluators.TokenLabelingEvaluatorWrapper(
                pw.evaluators.MultiClassRecallEvaluator(average='micro'),
                4
            ),
            'micro-f1': pw.evaluators.TokenLabelingEvaluatorWrapper(
                pw.evaluators.MultiClassF1Evaluator(average='micro'),
                4
            )
        }

        if run_on_multi_gpus:
            return self._system.evaluate_on_multi_gpus(eval_dataloader, evaluators, verbose=verbose)
        else:
            return self._system.evaluate(eval_dataloader, evaluators, verbose=verbose)

    @staticmethod
    def tune(embeddings, w2i, c2i, train_dataset_file, val_dataset_file, run_on_multi_gpus):
        """Grid-search lr/dropout/batch-size/hidden-size; returns [[score, params], ...].

        NOTE: the loop variables `dp`, `batch_size` and `hs` shadow the
        candidate lists of the same names - harmless here because the lists
        are fully expanded into `params` before the loop starts.
        """
        lrs = [0.01, 0.001]
        batch_size = [16, 32, 64]
        dp = [0, 0.1, 0.2, 0.3]
        hs = [100, 200, 300]
        params = list(product(lrs, dp, batch_size, hs))
        grad_accumulation_steps = 1
        char_embedding_size = 30

        # Datasets are built once and shared across all configurations.
        train_dataset = UDRNNDataset(train_dataset_file, w2i, c2i)
        val_dataset = UDRNNDataset(val_dataset_file, w2i, c2i)

        results = []
        for i, (lr, dp, batch_size, hs) in enumerate(params):
            print(f'{i + 1}/{len(params)}')
            # Re-seed per configuration so runs are comparable.
            torch.manual_seed(0)
            current_system_wrapper = UDRNNSystemWrapper(
                embeddings,
                w2i,
                c2i,
                {
                    'rnn_dp': dp,
                    'mlp_dp': dp,
                    'rnn_hidden_size': hs,
                    'char_embeddings_shape': (len(c2i), char_embedding_size)
                }
            )

            current_system_wrapper._train_impl(
                train_dataset,
                val_dataset,
                lr,
                batch_size,
                grad_accumulation_steps,
                run_on_multi_gpus
            )

            current_results = current_system_wrapper._evaluate_impl(val_dataset, batch_size, run_on_multi_gpus)
            results.append([current_results['macro-f1'].score, (lr, dp, batch_size, hs)])

        return results
|
from scoring_engine.db import session, delete_db, init_db
from scoring_engine.models.setting import Setting
class UnitTest(object):
    """Base test fixture: wipes and re-initializes the DB around each test."""

    def setup(self):
        self.session = session
        delete_db(self.session)
        init_db(self.session)
        self.create_default_settings()

    def teardown(self):
        delete_db(self.session)
        self.session.remove()

    def create_default_settings(self):
        """Seed the Setting rows the application expects (insertion order preserved)."""
        defaults = (
            ('about_page_content', 'example content value'),
            ('welcome_page_content', 'example welcome content <br>here'),
            ('round_time_sleep', 60),
            ('worker_refresh_time', 30),
            ('blue_team_update_hostname', True),
            ('blue_team_update_port', True),
            ('blue_team_update_account_usernames', True),
            ('blue_team_update_account_passwords', True),
            ('overview_show_round_info', True),
        )
        for setting_name, setting_value in defaults:
            self.session.add(Setting(name=setting_name, value=setting_value))
        self.session.commit()
|
import json
import csv

fileName = 'document.json'

# Load the whole JSON document once.
with open(fileName, 'r') as fin:
    content = json.load(fin)

# Fix: the original read 'TimeStamp' and 'Categories' during one pass over
# content.items(), so `date` was unbound if 'Categories' came first; read the
# timestamp explicitly before processing the categories.
date = content.get('TimeStamp')

# Fix: open the CSV once instead of reopening it for every row, and drop the
# bare `except: pass` that silently swallowed every error.
with open('today.csv', 'a', newline='') as f:
    writer = csv.writer(f)
    for category in content.get('Categories', {}).values():
        # Each category maps item name -> cost; emit one row per item.
        for p, cost in category.items():
            writer.writerow([p, date, cost])
|
from django.db import migrations
from django.conf import settings
def create_data(apps, schema_editor):
    """Seed the catalog with 23 demo products.

    The original 23 copy-pasted Product(...) lines follow a pattern:
    descriptions/quantities cycle 1..3 for the first three products and
    1..4 for every later group of four, with quantity = cycle-number * 100.
    """
    Product = apps.get_model('catalog', 'Product')
    # One entry per sku 1..23: [1,2,3] then five groups of [1,2,3,4].
    desc_numbers = [1, 2, 3] + [1, 2, 3, 4] * 5
    for sku_number, desc_number in enumerate(desc_numbers, start=1):
        Product(sku='sku%d' % sku_number,
                name='Product %d' % sku_number,
                description='Product %d' % desc_number,
                buyPrice=100,
                sellPrice=100,
                unit='kilogram',
                quantity=desc_number * 100).save()
class Migration(migrations.Migration):
    """Data migration that seeds the catalog app with demo products."""

    dependencies = [
        ('catalog', '0001_initial'),
    ]

    operations = [
        migrations.RunPython(create_data),
    ]
from source.domain.student import Student
class DataProviderStudent:
    """In-memory student store keyed by student id.

    NOTE: the registry deliberately lives in a module-level global (the
    class attribute below is unused), so every instance shares - and each
    __init__ resets - the same dict.  Kept for backward compatibility.
    The fixes here replace linear scans over the dict with direct key
    lookups and drop redundant `global` declarations (only rebinding in
    __init__ needs one; in-place mutation does not).
    """

    studentList = None  # unused; kept so the class interface is unchanged

    def __init__(self):
        global studentList
        studentList = dict()

    def insert(self, student):
        """Register (or overwrite) a student under its id; always returns True."""
        studentList[student.getStudentId()] = student
        return True

    def update(self, student):
        """Replace the stored record with a fresh copy; False if the id is unknown."""
        studentId = student.getStudentId()
        if studentId not in studentList:
            return False
        currentStudent = Student()
        currentStudent.setStudentId(student.getStudentId())
        currentStudent.setName(student.getName())
        currentStudent.setEmail(student.getEmail())
        currentStudent.setAddress(student.getAddress())
        currentStudent.setContactNumber(student.getContactNumber())
        currentStudent.setPassword(student.getPassword())
        studentList[studentId] = currentStudent
        return True

    def delete(self, id):
        """Remove the student with the given id; True if it existed."""
        if id in studentList:
            del studentList[id]
            return True
        return False

    def getList(self) -> dict:
        """Return the (shared, mutable) id -> student dict."""
        return studentList

    def getById(self, id):
        """Return a defensive copy of the stored student, or False if absent."""
        studentInfo = studentList.get(id)
        if studentInfo is None:
            return False
        currentStudent = Student()
        currentStudent.setStudentId(studentInfo.getStudentId())
        currentStudent.setName(studentInfo.getName())
        currentStudent.setEmail(studentInfo.getEmail())
        currentStudent.setAddress(studentInfo.getAddress())
        currentStudent.setContactNumber(studentInfo.getContactNumber())
        currentStudent.setPassword(studentInfo.getPassword())
        return currentStudent
|
import os, os.path
from hsc.integration.test import CommandsTest
from hsc.integration.camera import getCameraInfo
class SolveTansipTest(CommandsTest):
    """Integration test that runs solvetansip.py for one visit of a camera."""

    def __init__(self, name, camera, visit, rerun=None, **kwargs):
        self.camera = camera
        self.visit = visit
        self.rerun = rerun
        cameraInfo = getCameraInfo(camera)
        # Build the command line; the @WORKDIR@ placeholder is presumably
        # substituted by the CommandsTest harness - confirm.
        command = os.path.join(os.environ['SOLVETANSIP_DIR'], 'bin', 'solvetansip.py')
        command += " @WORKDIR@/" + cameraInfo.addDir + " --id visit=%d" % visit
        if rerun is not None:
            command += " --rerun=" + rerun
        super(SolveTansipTest, self).__init__(name, ["data", "astrometry", camera], [command], **kwargs)

    def validate(self, *args, **kwargs):
        # No validation yet: we only care that it runs
        pass
|
from twitter import *
from pushbullet import PushBullet
import config
# Credentials come from the local config module.
CONSUMER_KEY = config.twitter_consumer_key
CONSUMER_SECRET = config.twitter_consumer_secret
OAUTH_TOKEN = config.twitter_oauth_token
OAUTH_SECRET = config.twitter_oauth_secret
pb_api_key = config.pb_api_key
twitter = Twitter(auth=OAuth(
    OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
# Latest air-quality tweet; the 4th ';'-separated field presumably holds the
# PM2.5 reading - confirm the account's tweet format.
tweets = twitter.statuses.user_timeline(screen_name="CGShanghaiAir", count=1)
text = tweets[0]['text']
pm25 = text.split(";")[3]
# NOTE(review): `pm25` is a string here, so `pm25 > 90` raises TypeError on
# Python 3 (and on Python 2 compared str > int, i.e. was always True). The
# field needs numeric parsing before this threshold check - fix once the
# tweet format is confirmed.
if pm25 > 90:
    pb = PushBullet(pb_api_key)
    nexus6p = pb.get_device('Huawei Nexus 6P')
    nexus6p.push_note('Shanghai Air Quality', text)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Percentage improvement from the first score (s1) to the second (s2),
# printed twice to demonstrate %-formatting vs str.format.
s1 = 72
s2 = 85
r = (s2-s1)/s1*100
# Message (Chinese): "Xiao Ming, your score improved by <r>%."
print("小明,成绩提升了%.1f%%。" % r)
print("小明,成绩提升了{0:.1f}%。".format(r))
# Coordinate compression: map each distinct x (and y) value among the given
# points to its 1-based rank.  (H and W are read but unused by the algorithm.)
H, W, N = map(int, input().split())
point = [tuple(map(int, input().split())) for _ in range(N)]
# Rank of each distinct first / second coordinate (sorted, starting at 1).
a = {j:i for i, j in enumerate(sorted(set(map(lambda x:x[0], point))), start=1)}
b = {j:i for i, j in enumerate(sorted(set(map(lambda x:x[1], point))), start=1)}
for i in point:
    print(a[i[0]], b[i[1]])
|
# This shows the use of generators, and use of iter() and next() methods - scratch sheet

# simple use of generators
# def generate_nums():
#     for i in range(10):
#         yield i
#
# for k in generate_nums():
#     print(k)

# Use of Generators to not allocate everything in memory

# the usual way to allocate everything in memory
# Fix: this was also named `fib`, so the generator version below silently
# shadowed it; renamed so both variants remain callable.
def fib_list(n):
    """Return the first n Fibonacci numbers as a list of '<num>,' strings."""
    res = []
    a, b = 1, 1
    for i in range(0, n):
        res.append(str(a) + ",")
        a, b = b, (a + b)
    return res

# print(fib_list(10000))

# the Generator way to not allocate all of it in memory
def fib(n):
    """Yield the first n Fibonacci numbers lazily."""
    a, b = 1, 1
    for i in range(0, n):
        yield a
        a, b = b, a + b

# file_f = open("/Users/abhikbanerjee/fib_file", "w")
# for k in fib(10000):
#     file_f.write(str(k)+"\n")
# file_f.close()
# print(k)

# Use next method to iterate over the generator func
def give_nums():
    """Yield 0..3; a fifth next() raises StopIteration."""
    for i in range(4):
        yield i

gen = give_nums()
# print(next(gen))
# print(next(gen))
# print(next(gen))
# print(next(gen))
# print(next(gen))
# print(next(gen))

# apply iter on a string
my_str = "abhik"
# print(next(my_str))  # would fail: a str is iterable but not an iterator

# create an iterator on the iterable my_str
it = iter(my_str)
print(next(it))
print(next(it))
print(next(it))
|
# First as a comparison: design an unwarped filter with 4 coefficients/taps with these specifications:
# (Ported from Python 2: all `print` statements converted to the print() function.)
import scipy.signal as sp
cunw = sp.remez(4, [0, 0.025, 0.025+0.025, 0.5], [1, 0], [1, 100])
print('cunw = ', cunw)
# impulse response:
import matplotlib.pyplot as plt
plt.plot(cunw)
plt.xlabel('Sample')
plt.ylabel('value')
plt.title('Unwarped Filter Coefficients')
plt.show()
# frequency response:
from freqz import freqz
freqz(cunw, 1)

from warpingphase import *
import numpy as np

# warping allpass coefficient (empirical formula with f_s = 32 kHz, from [1]):
a = 1.0674 * (2 / np.pi * np.arctan(0.6583 * 32)) ** 0.5 - 0.1916
print('a = ', a)
# ans = 0.85956

# The warped cutoff frequency then is:
fcw = -warpingphase(0.05 * np.pi, 0.85956)
print('fcw = ', fcw)
# fcw = 1.6120; %in radiants

# filter design:
# cutoff frequency normalized to nyquist:
fcny = fcw/np.pi
print('fcny = ', fcny)
# fcny = 0.51312

# python:
c = sp.remez(4, [0, fcny/2.0, fcny/2.0+0.1, 0.5], [1, 0], [1, 100])
# The resulting Impulse Response:
plt.plot(c)
plt.xlabel('Sample')
plt.ylabel('value')
plt.title('Filter Coefficients in Warped Domain')
plt.show()
# The resulting Frequency response:
freqz(c, 1)

# Warping Allpass filters:
# Numerator:
B = [-a.conjugate(), 1]
# Denominator:
A = [1, -a]

# Impulse with 80 zeros:
Imp = np.zeros(80)
Imp[0] = 1
x = Imp

# Y1(z)=A(z), Y2(z)=A^2(z),...
# Warped delays: chain the allpass to realize z^-1, z^-2, z^-3 warped.
y1 = sp.lfilter(B, A, x)
y2 = sp.lfilter(B, A, y1)
y3 = sp.lfilter(B, A, y2)

# Output of warped filter with impulse as input:
yout = c[0]*x + c[1]*y1 + c[2]*y2 + c[3]*y3

# frequency response:
freqz(yout, 1)

# Impulse response:
plt.plot(yout)
plt.xlabel('Sample')
plt.ylabel('value')
plt.title('Impulse Response of Warped Lowpass Filter')
plt.show()
|
# -*- coding: utf-8 -*-
class ValidationException(Exception):
    """Raised when validation fails; carries the message and offending fields."""

    def __init__(self, message, fields):
        # Store the message on the base Exception and keep explicit
        # attributes for programmatic access by handlers.
        super().__init__(message)
        self.message = message
        self.fields = fields
|
# coding=utf-8
# Demo of class vs. instance attributes (ported from Python 2 print statements).


class Person(object):
    sex = 'man'
    age = 18

    def fun(self):
        print('fdhsfhsd')


class Metel(object):
    height = 21

    def fun(self):
        print('hello')


a = Person()
a.name = 'zhangsan'  # add an instance-only attribute
a.age = 20           # shadows the class attribute on this instance only
b = Person()
print(b.sex)
print(a.sex)
print(a.name)
print(a.age)
print('+++++++++++++++++++++++++')
print(Person.sex)
Person.age = 19
print(Person.age)
# print(Person.name)  # would fail: `name` exists only on instance `a`
a.fun()
# Fix: Metel is a module-level class, not an attribute of the Person
# instance, so the original `a.Metel()` raised AttributeError.
c = Metel()
print(c.height)
c.fun()
|
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class AddressBookConfig(AppConfig):
    """Django app configuration for the packman address book."""
    name = "packman.address_book"
    verbose_name = _("Address Book")
|
#!/usr/bin/python/
import numpy as np
from math import *
from pprint import pprint
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# 4x4 identity used as the neutral homogeneous transform.
# NOTE(review): np.matrix is discouraged by NumPy in favor of ndarrays, but
# the FK code below relies on its `*` meaning matrix multiplication.
identityMatrix_44 = np.matrix([
    [1, 0, 0, 0],
    [0, 1, 0, 0],
    [0, 0, 1, 0],
    [0, 0, 0, 1]])

# Joint limits in degrees for the X/Y/Z rotations and the 'A' joint.
max_thetaX = 90
min_thetaX = -90
max_thetaY = 90
min_thetaY = -90
max_thetaZ = 90
min_thetaZ = -90
max_thetaA = 45
min_thetaA = -45
default_thetaA = 45

# Sampling intervals (degrees) for progressively finer sweeps.
interval_level1 = 10
interval_level2 = 4
interval_level3 = 2
interval_level4 = 1

# class of transformation matrix
# forward kinematic class
# inverse kinematic class
# class of a transformation matrix

# the end effector class
class EndEffector(object):
    """Position of the end effector as a 4x1 homogeneous column vector."""

    def __init__(self):
        # Start at the origin.
        self.px, self.py, self.pz = 0, 0, 0
        self._sync_pos()

    def setPos(self, x, y, z):
        """Move the end effector to (x, y, z) and refresh the column vector."""
        self.px, self.py, self.pz = x, y, z
        self._sync_pos()

    def _sync_pos(self):
        # Homogeneous coordinates: [x, y, z, 1]^T.
        self.pos = np.matrix([[self.px], [self.py], [self.pz], [1]])
# the Forward Kinematics class
class FK(object):
def __init__(self):
self.transMatrix = identityMatrix_44
self.endEffector = EndEffector()
def setEndEffector(self, x, y, z):
self.endEffector.setPos(x, y, z)
def addTransMatrix(self, type, a, p):
a = radians(a)
if type == "rx":
added = np.matrix([
[1, 0, 0, p[0]],
[0, cos(a), -sin(a), p[1]],
[0, sin(a), cos(a), p[2]],
[0, 0, 0, 1]
])
elif type == "ry":
added = np.matrix([
[cos(a), 0, sin(a), p[0]],
[0, 1, 0, p[1]],
[-sin(a), 0, cos(a), p[2]],
[0, 0, 0, 1]
])
elif type == "rz":
added = np.matrix([
[cos(a), -sin(a), 0, p[0]],
[sin(a), cos(a), 0, p[1]],
[0, 0, 1, p[2]],
[0, 0, 0, 1]
])
else:
added = identityMatrix_44
self.transMatrix = added * self.transMatrix
#print self.transMatrix
def calculateFK(self):
return self.transMatrix * self.endEffector.pos
""" set default position of the robot, in its operational space,
let thetaX = 0, thetaY = 0, thetaZ = 0, thetaA = 45, calculate its coordinates in the
base frame.
"""
human_arm = FK()
human_arm.setEndEffector(0, 0, 0)
human_arm.addTransMatrix("rx", 0, [0.25, 0, 0])
human_arm.addTransMatrix("rz", default_thetaA, [0.2, 0, 0])
human_arm.addTransMatrix("rx", 90, [0, 0, 0])
human_arm.addTransMatrix("rz", 90, [0, 0, 0])
human_arm.addTransMatrix("rx", 90, [0, 0, 0])
# print human_arm.calculateFK()
lookUpTable = dict()
""" function for simulate every position of the robotic linkage
we use Python dictionary to store all possible configurations of the joints according to
every point in the operational space
"""
def simulateIK():
    """Brute-force an inverse-kinematics lookup table.

    Sweeps every joint combination at the coarse interval, runs forward
    kinematics, and maps the stringified (rounded) end-effector position
    to the joint angles [tx, ty, tz, ta] that produced it. Fills and
    returns the module-level `lookUpTable`.
    """
    step = interval_level1
    for tx in np.arange(min_thetaX, max_thetaX + step, step):
        for ty in np.arange(min_thetaY, max_thetaY + step, step):
            for tz in np.arange(min_thetaZ, max_thetaZ + step, step):
                for ta in np.arange(min_thetaA, max_thetaA + step, step):
                    arm = FK()
                    arm.setEndEffector(0, 0, 0)
                    # Fixed base chain (same as the human_arm setup).
                    arm.addTransMatrix("rx", 0, [0.25, 0, 0])
                    arm.addTransMatrix("rz", default_thetaA, [0.2, 0, 0])
                    arm.addTransMatrix("rx", 90, [0, 0, 0])
                    arm.addTransMatrix("rz", 90, [0, 0, 0])
                    arm.addTransMatrix("rx", 90, [0, 0, 0])
                    # Variable joints under test.
                    arm.addTransMatrix("rx", tx, [0, 0, 0])
                    arm.addTransMatrix("ry", ty, [0, 0, 0])
                    arm.addTransMatrix("rz", tz, [0, 0, 0])
                    arm.addTransMatrix("rz", ta, [0, 0, 0])
                    key = arm.calculateFK().tolist()
                    # Round x/y/z to 2 decimals so nearby poses share a key.
                    for axis in range(0, 3):
                        key[axis][0] = round(key[axis][0], 2)
                    lookUpTable[str(key)] = [tx, ty, tz, ta]
    return lookUpTable
# [[0.0], [0.04], [0.41], [1.0]]': [-30, 0, 40, -45],
# lookUpKey = [[0.03], [-0.03], [0.41], [1.0]]
# posIK = simulateIK()
# pprint(posIK)
# forward kinematics trajectory
# Each posN is the nested list form of a 4x1 matrix: [[x],[y],[z],[1]].
pos0 = human_arm.calculateFK().tolist()
print pos0
human_arm.addTransMatrix("rx", -30, [0, 0, 0])
pos1 = human_arm.calculateFK().tolist()
print pos1
human_arm.addTransMatrix("ry", 0, [0, 0, 0])
pos2 = human_arm.calculateFK().tolist()
print pos2
human_arm.addTransMatrix("rz", 40, [0, 0, 0])
pos3 = human_arm.calculateFK().tolist()
print pos3
human_arm.addTransMatrix("rz", -45, [0, 0, 0])
pos4 = human_arm.calculateFK().tolist()
print pos4
# generate forward kinematics trajectory plots
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# NOTE(review): pos2 is omitted from x/y/z -- presumably because the ry(0)
# step does not move the arm; confirm. Also each element here is a
# one-element list ([x]), not a scalar; matplotlib appears to accept it.
x = [pos0[0], pos1[0], pos3[0], pos4[0]]
y = [pos0[1], pos1[1], pos3[1], pos4[1]]
z = [pos0[2], pos1[2], pos3[2], pos4[2]]
# Start and end points highlighted separately in red.
xs = [pos0[0], pos4[0]]
ys = [pos0[1], pos4[1]]
zs = [pos0[2], pos4[2]]
ax.scatter(x, y, z, c="b", marker="o")
ax.scatter(xs, ys, zs, c="r", marker="^")
ax.set_xlabel('X Axis')
ax.set_ylabel('Y Axis')
ax.set_zlabel('Z Axis')
fig.savefig('data.png')
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: resizes cliente.cpf (100 chars)
    and infoabst.telefone (14 chars)."""
    dependencies = [
        ('clientes', '0005_auto_20151017_2118'),
    ]
    operations = [
        migrations.AlterField(
            model_name='cliente',
            name='cpf',
            field=models.CharField(max_length=100),
        ),
        migrations.AlterField(
            model_name='infoabst',
            name='telefone',
            field=models.CharField(max_length=14),
        ),
    ]
|
import tkinter as tk
from Utilities import BGLogger, Graph
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import os
class GraphPage(tk.Frame):
    """Main page of the app: records keystrokes via a BGLogger.KeyboardLogger
    and plots their frequency as a bar graph embedded in the Tk window.

    `controller` is the owning application object; it supplies settings
    (readSettings), navigation (showFrame), the save directory
    (destinationDir) and shutdown (destroy).
    """
    def __init__(self, master, controller):
        tk.Frame.__init__(self, master)
        self.controller = controller
        self.Logger = BGLogger.KeyboardLogger()
        self.Grapher = Graph.BarGraph()
        self.createWidgets()
    def setLists(self):
        """
        Set black and white-list of keyboardlogger
        """
        # Set blackList, if empty, set instead whiteList
        blackList, whiteList = self.controller.readSettings("blackList", "whiteList").values()
        if blackList:
            self.Logger.setBlackList(blackList)
        elif whiteList:
            self.Logger.setWhiteList(whiteList)
    def createWidgets(self):
        """
        Create all the interactive widgets of the page
        """
        self.setLists()
        # Three equal-weight rows for the buttons; column 1 holds the graph.
        self.rowconfigure((0,1,2), weight=1)
        self.columnconfigure(1, weight=1)
        def saveImg():
            """
            Save an image of the current graph
            """
            fTypes, dpi = self.controller.readSettings("imageFormats", "imageDPI").values()
            # I know the following line isn't very practical but hey, who doesn't like a one-liner
            # Builds [("fmt", "*.fmt"), ...] for the file dialog.
            fileTypeList = tuple(map(lambda f, t : tuple((s+t) for s in f), [("", "*.")]*len(fTypes), fTypes))
            location = tk.filedialog.asksaveasfilename(
                initialdir=self.controller.destinationDir,
                title="save image",
                defaultextension="png",
                filetypes=fileTypeList)
            name, ext = os.path.splitext(location)
            # Empty string means the user cancelled the dialog.
            if location:
                self.Grapher.saveImg(location, format=ext.replace(".", ""), dpi=dpi)
        self.keyLogButton = tk.Button(self,
            text="Start logging",
            background="green2",
            activebackground="green2",
            command=lambda : self.setToggleState(self.Logger.toggle()))
        self.graphButton = tk.Button(self,
            text="Update Graph",
            background="yellow2",
            activebackground="yellow2",
            command=self.plotData)
        self.saveImgButton = tk.Button(self,
            text="Save Image",
            background="royalblue1",
            activebackground="royalblue1",
            command=saveImg)
        # Embed the matplotlib figure prepared by the Grapher.
        self.graphCanvas = FigureCanvasTkAgg(self.Grapher.figSetup(
            title="Letter Frequency",
            xlabel="Character",
            ylabel="Percentage (%)",
            size=(10, 6)), master= self)
        self.keyLogButton.grid(row=0, column=0, sticky="NSEW")
        self.graphButton.grid(row=1, column=0 ,sticky="NSEW")
        self.saveImgButton.grid(row=2, column=0, sticky="NSEW")
        self.graphCanvas.get_tk_widget().grid(
            row=0, rowspan=3, column=1, sticky="NSEW")
    def setToggleState(self, default=None):
        """
        Toggle the state of the logging button.
        This is needed as the logging can be toggled by the BGLogger stopButton (default f12).
        """
        # Prefer the explicit toggle result; fall back to the logger state.
        toggleBool = default or self.Logger.logging
        if toggleBool:
            self.keyLogButton.config(
                text="Stop logging",
                relief="raised",
                background="red2",
                activebackground="red2")
            # Check for any updates to the logging state
            self.after(100, self.setToggleState)
        else:
            self.setLists()
            self.keyLogButton.config(
                text="Start logging",
                relief="raised",
                background="green2",
                activebackground="green2")
    def plotData(self, event=None):
        """
        Plot the current log
        """
        self.Grapher.loadData(self.Logger.keyDict, mode="percent")
        self.Grapher.plotData()
        self.graphCanvas.draw()
    def menuBar(self, root):
        """
        Return the menubar object of this page
        """
        def newLOG():
            """
            Flush the currently logged data
            """
            self.Logger.flush()
            self.plotData()
        def readLOGFile(path):
            """
            Read and return logged data from .log file at path
            """
            # Lines look like "key:count"; returns None when the file is
            # missing (callers handle that via AttributeError/TypeError).
            try:
                with open(path, mode="r") as file:
                    newDict = {}
                    for line in file.readlines():
                        line = line.replace("'", "")
                        key, value = line.split(":")
                        newDict[key.strip()] = int(value.strip())
                    return newDict
            except IOError:
                print("Path not found")
        def writeLOGFile(path, dataDict):
            """
            Write logged data to .log file at path
            """
            try:
                with open(path, mode="w") as file:
                    for key in dataDict:
                        file.write("{}:{}\n".format(key, dataDict[key]))
            except:
                # NOTE(review): bare except also swallows unrelated errors.
                print("Unable to open file")
        def loadLOGFile(replace=True):
            """
            Load logged data from .log file into page using readLOGFile()
            """
            # Stop logging first so the loaded data is not mixed with live keys.
            if self.Logger.logging:
                self.setToggleState(self.Logger.toggle())
            filePath = tk.filedialog.askopenfilename(
                initialdir=self.controller.destinationDir,
                title="Select file",
                filetypes=(("log files", "*.LOG"),))
            self.Logger.keyDict = readLOGFile(filePath)
            try:
                self.plotData()
            except AttributeError:
                # readLOGFile returned None (missing/unreadable file).
                print("Unable to open file")
        def saveNewLOGFile():
            """
            Save logged data into new .log file using writeLOGFile()
            """
            if self.Logger.logging:
                self.setToggleState(self.Logger.toggle())
            filePath = tk.filedialog.asksaveasfilename(
                initialdir=self.controller.destinationDir,
                defaultextension=".dat",
                title="Create file",
                filetypes=(("log file", "*.LOG"),))
            writeLOGFile(filePath, self.Logger.keyDict)
        def saveToLOGFile():
            """
            Save logged data into old .log file using writeLOGFile()
            """
            # Merges the existing file's counts into the current log, then
            # writes the merged result back to the same file.
            if self.Logger.logging:
                self.setToggleState(self.Logger.toggle())
            filePath = tk.filedialog.askopenfilename(
                initialdir=self.controller.destinationDir,
                title="Select file",
                filetypes=(("log file", "*.LOG"),))
            oldData = readLOGFile(filePath)
            newData = self.Logger.keyDict
            try:
                for key in oldData:
                    if key in newData:
                        newData[key] += oldData[key]
                    else:
                        newData[key] = oldData[key]
                writeLOGFile(filePath, self.Logger.keyDict)
            except TypeError:
                # oldData is None when the file could not be read.
                print("Unable to open file")
        menu = tk.Menu(root)
        filemenu = tk.Menu(menu, tearoff=0)
        filemenu.add_command(label="New Log", command=newLOG)
        filemenu.add_command(label="Open Log", command=loadLOGFile)
        filemenu.add_command(label="Save As", command=saveNewLOGFile)
        filemenu.add_command(label="Save To", command=saveToLOGFile)
        filemenu.add_separator()
        filemenu.add_command(label="Exit", command=self.controller.destroy)
        menu.add_cascade(label="File", menu=filemenu)
        def showSettings():
            """
            Stop logging if active and show settingsPage
            """
            if self.Logger.logging:
                self.setToggleState(self.Logger.toggle())
            self.controller.showFrame("SettingsPage")
        menu.add_command(label="Settings",
            command=showSettings)
        menu.add_command(label="Exit",
            command=self.controller.destroy)
        return menu
|
from pwn import *
# CTF exploit for an RSA decryption-oracle service: the flag ciphertext is
# blinded by multiplying with the encryption of 0x01 (ciphertext
# multiplication = plaintext multiplication under textbook RSA), then the
# product is sent to the decryption option.
io = remote('oucs.cry.wanictf.org','50010')
plaintext = ""
ciphertext = ""
n = 0
# Get n
io.recvuntil('> ')
io.sendline('4')
# SECURITY NOTE: exec() on data received from the network is dangerous;
# only acceptable here because this is a throwaway CTF client and the
# server replies with assignments like "n = ...".
exec(io.recvline().decode('utf-8')) # n
# Get c1
io.recvuntil('> ')
io.sendline('1')
exec(io.recvline().decode('utf-8'))
flag_encrypt = ciphertext
# Get c2
io.recvuntil('> ')
io.sendline('2')
io.recvuntil('> ')
io.sendline('0x01')
exec(io.recvline().decode('utf-8'))
one_encrypt = ciphertext
# decrypt c1*c2 mod n
io.recvuntil('> ')
io.sendline('3')
io.recvuntil('> ')
io.sendline(str(hex((flag_encrypt * one_encrypt)%n)))
exec(io.recvline().decode('utf-8'))
# NOTE(review): the -1 presumably undoes a server-side offset on the
# decrypted value -- confirm against the challenge source.
plaintext = hex(plaintext - 1)
print(bytes.fromhex(plaintext[2:]))
|
import re
import subprocess
from math import log
from pathlib import Path
from nltk import FreqDist, TweetTokenizer
from nltk.corpus import stopwords
from sqlalchemy.orm import sessionmaker
from tqdm import tqdm
import settings
from db import events
from db.engines import engine_lmartine as engine
from db.models_new import EventGroup
from evaluation.automatic_evaluation import remove_and_stemming
# Shared tokenizer and stop-word set used by the corpus-building helpers.
tknzr = TweetTokenizer()
# English stop words plus punctuation/artifacts that are common in tweets.
stop_words = set(stopwords.words('english'))
stop_words.update(
    ['~', '.', ':', ',', ';', '?', '¿', '!', '¡', '...', '/', '\'', '\\', '\"', '-', 'amp', '&', 'rt', '[', ']',
     '":', '--&',
     '(', ')', '|', '*', '+', '%', '$', '_', '@', 's', 'ap', '=', '}', '{', '**', '--', '()', '!!', '::', '||',
     '.:', ':.', '".', '))', '((', '’'])
def create_input_dir(event_name, event_ids, session):
    """Write the event's tweets -- one per line, stripped of '#' characters,
    URLs and @-mentions -- to data_simetrix/<event>/input_<event>.txt."""
    cleaned = []
    for tweet in events.get_tweets(event_name, event_ids, session):
        text = tweet.text.replace('#', '')
        text = re.sub(r"http\S+", '', text)
        text = re.sub(r"@\w+", '', text)
        cleaned.append(text + '\n')
    out_path = Path('data_simetrix', event_name, f'input_{event_name}.txt')
    with out_path.open('w') as tweets_file:
        tweets_file.writelines(cleaned)
def create_mappings(event_name):
    """Append one simetrix mapping line per system summary of the event:
    '<event> <summary-id> <source-path> <summary-path>'."""
    system_dir = Path(settings.LOCAL_DATA_DIR_2, 'data', event_name, 'summaries', 'system')
    summaries = [entry for entry in system_dir.iterdir() if entry.is_file()]
    source = Path('data_simetrix', event_name, f'input_{event_name}.txt')
    # source = Path(settings.LOCAL_DATA_DIR_2, 'data', event_name, 'summaries', 'reference')
    with Path('data_simetrix', 'mappings.txt').open('a') as mappings:
        for summary in summaries:
            # summary.name[:-4] drops the file extension to get the id.
            mappings.write(f'{event_name} {summary.name[:-4]} {source.absolute()} {summary.absolute()} \n')
def calculate_idf_background(session, tweets_text):
    """Write data_simetrix/bgIdfFreq.stemmed.txt: first line is the corpus
    size N, then one '<word> <idf>' line per stemmed token, where
    idf = log(N / (1 + occurrence count)).

    Note: counts are per token occurrence (not per document), matching the
    original behavior. `session` is unused but kept for interface
    compatibility with the sibling helpers.
    """
    from collections import Counter  # local import: keeps module imports untouched
    # tweets_text = get_tweets_text(session)
    docs_size = len(tweets_text)
    counts_words = Counter()
    for text in tqdm(tweets_text):
        for token in remove_and_stemming(text):
            # Strip whitespace/dots that survive tokenisation.
            token = token.replace('\n', '').replace(' ', '').replace('.', '').replace('\t', '')
            if token != '':
                counts_words[token] += 1
    with Path('data_simetrix', 'bgIdfFreq.stemmed.txt').open('w') as idf_background:
        idf_background.write(str(docs_size) + '\n')
        for word, count in counts_words.items():
            idf_value = log(docs_size / (1 + count))
            idf_background.write(f'{word} {idf_value} \n')
def calculate_background_corpus(session, tweets_text):
    """Write data_simetrix/bgFreqCounts.unstemmed.txt with '<word> <count>'
    lines for every lower-cased, stop-word-filtered, cleaned token, and
    return the resulting FreqDist. `session` is unused (kept for symmetry)."""
    #tweets_text = get_tweets_text(session)
    list_tokens = [
        [tok.lower() for tok in tknzr.tokenize(text) if tok.lower() not in stop_words]
        for text in tweets_text
    ]
    words = []
    for tokens in tqdm(list_tokens):
        for raw in tokens:
            # Strip whitespace/dots, then re-check against the stop words.
            cleaned = raw.replace('\n', '').replace(' ', '').replace('.', '').replace('\t', '')
            if cleaned not in stop_words and cleaned != '':
                words.append(cleaned)
    fdist_all = FreqDist(words)
    with Path('data_simetrix', 'bgFreqCounts.unstemmed.txt').open('w') as background_corpus:
        for word, count in tqdm(fdist_all.items()):
            background_corpus.write(f'{word} {count} \n')
    return fdist_all
def get_tweets_text(session, names=None):
    """Collect cleaned tweet texts ('#' stripped, URLs and @-mentions
    removed) for the given event-group names.

    names -- iterable of EventGroup names. Defaults to the script-level
             `events_names` for backward compatibility; the original read
             that global directly, which only exists when this module is
             run as a script, so passing `names` explicitly is preferred.
    """
    if names is None:
        names = events_names
    tweets = []
    for name in tqdm(names):
        event = session.query(EventGroup).filter(EventGroup.name == name).first()
        event_ids = list(map(int, event.event_ids.split(',')))
        tweets.extend(events.get_tweets(name, event_ids, session))
    return [re.sub(r"@\w+", '', re.sub(r"http\S+", '', tweet.text.replace('#', ''))) for tweet in tweets]
if __name__ == '__main__':
    # Autocommit session against the configured DB engine.
    Session = sessionmaker(engine, autocommit=True)
    session = Session()
    # events_names = ['hurricane_irma2', 'oscar_pistorius', 'nepal_earthquake', 'libya_hotel']
    events_names = ['hurricane_irma2']
    # Build the shared background-corpus files only on first run.
    if not Path('data_simetrix', 'bgFreqCounts.unstemmed.txt').exists():
        tweets_text = get_tweets_text(session)
        calculate_background_corpus(session, tweets_text)
        calculate_idf_background(session, tweets_text)
    for name in events_names:
        event = session.query(EventGroup).filter(EventGroup.name == name).first()
        event_ids = list(map(int, event.event_ids.split(',')))
        input_dir = Path('data_simetrix', name)
        # Only (re)create the per-event input file when its dir is missing.
        if not input_dir.exists():
            input_dir.mkdir()
            create_input_dir(name, event_ids, session)
        print("Creating Mapping file")
        create_mappings(name)
        # Run the external simetrix evaluator over the mapping file.
        subprocess.call(
            ['java', '-jar', 'simetrix.jar', 'data_simetrix/mappings.txt', 'data_simetrix/config.example'])
|
from django.db import models
from django.contrib.auth.models import User
class Pessoa(models.Model):
    """Person profile attached 1:1 to a Django auth User, holding contact
    details (phones and e-mail are optional)."""
    # Deleting the auth User also deletes this profile (CASCADE).
    usuario = models.OneToOneField(User,
                                   on_delete = models.CASCADE,
                                   verbose_name ='Usuário')
    nome = models.CharField('Nome',
                            max_length = 128)
    data_de_nascimento = models.DateField('Data de Nascimento',
                                          blank = True,
                                          null = True)
    telefone_celular = models.CharField('Telefone celular',
                                        max_length = 15,
                                        help_text = 'Número do telefone celular no formato (99) 99999-9999',
                                        null = True,
                                        blank = True)
    telefone_fixo = models.CharField('Telefone fixo',
                                     max_length = 15,
                                     help_text = 'Número do telefone fixo no formato (99) 9999-9999',
                                     null = True,
                                     blank = True)
    email = models.EmailField('E-mail',
                              null = True,
                              blank = True)
    def __str__(self):
        return self.nome
class Tag(models.Model):
    """Simple tag with a URL-friendly slug; attached to Noticia via M2M."""
    nome = models.CharField(max_length = 64)
    slug = models.SlugField(max_length = 64)
    def __str__(self):
        return self.nome
class Noticia(models.Model):
    """News article written by a Pessoa, tagged and optionally published."""
    class Meta:
        verbose_name = 'Notícias'
        verbose_name_plural = 'Notícias'
    titulo = models.CharField('Título', max_length=128)
    conteudo = models.TextField()
    # Timestamps are set by application code, not auto_now/auto_now_add.
    data_criacao = models.DateTimeField(blank = True, null = True)
    data_publicacao = models.DateTimeField(blank = True, null = True)
    publicado = models.BooleanField(default = True)
    # Deleting the author deletes their articles (CASCADE).
    autor = models.ForeignKey(Pessoa, on_delete = models.CASCADE, default = None)
    tags = models.ManyToManyField(Tag)
    def __str__(self):
        return self.titulo
class MensagemDeContato(models.Model):
    """Message submitted through the site's contact form."""
    class Meta:
        verbose_name = 'Mensagem de Contato'
        verbose_name_plural = 'Mensagens de Contato'
    nome = models.CharField(max_length=128)
    email = models.EmailField('E-mail', null = True, blank = True)
    mensagem = models.TextField()
    # Stamped automatically when the row is created.
    data = models.DateTimeField(auto_now_add = True)
    def __str__(self):
        return 'Mensagem de '+self.nome
from django.views.generic import DetailView
from braces.views import LoginRequiredMixin
from .models import User
class ProfileDetailView(LoginRequiredMixin, DetailView):
    '''
    Displays the user profile information
    '''
    model = User
    def get_object(self):
        # Get the currently logged in user
        # (no pk/slug lookup: the view always shows the requester's profile).
        return self.request.user
|
from django.contrib.auth import authenticate
from rest_framework import serializers
from account.models import MyUser
from account.utils import send_activation_code
class RegisterSerializer(serializers.ModelSerializer):
    """Registration payload: an e-mail plus the password entered twice.

    On success the user is created and an activation code is e-mailed.
    """

    password = serializers.CharField(min_length=6, write_only=True)
    password_confirm = serializers.CharField(min_length=6, write_only=True)

    class Meta:
        model = MyUser
        fields = ('email', 'password', 'password_confirm')

    def validate(self, validated_data):
        """Reject the payload when the two passwords differ."""
        if validated_data.get('password') != validated_data.get('password_confirm'):
            raise serializers.ValidationError('Password do not match')
        return validated_data

    def create(self, validated_data):
        """Create the user, then send the activation code to their e-mail."""
        user = MyUser.objects.create_user(
            email=validated_data.get('email'),
            password=validated_data.get('password'),
        )
        send_activation_code(email=user.email, activation_code=user.activation_code)
        return user
class LoginSerializer(serializers.Serializer):
    """Validates e-mail/password credentials via Django's authenticate()."""

    email = serializers.EmailField()
    password = serializers.CharField(
        label='password',
        style={'input_type': 'password'},
        trim_whitespace=False
    )

    def validate(self, attrs):
        """Authenticate the credentials; on success attach the user to attrs."""
        email = attrs.get('email')
        password = attrs.get('password')
        # Guard clause: both fields must be present before authenticating.
        if not (email and password):
            message = 'Must include email and password.'
            raise serializers.ValidationError(message, code='authorization')
        user = authenticate(request=self.context.get('request'), email=email, password=password)
        if not user:
            message = 'Unable to log in with provided credentials'
            raise serializers.ValidationError(message, code='authorization')
        attrs['user'] = user
        return attrs
|
# Generated by Django 3.0.7 on 2020-07-05 15:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: replaces testtest.correct_answer with four
    per-question YES/NO fields (defaulting to 'NO')."""
    dependencies = [
        ('english', '0011_auto_20200705_1949'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='testtest',
            name='correct_answer',
        ),
        migrations.AddField(
            model_name='testtest',
            name='correct_answer_four',
            field=models.CharField(choices=[('YES', 'yes'), ('NO', 'no')], default='NO', max_length=255),
        ),
        migrations.AddField(
            model_name='testtest',
            name='correct_answer_one',
            field=models.CharField(choices=[('YES', 'yes'), ('NO', 'no')], default='NO', max_length=255),
        ),
        migrations.AddField(
            model_name='testtest',
            name='correct_answer_three',
            field=models.CharField(choices=[('YES', 'yes'), ('NO', 'no')], default='NO', max_length=255),
        ),
        migrations.AddField(
            model_name='testtest',
            name='correct_answer_two',
            field=models.CharField(choices=[('YES', 'yes'), ('NO', 'no')], default='NO', max_length=255),
        ),
    ]
|
import tkinter as tk
from tkinter import font as tkfont
from tkinter import *
from tkinter import ttk
import tkinter.messagebox
import sqlite3
class SSIS:
    """Student Information System: a single-window Tk app backed by a local
    SQLite database (SSIS.db) with CRUD for courses and for students
    enrolled in them (students.stud_course references courses.c_code).

    All state and behavior live inside __init__ as Tk variables and nested
    closures wired directly to the buttons.
    """
    def __init__(self,root):
        # Window setup.
        self.root = root
        self.root.title("Student Information System")
        self.root.geometry("1300x700+0+0")
        self.root.config(bg="plum")
        # Tk variables bound to the entry widgets created below.
        c_code = StringVar()
        c_name = StringVar()
        c_search = StringVar()
        stud_id = StringVar()
        stud_name = StringVar()
        stud_yl = StringVar()
        stud_gender = StringVar()
        stud_course = StringVar()
        search = StringVar()
        #======================================COURSE========================================================
        def connectCourse():
            # Create the courses table on first run.
            conn = sqlite3.connect("SSIS.db")
            cur = conn.cursor()
            cur.execute("PRAGMA foreign_keys = ON")
            cur.execute("CREATE TABLE IF NOT EXISTS courses (c_code TEXT PRIMARY KEY, c_name TEXT)")
            conn.commit()
            conn.close()
        def addCourse():
            # Insert the entered course; any DB error (e.g. duplicate code)
            # ends in the generic failure message box.
            try:
                conn = sqlite3.connect("SSIS.db")
                cur = conn.cursor()
                cur.execute("INSERT INTO courses(c_code, c_name) VALUES (?,?)",\
                    (c_code.get(),c_name.get()))
                conn.commit()
                clearCourse()
                conn.close()
                tkinter.messagebox.showinfo("Student Information System", "Course Recorded Successfully")
                displayCourse()
            except:
                # NOTE(review): bare except hides the real error; catching
                # sqlite3.Error would be safer.
                tkinter.messagebox.showinfo("Student Information System", "Cannot Add Course")
        def displayCourse():
            # Reload the course tree from the table.
            conn = sqlite3.connect("SSIS.db")
            cur = conn.cursor()
            ctree.delete(*ctree.get_children())
            cur.execute("SELECT * FROM courses")
            rows = cur.fetchall()
            for row in rows:
                ctree.insert("", tk.END, text=row[0], values=row[0:])
            conn.close()
        def updateCourse():
            # Update the row whose old code is in the selected tree row;
            # ON UPDATE CASCADE propagates code changes to students.
            for selected in ctree.selection():
                conn = sqlite3.connect("SSIS.db")
                cur = conn.cursor()
                cur.execute("PRAGMA foreign_keys = ON")
                cur.execute("UPDATE courses SET c_code=?, c_name=? WHERE c_code=?", \
                    (c_code.get(),c_name.get(), ctree.set(selected, '#1')))
                conn.commit()
                tkinter.messagebox.showinfo("Student Information System", "Course Updated Successfully")
                displayCourse()
                clearCourse()
                conn.close()
        def editCourse():
            # Copy the selected tree row into the entry fields.
            if ctree.focus() == "":
                tkinter.messagebox.showerror("Student Information System", "Please select a record from the table.")
                return
            values = ctree.item(ctree.focus(), "values")
            c_code.set(values[0])
            c_name.set(values[1])
        def deleteCourse():
            try:
                messageDelete = tkinter.messagebox.askyesno("SSIS", "Do you want to permanently delete this record?")
                if messageDelete > 0:
                    conn = sqlite3.connect("SSIS.db")
                    cur = conn.cursor()
                    id_no = ctree.item(ctree.selection()[0])["values"][0]
                    cur.execute("PRAGMA foreign_keys = ON")
                    cur.execute("DELETE FROM courses WHERE c_code = ?",(id_no,))
                    conn.commit()
                    ctree.delete(ctree.selection()[0])
                    # NOTE(review): askyesno is presumably meant to be
                    # showinfo here -- confirm.
                    tkinter.messagebox.askyesno("Student Information System", "Course Deleted Successfully")
                    displayCourse()
                    conn.close()
            except:
                # FK constraint fails while students reference this course.
                tkinter.messagebox.showerror("Student Information System", "Students are still enrolled in this course")
        def searchCourse():
            # Exact-match search on the course code.
            conn = sqlite3.connect("SSIS.db")
            cur = conn.cursor()
            cur.execute("SELECT * FROM courses WHERE c_code = ?",(c_search.get(),))
            conn.commit()
            ctree.delete(*ctree.get_children())
            rows = cur.fetchall()
            for row in rows:
                ctree.insert("", tk.END, text=row[0], values=row[0:])
            conn.close()
        def RefreshCourse():
            displayCourse()
        def clearCourse():
            c_code.set('')
            c_name.set('')
        #======================================STUDENTS========================================================
        def connect():
            # Create the students table on first run; stud_course is a FK to
            # courses.c_code with ON UPDATE CASCADE.
            conn = sqlite3.connect("SSIS.db")
            cur = conn.cursor()
            cur.execute("PRAGMA foreign_keys = ON")
            cur.execute("CREATE TABLE IF NOT EXISTS students (stud_id TEXT PRIMARY KEY, stud_name TEXT, stud_course TEXT, \
                stud_yl TEXT, stud_gender TEXT, \
                FOREIGN KEY(stud_course) REFERENCES courses(c_code) ON UPDATE CASCADE)")
            conn.commit()
            conn.close()
        def addStud():
            # Require all fields, then validate the ID format YYYY-NNNN
            # before inserting.
            if stud_id.get() == "" or stud_name.get() == "" or stud_course.get() == "" or stud_yl.get() == "" or stud_gender.get() == "":
                tkinter.messagebox.showinfo("Student Information System", "Please fill in the box with *")
            else:
                ID = stud_id.get()
                ID_list = []
                for i in ID:
                    ID_list.append(i)
                a = ID.split("-")
                if len(a[0]) == 4:
                    if "-" in ID_list:
                        x = ID.split("-")
                        year = x[0]
                        number = x[1]
                        if year.isdigit()==False or number.isdigit()==False:
                            try:
                                tkinter.messagebox.showerror("Student Information System", "Invalid ID")
                            except:
                                pass
                        elif year==" " or number==" ":
                            try:
                                tkinter.messagebox.showerror("Student Information System", "Invalid ID")
                            except:
                                pass
                        else:
                            try:
                                conn = sqlite3.connect("SSIS.db")
                                cur = conn.cursor()
                                cur.execute("PRAGMA foreign_keys = ON")
                                cur.execute("INSERT INTO students(stud_id, stud_name, stud_course, stud_yl, stud_gender) VALUES (?,?,?,?,?)",\
                                    (stud_id.get(),stud_name.get(),stud_course.get(),stud_yl.get(),stud_gender.get()))
                                tkinter.messagebox.showinfo("Student Information System", "Student Recorded Successfully")
                                conn.commit()
                                clear()
                                displayStud()
                                conn.close()
                            except:
                                # Insert fails when stud_course has no matching
                                # courses row (FK) -- or on any other DB error.
                                tkinter.messagebox.showerror("Student Information System", "Course Unavailable")
                    else:
                        tkinter.messagebox.showerror("Student Information System", "Invalid ID")
                else:
                    tkinter.messagebox.showerror("Student Information System", "Invalid ID")
        def updateStud():
            if stud_id.get() == "" or stud_name.get() == "" or stud_course.get() == "" or stud_yl.get() == "" or stud_gender.get() == "":
                tkinter.messagebox.showinfo("Student Information System", "Please select a student")
            else:
                try:
                    # Old ID comes from the selected tree row ('#1').
                    for selected in stree.selection():
                        conn = sqlite3.connect("SSIS.db")
                        cur = conn.cursor()
                        cur.execute("PRAGMA foreign_keys = ON")
                        cur.execute("UPDATE students SET stud_id=?, stud_name=?, stud_course=?, stud_yl=?, stud_gender=?\
                            WHERE stud_id=?", (stud_id.get(),stud_name.get(),stud_course.get(),stud_yl.get(),stud_gender.get(),\
                            stree.set(selected, '#1')))
                        conn.commit()
                        tkinter.messagebox.showinfo("Student Information System", "Student Updated Successfully")
                        displayStud()
                        clear()
                        conn.close()
                except:
                    tkinter.messagebox.showerror("Student Information System", "Cannot Update Student")
        def deleteStud():
            try:
                messageDelete = tkinter.messagebox.askyesno("Student Information System", "Do you want to permanently delete this record?")
                if messageDelete > 0:
                    conn = sqlite3.connect("SSIS.db")
                    cur = conn.cursor()
                    x = stree.selection()[0]
                    id_no = stree.item(x)["values"][0]
                    cur.execute("DELETE FROM students WHERE stud_id=?",(id_no,))
                    conn.commit()
                    stree.delete(x)
                    tkinter.messagebox.showinfo("Student Information System", "Student Deleted Successfully")
                    displayStud()
                    conn.close()
            except:
                tkinter.messagebox.showinfo("Student Information System", "Cannot Delete Student")
        def searchStud():
            # Prefix search: fetch all rows, show those whose ID starts with
            # the query string.
            stud_ID = search.get()
            try:
                conn = sqlite3.connect("SSIS.db")
                cur = conn.cursor()
                cur .execute("PRAGMA foreign_keys = ON")
                cur.execute("SELECT * FROM students")
                conn.commit()
                stree.delete(*stree.get_children())
                rows = cur.fetchall()
                for row in rows:
                    if row[0].startswith(stud_ID):
                        stree.insert("", tk.END, text=row[0], values=row[0:])
                conn.close()
            except:
                tkinter.messagebox.showerror("Student Information System", "Invalid ID")
        def displayStud():
            # Reload the student tree from the table.
            stree.delete(*stree.get_children())
            conn = sqlite3.connect("SSIS.db")
            cur = conn.cursor()
            cur.execute("PRAGMA foreign_keys = ON")
            cur.execute("SELECT * FROM students")
            rows = cur.fetchall()
            for row in rows:
                stree.insert("", tk.END, text=row[0], values=row[0:])
            conn.close()
        def editStud():
            # Copy the selected tree row into the entry fields.
            x = stree.focus()
            if x == "":
                tkinter.messagebox.showerror("Student Information System", "Please select a record from the table.")
                return
            values = stree.item(x, "values")
            stud_id.set(values[0])
            stud_name.set(values[1])
            stud_course.set(values[2])
            stud_yl.set(values[3])
            stud_gender.set(values[4])
        def Refresh():
            displayStud()
        def clear():
            stud_id.set("")
            stud_name.set("")
            stud_course.set("")
            stud_yl.set("")
            stud_gender.set("")
        #======================================ENTRIES AND WIDGET========================================================
        # NOTE(review): each local name (courseCode, courseName, ...) is
        # first bound to the Label and then rebound to the Entry; the Label
        # stays alive only because place() keeps it managed.
        courseCode = Label(self.root, font=("Poppins", 12, "bold"), text="Course Code:", padx=5, pady=5, bg="plum")
        courseCode.place(x=125,y=500)
        courseCode = Entry(self.root, font=("Poppins", 13), textvariable=c_code, width=33)
        courseCode.place(x=260,y=505)
        courseName = Label(self.root, font=("Poppins", 12,"bold"), text="Course Name:", padx=5, pady=5, bg="plum")
        courseName.place(x=125,y=540)
        courseName = Entry(self.root, font=("Poppins", 13), textvariable=c_name, width=33)
        courseName.place(x=260, y=545)
        Search = Entry(self.root, font=("Poppins", 11), textvariable=c_search, width=29)
        Search.place(x=876,y=475)
        Search.insert(0,'Search course code here')
        StudentID = Label(self.root, font=("Poppins", 12, "bold"), text="Student ID:", padx=5, pady=5, bg="plum")
        StudentID.place(x=125,y=40)
        StudentIDFormat = Label(self.root, font=("Poppins", 8,"bold"), text="(YYYY - NNNN)", bg="plum")
        StudentIDFormat.place(x=255,y=70)
        StudentID = Entry(self.root, font=("Poppins", 13), textvariable=stud_id, width=33)
        StudentID.place(x=255,y=45)
        StudentName = Label(self.root, font=("Poppins", 12,"bold"), text="Full Name:", padx=5, pady=5, bg="plum")
        StudentName.place(x=125,y=100)
        StudentNAMEFormat = Label(self.root, font=("Poppins", 8,"bold"), text="LASTNAME, FISRTNAME MIDDLEINITIAL", bg="plum")
        StudentNAMEFormat.place(x=255,y=130)
        StudentName = Entry(self.root, font=("Poppins", 13), textvariable=stud_name, width=33)
        StudentName.place(x=255,y=105)
        StudentCourse = Label(self.root, font=("Poppins", 12,"bold"), text="Course:", padx=5, pady=5, bg="plum")
        StudentCourse.place(x=125,y=160)
        StudentCourse = Entry(self.root, font=("Poppins", 13), textvariable=stud_course, width=33)
        StudentCourse.place(x=255,y=165)
        StudentYearLevel = Label(self.root, font=("Poppins", 12,"bold"), text="Year Level:", padx=5, pady=5, bg="plum")
        StudentYearLevel.place(x=125,y=200)
        StudentYearLevel = ttk.Combobox(self.root,
            value=["1st Year", "2nd Year", "3rd Year", "4th Year", "5th Year"],
            state="readonly", font=("Poppins", 13), textvariable=stud_yl,
            width=31)
        StudentYearLevel.place(x=255,y=205)
        StudentGender = Label(self.root, font=("Poppins", 12,"bold"), text="Gender:", padx=5, pady=5, bg="plum")
        StudentGender.place(x=125,y=240)
        StudentGender = ttk.Combobox(self.root, value=["Male", "Female"], font=("Poppins", 13),
            state="readonly", textvariable=stud_gender, width=31)
        StudentGender.place(x=255,y=245)
        SearchBar = Entry(self.root, font=("Poppins", 11), textvariable=search, width=29)
        SearchBar.place(x=876,y=10)
        SearchBar.insert(0,'Search ID here')
        #======================================TREEVIEW========================================================
        # NOTE(review): a single scrollbar is configured for ctree and then
        # re-configured for stree, so ctree loses its scrollbar command --
        # presumably a bug; confirm.
        scrollbar = Scrollbar(self.root, orient=VERTICAL)
        ctree = ttk.Treeview(self.root,
            columns=("Course Code", "Course Name"),
            height = 5,
            yscrollcommand=scrollbar.set)
        ctree.heading("Course Code", text="Course Code", anchor=W)
        ctree.heading("Course Name", text="Course Name",anchor=W)
        ctree['show'] = 'headings'
        ctree.column("Course Code", width=200, anchor=W, stretch=False)
        ctree.column("Course Name", width=430, stretch=False)
        ctree.place(x=575,y=500)
        scrollbar.config(command=ctree.yview)
        stree = ttk.Treeview(self.root,
            columns=("ID Number", "Name", "Course", "Year Level", "Gender"),
            height = 13,
            yscrollcommand=scrollbar.set)
        stree.heading("ID Number", text="ID Number", anchor=W)
        stree.heading("Name", text="Name",anchor=W)
        stree.heading("Course", text="Course",anchor=W)
        stree.heading("Year Level", text="Year Level",anchor=W)
        stree.heading("Gender", text="Gender",anchor=W)
        stree['show'] = 'headings'
        stree.column("ID Number", width=100, anchor=W, stretch=False)
        stree.column("Name", width=200, stretch=False)
        stree.column("Course", width=130, anchor=W, stretch=False)
        stree.column("Year Level", width=100, anchor=W, stretch=False)
        stree.column("Gender", width=100, anchor=W, stretch=False)
        stree.place(x=575,y=40)
        scrollbar.config(command=stree.yview)
        #======================================BUTTONS========================================================
        btnAddCourse = Button(self.root, text="ADD", font=('Poppins', 10), height=1, width=10, bd=4, bg="sky blue",command=addCourse)
        btnAddCourse.place(x=240,y=600)
        btnUpdateCourse = Button(self.root, text="UPDATE", font=('Poppins', 10), height=1, width=10, bd=4, bg="sky blue",command=updateCourse)
        btnUpdateCourse.place(x=350,y=600)
        btnClearCourse = Button(self.root, text="CLEAR", font=('Poppins', 10), height=1, width=10, bd=4, bg="sky blue",command=clearCourse)
        btnClearCourse.place(x=130,y=600)
        btnDeleteCourse = Button(self.root, text="DELETE", font=('Poppins', 10), height=1, width=10, bd=4,bg="sky blue",command=deleteCourse)
        btnDeleteCourse.place(x=460,y=600)
        btnSelectCourse = Button(self.root, text="Select", font=('Poppins', 10), height=1, width=11,bg="pink",command=editCourse)
        btnSelectCourse.place(x=575,y=465)
        btnSearchCourse = Button(self.root, text="Search", font=('Poppins', 10), height=1, width=10, bg="pink",command=searchCourse)
        btnSearchCourse.place(x=1117,y=465)
        btnRefreshCourse = Button(self.root, text="Show All", font=('Poppins', 10), height=1, width=11,bg="pink",command=RefreshCourse)
        btnRefreshCourse.place(x=685,y=465)
        btnAddID = Button(self.root, text="Add", font=('Poppins', 10), height=1, width=10, bd=4,bg="sky blue",command=addStud)
        btnAddID.place(x=240,y=300)
        btnUpdate = Button(self.root, text="Update", font=('Poppins', 10), height=1, width=10, bd=4, bg="sky blue",command=updateStud)
        btnUpdate.place(x=350,y=300)
        btnClear = Button(self.root, text="Clear", font=('Poppins', 10), height=1, width=10, bd=4,bg="sky blue", command=clear)
        btnClear.place(x=130,y=300)
        btnDelete = Button(self.root, text="Delete", font=('Poppins', 10), height=1, width=10, bd=4,bg="sky blue",command=deleteStud)
        btnDelete.place(x=460,y=300)
        btnSelect = Button(self.root, text="Select", font=('Poppins', 10), height=1, width=11,bg="pink",command=editStud)
        btnSelect.place(x=575,y=10)
        btnSearch = Button(self.root, text="Search", font=('Poppins', 10), height=1, width=10, bg="pink",command=searchStud)
        btnSearch.place(x=1117,y=10)
        btnRefresh = Button(self.root, text="Show All", font=('Poppins', 10), height=1, width=11,bg="pink",command=Refresh)
        btnRefresh.place(x=685,y=10)
        #=======================================================================================================
        # Ensure tables exist and populate both trees on startup.
        connectCourse()
        displayCourse()
        connect()
        displayStud()
if __name__ == '__main__':
    # Build the Tk root window and hand it to the application class.
    root = Tk()
    application = SSIS(root)
    root.mainloop()
import logging
from typing import Any, Dict, Tuple
class NullHandler(logging.Handler):
    # Type stub: emit is declared but elided (`...`); in botocore this
    # handler exists to silence "no handler" warnings for library loggers.
    def emit(self, record: Any) -> None: ...
# Module-level constants declared by this stub; their values are defined
# in the real botocore package.
ScalarTypes: Tuple[str, ...]
BOTOCORE_ROOT: str
class UNSIGNED:
    # Type stub for botocore's UNSIGNED sentinel; copy/deepcopy are
    # declared to return the same type (bodies elided in the stub).
    def __copy__(self) -> UNSIGNED: ...
    def __deepcopy__(self, memodict: object) -> UNSIGNED: ...
# Type-stub signature only; defaults are elided with `...`.
# NOTE(review): in botocore this presumably transforms operation names
# (CamelCase -> snake_case) with a memo cache -- implementation not visible here.
def xform_name(
    name: str, sep: str = ..., _xform_cache: Dict[Tuple[str, str], str] = ...
) -> str: ...
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from osv import fields, osv
from tools.translate import _
import jasperclient
import base64
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class wizard_report_incomming(osv.osv_memory):
    """Wizard that renders the 'incomming' picking report on a
    JasperServer instance and stores the result (base64) on the wizard
    record for download.
    """

    def act_cancel(self, cr, uid, ids, context=None):
        # Close the wizard window without doing anything.
        return {'type': 'ir.actions.act_window_close'}

    def act_destroy(self, *args):
        # Same as act_cancel; kept for view button compatibility.
        return {'type': 'ir.actions.act_window_close'}

    def create_report(self, cr, uid, ids, context=None):
        """Run the Jasper report for the selected pickings.

        Builds a filter over the active stock_picking ids, asks the
        configured JasperServer to render the report in the chosen
        format, and writes the base64-encoded result back onto the
        wizard (state 'get').
        """
        this = self.browse(cr, uid, ids)[0]
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        datas['model'] = 'wizard.report.incomming'
        datas['form'] = self.read(cr, uid, ids, context=context)[0]
        # read() returns many2one values as (id, name) tuples; keep the id.
        for field in datas['form'].keys():
            if isinstance(datas['form'][field], tuple):
                datas['form'][field] = datas['form'][field][0]
        criteries = None
        report = None
        if datas['ids']:
            # Build an SQL-style filter like "stock_picking.id in (1, 2)".
            criteries = 'stock_picking.id in ' + str(tuple(sorted(datas['ids'])))
            # Fix single-element tuples: "(1,)" -> "(1)".
            criteries = criteries.replace(',)', ')')
        if criteries:
            config_pool = self.pool.get('omg.configuration')
            host_ids = config_pool.search(cr, uid, [('type', '=', 'jasper')])
            # BUG FIX: the original referenced host_obj below even when no
            # jasper configuration existed, which crashed with a NameError.
            # Raise a clear, user-visible error instead.
            if not host_ids:
                raise osv.except_osv(
                    _('Configuration Error'),
                    _('No JasperServer configuration found.'))
            host_obj = config_pool.browse(cr, uid, host_ids)[0]
            hostname = host_obj['host']
            url = 'http://' + hostname + ':8000/jasperserver/services/repository?wsdl'
            j = jasperclient.JasperClient(url, host_obj.username, host_obj.password)
            a = j.runReport('/Openerp/OA/omg-incomming-order', this.format,
                            {'pick_ids': criteries})
            this.name = "%s.%s" % ('incomming', this.format)
            # Encode the raw report payload for the binary field.
            buf = StringIO()
            buf.write(a['data'])
            report = base64.encodestring(buf.getvalue())
            buf.close()
        return self.write(cr, uid, ids,
                          {'state': 'get', 'report': report, 'name': this.name},
                          context=context)

    _name = "wizard.report.incomming"
    _description = "New Incomming Report Style"
    _columns = {
        'name': fields.char('Filename', 16, readonly=True),
        'report': fields.binary('Report File'),
        'state': fields.selection( ( ('choose','choose'), ('get','get'), ) ),
        'format': fields.selection( ( ('csv','CSV File'), ('pdf','PDF File'), ('xls', 'Excel 2003')), 'File Format', required=True),
    }
    _defaults = {
        'state': lambda *a: 'choose',
        'name': lambda *a: 'incomming.pdf'
    }
wizard_report_incomming()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-04 14:57
from __future__ import unicode_literals
from django.db import migrations
def load_articles(apps, schema_editor):
    """Copy every aldryn_newsblog Article into the new articles app.

    Uses historical models via ``apps.get_model`` (required inside a
    migration).  Each copied article is saved as a draft, then its
    categories are copied across after the save.
    """
    Article = apps.get_model('articles', 'Article')
    OldArticle = apps.get_model('aldryn_newsblog', 'Article')
    # NOTE: the original also fetched taggit's Tag model here but never
    # used it; the dead lookup has been removed.
    for old_article in OldArticle.objects.all():
        article = Article()
        article.created_by = old_article.owner
        article.publishing_date = old_article.publishing_date
        article.content = old_article.content
        article.featured_image = old_article.featured_image
        article.is_draft = True
        article.save()
        # copy categories (after save, so the article has a primary key)
        for category in old_article.categories.all():
            article.categories.add(category)
def unload_articles(apps, schema_editor):
    """Reverse migration: delete every article created by load_articles."""
    article_model = apps.get_model('articles', 'Article')
    article_model.objects.all().delete()
class Migration(migrations.Migration):
    # Data migration: forward pass copies articles out of aldryn_newsblog,
    # reverse pass deletes everything load_articles created.
    dependencies = [
        ('articles', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(load_articles, unload_articles),
    ]
|
#!/usr/bin/env python
import optparse
import time
def check_time(time_str):
    """Return True if time_str is a valid 24-hour HH:MM time string.

    The parameter was renamed from `input`, which shadowed the builtin.
    """
    try:
        time.strptime(time_str, '%H:%M')
        return True
    except ValueError:
        return False
def set_commands_for_sssr():
    # Placeholder: would schedule the on/off `at` jobs from sunset/sunrise.
    # Reads the module-global `options` parsed in the __main__ block.
    print 'setting commands for sssr', options.sssr
def set_commands_for_time():
    # Placeholder: would schedule the on/off `at` jobs at the given time.
    # Reads the module-global `options` parsed in the __main__ block.
    print 'setting commands for time', options.time
if __name__ == '__main__':
    usage = 'Tool for creating an at job to turn on or off tellstick devices'
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-c', '--command',
                      type='choice',
                      dest='command',
                      choices=['on', 'off'],
                      action='store',
                      help='Turn on or off')
    parser.add_option('-s', '--sssr',
                      action='store_true',
                      default=False,
                      help='Use sunset and sunrise for time')
    parser.add_option('-t', '--time',
                      action='store',
                      dest='time',
                      help='Use supplied time, ex 12:00')
    parser.add_option('-l', '--long',
                      action='store',
                      dest='long',
                      help='Longitude for location')
    parser.add_option('-a', '--lat',
                      action='store',
                      dest='lat',
                      help='Latitude for location')
    (options, args) = parser.parse_args()
    # print options
    # print 'sssr:', options.sssr
    # print 'time:', options.time
    # Rejects both "sssr AND time given" and "neither given": exactly one
    # of --sssr / --time must be supplied.
    if (options.sssr and options.time is not None or
        options.time is None and not options.sssr):
        parser.error('you must use either time or sssr')
    # NOTE(review): the message says "both lat and long", but this only
    # fires when BOTH are missing -- a single missing coordinate slips
    # through.  Likely `or` was intended; TODO confirm.
    elif options.lat is None and options.long is None and options.time is None:
        parser.error('you need to set both lat and long')
    elif options.sssr:
        # get on and off time from sunrise sunset
        set_commands_for_sssr()
    elif options.time is not None and options.command is None:
        parser.error('you need to set command when using time')
    if options.time is not None and options.command is not None:
        if check_time(options.time):
            # commands
            set_commands_for_time()
        else:
            parser.error('invalid time specified')
|
import sys
# NOTE(review): the script reads n from teams.txt but then hard-codes
# n = 1 and a fixed input line below -- leftover debugging scaffolding;
# restore the commented-out input()/readline() lines for real use.
f = open('teams.txt')
n = int(f.readline().strip())
#n = int(input().strip())
n = 1
if n == 0:
    print("0")
    sys.exit(0)
for student_group in range(0, n):
    # line = [int(x) for x in input().strip().split(' ')]
    # line = [int(x) for x in f.readline().strip().split(' ')]
    line = [int(x) for x in "10 4048 4048 4050 4046 4047 4049 4048 4049 4047 4047".split()]
    m = line[0]
    # Greedily peel off runs of consecutive skill values from the sorted
    # list; each completed run's length is recorded in `groups`.
    s = sorted(line[1:])
    groups = list()
    prev_skill = None
    l = 0
    i = 0
    # NOTE(review): the three overlap* variables below are written but
    # never read -- apparently dead leftovers from an earlier approach.
    prev_overlap_degree = 0
    overlap_degree = 0
    overlap = False
    while len(s):
        if prev_skill:
            if i >= len(s):
                # ran off the end: close the current run
                groups.append(l)
                if l == 1:
                    # a run of length 1 can never be beaten; stop early
                    break
                i = 0
                l = 0
                prev_skill = None
                overlap = False
            elif s[i] == prev_skill + 1:
                # next consecutive value: extend the run and consume it
                l += 1
                prev_skill += 1
                s.pop(i)
            elif s[i] == prev_skill:
                # duplicate of the current value: decide whether to close
                # the run based on what follows
                if i + 2 < len(s):
                    if s[i + 1] > s[i] and not s[i + 1] == s[i + 2]:
                        groups.append(l)
                        if l == 1:
                            break
                        i = 0
                        l = 0
                        prev_skill = None
                        continue
                elif i + 1 < len(s):
                    if s[i + 1] > s[i]:
                        groups.append(l)
                        if l == 1:
                            break
                        i = 0
                        l = 0
                        prev_skill = None
                        continue
                i += 1
            else:
                # gap in the sequence: close the current run
                groups.append(l)
                if l == 1:
                    break
                i = 0
                l = 0
                prev_skill = None
                overlap = False
        else:
            # start a new run from the current smallest unconsumed value
            l += 1
            prev_skill = s[i]
            s.pop(i)
    groups.append(l)
    print(min(groups))
|
"""This module is part of Swampy, a suite of programs available from
allendowney.com/swampy.
Copyright 2011 Allen B. Downey
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
from __future__ import print_function, division
import optparse
import os
import copy
import random
import sys
import string
import time
# the following definitions can be accessed in the simulator
# CURRENT_THREAD is set by Thread.exec_line before each simulated step.
CURRENT_THREAD = None
def noop(*args):
    """A handy function that does nothing."""
def balk():
    """Jumps to the top of the column."""
    CURRENT_THREAD.balk()
class Semaphore:
    """A counting semaphore for the simulator.

    Threads that block are parked in a queue and woken in random order.
    """
    def __init__(self, n=0):
        self.n = n
        self.queue = []

    def __str__(self):
        return str(self.n)

    def wait(self):
        """Decrement the counter, blocking the current thread if it goes
        negative.  Returns the new counter value."""
        self.n -= 1
        if self.n < 0:
            self.block()
        return self.n

    def block(self):
        """Park the currently running thread on this semaphore."""
        blocked = CURRENT_THREAD
        blocked.enqueue()
        self.queue.append(blocked)

    def signal(self, n=1):
        """Increment the counter n times, waking one parked thread per
        increment when any are waiting."""
        remaining = n
        while remaining > 0:
            remaining -= 1
            self.n += 1
            if self.queue:
                self.unblock()

    def unblock(self):
        """Chooses a random thread and unblocks it."""
        lucky = random.choice(self.queue)
        self.queue.remove(lucky)
        lucky.dequeue()
        lucky.next_loop()
class FifoSemaphore(Semaphore):
    """Semaphore variant that wakes threads in arrival order."""
    def unblock(self):
        """Wakes the thread that has waited the longest."""
        oldest = self.queue.pop(0)
        oldest.dequeue()
        oldest.next_loop()
class Lightswitch:
    """Encapsulates the lightswitch pattern.

    The first thread in acquires the shared semaphore; the last one
    out releases it.  A mutex protects the holder count.
    """
    def __init__(self):
        self.counter = 0
        self.mutex = Semaphore(1)

    def lock(self, semaphore):
        """Register one more holder; the first holder waits on semaphore."""
        self.mutex.wait()
        self.counter += 1
        first_in = self.counter == 1
        if first_in:
            semaphore.wait()
        self.mutex.signal()

    def unlock(self, semaphore):
        """Drop one holder; the last holder signals semaphore."""
        self.mutex.wait()
        self.counter -= 1
        last_out = self.counter == 0
        if last_out:
            semaphore.signal()
        self.mutex.signal()
def pid():
    """Gets the ID of the current thread."""
    return CURRENT_THREAD.name
def num_threads():
    """Gets the number of threads."""
    # column.p is the Sync simulator that owns all columns and threads
    sync = CURRENT_THREAD.column.p
    return len(sync.threads)
# make globals and locals for the simulator
SIM_GLOBALS = copy.copy(globals())
SIM_LOCALS = dict()
# anything defined after this point is not available inside the simulator
try:
from tkinter import N, S, E, W, TOP, BOTTOM, LEFT, RIGHT, END
except ImportError:
from Tkinter import N, S, E, W, TOP, BOTTOM, LEFT, RIGHT, END
from Gui import Gui, GuiCanvas
ALL_THREAD_NAMES = string.ascii_uppercase + string.ascii_lowercase
FONT = ("Ubuntu Mono", 24)
FSU = 24 # FSU, the fundamental Sync unit,
# determines the size of most things.
class Sync(Gui):
    """Represents the thread simulator."""
    def __init__(self, args=['']):
        # NOTE(review): the mutable default is harmless here -- args is
        # only read (by parse_args), never mutated.
        Gui.__init__(self)
        self.parse_args(args)
        self.namer = Namer()
        self.locals = SIM_LOCALS
        self.globals = SIM_GLOBALS
        # views is a map from a variable name to the row that
        # should be updated when the variable changes
        self.views = {}
        self.w = self
        self.threads = []
        self.running = False
        self.delay = 0.2
        self.setup()
        self.run_init()
        # give each code column one initial thread
        for col in self.cols:
            col.create_thread()
    def parse_args(self, args):
        """Parses the -w/-s options and an optional filename argument."""
        parser = optparse.OptionParser()
        parser.add_option('-w', '--write', dest='write',
                          action='store_true', default=False,
                          help='Write thread code in code subdirectory?')
        parser.add_option('-s', '--side', dest='initside',
                          action='store_true', default=False,
                          help='Move the initialization code to the left side?')
        (self.options, args) = parser.parse_args(args)
        if args:
            self.filename = args[0]
        else:
            self.filename = ''
    def get_name(self, name=None):
        """Returns a (name, color) pair for a new thread."""
        return self.namer.next(name)
    def get_threads(self):
        return self.threads
    def set_global(self, **kwds):
        """Adds the keyword arguments to the simulator globals."""
        self.globals.update(kwds)
    def get_global(self, attr):
        return self.globals[attr]
    def destroy(self):
        """Closes the top window."""
        self.running = False
        Gui.destroy(self)
    def setup(self):
        """Makes the GUI."""
        if self.filename:
            # load code from the file and build columns from its blocks
            self.read_file(self.filename)
            self.make_columns()
            if self.options.write:
                self.write_files(self.filename)
            return
        # no file: start with an empty init column and two code columns
        self.topcol = Column(self, n=5)
        self.colfr = self.fr()
        self.cols = [Column(self, LEFT, n=5) for i in range(2)]
        self.bu(side=RIGHT, text='Add\ncolumn', font=FONT, command=self.add_col)
        self.endfr()
        self.buttons()
    def buttons(self):
        """Makes the buttons."""
        self.row([1, 1, 1, 1, 1])
        self.bu(text='Run', font=FONT, command=self.run)
        self.bu(text='Random Run', font=FONT, command=self.random_run)
        self.bu(text='Stop', font=FONT, command=self.stop)
        self.bu(text='Step', font=FONT, command=self.step)
        self.bu(text='Random Step', font=FONT, command=self.random_step)
        self.endfr()
    def register(self, thread):
        """Adds a new thread."""
        self.threads.append(thread)
    def unregister(self, thread):
        """Removes a thread."""
        self.threads.remove(thread)
    def run(self):
        """Runs the simulator with round-robin scheduling."""
        self.run_helper(self.step)
    def random_run(self):
        """Runs the simulator with random scheduling."""
        self.run_helper(self.random_step)
    def run_helper(self, step=None):
        """Runs the threads until someone clears self.running."""
        self.running = True
        while self.running:
            step()
            self.update()
            time.sleep(self.delay)
    def step(self):
        """Advances all the threads in order"""
        for thread in self.threads:
            thread.step_loop()
    def random_step(self):
        """Advances one random thread."""
        # only threads that are not blocked on a semaphore can run
        threads = [thread for thread in self.threads if not thread.queued]
        if not threads:
            print('There are currently no threads that can run.')
            return
        thread = random.choice(threads)
        thread.step_loop()
    def stop(self):
        """Stops running."""
        self.running = False
    def read_file(self, filename):
        """Read a file that contains code for the simulator to execute.

        Lines that start with ## do not appear
        in the display.

        A line that starts with "## thread" indicates the beginning of
        a new column of code.

        Returns a list of blocks where each block is a list of lines.
        """
        def is_new_thread(line):
            # a "## thread" marker starts a new block
            if line[0:2] != '##':
                return False
            words = line.strip('#').split()
            word = words[0].lower()
            return word == 'thread'
        self.blocks = []
        block = []
        self.blocks.append(block)
        fp = open(filename)
        for line in fp:
            line = line.rstrip()
            if is_new_thread(line):
                block = []
                self.blocks.append(block)
            else:
                block.append(line)
        fp.close()
    def make_columns(self):
        """Adds the code in self.blocks to the GUI."""
        if not self.blocks:
            return
        # blocks[0] is the init code; the rest become thread columns
        side = LEFT if self.options.initside else TOP
        self.topcol = TopColumn(self, side=side)
        self.topcol.add_rows(self.blocks[0])
        self.colfr = self.fr()
        self.cols = []
        self.endfr()
        for block in self.blocks[1:]:
            col = self.add_col(0)
            col.add_rows(block)
        self.buttons()
    def write_files(self, filename, dirname='book_code'):
        """Writes the code into separate files for the init and threads.

        filename: name of the file we read
        dirname: name of the destination subdirectory

        Destination is a subdirectory of the directory the filename is in.
        """
        path, filename = os.path.split(filename)
        dest = os.path.join(path, dirname, filename)
        block = self.blocks[0]
        self.write_file(block, dest, 0)
        for i, block in enumerate(self.blocks[1:]):
            self.write_file(block, dest, i+1)
    def write_file(self, block, filename, suffix=0):
        """Writes one trimmed block to <filename>.<suffix>."""
        trim_block(block)
        name = '%s.%s' % (filename, str(suffix))
        fp = open(name, 'w')
        for line in block:
            fp.write(line + '\n')
        fp.close()
    def add_col(self, n=5):
        """Adds a new column of code to the display."""
        self.pushfr(self.colfr)
        col = Column(self, LEFT, n)
        self.cols.append(col)
        self.popfr()
        return col
    def run_init(self):
        """Runs the initialization code in the top column."""
        if not self.topcol.num_rows():
            return
        print('running init')
        self.clear_views()
        self.views = {}
        # run a throwaway thread over the top column until it falls off
        thread = Thread(self.topcol, name='0')
        while True:
            thread.step()
            if thread.row == None:
                break
        self.unregister(thread)
    def update_views(self):
        """Loops through the views and updates them."""
        for key, view in self.views.items():
            view.update(self.locals[key])
    def clear_views(self):
        """Loops through the views and clears them."""
        for view in self.views.values():
            view.clear()
    def qu(self, **options):
        """Makes a queue."""
        return self.widget(QueueCanvas, **options)
def subtract(d1, d2):
    """Subtracts two dictionaries.

    Returns a new dictionary containing all the keys from
    d1 that are not in d2.
    """
    return {key: value for key, value in d1.items() if key not in d2}
def diff_dict(d1, d2):
    """Diffs two dictionaries.

    Returns two dictionaries: the first contains all the keys
    from d1 that are not in d2; the second contains all the keys
    that are in both dictionaries, but which have different values.

    Values in both result dictionaries come from d1.
    """
    only_in_d1 = {}
    changed = {}
    for key in d1:
        if key not in d2:
            only_in_d1[key] = d1[key]
        # BUG FIX: compare by value equality, not identity.  The original
        # used `is not`, which wrongly reported equal-but-distinct objects
        # (e.g. equal lists, large ints) as changed.
        elif d1[key] != d2[key]:
            changed[key] = d1[key]
    return only_in_d1, changed
def trim_block(block):
    """Removes comments from the beginning and empty lines from the end."""
    if block:
        if block[0].startswith('#'):
            del block[0]
    while block and block[-1].strip() == '':
        del block[-1]
"""
The following classes define the composite objects that make
up the display: Row, TopRow, Column and TopColumn. They are
all subclasses of Widget.
"""
class Widget:
    """Base class for all display objects.

    Every widget remembers its immediate parent (p) and the top-most
    container (w); concrete subclasses build themselves in setup().
    """
    def __init__(self, p, *args, **options):
        self.w = p.w
        self.p = p
        self.setup(*args, **options)
class Row(Widget):
    """A row of code.

    Each row contains two queues, runnable and queued,
    and an entry that contains a line of code.
    """
    def setup(self, text=''):
        # tag of the text item currently displayed on the runnable canvas
        self.tag = None
        self.fr = self.w.row([0, 0, 1])
        self.queued = self.w.qu(side=LEFT, n=3)
        self.runnable = self.w.qu(side=LEFT, n=3, label='Run')
        self.en = self.w.en(side=LEFT, font=FONT)
        # grow the entry as the user types
        self.en.bind('<Key>', self.keystroke)
        self.w.endrow()
        self.put(text)
    def update(self, val):
        """Updates the text in the runnable widget.

        val: value to display (can be anything that provides str)
        """
        # TODO: maybe config existing text rather than delete
        if self.tag:
            self.clear()
        text = str(val)
        self.tag = self.runnable.display_text(text)
    def clear(self):
        """Removes the previously displayed value, if any."""
        self.runnable.delete(self.tag)
    def keystroke(self, event=None):
        "resize the entry whenever the user types a character"
        self.entry_size()
    def entry_size(self):
        "resize the entry"
        text = self.get()
        width = self.en.cget('width')
        l = len(text) + 2
        if l > width:
            self.en.configure(width=l)
    def add_thread(self, thread):
        # show the thread in this row's runnable queue
        self.runnable.add_thread(thread)
    def remove_thread(self, thread):
        self.runnable.remove_thread(thread)
    def enqueue_thread(self, thread):
        # show the thread in this row's blocked queue
        self.queued.add_thread(thread)
    def dequeue_thread(self, thread):
        self.queued.remove_thread(thread)
    def put(self, text):
        """Replaces the contents of the entry with text."""
        self.en.delete(0, END)
        self.en.insert(0, text)
        self.entry_size()
    def get(self):
        """Returns the contents of the entry."""
        return self.en.get()
class TopRow(Row):
    """Rows in the initialization code at the top.

    The top row is special because there is no queue for
    queued threads, and the "runnable" queue is actually used
    to display the value of variables.
    """
    def setup(self, text=''):
        Row.setup(self, text)
        # drop the blocked-thread queue and blank the runnable canvas,
        # which is reused to display variable values
        self.queued.destroy()
        self.runnable.delete('all')
class Column(Widget):
    """A list of rows and a few buttons."""
    def setup(self, side=TOP, n=0, row_factory=Row):
        self.fr = self.w.fr(side=side, bd=3)
        # row_factory lets TopColumn substitute TopRow for Row
        self.row_factory = row_factory
        self.rows = [self.row_factory(self) for i in range(n)]
        self.buttons = self.w.row([1, 1], side=BOTTOM)
        self.bu1 = self.w.bu(text='Create thread', font=FONT,
                             command=self.create_thread)
        self.bu2 = self.w.bu(text='Add row', font=FONT,
                             command=self.add_row)
        self.w.endrow()
        self.w.endfr()
    def num_rows(self):
        """Returns the number of rows in this column."""
        return len(self.rows)
    def add_rows(self, block, keep_blanks=False):
        """Adds one row per line of block, skipping blanks by default."""
        for line in block:
            if line or keep_blanks:
                self.add_row(line)
    def add_row(self, text=''):
        """Appends a new row containing text."""
        self.w.pushfr(self.fr)
        row = self.row_factory(self, text)
        self.w.popfr()
        self.rows.append(row)
    def create_thread(self):
        """Creates (and registers) a new thread running in this column."""
        new = Thread(self)
        return new
    def next_row(self, row):
        """Returns the row after row (the first row if row is None),
        or None when row is the last one."""
        if row is None:
            return self.rows[0]
        index = self.rows.index(row)
        try:
            return self.rows[index+1]
        except IndexError:
            return None
class TopColumn(Column):
    """The top column where the initialization code is.

    The top column is different from the other columns in
    two ways: it has different buttons, and it uses the TopRow
    constructor to make new rows rather than the Row constructor.
    """
    def setup(self, side=TOP, n=0, row_factory=TopRow):
        Column.setup(self, side, n, row_factory)
        # repurpose the "Create thread" button to run the init code
        self.bu1.configure(text='Run initialization', font=FONT,
                           command=self.p.run_init)
class QueueCanvas(GuiCanvas):
    """Displays the runnable and queued threads."""
    def __init__(self, w, n=1, label='Queue'):
        # n: rough capacity used only to size the canvas
        self.n = n
        self.label = label
        width = 2 * n * FSU
        height = 3 * FSU
        GuiCanvas.__init__(self, w, width=width, height=height,
                           transforms=[])
        self.threads = []
        self.setup()
    def setup(self):
        # draw the label in the top-left corner
        self.text([3, 15], self.label, font=FONT, anchor=W, fill='white')
    def add_thread(self, thread):
        """Adds a thread to this canvas and redraws the queue."""
        self.undraw_queue()
        self.threads.append(thread)
        self.draw_queue()
    def remove_thread(self, thread):
        """Removes a thread from this canvas and redraws the queue."""
        self.undraw_queue()
        self.threads.remove(thread)
        self.draw_queue()
    def draw_queue(self):
        """Draws the threads left to right, wrapping to a new line
        when the row is full."""
        x = FSU
        y = FSU
        r = 0.9 * FSU
        for thread in self.threads:
            self.draw_thread(thread, x, y, r)
            x += 1.5*r
            if x > self.get_width():
                x = FSU
                y += 1.5*r
    def undraw_queue(self):
        for thread in self.threads:
            self.delete(thread.tag)
    def draw_thread(self, thread, x=FSU, y=FSU, r=0.9*FSU):
        """Draws one thread as a labeled circle; clicking it steps it."""
        thread.tag = 'Thread' + thread.name
        self.circle([x, y], r, fill=thread.color, tags=thread.tag)
        font = ('FONT', int(r+3))
        self.text([x, y], thread.name, font=font, tags=thread.tag)
        self.tag_bind(thread.tag, '<Button-1>', thread.step_loop)
    def undraw_thread(self, thread):
        self.delete(thread.tag)
    def display_text(self, text):
        """Displays text on this canvas.

        text: string
        """
        tag = self.text([15, 15], text, font=FONT)
        return tag
class Namer(object):
    """Hands out (name, color) pairs for new threads.

    Auto-assigned names and colors cycle through fixed palettes;
    explicitly requested names are always white.
    """
    def __init__(self):
        self.names = ALL_THREAD_NAMES
        self.next_name = 0
        self.colors = ['red', 'orange', 'yellow', 'greenyellow',
                       'green', 'mediumseagreen', 'skyblue',
                       'violet', 'magenta']
        self.next_color = 0

    def next(self, name=None):
        """Returns a (name, color) pair, auto-generating the name
        when none is supplied."""
        if name is not None:
            return name, 'white'
        picked = self.names[self.next_name]
        self.next_name = (self.next_name + 1) % len(self.names)
        shade = self.colors[self.next_color]
        self.next_color = (self.next_color + 1) % len(self.colors)
        return picked, shade
class Namespace:
    """Used to store thread-local variables.

    Inside the simulator, self refers to the thread's namespace.
    """
class Thread:
    """Represents simulated threads."""
    def __init__(self, column, name=None):
        self.column = column
        # column.p is the Sync simulator that owns the column
        self.sync = column.p
        self.name, self.color = self.sync.get_name(name)
        self.namespace = Namespace()
        # flag_map: indent level -> result of the matching `if` condition
        self.flag_map = {}
        # while_stack: (indent, row) of each active `while` header
        self.while_stack = []
        self.sync.register(self)
        self.start()
    def __str__(self):
        return '<' + self.name + '>'
    def enqueue(self):
        """Puts this thread into queue."""
        self.queued = True
        self.row.remove_thread(self)
        self.row.enqueue_thread(self)
    def dequeue(self):
        """Removes this thread from queue."""
        self.queued = False
        self.row.dequeue_thread(self)
        self.row.add_thread(self)
    def jump_to(self, row):
        """Removes this thread from its current row and moves it to row."""
        if self.row:
            self.row.remove_thread(self)
        self.row = row
        if self.row:
            self.row.add_thread(self)
    def balk(self):
        """Leaves the column entirely (row becomes None)."""
        self.row.remove_thread(self)
        self.row = None
    def start(self):
        """Moves this thread to the top of the column."""
        self.queued = False
        self.row = None
        self.next_loop()
    def next_loop(self):
        """Moves to the next row, looping to the top if necessary."""
        self.next_row()
        if self.row == None:
            self.start()
    def next_row(self):
        """Moves this thread to the next row in the column."""
        if self.queued:
            return
        row = self.column.next_row(self.row)
        self.jump_to(row)
    def skip_body(self):
        """Skips the body of a conditional."""
        # get the current line
        # get the next line
        # compute the change in indent
        # find the outdent
        source = self.row.get()
        head_indent = self.count_spaces(source)
        self.next_row()
        source = self.row.get()
        body_indent = self.count_spaces(source)
        indent = body_indent - head_indent
        if indent <= 0:
            raise SyntaxError('Body of compound statement must be indented.')
        while True:
            self.next_row()
            if self.row == None:
                break
            source = self.row.get()
            line_indent = self.count_spaces(source)
            if line_indent <= head_indent:
                break
    def count_spaces(self, source):
        """Returns the number of leading spaces after expanding tabs."""
        s = source.expandtabs(4)
        t = s.lstrip(' ')
        return len(s) - len(t)
    def step(self, event=None):
        """Executes the current line of code, then moves to the next row.

        The current limitation of this simulator is that each row
        has to contain a complete Python statement.  Also, each line
        of code is executed atomically.

        Args:
            event: unused, provided so that this method can be used
                as a binding callback

        Returns:
            line of code that executed or None
        """
        if self.queued:
            return None
        if self.row == None:
            return None
        self.check_end_while()
        source = self.row.get()
        print(self, source)
        # snapshot the locals so we can detect newly defined variables
        before = copy.copy(self.sync.locals)
        flag = self.exec_line(source, self.sync)
        # see if any variables were defined or changed
        after = self.sync.locals
        defined = subtract(after, before)
        for key in defined:
            self.sync.views[key] = self.row
        self.sync.update_views()
        # either skip to the next line or to the end of a false conditional
        if flag:
            self.next_row()
        else:
            self.skip_body()
        return source
    def exec_line(self, source, sync):
        """Runs a line of source code in the context of the given Sync.

        Args:
            source: source code from a Row
            sync: Sync object

        Returns:
            if the line is an if statement, returns the result of
            evaluating the condition
        """
        global CURRENT_THREAD
        CURRENT_THREAD = self
        # inside the simulator, `self` names the thread's own namespace
        sync.globals['self'] = self.namespace
        try:
            s = source.strip()
            # NOTE: executes user-entered code; fine for a teaching tool,
            # but never feed untrusted input to this simulator.
            code = compile(s, '<user-provided code>', 'exec')
            exec(code, sync.globals, sync.locals)
            return True
        except SyntaxError as error:
            # check whether it's a conditional statement
            keyword = s.split()[0]
            if keyword in ['if', 'else:', 'while']:
                flag = self.handle_conditional(keyword, source, sync)
                return flag
            else:
                raise error
    def handle_conditional(self, keyword, source, sync):
        """Evaluates the condition part of an if statement.

        Args:
            keyword: if, else or while
            source: source code from a Row
            sync: Sync object

        Returns:
            if the line is an if statement, returns the result of
            evaluating the condition; otherwise raises a SyntaxError
        """
        s = source.strip()
        if not s.endswith(':'):
            raise SyntaxError('Header must end with :')
        if keyword in ['if']:
            # evaluate the condition
            n = len(keyword)
            condition = s[n:-1].strip()
            flag = eval(condition, sync.globals, sync.locals)
            # store the flag
            indent = self.count_spaces(source)
            self.flag_map[indent] = flag
            return flag
        elif keyword in ['while']:
            # evaluate the condition
            n = len(keyword)
            condition = s[n:-1].strip()
            flag = eval(condition, sync.globals, sync.locals)
            if flag:
                # remember the loop header so check_end_while can jump back
                indent = self.count_spaces(source)
                self.while_stack.append((indent, self.row))
            return flag
        else:
            assert keyword == 'else:'
            # see whether the condition was true
            indent = self.count_spaces(source)
            try:
                flag = self.flag_map[indent]
                return not flag
            except KeyError:
                raise SyntaxError('else does not match if')
    def check_end_while(self):
        """Check if we are at the end of a while loop.

        If so, jump to the top.
        """
        if not self.while_stack:
            return
        indent, row = self.while_stack[-1]
        source = self.row.get()
        if self.count_spaces(source) <= indent:
            self.while_stack.pop()
            self.jump_to(row)
    def step_loop(self, event=None):
        """Steps once, wrapping to the top of the column at the end."""
        self.step()
        if self.row == None:
            self.start()
    def run(self):
        """Steps repeatedly until this thread leaves its column."""
        while True:
            self.step()
            if self.row == None: break
def main():
    """Builds the simulator from command-line args and runs the GUI."""
    sync = Sync(sys.argv[1:])
    sync.mainloop()
if __name__ == '__main__':
    main()
|
def recursion(m, n):
    """Ackermann's function, implemented with naive double recursion."""
    # Base case: plain successor
    if m == 0:
        return n + 1
    # Recursive step: n exhausted, drop m by one and restart n at 1
    if n == 0 and m > 0:
        return recursion(m - 1, 1)
    # Recursive step: general case, nested recursive descent
    return recursion(m - 1, recursion(m, n - 1))

print(recursion(0, 15))
|
'''
Level: Easy
Given an integer array nums, return the third distinct maximum number in this array.
If the third maximum does not exist, return the maximum number.
Example 1:
Input: nums = [3,2,1]
Output: 1
Explanation:
The first distinct maximum is 3.
The second distinct maximum is 2.
The third distinct maximum is 1.
Example 2:
Input: nums = [1,2]
Output: 2
Explanation:
The first distinct maximum is 2.
The second distinct maximum is 1.
The third distinct maximum does not exist, so the maximum (2) is returned instead.
Example 3:
Input: nums = [2,2,3,1]
Output: 1
Explanation:
The first distinct maximum is 3.
The second distinct maximum is 2 (both 2's are counted together since they have the same value).
The third distinct maximum is 1.
Constraints:
1 <= nums.length <= 10^4
-2^31 <= nums[i] <= 2^31 - 1
Follow up: Can you find an O(n) solution?
'''
'''
Find the third-largest distinct number in the array.
If there are fewer than three distinct numbers, return the largest one.
'''
class Solution(object):
    def thirdMax(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Deduplicate, sort descending, and take the third entry when it
        exists; otherwise fall back to the overall maximum.
        """
        distinct = sorted(set(nums), reverse=True)
        return distinct[2] if len(distinct) >= 3 else distinct[0]
# Smoke tests: cover the normal third-maximum path and the fallback
# when fewer than three distinct values exist.
nums = [2,2,3,1]
assert 1 == Solution().thirdMax(nums)
nums = [3,2,1]
assert 1 == Solution().thirdMax(nums)
nums = [1,2]
assert 2 == Solution().thirdMax(nums)
nums = [5,2,2]
assert 5 == Solution().thirdMax(nums)
nums = [1,2,2]
assert 2 == Solution().thirdMax(nums)
nums = [1,2,2,5,3,5]
assert 2 == Solution().thirdMax(nums)
class Quadruplets2:
# Returns the number of quadruplets that sum to zero.
# a - [int]
# b - [int]
# c - [int]
# d - [int]
@staticmethod
def zero_quadruplets_count(a, b, c, d):
left_sums = {}
right_sums = {}
result = 0
for element1 in a:
for element2 in b:
if element1 + element2 not in left_sums:
left_sums[element1 + element2] = 1
else:
left_sums[element1 + element2] += 1
for element1 in c:
for element2 in d:
if element1 + element2 not in right_sums:
right_sums[element1 + element2] = 1
else:
right_sums[element1 + element2] += 1
for key in left_sums:
if -key in right_sums:
result = result + (left_sums[key] * right_sums[-key])
return result
def main():
    """Reads four whitespace-separated integer lists from stdin (after a
    first line that is read but otherwise unused) and prints the number
    of zero-sum quadruplets."""
    N = int(input())
    res = [[], [], [], []]
    for a in range(len(res)):
        line = input()
        elements = line.split(' ')
        for element in elements:
            res[a].append(int(element))
    print(Quadruplets2.zero_quadruplets_count(res[0], res[1], res[2], res[3]))
if __name__ == '__main__':
    main()
|
import smtplib
sender = 'ayushgoel2004@gmail.com'
receivers = ['goel.monica1@gmail.com']
message = """From: From Person <from@fromdomain.com>
To: To Person <goel.monica1@gmail.com>
Subject: Python email number 1
This is awesome.
"""
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(sender, receivers, message)
print "Successfully sent email"
print "Error: unable to send email"
|
#!/usr/bin/env python
# coding: utf-8
import math
import sys
# PEP 8 (E731): use def instead of assigning lambdas to names.
# Poisson approximation of hash-table slot occupancy at load factor x.
def pempty(x):
    """Probability that a slot is empty at load factor x."""
    return math.e ** (-x)

def pone(x):
    """Probability that a slot holds exactly one key."""
    return x * math.e ** (-x)

def pcollision(x):
    """Probability that a slot holds two or more keys (a collision)."""
    return 1 - (1 + x) * math.e ** (-x)

def perc(x):
    """Format a probability as a whole-number percentage string."""
    return '%02.0f%%' % (x * 100)
if __name__ == "__main__":
    # Each command-line argument is a load factor; print the empty /
    # one-key / collision probabilities for each.  (Python 2 syntax.)
    loadfactors = [float(x) for x in sys.argv[1:]]
    for lf in loadfactors:
        print 'load factor: %02.2f, empty: %s, 1-key: %s, collision: %s' % (
            lf,
            perc(pempty(lf)),
            perc(pone(lf)),
            perc(pcollision(lf)),
        )
|
import pytest
# FIXME This test is too flaky
# https://github.com/ClickHouse/ClickHouse/issues/42561
pytestmark = pytest.mark.skip
import logging
from string import Template
import time
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import assert_eq_with_retry
from pyhdfs import HdfsClient
SHARDS = 2
FILES_OVERHEAD_PER_TABLE = 1 # format_version.txt
FILES_OVERHEAD_PER_PART_COMPACT = 7
def wait_for_hdfs_objects(cluster, fp, expected, num_tries=30):
    """Poll HDFS (once per second, up to num_tries times) until the path
    fp contains exactly `expected` objects, then assert that it does."""
    client = HdfsClient(hosts=cluster.hdfs_ip)
    for _ in range(num_tries):
        if len(client.listdir(fp)) == expected:
            break
        time.sleep(1)
    assert len(client.listdir(fp)) == expected
@pytest.fixture(scope="module")
def cluster():
    """Module-scoped fixture: a two-node ClickHouse cluster with
    ZooKeeper and HDFS, plus two pre-created HDFS directories."""
    try:
        cluster = ClickHouseCluster(__file__)
        cluster.add_instance(
            "node1",
            main_configs=["configs/config.d/storage_conf.xml"],
            macros={"replica": "node1"},
            with_zookeeper=True,
            with_hdfs=True,
        )
        cluster.add_instance(
            "node2",
            main_configs=["configs/config.d/storage_conf.xml"],
            macros={"replica": "node2"},
            with_zookeeper=True,
            with_hdfs=True,
        )
        logging.info("Starting cluster...")
        cluster.start()
        if cluster.instances["node1"].is_debug_build():
            # https://github.com/ClickHouse/ClickHouse/issues/27814
            pytest.skip(
                "libhdfs3 calls rand function which does not pass harmful check in debug build"
            )
        logging.info("Cluster started")
        fs = HdfsClient(hosts=cluster.hdfs_ip)
        fs.mkdirs("/clickhouse1")
        fs.mkdirs("/clickhouse2")
        logging.info("Created HDFS directory")
        yield cluster
    finally:
        cluster.shutdown()
def test_hdfs_zero_copy_replication_insert(cluster):
    """Insert one row on node1, sync node2, and verify both replicas see the
    row while sharing a single zero-copy part stored on HDFS."""
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]
    nodes = (node1, node2)
    try:
        node1.query(
            """
            CREATE TABLE hdfs_test ON CLUSTER test_cluster (dt DateTime, id Int64)
            ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/hdfs_test', '{replica}')
            ORDER BY (dt, id)
            SETTINGS storage_policy='hdfs_only'
            """
        )
        wait_for_hdfs_objects(
            cluster, "/clickhouse1", SHARDS * FILES_OVERHEAD_PER_TABLE
        )
        node1.query("INSERT INTO hdfs_test VALUES (now() - INTERVAL 3 DAY, 10)")
        node2.query("SYSTEM SYNC REPLICA hdfs_test", timeout=30)
        for node in nodes:
            assert node.query("SELECT count() FROM hdfs_test FORMAT Values") == "(1)"
        for node in nodes:
            assert (
                node.query("SELECT id FROM hdfs_test ORDER BY dt FORMAT Values")
                == "(10)"
            )
        # Both replicas must report the same single part living on the HDFS disk.
        for node in nodes:
            assert (
                node.query(
                    "SELECT partition_id,disk_name FROM system.parts WHERE table='hdfs_test' FORMAT Values"
                )
                == "('all','hdfs1')"
            )
        # Zero-copy: two replicas, but only one part's worth of HDFS objects.
        wait_for_hdfs_objects(
            cluster,
            "/clickhouse1",
            SHARDS * FILES_OVERHEAD_PER_TABLE + FILES_OVERHEAD_PER_PART_COMPACT,
        )
    finally:
        for node in nodes:
            node.query("DROP TABLE IF EXISTS hdfs_test SYNC")
@pytest.mark.parametrize(
    ("storage_policy", "init_objects"),
    [("hybrid", 0), ("tiered", 0), ("tiered_copy", FILES_OVERHEAD_PER_TABLE)],
)
def test_hdfs_zero_copy_replication_single_move(cluster, storage_policy, init_objects):
    """Move a partition to the HDFS 'external' volume and back to 'main' on a
    single node and check the data survives both moves.

    init_objects is the number of HDFS objects table creation itself is
    expected to leave behind for the given storage policy.
    """
    node1 = cluster.instances["node1"]
    try:
        node1.query(
            Template(
                """
                CREATE TABLE single_node_move_test (dt DateTime, id Int64)
                ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/single_node_move_test', '{replica}')
                ORDER BY (dt, id)
                SETTINGS storage_policy='$policy',temporary_directories_lifetime=1
                """
            ).substitute(policy=storage_policy)
        )
        wait_for_hdfs_objects(cluster, "/clickhouse1", init_objects)
        node1.query(
            "INSERT INTO single_node_move_test VALUES (now() - INTERVAL 3 DAY, 10), (now() - INTERVAL 1 DAY, 11)"
        )
        assert (
            node1.query(
                "SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values"
            )
            == "(10),(11)"
        )
        # Move the only partition to the HDFS-backed volume.
        node1.query(
            "ALTER TABLE single_node_move_test MOVE PARTITION ID 'all' TO VOLUME 'external'"
        )
        assert (
            node1.query(
                "SELECT partition_id,disk_name FROM system.parts WHERE table='single_node_move_test' FORMAT Values"
            )
            == "('all','hdfs1')"
        )
        assert (
            node1.query(
                "SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values"
            )
            == "(10),(11)"
        )
        # One compact part's objects should now exist on HDFS.
        wait_for_hdfs_objects(
            cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT
        )
        # Move back to the local 'main' volume; data must still be intact.
        node1.query(
            "ALTER TABLE single_node_move_test MOVE PARTITION ID 'all' TO VOLUME 'main'"
        )
        assert (
            node1.query(
                "SELECT id FROM single_node_move_test ORDER BY dt FORMAT Values"
            )
            == "(10),(11)"
        )
    finally:
        node1.query("DROP TABLE IF EXISTS single_node_move_test SYNC")
@pytest.mark.parametrize(
    ("storage_policy", "init_objects"),
    [("hybrid", 0), ("tiered", 0), ("tiered_copy", SHARDS * FILES_OVERHEAD_PER_TABLE)],
)
def test_hdfs_zero_copy_replication_move(cluster, storage_policy, init_objects):
    """Move the same partition to the HDFS volume on both replicas and check
    the object count does not grow after the second move (zero-copy: the
    replicas share one set of HDFS objects).
    """
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]
    try:
        node1.query(
            Template(
                """
                CREATE TABLE move_test ON CLUSTER test_cluster (dt DateTime, id Int64)
                ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/move_test', '{replica}')
                ORDER BY (dt, id)
                SETTINGS storage_policy='$policy'
                """
            ).substitute(policy=storage_policy)
        )
        wait_for_hdfs_objects(cluster, "/clickhouse1", init_objects)
        node1.query(
            "INSERT INTO move_test VALUES (now() - INTERVAL 3 DAY, 10), (now() - INTERVAL 1 DAY, 11)"
        )
        node2.query("SYSTEM SYNC REPLICA move_test", timeout=30)
        assert (
            node1.query("SELECT id FROM move_test ORDER BY dt FORMAT Values")
            == "(10),(11)"
        )
        assert (
            node2.query("SELECT id FROM move_test ORDER BY dt FORMAT Values")
            == "(10),(11)"
        )
        # First move uploads the part's objects to HDFS.
        node1.query(
            "ALTER TABLE move_test MOVE PARTITION ID 'all' TO VOLUME 'external'"
        )
        wait_for_hdfs_objects(
            cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT
        )
        # Second replica moves the same partition ...
        node2.query(
            "ALTER TABLE move_test MOVE PARTITION ID 'all' TO VOLUME 'external'"
        )
        assert (
            node1.query(
                "SELECT partition_id,disk_name FROM system.parts WHERE table='move_test' FORMAT Values"
            )
            == "('all','hdfs1')"
        )
        assert (
            node2.query(
                "SELECT partition_id,disk_name FROM system.parts WHERE table='move_test' FORMAT Values"
            )
            == "('all','hdfs1')"
        )
        assert (
            node1.query("SELECT id FROM move_test ORDER BY dt FORMAT Values")
            == "(10),(11)"
        )
        assert (
            node2.query("SELECT id FROM move_test ORDER BY dt FORMAT Values")
            == "(10),(11)"
        )
        # ... yet the HDFS object count is unchanged: the objects are shared.
        wait_for_hdfs_objects(
            cluster, "/clickhouse1", init_objects + FILES_OVERHEAD_PER_PART_COMPACT
        )
    finally:
        node1.query("DROP TABLE IF EXISTS move_test SYNC")
        node2.query("DROP TABLE IF EXISTS move_test SYNC")
@pytest.mark.parametrize(("storage_policy"), ["hybrid", "tiered", "tiered_copy"])
def test_hdfs_zero_copy_with_ttl_move(cluster, storage_policy):
    """A TTL ... TO VOLUME rule must work with zero-copy replication: rows
    older than two days migrate to the 'external' (HDFS) volume while both
    rows stay visible on both replicas."""
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]
    nodes = (node1, node2)
    try:
        ddl = Template(
            """
            CREATE TABLE ttl_move_test ON CLUSTER test_cluster (dt DateTime, id Int64)
            ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/ttl_move_test', '{replica}')
            ORDER BY (dt, id)
            TTL dt + INTERVAL 2 DAY TO VOLUME 'external'
            SETTINGS storage_policy='$policy'
            """
        ).substitute(policy=storage_policy)
        node1.query(ddl)
        # One row past the 2-day TTL boundary, one inside it.
        node1.query("INSERT INTO ttl_move_test VALUES (now() - INTERVAL 3 DAY, 10)")
        node1.query("INSERT INTO ttl_move_test VALUES (now() - INTERVAL 1 DAY, 11)")
        node1.query("OPTIMIZE TABLE ttl_move_test FINAL")
        node2.query("SYSTEM SYNC REPLICA ttl_move_test", timeout=30)
        # Unlike a delete-TTL, a move-TTL keeps both rows.
        for node in nodes:
            assert_eq_with_retry(node, "SELECT count() FROM ttl_move_test", "2")
        for node in nodes:
            assert (
                node.query("SELECT id FROM ttl_move_test ORDER BY id FORMAT Values")
                == "(10),(11)"
            )
    finally:
        for node in nodes:
            node.query("DROP TABLE IF EXISTS ttl_move_test SYNC")
def test_hdfs_zero_copy_with_ttl_delete(cluster):
    """A delete-TTL must drop the expired row on both replicas when the table
    uses the tiered (zero-copy HDFS) storage policy."""
    node1 = cluster.instances["node1"]
    node2 = cluster.instances["node2"]
    nodes = (node1, node2)
    try:
        node1.query(
            """
            CREATE TABLE ttl_delete_test ON CLUSTER test_cluster (dt DateTime, id Int64)
            ENGINE=ReplicatedMergeTree('/clickhouse/tables/{cluster}/{shard}/ttl_delete_test', '{replica}')
            ORDER BY (dt, id)
            TTL dt + INTERVAL 2 DAY
            SETTINGS storage_policy='tiered'
            """
        )
        # Row 10 is already past the 2-day TTL, row 11 is not.
        node1.query("INSERT INTO ttl_delete_test VALUES (now() - INTERVAL 3 DAY, 10)")
        node1.query("INSERT INTO ttl_delete_test VALUES (now() - INTERVAL 1 DAY, 11)")
        node1.query("OPTIMIZE TABLE ttl_delete_test FINAL")
        node2.query("SYSTEM SYNC REPLICA ttl_delete_test", timeout=30)
        # Only the unexpired row should remain on either replica.
        for node in nodes:
            assert_eq_with_retry(node, "SELECT count() FROM ttl_delete_test", "1")
        for node in nodes:
            assert (
                node.query("SELECT id FROM ttl_delete_test ORDER BY id FORMAT Values")
                == "(11)"
            )
    finally:
        for node in nodes:
            node.query("DROP TABLE IF EXISTS ttl_delete_test SYNC")
|
"""Interactive rock-paper-scissors game against the computer."""
from random import randint

# Specify weapons
weapons = ("rock", "paper", "scissors")

# Map each weapon to the weapon it defeats; replaces the six-branch
# win/lose ladder of the original with a single lookup.
beats = {"rock": "scissors", "paper": "rock", "scissors": "paper"}

# Initialize global variables
game_continue = ""
computer_score = 0
player_score = 0

while game_continue != "N":
    # Generate opponent weapon; randint(1, 3) - 1 keeps the original
    # uniform 1..3 draw while replacing the if/elif index mapping.
    computer_weapon = weapons[randint(1, 3) - 1]

    # Get player weapon
    player_weapon = input("\nChoose your weapon: {}, {}, or {}? ".format(weapons[0], weapons[1], weapons[2]))
    player_weapon = player_weapon.lower()
    print("{} vs. {}.".format(computer_weapon.title(), player_weapon))

    # Validate user entry
    if player_weapon not in weapons:
        print("You forfeit because you did not enter {}, {}, or {}. \nCheck your spelling and try again.".format(weapons[0], weapons[1], weapons[2]))
    # Game results
    elif player_weapon == computer_weapon:
        print('DRAW!')
    elif beats[player_weapon] == computer_weapon:
        print('Player wins!')
        player_score += 1
    else:
        print('Computer wins!')
        computer_score += 1

    # Display scores
    print("\nComputer:\tPlayer:")
    print("{}\t\t{}\n".format(computer_score, player_score))

    # Continue game?
    print("Play again? (Y/N)")
    game_continue = input().strip().upper()

print("\nGood bye.")
|
"""Dump JSON data from Postgres to local storage."""
import json
from typing import Optional
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from psycopg2.extras import RealDictCursor
class PostgresToLocalOperator(BaseOperator):
    """
    Airflow operator for storing a JSON-formatted
    Postgres query result on local disk.

    :param pg_query: SQL query to execute against Postgres.
    :param local_path: path of the output JSON file.
    :param postgres_conn_id: Airflow connection id of the Postgres database.
    """

    ui_color = "#705B74"
    ui_fgcolor = "#8FA48B"

    @apply_defaults
    def __init__(
        self,
        pg_query: str,
        local_path: str,
        postgres_conn_id: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self._pg_query = pg_query
        self._local_path = local_path
        self._postgres_conn_id = postgres_conn_id

    def execute(self, context):
        """Run the query and dump all rows as a JSON array to local disk."""
        postgres_hook = PostgresHook(postgres_conn_id=self._postgres_conn_id)
        conn = postgres_hook.get_conn()
        try:
            # RealDictCursor yields one dict per row, so the result is
            # directly JSON-serializable (assumes column values are
            # JSON-compatible, e.g. no date/Decimal columns -- TODO confirm).
            with conn.cursor(cursor_factory=RealDictCursor) as cursor:
                cursor.execute(self._pg_query)
                rows = cursor.fetchall()
        finally:
            # The original leaked both the cursor and the connection; close
            # them even when the query fails.
            conn.close()
        with open(self._local_path, "w") as f:
            json.dump(rows, f, indent=4)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import scipy.io
import cPickle
import configuration
def main(unused_argv):
    """Train an LSTM image-captioning model on MS-COCO (TF1 graph mode).

    Loads preprocessed captions, vocabulary maps, and precomputed ResNet
    image features from ./data/mscoco, builds a manually-unrolled LSTM
    language model conditioned on the image embedding, and runs the
    training loop, saving a checkpoint after every epoch.

    NOTE(review): Python 2 / TF 1.x script (cPickle, xrange, tf.placeholder,
    tf.contrib) -- it will not run on Python 3 or TF 2.x as-is. Indentation
    reconstructed from a whitespace-mangled source; structure should be
    verified against the original file.
    """
    # load data disk
    x = cPickle.load(open("./data/mscoco/data.p","rb"))
    train, val, test = x[0], x[1], x[2]  # caption splits
    wordtoix, ixtoword = x[3], x[4]  # vocabulary maps
    del x
    n_words = len(ixtoword)  # vocabulary size (unused below; model_config.vocab_size is used instead)
    x = cPickle.load(open("./data/mscoco/word2vec.p","rb"))
    # NOTE(review): this pretrained word2vec matrix is never used -- the
    # name W is rebound to an LSTM weight tensor further down.
    W = x[0]
    del x
    data = scipy.io.loadmat('./data/mscoco/resnet_feats.mat')
    img_feats = data['feats'].astype(float)  # one 2048-dim feature column per image
    print("finish loading data")
    g = tf.Graph()
    with g.as_default():
        # creat config objects which contain model and training configs
        model_config = configuration.ModelConfig()
        training_config = configuration.TrainingConfig()
        #initializer method
        initializer = tf.random_uniform_initializer(
            minval=-model_config.initializer_scale,
            maxval=model_config.initializer_scale)
        batch_size = model_config.batch_size # batch_size = 32
        # Placeholders: image features plus the (input, target, mask) caption batch.
        image_fea = tf.placeholder(tf.float32, shape=[None,2048])
        input_seqs = tf.placeholder(tf.int32, shape=[None,None])
        target_seqs = tf.placeholder(tf.int32, shape=[None,None])
        input_mask = tf.placeholder(tf.int32, shape=[None,None])
        #creat the seq embedding map. It is random init.
        with tf.variable_scope("seq_embedding"), tf.device("/cpu:0"):
            embedding_map = tf.get_variable(
                name="map",
                shape=[model_config.vocab_size, model_config.embedding_size],
                initializer=initializer)
            seq_embeddings = tf.nn.embedding_lookup(embedding_map, input_seqs)
        #input dropout
        seq_embeddings = tf.nn.dropout(seq_embeddings, keep_prob=model_config.lstm_dropout_keep_prob)
        #creat image embedding layer. It is just fully connected layer.
        with tf.variable_scope("image_embedding") as scope:
            image_embeddings = tf.contrib.layers.fully_connected(
                inputs=image_fea,
                num_outputs=model_config.embedding_size,
                activation_fn=None,
                weights_initializer=initializer,
                biases_initializer=None,
                scope=scope)
        # Stacked LSTM gate weights, indices 0..3 = input, forget, output, cell.
        W = tf.get_variable('W', shape=[4, model_config.num_lstm_units, model_config.num_lstm_units], initializer=initializer)
        U = tf.get_variable('U', shape=[4, model_config.num_lstm_units, model_config.num_lstm_units], initializer=initializer)
        def step(prev, x):
            """One LSTM step for tf.scan; prev stacks (h, c), x is the input embedding."""
            # gather previous internal state and output state
            st_1, ct_1 = tf.unstack(prev)
            ####
            # GATES
            #
            # input gate
            i = tf.sigmoid(tf.matmul(x,U[0]) + tf.matmul(st_1,W[0]))
            # forget gate
            f = tf.sigmoid(tf.matmul(x,U[1]) + tf.matmul(st_1,W[1]))
            # output gate
            o = tf.sigmoid(tf.matmul(x,U[2]) + tf.matmul(st_1,W[2]))
            # gate weights
            g = tf.tanh(tf.matmul(x,U[3]) + tf.matmul(st_1,W[3]))
            ###
            # new internal cell state
            ct = ct_1*f + g*i
            # output state
            st = tf.tanh(ct)*o
            return tf.stack([st, ct])
        # Initialize both h and c with the image embedding, then scan over time.
        image_embeddings = tf.stack([image_embeddings,image_embeddings])
        states = tf.scan(step,
                         tf.transpose(seq_embeddings, [1,0,2]),
                         initializer=image_embeddings)
        #states = tf.Print(states, ["lstm states shape:",tf.shape(states)])
        # Keep only the h half ([0]) and restore batch-major order.
        states = tf.transpose(states, [1,2,0,3])[0]
        #states = tf.Print(states, ["lstm states REshape:",tf.shape(states)])
        lstm_outputs = tf.reshape(states, [-1, model_config.num_lstm_units])
        #lstm_outputs = tf.Print(lstm_outputs, [tf.shape(lstm_outputs), "lstm_outputs"])
        #output dropout
        lstm_outputs = tf.nn.dropout(lstm_outputs, keep_prob=model_config.lstm_dropout_keep_prob)
        with tf.variable_scope("logits") as logits_scope:
            logits = tf.contrib.layers.fully_connected(
                inputs=lstm_outputs,
                num_outputs=model_config.vocab_size,
                activation_fn=None,
                weights_initializer=initializer,
                scope=logits_scope)
        targets = tf.reshape(target_seqs, [-1])
        weights = tf.to_float(tf.reshape(input_mask, [-1]))
        # Compute losses.
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                                logits=logits)
        # Masked mean cross-entropy over the real (non-padding) tokens.
        batch_loss = tf.div(tf.reduce_sum(tf.multiply(losses, weights)),
                            tf.reduce_sum(weights),
                            name="batch_loss")
        tf.losses.add_loss(batch_loss)
        total_loss = tf.losses.get_total_loss()
        # Add summaries.
        tf.summary.scalar("losses/batch_loss", batch_loss)
        tf.summary.scalar("losses/total_loss", total_loss)
        for var in tf.trainable_variables():
            tf.summary.histogram("parameters/" + var.op.name, var)
        #get the steps
        global_step = tf.Variable(
            initial_value=0,
            name="global_step",
            trainable=False,
            collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
        #learing rate
        learning_rate_decay_fn = None
        learning_rate = tf.constant(training_config.initial_learning_rate)
        if training_config.learning_rate_decay_factor > 0:
            num_batches_per_epoch = (training_config.num_examples_per_epoch /
                                     model_config.batch_size)
            decay_steps = int(num_batches_per_epoch *
                              training_config.num_epochs_per_decay)
            def _learning_rate_decay_fn(learning_rate, global_step):
                """Staircase exponential decay every decay_steps steps."""
                return tf.train.exponential_decay(
                    learning_rate,
                    global_step,
                    decay_steps=decay_steps,
                    decay_rate=training_config.learning_rate_decay_factor,
                    staircase=True)
            # NOTE(review): learning_r is computed but never fed to the
            # optimizer below -- training runs at the constant initial rate.
            learning_r = _learning_rate_decay_fn(learning_rate, global_step)
        # Set up the training ops.
        # We change the learing_rate directly here rather than using learning_rate_decay_fn
        train_op = tf.contrib.layers.optimize_loss(
            loss=total_loss,
            global_step=global_step,
            learning_rate=learning_rate,#learning_r,
            optimizer=training_config.optimizer,
            clip_gradients=training_config.clip_gradients,
            learning_rate_decay_fn=None)#learning_rate_decay_fn)
        # Set up the Saver for saving and restoring model checkpoints.
        saver = tf.train.Saver(max_to_keep=training_config.max_checkpoints_to_keep)
        print("finish building network")
    g.as_default()
    #gpu_options = tf.GPUOptions(allow_growth=True)
    #sess = tf.Session(graph=g,config=tf.ConfigProto(gpu_options=gpu_options))
    sess=tf.InteractiveSession(graph=g)
    #sess = tf.Session(graph=g)
    #init = tf.global_variables_initializer()
    with sess.as_default():
        tf.global_variables_initializer().run()
        print("finish initialization")
        #prepare the data.
        #add a 6880('#') before the input seqs
        def prepare_data(seqs):
            """Pad a list of token-id sentences into (inputs, mask, outputs).

            inputs are the sentences shifted right by one with token id 6880
            prepended (presumably the start-of-sentence symbol '#', per the
            comment above -- TODO confirm against wordtoix); outputs are the
            unshifted sentences; mask marks real tokens with 1.
            """
            # x: a list of sentences
            lengths = [len(s) for s in seqs]
            n_samples = len(seqs)
            maxlen = np.max(lengths)
            inputs = np.zeros(( n_samples,maxlen)).astype('int64')
            outputs = np.zeros((n_samples,maxlen)).astype('int64')
            x_mask = np.zeros((n_samples,maxlen)).astype(float)
            for idx, s in enumerate(seqs):
                inputs[idx,0] = 6880
                inputs[idx,1:lengths[idx]] = s[:lengths[idx]-1]
                outputs[idx,:lengths[idx]] = s[:lengths[idx]]
                x_mask[idx,:lengths[idx]] = 1.
            return inputs, x_mask,outputs
        #generate data index by batches. It can shuffle data at the same time
        def get_minibatches_idx(n, minibatch_size, shuffle=False):
            """Split range(n) into minibatch index arrays, optionally shuffled."""
            idx_list = np.arange(n, dtype="int32")
            if shuffle:
                np.random.shuffle(idx_list)
            minibatches = []
            minibatch_start = 0
            for i in range(n // minibatch_size):
                minibatches.append(idx_list[minibatch_start:
                                            minibatch_start + minibatch_size])
                minibatch_start += minibatch_size
            if (minibatch_start != n):
                # Make a minibatch out of what is left
                minibatches.append(idx_list[minibatch_start:])
            return zip(range(len(minibatches)), minibatches)
        # NOTE(review): this kf over the validation split is dead -- it is
        # overwritten at the top of every epoch below.
        kf = get_minibatches_idx(len(val[0]), batch_size, shuffle=True)
        max_epochs = 20#57 #56.46 for 1000000 steps
        for eidx in xrange(max_epochs):
            print("the " + str(eidx) + " epochs")
            kf = get_minibatches_idx(len(train[0]), batch_size, shuffle=True)
            for steps, train_index in kf:
                x = [train[0][t]for t in train_index]
                z = np.array([img_feats[:,train[1][t]]for t in train_index])
                x, mask,y = prepare_data(x)
                # Only full batches are fed; the remainder batch is skipped.
                if (x.shape[0]==batch_size):
                    feed_dict = {image_fea:z,input_seqs:x,target_seqs:y,input_mask:mask}
                    _,loss_value = sess.run([train_op,total_loss],feed_dict=feed_dict)
                    if steps%100==0:#print loss every 100 steps
                        # 17710 is presumably the number of minibatches per
                        # epoch -- TODO confirm against len(train[0])/batch_size.
                        print("steps:"+str(steps+eidx*17710))
                        print("loss_value:"+str(loss_value))
            # Checkpoint once per epoch (global_step=eidx).
            saver_path = saver.save(sess, "log/model.ckpt",global_step=eidx) # save/model.ckpt
            print("Model saved in file:", saver_path)
if __name__ == "__main__":
    tf.app.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.