text stringlengths 38 1.54M |
|---|
#!/usr/bin/env python
'''
fizz buzz test with normal functions
The fizz buzz tests is aimed to check if you know how to write a function.
For a different example using lambda functions, see fizzBuzz_lambda.py
Check out fizzBuzz_errors.py for an example that provides error messages to the user.
You may find useful documentation at the bottom.
© Damian Romero, 2019
Python 3.7
License: MIT license
'''
################################## Start program ##################################
# First, create a variable with input from user, aka standard input (stdin):
# NOTE: input() always returns a string; it is converted to int inside the loop below.
number = input("Tell me your integer and I\'ll fizz or buzz: ")
# Now, define the function which will evaluate the number in our variable:
def fizzbuzz(number: int):  # annotation fixed: original read `def fizzbuzz(int:number)`,
    """Print the fizz-buzz result for *number*.

    Prints "fizz-buzz" when number is divisible by both 5 and 7, "fizz" for 5
    only, "buzz" for 7 only, and "neither fizz nor buzz" otherwise.

    BUG FIX: the original signature declared a parameter literally named `int`
    annotated with `number` — the two identifiers were swapped.
    """
    either = 0  # this switch will help us keep track of fizz and/or buzz
    # We will use the modulo operator '%', which renders the remainder of a division.
    if ((number % 5) + (number % 7)) == 0:  # if the sum of both modulos is 0:
        print("fizz-buzz")
    # If the sum is not 0, then maybe one of them is 0
    else:
        if (number % 5) == 0:  # check if the remainder of number / 5 is 0
            print("fizz")
            either += 1  # we have found a fizz, so add +1
        if (number % 7) == 0:
            print("buzz")
            either += 1  # we have found a buzz, so add +1
        # If we did not see a fizz or buzz, then tell the user:
        if not either:  # 'not' evaluates true when 'either' is zero
            print("neither fizz nor buzz")
################################## Loop ##################################
# While loops keep happening as long as their condition is true:
while number.isnumeric(): # .isnumeric() is a string method to check for numbers
    # we will convert the number to an integer
    number = int(number)
    # we will pass the number to our fizzbuzz function above
    fizzbuzz(number)
    # We will ask the user for another number.
    # If the user types anything other than a number, the while loop will stop.
    # NOTE(review): .isnumeric() is False for "-5" or "3.0", so those also exit the loop.
    number = input("\nTell me your integer and I\'ll fizz or buzz or exit with enter: ")
################################## End ##################################
'''
Documentation
To learn more about modulo and other operations, go to: https://docs.python.org/3.3/reference/expressions.html
To learn more about control flow tools such as if and while statements, go to: https://docs.python.org/3/tutorial/controlflow.html
''' |
import tensorflow as tf
import module.conv_training as conv_training
# import module.transfer_training as transfer_training
import random
def to_ds_degree(degree):
    """Dataset of (path, degree) string pairs for OK 'label' components at *degree*.

    NOTE(review): *degree* is interpolated straight into the SQL text — safe only
    for trusted, internally-generated values.
    """
    return tf.data.experimental.SqlDataset(
        "sqlite", "/data/aoi-wzs-p1-dip-fa-nvidia/training/p1-dip-metadata.db",
        f"""select path, degree from metadata
where degree = '{degree}' and
component_class = 'label' and
label = 'OK'
""", (tf.string, tf.string))
def to_ds_label(label):
    """Dataset of (path, label) string pairs for 'label' components with *label*.

    NOTE(review): *label* is interpolated straight into the SQL text — safe only
    for trusted, internally-generated values.
    """
    return tf.data.experimental.SqlDataset(
        "sqlite", "/data/aoi-wzs-p1-dip-fa-nvidia/training/p1-dip-metadata.db",
        f"""select path, label from metadata
where label = '{label}' and
component_class = 'label'
""", (tf.string, tf.string))
# def to_ds_comp_label(label, component):
# ds = tf.data.experimental.SqlDataset(
# "sqlite", "/data/aoi-wzs-p1-dip-fa-nvidia/training/p1-dip-metadata.db",
# f"""select path, label from metadata
# where label = '{label}' and
# component_class = '{component}'
# """, (tf.string, tf.string))
# return ds
# TFRecord shards used for training: one file per label orientation plus NG (defect).
tfrecords = [
    # '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/other_comps_random_1w.tfrecord',
    # '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/stylized_screw_heatsink_before_20200503.tfrecord',
    '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/LabelOrientationGenerateImage/NG.tfrecord',
    '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/LabelOrientationGenerateImage/000.tfrecord',
    '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/LabelOrientationGenerateImage/090.tfrecord',
    '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/LabelOrientationGenerateImage/180.tfrecord',
    '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/LabelOrientationGenerateImage/270.tfrecord',
]
# train_ds_list = []
# for degree in ['000', '090', '180', '270']:
#     train_ds_list.append(to_ds_degree(degree))
# train_ds_list.append(to_ds_label('NG'))
# Class names; their order fixes the one-hot / metric class_id indices used below.
LABEL_CLASS_LIST = ['000', '090', '180', '270', 'NG']
# Static string -> index table; unknown labels map to the default value -1.
label_lookup = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(LABEL_CLASS_LIST, tf.constant([i for i in range(len(LABEL_CLASS_LIST))], dtype=tf.int64)), -1)
# mirrored_strategy = tf.distribute.MirroredStrategy()
# with mirrored_strategy.scope():
# Metrics reported during training: overall categorical accuracy plus per-class
# precision/recall. class_id indices follow LABEL_CLASS_LIST ordering.
METRICS = [
    tf.keras.metrics.CategoricalAccuracy(name='acc'),
    tf.keras.metrics.Precision(name="precision/000", class_id=0),
    tf.keras.metrics.Precision(name="precision/090", class_id=1),
    tf.keras.metrics.Precision(name="precision/180", class_id=2),
    tf.keras.metrics.Precision(name="precision/270", class_id=3),
    tf.keras.metrics.Precision(name="precision/NG", class_id=4),
    tf.keras.metrics.Recall(name="recall/000", class_id=0),
    tf.keras.metrics.Recall(name="recall/090", class_id=1),
    tf.keras.metrics.Recall(name="recall/180", class_id=2),
    tf.keras.metrics.Recall(name="recall/270", class_id=3),
    tf.keras.metrics.Recall(name="recall/NG", class_id=4),
]
# data_augmentation = tf.keras.Sequential([
#     tf.keras.layers.experimental.preprocessing.RandomZoom(.5, .2),
#     tf.keras.layers.experimental.preprocessing.RandomContrast([1.0 - 0.9, 1.0 + 1.0]),
#     tf.keras.layers.experimental.preprocessing.RandomCrop(192, 192)
# ])
@tf.function
def parse_img(img):
    """Decode a JPEG byte string into a float32 single-channel image in [0, 1],
    letterbox-resized via resize_with_pad to the configured target shape.

    NOTE(review): resize_with_pad is called with (target_shape[1], target_shape[0])
    as (height, width) — confirm target_shape is stored as (W, H, C).
    """
    # img = tf.io.decode_image(img, channels=all_var_dict['target_shape'][-1],
    #     dtype=tf.dtypes.float32, expand_animations = False)
    img = tf.io.decode_jpeg(img,channels=1,dct_method='INTEGER_ACCURATE',try_recover_truncated=True)
    img = tf.cast(img, dtype=tf.dtypes.float32) / 255.0
    # Earlier edge-detection preprocessing, kept for reference:
    # img_with_batch = tf.expand_dims(img, axis=0)
    # grad_components = tf.image.sobel_edges(img_with_batch)
    # edg_image = tf.math.reduce_euclidean_norm(grad_components, axis=-1)
    # grad_mag_square = tf.clip_by_value(edg_image, 0., 1.)
    # # grad_mag_components = grad_components**2
    # # grad_mag_square = tf.sqrt(tf.math.reduce_sum(grad_mag_components,axis=-1)) # sum all magnitude components
    # img = tf.squeeze(grad_mag_square, axis=[0])
    img = tf.image.resize_with_pad(img, all_var_dict['target_shape'][1], all_var_dict['target_shape'][0])
    return img


@tf.function
def random_aug_parse_img(x, p=0.5):
    """Decode a JPEG like parse_img, then apply each enabled augmentation
    independently with probability *p* before the final resize_with_pad.

    Active augmentations: random JPEG re-compression, random brightness,
    random contrast. The remaining ones are kept commented out.
    """
    x = tf.io.decode_jpeg(x,channels=1,dct_method='INTEGER_ACCURATE',try_recover_truncated=True)
    x = tf.cast(x, dtype=tf.dtypes.float32) / 255.0
    # img_with_batch = tf.expand_dims(x, axis=0)
    # grad_components = tf.image.sobel_edges(img_with_batch)
    # edg_image = tf.math.reduce_euclidean_norm(grad_components, axis=-1)
    # grad_mag_square = tf.clip_by_value(edg_image, 0., 1.)
    # # grad_mag_components = grad_components**2
    # # grad_mag_square = tf.sqrt(tf.math.reduce_sum(grad_mag_components,axis=-1)) # sum all magnitude components
    # x = tf.squeeze(grad_mag_square, axis=[0]) # this is the image tensor you want
    if tf.random.uniform([]) < p:
        # Simulate compression artifacts across the whole quality range.
        x = tf.image.random_jpeg_quality(x, 0, 100)
    # if tf.random.uniform([]) < p:
    #     x = tf.image.rgb_to_grayscale(x)
    #     x = tf.squeeze(x, axis=-1)
    #     x = tf.stack([x, x, x], axis=-1)
    # if tf.random.uniform([]) < p:
    #     x = tf.image.flip_left_right(x)
    # if tf.random.uniform([]) < p:
    #     x = tf.image.rgb_to_hsv(x)
    # if tf.random.uniform([]) < p:
    #     # x = tf.image.random_saturation(x, 5, 10)
    #     x = tf.image.adjust_saturation(x, random.uniform(0, 1) * 3) # 0-3
    if tf.random.uniform([]) < p:
        x = tf.image.random_brightness(x, 0.5)
        # x = tf.image.adjust_brightness(x, random.uniform(0, 1) / 2) # 0-0.5
    if tf.random.uniform([]) < p:
        x = tf.image.random_contrast(x, 0.1, 2.0)
    # if tf.random.uniform([]) < p:
    #     x = tf.image.random_hue(x, 0.5)
    # if tf.random.uniform([]) < p:
    #     x = tf.image.central_crop(x, central_fraction=(random.uniform(0, 1) + 1 ) / 2) # 0.5-1
    x = tf.image.resize_with_pad(x, all_var_dict['target_shape'][1], all_var_dict['target_shape'][0])
    return x
@tf.function
def label_to_onehot(label):
    """Map a label string tensor to a one-hot vector of LABEL_NUM classes.

    Unknown labels get the lookup-table default index (-1).
    """
    label = all_var_dict['ok_lookup'].lookup(label)
    label = tf.one_hot(label, all_var_dict['LABEL_NUM'])
    # label = tf.cast(label, dtype=tf.float32)
    return label
@tf.function
def parse_func(path, label):
    """Map (path, label) to ({'image', 'label'} features, one-hot label) without augmentation."""
    onehot = label_to_onehot(label)
    features = {
        'image': parse_img(tf.io.read_file(path)),
        'label': onehot,
    }
    return features, onehot
@tf.function
def parse_func_with_aug(path, label):
    """Map (path, label) to ({'image', 'label'} features, one-hot label) with random augmentation."""
    onehot = label_to_onehot(label)
    features = {
        'image': random_aug_parse_img(tf.io.read_file(path)),
        'label': onehot,
    }
    return features, onehot
@tf.function
def parse_example(example_proto):
    """Deserialize a metadata TFRecord example into (path, label) string tensors."""
    feature_spec = {
        "path": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.string),
    }
    parsed = tf.io.parse_single_example(example_proto, feature_spec)
    return parsed["path"], parsed["label"]
# Basename shared by the TensorBoard log dir and the exported-model dir.
dir_basename = 'preprocessed_0121_conv'
# Central configuration/state bag handed to the training module.
all_var_dict = {
    'called_module': 'conv', # trans or conv
    'dir_basename': dir_basename,
    'base_tb_dir': f'/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tb_logs/{dir_basename}/',
    'base_h5_dir': f'/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/trained_h5/{dir_basename}/',
    'random_times': 300,
    'LOG_VERBOSE': False, # only print print places in code
    'RUN_ALL_VERBOSE': False, #
    'CACHE': True, # cache datasets in memory between epochs
    'MP_POLICY': False, # mixed-precision policy toggle
    'DISTRIBUTED': None, # Not in distributed mode, return specific strategy then means true
    # 'DISTRIBUTED': mirrored_strategy,
    # Error occurred when finalizing GeneratorDataset iterator: Cancelled: Operation was cancelled
    'EPOCH': 500, #
    'BATCH_SIZE': 64, # Resource exhausted: OOM with batch size:1024, 512
    # 'train_step': 700,
    'train_total_images': 25000, # cap applied to the sampled training stream
    # 'valid_step': 30,
    'shuffle_buffer': 10000, #
    # NOTE(review): consumed as (shape[1], shape[0]) by resize_with_pad — presumably (W, H, C); confirm.
    'target_shape': (640, 640, 1),
    # 'valid_size': 2000, #
    'split_ratio': 0.5, # fraction taken for training in split_to_train_valid
    'augment': True,
    'data_augmentation': None, # None if not defined
    'METRICS': METRICS,
    'LABEL_NUM': len(LABEL_CLASS_LIST), #
    'train_ds_list': [], # mutated in place by prepare_trainable_ds when tfrecords are parsed
    'val_ds_list': [], #
    'test_ds': [],
    'gan_ds_list': [],
    'tfrecords': tfrecords,
    'ok_lookup': label_lookup, #
    # 'hparams_list': HPARAMS_LIST,
    # 'initial_bias': np.log([pos/neg]),
    # 'class_weight': {0: (1 / neg)*(total)/2.0, 1: (1 / pos)*(total)/2.0}
    # 'degree_lookup': degree_lookup, #
    # 'DEGREE_NUM': len(DEGREE_CLASS_LIST),
}
# def test_ds_to_eva(test_ds, batch_size=all_var_dict['BATCH_SIZE']):
# return test_ds.map(parse_func, tf.data.experimental.AUTOTUNE).batch(batch_size)
def split_to_train_valid(ds, ratio):
    """Shuffle *ds* and split it into (take, skip) partitions.

    Returns a tuple: the first int(len * ratio) elements of the shuffled
    dataset, and the remainder.

    FIX: the original built a full index list ([i for i, _ in enumerate(ds)])
    just to read its last element; count the elements with a streaming sum
    instead, avoiding the throwaway list.
    """
    amount = sum(1 for _ in ds)
    amount_to_take = int(amount * ratio)
    shuffle_ds = ds.shuffle(amount)
    return shuffle_ds.take(amount_to_take), shuffle_ds.skip(amount_to_take)
def prepare_trainable_ds(train_ds_list=all_var_dict['train_ds_list'],
                         shuffle_buffer=all_var_dict['shuffle_buffer'],
                         val_ds_list=all_var_dict['val_ds_list'],
                         split_ratio=all_var_dict['split_ratio'],
                         tfrecords=all_var_dict['tfrecords'],
                         batch_size=all_var_dict['BATCH_SIZE'],
                         augment=all_var_dict['augment'],
                         cache=all_var_dict['CACHE'],
                         train_total_images=all_var_dict['train_total_images']
                         ):
    """Build the (train_ds, valid_ds) pair consumed by the trainer.

    Sources are pre-built datasets in train_ds_list and/or the metadata
    TFRecords. Validation data: none given -> carved from each training
    dataset by split_ratio; exactly one -> used directly; several ->
    concatenated. Training data is class-balanced by uniformly sampling
    across the per-class datasets, shuffled, optionally augmented, capped at
    train_total_images, then batched/prefetched (cached when cache=True).

    NOTE(review): the defaults alias mutable lists inside all_var_dict and the
    tfrecords branch appends into train_ds_list, so a second call re-appends
    the same datasets — confirm this is only called once per run.
    """
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    if train_ds_list == [] and tfrecords == []:
        # Nothing to train from.
        return None
    if tfrecords != []:
        # Parse each TFRecord shard into (path, label) tuples.
        for d in map(tf.data.TFRecordDataset, tfrecords):
            train_ds_list.append(d.map(parse_example, AUTOTUNE))
    if val_ds_list == []:
        # No explicit validation data: split every training dataset.
        tar_train_ds_list = []
        for i in range(len(train_ds_list)):
            splitted_take_ds, splitted_skip_ds = split_to_train_valid(train_ds_list[i], split_ratio)
            tar_train_ds_list.append(splitted_take_ds.repeat())
            if i==0:
                valid_ds = splitted_skip_ds
            else:
                valid_ds = valid_ds.concatenate(splitted_skip_ds)
        # print([i for i,_ in enumerate(valid_ds)][-1] + 1)
        # Uniform weights: each per-class dataset contributes equally.
        balanced_weights = [1/len(tar_train_ds_list) for t in tar_train_ds_list]
        train_ds = tf.data.experimental.sample_from_datasets(tar_train_ds_list, balanced_weights)
        # valid_ds = valid_ds.concatenate(tf.data.TFRecordDataset('/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/to_tune/valid.tfrecord').map(parse_example, AUTOTUNE))
    elif len(val_ds_list) == 1:
        tar_train_ds_list = [d.repeat() for d in train_ds_list]
        balanced_weights = [1/len(tar_train_ds_list) for t in tar_train_ds_list]
        train_ds = tf.data.experimental.sample_from_datasets(tar_train_ds_list, balanced_weights)
        valid_ds = val_ds_list[0]
    else:
        tar_train_ds_list = [d.repeat() for d in train_ds_list]
        balanced_weights = [1/len(tar_train_ds_list) for t in tar_train_ds_list]
        train_ds = tf.data.experimental.sample_from_datasets(tar_train_ds_list, balanced_weights)
        # Several validation datasets: chain them into one.
        for i in range(len(val_ds_list)):
            if i==0:
                valid_ds = val_ds_list[i]
            else:
                valid_ds = valid_ds.concatenate(val_ds_list[i])
    if augment:
        train_ds = train_ds.shuffle(shuffle_buffer).map(parse_func_with_aug, num_parallel_calls=AUTOTUNE)
    else:
        train_ds = train_ds.shuffle(shuffle_buffer).map(parse_func, num_parallel_calls=AUTOTUNE)
    # Cap the (otherwise infinite, repeated) sampled stream.
    train_ds = train_ds.take(int(train_total_images))
    if cache:
        train_ds = train_ds.cache().batch(batch_size).prefetch(AUTOTUNE)
        valid_ds = valid_ds.map(parse_func, num_parallel_calls=AUTOTUNE).cache().batch(batch_size).prefetch(AUTOTUNE)
    else:
        train_ds = train_ds.batch(batch_size).prefetch(AUTOTUNE)
        valid_ds = valid_ds.map(parse_func, num_parallel_calls=AUTOTUNE).batch(batch_size).prefetch(AUTOTUNE)
    return train_ds, valid_ds
# to tfrecord: helpers that serialize (path, label) metadata plus the image
# bytes into tf.train.Example records.
def _bytes_feature(value):
    """Returns a bytes_list from a string / byte."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))


def _int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))


def parse_metadata_and_serialize_example(path, label):
    """Eagerly read the image at *path* and serialize (path, image, label)
    into one tf.train.Example byte string.

    Runs as a py_function body, hence the .numpy() calls on the tensors.
    """
    image = tf.io.read_file(path)
    features = {
        "path": _bytes_feature([path.numpy()]),
        "image": _bytes_feature([image.numpy()]),
        "label": _bytes_feature([label.numpy()]),
    }
    example_proto = tf.train.Example(
        features=tf.train.Features(feature=features))
    return example_proto.SerializeToString()


def tf_serialize_example(path, label):
    """Graph-compatible wrapper around the eager serializer above."""
    tf_string = tf.py_function(
        parse_metadata_and_serialize_example,
        (path, label),
        tf.string)
    return tf.reshape(tf_string, ())  # force a scalar string


def generate_training_tfrecord(dataset, export_path):
    """Serialize every (path, label) element of *dataset* into one TFRecord file."""
    dataset = dataset.map(tf_serialize_example, tf.data.experimental.AUTOTUNE)
    writer = tf.data.experimental.TFRecordWriter(export_path)
    writer.write(dataset)
def split_ds_by_ratio_tfrecord(
        train_ds_list=all_var_dict['train_ds_list'],
        split_ratio=all_var_dict['split_ratio'],
        tfrecords=all_var_dict['tfrecords'],
        ):
    """Split every source dataset by *split_ratio* and persist the combined
    halves as to_train/train.tfrecord and to_train/valid.tfrecord.

    Returns None when there is no input. Like prepare_trainable_ds, the
    tfrecords branch appends into the shared train_ds_list default.
    """
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    if train_ds_list == [] and tfrecords == []:
        return None
    if tfrecords != []:
        for d in map(tf.data.TFRecordDataset, tfrecords):
            train_ds_list.append(d.map(parse_example, AUTOTUNE))
    # Concatenate the per-class train/valid halves into two flat datasets.
    for i in range(len(train_ds_list)):
        splitted_take_ds, splitted_skip_ds = split_to_train_valid(train_ds_list[i], split_ratio)
        if i==0:
            valid_ds = splitted_skip_ds
            train_ds = splitted_take_ds
        else:
            valid_ds = valid_ds.concatenate(splitted_skip_ds)
            train_ds = train_ds.concatenate(splitted_take_ds)
    # valid_ds = valid_ds.concatenate(tf.data.TFRecordDataset('/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/to_tune/valid.tfrecord').map(parse_example, AUTOTUNE))
    generate_training_tfrecord(train_ds, '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/to_train/train.tfrecord')
    generate_training_tfrecord(valid_ds, '/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/to_train/valid.tfrecord')
# saved tfrecord to train_ds, valid_ds: parsers for the records written by
# split_ds_by_ratio_tfrecord, which embed the image bytes directly.
@tf.function
def parse_example_from_cache_tfrecord(example_proto):
    """Parse a cached (path, image, label) example into ({'image','label'}, label), no augmentation."""
    image_feature_description = {
        "path": tf.io.FixedLenFeature([], tf.string),
        "image": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.string),
    }
    features_in_example = tf.io.parse_single_example(example_proto, image_feature_description)
    features = {
        'image': parse_img(features_in_example['image']),
        'label': label_to_onehot(features_in_example['label']),
    }
    label = features['label']
    return features, label


@tf.function
def aug_parse_example_from_cache_tfrecord(example_proto):
    """Same as parse_example_from_cache_tfrecord but with random augmentation on the image."""
    image_feature_description = {
        "path": tf.io.FixedLenFeature([], tf.string),
        "image": tf.io.FixedLenFeature([], tf.string),
        "label": tf.io.FixedLenFeature([], tf.string),
    }
    features_in_example = tf.io.parse_single_example(example_proto, image_feature_description)
    features = {
        'image': random_aug_parse_img(features_in_example['image']),
        'label': label_to_onehot(features_in_example['label']),
    }
    label = features['label']
    return features, label
def from_tfrecord_to_train_valid(
        shuffle_buffer=all_var_dict['shuffle_buffer'],
        batch_size=all_var_dict['BATCH_SIZE'],
        augment=all_var_dict['augment'],
        cache=all_var_dict['CACHE'],
        # train_total_images=all_var_dict['train_total_images'],
        ):
    """Load the pre-split to_train/{train,valid}.tfrecord files and return
    (train_ds, valid_ds) ready for model.fit.

    The training stream repeats indefinitely; the caller is expected to bound
    it with steps_per_epoch. Validation is parsed without augmentation.
    """
    AUTOTUNE = tf.data.experimental.AUTOTUNE
    train_ds = tf.data.TFRecordDataset('/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/to_train/train.tfrecord')
    valid_ds = tf.data.TFRecordDataset('/data/aoi-wzs-p1-dip-fa-nvidia/label_heatsink_screw/tfrecord/to_train/valid.tfrecord')
    if augment:
        train_ds = train_ds.shuffle(shuffle_buffer).map(aug_parse_example_from_cache_tfrecord, AUTOTUNE).repeat()
    else:
        train_ds = train_ds.shuffle(shuffle_buffer).map(parse_example_from_cache_tfrecord, AUTOTUNE).repeat()
    # train_ds = train_ds.take(int(train_total_images))
    if cache:
        train_ds = train_ds.cache().batch(batch_size).prefetch(AUTOTUNE)
        valid_ds = valid_ds.shuffle(shuffle_buffer).map(parse_example_from_cache_tfrecord, AUTOTUNE).cache().batch(batch_size).prefetch(AUTOTUNE)
    else:
        train_ds = train_ds.batch(batch_size).prefetch(AUTOTUNE)
        valid_ds = valid_ds.shuffle(shuffle_buffer).map(parse_example_from_cache_tfrecord, AUTOTUNE).batch(batch_size).prefetch(AUTOTUNE)
    return train_ds, valid_ds
all_var_dict.update({
    # 'prepare_function': from_tfrecord_to_train_valid,
    'prepare_function': prepare_trainable_ds,  # dataset-building entry point used by the trainer
    # 'test_ds_to_eva': test_ds_to_eva,
})

if __name__ == "__main__":
    # split_ds_by_ratio_tfrecord()
    if all_var_dict['called_module'] == 'conv':
        conv_training.main(all_var_dict)
    elif all_var_dict['called_module'] == 'trans':
        # BUG FIX: the top-level import of transfer_training is commented out,
        # so this branch previously raised NameError. Import it lazily here.
        import module.transfer_training as transfer_training
        transfer_training.main(all_var_dict)
# Generated by Django 2.2.10 on 2020-03-11 17:15
from django.db import migrations
class Migration(migrations.Migration):
    """No-op merge migration: unifies the 0013 and 0015 branches of the
    recruitment app's migration graph without any schema changes."""

    dependencies = [
        ("recruitment", "0013_add_job_application_url_query"),
        ("recruitment", "0015_job_alert_task_started_auto_timestamp"),
    ]

    operations = []
|
import requests
from bs4 import BeautifulSoup
def consulta_asegurado(nro_cic):
    """Query the IPS (Paraguay) insured-person service for *nro_cic*.

    Returns {"Titular": {...}, "Patronales": [{...}, ...]} scraped from the
    response tables, or None on any failure (errors are printed, preserving
    the original best-effort behavior).

    FIX: the requests.Session is now closed via a context manager instead of
    being leaked.
    """
    def clean_data(data):
        # Collapse newlines/tabs and surrounding whitespace from a table cell.
        return data.get_text().replace('\n', '').replace('\t', '').strip()
    url = 'https://servicios.ips.gov.py/consulta_asegurado/comprobacion_de_derecho_externo.php'
    form_data = {'nro_cic': str(nro_cic), 'recuperar': 'Recuperar', 'elegir': '', 'envio':'ok'}
    try:
        with requests.Session() as session:
            response = session.post(
                url,
                data=form_data,
                timeout=10,
                headers={'user-agent': 'Mozilla/5.0'},
                verify=True
            )
        soup = BeautifulSoup(response.text, "html.parser")
        # Second table on the form: the insured person's own data.
        table = soup.select('form > table')[1]
        head = table.select('th')
        data_row = table.select('td')
        titular = dict(zip(map(clean_data, head), map(clean_data,data_row)))
        # Third table: one row per employer ("patronal").
        table = soup.select('form > table')[2]
        head = table.select('th')
        data_row = table.select('tr')
        patronales = []
        for i in range(1, len(data_row)):  # skip the header row
            patronales.append(dict(zip(map(clean_data, head), map(clean_data,data_row[i].select('td')))))
        return {
            "Titular": titular,
            "Patronales": patronales
        }
    except requests.ConnectionError:
        print("Connection Error")
    except Exception as e:
        # Broad catch kept deliberately: any scrape/parse failure yields None.
        print(e)
if __name__ == '__main__':
    # Smoke test with a dummy CIC number; prints None when the lookup fails.
    data = consulta_asegurado(1234567)
    print(data)
import re, folium, math
def creategeojson(point, dummy):
    """Create a GeoJSON file for *point* from the template file *dummy*.

    Every "XXX" placeholder in the template is replaced with str(point); the
    result is written to ./temp/<id(point)>.geojson.
    (Comments translated from German.)
    """
    name = str(id(point))
    # Read the template GeoJSON.
    with open(dummy) as template_file:
        template = template_file.read()
    # Substitute the placeholder with the point's coordinates.
    filled = re.sub("XXX", str(point), template)
    # Save as this point's own GeoJSON file.
    with open('./temp/' + name + '.geojson', 'w') as out_file:
        out_file.write(filled)
def addlayer(point, layer, maplayer):
    """Add the GeoJSON previously generated for *point* to *layer*, then
    attach *layer* to *maplayer*. (Comment translated from German.)

    FIX: the original leaked the open file handle; use a context manager.
    """
    geojson_path = "./temp/" + str(id(point)) + ".geojson"
    with open(geojson_path) as geojson_file:
        layer.add_child(folium.GeoJson(geojson_file.read()))
    layer.add_to(maplayer)
def newpointcore(point, x, windspeed, winddirection, steps):
    """Append the next point of the core line to *point*, advancing from
    point[x] by windspeed * steps * 60 metres along winddirection.
    (Comment translated from German: "create the core line".)

    NOTE(review): 6137000 looks like a typo for Earth's radius 6371000 m —
    kept as-is for consistency with the sibling helpers.
    """
    meters_to_degrees = 180 / (math.pi * 6137000)
    travel = windspeed * steps * 60
    heading = math.radians(winddirection)
    next_lon = point[x][0] + meters_to_degrees * math.cos(heading) \
        / math.cos(point[x][1] * math.pi / 180) * travel
    next_lat = point[x][1] + meters_to_degrees * math.sin(heading) * travel
    point.append([next_lon, next_lat])
def newpointpoly(point, x, windspeed, winddirection, degreesplit, target1, target2, steps):
    """Extend the two polygon edge lists (translated from German: "create the polygons").

    Adds one point rotated +degreesplit off the wind direction to *target1*
    and the mirrored (-degreesplit) point to *target2*; each list is then
    closed with the next core-line point point[x+1]. For x >= 1 the previous
    closing point is removed first so the polygon stays a single ring.
    """
    newpointlon1 = point[x][0] + (180 / (math.pi * 6137000)) * math.cos(math.radians(winddirection + degreesplit)) \
        / math.cos(point[x][1] * math.pi/180) * (windspeed * steps * 60) # * math.cos(lenghtcorrection(degreesplit))
    # NOTE(review): debug print left in place.
    print(math.cos(math.radians(winddirection+degreesplit)))
    newpointlat1 = point[x][1] + (180 / (math.pi * 6137000)) * math.sin(math.radians(winddirection + degreesplit)) \
        * (windspeed * steps * 60) # * math.sin(lenghtcorrection(degreesplit))
    if x >= 1:
        # Drop the previous closing point before appending the new pair.
        del target1[-1]
    target1.append([newpointlon1, newpointlat1])
    target1.append([point[x+1][0], point[x+1][1]])
    # The mirrored polygon (translated from German).
    newpointlon2 = point[x][0] + (180 / (math.pi * 6137000)) * math.cos(math.radians(winddirection - degreesplit)) \
        / math.cos(point[x][1] * math.pi / 180) * (windspeed * steps * 60) # * math.cos(lenghtcorrection(degreesplit))
    newpointlat2 = point[x][1] + (180 / (math.pi * 6137000)) * math.sin(math.radians(winddirection - degreesplit)) \
        * (windspeed * steps * 60) # * math.sin(lenghtcorrection(degreesplit))
    if x >= 1:
        del target2[-1]
    target2.append([newpointlon2, newpointlat2])
    target2.append([point[x+1][0], point[x+1][1]])
def createangle(input, distance):
    """Return the angle (degrees) whose opposite side is *input* and whose
    hypotenuse is sqrt(input^2 + distance^2).

    (Parameter name `input` shadows the builtin but is kept for callers.)
    """
    hypotenuse = math.sqrt(input * input + distance * distance)
    return math.degrees(math.asin(input / hypotenuse))
def lenghtcorrection(alpha):
    """Return the length-correction factor 1 / sin(90° - alpha).

    (The misspelled name is kept — callers reference it as-is.)
    """
    complement = math.radians(90 - alpha)
    return 1.0 / math.sin(complement)
def distancepoints(x1, y1, x2, y2):
    """Great-circle distance in metres between (lon x1, lat y1) and
    (lon x2, lat y2) via the spherical law of cosines (radius 6137000 m,
    matching the sibling helpers)."""
    lat1 = math.radians(y1)
    lat2 = math.radians(y2)
    central = math.sin(lat1) * math.sin(lat2) \
        + math.cos(lat1) * math.cos(lat2) * math.cos(math.radians(x2 - x1))
    return 6137000 * math.acos(central)
|
from django.contrib import admin
from .models import Movie, Cinema, Show
# Register your models here.
# Expose the cinema app's models in the Django admin with the default ModelAdmin.
admin.site.register(Movie)
admin.site.register(Cinema)
admin.site.register(Show)
|
import numpy as np
# read in binary file
# numbers.dat holds raw extended-precision floats ("longdouble").
# NOTE(review): the first element is skipped ([1:]) — presumably a header/count
# entry; confirm against the file's producer.
numbers = np.fromfile("numbers.dat", dtype="longdouble")[1:]
result1 = np.longdouble(0)
# Accumulate only the finite values (NaN/inf filtered out), in memory order ("K").
for item in np.nditer(numbers[np.isfinite(numbers)], order="K"):
    result1 += item
print(result1)
|
#!venv/bin/python
from flask_frozen import Freezer
from flask import Flask, render_template, send_from_directory
from flask_bootstrap import Bootstrap
from flask_script import Manager
from flask_moment import Moment
from glob import glob
app = Flask(__name__, static_folder='static', static_url_path='/static')
Bootstrap(app)  # flask-bootstrap template/asset integration
Moment(app)  # client-side timestamp rendering helpers
manager = Manager(app)  # flask-script CLI runner (used by the __main__ guard)
freezer = Freezer(app)  # frozen-flask static-site generator
@app.route('/')
def index():
    """Site landing page."""
    return render_template('index.html')

@app.route('/random/')
def random_page():
    """Random-content page."""
    return render_template('random.html')

@app.route('/replication/')
def replication():
    """Replication-materials page."""
    return render_template('replication.html')

@app.route('/review/')
def review():
    """Review index: lists every reviews/*.txt file (glob order is OS-dependent)."""
    names = glob('reviews/*.txt')
    return render_template('review.html', names=names)
@app.route('/docs/<postname>')
def docs_post(postname):
    """Serve a file from the docs/ directory (send_from_directory validates the path)."""
    return send_from_directory('docs', postname)

@app.route('/reviews/<postname>')
def review_post(postname):
    """Render a single review file.

    FIX: the original used open(...).read() and leaked the file handle; read
    through a context manager instead.
    NOTE(review): 'reviews/' + postname concatenates user input into the path;
    Flask's default converter rejects '/', but consider safe-join validation.
    """
    with open('reviews/' + postname) as review_file:
        content = review_file.read()
    return render_template('review_post.html', content=content)
@app.route('/timeTable/')
def math_timeTable():
    """Times-table practice app."""
    return render_template("timeTable_templates/index.html")

@app.route('/2048/')
def game_2048():
    """2048 game."""
    return render_template("2048_templates/index.html")

@app.route("/shooter/")
def game_shooter():
    """Shooter game."""
    return render_template("shooter_templates/index.html")

@app.route("/snake/")
def game_snake():
    """Snake game."""
    return render_template("snake_templates/index.html")
@app.route('/blog/')
def blog():
    """Blog index."""
    return render_template('blog_templates/index.html')

@app.route('/blog/about/')
def blog_about():
    """Blog about page."""
    return render_template('blog_templates/about.html')

@app.route('/blog/post/<postname>')
def blog_post(postname):
    """Render a blog post template chosen by URL name.

    NOTE(review): postname is user-supplied and resolves only within the
    templates folder; a missing name presumably surfaces as a 500 — confirm.
    """
    return render_template('blog_templates/' + postname + '.html')

@app.route('/blog/contact/')
def blog_contract():
    """Blog contact page (endpoint name keeps the original 'contract' typo)."""
    return render_template('blog_templates/contact.html')

@app.route('/blog/mail/contact_me.php', methods = ['POST'])
def run_php():
    """Serve the contact-form PHP file from the static folder.

    NOTE(review): this returns the PHP source rather than executing it —
    presumably a shim for the frozen/static deployment; confirm.
    """
    return send_from_directory(app.static_folder, 'blog_static/mail/contact_me.php')
@app.route('/papers/<papername>')
def papers(papername):
    """Serve a paper PDF from static/papers."""
    return send_from_directory('static/papers', papername)

@app.route('/cv/')
def cv():
    """Serve the CV PDF (filename hard-coded to the 2020 version)."""
    return send_from_directory('static', 'resume2020.pdf')

@app.route('/googlee07c61c8e4157065.html')
def google_crawl():
    """Google Search Console site-verification page."""
    return render_template('googlee07c61c8e4157065.html')
if __name__ == '__main__':
    # Run the flask-script manager CLI; swap for freezer.freeze() to emit the static site.
    manager.run()
    # freezer.freeze()
|
%sql
-- describe database new_schema
-- Create the schema only if it does not already exist; COMMENT documents it in the metastore.
CREATE DATABASE IF NOT EXISTS new_schema COMMENT 'This is <your_new_schema> database'
import pytest
import os
from selenium import webdriver
from nerodia.browser import Browser
TEST_APP_URL = "https://www.saucedemo.com"  # Sauce Labs demo app under test

@pytest.fixture
def browser(request):
    """Yield a nerodia Browser backed by a remote Sauce Labs Chrome session.

    Requires SAUCE_USERNAME / SAUCE_ACCESS_KEY in the environment (raises
    KeyError otherwise). The session is quit in teardown after each test.
    """
    caps = {
        "browserName": "Chrome",
        "sauce:options": {
            "browserName": "Chrome",
            "platformName": "Windows 10",
            "browserVersion": "latest"
        }
    }
    username = os.environ['SAUCE_USERNAME']
    access_key = os.environ['SAUCE_ACCESS_KEY']
    remote_url = 'https://{}:{}@ondemand.saucelabs.com/wd/hub/'.format(username, access_key)
    remote = webdriver.Remote(command_executor=remote_url, desired_capabilities=caps)
    driver = Browser(browser=remote, desired_capabilities=caps)
    driver.goto(TEST_APP_URL)
    yield driver
    # Teardown: runs after the test body completes (pass or fail).
    driver.quit()
def test_title(browser):
    """The demo app exposes 'Swag Labs' in the document title."""
    assert "Swag Labs" in browser.title

def test_error(browser):
    """Submitting the login form empty shows the error button.

    NOTE(review): text_field(className="btn_action") locates the submit button
    through a text-field locator — verify the element type matches the page.
    """
    elem = browser.text_field(className="btn_action")
    elem.click()
    assert browser.button(".error-button").is_displayed()

def test_login(browser):
    """Valid demo credentials land on a page showing the shopping-cart container."""
    browser.text_field(id="user-name").send_keys("standard_user")
    browser.text_field(id="password").send_keys("secret_sauce")
    browser.button(className="btn_action").click()
    assert browser.element(".shopping_cart_container").is_displayed()
|
#!/usr/bin/env python3
"""
Script permettant de crée nos modeles
"""
__author__ = "Casa de Crypto"
__build__ = "Casa de Crypto"
__copyright__ = "Copyleft 2018 - Casa de Crypto"
__license__ = "GPL"
__title__ = "Machine Learning pour la prediction du cours du Bitcoin"
__version__ = "1.0.0"
__maintainer__ = "Casa de Crypto"
__email__ = "casa-de-crypto@gmail.com"
__status__ = "Production"
__credits__ = "LOUNIS, BOUKHEMKHAM, BIREM, SUKUMAR, GHASAROSSIAN"
import sqlite3 #SQLITE pour la base de donnée
import pandas as pd #Lire dans la base de donnée
import multiprocessing #Pour effectuer la creation de nos modeles en même temps grace au multi tasking
from sklearn.model_selection import train_test_split
from tpot import TPOTClassifier, TPOTRegressor
#Fonction qui crée le model pour un type d'apprentissage supervisé donner
def models_building(type_):
    """Build and export a TPOT model per cryptocurrency symbol.

    type_: "classifier" (predict the increased 0/1 flag) or "regressor"
    (predict the price). Exports one '<type_>_<symbol>.py' pipeline per coin.
    (Comments translated from French.)

    FIXES: unknown type_ now raises ValueError immediately (previously it
    caused a NameError deep in the loop); the DB connection is closed in a
    finally block; the per-symbol query is parameterized instead of string-
    formatted.
    """
    if type_ == "classifier":
        model_cls = TPOTClassifier
    elif type_ == "regressor":
        model_cls = TPOTRegressor
    else:
        raise ValueError("type_ must be 'classifier' or 'regressor', got {!r}".format(type_))
    conn = sqlite3.connect("cryptodata.db")  # connect to the database
    try:
        cursor = conn.cursor()
        coins = cursor.execute("SELECT symbol FROM crypto;")  # list of coin symbols
        for symbol, in coins:
            # Load this coin's logbook rows (parameterized query).
            data = pd.read_sql(
                "SELECT price, price_ave, increased, volume, google_trend, twitter_sent FROM logbook WHERE symbol=?;",
                conn, params=(symbol,))
            # Feature matrix X.
            X = data[["price_ave", "volume", "google_trend", "twitter_sent"]]
            # Target y: the 0/1 increase flag for classification, the price for regression.
            if type_ == "classifier":
                y = data[["increased"]]
            else:
                y = data[["price"]]
            # Random train/test split: 80% training, 20% testing.
            X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.80, test_size=0.20)
            # Verbosity 3 for full TPOT progress output.
            tpot = model_cls(verbosity=3)
            tpot.fit(X_train, y_train.values.ravel())
            # Export the best pipeline as a standalone script.
            if type_ == "classifier":
                tpot.export('classifier_{}.py'.format(symbol))
            else:
                tpot.export('regressor_{}.py'.format(symbol))
    finally:
        conn.close()
#Fonction main
# Main entry point (comments translated from French).
if __name__ == '__main__':
    # Build the classifier models in their own process so both model families
    # for all cryptocurrencies train concurrently.
    classifier = multiprocessing.Process(target = models_building, args=("classifier",))
    # Same for the regression models.
    regressor = multiprocessing.Process(target = models_building, args=("regressor",))
    # Start both processes.
    classifier.start()
    regressor.start()
#!/usr/bin/env python
# coding: utf-8
# In[10]:
from sklearn.preprocessing import LabelEncoder
# In[11]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[12]:
# importing the dataset
dataset = pd.read_csv("Dataset.csv")
# Encode the Date column to integer codes so it can be used numerically.
le = LabelEncoder()
dataset['Date'] = le.fit_transform(dataset['Date'])
X = dataset.iloc[:, 1:6].values
y = dataset.iloc[:, 6:7].values

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
# BUG FIX: the test set must be scaled with the training-set statistics; the
# original called fit_transform here, silently re-fitting on the test data.
X_test = sc_X.transform(X_test)

from sklearn.svm import SVR
regressor = SVR(kernel='rbf')
regressor.fit(X_train, np.ravel(y_train, order='C'))

y_pred = regressor.predict(X_test)

# BUG FIX: the original tested `y_pred.all() < 2.5`, which compares one boolean
# to 2.5 and is always True, so every prediction was rounded down. Round each
# prediction elementwise instead: down below 2.5, up otherwise.
y_pred = np.where(y_pred < 2.5, np.round(y_pred - 0.5), np.round(y_pred + 0.5))

# BUG FIX: y_test has shape (n, 1) while y_pred has shape (n,), so the original
# subtraction broadcast to an (n, n) matrix before .mean(). Flatten first.
y_true = y_test.ravel()
df1 = (y_pred - y_true) / y_true
df1 = round(df1.mean() * 100, 2)
print("Error = ", df1, "%")
a = 100 - df1
print("Accuracy= ", a, "%")

y_test

y_pred
|
from IBMQuantumExperience import IBMQuantumExperience
from IBMQuantumExperience import ApiError # noqa
import helper
import sys
import os
import Qconfig
from pprint import pprint
verbose = False
if 'CK_IBM_VERBOSE' in os.environ:
    # Any positive CK_IBM_VERBOSE value enables verbose output.
    verbose = int(os.environ['CK_IBM_VERBOSE']) > 0

# to fix via ck
mytoken = Qconfig.API_TOKEN
cloud_frontend = 'https://quantumexperience.ng.bluemix.net/api'
api = IBMQuantumExperience(mytoken, config={'url': cloud_frontend}, verify=True)

# Exec: known backends; index 0 is the fallback when the requested one is absent.
_device_list = ['ibmqx5', 'ibmqx4', 'ibmqx_hpc_qasm_simulator', 'ibmqx2', 'ibmqx_qasm_simulator', 'local_unitary_simulator', 'local_qasm_simulator']
# number of repetitions
shots = 1
# timeout in sec
_tout = 1200
# device
device = ""
available_backends = api.available_backends()
if 'CK_IBM_BACKEND' in os.environ:
    device = os.environ['CK_IBM_BACKEND']
if 'CK_IBM_REPETITION' in os.environ:
    # FIX: environment values are strings; convert to int for consistency.
    shots = int(os.environ['CK_IBM_REPETITION'])
if 'CK_IBM_TIMEOUT' in os.environ:
    _tout = int(os.environ['CK_IBM_TIMEOUT'])

# Fall back to the first known device when the requested backend is unavailable.
found = False
for backend in available_backends:
    if verbose: print(backend['name'])
    if backend['name'] == device:
        found = True
if not found:
    device = _device_list[0]
if verbose: print(api.backend_status(device))
if verbose: print(api.get_my_credits())

# get qasm code to manage via ck too
# api.run_experiment(qasm, device, shots, name=None, timeout)
lc = api.get_last_codes()
# if verbose: print(lc)

limit = 5
my_jobs = api.get_jobs(limit)
njobs = len(my_jobs)
print(njobs)

# Collect the execution ids of the most recent jobs and print their results.
exec_ids = []
# BUG FIX: guard against fewer than 4 returned jobs (the original indexed
# my_jobs[0..3] unconditionally and raised IndexError).
for i in range(min(4, njobs)):
    for qasm_entry in my_jobs[i]['qasms']:
        exec_ids.append(qasm_entry['executionId'])
for exec_id in exec_ids:
    print(api.get_result_from_execution(exec_id))
|
class Solution(object):
    def openLock(self, deadends, target):
        """Return the minimum number of single-wheel rotations to reach *target*
        from "0000" without ever landing on a deadend, or -1 if unreachable.

        BFS over the 10^4 lock states.
        FIXES: deadends is converted to a set (the original scanned a list per
        dequeued state, O(n) each time); deadend states are pruned at enqueue
        instead of being queued and skipped; the start state is marked visited.
        """
        dead = set(deadends)
        if "0000" in dead:
            # The original returned 0 when the start equals the target even if
            # it is a deadend (target check preceded the deadend check).
            return 0 if target == "0000" else -1

        def neighbors(key):
            # All 8 states one rotation away (each wheel +/-1, wrapping mod 10).
            result = []
            for i in range(4):
                digit = int(key[i])
                for delta in (-1, 1):
                    result.append(key[:i] + str((digit + delta) % 10) + key[i + 1:])
            return result

        from collections import deque
        queue = deque([("0000", 0)])
        visited = {"0000"}
        while queue:
            cur, cnt = queue.popleft()
            if cur == target:
                return cnt
            for nxt in neighbors(cur):
                if nxt not in visited and nxt not in dead:
                    visited.add(nxt)
                    queue.append((nxt, cnt + 1))
        return -1
# Demo run on the LeetCode sample input (expected shortest path: 6 turns).
sample_deadends = ["0201", "0101", "0102", "1212", "2002"]
sample_target = "0202"
print(Solution().openLock(sample_deadends, sample_target))
import glob
import argparse
import re
import collections
import itertools
import pandas as pd
import pathlib

parser = argparse.ArgumentParser()

# Raw strings: the previous plain strings were full of regex escapes
# (\|, \s, \d) that are invalid escape sequences for Python itself and
# emit warnings (errors in future versions).
REGEX_DICT = {
    "CLB": r"\| CLB LUTs\*\s+\|.*?\|.*?\|.*?\|\s+(?P<CLB>.*?)\s+\|",
    "DSP": r"\| DSPs\s+\|.*?\|.*?\|.*?\|\s+(?P<DSP>.*?)\s+\|",
    "CYC": r"Design ran for (?P<CYC>\d+) cycles"
}

parser.add_argument("files", type=str)                     # glob pattern of report files
parser.add_argument("-fields", type=str, nargs="+", action="append")
parser.add_argument("-output", type=pathlib.Path)          # CSV file to create or extend
parser.add_argument("-lstrip_parts", type=int, default=0)  # path components dropped from the left
parser.add_argument("-rstrip_parts", type=int, default=0)  # path components dropped from the right
args = parser.parse_args()

# -fields may be repeated on the command line; flatten the nested lists.
fields = list(itertools.chain(*args.fields))
regexes = [re.compile(REGEX_DICT[k]) for k in fields]

# filename -> {field name: extracted value}
results = collections.defaultdict(dict)
for filename in glob.glob(args.files):
    with open(filename) as f:
        for line in f:
            # NOTE(review): .match() anchors at the start of the line —
            # confirm the report lines are not indented.
            matches = [regex.match(line) for regex in regexes]
            for match in filter(bool, matches):
                results[filename].update(match.groupdict())

# Transpose to {field: {cleaned filename: value}} so the DataFrame has
# one column per field and one row per file.
transposed = collections.defaultdict(dict)
for fname, data in results.items():
    parts = pathlib.Path(fname).parts
    if args.lstrip_parts:
        parts = parts[args.lstrip_parts:]
    if args.rstrip_parts:
        parts = parts[:-args.rstrip_parts]
    cleaned_fname = str(pathlib.Path(*parts))
    for key, value in data.items():
        transposed[key][cleaned_fname] = value

if args.output.exists():
    # Extend the existing CSV: merge only the genuinely new columns,
    # joining on the "index" column written by reset_index below.
    old_df = pd.read_csv(args.output)
    new_df = pd.DataFrame(transposed)
    new_df.reset_index(inplace=True)
    cols_to_use = list(set(new_df.columns.difference(old_df.columns).to_list() + ["index"]))
    df = pd.merge(old_df, new_df[cols_to_use], on="index")
else:
    df = pd.DataFrame(transposed)
    df.reset_index(inplace=True)
df.to_csv(args.output, index=False)
|
import os
import boto3

# Module-level S3 resource; region is hard-coded to us-east-1.
s3_resource = boto3.resource("s3", region_name="us-east-1")

def upload_objects():
    """Recursively upload everything under root_path to the bucket,
    mirroring the local layout under the 'test_folder' key prefix.

    The <<Bucket-Name>> / <<Path-of-the-folder>> placeholders must be filled
    in before running.  Any error is printed, not raised (best-effort upload).
    """
    try:
        bucket_name = "<<Bucket-Name>>"
        root_path = 'C:/<<Path-of-the-folder>>'
        my_bucket = s3_resource.Bucket(bucket_name)
        for path, subdirs, files in os.walk(root_path):
            path = path.replace("\\", "/")  # normalise Windows separators for S3 keys
            # Swap the local root for the destination prefix to build the key.
            directory_name = path.replace(root_path, "test_folder")
            for file in files:
                my_bucket.upload_file(os.path.join(
                    path, file), directory_name+'/'+file)
    except Exception as err:
        print(err)

if __name__ == '__main__':
    upload_objects()
|
# Generated by Django 3.1.2 on 2020-10-29 20:58
from django.db import migrations, models

class Migration(migrations.Migration):
    """Auto-generated: add a nullable 'date' field to the 'projects' model."""

    dependencies = [
        ('s4in', '0010_projects_shortdescription'),
    ]
    operations = [
        migrations.AddField(
            model_name='projects',
            name='date',
            field=models.DateField(null=True, verbose_name='Proje tarihi'),
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated (Django 1.x style): wire up all ForeignKey fields of the
    'forms' app models to their targets in 'main', 'forms' and the user model.
    Do not edit by hand beyond what makemigrations produced."""

    dependencies = [
        ('main', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('forms', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='isettings',
            name='insurance',
            field=models.ForeignKey(to='main.RegInsurance'),
        ),
        migrations.AddField(
            model_name='hsschedule',
            name='category',
            field=models.ForeignKey(to='forms.HSCategory'),
        ),
        migrations.AddField(
            model_name='hsschedule',
            name='insurance',
            field=models.ForeignKey(to='main.RegInsurance'),
        ),
        migrations.AddField(
            model_name='hscodes',
            name='category',
            field=models.ForeignKey(to='forms.HSCategory'),
        ),
        migrations.AddField(
            model_name='hscategory',
            name='section',
            field=models.ForeignKey(to='forms.HSSection'),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='agent',
            field=models.ForeignKey(to='main.RegAgents', null=True),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='bank',
            field=models.ForeignKey(to='main.RegBank', null=True),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='broker',
            field=models.ForeignKey(to='main.RegBroker', null=True),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='consolidator',
            field=models.ForeignKey(to='main.RegConsolidators', null=True),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='country',
            field=models.ForeignKey(to='main.RegCountry'),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='created_by',
            field=models.ForeignKey(related_name='order_creator', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='dest_port',
            field=models.ForeignKey(related_name='dest_port', to='main.RegPorts'),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='insurance',
            field=models.ForeignKey(to='main.RegInsurance'),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='origin_port',
            field=models.ForeignKey(to='main.RegPorts'),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='person',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='customerorders',
            name='vessel',
            field=models.ForeignKey(to='main.RegVessel', null=True),
        ),
        migrations.AddField(
            model_name='customerinvoice',
            name='approved_by',
            field=models.ForeignKey(related_name='invoice_approver', to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AddField(
            model_name='customerinvoice',
            name='created_by',
            field=models.ForeignKey(related_name='invoice_creator', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='customerinvoice',
            name='insurance',
            field=models.ForeignKey(to='main.RegInsurance'),
        ),
        migrations.AddField(
            model_name='customerinvoice',
            name='orders',
            field=models.ForeignKey(to='forms.CustomerOrders'),
        ),
        migrations.AddField(
            model_name='customerinvoice',
            name='person',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='customergoods',
            name='created_by',
            field=models.ForeignKey(related_name='created_by', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='customergoods',
            name='goods',
            field=models.ForeignKey(to='forms.HSCodes'),
        ),
        migrations.AddField(
            model_name='customergoods',
            name='orders',
            field=models.ForeignKey(to='forms.CustomerOrders'),
        ),
        migrations.AddField(
            model_name='customergoods',
            name='person',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='bondsapplication',
            name='approved_by',
            field=models.ForeignKey(related_name='bond_approver', to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AddField(
            model_name='bondsapplication',
            name='bond',
            field=models.ForeignKey(to='forms.BondsType'),
        ),
        migrations.AddField(
            model_name='bondsapplication',
            name='client',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='bondsapplication',
            name='created_by',
            field=models.ForeignKey(related_name='bond_creator', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='bondsapplication',
            name='insurance',
            field=models.ForeignKey(to='main.RegInsurance'),
        ),
        migrations.AddField(
            model_name='bondsapplication',
            name='validated_by',
            field=models.ForeignKey(related_name='bond_validator', to=settings.AUTH_USER_MODEL, null=True),
        ),
        migrations.AddField(
            model_name='approvalnotes',
            name='created_by',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
    ]
|
# ch19_29.py
from tkinter import * # Import tkinter
import random
def getColor():
    """Return a random colour name for a newly created ball."""
    return random.choice(['red', 'green', 'blue', 'aqua', 'gold', 'purple'])
# Ball class: one bouncing ball, served from the top-centre of the canvas.
class Ball:
    def __init__(self):
        # Reads the module-level canvas dimension `width`.
        self.x = width / 2 # x coordinate of the serve position (canvas centre)
        self.y = 0 # y coordinate of the serve position (top edge)
        self.dx = 3 # x distance moved per animation frame
        self.dy = 3 # y distance moved per animation frame
        self.radius = 5 # ball radius in pixels
        self.color = getColor() # random ball colour
def addBall(): # add one more ball to the animation
    ballList.append(Ball())
def removeBall(): # remove the most recently added ball
    # Guard: the "remove ball" button can be pressed before any ball
    # exists; the unconditional pop() raised IndexError in that case.
    if ballList:
        ballList.pop()
def stop(): # pause the animation
    global ballRunning
    # NOTE: despite its name, ballRunning == True means "paused" —
    # animate() loops `while not ballRunning`.
    ballRunning = True
def resume(): # resume the animation
    global ballRunning
    ballRunning = False  # False re-enables the animate() loop (see stop())
    animate()
def animate(): # drive the ball movement
    """Animation loop: runs until stop() flips ballRunning to True."""
    global ballRunning
    while not ballRunning:
        canvas.after(sleepTime)      # wait sleepTime ms between frames
        canvas.update() # force a canvas redraw
        canvas.delete("ball")        # erase every ball before repositioning
        for ball in ballList: # advance and redraw all balls
            redisplayBall(ball)
def redisplayBall(ball):
    """Advance one ball by its velocity, bouncing it off the canvas edges."""
    # Reverse a velocity component whenever the ball has left the canvas.
    if not (0 <= ball.x <= width):
        ball.dx = -ball.dx
    if not (0 <= ball.y <= height):
        ball.dy = -ball.dy
    ball.x += ball.dx
    ball.y += ball.dy
    r = ball.radius
    canvas.create_oval(ball.x - r, ball.y - r,
                       ball.x + r, ball.y + r,
                       fill = ball.color, tags = "ball")
tk = Tk()
tk.title("ch19_29")
ballList = [] # list of balls currently animated
width, height = 400, 260
canvas = Canvas(tk, width=width, height=height)
canvas.pack()
frame = Frame(tk) # row of control buttons below the canvas
frame.pack()
# Buttons (labels in Chinese): pause / resume / add ball / remove ball / quit
btnStop = Button(frame, text = "暫停", command = stop)
btnStop.pack(side = LEFT)
btnResume = Button(frame, text = "恢復",command = resume)
btnResume.pack(side = LEFT)
btnAdd = Button(frame, text = "增加球", command = addBall)
btnAdd.pack(side = LEFT)
btnRemove = Button(frame, text = "減少球", command = removeBall)
btnRemove.pack(side = LEFT)
btnExit = Button(frame, text = "結束", command=tk.destroy)
btnExit.pack(side = LEFT)
sleepTime = 50 # animation frame delay in milliseconds
ballRunning = False  # False = animating, True = paused (see stop()/resume())
animate()
tk.mainloop()
|
#!/usr/bin/python
import os
import sys
import re
class haproxy():
    """Install, configure and launch haproxy on the local machine.

    Configuration rewrites a bundled template, expanding the three marker
    backend lines (app / mq / swift) into one line per address in `ips`.
    """
    __Excute = None

    def __init__(self):
        # Command runner; os.system kept for compatibility with callers.
        self.__Excute = os.system

    def haproxy_install(self):
        """Install the bundled haproxy RPM."""
        self.__Excute("rpm -ivh resource/cache/haproxy/haproxy-1.5.4-4.el7_1.x86_64.rpm") # this block should use popen but don't have time
        return

    def haproxy_config(self, ips):
        """Render /etc/haproxy/haproxy.cfg from the template, one backend
        line per address in `ips` for each marker line."""
        # Context managers ensure both files are closed (the writer was
        # previously never closed, risking a truncated config on exit).
        with open("config/haproxy/haproxy.cfg", "r") as reader:
            configs = reader.readlines()
        with open("/etc/haproxy/haproxy.cfg", "w") as writer:
            for config in configs:
                if re.match(r" server app1 10.110.19.240:8080 check inter 2000 fall 3 weight 30.*$", config):
                    for i, ip in enumerate(ips):
                        writer.write(" server app" + str(i) + " " + ip + ":8080 check inter 2000 fall 3 weight 30\n")
                elif re.match(r" server mq1 10.110.19.240:5672 check inter 2000 fall 3.*$", config):
                    for i, ip in enumerate(ips):
                        writer.write(" server mq" + str(i) + " " + ip + ":5672 check inter 2000 fall 3\n")
                elif re.match(r" server aaaa 10.110.19.241:81 check", config):
                    for i, ip in enumerate(ips):
                        writer.write(" server swift" + str(i) + " " + ip + " check inter 2000 fall 3 weight 30\n")
                else:
                    # readlines() keeps the trailing '\n'; the old
                    # `config + "\n"` doubled every untouched line.
                    writer.write(config)
        return

    def run(self):
        """Start haproxy; report success via the pid file."""
        self.__Excute("haproxy -f /etc/haproxy/haproxy.cfg") # change the code with os.popen
        if os.path.exists("/run/haproxy.pid"):
            return True
        else:
            return False
|
def memoize(original_function):
    """Memoise lis(top, v).

    The key includes tuple(v) (not just len(v)) so the cache stays correct
    when solve() is called with different sequences in one process; the old
    (top, len(v)) key returned stale answers across calls.
    """
    memo = {}

    def wrapper(top, v):
        key = (top, tuple(v))
        if key not in memo:
            memo[key] = original_function(top, v)
            # uncomment the following lines to see how many cells get filled
            # memoize.counter += 1
            # print(memoize.counter, memo)
        return memo[key]
    return wrapper


memoize.counter = 0  ### function attribute to count filled cells


@memoize
# Adapted from https://www.techiedelight.com/longest-increasing-subsequence-using-dynamic-programming/
# processing the sequence from the end, as a matter of preference.
def lis(top, v):
    """Length of the longest strictly increasing subsequence of v whose
    elements are all < top (the value that would follow the subsequence)."""
    n = len(v)
    if n == 0:
        return 0
    # Exclude the last item: the bound stays `top`.  (Passing v[n-1] here —
    # as the old code did — silently relaxed the bound and turned the whole
    # algorithm into a greedy right-to-left scan that over-counted, e.g. it
    # returned 5 for [3,1,4,1,5,1,9,2,6] whose true LIS length is 4.)
    excluded = lis(top, v[0:n - 1])
    # Include the last item — only if it fits under the current bound.
    if v[n - 1] < top:
        included = 1 + lis(v[n - 1], v[0:n - 1])
    else:
        included = 0
    return max(included, excluded)


def solve(v):
    """Return the length of the longest strictly increasing subsequence."""
    # Start with +inf so the last element can always begin the subsequence.
    return lis(float('inf'), v)


# unit tests
assert(solve(v = [3, 1, 4, 1, 5, 1, 9, 2, 6]) == 4)
assert(solve(v = []) == 0)
import ROOT
import QFramework
import CommonAnalysisHelpers
def addAlgorithms(visitor,config):
    """Build an unfolding TQAnalysisAlgorithm from `config` and register it
    on `visitor`.  Always returns True.

    Steps: copy the "unfolding.*" tags into their own folder, load the cut
    hierarchy from them, book the histogram jobs under the "unfolding"
    author, then attach the algorithm with the loaded cuts as base cut.
    """
    unfoldingConfig = QFramework.TQFolder()
    unfoldingConfig.importTagsWithoutPrefix(config, "unfolding.")
    # unfoldingConfig.printTags()
    unfoldingCuts = CommonAnalysisHelpers.analyze.loadCuts(unfoldingConfig)
    QFramework.TQHistoMakerAnalysisJob.setDefaultAuthor("unfolding")
    CommonAnalysisHelpers.analyze.bookAnalysisJobs(unfoldingConfig,unfoldingCuts)
    executionAlgorithm = QFramework.TQAnalysisAlgorithm("AnalysisAlgorithm", unfoldingCuts)
    unfoldingCuts.printCuts()  # debug: dump the loaded cut hierarchy
    executionAlgorithm.setBaseCut(unfoldingCuts)
    visitor.addAlgorithm(executionAlgorithm)
    return True
|
import struct
import socket
import select
import re
import json
Format = 'iii'  # struct format for the header — NOTE(review): unused since the JSON switch; confirm before removing
HOST = '127.0.0.1'
output_ports = {}  # output port -> destination router id (filled by configParser)
input_ports = []   # ports this router listens on (filled by configParser)
def configParser(filename):
    '''Read the config file and construct the routing table.

    Layout assumed from the parsing below (confirm against a real .cfg):
      line 0: <keyword> <router-id>
      line 1: <keyword> <input port> ...
      line 2: <keyword> <port>-<metric>-<dest-id> ...

    Returns {dest_id: [metric, next_hop, flag, [timer, timer]]} and fills
    the module-level input_ports / output_ports as a side effect.
    NOTE(review): the file handle is never closed — consider a with-block.
    '''
    lines = []
    table = {}
    file = open(filename, 'r')
    for line in file.readlines():
        line = re.split(', | |\n',line)
        lines.append(line)
    router_id = lines[0][1]
    print(lines[1])
    # len - 1 skips the trailing '' produced by splitting on '\n'.
    for i in range(1,len(lines[1]) - 1):
        input_ports.append(int(lines[1][i]))
    for n in range(1,len(lines[2])):
        line = lines[2][n]
        output = line.split('-')
        output_port = int(output[0])
        metric = int(output[1])
        dest_id = int(output[2])
        output_ports[output_port] = dest_id
        next_hop = dest_id  # directly connected: next hop == destination
        flag = False
        timers = [0,0]
        table[dest_id] = [metric, next_hop, flag,timers]
    print('input ports : {},\noutput ports : {}'.format(input_ports, output_ports))
    return table
def rip_header(router_id):
    '''Build the RIP packet header: [command, version, source router id].'''
    command, version = 2, 2
    source = int(router_id)
    print("RIP Header : command {}, version {}, source {}".format(command, version, source))
    return [command, version, source]
def rip_entry(table):
    '''Flatten the routing table into a list of (metric, destination) pairs.'''
    entry = []
    for dst, route in table.items():
        metric = route[0]
        print("RIP Entry : metric {}, destination {}".format(metric, dst))
        entry.append((metric, dst))
    return entry
def rip_packet(header, entry):
    '''Bundle the header and entry list into one dict ready for serialisation.'''
    return {'header': header, 'entry': entry}
def listen_packet(input_ports):
    '''Bind one UDP socket per input port and return the list of sockets.'''
    sockets = []
    for port in input_ports:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.bind(('0.0.0.0', port))
        sockets.append(sock)
    return sockets
def send_packet(packet, output_ports):
    '''Send `packet` to every neighbour output port over UDP.

    Accepts str or bytes; the caller passes the json.dumps() string, and
    socket.sendto() requires a bytes-like payload (the old code raised
    TypeError on Python 3).
    '''
    if isinstance(packet, str):
        packet = packet.encode('utf-8')
    for port in output_ports.keys():
        print('port', port)
        outSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        outSock.sendto(packet, ("0.0.0.0", port))
    return 'packet send successed'
def routing_algorithms(table, packet):
    '''Merge a received RIP packet into `table` (distance-vector update).

    Each received entry's metric is incremented by one hop and its next hop
    is rewritten to the packet's source router.  New destinations are added;
    known destinations are replaced when the route comes from the same next
    hop or offers a lower metric.  Returns the updated table (mutated in
    place).

    Bug fixed: the loop referenced the undefined global `rec_packet`
    instead of the `packet` parameter, raising NameError at runtime.
    '''
    # Initialise the received routing table.
    dst = table.keys()
    ndst = []
    for i in packet['entry']:
        ndst.append(i[1])
        i[0] += 1                      # one more hop to reach it through the sender
        i[1] = packet['header'][2]     # next hop becomes the sending router
    # Produce the updated routing table.
    for j in range(len(ndst)):
        if ndst[j] not in dst:
            table[ndst[j]] = packet['entry'][j]
        for k in dst:
            if k == ndst[j]:
                if table[k][1] == packet['entry'][j][1]:
                    # Same next hop: always accept the fresher route.
                    table[k] = packet['entry'][j]
                elif table[k][0] > packet['entry'][j][0]:
                    # Different next hop but strictly better metric.
                    table[k] = packet['entry'][j]
    return table
def receive_packet(listen_list, packet):
    '''Wait up to 30s for a packet on any listening socket and merge it into
    the routing table.

    NOTE(review): `packet` is unused, `table` is read from module scope, and
    None is returned implicitly on timeout — confirm callers expect that.
    Only the first ready socket is serviced per call.
    '''
    r, w, e = select.select(listen_list, [], [], 30)
    if r != []:
        sock = r[0]
        unpacked_packet, address = sock.recvfrom(2048)
        rev_packet = json.loads(unpacked_packet)
        new_table = routing_algorithms(table, rev_packet)
        return new_table
# --- script entry: parse the config, then exchange one round of packets ---
filename = 'router4.cfg'
router_id = filename[6]  # the digit in 'routerN.cfg'
table = configParser(filename)
print('table : {}'.format(table))
header = rip_header(router_id)
entry = rip_entry(table)
packet = rip_packet(header, entry)
print('packet: {}'.format(packet))
packed_packet = json.dumps(packet)  # serialise for the UDP payload
print('packed_packet : {}'.format(packed_packet))
listen_list = listen_packet(input_ports)
print('listen list : {}'.format(listen_list))
send_packet(packed_packet, output_ports)
rev_packet = receive_packet(listen_list, packed_packet)
print('receive packet : {}'.format(rev_packet))
|
from socket import *
ip_port=('127.0.0.1',9000)
bufsize=1024
# NOTE(review): named tcp_client, but SOCK_DGRAM makes this a UDP socket.
tcp_client=socket(AF_INET,SOCK_DGRAM)
while True:
    # Prompt (Chinese): "please enter the time format".
    msg=input("请输入时间格式: ").strip()
    tcp_client.sendto(msg.encode('utf-8'),ip_port)
    data=tcp_client.recv(bufsize)  # reply is received but never used — TODO print/decode it?
# Generated by Django 3.0.5 on 2020-05-03 18:41
from django.db import migrations

class Migration(migrations.Migration):
    """Auto-generated: drop the 'email' field from the 'owner' model."""

    dependencies = [
        ('project_first_app', '0004_auto_20200503_1905'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='owner',
            name='email',
        ),
    ]
|
from __future__ import print_function
import random

# Word pools for the generated buzz phrase; one word is drawn from each,
# in this order: adjective, CI/CD noun, adverb, verb, buzzword.
adjectives = ('complete', 'modern', 'self-service', 'integrated', 'end-to-end')
ci_cd = ('continuous testing', 'continuous integration', 'continuous deployment', 'continuous improvement', 'DevOps')
adverbs = ('remarkably', 'enormously', 'substantially', 'significantly', 'seriously')
verbs = ('accelerates', 'improves', 'enhances', 'revamps', 'boosts')
buzz = ('smaller code changes', 'Faster Mean Time To Resolution (MTTR)', 'Smaller Backlog', 'Faster Release Rate')
def get_sample(tuple_data, n=1):
    """Pick n distinct random items from tuple_data; unwrap the n == 1 case."""
    picked = random.sample(tuple_data, n)
    return picked[0] if n == 1 else picked
def generate_buzz():
    """Assemble one title-cased buzz phrase, one word from each pool."""
    words = [get_sample(pool) for pool in (adjectives, ci_cd, adverbs, verbs, buzz)]
    return ' '.join(words).title()
if __name__ == "__main__":
    # Print one buzz phrase when run as a script.
    print(generate_buzz())
|
import time,os,psycopg2
from flask import Flask
app = Flask(__name__)
# Kubernetes automatically injects HOSTNAME into the container environment.
hostname = os.environ['HOSTNAME']
# PostgreSQL connection settings come from the environment.
postgre_hostname=os.environ['POSTGRE_HOSTNAME']
postgre_port=os.environ['POSTGRE_PORT']
postgre_username=os.environ['POSTGRE_USERNAME']
postgre_password=os.environ['POSTGRE_PASSWORD']
postgre_dbname=os.environ['POSTGRE_DBNAME']
# Last DB version string fetched by connect(); module-global by design.
DB_VERSION = ""
@app.route('/')
def hello_world():
    """GET /: refresh DB_VERSION via connect(), then report host + version."""
    connect()
    return '{}- PostgreSQL DB Version is:{}\n'.format(hostname, DB_VERSION)
def connect():
    """Connect to PostgreSQL and store the server version string in the
    module-global DB_VERSION.

    Errors are printed rather than raised so the endpoint stays up; the
    connection is always closed in the finally block.
    """
    conn = None
    try:
        print('Connecting PostgreSQL db..')
        conn = psycopg2.connect(
            host=postgre_hostname,
            port=postgre_port,
            database=postgre_dbname,
            user=postgre_username,
            password=postgre_password)
        cur = conn.cursor()
        cur.execute('SELECT version()')
        global DB_VERSION
        DB_VERSION = str(cur.fetchone())
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')
if __name__ == '__main__':
    time.sleep(15) # delay startup 15s so the database container can come up first
    app.run(host='0.0.0.0',port=8080)
|
import turtle
from generator.shapes import *
class GeometricShapes:
    """Drive every shape generator from generator.shapes on one shared
    turtle canvas.

    `destination` is forwarded verbatim to each generator (presumably an
    output location — confirm in generator.shapes); `size` is the number of
    rounds generated per shape; `animation` toggles turtle frame tracing.
    """
    # One generator class per supported shape.
    __GENERATORS__ = [
        Triangle, Circle, Heptagon, Octagon, Hexagon, Square, Star,
        Nonagon, Pentagon
    ]
    def __init__(self, destination, size, animation=False):
        turtle.colormode(255)
        # the canvas subtracts a pixel from the height
        turtle.setup(width=200, height=200)
        turtle.hideturtle()
        turtle.tracer(animation)  # animation off by default for speed
        container = turtle.Turtle()
        self.__size__ = size
        self.__shapes = [
            generator(
                destination, container
            ) for generator in self.__GENERATORS__
        ]
    def generate(self):
        # Produce `size` samples of every shape, round-robin over the shapes.
        for _ in range(self.__size__):
            for shape in self.__shapes:
                shape.generate()
|
from enum import Enum
from PyQt5.QtCore import QTimer, QTime
from PyQt5.QtWidgets import QLCDNumber
class TimerObject(QLCDNumber):
    """LCD stopwatch widget displaying elapsed time as mm:ss.

    A QTimer ticks once per second and drives increment(); start/pause/
    restart control the timer without touching the displayed value except
    on restart.
    """
    def __init__(self):
        super().__init__()
        self.timer = QTimer()
        self.timer.setInterval(1000)  # tick once per second
        self.timer.timeout.connect(self.increment)
        self.update(QTime(0, 0, 0, 0))
    def increment(self):
        """Advance the displayed time by one second."""
        self.time = self.time.addSecs(1)
        self.update(self.time)
    def restart(self):
        """Reset the display to 00:00 and (re)start ticking."""
        self.update(QTime(0, 0, 0, 0))
        self.timer.start()
    def pause(self):
        self.timer.stop()
    def start(self):
        self.timer.start()
    def update(self, time=None):
        """Set the current time and refresh the LCD display.

        NOTE: shadows QWidget.update(); kept for interface compatibility.
        The previous default (`time=QTime`, i.e. the class object itself)
        crashed on `toString` whenever update() was called with no argument;
        default to a fresh zero QTime instead.
        """
        if time is None:
            time = QTime(0, 0, 0, 0)
        self.time = time
        self.time_string = self.time.toString('mm:ss')
        self.display(self.time_string)
class State(Enum):
    # Timer phases (pomodoro-style); the integer values are arbitrary.
    ACTIVE = 0
    REST = 1
|
# -*- coding: utf-8 -*-
from pysped.xml_sped import *
from pysped.nfe.manual_300 import ESQUEMA_ATUAL
import os
DIRNAME = os.path.dirname(__file__)
class InfInutEnviado(XMLNFe):
    """<infInut> element of an NF-e number-range cancellation (inutilização)
    request, layout 1.07.  Each attribute mirrors one XML tag."""
    def __init__(self):
        super(InfInutEnviado, self).__init__()
        self.Id = TagCaracter(nome=u'infInut', codigo=u'DP03', tamanho=[41, 41] , raiz=u'//inutNFe', propriedade=u'Id')
        self.tpAmb = TagInteiro(nome=u'tpAmb' , codigo=u'DP05', tamanho=[1, 1, 1], raiz=u'//inutNFe/infInut', valor=2)
        self.xServ = TagCaracter(nome=u'xServ' , codigo=u'DP06', tamanho=[10, 10] , raiz=u'//inutNFe/infInut', valor=u'INUTILIZAR')
        self.cUF = TagInteiro(nome=u'cUF' , codigo=u'DP07', tamanho=[2, 2, 2], raiz=u'//inutNFe/infInut')
        self.ano = TagCaracter(nome=u'ano' , codigo=u'DP08', tamanho=[2, 2] , raiz=u'//inutNFe/infInut')
        self.CNPJ = TagCaracter(nome=u'CNPJ' , codigo=u'DP09', tamanho=[3, 14] , raiz=u'//inutNFe/infInut')
        self.mod = TagInteiro(nome=u'mod' , codigo=u'DP10', tamanho=[2, 2, 2], raiz=u'//inutNFe/infInut', valor=55)
        self.serie = TagInteiro(nome=u'serie' , codigo=u'DP11', tamanho=[1, 3] , raiz=u'//inutNFe/infInut')
        self.nNFIni = TagInteiro(nome=u'nNFIni' , codigo=u'DP12', tamanho=[1, 9] , raiz=u'//inutNFe/infInut')
        self.nNFFin = TagInteiro(nome=u'nNFFin' , codigo=u'DP13', tamanho=[1, 9] , raiz=u'//inutNFe/infInut')
        self.xJust = TagCaracter(nome=u'xJust' , codigo=u'DP14', tamanho=[15, 255], raiz=u'//inutNFe/infInut')
    def get_xml(self):
        # Serialise every tag in schema order.
        xml = XMLNFe.get_xml(self)
        xml += self.Id.xml
        xml += self.tpAmb.xml
        xml += self.xServ.xml
        xml += self.cUF.xml
        xml += self.ano.xml
        xml += self.CNPJ.xml
        xml += self.mod.xml
        xml += self.serie.xml
        xml += self.nNFIni.xml
        xml += self.nNFFin.xml
        xml += self.xJust.xml
        xml += u'</infInut>'
        return xml
    def set_xml(self, arquivo):
        # Populate every tag from the given XML document.
        if self._le_xml(arquivo):
            self.Id.xml = arquivo
            self.tpAmb.xml = arquivo
            self.xServ.xml = arquivo
            self.cUF.xml = arquivo
            self.ano.xml = arquivo
            self.CNPJ.xml = arquivo
            self.mod.xml = arquivo
            self.serie.xml = arquivo
            self.nNFIni.xml = arquivo
            self.nNFFin.xml = arquivo
            self.xJust.xml = arquivo
    xml = property(get_xml, set_xml)
class InutNFe(XMLNFe):
    """<inutNFe> request document (layout 1.07): InfInutEnviado payload plus
    its XML-DSig signature, validated against inutNFe_v1.07.xsd."""
    def __init__(self):
        super(InutNFe, self).__init__()
        self.versao = TagDecimal(nome=u'inutNFe', codigo=u'DP01', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'1.07', raiz=u'/')
        self.infInut = InfInutEnviado()
        self.Signature = Signature()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'inutNFe_v1.07.xsd'
        self.chave = u''
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += self.versao.xml
        xml += self.infInut.xml
        #
        # Set the URI to be signed
        #
        self.Signature.URI = u'#' + self.infInut.Id.valor
        xml += self.Signature.xml
        xml += u'</inutNFe>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.infInut.xml = arquivo
            self.Signature.xml = self._le_noh('//inutNFe/sig:Signature')
    xml = property(get_xml, set_xml)
    def monta_chave(self):
        # Build the cancellation key: UF + year + CNPJ + model + series + number range.
        chave = unicode(self.infInut.cUF.valor).zfill(2)
        chave += self.infInut.ano.valor.zfill(2)
        chave += self.infInut.CNPJ.valor.zfill(14)
        chave += unicode(self.infInut.mod.valor).zfill(2)
        chave += unicode(self.infInut.serie.valor).zfill(3)
        chave += unicode(self.infInut.nNFIni.valor).zfill(9)
        chave += unicode(self.infInut.nNFFin.valor).zfill(9)
        self.chave = chave
        return chave
    def gera_nova_chave(self):
        chave = self.monta_chave()
        #
        # In NF-e version 1.07 the cancellation key does not include
        # the year
        #
        chave = chave[0:2] + chave[4:]
        #
        # Set the Id
        #
        self.infInut.Id.valor = u'ID' + chave
class InfInutRecebido(XMLNFe):
    """<infInut> element of the SEFAZ *response* to a cancellation request;
    most tags are optional because error responses omit them."""
    def __init__(self):
        super(InfInutRecebido, self).__init__()
        self.Id = TagCaracter(nome=u'infInut' , codigo=u'DR03', tamanho=[17, 17] , raiz=u'//retInutNFe', propriedade=u'Id', obrigatorio=False)
        self.tpAmb = TagInteiro(nome=u'tpAmb' , codigo=u'DR05', tamanho=[1, 1, 1] , raiz=u'//retInutNFe/infInut', valor=2)
        self.verAplic = TagCaracter(nome=u'verAplic', codigo=u'DR06', tamanho=[1, 20] , raiz=u'//retInutNFe/infInut')
        self.cStat = TagCaracter(nome=u'cStat' , codigo=u'DR07', tamanho=[3, 3, 3] , raiz=u'//retInutNFe/infInut')
        self.xMotivo = TagCaracter(nome=u'xMotivo' , codigo=u'DR08', tamanho=[1, 255] , raiz=u'//retInutNFe/infInut')
        self.cUF = TagInteiro(nome=u'cUF' , codigo=u'DR09', tamanho=[2, 2, 2] , raiz=u'//retInutNFe/infInut')
        self.ano = TagCaracter(nome=u'ano' , codigo=u'DR10', tamanho=[2, 2] , raiz=u'//retInutNFe/infInut', obrigatorio=False)
        self.CNPJ = TagCaracter(nome=u'CNPJ' , codigo=u'DR11', tamanho=[3, 14] , raiz=u'//retInutNFe/infInut', obrigatorio=False)
        self.mod = TagInteiro(nome=u'mod' , codigo=u'DR12', tamanho=[2, 2, 2] , raiz=u'//retInutNFe/infInut', obrigatorio=False)
        self.serie = TagInteiro(nome=u'serie' , codigo=u'DR13', tamanho=[1, 3] , raiz=u'//retInutNFe/infInut', obrigatorio=False)
        self.nNFIni = TagInteiro(nome=u'nNFIni' , codigo=u'DR14', tamanho=[1, 9] , raiz=u'//retInutNFe/infInut', obrigatorio=False)
        self.nNFFin = TagInteiro(nome=u'nNFFin' , codigo=u'DR15', tamanho=[1, 9] , raiz=u'//retInutNFe/infInut', obrigatorio=False)
        self.dhRecbto = TagDataHora(nome=u'dhRecbto', codigo=u'DR16', raiz=u'//retInutNFe/infInut', obrigatorio=False)
        self.nProt = TagInteiro(nome=u'nProt' , codigo=u'DR17', tamanho=[15, 15, 15], raiz=u'//retInutNFe/infInut', obrigatorio=False)
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        # The Id attribute is optional; emit a bare opening tag without it.
        if self.Id.xml:
            xml += self.Id.xml
        else:
            xml += u'<infInut>'
        xml += self.tpAmb.xml
        xml += self.verAplic.xml
        xml += self.cStat.xml
        xml += self.xMotivo.xml
        xml += self.cUF.xml
        xml += self.ano.xml
        xml += self.CNPJ.xml
        xml += self.mod.xml
        xml += self.serie.xml
        xml += self.nNFIni.xml
        xml += self.nNFFin.xml
        xml += self.dhRecbto.xml
        xml += self.nProt.xml
        xml += u'</infInut>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.Id.xml = arquivo
            self.tpAmb.xml = arquivo
            self.verAplic.xml = arquivo
            self.cStat.xml = arquivo
            self.xMotivo.xml = arquivo
            self.cUF.xml = arquivo
            self.ano.xml = arquivo
            self.CNPJ.xml = arquivo
            self.mod.xml = arquivo
            self.serie.xml = arquivo
            self.nNFIni.xml = arquivo
            self.nNFFin.xml = arquivo
            self.dhRecbto.xml = arquivo
            self.nProt.xml = arquivo
    xml = property(get_xml, set_xml)
class RetInutNFe(XMLNFe):
    """<retInutNFe> response document (layout 1.07): InfInutRecebido payload
    plus an optional signature, validated against retInutNFe_v1.07.xsd."""
    def __init__(self):
        super(RetInutNFe, self).__init__()
        self.versao = TagDecimal(nome=u'retInutNFe', codigo=u'DR01', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'1.07', raiz=u'/')
        self.infInut = InfInutRecebido()
        self.Signature = Signature()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'retInutNFe_v1.07.xsd'
        self.chave = u''
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += self.versao.xml
        xml += self.infInut.xml
        # Only emit the signature when a real URI was set (not just '#').
        if len(self.Signature.URI) and (self.Signature.URI.strip() != u'#'):
            xml += self.Signature.xml
        xml += u'</retInutNFe>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.infInut.xml = arquivo
            self.Signature.xml = self._le_noh('//retInutNFe/sig:Signature')
    xml = property(get_xml, set_xml)
    def monta_chave(self):
        # Rebuild the cancellation key from the response fields.
        chave = unicode(self.infInut.cUF.valor).zfill(2)
        chave += self.infInut.ano.valor.zfill(2)
        chave += self.infInut.CNPJ.valor.zfill(14)
        chave += unicode(self.infInut.mod.valor).zfill(2)
        chave += unicode(self.infInut.serie.valor).zfill(3)
        chave += unicode(self.infInut.nNFIni.valor).zfill(9)
        chave += unicode(self.infInut.nNFFin.valor).zfill(9)
        self.chave = chave
        return chave
class ProcInutNFe(XMLNFe):
    """<ProcInutNFe> archival document pairing the request (inutNFe) with
    its response (retInutNFe), validated against procInutNFe_v1.07.xsd."""
    def __init__(self):
        super(ProcInutNFe, self).__init__()
        #
        # Note --- the ProcInutNFe tag must start with an upper-case letter
        # to validate against the XSD.  The other proc files (procCancNFe,
        # procNFe) really do start with a lower-case letter.
        #
        self.versao = TagDecimal(nome=u'ProcInutNFe', propriedade=u'versao', namespace=NAMESPACE_NFE, valor=u'1.07', raiz=u'/')
        self.inutNFe = InutNFe()
        self.retInutNFe = RetInutNFe()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'procInutNFe_v1.07.xsd'
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += self.versao.xml
        # Strip the nested XML declarations from the embedded documents.
        xml += self.inutNFe.xml.replace(ABERTURA, u'')
        xml += self.retInutNFe.xml.replace(ABERTURA, u'')
        xml += u'</ProcInutNFe>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.inutNFe.xml = arquivo
            self.retInutNFe.xml = arquivo
    xml = property(get_xml, set_xml)
|
"""
A Pythagorean triplet is a set of three natural numbers,
a < b < c, for which, a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which
a + b + c = 1000.
Find the product abc.
"""
def pythagorean_loop(total=1000):
    """Return a*b*c for the Pythagorean triplet a < b < c with a+b+c == total.

    Returns 0 when no such triplet exists.  Only (a, b) are enumerated and
    c is derived as total - a - b, so a single `return` exits the search
    (answering the original "how do I break multiple loops" note) and the
    cost drops from O(n^3) to O(n^2).  `total` defaults to 1000 per the
    Project Euler 9 statement, keeping the old no-argument call working.
    """
    for a in range(1, total // 3):
        for b in range(a + 1, (total - a) // 2 + 1):
            c = total - a - b
            if c <= b:  # c only shrinks as b grows; b < c can no longer hold
                break
            if a * a + b * b == c * c:
                return a * b * c
    return 0

# Parenthesised call form works on both Python 2 and 3 (the bare
# `print expr` statement was Python-2 only).
print(pythagorean_loop())
import mysql.connector

# Table names for the seeded twitter-like dataset.
PW_FILE = '../.pw'
DB_USER = 'samyong'
DB_NAME = 'scala_chatter'
USER_TABLE = 'seed_user_details'
FOLLOWER_TABLE = 'seed_user_followers'
FOLLOWING_TABLE = 'seed_user_friends'
TWEET_TABLE = 'seed_user_tweets'
CONNECTION_USER_TABLE = 'seed_connection_details'
# First line of the password file.
# NOTE(review): the file handle is never closed — consider a with-block.
pw = [s for s in open(PW_FILE)][0].strip()
# Shared module-level connection used by every helper below.
db = mysql.connector.connect(
    host='localhost',
    user=DB_USER,
    password=pw,
    database=DB_NAME,
)
def execute(query, has_res = True):
    """Run `query` on the shared connection; fetch all rows unless has_res
    is False (returns None in that case)."""
    cursor = db.cursor()
    cursor.execute(query)
    if has_res:
        return cursor.fetchall()
def seed_users(details=False):
    """
    If details = True, return everything from the seed user table.
    If details = False, return list of seed user (username, id) pairs.
    """
    cursor = db.cursor()
    cursor.execute('select * from ' + USER_TABLE)
    rows = cursor.fetchall()
    if details:
        return rows
    return [row[:2] for row in rows]
def connection_users():
    """ return list of (connection user id, connection user details json string) """
    return execute('select * from ' + CONNECTION_USER_TABLE)
def followers():
    """ return list of (seed user id, follower id) pairs """
    return execute('select * from ' + FOLLOWER_TABLE)
def connection_as_follower_count():
    """ return list of (connection user id, # seed users with this connection as follower) """
    return execute('select follower_id, count(*) as s from ' + FOLLOWER_TABLE + ' group by follower_id order by s desc')
def followings():
    """ return list of (seed user id, following id) pairs """
    return execute('select * from ' + FOLLOWING_TABLE)
def connection_as_following_count():
    """ return list of (connection user id, # seed users with this connection as following) """
    return execute('select friend_id, count(*) as s from ' + FOLLOWING_TABLE + ' group by friend_id order by s desc')
def tweets():
    """ return list of (seed user id, tweet id, tweet object json) tuples """
    return execute('select * from ' + TWEET_TABLE)
def mk_seed_user_map():
    """Map seed user id -> (username, name, followers_count, friends_count)."""
    import json
    result = {}
    for username, uid, details in seed_users(True):
        parsed = json.loads(details)
        result[uid] = (username, parsed['name'], parsed['followers_count'], parsed['friends_count'])
    return result
def mk_connection_user_map():
    """Map connection user id -> (screen_name, name, followers_count, friends_count)."""
    import json
    result = {}
    for uid, details in connection_users():
        parsed = json.loads(details)
        result[uid] = (parsed['screen_name'], parsed['name'], parsed['followers_count'], parsed['friends_count'])
    return result
|
import configparser
import os
basedir = os.path.abspath(os.path.dirname(__file__))
# ref: https://www.blog.pythonlibrary.org/2013/10/25/python-101-an-intro-to-configparser/
class UtilsConfig(object):
    """Load all AWS/GAS settings from utils.cfg into flat attributes.

    NOTE(review): the module computes `basedir` above but reads 'utils.cfg'
    relative to the current working directory — confirm that is intended.
    """
    def __init__(self):
        self.config = configparser.ConfigParser()
        self.config.read('utils.cfg')
        self.AWS_REGION_NAME = self.config['AWS']['AWS_REGION_NAME']
        self.AWS_S3_INPUTS_BUCKET = self.config['AWS']['AWS_S3_INPUTS_BUCKET']
        self.AWS_S3_RESULTS_BUCKET = self.config['AWS']['AWS_S3_RESULTS_BUCKET']
        # Set the S3 key (object name) prefix to your CNetID
        # Keep the trailing '/' if using my upload code in views.py
        self.AWS_S3_KEY_PREFIX = self.config['AWS']['AWS_S3_KEY_PREFIX']
        self.AWS_GLACIER_VAULT = self.config['AWS']['AWS_GLACIER_VAULT']
        # Change the ARNs below to reflect your SNS topics
        self.AWS_SNS_JOB_REQUEST_TOPIC = self.config['AWS']['AWS_SNS_JOB_REQUEST_TOPIC']
        self.AWS_SNS_JOB_COMPLETE_TOPIC = self.config['AWS']['AWS_SNS_JOB_COMPLETE_TOPIC']
        self.AWS_SNS_JOB_ARCHIVE_TOPIC = self.config['AWS']['AWS_SNS_JOB_ARCHIVE_TOPIC']
        self.AWS_SNS_JOB_RESTORE_TOPIC = self.config['AWS']['AWS_SNS_JOB_RESTORE_TOPIC']
        # SQS queue names
        self.AWS_SQS_JOB_REQUEST_NAME = self.config['AWS']['AWS_SQS_JOB_REQUEST_NAME']
        self.AWS_SQS_JOB_COMPLETE_NAME = self.config['AWS']['AWS_SQS_JOB_COMPLETE_NAME']
        self.AWS_SQS_JOB_ARCHIVE_NAME = self.config['AWS']['AWS_SQS_JOB_ARCHIVE_NAME']
        self.AWS_SQS_JOB_RESTORE_NAME = self.config['AWS']['AWS_SQS_JOB_RESTORE_NAME']
        # Change the table name to your own
        self.AWS_DYNAMODB_ANNOTATIONS_TABLE = self.config['AWS']['AWS_DYNAMODB_ANNOTATIONS_TABLE']
        # Change the email address to your username
        self.MAIL_DEFAULT_SENDER = self.config['GASAPP']['MAIL_DEFAULT_SENDER']
        # time before free user results are archived (in seconds)
        self.FREE_USER_DATA_RETENTION = int(self.config['GASAPP']['FREE_USER_DATA_RETENTION'])
        self.FREE_USER_FILE_LIMIT = int(self.config['GASAPP']['FREE_USER_FILE_LIMIT'])
        self.LOCAL_DATA_PREFIX = self.config['GASAPP']['LOCAL_DATA_PREFIX']
|
"""This script tests the cli methods to get samples in status-db"""
from datetime import datetime
from cg.store import Store
def test_get_sample_bad_sample(invoke_cli, disk_store: Store):
    """Test getting a sample with a sample-id that does not exist"""
    # GIVEN an empty database
    # WHEN getting a sample by a name that was never added
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', 'dummy_name'])
    # THEN it should warn about the missing sample id instead of failing;
    # the API accepts multiple samples, so the exit code stays 0
    assert result.exit_code == 0
def test_get_sample_required(invoke_cli, disk_store: Store):
    """Test fetching a single sample using only the required argument"""
    # GIVEN a database holding exactly one sample
    sample_id = add_sample(disk_store).internal_id
    assert disk_store.Sample.query.count() == 1
    # WHEN getting that sample by id
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id])
    # THEN the command succeeds and echoes the sample id
    assert result.exit_code == 0
    assert sample_id in result.output
def test_get_samples_required(invoke_cli, disk_store: Store):
    """Test fetching several samples using only the required arguments"""
    # GIVEN a database holding exactly two samples
    first_id = add_sample(disk_store, '1').internal_id
    second_id = add_sample(disk_store, '2').internal_id
    assert disk_store.Sample.query.count() == 2
    # WHEN getting both samples in a single invocation
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', first_id, second_id])
    # THEN the command succeeds and both ids appear in the output
    assert result.exit_code == 0
    assert first_id in result.output
    assert second_id in result.output
def test_get_sample_output(invoke_cli, disk_store: Store):
    """Test that the output contains the sample's data fields"""
    # GIVEN a database with a populated sample; read the fields up front,
    # before the CLI invocation, exactly as the data was stored
    sample = add_sample(disk_store)
    sample_id = sample.internal_id
    expected_fields = [
        sample_id,
        sample.name,
        sample.customer.internal_id,
        sample.application_version.application.tag,
        sample.state,
        sample.priority_human,
    ]
    # WHEN getting the sample
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id])
    # THEN the command succeeds and every field appears in the output
    assert result.exit_code == 0
    for field in expected_fields:
        assert field in result.output
def test_get_sample_external_false(invoke_cli, disk_store: Store):
    """Test that the output reports a non-external sample as 'No'"""
    # GIVEN a database with a sample that is not external
    sample_id = add_sample(disk_store, is_external=False).internal_id
    # WHEN getting the sample
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id])
    # THEN the external column reads 'No' and 'Yes' is absent
    assert result.exit_code == 0
    assert 'No' in result.output
    assert 'Yes' not in result.output
def test_get_sample_external_true(invoke_cli, disk_store: Store):
    """Test that the output reports an external sample as 'Yes'"""
    # GIVEN a database with a sample that is external
    sample_id = add_sample(disk_store, is_external=True).internal_id
    # WHEN getting the sample
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id])
    # THEN the external column reads 'Yes' and 'No' is absent
    assert result.exit_code == 0
    assert 'Yes' in result.output
    assert 'No' not in result.output
def test_get_sample_no_families_without_family(invoke_cli, disk_store: Store):
    """Test that the --no-families flag works for a sample without families"""
    # GIVEN a database with a sample that has no family links
    sample_id = add_sample(disk_store).internal_id
    assert not disk_store.Sample.query.first().links
    # WHEN getting the sample with the --no-families flag
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id, '--no-families'])
    # THEN the command still succeeds
    assert result.exit_code == 0
def test_get_sample_no_families_with_family(invoke_cli, disk_store: Store):
    """Test that the --no-families flag doesn't show family info"""
    # GIVEN a database with a sample with related samples
    family = add_family(disk_store)
    sample = add_sample(disk_store)
    link = add_relationship(disk_store, sample=sample, family=family)
    assert link in disk_store.Sample.query.first().links
    sample_id = sample.internal_id
    # WHEN getting a sample with the --no-families flag
    db_uri = disk_store.uri
    result = invoke_cli(
        ['--database', db_uri, 'get', 'sample', sample_id, '--no-families'])
    # THEN none of the related families should be listed in the output
    # (the previous comment here said "all related families should be listed",
    # which is the opposite of what the assertion below checks)
    assert result.exit_code == 0
    for link in disk_store.Sample.query.first().links:
        assert link.family.internal_id not in result.output
def test_get_sample_families_without_family(invoke_cli, disk_store: Store):
    """Test that the --families flag works for a sample without families"""
    # GIVEN a database with a sample that has no family links
    sample_id = add_sample(disk_store).internal_id
    assert not disk_store.Sample.query.first().links
    # WHEN getting the sample with the --families flag
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id, '--families'])
    # THEN the command still succeeds
    assert result.exit_code == 0
def test_get_sample_families_with_family(invoke_cli, disk_store: Store):
    """Test that the --families flag does show family info"""
    # GIVEN a database with a sample linked to a family
    family = add_family(disk_store)
    sample = add_sample(disk_store)
    sample_id = sample.internal_id
    add_relationship(disk_store, sample, family)
    assert disk_store.Sample.query.first().links
    # WHEN getting the sample with the --families flag
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id, '--families'])
    # THEN the command succeeds and every related family id is listed
    assert result.exit_code == 0
    assert all(link.family.internal_id in result.output
               for link in disk_store.Sample.query.first().links)
def test_get_sample_flowcells_without_flowcell(invoke_cli, disk_store: Store):
    """Test that querying samples for flowcells works when there are none"""
    # GIVEN a database with a sample and no flowcells at all
    sample_id = add_sample(disk_store).internal_id
    assert not disk_store.Flowcell.query.first()
    # WHEN getting the sample with the --flowcells flag
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample_id, '--flowcells'])
    # THEN the command still succeeds
    assert result.exit_code == 0
def test_get_sample_flowcells_with_flowcell(invoke_cli, disk_store: Store):
    """Test that querying samples for flowcells shows the flowcell name"""
    # GIVEN a database with a sample attached to a flowcell
    flowcell = add_flowcell(disk_store)
    sample = add_sample(disk_store, flowcell=flowcell)
    assert flowcell in disk_store.Sample.query.first().flowcells
    # WHEN getting the sample with the --flowcells flag
    result = invoke_cli(
        ['--database', disk_store.uri, 'get', 'sample', sample.internal_id,
         '--flowcells'])
    # THEN the command succeeds and every related flowcell name is listed
    assert result.exit_code == 0
    assert all(fc.name in result.output
               for fc in disk_store.Sample.query.first().flowcells)
def ensure_application_version(disk_store, application_tag='dummy_tag', is_external=False):
    """utility function: return the application version for tests, creating it on first use"""
    # Create the application itself if it is not in the store yet
    application = disk_store.application(tag=application_tag)
    if not application:
        application = disk_store.add_application(
            tag=application_tag, category='wgs',
            description='dummy_description', is_external=is_external)
        disk_store.add_commit(application)
    # Create version 1 of the application if it is not in the store yet
    version = disk_store.application_version(application, 1)
    if not version:
        version = disk_store.add_version(
            application, 1, valid_from=datetime.now(),
            prices={'standard': 10, 'priority': 20, 'express': 30, 'research': 5})
        disk_store.add_commit(version)
    return version
def ensure_customer(disk_store, customer_id='cust_test'):
    """utility function to return existing or create customer for tests

    Fix: the original called add_customer unconditionally on every
    invocation, so a second call with the same id (e.g. via two
    add_sample calls in one test) attempted to insert a duplicate
    customer.  Now the existing customer is returned if present,
    matching the check-then-create pattern of ensure_application_version.
    """
    customer = disk_store.customer(customer_id)
    if customer:
        return customer
    # Customers need a group; reuse the dummy group if it already exists
    customer_group = disk_store.customer_group('dummy_group')
    if not customer_group:
        customer_group = disk_store.add_customer_group('dummy_group', 'dummy group')
    customer = disk_store.add_customer(internal_id=customer_id, name="Test Customer",
                                       scout_access=False, customer_group=customer_group,
                                       invoice_address='dummy_address',
                                       invoice_reference='dummy_reference')
    disk_store.add_commit(customer)
    return customer
def add_sample(disk_store, sample_id='test_sample', is_external=False, flowcell=None):
    """utility function: create, configure and commit a sample for tests"""
    # Resolve the supporting records first (committed as side effects)
    customer = ensure_customer(disk_store)
    app_version_id = ensure_application_version(disk_store, is_external=is_external).id
    # Build the sample and wire it to its customer/application version
    sample = disk_store.add_sample(name=sample_id, sex='female')
    sample.application_version_id = app_version_id
    sample.customer = customer
    sample.is_external = is_external
    if flowcell:
        sample.flowcells.append(flowcell)
    disk_store.add_commit(sample)
    return sample
def add_flowcell(disk_store, sample_id='flowcell_test', sample=None):
    """utility function: create and commit a flowcell (named by `sample_id`) for tests"""
    flowcell = disk_store.add_flowcell(
        name=sample_id,
        sequencer='dummy_sequencer',
        sequencer_type='hiseqx',
        date=datetime.now(),
    )
    # Optionally attach a sample to the new flowcell
    if sample:
        flowcell.samples.append(sample)
    disk_store.add_commit(flowcell)
    return flowcell
def add_panel(disk_store, panel_id='panel_test', customer_id='cust_test'):
    """utility function: create and commit a gene panel for tests"""
    panel = disk_store.add_panel(
        customer=ensure_customer(disk_store, customer_id),
        name=panel_id,
        abbrev=panel_id,
        version=1.0,
        date=datetime.now(),
        genes=1,
    )
    disk_store.add_commit(panel)
    return panel
def add_family(disk_store, family_id='family_test', customer_id='cust_test'):
    """utility function: create and commit a family (with a panel) for tests"""
    # A family needs at least one panel; create the dummy one first
    panel_name = add_panel(disk_store).name
    customer = ensure_customer(disk_store, customer_id)
    family = disk_store.add_family(name=family_id, panels=panel_name)
    family.customer = customer
    disk_store.add_commit(family)
    return family
def add_relationship(disk_store, sample, family):
    """utility function: link a sample to a family and commit the relationship"""
    link = disk_store.relate_sample(sample=sample, family=family, status='unknown')
    disk_store.add_commit(link)
    return link
|
""" Exceptions
"""
from typing import Dict
from fastapi import HTTPException
def bad_request(errors: Dict[str, str], code='validation_error'):
    """Raise an HTTPException carrying an HTTP 400 validation error.

    The response detail echoes the given error code and the per-field
    error messages back to the client.
    """
    detail = {
        'code': code,
        'reason': errors,
    }
    raise HTTPException(status_code=400, detail=detail)
def business_error(errors: Dict[str, str], code='internal_error'):
    """Raise an HTTPException carrying an HTTP 500 server error.

    NOTE(review): the ``errors`` argument is accepted but never included in
    the response — the reason is always the literal 'Server error'.
    Presumably this avoids leaking internals to clients; confirm that this
    is intentional.
    """
    detail = {
        'code': code,
        'reason': 'Server error',
    }
    raise HTTPException(status_code=500, detail=detail)
from flask import request, redirect, render_template, url_for
from app import app
import locale
# Set LC_TIME so strftime() produces localized day/month names.
# NOTE(review): "sp" is not a standard locale name — Swedish would be
# "sv_SE"/"swedish", Spanish "es_ES"/"spanish". The original comment said
# "swedish" but the rest of the file is written in Spanish; this string
# likely only resolves on a specific Windows setup — confirm the intended
# locale and that setlocale does not raise on the target platform.
locale.setlocale(locale.LC_TIME, "sp")  # original comment: swedish
import openpyxl
from datetime import datetime, timedelta
import calendar
#import numpy
def cerradas():
#File Log
FILEPATH_LOG = open(r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Log_Cerradas.txt','a')
#Read Excel
NAME_FILE=openpyxl.load_workbook(r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Prueba.xlsx')
sheet = NAME_FILE['Cerradas']
DIA_ONE = 1
DIA_TWO = 2
try:
filepath_cerrradas = r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Cerradas.xlsx'
wb = openpyxl.Workbook()
wb.save(filepath_cerrradas)
except:
print("******************************************************\n")
print("Cerrar archivo Cerradas.xlsx y despues ejecutar el programa\n")
print("******************************************************\n")
FILEPATH_Cerradas = openpyxl.load_workbook(r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Cerradas.xlsx')
sheet_Cerradas = FILEPATH_Cerradas.active
FINAL_COUNT_NUM_TOTAL_ROW = 2
for r in range(1, 1 + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
#print('%-8s' % d.value, end='')
#print('', end=""),
row_final = sheet_Cerradas.cell(row=r, column=c)
row_final.value = d.value
#print('')
count_num_total_rows = 1
next = False
#my_date_actual_compare_with_excel = (datetime.now()- timedelta(2)).strftime('%Y-%m-%d')
my_date_actual_compare_with_excel = datetime.now()
FILEPATH_LOG.write('-----------------------------------\n')
FILEPATH_LOG.write(str(datetime.now())+'\n')
my_date_yesterday_compare_with_excel = (datetime.now()- timedelta(0)).strftime('%Y-%m-%d')
my_year_actual = datetime.now().strftime('%Y-%m-%d')
#print ("my_date_actual_compare_with_excel : ", my_date_actual_compare_with_excel)
#print ("my_date_yesterday_compare_with_excel: ", my_date_yesterday_compare_with_excel)
#We have the total rows
while(next == False):
column_name_f = str("f"+str(count_num_total_rows))
if (sheet[column_name_f].value == None):
next = True
else:
count_num_total_rows = count_num_total_rows + 1
day_studying_number = int(my_date_actual_compare_with_excel.strftime('%d'))
#print (day_studying_number)
day_studying = my_date_actual_compare_with_excel.weekday()
day_studying_number_change_month = int(my_date_actual_compare_with_excel.strftime('%d'))
month_actual_compare_change = 0
month_actual_compare_change_less = 0
month_actual_compare_change = my_date_actual_compare_with_excel.strftime('%m')
month_actual_compare_change_less = my_date_yesterday_compare_with_excel[5:7]
#print ("MONTH_actual_compare_change_LESS: ", month_actual_compare_change_less)
my_year_actual = my_year_actual[0:4]
if (day_studying_number_change_month == 1 and day_studying == 0):
# SI HAY CAMBIO DE MES Y ES LUNES 1; LA COMPROBACION DE CAMBIO DE MES LA HACEMOS EN UN IF DE ABAJO
# print ("****************SI HAY CAMBIO DE MES Y ES LUNES 1***********************")
FILEPATH_LOG.write("SI HAY CAMBIO DE MES Y ES LUNES 1\n")
last_day_month_before = calendar.monthrange(int(my_year_actual), int(int(month_actual_compare_change_less) - 1))
last_day_month_before = int(last_day_month_before[1])
# print (last_day_month_before )
# We have the files that we are interested
for final_count_num_total_rows in range(1, count_num_total_rows):
column_name_f = str("f" + str(final_count_num_total_rows))
column_name_k = str("k" + str(final_count_num_total_rows))
column_name_r = str("r" + str(final_count_num_total_rows))
if (sheet[column_name_f].value == 'TIWS' or sheet[column_name_f].value == 'TIWS '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 1 and (
day_actual_excel == last_day_month_before or day_actual_excel == last_day_month_before - 1 or day_actual_excel == last_day_month_before - 2)):
FILEPATH_LOG.write("SELECCIONAMOS TIWS\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TISA ' or sheet[column_name_f].value == 'TISA'):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 1 and (
day_actual_excel == last_day_month_before or day_actual_excel == last_day_month_before - 1 or day_actual_excel == last_day_month_before - 2)):
FILEPATH_LOG.write("SELECCIONAMOS TISA\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TEDIG' or sheet[column_name_f].value == 'TEDIG '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 1 and (
day_actual_excel == last_day_month_before or day_actual_excel == last_day_month_before - 1 or day_actual_excel == last_day_month_before - 2)):
FILEPATH_LOG.write("SELECCIONAMOS TEDIG\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
FILEPATH_Cerradas.save(filepath_cerrradas)
# FIN SI HAY CAMBIO DE MES Y ES LUNES 1
elif (day_studying_number_change_month == 2 and day_studying == 0):
# SI HAY CAMBIO DE MES Y ES LUNES 2; LA COMPROBACION DE CAMBIO DE MES LA HACEMOS EN UN IF DE ABAJO
# print("**************SI HAY CAMBIO DE MES Y ES LUNES 2******************")
FILEPATH_LOG.write("SI HAY CAMBIO DE MES Y ES LUNES 2\n")
last_day_month_before = calendar.monthrange(int(my_year_actual), int(int(month_actual_compare_change_less) - 1))
last_day_month_before = int(last_day_month_before[1])
# print(last_day_month_before)
# We have the files that we are interested
for final_count_num_total_rows in range(1, count_num_total_rows):
column_name_f = str("f" + str(final_count_num_total_rows))
column_name_k = str("k" + str(final_count_num_total_rows))
column_name_r = str("r" + str(final_count_num_total_rows))
if (sheet[column_name_f].value == 'TIWS' or sheet[column_name_f].value == 'TIWS '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if ((compare_month == 0 and day_actual_excel == DIA_ONE) or (compare_month == 1 and (
day_actual_excel == last_day_month_before or day_actual_excel == last_day_month_before - 1))):
FILEPATH_LOG.write("SELECCIONAMOS TIWS\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TISA ' or sheet[column_name_f].value == 'TISA'):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if ((compare_month == 0 and day_actual_excel == DIA_ONE) or (compare_month == 1 and (
day_actual_excel == last_day_month_before or day_actual_excel == last_day_month_before - 1))):
FILEPATH_LOG.write("SELECCIONAMOS TISA\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TEDIG' or sheet[column_name_f].value == 'TEDIG '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if ((compare_month == 0 and day_actual_excel == DIA_ONE) or (compare_month == 1 and (
day_actual_excel == last_day_month_before or day_actual_excel == last_day_month_before - 1))):
FILEPATH_LOG.write("SELECCIONAMOS TEDIG\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
FILEPATH_Cerradas.save(filepath_cerrradas)
# FIN SI HAY CAMBIO DE MES Y ES LUNES 2
elif (day_studying_number_change_month == 3 and day_studying == 0):
# SI HAY CAMBIO DE MES Y ES LUNES 3; LA COMPROBACION DE CAMBIO DE MES LA HACEMOS EN UN IF DE ABAJO
FILEPATH_LOG.write("SI HAY CAMBIO DE MES Y ES LUNES 3\n")
last_day_month_before = calendar.monthrange(int(my_year_actual), int(int(month_actual_compare_change_less) - 1))
last_day_month_before = int(last_day_month_before[1])
# print(last_day_month_before)
# We have the files that we are interested
for final_count_num_total_rows in range(1, count_num_total_rows):
column_name_f = str("f" + str(final_count_num_total_rows))
column_name_k = str("k" + str(final_count_num_total_rows))
column_name_r = str("r" + str(final_count_num_total_rows))
if (sheet[column_name_f].value == 'TIWS' or sheet[column_name_f].value == 'TIWS '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 0 and (day_actual_excel == DIA_ONE or day_actual_excel == DIA_TWO)) or (
compare_month == 1 and (day_actual_excel == last_day_month_before)):
FILEPATH_LOG.write("SELECCIONAMOS TIWS\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TISA ' or sheet[column_name_f].value == 'TISA'):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 0 and (day_actual_excel == DIA_ONE or day_actual_excel == DIA_TWO)) or (
compare_month == 1 and (day_actual_excel == last_day_month_before)):
FILEPATH_LOG.write("SELECCIONAMOS TISA\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TEDIG' or sheet[column_name_f].value == 'TEDIG '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 0 and (day_actual_excel == DIA_ONE or day_actual_excel == DIA_TWO)) or (
compare_month == 1 and (day_actual_excel == last_day_month_before)):
FILEPATH_LOG.write("SELECCIONAMOS TEDIG\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
FILEPATH_Cerradas.save(filepath_cerrradas)
# FIN SI HAY CAMBIO DE MES Y ES LUNES 3
elif (day_studying_number_change_month == 1 and not day_studying == 0):
# SI HAY CAMBIDO DE MES 1 Y NO ES LUNES;
# print ("************SI HAY CAMBIDO DE MES 1 Y NO ES LUNES********************")
FILEPATH_LOG.write("SI HAY CAMBIDO DE MES 1 Y NO ES LUNES\n")
last_day_month_before = calendar.monthrange(int(my_year_actual), int(int(month_actual_compare_change_less) - 1))
last_day_month_before = int(last_day_month_before[1])
# print (last_day_month_before )
# We have the files that we are interested
for final_count_num_total_rows in range(1, count_num_total_rows):
column_name_f = str("f" + str(final_count_num_total_rows))
column_name_k = str("k" + str(final_count_num_total_rows))
column_name_r = str("r" + str(final_count_num_total_rows))
if (sheet[column_name_f].value == 'TIWS' or sheet[column_name_f].value == 'TIWS '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 1 and (day_actual_excel == last_day_month_before)):
FILEPATH_LOG.write("SELECCIONAMOS TIWS\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TISA ' or sheet[column_name_f].value == 'TISA'):
print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 1 and (day_actual_excel == last_day_month_before)):
FILEPATH_LOG.write("SELECCIONAMOS TISA\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TEDIG' or sheet[column_name_f].value == 'TEDIG '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if (compare_month == 1 and (day_actual_excel == last_day_month_before)):
FILEPATH_LOG.write("SELECCIONAMOS TEDIG\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
FILEPATH_Cerradas.save(filepath_cerrradas)
# FIN SI HAY CAMBIDO DE MES 1 Y NO ES LUNES;
elif (
not day_studying_number_change_month == 3 and not day_studying_number_change_month == 2 and not day_studying_number_change_month == 1 and day_studying == 0):
# SI ES UN LUNES CUALQUIERA; LA COMPROBACION DE CAMBIO DE MES LA HACEMOS EN UN IF DE ABAJO
FILEPATH_LOG.write("SI ES UN LUNES CUALQUIERA\n")
last_day_month_before = calendar.monthrange(int(my_year_actual), int(int(month_actual_compare_change_less) - 1))
last_day_month_before = int(last_day_month_before[1])
# print(last_day_month_before)
# We have the files that we are interested
for final_count_num_total_rows in range(1, count_num_total_rows):
column_name_f = str("f" + str(final_count_num_total_rows))
column_name_k = str("k" + str(final_count_num_total_rows))
column_name_r = str("r" + str(final_count_num_total_rows))
if (sheet[column_name_f].value == 'TIWS' or sheet[column_name_f].value == 'TIWS '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if ((day_actual_excel_compare - day_actual_excel == 1
or day_actual_excel_compare - day_actual_excel == 2
or day_actual_excel_compare - day_actual_excel == 3)
and (int(month_actual_excel_compare) - int(month_actual_excel) == 0)):
FILEPATH_LOG.write("SELECCIONAMOS TIWS\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TISA ' or sheet[column_name_f].value == 'TISA'):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if ((day_actual_excel_compare - day_actual_excel == 1
or day_actual_excel_compare - day_actual_excel == 2
or day_actual_excel_compare - day_actual_excel == 3)
and (int(month_actual_excel_compare) - int(month_actual_excel) == 0)):
FILEPATH_LOG.write("SELECCIONAMOS TISA\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TEDIG' or sheet[column_name_f].value == 'TEDIG '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print(day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
compare_month = int(month_actual_excel_compare) - int(month_actual_excel)
if ((day_actual_excel_compare - day_actual_excel == 1
or day_actual_excel_compare - day_actual_excel == 2
or day_actual_excel_compare - day_actual_excel == 3)
and (int(month_actual_excel_compare) - int(month_actual_excel) == 0)):
FILEPATH_LOG.write("SELECCIONAMOS TEDIG\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
FILEPATH_Cerradas.save(filepath_cerrradas)
# SI ES UN LUNES CUALQUIERA;
elif (day_studying == 1 or day_studying == 2 or day_studying == 3 or day_studying == 4):
# DE MARTES A VIERNES SIN CAMBIO DE MES; CASO MAS FACIL
FILEPATH_LOG.write("DE MARTES A VIERNES SIN CAMBIO DE MES\n")
# We have the files that we are interested
for final_count_num_total_rows in range(1, count_num_total_rows):
column_name_f = str("f" + str(final_count_num_total_rows))
column_name_k = str("k" + str(final_count_num_total_rows))
column_name_r = str("r" + str(final_count_num_total_rows))
if (sheet[column_name_f].value == 'TIWS' or sheet[column_name_f].value == 'TIWS '):
# print (column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
#print (time_total_open[0:2])
#print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
#print(day_actual_excel_compare - day_actual_excel )
# print("Month Actual Compare", int(month_actual_excel_compare) - int(month_actual_excel) )
if ((day_actual_excel_compare - day_actual_excel == 1) and (
int(month_actual_excel_compare) - int(month_actual_excel) == 0)):
FILEPATH_LOG.write("SELECCIONAMOS TIWS\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
#print('%-8s' % d.value, end='')
#print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TISA ' or sheet[column_name_f].value == 'TISA'):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = 0
# print (day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
# print("Month Actual Compare", int(month_actual_excel_compare) - int(month_actual_excel) )
if ((day_actual_excel_compare - day_actual_excel == 1) and (
int(month_actual_excel_compare) - int(month_actual_excel) == 0)):
FILEPATH_LOG.write("SELECCIONAMOS TISA\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
row_final.value = d.value
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
elif (sheet[column_name_f].value == 'TEDIG' or sheet[column_name_f].value == 'TEDIG '):
# print(column_name_f)
if (sheet[column_name_k].value) != 'OPEN':
cadena = str(sheet[column_name_k].value)
day_actual_excel = cadena[8:10]
month_actual_excel = cadena[5:7]
day_actual_excel = int(day_actual_excel)
day_open = sheet[column_name_r].value
if (day_open != None):
time_total_open = str(my_date_actual_compare_with_excel - day_open)
else:
time_total_open = str(00)
# print (day_actual_excel)
# print("Month Actual", month_actual_excel)
day_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%d')
month_actual_excel_compare = my_date_actual_compare_with_excel.strftime('%m')
day_actual_excel_compare = int(day_actual_excel_compare)
month_actual_excel_compare = int(month_actual_excel_compare)
# print(day_actual_excel_compare)
# print("Month Actual Compare", int(month_actual_excel_compare) - int(month_actual_excel) )
if ((day_actual_excel_compare - day_actual_excel == 1) and (
int(month_actual_excel_compare) - int(month_actual_excel) == 0)):
FILEPATH_LOG.write("SELECCIONAMOS TEDIG\n")
for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
for c in range(1, 24):
d = sheet.cell(row=r, column=c)
# print('%-8s' % d.value, end='')
# print('', end=""),
row_final = sheet_Cerradas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
row_final.value = d.value
if c == 17: # control para introducir columna tiempo abierta
row_final.value = int(time_total_open[0:2])
else:
row_final.value = d.value
# print('')
FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
FILEPATH_Cerradas.save(filepath_cerrradas)
# FIN DE MARTES A VIERNES SIN CAMBIO DE MES
else:
FILEPATH_LOG.write("NO ENTRO EN NIGUNO DE LAS OPCIONES\n")
FILEPATH_LOG.write('-----------------------------------\n')
FILEPATH_LOG.write("\n")
FILEPATH_LOG.close()
print("\n")
print("Terminado Cerradas\n")
def abiertas():
    """Copy every still-OPEN TIWS/TEDIG/TISA row from the 'Abiertas' sheet of
    Prueba.xlsx into a freshly created Abiertas.xlsx, writing the elapsed open
    time into column 19, and append progress messages to Log_Abiertas.txt.

    Fixes over the original: the bare ``except:`` is narrowed to
    ``except Exception`` (so Ctrl-C / SystemExit are not swallowed), the local
    that shadowed the builtin ``next`` is renamed, and ``== None`` comparisons
    use ``is``/``is not``. All runtime strings and Excel logic are unchanged.
    """
    # File Log (opened in append mode; one dashed section per run)
    FILEPATH_LOG = open(r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Log_Abiertas.txt', 'a')
    # Read Excel
    NAME_FILE = openpyxl.load_workbook(r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Prueba.xlsx')
    sheet = NAME_FILE['Abiertas']
    # Recreate the output workbook; saving fails when the file is open in Excel,
    # in which case we warn the user and carry on (best-effort, as before).
    try:
        filepath_abiertas = r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Abiertas.xlsx'
        wb = openpyxl.Workbook()
        wb.save(filepath_abiertas)
    except Exception:  # was a bare except:; keep the warning but don't mask SystemExit/KeyboardInterrupt
        print("******************************************************\n")
        print("Cerrar archivo Abiertas.xlsx y despues ejecutar el programa\n")
        print("******************************************************\n")
    FILEPATH_Abiertas = openpyxl.load_workbook(r'C:\Users\usr1CR\.PyCharmCE2018.2\proyects\probando_jinja2\probando_jinja2\Automatizacion_UCC\excel\Abiertas.xlsx')
    sheet_Abiertas = FILEPATH_Abiertas.active
    FINAL_COUNT_NUM_TOTAL_ROW = 2  # next free row in the output (row 1 = header)
    # Copy the header row (columns 1..23) verbatim.
    for r in range(1, 1 + 1):
        for c in range(1, 24):
            d = sheet.cell(row=r, column=c)
            row_final = sheet_Abiertas.cell(row=r, column=c)
            row_final.value = d.value
    count_num_total_rows = 1
    reached_end = False  # renamed from 'next' to avoid shadowing the builtin
    my_date_actual_compare_with_excel = datetime.now()
    FILEPATH_LOG.write('-----------------------------------\n')
    FILEPATH_LOG.write(str(datetime.now()) + '\n')
    # Count rows by scanning column H until the first empty cell.
    while not reached_end:
        column_name_h = str("h" + str(count_num_total_rows))
        if sheet[column_name_h].value is None:
            reached_end = True
        else:
            count_num_total_rows = count_num_total_rows + 1
    # Walk every data row and keep those belonging to the teams of interest.
    for final_count_num_total_rows in range(1, count_num_total_rows):
        column_name_h = str("h" + str(final_count_num_total_rows))
        column_name_m = str("m" + str(final_count_num_total_rows))
        column_name_t = str("t" + str(final_count_num_total_rows))
        if (sheet[column_name_h].value == 'TIWS' or sheet[column_name_h].value == 'TIWS '
                or sheet[column_name_h].value == 'TEDIG' or sheet[column_name_h].value == 'TEDIG '
                or sheet[column_name_h].value == 'TISA' or sheet[column_name_h].value == 'TISA '):
            day_open = sheet[column_name_t].value
            if day_open is not None:
                # NOTE(review): assumes column T holds a datetime so the
                # subtraction yields a timedelta -- confirm the sheet's format.
                time_total_open = str(my_date_actual_compare_with_excel - day_open)
            else:
                time_total_open = str(0)
            if (sheet[column_name_m].value) == 'OPEN' or (sheet[column_name_m].value == 'OPEN '):
                FILEPATH_LOG.write("SELECCIONAMOS TIWS o TEDIG o TISA\n")
                for r in range(final_count_num_total_rows, final_count_num_total_rows + 1):
                    for c in range(1, 24):
                        d = sheet.cell(row=r, column=c)
                        row_final = sheet_Abiertas.cell(row=FINAL_COUNT_NUM_TOTAL_ROW, column=c)
                        if c == 19:  # insert the "hours open" column here
                            # NOTE(review): takes the first two characters of the
                            # timedelta string; deltas under 10 hours give e.g.
                            # "2:" and int() would raise -- confirm day_open granularity.
                            row_final.value = int(time_total_open[0:2])
                        else:
                            row_final.value = d.value
                FINAL_COUNT_NUM_TOTAL_ROW = FINAL_COUNT_NUM_TOTAL_ROW + 1
    FILEPATH_Abiertas.save(filepath_abiertas)
    FILEPATH_LOG.write('-----------------------------------\n')
    FILEPATH_LOG.write("\n")
    FILEPATH_LOG.close()
    print("Terminado Abiertas\n")
print("Proceso finalizado con exito\n")

my_date = datetime.now()

# Spanish month names indexed by (month - 1).
# Fixes the original typo "Arbil" -> "Abril"; replaces a 12-branch if/elif
# chain of strftime('%m') string comparisons with a direct lookup.
_MONTH_NAMES = ["Enero", "Febrero", "Marzo", "Abril", "Mayo", "Junio",
                "Julio", "Agosto", "Septiembre", "Octubre", "Noviembre", "Diciembre"]
month = _MONTH_NAMES[my_date.month - 1]

# Spanish day names indexed by datetime.weekday() (0 = Monday).
_DAY_NAMES = ["Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado", "Domingo"]
day = _DAY_NAMES[my_date.weekday()]

# Run both exports (closed tickets first, then open ones), as before.
cerradas()
abiertas()

'''
@app.route('/')
def index():
    return render_template('index.html', name_columns=['Infinity', 'Cisco SR', 'Cisco RMA', 'Ticket SMC', 'Cliente',
                                                       'Sala de apertura', 'Adm. de circuito', 'Salas afectadas',
                                                       'País','Fecha de cierre','Escalado','Proactiva','Responsable',
                                                       'Motivo de apertura','Resolución','Tiempo abierta',
                                                       'Fecha de apertura'],
                           month_actual=month,my_date=datetime.now(),day_actual=day)
'''
import subprocess
import time
from Profitability import check_proditability
def init_miner(config):
    """Launch the miner for the currently most profitable coin and hand off
    to the periodic profitability re-check loop.

    ``config`` must provide ``gpu`` plus the wallet attributes read by
    ``command`` and ``profitability_time_check`` read by the check loop.
    """
    old_result = check_proditability(config.gpu)  # best coin right now
    cmd = command(config, old_result)
    p = subprocess.Popen(cmd)  # something long running
    run_miner_prof_check(cmd, old_result, config, p)
def run_miner_prof_check(cmd, old_result, config, p):
    """Re-check profitability every ``config.profitability_time_check`` seconds
    and restart the miner process whenever the most profitable coin changes.

    Runs forever (same contract as before). The original implementation called
    itself recursively once per check, growing the call stack without bound and
    eventually raising RecursionError; this version iterates instead.

    :param cmd: command line currently running (kept for interface parity).
    :param old_result: coin the running miner was started for.
    :param config: object with ``gpu`` and ``profitability_time_check``.
    :param p: the running ``subprocess.Popen`` miner process.
    """
    while True:
        time.sleep(config.profitability_time_check)
        result = check_proditability(config.gpu)
        if result != old_result:
            # Most profitable coin changed: stop the old miner, start a new one.
            p.terminate()
            cmd = command(config, result)
            p = subprocess.Popen(cmd)
            old_result = result
def command(config, result):
    """Return the Claymore launch arguments for mining ETH on nanopool,
    optionally dual-mining *result* ("Pascalcoin", "Decred", "LBRY" or "Sia").

    Any other *result* value yields the ETH-only command line.
    """
    # ETH-only base command; each dual-mining mode appends its pool/wallet args.
    launch = ['Claymore\\ethdcrminer64.exe',
              '-epool', 'eth-eu1.nanopool.org:9999',
              '-ewal', config.wallet_address_ETH,
              '-epsw', 'x']
    if result == "Pascalcoin":
        launch = launch + ['-dpool', 'stratum+tcp://pasc-eu1.nanopool.org:15555',
                           '-dwal', config.wallet_address_Pascalcoin,
                           '-dpsw', 'x', '-dcoin', 'pasc']
    elif result == "Decred":
        launch = launch + ['-dpool', 'stratum+tcp://yiimp.ccminer.org:4252',
                           '-dwal', config.wallet_address_Decred, '-dpsw', 'x']
    elif result == "LBRY":
        launch = launch + ['-dpool', 'stratum+tcp://lbry.suprnova.cc:6256',
                           '-dwal', config.suprnova_login_worker,
                           '-dpsw', 'x', '-dcoin', 'lbc']
    elif result == "Sia":
        launch = launch + ['-dpool', 'stratum+tcp://sia-eu1.nanopool.org:7777',
                           '-dwal', config.wallet_address_Sia,
                           '-dpsw', 'x', '-dcoin', 'sia']
    return launch
from __future__ import unicode_literals
from django.apps import AppConfig
class CargafilesConfig(AppConfig):
    """Django application configuration for the ``cargafiles`` app."""
    # Dotted path of the application this configuration applies to.
    name = 'cargafiles'
|
from kafka import KafkaProducer
from elasticsearch import Elasticsearch
from kafka import KafkaConsumer
import time
import json
# Seed Elasticsearch with apartment/user fixtures, then index every new
# listing or user that arrives on the 'new-listings-topic' Kafka topic.
# Runs as a long-lived worker inside docker-compose (hosts 'kafka' and 'es').
print("Batch Script Running")
time.sleep(35)  # crude startup delay so kafka/es containers are up -- TODO: replace with a readiness check
consumer = KafkaConsumer('new-listings-topic', group_id='listing-indexer', bootstrap_servers=['kafka:9092'])
es = Elasticsearch(['es'])
# Static apartment fixtures, indexed under their explicit ids.
fixtures = [{"name": "Apartment 1", "price": 750, "rating": "3.50", "username": "cyeung", "id": 1}, {"name": "Apartment 2", "price": 875, "rating": "2.95", "username": "cyeung", "id": 2},
            {"name": "Apartment 3", "price": 1925, "rating": "4.25", "username": "cyeung", "id": 3}, {"name": "Apartment 4", "price": 968, "rating": "4.95", "username": "tk9at", "id": 4},
            {"name": "Apartment 5", "price": 478, "rating": "3.81", "username": "tk9at", "id": 5}, {"name": "Apartment 6 ", "price": 899, "rating": "4.50", "username": "tk9at", "id": 6},
            {"name": "Apartment 7", "price": 2500, "rating": "1.50", "username": "bradyw7", "id": 7}, {"name": "Apartment 8", "price": 2384, "rating": "0.75", "username": "bradyw7", "id": 8}]
for apartment in fixtures:
    es.index(index='listing_index', doc_type='listing', id=apartment['id'], body=apartment)
es.indices.refresh(index="listing_index")  # make fixtures searchable immediately
print("Apartment Fixtures Loaded.")
# user_consumer = KafkaConsumer('user-listings-topic', group_id='listing-indexer', bootstrap_servers=['kafka:9092'])
# User fixtures are indexed without explicit ids (Elasticsearch assigns them).
users_fixtures = [{"username": "cyeung", "email": "cy4bv@virginia.edu"},
                  {"username": "tk9at", "email": "tk9at@virginia.edu"},
                  {"username": "bradyw7", "email": "bwz3kt@virginia.edu"}]
for user in users_fixtures:
    es.index(index='user_index', doc_type='listing', body=user)
es.indices.refresh(index="user_index")
print("User Fixtures Loaded.")
# Blocks forever consuming the topic.
for message in consumer:
    # NOTE(review): assumes each message is a UTF-8 JSON *array* whose first
    # element is the payload dict -- confirm against the producer side.
    new_listing = json.loads((message.value).decode('utf-8'))[0]
    print(new_listing)
    if 'email' in new_listing:
        # Presence of an 'email' key is what distinguishes a user from a listing.
        es.index(index='user_index', doc_type='listing', body=new_listing)
        es.indices.refresh(index="user_index")
    else:
        es.index(index='listing_index', doc_type='listing', id=new_listing['id'], body=new_listing)
        es.indices.refresh(index="listing_index")
# for message in user_consumer:
#     new_listing = json.loads((message.value).decode('utf-8'))[0]
#     print(new_listing)
#     es.index(index='user_index', doc_type='listing', body=new_listing)
#     es.indices.refresh(index="user_index")
#
|
import json
import os
import sys
import psycopg2
from imaging import OmeroConstants, OmeroUtil
from imaging.OmeroProperties import OmeroProperties
class RetrieveAndSerializeOmeroIds:
    """Fetch annotation and image file records for a set of Omero datasources
    from the Omero PostgreSQL database and serialize them as JSON files under
    ``outFolder``, tagged with the data-release tag ``drTag``.

    Improvements over the original: the duplicated connection boilerplate is
    extracted into ``_connect``, the SQL is parameterized instead of built by
    string concatenation, and the builtin ``id`` is no longer shadowed.
    """

    omeroProperties = None  # dict of Omero DB connection settings
    outFolder = None        # output directory for the JSON files
    drTag = None            # data-release tag used in output file names
    dsList = None           # consolidated list of datasource ids

    def __init__(self, omeroDevPropetiesFile, outFolder, drTag):
        self.omeroProperties = OmeroProperties(omeroDevPropetiesFile).getProperties()
        self.outFolder = outFolder
        self.drTag = drTag
        self.dsList = self.consolidateDatasources()

    def consolidateDatasources(self):
        """Return datasource ids found in the DB followed by the static
        ids from ``OmeroConstants.DATASOURCE_LIST``."""
        dsData = OmeroUtil.retrieveDatasourcesFromDB(self.omeroProperties)
        return list(dsData) + list(OmeroConstants.DATASOURCE_LIST)

    def _connect(self):
        """Open a new connection to the Omero PostgreSQL database using the
        loaded properties (shared by both retrieval methods)."""
        return psycopg2.connect(database=self.omeroProperties[OmeroConstants.OMERO_DB_NAME],
                                user=self.omeroProperties[OmeroConstants.OMERO_DB_USER],
                                password=self.omeroProperties[OmeroConstants.OMERO_DB_PASS],
                                host=self.omeroProperties[OmeroConstants.OMERO_DB_HOST],
                                port=self.omeroProperties[OmeroConstants.OMERO_DB_PORT])

    def retrieveAnnotationsAndSerialize(self):
        """Dump every annotation file record for the datasources into
        ``<outFolder><drTag>_annotations.json``."""
        conn = self._connect()
        cur = conn.cursor()
        fileData = []
        for ds in self.dsList:
            # Parameterized query (was string concatenation of str(ds)).
            cur.execute(
                'SELECT a.id,of.name,of.path FROM annotation a '
                'INNER JOIN datasetannotationlink dsal ON a.id=dsal.child '
                'INNER JOIN originalfile of ON a.file=of.id WHERE dsal.parent=%s',
                (str(ds),))
            for (annotationId, name, path) in cur.fetchall():
                # Stored paths may carry a leading '/'; the client path must not.
                clientPath = path
                if clientPath.startswith('/'):
                    clientPath = clientPath[1:]
                fileData.append({
                    'id': annotationId,
                    'name': name,
                    'path': clientPath,
                    'type': 'annotation'
                })
        conn.close()
        with open(self.outFolder + self.drTag + '_annotations.json', 'w') as filehandle:
            json.dump(fileData, filehandle, sort_keys=True, indent=4)

    def retrieveImagesAndSerialize(self):
        """Dump image records for the datasources into numbered JSON chunk
        files ``<drTag>_<n>.json``, flushing every 500000 records."""
        conn = self._connect()
        cur = conn.cursor()
        fileData = []
        count = 1
        masterCount = 1  # numeric suffix of the current output chunk
        for ds in self.dsList:
            cur.execute(
                'SELECT i.id,i.name,fse.clientpath FROM image i '
                'INNER JOIN datasetimagelink dsil ON i.id=dsil.child '
                'INNER JOIN filesetentry fse ON i.fileset=fse.fileset WHERE dsil.parent=%s',
                (str(ds),))
            for (imageId, name, clientpath) in cur.fetchall():
                # Flush a chunk periodically to bound memory and file size.
                if count % 500000 == 0:
                    with open(os.path.join(self.outFolder, self.drTag + '_' + str(masterCount) + '.json'), 'w') as fh:
                        json.dump(fileData, fh, sort_keys=True, indent=4)
                    masterCount += 1
                    fileData = []
                count += 1
                fileData.append({
                    'id': imageId,
                    'name': name,
                    'path': clientpath,
                    'type': 'image'
                })
        conn.close()
        # Write the remaining (or only) chunk.
        with open(os.path.join(self.outFolder, self.drTag + '_' + str(masterCount) + '.json'), 'w') as fh:
            json.dump(fileData, fh, sort_keys=True, indent=4)
def main(omeroDevPropetiesFile, outFolder, drTag):
    """Serialize both annotation and image Omero records for one data release.

    :param omeroDevPropetiesFile: path to the Omero properties file.
    :param outFolder: directory receiving the JSON output files.
    :param drTag: data-release tag used to name the output files.
    """
    serializer = RetrieveAndSerializeOmeroIds(omeroDevPropetiesFile, outFolder, drTag)
    serializer.retrieveAnnotationsAndSerialize()
    serializer.retrieveImagesAndSerialize()
if __name__ == "__main__":
    # CLI usage: <script> <omeroDevPropertiesFile> <outFolder> <drTag>
    main(sys.argv[1], sys.argv[2], sys.argv[3])
|
from django.test import SimpleTestCase
from django.urls import reverse, resolve
from listings.views import listing, search
class TestUrls(SimpleTestCase):
    """URL-resolution tests for the listings app."""

    def test_search_url_is_resolved(self):
        # Reversing the named route and resolving the URL back must map to the view.
        resolved = resolve(reverse('search'))
        self.assertEqual(resolved.func, search)
'''
Created on 10-Aug-2018
@author: srinivasan
'''
from collections import defaultdict
import datetime
import gzip
from io import BytesIO
import logging
from jinja2.environment import Environment
from scrapy import signals
from scrapy.exceptions import NotConfigured
from scrapy.mail import MailSender
from scrapy.utils.serialize import ScrapyJSONEncoder
from Data_scuff import config
logger = logging.getLogger(__name__)
def format_size(size):
    """Return *size* (a byte count) as a human-readable string, e.g. ``"3.4 MB"``.

    Fix: the original fell off the end of the loop and returned ``None`` for
    sizes of 1024 GB or more; such values are now reported in TB.

    :param size: number of bytes (int or float).
    :return: formatted string with one decimal place and a unit suffix.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB']:
        if size < 1024.0:
            return "{:3.1f} {}".format(size, unit)
        size /= 1024.0
    # >= 1024 GB: report in terabytes instead of returning None.
    return "{:3.1f} {}".format(size, 'TB')
class GzipCompressor(gzip.GzipFile):
    """Gzip stream that compresses into an in-memory ``PlainCompressor`` buffer."""
    # Attachment metadata used when the buffered payload is mailed out.
    extension = '.gz'
    mimetype = 'application/gzip'

    def __init__(self):
        # NOTE(review): StatsMailSend.spider_closed finalizes the stream by
        # poking GzipFile internals (``compress``, ``crc``, ``fileobj``), so
        # this must remain a thin GzipFile subclass over PlainCompressor.
        super(GzipCompressor, self).__init__(fileobj=PlainCompressor(), mode='wb')
        # Reads bypass gzip and come straight from the underlying buffer.
        self.read = self.fileobj.read
class PlainCompressor(BytesIO):
    """In-memory "compressor" that simply stores the data uncompressed."""

    # Attachment metadata used when the buffered payload is mailed out.
    extension = ''
    mimetype = 'text/plain'

    def read(self, *args, **kwargs):
        # Rewind first so every read sees the buffer from the beginning.
        self.seek(0)
        return super().read(*args, **kwargs)

    @property
    def size(self):
        """Number of bytes written so far."""
        return len(self.getvalue())
class StatsMailSend:
    """Scrapy extension that buffers scraped items, dropped items and errors
    per spider (optionally gzip-compressed) and emails an HTML crawl summary
    when the spider closes."""

    def __init__(self, crawler, compressor):
        self.stats = crawler.stats
        self.settings = crawler.settings
        self.bots_name = crawler.settings.get('BOT_NAME')
        # One lazily created buffer per attachment file name.
        self.files = defaultdict(compressor)
        self.encoder = ScrapyJSONEncoder()

    @classmethod
    def from_crawler(cls, crawler):
        """Scrapy extension factory: pick the buffer type from the
        STATUSMAILER_COMPRESSION setting and connect the spider signals.

        Raises NotConfigured for an unrecognized compression value.
        """
        compression = crawler.settings.get('STATUSMAILER_COMPRESSION')
        if not compression:
            compressor = PlainCompressor
        elif compression.lower().startswith('gz'):
            compressor = GzipCompressor
        else:
            raise NotConfigured
        instance = cls(crawler, compressor)
        crawler.signals.connect(instance.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(instance.item_dropped, signal=signals.item_dropped)
        crawler.signals.connect(instance.item_scraped, signal=signals.item_scraped)
        crawler.signals.connect(instance.spider_error, signal=signals.spider_error)
        crawler.signals.connect(instance.spider_closed, signal=signals.spider_closed)
        return instance

    def spider_opened(self, spider):
        """Record the crawl start time."""
        logger.info("spider started to send Mail %s", spider.name)
        self.start_time = datetime.datetime.now()

    def item_scraped(self, item, response, spider):
        """Append each scraped item, JSON-encoded, to the items buffer."""
        self.files[spider.name + '-items.json'].write(bytes(self.encoder.encode(item), 'utf-8'))

    def item_dropped(self, item, response, exception, spider):
        """Append each dropped item (one JSON object per line) to its buffer."""
        self.files[spider.name + '-dropped-items.json'].write(bytes(self.encoder.encode(item), 'utf-8'))
        self.files[spider.name + '-dropped-items.json'].write(bytes('\n', 'utf-8'))

    def spider_error(self, failure, response, spider):
        """Append the failing URL and its traceback to the error log buffer."""
        self.files[spider.name + '-errors.log'].write(bytes(response.url + '\n', 'utf-8'))
        self.files[spider.name + '-errors.log'].write(bytes(failure.getTraceback(), 'utf-8'))

    def spider_closed(self, spider, reason):
        """Finalize the buffers and email the crawl summary."""
        # NOTE(review): assumes every spider defines custom_settings['JIRA_ID'].
        jira_id = spider.custom_settings['JIRA_ID']
        self.finish_time = datetime.datetime.now()
        self.used_time = self.finish_time - self.start_time  # computed but not rendered below
        files = []
        for name, compressed in self.files.items():
            # Manually write the gzip trailer (CRC32 + uncompressed size) since
            # GzipFile.close() is never called. Uses private gzip internals
            # (write32u, .compress, .crc) -- fragile across Python versions.
            # NOTE(review): PlainCompressor has no .fileobj/.compress/.crc, so
            # this looks like it would raise AttributeError when compression is
            # disabled and any data was buffered -- confirm.
            compressed.fileobj.write(compressed.compress.flush())
            gzip.write32u(compressed.fileobj, compressed.crc)
            gzip.write32u(compressed.fileobj, compressed.size & 0xffffffff)
            files.append((name + compressed.extension, compressed.mimetype, compressed))
        try:
            # NOTE(review): self.files is a defaultdict, so this lookup creates
            # an empty buffer instead of raising -- the KeyError branch appears dead.
            size = self.files[spider.name + '-items.json'].size
        except KeyError:
            size = 0
        stats = spider.crawler.stats.get_stats()
        # Column-quality info is pulled out of the stats and rendered separately.
        dqr_status = stats.pop('columns_stats_information', {})
        # Any downloader exception or ERROR log entry marks the job as failed.
        if ('downloader/exception_count' in stats and stats['downloader/exception_count'] > 0) \
                or ('log_count/ERROR' in stats and stats['log_count/ERROR'] > 0):
            subject = "failed"
        else:
            subject = "succeed"
        mailsender = MailSender.from_settings(self.settings)
        mailsender.send(to=self.settings.getlist('JOB_NOTIFICATION_EMAILS'),
                        subject='JIRA ID:{} job ends with {}'.format(jira_id, subject),
                        # attachs=files,
                        body=Environment().from_string(config.HTML).render({'stats':stats,
                                                                           'dqr_status':dqr_status,
                                                                           'jira':jira_id,
                                                                           'size':format_size(size)}),
                        mimetype='text/html', _callback=self._catch_mail_sent)

    def _catch_mail_sent(self, **kwargs):
        """Callback invoked by MailSender once the mail is handed off."""
        logger.info("Mail Send Notification")
|
#
# @lc app=leetcode id=105 lang=python3
#
# [105] Construct Binary Tree from Preorder and Inorder Traversal
#
import TreeNode
from typing import List
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
        """Reconstruct a binary tree from its preorder and inorder traversals.

        Builds a value->index map over the inorder sequence once, so each
        recursive root lookup is O(1) instead of a linear scan, making the
        whole construction O(n) instead of O(n^2). Values are assumed
        unique, as the problem guarantees.
        """
        index_of = {val: idx for idx, val in enumerate(inorder)}
        return self.buildTreeRec(preorder, 0, len(preorder)-1, inorder, 0, len(inorder)-1, index_of)
    def buildTreeRec(self, preorder, pLeft, pRight, inorder, iLeft, iRight, index_of=None):
        """Build the subtree for preorder[pLeft..pRight] / inorder[iLeft..iRight].

        index_of maps an inorder value to its index; when omitted the root
        is found by linear search (original, backward-compatible behavior).
        """
        if pLeft>pRight or iLeft>iRight:
            return None
        # locate the subtree root (preorder[pLeft]) within the inorder slice
        if index_of is not None:
            i = index_of[preorder[pLeft]]
        else:
            i = iLeft
            for i in range(iLeft, iRight+1):
                if preorder[pLeft] == inorder[i]:
                    break
        cur = TreeNode(preorder[pLeft])
        # left subtree holds the i-iLeft values preceding the root in inorder
        cur.left = self.buildTreeRec(preorder, pLeft+1, pLeft+i-iLeft, inorder, iLeft, i-1, index_of)
        cur.right = self.buildTreeRec(preorder, pLeft+i-iLeft+1, pRight, inorder, i+1, iRight, index_of)
        return cur
# @lc code=end
|
def constant_current(t):
    """Unit constant current: evaluates to 1 at every time point in ``t``.

    The multiply-by-zero trick keeps the output the same shape and type
    family as ``t`` (works for scalars and numpy arrays alike).
    """
    return t * 0 + 1
|
# -*- coding=utf-8
'''
Created on 2016年9月23日
听牌规则
@author: zhaol
'''
from difang.majiang2.ai.ting import MTing
from difang.majiang2.player.hand.hand import MHand
from difang.majiang2.table.table_config_define import MTDefine
from difang.majiang2.tile.tile import MTile
from difang.majiang2.ting_rule.ting_rule import MTingRule
from difang.majiang2.win_rule.win_rule_haerbin import MWinRuleHaerbin
from freetime.util import log as ftlog
class MTingHaerbinRule(MTingRule):
    """Ready-hand (ting) rule for Harbin mahjong.

    1) A ready hand must contain at least one terminal ("1" or "9") tile.
    2) A ready hand must contain at least one triplet (three identical
       tiles; a pair of red dragons can usually substitute). Exception:
       without a triplet the hand may win on a double-pair wait. A winning
       hand must also contain at least one run (e.g. 123 or 789).
    3) A red dragon can also stand in for the terminal-tile requirement.
    4) The hand must already be opened (not concealed) before declaring
       ready, i.e. at least one chi/peng meld must have been made.
    5) Special play: chi-to-ready. When a single chi would make the hand
       ready, the needed tile may be taken from any player, not just the
       left neighbour; the chi then auto-declares ready (only a declared
       ready hand may win).
    6) With the "sandwich start" option configured, runs may only win on
       a 3 or 7 sandwich wait.
    """
    def __init__(self):
        super(MTingHaerbinRule, self).__init__()
    def getKeCount(self, patterns):
        """Count the triplets (ke) in ``patterns``.

        Example patterns value:
        [[6, 6], [5, 6, 7], [4, 5, 6], [1, 2, 3]]
        """
        count = 0
        for pattern in patterns:
            # a red-dragon pair substitutes for a triplet
            if (len(pattern) == 2) and (pattern[0] == MTile.TILE_HONG_ZHONG):
                count += 1
            if (len(pattern) == 3) and (pattern[0] == pattern[1]) and (pattern[1] == pattern[2]):
                count += 1
        return count
    def getShunCount(self, patterns):
        """Count the runs (three consecutive tiles) in ``patterns``."""
        count = 0
        for p in patterns:
            if len(p) != 3:
                continue
            if (p[0] + 2 == p[2]) and (p[1] + 1 == p[2]):
                count += 1
        return count
    def canTing(self, tiles, leftTiles, tile, magicTiles=[]):
        """Decide whether this hand may declare ready (subclass override).

        Params:
            tiles: the player's tiles grouped by meld type (hand/chi/peng/gang...)
            leftTiles: remaining tiles
            tile: the tile being considered
            magicTiles: wildcard tiles.
                NOTE(review): mutable default argument -- safe only as long
                as no callee mutates it; confirm MTing.canTing does not.
        Returns:
            (canTing, tingResults): flag plus the filtered ting details.
        """
        handCount = len(tiles[MHand.TYPE_HAND])
        if handCount < 5:
            return False, []
        # ftlog.debug( 'MTingHaerbinRule.canTing 0 tiles:', tiles )
        isTing, tingResults = MTing.canTing(MTile.cloneTiles(tiles), leftTiles, self.winRuleMgr, tile, magicTiles)
        # ftlog.debug( 'MTingHaerbinRule.canTing 1 tiles:', tiles )
        ftlog.debug('MTingHaerbinRule.MTing.canTing isTing:', isTing, ' tingResults:', tingResults)
        # Example tingResults shape:
        # [{'dropTile': 11, 'winNodes': [{'winTile': 1, 'winTileCount': 3, 'pattern': [[6, 6], [5, 6, 7], [4, 5, 6], [1, 2, 3]]}, ...]}]
        if not isTing:
            return False, []
        chiCount = len(tiles[MHand.TYPE_CHI])
        pengCount = len(tiles[MHand.TYPE_PENG])
        gangCount = len(tiles[MHand.TYPE_GANG])
        # rule 4: the hand must be open (at least one chi/peng/gang meld)
        if (chiCount + pengCount + gangCount) == 0:
            return False, []
        # triplets can come from peng or exposed gang melds (hand-internal
        # triplets are added per winNode pattern below)
        keCount = pengCount + gangCount
        # at least one run is required; chi melds contribute runs
        shunCount = chiCount
        newTingResults = []
        for tingResult in tingResults:
            newWinNodes = []
            winNodes = tingResult['winNodes']
            for winNode in winNodes:
                # simulate the hand after dropping dropTile and winning winTile
                newTiles = MTile.cloneTiles(tiles)
                newTiles[MHand.TYPE_HAND].remove(tingResult['dropTile'])
                newTiles[MHand.TYPE_HAND].append(winNode['winTile'])
                tileArr = MTile.changeTilesToValueArr(MHand.copyAllTilesToList(newTiles))
                # ftlog.debug( 'MTingHaerbinRule.canTing tileArr:', tileArr )
                # sandwich-start option: runs may only win a 3/7 sandwich
                # wait (single-tile waits excepted)
                chunJiaConfig = self.getTableConfig(MTDefine.MIN_MULTI, 0)
                if chunJiaConfig:
                    chunJiaContinue = False
                    patterns = winNode['pattern']
                    for pattern in patterns:
                        if winNode['winTile'] in pattern:
                            if len(pattern) == 3 and pattern[0] != pattern[1]:
                                # winning on the high end of a run is only a
                                # sandwich when the tile value is 3
                                if (pattern.index(winNode['winTile'])) == 2 and MTile.getValue(winNode['winTile']) != 3:
                                    chunJiaContinue = True
                                    break
                                # winning on the low end is only a sandwich
                                # when the tile value is 7
                                if (pattern.index(winNode['winTile'])) == 0 and MTile.getValue(winNode['winTile']) != 7:
                                    chunJiaContinue = True
                                    break
                            # with sandwich-start, a double-pair wait cannot win
                            if len(pattern) == 3 and pattern[0] == pattern[1]:
                                chunJiaContinue = True
                                break
                    if chunJiaContinue:
                        ftlog.debug('MTingHaerbinRule.canTing chunJiaConfig:', chunJiaConfig, ' can not win tile:',
                                    winNode['winTile'], ', continue....')
                        continue
                if self.getTableConfig(MTDefine.YISE_CAN_TING, 0) != 1:
                    # flush (single-suit) hands may not declare ready / win
                    colorCount = MTile.getColorCount(tileArr)
                    if colorCount == 1:
                        # flush hands cannot win
                        ftlog.debug('MTingHaerbinRule.canTing colorCount:', colorCount, ' can not win, continue....')
                        continue
                zhongCount = tileArr[MTile.TILE_HONG_ZHONG]
                # ftlog.debug( 'MTingHaerbinRule.canTing hongzhong count: ', zhongCount )
                # count the terminal 1s and 9s across all three suits
                yaoCount = tileArr[MTile.TILE_ONE_WAN] + tileArr[MTile.TILE_ONE_TONG] + tileArr[MTile.TILE_ONE_TIAO]
                jiuCount = tileArr[MTile.TILE_NINE_WAN] + tileArr[MTile.TILE_NINE_TONG] + tileArr[MTile.TILE_NINE_TIAO]
                # ftlog.debug( 'MTingHaerbinRule.canTing yaoCount:', yaoCount, ' jiuCount:', jiuCount )
                # rules 1+3: need a terminal or a red dragon somewhere
                if (yaoCount + jiuCount + zhongCount) == 0:
                    continue
                patterns = winNode['pattern']
                checkKeCount = keCount + self.getKeCount(patterns)
                checkShunCount = shunCount + self.getShunCount(patterns)
                ftlog.debug('MTingHaerbinRule.canTing keCount:', keCount, ' shunCount:', shunCount)
                # rule 2: need at least one triplet AND at least one run
                if checkKeCount and checkShunCount:
                    newWinNodes.append(winNode)
            if len(newWinNodes) > 0:
                newTingResult = {}
                newTingResult['dropTile'] = tingResult['dropTile']
                newTingResult['winNodes'] = newWinNodes
                newTingResults.append(newTingResult)
        return len(newTingResults) > 0, newTingResults
if __name__ == "__main__":
    # Smoke test: a hand with one chi meld (26,27,28) and one peng meld
    # (8,8,8); prints the canTing decision for tile 4.
    tiles = [[3, 4, 15, 5, 5, 6, 9, 9], [[26, 27, 28]], [[8, 8, 8]], [], [], []]
    rule = MTingHaerbinRule()
    rule.setWinRuleMgr(MWinRuleHaerbin())
    ftlog.debug(rule.canTing(tiles, [], 4, []))
|
import xlwings as xw
# NOTE(review): absolute, machine-specific path -- parameterize before reuse.
file_path = '/Users/chenhaolin/PycharmProjects/SRT/发改委/NDRC/FILES/test_xls.xls'
wb = xw.Book(file_path)
sheet = wb.sheets[0]
# Expand from A1 to the whole contiguous data region of the sheet.
RANGE = sheet.range('A1').expand('table')
row_count = RANGE.rows.count
col_count = RANGE.columns.count
print(str(row_count) + ' ' + str(col_count))
print(RANGE.rows)
for row in range(row_count):
    # NOTE(review): this prints only the column-A cell of each row, not the
    # full row -- confirm that is the intended output.
    print(sheet.range('A' + str(row + 1)).value)
|
#########################
# 定义类,属性、方法、私有属性和方法
#########################
class Car:
    """A car with a brand, model year and color.

    Demonstrates public, protected (single underscore) and private
    (double underscore, name-mangled) class and instance attributes,
    plus __str__/__del__ special methods.
    """
    __code = "2123120FJJ"  # private class attribute (name-mangled)
    _motor = "牛逼的要死"  # protected-by-convention class attribute
    country = "德国"  # public class attribute

    def __init__(self, make, year):
        """Store brand and year; color and address get default values."""
        self.make = make
        self.year = year
        self.color = "red"
        self.__address = "china"  # private instance attribute

    def __str__(self):
        """Human-readable description, akin to Java's toString()."""
        template = "Car made in %s brand = %s year = %d color = %s"
        return template % (self.__address, self.make, self.year, self.color)

    def __del__(self):
        """Finalizer (like a C++ destructor): announce the destruction."""
        print(self.__str__() + " was deleted")

    def get_address(self):
        """Accessor for the private address attribute."""
        return self.__address

    def __start_motor(self):
        """Private helper simulating the engine starting."""
        print(self.make + "motor is start")

    def start(self):
        """Start the car by delegating to the private motor helper."""
        self.__start_motor()
BMW = Car("BMW", 2014)
BMW.color = "white"  # instance attribute overrides the constructor default
print(BMW)  # print() uses __str__
print(Car.country)  # read the class attribute directly
print(BMW.country)  # instance lookup falls back to the class attribute
|
import logging
import os
from quasimodo.parts_of_facts import PartsOfFacts
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
from quasimodo.assertion_fusion.trainer import Trainer
from quasimodo.parameters_reader import ParametersReader
# Toggle: dump the per-fact weight matrix to save_file before training.
save_weights = True
parameters_reader = ParametersReader()
# Fall back to repository-relative defaults when the parameters file does
# not define these paths.
annotations_file = parameters_reader.get_parameter("annotations-file") or "data/training_active_learning.tsv"
save_file = parameters_reader.get_parameter("weights-file") or os.path.dirname(__file__) + "/../temp/weights.tsv"
def _save_weights(parts_of_facts):
    """Write one TSV row per fact (feature columns + manual label) to save_file."""
    annotations = get_annotated_data()
    header = parts_of_facts.get_header()
    header.append("label")
    lines = ["\t".join(header)]
    for fact in parts_of_facts.get_all_facts():
        row = parts_of_facts.get_fact_row(fact)
        key = (fact.get_subject().get(),
               fact.get_predicate().get(),
               fact.get_object().get(),
               str(int(fact.is_negative())))
        # -1 marks facts that were never manually annotated
        row.append(annotations.get(key, -1))
        lines.append("\t".join(str(cell) for cell in row))
    with open(save_file, "w") as output:
        for line in lines:
            output.write(line + "\n")
class LinearCombinationWeightedSubmodule(SubmoduleInterface):
    """Submodule that rescores generated facts with a trained linear model."""

    def __init__(self, module_reference):
        super().__init__()
        self._module_reference = module_reference
        self._name = "Linear Combination Per Module Submodule"

    def process(self, input_interface):
        """Group facts, optionally dump weights, train, then rescore facts."""
        logging.info("Start linear combining per module submodule")
        logging.info("Grouping facts")
        parts_of_facts = PartsOfFacts.from_generated_facts(input_interface.get_generated_facts())
        if save_weights:
            logging.info("Saving weights facts")
            _save_weights(parts_of_facts)
        logging.info("Training the model...")
        trainer = Trainer(save_file)
        trainer.train()
        logging.info("Generating new facts")
        rescored = [
            parts_of_facts.get_generated_fact_with_score_from_classifier(fact, trainer)
            for fact in parts_of_facts.get_all_facts()
        ]
        # Highest combined score first.
        rescored.sort(key=lambda gf: -sum(score[0] for score in gf.get_score().scores))
        return input_interface.replace_generated_facts(rescored)
def get_annotated_data():
    """Load manual annotations keyed by (subject, predicate, object, negated)."""
    annotations = dict()
    with open(annotations_file) as source:
        for raw_line in source:
            parts = raw_line.strip().split("\t")
            annotations[tuple(parts[:4])] = parts[4]
    return annotations
|
from django.db import models
from django.db.models import fields
import graphene
from graphql_jwt.decorators import login_required
from graphene_django import DjangoObjectType
from .models import Deal
#debug
from .tests.deals_data import mock_data
DEALS_PER_QUERY = 8
class FreeDeal(DjangoObjectType):
    """Public (unauthenticated) view of a Deal: price/thumbnail fields only."""
    class Meta:
        model = Deal
        fields = ("title", 'storeID', 'salePrice', 'normalPrice', 'thumb')
class FullDeal(DjangoObjectType):
    """Authenticated view of a Deal exposing every model field.

    ``fields = "__all__"`` keeps the previous implicit behavior (all model
    fields) while satisfying graphene-django's requirement to declare the
    exposed field set explicitly instead of relying on the deprecated
    implicit default.
    """
    class Meta:
        model = Deal
        fields = "__all__"
class FullDealGroup(graphene.ObjectType):
    """One page of deals plus a flag telling the client whether more exist."""
    # the deals contained in this page
    deals_list = graphene.List(FullDeal)
    # True when this page reaches the end of the result set
    is_end = graphene.Boolean(default_value=False)
def to_full_deal_group(deals_list, start, deals_group_size):
    """Slice deals_list into a FullDealGroup page starting at index start.

    is_end is True when the page reaches (or the list already is) the end.
    Raises when start lies beyond the end of a non-empty list.
    """
    if len(deals_list) == 0:
        return FullDealGroup(deals_list=[], is_end=True)
    total = len(deals_list)
    if start >= total:
        raise Exception('Start index out of bound')
    stop = start + deals_group_size
    if stop >= total:
        return FullDealGroup(deals_list=deals_list[start:], is_end=True)
    return FullDealGroup(deals_list=deals_list[start:stop], is_end=False)
class Query(graphene.AbstractType):
    """GraphQL queries over the Deal model."""
    # One free (unauthenticated) deal per supported store.
    one_per_store = graphene.List(FreeDeal)
    deal_by_id = graphene.Field(FullDeal, id = graphene.String())
    # deals = graphene.Field(FullDealGroup, start=graphene.Int())
    # Paged, filterable, sortable deal listing.
    deals = graphene.Field(
        FullDealGroup,
        start = graphene.Int(),
        storeID = graphene.String(),
        low_price = graphene.Float(),
        high_price = graphene.Float(),
        sort_by = graphene.String()
    )
    #debug
    # create_records = graphene.String()
    def resolve_one_per_store(root, info):
        """Return the first stored deal for stores 1, 7 and 11."""
        steam_deal = Deal.objects.filter(storeID='1').first()
        gog_deal = Deal.objects.filter(storeID='7').first()
        humble_deal = Deal.objects.filter(storeID='11').first()
        return [steam_deal, gog_deal, humble_deal]
    @login_required
    def resolve_deal_by_id(root, info, id):
        """Look up a single deal by its dealID (authentication required)."""
        deal = Deal.objects.get(dealID = id)
        return deal
    # @login_required
    # def resolve_deals(root, info, start):
    #     deals_list = Deal.objects.all()
    #     return to_full_deal_group(deals_list, start, DEALS_PER_QUERY)
    @login_required
    def resolve_deals(
            root,
            info,
            start,
            storeID = 'default',
            low_price = -1,
            high_price = -1,
            sort_by = 'default'):
        """Paged deal listing with optional store/price filters and sorting.

        The 'default' / -1 sentinel values mean "do not filter/sort on
        this criterion".
        """
        deals_list = Deal.objects.all()
        if storeID != 'default':
            deals_list = deals_list.filter(storeID = storeID)
        # the price filter only kicks in when high_price was supplied
        if high_price > -1:
            deals_list = deals_list.filter(salePrice__range=[low_price, high_price])
        if sort_by == 'price':
            deals_list = deals_list.order_by('salePrice')
        elif sort_by in ['savings', 'dealRating']:
            deals_list = deals_list.order_by('-' + sort_by)
        return to_full_deal_group(deals_list, start, DEALS_PER_QUERY)
    #debug
    # def resolve_create_records(root, info):
    #     deals = mock_data
    #     for deal in deals:
    #         Deal.objects.update_or_create(
    #             title = deal['title'],
    #             dealID = deal['dealID'],
    #             storeID = deal['storeID'],
    #             salePrice = deal['salePrice'],
    #             normalPrice = deal['normalPrice'],
    #             savings = deal['savings'],
    #             steamRatingText = deal['steamRatingText'],
    #             releaseDate = deal['releaseDate'],
    #             dealRating = deal['dealRating'],
    #             thumb = deal['thumb']
    #         )
    #     return 'Records created'
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the app-local Person model and repoint Story.people at
    tabor.Person, making the relation optional (blank=True)."""
    dependencies = [
        ('barkochba', '0003_story_order_number'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Person',
        ),
        migrations.AlterField(
            model_name='story',
            name='people',
            # NOTE(review): b'tabor.Person' is a Python 2 artifact; a plain
            # str is conventional on Python 3 -- but confirm before editing
            # a historical migration, which should stay byte-stable.
            field=models.ManyToManyField(to=b'tabor.Person', blank=True),
        ),
    ]
|
# $Id$
##
## This file is part of pyFormex 0.8.5 Sun Nov 6 17:27:05 CET 2011
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: https://savannah.nongnu.org/projects/pyformex/
## Copyright (C) Benedict Verhegghe (benedict.verhegghe@ugent.be)
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
"""OpenGL actors for populating the 3D scene."""
import pyformex as pf
import sys
from OpenGL import GL,GLU
from drawable import *
from formex import *
from elements import elementType,elementName
from mesh import Mesh
from plugins.trisurface import TriSurface
from plugins.nurbs import NurbsCurve,NurbsSurface
from marks import TextMark
import timer
### Actors ###############################################
class Actor(Drawable):
    """An Actor is anything that can be drawn in an OpenGL 3D Scene.
    The visualisation of the Scene Actors is dependent on camera position and
    angles, clipping planes, rendering mode and lighting.
    An Actor subclass should minimally reimplement the following methods:
    - `bbox()`: return the actors bounding box.
    - `drawGL(mode)`: to draw the actor. Takes a mode argument so the
      drawing function can act differently depending on the mode. There are
      currently 5 modes: wireframe, flat, smooth, flatwire, smoothwire.
      drawGL should only contain OpenGL calls that are allowed inside a
      display list. This may include calling the display list of another
      actor but *not* creating a new display list.
    The interactive picking functionality requires the following methods,
    for which we provide do-nothing defaults here:
    - `npoints()`:
    - `nelems()`:
    - `pickGL()`:
    """
    def __init__(self,**kargs):
        Drawable.__init__(self,**kargs)
    def bbox(self):
        """Default implementation for bbox()."""
        # NOTE(review): the bare except also hides unexpected errors raised
        # from inside coords.bbox(); catching AttributeError alone seems to
        # be the intent.
        try:
            return self.coords.bbox()
        except:
            raise ValueError,"No bbox() defined and no coords attribute"
    def npoints(self):
        # Default: no pickable points.
        return 0
    def nelems(self):
        # Default: no pickable elements.
        return 0
    def pickGL(self,mode):
        # Default: nothing to pick.
        pass
class TranslatedActor(Actor):
    """An Actor translated to another position."""
    def __init__(self,A,trl=(0.,0.,0.),**kargs):
        """Wrap actor A, shifted over the translation vector trl."""
        Actor.__init__(self,**kargs)
        self.actor = A
        self.trans = A.trans
        self.trl = asarray(trl)
    def bbox(self):
        """Bounding box of the wrapped actor, shifted by the translation."""
        return self.actor.bbox() + self.trl
    def redraw(self,mode,color=None):
        # Regenerate both the wrapped actor's display list and our own.
        self.actor.redraw(mode=mode,color=color)
        Drawable.redraw(self,mode=mode,color=color)
    def drawGL(self,**kargs):
        """Draw the wrapped actor under a temporary modelview translation."""
        GL.glMatrixMode(GL.GL_MODELVIEW)
        GL.glPushMatrix()
        GL.glTranslate(*self.trl)
        self.actor.use_list()
        GL.glMatrixMode(GL.GL_MODELVIEW)
        GL.glPopMatrix()
class RotatedActor(Actor):
    """An Actor rotated to another position."""
    def __init__(self,A,rot=(1.,0.,0.),twist=0.0,**kargs):
        """Created a new rotated actor.
        If rot is an array with shape (3,), the rotation is specified
        by the direction of the local 0 axis of the actor.
        If rot is an array with shape (4,4), the rotation is specified
        by the direction of the local 0, 1 and 2 axes of the actor.
        """
        Actor.__init__(self,**kargs)
        self.actor = A
        self.trans = A.trans
        # A direction vector is expanded into a full 4x4 rotation matrix.
        if shape(rot) == (3,):
            self.rot = rotMatrix(rot,n=4)
        else:
            self.rot = rot
    def bbox(self):
        """Bounding box of the wrapped actor (not rotated)."""
        return self.actor.bbox() # TODO : rotate the bbox !
    def redraw(self,mode,color=None):
        # Regenerate both the wrapped actor's display list and our own.
        self.actor.redraw(mode=mode,color=color)
        Drawable.redraw(self,mode=mode,color=color)
    def drawGL(self,**kargs):
        """Draw the wrapped actor under a temporary modelview rotation."""
        GL.glMatrixMode(GL.GL_MODELVIEW)
        GL.glPushMatrix()
        GL.glMultMatrixf(self.rot)
        self.actor.use_list()
        GL.glMatrixMode(GL.GL_MODELVIEW)
        GL.glPopMatrix()
class CubeActor(Actor):
    """An OpenGL actor with cubic shape and 6 colored sides."""
    def __init__(self,size=1.0,color=[red,cyan,green,magenta,blue,yellow],**kargs):
        # size: edge length; color: one color per face (the shared default
        # list is never mutated here).
        Actor.__init__(self,**kargs)
        self.size = size
        self.color = color
    def bbox(self):
        """Axis-aligned box of the cube, centered on the origin."""
        return (0.5 * self.size) * array([[-1.,-1.,-1.],[1.,1.,1.]])
    def drawGL(self,**kargs):
        """Draw the cube."""
        drawCube(self.size,self.color)
class SphereActor(Actor):
    """An OpenGL actor representing a sphere.

    size: scale of the sphere (the bbox spans size along each axis).
    color: a single color, or None for the canvas default.
    """
    def __init__(self,size=1.0,color=None,**kargs):
        # Pass **kargs through to Actor, consistent with the sibling actor
        # classes (CubeActor, BboxActor, ...); the original silently
        # dropped them.
        Actor.__init__(self,**kargs)
        self.size = size
        self.color = color
    def bbox(self):
        """Axis-aligned box enclosing the sphere, centered on the origin."""
        return (0.5 * self.size) * array([[-1.,-1.,-1.],[1.,1.,1.]])
    def drawGL(self,**kargs):
        """Draw the sphere."""
        drawSphere(self.size,self.color)
# This could be subclassed from GridActor
class BboxActor(Actor):
    """Draws a bbox."""
    def __init__(self,bbox,color=None,linewidth=None,**kargs):
        # bbox: array with the minimal and maximal corner points.
        Actor.__init__(self,**kargs)
        self.color = color
        self.linewidth = linewidth
        self.bb = bbox
        # Scale the unit hexahedron to span the given box.
        Hex8 = elementType('hex8')
        self.vertices = Hex8.vertices * (bbox[1]-bbox[0]) + bbox[0]
        self.edges = Hex8.edges
        self.facets = Hex8.faces
    def bbox(self):
        """Return the box itself."""
        return self.bb
    def drawGL(self,**kargs):
        """Always draws a wireframe model of the bbox."""
        if self.linewidth is not None:
            GL.glLineWidth(self.linewidth)
        drawLines(self.vertices,self.edges,self.color)
class AxesActor(Actor):
    """An actor showing the three axes of a CoordinateSystem.
    If no CoordinateSystem is specified, the global coordinate system is drawn.
    The default actor consists of three colored lines of unit length along
    the unit vectors of the axes and three colored triangles representing the
    coordinate planes. This can be modified by the following parameters:
    size: scale factor for the unit vectors.
    color: a set of three colors to use for x,y,z axes.
    colored_axes = False: draw black axes.
    draw_planes = False: do not draw the coordinate planes.
    """
    def __init__(self,cs=None,size=1.0,color=[red,green,blue],colored_axes=True,draw_planes=False,linewidth=None,**kargs):
        Actor.__init__(self,**kargs)
        if cs is None:
            cs = CoordinateSystem()
        self.cs = cs
        # Normalize to one sane color per axis.
        self.color = saneColorArray(saneColor(color),(3,1))
        self.colored_axes = colored_axes
        self.draw_planes = draw_planes
        self.linewidth = linewidth
        self.setSize(size)
    def bbox(self):
        """Cube of half-size `size` around the cs origin (cs[3])."""
        origin = self.cs[3]
        return array([origin-self.size,origin+self.size])
    def setSize(self,size):
        """Set the axes scale factor and invalidate the display list."""
        size = float(size)
        if size > 0.0:
            self.size = size
        self.delete_list()
    def drawGL(self,**kargs):
        """Draw the axes."""
        # Scale the coordinate system around its own origin (cs[3]).
        x = self.cs.trl(-self.cs[3]).scale(self.size).trl(self.cs[3])
        if self.draw_planes:
            # One triangle per coordinate plane, all sharing the origin (3).
            e = array([[3,1,2],[3,2,0],[3,0,1]])
            drawPolygons(x,e,'flat')
        # One line from the origin (3) along each axis point.
        e = array([[3,0],[3,1],[3,2]])
        if self.colored_axes:
            c = self.color
        else:
            c = None
        if self.linewidth:
            GL.glLineWidth(self.linewidth)
        drawLines(x,e,c)
class GridActor(Actor):
    """Draws a (set of) grid(s) in one of the coordinate planes."""
    def __init__(self,nx=(1,1,1),ox=(0.0,0.0,0.0),dx=(1.0,1.0,1.0),linecolor=black,linewidth=None,planecolor=white,alpha=0.2,lines=True,planes=True,**kargs):
        # nx: number of cells per direction; ox: origin; dx: cell size.
        Actor.__init__(self,**kargs)
        self.linecolor = saneColor(linecolor)
        self.planecolor = saneColor(planecolor)
        self.linewidth = linewidth
        self.alpha = alpha
        self.trans = True
        self.lines = lines
        self.planes = planes
        self.nx = asarray(nx)
        self.x0 = asarray(ox)
        self.x1 = self.x0 + self.nx * asarray(dx)
    def bbox(self):
        """Box spanned by the two extreme grid corners."""
        return array([self.x0,self.x1])
    def drawGL(self,**kargs):
        """Draw the grid."""
        if self.lines:
            if self.linewidth:
                GL.glLineWidth(self.linewidth)
            glColor(self.linecolor)
            drawGridLines(self.x0,self.x1,self.nx)
        if self.planes:
            glColor(self.planecolor,self.alpha)
            drawGridPlanes(self.x0,self.x1,self.nx)
class CoordPlaneActor(Actor):
    """Draws a set of 3 coordinate planes."""
    def __init__(self,nx=(1,1,1),ox=(0.0,0.0,0.0),dx=(1.0,1.0,1.0),linecolor=black,linewidth=None,planecolor=white,alpha=0.5,lines=True,planes=True,**kargs):
        # nx: number of cells per direction; ox: origin; dx: cell size.
        Actor.__init__(self,**kargs)
        self.linecolor = saneColor(linecolor)
        self.planecolor = saneColor(planecolor)
        self.linewidth = linewidth
        self.alpha = alpha
        self.trans = True
        self.lines = lines
        self.planes = planes
        self.nx = asarray(nx)
        self.x0 = asarray(ox)
        self.x1 = self.x0 + self.nx * asarray(dx)
    def bbox(self):
        """Box spanned by the two extreme corners."""
        return array([self.x0,self.x1])
    def drawGL(self,**kargs):
        """Draw the grid."""
        # Draw one plane per direction by zeroing that direction's cells.
        for i in range(3):
            nx = self.nx.copy()
            nx[i] = 0
            if self.lines:
                if self.linewidth:
                    GL.glLineWidth(self.linewidth)
                glColor(self.linecolor)
                drawGridLines(self.x0,self.x1,nx)
            if self.planes:
                glColor(self.planecolor,self.alpha)
                drawGridPlanes(self.x0,self.x1,nx)
class PlaneActor(Actor):
    """A plane in a 3D scene."""
    def __init__(self,nx=(2,2,2),ox=(0.,0.,0.),size=((0.0,1.0,1.0),(0.0,1.0,1.0)),linecolor=black,linewidth=None,planecolor=white,alpha=0.5,lines=True,planes=True,**kargs):
        """A plane perpendicular to the x-axis at the origin."""
        Actor.__init__(self,**kargs)
        self.linecolor = saneColor(linecolor)
        self.planecolor = saneColor(planecolor)
        self.linewidth = linewidth
        self.alpha = alpha
        self.trans = True
        self.lines = lines
        self.planes = planes
        self.nx = asarray(nx)
        ox = asarray(ox)
        sz = asarray(size)
        # size[0]/size[1] extend the plane below/above the origin.
        self.x0,self.x1 = ox-sz[0], ox+sz[1]
    def bbox(self):
        """Box spanned by the two extreme corners."""
        return array([self.x0,self.x1])
    def drawGL(self,**kargs):
        """Draw the grid."""
        # Draw one plane per direction by zeroing that direction's cells.
        for i in range(3):
            nx = self.nx.copy()
            nx[i] = 0
            if self.lines:
                if self.linewidth is not None:
                    GL.glLineWidth(self.linewidth)
                color = self.linecolor
                if color is None:
                    # Fall back to the canvas foreground color.
                    color = canvas.settings.fgcolor
                glColor(color)
                drawGridLines(self.x0,self.x1,nx)
            if self.planes:
                glColor(self.planecolor,self.alpha)
                drawGridPlanes(self.x0,self.x1,nx)
###########################################################################
class GeomActor(Actor):
    """An OpenGL actor representing a geometrical model.
    The model can either be in Formex or Mesh format.
    """
    mark = False
    def __init__(self,data,elems=None,eltype=None,color=None,colormap=None,bkcolor=None,bkcolormap=None,alpha=1.0,mode=None,linewidth=None,linestipple=None,marksize=None,**kargs):
        """Create a geometry actor.
        The geometry is either in Formex model: a coordinate block with
        shape (nelems,nplex,3), or in Mesh format: a coordinate block
        with shape (npoints,3) and an elems block with shape (nelems,nplex).
        In both cases, an eltype may be specified if the default is not
        suitable. Default eltypes are Point for plexitude 1, Line for
        plexitude 2 and Triangle for plexitude 3 and Polygon for all higher
        plexitudes. Actually, Triangle is just a special case of Polygon.
        Here is a list of possible eltype values (which should match the
        corresponding plexitude):
        ========= =========== ============================================
        plexitude `eltype`    element type
        ========= =========== ============================================
        4         ``tet4``    a tetrahedron
        6         ``wedge6``  a wedge (triangular prism)
        8         ``hex8``    a hexahedron
        ========= =========== ============================================
        The colors argument specifies a list of OpenGL colors for each
        of the property values in the Formex. If the list has less
        values than the PropSet, it is wrapped around. It can also be
        a single OpenGL color, which will be used for all elements.
        For surface type elements, a bkcolor color can be given for
        the backside of the surface. Default will be the same
        as the front color.
        The user can specify a linewidth to be used when drawing
        in wireframe mode.
        """
        Actor.__init__(self,**kargs)
        # Store a reference to the drawn object
        self.object = data
        # Normalize the three accepted input kinds to coords/elems/eltype.
        if isinstance(data,GeomActor) or isinstance(data,Mesh):
            self.coords = data.coords
            self.elems = data.elems
            self.eltype = data.eltype
        elif isinstance(data,Formex):
            self.coords = data.coords
            self.elems = None
            self.eltype = data.eltype
        else:
            # raw arrays: coords (+ optional elems/eltype)
            self.coords = data
            self.elems = elems
            self.eltype = eltype
        self.mode = mode
        self.setLineWidth(linewidth)
        self.setLineStipple(linestipple)
        self.setColor(color,colormap)
        self.setBkColor(bkcolor,bkcolormap)
        self.setAlpha(alpha)
        self.marksize = marksize
        #print "GEOMACTOR: %s -> %s" % (color.shape,self.color.shape)
    def getType(self):
        """Return the class of the drawn object."""
        return self.object.__class__
    def nplex(self):
        """Number of points per element."""
        return self.shape()[1]
    def nelems(self):
        """Number of elements."""
        return self.shape()[0]
    def shape(self):
        """Return (nelems,nplex), whichever format the data is in."""
        if self.elems is None:
            return self.coords.shape[:-1]
        else:
            return self.elems.shape
    def npoints(self):
        """Number of vertices."""
        return self.vertices().shape[0]
    def nedges(self):
        # This is needed to be able to pick edges!!
        # NOTE(review): bare excepts here also swallow unexpected errors;
        # AttributeError seems to be the intended case.
        try:
            return self.object.nedges()
        except:
            try:
                return self.object.getEdges().shape[0]
            except:
                return 0
    def vertices(self):
        """Return the vertives as a 2-dim array."""
        return self.coords.reshape(-1,3)
    def setColor(self,color,colormap=None):
        """Set the color of the Actor."""
        self.color,self.colormap = saneColorSet(color,colormap,self.shape())
    def setBkColor(self,color,colormap=None):
        """Set the backside color of the Actor."""
        self.bkcolor,self.bkcolormap = saneColorSet(color,colormap,self.shape())
    def setAlpha(self,alpha):
        """Set the Actors alpha value."""
        self.alpha = float(alpha)
        # anything below fully opaque is drawn in the transparent pass
        self.trans = self.alpha < 1.0
    def bbox(self):
        """Bounding box of the coordinates."""
        return self.coords.bbox()
    def draw(self,**kargs):
        """Draw the actor, managing the display list and the wire overlay."""
        if 'mode' in kargs:
            mode = kargs['mode']
        else:
            canvas = kargs.get('canvas',pf.canvas)
            mode = canvas.rendermode
        if mode.endswith('wire'):
            # ..wire modes add a black wireframe overlay drawn by a shallow
            # copy of self kept in self.wire.
            if not hasattr(self,'wire'):
                import copy
                wire = copy.copy(self)
                wire.nolight = True
                wire.ontop = False # True will make objects transparent for edges
                wire.list = None
                Drawable.prepare_list(wire,mode='wireframe',color=asarray(black))
                self.wire = wire
            # Add the existing wire to the extra list, and then draw w/o wire
            if self.wire not in self.extra:
                self.extra.append(self.wire)
            # AVOID RECURSION
            self.wire.extra = []
            mode = mode[:-4]
        else:
            if hasattr(self,'wire') and self.wire in self.extra:
                self.extra.remove(self.wire)
        # rebuild the display list when missing or when the mode changed
        if self.list is None or mode != self.mode:
            kargs['mode'] = mode
            self.delete_list()
            self.list = self.create_list(**kargs)
            self.mode = mode
        self.use_list()
    def drawGL(self,canvas=None,mode=None,color=None,**kargs):
        """Draw the geometry on the specified canvas.
        The drawing parameters not provided by the Actor itself, are
        derived from the canvas defaults.
        mode and color can be overridden for the sole purpose of allowing
        the recursive use for modes ending on 'wire' ('smoothwire' or
        'flatwire'). In these cases, two drawing operations are done:
        one with mode='wireframe' and color=black, and one with mode=mode[:-4].
        """
        from canvas import glLineStipple
        if canvas is None:
            canvas = pf.canvas
        if mode is None:
            mode = self.mode
        if mode is None:
            mode = canvas.rendermode
        if mode.endswith('wire'):
            mode = mode[:-4]
        ############# set drawing attributes #########
        alpha = self.alpha
        if alpha is None:
            alpha = canvas.settings.alpha
        if color is None:
            color,colormap = self.color,self.colormap
            bkcolor, bkcolormap = self.bkcolor,self.bkcolormap
        else:
            # THIS OPTION IS ONLY MEANT FOR OVERRIDING THE COLOR
            # WITH THE EDGECOLOR IN ..wire DRAWING MODES
            # SO NO NEED TO SET bkcolor
            color,colormap = saneColor(color),None
            bkcolor, bkcolormap = None,None
        # convert color index to full colors
        if color is not None and color.dtype.kind == 'i':
            color = colormap[color]
        if bkcolor is not None and bkcolor.dtype.kind == 'i':
            bkcolor = bkcolormap[bkcolor]
        linewidth = self.linewidth
        if linewidth is None:
            linewidth = canvas.settings.linewidth
        if self.linewidth is not None:
            GL.glLineWidth(self.linewidth)
        if self.linestipple is not None:
            glLineStipple(*self.linestipple)
        if mode.startswith('smooth'):
            # set up specular material properties for smooth shading
            if hasattr(self,'specular'):
                fill_mode = GL.GL_FRONT
                import colors
                if color is not None:
                    spec = color * self.specular# * pf.canvas.specular
                    spec = append(spec,1.)
                else:
                    spec = colors.GREY(self.specular)# * pf.canvas.specular
                GL.glMaterialfv(fill_mode,GL.GL_SPECULAR,spec)
                GL.glMaterialfv(fill_mode,GL.GL_EMISSION,spec)
                GL.glMaterialfv(fill_mode,GL.GL_SHININESS,self.specular)
        ################## draw the geometry #################
        # dispatch on plexitude/eltype: points, lines, curves, polygons
        # or element-type driven edge/face drawing
        nplex = self.nplex()
        if nplex == 1:
            marksize = self.marksize
            if marksize is None:
                marksize = canvas.settings.pointsize
            # THIS SHOULD GO INTO drawPoints
            if self.elems is None:
                coords = self.coords
            else:
                coords = self.coords[self.elems]
            drawPoints(coords,color,alpha,marksize)
        elif nplex == 2:
            drawLines(self.coords,self.elems,color)
        # beware: some Formex eltypes are strings and may not
        # represent a valid Mesh elementType
        # THis is only here for Formex type.
        # We can probably remove it if we avoid eltype 'curve'
        elif nplex == 3 and self.eltype in ['curve','line3']:
            drawQuadraticCurves(self.coords,self.elems,color)
        elif self.eltype is None:
            # polygons
            if mode=='wireframe' :
                drawPolyLines(self.coords,self.elems,color)
            else:
                if bkcolor is not None:
                    # cull so front and back can get different colors
                    GL.glEnable(GL.GL_CULL_FACE)
                    GL.glCullFace(GL.GL_BACK)
                drawPolygons(self.coords,self.elems,mode,color,alpha)
                if bkcolor is not None:
                    GL.glCullFace(GL.GL_FRONT)
                    drawPolygons(self.coords,self.elems,mode,bkcolor,alpha)
                    GL.glDisable(GL.GL_CULL_FACE)
        else:
            el = elementType(self.eltype)
            if mode=='wireframe' or el.ndim < 2:
                for edges in el.getDrawEdges(el.name() in pf.cfg['draw/quadline']):
                    drawEdges(self.coords,self.elems,edges,edges.eltype,color)
            else:
                for faces in el.getDrawFaces(el.name() in pf.cfg['draw/quadsurf']):
                    if bkcolor is not None:
                        # Enable drawing front and back with different colors
                        GL.glEnable(GL.GL_CULL_FACE)
                        GL.glCullFace(GL.GL_BACK)
                    # Draw the front sides
                    drawFaces(self.coords,self.elems,faces,faces.eltype,mode,color,alpha)
                    if bkcolor is not None:
                        # Draw the back sides
                        GL.glCullFace(GL.GL_FRONT)
                        drawFaces(self.coords,self.elems,faces,faces.eltype,mode,bkcolor,alpha)
                        GL.glDisable(GL.GL_CULL_FACE)
    def pickGL(self,mode):
        """ Allow picking of parts of the actor.
        mode can be 'element', 'edge' or 'point'
        """
        if mode == 'element':
            pickPolygons(self.coords,self.elems)
        elif mode == 'edge':
            edges = self.object.getEdges()
            if edges is not None:
                pickPolygons(self.coords,edges)
        elif mode == 'point':
            pickPoints(self.coords)
    def select(self,sel):
        """Return a GeomActor with a selection of this actor's elements
        Currently, the resulting Actor will not inherit the properties
        of its parent, but the eltype will be retained.
        """
        # This selection should be reworked to allow edge and point selections
        if self.elems is None:
            # Formex format: select whole elements from the coords block
            x = self.coords[sel]
            e = self.elems
        else:
            # Mesh format: keep all points, select rows of elems
            x = self.coords
            e = self.elems[sel]
        return GeomActor(x,e,eltype=self.eltype)
class NurbsActor(Actor):
    """Actor that renders a NurbsCurve or NurbsSurface object."""

    def __init__(self, data, color=None, colormap=None, bkcolor=None, bkcolormap=None, **kargs):
        """Create an actor for the NURBS object `data`.

        color/colormap set the front side color, bkcolor/bkcolormap the
        back side; remaining keyword arguments go to the Actor base class.
        """
        # Fix: removed the unused local `from gui.drawable import saneColor`;
        # this class uses saneColorSet (resolved at module level), not saneColor.
        Actor.__init__(self, **kargs)
        self.object = data
        self.setColor(color, colormap)
        self.setBkColor(bkcolor, bkcolormap)
        # Sampling tolerance used by the NURBS drawing functions:
        # curves are sampled finer than surfaces.
        if isinstance(self.object, NurbsCurve):
            self.samplingTolerance = 5.0
        elif isinstance(self.object, NurbsSurface):
            self.samplingTolerance = 10.0
        self.list = None  # no cached rendering list yet

    def shape(self):
        """Return the shape of the control point array without its last axis."""
        return self.object.coords.shape[:-1]

    def setColor(self, color, colormap=None):
        """Set the color of the Actor."""
        self.color, self.colormap = saneColorSet(color, colormap, self.shape())

    def setBkColor(self, color, colormap=None):
        """Set the backside color of the Actor."""
        self.bkcolor, self.bkcolormap = saneColorSet(color, colormap, self.shape())

    def bbox(self):
        """Return the bounding box of the underlying NURBS object."""
        return self.object.bbox()

    def drawGL(self, canvas=None, **kargs):
        """Draw the NURBS object on `canvas` (default: the current pf.canvas)."""
        if canvas is None:
            canvas = pf.canvas
        mode = canvas.rendermode
        if mode.endswith('wire'):
            # '...wire' render modes are drawn like their base mode here
            mode = mode[:-4]
        if isinstance(self.object, NurbsCurve):
            drawNurbsCurves(self.object.coords, self.object.knots, color=self.color, samplingTolerance=self.samplingTolerance)
        elif isinstance(self.object, NurbsSurface):
            if mode == 'wireframe':
                # no wireframe representation for NURBS surfaces
                pass
            else:
                drawNurbsSurfaces(self.object.coords, self.object.vknots, self.object.uknots, color=self.color, normals='auto', samplingTolerance=self.samplingTolerance)
# End
|
def who_is_there(lis):
    """Print a small report about known animals and flowers found in *lis*.

    Each recognized item triggers its own message; the report always ends
    with the total number of items in the list.
    """
    present = set(lis)
    if "bear" in present:
        print("There's a bear")
    if "lion" in present:
        print("There's a lion")
    flowers = {"daisy", "iris"} & present
    if flowers:
        print("There are flowers")
    if len(flowers) == 2:
        print("There are at least two flowers")
    if "donkey" in present:
        print("There is a donkey")
    if "horse" not in present:
        print("There is no horse in the list")
    print("The list has", len(lis), "items")
# Interactively collect items until the user types the sentinel 'q',
# then report on the list's contents.
lis = []
while True:
    print("Type \'q\' to end the loop")
    a = input("Insert the element you want to put in the list: ")
    if a == 'q':
        # sentinel: stop collecting
        break
    else:
        lis.append(a)
who_is_there(lis)
# Echo all collected items on a single line, separated by " , ".
i = 0
print("We have", end = ' ')
while i<len(lis):
    print(lis[i], ",", end = ' ')
    i = i+1
print("as an element for the list.")
|
# Demo inventory: a list of product dicts; only the first entry carries a
# 'recomend' list of related product names.
stock = [
    {'name': 'Xiaomi', 'stock': 5, 'price': 65000.0, 'recomend': [
        'Xiaomi', 'iPhone XS', 'Samsung', 'OnePlus']},
    {'name': 'iPhone XS', 'stock': 8, 'price': 50000.0, 'discount': 50},
    {'name': 'OnePlus', 'stock': 20, 'price': 38000.0},
]
print(type(stock))     # the container is a list
print(type(stock[0]))  # each entry is a dict
# print the value at index 1 of the 'recomend' key
print(stock[0]['recomend'][1])
|
from django.db import models
class LetsEncrypt(models.Model):
    # A URL and the text to serve for it (presumably a Let's Encrypt
    # HTTP-01 challenge path/response -- confirm against the view using it).
    url = models.CharField(max_length=255)
    text = models.CharField(max_length=255)

    def save(self, *args, **kwargs):
        """Force pk=1 on every save so the table only ever holds one row (singleton)."""
        self.pk = 1
        super().save(*args, **kwargs)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'sqlite_main_window.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SqliteMainWindow(object):
    """Auto-generated (pyuic5) UI definition for the SQLite main window.

    WARNING: generated code -- edit the .ui file and regenerate instead of
    changing this class by hand.
    """

    def setupUi(self, SqliteMainWindow):
        """Build the widget tree and layouts of the main window."""
        SqliteMainWindow.setObjectName("SqliteMainWindow")
        SqliteMainWindow.resize(922, 688)
        self.centralwidget = QtWidgets.QWidget(SqliteMainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
        self.gridLayout.setObjectName("gridLayout")
        # --- left group box: table / field selection ---
        self.groupBox_table_field = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox_table_field.setObjectName("groupBox_table_field")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox_table_field)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        # table list column: label + scroll area
        self.verticalLayout_6 = QtWidgets.QVBoxLayout()
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.label_table = QtWidgets.QLabel(self.groupBox_table_field)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label_table.setFont(font)
        self.label_table.setAlignment(QtCore.Qt.AlignCenter)
        self.label_table.setObjectName("label_table")
        self.verticalLayout_6.addWidget(self.label_table)
        self.scrollArea_table = QtWidgets.QScrollArea(self.groupBox_table_field)
        self.scrollArea_table.setWidgetResizable(True)
        self.scrollArea_table.setObjectName("scrollArea_table")
        self.scrollAreaWidgetContents = QtWidgets.QWidget()
        self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 182, 497))
        self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
        self.scrollArea_table.setWidget(self.scrollAreaWidgetContents)
        self.verticalLayout_6.addWidget(self.scrollArea_table)
        self.horizontalLayout.addLayout(self.verticalLayout_6)
        # field list column: label + scroll area
        self.verticalLayout_5 = QtWidgets.QVBoxLayout()
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout()
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.label_field = QtWidgets.QLabel(self.groupBox_table_field)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label_field.setFont(font)
        self.label_field.setAlignment(QtCore.Qt.AlignCenter)
        self.label_field.setObjectName("label_field")
        self.verticalLayout_4.addWidget(self.label_field)
        self.scrollArea_field = QtWidgets.QScrollArea(self.groupBox_table_field)
        self.scrollArea_field.setWidgetResizable(True)
        self.scrollArea_field.setObjectName("scrollArea_field")
        self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
        self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 180, 412))
        self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
        self.scrollArea_field.setWidget(self.scrollAreaWidgetContents_2)
        self.verticalLayout_4.addWidget(self.scrollArea_field)
        self.verticalLayout_5.addLayout(self.verticalLayout_4)
        # select-none / select-all radio buttons under the field list
        self.groupBox = QtWidgets.QGroupBox(self.groupBox_table_field)
        self.groupBox.setTitle("")
        self.groupBox.setObjectName("groupBox")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.radioButton_notall = QtWidgets.QRadioButton(self.groupBox)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.radioButton_notall.setFont(font)
        self.radioButton_notall.setObjectName("radioButton_notall")
        self.horizontalLayout_3.addWidget(self.radioButton_notall)
        self.radioButton_all = QtWidgets.QRadioButton(self.groupBox)
        font = QtGui.QFont()
        font.setPointSize(12)
        self.radioButton_all.setFont(font)
        self.radioButton_all.setObjectName("radioButton_all")
        self.horizontalLayout_3.addWidget(self.radioButton_all)
        self.verticalLayout_3.addLayout(self.horizontalLayout_3)
        self.verticalLayout_5.addWidget(self.groupBox)
        self.horizontalLayout.addLayout(self.verticalLayout_5)
        self.horizontalLayout_2.addLayout(self.horizontalLayout)
        self.gridLayout.addWidget(self.groupBox_table_field, 0, 0, 1, 1)
        # central table view showing query results
        self.tableView_content = QtWidgets.QTableView(self.centralwidget)
        self.tableView_content.setObjectName("tableView_content")
        self.gridLayout.addWidget(self.tableView_content, 0, 1, 1, 1)
        # right-hand column of action buttons
        self.verticalLayout_8 = QtWidgets.QVBoxLayout()
        self.verticalLayout_8.setObjectName("verticalLayout_8")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.pushButton_newTable = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_newTable.setObjectName("pushButton_newTable")
        self.verticalLayout.addWidget(self.pushButton_newTable)
        self.pushButton_delTable = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_delTable.setObjectName("pushButton_delTable")
        self.verticalLayout.addWidget(self.pushButton_delTable)
        self.verticalLayout_8.addLayout(self.verticalLayout)
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.pushButton_add = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_add.setObjectName("pushButton_add")
        self.verticalLayout_2.addWidget(self.pushButton_add)
        self.pushButton_del = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_del.setObjectName("pushButton_del")
        self.verticalLayout_2.addWidget(self.pushButton_del)
        self.verticalLayout_8.addLayout(self.verticalLayout_2)
        self.verticalLayout_7 = QtWidgets.QVBoxLayout()
        self.verticalLayout_7.setObjectName("verticalLayout_7")
        self.pushButton_update = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_update.setObjectName("pushButton_update")
        self.verticalLayout_7.addWidget(self.pushButton_update)
        self.pushButton_query = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton_query.setObjectName("pushButton_query")
        self.verticalLayout_7.addWidget(self.pushButton_query)
        self.verticalLayout_8.addLayout(self.verticalLayout_7)
        self.gridLayout.addLayout(self.verticalLayout_8, 0, 2, 1, 1)
        # bottom row: command label + line edit
        self.label_cmd = QtWidgets.QLabel(self.centralwidget)
        font = QtGui.QFont()
        font.setPointSize(16)
        self.label_cmd.setFont(font)
        self.label_cmd.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.label_cmd.setObjectName("label_cmd")
        self.gridLayout.addWidget(self.label_cmd, 1, 0, 1, 1)
        self.lineEdit_cmd = QtWidgets.QLineEdit(self.centralwidget)
        self.lineEdit_cmd.setObjectName("lineEdit_cmd")
        self.gridLayout.addWidget(self.lineEdit_cmd, 1, 1, 1, 1)
        SqliteMainWindow.setCentralWidget(self.centralwidget)
        # menu bar, status bar and the single "Open File" action
        self.menubar = QtWidgets.QMenuBar(SqliteMainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 922, 23))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        SqliteMainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(SqliteMainWindow)
        self.statusbar.setObjectName("statusbar")
        SqliteMainWindow.setStatusBar(self.statusbar)
        self.actionOpen_File = QtWidgets.QAction(SqliteMainWindow)
        self.actionOpen_File.setObjectName("actionOpen_File")
        self.menuFile.addAction(self.actionOpen_File)
        self.menubar.addAction(self.menuFile.menuAction())
        self.retranslateUi(SqliteMainWindow)
        QtCore.QMetaObject.connectSlotsByName(SqliteMainWindow)

    def retranslateUi(self, SqliteMainWindow):
        """Set all user-visible strings via Qt's translation mechanism.

        NOTE: the UI strings are Chinese; they are runtime data and must be
        left exactly as generated.
        """
        _translate = QtCore.QCoreApplication.translate
        SqliteMainWindow.setWindowTitle(_translate("SqliteMainWindow", "Sqlite Main Window"))
        self.groupBox_table_field.setTitle(_translate("SqliteMainWindow", "选择一个表和若干字段"))
        self.label_table.setText(_translate("SqliteMainWindow", "表"))
        self.label_field.setText(_translate("SqliteMainWindow", "字段"))
        self.radioButton_notall.setText(_translate("SqliteMainWindow", "全不选"))
        self.radioButton_all.setText(_translate("SqliteMainWindow", "全选"))
        self.pushButton_newTable.setText(_translate("SqliteMainWindow", "创建新表"))
        self.pushButton_delTable.setText(_translate("SqliteMainWindow", " 删除表"))
        self.pushButton_add.setText(_translate("SqliteMainWindow", "添加数据"))
        self.pushButton_del.setText(_translate("SqliteMainWindow", "删除数据"))
        self.pushButton_update.setText(_translate("SqliteMainWindow", "更新数据"))
        self.pushButton_query.setText(_translate("SqliteMainWindow", "查询数据"))
        self.label_cmd.setText(_translate("SqliteMainWindow", "命令"))
        self.menuFile.setTitle(_translate("SqliteMainWindow", "File"))
        self.actionOpen_File.setText(_translate("SqliteMainWindow", "Open File"))
|
# -*- coding: utf-8 -*-
from decimal import Decimal
from django.db import models
class Plan(models.Model):
    name = models.CharField(max_length=100)
    minutes = models.IntegerField(default=0)
    data = models.IntegerField(default=0)
    sms = models.IntegerField(default=0)
    value = models.DecimalField(max_digits=8, decimal_places=2)

    def make_dataset(self, data, sms):
        """Return the dataset row for the comparison chart.

        Parameters:
            data: Internet data amount the customer needs
            sms: SMS amount the customer needs
        Returns:
            [plan_minute, plan_data, plan_sms, plan_value,
             data_package_value, sms_package_value, total]
        """
        extra_data_cost = Decimal()
        extra_sms_cost = Decimal()
        # Buy extra data packages if the plan does not cover the need.
        missing_data = data - self.data
        if missing_data > 0:
            extra_data_cost = Package.calculate(missing_data, Package.DATA)
        # Buy extra SMS packages if the plan does not cover the need.
        missing_sms = sms - self.sms
        if missing_sms > 0:
            extra_sms_cost = Package.calculate(missing_sms, Package.SMS)
        total = self.value + extra_data_cost + extra_sms_cost
        return [
            self.minutes,
            self.data,
            self.sms,
            self.value,
            extra_data_cost,
            extra_sms_cost,
            total,
        ]
class Package(models.Model):
    # package type identifiers (also used as choice keys)
    SMS = 'sms'
    DATA = 'data'
    TYPES = (
        (SMS, 'SMS'),
        (DATA, 'data')
    )
    type = models.CharField(max_length=25, choices=TYPES)
    unlimited = models.BooleanField(default=False)
    amount = models.IntegerField()
    value = models.DecimalField(max_digits=8, decimal_places=2)

    @staticmethod
    def calculate(amount, _type, accumulated=Decimal()):
        '''Recursive method to calculate packages cost.

        Parameters:
            amount: Amount that needs to be satisfied
            _type: Type of Package (Package.SMS or Package.DATA)
            accumulated: Recursive SUM cache (do not set manually)
        '''
        # base case: the whole amount is covered, return the accumulated cost
        if amount <= 0:
            return accumulated
        # Greater than 800 means unlimited SMS plan
        # NOTE(review): this branch discards `accumulated` and does not
        # filter the unlimited package by type -- confirm this is intended.
        if _type == Package.SMS and amount > 800:
            return Package.objects.values('value').get(unlimited=True).get('value')
        # a single package covering the whole remaining amount (first match;
        # no explicit ordering is applied here)
        package = Package.objects.filter(type=_type, amount__gte=amount).values('value').first()
        # Found the package with correct amount
        if package:
            return package.get('value') + accumulated
        # We are unlucky, need to combine the packages :(
        else:
            # buy the largest available package and recurse on the remainder
            # NOTE(review): assumes the largest package has amount > 0,
            # otherwise this recursion never terminates.
            package = Package.objects.values('value', 'amount').latest('amount')
            amount = amount - package.get('amount')
            accumulated = package.get('value') + accumulated
            return Package.calculate(amount, _type, accumulated=accumulated)
|
# Copyright (c) 2015-2021, Manfred Moitzi
# License: MIT License
import pytest
import math
import ezdxf
from ezdxf.entities import Hatch, BoundaryPathType, EdgeType
from ezdxf.lldxf.tagwriter import TagCollector, Tags
from ezdxf.lldxf import const
from ezdxf.math import Vec3
@pytest.fixture
def hatch():
    # a fresh HATCH entity with default settings
    return Hatch.new()


@pytest.fixture
def path_hatch():
    # HATCH with a polyline boundary path (PATH_HATCH defined below)
    return Hatch.from_text(PATH_HATCH)


@pytest.fixture
def edge_hatch():
    # HATCH with an edge boundary path built from ellipse/line/arc edges
    return Hatch.from_text(EDGE_HATCH)


@pytest.fixture
def spline_edge_hatch():
    # HATCH whose edge boundary path contains a spline edge
    return Hatch.from_text(EDGE_HATCH_WITH_SPLINE)


@pytest.fixture
def hatch_pattern():
    # HATCH with an ANSI33 pattern fill
    return Hatch.from_text(HATCH_PATTERN)
def test_default_settings(hatch):
    # generic DXF entity defaults of a newly created HATCH
    assert hatch.dxf.layer == "0"
    assert hatch.dxf.color == 256  # by layer
    assert hatch.dxf.linetype == "BYLAYER"
    assert hatch.dxf.ltscale == 1.0
    assert hatch.dxf.invisible == 0
    assert hatch.dxf.extrusion == (0.0, 0.0, 1.0)
    assert hatch.dxf.elevation == (0.0, 0.0, 0.0)


def test_default_hatch_settings(hatch):
    # a new HATCH is a solid fill without gradient or pattern data
    assert hatch.has_solid_fill is True
    assert hatch.has_gradient_data is False
    assert hatch.has_pattern_fill is False
    assert hatch.dxf.solid_fill == 1
    assert hatch.dxf.hatch_style == 0
    assert hatch.dxf.pattern_type == 1
    assert hatch.dxf.pattern_angle == 0
    assert hatch.dxf.pattern_scale == 1
    assert hatch.dxf.pattern_double == 0
    assert hatch.dxf.n_seed_points == 0


def test_get_seed_points(hatch):
    assert len(hatch.seeds) == 0


def test_set_seed_points(hatch):
    seed_points = [(1.0, 1.0), (2.0, 2.0)]
    hatch.set_seed_points(seed_points)
    # setter must update both the counter attribute and the stored points
    assert 2 == hatch.dxf.n_seed_points
    assert seed_points == hatch.seeds


def test_remove_all_paths(path_hatch):
    path_hatch.paths.clear()
    assert 0 == len(path_hatch.paths), "invalid boundary path count"


def test_polyline_path_attribs(path_hatch):
    path = path_hatch.paths[0]  # test first boundary path
    assert path.type == BoundaryPathType.POLYLINE
    assert 4 == len(path.vertices)
    assert path.has_bulge() is False
    assert path.is_closed == 1
    assert 7 == path.path_type_flags, "unexpected path type flags"


def test_polyline_path_vertices(path_hatch):
    path = path_hatch.paths[0]  # test first boundary path
    assert path.type == BoundaryPathType.POLYLINE
    assert 4 == len(path.vertices)
    # vertex format: x, y, bulge_value
    assert (10, 10, 0) == path.vertices[0], "invalid first vertex"
    assert (10, 0, 0) == path.vertices[3], "invalid last vertex"
def test_edge_path_count(edge_hatch):
    assert len(edge_hatch.paths) == 1, "invalid boundary path count"


def test_edge_path_type(edge_hatch):
    path = edge_hatch.paths[0]
    assert path.type == BoundaryPathType.EDGE


def test_edge_path_edges(edge_hatch):
    # walk all 8 edges of the EDGE_HATCH boundary path in order
    path = edge_hatch.paths[0]
    edge = path.edges[0]
    assert edge.type == EdgeType.ELLIPSE, "expected ellipse edge as 1. edge"
    assert (10, 5) == edge.center
    assert (3, 0) == edge.major_axis
    assert 1.0 / 3.0 == edge.ratio
    assert 270 == edge.start_angle
    assert 450 == edge.end_angle  # this value was created by AutoCAD == 90 degree
    assert 1 == edge.ccw
    edge = path.edges[1]
    assert edge.type == EdgeType.LINE, "expected line edge type as 2. edge"
    assert (10, 6) == edge.start
    assert (10, 10) == edge.end
    edge = path.edges[2]
    assert edge.type == EdgeType.LINE, "expected line edge as 3. edge"
    assert (10, 10) == edge.start
    assert (6, 10) == edge.end
    edge = path.edges[3]
    assert edge.type == EdgeType.ARC, "expected arc edge as 4. edge"
    assert (5, 10) == edge.center
    assert 1 == edge.radius
    # clockwise arc edge:
    assert 0 == edge.ccw
    # now we get converted and swapped angles
    assert 360 == 360.0 - edge.end_angle  # this value was created by AutoCAD (0 degree)
    assert (
        540 == 360.0 - edge.start_angle
    )  # this value was created by AutoCAD (-180 degree)
    assert -180 == edge.start_angle  # ezdxf representation
    assert 0 == edge.end_angle  # ezdxf representation
    edge = path.edges[4]
    assert edge.type == EdgeType.LINE, "expected line edge as 5. edge"
    assert (4, 10) == edge.start
    assert (0, 10) == edge.end
    edge = path.edges[5]
    assert edge.type == EdgeType.LINE, "expected line edge as 6. edge"
    assert (0, 10) == edge.start
    assert (0, 0) == edge.end
    edge = path.edges[6]
    assert edge.type == EdgeType.LINE, "expected line edge as 7. edge"
    assert (0, 0) == edge.start
    assert (10, 0) == edge.end
    edge = path.edges[7]
    assert edge.type == EdgeType.LINE, "expected line edge as 8. edge"
    assert (10, 0) == edge.start
    assert (10, 4) == edge.end
def test_spline_edge_hatch_get_params(spline_edge_hatch):
    path = spline_edge_hatch.paths[0]
    spline = None
    # locate the spline edge inside the edge path
    for edge in path.edges:
        if edge.type == EdgeType.SPLINE:
            spline = edge
            break
    assert spline is not None, "Spline edge not found."
    assert 3 == spline.degree
    assert 0 == spline.rational
    assert 0 == spline.periodic
    assert (0, 0) == spline.start_tangent
    assert (0, 0) == spline.end_tangent
    assert 10 == len(spline.knot_values)
    assert 11.86874452602773 == spline.knot_values[-1]
    assert 6 == len(spline.control_points)
    assert (0, 10) == spline.control_points[0], "Unexpected start control point."
    assert (0, 0) == spline.control_points[-1], "Unexpected end control point."
    assert 0 == len(spline.weights)
    assert 4 == len(spline.fit_points)
    assert (0, 10) == spline.fit_points[0], "Unexpected start fit point."
    assert (0, 0) == spline.fit_points[-1], "Unexpected end fit point."


def test_create_spline_edge(spline_edge_hatch):
    # create the spline
    path = spline_edge_hatch.paths[0]
    spline = path.add_spline([(1, 1), (2, 2), (3, 3), (4, 4)], degree=3, periodic=1)
    # the following values do not represent a mathematically valid spline
    spline.control_points = [(1, 1), (2, 2), (3, 3), (4, 4)]
    spline.knot_values = [1, 2, 3, 4, 5, 6]
    spline.weights = [4, 3, 2, 1]
    spline.start_tangent = (10, 1)
    spline.end_tangent = (2, 20)
    # test the spline (re-read it as the last edge of the path)
    path = spline_edge_hatch.paths[0]
    spline = path.edges[-1]
    assert 3 == spline.degree
    assert 1 == spline.periodic
    assert (10, 1) == spline.start_tangent
    assert (2, 20) == spline.end_tangent
    assert [(1, 1), (2, 2), (3, 3), (4, 4)] == spline.control_points
    assert [(1, 1), (2, 2), (3, 3), (4, 4)] == spline.fit_points
    assert [1, 2, 3, 4, 5, 6] == spline.knot_values
    assert [4, 3, 2, 1] == spline.weights
    writer = TagCollector()
    spline.export_dxf(writer)
    tags = Tags(writer.tags)
    # DXF group code 97 stores the number of fit points
    assert tags.get_first_value(97) == 4, "expected count of fit points"


def test_create_required_tangents_for_spline_edge_if_fit_points_present(
    spline_edge_hatch,
):
    # create the spline
    path = spline_edge_hatch.paths[0]
    spline = path.add_spline_control_frame(fit_points=[(1, 1), (2, 2), (3, 3), (4, 4)])
    writer = TagCollector()
    spline.export_dxf(writer)
    tags = Tags(writer.tags)
    assert tags.get_first_value(97) == 4, "expected count of fit points"
    # group codes 12/13 are the start/end tangent tags
    assert tags.has_tag(12), "expected start tangent to be present"
    assert tags.has_tag(13), "expected end tangent to be present"


def test_no_fit_points_export(spline_edge_hatch):
    path = spline_edge_hatch.paths[0]
    spline = path.add_spline(
        control_points=[(1, 1), (2, 2), (3, 3), (4, 4)], degree=3, periodic=1
    )
    spline.knot_values = [1, 2, 3, 4, 5, 6]
    assert [(1, 1), (2, 2), (3, 3), (4, 4)] == spline.control_points
    assert len(spline.fit_points) == 0
    writer = TagCollector(dxfversion=const.DXF2007)
    spline.export_dxf(writer)
    # do not write length tag 97 if no fit points exists for DXF2007 and prior
    assert any(tag.code == 97 for tag in writer.tags) is False
    writer = TagCollector(dxfversion=const.DXF2010)
    spline.export_dxf(writer)
    # do write length tag 97 if no fit points exists for DXF2010+
    assert (97, 0) in writer.tags
def test_is_pattern_hatch(hatch_pattern):
    assert hatch_pattern.has_solid_fill is False
    assert hatch_pattern.has_gradient_data is False
    assert hatch_pattern.has_pattern_fill is True


def test_edit_pattern(hatch_pattern):
    # values below match the ANSI33 definition in HATCH_PATTERN
    pattern = hatch_pattern.pattern
    assert 2 == len(pattern.lines)
    line0 = pattern.lines[0]
    assert 45 == line0.angle
    assert (0, 0) == line0.base_point
    assert (-0.1767766952966369, 0.1767766952966369) == line0.offset
    assert 0 == len(line0.dash_length_items)
    line1 = pattern.lines[1]
    assert 45 == line1.angle
    assert (0.176776695, 0) == line1.base_point
    assert (-0.1767766952966369, 0.1767766952966369) == line1.offset
    assert 2 == len(line1.dash_length_items)
    assert [0.125, -0.0625] == line1.dash_length_items


@pytest.fixture()
def pattern():
    # minimal two-line pattern definition: [angle, base_point, offset, dashes]
    return [
        [45, (0, 0), (0, 1), []],  # 1. Line: continuous
        [45, (0, 0.5), (0, 1), [0.2, -0.1]],  # 2. Line: dashed
    ]


def test_create_new_pattern_hatch(hatch, pattern):
    hatch.set_pattern_fill("MOZMAN", definition=pattern)
    # switching to a pattern fill clears solid fill and gradient state
    assert hatch.has_solid_fill is False
    assert hatch.has_gradient_data is False
    assert hatch.has_pattern_fill is True
    assert "MOZMAN" == hatch.dxf.pattern_name
    line0 = hatch.pattern.lines[0]
    assert 45 == line0.angle
    assert (0, 0) == line0.base_point
    assert (0, 1) == line0.offset
    assert 0 == len(line0.dash_length_items)
    line1 = hatch.pattern.lines[1]
    assert 45 == line1.angle
    assert (0, 0.5) == line1.base_point
    assert (0, 1) == line1.offset
    assert 2 == len(line1.dash_length_items)
    assert [0.2, -0.1] == line1.dash_length_items
def test_pattern_scale(hatch, pattern):
    hatch.set_pattern_fill("MOZMAN", definition=pattern)
    hatch.set_pattern_scale(2)
    assert hatch.dxf.pattern_scale == 2
    # scaling is applied to base points and offsets of the pattern lines
    line1, line2 = hatch.pattern.lines
    assert line1.base_point == (0, 0)
    assert line1.offset == (0, 2)
    assert line2.base_point == (0, 1)
    assert line2.offset == (0, 2)


def test_pattern_scale_x_times(hatch, pattern):
    hatch.set_pattern_fill("MOZMAN", definition=pattern)
    hatch.set_pattern_scale(2)
    # scale pattern 3 times of actual scaling 2
    # = base pattern x 6
    hatch.set_pattern_scale(hatch.dxf.pattern_scale * 3)
    assert hatch.dxf.pattern_scale == 6
    line1, line2 = hatch.pattern.lines
    assert line1.base_point == (0, 0)
    assert line1.offset == (0, 6)
    assert line2.base_point == (0, 3)
    assert line2.offset == (0, 6)


def test_pattern_rotation(hatch, pattern):
    hatch.set_pattern_fill("MOZMAN", definition=pattern)
    assert hatch.dxf.pattern_angle == 0
    hatch.set_pattern_angle(45)
    assert hatch.dxf.pattern_angle == 45
    # rotation is applied to line angles, base points and offsets
    line1, line2 = hatch.pattern.lines
    assert line1.angle == 90
    assert line1.base_point == (0, 0)
    assert line1.offset.isclose(Vec3(-0.7071067811865475, 0.7071067811865476))
    assert line2.angle == 90
    assert line2.base_point.isclose(Vec3(-0.35355339059327373, 0.3535533905932738))
    assert line2.offset.isclose(Vec3(-0.7071067811865475, 0.7071067811865476))


def test_pattern_rotation_add_angle(hatch, pattern):
    hatch.set_pattern_fill("MOZMAN", definition=pattern)
    assert hatch.dxf.pattern_angle == 0
    hatch.set_pattern_angle(45)
    assert hatch.dxf.pattern_angle == 45
    # add 45 degrees to actual pattern rotation
    hatch.set_pattern_angle(hatch.dxf.pattern_angle + 45)
    assert hatch.dxf.pattern_angle == 90
def test_create_gradient(hatch):
    hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
    # a gradient counts as a solid fill with extra gradient data
    assert hatch.has_gradient_data is True
    assert hatch.has_solid_fill is True
    assert hatch.has_pattern_fill is False
    gdata = hatch.gradient
    assert (10, 10, 10) == gdata.color1
    assert (250, 250, 250) == gdata.color2
    assert 180 == int(gdata.rotation)
    assert 0 == gdata.centered
    assert 0 == gdata.tint
    assert "LINEAR" == gdata.name


def test_create_gradient_low_level_dxf_tags(hatch):
    hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
    tags = TagCollector.dxftags(hatch.gradient)
    # required gradient group codes must be present in the export
    for code in [450, 451, 452, 453, 460, 461, 462, 470]:
        assert tags.has_tag(code) is True, "missing required tag: %d" % code
    assert 2 == len(tags.find_all(463))
    assert 2 == len(tags.find_all(421))


def test_remove_gradient_data(hatch):
    hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
    assert hatch.has_gradient_data is True
    hatch.set_solid_fill(color=4)  # remove gradient data
    assert hatch.has_gradient_data is False, "gradient data not removed"
    assert hatch.has_pattern_fill is False
    assert hatch.has_solid_fill is True


def test_remove_gradient_low_level_dxf_tags(hatch):
    hatch.set_gradient((10, 10, 10), (250, 250, 250), rotation=180.0)
    assert hatch.has_gradient_data is True
    hatch.set_solid_fill(color=4)  # remove gradient data
    assert hatch.gradient is None


def test_bgcolor_not_exists(hatch):
    assert hatch.bgcolor is None


def test_set_new_bgcolor(hatch):
    hatch.bgcolor = (10, 20, 30)
    assert (10, 20, 30) == hatch.bgcolor


def test_change_bgcolor(hatch):
    hatch.bgcolor = (10, 20, 30)
    assert (10, 20, 30) == hatch.bgcolor
    hatch.bgcolor = (30, 20, 10)
    assert (30, 20, 10) == hatch.bgcolor


def test_delete_bgcolor(hatch):
    hatch.bgcolor = (10, 20, 30)
    assert (10, 20, 30) == hatch.bgcolor
    del hatch.bgcolor
    assert hatch.bgcolor is None


def test_delete_not_existing_bgcolor(hatch):
    # deleting an absent background color must not raise
    del hatch.bgcolor
    assert hatch.bgcolor is None
@pytest.fixture(scope="module")
def msp():
    # shared modelspace for the association tests
    doc = ezdxf.new()
    return doc.modelspace()


# unit square boundary used by the association tests below
VERTICES = [(0, 0), (1, 0), (1, 1), (0, 1)]


def add_hatch(msp):
    # helper: new hatch with one polyline boundary path around VERTICES
    hatch = msp.add_hatch()
    path = hatch.paths.add_polyline_path(VERTICES)
    return hatch, path


def test_associate_valid_entity(msp):
    hatch, path = add_hatch(msp)
    pline = msp.add_lwpolyline(VERTICES, close=True)
    hatch.associate(path, [pline])
    # association stores the handle of the boundary entity on the path
    assert path.source_boundary_objects == [pline.dxf.handle]


def test_if_hatch_is_alive_before_association(msp):
    hatch, path = add_hatch(msp)
    hatch.destroy()
    with pytest.raises(const.DXFStructureError):
        hatch.associate(path, [])


def test_can_not_associate_entity_from_different_document(msp):
    hatch, path = add_hatch(msp)
    pline = msp.add_lwpolyline(VERTICES, close=True)
    pline.doc = None
    with pytest.raises(const.DXFStructureError):
        hatch.associate(path, [pline])


def test_can_not_associate_entity_with_different_owner(msp):
    hatch, path = add_hatch(msp)
    pline = msp.add_lwpolyline(VERTICES, close=True)
    pline.dxf.owner = None
    with pytest.raises(const.DXFStructureError):
        hatch.associate(path, [pline])


def test_can_not_associate_destroyed_entity(msp):
    hatch, path = add_hatch(msp)
    pline = msp.add_lwpolyline(VERTICES, close=True)
    pline.destroy()
    with pytest.raises(const.DXFStructureError):
        hatch.associate(path, [pline])
@pytest.fixture
def square_hatch():
    # standalone 10x10 square hatch, not bound to a document
    hatch = Hatch()
    hatch.paths.add_polyline_path([(0, 0), (10, 0), (10, 10), (0, 10)])
    return hatch


def test_triangulate_hatch(square_hatch: Hatch):
    square_hatch.set_solid_fill(3)
    triangles = list(square_hatch.triangulate(0.01))
    # a square triangulates into exactly two triangles
    assert len(triangles) == 2
    assert (
        len(list(square_hatch.render_pattern_lines())) == 0
    ), "pattern rendering not supported"
def test_triangulate_with_elevation(square_hatch: Hatch):
    """All triangle vertices must be lifted to the elevation z=10.

    Fix: the original asserted ``all(<list-comp> for t in triangles)``; the
    outer all() iterated non-empty *lists*, which are always truthy, so the
    z-coordinates were never actually checked. The generator is flattened
    so every vertex is tested.
    """
    square_hatch.dxf.elevation = Vec3(0, 0, 10)
    square_hatch.set_solid_fill(3)
    triangles = list(square_hatch.triangulate(0.01))
    assert len(triangles) > 0  # guard: an empty result must not pass vacuously
    assert all(math.isclose(v.z, 10) for t in triangles for v in t)
def test_render_pattern_lines(square_hatch: Hatch):
    square_hatch.set_pattern_fill("ANSI31", scale=0.5)
    lines = list(square_hatch.render_pattern_lines())
    assert len(lines) > 8
    assert (
        len(list(square_hatch.triangulate(0.01))) == 2
    ), "expected triangulation support"
def test_render_pattern_lines_with_elevation(square_hatch: Hatch):
    """All pattern-line vertices must be lifted to the elevation z=10.

    Fix: the original asserted ``all(<list-comp> for line in lines)``; the
    outer all() iterated non-empty *lists*, which are always truthy, so the
    z-coordinates were never actually checked. The generator is flattened
    so every vertex is tested.
    """
    square_hatch.set_pattern_fill("ANSI31", scale=0.5)
    square_hatch.dxf.elevation = Vec3(0, 0, 10)
    lines = list(square_hatch.render_pattern_lines())
    assert len(lines) > 0  # guard: an empty result must not pass vacuously
    assert all(math.isclose(v.z, 10) for line in lines for v in line)
PATH_HATCH = """ 0
HATCH
5
27C
330
1F
100
AcDbEntity
8
0
62
1
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
SOLID
70
1
71
0
91
1
92
7
72
0
73
1
93
4
10
10.0
20
10.0
10
0.0
20
10.0
10
0.0
20
0.0
10
10.0
20
0.0
97
0
75
1
76
1
47
0.0442352806926743
98
1
10
4.826903383179796
20
4.715694827530256
450
0
451
0
460
0.0
461
0.0
452
0
462
1.0
453
2
463
0.0
63
5
421
255
463
1.0
63
2
421
16776960
470
LINEAR
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
EDGE_HATCH = """ 0
HATCH
5
1FE
330
1F
100
AcDbEntity
8
0
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
SOLID
70
1
71
1
91
1
92
5
93
8
72
3
10
10.0
20
5.0
11
3.0
21
0.0
40
0.3333333333333333
50
270
51
450
73
1
72
1
10
10.0
20
6.0
11
10.0
21
10.0
72
1
10
10.0
20
10.0
11
6.0
21
10.0
72
2
10
5.0
20
10.0
40
1.0
50
360.0
51
540.0
73
0
72
1
10
4.0
20
10.0
11
0.0
21
10.0
72
1
10
0.0
20
10.0
11
0.0
21
0.0
72
1
10
0.0
20
0.0
11
10.0
21
0.0
72
1
10
10.0
20
0.0
11
10.0
21
4.0
97
8
330
1E7
330
1EC
330
1E4
330
1E6
330
1EA
330
1E5
330
1E2
330
1E3
75
1
76
1
47
0.0226465124087611
98
1
10
5.15694040451099
20
5.079032000141936
450
0
451
0
460
0.0
461
0.0
452
0
462
1.0
453
2
463
0.0
63
5
421
255
463
1.0
63
2
421
16776960
470
LINEAR
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
EDGE_HATCH_WITH_SPLINE = """ 0
HATCH
5
220
330
1F
100
AcDbEntity
8
0
62
1
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
SOLID
70
1
71
1
91
1
92
5
93
4
72
1
10
10.0
20
10.0
11
0.0
21
10.0
72
4
94
3
73
0
74
0
95
10
96
6
40
0.0
40
0.0
40
0.0
40
0.0
40
3.354101966249684
40
7.596742653368969
40
11.86874452602773
40
11.86874452602773
40
11.86874452602773
40
11.86874452602773
10
0.0
20
10.0
10
0.8761452790665735
20
8.935160214313272
10
2.860536415354832
20
6.523392802252294
10
-3.08307347911064
20
4.314363374126372
10
-1.030050983735315
20
1.441423393837641
10
0.0
20
0.0
97
4
11
0.0
21
10.0
11
1.5
21
7.0
11
-1.5
21
4.0
11
0.0
21
0.0
12
0.0
22
0.0
13
0.0
23
0.0
72
1
10
0.0
20
0.0
11
10.0
21
0.0
72
1
10
10.0
20
0.0
11
10.0
21
10.0
97
4
330
215
330
217
330
213
330
214
75
1
76
1
47
0.0365335049696054
98
1
10
5.5
20
4.5
450
0
451
0
460
0.0
461
0.0
452
0
462
1.0
453
2
463
0.0
63
5
421
255
463
1.0
63
2
421
16776960
470
LINEAR
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
HATCH_PATTERN = """0
HATCH
5
1EA
330
1F
100
AcDbEntity
8
0
100
AcDbHatch
10
0.0
20
0.0
30
0.0
210
0.0
220
0.0
230
1.0
2
ANSI33
70
0
71
0
91
1
92
7
72
0
73
1
93
4
10
10.0
20
10.0
10
0.0
20
10.0
10
0.0
20
0.0
10
10.0
20
0.0
97
0
75
1
76
1
52
0.0
41
1.0
77
0
78
2
53
45.0
43
0.0
44
0.0
45
-0.1767766952966369
46
0.1767766952966369
79
0
53
45.0
43
0.176776695
44
0.0
45
-0.1767766952966369
46
0.1767766952966369
79
2
49
0.125
49
-0.0625
47
0.0180224512632811
98
1
10
3.5
20
6.0
1001
GradientColor1ACI
1070
5
1001
GradientColor2ACI
1070
2
1001
ACAD
1010
0.0
1020
0.0
1030
0.0
"""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utilities for analyzing Enron email data"""
import sys
import logging
__author__ = "Pujaa Rajan"
__email__ = "pujaa.rajan@gmail.com"
def logger():
    """
    Create and format a logger that logs to both a file and the console.

    Safe to call repeatedly: handlers are only attached on the first call,
    so log lines are not duplicated.
    @return logging.Logger: the configured 'Enron_email_analysis' logger
    """
    log = logging.getLogger('Enron_email_analysis')
    log.setLevel(logging.DEBUG)
    if not log.handlers:
        # shared formatter for both handlers
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        # file handler which logs even debug messages
        fh = logging.FileHandler('Enron_email_analysis.log')
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(formatter)
        # console handler with the same log level
        ch = logging.StreamHandler(sys.stdout)
        ch.setLevel(logging.DEBUG)
        ch.setFormatter(formatter)
        log.addHandler(fh)
        log.addHandler(ch)
    log.info('Finished creating logger')
    return log
def take_input(fib, **kwargs):
    """Collect a sentence with a blank and return the tokens around it.

    :param fib: input mode -- 'cli' (prompt the user interactively),
        'flask_app' (sentence and word_to_replace supplied via kwargs) or
        'test' (sentence containing a literal '_' supplied via kwargs)
    :return: (before_blank_tokens, after_blank_tokens, word_to_replace)
    :raises Exception: for any other mode
    """
    word_to_replace = ''
    if fib == 'cli':
        sentence = input('Enter a sentence without any punctuation.\n')
        word_to_replace = input('Enter the word you want to replace.\n')
        logging.info(f'User Input Sentence: {sentence}')
        logging.info(f'User Input Word to Replace: {word_to_replace}')
        blanked = sentence.replace(word_to_replace, '_')
        before_blank_tokens, after_blank_tokens = _tokens_around_blank(blanked, '_')
        return before_blank_tokens, after_blank_tokens, word_to_replace
    elif fib == 'flask_app':
        before_blank_tokens, after_blank_tokens = _tokens_around_blank(
            kwargs['sentence'], kwargs['word_to_replace'])
        return before_blank_tokens, after_blank_tokens, kwargs['word_to_replace']
    elif fib == 'test':
        before_blank_tokens, after_blank_tokens = _tokens_around_blank(kwargs['sentence'], '_')
        return before_blank_tokens, after_blank_tokens, word_to_replace
    else:
        # Fix: the original had an unreachable exit() after this raise.
        raise Exception('Enter correct option')

def _tokens_around_blank(sentence, separator):
    """Split *sentence* at the first *separator* and return up to three
    tokens immediately before and after it; warn when fewer are available."""
    parts = sentence.split(separator)
    before_blank_tokens = parts[0].split()[-3:]
    after_blank_tokens = parts[1].split()[:3]
    if len(before_blank_tokens) < 3 or len(after_blank_tokens) < 3:
        print("Please enter at least 3 words before and after the blank!")
    return before_blank_tokens, after_blank_tokens
|
import sys
from common import *
def convert_to_abc(directory_path):
    """Convert every .musicxml file in *directory_path* to ABC notation by
    invoking the bundled xml2abc script in one subprocess call.
    """
    # Fix: os.path.join keeps the script path portable -- the original
    # hard-coded a Windows-style backslash path ("xml2abc\\xml2abc.py").
    subprocess_arguments = [sys.executable, os.path.join("xml2abc", "xml2abc.py")]
    for file_name in os.listdir(directory_path):
        if file_name.endswith(".musicxml"):
            subprocess_arguments.append(os.path.join(directory_path, file_name))
    # -o <dir>: write the converted files next to the sources
    subprocess_arguments.append("-o")
    subprocess_arguments.append(directory_path)
    subprocess.call(subprocess_arguments)
def clean_temporary_files(directory_path):
    """Delete the intermediate .musicxml files left in *directory_path*."""
    leftovers = [name for name in os.listdir(directory_path) if name.endswith(".musicxml")]
    for name in leftovers:
        os.remove(os.path.join(directory_path, name))
def main(muse_score_path, directory_path):
    """Full pipeline: export MusicXML via MuseScore, convert it to ABC,
    then delete the intermediate .musicxml files."""
    muse_score_export(muse_score_path, directory_path, OutputFormat.music_xml)
    convert_to_abc(directory_path)
    clean_temporary_files(directory_path)
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2])
|
from data_batcher import SantanderDataObject
import tensorflow as tf
import numpy as np
class SantanderVanillaModel(object):
    """Feed-forward binary classifier (3 hidden ReLU layers of 4096 units)
    for the Santander data, built with TF1 graph-mode APIs (tf.contrib)."""
    def __init__(self,FLAGS):
        # FLAGS supplies batch_size, test_size, learning_rate and dropout.
        self.FLAGS=FLAGS
        self.dataObject=SantanderDataObject(self.FLAGS.batch_size,self.FLAGS.test_size)
        with tf.variable_scope("SantanderModel",initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,uniform=True)):
            self.add_placeholders()
            self.build_graph()
            self.add_loss()
            self.add_training_step()
    def add_placeholders(self):
        """Create the input placeholders: 200 features per row, one binary label."""
        self.x=tf.placeholder(dtype=tf.float32,shape=[None,200])
        self.y=tf.placeholder(dtype=tf.float32,shape=[None,1])
        # defaults to 1.0 (no dropout) when not fed
        self.keep_prob=tf.placeholder_with_default(1.0, shape=())
    def build_graph(self):
        """Three fully-connected ReLU layers followed by a 1-unit linear output."""
        HIDDEN_LAYER_1=4096
        HIDDEN_LAYER_2=4096
        HIDDEN_LAYER_3=4096
        output1=tf.contrib.layers.fully_connected(self.x,HIDDEN_LAYER_1,activation_fn=tf.nn.relu)
        output2=tf.contrib.layers.fully_connected(output1,HIDDEN_LAYER_2,activation_fn=tf.nn.relu)
        output3=tf.contrib.layers.fully_connected(output2,HIDDEN_LAYER_3,activation_fn=tf.nn.relu)
        self.final_output=tf.contrib.layers.fully_connected(output3,1,activation_fn=None)
        # NOTE(review): self.keep_prob is never used in this graph -- dropout
        # is not actually applied to any layer; confirm whether that is intended.
        self.logits=tf.identity(self.final_output,name='logits')
    def add_loss(self):
        """Sigmoid cross-entropy loss plus prediction/accuracy ops."""
        self.loss=tf.nn.sigmoid_cross_entropy_with_logits(labels=self.y,logits=self.logits)
        self.cost=tf.reduce_mean(self.loss)
        self.prediction=tf.nn.sigmoid(self.final_output)
        # prediction rounded to 0/1 and compared against the label
        self.correct_pred=tf.equal(tf.round(self.prediction),self.y)
        self.accuracy=tf.reduce_mean(tf.cast(self.correct_pred,tf.float32))
    def add_training_step(self):
        """Adam optimizer minimizing the mean loss."""
        self.train_step=tf.train.AdamOptimizer(learning_rate=self.FLAGS.learning_rate).minimize(self.cost)
    def run_train_iter(self,sess,x,y):
        """Run one optimizer step on a single (x, y) batch."""
        train_data_feed={
            self.x:x,
            self.y:y,
            self.keep_prob:(1.0-self.FLAGS.dropout),
        }
        sess.run(self.train_step,feed_dict=train_data_feed)
    def get_validation_accuracy(self,sess):
        """Return accuracy over the dev split, weighted by batch size."""
        validation_accuracy=0.0
        total_items=0
        for x,y in self.dataObject.generate_dev_data():
            total_items+=x.shape[0]
            dev_data_feed={
                self.x:x,
                self.y:y,
                self.keep_prob:1.0,
            }
            validation_accuracy_batch=sess.run([self.accuracy],dev_data_feed)
            # weight each batch's accuracy by its number of rows
            validation_accuracy += validation_accuracy_batch[0]*x.shape[0]
        validation_accuracy/=total_items
        return validation_accuracy
    def get_validation_predictions(self,sess):
        """Return (predicted probabilities, labels) over the dev split."""
        output=[]
        values=[]
        for x,y in self.dataObject.generate_dev_data():
            dev_data_feed={
                self.x:x,
                self.keep_prob:1.0,
            }
            dev_output=sess.run(self.prediction,feed_dict=dev_data_feed)
            dev_output=np.squeeze(dev_output )
            output.extend(dev_output.tolist())
            values.extend(y)
        return output,values
    def get_test_data(self,sess):
        """Return (test ids, predicted probabilities) over the test split."""
        output=[]
        for x in self.dataObject.generate_test_data():
            test_data_feed={
                self.x:x,
                self.keep_prob:1.0,
            }
            test_output=sess.run(self.prediction,feed_dict=test_data_feed)
            test_output=np.squeeze(test_output)
            output.extend(test_output.tolist())
        return self.dataObject.test_ids.tolist(),output
    def run_epoch(self,sess):
        """Train over one full epoch, then report dev accuracy."""
        for x,y in self.dataObject.generate_one_epoch():
            self.run_train_iter(sess,x,y)
        validation_accuracy=self.get_validation_accuracy(sess)
        return validation_accuracy
|
from unittest import TestCase
import boto3
from moto import mock_ec2
from altimeter.aws.resource.ec2.volume import EBSVolumeResourceSpec
from altimeter.aws.scan.aws_accessor import AWSAccessor
from altimeter.core.graph.links import LinkCollection, ResourceLink, SimpleLink
from altimeter.core.resource.resource import Resource
class TestEBSVolumeResourceSpec(TestCase):
    """Scan test for EBSVolumeResourceSpec against a moto-mocked EC2."""
    @mock_ec2
    def test_scan(self):
        """Create one EBS volume and verify the scanned Resource graph."""
        account_id = "123456789012"
        region_name = "us-east-1"
        session = boto3.Session()
        ec2_client = session.client("ec2", region_name=region_name)
        resp = ec2_client.create_volume(Size=1, AvailabilityZone="us-east-1a")
        create_time = resp["CreateTime"]
        created_volume_id = resp["VolumeId"]
        created_volume_arn = f"arn:aws:ec2:us-east-1:123456789012:volume/{created_volume_id}"
        scan_accessor = AWSAccessor(session=session, account_id=account_id, region_name=region_name)
        resources = EBSVolumeResourceSpec.scan(scan_accessor=scan_accessor)
        expected_resources = [
            Resource(
                resource_id=created_volume_arn,
                type="aws:ec2:volume",
                link_collection=LinkCollection(
                    simple_links=(
                        SimpleLink(pred="availability_zone", obj="us-east-1a"),
                        SimpleLink(pred="create_time", obj=create_time),
                        # NOTE(review): the volume was created with Size=1; this
                        # comparison only holds because True == 1 in Python --
                        # confirm whether obj=1 was intended here.
                        SimpleLink(pred="size", obj=True),
                        SimpleLink(pred="state", obj="available"),
                        SimpleLink(pred="volume_type", obj="gp2"),
                        SimpleLink(pred="encrypted", obj=False),
                    ),
                    resource_links=(
                        ResourceLink(pred="account", obj="arn:aws::::account/123456789012"),
                        ResourceLink(pred="region", obj="arn:aws:::123456789012:region/us-east-1"),
                    ),
                ),
            )
        ]
        self.assertEqual(resources, expected_resources)
|
from wxpy import Bot, embed # 微信机器人
import os
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
os.chdir(r'.\PythonLearn\src\images') # 创建工作路径
bot = Bot()
myself = bot.self  # the bot's own account
# bot.file_helper.send('Hello from wxpy!')  # send a message to the file-transfer helper
# To message yourself, run these one-time steps first:
# myself.add()  # add yourself as a friend in Web WeChat
# myself.accept()
# myself.send('能收到吗?')  # send a test message to yourself
my_friend = bot.friends().search('林', Sex=1, City='揭阳')[0:]  # male friends in Jieyang whose name contains "林"
# my_friend.send("祝大家元宵节快乐!")
# my_friend.send_image('元宵节.jpg')
# Enable the puid attribute and pick the file used to save/load the puid mapping
bot.enable_puid('wxpy_puid.pkl')
# Pick one friend
my_friends = bot.friends().search('林')[0]  # [0] means the first match
# Show that friend's puid
print(my_friends.puid)
all_Chat_object = bot.chats(bot.friends(update=True) + bot.groups(update=True) + bot.mps(update=True))  # all chat objects
print(all_Chat_object)
if bot.friends(update=True):  # refresh and fetch all friends
    # all_friends = bot.core.get_friends(update=bot.friends(update=True))
    # print(all_friends)
    Friend_all = bot.friends()  # friend list
    print(Friend_all.stats_text())  # statistics about the friends
    Friends = bot.core.get_friends(update=True)[0:]  # raw friend list
    print(Friends)
    male = female = other = 0  # counters; some users leave Sex unset
    for i in Friends[1:]:  # entry 0 is the account itself, so start at the second entry
        sex = i['Sex']
        if sex == 1:  # 1 = male, 2 = female
            male += 1
        elif sex == 2:
            female += 1
        else:
            other += 1
    total = len(Friends[1:])  # total number of friends
    # print the gender ratio
    print('男性:%.2f%%' % (float(male) / total * 100))
    print('女性:%.2f%%' % (float(female) / total * 100))
    print('未填性别:%.2f%%' % (float(other) / total * 100))
else:
    all_friends = bot._retrieve_itchat_storage('memberList')  # cached friend list
    print(all_friends)
labels = '男性', '女性', '未填性别'
# NOTE(review): male/female/other are only bound inside the if-branch above;
# if the else-branch ran, the next line raises NameError.
sizes = [male, female, other]
explode = (0, 0.1, 0)
# explode pulls slices out of the pie: 0 keeps a slice attached, a positive
# value (here 0.1 for the second, "female" slice) offsets it by that distance
fig1, ax1 = plt.subplots()  # one figure with a single axes
ax1.pie(sizes, explode=explode, labels=labels, autopct='%.2f%%', shadow=True, startangle=90)
# autopct: percentage label format, '%.2f%%' keeps two decimals
# shadow: whether to draw a shadow under the pie
# startangle: initial rotation of the pie (180 would be horizontal)
ax1.axis('equal')  # equal aspect ratio keeps the pie circular
plt.savefig('wechat_sex.png')  # save the chart
bot.file_helper.send_image('wechat_sex.png')  # send the chart to the file-transfer helper
bot.file_helper.send(str('男性:%.0f 位' % male))
bot.file_helper.send(str('女性:%.0f 位' % female))
bot.file_helper.send(str('未填性别:%.0f 位' % other))
thismanager = plt.get_current_fig_manager()
thismanager.window.wm_iconbitmap('LOGO.ico')
thismanager.canvas.set_window_title('林旭东的可视化图表')
plt.show()
if bot.mps(update=True):  # refresh and fetch all subscribed official accounts
    all_mps = bot.core.get_mps(update=bot.mps(update=True))
    print(all_mps)
else:
    all_mps = bot._retrieve_itchat_storage('mpList')
    print(all_mps)
@bot.register(my_friend)  # reply to my_friend's messages (later registrations match first!)
def reply_my_friend(msg):
    """Echo back the text and type of any message received from my_friend."""
    reply_template = 'received:{}({})'
    return reply_template.format(msg.text, msg.type)
@bot.register(msg_types=bot.friends)
def auto_accept_friends(msg):  # automatically accept new friend requests
    """Accept an incoming friend request and greet the new friend.

    NOTE(review): msg_types is passed bot.friends (a bound method); wxpy
    examples register friend-request cards with the Friend class -- confirm
    this registration actually fires.
    """
    new_friend = msg.card.accept()  # accept the friend request
    new_friend.send('您好!我自动接受了你的好友请求。')  # greet the new friend
embed()  # drop into an interactive Python console and keep the program running
# bot.join()  # alternative: just block the current thread
|
#!/usr/bin/env python
from tthAnalysis.HiggsToTauTau.safe_root import ROOT
from tthAnalysis.HiggsToTauTau.common import logging, SmartFormatter
from tthAnalysis.HiggsToTauTau.configs.EvtYieldHistManager_cfi import * # EvtYieldHistManager_201*
import logging
import argparse
import os
import hashlib
# Credit to: https://stackoverflow.com/a/3431838/4056193
def md5(fname):
    """Return the hex MD5 digest of the file at *fname*, read in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(fname, "rb") as stream:
        while True:
            chunk = stream.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
class Histograms(object):
    """Loads every histogram from a ROOT file and scales each by a luminosity SF."""
    def __init__(self, fn, sf):
        """
        :param fn: path to the input ROOT file
        :param sf: scale factor applied to every histogram in the file
        """
        self.fn = fn
        logging.info("Scaling histograms in file {} ({}) by SF {}".format(self.fn, md5(self.fn), sf))
        fptr = ROOT.TFile.Open(self.fn, 'read')
        keys = [ key.GetName() for key in fptr.GetListOfKeys() ]
        self.histograms = {}
        for key in keys:
            # Clone + SetDirectory(0) detaches the histogram from the file
            # so it stays alive after fptr.Close() below.
            histogram = fptr.Get(key).Clone()
            histogram.SetDirectory(0)
            # only TH2D payloads are expected; duplicate keys are a hard error
            assert(type(histogram) == ROOT.TH2D)
            assert(key not in self.histograms)
            histogram.Scale(sf)
            logging.info("Found histogram {} in file {}".format(key, self.fn))
            self.histograms[key] = histogram
        fptr.Close()
    def get_excess(self, common_histograms):
        """Return the sorted list of keys present here but absent from *common_histograms*."""
        return list(sorted(set(self.histograms.keys()) - common_histograms))
def compatible_binning(lhs, rhs):
    """Return True when two TH2 histograms share identical X and Y binning
    (same bin counts and the same bin upper edges on both axes)."""
    axis_pairs = ((lhs.GetXaxis(), rhs.GetXaxis()), (lhs.GetYaxis(), rhs.GetYaxis()))
    # bin counts must agree on both axes before edges are compared
    for axis_a, axis_b in axis_pairs:
        if axis_a.GetNbins() != axis_b.GetNbins():
            return False
    # compare the full edge lists (index 0 .. nbins) axis by axis
    for axis_a, axis_b in axis_pairs:
        nbins = axis_a.GetNbins()
        edges_a = [ axis_a.GetBinUpEdge(i) for i in range(nbins + 1) ]
        edges_b = [ axis_b.GetBinUpEdge(i) for i in range(nbins + 1) ]
        if edges_a != edges_b:
            return False
    return True
def get_sfs(era, periods):
    """Return each period's share of the year's total integrated luminosity.

    :param era: data-taking year (2016, 2017 or 2018)
    :param periods: iterables of acquisition-era letters, e.g. ['AB', 'CDE']
    :return: list of luminosity fractions, one per period (sums to 1)
    :raises RuntimeError: for an unknown era or acquisition era
    """
    if era == 2016:
        yields_cfg = EvtYieldHistManager_2016
    elif era == 2017:
        yields_cfg = EvtYieldHistManager_2017
    elif era == 2018:
        yields_cfg = EvtYieldHistManager_2018
    else:
        raise RuntimeError("Invalid era: %d" % era)
    period_lumis = []
    for period in periods:
        lumi = 0.
        for acquisition_era in period:
            # config entries are keyed like 'Run2017B'
            key = 'Run{}{}'.format(era, acquisition_era)
            if not hasattr(yields_cfg, key):
                raise RuntimeError("No such acquisition era found in year %d: %s" % (era, acquisition_era))
            lumi += float(getattr(yields_cfg, key).luminosity.configValue())
        logging.info("Sum of integrated luminosity across eras {} in year {}: {}".format(period, era, lumi))
        period_lumis.append(lumi)
    total_lumi = sum(period_lumis)
    logging.info("Total integrated luminosity in year {}: {}".format(era, total_lumi))
    return [ period_lumi / total_lumi for period_lumi in period_lumis ]
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input',
        type = str, dest = 'input', metavar = 'directory', required = True, nargs = '+',
        help = 'R|Input TH2 SFs',
    )
    parser.add_argument('-e', '--era',
        type = int, dest = 'era', metavar = 'year', required = True, choices = [ 2016, 2017, 2018 ],
        help = 'R|Era',
    )
    parser.add_argument('-p', '--periods',
        type = str, dest = 'periods', metavar = 'period', required = True, nargs = '+',
        help = 'R|Acquisition eras (eg AB and CDE)',
    )
    parser.add_argument('-o', '--output',
        type = str, dest = 'output', metavar = 'file', required = True,
        help = 'R|Output file path',
    )
    args = parser.parse_args()
    # one input file is expected per acquisition-era group
    if len(args.input) != len(args.periods):
        raise ValueError("Number of input files must equal to the number of acquisition eras")
    # per-period luminosity fractions used to scale each input file
    sfs = get_sfs(args.era, args.periods)
    assert(all([ os.path.isfile(fn) for fn in args.input ]))
    assert(len(args.input) == len(sfs))
    inputs = [ Histograms(args.input[i], sfs[i]) for i in range(len(args.input)) ]
    # make sure that all input ROOT files contain the same histograms
    # (NOTE: the loop variable `input` shadows the builtin of the same name)
    assert(len(inputs) > 0)
    common_histograms = set(inputs[0].histograms.keys())
    for input in inputs[1:]:
        common_histograms.intersection_update(set(input.histograms.keys()))
    for input in inputs:
        excess = input.get_excess(common_histograms)
        if excess:
            raise RuntimeError("Found uncommon histograms in file %s: %s" % (input.fn, ', '.join(excess)))
    # add the (already scaled) histograms across the input files
    result = {}
    for common_histogram in common_histograms:
        histogram_base = inputs[0].histograms[common_histogram].Clone()
        for input in inputs[1:]:
            other_histogram = input.histograms[common_histogram]
            assert(compatible_binning(histogram_base, other_histogram))
            histogram_base.Add(other_histogram)
        result[common_histogram] = histogram_base
    # write the summed histograms to the output ROOT file
    output_dir = os.path.dirname(os.path.abspath(args.output))
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    output_f = ROOT.TFile.Open(args.output, 'recreate')
    output_f.cd()
    for common_histogram in common_histograms:
        result[common_histogram].Write()
    output_f.Close()
    logging.info("Wrote file: {}".format(args.output))
|
class PluginAlreadyRegistered(Exception):
    """Raised when registering a plugin that is already registered."""
    pass
class PluginNotRegistered(Exception):
    """Raised when an operation refers to an unregistered plugin."""
    pass
class AppAllreadyRegistered(Exception):
    """Raised when registering an app that is already registered.

    NOTE: the "Allready" misspelling is kept -- renaming would break callers.
    """
    pass
class NotImplemented(Exception):
    """Raised for not-yet-implemented features.

    NOTE(review): this shadows the builtin `NotImplemented` constant and is
    distinct from the builtin NotImplementedError; kept for compatibility.
    """
    pass
class SubClassNeededError(Exception):
    """Raised when a subclass was required but a base class was used."""
    pass
class MissingFormError(Exception):
    """Raised when a required form is missing."""
    pass
class NoHomeFound(Exception):
    """Raised when no home could be found."""
    pass
class PermissionsException(Exception):
    """Base class for permission-related exceptions."""
class NoPermissionsException(PermissionsException):
    """Raised when an action violates the permission system."""
class DuplicatePlaceholderWarning(Warning):
    """Warning category for duplicate placeholders."""
    pass
class DontUsePageAttributeWarning(Warning):
    """Warning category advising against use of the page attribute."""
    pass
# Global accumulator: entries are newline-separated fields with "NEW:"
# markers between consecutive entries.
to_do = ''
def add_task(date, start_time, duration, attendees, curr_list):
    """Append a task entry (date, start time, duration, attendees) to the
    global to-do string.

    NOTE(review): the incoming value of *curr_list* is ignored -- it is
    immediately overwritten; the parameter exists only for call compatibility.
    """
    global to_do
    curr_list = date + '\n' + start_time + '\n' + duration + '\n' + attendees + '\n' + "NEW:" + '\n'
    to_do += curr_list
def add_event(date, time, location, curr_list):
    """Append an event entry (date, time, location) to the global to-do
    string, terminated by a "NEW:" marker.

    Note: the incoming value of *curr_list* is ignored; the entry is rebuilt
    from the other arguments (parameter kept for call compatibility).
    """
    global to_do
    entry = '\n'.join(['', date, time, location, "NEW:"])
    to_do += entry
def remove_item(to_do_list):
    """Remove the oldest entry from the global to-do string.

    With more than one entry (more than 5 newlines) everything before the
    first "NEW:" marker is dropped; with one entry left the list is cleared;
    when nothing is left the user is told there is nothing to remove.

    NOTE(review): *to_do_list* is ignored -- the function operates on the
    global `to_do` only (parameter kept for call compatibility).
    """
    global to_do
    newline_count = to_do.count('\n')
    if newline_count > 5:
        # drop everything before the first "NEW:" marker
        to_do = to_do[to_do.index("NEW:"):]
    elif newline_count in (4, 5):
        # a single entry remains: clear the list
        # (Fix: the original also tested `== 6` here, which was unreachable
        # because counts above 5 are handled by the branch above.)
        to_do = ''
    else:
        to_do = ''
        print('Everything has been done! Nothing to remove.')
# Main interactive loop: keep prompting until the user types 'Exit'.
item = input('If you would like to add a task or event, please type task or event. If you want to remove, type rm:{}'.format('\n'))
while item != 'Exit':
    if item == 'task':
        add_task(input('Date: '), input('Start Time: '), input('Duration: '), input('Attendees: '), to_do)
        print(to_do)
    elif item == 'event':
        add_event(input('Date: '), input('Time: '), input('Location: '), to_do)
        print(to_do)
    elif item == 'rm':
        remove_item(to_do)
        print(to_do)
    item = input('What else would you like to add or remove?')
|
"""
Plot fitted GSMF and original data
"""
import json
import sys
import numpy as np
from scipy.stats import binned_statistic
from matplotlib import cm
import matplotlib.pyplot as plt
from methods import piecewise_linear
from methods import mass_bins, binned_weighted_quantile
# NOTE(review): exec() injects santini17 / salmon15 (observational datasets)
# into this namespace from a sibling script; a regular import would be safer
# and clearer -- and exec should never be used on untrusted paths.
exec(open("./obs_data_sfs.py").read())
import flares
fl = flares.flares(fname='../../flares/data/flares.hdf5')
tags = fl.tags
# tag names encode redshift after character 5, e.g. '...z005p000' -> 5.0
zeds = [float(tag[5:].replace('p','.')) for tag in tags]
## ---- Overdensity Weights
dat = np.loadtxt(fl.weights, skiprows=1, delimiter=',')
weights = dat[:,8]
index = dat[:,0]
## ---- Plot
ticks = np.linspace(0.05, .95, len(tags))
colors = [ cm.viridis(i) for i in ticks ]
fig, (ax1,ax2) = plt.subplots(2,1,figsize=(5.5,10.5))
plt.subplots_adjust(hspace=0.1)
axes = [ax1,ax2]#,ax4,ax5,ax6]
# one panel per tag/redshift (only tags[4:] are plotted -- two panels)
for c,ax,tag,z in zip(colors[4:],axes,tags[4:],zeds[4:]):
    print(tag)
    with open('samples/sfs_fit_%s.json'%tag) as f:
        p = json.load(f)
    x = np.linspace(8,12,int(1e3))
    # median posterior parameters of the piecewise-linear SFS fit
    x0,y0,m1,m2 = p['x0']['median'],p['y0']['median'],p['m1']['median'],p['m2']['median']
    ax.plot(x,piecewise_linear(x-9.7,*[x0,y0,m1,m2]),color=c,lw=4)
    ## Observations ##
    mask = (santini17['z_low'] < z-0.1) & (santini17['z_high'] > z-0.1)
    if np.sum(mask) != 0:
        s17_artist = ax.errorbar(np.log10(santini17['mstar'][mask]), np.log10(santini17['sfr'][mask]),
                                 yerr=santini17['sigma_sfr'][mask], fmt='p', label='Santini+17', color='grey')
    mask = ((salmon15['z']-0.5) < z) & ((salmon15['z']+0.5) > z)
    if np.sum(mask) != 0:
        print('salmon15')
        s15_artist = ax.errorbar(salmon15['logM'][mask], salmon15['logSFR'][mask],
                                 yerr=salmon15['sigma_MC'][mask], fmt='s', label='Salmon+15', color='grey')
    ax.text(0.1, 0.8, '$z = %.1f$'%z, transform=ax.transAxes, size=15)
    ax.set_xlim(8,11.5)
    ax.set_ylim(-1,3)
    ax.grid(alpha=0.5)
ax2.set_xlabel('$\mathrm{log_{10}} \, (M_{\mathrm{*}} \,/\, \mathrm{M_{\odot}})$', size=16)
for ax in axes:#[ax4,ax5,ax6]:
    ax.set_ylabel('$\mathrm{log_{10}}\,(\mathrm{SFR} \,/\, \mathrm{M_{\odot}} \, \mathrm{yr^{-1}})$', size=16)
#for ax in [ax1,ax4]:
#    ax.set_ylabel('$\mathrm{log_{10}}\,(\mathrm{SFR} \,/\, \mathrm{M_{\odot}} \, \mathrm{yr^{-1}})$', size=16)
for ax in [ax2]:#[ax5,ax6,ax2,ax3]:
    ax.set_yticklabels([])
#for ax in [ax1,ax2,ax3]:
#    ax.set_xticklabels([])
ax2.legend(frameon=False, loc=3);
plt.show()
# imgf='images/sfs_obs.png'
# print(imgf)
# fig.savefig(imgf, dpi=150, bbox_inches='tight')
|
from django.db import models
from common.attrs import get_attr_values
from common.constants import FALSE
from common.models import BaseModel
class SystemRoles(BaseModel):
    """Django model for system-level roles (table `system_roles`)."""
    # surrogate primary key
    sys_role_id = models.AutoField(primary_key=True, verbose_name='系统角色标识')
    sys_role_name = models.CharField(max_length=50, verbose_name='系统角色名称')
    sys_role_code = models.CharField(max_length=30, verbose_name='系统角色编码', null=True, blank=True)
    sys_role_desc = models.CharField(max_length=250, verbose_name='系统角色描述', null=True, blank=True)
    # whether this row is seeded system data (TRUE_OR_FALSE choices, default FALSE)
    init_flag = models.IntegerField(verbose_name='是否系统初始数据', choices=get_attr_values('BaseModel', 'TRUE_OR_FALSE'),
                                    null=True, blank=True, default=FALSE)
    class Meta:
        db_table = 'system_roles'
        verbose_name_plural = "系统角色"
        verbose_name = "系统角色"
    def __str__(self):
        return u'%s' % self.sys_role_name
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from common import manage_province_city_district
from common import database
from common import prepare_data
from calculate import calculate_price
import json
# Create your views here.
@csrf_exempt
def window(request):
    """Render the main window page."""
    return render(request, "page/window.html")
@csrf_exempt
def finished_automobile_line(request):
    """Render the finished-automobile line page."""
    return render(request, "page/finished_automobile_line.html")
@csrf_exempt
def finished_automobile_price(request):
    """Render the finished-automobile price page."""
    return render(request, "page/finished_automobile_price.html")
@csrf_exempt
def distpicker_data_js(request):
    """Serve the distpicker data script through the template engine."""
    return render(request,"page/js/distpicker.data.js")
@csrf_exempt
def distpicker_js(request):
    """Serve the distpicker script through the template engine."""
    return render(request,"page/js/distpicker.js")
@csrf_exempt
def main_js(request):
    """Serve the main front-end script through the template engine."""
    return render(request,"page/js/main.js")
@csrf_exempt
def getDistance(request):
    """Return the navigation distance and delivery-time info between two
    province/city/district addresses posted by the front end (JSON)."""
    data = request.POST
    senderProvince = data.get("senderProvince")
    senderCity = data.get("senderCity")
    senderDistrict = data.get("senderDistrict")
    recipientProvince = data.get("recipientProvince")
    recipientCity = data.get("recipientCity")
    recipientDistrict = data.get("recipientDistrict")
    environment = data.get("environment")
    if (environment == "205环境"):
        sessionId = "201811199INnDXmzMduEjiL"
    if (environment == "预发环境"):
        sessionId = "20181124or0qjhYfEFtlDtb"
    # NOTE(review): if environment matches neither value above, sessionId is
    # never bound and the calls below raise UnboundLocalError -- confirm the
    # front end can only send these two values.
    origin = manage_province_city_district.getLonAndLat(senderProvince, senderCity, senderDistrict)
    destination = manage_province_city_district.getLonAndLat(recipientProvince, recipientCity, recipientDistrict)
    agingInfo = prepare_data.getAgingConfigInfo(origin,destination,sessionId)
    distance = manage_province_city_district.getNavigationDistance(origin, destination,sessionId)
    result = {'distance':distance,'agingInfo':[ agingInfo['standardTime'] ,agingInfo['commonTime'] ,agingInfo['urgentTime'] ]}
    return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def calculateVehiclePrice(request):
    """Quote a full-truckload price for the posted route / cargo parameters
    and return it as plain text ('' when no quote is available)."""
    data = request.POST
    # read the form fields posted by the front end
    selectCalcuteWay = data.get('selectCalcuteWay')
    senderProvince = data.get('senderProvince')
    senderCity = data.get('senderCity')
    senderDistrict = data.get('senderDistrict')
    recipientProvince = data.get('recipientProvince')
    recipientCity = data.get('recipientCity')
    recipientDistrict = data.get('recipientDistrict')
    distance = data.get("distance")
    goodsName = data.get("goodsName")
    tonnage = data.get("tonnage")
    volume = data.get("volume")
    agingWay = data.get("agingWay")
    loadUnloadTemplate = data.get("loadUnloadTemplate")
    invoiceWay = data.get("invoiceWay")
    environment = data.get("environment")
    if (environment == "205环境") :
        sessionId = "201811199INnDXmzMduEjiL"
    if (environment == "预发环境"):
        sessionId = "20181124or0qjhYfEFtlDtb"
    # NOTE(review): sessionId stays unbound for any other environment value
    # and the final call below would raise UnboundLocalError.
    # map the sender province/city/district names to their code, e.g. 330002
    senderAddress = handleProvinceName(senderProvince) + '-' + handleCityName(senderCity,senderProvince) + '-' + senderDistrict
    print senderAddress
    senderAddressCode = database.getAddressCode(senderAddress,environment)
    start_province = senderAddressCode['provinceid']
    start_city = senderAddressCode['cityid']
    start_district = senderAddressCode['district']
    print "执行了"
    # same lookup for the recipient address
    recipientAddress = handleProvinceName(recipientProvince) + '-' + handleCityName(recipientCity,recipientProvince) + '-' + recipientDistrict
    print recipientAddress
    recipientAddressCode = database.getAddressCode(recipientAddress,environment)
    arrive_province = recipientAddressCode['provinceid']
    arrive_city = recipientAddressCode['cityid']
    arrive_district = recipientAddressCode['district']
    origin = manage_province_city_district.getLonAndLat(senderProvince,senderCity,senderDistrict)
    destination = manage_province_city_district.getLonAndLat(recipientProvince,recipientCity,recipientDistrict)
    result = calculate_price.getOneVehicleLinePrice(start_province,start_city,start_district,arrive_province,arrive_city,arrive_district,
                                                    tonnage,volume,goodsName,selectCalcuteWay,distance,loadUnloadTemplate,invoiceWay,agingWay,origin,destination,sessionId,environment)
    if (result != ''):
        return HttpResponse(result)
    else:
        return HttpResponse('')
def handleProvinceName(provinceName):
    """Strip a trailing '市' from a province-level name (municipalities);
    other names are returned unchanged."""
    if not str(provinceName).endswith('市'):
        return provinceName
    return provinceName[:-1]
# strip the trailing '市' from second-level (city) names where required
def handleCityName(cityName,provinceName):
    """Normalize a city name so it matches the address records in the DB.

    Cities in certain provinces, and an explicit whitelist of cities, keep
    their full name; otherwise a trailing '市' (or '市辖区') is stripped.
    Any other name is returned unchanged.
    """
    if ((provinceName == "吉林省") or (provinceName == "安徽省")or (provinceName == "湖北省")or (provinceName == "广东省")) :
        city_name = cityName
    elif ((cityName == "唐山市") or (cityName == "北京市")or (cityName == "重庆市")or (cityName == "天津市") or (cityName == "包头市")
          or (cityName == "毕节市") or (cityName == "铜仁市") or (cityName == "延安市") or (cityName == "渭南市") or (cityName == "咸阳市")
          or (cityName == "宝鸡市")or (cityName == "铜川市")or (cityName == "西安市")or (cityName == "汉中市")or (cityName == "榆林市")
          or (cityName == "安康市")or (cityName == "商洛市")or (cityName == "西宁市")or (cityName == "海东市")or (cityName == "固原市")
          or (cityName == "中卫市")or (cityName == "乌鲁木齐市")or (cityName == "克拉玛依市")or (cityName == "吐鲁番市")or (cityName == "三沙市")
          or (cityName == "乌海市") or (cityName == "赤峰市") or (cityName == "通辽市")or (cityName == "许昌市")):
        city_name = cityName
    elif (str(cityName).endswith('市')):
        city_name = cityName[:-1]
    elif (str(cityName).endswith('市辖区')):
        city_name = cityName[:-3]
    else:
        # Fix: previously no branch matched names such as '恩施州', leaving
        # city_name unbound and raising UnboundLocalError; fall back to the
        # unmodified name.
        city_name = cityName
    return city_name
def test(senderProvince,senderCity,senderDistrict,recipientProvince,recipientCity,recipientDistrict,tonnage,
         volume, goodsName, selectCalcuteWay, distance,loadUnloadTemplate, invoiceWay, agingWay,sessionId,environment ):
    """Manual smoke test: run the full quote pipeline and print the result."""
    # map the sender province/city/district names to their code, e.g. 330002
    senderAddress = senderProvince + '-' + handleCityName(senderCity,senderProvince) + '-' + senderDistrict
    print senderAddress
    senderAddressCode = database.getAddressCode(senderAddress,environment)
    start_province = senderAddressCode['provinceid']
    start_city = senderAddressCode['cityid']
    start_district = senderAddressCode['district']
    # same lookup for the recipient address
    recipientAddress = recipientProvince + '-' + handleCityName(recipientCity,recipientProvince) + '-' + recipientDistrict
    # NOTE(review): this prints senderAddress a second time; recipientAddress
    # was probably intended here.
    print senderAddress
    recipientAddressCode = database.getAddressCode(recipientAddress,environment)
    arrive_province = recipientAddressCode['provinceid']
    arrive_city = recipientAddressCode['cityid']
    arrive_district = recipientAddressCode['district']
    origin = manage_province_city_district.getLonAndLat(senderProvince, senderCity, senderDistrict)
    destination = manage_province_city_district.getLonAndLat(recipientProvince, recipientCity, recipientDistrict)
    result = calculate_price.getOneVehicleLinePrice(start_province, start_city, start_district, arrive_province,
                                                    arrive_city, arrive_district,
                                                    tonnage, volume, goodsName, selectCalcuteWay, distance,
                                                    loadUnloadTemplate, invoiceWay, agingWay, origin, destination,sessionId,environment)
    if (result != ''):
        print"结果:%s"%result
    else:
        print"暂无估价"
#test('浙江省','金华市','兰溪市','浙江省','金华市','义乌市','5',
# '0', '纺织类', '整车线路', '80.251','一装一卸', '无需发票','12-24','201811199INnDXmzMduEjiL',"预发环境" )
#test('浙江省','金华市','兰溪市','河北省','衡水市','景县','12',
# '30', '面粉', '整车价格', '1198.559','一装一卸', '无需发票','42-48' )
|
from binance.client import Client
from datetime import datetime as dt
from api_data import api, secret
from Allert import allert_buy, allert_sell
import time
client = Client(api, secret)
def buy_signal(tiker, period):
    """Check hourly candles for a buy signal; block until one appears.

    Returns True at minute 0, second 30 of an hour for which
    allert_buy(tiker, period) reports a signal.
    """
    while True:
        # run the check once per hour
        # if dt.now().minute == 59 and dt.now().second == 58 and (10 > dt.now().microsecond > 0):
        if dt.now().minute == 0 and dt.now().second == 30:
            if allert_buy(tiker, period):
                # print('server_time: {}'.format(dt.fromtimestamp(client.get_server_time()['serverTime'] / 1000)))
                # print('local_time: {}'.format(dt.now()))
                print('Buy!')
                return True
            else:
                # print('server_time: {}'.format(dt.fromtimestamp(client.get_server_time()['serverTime'] / 1000)))
                # print('local_time: {}'.format(dt.now()))
                # print('Not yet!')
                # no signal: sleep almost a full hour until the next window
                time.sleep(3580)
                pass
        else:
            # NOTE(review): this branch busy-spins at full CPU while waiting
            # for the minute-0/second-30 window -- consider a short sleep.
            pass
def sell_signal(tiker, period):
    """Check hourly candles for a sell signal; block until one appears.

    Returns True at minute 0, second 30 of an hour for which
    allert_sell(tiker, period) reports a signal.
    """
    while True:
        # run the check (the original comment said "every 15 minutes", but
        # the condition below fires once per hour, at minute 0 second 30)
        # if dt.now().minute == 59 and dt.now().second == 58 and (10 > dt.now().microsecond > 0):
        if dt.now().minute == 0 and dt.now().second == 30:
            if allert_sell(tiker, period):
                # print('server_time: {}'.format(dt.fromtimestamp(client.get_server_time()['serverTime'] / 1000)))
                # print('local_time: {}'.format(dt.now()))
                print('Sell!')
                return True
            else:
                # print('server_time: {}'.format(dt.fromtimestamp(client.get_server_time()['serverTime'] / 1000)))
                # print('local_time: {}'.format(dt.now()))
                # print('Not yet!')
                # no signal: sleep almost a full hour until the next window
                time.sleep(3580)
                pass
        else:
            # NOTE(review): this branch busy-spins at full CPU while waiting
            # for the minute-0/second-30 window -- consider a short sleep.
            pass
|
import math
# noinspection PyPackageRequirements
from typing import Tuple
import numpy as np
# noinspection PyPackageRequirements
import cv2
from Application.Frame.global_variables import JobInitStateReturn
from Application.Frame.port import Port
from Application.Frame.transferJobPorts import get_port_from_wave
from Utils.log_handler import log_to_file, log_error_to_console
from Application.Config.create_config import jobs_dict, create_dictionary_element
from config_main import PYRAMID_LEVEL
from Application.Config.util import transform_port_name_lvl, transform_port_size_lvl, job_name_create, get_module_name_from_file
############################################################################################################################################
# Internal functions
############################################################################################################################################
def process_edge_map(edge_map: Port.arr, port_name_output: Port.arr, port_name_labels_output: Port.arr, connectivity: int):
    """
    Label the connected components of an edge bitmap and publish a colored
    component map plus the raw label image to the given ports.
    # http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/MORSE/connectivity.pdf
    :param edge_map : bitmap of edges
    :param port_name_output: port to hold output image
    :param port_name_labels_output: port to hold the raw component-label image
    :param connectivity: 8 or 4 for 8-way or 4-way connectivity respectively
    :return number of labels, average number of pixels per label, number of edges
    """
    p_out = get_port_from_wave(name=port_name_output)
    p_out_labels = get_port_from_wave(name=port_name_labels_output)
    # threshold image to be sure that all edges have 255 value
    ret, edge_map = cv2.threshold(src=edge_map, thresh=1, maxval=255, type=cv2.THRESH_BINARY)
    edge_map = np.uint8(edge_map)
    num_labels, labels = cv2.connectedComponents(image=edge_map, connectivity=connectivity)
    # Map component labels to hue val, 0-179 is the hue range in OpenCV
    # NOTE(review): if edge_map has no edge pixels at all, np.max(labels) is 0
    # and this division produces NaNs (RuntimeWarning) -- confirm inputs are
    # guaranteed non-empty.
    label_hue = np.uint8(179 * labels / np.max(labels))
    blank_ch = 255 * np.ones_like(label_hue)
    labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
    # Converting cvt to BGR
    p_out.arr[:] = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
    # set bg label to black
    p_out.arr[label_hue == 0] = 0
    p_out.set_valid()
    p_out_labels.arr[:] = labels
    p_out_labels.set_valid()
    # non-zero labels are edge pixels; num_labels still counts the background
    nr_edge_pixels = np.count_nonzero(labels)
    return num_labels, nr_edge_pixels / num_labels, nr_edge_pixels
############################################################################################################################################
# Init functions
############################################################################################################################################
def init_edge_label(param_list: list = None) -> JobInitStateReturn:
    """
    Write the CSV column headers for the edge-label statistics job.
    :param param_list: list whose first element is the port name used to
                       label each of the three columns
    :return: INIT or NOT_INIT state for the job
    """
    port_name = param_list[0]
    # One header per statistic logged by the main function, in the same order.
    for header in ('Nr Edges ', 'AVG px/edge ', 'Nr Edge px '):
        log_to_file(header + port_name)
    return JobInitStateReturn(True)
# define a init function, function that will be executed at the begging of the wave
def init_func_global() -> JobInitStateReturn:
    """
    Global init hook, invoked once before the framework starts grabbing
    pictures; this job needs no setup so it always reports success.
    :return: INIT or NOT_INIT state for the job
    """
    return JobInitStateReturn(True)
############################################################################################################################################
# Main functions
############################################################################################################################################
def create_edge_label_map(param_list: list = None) -> bool:
    """
    Labels the connected components of an edge-map image and logs the number
    of components, the average pixels per component and the edge-pixel count.
    :param param_list: Param needed to respect the following list:
                       [port_in name: image, wave_in: int, connectivity: int,
                        port_out: image RGB of edges, port_out: label map]
    :return: True if the job executed OK.
    """
    # Positional layout of param_list.
    # noinspection PyPep8Naming
    PORT_IN_POS = 0
    # noinspection PyPep8Naming
    PORT_IN_WAVE = 1
    # noinspection PyPep8Naming
    PORT_CONNECTIVITY_POS = 2
    # noinspection PyPep8Naming
    PORT_OUT_POS = 3
    # noinspection PyPep8Naming
    PORT_OUT_LABELS_POS = 4
    if len(param_list) != 5:
        log_error_to_console("EDGE LABEL JOB MAIN FUNCTION PARAM NOK", str(len(param_list)))
        return False
    else:
        p_in_1 = get_port_from_wave(name=param_list[PORT_IN_POS], wave_offset=param_list[PORT_IN_WAVE])
        if p_in_1.is_valid() is True:
            try:
                nr_edge, average_px_edge, nr_edge_px = process_edge_map(edge_map=p_in_1.arr, port_name_output=param_list[PORT_OUT_POS], port_name_labels_output=param_list[PORT_OUT_LABELS_POS],
                                                                       connectivity=param_list[PORT_CONNECTIVITY_POS])
                # One CSV cell per statistic, matching the headers written by init_edge_label.
                log_to_file(str(nr_edge))
                log_to_file(str(average_px_edge))
                log_to_file(str(nr_edge_px))
            except BaseException as error:
                log_error_to_console("EDGE LABEL JOB NOK: ", str(error))
                # Keep the CSV columns aligned even when the job fails.
                log_to_file('')
                log_to_file('')
                log_to_file('')
                pass
        else:
            # Input port invalid this wave: still emit empty CSV cells.
            log_to_file('')
            log_to_file('')
            log_to_file('')
            return False
        return True
# define a main function, function that will be executed at the begging of the wave
def main_func_line_filtering(param_list: list = None) -> bool:
    """
    Main function for the line-filtering job: keeps only the lines whose
    slope lies within a tolerance band around tan(theta).
    :param param_list: Param needed to respect the following list:
                       [port_in name: lines, wave_in: int, theta: degrees,
                        deviation: degrees, port_out: lines, port_out: image]
    :return: True if the job executed OK.
    """
    # Positional layout of param_list.
    # noinspection PyPep8Naming
    PORT_IN_POS = 0
    # noinspection PyPep8Naming
    PORT_IN_WAVE = 1
    # noinspection PyPep8Naming
    PORT_IN_THETA = 2
    # noinspection PyPep8Naming
    PORT_IN_DEVIATION = 3
    # noinspection PyPep8Naming
    PORT_OUTPUT_LINE = 4
    # noinspection PyPep8Naming
    PORT_OUTPUT_LINE_IMG = 5
    # verify that the number of parameters are OK.
    if len(param_list) != 6:
        log_error_to_console("LINE FILTERING JOB MAIN FUNCTION PARAM NOK", str(len(param_list)))
        return False
    else:
        # get needed ports
        p_in = get_port_from_wave(name=param_list[PORT_IN_POS], wave_offset=param_list[PORT_IN_WAVE])
        p_out_lines = get_port_from_wave(name=param_list[PORT_OUTPUT_LINE])
        p_out_lines_img = get_port_from_wave(name=param_list[PORT_OUTPUT_LINE_IMG])
        # check if port's you want to use are valid
        if p_in.is_valid() is True:
            try:
                # NOTE(review): 'value' is a slope (tan of theta) while the
                # band below widens it by radians(grade), an angle -- slope
                # and angle units are mixed; confirm this tolerance is intended.
                value = math.tan(math.radians(param_list[PORT_IN_THETA]))
                grade = param_list[PORT_IN_DEVIATION]
                min_value = value - math.radians(grade)
                max_value = value + math.radians(grade)
                line_idx = 0
                # Each row of p_in.arr is a polyline, zero-padded after its last point.
                for line in p_in.arr:
                    start_point = line[0]
                    end_point = [0, 0]
                    idx = 0
                    # A (0, 0) first point marks the end of the valid rows.
                    if line[idx][0] == 0 and line[idx][1] == 0:
                        break
                    # Advance to the last non-zero point of the polyline.
                    while True:
                        if line[idx][0] == 0 and line[idx][1] == 0:
                            break
                        end_point = line[idx]
                        idx += 1
                    # NOTE(review): vertical lines make this denominator zero;
                    # with numpy operands that yields inf/nan plus a runtime
                    # warning rather than an exception -- confirm handling.
                    line_slope = (np.abs(int(end_point[0]) - int(start_point[0])) / (int(end_point[1]) - int(start_point[1])))
                    # line_slope = (end_point[0] - start_point[0]) / (end_point[1] - start_point[1])
                    if min_value < line_slope < max_value:
                        p_out_lines.arr[line_idx][:] = line
                        # Rasterize the kept line's points into the output image.
                        for el in p_out_lines.arr[line_idx]:
                            p_out_lines_img.arr[el[0], el[1]] = 255
                        line_idx += 1
                p_out_lines.set_valid()
                p_out_lines_img.set_valid()
            except BaseException as error:
                log_error_to_console("LINE FILTERING JOB NOK: ", str(error))
                pass
        else:
            return False
        return True
############################################################################################################################################
# Job create functions
############################################################################################################################################
def do_line_theta_filtering_job(port_input_name: str, theta_value: int, deviation_theta: float = 10,
                                nr_lines: int = 50, nr_pt_line: int = 50,
                                port_output: str = None, port_img_output: str = None,
                                level: PYRAMID_LEVEL = PYRAMID_LEVEL.LEVEL_0, wave_offset: int = 0) -> Tuple[str, str]:
    """
    Filters lines accordingly to a theta value. 0 for horizontal
    :param port_input_name: One or several input ports
    :param theta_value: theta value
    :param deviation_theta: accepted deviation of theta value
    :param nr_lines: number of lines to keep at the end
    :param nr_pt_line: number of points per lines to keep at the end
    :param port_output: port of lines; auto-generated from the other arguments when None
    :param port_img_output: port of image of lines kept; auto-generated when None
    :param level: Level of input port, please correlate with each input port name parameter
    :param wave_offset: wave of input port, please correlate with each input port name parameter
    :return: Name of output port or ports
    """
    input_port_name = transform_port_name_lvl(name=port_input_name, lvl=level)
    # Bug fix: generate each default name independently. The original only
    # checked port_img_output and would clobber a caller-supplied port_output
    # (or crash on transform_port_name_lvl(None)) when port_img_output was
    # passed explicitly.
    if port_output is None:
        port_output = '{name}_{theta}_{theta_value}_{theta_procent}_{theta_p_value}_{Input}'.format(name='LINE_FILTERING',
                                                                                                    theta='T', theta_value=theta_value.__str__().replace('.', '_'),
                                                                                                    theta_procent='D', theta_p_value=deviation_theta,
                                                                                                    Input=port_input_name)
    if port_img_output is None:
        port_img_output = '{name}_{theta}_{theta_value}_{theta_procent}_{theta_p_value}_{Input}'.format(name='LINE_FILTERING_IMG',
                                                                                                        theta='T', theta_value=theta_value.__str__().replace('.', '_'),
                                                                                                        theta_procent='D', theta_p_value=deviation_theta,
                                                                                                        Input=port_input_name)
    output_port_line_img_name = transform_port_name_lvl(name=port_img_output, lvl=level)
    output_port_line_img_size = transform_port_size_lvl(lvl=level, rgb=False)
    port_line_output_name = transform_port_name_lvl(name=port_output, lvl=level)
    input_port_list = [input_port_name]
    main_func_list = [input_port_name, wave_offset, theta_value, deviation_theta, port_line_output_name, output_port_line_img_name]
    # Lines port: (nr_lines, nr_pt_line, 2) array; image port: level-sized byte image.
    output_port_list = [(port_line_output_name, "(" + str(nr_lines) + "," + str(nr_pt_line) + ", 2)", 'H', False),
                        (output_port_line_img_name, output_port_line_img_size, 'B', True)]
    job_name = job_name_create(action='LINE FILTERING', input_list=input_port_list, wave_offset=[wave_offset], level=level)
    d = create_dictionary_element(job_module=get_module_name_from_file(__file__),
                                  job_name=job_name,
                                  input_ports=input_port_list,
                                  init_func_name='init_func_global', init_func_param=None,
                                  main_func_name='main_func_line_filtering',
                                  main_func_param=main_func_list,
                                  output_ports=output_port_list)
    jobs_dict.append(d)
    return port_output, port_img_output
# Module is normally driven by the job framework; nothing runs stand-alone.
if __name__ == "__main__":
    # If you want to run something stand-alone
    pass
import unittest
from lib.ui.login_page import LoginPage
from lib.utils import create_driver
from selenium.webdriver.common.keys import Keys
import pytest
class TestComponents(unittest.TestCase):
    """Smoke test: log in through the UI and verify the resulting page title."""

    def setUp(self):
        # Fresh driver and page object per test.
        self.driver = create_driver.get_driver_instance()
        self.login = LoginPage(self.driver)

    def tearDown(self):
        # NOTE(review): close() only closes the current window; quit() would
        # also end the WebDriver session -- confirm which is intended.
        self.driver.close()

    def test_framework_components(self):
        self.login.wait_for_login_page_to_load()
        self.login.get_username_textbox().send_keys('admin')
        self.login.get_password_textbox().send_keys('pass')
        self.login.get_login_button().click()
        actual_title = self.driver.title
        expected_title = 'actiTIME - Login'
        # Bug fix: the old bare assert carried the misleading message 'passed'
        # (a message is only ever shown on FAILURE); assertEqual reports both
        # values when the titles differ.
        self.assertEqual(actual_title, expected_title)
|
import cv2
import numpy as np
class matches(object):
    """Feature-match container for an image pair.

    Matches the descriptors of img1 against img2 (brute-force Hamming for
    binary descriptors, FLANN + Lowe ratio test otherwise) and exposes the
    matched point coordinates plus simple rotation utilities.
    """

    def __init__(self, img1, img2, K, params):
        self.img1 = img1
        self.img2 = img2
        self.params = params
        self.matches = self._getMatches()
        self.matchPoints = self._sortMatchPoints()
        self.K = K
        # Initial camera pose: identity rotation, zero translation.
        self.P = np.hstack((np.eye(3), np.zeros((3, 1))))

    def _is_binary_detector(self):
        # These keypoint types produce binary descriptors (Hamming metric).
        return self.params['kp'] in ('orb', 'brisk', 'freak', 'lucid')

    def _getMatches(self):
        """Match descriptors; list of DMatch (binary) or [DMatch] pairs (float)."""
        if self._is_binary_detector():
            bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
            # Keep only close matches in Hamming distance.
            return [m for m in bf.match(self.img1.descriptors, self.img2.descriptors)
                    if m.distance < 40]
        index_params = dict(algorithm=0, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        # Lowe's ratio test against the second-nearest neighbour.
        return [[m] for m, n in flann.knnMatch(self.img1.descriptors, self.img2.descriptors, k=2)
                if m.distance < 0.7 * n.distance]

    def _sortMatchPoints(self):
        """Return matched keypoint coordinates as Nx1x2 float32 arrays per image."""
        if self._is_binary_detector():
            img1_pts = np.float32([self.img1.keypoints[m.queryIdx].pt for m in self.matches]).reshape(-1, 1, 2)
            img2_pts = np.float32([self.img2.keypoints[m.trainIdx].pt for m in self.matches]).reshape(-1, 1, 2)
        else:
            img1_pts = np.float32([self.img1.keypoints[m[0].queryIdx].pt for m in self.matches]).reshape(-1, 1, 2)
            img2_pts = np.float32([self.img2.keypoints[m[0].trainIdx].pt for m in self.matches]).reshape(-1, 1, 2)
        return {'img1': img1_pts, 'img2': img2_pts}

    def drawMatches(self):
        """Display the matches in a window; returns False when there are none."""
        if self.matches is None:  # fix: identity check instead of '== None'
            return False
        if self._is_binary_detector():
            img = cv2.drawMatches(self.img1.img, self.img1.keypoints, self.img2.img, self.img2.keypoints, self.matches, None, flags=2)
        elif self.params['kp'] in ('sift', 'surf', 'kaze', 'daisy'):
            img = cv2.drawMatchesKnn(self.img1.img, self.img1.keypoints, self.img2.img, self.img2.keypoints, self.matches, None, flags=2)
        # NOTE(review): an unrecognized 'kp' value leaves 'img' unbound here
        # (NameError), as in the original -- confirm all kp values are covered.
        cv2.imshow("Matches", img)
        cv2.waitKey()
        cv2.destroyAllWindows()

    def eulerAngles(self):
        """Decompose self.P's rotation block into (omega, phi, kappa) angles."""
        P = self.P
        p = np.arcsin(P[0, 2])
        o = np.arctan2(-P[1, 2], P[2, 2])
        k = np.arctan2(-P[0, 1], P[0, 0])
        return np.array([o, p, k])

    def rMatrix(self, o, p, k):
        """Build the omega-phi-kappa rotation matrix from the three angles.

        Bug fix: the original computed sinp and sink from ``o`` instead of
        ``p`` and ``k``, producing a wrong (non-orthogonal) matrix whenever
        the angles differed.
        """
        coso, sino = np.cos(o), np.sin(o)
        cosp, sinp = np.cos(p), np.sin(p)
        cosk, sink = np.cos(k), np.sin(k)
        r11 = cosp * cosk
        r12 = -cosp * sink
        r13 = sinp
        r21 = coso * sink + sino * sinp * cosk
        r22 = coso * cosk - sino * sinp * sink
        r23 = -sino * cosp
        r31 = sino * sink - coso * sinp * cosk
        r32 = sino * cosk + coso * sinp * sink
        r33 = coso * cosp
        return np.array([[r11, r12, r13], [r21, r22, r23], [r31, r32, r33]])
|
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
%matplotlib inline
import os
import gc
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, f1_score
import seaborn as sns
from google.colab import drive
# Mount Google Drive so the FER2013 csv below can be read (Colab-only code).
drive.mount('/content/drive')
data_fer = pd.read_csv('/content/drive/MyDrive/DATASET/fer2013.csv')
data_fer.head()
# FER2013 class index -> emotion name.
idx_to_emotion_fer = {0:"Angry", 1:"Disgust", 2:"Fear", 3:"Happy", 4:"Sad", 5:"Surprise", 6:"Neutral"}
# Split by the csv's Usage column into train / public-test / private-test and
# decode each space-separated 'pixels' string into a 48x48 uint8 image.
X_fer_train, y_fer_train = np.rollaxis(data_fer[data_fer.Usage == "Training"][["pixels", "emotion"]].values, -1)
X_fer_train = np.array([np.fromstring(x, dtype="uint8", sep=" ") for x in X_fer_train]).reshape((-1, 48, 48))
y_fer_train = y_fer_train.astype('int8')
X_fer_test_public, y_fer_test_public = np.rollaxis(data_fer[data_fer.Usage == "PublicTest"][["pixels", "emotion"]].values, -1)
X_fer_test_public = np.array([np.fromstring(x, dtype="uint8", sep=" ") for x in X_fer_test_public]).reshape((-1, 48, 48))
y_fer_test_public = y_fer_test_public.astype('int8')
X_fer_test_private, y_fer_test_private = np.rollaxis(data_fer[data_fer.Usage == "PrivateTest"][["pixels", "emotion"]].values, -1)
X_fer_test_private = np.array([np.fromstring(x, dtype="uint8", sep=" ") for x in X_fer_test_private]).reshape((-1, 48, 48))
y_fer_test_private = y_fer_test_private.astype('int8')
from keras.models import Model
from keras.layers import Flatten, Dense, Input, Dropout, Conv2D, MaxPool2D, BatchNormalization
from keras.utils import to_categorical, plot_model
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
BATCH_SIZE=128
# Add the single grayscale channel dimension expected by Conv2D.
X_train = X_fer_train.reshape((-1, 48, 48, 1))
X_val = X_fer_test_public.reshape((-1, 48, 48, 1))
X_test = X_fer_test_private.reshape((-1, 48, 48, 1))
# One-hot encode the 7 emotion classes.
y_train = to_categorical(y_fer_train,7)
y_val = to_categorical(y_fer_test_public,7)
y_test = to_categorical(y_fer_test_private,7)
# Light geometric augmentation for training only; validation and test
# generators apply no transformation.
train_datagen = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=.1,
    horizontal_flip=True,
)
val_datagen = ImageDataGenerator(
    featurewise_center=False,
    featurewise_std_normalization=False,
)
train_datagen.fit(X_train)
val_datagen.fit(X_train)
train_flow = train_datagen.flow(X_train, y_train, batch_size=BATCH_SIZE)
# shuffle=False keeps the prediction order aligned with flow.y for evaluation.
val_flow = val_datagen.flow(X_val, y_val, batch_size=BATCH_SIZE, shuffle=False)
test_flow = val_datagen.flow(X_test, y_test, batch_size=1, shuffle=False)
DROPOUT_RATE = 0.3
CONV_ACTIVATION = "relu"
# Convolutional trunk: stacks of (Conv2D + BatchNorm), each stack followed by
# 2x2 max-pooling and dropout, widening from 64 to 512 filters.
img_in = Input(shape=(48,48,1))
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(img_in)
X = BatchNormalization()(X)
X = Conv2D(64, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(128, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(256, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = Conv2D(512, (3, 3), padding='same', kernel_initializer='he_normal', activation=CONV_ACTIVATION)(X)
X = BatchNormalization()(X)
X = MaxPool2D((2, 2), strides=(2, 2), padding='same')(X)
X = Dropout(DROPOUT_RATE)(X)
# Classifier head: three dense layers with dropout, softmax over 7 emotions.
X = Flatten()(X)
X = Dense(2048, activation="relu")(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(1024, activation="relu")(X)
X = Dropout(DROPOUT_RATE)(X)
X = Dense(512, activation="relu")(X)
X = Dropout(DROPOUT_RATE)(X)
out = Dense(7, activation='softmax')(X)
model = Model(inputs=img_in, outputs=out)
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.001), metrics=['categorical_accuracy'])
model.summary()
plot_model(model, show_shapes=True, show_layer_names=False)
# Stop when validation accuracy plateaus; checkpoint best-loss and
# best-accuracy weights separately; halve the LR on accuracy plateaus.
early_stopping = EarlyStopping(monitor='val_categorical_accuracy', mode='max', verbose=1, patience=20)
checkpoint_loss = ModelCheckpoint('best_loss_weights.h5', verbose=1, monitor='val_loss',save_best_only=True, mode='min')
checkpoint_acc = ModelCheckpoint('best_accuracy_weights.h5', verbose=1, monitor='val_categorical_accuracy',save_best_only=True, mode='max')
lr_reduce = ReduceLROnPlateau(monitor='val_categorical_accuracy', mode='max', factor=0.5, patience=5, min_lr=1e-7, cooldown=1, verbose=1)
history = model.fit_generator(
    train_flow,
    steps_per_epoch= X_train.shape[0] // BATCH_SIZE,
    epochs=125,
    validation_data=val_flow,
    validation_steps = X_val.shape[0] // BATCH_SIZE,
    callbacks=[early_stopping, checkpoint_acc, checkpoint_loss, lr_reduce]
)
# summarize history for accuracy
plt.plot(history.history['categorical_accuracy'])
plt.plot(history.history['val_categorical_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
def evaluate_model(weights_path):
    """Load weights from *weights_path*, predict on the private test flow and
    print a classification report plus a normalized confusion-matrix heatmap."""
    model.load_weights(weights_path)
    predictions = model.predict_generator(test_flow, steps=X_test.shape[0])
    predicted_classes = np.argmax(predictions, axis=1)
    # test_flow is built with shuffle=False, so flow.y stays aligned.
    true_classes = np.argmax(test_flow.y, axis=1)
    print(classification_report(true_classes, predicted_classes))
    matrix = confusion_matrix(true_classes, predicted_classes, normalize="true")
    emotion_labels = idx_to_emotion_fer.values()
    _, axis = plt.subplots(figsize=(8, 6))
    axis = sns.heatmap(matrix, annot=True, cmap='YlGnBu',
                       xticklabels=emotion_labels,
                       yticklabels=emotion_labels)
    plt.show()
evaluate_model('best_loss_weights.h5')
evaluate_model('best_accuracy_weights.h5')
!pip install -q tensorflow-model-optimization
import tensorflow_model_optimization as tfmot
# Start pruning from the best-accuracy checkpoint.
model.load_weights('best_accuracy_weights.h5')
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
# Fine-tune prunned model on a couple of epochs,
# because the model may loose some of the learned features
pruning_epochs = 2
validation_split = X_val.shape[0] / X_train.shape[0]
num_images = X_train.shape[0] * (1 - validation_split)
# Ramp sparsity from 50% to 80% over the schedule's step range.
end_step = np.ceil(num_images / BATCH_SIZE).astype(np.int32) *125
pruning_params = {
    'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(initial_sparsity=0.5,
                                                             final_sparsity=0.8,
                                                             begin_step=0,
                                                             end_step=end_step)
}
model_for_pruning = prune_low_magnitude(model, **pruning_params)
model_for_pruning.compile(loss='categorical_crossentropy',
                          optimizer=Adam(0.001),
                          metrics=['categorical_accuracy'])
print(model_for_pruning.summary())
import tempfile
logdir = tempfile.mkdtemp()
callbacks = [
    tfmot.sparsity.keras.UpdatePruningStep(),
    tfmot.sparsity.keras.PruningSummaries(log_dir=logdir),
]
model_for_pruning.fit_generator(
    train_flow,
    steps_per_epoch= X_train.shape[0] // BATCH_SIZE,
    epochs=pruning_epochs,
    validation_data=val_flow,
    validation_steps = X_val.shape[0] // BATCH_SIZE,
    callbacks=callbacks
)
# NOTE(review): this saves 'model', not 'model_for_pruning' -- confirm the
# pruned weights are reflected in 'model' before relying on this file.
model.save('pruned_model.h5')
evaluate_model('pruned_model.h5')
import os
import tensorflow as tf
# Strip the pruning wrappers so the saved model is small and standalone.
compressed_model = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
tf.keras.models.save_model(compressed_model, 'compressed_model.h5', include_optimizer=False)
# Compare on-disk size of the stripped model against the pre-pruning weights.
pruned_model_size = os.path.getsize('compressed_model.h5')
pruned_model_size_mb = pruned_model_size // 1024 // 1024
best_acc_model_size = os.path.getsize('best_accuracy_weights.h5')
best_acc_model_size_mb = best_acc_model_size // 1024 // 1024
improvement = int((1 - pruned_model_size / best_acc_model_size) * 100)
print(f'Pruned model size is {pruned_model_size_mb} Mbytes')
print(f'Pre-pruning model size is {best_acc_model_size_mb} Mbytes')
print(f'Improvement compared to pre-pruning model is {improvement}%')
import json
compressed_model_json = compressed_model.to_json()
with open('compressed_model.json', 'w') as f:
    f.write(compressed_model_json)
!pip install tensorflowjs
import tensorflowjs as tfjs
tfjs.converters.save_keras_model(compressed_model, 'compressed_model_js.json')
# NOTE(review): this overwrites the 'compressed_model.h5' saved above with
# weights from the full 'model' object -- confirm this is intended.
model.save_weights("compressed_model.h5")
|
class Node(object):
    """A graph vertex identified by its (string) name."""

    def __init__(self, name):
        # Coerce whatever the caller passes into a string identifier.
        self.name = str(name)

    def getName(self):
        """Return the node's name."""
        return self.name

    def __str__(self):
        return self.name
class Edge(object):
    """A directed edge from a source node to a destination node."""

    def __init__(self, src, dest):
        self.src = src
        self.dest = dest

    def getSource(self):
        """Return the edge's source node."""
        return self.src

    def getDestination(self):
        """Return the edge's destination node."""
        return self.dest

    def __str__(self):
        return '{0}->{1}'.format(self.src, self.dest)
class WeightedEdge(Edge):
    """An Edge carrying a total distance and an outdoor-distance weight."""

    def __init__(self, src, dest, dist, out):
        # Delegate src/dest storage to Edge instead of duplicating its
        # __init__ body, so the two classes cannot drift apart.
        Edge.__init__(self, src, dest)
        self.dist = dist
        self.out = out

    def getTotalDistance(self):
        """Return the total length of the edge."""
        return self.dist

    def getOutdoorDistance(self):
        """Return the portion of the edge that is outdoors."""
        return self.out

    def __str__(self):
        return str(self.src) + '->'+str(self.dest) + '('+str(self.dist)+\
               ', '+str(self.out)+ ')'
class Digraph(object):
    """A directed graph: a node set plus src -> [dest, ...] adjacency lists."""

    def __init__(self):
        self.nodes = set()
        # Adjacency map: node -> list of destination nodes.
        self.edges = {}

    def addNode(self, node):
        """Register *node*; raise ValueError if it was already added."""
        if node in self.nodes:
            raise ValueError('Duplicate node')
        self.nodes.add(node)
        self.edges[node] = []

    def addEdge(self, edge):
        """Record a directed edge; both endpoints must already be nodes."""
        src, dest = edge.getSource(), edge.getDestination()
        if src not in self.nodes or dest not in self.nodes:
            raise ValueError('Node not in graph')
        self.edges[src].append(dest)

    def childrenOf(self, node):
        """Return the list of direct successors of *node*."""
        return self.edges[node]

    def hasNode(self, node):
        """True when *node* has been added to this graph."""
        return node in self.nodes

    def __str__(self):
        lines = ['{0}->{1}'.format(src, dest)
                 for src in self.edges
                 for dest in self.edges[src]]
        return '\n'.join(lines)
class Graph(Digraph):
    """An undirected graph: every edge is mirrored in both directions."""

    def addEdge(self, edge):
        Digraph.addEdge(self, edge)
        reverse_edge = Edge(edge.getDestination(), edge.getSource())
        Digraph.addEdge(self, reverse_edge)
class WeightedDigraph(Digraph):
    """Digraph whose edges carry (total distance, outdoor distance) weights."""

    def addEdge(self, edge):
        """Store the edge as a (dest, total, outdoor) triple under its source."""
        src, dest = edge.getSource(), edge.getDestination()
        if src not in self.nodes or dest not in self.nodes:
            raise ValueError('Node not in graph')
        # a triple tuple
        self.edges[src].append((dest, edge.getTotalDistance(), edge.getOutdoorDistance()))

    def childrenOf(self, node):
        """Return destination nodes only, dropping the weight fields."""
        return [triple[0] for triple in self.edges[node]]

    def __str__(self):
        parts = []
        for src in self.edges:
            for dest, dist, out in self.edges[src]:
                parts.append('%s->%s (%s, %s)' % (src, dest, float(dist), float(out)))
        return '\n'.join(parts)
def printPath(path):
    """Return the nodes of *path* rendered as 'n0->n1->...->nk'.

    Replaces the original index loop with quadratic string concatenation
    by a single '->'.join over the stringified nodes.

    :param path: sequence of nodes (anything with a sensible str())
    :return: joined string; '' for an empty path
    """
    return '->'.join(str(node) for node in path)
|
import re
# NOTE(review): 'pattern' is only used by the commented-out scan below; the
# word_frequencies() function further down does not use it.
pattern = re.compile("<>")
#for i, line in enumerate(open('test.txt')):
#    for match in re.finditer(pattern, line):
#        print 'Found on line %s: %s' % (i+1, match.groups())
def word_frequencies(file_list):
    """
    Count annotation tokens (whitespace-trimmed words containing '<')
    across all the given files.

    Fixes: files are now closed deterministically via 'with' (the original
    leaked the handle on a read error), the manual readline loop becomes
    plain line iteration, and the redundant '.rstrip().strip()' is a single
    '.strip()'.

    :param file_list: iterable of file names to scan
    :return: dict mapping stripped token -> number of occurrences
    """
    result = {}
    for name in file_list:
        with open(name, 'r') as handle:
            for line in handle:
                # Keep split(' ') (not split()) to preserve the original
                # tokenisation on tabs and runs of spaces.
                for word in line.split(' '):
                    if "<" in word:
                        token = word.strip()
                        result[token] = result.get(token, 0) + 1
    return result
#
#s = (word_frequencies(file_list))
#sorted_by_value = sorted(s.items(), key=lambda kv: kv[1], reverse = True)
#print (s)
#for item in sorted_by_value:
#    print (item + "\n")
# Input files; the trailing comment on each is its frequency rank.
descriptions = ["money_spent_he.txt", # freq 1
                "num_top_unis.txt", # freq 2
                "gender_pay_gap.txt", # freq 3
                "women_study_department.txt", # freq 4
                "women_work_sector.txt", # freq 5
                "obesity.txt", # freq 6
                "young_evenings.txt", # freq 7
                "student_choice_study.txt", # freq 8
                "median_salary_se.txt", # freq 9
                "median_salary_women.txt", # freq 10
                ]
#files = ["gender_pay_gap.txt"]
#
#
#files = ["gender_pay_gap.txt"]
# Count annotation tags across all description files and write them to disk,
# most frequent first, one 'tag = count' line each.
s1 = (word_frequencies(descriptions))
#s2 = (word_frequencies(files))
sorted_by_value = sorted(s1.items(), key=lambda kv: kv[1], reverse = True)
#sorted_by_value = sorted(s2.items(), key=lambda kv: kv[1], reverse = True)
with open('tags_freq_10.txt', 'w') as f:
#with open('gender_pay_gap_labels.txt', 'w') as f:
    for item in sorted_by_value:
        f.write('%s = %d \n' % item)
        #f.write("\n")
|
from OpenAPI.Data.final_data.tenant_data import *
@ddt.ddt
class test_case(unittest.TestCase):
    """Data-driven API tests for the tenant endpoints (add / update / list /
    get / delete), with case tables loaded from the tenant data module."""
    # Runs before each test case.
    def setUp(self):
        pass
    # Runs after each test case.
    def tearDown(self):
        pass
    # Runs once before all tests in the class.
    @classmethod
    def setUpClass(cls):
        # Delete all tenants (only clears the tenants of room 1001).
        del_all_tenant(room_1001)
        pass
    # Runs once after all tests in the class.
    @classmethod
    def tearDownClass(cls):
        pass
    # Add tenant.
    @ddt.data(
        *(get_cases(add_tenant_cmd))
    )
    @ddt.unpack
    def test_01_add_tenant(self, url, name, method, param, check, do=0):
        ret = run_case(url, name, method, param)
        self.assertEqual(check, ret['ErrNo'])
        # '-S-' in a case name appears to mark success cases; clean up the
        # tenant they created so later cases start from an empty room.
        if '-S-' in name:
            del_all_tenant(room_1001)
        return
    # Update tenant.
    @ddt.data(
        *(get_cases(update_tenant_cmd))
    )
    @ddt.unpack
    def test_02_update_tenant(self, url, name, method, param, check, do=0):
        add_tenant_init()
        ret = run_case(url, name, method, param)
        self.assertEqual(check, ret['ErrNo'])
        return
    # List tenants by home id.
    @ddt.data(
        *(get_cases(list_tenants_by_homeid_cmd))
    )
    @ddt.unpack
    def test_03_list_tenants_by_homeid(self, url, name, method, param, check, do=0):
        add_tenant_init()
        ret = run_case(url, name, method, param)
        self.assertEqual(check, ret['ErrNo'])
        return
    # Get tenant info by room_id.
    @ddt.data(
        *(get_cases(get_tenant_by_roomid_cmd))
    )
    @ddt.unpack
    def test_04_get_tenant_by_roomid(self, url, name, method, param, check, do=0):
        add_tenant_init()
        ret = run_case(url, name, method, param)
        self.assertEqual(check, ret['ErrNo'])
        return
    # Delete tenant.
    @ddt.data(
        *(get_cases(delete_tenant_cmd))
    )
    @ddt.unpack
    def test_05_delete_tenant(self, url, name, method, param, check, do=0):
        add_tenant_init()
        ret = run_case(url, name, method, param)
        self.assertEqual(check, ret['ErrNo'])
        return
# Script entry point: run the ddt-generated test cases.
if __name__ == '__main__':
    unittest.main()
|
#!/usr/bin/env python3
"""
Display some information like pie charts...etc... in order to analyse the
results of the evaluation part of the algorithm.
"""
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import yaml
import numpy as np
import preprocessing as pp
# Constants
# ==================================================
# NOTE(review): small global font, presumably so the dense chart labels fit
# at the dpi=1000 figures saved below -- confirm before changing.
matplotlib.rcParams['font.size'] = 5.0
# Definitions
# ==================================================
def pie_chart_support_distribution(classification_report, title, folder):
    """
    Plot a pie chart which describes the support (sample-count) distribution
    of each class and save it as a PNG under *folder*.
    :param classification_report: text report from sklearn's
        classification_report; parsed by slice_classification_report
    :param title: chart title, also used (spaces -> underscores) as file name
    :param folder: destination folder for the saved figure
    """
    classes, toPlot, support = slice_classification_report(
        classification_report)
    # Don't take into account the last column which is the total number
    # of each class
    labels = classes[0:len(classes)-1]
    sizes = support[0:len(classes)-1]
    fig1, ax1 = plt.subplots()
    patches, texts, _ = ax1.pie(sizes, labels=labels, autopct='%1.1f%%',
                                startangle=90)
    # Equal aspect ratio ensures that pie is drawn as a circle.
    ax1.axis('equal')
    ax1.set_title(title)
    ax1.legend(patches, labels, loc="best")
    plt.savefig(folder+"/"+title.replace(" ", "_")+".png", format="png",
                dpi=1000)
def bar_chart_classification_report(classification_report, title, folder):
    """
    Plot a grouped bar chart (precision / recall / f1-score per class) that
    sums up the classification report of the scikit learn tool, and save it
    as a PNG under *folder*.
    :param classification_report: text report from sklearn's
        classification_report; parsed by slice_classification_report
    :param title: chart title, also used (spaces -> underscores) as file name
    :param folder: destination folder for the saved figure
    """
    classes, toPlot, support = slice_classification_report(
        classification_report)
    # One group of bars per metric: precision, recall, f1-score.
    N = 3
    bar_width = 0.05
    ind = np.arange(N)
    fig, ax = plt.subplots()
    # Enumerate over each class except the last one which represent the average
    # and total
    # NOTE(review): despite the comment above, the loop below also includes
    # the trailing avg/total row -- confirm which was intended.
    bars = []
    for i in range(len(classes)):
        bar_i = ax.bar(ind + i * bar_width, toPlot[i], bar_width)
        bars.append(bar_i)
    # Add some text for labels, title and axes ticks
    ax.set_ylabel("Percent")
    ax.set_title(title)
    # NOTE(review): ind + bar_width / len(classes) does not centre the ticks
    # under each bar group (that would be ind + bar_width * len(classes) / 2)
    # -- confirm the intended placement.
    ax.set_xticks(ind + bar_width / len(classes))
    ax.set_xticklabels(("Precision", "Recall", "F1-score"))
    ax.legend(bars, classes, loc="best")
    plt.savefig(folder+"/"+title.replace(" ", "_")+".png", format="png",
                dpi=1000)
def slice_classification_report(classification_report):
    """
    Parse a plain-text scikit-learn classification report (the original
    docstring claimed this function plots -- it only parses).
    Extension based on https://stackoverflow.com/a/31689645/395857

    :param classification_report: string from sklearn.metrics.classification_report
    :return: (classes, plotMat, support) where classes is the list of class
        labels plus the trailing 'avg/total' entry, plotMat holds one
        [precision, recall, f1-score] row per entry and support the counts
    """
    lines = classification_report.split('\n')
    classes = []
    plotMat = []
    support = []
    # Skip the two header lines and the trailing average + blank lines;
    # the average row is handled separately below.
    for line in lines[2: (len(lines) - 2)]:
        t = line.strip().split()
        if len(t) < 2:
            continue  # blank separator line
        classes.append(t[0])
        plotMat.append([float(x) for x in t[1: len(t) - 1]])
        support.append(int(t[-1]))
    # The penultimate line holds the averaged metrics and total support,
    # e.g. 'avg / total   0.63  0.73  0.68   30'.
    t = lines[len(lines) - 2].strip().split()
    classes.append(t[0] + t[1] + t[2])
    plotMat.append([float(x) for x in t[3: len(t) - 1]])
    support.append(int(t[-1]))
    print("\n")
    print("plotMat: {0}".format(plotMat))
    print("support: {0}".format(support))
    return classes, plotMat, support
def display_stat(filepath):
    """
    Display statistics from the SemEval 2016 competition, Task 5, Subtask 1
    dataset: pie charts of how many opinions each sentence carries, for both
    the training and the testing split.
    :param filepath: Path of the dataset SemEval. The path must lead to a
    folder containing both the training and testing sets.
    :type filepath: string
    :return: None; the charts are shown with plt.show() (the original
    docstring's claim of a returned DataFrame was incorrect)
    """
    training_set = pp.parse_XML(filepath+"/train.xml")
    testing_set = pp.parse_XML(filepath+"/test/test_gold.xml")
    # Some opinions concerns various food, drinks...etc... but the opinion
    # is the same while the target differ. So deleting duplicates as the scope
    # of this study does not imply target (OPE in SemEval)
    training_set = training_set.drop_duplicates()
    testing_set = testing_set.drop_duplicates()
    # Count # of opinions for each sentence
    # First value_counts: opinions per sentence; second: how often each
    # opinion-count occurs across sentences.
    count_opinions_train = training_set['sentence_id'].value_counts()
    count_opinions_train = count_opinions_train.value_counts()
    count_opinions_test = testing_set['sentence_id'].value_counts()
    count_opinions_test = count_opinions_test.value_counts()
    # Display pie charts
    count_dict_train = count_opinions_train.to_dict()
    labels = list(count_dict_train.keys())
    sizes = list(count_dict_train.values())
    fig, ax = plt.subplots()
    ax.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
    ax.axis('equal')
    ax.set_title('[TRAIN] Percentage of opinion occurences in a sentence')
    count_dict_test = count_opinions_test.to_dict()
    labels = list(count_dict_test.keys())
    sizes = list(count_dict_test.values())
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, labels=labels, autopct='%1.1f%%', startangle=90)
    ax1.axis('equal')
    ax1.set_title('[TEST] Percentage of opinion occurences in a sentence')
    plt.show()
def display_pie(data, folder):
    """
    Save three pie charts of *data* as PNG files under *folder*: the
    'feature' distribution, the 'polarity' distribution and the combined
    (feature, polarity) distribution.
    :param data: DataFrame with 'feature' and 'polarity' columns
    :param folder: destination folder for the three figures
    """
    # Feature distribution
    # --------------------
    distribution_train = data.groupby('feature').size()
    labels = distribution_train.index.tolist()
    sizes = distribution_train.tolist()
    fig, ax = plt.subplots()
    patches, texts, autotexts = ax.pie(sizes, labels=labels, autopct='%1.1f%%',
                                       startangle=90)
    # Equal aspect ratio ensures that pie is drawn as a circle.
    ax.axis('equal')
    this_folder = folder+"/feature_distribution"
    plt.savefig(this_folder+".png", format="png",
                dpi=1000)
    # Polarity distribution
    # ---------------------
    distribution_train = data.groupby('polarity').size()
    labels = distribution_train.index.tolist()
    sizes = distribution_train.tolist()
    fig, ax = plt.subplots()
    patches, texts, autotexts = ax.pie(sizes, labels=labels, autopct='%1.1f%%',
                                       startangle=90)
    # Equal aspect ratio ensures that pie is drawn as a circle.
    ax.axis('equal')
    this_folder = folder+"/polarity_distribution"
    plt.savefig(this_folder+".png", format="png",
                dpi=1000)
    # Combination distribution
    # ------------------------
    distribution_train = data.groupby(['feature', 'polarity']).size()
    labels = distribution_train.index.tolist()
    sizes = distribution_train.tolist()
    fig, ax = plt.subplots()
    patches, texts, autotexts = ax.pie(sizes, labels=labels, autopct='%1.1f%%',
                                       startangle=90)
    # Equal aspect ratio ensures that pie is drawn as a circle.
    ax.axis('equal')
    this_folder = folder+"/comb_distribution"
    plt.savefig(this_folder+".png", format="png",
                dpi=1000)
def display_distrib_data():
    """Generate distribution pie charts for every dataset variant.

    Covers both domains (restaurant, laptop) for both the aspect and
    the entity parsing of the SemEval Subtask 1 data, writing figures
    under Figures/Data_distribution.
    """
    folder = "Figures/Data_distribution"
    # (subfolder name, whether parse_XML is called with the aspect flag)
    solutions = (("Aspect", True), ("Entity", False))
    domains = ("restaurant", "laptop")
    for solution_name, use_aspect in solutions:
        for domain in domains:
            filepath = "../data/SemEval/Subtask1/" + domain
            if use_aspect:
                training_set = pp.parse_XML(filepath + "/train.xml", True)
                testing_set = pp.parse_XML(filepath + "/test/test_gold.xml", True)
            else:
                training_set = pp.parse_XML(filepath + "/train.xml")
                testing_set = pp.parse_XML(filepath + "/test/test_gold.xml")
            training_set = training_set.drop_duplicates()
            testing_set = testing_set.drop_duplicates()
            display_pie(training_set,
                        folder + "/Train/" + solution_name + "/" + domain)
            display_pie(testing_set,
                        folder + "/Test/" + solution_name + "/" + domain)
def slot3_accuracy():
    """Print per-aspect and mean polarity accuracy (SemEval slot 3).

    Reads predictions from a hard-coded CSV and, for each aspect
    ('feature'), reports the fraction of rows whose predicted polarity
    matches the gold polarity, plus the unweighted mean over aspects.
    """
    # Filepath for restaurant domain predictions
    filepath = "runs/aspects/word2vec_200-epochs/1501773565/predictions.csv"
    # pd.DataFrame.from_csv was deprecated and removed in pandas 1.0;
    # read_csv with the first column as index is the documented replacement.
    predictions = pd.read_csv(filepath, index_col=0)
    predictions = predictions[['text', 'feature', 'polarity', 'pred_polarity']]
    acc = _accuracy_by_feature(predictions)
    print("Slot 3 - Accuracy measure :")
    for key, value in acc.items():
        print(key + " : " + str(value))


def _accuracy_by_feature(predictions):
    """Return {aspect: accuracy, ..., 'mean': mean accuracy} for a frame.

    ``predictions`` must contain 'feature', 'polarity' and 'pred_polarity'
    columns.  The mean is the unweighted average over aspects.
    """
    acc = {}
    # groupby iteration already yields each per-aspect sub-frame; the
    # original redundantly re-fetched it with get_group(key).
    for key, group in predictions.groupby('feature'):
        correct = (group.polarity == group.pred_polarity).sum()
        acc[key] = float(correct) / len(group)
    # Mean over the per-aspect accuracies (computed before 'mean' is added).
    acc["mean"] = float(sum(acc.values())) / len(acc)
    return acc
if __name__ == '__main__':
    # Load the YAML configuration.  safe_load avoids arbitrary Python
    # object construction; yaml.load without an explicit Loader is
    # deprecated/unsafe since PyYAML 5.1.
    with open("config.yml", 'r') as ymlfile:
        cfg = yaml.safe_load(ymlfile)

    # Parameters
    # ==================================================
    # Eval Parameters
    tf.flags.DEFINE_boolean("display_stat", False,
                            "Display statistics of SemEval dataset")
    tf.flags.DEFINE_boolean("slot3", False,
                            "Display accuracy following the accuracy measure "
                            "of SemEval competition")
    FLAGS = tf.flags.FLAGS
    # NOTE(review): _parse_flags() is a private TF1 API; flags are normally
    # parsed lazily on first attribute access — confirm the installed TF
    # version still supports this call before removing it.
    FLAGS._parse_flags()

    dataset_filepath_REST = "../data/SemEval/Subtask1/restaurant"
    dataset_filepath_LAPT = "../data/SemEval/Subtask1/laptop"
    if FLAGS.display_stat:
        display_stat(dataset_filepath_REST)
        display_stat(dataset_filepath_LAPT)
    if FLAGS.slot3:
        slot3_accuracy()
|
from collections import defaultdict
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import models
from django.utils.functional import cached_property
from django.utils.text import slugify
from django.utils.translation import gettext_lazy as _
from modelcluster.fields import ParentalKey
from wagtail.admin.panels import (
FieldPanel,
HelpPanel,
InlinePanel,
MultiFieldPanel,
ObjectList,
TabbedInterface,
)
from wagtail.fields import RichTextField, StreamField
from wagtail.images import get_image_model_string
from wagtail.models import (
Collection,
GroupCollectionPermission,
GroupPagePermission,
Orderable,
Page,
)
from wagtail.search import index
from rca.api_content.content import CantPullFromRcaApi, pull_related_students
from rca.people.filter import SchoolCentreDirectorateFilter
from rca.people.formatters import format_research_highlights
from rca.people.utils import get_staff_research_projects, get_student_research_projects
from rca.programmes.models import ProgrammePage
from rca.research.models import ResearchCentrePage
from rca.schools.models import SchoolPage
from rca.users.models import User
from rca.utils.blocks import AccordionBlockWithTitle, GalleryBlock, LinkBlock
from rca.utils.filter import TabStyleFilter
from rca.utils.models import BasePage, SluggedTaxonomy
from .admin_forms import StudentPageAdminForm
from .utils import (
StudentPageInlinePanel,
StudentPagePromoteTab,
StudentPageSettingsTab,
get_area_linked_filters,
)
# PerUserTabbedInterface,
# Rich-text features shared by the StudentPage RichTextFields below.
# NOTE(review): the chained assignment also binds a stray module-level
# name ``features`` — looks accidental, but it is kept in case another
# module imports it; confirm before removing.
STUDENT_PAGE_RICH_TEXT_FEATURES = features = ["bold", "italic", "link"]
class AreaOfExpertise(models.Model):
    """Taxonomy term describing an area of expertise for staff/students."""

    title = models.CharField(max_length=128)
    slug = models.SlugField(blank=True)

    def __str__(self):
        """Human-readable label used in the admin and string contexts."""
        return self.title

    def save(self, *args, **kwargs):
        """Regenerate the slug from the title on every save."""
        self.slug = slugify(self.title)
        super().save(*args, **kwargs)
class Directorate(models.Model):
    """Taxonomy term for a directorate, with an intranet mapping slug."""

    title = models.CharField(max_length=128)
    slug = models.SlugField(blank=True)
    intranet_slug = models.SlugField(
        blank=True,
        help_text="In order to import events and news to the intranet and relate them to this taxonomy, this \
            slug value should match the value of the slug on the Category page on the intranet",
    )

    def __str__(self):
        """Human-readable label used in the admin and string contexts."""
        return self.title

    def save(self, *args, **kwargs):
        """Regenerate ``slug`` from the title; ``intranet_slug`` is manual."""
        self.slug = slugify(self.title)
        super().save(*args, **kwargs)
class StaffRole(Orderable):
    """An orderable role held by a staff member.

    The role is associated with either an internal ProgrammePage or a
    free-text ``custom_programme`` — never both (enforced in clean()).
    """

    role = models.CharField(max_length=128)
    programme = models.ForeignKey(
        "programmes.ProgrammePage",
        on_delete=models.CASCADE,
        related_name="related_programme",
        null=True,
        blank=True,
    )
    custom_programme = models.CharField(
        max_length=128,
        help_text=_("Specify a custom programme page here if one does not exist"),
        blank=True,
    )
    page = ParentalKey("StaffPage", related_name="roles")

    def clean(self):
        """Reject entries that set both a programme page and a custom programme."""
        if self.programme and self.custom_programme:
            raise ValidationError(
                {
                    "custom_programme": [
                        _("Please specify only a programme page, or a custom programme")
                    ]
                }
            )

    def __str__(self):
        return self.role
class StaffPageAreOfExpertisePlacement(models.Model):
    """Through-model linking a StaffPage to an AreaOfExpertise term.

    NOTE(review): the class name is missing an "a" ("AreOfExpertise");
    renaming would require a migration, so it is left as-is.
    """

    page = ParentalKey("StaffPage", related_name="related_area_of_expertise")
    area_of_expertise = models.ForeignKey(
        AreaOfExpertise,
        on_delete=models.CASCADE,
        related_name="related_staff",
        verbose_name=_("Areas of expertise"),
    )
    panels = [FieldPanel("area_of_expertise")]
class StaffPageDirectorate(models.Model):
    """Through-model linking a StaffPage to a Directorate term."""

    page = ParentalKey("StaffPage", related_name="related_directorates")
    directorate = models.ForeignKey(
        Directorate,
        on_delete=models.CASCADE,
        related_name="related_staff",
        verbose_name=_("Directorates"),
    )
    panels = [FieldPanel("directorate")]
class StaffPageManualRelatedStudents(models.Model):
    """A student related to a StaffPage, entered manually or via a page.

    Editors either pick an internal StudentPage, or fill in the
    name/status/link fields by hand — never both (enforced in clean()).
    """

    page = ParentalKey(
        "people.StaffPage",
        on_delete=models.CASCADE,
        related_name="related_students_manual",
    )
    first_name = models.CharField(max_length=255, blank=True)
    surname = models.CharField(max_length=255, blank=True)
    status = models.CharField(max_length=255, blank=True)
    link = models.URLField(blank=True)
    student_page = models.ForeignKey(
        "people.StudentPage",
        on_delete=models.CASCADE,
        related_name="related_programme",
        null=True,
        blank=True,
    )
    panels = [
        FieldPanel("first_name"),
        FieldPanel("surname"),
        FieldPanel("status"),
        FieldPanel("link"),
        FieldPanel("student_page"),
    ]

    def clean(self):
        """Reject rows that mix a StudentPage with any manual field."""
        if self.student_page and any(
            [self.first_name, self.surname, self.status, self.link]
        ):
            raise ValidationError(
                {
                    "student_page": ValidationError(
                        "Please choose between a page or manually entered data"
                    ),
                }
            )
class StaffPage(BasePage):
    """Profile page for a member of staff.

    Combines locally entered content with related students pulled from
    the legacy RCA API (keyed by ``legacy_staff_id``) and students that
    reference this page as their supervisor.
    """

    template = "patterns/pages/staff/staff_detail.html"
    parent_page_types = ["people.StaffIndexPage"]

    # Honorific, e.g. "Dr"/"Professor"; combined with the name in ``name``.
    staff_title = models.CharField(
        max_length=255, help_text=_("E.G Dr, Professor"), blank=True
    )
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    profile_image = models.ForeignKey(
        get_image_model_string(),
        null=True,
        blank=True,
        related_name="+",
        on_delete=models.SET_NULL,
    )
    email = models.EmailField(blank=True)
    introduction = models.TextField(blank=True)
    body = RichTextField(blank=True)
    research_highlights_title = models.CharField(
        max_length=120,
        blank=True,
        help_text=_(
            "The title value displayed above the Research highlights gallery showing project pages"
        ),
    )
    gallery = StreamField(
        [("slide", GalleryBlock())],
        blank=True,
        verbose_name=_("Gallery"),
        use_json_field=True,
    )
    more_information_title = models.CharField(max_length=80, default="More information")
    more_information = StreamField(
        [("accordion_block", AccordionBlockWithTitle())],
        blank=True,
        verbose_name=_("More information"),
        use_json_field=True,
    )
    related_links = StreamField(
        [("link", LinkBlock())],
        blank=True,
        verbose_name="Related Links",
        use_json_field=True,
    )
    # ID of the matching page on the legacy site; used as the key for
    # pulling related students from the legacy API.
    legacy_staff_id = models.IntegerField(
        null=True,
        blank=True,
        help_text=_(
            "Add the legacy staff page ID here to show related students. "
            "This can be found by editing the page on the legacy site and copying "
            "the number from the URL, E.G, /admin/pages/3365/edit"
        ),
    )

    search_fields = BasePage.search_fields + [
        index.SearchField("introduction"),
        index.SearchField("first_name"),
        index.SearchField("last_name"),
        index.SearchField("body"),
        index.SearchField("more_information"),
    ]

    key_details_panels = [
        InlinePanel(
            "related_research_centre_pages", label=_("Related Research Centres ")
        ),
        InlinePanel("related_schools", label=_("Related Schools")),
        InlinePanel("related_area_of_expertise", label=_("Areas of Expertise")),
        InlinePanel("related_directorates", label=_("Directorate")),
    ]

    content_panels = BasePage.content_panels + [
        MultiFieldPanel(
            [
                FieldPanel("staff_title"),
                FieldPanel("first_name"),
                FieldPanel("last_name"),
                FieldPanel("profile_image"),
            ],
            heading="Details",
        ),
        InlinePanel("roles", label=_("Staff role")),
        MultiFieldPanel([FieldPanel("email")], heading=_("Contact information")),
        FieldPanel("introduction"),
        FieldPanel("body"),
        MultiFieldPanel(
            [
                FieldPanel("research_highlights_title"),
                InlinePanel(
                    "related_project_pages", label=_("Project pages"), max_num=8
                ),
            ],
            heading=_("Research highlights gallery"),
        ),
        FieldPanel("gallery"),
        MultiFieldPanel(
            [InlinePanel("related_students_manual"), FieldPanel("legacy_staff_id")],
            heading=_("Related Students"),
        ),
        MultiFieldPanel(
            [
                FieldPanel("more_information_title"),
                FieldPanel("more_information"),
            ],
            heading="More information",
        ),
        FieldPanel("related_links"),
    ]

    edit_handler = TabbedInterface(
        [
            ObjectList(content_panels, heading="Content"),
            ObjectList(key_details_panels, heading="Key details"),
            ObjectList(BasePage.promote_panels, heading="Promote"),
            ObjectList(BasePage.settings_panels, heading="Settings"),
        ]
    )

    @property
    def listing_meta(self):
        # Returns a page 'type' value that's readable for listings.
        return "Staff"

    @property
    def name(self):
        """Full display name: title + first + last, skipping blanks."""
        parts = (self.staff_title, self.first_name, self.last_name)
        return " ".join(p for p in parts if p)

    @property
    def related_students_cache_key(self):
        """Cache key for the legacy-API related-students payload."""
        return f"{self.pk}_related_students"

    def fetch_related_students(self):
        """Pull related students from the legacy API and cache them.

        Cached with no timeout; refreshed on save() and by the management
        command. Returns [] (and caches nothing) if the API call fails.
        """
        value = []
        try:
            value = pull_related_students(self.legacy_staff_id)
            cache.set(self.related_students_cache_key, value, None)
        except CantPullFromRcaApi:
            pass
        return value

    @cached_property
    def legacy_related_students(self):
        """Cached related-students list; falls back to a live API fetch."""
        cached_val = cache.get(self.related_students_cache_key)
        if cached_val is not None:
            return cached_val
        return self.fetch_related_students()

    def save(self, *args, **kwargs):
        """
        Overrides the default Page.save() method to trigger
        a cache refresh for related students (in case the
        legacy_staff_id value has changed).
        """
        super().save(*args, **kwargs)
        if self.legacy_staff_id:
            # Don't run if there is no ID
            try:
                self.fetch_related_students()
            except CantPullFromRcaApi:
                # Legacy API can be a bit unreliable, so don't
                # break here. The management command can update
                # the value next time it runs
                # NOTE(review): fetch_related_students() already swallows
                # CantPullFromRcaApi, so this handler looks unreachable —
                # confirm before removing.
                pass

    def format_student_page(self, page):
        """Map a StudentPage to the dict shape used by the template."""
        student_page = page
        image = getattr(student_page, "profile_image", None)
        if image:
            image = image.get_rendition("fill-60x60").url
        return {
            "first_name": student_page.first_name,
            "surname": student_page.last_name,
            "status": student_page.degree_status,
            "link": student_page.url,
            "image_url": image,
        }

    def get_related_students(self):
        """
        Returns a list containing:
        - legacy related students from the cached api
        - request and manual related students at the page level
        - Students which reference this page through
          StudentPage.related_supervisor
        Sorted by surname.
        """
        students = []
        # Format the api content
        if self.legacy_staff_id:
            for student in self.legacy_related_students:
                item = student
                fullname = student["name"].split(" ")
                item["first_name"] = fullname[0].title()
                # In case we encounter triple-barrelled names, everything
                # after the first token becomes the surname.
                item["surname"] = " ".join(fullname[1:]).title()
                students.append(item)
        # Format students which reference this page through
        # StudentPage.related_supervisor
        students_with_related_supervisor = StudentPage.objects.filter(
            related_supervisor__supervisor_page=self
        ).live()
        for student in students_with_related_supervisor:
            item = self.format_student_page(student)
            students.append(item)
        # Format the students added at the page level
        for student in self.related_students_manual.all():
            if student.student_page:
                student_page = student.student_page.specific
                item = self.format_student_page(student_page)
            else:
                item = {
                    "first_name": student.first_name.title(),
                    "surname": student.surname.title(),
                    "status": student.status,
                    "link": student.link,
                }
            students.append(item)
        # Sort students by surname
        students = sorted(students, key=lambda k: k["surname"])
        return students

    def get_roles_grouped(self, request):
        """Group this page's roles by programme (or custom programme).

        Returns dict-values of {"label", "items", "link"} entries, one per
        programme, where "items" is the list of role titles.
        """
        items = []
        # First populate a list of all values,
        # e.g. [('programme title', 'role title', 'url'),
        #       ('programme title', 'role title', None), ...]
        for value in self.roles.all().select_related(
            "programme", "programme__degree_level"
        ):
            if value.programme:
                items.append(
                    (str(value.programme), value.role, value.programme.get_url(request))
                )
            else:
                items.append((value.custom_programme, value.role, None))
        # Create a dictionary of values re-using keys so we can group by both
        # the programmes and the custom programmes.
        regrouped = {}
        for (key, value, link) in items:
            if key not in regrouped:
                regrouped[key] = {"label": key, "items": [value], "link": link}
            else:
                regrouped[key]["items"].append(value)
                # Keep the first non-empty link seen for this programme.
                if link and not regrouped[key]["link"]:
                    regrouped[key]["link"] = link
        return regrouped.values()

    def get_directorate_linked_filters(self):
        """For the directorate taxonomy thats listed out in key details,
        they need to link to the parent staff picker page with a filter pre
        selected"""
        parent = self.get_parent()
        directorates = []
        for i in self.related_directorates.all().select_related("directorate"):
            if parent:
                directorates.append(
                    {
                        "title": i.directorate.title,
                        "link": f"{parent.url}?school-centre-or-area=d-{i.directorate.slug}",
                    }
                )
            else:
                # No parent page: render the title without a filter link.
                directorates.append({"title": i.directorate.title})
        return directorates

    def get_context(self, request, *args, **kwargs):
        """Assemble the template context for the staff detail page."""
        context = super().get_context(request, *args, **kwargs)
        research_pages = get_staff_research_projects(self)
        context["research_highlights"] = format_research_highlights(research_pages)
        context["areas"] = get_area_linked_filters(page=self)
        context["directorates"] = self.get_directorate_linked_filters()
        context["related_schools"] = self.related_schools.all()
        context["research_centres"] = self.related_research_centre_pages.all()
        context["related_students"] = self.get_related_students()
        context["roles"] = self.get_roles_grouped(request)
        return context
class StaffIndexPage(BasePage):
    """Listing page for StaffPage children with filtering and pagination."""

    subpage_types = ["people.StaffPage"]
    template = "patterns/pages/staff/staff_index.html"
    introduction = RichTextField(blank=False, features=["link"])
    content_panels = BasePage.content_panels + [FieldPanel("introduction")]
    search_fields = BasePage.search_fields + [index.SearchField("introduction")]

    def get_base_queryset(self):
        """Live staff pages under this index, ordered by surname/forename."""
        return (
            StaffPage.objects.child_of(self)
            .live()
            .prefetch_related("roles")
            .order_by("last_name", "first_name")
        )

    def modify_results(self, paginator_page, request):
        """Attach a resolved ``link`` URL to each page in the result set."""
        for obj in paginator_page.object_list:
            # providing request to get_url() massively improves
            # url generation efficiency, as values are cached
            # on the request
            obj.link = obj.get_url(request)

    def get_context(self, request, *args, **kwargs):
        """Build the context: filters, paginated results and result count."""
        context = super().get_context(request, *args, **kwargs)
        base_queryset = self.get_base_queryset()
        queryset = base_queryset.all()
        # Each filter's options are restricted to values actually used by
        # pages in the base queryset, so empty options are never offered.
        filters = (
            SchoolCentreDirectorateFilter(
                "School, Centre or Area",
                school_queryset=SchoolPage.objects.live().filter(
                    id__in=base_queryset.values_list(
                        "related_schools__page_id", flat=True
                    )
                ),
                centre_queryset=ResearchCentrePage.objects.live().filter(
                    id__in=base_queryset.values_list(
                        "related_research_centre_pages__page_id", flat=True
                    )
                ),
                directorate_queryset=Directorate.objects.filter(
                    id__in=base_queryset.values_list(
                        "related_directorates__directorate_id", flat=True
                    )
                ),
            ),
            TabStyleFilter(
                "Programme",
                queryset=(
                    ProgrammePage.objects.live().filter(
                        id__in=base_queryset.values_list(
                            "roles__programme_id", flat=True
                        )
                    )
                ),
                filter_by="roles__programme__slug__in",
                option_value_field="slug",
            ),
            TabStyleFilter(
                "Expertise",
                queryset=(
                    AreaOfExpertise.objects.filter(
                        id__in=base_queryset.values_list(
                            "related_area_of_expertise__area_of_expertise_id", flat=True
                        )
                    )
                ),
                filter_by="related_area_of_expertise__area_of_expertise__slug__in",  # Filter by slug here
                option_value_field="slug",
            ),
        )
        # Apply filters
        for f in filters:
            queryset = f.apply(queryset, request.GET)
        # Paginate filtered queryset
        per_page = settings.DEFAULT_PER_PAGE
        page_number = request.GET.get("page")
        paginator = Paginator(queryset, per_page)
        try:
            results = paginator.page(page_number)
        except PageNotAnInteger:
            # No/invalid page parameter: fall back to the first page.
            results = paginator.page(1)
        except EmptyPage:
            # Out-of-range page parameter: clamp to the last page.
            results = paginator.page(paginator.num_pages)
        # Set additional attributes etc
        self.modify_results(results, request)
        # Finalise and return context
        context.update(
            hero_colour="light",
            filters={
                "title": "Filter by",
                "aria_label": "Filter results",
                "items": filters,
            },
            results=results,
            result_count=paginator.count,
        )
        return context
class DegreeType(SluggedTaxonomy):
    """Taxonomy term for the type of a student's degree."""

    pass
class DegreeStatus(SluggedTaxonomy):
    """Taxonomy term for the status of a student's degree."""

    pass
class RelatedStudentPage(Orderable):
    """Orderable link from any Page to a StudentPage."""

    source_page = ParentalKey(Page, related_name="related_student_pages")
    page = models.ForeignKey("people.StudentPage", on_delete=models.CASCADE)
    panels = [FieldPanel("page")]
class StudentPageGallerySlide(Orderable):
    """A single titled, attributed image slide in a StudentPage gallery."""

    source_page = ParentalKey("StudentPage", related_name="gallery_slides")
    title = models.CharField(max_length=120)
    image = models.ForeignKey(
        get_image_model_string(),
        null=True,
        related_name="+",
        on_delete=models.SET_NULL,
    )
    author = models.CharField(max_length=120)
    panels = [FieldPanel("image"), FieldPanel("title"), FieldPanel("author")]
class StudentPageSocialLinks(Orderable):
    """A personal/social link shown on a StudentPage."""

    source_page = ParentalKey("StudentPage", related_name="personal_links")
    link_title = models.CharField(
        max_length=120, help_text="The text displayed for the link"
    )
    url = models.URLField()
    panels = [FieldPanel("link_title"), FieldPanel("url")]
class StudentPageRelatedLinks(Orderable):
    """An external related link shown on a StudentPage."""

    source_page = ParentalKey("StudentPage", related_name="relatedlinks")
    link_title = models.CharField(
        max_length=120, help_text="The text displayed for the link"
    )
    url = models.URLField()
    panels = [FieldPanel("link_title"), FieldPanel("url")]
class StudentPageAreOfExpertisePlacement(models.Model):
    """Through-model linking a StudentPage to an AreaOfExpertise term.

    NOTE(review): the class name is missing an "a" ("AreOfExpertise"),
    mirroring the staff variant; renaming would require a migration.
    """

    page = ParentalKey("StudentPage", related_name="related_area_of_expertise")
    area_of_expertise = models.ForeignKey(
        AreaOfExpertise,
        on_delete=models.CASCADE,
        related_name="related_student",
        verbose_name=_("Areas of expertise"),
    )
    panels = [FieldPanel("area_of_expertise")]
class StudentPageSupervisor(models.Model):
    """Supervisor information for a StudentPage.

    Either an internal StaffPage is chosen, or the supervisor's details
    (title, first name, surname, link) are entered by hand — never both
    (enforced in clean()).
    """

    page = ParentalKey(
        "people.StudentPage",
        on_delete=models.CASCADE,
        related_name="related_supervisor",
    )
    supervisor_page = models.ForeignKey(
        StaffPage,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
        related_name="+",
    )
    title = models.CharField(max_length=20, help_text="E.G, Dr, Mrs, etc", blank=True)
    first_name = models.CharField(max_length=255, blank=True)
    surname = models.CharField(max_length=255, blank=True)
    link = models.URLField(blank=True)

    panels = [
        HelpPanel(
            content="Choose an internal Staff page or manually enter information"
        ),
        FieldPanel("supervisor_page"),
        FieldPanel("title"),
        FieldPanel("first_name"),
        FieldPanel("surname"),
        FieldPanel("link"),
    ]

    def clean(self):
        """Validate the page-or-manual-entry contract for supervisors."""
        errors = {}
        manual_values = (self.title, self.first_name, self.surname, self.link)
        # A StaffPage and manual details are mutually exclusive.
        if self.supervisor_page and any(manual_values):
            errors["supervisor_page"] = [
                _(
                    "Please specify a supervisor page or manually enter information, both are not supported"
                )
            ]
        # With no StaffPage chosen, a first name and surname are required.
        if not self.supervisor_page and not self.first_name:
            errors["first_name"] = [_("Please specify a first name")]
        if not self.supervisor_page and not self.surname:
            errors["surname"] = [_("Please specify a surname")]
        if errors:
            raise ValidationError(errors)
class StudentPage(BasePage):
    """Profile page for a research student.

    Many identity/degree fields are restricted to superusers (students can
    edit their own page via the group created in save()).
    """

    base_form_class = StudentPageAdminForm
    template = "patterns/pages/student/student_detail.html"
    parent_page_types = ["people.StudentIndexPage"]

    # Honorific, e.g. "Dr"; combined with the name in ``name`` below.
    student_title = models.CharField(
        max_length=255, help_text=_("E.G Dr, Professor"), blank=True
    )
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    profile_image = models.ForeignKey(
        get_image_model_string(),
        null=True,
        blank=True,
        related_name="+",
        on_delete=models.SET_NULL,
    )
    email = models.EmailField(blank=True)
    degree_status = models.ForeignKey(
        DegreeStatus,
        on_delete=models.SET_NULL,
        related_name="related_student",
        null=True,
        blank=True,
    )
    degree_start_date = models.DateField(blank=True, null=True)
    degree_end_date = models.DateField(blank=True, null=True)
    degree_award = models.CharField(
        max_length=1,
        choices=(("1", "MPhil"), ("2", "PhD")),
        blank=True,
    )
    introduction = models.TextField(blank=True, verbose_name="Project title")
    bio = RichTextField(
        blank=True,
        help_text="Add a detail summary",
        verbose_name="Abstract",
    )
    programme = models.ForeignKey(
        "programmes.ProgrammePage",
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
    )
    # "More information" accordion sections (see student_information()).
    biography = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    degrees = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    experience = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    awards = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    funding = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    exhibitions = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    publications = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    research_outputs = RichTextField(
        blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES
    )
    conferences = RichTextField(blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES)
    additional_information_title = models.TextField(blank=True)
    # NOTE(review): field name is missing "al" ("addition_information…");
    # renaming would require a migration, so it is left as-is.
    addition_information_content = RichTextField(
        blank=True, features=STUDENT_PAGE_RICH_TEXT_FEATURES
    )
    link_to_final_thesis = models.URLField(blank=True)
    student_funding = RichTextField(blank=True, features=["link"])
    student_user_account = models.OneToOneField(
        User,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        limit_choices_to={"groups__name": "Students"},
        unique=True,
    )
    student_user_image_collection = models.OneToOneField(
        Collection,
        on_delete=models.SET_NULL,
        null=True,
        blank=True,
        unique=True,
        help_text="This should link to this students image collection",
    )

    search_fields = BasePage.search_fields + [
        index.SearchField("introduction"),
        index.SearchField("first_name"),
        index.SearchField("last_name"),
        index.SearchField("bio"),
        index.SearchField("biography"),
        index.SearchField("degrees"),
        index.SearchField("experience"),
        index.SearchField("awards"),
        index.SearchField("funding"),
        index.SearchField("exhibitions"),
        index.SearchField("publications"),
        index.SearchField("research_outputs"),
        index.SearchField("addition_information_content"),
    ]

    content_panels = BasePage.content_panels + [
        FieldPanel("student_user_account", permission="superuser"),
        FieldPanel("student_user_image_collection", permission="superuser"),
        MultiFieldPanel(
            [
                FieldPanel("student_title", permission="superuser"),
                FieldPanel("first_name", permission="superuser"),
                FieldPanel("last_name", permission="superuser"),
                FieldPanel("profile_image"),
            ],
            heading="Details",
        ),
        FieldPanel("link_to_final_thesis"),
        InlinePanel("related_supervisor", label="Supervisor information"),
        MultiFieldPanel([FieldPanel("email")], heading="Contact information"),
        FieldPanel("programme", permission="superuser"),
        FieldPanel("degree_start_date", permission="superuser"),
        FieldPanel("degree_end_date", permission="superuser"),
        FieldPanel("degree_award", permission="superuser"),
        FieldPanel("degree_status", permission="superuser"),
        FieldPanel("introduction"),
        FieldPanel("bio"),
        StudentPageInlinePanel(
            "related_project_pages",
            label=_("Project pages"),
            max_num=5,
            heading=_("Research highlights gallery"),
        ),
        InlinePanel("gallery_slides", label="Gallery slide", max_num=5),
        MultiFieldPanel(
            [
                FieldPanel("biography"),
                FieldPanel("degrees"),
                FieldPanel("experience"),
                FieldPanel("awards"),
                FieldPanel("funding"),
                FieldPanel("exhibitions"),
                FieldPanel("publications"),
                FieldPanel("research_outputs"),
                FieldPanel("conferences"),
            ],
            heading="More information",
        ),
        MultiFieldPanel(
            [
                FieldPanel("additional_information_title"),
                FieldPanel("addition_information_content"),
            ],
            heading="Additional information",
        ),
        InlinePanel("relatedlinks", label="External links", max_num=5),
    ]

    key_details_panels = [
        InlinePanel("related_area_of_expertise", label="Areas of Expertise"),
        StudentPageInlinePanel(
            "related_research_centre_pages",
            label=_("Related Research Centres "),
        ),
        StudentPageInlinePanel("related_schools", label=_("Related Schools")),
        InlinePanel("personal_links", label="Personal links", max_num=5),
        FieldPanel("student_funding"),
    ]

    edit_handler = TabbedInterface(
        [
            ObjectList(content_panels, heading="Content"),
            ObjectList(key_details_panels, heading="Key details"),
            StudentPagePromoteTab(BasePage.promote_panels, heading="Promote"),
            StudentPageSettingsTab(
                BasePage.settings_panels, heading="Settings"
            ),  # needs to have no content for students
        ]
    )

    @property
    def listing_meta(self):
        # Returns a page 'type' value that's readable for listings.
        return "Student"

    @property
    def name(self):
        """Full display name: title + first + last, skipping blanks."""
        parts = (self.student_title, self.first_name, self.last_name)
        return " ".join(p for p in parts if p)

    @property
    def supervisors(self):
        """List of {"title", "link"} dicts, one per related supervisor."""
        supervisors = []
        for item in self.related_supervisor.all():
            if item.supervisor_page:
                supervisors.append(
                    {
                        "title": item.supervisor_page.title,
                        "link": item.supervisor_page.url,
                    }
                )
            else:
                supervisors.append(
                    {
                        "title": f"{item.title} {item.first_name} {item.surname}",
                        "link": item.link,
                    }
                )
        return supervisors

    def student_information(self):
        """Prepare student data into an accordion-friendly format.

        Returns a list of {"value": {"heading", "body"}} dicts, one per
        non-empty rich-text section, in fixed display order.
        """
        data = []
        if self.biography:
            data.append({"value": {"heading": "Biography", "body": self.biography}})
        if self.degrees:
            data.append({"value": {"heading": "Degrees", "body": self.degrees}})
        if self.experience:
            data.append({"value": {"heading": "Experience", "body": self.experience}})
        if self.awards:
            data.append({"value": {"heading": "Awards", "body": self.awards}})
        if self.funding:
            data.append({"value": {"heading": "Funding", "body": self.funding}})
        if self.exhibitions:
            data.append({"value": {"heading": "Exhibitions", "body": self.exhibitions}})
        if self.publications:
            data.append(
                {"value": {"heading": "Publications", "body": self.publications}}
            )
        if self.research_outputs:
            data.append(
                {
                    "value": {
                        "heading": "Research outputs",
                        "body": self.research_outputs,
                    }
                }
            )
        if self.conferences:
            data.append({"value": {"heading": "Conferences", "body": self.conferences}})
        if self.addition_information_content:
            data.append(
                {
                    "value": {
                        "heading": self.additional_information_title
                        or "Additional information",
                        "body": self.addition_information_content,
                    }
                }
            )
        return data

    @property
    def student_gallery(self):
        """Gallery slides formatted as template-friendly dicts."""
        data = []
        for item in self.gallery_slides.all():
            data.append(
                {
                    "value": {
                        "title": item.title,
                        "author": item.author,
                        "image": item.image,
                    }
                }
            )
        return data

    @property
    def student_related_links(self):
        """External related links formatted as template-friendly dicts."""
        return [
            {"value": {"title": item.link_title, "url": item.url}}
            for item in self.relatedlinks.all()
        ]

    def save(self, *args, **kwargs):
        """Save the page, then ensure the student's user account has a
        dedicated group granting edit access to this page and add/choose
        permissions on the student's image collection.

        Fix: the previous implementation called ``super().save()`` without
        forwarding ``*args``/``**kwargs``, silently dropping arguments such
        as ``update_fields`` passed by callers.
        """
        super().save(*args, **kwargs)
        if self.student_user_image_collection and self.student_user_account:
            # Check if a group configuration exists already for this user.
            group = Group.objects.filter(
                name=self.student_user_account.student_group_name
            )
            if group:
                # If we find a group already, we don't need to create one.
                return
            # Create a specific group for this student so they have edit
            # access to their page and their image collection.
            specific_student_group = Group.objects.create(
                name=self.student_user_account.student_group_name
            )
            # Grant edit permission on this page only.
            GroupPagePermission.objects.create(
                group=specific_student_group, page=self, permission_type="edit"
            )
            # Grant add/choose image permissions on the student's collection.
            # (The previous code re-fetched the collection with
            # Collection.objects.get(name=<Collection instance>), relying on
            # implicit string coercion; the FK already holds the instance.)
            GroupCollectionPermission.objects.create(
                group=specific_student_group,
                collection=self.student_user_image_collection,
                permission=Permission.objects.get(codename="add_image"),
            )
            GroupCollectionPermission.objects.create(
                group=specific_student_group,
                collection=self.student_user_image_collection,
                permission=Permission.objects.get(codename="choose_image"),
            )
            # Add the new specific student group to the user.
            self.student_user_account.groups.add(specific_student_group)
            self.student_user_account.save()

    def get_context(self, request, *args, **kwargs):
        """Assemble the template context for the student detail page."""
        context = super().get_context(request, *args, **kwargs)
        research_pages = get_student_research_projects(self)
        context["areas"] = get_area_linked_filters(page=self)
        context["research_highlights"] = format_research_highlights(research_pages)
        context["related_schools"] = self.related_schools.all()
        context["research_centres"] = self.related_research_centre_pages.all()
        context["student_information"] = self.student_information()
        return context
class StudentIndexPage(BasePage):
    """Singleton listing page for StudentPage children with filtering."""

    # Only one student index may exist across the site.
    max_count = 1
    subpage_types = ["people.StudentPage"]
    template = "patterns/pages/student/student_index.html"
    introduction = RichTextField(blank=False, features=["link"])
    content_panels = BasePage.content_panels + [FieldPanel("introduction")]
    search_fields = BasePage.search_fields + [index.SearchField("introduction")]

    def get_base_queryset(self):
        """Live student pages under this index, ordered by surname/forename."""
        return (
            StudentPage.objects.child_of(self)
            .live()
            .order_by("last_name", "first_name")
        )

    def modify_results(self, paginator_page, request):
        """Attach a resolved ``link`` URL to each page in the result set."""
        for obj in paginator_page.object_list:
            # providing request to get_url() massively improves
            # url generation efficiency, as values are cached
            # on the request
            obj.link = obj.get_url(request)

    def get_context(self, request, *args, **kwargs):
        """Build the context: filters, paginated results and result count."""
        context = super().get_context(request, *args, **kwargs)
        base_queryset = self.get_base_queryset()
        queryset = base_queryset.all()
        # Each filter's options are restricted to values actually used by
        # pages in the base queryset, so empty options are never offered.
        filters = (
            TabStyleFilter(
                "School or Centre",
                # One combined option list over SchoolPage and
                # ResearchCentrePage types, limited to pages referenced by
                # students in the base queryset.
                queryset=(
                    Page.objects.live()
                    .filter(
                        content_type__in=list(
                            ContentType.objects.get_for_models(
                                SchoolPage, ResearchCentrePage
                            ).values()
                        )
                    )
                    .filter(
                        models.Q(
                            id__in=base_queryset.values_list(
                                "related_schools__page_id", flat=True
                            )
                        )
                        | models.Q(
                            id__in=base_queryset.values_list(
                                "related_research_centre_pages__page_id", flat=True
                            )
                        )
                    )
                ),
                filter_by=(
                    "related_schools__page__slug__in",
                    "related_research_centre_pages__page__slug__in",  # Filter by slug here
                ),
                option_value_field="slug",
            ),
            TabStyleFilter(
                "Expertise",
                queryset=(
                    AreaOfExpertise.objects.filter(
                        id__in=base_queryset.values_list(
                            "related_area_of_expertise__area_of_expertise_id", flat=True
                        )
                    )
                ),
                filter_by="related_area_of_expertise__area_of_expertise__slug__in",  # Filter by slug here
                option_value_field="slug",
            ),
            TabStyleFilter(
                "Degree status",
                queryset=(
                    DegreeStatus.objects.filter(
                        id__in=base_queryset.values_list("degree_status_id", flat=True)
                    )
                ),
                filter_by="degree_status__slug__in",
                option_value_field="slug",
            ),
        )
        # Apply filters
        for f in filters:
            queryset = f.apply(queryset, request.GET)
        # Paginate filtered queryset
        per_page = settings.DEFAULT_PER_PAGE
        page_number = request.GET.get("page")
        paginator = Paginator(queryset, per_page)
        try:
            results = paginator.page(page_number)
        except PageNotAnInteger:
            # No/invalid page parameter: fall back to the first page.
            results = paginator.page(1)
        except EmptyPage:
            # Out-of-range page parameter: clamp to the last page.
            results = paginator.page(paginator.num_pages)
        # Set additional attributes etc
        self.modify_results(results, request)
        # Finalise and return context
        context.update(
            hero_colour="light",
            filters={
                "title": "Filter by",
                "aria_label": "Filter results",
                "items": filters,
            },
            results=results,
            result_count=paginator.count,
        )
        return context
|
"""Train an MLP classifier on the breast-cancer dataset and report metrics."""
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report, confusion_matrix

cancer = load_breast_cancer()
X = cancer['data']
y = cancer['target']
X_train, X_test, y_train, y_test = train_test_split(X, y)

# Standardize features using statistics from the training split only,
# so no information leaks from the test split.
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)

# Fix: a discarded `MLPClassifier(...)` expression here was dead code.
# max_iter raised from the default 200, which commonly stops before
# convergence on this dataset (ConvergenceWarning).
mlp = MLPClassifier(hidden_layer_sizes=(30, 30, 30), max_iter=1000)
mlp.fit(X_train, y_train)

predictions = mlp.predict(X_test)
print(confusion_matrix(y_test, predictions))
print(classification_report(y_test, predictions))
|
# -*- coding:utf-8 -*-
__author__ = 'yyp'
__date__ = '2018-4-4 23:46'
class Solution:
    def isPalindrome(self, s):
        """Return True if *s* is a palindrome, considering only ASCII
        letters and digits and ignoring case.

        :type s: str
        :rtype: bool
        """
        s = s.lower()
        i = 0
        j = len(s) - 1
        while i < j:
            # Skip non-alphanumeric characters from either end before
            # comparing.
            if not self._is_alphanumeric(s[i]):
                i += 1
            elif not self._is_alphanumeric(s[j]):
                j -= 1
            elif s[i] != s[j]:
                return False
            else:
                i += 1
                j -= 1
        return True

    def _is_alphanumeric(self, c):
        """Return True for ASCII lowercase letters and digits.

        The caller lower-cases the string first, so 'a'-'z' plus '0'-'9'
        covers all ASCII letters and digits.  Fix: the original fell off
        the end and implicitly returned None instead of False.
        """
        return ("a" <= c <= "z") or ("0" <= c <= "9")
# Quick demo: prints True, then False.
solver = Solution()
for phrase in ("A man, a plan, a canal: Panama", "race a car"):
    print(solver.isPalindrome(phrase))
|
from .models import *
from django.shortcuts import render, get_object_or_404
class EventService:
    """Persists Eventbrite-style event payloads as ``Event`` rows."""

    def add_events(self, list_of_events):
        """Create an ``Event`` for every entry in ``list_of_events['events']``.

        The ticket cost is 0 for free events (or events with no
        available/priced ticket class); otherwise the cost of the first
        available ticket class is used.
        """
        for event in list_of_events['events']:
            new_event = Event(
                name=event['name']['text'],
                start_date=event['start']['utc'],
                organizer_id=event['organization_id'],
                ticket_cost=self._ticket_cost(event),
            )
            try:
                # NOTE(review): ``Model.save`` does not normally accept
                # ``ignore_conflicts`` (that is a ``bulk_create`` option);
                # confirm Event.save supports it, otherwise every save
                # raises TypeError and is silently swallowed below.
                new_event.save(ignore_conflicts=True)
            except Exception:
                # Best-effort insert: conflicts/duplicates are ignored.
                pass

    @staticmethod
    def _ticket_cost(event):
        """Return the ticket cost (float) for one raw event payload."""
        available = [
            tc for tc in event['ticket_classes']
            if tc['on_sale_status'] == "AVAILABLE"
        ]
        if (not available or event['is_free']
                or available[0].get('free') or 'cost' not in available[0]):
            return 0
        return float(available[0]['cost']['major_value'])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__date__ = '2018/4/3 21:24'
__author__ = 'ooo'
import numpy as np
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
class FPNet(nn.Module):
    """Feature Pyramid Network head (FPN, Mask R-CNN style).

    Builds pyramid levels P2-P6 from backbone feature maps C2-C5 by
    combining 1x1 lateral convolutions with a top-down upsampling
    pathway, then smoothing each merged map with a 3x3 convolution.
    """

    def __init__(self, indepth, outdepth, stages):
        """
        Arguments:
        - indepth  -- channel counts of C2..C5 (indices 0..3).
        - outdepth -- channel count of every pyramid output.
        - stages   -- stored on the module; not used internally here.
        """
        super(FPNet, self).__init__()
        self.stages = stages
        # P6 is a stride-2 subsample of P5 (extra anchor scale for RPN).
        self.P6 = nn.MaxPool2d(kernel_size=1, stride=2)
        self.P5_conv1 = nn.Conv2d(indepth[3], outdepth, kernel_size=1, stride=1)
        self.P5_conv2 = nn.Sequential(
            SamePad2d(kernel_size=3, stride=1),
            nn.Conv2d(outdepth, outdepth, kernel_size=3, stride=1),
        )
        self.P4_conv1 = nn.Conv2d(indepth[2], outdepth, kernel_size=1, stride=1)
        self.P4_conv2 = nn.Sequential(
            SamePad2d(kernel_size=3, stride=1),
            nn.Conv2d(outdepth, outdepth, kernel_size=3, stride=1),
        )
        self.P3_conv1 = nn.Conv2d(indepth[1], outdepth, kernel_size=1, stride=1)
        self.P3_conv2 = nn.Sequential(
            SamePad2d(kernel_size=3, stride=1),
            nn.Conv2d(outdepth, outdepth, kernel_size=3, stride=1),
        )
        self.P2_conv1 = nn.Conv2d(indepth[0], outdepth, kernel_size=1, stride=1)
        self.P2_conv2 = nn.Sequential(
            SamePad2d(kernel_size=3, stride=1),
            nn.Conv2d(outdepth, outdepth, kernel_size=3, stride=1),
        )

    def forward(self, feature_maps):
        """Return pyramid outputs [P2, P3, P4, P5, P6] for the input
        backbone maps (first and last entries of *feature_maps* unused)."""
        _, C2, C3, C4, C5, _ = feature_maps
        p5_out = self.P5_conv1(C5)
        # Top-down pathway: upsample the coarser level by 2x and add the
        # lateral 1x1 projection.  Fix: F.upsample is deprecated;
        # F.interpolate (same default 'nearest' mode) is the drop-in
        # replacement with identical output.
        p4_out = self.P4_conv1(C4) + F.interpolate(p5_out, scale_factor=2)
        p3_out = self.P3_conv1(C3) + F.interpolate(p4_out, scale_factor=2)
        p2_out = self.P2_conv1(C2) + F.interpolate(p3_out, scale_factor=2)
        p5_out = self.P5_conv2(p5_out)
        p4_out = self.P4_conv2(p4_out)
        p3_out = self.P3_conv2(p3_out)
        p2_out = self.P2_conv2(p2_out)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        p6_out = self.P6(p5_out)
        return [p2_out, p3_out, p4_out, p5_out, p6_out]
class SamePad2d(nn.Module):
    """Mimics tensorflow's 'SAME' padding for NCHW tensors.
    """

    def __init__(self, kernel_size, stride):
        super(SamePad2d, self).__init__()
        self.kernel_size = torch.nn.modules.utils._pair(kernel_size)
        self.stride = torch.nn.modules.utils._pair(stride)

    def forward(self, input):
        # Input layout is NCHW: dim 2 is height, dim 3 is width.
        # Fix: the original read height from dim 2 as "width" (and vice
        # versa), which mis-padded non-square inputs whenever the two
        # axes need different padding (even kernels or stride > 1).
        in_height = input.size(2)
        in_width = input.size(3)
        out_height = math.ceil(float(in_height) / float(self.stride[0]))
        out_width = math.ceil(float(in_width) / float(self.stride[1]))
        pad_along_height = ((out_height - 1) * self.stride[0] +
                            self.kernel_size[0] - in_height)
        pad_along_width = ((out_width - 1) * self.stride[1] +
                           self.kernel_size[1] - in_width)
        pad_top = math.floor(pad_along_height / 2)
        pad_left = math.floor(pad_along_width / 2)
        pad_bottom = pad_along_height - pad_top
        pad_right = pad_along_width - pad_left
        # F.pad pads the last dimension first: (left, right, top, bottom).
        return F.pad(input, (pad_left, pad_right, pad_top, pad_bottom), 'constant', 0)

    def __repr__(self):
        return self.__class__.__name__
import copy
from collections import deque
from datetime import datetime

from common import finished, next_configs, process_results, build_path
from utils import *
ALGORITHM_NAME = "Breadth First Search (BFS)"
def bfs(level, testall):
    """Breadth-first search over box configurations of *level*.

    Arguments:
    - level -- the puzzle level (provides .smap and .start).
    - testall -- passed through to process_results.

    Returns the value of ``process_results`` (statistics, path, timing).
    """
    initial_time = datetime.now()
    smap = level.smap
    first_node = Node(level.start, None, [])
    # FIFO frontier seeded with the initial node; deque gives O(1)
    # popleft (the original list.pop(0) is O(n) per extraction).
    queue = deque([first_node])
    known_cfgs = {first_node.config}
    nodes_processed = 0
    won = False
    # Keep expanding while the frontier is non-empty and no solution found.
    while queue and not won:
        # Take the oldest node from the frontier.
        node = queue.popleft()
        # Goal test first: stop as soon as a winning configuration appears.
        if finished(node.config.boxes, level):
            print("Found solution!")
            won = True
        else:
            nodes_processed += 1
            # Enqueue every legal successor configuration not seen before.
            possible_configs = next_configs(node.config, level.smap)
            for config in possible_configs:
                if config in known_cfgs:
                    continue
                known_cfgs.add(config)
                new_node = Node(copy.copy(config), node, [])
                node.children.append(new_node)
                queue.append(new_node)
    finish_time = datetime.now()
    elapsed_time = finish_time - initial_time
    if won:
        path = build_path(node)
        return process_results(won, testall, elapsed_time, smap, node, path,
                               ALGORITHM_NAME, nodes_processed - 1, len(queue))
    else:
        return process_results(won, testall, elapsed_time, smap, None, [],
                               ALGORITHM_NAME, nodes_processed - 1, len(queue))
|
# Generated by Django 2.1.15 on 2020-08-23 06:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Floor model and
    # gives Camera and Table an optional FK to it (SET_NULL on delete).

    dependencies = [
        ('watcher', '0002_auto_20200819_1642'),
    ]

    operations = [
        migrations.CreateModel(
            name='Floor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=256, null=True)),
                ('floor_num', models.IntegerField(default=-1)),
                ('description', models.CharField(blank=True, max_length=1024, null=True)),
                ('store', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='watcher.Store')),
            ],
        ),
        migrations.AddField(
            model_name='camera',
            name='floor',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='watcher.Floor'),
        ),
        migrations.AddField(
            model_name='table',
            name='floor',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='watcher.Floor'),
        ),
    ]
|
## Sets: how to create and manipulate them ##

# A set literal uses "{}"
conjunto = {1, 2, 3, 4, 2, 4}  # A set does not keep duplicate values
print(type(conjunto))
print(conjunto)
conjunto.add(5)  # Adds an element to the set
print(conjunto)
conjunto.discard(2)  # Removes an element from the set
print(conjunto)
conjunto2 = {1, 2, 3, 4, 5}
conjunto3 = {5, 6, 7, 8}
conjunt_uniao = conjunto2.union(conjunto3)  # Joins the two sets
print('União: {}'.format(conjunt_uniao))
conjunto_interseccao = conjunto2.intersection(conjunto3)  # Everything present in both sets
print('Intersecção: {}'.format(conjunto_interseccao))
conjunto_diferenca1 = conjunto2.difference(conjunto3)  # Keeps only values present in the left-hand set; operand order changes the result.
print('Diferença 2 e 3: {}'.format(conjunto_diferenca1))
conjunto_diferenca2 = conjunto3.difference(conjunto2)
print('Diferença 3 e 2: {}'.format(conjunto_diferenca2))
conjunto_diff_simetrica = conjunto2.symmetric_difference(conjunto3)  # Everything that is only in "a" or only in "b".
print('Diferença simétrica: {}'.format(conjunto_diff_simetrica))
conjunto_a = {1, 2, 3}  # subset of "B"
conjunto_b = {1, 2, 3, 4, 5}  # superset of "A": B contains every element of "A"
conjunto_subset = conjunto_a.issubset(conjunto_b)  # Whether the set is a subset of the other set
print('A é subconjunto de B: {}'.format(conjunto_subset))  # Returns a boolean
conjunto_subset2 = conjunto_b.issubset(conjunto_a)
print('B é subconjunto de A: {}'.format(conjunto_subset2))  # It is not, because of 4 and 5
conjunto_superset = conjunto_b.issuperset(conjunto_a)  # Whenever a set contains another set, it is a superset of that set.
print('B é superconjunto de A: {}'.format(conjunto_superset))
# Converting the list to a set removes duplicate values
lista = ['cachorro', 'cachorro', 'gato', 'gato', 'elefante']
print(lista)
conjunto_animais = set(lista)  # Converting a list to a set
print(conjunto_animais)
lista_animais = list(conjunto_animais)  # Converting a set back to a list
print(lista_animais)
"""Write a Python program to calculate the hypotenuse of a right angled triangle."""
import math
def hypo(h, base):
    """Return the hypotenuse of a right triangle with legs *h* and *base*."""
    return math.hypot(h, base)


# Demo call.
print(hypo(10, 2))
from graphene import ObjectType, String, ID, Float, Field, Boolean
class Position(ObjectType):
    """GraphQL type describing a target's sky position."""

    dec = Float()  # declination
    ra = Float()  # right ascension
    dec_dot = Float()  # declination rate -- units not shown here; confirm with resolver
    ra_dot = Float()  # right-ascension rate -- units not shown here; confirm with resolver
    epoch = String()  # reference epoch for the coordinates
class Magnitude(ObjectType):
    """GraphQL type describing a target's brightness range."""

    min_magnitude = Float()
    max_magnitude = Float()
    filter = String()  # from bandpass
class Target(ObjectType):
    """GraphQL type for an observation target, composing Position and
    Magnitude."""

    id = ID()
    name = String()
    is_optional = Boolean()
    position = Field(Position)
    magnitude = Field(Magnitude)
|
from sklearn.linear_model import Ridge, Lasso, LinearRegression, RidgeCV
from sklearn.model_selection import cross_validate, cross_val_predict, cross_val_score, KFold
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, RobustScaler, StandardScaler
from sklearn.preprocessing import PolynomialFeatures, Normalizer
from sklearn.datasets import make_regression, load_boston
from sklearn.neural_network import MLPRegressor
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import numpy as np
import stk
def scale_data(X_train, X_test, X_today, classtype="StandardScaler"):
    """Scale features column-by-column with a scaler fitted on X_train.

    Column 0 is assumed to be a bias/ones column and is replaced by ones.

    Keyword Arguments:
    - classtype -- "MinMax" (range -3..3), "Robust" or "StandardScaler"
      (the default; the original docstring wrongly claimed MinMax).

    Returns: scaled (X_train, X_test, X_today).
    Raises: ValueError for an unknown classtype (fix: the original fell
    through with ``scaler`` undefined and crashed with a NameError).
    """
    cols = X_train.shape[1]
    X_train_scaled = np.ones((X_train.shape[0], 1))
    X_test_scaled = np.ones((X_test.shape[0], 1))
    X_today_scaled = np.ones((X_today.shape[0], 1))
    if classtype == "MinMax":
        scaler = MinMaxScaler(feature_range=(-3, 3))
    elif classtype == "Robust":
        scaler = RobustScaler()
    elif classtype == "StandardScaler":
        scaler = StandardScaler()
    else:
        raise ValueError("unknown scaler type: " + str(classtype))
    # Fit on the training column only, then apply the same transform to
    # all three matrices (no information leaks from test/today data).
    for i in range(1, cols):
        scaler.fit(X_train[:, i].reshape(-1, 1))
        X_train_scaled = np.c_[X_train_scaled,
                               scaler.transform(X_train[:, i].reshape(-1, 1))]
        X_test_scaled = np.c_[X_test_scaled,
                              scaler.transform(X_test[:, i].reshape(-1, 1))]
        X_today_scaled = np.c_[X_today_scaled,
                               scaler.transform(X_today[:, i].reshape(-1, 1))]
    return X_train_scaled, X_test_scaled, X_today_scaled
def poly_pre(X, n):
    """Expand X with degree-*n* polynomial features (including interactions)."""
    expander = PolynomialFeatures(n, interaction_only=False)
    expander.fit(X)
    return expander.transform(X)
def PCA_pre(X, n=20):
    """Project X onto its first *n* principal components."""
    reducer = PCA(n_components=n)
    reducer.fit(X)
    return reducer.transform(X)
def norm_pre(X_train):
    """Scale each sample (row) of X_train to unit norm."""
    normalizer = Normalizer()
    normalizer.fit(X_train)
    return normalizer.transform(X_train)
def nonlin_comp(X):
    """Append sin/cos of a PCA-compressed copy of X as nonlinear features.

    The compressed width is one tenth of the feature count (integer
    division); NOTE(review): with fewer than 10 columns this asks PCA
    for 0 components -- confirm inputs are wide enough.
    """
    n_components = int(X.shape[1] / 10)
    compressed = PCA_pre(X, n=n_components)
    return np.c_[X, np.sin(compressed), np.cos(compressed)]
def plot(classifier, X, y, title="data"):
    """Plot real targets vs. classifier predictions on one figure.

    Blocks until the matplotlib window is closed (plt.show()).
    """
    # b: blue g: green r: red c: cyan m: magenta y: yellow k: black w: white
    l1 = plt.plot(y)
    l2 = plt.plot(classifier.predict(X))
    plt.setp(l1, label='Real', color='b', lw=1, ls='-', marker='+', ms=1.5)
    plt.setp(l2, label='Prediction', color='r',
             lw=1, ls='--', marker='o', ms=1.5)
    plt.title(title)
    plt.ylabel("Target")
    plt.xlabel("Sample Number")
    plt.legend()
    plt.show()
def plotchange(Window, classifier, X, y, title="Model Vs Real"):
    """Draw real targets vs. predictions onto Window.a1 (an Axes-like
    object embedded in a GUI) instead of a standalone figure."""
    # b: blue g: green r: red c: cyan m: magenta y: yellow k: black w: white
    l1 = Window.a1.plot(y, label='Real', color='b',
                        lw=1, ls='-', marker='+', ms=1.5, zorder=3)
    l2 = Window.a1.plot(classifier.predict(X), label='Prediction', color='r',
                        lw=1, ls='--', marker='o', ms=1.5, zorder=3)
    # Window.a1.setp(l1, )
    # Window.a1.setp(l2, )
    Window.a1.set_title(title)
    Window.a1.set_ylabel("Target")
    Window.a1.set_xlabel("Sample/Day Number")
    Window.a1.legend()
    Window.a1.grid()
    # Window.a1.show()
def fit_company_change(name):
    """Build the scaled feature matrix and target vector for company *name*.

    Pipeline: load series via ProperReturn, nonlinear expansion,
    degree-4 polynomial features, then standard scaling fitted on the
    full matrix.

    Returns: (X_scaled, y).
    """
    #2337, 2330, 6223, 6220
    X_close, X_index, X_close_index, y_close, y_index, y_close_index = ProperReturn(
        name)
    X = X_close_index
    y = y_close_index
    X = nonlin_comp(X)
    X = poly_pre(X, 4)
    # Fix: a train_test_split call here was dead code -- its results
    # were immediately overwritten by the assignment below -- so it has
    # been removed.  Train and test deliberately alias the full matrix.
    X_train = X_test = X
    X_today = X[-1:, :].reshape(-1, X.shape[1])
    X_train, X_test, X_today = scale_data(
        X_train, X_test, X_today, "StandardScaler")
    return X_train, y
def _fit_and_report(model, X_train, X_test, y_train, y_test):
    """Fit *model*, print its train/test scores, and return it."""
    classifier = model.fit(X_train, y_train)
    print("train score: ", classifier.score(X_train, y_train),
          "test score: ", classifier.score(X_test, y_test))
    return classifier


def fit_algo(X_train, X_test, y_train, y_test, algotype="Ridge"):
    """fit the given algorithm to given data, returns an object of type classifier

    Known *algotype* names select a preconfigured model; "RidgeCV"
    cross-validates over alphas -1.00..9.99; any other name falls back
    to a manual Ridge alpha sweep over -10.00..9.99.
    """
    print("using:", algotype)
    # Dispatch table replaces the original duplicated if/elif branches,
    # each of which repeated the same fit-print-return sequence.
    models = {
        "Ridge": lambda: Ridge(alpha=0.1, max_iter=20000),
        "Lasso": lambda: Lasso(alpha=0.1, max_iter=20000),
        "LinearRegression": LinearRegression,
        "Ridge1": lambda: Ridge(alpha=1, max_iter=20000),
        "Ridge0.1": lambda: Ridge(alpha=0.1, max_iter=20000),
        "Ridge0.01": lambda: Ridge(alpha=0.01, max_iter=20000),
    }
    if algotype in models:
        return _fit_and_report(models[algotype](),
                               X_train, X_test, y_train, y_test)
    if algotype == "RidgeCV":
        cv_array = [float(float(i) / 100.0) for i in range(-100, 1000)]
        # Alpha 0 is invalid for RidgeCV; replace it with 0.1.
        cv_array[cv_array.index(0)] = 0.1
        return _fit_and_report(RidgeCV(cv_array),
                               X_train, X_test, y_train, y_test)
    # Fallback: manual alpha sweep, keeping the alpha with the best
    # training score.
    cv_array = [float(float(i) / 100.0) for i in range(-1000, 1000)]
    print(max(cv_array), min(cv_array))
    train_list = []
    test_list = []
    for alpha in cv_array:
        classifier = Ridge(alpha=alpha, max_iter=20000).fit(X_train, y_train)
        train_list.append(classifier.score(X_train, y_train))
        test_list.append(classifier.score(X_test, y_test))
    optimal_score = max(train_list)
    optimal_score_index = train_list.index(optimal_score)
    final_alpha = cv_array[optimal_score_index]
    print("alpha:", final_alpha, optimal_score, test_list[optimal_score_index])
    # Fix: the original built Ridge(final_alpha) but returned the
    # classifier from the *last* sweep iteration; refit with the best
    # alpha and return that model instead.
    return Ridge(alpha=final_alpha, max_iter=20000).fit(X_train, y_train)
def fit_neural_network(X_train, X_test, y_train, y_test, activation="relu",
                       network_structure=(10, 10), learn_rate=0.001, iter=20000):
    """Fit an MLP regressor and print its train/test scores.

    Keyword Arguments:
    - activation -- hidden-layer activation function.
    - network_structure -- tuple of hidden-layer sizes.
    - learn_rate -- initial learning rate.
    - iter -- maximum number of iterations (name kept for backward
      compatibility even though it shadows the builtin).

    Returns: the fitted MLPRegressor.
    """
    # Fix: the activation argument was accepted but never forwarded to
    # MLPRegressor, so callers could not actually change it.
    NN = MLPRegressor(hidden_layer_sizes=network_structure,
                      activation=activation,
                      learning_rate_init=learn_rate, max_iter=iter)
    classifier = NN.fit(X_train, y_train)
    print("The score for train set is: {}".format(
        classifier.score(X_train, y_train)))
    print("The score for test set is: {}".format(
        classifier.score(X_test, y_test)))
    return classifier
def ProperReturn(name):
    """Load price series for company *name* and build feature/target arrays.

    Returns: X_close, X_index, X_close_index, y_close, y_index,
    y_close_index -- all float64 numpy arrays.

    NOTE(review): assumes stk.csv_to_df returns (close, index,
    close_index) tables with the target in column 0 and a companion
    series in column 1 of close_index -- confirm against the stk module.
    """
    close, index, close_index = stk.csv_to_df(name)
    close = np.array(close, dtype=np.float64)
    index = np.array(index, dtype=np.float64)
    close_index = np.array(close_index, dtype=np.float64)
    # Drop the first row so targets align with "next day" values.
    y_close = close[1:, :]
    y_index = index[1:, :]
    close_index = close_index[1:, :]
    collength = y_close.shape[0]
    # Simple time index used as the feature for the single-series fits.
    X_close = X_index = np.arange(0, collength).reshape(-1, 1)
    y_close_index = close_index[:, 0].reshape(-1, 1)
    X_close_index = close_index[:, 1].reshape(-1, 1)
    X_close_index = np.c_[X_close_index,
                          np.arange(0, collength).reshape(-1, 1)]
    return X_close, X_index, X_close_index, y_close, y_index, y_close_index
def fit_company(name):
    """Fit a regression model for company *name* and show prediction plots.

    Pipeline: load series, nonlinear expansion, degree-4 polynomial
    features, shuffled train/test split, per-column standard scaling,
    RidgeCV fit; prints today's predicted price and blocks on two plots.

    Returns: the fitted classifier.
    """
    #2337, 2330, 6223, 6220
    X_close, X_index, X_close_index, y_close, y_index, y_close_index = ProperReturn(
        name)
    X = X_close_index
    y = y_close_index
    X = nonlin_comp(X)
    X = poly_pre(X, 4)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, shuffle=True, stratify=None, test_size=0.1, random_state=42)
    # The last (most recent) row is "today"; predict its target below.
    X_today = X[-1:, :].reshape(-1, X.shape[1])
    X_train, X_test, X_today = scale_data(
        X_train, X_test, X_today, "StandardScaler")
    # classifier = fit_neural_network(
    #     X_train, X_test, y_train.ravel(), y_test.ravel(), network_structure=(10, 10, 10), activation="relu")
    classifier = fit_algo(X_train, X_test, y_train,
                          y_test, algotype="RidgeCV")
    print("for ", str(name), " Todays's Price is: ",
          str(classifier.predict(X_today)))
    plot(classifier, X_train, y_train, "train plot")
    plot(classifier, X_test, y_test, "test plot")
    return classifier
def predict_all():
    """Fit a model for every company in the default watch list and
    return the fitted classifiers."""
    #2337, 2330, 6223, 2867
    company_list = [2337, 2330, 6223, 2867]
    return [fit_company(company) for company in company_list]
def main():
    """Fit all companies, then demo the feature-matrix builder.

    Returns: the list of fitted classifiers.
    """
    #2337, 2330, 6223, 2867
    # Fix: the body duplicated predict_all() line-for-line; reuse it.
    clasifier_list = predict_all()
    fit_company_change(2337)
    return clasifier_list
# Run the full pipeline only when executed as a script.
if __name__ == "__main__":
    main()
|
# Created by jongwonkim on 25/06/2017.
import os
import logging
import json
import re
from src.dynamodb.intents import DbIntents
log = logging.getLogger()
log.setLevel(logging.DEBUG)
db_intents = DbIntents(os.environ['INTENTS_TABLE'])
def compose_validate_response(event):
    """Collect @-mentions from the Mate slot and build the next Lex response.

    Mutates event['intents'] (current_intent, mates) and returns either a
    ConfirmIntent response (mates collected) or a Delegate response.
    """
    event['intents']['current_intent'] = 'InviteMate'
    slot_text = event['currentIntent']['slots']['Mate']
    if slot_text:
        # Pull every @name mention out of the slot, de-duplicated in order.
        for mate in re.findall(r'@([A-Z1-9]\w+)', slot_text):
            if mate not in event['intents']['mates']:
                event['intents']['mates'].append(mate)
    if event['intents']['mates']:
        # Keep confirming so the user can add more mates, and store the
        # running list in the db session.
        return {
            'sessionAttributes': event['sessionAttributes'],
            'dialogAction': {
                'type': 'ConfirmIntent',
                "intentName": "InviteMate",
                'slots': {'Mate': event['intents']['mates'][0]},
            },
        }
    # No mates collected yet: let Lex elicit the slot.
    return {
        'sessionAttributes': event['sessionAttributes'],
        'dialogAction': {
            'type': 'Delegate',
            'slots': {'Mate': None},
        },
    }
# End of the InviteMate intention moves to the CreateChannel intention.
def compose_fulfill_response(event):
    """Advance the conversation from InviteMate to the ReserveLounge intent.

    Mutates event['intents'] and returns an ElicitSlot response asking
    for the Lounge slot.
    """
    event['intents']['current_intent'] = 'ReserveLounge'
    dialog_action = {
        'type': 'ElicitSlot',
        'intentName': 'ReserveLounge',
        'slotToElicit': 'Lounge',
        'slots': {'Lounge': None},
    }
    return {'sessionAttributes': event['sessionAttributes'],
            'dialogAction': dialog_action}
def retrieve_intents(event):
    """Load stored intent state for this team/channel into event['intents'].

    Raises when the event lacks sessionAttributes.
    NOTE(review): the message names team_id/channel_id but the check is
    only for the sessionAttributes key that carries them -- confirm the
    wording is intended.
    """
    if 'sessionAttributes' not in event:
        raise Exception('Required keys: `team_id` and `channel_id` are not provided.')
    event['intents'] = db_intents.retrieve_intents(
        event['sessionAttributes']['team_id'],
        event['sessionAttributes']['channel_id']
    )
def store_intents(event):
    """Persist the accumulated intent state keyed by team and channel."""
    session = event['sessionAttributes']
    return db_intents.store_intents(
        keys={'team_id': session['team_id'],
              'channel_id': session['channel_id']},
        attributes=event['intents'],
    )
def handler(event, context):
    """Lambda entry point for the InviteMate intent.

    Loads stored intent state, routes on the user's confirmation status,
    persists the updated state, and always returns a response dict.
    """
    log.info(json.dumps(event))
    response = {
        "statusCode": 200
    }
    try:
        retrieve_intents(event)
        if event['currentIntent'] is not None and event['currentIntent']['confirmationStatus'] == 'Denied':
            # Terminating condition.
            response = compose_fulfill_response(event)
        else:
            # Processing the user input.
            response = compose_validate_response(event)
        store_intents(event)
    except Exception as e:
        # Top-level boundary: report any failure as a 400 payload.
        response = {
            "statusCode": 400,
            "body": json.dumps({"message": str(e)})
        }
    finally:
        log.info(response)
    return response
|
# -*- coding: utf-8 -*-
# @Time : 2020/9/16 0:25
# @Author : MA Ziqing
# @FileName: sql_cli.py.py
#
# import os
# import sys
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
# from sqlbase.sql_table_base import QualityIndicator, OutputDB, Result1, Result2
#
#
# class DataBaseSqlClient(object):
# def __init__(self, config_dict):
# user = config_dict['user'] # 'sa'
# dbname = config_dict['dbname'] # 'YZSC'
# host = config_dict['host'] # '166.111.42.116'
# password = config_dict['password'] # '123456'
# # mysql + pymysql: // < username >: < password > @ < host > / < dbname > charset = utf8
# self._db_path_2 = 'mssql+pymssql://{}:{}@{}/{}'.format(user, password, host, dbname)
# self._engine = create_engine(self._db_path_2, echo=False)
#
# def get_quality_indicator_data(self):
# Session = sessionmaker(bind=self._engine)
# session = Session()
# query = session.query(QualityIndicator)
# query.filter(QualityIndicator.value1 > 50)
# return query
#
# def write_one_row_into_output_result1(self, row):
# Session = sessionmaker(bind=self._engine)
# session = Session()
# last_row = session.query(Result1).order_by(Result1.id.desc()).first()
# res = Result1()
# if last_row:
# res.id = last_row.id + 1
# else:
# res.id = 0
# res.json = row['json']
# res.state = row['state']
# res.type = row['type']
# session.add(res)
# session.commit()
#
# def write_one_row_into_output_result2(self, row):
# Session = sessionmaker(bind=self._engine)
# session = Session()
# res = Result2(resultId=row['id'],
# json=row['json'],
# state=row['state'],
# type=row['type'])
# session.add(res)
# session.commit()
#
#
# def test():
# data_base_cli = DataBaseSqlClient()
# query = data_base_cli.get_quality_indicator_data()
# for qi in query:
# print(qi)
#
#
# if __name__ == '__main__':
# test()
|
"""
Contains i/o-related functions.
Public Functions:
- build_dict_string -- converts a dictionary into a string
equivalent (i.e., the literal representation of the dictionary
in code).
- clear_screen -- clears the screen (if supported by the console).
- del_from_list -- deletes all items from a list that meet
specified criteria.
- confirm -- asks the user to confirm a choice.
- file_create -- creates a file specified by the user.
- file_read -- opens and reads data from a specified file.
- file_write -- writes data to a specified file.
- get_filename_open -- asks the user for the name of a file to open.
- get_input -- prints a prompt and gets a string, int or float from
the user.
- goodbye_screen -- prints a goodbye message
- menu -- prints a menu and gets a choice from the user.
- print_block -- prints or returns a string broken at a specified
column width.
- print_status -- prints a status message.
- welcome_screen -- prints an initial screen.
- yes_no -- gets the user's answer to a yes/no question.
Private Functions:
- _menu_build_display_list -- builds the list for a menu.
- _menu_build_prompt -- builds the prompt for a menu.
- _menu_display -- displays a menu.
- _menu_evaluate_response -- checks the response to a menu.
 - _menu_get_response -- gets a response to a menu.
- _z_exc -- generic exception handler.
---------------------------------------------------------------------
"""
# Import sys.
import sys
def _z_exc(loc, err):
"""
Catch-all exception handler.
Arguments:
- loc -- string naming the module/function/method in which the
exception occurred.
- err -- the exception string.
Returns: nothing (exits program).
-----------------------------------------------------------------
"""
# Print error information.
print("An interal error occurred in " + loc + ": ", err)
sys.exit(
"Please report the above error and the circumstances which caused it " +
"to the developer.")
return
# end function
# Other imports.
try:
import csv
import os
import re
import str_utils
import wl_resource
except Exception as err:
_z_exc("io_utils.py/module imports", err)
# end try
def build_dict_string(dic):
    """
    Builds and returns a string representation of a dictionary.

    Each key and value is rendered with str() rather than via str() on
    the whole dictionary, because the latter uses each item's __repr__,
    which differs from __str__ for types such as datetime.

    Arguments:
    - dic -- the dictionary to process.

    Returns: the string representing the dictionary.
    -----------------------------------------------------------------
    """
    try:
        # An empty dictionary renders as a bare pair of braces.
        if not dic:
            return "{}"
        # end if
        items = (str(key) + ": " + str(value) for key, value in dic.items())
        return "{" + ", ".join(items) + "}"
    except Exception as err:
        _z_exc("io_utils.py/build_dict_string", err)
    # end try
# end function
def clear_screen():
    """
    Clears the screen.

    Arguments: None.

    Returns: Nothing.
    -----------------------------------------------------------------
    """
    try:
        # Marker line for terminals/shells that refuse the system call;
        # where the call works, this line disappears instantly.
        print(
            "SCREENCLEARSHERE SCREENCLEARSHERE SCREENCLEARSHERE")
        command = "cls" if os.name == "nt" else "clear"
        os.system(command)
        return
    except Exception as err:
        _z_exc("io_utils.py/clear_screen", err)
    # end try
# end function
def del_from_list(lst, condition, index=None, attr=None):
    """
    Deletes all items in a list that meet a condition.

    Arguments:
    - lst -- the list from which to delete items.
    - condition -- the collection of matching values; an item is
       deleted when the inspected value is in *condition*.

    Keyword Arguments:
    - index -- subitem within the item to search.
    - attr -- object attribute to search.

    Returns: the number of items deleted.
    -----------------------------------------------------------------
    """
    try:
        deleted = 0
        # Walk backwards so deletions don't shift unvisited positions.
        for pos in reversed(range(len(lst))):
            item = lst[pos]
            if index is not None:
                matched = item[index] in condition
            elif attr is not None:
                matched = getattr(item, attr) in condition
            else:
                matched = item in condition
            # end if
            if matched:
                del lst[pos]
                deleted += 1
        # end for
        return deleted
    except Exception as err:
        _z_exc("io_utils.py/del_from_list", err)
    # end try
# end function
def confirm(prompt, line_length=80):
    """
    Asks the user to confirm a decision.

    Arguments:
    - prompt -- descriptive text of what to confirm.
    - line_length -- the width of the screen in characters (default
       80)

    Returns: True if the user confirms, otherwise False.
    -----------------------------------------------------------------
    """
    try:
        # Unlike the yes_no function, confirm doesn't loop seeking a
        # valid answer. It treats "Y" (or "y") as confirmation, and
        # any other input as non-confirmation.
        #
        # Print the header line, alternating "-" and "=" to fill the
        # screen width.
        print("\n-=-=-{Confirm}", end="")
        char = "-"
        for n in range(15, line_length):
            print(char, end="")
            if char == "-":
                char = "="
            else:
                char = "-"
            # end if
        # end for
        print()
        # Get the response. If it isn't a "y", assume the answer is no.
        response = input(
            print_block(
                "Are you sure you want to " + prompt + "? [Y/N]: ",
                lf=False, ret_str=True, line_length=line_length))
        # Case-insensitive match on a leading "y".
        if re.match(r"y", response, re.I):
            return True
        else:
            return False
        # end if
    except Exception as err:
        _z_exc("io_utils.py/confirm", err)
    # end try
# end function
def file_create(filetype="txt", line_length=80):
    """
    Creates a new file/overwrites an existing file to store data.

    Keyword Arguments:
    - filetype -- the type (extension) of file to create (default
       "txt").
    - line_length -- the width of the screen in characters (default
       80).

    Returns: The name of the file created, or an empty string if
    unsuccessful; and a bool, which is True only if the named file
    exists and the user wants to open it.
    -----------------------------------------------------------------
    """
    try:
        fname = ""
        while not fname:
            fname = get_input(
                prompt="Please enter a name for the new file (press [ENTER] " +
                "to go back).\n", must_respond=False)
            if not fname:
                # Nothing entered; offer to abandon file creation.
                go_back = yes_no(
                    "You did not enter a name. Do you want to go back?")
                if go_back:
                    # User wants to exit. Return an empty string.
                    return "", False
                # end if
                # Loop back to getting a filename.
                continue
            # end if
            # If the user did not type the extension, add it.  Using
            # endswith (instead of slicing four characters) handles
            # extensions of any length.
            if not fname.lower().endswith("." + filetype):
                fname += "." + filetype
            # end if
            # Check to see if the file exists.
            if os.path.isfile(fname):
                # If the file exists, ask the user if they want to use
                # the file, overwrite the file, or create a new file
                # with a different name.
                print_status(
                    "Warning", f"{fname} already exists.", go=True,
                    line_length=line_length)
                choice = menu(
                    ["Open this file.", "Replace this file",
                     "Create a new file with a different name"],
                    confirm=True, keystroke=True,
                    keystroke_list=["O", "R", "C"], lines=True)
                if choice == 0:
                    # The user chose to quit.
                    return "", False
                elif choice == 1:
                    # The user chose to open this file. Return the
                    # filename and flag to open the file.
                    return fname, True
                elif choice == 2:
                    # The user chose to replace the existing file.
                    # Return the filename (as though the file were
                    # being created).
                    return fname, False
                else:
                    # User chose to try a different name. Clear fname
                    # and loop back.
                    fname = ""
                    continue
                # end if
            else:
                # File doesn't exist (good). Test-create the file just
                # to make sure the name is writable.  (Note: this
                # leaves a file containing "TEST" for the caller to
                # overwrite.)
                try:
                    with open(fname, "w", newline="") as data_file:
                        data_file.write("TEST")
                    # end with
                    return fname, False
                except Exception as err:
                    print_status(
                        "Error",
                        f"An error occured while creating the file: {err}",
                        line_length=line_length)
                    # BUG FIX: the original returned (exited) when the
                    # user answered YES to "try again"; only bail out
                    # when the answer is no.
                    if not yes_no("Do you want to try again?"):
                        # User wants to exit.
                        return "", False
                    # end if
                    # Loop back to getting a filename.
                    continue
                # end try
            # end if
        # end while
        # Unless something went wrong along the way, return the file
        # name.
        return fname, False
    except Exception as err:
        _z_exc("io_utils.py/file_create", err)
    # end try
# end function
def file_read(fname, filetype="txt", line_length=80):
    """
    Opens and reads a file.

    Arguments:
    - fname -- the name of the file to open.

    Keyword Arguments:
    - filetype -- the extension of the file to open or create
       (default txt).
    - line_length -- the width of the screen in characters (default
       80).

    Returns: a data element. For a txt file, a list containing the
    lines of text in the file; for a csv file, a list of dictionaries
    containing the data in the file. If the open or read operation
    failed, an empty list.
    -----------------------------------------------------------------
    """
    try:
        with open(fname, "r", newline="") as data_file:
            # Dispatch on the declared file type.
            if filetype == "txt":
                return list(data_file)
            # end if
            if filetype == "csv":
                return list(csv.DictReader(data_file, delimiter=","))
            # end if
            # Unknown file type: report it and return nothing.
            print_status(
                "Error", "Unrecognized file type.", line_length=line_length)
            return []
        # end with
    except Exception as err:
        print_status(
            "Error", f"An error occured while reading the file: {err}",
            line_length=line_length)
        return []
    # end try
# end function
def file_write(fname, filetype, data_list, fieldnames=None, line_length=80):
    """
    Opens a file and writes data to it.

    Arguments:
    - fname -- the name of the file to open.
    - filetype -- the type of file (the function does not check to
       ensure that the file type matches the extension).
    - data_list -- the data to write: lines of text for a txt file,
       dictionaries for a csv file.

    Keyword Arguments:
    - fieldnames -- for csv files, the field names of the dictionary
       to be written.
    - line_length -- the width of the screen in characters (default
       80).

    Returns: True if the open/write operation succeeded, False
    otherwise.
    -----------------------------------------------------------------
    """
    try:
        with open(fname, "w", newline="") as out:
            if filetype == "txt":
                # Plain text: dump the lines as-is.
                out.writelines(data_list)
            else:
                # Anything else is treated as csv: header row first,
                # then one row per dictionary.
                writer = csv.DictWriter(out, fieldnames=fieldnames)
                writer.writeheader()
                writer.writerows(data_list)
            # end if
        # end with
    except OSError as err:
        print_status(
            "Warning", f"Error writing log file: {err}",
            line_length=line_length)
        return False
    # end try
    return True
# end function
def get_filename_open(filetype, line_length=80):
    """
    Gets the name of a file to open.

    On request, displays a list of files in the working directory
    and allows the user to choose one.

    Arguments:
    - filetype -- extension of the file to open.

    Keyword Arguments:
    - line_length -- the width of the screen in characters (default
       80).

    Returns: the name of the file to open, or an empty string if the
    user backs out.
    -----------------------------------------------------------------
    """
    try:
        ext = "." + filetype
        valid = False
        while not valid:
            prompt = (
                "Enter the name of the file you want to open, or [?] to see a " +
                "list of available files. Enter [Q] to go back.")
            fname = get_input(prompt=prompt)
            # If the user didn't enter anything, print a message and
            # loop back.
            if not fname:
                print_status(
                    "Error", "You did not enter anything.",
                    line_length=line_length)
                continue
            if fname.lower() == "q":
                # If the user backs out, set the filename to an empty
                # string and set the loop to end.
                fname = ""
                valid = True
            elif fname != "?":
                # If the user specified a filename but did not type
                # the extension, add it now.  (endswith handles
                # extensions of any length, unlike slicing.)
                if not fname.lower().endswith(ext):
                    fname += ext
                # end if
                # Then check to make sure the file exists.
                if os.path.isfile(fname):
                    # Set the loop to end with the complete filename.
                    valid = True
                else:
                    # Print a warning message and loop back.
                    print_status(
                        "Error", f"File {fname} not found.",
                        line_length=line_length)
                # end if
            else:
                # Scan the directory and present a menu of available
                # files.
                files = os.listdir()
                # Keep only files of the right filetype.  BUG FIX: the
                # original compared the last three characters to the
                # bare extension ("txt"), which missed uppercase names
                # and matched names merely ending in those letters.
                files = [f for f in files if f.lower().endswith(ext)]
                if not files:
                    # No matching files; go back to the previous menu.
                    print_status(
                        "Warning", "No available files found.",
                        line_length=line_length)
                    fname = ""
                    valid = True
                else:
                    # Show a menu of the available files, keyed by
                    # number.
                    choice = menu(
                        files, option_type="files", keystroke=True,
                        keystroke_list="#", lines=True)
                    if choice:
                        fname = files[choice - 1]
                        valid = True
                    # end if
                # end if
            # end if
        # end while
        return fname
    except Exception as err:
        _z_exc("io_utils.py/get_filename_open", err)
    # end try
# end function
def get_input(prompt, typ="str", must_respond=True, line_length=80):
    """
    Prompts for and gathers input from the user.

    Arguments:
    - prompt -- the prompt text to display.

    Keyword arguments:
    - typ -- the type of input to return: "str", "int" or "float"
       (default "str").
    - must_respond -- user must provide a response (default True).
    - line_length -- the width of the screen in characters (default
       80).

    Returns: the user's input; None if a specific type is required
    and the user's input cannot be converted to that type.
    -----------------------------------------------------------------
    """
    try:
        # Header line: the {Input} label continued by an alternating
        # -=- pattern out to line_length.
        header = "\n-=-=-{Input}" + ("-=" * line_length)[:max(0, line_length - 12)]
        while True:
            print(header)
            # Print the prompt text, then take input on its own line.
            print_block(prompt, line_length=line_length)
            response = input(">> ")
            # An empty response is only acceptable when must_respond
            # is off; otherwise complain and re-prompt.
            if response or not must_respond:
                break
            # end if
            print_status(
                "Error", "You did not enter anything.", line_length=line_length)
        # end while
        # Convert to the requested numeric type if asked; any other
        # type (including "str") gets the raw input back.
        converters = {"int": int, "float": float}
        if typ in converters:
            try:
                return converters[typ](response)
            except ValueError:
                return None
            # end try
        # end if
        return response
    except Exception as err:
        _z_exc("io_utils.py/get_input", err)
    # end try
# end function
def goodbye_screen(project_name, line_length=80):
    """
    Prints a thank you message.

    Arguments:
    - project_name -- the name of the project.

    Keyword Arguments:
    - line_length -- the width of the screen in characters (default
       80).

    Returns: Nothing
    -----------------------------------------------------------------
    """
    try:
        # Header: the {Goodbye} label continued by an alternating -=-
        # pattern out to line_length.
        print("-=-=-{Goodbye}" + ("-=" * line_length)[:max(0, line_length - 14)])
        # The message itself.
        print(f"Thanks for using {project_name}!")
        # Footer: a full-width alternating -=- line.
        print(("-=" * line_length)[:line_length])
        return
    except Exception as err:
        _z_exc("io_utils.py/goodbye_screen", err)
    # end try
# end function
def menu(
        options, option_type="options", confirm=False, keystroke=False,
        keystroke_list=None, match_case=False, multiple=False, lines=True,
        columns=1, col_dir="down", validate_all=False, show_help=False,
        help_text="", top_level=False, prompt="", quit_=True, nav=False,
        prev=False, nxt=False, header="", help_toggle=False, line_length=80):
    """
    Presents a menu and obtains a response from the user.

    Arguments:
    - options -- list of options from which to choose. It is the
       caller's responsibility to ensure that option strings are not
       longer than line_length (or line_length minus the 4 spaces
       needed to display the associated number or keystroke option);
       if they are, they will be truncated.

    Keyword Arguments:
    - option_type -- word or phrase describing the options, used to
       build a generic prompt; ignored if prompt is passed (default
       "options").
    - confirm -- asks the user to confirm his/her choice(s) before
       returning (default False).***
    - keystroke -- specifies selection by shortcut (default False).
    - keystroke_list -- list of shortcuts corresponding to the
       options; ignored if keystroke is False, unless it is "#",
       which specifies selection by number (default None, treated as
       an empty list).  (NOTE: "Q" is reserved for the quit option
       and cannot be included in the list).
    - match_case -- requires the user's response(s) to match the
       case of the option(s) (default False).
    - multiple -- allows the selection of multiple options (default
       False).
    - lines -- print each option on a separate line; ignored if
       columns > 1 (default True).
    - columns -- number of columns in which to arrange the options;
       can be 1 to 3 (default 1).***
    - col_dir -- direction in which to arrange the options in a
       multi-column display; ignored if columns = 1 (default
       "down").***
    - validate_all -- requires all choices made by the user to be
       valid options (default False)
    - show_help -- allows the user to choose to see a help screen
       before making his/her choice(s) (default False).***
    - help_text -- the help text to show the user; ignored if
       show_text is False (default empty string).***
    - top_level -- flag indicating a top-level menu (default False).
    - prompt -- the prompt to display (default empty string).
    - quit_ -- allows the user to quit/abort/go back (default True).
    - nav -- prints additional navigational choices; ignored unless
       the menu choices are numbered (default False).
    - prev -- prints the choice to move backwards; ignored if nav is
       False (default False).
    - nxt -- prints the choice to move forwards; ignored if nav is
       False (default False).
    - header -- prints a header line before the first option (but
       after the quit option, if quit_ is True) (default empty
       string). It is the caller's responsibility to ensure that
       the header is not longer than line_length; if it is, it will
       be truncated.
    - help_toggle -- enables menu to return "-h" as a command to
       toggle help (default False).
    - line_length -- the width of the screen in characters (default
       80).
    ***(Not currently implemented.)

    Returns:
     if quit_ is True and the user chooses to quit, 0;
     if multiple is False, a 1-based integer representing the
      user's choice;
     if multiple is True, a list of 1-based integers representing
      the user's choices.
     if nav is True and the user so enters, "-p" or "-n";
     if help_toggle is True and the user so enters, "-h".
    -----------------------------------------------------------------
    """
    try:
        # Normalize the keystroke list here rather than using a
        # mutable default argument (a list default is shared between
        # calls).  Behavior is unchanged for all callers.
        if keystroke_list is None:
            keystroke_list = []
        # end if
        # Make sure the column argument is valid.
        if not (0 < columns <= 3):
            columns = 1
        # end if
        # Disable nav unless the menu choices are numbered.
        if keystroke_list != "#":
            nav = False
        # end if
        # Build the menu prompt.
        prompt = _menu_build_prompt(prompt, multiple, option_type)
        # Build the menu display.
        display_list = _menu_build_display_list(
            options, option_type, prompt, keystroke, keystroke_list,
            line_length, multiple, quit_, top_level, header)
        # Run this part in a loop until a valid response is entered.
        while True:
            # Display the menu.
            _menu_display(
                display_list, prompt, lines, multiple, nav, prev, nxt,
                line_length)
            # Get a response. If the user didn't enter anything, loop
            # back.
            response_list = _menu_get_response(line_length)
            if response_list is None:
                continue
            # end if
            response_list = _menu_evaluate_response(
                response_list, options, keystroke, keystroke_list, multiple,
                validate_all, match_case, quit_, nav, help_toggle,
                line_length)
            if response_list is not None:
                return response_list
            # end if
        # end while
    except Exception as err:
        _z_exc("io_utils.py/menu", err)
    # end try
# end function
def print_block(string, line_length=80, lf=True, ret_str=False):
    """
    Takes a long string and prints it within a specified width.

    The string is split into words along spaces, hyphens, and the
    "¤" character, which this function treats as an explicit
    line-break marker.  Words are emitted until one would run past
    the end of the line, at which point a line break is inserted.

    Arguments:
    - string -- the string to print.

    Keyword Arguments:
    - line_length -- the desired line length (default 80).
    - lf -- print a line feed after the block (default True)
    - ret_str -- build and return the formatted text as a string
       instead of printing it (default False).

    Returns: if ret_str is True, the formatted string; else nothing.
    -----------------------------------------------------------------
    """
    try:
        if ret_str:
            r_str = ""
        # end if
        # Break the string into words, along spaces, hyphens, and the
        # "¤" line-break marker.  The capturing groups keep the
        # separators in the result list; groups that didn't match
        # appear as None and are filtered out below.
        word_list = re.split(r"(\s)|(-)|(¤)", string)
        col = 0
        for word in word_list:
            # Filter out None (unmatched separator groups).
            if word:
                # If the word is a newline marker, always print (or
                # add) a new line, and reset the column counter.
                if word == "¤":
                    if ret_str:
                        r_str += "\n"
                    else:
                        print()
                    # end if
                    col = 0
                # If there is EXACTLY one character left on the line--
                elif col == line_length - 1:
                    # A space or hyphen still fits: print or add it,
                    # then a new line, and reset the column counter.
                    if word in [" ", "-"]:
                        if ret_str:
                            r_str += word + "\n"
                        else:
                            print(word)
                        # end if
                        col = 0
                    # Anything else moves whole to the next line:
                    # print or add a new line, then the word, and
                    # reset the column counter.
                    else:
                        if ret_str:
                            r_str += "\n" + word
                        else:
                            print("\n" + word, end="")
                        # end if
                        col = len(word)
                    # end if
                # In all other cases--
                else:
                    # Print or add the word if it won't run past the end
                    # of the line, and increment the column counter.
                    if col + len(word) < line_length:
                        if ret_str:
                            r_str += word
                        else:
                            print(word, end="")
                        # end if
                        col += len(word)
                    # If it would run past the end of the line, print or
                    # add a new line, then the word, and reset the
                    # column counter.
                    else:
                        if ret_str:
                            r_str += "\n" + word
                        else:
                            print("\n" + word, end="")
                        # end if
                        col = len(word)
                    # end if
                # end if
            # end if
        # end for
        # Print or add a newline at the end if called for.
        if lf:
            if ret_str:
                r_str += "\n"
            else:
                print()
            # end if
        # end if
        # Return the string if called for; otherwise we're done.
        if ret_str:
            return r_str
        else:
            return
        # end if
    except Exception as err:
        _z_exc("io_utils.py/print_block", err)
    # end try
# end function
def print_status(msg_type, msg, go=False, line_length=80):
    """
    Prints a status or error message, optionally waits for the user
    to press [ENTER] to continue.

    Arguments:
    - msg_type -- the type of status to print (used as the header
       label, title-cased).
    - msg -- the message to print

    Keyword Arguments:
    - go -- return without waiting for the user (default False).
    - line_length -- the width of the screen in characters (default
       80)

    Returns: nothing.
    -----------------------------------------------------------------
    """
    try:
        # Header line: the label, continued by an alternating -=-
        # pattern.  The starting character depends on the label's
        # parity so the pattern stays consistent.
        head = "\n-=-=-{" + msg_type.title() + "}"
        fill = "-=" if len(head) % 2 == 1 else "=-"
        pad = max(0, line_length - (len(head) - 1))
        print(head + (fill * line_length)[:pad])
        # The message itself; wrap it if it is too wide for one line.
        if len(msg) < line_length:
            print(msg)
        else:
            print_block(msg, line_length=line_length)
        # end if
        # Footer: a full-width alternating -=- line.
        print(("-=" * line_length)[:line_length])
        # Optionally wait for the user.
        if not go:
            input("Press [ENTER] to continue.")
        # end if
        return
    except Exception as err:
        _z_exc("io_utils.py/print_status", err)
    # end try
# end function
def welcome_screen(project_no, project_name, line_length):
    """
    Clears the screen and prints introductory text.

    Arguments:
    - project_no -- the number of the project.
    - project_name -- the name of the project.
    - line_length -- the initial width of the screen in characters.

    Returns: Nothing
    -----------------------------------------------------------------
    """
    try:
        # Clear the screen.
        clear_screen()
        # Header: the {Welcome} label continued by an alternating -=-
        # pattern out to line_length.
        print("-=-=-{Welcome}" + ("-=" * line_length)[:max(0, line_length - 14)])
        # The welcome message.
        print(f"Treehouse Python Techdegree Project #{project_no}:")
        print(project_name)
        print("-" * line_length)
        print("Implemented by Steven Tagawa")
        # Footer: a full-width alternating -=- line.
        print(("-=" * line_length)[:line_length])
        return
    except Exception as err:
        _z_exc("io_utils.py/welcome_screen", err)
    # end try
# end function
def yes_no(prompt, clear=False, quit_=False, line_length=80):
    """
    Prompts the user to answer a yes or no question.

    Arguments:
    - prompt -- The question to be answered.

    Keyword arguments:
    - clear -- Clear the screen first (default False).
    - quit_ -- Allow the user to go back or quit (default False).
    - line_length -- the width of the screen in characters (default
       80)

    Returns: True if the user answers yes, False if no. If quit_ is
    True, will also return "-b" or "-q" if the user enters it.
    -----------------------------------------------------------------
    """
    try:
        # Clear the screen if applicable.
        if clear:
            clear_screen()
        # end if
        # Run in a loop until a valid response is obtained.
        while True:
            # Print the header line.
            print("\n-=-=-{Input}", end="")
            char = "-"
            for n in range(12, line_length):
                print(char, end="")
                if char == "-":
                    char = "="
                else:
                    char = "-"
                # end if
            # end for
            print()
            # If the user can quit or back out, print instructions.
            if quit_:
                wl_resource.print_nav(q=True, b=True)
                print("-" * line_length)
            # end if
            # Print the prompt and get a response.
            response = input(prompt + " [Y]/[N] >> ")
            # NOTE: response is the *string* being searched here, not
            # the regex pattern, so re.match cannot raise on odd user
            # input.  (The original wrapped these checks in a dead
            # try/except/else based on that misconception.)
            #
            # If it's possible for the user to quit and he/she does
            # so, just return that response.
            if quit_ and re.match(r"-(b|q)", response, re.I):
                return response
            # Otherwise, return True for yes and False for no.
            elif re.match(r"y", response, re.I):
                return True
            elif re.match(r"n", response, re.I):
                return False
            # end if
            # The response didn't match anything; print an error and
            # loop back.
            print_status(
                "Error", "That wasn't a 'yes' or a 'no'…",
                line_length=line_length)
        # end while
    except Exception as err:
        _z_exc("io_utils.py/yes_no", err)
    # end try
# end function
def _menu_build_display_list(
options, option_type, prompt, keystroke, keystroke_list, line_length,
multiple, quit_, top_level, header):
"""
Builds the contents of a menu.
Arguments:
- options -- the list of options to display.
- option_type -- a word or phrase describing the options, used
only if prompt is an empty string.
- prompt -- the menu prompt.
- keystroke -- allow menu choices by keystroke.
- keystroke_list -- a list of possible keystroke responses, or
"#", signifying responses by number.
- line_length -- the width of the screen in characters.
- multiple -- allow multiple responses.
- quit_ -- allows the user to quit.
- top_level -- flag indicating a top-level menu.
- header -- an optional header line.
Returns: a list of lines to be printed.
-----------------------------------------------------------------
"""
try:
display_list = []
# TODO: Implement help screen.
#
# If the option(s) is/are to be chosen by number, prefix numbers
# to the options; if by keyboard shortcut, prefix shortcuts.
# Otherwise just transfer the options unchanged.
if keystroke_list == "#":
if quit_:
if top_level:
display_list.append("[0] Quit\n" + ("-" * (line_length)))
else:
display_list.append(
"[0] Go Back\n" + ("-" * (line_length)))
# end if
# end if
if header:
display_list.append(header[:line_length])
# end if
for n, option in enumerate(options):
display_list.append(
"[" + str(n + 1) + "] " + option[:line_length - 4])
# end for
elif keystroke:
if quit_:
if top_level:
display_list.append("[Q] Quit\n" + ("-" * (line_length)))
else:
display_list.append(
"[Q] Go Back\n" + ("-" * (line_length)))
# end if
# end if
if header:
display_list.append(header[:line_length])
# end if
for n, option in enumerate(options):
display_list.append(
"[" + keystroke_list[n] + "] " + option[:line_length - 4])
# end for
else:
if quit_:
if top_level:
display_list.append("Quit\n" + ("-" * (line_length)))
else:
display_list.append("Go Back\n" + ("-" * (line_length)))
# end if
# end if
if header:
display_list.append(header[:line_length])
# end if
for option in options:
display_list.append(option[:line_length])
# end for
# end if
# Finally, add bottom separator.
display_list.append('-' * (line_length))
return display_list
except Exception as err:
_z_exc("io_utils.py/_menu_build_display_list", err)
# end try
# end function
def _menu_build_prompt(prompt, multiple, option_type):
"""
Builds a menu prompt.
Arguments:
- prompt -- the prompt to display.
- multiple -- whether multiple responses are permitted.
- option_type -- a word or phrase describing the options.
Returns: the final prompt.
-----------------------------------------------------------------
"""
try:
if prompt == "":
prompt = "Please select one "
if multiple:
prompt += "or more "
# end if
prompt += "of the following " + option_type + ":"
# end if
return prompt
except Exception as err:
_z_exc("io_utils.py/_menu_build_prompt", err)
# end try
# end function
def _menu_display(
display_list, prompt, lines, multiple, nav, prev, nxt, line_length):
"""
Displays a menu.
Arguments:
- display_list -- the list of options to display.
- prompt -- the menu prompt.
- lines -- print one option on each line.
- multiple -- allow multiple responses.
- nav - print navigation options.
- prev - print "previous" option; ignored if nav is False.
- nxt - print "next" option; ignored if nav is False.
- line_length - width of the screen in characters.
Returns: nothing.
-----------------------------------------------------------------
"""
try:
# Print the menu and get a response.
print(
"\n-=-=-{Input}", end="")
char = "-"
for n in range(12, line_length):
print(char, end="")
if char == "-":
char = "="
else:
char = "-"
# end if
# end for
print()
# TODO: Implement multi-column display.
if len(prompt) < line_length:
print(prompt, "\n")
else:
print_block(prompt)
print()
# end if
for n, option in enumerate(display_list):
if lines or (n >= len(display_list) - 2):
print(option)
else:
print(option, end=", ")
# end if
# end for
if multiple:
print("\nSeparate multiple choices with commas.")
# end if
# If necessary, print navigational options.
if nav and (prev or nxt):
print()
if prev:
print("[P] Previous, ", end="")
# end if
if nxt:
print("[N] Next", end="")
# end if
print()
# end if
return
except Exception as err:
_z_exc("io_utils.py/_menu_display", err)
# end try
# end function
def _menu_evaluate_response(
response_list, options, keystroke, keystroke_list, multiple, validate_all,
match_case, quit_, nav, help_toggle, line_length):
"""
Evaluates the response to a menu.
Arguments:
- response_list -- the user's response.
- options -- the list of options.
- keystroke -- enable response by keystroke.
- keystroke_list -- list of keystroke options, or "#",
signifying selection by number.
- multiple -- allows multiple responses.
- validate_all -- requires all responses to be valid; ignored
if multiple is False.
- match_case -- requires responses to match the case of the
options.
- quit_ -- allows the user to quit/go back.
- nav -- allows for navigational responses.
- help_toggle -- allows for a response toggling help.
- line_length -- the width of the screen in characters.
Returns:
if quit_ is True and the user chooses to quit, 0;
if multiple is False, an 1-based integer representing the
user's choice;
if multiple is True, a list of 1-based integers representing
the user's choices.
if nav is True and the user so enters, "-p" or "-n";
if help_toggle is True and the user so enters, "-h";
if the user did not enter any valid choices, or if validate_all
is True and and the user entered an invalid choice, None.
-----------------------------------------------------------------
"""
try:
invalid_list = []
# Check for navigational response.
if nav and (response_list[0].lower() in ["p", "n"]):
return response_list[0].lower()
# end if
# Check for help toggle.
if help_toggle and (response_list[0].lower() == "-h"):
return response_list[0].lower()
# end if
# Validate each choice.
for n in range(len(response_list)):
# Even if the menu presents options by number or keystroke,
# the user can always make a choice by typing the option
# itself (or just the beginning of the option). But since
# this can conflict with numeric and keystroke entries,
# don't do this check if the response is only one character
# long.
if len(response_list[n]) > 1:
# Because this block uses regex methods, first make sure
# that the user input is compilable. If it's not, skip
# this block entirely.
try:
resp_ci = re.compile(response_list[n], re.I)
resp = re.compile(response_list[n])
# If the user can quit and that is his/her response,
# return immediately (even if there are other
# responses). Note that for the quit option, the
# match_case argument is ignored.
if quit_ and re.match(resp_ci, "quit"):
return 0
# end if
# Otherwise, the response has to match the beginning
# of an option (possibly including case).
for x in range(len(options)):
if match_case:
# If the response matches the beginning of
# an option, replace the response with the
# integer.
if re.match(resp_ci, options[x]):
response_list[n] = x + 1
break
# end if
else:
# Same, ignoring case.
if re.match(resp, options[x]):
response_list[n] = x + 1
break
# end if
# end if
# end for
# If the response wasn't converted to an integer, it
# didn't match anything.
if type(response_list[n]) != int:
invalid_list.append(response_list[n])
response_list[n] = "*"
# end if
except Exception:
pass
# end try
# end if
# For number choices, just make sure that it's a number and
# that it's within the range of options.
elif keystroke_list == "#":
try:
response_list[n] = int(response_list[n])
except ValueError:
invalid_list.append(response_list[n])
response_list[n] = "*"
continue
# end try
# If the user can quit and that is his/her response,
# return immediately (even if there are other
# responses).
if quit_ and response_list[n] == 0:
return 0
else:
if response_list[n] > len(options):
invalid_list.append(response_list[n])
response_list[n] = "*"
# end if
# end if
# Keystroke responses need to be checked against the
# keystroke list.
elif keystroke:
# If the user can quit and the response is to quit,
# return immediately (even if there are other
# responses). Note that for the quit option, the
# match_case argument is ignored.
if quit_ and response_list[n].lower() == "q":
return 0
# end if
# Check if the response matches one of the options.
for x in range(len(keystroke_list)):
if match_case:
# If the response matches the option, replace
# the response with the integer.
if response_list[n] == keystroke_list[x]:
response_list[n] = x + 1
break
# end if
else:
# Same, ignoring case.
if (
response_list[n].lower() ==
keystroke_list[x].lower()):
response_list[n] = x + 1
break
# end if
# end if
# end for
# If the response wasn't converted to an integer, it
# didn't match anything.
if type(response_list[n]) != int:
invalid_list.append(response_list[n])
response_list[n] = "*"
# end if
# end if
# end for
# If there were invalid responses, remove the "*"s from the
# response list.
if invalid_list:
for n in range(len(response_list) - 1, -1, -1):
if response_list[n] == "*":
response_list.pop(n)
# end if
# end for
# end if
# TODO: Implement confirm block.
#
# If validate_all is True, the response is not valid if any
# choice is invalid. Otherwise, only return the valid
# choice(s).
# But if ALL the choices were invalid, do not return.
if (not validate_all) and (response_list):
# If multiple is True, return a list (even if it has only
# one element). If multiple is False, return only the
# first element (even if there are others).
if multiple:
return response_list
else:
return response_list[0]
# end if
else:
# Build the error message.
if len(invalid_list) == 1:
err_msg = str(invalid_list[0]) + " is not a valid option."
else:
err_msg = (
str_utils.comma_str_from_list(invalid_list) +
" are not valid options.")
# end if
# Print error message.
print_status("Error", err_msg, line_length=line_length)
return None
# end if
# end while
except Exception as err:
_z_exc("io_utils.py/_menu_evaluate_response", err)
# end try
# end function
def _menu_get_response(line_length):
    """
    Gets a user's response to a menu.

    Arguments:
    - line_length - the width of the screen in characters.

    Returns: the user's response(s) as a list, or None if the user
    didn't enter anything.
    -----------------------------------------------------------------
    """
    try:
        raw = input("\n>> ")
        # Commas (with optional trailing whitespace) separate multiple
        # choices.
        choices = re.split(r",\s*", raw)
        # An empty entry splits to [""]; treat it as no response.
        if choices == [""]:
            print_status(
                "Error", "You did not enter anything.",
                line_length=line_length)
            return None
        # end if
        return choices
    except Exception as err:
        _z_exc("io_utils.py/_menu_get_response", err)
    # end try
# end function
|
#! /usr/bin/env python3
from argparse import ArgumentParser, Namespace
import pathlib
import sys
from lib.app import App
from lib.config import ApplicationConfig, load_config
def main(opts: Namespace) -> int:
    """Load the configuration, build the app, and run it.

    Arguments:
    - opts -- parsed command-line options; opts.config is the path
      to the application config file.

    Returns a process exit status: 0 on a clean run, 1 on failure.
    """
    config: ApplicationConfig = load_config(opts.config)
    app = App(config)
    app.setup_routes()
    try:
        app.run_app()
        return 0
    except Exception as exc:
        # Report the failure instead of swallowing it silently; a bare
        # non-zero exit status gives the operator nothing to act on.
        print(f"application terminated with an error: {exc}", file=sys.stderr)
        return 1
def parse_args() -> Namespace:
    """Parse command-line options for the server launcher."""
    # Default config location, resolved to an absolute path relative
    # to the current working directory.
    default_config = pathlib.Path("config/config.json").resolve()
    parser = ArgumentParser()
    parser.add_argument(
        "-c",
        "--config",
        default=default_config,
        help="path to the server application config file",
    )
    return parser.parse_args()
# Script entry point: parse the command line, run the application,
# and exit with main()'s status code.
if __name__ == "__main__":
    opts = parse_args()
    sys.exit(main(opts))
|
import sys
from time import sleep
import pygame
from settings import Settings
from game_stats import GameStats
from scoreboard import Scoreboard
from button import Button
from ship import Ship
from bullet import Bullet
from alien import Alien
class AlienInvasion:
    """Overall class to manage game assets and behavior."""
    def __init__(self):
        """Initialize the game, and create game resources."""
        pygame.init() # initialize all pygame modules
        self.settings = Settings() # Initialize a settings object for the current game
        # Fullscreen mode at (0, 0) lets pygame pick the best display match
        self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)
        # record actual screen width from the created display surface
        self.settings.screen_width = self.screen.get_rect().width
        # record actual screen height from the created display surface
        self.settings.screen_height = self.screen.get_rect().height
        # set title of display (Screen)
        pygame.display.set_caption("Alien Invasion")
        # Create a GameStats instance to store game statistics,
        self.stats = GameStats(self)
        self.sb = Scoreboard(self) # Create a scoreboard.
        self.ship = Ship(self) # create a Ship instance
        # sprite groups holding the live bullets and aliens
        self.bullets = pygame.sprite.Group()
        self.aliens = pygame.sprite.Group()
        self._create_fleet() # create a fleet of Instances of alien objects
        # Make the Play button.
        self.play_button = Button(self, "Play")
    def run_game(self):
        """Start the main loop for the game."""
        while True:
            self._check_events() # check for keyboard or mouse presses
            if self.stats.game_active: # confirm game is running
                self.ship.update() # update ship instance based on user input
                self._update_bullets() # update bullet/s location based on user input
                self._update_aliens() # update alien/s location
            self._update_screen() # redraw the screen
    def _check_events(self):
        """Respond to keypresses and mouse events."""
        for event in pygame.event.get(): # get all messages and remove from the queue
            if event.type == pygame.QUIT: # reads exit condition
                sys.exit() # exit game
            elif event.type == pygame.KEYDOWN: # if a key on keyboard is pressed
                self._check_keydown_events(event) # respond to keypresses
            elif event.type == pygame.KEYUP: # check if a key is released
                self._check_keyup_events(event) # respond to key releases
            elif event.type == pygame.MOUSEBUTTONDOWN: # check if mouse button is pressed
                mouse_pos = pygame.mouse.get_pos() # get position of mouse cursor
                self._check_play_button(mouse_pos) # respond to mouse button pressed
    def _check_play_button(self, mouse_pos):
        """Start a new game when the player clicks Play."""
        # set to true if play button is clicked
        button_clicked = self.play_button.rect.collidepoint(mouse_pos)
        # play button is clicked and another game is not active
        if button_clicked and not self.stats.game_active:
            # Reset the game settings.
            self.settings.initialize_dynamic_settings()
            self.stats.reset_stats() # Reset the game statistics.
            self.stats.game_active = True # set game to active
            self.sb.prep_score() # Turn the score into a rendered image.
            self.sb.prep_level() # turn the level into a rendered image
            self.sb.prep_ships() # show how many ships are left
            # Get rid of any remaining aliens and bullets.
            self.aliens.empty() # remove all alien instances from game screen
            self.bullets.empty() # remove all bullet instances from game screen
            # Create a new fleet and center the ship.
            self._create_fleet() # create a fleet of Instances of alien objects
            self.ship.center_ship() # Center the ship on the screen
            pygame.mouse.set_visible(False) # Hide the mouse cursor.
    def _check_keydown_events(self, event):
        """Respond to keypresses."""
        if event.key == pygame.K_RIGHT: # if right arrow pressed
            self.ship.moving_right = True # move ship right
        elif event.key == pygame.K_LEFT: # if left arrow pressed
            self.ship.moving_left = True # move ship left
        elif event.key == pygame.K_q: # if q button pressed
            sys.exit() # exit game
        elif event.key == pygame.K_SPACE: # if space button pressed
            self._fire_bullet() # Create a new bullet and add it to the bullets group.
    def _check_keyup_events(self, event):
        """Respond to key releases."""
        if event.key == pygame.K_RIGHT: # right arrow released
            self.ship.moving_right = False # stop moving right
        elif event.key == pygame.K_LEFT: # left arrow released
            self.ship.moving_left = False # stop moving left
    def _fire_bullet(self):
        """Create a new bullet and add it to the bullets group."""
        # ensure max number of bullets is not surpassed
        if len(self.bullets) < self.settings.bullets_allowed:
            new_bullet = Bullet(self) # Instantiate new bullet
            self.bullets.add(new_bullet) # Add new bullet to list of bullets
    def _update_bullets(self):
        """Update position of bullets and get rid of old bullets."""
        # Update bullet positions.
        self.bullets.update()
        # Get rid of bullets that have disappeared.
        for bullet in self.bullets.copy(): # iterate a copy so removal is safe
            if bullet.rect.bottom <= 0: # if bullet has left the top of the screen
                self.bullets.remove(bullet) # remove bullet from list of all bullets
        self._check_bullet_alien_collisions() # Respond to bullet-alien collisions
    def _check_bullet_alien_collisions(self):
        """Respond to bullet-alien collisions."""
        # Remove any bullets and aliens that have collided.
        # Find all sprites that collide between bullets and aliens (doKill arguments = True).
        collisions = pygame.sprite.groupcollide(self.bullets, self.aliens, True, True)
        if collisions: # if collision occurs
            for aliens in collisions.values(): # each value is the list of aliens one bullet hit
                self.stats.score += self.settings.alien_points * len(aliens) # increase score
            self.sb.prep_score() # Turn the score into a rendered image.
            self.sb.check_high_score() # Check to see if there's a new high score
        if not self.aliens:
            # Destroy existing bullets and create new fleet.
            self.bullets.empty() # empty bullets list (remove all projectiles)
            self._create_fleet() # create a fleet of Instances of alien objects
            self.settings.increase_speed() # Increase speed settings and alien point values.
            self.stats.level += 1 # Increase level.
            self.sb.prep_level() # turn the level into a rendered image
    def _update_aliens(self):
        """
        Check if the fleet is at an edge,
          then update the positions of all aliens in the fleet.
        """
        self._check_fleet_edges() # Respond appropriately if any aliens have reached an edge.
        self.aliens.update() # update alien positions
        # Look for alien-ship collisions.
        if pygame.sprite.spritecollideany(self.ship, self.aliens):
            self._ship_hit() # Respond to the ship being hit by an alien
        # Look for aliens hitting the bottom of the screen.
        self._check_aliens_bottom() # Check if any aliens have reached the bottom of the screen.
    def _check_aliens_bottom(self):
        """Check if any aliens have reached the bottom of the screen."""
        screen_rect = self.screen.get_rect() # reference to screen
        # for each alien bitmap image
        for alien in self.aliens.sprites():
            if alien.rect.bottom >= screen_rect.bottom: # if alien is out of bounds
                # Treat this the same as if the ship got hit.
                self._ship_hit() # Respond to the ship being hit by an alien
                break # one hit is enough; exit loop
    def _ship_hit(self):
        """Respond to the ship being hit by an alien."""
        # lives are still remaining
        if self.stats.ships_left > 0:
            # Decrement ships_left, and update scoreboard.
            self.stats.ships_left -= 1 # decrement number of lives remaining
            self.sb.prep_ships() # Show how many ships are left.
            # Get rid of any remaining aliens and bullets.
            self.aliens.empty() # remove remaining aliens
            self.bullets.empty() # remove remaining bullets
            # Create a new fleet and center the ship.
            self._create_fleet() # create a fleet of Instances of alien objects
            self.ship.center_ship() # Center the ship on the screen
            # Pause.
            sleep(0.5) # sleep for half a second
        else: # no lives remaining
            self.stats.game_active = False # set game inactive
            pygame.mouse.set_visible(True) # set mouse pointer to visible
    def _create_fleet(self):
        """Create the fleet of aliens."""
        # Create an alien and find the number of aliens in a row.
        # Spacing between each alien is equal to one alien width.
        alien = Alien(self) # Instantiate alien
        alien_width, alien_height = alien.rect.size # Set alien size
        # space to left and right of aliens
        available_space_x = self.settings.screen_width - (2 * alien_width)
        # number of aliens per row (Integer value)
        number_aliens_x = available_space_x // (2 * alien_width)
        # Determine the number of rows of aliens that fit on the screen.
        ship_height = self.ship.rect.height # determine size of ship bmp
        # vertical space for aliens
        available_space_y = (self.settings.screen_height -
                             (3 * alien_height) - ship_height)
        # Number of rows [Column height] (Integer value)
        number_rows = available_space_y // (2 * alien_height)
        # Create the full fleet of aliens.
        for row_number in range(number_rows): # go through each row of aliens
            for alien_number in range(number_aliens_x): # each alien in current row
                # Create an alien and place it in the row.
                self._create_alien(alien_number, row_number)
    def _create_alien(self, alien_number, row_number):
        """Create an alien and place it in the row."""
        alien = Alien(self) # Instantiate alien
        alien_width, alien_height = alien.rect.size # Set alien size
        # set alien horizontal location
        alien.x = alien_width + 2 * alien_width * alien_number
        alien.rect.x = alien.x # set alien horizontal coordinates
        # set alien vertical coordinates
        alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
        self.aliens.add(alien) # add current alien to list of aliens
    def _check_fleet_edges(self):
        """Respond appropriately if any aliens have reached an edge."""
        for alien in self.aliens.sprites(): # traverse list of alien bmp images
            if alien.check_edges(): # if at edge of screen
                # Drop the entire fleet and change the fleet's direction
                self._change_fleet_direction()
                break # whole fleet already handled; exit loop
    def _change_fleet_direction(self):
        """Drop the entire fleet and change the fleet's direction."""
        for alien in self.aliens.sprites(): # traverse list of alien bmp images
            alien.rect.y += self.settings.fleet_drop_speed # move alien down the screen
        # inverse fleet direction to negative of current value
        self.settings.fleet_direction *= -1
    def _update_screen(self):
        """Update images on the screen, and flip to the new screen."""
        self.screen.fill(self.settings.bg_color) # paint screen to bg_color
        self.ship.blitme() # Draw the ship at its current location.
        for bullet in self.bullets.sprites(): # traverse list of bullet bmp images
            bullet.draw_bullet() # Draw the bullet to the screen.
        self.aliens.draw(self.screen) # draw aliens to screen
        # Draw the score information.
        self.sb.show_score() # Draw scores, level, and ships to the screen.
        # Draw the play button if the game is inactive.
        if not self.stats.game_active: # if game not active
            self.play_button.draw_button() # draw play button
        pygame.display.flip() # Update the full display Surface to the screen
if __name__ == '__main__':
    # Entry point: build the game object and hand control to its main loop.
    game = AlienInvasion()
    game.run_game()
|
# The following comments couldn't be translated into the new config version:
# Test storing OtherThing as well
# Configuration file for PrePoolInputTest
import FWCore.ParameterSet.Config as cms
# The Process object collects every source/producer/path for this cms job.
process = cms.Process("TESTBOTHFILES")
# Load the shared test fragment; presumably makes framework exceptions
# fatal so the test fails loudly -- see cmsExceptionsFatal_cff.
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
# Producer under test (stores OtherThing -- see header comment above).
process.OtherThing = cms.EDProducer("OtherThingProducer")
# Pool input with both a primary and a secondary file list.
process.source = cms.Source("PoolSource",
    secondaryFileNames = cms.untracked.vstring("file:PoolInputOther.root"),
    fileNames = cms.untracked.vstring("file:PoolInput2FileTest.root")
)
# Schedule the producer.
process.p = cms.Path(process.OtherThing)
|
"""
Alright. Here is the meat of the program.
This looks overwhelming, being a 400+ line file,
but the vast majority of it is just creating and sending
very repetitive embedded messages to run communications with the user.
"""
import discord
from discord.ext import commands
import datetime
import random # for embeds
# Import my personal addons from the Resources folder.
from Resources.Data import save_data
from Resources.CheckEmail import check_email
from Resources.Enums import PlayerType
from Resources.Sheets import Sheets
class Welcome(commands.Cog, name = "Welcome"):
    """
    The user join listener that walks a new member through verification.

    Flow: on join the member gets a DM prompt; each subsequent DM answers
    the first unanswered question, in this order:
        1. Email  2. First Name  3. Last Name  4. School
        5. Major  6. Gaming Platforms  7. Competitive or Casual
    Progress is persisted in ``self.bot.data['new_members']`` so that if
    the bot restarts, partially-completed registrations are not lost.
    """
    def __init__(self, bot):
        self.bot = bot
        print("Loaded Welcome Cog.")

    def _make_embed(self, title, description=None, example=None, step=None, author=None):
        """Build one of the bot's standard embeds.

        - description: optional embed description text.
        - example: optional value for an "Example" field.
        - step: when given, appends ' | [step/7]' to the footer text.
        - author: when given, a member whose name/avatar fill the author line.
        Honors self.bot.use_timestamp for the embed timestamp.
        """
        kwargs = {
            'title': title,
            'color': random.choice(self.bot.embed_colors),
        }
        if description is not None:
            kwargs['description'] = description
        if self.bot.use_timestamp:
            kwargs['timestamp'] = datetime.datetime.now(datetime.timezone.utc)
        embed = discord.Embed(**kwargs)
        if example is not None:
            embed.add_field(
                name = "Example",
                value = example
            )
        if author is not None:
            embed.set_author(
                name = author.name,
                icon_url = author.avatar_url
            )
        footer = self.bot.footer_text if step is None else f"{self.bot.footer_text} | [{step}/7]"
        embed.set_footer(
            text = footer,
            icon_url = self.bot.footer_icon
        )
        return embed

    async def _send_to_logs(self, embed):
        """Send an embed to every configured logs channel."""
        for log in self.bot.logs_channels:
            channel = self.bot.get_channel(log)
            await channel.send(embed = embed)

    # When a member joins the server, catch that event.
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """DM the verification prompt to a newly joined (non-bot) member."""
        if member.bot:
            return
        # Starting prompt: ask the member for their email.
        embed = self._make_embed(
            "Welcome to NAU Esports!",
            'To get started, please tell us your email.',
            example = "`your-email@nau.edu`",
            step = 1
        )
        await member.send(content = member.mention, embed = embed)
        # Tell the logs channel(s) that a user joined and was sent the prompt.
        await self._send_to_logs(self._make_embed(
            "User Joined",
            f"User {member.mention} has joined the server and has been sent the verification prompt.",
            author = member
        ))
        # Create the member's blank answer record in `new_members` and
        # persist it, so the data survives a bot restart.
        if not 'new_members' in self.bot.data.keys():
            self.bot.data['new_members'] = {}
        self.bot.data['new_members'][str(member.id)] = {
            "id": str(member.id),
            "name": str(member.name),
            "avatar_url": str(member.avatar_url),
            "email": None,
            "first_name": None,
            "last_name": None,
            "school": None,
            "major": None,
            "game_system": None,
            "type_of_player": None
        }
        save_data(self.bot.data_file, self.bot.data)

    # Passively listen for all messages to carry the dialogue forward.
    @commands.Cog.listener()
    async def on_message(self, message):
        """Advance a registering member to the next verification question."""
        # Only react to DMs from non-bots who are mid-verification.
        if (message.author.bot
                or not isinstance(message.channel, discord.DMChannel)
                or 'new_members' not in self.bot.data.keys()
                or str(message.author.id) not in self.bot.data['new_members'].keys()):
            return
        # `entry` aliases the member's record, so mutations below land in
        # self.bot.data before save_data writes it out.
        entry = self.bot.data['new_members'][str(message.author.id)]
        # Walk the questions in order; the first unanswered one is the
        # step this message is answering.
        if not entry['email']:
            # Validate with the helper from Resources/CheckEmail.py.
            if not check_email(message.content):
                await message.add_reaction('❌')
                await message.author.send(embed = self._make_embed(
                    "Invalid Email",
                    'Please make sure you send a valid email.',
                    example = "`your-email@nau.edu`",
                    step = 1
                ))
            else:
                entry['email'] = message.content
                # An nau.edu address (any case) answers the school question too.
                if message.content.split('@')[-1].lower() == 'nau.edu':
                    entry['school'] = 'NAU'
                save_data(self.bot.data_file, self.bot.data)
                await message.add_reaction('✅')
                await message.author.send(embed = self._make_embed(
                    "Email Registered",
                    'Next, please tell us your first name.',
                    example = "`Jon`",
                    step = 2
                ))
        elif not entry['first_name']:
            # Names may not contain digits.
            if any(char.isdigit() for char in message.content):
                await message.add_reaction('❌')
                await message.author.send(embed = self._make_embed(
                    "Invalid First Name",
                    'Your name can only contain letters.',
                    example = "`Jon`",
                    step = 2
                ))
            else:
                entry['first_name'] = message.content.lower().capitalize()
                save_data(self.bot.data_file, self.bot.data)
                await message.add_reaction('✅')
                await message.author.send(embed = self._make_embed(
                    "First Name Registered",
                    'Next, please tell us your last name.',
                    example = "`Smith`",
                    step = 3
                ))
        elif not entry['last_name']:
            # Names may not contain digits.
            if any(char.isdigit() for char in message.content):
                await message.add_reaction('❌')
                await message.author.send(embed = self._make_embed(
                    "Invalid Last Name",
                    'Your name can only contain letters.',
                    example = "`Smith`",
                    step = 3
                ))
            else:
                entry['last_name'] = message.content.lower().capitalize()
                save_data(self.bot.data_file, self.bot.data)
                await message.add_reaction('✅')
                if not entry['school']:
                    await message.author.send(embed = self._make_embed(
                        "Last Name Registered",
                        'Next, please tell us what school you attend.',
                        example = "`NAU`",
                        step = 4
                    ))
                else:
                    # School was auto-filled from the email: confirm the
                    # last name, then skip straight to the major question.
                    await message.author.send(embed = self._make_embed(
                        "Last Name Registered",
                        step = 4
                    ))
                    await message.author.send(embed = self._make_embed(
                        f"School Registered as `{entry['school']}`",
                        'Next, please tell us your major.',
                        example = "`Computer Science`",
                        step = 5
                    ))
        elif not entry['school']:
            # Open-ended answer.
            entry['school'] = message.content
            save_data(self.bot.data_file, self.bot.data)
            await message.add_reaction('✅')
            await message.author.send(embed = self._make_embed(
                "School Registered",
                'Next, please tell us your major.',
                example = "`Computer Science`",
                step = 5
            ))
        elif not entry['major']:
            entry['major'] = message.content
            save_data(self.bot.data_file, self.bot.data)
            await message.add_reaction('✅')
            await message.author.send(embed = self._make_embed(
                "Major Registered",
                'Next, please tell us what platforms you play games on.',
                example = "`PC, Xbox`",
                step = 6
            ))
        elif not entry['game_system']:
            entry['game_system'] = message.content
            save_data(self.bot.data_file, self.bot.data)
            await message.add_reaction('✅')
            await message.author.send(embed = self._make_embed(
                "Platforms Registered",
                'Lastly, please tell us whether you are a Casual or Competitive player.',
                example = "`Competitive`",
                step = 7
            ))
        elif not entry['type_of_player']:
            # The enum lookup raises ValueError when the answer is not a
            # valid PlayerType value.
            try:
                player_type = PlayerType(message.content.lower()).name
                # Set in memory only for now; written to disk at the end,
                # so a crash before completion lets the flow re-run after
                # a restart instead of silently skipping the side effects.
                entry['type_of_player'] = player_type
                # Grant the student or non-student role based on the school.
                guild = self.bot.get_guild(self.bot.guild_id)
                student_role = guild.get_role(self.bot.student_role_id)
                non_student_role = guild.get_role(self.bot.non_student_role_id)
                member = guild.get_member(message.author.id)
                if entry['school'] == 'NAU' and student_role not in member.roles:
                    await member.add_roles(student_role)
                elif entry['school'] != 'NAU' and non_student_role not in member.roles:
                    await member.add_roles(non_student_role)
                # Append the finished record to the Google Sheet
                # (connection configured in Resources/Sheets.py).
                sheet = Sheets(self.bot.credentials_file, self.bot.token_file)
                sheet.append_user(entry)
                # Final message: the member has completed verification.
                await message.author.send(embed = self._make_embed(
                    "Registration Complete!",
                    'Welcome to NAU Esports!\n\nIf you are a Campus Faculty Member at NAU please contact an Officer.'
                ))
                # Tell the logs channel(s) that verification finished.
                await self._send_to_logs(self._make_embed(
                    "User Verified",
                    f"User {member.mention} has finished verification.",
                    author = member
                ))
                # Drop the finished record (memory and file) so the
                # `new_members` store doesn't bloat unnecessarily.
                del self.bot.data['new_members'][str(message.author.id)]
                save_data(self.bot.data_file, self.bot.data)
            except ValueError:
                # Their input is not a valid player type: explain what is.
                await message.author.send(embed = self._make_embed(
                    "Invalid Player Type",
                    'Please specify `casual` or `competitive` (your answer must be one of those words).',
                    example = "`competitive`",
                    step = 7
                ))
# Setup. Adds the actual cog to the bot.
def setup(bot):
    """Add the Welcome cog to the bot (extension entry point)."""
    bot.add_cog(Welcome(bot))
|
from flask_wtf import Form
from wtforms import StringField, SubmitField, PasswordField
from wtforms.validators import Required
class NameForm(Form):
    """Login form: username and password text inputs plus a submit button.

    Both fields must be non-empty (`Required`).
    """
    # NOTE(review): wtforms' `Required` validator is deprecated in favor of
    # `DataRequired`; consider updating the import when touching this file.
    username = StringField('Username', validators=[Required()])
    password = PasswordField('Password', validators=[Required()])
    submit = SubmitField('Submit')
import pylab
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def f1(t, x, y, z):
    """dx/dt: y*(z - 1 + x^2) + gamma*x (gamma is a module-level parameter)."""
    return y * (z - 1 + x ** 2) + gamma * x
def f2(t, x, y, z):
    """dy/dt: x*(3z + 1 - x^2) + gamma*y (gamma is a module-level parameter)."""
    return x * (3 * z + 1 - x ** 2) + gamma * y
def f3(t, x, y, z):
    """dz/dt: -2z*(alpha + x*y) (alpha is a module-level parameter)."""
    return -2 * z * (alpha + x * y)
# Model parameters, read as globals by f1/f2/f3.  The equations match the
# Rabinovich-Fabrikant form, where gamma and alpha are the two constants.
gamma=0.03
alpha=0.02
# Initial state of the system.
x_initial=-0.67
y_initial=0.
z_initial=0.5
# Integration window and fixed RK4 step size.
t_initial=0
t_final=100
h=0.001
def Runge(x_initial, y_initial, z_initial, t_initial, h, t_final):
    """
    Integrate the 3-variable system (f1, f2, f3) with the classic 4th-order
    Runge-Kutta method from t_initial to t_final using fixed step h, then
    plot the three phase-plane projections and the X/Y/Z time series.

    Arguments: initial state (x, y, z), start time, step size, end time.
    Returns None; the result is shown via pylab.show().
    """
    time=list(np.arange(t_initial, t_final+h, h))
    # Solution histories, seeded with the initial condition.
    X=[x_initial]
    Y=[y_initial]
    Z=[z_initial]
    for i in range(1, len(time)):
        # Previous time and state.
        p=time[i-1]
        q=X[i-1]
        r=Y[i-1]
        s=Z[i-1]
        # kNM = h * (RK4 stage M slope for equation N; 1=x, 2=y, 3=z).
        # Stage 1: slopes at the current point.
        k11=h*f1(p, q, r, s)
        k21=h*f2(p, q, r, s)
        k31=h*f3(p, q, r, s)
        # Stage 2: slopes at the midpoint using stage-1 increments.
        k12=h*f1(p+h/2., q+k11/2., r+k21/2., s+k31/2.)
        k22=h*f2(p+h/2., q+k11/2., r+k21/2., s+k31/2.)
        k32=h*f3(p+h/2., q+k11/2., r+k21/2., s+k31/2.)
        # Stage 3: slopes at the midpoint using stage-2 increments.
        k13=h*f1(p+h/2., q+k12/2., r+k22/2., s+k32/2.)
        k23=h*f2(p+h/2., q+k12/2., r+k22/2., s+k32/2.)
        k33=h*f3(p+h/2., q+k12/2., r+k22/2., s+k32/2.)
        # Stage 4: slopes at the full step using stage-3 increments.
        k14=h*f1(p+h, q+k13, r+k23, s+k33)
        k24=h*f2(p+h, q+k13, r+k23, s+k33)
        k34=h*f3(p+h, q+k13, r+k23, s+k33)
        # Weighted RK4 combination: (k1 + 2*k2 + 2*k3 + k4) / 6.
        X+=[X[i-1]+(k11+2*k12+2*k13+k14)/6., ]
        Y+=[Y[i-1]+(k21+2*k22+2*k23+k24)/6., ]
        Z+=[Z[i-1]+(k31+2*k32+2*k33+k34)/6., ]
    # Alternative 3D trajectory plot, kept for reference:
##    fig=plt.figure()
##    ax=plt.axes(projection='3d')
##    ax.plot3D(X, Y, Z, 'green')
##    ax.set_xlabel('X')
##    ax.set_ylabel('Y')
##    ax.set_zlabel('Z')
##    plt.show()
    # 2x3 grid: three phase-plane projections, then three time series.
    pylab.subplot(3, 2, 1)
    pylab.plot(X, Y)
    pylab.xlabel('X')
    pylab.ylabel('Y')
    pylab.subplot(3, 2, 2)
    pylab.plot(X, Z, 'r-')
    pylab.xlabel('X')
    pylab.ylabel('Z')
    pylab.subplot(3, 2, 3)
    pylab.plot(Y, Z, 'g-')
    pylab.xlabel('Y')
    pylab.ylabel('Z')
    pylab.subplot(3, 2, 4)
    pylab.plot(time, X, 'y-')
    pylab.xlabel('time')
    pylab.ylabel('X')
    pylab.subplot(3, 2, 5)
    pylab.plot(time, Y, 'r-')
    pylab.xlabel('time')
    pylab.ylabel('Y')
    pylab.subplot(3, 2, 6)
    pylab.plot(time, Z)
    pylab.xlabel('time')
    pylab.ylabel('Z')
    pylab.show()
# Run the integration with the module-level parameters and show the plots.
Runge(x_initial, y_initial, z_initial, t_initial, h, t_final)
|
# Generated by Django 3.2.2 on 2021-05-25 11:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter userprofile.profile_picture: allow blank values and supply a
    default image path, stored under the 'userprofile' upload directory."""

    # Must be applied after the previous profile_picture alteration.
    dependencies = [
        ('accounts', '0005_alter_userprofile_profile_picture'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='profile_picture',
            field=models.ImageField(blank=True, default='static/user_image_default.png', upload_to='userprofile'),
        ),
    ]
|
# String/number practice script: case-flips a name, slices and replaces
# text, then reports the four basic arithmetic operations on two integers.

first_name = "sruthi"   # given name, stored lower-case
last_name = "VANGARA"   # family name, stored upper-case

# Greeting with the case of each name flipped.
print("Hello", first_name.upper(), last_name.lower())

# Two blank lines before the next section.
print()
print()

full_name = "sruthi vangara"   # first and last name together
sliced_text = slice(6)         # slice object covering the first six characters
print(full_name[sliced_text])  # prints "sruthi"

# Replace the last name with the school affiliation.
x = full_name.replace("vangara", "Walsh College Student")
print(x)

# Quotation printed verbatim, wrapped in double quotes.
print('"Start by doing what\'s necessary; then do what\'s possible; and suddenly you are doing the impossible - Francis of Assisi"')

var1 = 10
var2 = 5
addition = var1 + var2
subtraction = var1 - var2
multiplication = var1 * var2
division = var1 / var2

# One report line per arithmetic operation (same wording/spacing as before).
for verb, result in (("plus", addition),
                     ("minus", subtraction),
                     ("multiplied by", multiplication),
                     ("divided by", division)):
    print("numeric value of variable", var1, verb,
          "numeric value of variable", var2, "equals", result)

sq_root = multiplication ** 0.5   # square root of the product
print("value of square root that stored the result of multiplication",
      multiplication, "equals", sq_root)

current_month = "october"     # month name as a plain string
day_of_current_month = 8      # day of the month
# The "\t\t" literal tabs the month over twice, as in the original output.
print("Today is day", day_of_current_month, "\t\tof month", current_month)
# GitHub Repository link https://github.com/sruthivangara/SruthiV
|
from flask import Flask
from flask import request,render_template, Response,request
from Camera import Camera
import time
app = Flask(__name__)  # the WSGI application serving the stream endpoints
# Placeholder for the latest JPEG frame.  NOTE(review): gen() assigns its own
# local `frame`, so this module-level binding is never updated after startup.
frame = None
def gen():
    """Video streaming generator function.

    Yields multipart MJPEG parts forever, re-reading 'loaded.jpg' on every
    iteration so a new upload (see upload_file) is picked up automatically,
    throttled to roughly 10 frames per second.
    """
    while True:
        # Read the current frame; `with` closes the handle.  The original
        # called open(...).read() without closing, leaking one file
        # descriptor per loop iteration.
        with open('loaded.jpg', 'rb') as fh:
            frame = fh.read()
        time.sleep(0.1)  # throttle to ~10 fps
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/')
def index():
    # Serve the landing page; presumably templates/index.html embeds the
    # /video_feed stream in an <img> tag -- confirm against the template.
    return render_template('index.html')
@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    # The `boundary=frame` parameter must match the b'--frame' delimiter
    # emitted by gen().
    return Response(gen(),mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/upload', methods=['GET', 'POST'])
def upload_file():
    """Accept an image upload and store it as the streamed frame.

    POST: saves the uploaded 'file' form field to loaded.jpg -- the path
    gen() re-reads for every streamed frame -- and returns "upload".
    GET: returns a short usage hint.  (The original implicitly returned
    None on GET, which makes Flask raise and respond with a 500 even
    though GET is listed in `methods`.)
    """
    if request.method == 'POST':
        f = request.files['file']
        f.save('loaded.jpg')  # picked up by gen() on its next iteration
        return "upload"
    return "POST an image as form field 'file' to update the stream"
# Start Flask's development server on port 5001.
# NOTE(review): consider an `if __name__ == '__main__':` guard so importing
# this module does not start the server.
app.run(port=5001)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.