text stringlengths 38 1.54M |
|---|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
from anvil import shell as sh
class Helper(object):
    """Wrapper around the ``explode_envra`` helper tool, which splits RPM
    filenames into their component fields (one JSON object per filename)."""

    def __init__(self):
        # Locate the helper executable once; sh.which also searches "tools/".
        self._executable = sh.which("explode_envra", ["tools/"])

    def explode(self, *filenames):
        """Explode each filename via the external tool.

        :param filenames: package filenames (reduced to basenames before use)
        :returns: list of dicts, one per input, each JSON-decoded from the
            tool's output with an added ``origin`` key holding the filename
        :raises AssertionError: if the tool emitted more or fewer lines than
            the number of filenames given
        """
        if not filenames:
            return []
        cmdline = [self._executable]
        for filename in filenames:
            cmdline.append(sh.basename(filename))
        (stdout, _stderr) = sh.execute(cmdline)
        lines = stdout.splitlines()
        # Check for excess output *before* consuming the queue: previously this
        # check was unreachable because popleft() raised IndexError first.
        if len(lines) > len(filenames):
            diff = len(lines) - len(filenames)
            raise AssertionError("%s filenames appeared unexpectedly while"
                                 " exploding" % (diff))
        results = []
        # Pair each output line with its input, in order; anything left in
        # this queue afterwards was not echoed back by the tool.
        missing = collections.deque(filenames)
        for line in lines:
            decoded = json.loads(line)
            decoded['origin'] = missing.popleft()
            results.append(decoded)
        if missing:
            # Fixed duplicated word in the original message ("filenames names").
            raise AssertionError("%s filenames were lost during"
                                 " exploding: %s" % (len(missing),
                                                     list(missing)))
        return results
|
# Read an integer and report whether it is even or odd.
n1 = int(input('Digite o numero'))
if n1 % 2 == 0:
    print('O numero {} é par'.format(n1))
else:
    print('O numero {} é impar'.format(n1))
# Generated by Django 4.2.3 on 2023-07-07 18:26
from django.db import migrations, models


class Migration(migrations.Migration):
    # Auto-generated migration: relabels street2, makes the address/phone
    # "type" choices explicit (with a (None, "Type") placeholder), and
    # relabels the venue URL field. Do not hand-edit the operations.

    dependencies = [
        ("address_book", "0014_auto_20210720_1640"),
    ]

    operations = [
        migrations.AlterField(
            model_name="address",
            name="street2",
            field=models.CharField(blank=True, max_length=128, verbose_name="Unit / Apartment / Suite"),
        ),
        migrations.AlterField(
            model_name="address",
            name="type",
            field=models.CharField(
                blank=True, choices=[("H", "Home"), ("W", "Work"), ("O", "Other"), (None, "Type")], max_length=1
            ),
        ),
        migrations.AlterField(
            model_name="phonenumber",
            name="type",
            field=models.CharField(
                blank=True,
                choices=[("H", "Home"), ("M", "Mobile"), ("W", "Work"), ("O", "Other"), (None, "Type")],
                max_length=1,
            ),
        ),
        migrations.AlterField(
            model_name="venue",
            name="url",
            field=models.URLField(blank=True, verbose_name="Website"),
        ),
    ]
|
import os
import numpy as np
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
import pandas as pd
class DeepImageBuilder:
    '''
    A deep learning class for transfer learning using pre-built keras models that were built from rgb images. This
    class helps loading and preparing data in the right format for keras models. For example, it transforms data
    from grayscale images to rgb.
    '''
    # A dictionary to match class attributes to data name variables. Primarily used in for loops within class methods.
    DATA_ATTRIBUTE_NAMES_DICT = {
        'path_data_train': 'DataTrain',
        'path_labels_train': 'LabelsTrain',
        'path_data_test': 'DataTest',
        'path_labels_test': 'LabelsTest',
        'path_data_val': 'DataVal',
        'path_labels_val': 'LabelsVal'}

    def __init__(self, paths_dict):
        """Validate and store the paths dict; allocate placeholder arrays."""
        self.PathsDict = self.set_paths(paths_dict, nargout=1)  # set the dictionary of paths.
        self.PathCurrent = os.path.dirname(os.path.realpath(__file__))  # path where this file is opened
        self.PathSampleImages = os.path.join(self.PathCurrent, '../sample_imgs')  # path for saved sample class images
        self.DataTrain = np.empty(shape=(100, 10, 10, 1))  # initialize with empty numpy array
        self.DataTest = np.empty(shape=(10, 10, 10, 1))  # initialize with empty numpy array
        self.DataVal = np.empty(shape=(10, 10, 10, 1))  # validation data. can be created with self.create_val_set()
        self.LabelsTrain = np.empty(shape=self.DataTrain.shape[0])
        self.LabelsTest = np.empty(shape=self.DataTest.shape[0])
        self.LabelsVal = np.empty(shape=self.DataVal.shape[0])
        self.EncoderTrain = LabelEncoder()
        self.EncoderTest = LabelEncoder()
        self.EncoderVal = LabelEncoder()

    def set_paths(self, paths_dict, nargout=0):
        '''
        Sets a dictionary that contains the absolute path of the main directory and its relative paths to the training,
        validation, and test sets (data & labels).
        :param paths_dict: Dict.
            path_main: the absolute path of the main directory that contains all the data
            path_data_train: relative path (from path_main) to the training images. Format should be numpy array (.npy).
            path_labels_train: relative path to the training labels. Format should be numpy array (.npy)
            path_data_test, path_labels_test, path_data_val, path_labels_val: similar to training paths. If not
                available, set to ''
        :param nargout: Int. Output paths_dict if nargout = 1 (after checking key names). Do a self update
            (self.PathsDict) if 0.
        :return: paths_dict when nargout == 1, otherwise None
        :raises ValueError: on an unrecognized key or an invalid nargout
        '''
        args = ('path_main',) + tuple(self.DATA_ATTRIBUTE_NAMES_DICT.keys())
        for key in paths_dict:
            if key not in args:
                raise ValueError(key, 'Not recognized. path keys must be one of the following:', args)
        if nargout == 0:
            self.PathsDict = paths_dict
        elif nargout == 1:
            return paths_dict
        else:
            # Fixed: the exception object was constructed but never raised.
            raise ValueError(nargout, 'must be int 0 or 1')

    def get_data(self):
        '''
        Loads image data and labels for training, validation, or test data. Only loads data if the file path for each
        file isn't empty.
        :return:
        '''
        for key, file_path in self.PathsDict.items():
            if key != 'path_main' and isinstance(file_path, str) and file_path:
                # setattr replaces the original exec()-based assignment, and
                # os.path.join replaces the hard-coded '\\' separator so the
                # loader also works on non-Windows systems.
                full_path = os.path.join(self.PathsDict['path_main'], file_path)
                setattr(self, self.DATA_ATTRIBUTE_NAMES_DICT[key], np.load(full_path))

    def show_sample_class_images(self, num_classes, display_images=True, save_images=False):
        '''
        FUTURE: create a method that shows a sample image from each class/category.
        :param num_classes:
        :param display_images:
        :param save_images:
        :return:
        '''
        # *************************************************************#
        # Get the indices for the different types of classes. Create static method from "sample for each type of class"
        # in static method get_sample
        if save_images:
            print('Sample class images will be saved in: ' + self.PathSampleImages)

    def check_data_choice(self, data_choice: list = None):
        """Validate data_choice and gather the matching data/label attributes.

        :param data_choice: list containing any of 'training', 'test', 'validation'
        :return: (data_list, labels_list, data_choice_tracker) where the
            tracker holds the attribute suffixes ('Train', 'Test', 'Val')
        :raises TypeError: if data_choice is not a list
        :raises ValueError: on an unrecognized entry
        """
        # *************************************************************#
        # Check data_choice type and that all the strings in data_choice are correct
        if not isinstance(data_choice, list):
            raise TypeError(
                "data_choice must be a list. List must contain 'training', 'test', and/or 'validation'")
        for entry in data_choice:
            if entry not in ('training', 'test', 'validation'):
                raise ValueError(
                    entry + " not recognized. data_choice list can only contain 'training', 'test', and/or "
                            "'validation'.")
        # *************************************************************#
        # Get the corresponding data and labels
        data_list = []
        labels_list = []
        data_choice_tracker = []
        if 'training' in data_choice:
            data_list.append(self.DataTrain)
            labels_list.append(self.LabelsTrain)
            data_choice_tracker.append('Train')
            print("Selected training set to prep.")
        if 'test' in data_choice:
            data_list.append(self.DataTest)
            labels_list.append(self.LabelsTest)
            data_choice_tracker.append('Test')
            print("Selected test set to prep.")
        if 'validation' in data_choice:
            data_list.append(self.DataVal)
            labels_list.append(self.LabelsVal)
            data_choice_tracker.append('Val')
            print("Selected validation set to prep.")
        return data_list, labels_list, data_choice_tracker

    def prep_data(self, data_choice):
        '''
        Prepare data by converting images from gray scale NxNx1 to rgb NxNx3. This is done because the imported keras
        models were trained with rgb images. In addition, one-hot encode labels if it's necessary.
        :param data_choice: a list that contains 'training', 'test', and/or 'validation'. Data preparation will apply
            to data stored in corresponding attributes.
        :return:
        '''
        data_list, labels_list, data_choice_tracker = \
            self.check_data_choice(data_choice=data_choice)
        # *************************************************************#
        # Loop through data lists and prepare data if it's needed
        for idx, choice in enumerate(data_choice_tracker):
            data = data_list[idx]
            labels = labels_list[idx]
            suffix = choice
            # *************************************************************#
            # Convert images from gray scale NxNx1 to rgb NxNx3 (since our imported nodes were built from rgb images)
            if data.shape[-1] == 1:
                print("Converting images from grayscale NxNx1 to rgb NxNx3")
                data = np.repeat(data, 3, -1)
                print("...New shape of data: ", data.shape)
            # *************************************************************#
            # Convert label strings to numerical values, keeping the fitted encoder
            if isinstance(labels[0], str):
                print("Converting labels from strings to numerical. Outputting encoder to ")
                encoder = LabelEncoder()
                encoder.fit(labels)
                labels = encoder.transform(labels)
                # setattr replaces the original exec()-based assignment
                setattr(self, 'Encoder' + suffix, encoder)
            # Convert integers to dummy variables (i.e., one-hot encoded)
            if len(labels.shape) == 1:
                print("Converting labels to categorical (one-hot encoded)")
                labels = np_utils.to_categorical(labels)
            # Store arguments into respective properties
            self.store_loop_data(data, labels, suffix)

    def adjust_exposure(self, data_choice):
        """Placeholder for per-split exposure adjustment (currently a no-op
        that just re-stores the selected data)."""
        data_list, labels_list, data_choice_tracker = \
            self.check_data_choice(data_choice=data_choice)
        # *************************************************************#
        # Loop through data lists and adjust exposure
        for idx, choice in enumerate(data_choice_tracker):
            data = data_list[idx]
            labels = labels_list[idx]
            suffix = choice
            # Store arguments into respective properties
            self.store_loop_data(data, labels, suffix)

    def store_loop_data(self, data, labels, suffix):
        """
        Store arguments into their respective attributes
        (self.Data<suffix> and self.Labels<suffix>).
        This is typically run inside a loop, where each argument is the
        current value.
        Fixed: this was a @staticmethod whose exec() referenced ``self``,
        which is undefined in a static method and raised NameError at runtime.
        :param data: array to store
        :param labels: labels to store
        :param suffix: 'Train', 'Test' or 'Val'
        :return:
        """
        setattr(self, 'Data' + suffix, data)
        setattr(self, 'Labels' + suffix, labels)
        print(
            "Storing data in self.Data" + suffix + " and labels in self.Labels" + suffix)

    def create_smaller_train_set(self, percent):
        """
        Subsample the training set of our object and replace the results in self.DataTrain and self.LabelsTrain
        :param percent: percent of the dataset we want to sample
        :return: num_samples_training_dict, a dictionary containing the number of samples for each class
        """
        [self.DataTrain, self.LabelsTrain, num_samples_training_dict] = DeepImageBuilder.get_sample(
            data=self.DataTrain,
            labels=self.LabelsTrain, percent=percent,
            remove_samples=False)
        return num_samples_training_dict

    def create_val_set(self, percent):
        """
        Creates a validation set (self.DataVal, self.LabelsVal) from the training set (self.DataTrain, self.LabelsTrain)
        This automatically removes these samples from our training set.
        :param percent: percent of training data to convert to validation set
        :return: num_samples_validation_dict, a dictionary containing the number of samples for each class
        """
        [self.DataVal, self.LabelsVal, num_samples_validation_dict, self.DataTrain, self.LabelsTrain] = \
            DeepImageBuilder.get_sample(data=self.DataTrain, labels=self.LabelsTrain, percent=percent,
                                        remove_samples=True)
        return num_samples_validation_dict

    @staticmethod
    def get_sample(data, labels, percent, remove_samples):
        '''
        Create a subsample of our data, stratified by class. This should be done before method
        DeepImageBuilder.prep_data() is called.
        :param data: DeepImageBuilder.Data (4-D array: samples x H x W x channels)
        :param labels: DeepImageBuilder.Labels, should be an Nx1 numpy array
        :param percent: int, the sample size percent
        :param remove_samples: boolean, set to True to remove the samples that were sampled from data
        :return: [data_sampled, labels_sampled, num_samples_dict, data_samples_removed, labels_samples_removed] if
            remove_samples = True, else return
            [data_sampled, labels_sampled, num_samples_dict]
        '''
        # *************************************************************#
        # pre-allocate sampled numpy arrays
        num_samples_dict = pd.Series(labels).value_counts()  # count number of samples per class
        num_samples_dict = dict(np.ceil(num_samples_dict * percent / 100).astype('int64'))  # ceil: at least one sample
        data_sampled = np.empty(shape=(sum(num_samples_dict.values()),) + data.shape[1:], dtype=data.dtype)
        labels_sampled = np.empty(shape=data_sampled.shape[0], dtype=labels.dtype)
        idx_tracker = 0  # keep track of the starting index of our sample
        idx_samples_all = []  # an array to keep track of all the indexes we sampled
        # *************************************************************#
        # sample for each type of class
        for label, num_samples in num_samples_dict.items():
            # get the indexes of labels that match our current label
            idx_labels = np.where(labels == label)[0]  # [0] because np.where returns a tuple
            # sample without replacement within this class
            idx_labels_sampled = np.random.choice(idx_labels, size=num_samples, replace=False)
            data_sampled[idx_tracker:idx_tracker + num_samples, :, :, :] = data[idx_labels_sampled, :, :, :]
            labels_sampled[idx_tracker:idx_tracker + num_samples] = labels[idx_labels_sampled]
            idx_tracker += num_samples
            idx_samples_all.extend(idx_labels_sampled)
        # *************************************************************#
        # Randomly shuffle the data and labels
        shuffler = np.arange(labels_sampled.shape[0])  # create an indexed array
        np.random.shuffle(shuffler)  # shuffle our array
        data_sampled = data_sampled[shuffler, :, :, :]
        labels_sampled = labels_sampled[shuffler]
        if remove_samples:
            # create a mask to remove sampled data and return
            mask = np.ones(len(labels), dtype=bool)
            mask[idx_samples_all] = False
            data_samples_removed = data[mask, :, :, :]
            labels_samples_removed = labels[mask]
            return [data_sampled, labels_sampled, num_samples_dict, data_samples_removed, labels_samples_removed]
        else:
            return [data_sampled, labels_sampled, num_samples_dict]

    @staticmethod
    def plot_history(history, datagen):
        """
        Future: if it becomes necessary, create a static plotting method for the class
        :param history: keras.callbacks.callbacks.History object, output from keras.models.Model.fit_generator()
        :param datagen: keras.preprocessing.image.ImageDataGenerator, the generator object that was used when fitting
            Model.fit_generator(). Use this to get parameters.
        :return:
        """
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 21:01:37 2019
@author: aleksandr
"""


def lists_middle(l):
    """Return a copy of *l* without its first and last elements.

    Lists with fewer than three elements yield an empty list.
    """
    # l[1:-1] is equivalent to the original l[1:len(l)-1] for every length,
    # including the empty list.
    return l[1:-1]


l = [1, 2, 3, 4, 5, 6, 1]
print(lists_middle(l))
|
import sys
#sys.stdin = open("input.txt", "rt")
input = sys.stdin.readline  # rebinding input() speeds up reading large inputs
# NOTE: readline keeps the trailing newline, so strings would need .rstrip()

# ---------------- Dynamic programming ----------------
# Bottom-up LIS (longest increasing subsequence):
# dy[i] = length of the longest increasing subsequence ending at arr[i],
# i.e. 1 + the longest dy[j] among earlier, smaller elements.
n = int(input())
arr = list(map(int, input().split()))
arr.insert(0, 0)  # shift to 1-based indexing; arr[0] is a sentinel
dy = [0] * (n + 1)
dy[1] = 1
res = dy[1]  # fixed: was 0, which printed the wrong answer for n == 1
for i in range(2, n + 1):
    longest = 0  # renamed from `max`, which shadowed the builtin
    for j in range(i - 1, 0, -1):  # scan j = i-1 down to 1
        if arr[j] < arr[i] and dy[j] > longest:
            longest = dy[j]
    dy[i] = longest + 1
    if dy[i] > res:
        res = dy[i]
print(res)
'''
# 설명만 듣고 푼거 100점
n = int(input())
arr = list(map(int, input().split()))
dy = [0] * (n+1)
dy[1] = 1
for i in range(2, n+1):
    for j in range(1,i):
        if arr[j-1] < arr[i-1] and (dy[j]+1) > dy[i]:
            dy[i] = dy[j]+1
    if dy[i] == 0:
        dy[i] = 1
print(max(dy))
'''
|
from django.db import models
class Post(models.Model):
    """A blog entry; newest posts are listed first (see Meta.ordering)."""
    title = models.CharField(max_length=100, db_index=True)
    post_date = models.DateTimeField(auto_now_add=True)  # set once on creation
    blogger = models.ForeignKey('Blogger', on_delete=models.CASCADE, related_name='posts')
    text = models.TextField()

    def __str__(self):
        return self.title

    class Meta:
        ordering = ['-post_date', ]
class Blogger(models.Model):
    """An author profile, listed alphabetically by name."""
    name = models.CharField(max_length=100, db_index=True)
    # NOTE(review): despite the name, `post` is a plain CharField here, not a
    # relation to Post — presumably the blogger's position/occupation; confirm.
    post = models.CharField(max_length=100)
    biography = models.CharField(max_length=100)
    # Single-letter country codes with Russian display labels
    # (placeholder "choose a country", Belarus, Canada, Poland, USA).
    COUNTRY_CHOICES = (
        (None, 'Выберите страну'),
        ('a', 'Беларусь'),
        ('b', 'Канада'),
        ('c', 'Польша'),
        ('d', 'США'),
    )
    country = models.CharField(max_length=1, choices=COUNTRY_CHOICES)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['name', ]
class Comment(models.Model):
    """A comment left on a Post, attributed to a Blogger; oldest first."""
    blogger = models.ForeignKey(Blogger, on_delete=models.CASCADE)
    date_added = models.DateTimeField(auto_now_add=True)  # set once on creation
    comment = models.TextField()
    post = models.ForeignKey(Post, on_delete=models.CASCADE)

    class Meta:
        ordering = ['date_added', ]
|
# coding: utf-8
# In[2]:
#-- Youtube, Python Widgets, Dot graphs, ...
from IPython.display import YouTubeVideo
import ipywidgets as wdg
#-- Useful Python functions
# product(set, set) generates the cartesian product
from itertools import product
# For copy.deepcopy(structure) produces a deep copy of the structure
import copy
# For random.choice(list) produces a random pick from the list
import random
# reduce(lambda x,y: x+y, [], -1000) or reduce(lambda x,y: x+y, [1]) or reduce(lambda x,y: x+y, [1,2])
# -1000 is default
from functools import reduce
# Graphviz functions : produce dot files or display files or dot objects
from graphviz import Digraph, Source
# datetime.datetime.now() - help obtain time in various formats
import datetime
#--end of imports, 5/28/17
|
import random
from itertools import combinations
import pytest
from apivisualizer.highestproduct import highest_product
class TestHighestProduct:
    """Tests for highest_product: the maximum product of any three numbers."""

    def test_too_short_input(self):
        # Fewer than three numbers cannot form a product of three.
        with pytest.raises(ValueError):
            highest_product([0, 6])

    def test_all_positive(self):
        numbers = [1, 6, 5, 10, 5]
        assert highest_product(numbers) == 10 * 6 * 5

    def test_all_negative(self):
        # With all-negative input the best product is the three closest to zero.
        numbers = [-1, -6, -5, -10, -5]
        assert highest_product(numbers) == -1 * -5 * -5

    def test_mixed_sign_positive_result(self):
        # Two large negatives times the largest positive can win.
        numbers = [1, -6, -5, 10, 5]
        assert highest_product(numbers) == 10 * -6 * -5

    def test_mixed_sign_negative_result(self):
        # Exactly three numbers: the product is forced, even if negative.
        numbers = [1, -10, 5]
        assert highest_product(numbers) == 1 * -10 * 5

    def test_zero_result(self):
        numbers = [-10, -5, -2, 0]
        assert highest_product(numbers) == 0

    def test_long_input(self):
        numbers = [6, 5, 10] + [1, 2] * 350
        assert highest_product(numbers) == 300

    def test_very_long_input(self):
        numbers = [6, 5, 10] + [1, 2] * 350 + [-10, -5] * 1000
        assert highest_product(numbers) == -10 * -10 * 10

    def test_random_inputs(self):
        # Cross-check against a brute-force O(n^3) reference implementation.
        def naive_implementation(numbers):
            return max(a * b * c for (a, b, c) in combinations(numbers, 3))
        for _ in range(100):
            nums = random.sample(range(-100, 100), 20)
            assert naive_implementation(nums) == highest_product(nums)
|
import os
import sys
import time
import math
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import string
start_time = time.time()
ps = PorterStemmer()
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))

# ---- Load the trained naive-Bayes model --------------------------------
# nbmodel.txt layout: two prior log-probabilities, three counts, then one
# "<class> <word> <log-prob>" line per (class, word) pair.
model_dict = dict()
with open("nbmodel.txt", "r", encoding="latin1") as model:  # fixed: file was never closed
    prior_prob_spam = float(model.readline().rstrip())
    prior_prob_ham = float(model.readline().rstrip())
    no_unique_words = int(model.readline().rstrip())
    no_spam_words = int(model.readline().rstrip())
    no_ham_words = int(model.readline().rstrip())
    for line in model:
        split_string = line.split()
        if not split_string:
            continue  # skip blank lines (the original stopped at the first one)
        # Key is "<class><word>", e.g. "spamfree" / "hamfree".
        model_dict[split_string[0] + split_string[1]] = float(split_string[2])

# ---- Classify every .txt document under sys.argv[1] --------------------
correct_classification_spam = 0
correct_classification_ham = 0
with open("nboutput.txt", "w") as output:  # fixed: file was never closed
    for root, directories, files in os.walk(sys.argv[1]):
        for file in files:
            if '.txt' not in file:
                continue
            directory_name, file_name = os.path.split(os.path.join(root, file))
            # The immediate parent directory name ("spam"/"ham") is the gold label.
            parent_directory = directory_name.split('/')[-1]
            prob_spam = prior_prob_spam
            prob_ham = prior_prob_ham
            with open(os.path.join(root, file), "r", encoding="latin1") as file_pointer:
                content = file_pointer.read()
            for word in content.split():
                lowercase_word = word.lower()
                if lowercase_word in string.punctuation or lowercase_word in stop_words:
                    continue
                lowercase_word = ps.stem(lowercase_word)
                spam_key = "spam" + lowercase_word
                ham_key = "ham" + lowercase_word
                # Only score words seen in at least one class during training;
                # the missing class gets an add-one smoothed log-probability.
                if spam_key in model_dict or ham_key in model_dict:
                    prob_spam += model_dict.get(
                        spam_key, math.log(1 / (no_spam_words + no_unique_words)))
                    prob_ham += model_dict.get(
                        ham_key, math.log(1 / (no_ham_words + no_unique_words)))
            if prob_spam > prob_ham:
                if parent_directory == "spam":
                    correct_classification_spam += 1
                output.write("spam\t" + os.path.join(root, file) + "\n")
            elif prob_spam < prob_ham:
                if parent_directory == "ham":
                    correct_classification_ham += 1
                output.write("ham\t" + os.path.join(root, file) + "\n")
            else:
                # Tie: nothing is written for this document (original behavior).
                print("Equal")
print(correct_classification_spam)
print(correct_classification_ham)
print(str(time.time() - start_time))
|
##Faça um Programa que peça 2 números inteiros e um número real. Calcule e mostre:
##o produto do dobro do primeiro com metade do segundo .
##a soma do triplo do primeiro com o terceiro.
##o terceiro elevado ao cubo
a = int(input(" Insira um valor inteiro : "))
b = int(input(" Insira outro valor inteiro : "))
c = float(input(" Insira um valor real: "))
cont1 = a * b  # (2*a) * (b/2) simplifies to a*b, matching the spec
cont2 = 3 * a + c  # fixed: spec asks for triple of the first plus THE THIRD (was 3*a + 3*b)
cont3 = c ** 3
print(" O primeiro valor é " + str(cont1))
print(" O segundo valor é " + str(cont2))
print(" O terceiro valor " + str(cont3))
|
import json
from django.shortcuts import get_object_or_404
from django.http import JsonResponse
from django.core.mail import send_mail
from django.template.loader import render_to_string
from apps.cart.cart import Cart
from .models import Product
from apps.order.models import Order, OrderItem
from apps.order.utils import checkout
def api_add_to_cart(request):
    """Add a product to the session cart.

    Expects a JSON body with product_id, quantity and update; when update is
    falsy a single unit is added, otherwise the quantity is overwritten.
    """
    payload = json.loads(request.body)
    product_id = payload['product_id']
    quantity = payload['quantity']
    update = payload['update']
    cart = Cart(request)
    product = get_object_or_404(Product, pk=product_id)
    if update:
        cart.add(product=product, quantity=quantity, update_quantity=True)
    else:
        cart.add(product=product, quantity=1, update_quantity=False)
    return JsonResponse({'success': True})
def api_checkout(request):
    """Create an order from the cart, mark it cash-on-delivery, email a
    confirmation, and empty the cart. Expects JSON customer details."""
    cart = Cart(request)
    data = json.loads(request.body)
    jsonresponse = {'success':True}
    first_name = data['first_name']
    last_name = data['last_name']
    email = data['email']
    city = data['city']
    address = data['address']
    phone = data['phone']
    # Delegate order creation; returns the new order's primary key.
    orderid = checkout(request, first_name, last_name, email, city, address, phone)
    # NOTE(review): cod is hard-coded True — presumably a placeholder until
    # other payment methods exist; confirm before relying on this branch.
    cod = True
    if cod:
        order = Order.objects.get(pk = orderid)
        order.cod = True
        order.amount_to_be_paid = cart.get_total_cost()
        order.save()
        # Send the confirmation both to the shop inbox and the customer.
        html = render_to_string('core/email_confirmation.html')
        send_mail('Order Confirmation','Your order has been sent','noreply@mpasal.com',['mail@mpasal.com',order.email],fail_silently=True,html_message=html)
        cart.clear()
    return JsonResponse(jsonresponse)
def api_remove_from_cart(request):
    """Remove a product from the session cart.

    Expects a JSON body with product_id; always answers {'success': True}.
    """
    data = json.loads(request.body)
    # Fixed: the cart was instantiated twice; one instance is enough.
    cart = Cart(request)
    product_id = str(data['product_id'])
    cart.remove(product_id)
    return JsonResponse({'success': True})
|
import pickle
import gzip
import random
import numpy as np
import sys
import time
class Dataset(object):
    """Container for train/validation/test splits with reshaping, shuffling
    and mini-batch helpers.

    Each of *train*, *validation* and *test* is an ``(X, y)`` pair;
    validation and test may be None when unavailable.
    """

    def __init__(self, train, validation, test, input_shape=None):
        self.train_x = train[0]
        self.train_y = train[1]
        self.n_train = len(train[0])
        if validation is not None:  # `is not None` instead of `!= None`
            self.validation_available = True
            self.validation_x = validation[0]
            self.validation_y = validation[1]
            self.n_validation = len(validation[0])
        else:
            self.validation_available = False
        if test is not None:
            self.test_available = True
            self.test_x = test[0]
            self.test_y = test[1]
            self.n_test = len(test[0])
        else:
            self.test_available = False
        if input_shape is not None:
            self.set_input_shape(input_shape)

    def set_input_shape(self, input_shape):
        """Reshape every sample of every available split to *input_shape*."""
        self.train_x = [np.reshape(X, input_shape) for X in self.train_x]
        if self.validation_available:
            self.validation_x = [np.reshape(X, input_shape) for X in self.validation_x]
        if self.test_available:
            # Fixed: test data was only reshaped when a validation split
            # also existed, leaving test_x in its raw shape otherwise.
            self.test_x = [np.reshape(X, input_shape) for X in self.test_x]

    def shuffle_train(self):
        """Shuffle the training split in place (converts to numpy arrays)."""
        idx = np.arange(self.n_train)
        np.random.shuffle(idx)
        self.train_x = np.array(self.train_x)[idx]
        self.train_y = np.array(self.train_y)[idx]

    def get_batch(self, _type, ind, batch_size):
        """Return mini-batch *ind* of size *batch_size* from the given split.

        :param _type: 'train', 'validation' or 'test'
        :raises ValueError: on an unknown split name (previously this fell
            through to an UnboundLocalError)
        """
        if _type == 'train':
            mini_x = self.train_x[ind * batch_size : (ind+1) * batch_size]
            mini_y = self.train_y[ind * batch_size : (ind+1) * batch_size]
        elif _type == 'validation':
            mini_x = self.validation_x[ind * batch_size : (ind+1) * batch_size]
            mini_y = self.validation_y[ind * batch_size : (ind+1) * batch_size]
        elif _type == 'test':
            mini_x = self.test_x[ind * batch_size : (ind+1) * batch_size]
            mini_y = self.test_y[ind * batch_size : (ind+1) * batch_size]
        else:
            raise ValueError(_type, "must be 'train', 'validation' or 'test'")
        return mini_x, mini_y

    def shuffle_data(self, X, y, seed=None):
        """Return X and y shuffled with the same permutation (optionally seeded)."""
        if seed:
            np.random.seed(seed)
        idx = np.arange(X.shape[0])
        np.random.shuffle(idx)
        return X[idx], y[idx]

    def train_test_split(self, X, y, test_size=0.5, shuffle=True, seed=None):
        """Split X/y into train and test portions; shuffles first by default."""
        if shuffle:
            X, y = self.shuffle_data(X, y, seed)
        split_i = len(y) - int(len(y) * test_size)
        X_train, X_test = X[:split_i], X[split_i:]
        y_train, y_test = y[:split_i], y[split_i:]
        return X_train, X_test, y_train, y_test

    def __onehot__(self, v, n_classes):
        """Return a length-n_classes one-hot vector with index *v* set to 1."""
        y = np.zeros(n_classes)
        y[v] = 1
        return y
return y |
import webbrowser
class My():
    """A media item that can open its own trailer."""

    def __init__(self, my_title, my_storyline, poster_image, trailer_youtube):
        """Store title, storyline, poster URL and trailer URL."""
        self.title = my_title
        self.story = my_storyline
        self.poster = poster_image
        self.trailer = trailer_youtube

    def show_trailer(self):
        """Open the trailer URL in the default web browser."""
        webbrowser.open(self.trailer)
|
"unittest"
import unittest
import psycopg2
from unittest import mock
from db_load import IronMQFile, load_data_to_postgre
from main import event_handlers, mq
from settings import settings
class TestIronMQFile(unittest.TestCase):
    """Tests for the file-like IronMQ wrapper and the Postgres loader."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_read(self):
        # read(n) pulls whole lines via readline() and keeps the remainder
        # in `leftover`; readline is mocked to always yield 'hello\n'.
        mq = IronMQFile(None)
        mq.readline = mock.Mock(return_value='hello\n')
        self.assertEqual(mq.read(3), 'hel')
        self.assertEqual(mq.leftover, 'lo\n')
        self.assertEqual(mq.read(5), 'lo\nhe')
        self.assertEqual(mq.leftover, 'llo\n')
        self.assertEqual(mq.read(12), 'llo\nhello\nhe')
        self.assertEqual(mq.leftover, 'llo\n')
        self.assertEqual(mq.read(11), 'llo\nhello\nh')
        self.assertEqual(mq.read(1), 'e')
        self.assertEqual(mq.leftover, 'llo\n')
        # read(0) must return an empty string and leave leftover untouched.
        self.assertEqual(mq.read(0), '')
        self.assertEqual(mq.leftover, 'llo\n')

    def test_readline(self):
        # With the cache pre-filled, readline() yields one cached event per
        # call (newline-terminated) and '' once the cache is exhausted.
        mq = IronMQFile(None)
        mq._populate_cache = mock.Mock(return_value=None)
        mq.event_cache = ['hello', 'world']
        self.assertEqual(mq.readline(), 'hello\n')
        self.assertEqual(mq.readline(), 'world\n')
        self.assertEqual(mq.readline(), '')

    # Generally for a unit test I should use a stub for db connections and cursors here.
    # However, as the main function of this method is to execute db operations
    # and the logic of itself is very simple, it seems to be useless to just test
    # it with stubs. Instead, let's treat it as half unittest and half integration test
    # to try corner cases to raise and handle exceptions.
    # TODO: split the function to separate parts: external connections and the logic.
    # TODO: there should be more test cases to increase the coverage.
    def test_load_data_to_postgre(self):
        # Deliberately unreachable port: the loader should swallow the
        # connection failure and return None rather than raise.
        db_parms = { #'dbname': None,
            'user': settings.DB_USER,
            'password': settings.DB_PASSWORD,
            'host': '127.0.0.1',
            'port': '12345'
        }
        tbl_quefile_dict = {}
        for tbl_name in event_handlers.handlers.keys():
            tbl_quefile_dict[tbl_name] = IronMQFile(mq.queue(tbl_name))
        self.assertEqual(load_data_to_postgre(None, db_parms, 'test'), None)
|
#Modifed SimpleHTTPServer which returns cookies passed on the request
import SimpleHTTPServer
import logging
import time
import base64
import random
cookieHeader = None  # NOTE(review): appears unused — do_GET stores the header on self.cookieHeader instead; confirm before removing
class MyHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # Python 2 handler (SimpleHTTPServer became http.server in Python 3):
    # serves files like the base class but records the request's Cookie
    # header and echoes a token cookie back on every response.

    def do_GET(self):
        # Capture the incoming Cookie header before delegating to the
        # default file-serving GET handler.
        self.cookieHeader = self.headers.get('Cookie')
        SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

    def end_headers(self):
        # Inject our extra headers before the base class closes the block.
        self.send_my_headers()
        SimpleHTTPServer.SimpleHTTPRequestHandler.end_headers(self)

    def send_my_headers(self):
        # Token = base64 of current time plus a small random int.
        # NOTE(review): Python 2 only — b64encode would need bytes on Python 3.
        token = base64.b64encode( str(time.time()) + str(random.randint(1, 10)))
        self.send_header('Cookie', "Test="+token)
if __name__ == '__main__':
    # Serve the current directory using the cookie-echoing handler.
    SimpleHTTPServer.test(HandlerClass=MyHTTPRequestHandler)
|
from cfgm_common.exceptions import NoIdError
from schema_transformer.resources._resource_base import ResourceBaseST
class VirtualPortGroupST(ResourceBaseST):
_dict = {}
obj_type = 'virtual_port_group'
ref_fields = ['virtual_machine_interface']
prop_fields = ['annotations']
@classmethod
def reinit(cls):
    """Rebuild schema-transformer state for existing VPG objects.

    Only VPGs annotated with ('usage', 'sriov-vm') are re-registered and
    re-evaluated; all other VPGs are skipped.
    """
    for obj in cls.list_vnc_obj():
        if not obj.annotations:
            continue
        for kvp in obj.annotations.key_value_pair:
            if kvp.key == 'usage' and kvp.value == 'sriov-vm':
                st_obj = cls.locate(obj.get_fq_name_str(), obj)
                st_obj.evaluate()
# end reinit
def __init__(self, name, obj=None):
    """Register a VPG by fully-qualified name and sync from the VNC object."""
    self.name = name
    self.uuid = None
    # Names of fabric VMIs referenced by this VPG (filled by update()).
    self.virtual_machine_interfaces = set()
    self.update(obj)
    self.uuid = self.obj.uuid
# end __init__
def update(self, obj=None):
    """Refresh cached fields from the VNC object; return the changed fields."""
    changed_fields = self.update_vnc_obj(obj)
    if 'annotations' in changed_fields:
        self.set_annotations()
    return changed_fields
# end update
def delete_obj(self):
    """Drop all fabric-VMI references and physical-interface references."""
    vmi_map = ResourceBaseST.get_obj_type_map().get('virtual_machine_interface')
    for vmi_name in self.virtual_machine_interfaces:
        self.delete_fabric_vmi_ref(vmi_map.get(vmi_name))
    self.delete_physical_interface_ref()
# end delete_obj
def evaluate(self, **kwargs):
    """Sanitize fabric VMIs of sriov-vm VPGs; delete the VPG if left empty."""
    if getattr(self, 'annotations', {}).get("usage", "") != "sriov-vm":
        return
    self.sanitize_fabric_vmis()
    if not self.is_valid():
        self.delete_self_db_obj()
# end evaluate
def sanitize_fabric_vmis(self):
    """Drop fabric-VMI refs whose virtual network has no other VMI on this
    VPG's physical interfaces.

    For each referenced fabric VMI, registers this VPG on the VMI's virtual
    network, then scans that network's other VMIs; if none of them lives on
    one of this VPG's physical interfaces, the fabric VMI ref is removed.
    """
    self._logger.debug("Starts sanitizing "
                       "vpg's (%s) fabric vmis" % self.name)
    for fabric_vmi_st_name in self.virtual_machine_interfaces:
        fabric_vmi_st = \
            ResourceBaseST.get_obj_type_map() \
            .get('virtual_machine_interface') \
            .get(fabric_vmi_st_name)
        if fabric_vmi_st is None:
            continue
        vn_st = \
            ResourceBaseST.get_obj_type_map() \
            .get('virtual_network') \
            .get(fabric_vmi_st.virtual_network)
        if vn_st is None:
            continue
        # Track the VPG on the network so later evaluations can find it.
        vn_st.virtual_port_groups.add(self.name)
        # Assume the fabric VMI is orphaned until a sibling VMI on one of
        # our physical interfaces proves otherwise.
        no_valid_vmi_under_fabric_vmi = True
        for vmi_st_name in vn_st.virtual_machine_interfaces:
            if vmi_st_name != fabric_vmi_st_name:
                vmi_st = \
                    ResourceBaseST.get_obj_type_map() \
                    .get('virtual_machine_interface') \
                    .get(vmi_st_name)
                if vmi_st is None:
                    continue
                if vmi_st.get_pi_uuid() in \
                        self.get_uuids(self.obj.get_physical_interface_refs()):
                    no_valid_vmi_under_fabric_vmi = False
                    break
        if no_valid_vmi_under_fabric_vmi:
            self.delete_fabric_vmi_ref(fabric_vmi_st)
    self._logger.debug("Finshed sanitizing "
                       "vpg's (%s) fabric vmis" % self.name)
# end sanitize_fabric_vmis
def is_valid(self):
    """A VPG is valid while it still references at least one VMI."""
    self.update(None)
    return bool(self.virtual_machine_interfaces)
# end is_valid
def delete_self_db_obj(self):
    """Delete this VPG from the VNC database (refs first, then the object).

    A NoIdError means the object is already gone and is silently ignored.
    """
    self._logger.debug("Starts deleting vpg db object %s" % self.name)
    try:
        # no need to manually delete fabric VMI,
        # since delete_obj will run when VPG deletion event is caught
        self.delete_obj()
        self._vnc_lib.virtual_port_group_delete(id=self.uuid)
    except NoIdError:
        pass
    self._logger.debug("Finished deleting vpg db object %s" % self.name)
# end delete_self_db_obj
def delete_fabric_vmi_ref(self, fabric_vmi_st):
    """Detach *fabric_vmi_st* from this VPG and garbage-collect the VMI.

    Removes this VPG's name from the owning virtual network, drops the
    VPG -> VMI reference on the API server, and deletes the fabric VMI
    itself once no other VPG back-references it. Unexpected API errors
    are logged and recorded via add_ignored_error() rather than raised.
    """
    if fabric_vmi_st is not None:
        vn_st = \
            ResourceBaseST.get_obj_type_map() \
            .get('virtual_network') \
            .get(fabric_vmi_st.virtual_network)
        if vn_st is not None:
            if self.name in vn_st.virtual_port_groups:
                vn_st.virtual_port_groups.remove(self.name)
        fabric_vmi_uuid = fabric_vmi_st.uuid
        if fabric_vmi_uuid is not None:
            try:
                # Drop this VPG's reference to the fabric VMI.
                self._vnc_lib.ref_update(
                    'virtual-port-group', self.uuid,
                    'virtual-machine-interface', fabric_vmi_uuid,
                    None, 'DELETE')
                fabric_vmi = self._vnc_lib \
                    .virtual_machine_interface_read(
                        id=fabric_vmi_uuid)
                fabric_vmi_vpg_back_refs = \
                    fabric_vmi.get_virtual_port_group_back_refs()
                # Delete the fabric VMI once the last back-ref is gone.
                if fabric_vmi_vpg_back_refs is None or \
                        len(fabric_vmi_vpg_back_refs) == 0:
                    self._vnc_lib \
                        .virtual_machine_interface_delete(
                            id=fabric_vmi_uuid)
            except NoIdError:
                # Target already deleted; treat as success.
                pass
            except Exception as e:
                msg = ("Unexpected error during "
                       "dereferencing fabric vmi %s: %s"
                       % (fabric_vmi_st.name, str(e)))
                self._logger.error(msg)
                self.add_ignored_error(msg)
# end delete_fabric_vmi_ref
def delete_physical_interface_ref(self):
    """Drop every physical-interface reference held by this VPG.

    Since the internally created sriov VPG only refers to one physical
    interface, simply deleting all PI refs is safe. Unexpected API
    errors are logged and recorded via add_ignored_error().
    """
    for pi_uuid in self.get_uuids(self.obj.get_physical_interface_refs()):
        try:
            self._vnc_lib.ref_update(
                'virtual-port-group', self.uuid,
                'physical-interface', pi_uuid,
                None, 'DELETE')
        except NoIdError:
            # Ref target already gone; nothing to do.
            pass
        except Exception as e:
            # Fixed typo in the error message ("pyhsical" -> "physical").
            msg = ("Unexpected error during "
                   "dereferencing "
                   "physical interface %s: %s"
                   % (pi_uuid, str(e)))
            self._logger.error(msg)
            self.add_ignored_error(msg)
# end delete_physical_interface_ref
def get_uuids(self, items):
    """Extract the 'uuid' value of every entry in *items*.

    :param items: None, a list of dicts each carrying a 'uuid' key, or a
        dict whose first value is such a list.
    :returns: a list of uuid strings; always a list.

    Bug fix: previously an empty dict (or any unexpected type) fell off
    the end and returned None implicitly, which crashed callers that
    iterate the result. Such inputs now return [].
    """
    if items is None:
        return []
    if isinstance(items, list):
        return [item['uuid'] for item in items]
    if isinstance(items, dict) and items:
        first_key = next(iter(items))
        return [item['uuid'] for item in items.get(first_key, [])]
    return []
# end get_uuids
def set_annotations(self):
    """Cache this object's annotations as a plain key/value dict."""
    self.annotations = self.kvps_to_dict(self.obj.get_annotations())
# end set_annotations
def kvps_to_dict(self, kvps):
    """Convert a KeyValuePairs object into a plain dict.

    Returns {} when *kvps* is None or empty.
    """
    if not kvps:
        return {}
    return {pair.get_key(): pair.get_value()
            for pair in kvps.get_key_value_pair()}
# end kvps_to_dict
# end class VirtualPortGroupST
|
#!/usr/bin/env python3
from setuptools import setup, find_packages

# Packaging metadata for mpg, a MaaS pod generator.
setup(
    name='mpg',
    packages=find_packages(),
    version='0.0.1',
    description='To generate MaaS pods',
    author='Taihsiang Ho (tai271828)',
    author_email='tai271828@gmail.com',
    url='https://github.com/tai271828/maas-pod-generator',
    download_url='https://github.com/tai271828/maas-pod-generator',
    keywords=['ubuntu', 'maas', 'science', 'cluster'],
    entry_points={
        'console_scripts': [
            'mpg-cli=mpg.launcher.mpg_cli:main',
        ]
    },
    classifiers=[
        "Programming Language :: Python",
    ],
)
|
#!/usr/bin/python
"""Read function names from 2.code, write "name:_<md5>" lines to 1.code,
then delete 2.code."""
import hashlib
import os

file_path = os.path.abspath(__file__)
dir_path = os.path.dirname(file_path)
replace_str = "2\n"
func_names = []
with open(os.path.join(dir_path, '2.code'), 'r') as fp:
    while True:
        line = fp.readline()
        # Stop at the first blank line (or EOF, which reads as "").
        if line.strip() == "":
            break
        func_names.append(line.strip())
for func_name in func_names:
    # Bug fix: hashlib.md5 requires bytes on Python 3; passing the str
    # directly raised TypeError. Encode explicitly.
    data = hashlib.md5(func_name.encode('utf-8')).hexdigest()
    md5_str = '_' + data
    replace_str = replace_str + func_name + ":" + md5_str + "\n"
with open(os.path.join(dir_path, '1.code'), 'w') as fp:
    fp.write(replace_str)
# Portable deletion instead of shelling out to `rm -f`; like `rm -f`,
# a missing file is not an error.
try:
    os.remove(os.path.join(dir_path, '2.code'))
except FileNotFoundError:
    pass
# Shiritori judge: each word must be new and start with the previous
# word's last letter.
word_count = int(input())
words = [input() for _ in range(word_count)]
valid = True
seen = []
for word in words:
    if word in seen:
        valid = False
        break
    if seen and seen[-1][-1] != word[0]:
        valid = False
        break
    seen.append(word)
print('Yes' if valid else 'No')
|
#!/usr/bin/python3
from pd2.runner import Runner
import sys

if __name__ == "__main__":
    # Hand the full argv (including the program name) to the runner.
    runner = Runner(sys.argv)
    runner.run()
|
import sys
import unittest

# Make the project modules in ./code importable without installation.
sys.path.append('./code')

from transforms import Transform, RadialGaussianization
from models import GSM
# NOTE: numpy's `all` and `sum` deliberately shadow the builtins below.
from numpy import all, sqrt, sum, square

# Silence per-transform progress output during the tests.
Transform.VERBOSITY = 0
class Tests(unittest.TestCase):
    """Sanity checks for RadialGaussianization against GSM models."""

    def test_inverse(self):
        """
        Make sure inverse Gaussianization is inverse to Gaussianization.
        """
        gsm = GSM(3, 10)
        gsm.initialize('cauchy')

        # generate test data
        samples = gsm.sample(100)

        rg = RadialGaussianization(gsm)

        # reconstructed samples
        samples_ = rg.inverse(rg(samples))

        # distance between norm and reconstructed norm
        # (numpy sum/sqrt over all entries, imported at module level)
        dist = abs(sqrt(sum(square(samples_))) - sqrt(sum(square(samples))))
        self.assertTrue(all(dist < 1E-6))

        ###
        # test one-dimensional GSM
        gsm = GSM(1, 7)
        gsm.initialize('cauchy')

        # generate test data
        samples = gsm.sample(100)

        rg = RadialGaussianization(gsm)

        # reconstructed samples
        samples_rg = rg.inverse(rg(samples))

        # distance between norm and reconstructed norm
        dist = abs(sqrt(sum(square(samples_rg))) - sqrt(sum(square(samples))))
        self.assertTrue(all(dist < 1E-6))

    def test_logjacobian(self):
        """
        Test log-Jacobian: model log-likelihood must equal the Gaussian
        log-likelihood of the transformed samples plus the log-Jacobian.
        """
        gsm = GSM(3, 10)
        gsm.initialize('cauchy')

        # standard normal distribution
        gauss = GSM(3, 1)
        gauss.scales[0] = 1.

        # generate test data
        samples = gsm.sample(100)

        rg = RadialGaussianization(gsm)

        # after Gaussianization, samples should be Gaussian distributed
        loglik_gsm = gsm.loglikelihood(samples)
        loglik_gauss = gauss.loglikelihood(rg(samples)) + rg.logjacobian(samples)

        dist = abs(loglik_gsm - loglik_gauss)
        self.assertTrue(all(dist < 1E-6))

        ###
        # test one-dimensional Gaussian
        gsm = GSM(1, 10)
        gsm.initialize('cauchy')

        # standard normal distribution
        gauss = GSM(1, 1)
        gauss.scales[0] = 1.

        # generate test data
        samples = gsm.sample(100)

        rg = RadialGaussianization(gsm)

        # after Gaussianization, samples should be Gaussian distributed
        loglik_gsm = gsm.loglikelihood(samples)
        loglik_gauss = gauss.loglikelihood(rg(samples)) + rg.logjacobian(samples)

        dist = abs(loglik_gsm - loglik_gauss)
        self.assertTrue(all(dist < 1E-6))
if __name__ == '__main__':
    # Run the suite when the file is executed directly.
    unittest.main()
|
# Copyright (c) 2017 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Wrapper for the various widgets used from frameworks so that they can be used
easily from within Qt Designer
"""
import sgtk
from sgtk.platform.qt import QtCore, QtGui
context_selector = sgtk.platform.import_framework("tk-framework-qtwidgets",
"context_selector")
ContextWidget = context_selector.ContextWidget
screen_grab = sgtk.platform.import_framework("tk-framework-qtwidgets",
"screen_grab")
logger = sgtk.platform.get_logger(__name__)
class Thumbnail(QtGui.QLabel):
    """
    A specialized, custom widget that either displays a
    static square thumbnail or a thumbnail that can be captured
    using screen capture and other methods.
    """

    # emitted when screen is captured
    # passes the QPixmap as a parameter
    screen_grabbed = QtCore.Signal(object)

    # internal signal to initiate screengrab
    _do_screengrab = QtCore.Signal()

    def __init__(self, parent=None):
        """
        :param parent: The parent QWidget for this control
        """
        QtGui.QLabel.__init__(self, parent)
        # _multiple_values allows to display indicator that the summary
        # thumbnail is not applied to all items
        self._multiple_values = False
        self._thumbnail = None
        self._enabled = True
        self._bundle = sgtk.platform.current_bundle()
        self.setAutoFillBackground(True)
        self.setCursor(QtCore.Qt.PointingHandCursor)
        # placeholder image shown until a real thumbnail is set
        self._no_thumb_pixmap = QtGui.QPixmap(":/tk_maya_webgl/camera.png")
        self._do_screengrab.connect(self._on_screengrab)
        self.set_thumbnail(self._no_thumb_pixmap)

    def setEnabled(self, enabled):
        """
        Overrides base class setEnabled

        :param bool enabled: flag to indicate enabled state of the widget
        """
        self._enabled = enabled
        # the pointing-hand cursor advertises clickability; drop it when disabled
        if enabled:
            self.setCursor(QtCore.Qt.PointingHandCursor)
        else:
            self.unsetCursor()

    def set_thumbnail(self, pixmap):
        """
        Set pixmap to be displayed

        :param pixmap: QPixmap to show or None in order to show default one.
        """
        if pixmap is None:
            self._set_screenshot_pixmap(self._no_thumb_pixmap)
        else:
            self._set_screenshot_pixmap(pixmap)

    def mousePressEvent(self, event):
        """
        Fires when the mouse is pressed.
        In order to emulate the aesthetics of a button,
        a white frame is rendered around the label at mouse press.
        """
        QtGui.QLabel.mousePressEvent(self, event)
        if self._enabled:
            self.setStyleSheet("QLabel {border: 1px solid #eee;}")

    def mouseReleaseEvent(self, event):
        """
        Fires when the mouse is released
        Stops drawing the border and emits an internal
        screen grab signal.
        """
        QtGui.QLabel.mouseReleaseEvent(self, event)
        if self._enabled:
            # disable style
            self.setStyleSheet(None)
            # if the mouse is released over the widget,
            # kick off the screengrab
            pos_mouse = event.pos()
            if self.rect().contains(pos_mouse):
                self._do_screengrab.emit()

    def _on_screengrab(self):
        """
        Perform a screengrab and update the label pixmap.
        Emit screen_grabbed signal.
        """
        self._bundle.log_debug("Prompting for screenshot...")
        # hide the window so it does not appear in the capture itself
        self.window().hide()
        try:
            pixmap = screen_grab.ScreenGrabber.screen_capture()
        finally:
            self.window().show()
        if pixmap:
            self._bundle.log_debug(
                "Got screenshot %sx%s" % (pixmap.width(), pixmap.height())
            )
            self._multiple_values = False
            self._set_screenshot_pixmap(pixmap)
            self.screen_grabbed.emit(pixmap)

    def _set_multiple_values_indicator(self, is_multiple_values):
        """
        Specifies whether to show multiple values indicator
        """
        self._multiple_values = is_multiple_values

    def paintEvent(self, paint_event):
        """
        Paint Event override
        """
        # paint multiple values indicator
        if self._multiple_values:
            p = QtGui.QPainter(self)
            p.drawPixmap(0, 0, self.width(), self.height(),
                         self._no_thumb_pixmap,
                         0, 0, self._no_thumb_pixmap.width(),
                         self._no_thumb_pixmap.height())
            # darken the pixmap, then draw the overlay text on top
            p.fillRect(0, 0, self.width(), self.height(),
                       QtGui.QColor(42, 42, 42, 237))
            p.setFont(QtGui.QFont("Arial", 15, QtGui.QFont.Bold))
            pen = QtGui.QPen(QtGui.QColor("#18A7E3"))
            p.setPen(pen)
            p.drawText(self.rect(), QtCore.Qt.AlignCenter, "Multiple Values")
        else:
            # paint thumbnail
            QtGui.QLabel.paintEvent(self, paint_event)

    def _set_screenshot_pixmap(self, pixmap):
        """
        Takes the given QPixmap and sets it to be the thumbnail
        image of the note input widget.

        :param pixmap: A QPixmap object containing the screenshot image.
        """
        self._thumbnail = pixmap
        # format it to fit the label size
        thumb = self._thumbnail.scaled(
            self.width(),
            self.height(),
            QtCore.Qt.KeepAspectRatio,
            QtCore.Qt.SmoothTransformation
        )
        logger.debug("width: %s, height: %s" % (self.width(), self.height()))
        self.setPixmap(thumb)
class PublishDescriptionEdit(QtGui.QPlainTextEdit):
    """
    Plain-text editor for the summary description.

    When several items carry different descriptions, a "<multiple values>"
    placeholder is painted over the widget while it is unfocused.
    """

    def __init__(self, parent):
        """
        :param parent: QT parent object
        """
        QtGui.QPlainTextEdit.__init__(self, parent)
        # When True, paintEvent overlays the placeholder text while the
        # widget does not have keyboard focus.
        self._show_placeholder = False
        self._placeholder_text = "<multiple values>"

    def paintEvent(self, paint_event):
        """
        Paint the editor, or the placeholder overlay when multiple
        values are detected and the widget is not focused.
        """
        if self._show_placeholder and not self.hasFocus():
            painter = QtGui.QPainter(self.viewport())
            # placeholder note in blue
            blue = QtGui.QColor(24, 167, 227)
            painter.setPen(QtGui.QPen(blue))
            painter.setBrush(QtGui.QBrush(blue))
            painter.drawText(self.rect(), QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft,
                             self._placeholder_text)
        else:
            QtGui.QPlainTextEdit.paintEvent(self, paint_event)
|
"""
Divergences, measuring the distance between distributions. They are not
necessarily true metrics, but some are.
"""
from .copy_mutual_information import (
copy_mutual_information,
)
# from .coupling_metrics import (
# coupling_metric,
# )
from .cross_entropy import (
cross_entropy,
)
from .generalized_divergences import (
alpha_divergence,
hellinger_divergence,
renyi_divergence,
tsallis_divergence,
f_divergence,
hellinger_sum,
)
from .earth_movers_distance import (
earth_movers_distance,
)
from .hypercontractivity_coefficient import (
hypercontractivity_coefficient,
)
from .jensen_shannon_divergence import (
jensen_shannon_divergence,
)
from .kullback_leibler_divergence import (
kullback_leibler_divergence,
relative_entropy,
)
from .maximum_correlation import (
maximum_correlation,
)
from .variational_distance import (
bhattacharyya_coefficient,
chernoff_information,
hellinger_distance,
variational_distance,
)
from . import pmf
|
from markdown import markdown
from utils.FileUtils import read_data_from_file
from os import remove
import settings
def parse_markdown(md_location):
    """Render the markdown file at *md_location* and return the HTML."""
    source = read_data_from_file(md_location)
    html = generate_html(source)
    if settings.verbose:
        print('parsing ' + md_location)
    return html
def generate_html(content):
    """Convert markdown *content* to HTML5 with fenced-code and table support."""
    return markdown(
        content,
        extensions=['fenced_code', 'tables'],
        output_format='html5',
    )
def parse_markdown_without_header(md_location):
    """Render a markdown file to HTML, skipping its first (header) line.

    Writes the remaining lines to a temporary ``temp.md`` file so that
    parse_markdown() can process it, then removes the temp file.
    """
    with open(md_location, 'r') as fin:
        data = fin.read().splitlines(True)
    with open('temp.md', 'w') as fout:
        # Drop the first line (the header).
        fout.writelines(data[1:])
        # Fixed: the redundant fout.close() inside the `with` block is
        # gone; the context manager already closes the file.
    without_header = parse_markdown('temp.md')
    remove('temp.md')
    return without_header
"""
Умова: Школа вирішила сформувати три нові групи учнів та надати їм окремі класи. У кожному класі необхідно встановити столи для учнів, у розрахунку, що за одним столом може сидіти не більше двох учнів. Яку мінімальну кількість столів необхідно придбати?
Вхідні дані: 3 цілих числа - кількість учнів у кожній групі. Кожне число користувач вводить в окремому рядку.
Вихідні дані: число - кількість столів
"""
k1 = abs(int(input("Введіть кількість учнів в 1 групі ")))
k2 = abs(int(input("Введіть кількість учнів в 2 групі ")))
k3 = abs(int(input("Введіть кількість учнів в 3 групі ")))
count = 0
if(k1/2 == k1//2):
count += k1//2
else:
count = count +1 + k1//2
if(k2/2 == k2//2):
count += k2//2
else:
count = count +1 + k2//2
if(k3/2 == k3//2):
count += k3//2
else:
count = count +1 + k3//2
print("Кількість столів %d" % count)
|
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
n=int(input())
a=input()
x=a.count('x')
if 2*x>n:a=a.replace('x','X',x-n//2)
else:a=a.replace('X','x',n//2-x)
print(abs(2*x-n)//2)
print(a)
|
#!/usr/bin/env python3
"""
$ pylint cleancamera.py
Your code has been rated at 10.00/10 (previous run: 10.00/10, +0.00)
"""
DATA_INT = []


def sum_list(listaNumeros: list) -> int:
    """Return the sum of the numbers in *listaNumeros*.

    Fixed: the parameter was annotated ``int`` although the function
    iterates it; it is an iterable of numbers. The hand-rolled loop is
    replaced by the builtin sum().
    """
    return sum(listaNumeros)
def divide_array() -> None:
    """Read whitespace-separated integers from DATA.lst and print their sum.

    Fixed: the file handle is now closed via a context manager, locals no
    longer use constant-style names, and the return annotation reflects
    reality (the function prints and returns None, not str).
    """
    with open('DATA.lst', 'r') as data_file:
        message = data_file.read()
    numbers = list(map(int, message.split()))
    print(sum_list(numbers))


divide_array()
# $ python cleancamera.py
# 10000
# Count connected components of an undirected graph via DFS.
n, m = map(int, input().split())
graph = [[] for _ in range(n + 1)]
visited = [False] * (n + 1)
result = 0
for _ in range(m):
    i, j = map(int, input().split())
    graph[i].append(j)
    graph[j].append(i)


def dfs(v):
    """Mark every vertex reachable from v as visited.

    Iterative (explicit stack) instead of recursive: the recursive
    version hits Python's recursion limit (~1000 frames) on long paths.
    """
    stack = [v]
    visited[v] = True
    while stack:
        cur = stack.pop()
        for nxt in graph[cur]:
            if not visited[nxt]:
                visited[nxt] = True
                stack.append(nxt)


# Each DFS from an unvisited vertex discovers one whole component.
for i in range(1, n + 1):
    if not visited[i]:
        dfs(i)
        result += 1
print(result)
# Go as deep as possible == DFS! Vertices already visited are never
# revisited, since the deep search already explored them.
# There is no need to pass the whole array around - that only wastes
# memory; everything is recorded in the shared `visited` list.
import flwr as fl
from flwr.common.typing import Scalar
import ray
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from pathlib import Path
from typing import Dict, Callable, Optional, Tuple
from dataset_utils import getCIFAR10, do_fl_partitioning, get_dataloader
# Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')
# borrowed from Pytorch quickstart example
class Net(nn.Module):
    """Small CNN for 10-class 32x32 RGB inputs (CIFAR-10), from the
    PyTorch 60-minute blitz."""

    def __init__(self) -> None:
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Two conv+pool stages, then flatten into the classifier head.
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = features.view(-1, 16 * 5 * 5)
        hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
        return self.fc3(hidden)
# borrowed from Pytorch quickstart example
def train(net, trainloader, epochs, device: str):
    """Train *net* in place with SGD over *trainloader* for *epochs* passes."""
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    net.train()
    for _ in range(epochs):
        for batch_images, batch_labels in trainloader:
            batch_images = batch_images.to(device)
            batch_labels = batch_labels.to(device)
            optimizer.zero_grad()
            criterion(net(batch_images), batch_labels).backward()
            optimizer.step()
# borrowed from Pytorch quickstart example
def test(net, testloader, device: str):
    """Return (summed loss, accuracy) of *net* over the whole *testloader*."""
    criterion = torch.nn.CrossEntropyLoss()
    total_loss = 0.0
    n_correct = 0
    n_seen = 0
    net.eval()
    with torch.no_grad():
        for batch in testloader:
            images = batch[0].to(device)
            labels = batch[1].to(device)
            outputs = net(images)
            total_loss += criterion(outputs, labels).item()
            predicted = outputs.data.max(1)[1]
            n_seen += labels.size(0)
            n_correct += (predicted == labels).sum().item()
    return total_loss, n_correct / n_seen
# Flower client that will be spawned by Ray
# Adapted from Pytorch quickstart example
class CifarRayClient(fl.client.NumPyClient):
    """Flower NumPyClient that trains on one CIFAR-10 partition.

    Instances are spawned by Ray; *cid* selects this client's partition
    inside *fed_dir_data*.
    """

    def __init__(self, cid: str, fed_dir_data: str):
        self.cid = cid
        self.fed_dir = Path(fed_dir_data)
        self.properties: Dict[str, Scalar] = {"tensor_type": "numpy.ndarray"}

        # instantiate model
        self.net = Net()

        # determine device
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def get_parameters(self):
        # Model weights as NumPy arrays, in state_dict order.
        return [val.cpu().numpy() for _, val in self.net.state_dict().items()]

    # def get_properties(self, ins: PropertiesIns) -> PropertiesRes:
    def get_properties(self, ins):
        return self.properties

    def set_parameters(self, parameters):
        # Rebuild the state dict from the server-provided arrays.
        params_dict = zip(self.net.state_dict().keys(), parameters)
        state_dict = OrderedDict(
            {k: torch.from_numpy(np.copy(v)) for k, v in params_dict}
        )
        self.net.load_state_dict(state_dict, strict=True)

    def fit(self, parameters, config):
        """Train locally on this client's partition; return the new weights."""
        # print(f"fit() on client cid={self.cid}")
        self.set_parameters(parameters)

        # load data for this client and get trainloader
        num_workers = len(ray.worker.get_resource_ids()["CPU"])
        trainloader = get_dataloader(
            self.fed_dir,
            self.cid,
            is_train=True,
            batch_size=int(config["batch_size"]),
            workers=num_workers,
        )

        # send model to device
        self.net.to(self.device)

        # train
        train(self.net, trainloader, epochs=int(config["epochs"]), device=self.device)

        # return local model and statistics
        return self.get_parameters(), len(trainloader.dataset), {}

    def evaluate(self, parameters, config):
        """Evaluate the given weights on this client's validation split."""
        # print(f"evaluate() on client cid={self.cid}")
        self.set_parameters(parameters)

        # load data for this client and get valloader
        num_workers = len(ray.worker.get_resource_ids()["CPU"])
        valloader = get_dataloader(
            self.fed_dir, self.cid, is_train=False, batch_size=50, workers=num_workers
        )

        # send model to device
        self.net.to(self.device)

        # evaluate
        loss, accuracy = test(self.net, valloader, device=self.device)

        # return statistics
        return float(loss), len(valloader.dataset), {"accuracy": float(accuracy)}
def fit_config(rnd: int) -> Dict[str, str]:
    """Return per-round training config (static batch size and local epochs)."""
    return {
        "epoch_global": str(rnd),
        "epochs": "5",
        "batch_size": "64",
    }
def set_weights(model: torch.nn.ModuleList, weights: fl.common.Weights) -> None:
    """Load a list of NumPy ndarrays into *model*'s state dict (in order)."""
    keys = model.state_dict().keys()
    tensors = [torch.tensor(np.atleast_1d(w)) for w in weights]
    model.load_state_dict(OrderedDict(zip(keys, tensors)), strict=True)
def get_eval_fn(
    testset: torchvision.datasets.CIFAR10,
) -> Callable[[fl.common.Weights], Optional[Tuple[float, float]]]:
    """Build a centralized evaluation function over the full CIFAR-10 test set."""

    def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:
        """Load *weights* into a fresh Net and score it on the test set."""
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        model = Net()
        set_weights(model, weights)
        model.to(device)
        loader = torch.utils.data.DataLoader(testset, batch_size=50)
        loss, accuracy = test(model, loader, device=device)
        return loss, {"accuracy": accuracy}

    return evaluate
# Start Ray simulation (a _default server_ will be created)
# This example does:
# 1. Downloads CIFAR-10
# 2. Partitions the dataset into N splits, where N is the total number of
#    clients. We refer to this as `pool_size`. The partition can be IID or non-IID
# 4. Starts a Ray-based simulation where a % of clients are sampled each round.
# 5. After the M rounds end, the global model is evaluated on the entire testset.
# Also, the global model is evaluated on the valset partition residing in each
# client. This is useful to get a sense on how well the global model can generalise
# to each client's data.
if __name__ == "__main__":
pool_size = 100 # number of dataset partions (= number of total clients)
client_resources = {"num_cpus": 1} # each client will get allocated 1 CPUs
# download CIFAR10 dataset
train_path, testset = getCIFAR10()
# partition dataset (use a large `alpha` to make it IID;
# a small value (e.g. 1) will make it non-IID)
# This will create a new directory called "federated: in the directory where
# CIFAR-10 lives. Inside it, there will be N=pool_size sub-directories each with
# its own train/set split.
fed_dir = do_fl_partitioning(
train_path, pool_size=pool_size, alpha=1000, num_classes=10, val_ratio=0.1
)
# configure the strategy
strategy = fl.server.strategy.FedAvg(
fraction_fit=0.1,
min_fit_clients=10,
min_available_clients=pool_size, # All clients should be available
on_fit_config_fn=fit_config,
eval_fn=get_eval_fn(testset), # centralised testset evaluation of global model
)
def client_fn(cid: str):
# create a single client instance
return CifarRayClient(cid, fed_dir)
# (optional) specify ray config
ray_config = {"include_dashboard": False}
# start simulation
fl.simulation.start_simulation(
client_fn=client_fn,
num_clients=pool_size,
client_resources=client_resources,
num_rounds=5,
strategy=strategy,
ray_init_args=ray_config,
)
|
name = 'Sonja'
# Greet known names specifically, anyone else generically.
known_greetings = {
    'Ola': 'Ahoj Ola!',
    'Sonja': 'Ahoj Sonja!',
}
print(known_greetings.get(name, 'Ahoj neznama!'))
np = int(input("numarul de petale ale unei flori: "))
if (np % 5 == 0):
print("Nu ma iubeste deloc")
elif (np % 5 == 1):
print("Ma iubeste un pic")
elif (np % 5 == 2):
print("Ma iubeste mult")
elif (np % 5 == 3):
print("Ma iubeste cu pasiune")
elif (np % 5 == 4):
print("Ma iubeste la nebunie")
|
from django.urls import path
from . import views

# URL routes for the app; every view lives in ./views.py.
urlpatterns = [
    path('', views.homepage),
    path('register', views.register),
    path('login', views.login),
    path('instruction', views.instruction),
    path('logout', views.logout),
    path('landing', views.landing),
    path('edit_user_info/<int:user_id>', views.edit_user),
    path('submit', views.submit),
    path('scoreboard', views.scoreboard),
    # path('fakedata/<int:score>/<int:round_count>', views.fakedata)
]
|
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    # NOTE(review): no Meta with `model`/`fields` is declared, so DRF will
    # raise when this serializer is instantiated — presumably a placeholder
    # awaiting implementation; confirm before use.
    pass
|
# coding=utf-8
import os
from rest_framework import serializers
from tw.models import *
class DistrictSerializer(serializers.ModelSerializer):
    """List-level representation of a District (id, name, parent id)."""
    class Meta:
        model = District
        fields = ('id', 'name', 'parentId')
class DistrictDetailSerializer(serializers.ModelSerializer):
    """Detail view of a District: coordinates and content body."""
    class Meta:
        model = District
        fields = ('id', 'latlng', 'content')
class TagsSerializer(serializers.ModelSerializer):
    """Tag representation including its hotness flag/score."""
    class Meta:
        model = Tag
        fields = ('id', 'tag', 'hot')
class PhotoSerializer(serializers.ModelSerializer):
    """Photo plus derived file names for the 1000/500/100px renditions."""
    photo_1000 = serializers.SerializerMethodField('get_photo_1000')
    photo_500 = serializers.SerializerMethodField('get_photo_500')
    photo_100 = serializers.SerializerMethodField('get_photo_100')

    class Meta:
        model = Photo
        fields = ('id', 'title', 'pclass', 'district', 'photo', 'photo_1000', 'photo_500', 'photo_100')

    def _sized_photo_name(self, obj, suffix):
        # Insert the size suffix before the extension,
        # e.g. "dir/img.jpg" -> "dir/img_500.jpg".
        # (Consolidates the logic previously triplicated below.)
        base, ext = os.path.splitext(obj.photo.name)
        return base + suffix + ext

    def get_photo_1000(self, obj):
        return self._sized_photo_name(obj, '_1000')

    def get_photo_500(self, obj):
        return self._sized_photo_name(obj, '_500')

    def get_photo_100(self, obj):
        return self._sized_photo_name(obj, '_100')
# Used only to edit the title.
class PhotoTitleSerializer(serializers.ModelSerializer):
    """Minimal Photo serializer exposing just id and title."""
    class Meta:
        model = Photo
        fields = ('id', 'title')
class ArticleSerializer(serializers.ModelSerializer):
    """List-level Article fields (no content body)."""
    class Meta:
        model = Article
        fields = ('id', 'district', 'title', 'pclass', 'weight', 'status')
class ArticleDetailSerializer(serializers.ModelSerializer):
    """Detail view of an Article including its content and backup content."""
    class Meta:
        model = Article
        fields = ('id', 'title', 'copyurl', 'content', 'contentBak')
class PoiSerializer(serializers.ModelSerializer):
    """List-level POI fields (no content body)."""
    class Meta:
        model = Poi
        fields = ('id', 'district', 'title', 'pclass', 'weight', 'status')
class PoiDetailSerializer(serializers.ModelSerializer):
    """Detail view of a POI: coordinates, content and backup content."""
    class Meta:
        model = Poi
        fields = ('id', 'title', 'latlng', 'content', 'contentBak')
class ArticlePoiSerializer(serializers.ModelSerializer):
    """Article<->POI link row, with the POI's title exposed as `poiname`."""
    poiname = serializers.SerializerMethodField('get_name')

    class Meta:
        model = Article_poi
        fields = ('id', 'article', 'poi', 'poiname')

    def get_name(self, obj):
        # Title of the linked POI.
        return obj.poi.title
|
from marketsim import event, meta, types, registry, _
from _basic import Strategy, Empty
from _wrap import wrapper2
class _Array_Impl(Strategy):
    # Composite strategy: re-emits the orders created by every child
    # strategy in self.strategies as its own.
    # NOTE(review): self.strategies is presumably injected by the
    # wrapper2-generated class below — confirm.
    def __init__(self):
        Strategy.__init__(self)
        for s in self.strategies:
            event.subscribe(s.on_order_created, _(self)._send, self)

    def dispose(self):
        # Release every child strategy's resources.
        for s in self.strategies:
            s.dispose()
exec wrapper2('Array', "", [('strategies', '[Empty()]', 'meta.listOf(types.ISingleAssetStrategy)')])
|
"""The bluetooth integration."""
from __future__ import annotations
import asyncio
from collections.abc import Callable
from datetime import datetime
import logging
import time
import async_timeout
import bleak
from bleak import BleakError
from bleak.backends.device import BLEDevice
from bleak.backends.scanner import AdvertisementData
from dbus_next import InvalidMessageError
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import (
CALLBACK_TYPE,
Event,
HomeAssistant,
callback as hass_callback,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.package import is_docker_env
from .const import (
DEFAULT_ADAPTERS,
SCANNER_WATCHDOG_INTERVAL,
SCANNER_WATCHDOG_TIMEOUT,
SOURCE_LOCAL,
START_TIMEOUT,
)
from .models import BluetoothScanningMode
# Indirections kept at module level so tests can monkeypatch them;
# the duplicate MONOTONIC_TIME assignment has been removed.
OriginalBleakScanner = bleak.BleakScanner
MONOTONIC_TIME = time.monotonic

_LOGGER = logging.getLogger(__name__)

# Map HA scanning modes onto the string values bleak expects.
SCANNING_MODE_TO_BLEAK = {
    BluetoothScanningMode.ACTIVE: "active",
    BluetoothScanningMode.PASSIVE: "passive",
}
def create_bleak_scanner(
    scanning_mode: BluetoothScanningMode, adapter: str | None
) -> bleak.BleakScanner:
    """Create a Bleak scanner for *scanning_mode*, optionally bound to *adapter*."""
    kwargs = {"scanning_mode": SCANNING_MODE_TO_BLEAK[scanning_mode]}
    # Only pass a non-default adapter through to bleak.
    if adapter and adapter not in DEFAULT_ADAPTERS:
        kwargs["adapter"] = adapter
    _LOGGER.debug("Initializing bluetooth scanner with %s", kwargs)
    try:
        return OriginalBleakScanner(**kwargs)  # type: ignore[arg-type]
    except (FileNotFoundError, BleakError) as ex:
        raise RuntimeError(f"Failed to initialize Bluetooth: {ex}") from ex
class HaScanner:
"""Operate a BleakScanner.
Multiple BleakScanner can be used at the same time
if there are multiple adapters. This is only useful
if the adapters are not located physically next to each other.
Example use cases are usbip, a long extension cable, usb to bluetooth
over ethernet, usb over ethernet, etc.
"""
def __init__(
self, hass: HomeAssistant, scanner: bleak.BleakScanner, adapter: str | None
) -> None:
"""Init bluetooth discovery."""
self.hass = hass
self.scanner = scanner
self.adapter = adapter
self._start_stop_lock = asyncio.Lock()
self._cancel_stop: CALLBACK_TYPE | None = None
self._cancel_watchdog: CALLBACK_TYPE | None = None
self._last_detection = 0.0
self._callbacks: list[
Callable[[BLEDevice, AdvertisementData, float, str], None]
] = []
self.name = self.adapter or "default"
self.source = self.adapter or SOURCE_LOCAL
@property
def discovered_devices(self) -> list[BLEDevice]:
"""Return a list of discovered devices."""
return self.scanner.discovered_devices
@hass_callback
def async_register_callback(
self, callback: Callable[[BLEDevice, AdvertisementData, float, str], None]
) -> CALLBACK_TYPE:
"""Register a callback.
Currently this is used to feed the callbacks into the
central manager.
"""
def _remove() -> None:
self._callbacks.remove(callback)
self._callbacks.append(callback)
return _remove
@hass_callback
def _async_detection_callback(
self,
ble_device: BLEDevice,
advertisement_data: AdvertisementData,
) -> None:
"""Call the callback when an advertisement is received.
Currently this is used to feed the callbacks into the
central manager.
"""
self._last_detection = MONOTONIC_TIME()
for callback in self._callbacks:
callback(ble_device, advertisement_data, self._last_detection, self.source)
async def async_start(self) -> None:
"""Start bluetooth scanner."""
self.scanner.register_detection_callback(self._async_detection_callback)
async with self._start_stop_lock:
await self._async_start()
async def _async_start(self) -> None:
"""Start bluetooth scanner under the lock."""
try:
async with async_timeout.timeout(START_TIMEOUT):
await self.scanner.start() # type: ignore[no-untyped-call]
except InvalidMessageError as ex:
_LOGGER.debug(
"%s: Invalid DBus message received: %s", self.name, ex, exc_info=True
)
raise ConfigEntryNotReady(
f"{self.name}: Invalid DBus message received: {ex}; try restarting `dbus`"
) from ex
except BrokenPipeError as ex:
_LOGGER.debug(
"%s: DBus connection broken: %s", self.name, ex, exc_info=True
)
if is_docker_env():
raise ConfigEntryNotReady(
f"{self.name}: DBus connection broken: {ex}; try restarting `bluetooth`, `dbus`, and finally the docker container"
) from ex
raise ConfigEntryNotReady(
f"{self.name}: DBus connection broken: {ex}; try restarting `bluetooth` and `dbus`"
) from ex
except FileNotFoundError as ex:
_LOGGER.debug(
"%s: FileNotFoundError while starting bluetooth: %s",
self.name,
ex,
exc_info=True,
)
if is_docker_env():
raise ConfigEntryNotReady(
f"{self.name}: DBus service not found; docker config may be missing `-v /run/dbus:/run/dbus:ro`: {ex}"
) from ex
raise ConfigEntryNotReady(
f"{self.name}: DBus service not found; make sure the DBus socket is available to Home Assistant: {ex}"
) from ex
except asyncio.TimeoutError as ex:
raise ConfigEntryNotReady(
f"{self.name}: Timed out starting Bluetooth after {START_TIMEOUT} seconds"
) from ex
except BleakError as ex:
_LOGGER.debug(
"%s: BleakError while starting bluetooth: %s",
self.name,
ex,
exc_info=True,
)
raise ConfigEntryNotReady(
f"{self.name}: Failed to start Bluetooth: {ex}"
) from ex
self._async_setup_scanner_watchdog()
self._cancel_stop = self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, self._async_hass_stopping
)
@hass_callback
def _async_setup_scanner_watchdog(self) -> None:
"""If Dbus gets restarted or updated, we need to restart the scanner."""
self._last_detection = MONOTONIC_TIME()
self._cancel_watchdog = async_track_time_interval(
self.hass, self._async_scanner_watchdog, SCANNER_WATCHDOG_INTERVAL
)
async def _async_scanner_watchdog(self, now: datetime) -> None:
"""Check if the scanner is running."""
time_since_last_detection = MONOTONIC_TIME() - self._last_detection
if time_since_last_detection < SCANNER_WATCHDOG_TIMEOUT:
return
_LOGGER.info(
"%s: Bluetooth scanner has gone quiet for %s, restarting",
self.name,
SCANNER_WATCHDOG_INTERVAL,
)
async with self._start_stop_lock:
await self._async_stop()
await self._async_start()
    async def _async_hass_stopping(self, event: Event) -> None:
        """Stop the Bluetooth integration at shutdown."""
        # The one-shot stop listener has already fired; clear the handle so
        # _async_stop() does not try to cancel it again.
        self._cancel_stop = None
        await self.async_stop()
    async def async_stop(self) -> None:
        """Stop bluetooth scanner (public entry point; serialized by the lock)."""
        async with self._start_stop_lock:
            await self._async_stop()
    async def _async_stop(self) -> None:
        """Stop bluetooth discovery under the lock."""
        _LOGGER.debug("Stopping bluetooth discovery")
        # Cancel the watchdog first so it cannot trigger a restart while
        # teardown is in progress.
        if self._cancel_watchdog:
            self._cancel_watchdog()
            self._cancel_watchdog = None
        # Remove the shutdown listener; it is re-registered on start.
        if self._cancel_stop:
            self._cancel_stop()
            self._cancel_stop = None
        try:
            await self.scanner.stop()  # type: ignore[no-untyped-call]
        except BleakError as ex:
            # This is not fatal, and they may want to reload
            # the config entry to restart the scanner if they
            # change the bluetooth dongle.
            _LOGGER.error("Error stopping scanner: %s", ex)
|
from libs.config import alias, color, gget
from libs.myapp import execute_sql_command
@alias(True, _type="DATABASE", db="database")
def run(database: str = ""):
    """
    db_tables

    Output all tables of a database.

    eg: db_tables {database=current_database}
    """
    # Require an initialized DB connection (set up by the db_init command).
    if (not gget("db_connected", "webshell")):
        print(color.red("Please run db_init command first"))
        return
    # Fall back to the currently selected database when none was given.
    database = database if database else gget("db_dbname", "webshell")
    print(execute_sql_command("show tables;", database))
|
__author__ = 'HLZC'
import os

# Compare the directory containing this script with the process's current
# working directory; the two differ when the script is launched elsewhere.
fp = os.getcwd()
fp1 = os.path.dirname(__file__)
print(fp1)
print(fp)
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# Packaging metadata for aspace_tools.  Installing this project exposes the
# `oac-process` console command.
setup(
    name='aspace_tools',
    version="0.3",
    packages=find_packages(),
    include_package_data=True,
    # Runtime dependencies; iso-639 provides language-code lookups.
    install_requires=[
        'requests', 'lxml', 'iso-639'
    ],
    entry_points={
        'console_scripts': [
            'oac-process = aspace_tools.oac_process:main',
        ]
    }
)
|
import errno
import os
import select
import threading
import time
from scheme import Integer, Text
from spire.core import Configuration, Unit, configured_property
from spire.support.logs import LogHelper
log = LogHelper(__name__)
# Platforms without os.mkfifo (e.g. Windows) get a simple sleep-based idler;
# otherwise a named pipe is used so idle() can be woken by interrupt().
try:
    from os import mkfifo
except ImportError:
    class Idler(Unit):
        """Fallback idler: sleeps for a fixed timeout; cannot be interrupted."""
        configuration = Configuration({
            'timeout': Integer(default=30),
        })

        timeout = configured_property('timeout')

        def idle(self, timeout=None):
            # Blocks for the requested (or configured) number of seconds.
            time.sleep(timeout or self.timeout)

        def interrupt(self):
            # No-op: a sleeping thread cannot be woken early on this platform.
            pass
else:
    class Idler(Unit):
        """FIFO-backed idler: idle() polls a named pipe so interrupt() can wake it."""
        configuration = Configuration({
            'fifo': Text(default='/tmp/platoon-idler'),
            'timeout': Integer(default=5),
        })

        fifo = configured_property('fifo')

        def __init__(self):
            self.fd = None
            self.poller = None
            # poll() takes milliseconds; the configured value is in seconds.
            self.timeout = self.configuration['timeout'] * 1000

        def idle(self, timeout=None):
            """Block until the timeout elapses or interrupt() writes to the fifo."""
            if not self.poller:
                self._prepare_idler()
            if not self.fd:
                self._open_fifo()
            if timeout is not None:
                poll_timeout = timeout * 1000
            else:
                poll_timeout = self.timeout
            try:
                events = self.poller.poll(poll_timeout)
            except select.error, exception:
                # EINTR just means a signal arrived; treat it as a wakeup.
                if exception.args[0] == errno.EINTR:
                    return
                else:
                    raise
            interrupted = False
            try:
                fd, event = events[0]
            except IndexError:
                # Timed out with no fifo activity.
                return
            if event & select.POLLIN:
                # Drain whatever interrupt() wrote so the next poll blocks.
                os.read(fd, 64)
                interrupted = True
                log('debug', 'interrupted')
            if event & select.POLLHUP:
                # The writer closed its end; reopen so future polls still work.
                self._open_fifo()
            if not interrupted:
                # POLLHUP without data: keep idling.
                # NOTE(review): this restarts with the *full* timeout rather
                # than the remaining time — confirm intended.
                self.idle(timeout)

        def interrupt(self):
            """Wake any thread blocked in idle() by writing a byte to the fifo."""
            try:
                fd = os.open(self.fifo, os.O_WRONLY | os.O_NONBLOCK)
            except OSError, exception:
                # ENOENT/ENXIO: fifo missing or nobody is idling; best-effort no-op.
                if exception.args[0] in (errno.ENOENT, errno.ENXIO):
                    return
                else:
                    raise
            try:
                os.write(fd, '\x00')
                log('debug', 'attempting to interrupt')
            except OSError, exception:
                # EAGAIN/EPIPE: pipe full or reader vanished; interrupt is best-effort.
                if exception.args[0] in (errno.EAGAIN, errno.EPIPE):
                    return
                else:
                    raise
            finally:
                os.close(fd)

        def _open_fifo(self):
            # Re-opening after POLLHUP requires unregistering and closing the
            # previous descriptor first.
            if self.fd:
                self.poller.unregister(self.fd)
                os.close(self.fd)
            self.fd = os.open(self.fifo, os.O_RDONLY | os.O_NONBLOCK)
            self.poller.register(self.fd, select.POLLIN)

        def _prepare_idler(self):
            self.poller = select.poll()
            try:
                mkfifo(self.fifo)
            except OSError, exception:
                # The fifo may already exist from a previous run; that is fine.
                if exception.args[0] != errno.EEXIST:
                    raise
|
#Also known as "why you don't overuse inheritance" — it can turn evil, like a bad fantasy villain from The Princess Bride.
class Parent(object):
    """Base class whose methods the child classes inherit, override, or wrap."""

    def implict(self):
        # (sic) "implict" — identifier kept as-is to match callers below.
        print 'parent implict()'

    def override(self):
        print 'parent crash override'

    def altered(self):
        print 'parent altered'
class Child(Parent):
    """Demonstrates overriding and extending Parent via super()."""

    def __init__(self, stuff):
        self.stuff = stuff
        super(Child, self).__init__()

    def override(self):
        # Completely replaces Parent.override.
        print 'child crash override'

    def altered(self):
        # Wraps Parent.altered with extra output before and after.
        print 'child altered start'
        super(Child, self).altered()
        print 'child altered end'
class Other(object):
    """Standalone class used by NovaChild via composition (no inheritance)."""

    def override(self):
        print 'other crash override'

    def implict(self):
        print 'other implict'

    def altered(self):
        print 'other altered'
class NovaChild(object):
    """Same interface as Child, but uses composition (has-a Other) instead of inheritance."""

    def __init__(self):
        self.other = Other()

    def implict(self):
        # Delegates to the contained Other instance.
        self.other.implict()

    def override(self):
        # Provides its own behavior without touching Other.
        print 'novachild crash override'

    def altered(self):
        # Wraps the delegate's altered() with extra output.
        print 'novachild start'
        self.other.altered()
        print 'novachild end'
# Exercise implicit inheritance, overriding, and wrapped ("altered") calls.
parent = Parent()
child = Child("stuff")
parent.implict()
child.implict()
parent.override()
child.override()
parent.altered()
child.altered()
# The composition-based variant behaves the same from the caller's viewpoint.
novachild = NovaChild()
novachild.implict()
novachild.override()
novachild.altered()
import math


class Euclidean_distance:
    """Euclidean distance between a start point and a goal point.

    Only the first two coordinates (x, y) of each point are used.
    """

    def __init__(self, st, go):
        """
        :param st: start point — indexable with at least two numbers (x, y)
        :param go: goal point — indexable with at least two numbers (x, y)
        """
        self.st = st
        self.go = go

    def calc_dist(self):
        """Return the 2-D Euclidean distance between start and goal."""
        # math.hypot is the idiomatic (and numerically safer) form of
        # sqrt(dx**2 + dy**2).
        return math.hypot(self.st[0] - self.go[0], self.st[1] - self.go[1])
|
from __future__ import absolute_import, division
from psychopy import locale_setup, gui, visual, core, data, event, logging
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy.random import random, randint, normal, shuffle
import random
import os # handy system and path functions
import sys # to get file system encoding
import pandas as pd
import numpy as np
from operator import itemgetter
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# include paths to bin in order to create a subject specific trial sequence
binDir = _thisDir + os.sep + u'bin'
# Store info about the experiment session
expName = 'CCL' # from the Builder filename that created this script
expInfo = {'participant':'','session':'001'}
# Pop up a dialog to collect participant/session before anything else runs.
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
    core.quit() # user pressed cancel during popout
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, "ses_" + expInfo['session'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
# Setup the Window
win = visual.Window(
    size=(1024, 768), fullscr=True, allowGUI=False,
    monitor='testMonitor', color=[1,1,1], useFBO=True)
##########Timer##########
globalClock = core.MonotonicClock() # to track the time since experiment started
Practice_1_Clock = core.Clock() #unlike globalclock, gets reset each trial
Practice_2_Clock = core.Clock()
Task_1_Clock = core.Clock()
Task_2_Clock = core.Clock()
##########Version##########
# version 1 [non,con] version 2 [con,non]
# Randomly assign block order (counterbalanced across participants).
version = random.choice([1,2])
print(version)
##########Stimuli##########
# Build relative paths for the two stimulus sets and the practice images.
Imagelist1 = list(os.listdir(_thisDir + '/Set1'))
Imagelist1 = ["Set1/" + i for i in Imagelist1]
print(Imagelist1)
Imagelist2 = list(os.listdir(_thisDir + '/Set2'))
Imagelist2 = ["Set2/" + i for i in Imagelist2]
print(Imagelist2)
Practicelist = list(os.listdir(_thisDir + '/Practice'))
Practicelist = ["Practice/" + i for i in Practicelist]
# Reusable stimulus objects; the image/text are swapped per trial.
Image = visual.ImageStim(win=win, name='Image',
    image= _thisDir + '/Set1/CK_f_01.jpg', mask=None,
    ori=0, pos=(0, 0), opacity=1, texRes=128, depth=0,
    size=(0.75, 1), interpolate = True)
stroop_text =visual.TextStim(win=win, name='stroop_text',
    text='default',font=u'Arial',
    pos=(0, 0), height=0.2, wrapWidth=None, ori=0,
    color=u'red', colorSpace='rgb', opacity=1,
    depth=0)
# Invisible placeholder drawn during the inter-trial interval.
Blank = visual.TextStim(win=win, name='blank', text='h',
    font=u'Arial', pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color=u'black', colorSpace='rgb', opacity=0, depth=0.0)
# Feedback messages used only during practice blocks.
Correct =visual.TextStim(win=win, name='Correct',
    text='Correct',font=u'Arial',
    pos=(0, -0.75), height=0.2, wrapWidth=None, ori=0,
    color=u'red', colorSpace='rgb', opacity=1,
    depth=0)
Incorrect =visual.TextStim(win=win, name='Incorrect',
    text='Incorrect',font=u'Arial',
    pos=(0, -0.75), height=0.2, wrapWidth=None, ori=0,
    color=u'red', colorSpace='rgb', opacity=1,
    depth=0)
###Indexing Image###
# index the images for high, medium and low frequencies for later selection
# NOTE(review): assumes each image set contains at least 104 files — confirm.
#indices_all = (np.random.choice(len(Imagelist), 104, replace=False)).tolist()
indices_all = list(range(104))
indices_low = indices_all[:80]
indices_medium = indices_all[80:96]
indices_high = indices_all[96:104]
# select corresponding images
# High-frequency images repeat 10x (8*10=80 trials), medium 5x (16*5=80),
# low-frequency images appear once each (80 trials).
random.shuffle(Imagelist1)
stim_image_high1 = [Imagelist1[i] for i in indices_high]*10
stim_image_medium1 = [Imagelist1[i] for i in indices_medium]*5
stim_image_low1 = [Imagelist1[i] for i in indices_low]
stim_image1 = Practicelist*3 + stim_image_high1 +stim_image_medium1 + stim_image_low1
print(stim_image1)
random.shuffle(Imagelist2)
stim_image_high2 = [Imagelist2[i] for i in indices_high]*10
stim_image_medium2 = [Imagelist2[i] for i in indices_medium]*5
stim_image_low2 = [Imagelist2[i] for i in indices_low]
stim_image2 = Practicelist*3 + stim_image_high2 +stim_image_medium2 + stim_image_low2
print (stim_image2)
##########Timing##########
trials = 240        # experimental trials per task half (excludes 12 practice)
duration = 1.0      # stimulus duration in seconds
ITI_min = 800.0     # inter-trial interval bounds, milliseconds
ITI_max = 2000.0
#ITI & duration
# One jittered ITI (in seconds) per trial: 12 practice + 240 experimental.
# FIX: random.randint() requires integer bounds; passing floats is deprecated
# since Python 3.10 and an error on 3.12+, so convert explicitly.
ITI = [random.randint(int(ITI_min), int(ITI_max)) / 1000 for _ in range(252)]
duration = [duration]*252
##########Frequency##########
frequency = ['None']*12 + ['high']*80 + ['medium']*80 + ['low']*80
##########Congruency##########
congruency_con = ['con']*6 + ['incon']*6 + ['con']*40 + ['incon']*40 + ['con']*40 + ['incon']*40 + ['con']*40 + ['incon']*40
#congruency_con_practice = ['con']*6 + ['incon']*6
congruency_non = ['None']*252
# Randomize which stimulus set serves the first vs. second task half.
image_set = [stim_image1,stim_image2]
random.shuffle(image_set)
##########corrAns##########
# S-R mapping is different from exp version so needs a new number generator #
# 0 --> female w, male o; 1 --> female o, male w
Ans_version = random.choice([0,1])
print(Ans_version)

def _gender_key(filename, sr):
    """Return the correct response key (sr[0]=female, sr[1]=male) for one image.

    FIX: the original test `('m' or 'M') in filename` evaluates to
    `'m' in filename`, so an uppercase 'M' marker was never matched.
    """
    if 'm' in filename or 'M' in filename:
        return sr[1]
    return sr[0]

# SR[0] answers "female", SR[1] answers "male"; order depends on Ans_version.
SR = ['w', 'o'] if Ans_version == 0 else ['o', 'w']
corrAns1 = [_gender_key(i, SR) for i in image_set[0]]
corrAns2 = [_gender_key(i, SR) for i in image_set[1]]
##########Exp Matrix##########
#Turn Exp Matrix into df to randomize rows
# Rows 0-11 are practice trials; 12-251 are experimental trials.  Each
# sub-frame is shuffled independently with sample(frac=1).
expmatrix_non = [image_set[0], frequency, congruency_non, corrAns1, duration, ITI]
expmatrix_non = pd.DataFrame(expmatrix_non)
expmatrix_non = expmatrix_non.transpose()
expmatrix_non.columns = ['stim_image','Frequency','Congruency','corrAns','Duration','ITI']
expmatrix_non_exp = expmatrix_non[12:252].sample(frac=1).reset_index(drop=True)
expmatrix_non_prac = expmatrix_non[0:12].sample(frac=1).reset_index(drop=True)
expmatrix_con = [image_set[1], frequency, congruency_con, corrAns2, duration, ITI]
expmatrix_con = pd.DataFrame(expmatrix_con)
expmatrix_con = expmatrix_con.transpose()
expmatrix_con.columns = ['stim_image','Frequency','Congruency','corrAns','Duration','ITI']
expmatrix_con_exp = expmatrix_con[12:252].sample(frac=1).reset_index(drop=True)
expmatrix_con_prac = expmatrix_con[0:12].sample(frac=1).reset_index(drop=True)
# Concatenate halves in the order dictated by the counterbalancing version.
if version == 1:
    expmatrix = pd.concat([expmatrix_non_prac , expmatrix_non_exp, expmatrix_con_prac, expmatrix_con_exp],ignore_index=True)
else:
    expmatrix = pd.concat([expmatrix_con_prac, expmatrix_con_exp, expmatrix_non_prac , expmatrix_non_exp],ignore_index=True)
expmatrix.to_csv(r'expmatrix.csv')
##########Instruction##########
def _read_instr_lines(basename):
    """Read an instruction file from binDir, stripping trailing newlines.

    FIX: the original left the file handles open; use a context manager.
    """
    with open(os.path.join(binDir, basename)) as fh:
        return [line.rstrip('\n') for line in fh]

# Beginning Instr
lines_begin_1 = _read_instr_lines("CLInstr_Begin1.txt")
if version == 2:
    lines_begin_1.append('A text label will also be presented on top of every face image.')
lines_begin_2 = _read_instr_lines("CLInstr_Begin2.txt")
if version == 2:
    lines_begin_2.append('ignore the meaning of the word and to categorize the gender of the face image.')
else:
    lines_begin_2.append('categorize the gender of the face image.')
if Ans_version == 0:
    lines_begin_2.append('Press' + ' ' + SR[0] + ' ' + 'if the image shows a female face and' + ' ' + SR[1] + ' ' + 'if it shows a male face.')
else:
    lines_begin_2.append('Press' + ' ' + SR[0] + ' ' + 'if the image shows a male face and' + ' ' + SR[1] + ' ' + 'if it shows a female face.')
lines_begin_2.append("Memorize that task rule and press the space bar to begin the practice trials.")
# Mid-way Instr (task change)
lines_mid_1 = _read_instr_lines("CLInstr_Mid.txt")
if version == 1:
    lines_mid_1.append('This time, a text label will be presented on top of every face image.')
else:
    lines_mid_1.append('This time, the word will no longer be shown on top of the images.')
lines_mid_2 = _read_instr_lines("CLInstr_Begin2.txt")
if version == 1:
    # FIX: this string literal was missing its closing quote (SyntaxError).
    lines_mid_2.append('ignore the meaning of the word and still to categorize the gender of the face image.')
else:
    lines_mid_2.append('still categorize the face image.')
lines_mid_2.append("Please memorize the task rule and press the space bar to begin.")
# Practice Instr (the original bare `lines_practice.append` was a no-op and
# has been removed; likewise for lines_post below)
lines_practice = _read_instr_lines("CLInstr_Practice.txt")
# Post Forced Choice Instr
lines_post = _read_instr_lines("CLInstr_PostFC.txt")
Instr_1 = visual.TextStim(win=win, name='Instr_1 ', color='black', font=u'Arial', pos=(0, 0), height=0.1, wrapWidth=None,
    text=(' '.join(map(str, lines_begin_1))))
Instr_2 = visual.TextStim(win=win, name='Instr_2 ', color='black', font=u'Arial', pos=(0, 0), height=0.1, wrapWidth=None,
    text=(' '.join(map(str, lines_begin_2))))
# FIX: these two referenced undefined names `lines_mid1` / `lines_mid2`.
Instr_3 = visual.TextStim(win=win, name='Instr_3 ', color='black', font=u'Arial', pos=(0, 0), height=0.1, wrapWidth=None,
    text=(' '.join(map(str, lines_mid_1))))
Instr_4 = visual.TextStim(win=win, name='Instr_4 ', color='black', font=u'Arial', pos=(0, 0), height=0.1, wrapWidth=None,
    text=(' '.join(map(str, lines_mid_2))))
Instr_Practice = visual.TextStim(win=win, name='Instr_Practice', color='black', font=u'Arial', pos=(0, 0), height=0.1, wrapWidth=None,
    text=(' '.join(map(str, lines_practice))))
##----------------------------------------------------------------------------##
##-----------------------------START RUNNING----------------------------------##
##----------------------------------------------------------------------------##
##---------------------------START INSTRUCTIONS-------------------------------##
# Should set the ins according to the version
# Two space presses advance through both instruction screens; the first
# press swaps screen 1 for screen 2, the second exits the loop.
Instr_1.setAutoDraw(True)
advance = 0
while advance < 2:
    if event.getKeys(keyList=["space"]):
        advance += 1
        Instr_1.setAutoDraw(False)
        Instr_2.setAutoDraw(True)
    if event.getKeys(keyList=["escape"]):
        core.quit()
    win.flip()
Instr_2.setAutoDraw(False)
##---------------------------START Practice_1-------------------------------##
# 12 practice trials with Correct/Incorrect feedback; no data recorded.
theseKeys = []
trialcounter = 0
for ptrial in range(12):
    t = 0
    Practice_1_Clock.reset()
    continueRoutine = True
    ##------------------SET DURATION & ITI OF STIMULI-------------------##
    duration = expmatrix.loc[ptrial,'Duration']   # stimulus duration (1000 ms)
    ITI = expmatrix.loc[ptrial,'ITI']             # jittered blank (800-2000 ms)
    ##---------------------SET STIMULI & RESPONSE---------------------------##
    stim_image = expmatrix.loc[ptrial,'stim_image']
    Image.setImage(stim_image)
    frequency = expmatrix.loc[ptrial,'Frequency']
    corrAns = expmatrix.loc[ptrial,'corrAns']
    congruency = expmatrix.loc[ptrial,'Congruency']
    if congruency != 'None':
        # FIX: `('m' or 'M') in stim_image` evaluates to `'m' in stim_image`
        # and never matched an uppercase 'M' marker in the filename.
        is_male = 'm' in stim_image or 'M' in stim_image
        if congruency == 'con':
            stroop_text.setText('Male' if is_male else 'Female')
        else:
            stroop_text.setText('Female' if is_male else 'Male')
    ##--------------------------WHILE LOOP BEGINS-------------------------##
    while continueRoutine:
        if event.getKeys(keyList=["escape"]):
            core.quit()
        # get current time
        t = Practice_1_Clock.getTime()
        key_resp = event.BuilderKeyResponse()
        if t < ITI:
            Blank.setAutoDraw(True)
        elif t > ITI and t < ITI + duration:
            Blank.setAutoDraw(False)
            Image.setAutoDraw(True)
            if congruency != 'None':
                stroop_text.setAutoDraw(True)
        else:
            # Trial time elapsed: clear the screen and leave the loop.
            Image.setAutoDraw(False)
            Blank.setAutoDraw(False)
            if congruency != 'None':
                stroop_text.setAutoDraw(False)
            continueRoutine = False
        theseKeys = event.getKeys(keyList=['w', 'o'])
        # NOTE(review): `t > duration` looks like it should be `t > ITI`
        # (the response window should open when the image appears); kept
        # as-is pending confirmation against the experiment design.
        if len(theseKeys) > 0 and t < ITI + duration and t > duration:  # at least one key was pressed
            # was this 'correct'?
            if str(corrAns) in theseKeys:
                key_resp.corr = 1
                Correct.setAutoDraw(True)
            else:
                key_resp.corr = 0
                Incorrect.setAutoDraw(True)
        ##------------CHECK ALL IF COMPONENTS HAVE FINISHED---------------##
        if continueRoutine:
            win.flip()
        else:
            Correct.setAutoDraw(False)
            Incorrect.setAutoDraw(False)
            break
    ##--------------------------NO NEED TO RECORD DATA-------------------------------##
    trialcounter += 1
    thisExp.nextEntry()
# completed 12 repeats of 'Practice_1'
##------------------------------START Task_1----------------------------------##
# 240 experimental trials (rows 12-251 of expmatrix); responses recorded.
theseKeys = []
trialcounter = 0
Instr_Practice.setAutoDraw(True)
advance = 0
while advance < 1:
    if event.getKeys(keyList=["space"]):
        advance += 1
        Instr_Practice.setAutoDraw(False)
    if event.getKeys(keyList=["escape"]):
        core.quit()
    win.flip()
for trial in range(12,252):
    t = 0
    Task_1_Clock.reset() # clock
    continueRoutine = True
    ##------------------SET DURATION & ITI OF STIMULI-------------------##
    duration = expmatrix.loc[trial,'Duration']   # stimulus duration (1000 ms)
    ITI = expmatrix.loc[trial,'ITI']             # jittered blank (800-2000 ms)
    ##---------------------SET STIMULI & RESPONSE---------------------------##
    stim_image = expmatrix.loc[trial,'stim_image']
    Image.setImage(stim_image)
    frequency = expmatrix.loc[trial,'Frequency']
    corrAns = expmatrix.loc[trial,'corrAns']
    congruency = expmatrix.loc[trial,'Congruency']
    if congruency != 'None':
        # FIX: `('m' or 'M') in stim_image` only ever tested 'm'.
        is_male = 'm' in stim_image or 'M' in stim_image
        if congruency == 'con':
            stroop_text.setText('Male' if is_male else 'Female')
        else:
            stroop_text.setText('Female' if is_male else 'Male')
    ##--------------------------WHILE LOOP BEGINS-------------------------##
    while continueRoutine:
        if event.getKeys(keyList=["escape"]):
            core.quit()
        # get current time
        t = Task_1_Clock.getTime()
        # NOTE(review): recreating BuilderKeyResponse() every frame resets
        # its internal clock, so key_resp.rt below is near-zero; RT should
        # probably be derived from Task_1_Clock — confirm before analysis.
        key_resp = event.BuilderKeyResponse()
        ##--------------------STIMULI PRESENTATION-------------------------------##
        if t < ITI:
            Blank.setAutoDraw(True)
        elif t > ITI and t < ITI + duration:
            Blank.setAutoDraw(False)
            Image.setAutoDraw(True)
            if congruency != 'None':
                stroop_text.setAutoDraw(True)
        else:
            Image.setAutoDraw(False)
            Blank.setAutoDraw(False)
            if congruency != 'None':
                stroop_text.setAutoDraw(False)
            continueRoutine = False
        theseKeys = event.getKeys(keyList=['w', 'o'])
        if len(theseKeys) > 0 and t < ITI + duration:  # at least one key was pressed
            if theseKeys[-1] != None:
                key_resp.rt = key_resp.clock.getTime()
                thisExp.addData('Response', theseKeys[-1])
                thisExp.addData('RT', key_resp.rt)
                # was this 'correct'?
                if str(corrAns) in theseKeys:
                    key_resp.corr = 1
                else:
                    key_resp.corr = 0
                thisExp.addData('Accuracy', key_resp.corr)
        ##------------CHECK ALL IF COMPONENTS HAVE FINISHED---------------##
        if continueRoutine:
            win.flip()
        else:
            break
    ##--------------------------RECORD DATA-------------------------------##
    trialcounter += 1
    thisExp.addData('Trial',trialcounter)
    thisExp.addData('stim_image', stim_image)
    thisExp.addData('frequency', frequency)
    thisExp.addData('congruency', congruency)
    thisExp.addData('text', stroop_text.text)
    thisExp.addData('corrAns', corrAns)
    thisExp.addData('duration', duration)
    thisExp.addData('ITI', ITI)
    thisExp.nextEntry()
# completed 240 repeats of 'Task_1'
##---------------------------START Practice_2-------------------------------##
# Mid-way instructions (task change), then 12 practice trials with feedback.
Instr_3.setAutoDraw(True)
advance = 0
while advance < 2:
    if event.getKeys(keyList=["space"]):
        advance += 1
        Instr_3.setAutoDraw(False)
        Instr_4.setAutoDraw(True)
    if event.getKeys(keyList=["escape"]):
        core.quit()
    win.flip()
Instr_4.setAutoDraw(False)
theseKeys = []
trialcounter = 0
for ptrial in range(252,264):
    t = 0
    Practice_2_Clock.reset()
    continueRoutine = True
    ##------------------SET DURATION & ITI OF STIMULI-------------------##
    duration = expmatrix.loc[ptrial,'Duration']   # stimulus duration (1000 ms)
    ITI = expmatrix.loc[ptrial,'ITI']             # jittered blank (800-2000 ms)
    ##---------------------SET STIMULI & RESPONSE---------------------------##
    stim_image = expmatrix.loc[ptrial,'stim_image']
    Image.setImage(stim_image)
    frequency = expmatrix.loc[ptrial,'Frequency']
    corrAns = expmatrix.loc[ptrial,'corrAns']
    congruency = expmatrix.loc[ptrial,'Congruency']
    if congruency != 'None':
        # FIX: `('m' or 'M') in stim_image` only ever tested 'm'.
        is_male = 'm' in stim_image or 'M' in stim_image
        if congruency == 'con':
            stroop_text.setText('Male' if is_male else 'Female')
        else:
            stroop_text.setText('Female' if is_male else 'Male')
    ##--------------------------WHILE LOOP BEGINS-------------------------##
    while continueRoutine:
        if event.getKeys(keyList=["escape"]):
            core.quit()
        # get current time
        t = Practice_2_Clock.getTime()
        key_resp = event.BuilderKeyResponse()
        if t < ITI:
            Blank.setAutoDraw(True)
        elif t > ITI and t < ITI + duration:
            Blank.setAutoDraw(False)
            Image.setAutoDraw(True)
            if congruency != 'None':
                stroop_text.setAutoDraw(True)
        else:
            Image.setAutoDraw(False)
            Blank.setAutoDraw(False)
            if congruency != 'None':
                stroop_text.setAutoDraw(False)
            continueRoutine = False
        theseKeys = event.getKeys(keyList=['w', 'o'])
        if len(theseKeys) > 0 and t < ITI + duration:  # at least one key was pressed
            # was this 'correct'?
            if str(corrAns) in theseKeys:
                key_resp.corr = 1
                Correct.setAutoDraw(True)
            else:
                key_resp.corr = 0
                Incorrect.setAutoDraw(True)
        ##------------CHECK ALL IF COMPONENTS HAVE FINISHED---------------##
        if continueRoutine:
            win.flip()
        else:
            Correct.setAutoDraw(False)
            Incorrect.setAutoDraw(False)
            break
    ##--------------------------NO NEED TO RECORD DATA-------------------------------##
    trialcounter += 1
    thisExp.nextEntry()
# completed 12 repeats of 'Practice_2'
##------------------------------START Task_2----------------------------------##
# Second task half: 240 experimental trials (rows 264-503), then save & quit.
theseKeys = []
trialcounter = 0
Instr_Practice.setAutoDraw(True)
advance = 0
while advance < 1:
    if event.getKeys(keyList=["space"]):
        advance += 1
        Instr_Practice.setAutoDraw(False)
    if event.getKeys(keyList=["escape"]):
        core.quit()
    win.flip()
for trial in range(264,504):
    t = 0
    overalltime = globalClock.getTime()
    Task_2_Clock.reset() # clock
    continueRoutine = True
    ##------------------SET DURATION & ITI OF STIMULI-------------------##
    duration = expmatrix.loc[trial,'Duration']   # stimulus duration (1000 ms)
    ITI = expmatrix.loc[trial,'ITI']             # jittered blank (800-2000 ms)
    ##---------------------SET STIMULI & RESPONSE---------------------------##
    stim_image = expmatrix.loc[trial,'stim_image']
    Image.setImage(stim_image)
    frequency = expmatrix.loc[trial,'Frequency']
    corrAns = expmatrix.loc[trial,'corrAns']
    congruency = expmatrix.loc[trial,'Congruency']
    if congruency != 'None':
        # FIX: `('m' or 'M') in stim_image` only ever tested 'm'.
        is_male = 'm' in stim_image or 'M' in stim_image
        if congruency == 'con':
            stroop_text.setText('Male' if is_male else 'Female')
        else:
            stroop_text.setText('Female' if is_male else 'Male')
    ##--------------------------WHILE LOOP BEGINS-------------------------##
    while continueRoutine:
        if event.getKeys(keyList=["escape"]):
            core.quit()
        # get current time
        t = Task_2_Clock.getTime()
        # NOTE(review): recreating BuilderKeyResponse() every frame resets
        # its internal clock, so key_resp.rt below is near-zero — confirm.
        key_resp = event.BuilderKeyResponse()
        ##--------------------STIMULI PRESENTATION-------------------------------##
        if t < ITI:
            Blank.setAutoDraw(True)
        elif t > ITI and t < ITI + duration:
            Blank.setAutoDraw(False)
            Image.setAutoDraw(True)
            if congruency != 'None':
                stroop_text.setAutoDraw(True)
        else:
            Image.setAutoDraw(False)
            Blank.setAutoDraw(False)
            if congruency != 'None':
                stroop_text.setAutoDraw(False)
            continueRoutine = False
        theseKeys = event.getKeys(keyList=['w', 'o'])
        if len(theseKeys) > 0 and t < ITI + duration:  # at least one key was pressed
            if theseKeys[-1] != None:
                key_resp.rt = key_resp.clock.getTime()
                thisExp.addData('Response', theseKeys[-1])
                thisExp.addData('RT', key_resp.rt)
                # was this 'correct'?
                if str(corrAns) in theseKeys:
                    key_resp.corr = 1
                else:
                    key_resp.corr = 0
                thisExp.addData('Accuracy', key_resp.corr)
        ##------------CHECK ALL IF COMPONENTS HAVE FINISHED---------------##
        if continueRoutine:
            win.flip()
        else:
            break
    ##--------------------------RECORD DATA-------------------------------##
    trialcounter += 1
    thisExp.addData('Trial',trialcounter)
    thisExp.addData('stim_image', stim_image)
    thisExp.addData('frequency', frequency)
    thisExp.addData('congruency', congruency)
    thisExp.addData('text', stroop_text.text)
    thisExp.addData('corrAns', corrAns)
    thisExp.addData('duration', duration)
    thisExp.addData('ITI', ITI)
    thisExp.nextEntry()
# completed 240 repeats of 'Task_2'
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time : 2019/8/22 14:05
#@Author: Kingsley
#@File  : __init__.py
#!/usr/bin/env python2.6
# $Id$
import unittest
import sys
import os.path
import logging
# Make the project's lib/python directory (relative to this test runner)
# importable so the test modules below can be found.
module_path = [os.path.dirname(__file__),
               "..", "lib", "python"]
sys.path.append(os.path.join(*module_path))
# To add more test files, create <name>.py file and add a corresponding line
# here:
from catalog_notifier_test import *
from catalog_test import *
from checkpkg_lib_test import *
from checkpkg_test import *
from csw_upload_pkg_test import *
from database_test import *
from dependency_checks_test import *
from inspective_package_test import *
from ldd_emul_test import *
from models_test import *
from opencsw_test import *
from package_checks_test import *
from package_stats_test import *
from package_test import *
from pkgdb_test import *
from pkgmap_test import *
from sharedlib_utils_test import *
from struct_util_test import *
from submit_to_newpkgs_test import *
from system_pkgmap_test import *
from tag_test import *
# These are very slow GAR tests, which I'm disabling for now.
# from example_test import *
# from overrides_test import *
if __name__ == '__main__':
  # Some tests output warnings, and we don't want them to be displayed
  # during unit test runs.
  logging.basicConfig(level=logging.ERROR)
  # unittest.main() discovers the TestCase classes pulled in by the star
  # imports above.
  unittest.main()

# vim:set ts=2 sts=2 sw=2 expandtab:
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from django.views.generic.simple import direct_to_template

# NOTE(review): `django.conf.urls.defaults` and `direct_to_template` were
# removed in Django 1.5/1.6; this module only works on older Django versions.
urlpatterns = patterns('',
    # Render the static index template at the app root.
    (r'^$', direct_to_template, {'template': 'ajax_client/index.html'}),
)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
from meta.utils.file_system import is_file_valid
from meta.utils.primitive import remove_empty_values
class SampleDataLine:
    """A single sequencing sample: a name, its read files, and optional taxonomy."""

    def __init__(self, name: str, reads: list, taxa: dict = None):
        # Arbitrary per-sample key/value state (e.g. pipeline bookkeeping).
        self.state = dict()
        self.name = name.strip()
        # Drop empty entries so validation only sees real paths.
        self.reads = remove_empty_values(reads)
        if taxa is None:
            taxa = dict()
        self.taxa = taxa
        self.is_valid = False
        self.validate()

    def __eq__(self, other):
        # Samples are identified by name only.
        return self.name == other.name

    def __lt__(self, other):
        return self.name < other.name

    def __len__(self):
        return len(self.reads)

    def __repr__(self):
        return f"SampleDataLine with name '{self.name}' and {len(self)} reads"

    def _validate_name(self):
        # A sample must have a non-empty name (name is stripped in __init__).
        self.is_valid = self.name is not None and len(self.name) > 0

    def _validate_reads(self):
        """Keep only read files that exist; update the validity flag.

        FIX: the original assigned `self.is_valid = c == 0`, unconditionally
        overwriting the result of _validate_name(); it now requires *both*
        a valid name and valid reads.
        """
        # Having no reads at all counts as one error.
        c = int(len(self.reads) == 0)
        out = list()
        for read_file in self.reads:
            if is_file_valid(file=read_file, report=False):
                out.append(read_file)
            else:
                # Consistency: use an f-string like the rest of the module.
                print(f"Not found the reads file: '{read_file}'")
                c += 1
        self.is_valid = self.is_valid and c == 0
        self.reads = out

    def validate(self):
        """Re-run name and reads validation, updating is_valid and reads."""
        self._validate_name()
        self._validate_reads()

    @staticmethod
    def import_from_dict(d: dict):
        """
        :param d: {
                      'name': 'sample_name_1',
                      'reads': ['reads_1', ...],
                      'taxa':
                          {
                              'genera': 'genera',
                              'species': 'species',
                              'strain': 'strain'
                          }
                      'state':
                          {
                              'key': 'value'
                          }
                  }
        :return: SampleDataLine object
        """
        if any(i not in d.keys() for i in ["name", "reads"]):
            raise ValueError(f"Unable to parse: '{json.dumps(d)}'")
        out = SampleDataLine(d.get("name"), d.get("reads"))
        if "taxa" in d.keys():
            out.taxa = d.get("taxa")
        if "state" in d.keys():
            out.state = d.get("state")
        out.validate()
        return out

    def export_to_dict(self):
        """Inverse of import_from_dict (is_valid is intentionally not exported)."""
        d = dict(name=self.name, reads=self.reads, taxa=self.taxa, state=self.state)
        return d
|
# -----------------------------------------------------------------------------
# Name: ObserverDBUtil.py
# Purpose: Utility functions for peewee based DB
#
# Author: Will Smith <will.smith@noaa.gov>
#
# Created: March 18, 2016
# License: MIT
# ------------------------------------------------------------------------------
import arrow
import hashlib
import json
import logging
import time
import filecmp
import os
import socket
import shutil
import re
from decimal import *
from typing import Any, Dict, List, Type, Union
from apsw import BusyError
from peewee import Model, BigIntegerField, BooleanField, DoubleField, FloatField, ForeignKeyField, IntegerField, \
PrimaryKeyField, SmallIntegerField, TextField, TimestampField
from py.observer.ObserverDBBaseModel import BaseModel, database
from py.observer.ObserverDBModels import Settings, Programs, TripChecks, SpeciesCompositionItems, FishingActivities
from playhouse.apsw_ext import APSWDatabase
from playhouse.shortcuts import dict_to_model
from playhouse.test_utils import test_database
from py.observer.ObserverConfig import optecs_version
import unittest
class ObserverDBUtil:
    """Static utility helpers for the observer peewee/APSW database."""
    default_dateformat = 'MM/DD/YYYY HH:mm'  # Updated for arrow
    javascript_dateformat = 'YYYY-MM-DDTHH:mm:ss'  # For QML interactions
    oracle_date_format = 'DD-MMM-YYYY'  # 01-MAY-2017

    def __init__(self):
        # All functionality is exposed via static methods; nothing to initialize.
        pass
    @staticmethod
    def db_load_setting(parameter):
        """Return the value of the Settings row matching *parameter*, or None if absent."""
        try:
            return Settings.get(Settings.parameter == parameter).value
        except Settings.DoesNotExist as e:
            logging.info('DB Setting does not exist: ' + parameter)
            return None
@staticmethod
def db_save_setting(parameter, value):
"""
Save Setting to DB.
:param parameter: PARAMETER to match
:param value: Value to save as Str
:return: True if saved
"""
id_query = Settings.select().where(Settings.parameter == parameter)
if not id_query.exists():
Settings.create(parameter=parameter)
setting = id_query.get()
setting.value = str(value)
# Handle a BusyError attempting to write to Settings by retrying up to five times.
n_retries = 0
max_retries = 5
while True:
try:
setting.save()
break
except BusyError as be:
n_retries += 1
if n_retries > max_retries:
raise be
sleep_time_secs = 0.1 * n_retries
time.sleep(sleep_time_secs)
# Don't log at INFO level or above - these are typically written to
# OPTECS_LOG_MESSAGES table, adding to database contention.
logging.debug(f'Trip.save() Try#{n_retries} failed w/BusyError. " +'
f'Sleeping {sleep_time_secs} seconds before retrying.')
logging.debug('Save ' + parameter + ' -> ' + str(value))
return True
    @staticmethod
    def db_load_setting_as_json(parameter: str) -> Union[List, Dict[str, Any]]:
        """
        Load a Settings table entry that is a JSON string, returning a Python list or dictionary.
        :param parameter:
        :return: Python list of values or dictionary with (immutable) string key and any value type,
            or None when the setting does not exist.
        """
        structure_as_json_string = ObserverDBUtil.db_load_setting(parameter)
        if structure_as_json_string is None:
            return None
        return json.loads(structure_as_json_string)
@staticmethod
def db_save_setting_as_json(parameter: str, value: Union[List, Dict]) -> None:
"""
Save a python list in the value field of an entry of Settings table as a JSON string.
Note: json.dumps will convert non-string keys into a string (python dict keys
must be immutable).
From https://stackoverflow.com/questions/1450957/pythons-json-module-converts-int-dictionary-keys-to-strings:
"In Python ... the keys to a mapping (dictionary) are object references. In Python
they must be immutable types, or they must be objects which implement a __hash__ method"
(Brought up because first key in dictionary held in JSON string was Trip ID (integer))
:param parameter:
:param value:
:return:
"""
return ObserverDBUtil.db_save_setting(parameter, json.dumps(value))
@staticmethod
def db_fix_empty_string_nulls(logger: logging.Logger) -> None:
# FIELD-1261 If we have bad data in VESSELS ('' instead of NULL) fix that:
database.execute_sql(
"update VESSELS set REGISTERED_LENGTH = NULL where REGISTERED_LENGTH = ''")
# TRIP_CHECKS may have empty strings in the numeric field MODIFIED_BY. Possibly other fields as well.
# Convert empty strings in all numeric fields to null or zero, depending on whether field is nullable.
ObserverDBUtil.db_coerce_empty_strings_in_number_fields(TripChecks, logger)
@staticmethod
def db_coerce_empty_strings_in_number_fields(db_table: Type[BaseModel], logger: logging.Logger) -> Dict[str, int]:
"""
Oracle and SQLite both allow a number field to have a value of empty string.
Peewee, however, does not: it throws a ValueError exception.
Avoid peewee ValueError exceptions by coercing in db_table any empty string in any number field
to null (if field is nullable) or 0 (if field is not nullable).
:param db_table:
:param logger:
:return: a dictionary of field_name: empty_string_count
"""
numeric_field_types = (
IntegerField,
FloatField,
BooleanField,
DoubleField,
BigIntegerField,
SmallIntegerField,
TimestampField
)
db = db_table._meta.database
numeric_fields = []
for field in db_table._meta.declared_fields:
if type(field) in numeric_field_types and \
not (isinstance(field, PrimaryKeyField) or isinstance(field, ForeignKeyField)):
numeric_fields.append(field)
numeric_field_empty_string_cnts = {}
fields_to_coerce = []
for numeric_field in numeric_fields:
# Do a SELECT to determine which columns have any empty strings. Not strictly necessary - could just do the
# update, but a log of counts of empty string values by field could be useful for tracking its frequency.
# Use execute_sql to avoid peewee's problem handling an empty string in a numeric field.
select_sql_query = f"SELECT {numeric_field.db_column} FROM {db_table._meta.db_table} " + \
f"WHERE {numeric_field.db_column} = ''"
# logger.debug(select_sql_query)
cursor = db.execute_sql(select_sql_query)
n_empty_string_values = len(cursor.fetchall())
numeric_field_empty_string_cnts[numeric_field.db_column] = n_empty_string_values
if n_empty_string_values > 0:
fields_to_coerce.append(numeric_field)
for field_to_coerce in fields_to_coerce:
coerced_value = "NULL" if field_to_coerce.null else "0"
update_sql = f"UPDATE {db_table._meta.db_table} set {field_to_coerce.db_column} = {coerced_value} " + \
f"WHERE {field_to_coerce.db_column} = ''"
logger.debug(update_sql)
db.execute_sql(update_sql)
# Log the results. Also return the results for possible use by the caller.
if len(fields_to_coerce) == 0:
logger.info(f"Found no occurrences of empty strings in numeric fields in Table {db_table._meta.db_table}.")
else:
logger.info(
f"Found {len(fields_to_coerce)} field(s) with at least one empty string value. Counts by field:")
for key, value in numeric_field_empty_string_cnts.items():
logger.info(f"\t{key}: {value}")
return numeric_field_empty_string_cnts
@staticmethod
def db_load_save_setting_as_json(parameter: str, default_value: Union[List, Dict]) -> Union[List, Dict]:
"""
If present in Settings table, return the entry with key = 'parameter' as a Python list
or dict (whichever of the two data structures the json defines).
If not present, add an entry with the default_value and return default_value.
:param parameter: string - key value
:param default_value: List or Dict
:return: List or Dict
"""
value_in_settings = ObserverDBUtil.db_load_setting_as_json(parameter)
if value_in_settings is not None:
return value_in_settings
ObserverDBUtil.db_save_setting_as_json(parameter, default_value)
return default_value
@staticmethod
def get_arrow_datestr(date_format=default_dateformat):
"""
Get current time in string format. If oracle_date_format, 3-character month will be uppercase
@param date_format: arrow date format
@return: Arrow formatted datestring
"""
result = arrow.now().format(date_format)
return result if date_format != ObserverDBUtil.oracle_date_format else result.upper()
@staticmethod
def get_external_drive_letters():
"""
Enumerate drive letters for thumb drive. Win32 only
Adapted from https://stackoverflow.com/questions/827371/\
is-there-a-way-to-list-all-the-available-drive-letters-in-python
@return: None or list of non-C drives in format 'D:\\', ...
"""
if os.name == 'nt':
import win32api
drives = win32api.GetLogicalDriveStrings()
drives = drives.split('\000')[:-1]
if 'C:\\' in drives:
drives.remove('C:\\')
return drives
else: # TODO 'posix', 'mac' etc
return None
@staticmethod
def get_current_catch_ratio_from_notes(cur_notes):
if not cur_notes:
return None
ratio_re = re.compile('Ratio=([\S_]+)')
match = ratio_re.search(cur_notes)
ratio = None
if match:
ratio = float(match.group(1))
return ratio
@staticmethod
def datetime_to_str(indate):
"""
Updated for arrow
@param indate:
@return:
"""
return indate.format(ObserverDBUtil.default_dateformat)
@staticmethod
def str_to_datetime(datestr):
return arrow.get(datestr, ObserverDBUtil.default_dateformat)
@staticmethod
def convert_jscript_datetime(datestr):
"""
Given 2017-01-24T16:30:00 local time, convert to arrow UTC
@param datestr: javascript format datetime
@return:
"""
if datestr:
arrow_time = arrow.get(datestr, ObserverDBUtil.javascript_dateformat)
return arrow_time.format(ObserverDBUtil.default_dateformat)
@staticmethod
def convert_arrow_to_jscript_datetime(datestr):
"""
Given standard datestr, basically just add a T
@param datestr: arrow format datestr
@return:
"""
if datestr:
arrow_time = arrow.get(datestr, ObserverDBUtil.default_dateformat)
return arrow_time.format(ObserverDBUtil.javascript_dateformat)
@staticmethod
def convert_datestr(datestr, existing_fmt, desired_fmt):
"""
pass in datestr, parse into datetime, then reformat back to string
:param datestr: str
:param existing_fmt: str (e.g. default_dateformat)
:param desired_fmt: str (e.g. oracle_date_format)
:return: str (new format)
"""
return arrow.get(datestr, existing_fmt).format(desired_fmt)
@staticmethod
def log_peewee_model_dependencies(logger, dependencies, context_message=None):
"""
Log the non-null fields of records in the list of dependencies.
http://docs.peewee-orm.com/en/latest/peewee/api.html#Model.dependencies
:param logger: where to logk
:param dependencies: from Peewee model_instance.dependencies()
:context_message: optional message to include in first log line.
:return: None
"""
logger.info('Peewee dependency information. Context: {}:'.format(
context_message if not None else "(None supplied)"))
dependency_found = False
for (query, fk) in dependencies:
model = fk.model_class
query_result = model.select().where(query).execute()
if query_result:
dependency_found = True
try:
for row in query_result:
ObserverDBUtil.log_peewee_model_instance(logger, row)
except Exception as e:
logger.error(e)
if not dependency_found:
logger.info('No dependencies.')
@staticmethod
def log_peewee_model_instance(logger, db_record, context_message=None):
if not isinstance(db_record, BaseModel):
logger.error("Expected db_record to be an Observer BaseModel type.")
return
logger.info("Non-null Fields of Record from Table {}:".format(db_record._meta.db_table))
if context_message is not None:
logger.info("\t(Context: {})".format(context_message))
for field_name, field_value in db_record._data.items():
if field_value is not None:
logger.info("\t\t{}: {}".format(field_name, field_value))
@staticmethod
def load_peewee_model_from_json_file(peewee_model: Type[BaseModel],
json_filepath: str,
logger: logging.Logger) -> int:
"""
Load a Navicat-created JSON dump of a SQL table into Observer DB using the peewee model for that table.
Format of a Navicat JSON file:
{
"RECORDS":[
{
"FIELD_1_NAME" : "row1_field1_value",
...
"FIELD_N_NAME": 'row1_fieldM_value"
},
...
{
"FIELD_1_NAME": "rowN_field1_value",
...
"FIELD_N_NAME": "rowN_fieldM_value"
}
]
}
The basic technique is to use the Playhouse extension, dict_to_model, to load the dictionary values into
a peewee record.
The only wrinkle: Navicat field names are database columns. Playhouse expects Peewee field names.
Limitation: Expects an empty destination table. This isn't required, just simplest implementation
for what's currently needed.
:return: number of model instances (rows) loaded into peewee_model (table)
"""
# Column names in Navicat JSON are the database column names (uppercase with underscore separator).
# The corresponding field names in Peewee are pythonic variable names.
# Prepare a map from DB column names to Peewee field names.
dbcol_to_field = {}
for field in peewee_model._meta.declared_fields:
dbcol_to_field[field.db_column] = field.name
query = peewee_model.select()
nrows = query.count()
if nrows > 0:
logger.info(f'Table {peewee_model._meta.db_table} is NOT empty (contains {nrows} rows). ' +
f'NOT reloading from JSON.')
return nrows
else:
logger.debug(f'Destination table {peewee_model._meta.db_table} is empty. Proceeding with load.')
with open(json_filepath) as data_file:
data = json.load(data_file)
json_records_with_dbcolumn_names = data['RECORDS']
# Load each JSON record into destination table.
for json_record in json_records_with_dbcolumn_names:
model_instance_dict = {}
# Convert database column names (e.g. TRIP_CHECK_ID) to peewee field name (e.g. trip_check)
for dbcol_name in json_record.keys():
field_name = dbcol_to_field[dbcol_name]
model_instance_dict[field_name] = json_record[dbcol_name]
peewee_model_instance = dict_to_model(peewee_model, model_instance_dict)
peewee_model_instance.save(force_insert=True)
nrows = peewee_model.select().count()
logger.info(f'Loaded {nrows} rows into Table {peewee_model._meta.db_table} from JSON file {json_filepath}')
return nrows
@staticmethod
def checksum_peewee_model(peewee_model: Type[BaseModel], logger: logging.Logger) -> str:
"""
Can help answer the question: have the contents of a SQLite table changed at all?
Unlike some other DBMSs like SQL Server, SQLite does not provide a CHECKSUM function.
Performs a SHA1 checksum over every field of every row of the peewee table.
The plan:
1. Create a hash for each row by hashing a concatenation of hashes for each field.
Each field is treated as string.
2. Create a hash for the table by hashing the concatenation of hashes for each row.
Justification for technique:
From http://crypto.stackexchange.com/questions/10058/how-to-hash-a-list-of-multiple-items
"[I]nstead of just encoding the inputs into a single string and hashing it, it's also possible to modify the
hashing method to directly support multiple inputs. One general way to do that is to use a hash list,
in which every input value is first hashed separately, and the resulting hashes (which have a fixed length,
and can thus be safely concatenated) are then concatenated and hashed together."
:param peewee_model: The table to checksum. Must have a primary key field
(the case for all Peewee-based tables).
:param logger:
:return:
"""
start_time = time.time()
# Oracle and SQLite allow a value of empty string in numeric fields. Peewee takes exception: ValueError.
# Empty string values could be added to TRIP_CHECKS by a DB Sync download.
# Before running a checksum, convert any empty string values in a numeric field to null or zero,
# depending on whether the field is nullable or not.
empty_string_counts = ObserverDBUtil.db_coerce_empty_strings_in_number_fields(peewee_model, logger)
n_flds_empty_str = len([x for x in empty_string_counts if empty_string_counts[x] > 0])
logger.info(f"Found {'no' if n_flds_empty_str == 0 else n_flds_empty_str} fields with empty strings.")
table_name = peewee_model._meta.name
n_fields = len(peewee_model._meta.fields)
primary_key_field = peewee_model._meta.primary_key
# There will always be a primary key when using Peewee. But just in case:
if not primary_key_field:
raise Exception("checksum_peewee_model requires a primary key field by which to sort.")
all_rec_query = peewee_model.select().order_by(primary_key_field)
n_rows = all_rec_query.count()
row_sha1_concatenations = ""
for record in all_rec_query:
field_sha1_concatenations = ""
for name, value in record._data.items():
# print(f'{name}={value}')
field_sha1_concatenations += hashlib.sha1(str(value).encode()).hexdigest()
row_sha1_concatenations += hashlib.sha1(field_sha1_concatenations.encode()).hexdigest()
table_sha1 = hashlib.sha1(row_sha1_concatenations.encode()).hexdigest()
logger.info(f"SHA1 checksum for Table {table_name} of {n_rows} rows by {n_fields} fields: {table_sha1}.")
logger.info(f"\tTime to calculate table's SHA1: {time.time() - start_time:.2f} seconds.")
return table_sha1
@staticmethod
def get_setting(setting_name, fallback_value=None):
"""
Load setting from SETTINGS
@param setting_name: e.g. 'current_user_id'
@param fallback_value: value to return if value not found
@return: value or fallback_value or None
"""
try:
return Settings.get(Settings.parameter == setting_name).value
except Settings.DoesNotExist:
return fallback_value
@staticmethod
def get_or_set_setting(setting_name, default_value):
"""
Load setting from SETTINGS. If not found, then set to default_value
@param setting_name: e.g. 'current_user_id'
@param default_value: value to set if setting not found
@return: found value or default_value
"""
try:
return Settings.get(Settings.parameter == setting_name).value
except Settings.DoesNotExist:
ObserverDBUtil.db_save_setting(setting_name, default_value)
return default_value
@staticmethod
def clear_setting(setting_name):
"""
Delete setting from SETTINGS
@param setting_name: e.g. 'current_user_id'
@return: True or None
"""
try:
Settings.get(Settings.parameter == setting_name).delete_instance()
return True
except Settings.DoesNotExist:
return None
@staticmethod
def get_current_user_id():
try:
return int(ObserverDBUtil.get_setting('current_user_id'))
except (TypeError, ValueError):
return None
@staticmethod
def set_current_user_id(user_id):
try:
ObserverDBUtil.db_save_setting('current_user_id', user_id)
except (TypeError, ValueError):
return None
@staticmethod
def get_current_program_id():
try:
return int(ObserverDBUtil.get_setting('current_program_id'))
except (TypeError, ValueError):
return None
@staticmethod
def get_current_program_name():
try:
program_id = int(ObserverDBUtil.get_setting('current_program_id'))
return Programs.get(Programs.program == program_id).program_name
except Programs.DoesNotExist:
return None
except (TypeError, ValueError):
return None
@staticmethod
def set_current_program_id(program_id):
try:
ObserverDBUtil.db_save_setting('current_program_id', program_id)
except (TypeError, ValueError):
return None
@staticmethod
def get_current_fishery_id() -> int:
try:
return int(ObserverDBUtil.get_setting('current_fishery_id'))
except (TypeError, ValueError):
return None
@staticmethod
def is_fixed_gear():
return True if ObserverDBUtil.get_setting('gear_type') == 'FALSE' else False
@staticmethod
def set_current_fishery_id(program_id: int):
try:
ObserverDBUtil.db_save_setting('current_fishery_id', program_id)
except (TypeError, ValueError):
return None
@staticmethod
def set_current_username(username):
"""
Eventually we're probably not going to persist the last username.
@param username: first+last
@return:
"""
try:
ObserverDBUtil.db_save_setting('current_user', username)
except (TypeError, ValueError):
return None
@staticmethod
def get_current_trip_id():
try:
# TODO: Rename Setting name from 'trip_number' to 'current_trip_id'?
return int(ObserverDBUtil.get_setting('trip_number'))
except (TypeError, ValueError):
return None
@staticmethod
def get_current_haulset_id():
try:
return int(ObserverDBUtil.get_setting('current_haulset_id'))
except (TypeError, ValueError):
return None
@staticmethod
def get_current_haulset_createddate():
"""
Getting created_date for current selected haul/set.
:return: date string from database in format MM/DD/YYYY HH:mm
"""
current_faid = ObserverDBUtil.get_current_haulset_id() # database ID set when haul is selected
if current_faid:
try:
return FishingActivities.get(FishingActivities.fishing_activity == current_faid).created_date
except FishingActivities.DoesNotExist:
return None
else:
return None
@staticmethod
def get_current_debriefer_mode() -> bool:
try:
debriefer_mode = ObserverDBUtil.get_setting('current_debriefer_mode', fallback_value=False)
return debriefer_mode == 'True'
except (TypeError, ValueError):
return False
@staticmethod
def set_current_debriefer_mode(debriefer_mode: bool) -> None:
try:
ObserverDBUtil.db_save_setting('current_debriefer_mode', debriefer_mode)
except (TypeError, ValueError):
pass
@staticmethod
def escape_linefeeds(input_str):
"""
Replace linefeed with <br> for future formatting, removes quotes
@param input_str: original
@return: string with no linefeeds
"""
no_lfs = input_str.replace('\r', '')
no_lfs = no_lfs.replace('\n', '<br>')
no_lfs = no_lfs.replace('"', '')
return no_lfs
@staticmethod
def get_data_source() -> str:
return f'optecs_{optecs_version} {socket.gethostname()}' # FIELD-2114: adding version to data_source
@staticmethod
def del_species_comp_item(comp_item_id, delete_baskets=True):
try:
del_item = SpeciesCompositionItems.get(SpeciesCompositionItems.species_comp_item == comp_item_id)
ObserverDBUtil.log_peewee_model_instance(logging, del_item, 'About to delete')
del_item.delete_instance(recursive=delete_baskets) # delete baskets associated with this species comp id
except SpeciesCompositionItems.DoesNotExist:
logging.error(f'Could not delete species comp item ID {comp_item_id}')
@staticmethod
def round_up(val, precision='.01'):
"""
https://stackoverflow.com/questions/56820/round-doesnt-seem-to-be-rounding-properly#56833
function to properly round up
TODO: replace the Decimal rounding functionality throughout the app, using this in Sets.
TODO: replace in CountsWeights.tallyTimesAvgWeight, CountsWeights._calculate_totals...
:return: rounded float (defaults to two points of precision)
"""
try:
return float(Decimal(val).quantize(Decimal(precision), rounding=ROUND_HALF_UP))
except TypeError:
return None
class TestObserverDBUtil(unittest.TestCase):
    """
    Unit tests for ObserverDBUtil.
    Note: any write/update interaction should be done with test_database...
    http://stackoverflow.com/questions/15982801/custom-sqlite-database-for-unit-tests-for-code-using-peewee-orm
    """
    test_db = APSWDatabase(':memory:')

    def setUp(self):
        logging.basicConfig(level=logging.DEBUG)
        self._logger = logging.getLogger(__name__)
        # Shut up peewee debug and info messages. Comment out setLevel below to get them
        peewee_logger = logging.getLogger('peewee')
        peewee_logger.setLevel(logging.WARNING)

    def test_save_get_setting(self):
        with test_database(self.test_db, [Settings]):
            self.assertTrue(ObserverDBUtil.db_save_setting('TestSettingParam', '1234'))
            self.assertTrue(ObserverDBUtil.db_save_setting('TestSettingParam2', '12345'))
            self.assertTrue(ObserverDBUtil.db_save_setting('TestSettingParam', '4321'))
            retval = ObserverDBUtil.db_load_setting('TestSettingParam')
            self.assertEqual(retval, '4321')
            retval2 = ObserverDBUtil.db_load_setting('TestSettingParam2')
            self.assertEqual(retval2, '12345')

    def test_failcase(self):
        # Loading a never-saved setting should return None, not raise.
        with test_database(self.test_db, [Settings]):
            retval = ObserverDBUtil.db_load_setting('TestSettingParam')
            self.assertIsNone(retval)

    def test_load_list_setting(self):
        with test_database(self.test_db, [Settings]):
            self.assertTrue(ObserverDBUtil.db_save_setting_as_json('TestSettingParam', ['1234', '4567']))
            self.assertTrue(ObserverDBUtil.db_save_setting_as_json('TestSettingParam2', [12345, 67891]))
            self.assertTrue(ObserverDBUtil.db_save_setting_as_json('TestSettingParam', ['4321', '7654']))
            retval = ObserverDBUtil.db_load_setting_as_json('TestSettingParam')
            self.assertEqual(retval, ['4321', '7654'])
            retval2 = ObserverDBUtil.db_load_setting_as_json('TestSettingParam2')
            self.assertEqual(retval2, [12345, 67891])
            # Load/Save
            test_parameter = 'TestSettingParam3'
            default_value = 'This value is not in Settings'.split(" ")
            self.assertIsNone(ObserverDBUtil.db_load_setting(test_parameter))
            actual_value = ObserverDBUtil.db_load_save_setting_as_json(test_parameter, default_value)
            self.assertEqual(default_value, actual_value)

    def test_load_dict_setting(self):
        """ Use 'list' load/save for a dictionary.
            Drawbacks:
            - non-string keys are returned as strings (immutable type required for dict key)
        """
        with test_database(self.test_db, [Settings]):
            test_dict = {1: '2017-09-16', 2: 2.0, 3.0: 3.0}
            setting_name = "trips_with_last_TER_error_free"
            self.assertTrue(ObserverDBUtil.db_save_setting_as_json(setting_name, test_dict))
            retval = ObserverDBUtil.db_load_setting_as_json(setting_name)
            # json round-trip stringifies non-string keys.
            expected_dict = { str(k):v for k, v in test_dict.items()}
            self.assertEqual(retval, expected_dict)

    def test_load_tuple_setting_as_json(self):
        """ Use json load/save for a tuple. Drawbacks:
            - Returned as List, not Tuple
        """
        with test_database(self.test_db, [Settings]):
            test_tuple = ('2017-09-16', '2017-09-17', 3, 4.0)
            setting_name = "tuple_of_strings"
            self.assertTrue(ObserverDBUtil.db_save_setting_as_json(setting_name, test_tuple))
            retval = ObserverDBUtil.db_load_setting_as_json(setting_name)
            self.assertEqual(retval, list(test_tuple))

    def test_datefuncs(self):
        # Create two datetime objects, convert back and forth to string, compare
        nowdate = arrow.now()
        nowdatestr = ObserverDBUtil.get_arrow_datestr()
        nowdatestr_test = ObserverDBUtil.datetime_to_str(nowdate)
        date_from_str = ObserverDBUtil.str_to_datetime(nowdatestr)
        date2_from_str = ObserverDBUtil.str_to_datetime(nowdatestr_test)
        deltat = date_from_str - date2_from_str
        # check within 1 second of each other
        self.assertLess(abs(deltat.seconds), 1)
        nowdate = ObserverDBUtil.get_arrow_datestr(date_format='DD-MMM-YYYY')
        self.assertEqual(len('00-MMM-0000'), len(nowdate))

    def test_escapelf(self):
        test_str = 'this\nline has "things" and various\nfeeds'
        new_str = ObserverDBUtil.escape_linefeeds(test_str)
        self.assertTrue('\n' not in new_str)
        self.assertTrue('<br>' in new_str)
        self.assertTrue('"' not in new_str)

    def test_checksum_peewee_model(self):
        expected_sha1 = "689327755da6658627c0f015c25796a5cdc98c0c"
        with test_database(self.test_db, [Settings]):
            # Use Settings as test data table. Seed with two rows.
            self.assertTrue(ObserverDBUtil.db_save_setting('TestSettingParam1', '1234'))
            self.assertTrue(ObserverDBUtil.db_save_setting('TestSettingParam2', '4321'))
            actual_sha1 = ObserverDBUtil.checksum_peewee_model(Settings, logging)
            self.assertEqual(expected_sha1, actual_sha1)
            # Change a digit: SHA1 should change.
            self.assertTrue(ObserverDBUtil.db_save_setting('TestSettingParam2', '4221'))
            actual_sha1 = ObserverDBUtil.checksum_peewee_model(Settings, logging)
            # Fixed: assertNotEquals is a deprecated alias of assertNotEqual.
            self.assertNotEqual(expected_sha1, actual_sha1)
            # Restore the original digit: SHA1's should match.
            self.assertTrue(ObserverDBUtil.db_save_setting('TestSettingParam2', '4321'))
            actual_sha1 = ObserverDBUtil.checksum_peewee_model(Settings, logging)
            self.assertEqual(expected_sha1, actual_sha1)

    def test_checksum_peewee_model_exception_not_possible_with_peewee(self):
        clean_test_db = APSWDatabase(':memory:')
        with test_database(clean_test_db, [NoPrimaryKeyTable]):
            self.assertTrue(NoPrimaryKeyTable.table_exists())
            ObserverDBUtil.checksum_peewee_model(NoPrimaryKeyTable, logging)
            """
            No exception. That's because:
            "Because we have not specified a primary key, peewee will automatically add
            an auto-incrementing integer primary key field named id."
            (http://docs.peewee-orm.com/en/latest/peewee/models.html)
            """
            peewee_default_primary_key_field_name = 'id'
            primary_key_field = NoPrimaryKeyTable._meta.primary_key
            self.assertIsNotNone(primary_key_field)
            self.assertEqual(peewee_default_primary_key_field_name, primary_key_field.name)

    def test_empty_string_coerce(self):
        with test_database(self.test_db, [TripChecks]):
            # Demonstrate peewee's problem with empty string in integer field, on a save of new record.
            expected_exception_msg = "invalid literal for int() with base 10: ''"
            try:
                test_record_1 = TripChecks(
                    allow_ack="N",
                    check_code="",  # Integer field!
                    check_message="A msg",
                    check_sql="Insert something",
                    check_type="E",
                    created_by=100,
                    created_date="12/05/2017",
                    status=0,
                    trip_check_group=456)
                test_record_1.save()
                self.fail("Should have objected to invalid literal")
            except ValueError as ve:
                self.assertEqual(expected_exception_msg, ve.args[0])
            # Demonstrate peewee's problem with empty string in integer field, on a read of a record.
            expected_exception_msg = "invalid literal for int() with base 10: ''"
            try:
                test_record_2 = TripChecks(
                    allow_ack="N",
                    check_code=0,
                    check_message="A msg",
                    check_sql="Insert something",
                    check_type="E",
                    created_by=101,
                    created_date="12/05/2017",
                    status=0,
                    trip_check_group=456)
                test_record_2.save()
                self._logger.debug(f"TRIP_CHECK_ID={test_record_2.trip_check}.")
                # Introduce an empty string in an integer string - outside of peewee model.
                ret_val = self.test_db.execute_sql(
                    f"update TRIP_CHECKS set CREATED_BY = '' where CREATED_BY = {test_record_2.created_by}")
                # Now try to access record with empty string in integer field.
                trip_check_record = TripChecks.get(TripChecks.trip_check == test_record_2.trip_check)
                self.fail("Should have objected to invalid literal")
            except ValueError as ve:
                self.assertEqual(expected_exception_msg, ve.args[0])
            # Run utility to clear empty strings from numeric fields.
            empty_field_count_dict = ObserverDBUtil.db_coerce_empty_strings_in_number_fields(TripChecks, self._logger)
            self.assertEqual(1, empty_field_count_dict["CREATED_BY"])
            # Now try to access record with formerly empty string in integer field - now should be zero.
            trip_check_record = TripChecks.get(TripChecks.trip_check == test_record_2.trip_check)
            self.assertEqual(0, trip_check_record.created_by, "Empty string in non-nullable integer field should be 0.")
class NoPrimaryKeyTable(Model):
    """Peewee model declared without an explicit primary key; used to
    demonstrate that peewee auto-adds an auto-incrementing 'id' field."""
    # Two arbitrary text columns; only the implicit primary key matters here.
    field1 = TextField()
    field2 = TextField()
|
# Read the side length once and convert to int a single time
# (resolves the TODO about repeated int(sqSide) conversions).
sqSide = int(input("Please enter square side:"))
sqPerimeter = 4 * sqSide                 # perimeter of a square: 4 * side
sqDiagonal = sqSide * (2 ** 0.5)         # diagonal of a square: side * sqrt(2)
print("Perimeter:", sqPerimeter, "\nDiagonal:", sqDiagonal)
#!/usr/bin/env python
## Copyright 2019 PME Tommy,Cheng, Shinchen-Huang
##
## *****************************************************
## DESCRIPTION :
## socket of server main.py
## use:
# please set ip and host port first in the bottom according to environment
import time
import threading
import numpy as np
import os
import logging
import math
from utils import *
from scipy import signal
import socket
import rospy
from std_msgs.msg import Int16MultiArray
from sound_localize.msg import SoundRaw
# Module-level logger for this node.
logger = logging.getLogger('MicArray')
# Shared module state between the receive thread and (future) processing code.
chunk_buffer = None  # latest raw chunk received from the socket
chunk_lock = True  # NOTE(review): named like a lock but is a plain bool - confirm intent
azimuth_global = 0  # last computed azimuth; units unconfirmed - TODO verify
chunk_time = None  # ROS timestamp associated with the latest chunk
def rec_thread(quit_event):
    """Accept one TCP connection on the module-level socket 's' and republish
    each received audio chunk as a SoundRaw ROS message until ROS shuts down.

    :param quit_event: threading.Event; currently unused - loop exit is driven
        by rospy.is_shutdown() instead. TODO confirm whether it should be honored.
    """
    # 's' is the listening socket created in the __main__ block below.
    conn, addr = s.accept()
    pubFrameRaw = rospy.Publisher('sound_raw', SoundRaw, queue_size=1)
    while not rospy.is_shutdown():
        # con_start = rospy.Time.now()
        # It only takes 0.00011 secs to transfer
        # 32768 bits from respeaker to robots through Ethernet
        # NOTE(review): no 'global' statement, so chunk_buffer/chunk_time here
        # are locals that shadow the module globals of the same name - confirm intent.
        chunk_buffer = conn.recv(32768,socket.MSG_WAITALL)
        # con_end = rospy.Time.now()
        chunk_time = rospy.Time.now()
        # print("Receive")
        # print("con time: ", (con_end-con_start).to_sec())
        # print("==========================")
        # Reinterpret the raw bytes as 16-bit samples.
        xx = np.frombuffer(chunk_buffer,'int16')
        print("size: ", xx.shape)
        # print("The xx: ", xx)
        # print("xx len: ", xx.size)
        s_raw_msg = SoundRaw()
        s_raw_msg.data = xx
        s_raw_msg.header.stamp = chunk_time
        pubFrameRaw.publish(s_raw_msg)
    conn.close()
def main():
    """Start the socket-receive thread and block until ROS shuts down.

    Assumes rospy.init_node() has already been called by the caller (the
    __main__ block initializes the node before calling main()). The previous
    duplicate rospy.init_node() call here would raise ROSException, since
    init_node may only be called once per process; it has been removed,
    along with a redundant function-local 'import time'.
    """
    logging.basicConfig(level=logging.DEBUG)
    # create thread for microphone array beamforming to do localization with TODA or MUSIC
    # The event matches rec_thread's signature; it is currently never set -
    # shutdown is driven by rospy.spin() / rospy.is_shutdown().
    q = threading.Event()
    rec_t = threading.Thread(target=rec_thread, args=(q, ))
    rec_t.start()
    # Block until ROS shutdown, then wait for the receive thread to finish.
    rospy.spin()
    rec_t.join()
if __name__ == '__main__':
    # ros init - must happen before any Publisher/Time use in the threads.
    rospy.init_node('sound_localize',anonymous=False)
    # set socket server client ip and port
    # HOST = '192.168.50.7' #HOST = '192.168.50.102' # phone '192.168.43.253' # tealab '192.168.1.176'
    HOST = rospy.get_param("~IP", "192.168.50.7")
    PORT = 8001
    # 's' is intentionally module-global: rec_thread() calls s.accept().
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((HOST, PORT))
    print ('Server start at: %s:%s' %(HOST, PORT))
    print ('wait for connection...')
    s.listen(5)
    main()
|
from abaqusConstants import *
from .SpringDashpot import SpringDashpot
from ..Region.Region import Region
class SpringDashpotToGround(SpringDashpot):
"""The SpringDashpotToGround object defines springs and/or dashpots between points and
ground on a part or an assembly region.
The SpringDashpotToGround object is derived from the SpringDashpot object.
Attributes
----------
suppressed: Boolean
A Boolean specifying whether the spring/dashpot is suppressed or not. The default value
is OFF.
Notes
-----
This object can be accessed by:
.. code-block:: python
import part
mdb.models[name].parts[name].engineeringFeatures.springDashpots[name]
import assembly
mdb.models[name].rootAssembly.engineeringFeatures.springDashpots[name]
The corresponding analysis keywords are:
- ELEMENT
- SPRING
- DASHPOT
"""
# A Boolean specifying whether the spring/dashpot is suppressed or not. The default value
# is OFF.
suppressed: Boolean = OFF
def __init__(
self,
name: str,
region: Region,
dof: int,
orientation: str = None,
springBehavior: Boolean = OFF,
dashpotBehavior: Boolean = OFF,
springStiffness: float = 0,
dashpotCoefficient: float = 0,
):
"""This method creates a SpringDashpotToGround object.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[name].engineeringFeatures.SpringDashpotToGround
mdb.models[name].rootAssembly.engineeringFeatures\
.SpringDashpotToGround
Parameters
----------
name
A String specifying the repository key.
region
A Region object specifying the region to which the springs and/or dashpots are applied.
dof
An Int specifying the degree of freedom associated with the spring and dashpot
behaviors.
orientation
None or a DatumCsys object specifying the local directions for the spring and/or
dashpot. If *orientation*=None, the spring and/or dashpot data are defined in the global
coordinate system. The default value is None.
springBehavior
A Boolean specifying whether to apply spring behavior to the selected points. The
default value is OFF.At least one of the arguments *springBehavior*=ON or
*dashpotBehavior*=ON must be specified.
dashpotBehavior
A Boolean specifying whether to apply dashpot behavior to the selected points. The
default value is OFF.At least one of the arguments *springBehavior*=ON or
*dashpotBehavior*=ON must be specified.
springStiffness
A Float specifying the force per relative displacement for the spring. The default value
is 0.0.
dashpotCoefficient
A Float specifying the force per relative velocity for the dashpot. The default value is
0.0.
Returns
-------
A SpringDashpotToGround object.
"""
super().__init__()
pass
    def setValues(
        self,
        orientation: str = None,
        springBehavior: Boolean = OFF,
        dashpotBehavior: Boolean = OFF,
        springStiffness: float = 0,
        dashpotCoefficient: float = 0,
    ):
        """This method modifies the SpringDashpotToGround object.

        Parameters
        ----------
        orientation
            None or a DatumCsys object specifying the local directions for the spring and/or
            dashpot. If *orientation*=None, the spring and/or dashpot data are defined in the global
            coordinate system. The default value is None.
        springBehavior
            A Boolean specifying whether to apply spring behavior to the selected points. The
            default value is OFF. At least one of the arguments *springBehavior*=ON or
            *dashpotBehavior*=ON must be specified.
        dashpotBehavior
            A Boolean specifying whether to apply dashpot behavior to the selected points. The
            default value is OFF. At least one of the arguments *springBehavior*=ON or
            *dashpotBehavior*=ON must be specified.
        springStiffness
            A Float specifying the force per relative displacement for the spring. The default value
            is 0.0.
        dashpotCoefficient
            A Float specifying the force per relative velocity for the dashpot. The default value is
            0.0.
        """
        # Stub: interface documentation only; behavior supplied by the Abaqus
        # kernel at runtime.
        pass
|
from django.db import IntegrityError
from django.http import HttpResponseBadRequest
import json
import logging
log = logging.getLogger('api.middleware')
class ValueErrorMiddleware:
    """Django middleware that turns an uncaught ValueError raised by a view
    into an HTTP 400 response with a JSON body of the form
    ``{"error": "<message>"}``. Other exception types are left for the
    framework's default handling.
    """

    def __init__(self, get_response):
        # One-time configuration and initialization.
        self.get_response = get_response

    def __call__(self, request):
        # No pre- or post-processing is needed; simply delegate to the next
        # layer in the middleware chain.
        return self.get_response(request)

    def process_exception(self, request, exception):
        log.debug(f"{request}: {exception}")
        if not isinstance(exception, ValueError):
            # Returning None lets Django continue normal exception handling.
            return None
        payload = json.dumps({"error": str(exception)})
        return HttpResponseBadRequest(content_type='application/json',
                                      content=payload)
|
from hydra_python_core.doc_writer import (
HydraClass,
HydraClassOp,
HydraClassProp,
HydraLink,
)
from typing import List, Union, Optional
class ClassProcessor:
    """Collects the pieces of a Hydra class (title, description, supported
    operations and properties) and generates the corresponding HydraClass.

    FIX: the ``hydra_ops``/``hydra_props`` defaults were shared mutable
    lists (``=[]``); every instance constructed without explicit arguments
    aliased the same list objects. They are now ``None`` sentinels, which is
    backward-compatible for all callers.
    """

    def __init__(
        self,
        title: str,
        id: Union[str, HydraLink],
        desc: str = "",
        hydra_ops: Optional[List[HydraClassOp]] = None,
        hydra_props: Optional[List[HydraClassProp]] = None,
    ) -> None:
        """Store the class metadata.

        Args:
            title: name of the Hydra class.
            id: identifier (string or HydraLink) for the class.
            desc: description; defaults to "Class for <title>" when empty.
            hydra_ops: supported operations (defaults to an empty list).
            hydra_props: supported properties; deduplicated by title.
        """
        self.title = title
        self.desc = desc if desc else f"Class for {title}"
        self.hydra_ops = hydra_ops if hydra_ops is not None else []
        self.hydra_props = self.filter_props(
            hydra_props if hydra_props is not None else [])
        self.id = id

    @staticmethod
    def filter_props(objects):
        """Deduplicate props by their ``title`` attribute.

        For each title, the first object is kept; a later object with the
        same title replaces it only when its attribute dict differs (i.e.
        the last *distinct* object per title wins). Returns the values view
        of the resulting mapping.
        """
        filtered_objs = {}
        for object_ in objects:
            title = object_.__dict__.get("title")
            if not filtered_objs.get(title):
                filtered_objs[title] = object_
            elif object_.__dict__ != filtered_objs.get(title).__dict__:
                filtered_objs[title] = object_
        return filtered_objs.values()

    def generate(self) -> HydraClass:
        """Build and return the HydraClass with all ops and props attached."""
        hydra_class = HydraClass(
            title=self.title,
            desc=self.desc,
            _id=self.id,
            endpoint=True,
        )
        for hydra_op in self.hydra_ops:
            hydra_class.add_supported_op(hydra_op)
        for hydra_prop in self.hydra_props:
            hydra_class.add_supported_prop(hydra_prop)
        return hydra_class
|
# coding=utf-8
#Leow Yenn Han
#leowyennhan@gmail.com
import time
from flask import Flask, render_template , request, make_response, Blueprint
from flask_bootstrap import Bootstrap
import requests, json, pprint
from reportlab.pdfgen import canvas
from reportlab.lib.enums import TA_JUSTIFY, TA_CENTER, TA_LEFT, TA_RIGHT
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image, Table,TableStyle, PageBreak, Flowable
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle, ListStyle
from reportlab.lib.units import inch
from reportlab.lib.units import cm,mm
from reportlab.rl_config import defaultPageSize
from reportlab.lib import colors
from num2words import num2words
import io
class MCLine(Flowable):
    """A horizontal-rule flowable: draws a single line of the given width.

    Based on the recipe at
    http://two.pairlist.net/pipermail/reportlab-users/2005-February/003695.html
    """

    def __init__(self, width, height=0):
        Flowable.__init__(self)
        # Line geometry; height doubles as the vertical offset when drawing.
        self.width = width
        self.height = height

    def __repr__(self):
        return f"Line(w={self.width})"

    def draw(self):
        """Render the line onto the flowable's canvas."""
        self.canv.line(0, self.height, self.width, self.height)
# Page geometry taken from ReportLab's configured default page size.
PAGE_WIDTH = defaultPageSize[0]
PAGE_HEIGHT = defaultPageSize[1]
def myFirstPage(canvas, doc):
    """onFirstPage callback for SimpleDocTemplate.

    Currently draws nothing: it only sets fonts inside a saved/restored
    canvas state, which has no visible effect. Kept as a hook for future
    first-page decoration. (FIX: removed the unused local ``pageinfo``.)
    """
    canvas.saveState()
    canvas.setFont('Times-Bold', 16)
    canvas.setFont('Times-Roman', 9)
    canvas.restoreState()
def myLaterPages(canvas, doc):
    """onLaterPages callback: draw the page number in the bottom margin."""
    canvas.saveState()
    canvas.setFont('Times-Roman', 9)
    footer = "Page %d " % doc.page
    canvas.drawString(inch, 0.75 * inch, footer)
    canvas.restoreState()
def invoice_pdf_generator(logo_im, invoice_id, company_name, company_address, purchaser, time1, the_list):
    """Write "<invoice_id>.pdf" (in the working directory) for a sale.

    Args:
        logo_im: logo image path/file, rendered at 1.3 x 1 inch.
        invoice_id: invoice number; also used as the output file name.
        company_name: supplier name for the header.
        company_address: supplier address; also reused as the shipment address.
        purchaser: bill-to name; an empty string renders "Cash".
        time1: invoice issue-date string.
        the_list: iterable of dicts with keys 'qty', 'description_of_item',
            'unit_price' and 'amount'. The 'amount' values are summed and
            6% GST is added for the totals block.

    FIX: the sample stylesheet was previously created and given the
    'Justify' style twice; the first copy was immediately discarded dead
    code and has been removed.
    """
    doc = SimpleDocTemplate(invoice_id + ".pdf", rightMargin=72, leftMargin=72,
                            topMargin=72, bottomMargin=18)
    styles = getSampleStyleSheet()
    styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))
    styles.add(ParagraphStyle('Center', alignment=TA_LEFT, leftIndent=130))
    styles.add(ParagraphStyle('one', alignment=TA_RIGHT, rightIndent=20))
    styles.add(ParagraphStyle('two', alignment=TA_JUSTIFY, leftIndent=300))
    styles.add(ParagraphStyle('t1', alignment=TA_JUSTIFY, leftIndent=200))
    styles.add(ParagraphStyle('left', alignment=TA_LEFT))
    logo = logo_im
    im = Image(logo, 1.3 * inch, 1 * inch)
    Story = []
    # --- header: supplier, bill-to, shipment address and invoice metadata ---
    ptext = '<font size=12>Supplier Name:<br/><b> %s</b><br/></font>' % company_name
    ptext_1 = Paragraph(ptext, styles['left'])
    ptext2 = '<font size=9>%s</font>' % company_address
    ptext_2 = Paragraph(ptext2, styles['left'])
    ptext5 = '<font size=9><b>Shipment To:</b><br/>%s</font>' % company_address
    ptext_5 = Paragraph(ptext5, styles['left'])
    if purchaser == "":
        ptext3 = "<font size=9><b>Bill:</b><br/>Cash</font>"
        ptext_3 = Paragraph(ptext3, styles['left'])
    else:
        ptext3 = "<font size=9><b>Bill To</b><br/>%s</font>" % purchaser
        ptext_3 = Paragraph(ptext3, styles['left'])
    ptext4 = "<font size=9><b>Invoice# </b> %s<br/><b>Invoice Date of Issue: </b> %s<br/></font>" % (invoice_id, time1)
    ptext_4 = Paragraph(ptext4, styles['left'])
    able = [['', '', im], [ptext_1, '', ''], [ptext_2, '', ''], ['', '', ''], [ptext_3, '', ptext_4], [ptext_5, '', '']]
    t = Table(able, (5 * cm, 7 * cm, 3 * cm))
    t.setStyle(TableStyle([
        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
        #('INNERGRID', (0, 0), (-1, -1), 1.5, colors.black),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        # ('BOX', (0, 0), (-1, -1), 1.50, colors.black),
    ]))
    Story.append(t)
    Story.append(Spacer(1, 11))
    Story.append(Spacer(1, 11))
    line = MCLine(440)
    Story.append(line)
    Story.append(Spacer(1, 11))
    Story.append(line)
    Story.append(Spacer(1, 11))
    # --- items table header ---
    able = [['QTY', 'DESCRIPTION', 'UNIT PRICE', 'AMOUNT (RM) ']]
    t = Table(able, (2 * cm, 7.5 * cm, 3 * cm, 3 * cm))
    t.setStyle(TableStyle([
        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('INNERGRID', (0, 0), (-1, -1), 1, colors.black),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('BOX', (0, 0), (-1, -1), 1.0, colors.black),
    ]))
    Story.append(t)
    # --- one row per line item; accumulate the subtotal ---
    value = 0
    for item in the_list:
        value += item['amount']
        p1 = str(item['qty'])
        p2 = item['description_of_item']
        p3 = str(item['unit_price'])
        p4 = str(item['amount'])
        p_1 = Paragraph(p1, styles['one'])
        p_2 = Paragraph(p2, styles['one'])
        p_3 = Paragraph(p3, styles['one'])
        p_4 = Paragraph(p4, styles['one'])
        able = [[p_1, p_2, p_3, p_4]]
        t = Table(able, (2 * cm, 7.5 * cm, 3 * cm, 3 * cm))
        t.setStyle(TableStyle([
            ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
            ('INNERGRID', (0, 0), (-1, -1), 0.5, colors.black),
            ('VALIGN', (0, 0), (-1, -1), 'TOP'),
            ('BOX', (0, 0), (-1, -1), 0.5, colors.black),
        ]))
        Story.append(t)
    # --- totals block: subtotal, 6% GST, grand total ---
    gst = value * 0.06
    total = value + gst
    t1 = str(value)
    t_1 = Paragraph(t1, styles['one'])
    t2 = str(gst)
    t_2 = Paragraph(t2, styles['one'])
    t3 = str(total)
    t_3 = Paragraph(t3, styles['one'])
    ptext = "<font size=9>Subtotal</font>"
    ptext_1 = Paragraph(ptext, styles['two'])
    ptext1 = "<font size=9>GST</font>"
    ptext_2 = Paragraph(ptext1, styles['two'])
    ptext2 = "<font size=9><b>TOTAL</b></font>"
    ptext_3 = Paragraph(ptext2, styles['two'])
    able = [[ptext_1, t_1], [ptext_2, t_2], [ptext_3, t_3]]
    t = Table(able, (12.5 * cm, 3 * cm))
    t.setStyle(TableStyle([
        ('ALIGN', (0, 0), (-1, -1), 'CENTER'),
        ('INNERGRID', (1, 0), (1, 2), 0.5, colors.black),
        ('VALIGN', (0, 0), (-1, -1), 'TOP'),
        ('BOX', (1, 0), (1, 2), 0.5, colors.black)
    ]))
    Story.append(t)
    Story.append(Spacer(1, 11))
    Story.append(line)
    Story.append(Spacer(1, 11))
    # --- footer notes ---
    ptext = "<font size=9>All taxable products & services at GST @ 6%</font>"
    ptext1 = Paragraph(ptext, styles['Center'])
    Story.append(ptext1)
    ptext = '<font size=9>Thank You</font>'
    ptext1 = Paragraph(ptext, styles['t1'])
    Story.append(ptext1)
    ptext = '<font size=9>**************** Official Invoice ****************</font>'
    ptext1 = Paragraph(ptext, styles['Center'])
    Story.append(ptext1)
    Story.append(Spacer(1, 11))
    Story.append(line)
    doc.multiBuild(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)
#purchaser="Jibby&Co"
#time1=time.asctime(time.localtime(time.time()))
#the_list=[{"qty":3,"description_of_item":"LENOVO 15inch Laptop","unit_price":5000,"amount":15000},{"qty":4,"description_of_item":"Dell 15inch Laptop","unit_price":2000,"amount":8000}]
#invoice_pdf_generator("test","Best Dengki Sdn Bhd","455, Amsterdam Avenue<br/>Petaling Jaya, 47400,<br/> Selangor Dahrul Ehsan",purchaser,time1,the_list) |
# Parse the three blank-line-separated sections of the puzzle input:
# field ranges, "your ticket", and "nearby tickets".
with open('inputs/input-16.txt') as fh:
    bounds, myticket, tickets = fh.read().split('\n\n')
# Append my own ticket's value line so it ends up last in the ticket list.
tickets = tickets + '\n' + myticket.splitlines()[1]
names = [line.split(': ')[0] for line in bounds.splitlines()]
# bounds[i] is a list of (lo, hi) inclusive ranges for field i.
bounds = [
    [tuple(int(v) for v in rng.split('-'))
     for rng in line.split(': ')[1].split(' or ')]
    for line in bounds.splitlines()
]
# Skip the "nearby tickets:" header line, then parse each CSV row of ints.
tickets = [[int(v) for v in row.split(',')] for row in tickets.splitlines()[1:]]
def part1():
    """Return the ticket scanning error rate: the sum of every value on any
    ticket that satisfies none of the fields' ranges."""
    flat_bounds = [rng for field in bounds for rng in field]
    error_rate = 0
    for value in (v for ticket in tickets for v in ticket):
        if all(not (lo <= value <= hi) for lo, hi in flat_bounds):
            error_rate += value
    return error_rate
def part2():
    """Deduce which ticket column belongs to each field name, then return the
    product of my ticket's values for the fields whose name starts with
    "departure" (the AoC 2020 day 16 part 2 answer).

    FIXES versus the original:
    - the function ended with a bare ``return``, so ``print(part2())``
      printed None; it now returns the departure product.
    - the deduced mapping was stored backwards (``real[names[field]] = i``
      mapped a column-indexed name to a rule index); it now stores
      ``real[names[i]] = column``.
    - a ticket was discarded only when *every* value was invalid; per the
      puzzle it must be discarded when *any* value fits no field.
    """
    myticket = tickets.pop(-1)
    all_bounds = sum(bounds, [])
    # Keep only tickets whose every value satisfies at least one range.
    valid_tickets = [
        t for t in tickets
        if all(any(lo <= v <= hi for lo, hi in all_bounds) for v in t)
    ]
    # possible[i] = set of ticket columns still compatible with rule i.
    possible = [set(range(len(bounds))) for _ in range(len(bounds))]
    for i, ((a1, a2), (b1, b2)) in enumerate(bounds):
        for ticket in valid_tickets:
            for col, val in enumerate(ticket):
                if not (a1 <= val <= a2 or b1 <= val <= b2):
                    possible[i].discard(col)
    # Repeatedly pin any rule with exactly one candidate column and remove
    # that column from all other rules' candidate sets.
    real = {}
    while len(real) != len(bounds):
        for i in range(len(possible)):
            if len(possible[i]) == 1:
                col = possible[i].pop()
                real[names[i]] = col
                for candidates in possible:
                    candidates.discard(col)
                break
    answer = 1
    for name, col in real.items():
        if name.startswith('departure'):
            answer *= myticket[col]
    return answer
# Entry point: print both puzzle answers.
print(part1())
print(part2())
# Simple example of straight-line motion
# (NOTE: these are illustrative fragments; x, y, dx, dy are assumed to be
# defined elsewhere before this code runs.)
x += dx
y += dy
# Simple example of curved motion (a jump): apply velocity, then gravity
dy = 100
y += dy
dy -= 10
# List comprehensions: a compact syntax for building lists quickly
numbers = [n for n in range(1, 10)]
numbers = [n for n in range(1, 10 + 1) if n % 2 == 0]
numbers = [n * 3 for n in range(1, 10)]
|
import math
class Heap:
    """Array-backed binary max-heap.

    ``elements[0]`` is always the maximum; the children of index i live at
    indices 2i+1 and 2i+2.

    FIXES: ``print_array`` used a Python-2 print statement (now the print()
    function, valid on both 2 and 3); ``pool`` contained a redundant nested
    length check.
    """

    def __init__(self):
        self.elements = []

    def add(self, number):
        """Insert ``number`` and bubble it up to restore the heap property."""
        self.elements.append(number)
        if len(self.elements) > 1:
            self._verify_if_parent_is_higher(len(self.elements) - 1)

    def is_empty(self):
        """Return True when the heap holds no elements."""
        return len(self.elements) == 0

    def peek(self):
        """Return the maximum without removing it, or None when empty."""
        if not self.is_empty():
            return self.elements[0]
        return None

    def pool(self):
        """Remove and return the maximum, or None when empty.

        (Kept the original method name; it appears to mean "poll".)
        """
        if not self.elements:
            return None
        top = self.elements[0]
        # Move the last element to the root, shrink, and sift it down.
        self.elements[0] = self.elements[-1]
        del self.elements[-1]
        self._balance(0)
        return top

    def _left_child_index(self, index):
        return index * 2 + 1

    def _right_child_index(self, index):
        return index * 2 + 2

    def _parent_index(self, index):
        # Only ever called with index >= 1, so truncation equals floor here.
        return int((index - 1) / 2)

    def _verify_if_parent_is_higher(self, index):
        """Sift the element at ``index`` up while it exceeds its parent."""
        if self.elements[self._parent_index(index)] < self.elements[index]:
            self.elements[index], self.elements[self._parent_index(index)] = \
                self.elements[self._parent_index(index)], self.elements[index]
            if self._parent_index(index) > 0:
                self._verify_if_parent_is_higher(self._parent_index(index))

    def _balance(self, index):
        """Sift the element at ``index`` down to restore the heap property."""
        left_child_index = self._left_child_index(index)
        right_child_index = self._right_child_index(index)
        new_index = index
        if len(self.elements) >= left_child_index + 1:
            if self.elements[index] < self.elements[left_child_index]:
                new_index = left_child_index
        if len(self.elements) >= right_child_index + 1:
            # A right child implies a left child exists, so the comparison
            # below is safe.
            if self.elements[index] < self.elements[right_child_index] and \
                    self.elements[right_child_index] > \
                    self.elements[left_child_index]:
                new_index = right_child_index
        if index != new_index:
            self.elements[index], self.elements[new_index] = \
                self.elements[new_index], self.elements[index]
            self._balance(new_index)

    def print_array(self):
        """Print the backing array (debug helper)."""
        print(self.elements)
# Demo driver: exercise the heap, printing the backing array after every
# insertion and then draining it (one extra pool() shows the None case).
# FIX: converted Python-2 print statements to the print() function, which
# behaves identically here on both Python 2 and 3.
heap = Heap()
for value in (13, 20, 9, 7, 8, 5, 6, 1, 1, 3):
    heap.add(value)
    heap.print_array()
for _ in range(11):
    print(heap.pool())
|
import datetime
import os
import subprocess
def log(path, name, participant_name, data):
    """Write ``data`` to a timestamped text file under ``path``.

    The file is named "<name>_<participant_name>_utc_<timestamp>.txt" and its
    first line is the file name itself, followed by one line per item of
    ``data`` (each converted with str()).

    FIX: the original mutated the caller's list via ``data.insert(0, ...)``;
    the file name is now prepended to a local copy instead.
    """
    timestamp = str(datetime.datetime.utcnow()).replace(' ', '_').replace(':', '')
    file_name = '_'.join([name, participant_name, 'utc', timestamp]) + '.txt'
    lines = [file_name] + list(data)
    with open(os.path.join(path, file_name), 'w') as f:
        f.write('\n'.join(map(str, lines)))
def praat_play(path, mode, pms, vtl, df, prf):
    """Run the manipulateSound.praat script on "<mode>.wav" via the bundled
    Praat binary, passing the four manipulation parameters through as
    command-line arguments. Blocks until Praat exits."""
    path_praat = path + '/Praat.app/Contents/MacOS/Praat'
    path_script = path + '/manipulateSound.praat'
    path_sound = path + '/' + mode + '.wav'
    cmdline = [path_praat, '--run', path_script]
    cmdline.extend('{0}'.format(arg) for arg in (path_sound, pms, vtl, df, prf))
    subprocess.call(cmdline)
#!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Django template tag library containing Comments helpers.
"""
# Module authors, listed per the Melange convention.
__authors__ = [
    '"Sverre Rabbelier" <srabbelier@gmail.com>',
    '"Lennard de Rijk" <ljvderijk@gmail.com>',
  ]
from django import template
from soc.logic.models import user as user_logic
from soc.views.helper import redirects
# Tag library the Django template engine uses to discover these tags.
register = template.Library()
@register.inclusion_tag('soc/templatetags/_as_comments.html',
                        takes_context=True)
def as_comments(context, work):
  """Returns a HTML representation of a work's comments.

  Args:
    context: template context (the tag is registered with
        takes_context=True).
    work: entity whose ``comments`` attribute supplies the comments
        rendered by _as_comments.html.
  """
  context['comments'] = work.comments
  return context
@register.inclusion_tag('soc/templatetags/_as_comment.html',
                        takes_context=True)
def as_comment(context, comment):
  """Returns a HTML representation of a comment.

  An edit link is included only when the currently logged-in user is the
  comment's author.
  """
  edit_link = ''
  viewer = user_logic.logic.getCurrentUser()
  # pylint: disable=E1103
  if viewer and comment.author.key() == viewer.key():
    edit_link = redirects.getEditRedirect(
        comment, {'url_name': context['comment_on_url_name']})
  comment_values = {
      'author': comment.author.name,
      'content': comment.content,
      'created': comment.created,
      'edit_link': edit_link,
      'modified_on': comment.modified,
      'modified_by': comment.modified_by.name if comment.modified_by else '',
      'comment_class': "public" if comment.is_public else "private",
  }
  context.update(comment_values)
  return context
@register.inclusion_tag('soc/templatetags/_as_review.html',
                        takes_context=True)
def as_review(context, review):
  """Returns a HTML representation of a review.
  """
  # TODO(ljvderijk) once review editing is allowed redo this
  review_values = {
      'author': review.author_name(),
      'content': review.content,
      'created': review.created,
      'score': review.score,
      'is_public': review.is_public,
      'comment_class': "public" if review.is_public else "private",
  }
  context.update(review_values)
  return context
@register.inclusion_tag('soc/templatetags/_as_student_proposal_review.html',
                        takes_context=True)
def as_student_proposal_review(context, review, student):
  """Returns a HTML representation of a student proposal review.
  """
  # TODO(ljvderijk) once review editing is allowed redo this
  review_values = {
      'author': review.author_name(),
      'content': review.content,
      'created': review.created,
      'score': review.score,
      'is_public': review.is_public,
      'from_student': review.author.key() == student.user.key(),
  }
  context.update(review_values)
  return context
|
# -*- coding: utf-8 -*-
from twisted.internet import reactor, protocol
from twisted.internet.defer import DeferredList, inlineCallbacks, returnValue
from twisted.internet.protocol import ClientCreator
from twisted.protocols.ftp import FTPClient, FTPFileListProtocol
from datetime import datetime as dt
import fnmatch, os, shutil
# Sibling bot modules this module requires at runtime (resolved via
# self.master.modules[...]).
dependencies = ["config", "commands"]
def parseDate(date):
    """Parse an FTP listing date string into a datetime.

    FTP `ls`-style listings use two forms: "Mon DD YYYY" for older entries
    and "Mon DD HH:MM" (year omitted, strptime defaults it to 1900) for
    recent ones.

    FIX: the original branched on `" " in date`, which is true for BOTH
    forms, so the time form always hit the "%b %d %Y" parse and raised
    ValueError. Branch on the presence of ":" instead, which only the
    time form contains.
    """
    if ":" in date:
        d = dt.strptime(date, "%b %d %H:%M")
    else:
        d = dt.strptime(date, "%b %d %Y")
    return d
class Downloader(protocol.Protocol):
    """Protocol that streams received bytes into a file.

    When ``limit`` is given, writing stops after exactly ``limit`` bytes and
    the connection is dropped. When ``deferred`` is given, it is fired with
    the number of bytes written once the connection closes.
    """

    def __init__(self, name, deferred = None, limit = None):
        self.file = open(name, "wb")
        self.len = 0
        self.deferred = deferred
        self.limit = limit
        self.connected = True

    def dataReceived(self, data):
        if not self.connected:
            # Already past the limit; drop anything that still trickles in.
            return
        take = len(data)
        if self.limit is not None and self.len + take > self.limit:
            # Only accept the bytes needed to reach the limit exactly.
            take = self.limit - self.len
            data = data[:take]
        self.len += take
        self.file.write(data)
        if self.limit is not None and self.len >= self.limit:
            self.connected = False
            self.transport.loseConnection()

    def connectionLost(self, reason):
        self.file.close()
        if self.deferred is not None:
            self.deferred.callback(self.len)
class Module(object):
    """FTP helper module for the bot.

    Wraps a capped pool of FTP connections (credentials from the "ftp"
    config interface) and provides download/upload primitives, XDCC and
    seedbox uploads, font synchronisation, and a multi-connection premux
    download cache.
    """
    # Number of parallel connections cache() attempts to use per download.
    split = 8
    # Hard cap on simultaneously open FTP connections.
    max = 8
    def __init__(self, master):
        # master: bot core; gives access to sibling modules ("config", "commands").
        self.master = master
        self.config = master.modules["config"].interface("ftp")
        # Count of currently open FTP connections (bounded by self.max).
        self.connections = 0
        # Filenames with a cache() download currently in flight.
        self.caching = []
    def stop(self):
        """Module shutdown hook; nothing to clean up."""
        pass
    @inlineCallbacks
    def acquireConnection(self):
        """Open and return an authenticated FTPClient, honouring the pool cap.

        Raises the commands module's exception type when the cap is reached
        or the FTP credentials are missing from config.
        """
        exception = self.master.modules["commands"].exception
        if self.connections >= self.max:
            raise exception(u"No FTP connections available. Please wait and try again.")
        self.connections += 1
        user = yield self.config.get("ftpuser")
        passwd = yield self.config.get("ftppass")
        host = yield self.config.get("ftphost")
        port = yield self.config.get("ftpport")
        if user is None or passwd is None or host is None or port is None:
            self.connections -= 1
            raise exception(u"No FTP user, pass, host or port in config")
        ftp = yield ClientCreator(reactor, FTPClient, user.encode("utf8"), passwd.encode("utf8")).connectTCP(host, int(port))
        returnValue(ftp)
    @inlineCallbacks
    def releaseConnection(self, ftp):
        """Quit the given FTPClient and free its pool slot."""
        yield ftp.quit()
        ftp.fail(None)
        self.connections -= 1
    @inlineCallbacks
    def isCached(self, filename):
        """Return whether *filename* already exists in the premux cache dir."""
        premux_dir = yield self.config.get("premuxdir", "premuxes")
        returnValue(os.path.isfile(os.path.join(premux_dir, filename)))
    @inlineCallbacks
    def get(self, folder, filename, destination):
        """Download *folder*/*filename* from the FTP into *destination*.

        Verifies the downloaded size against the listing and deletes the
        local file on mismatch.
        """
        exception = self.master.modules["commands"].exception
        ftp = yield self.acquireConnection()
        try:
            yield ftp.changeDirectory(folder.encode("utf8"))
            filelist = FTPFileListProtocol()
            yield ftp.list(".", filelist)
            sizes = [x["size"] for x in filelist.files if x["filename"] == filename]
            if not sizes or len(sizes) > 1:
                raise exception(u"Couldn't find file in FTP")
            length = sizes[0]
            downloader = Downloader(os.path.join(destination, filename))
            yield ftp.retrieveFile(filename.encode("utf8"), downloader)
        finally:
            yield self.releaseConnection(ftp)
        if length != downloader.len:
            os.remove(os.path.join(destination, filename))
            raise exception(u"Downloaded file was not the proper size. Got {:,d} instead of {:,d}".format(downloader.len, length))
    @inlineCallbacks
    def getFromCache(self, folder, filename, destination):
        """Copy *filename* from the premux cache (populating it first if needed)."""
        cached = yield self.isCached(filename)
        if not cached:
            yield self.cache(folder, filename)
        premux_dir = yield self.config.get("premuxdir", "premuxes")
        shutil.copyfile(os.path.join(premux_dir, filename), os.path.join(destination, filename))
    @inlineCallbacks
    def getLatest(self, folder, filter):
        """Return the most recently dated file in *folder* matching the glob *filter*."""
        exception = self.master.modules["commands"].exception
        ftp = yield self.acquireConnection()
        try:
            yield ftp.changeDirectory(folder.encode("utf8"))
            filelist = FTPFileListProtocol()
            yield ftp.list(".", filelist)
        finally:
            yield self.releaseConnection(ftp)
        files = fnmatch.filter([x["filename"] for x in filelist.files if x["filetype"] != "d"], filter)
        if not files:
            raise exception(u"No files in FTP match given {}".format(filter))
        files = [(x["filename"], parseDate(x["date"])) for x in filelist.files if x["filename"] in files]
        # Newest first; NOTE(review): listing dates without a year parse as
        # 1900, so mixed old/new listings may sort surprisingly — confirm.
        files.sort(key=lambda x: x[1], reverse=True)
        returnValue(files[0][0])
    @inlineCallbacks
    def put(self, folder, filename, destination = None):
        """Upload local *folder*/*filename* to *destination* (default from config).

        NOTE(review): the whole file is read into memory before sending.
        """
        exception = self.master.modules["commands"].exception
        if destination is None:
            destination = yield self.config.get("ftpdefaultdir")
            if destination is None:
                raise exception(u"No FTP default directory in config")
        ftp = yield self.acquireConnection()
        try:
            yield ftp.changeDirectory(destination.encode("utf8"))
            store, finish = ftp.storeFile(filename.encode("utf8"))
            sender = yield store
            with open(os.path.join(folder, filename), "rb") as f:
                sender.transport.write(f.read())
            sender.finish()
            yield finish
        finally:
            yield self.releaseConnection(ftp)
    @inlineCallbacks
    def putXDCC(self, folder, filename, destination):
        """Upload to the XDCC FTP host (separate credentials, not the pool)."""
        exception = self.master.modules["commands"].exception
        user = yield self.config.get("xdccuser")
        passwd = yield self.config.get("xdccpass")
        host = yield self.config.get("xdcchost")
        port = yield self.config.get("xdccport")
        root = yield self.config.get("xdccfolder")
        if user is None or passwd is None or host is None or port is None or root is None:
            raise exception(u"No XDCC FTP user, pass, host, port or folder in config")
        ftp = yield ClientCreator(reactor, FTPClient, user.encode("utf8"), passwd.encode("utf8")).connectTCP(host, int(port))
        yield ftp.changeDirectory(root.encode("utf8"))
        yield ftp.changeDirectory(destination.encode("utf8"))
        store, finish = ftp.storeFile(filename.encode("utf8"))
        sender = yield store
        with open(os.path.join(folder, filename), "rb") as f:
            sender.transport.write(f.read())
        sender.finish()
        yield finish
        yield ftp.quit()
        ftp.fail(None)
    @inlineCallbacks
    def putSeedbox(self, folder, filename):
        """Upload an MKV to the seedbox FTP (separate credentials, not the pool)."""
        exception = self.master.modules["commands"].exception
        destination = yield self.config.get("seedmkvfolder")
        if destination is None:
            raise exception(u"No Seedbox MKV folder in config")
        user = yield self.config.get("seeduser")
        passwd = yield self.config.get("seedpass")
        host = yield self.config.get("seedhost")
        port = yield self.config.get("seedport")
        if user is None or passwd is None or host is None or port is None:
            raise exception(u"No Seedbox FTP user, pass, host or port in config")
        ftp = yield ClientCreator(reactor, FTPClient, user.encode("utf8"), passwd.encode("utf8")).connectTCP(host, int(port))
        yield ftp.changeDirectory(destination.encode("utf8"))
        store, finish = ftp.storeFile(filename.encode("utf8"))
        sender = yield store
        with open(os.path.join(folder, filename), "rb") as f:
            sender.transport.write(f.read())
        sender.finish()
        yield finish
        yield ftp.quit()
        ftp.fail(None)
    @inlineCallbacks
    def putTorrent(self, folder, filename):
        """Upload a torrent file to the seedbox watch folder."""
        exception = self.master.modules["commands"].exception
        destination = yield self.config.get("seedtorrentfolder")
        if destination is None:
            raise exception(u"No Seedbox torrent folder in config")
        user = yield self.config.get("seeduser")
        passwd = yield self.config.get("seedpass")
        host = yield self.config.get("seedhost")
        port = yield self.config.get("seedport")
        if user is None or passwd is None or host is None or port is None:
            raise exception(u"No Seedbox FTP user, pass, host or port in config")
        ftp = yield ClientCreator(reactor, FTPClient, user.encode("utf8"), passwd.encode("utf8")).connectTCP(host, int(port))
        yield ftp.changeDirectory(destination.encode("utf8"))
        store, finish = ftp.storeFile(filename.encode("utf8"))
        sender = yield store
        with open(os.path.join(folder, filename), "rb") as f:
            sender.transport.write(f.read())
        sender.finish()
        yield finish
        yield ftp.quit()
        ftp.fail(None)
    @inlineCallbacks
    def uploadFonts(self, destination):
        """Upload every file from the local font dir into *destination*/<fontdir>."""
        exception = self.master.modules["commands"].exception
        fontdir = yield self.config.get("fontdir", "fonts")
        ftp = yield self.acquireConnection()
        try:
            yield ftp.changeDirectory(destination.encode("utf8"))
            filelist = FTPFileListProtocol()
            yield ftp.list(".", filelist)
            if fontdir not in [x["filename"] for x in filelist.files if x["filetype"] == "d"]:
                yield ftp.makeDirectory(fontdir.encode("utf8"))
            for font in os.listdir(fontdir):
                path = os.path.join(fontdir, font)
                store, finish = ftp.storeFile(path.encode("utf8"))
                sender = yield store
                with open(path, "rb") as f:
                    sender.transport.write(f.read())
                sender.finish()
                yield finish
        finally:
            yield self.releaseConnection(ftp)
    @inlineCallbacks
    def downloadFonts(self, folder, destination):
        """Download all fonts from *folder*/<fontdir>; returns their names."""
        exception = self.master.modules["commands"].exception
        fontdir = yield self.config.get("fontdir", "fonts")
        ftp = yield self.acquireConnection()
        try:
            yield ftp.changeDirectory(folder.encode("utf8"))
            filelist = FTPFileListProtocol()
            yield ftp.list(fontdir.encode("utf8"), filelist)
            fonts = [x["filename"] for x in filelist.files if x["filetype"] != "d"]
            for font in fonts:
                downloader = Downloader(os.path.join(destination, font))
                # NOTE(review): os.path.join used to build a remote FTP path —
                # assumes a POSIX host; confirm on Windows deployments.
                yield ftp.retrieveFile(os.path.join(fontdir, font).encode("utf8"), downloader)
        finally:
            yield self.releaseConnection(ftp)
        returnValue(fonts)
    @inlineCallbacks
    def cache(self, folder, filename):
        """Download *filename* into the premux cache using up to ``split``
        parallel connections, each fetching one contiguous chunk, then merge
        the chunks and verify the total size.
        """
        exception = self.master.modules["commands"].exception
        premux_dir = yield self.config.get("premuxdir", "premuxes")
        if filename in self.caching:
            raise exception(u"Already caching {}".format(filename))
        self.caching.append(filename)
        try:
            connections = []
            for _ in range(max(self.split, 1)):
                try:
                    c = yield self.acquireConnection()
                except:
                    # Pool exhausted; work with however many we got.
                    break
                else:
                    yield c.changeDirectory(folder.encode("utf8"))
                    connections.append(c)
            if not connections:
                raise exception(u"No FTP connections available. Please wait and try again.")
            try:
                filelist = FTPFileListProtocol()
                yield connections[0].list(".", filelist)
                filedata = [x for x in filelist.files if x["filename"] == filename][0]
                # NOTE(review): relies on Python 2 integer division; under
                # Python 3 these would be floats — confirm interpreter version.
                chunk_len = 1024 * ((filedata["size"] / len(connections)) / 1024) # Round to nearest kilobyte
                # Last connection takes whatever remains after the even chunks.
                remainder = filedata["size"] - (chunk_len * (len(connections) - 1))
                deferreds = []
                for i in range(len(connections)):
                    fname = "{}.{:d}".format(filename, i)
                    size = remainder if i == len(connections) - 1 else chunk_len
                    downloader = Downloader(os.path.join(premux_dir, fname), limit=size)
                    d = connections[i].retrieveFile(os.path.join(folder, filename), downloader, offset=chunk_len*i)
                    d.addErrback(lambda _: None) # Swallow FTP fail errors
                    deferreds.append(d)
                yield DeferredList(deferreds)
            finally:
                for c in connections:
                    yield self.releaseConnection(c)
            # Merge files in a way that won't use too much memory
            wrong_size = False
            with open(os.path.join(premux_dir, filename), "wb") as fout:
                for i in range(len(connections)):
                    size = remainder if i == len(connections) - 1 else chunk_len
                    fname = "{}.{:d}".format(filename, i)
                    wrong_size = wrong_size or os.path.getsize(os.path.join(premux_dir, fname)) != size
                    with open(os.path.join(premux_dir, fname), "rb") as fin:
                        shutil.copyfileobj(fin, fout, 65536)
                    os.remove(os.path.join(premux_dir, fname))
            actual = os.path.getsize(os.path.join(premux_dir, filename))
            if wrong_size or actual != filedata["size"]:
                os.remove(os.path.join(premux_dir, filename))
                raise exception(u"Downloaded file was not the proper size. Got {:,d} instead of {:,d}".format(actual, filedata["size"]))
        finally:
            self.caching.remove(filename)
    @inlineCallbacks
    def uncache(self, filename):
        """Delete *filename* from the premux cache.

        NOTE(review): raises OSError if the file is not cached — confirm
        callers always check isCached first.
        """
        exception = self.master.modules["commands"].exception
        premux_dir = yield self.config.get("premuxdir", "premuxes")
        os.remove(os.path.join(premux_dir, filename))
|
import cv2
import monitor_module
# Placeholder notification credentials (Gmail + Twilio); fill in before use.
gmail_address = 'YOUR EMAIL ADDRESS'
gmail_password = 'YOUR EMAIL PASSWORD'
to_address = ['FIRST TO EMAIL', 'SECOND TO EMAIL']
sid = 'YOUR ACCOUNT SID'
token = 'YOUR AUTH TOKEN'
us_phone = 'YOUR US PHONE'
tw_phone = 'YOUR TW PHONE'
# Motion-detection loop: compare consecutive webcam frames; on motion, email
# a snapshot and send an SMS, then pause comparisons for ~10 seconds.
capture = cv2.VideoCapture(0)
image_pre = None
skip = 1  # number of comparisons to skip; the previous frame starts empty, so skip one
while capture.isOpened():
    is_success, image = capture.read()
    if is_success:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale
        image_now = cv2.GaussianBlur(gray, (13, 13), 5)  # Gaussian blur
        if skip > 0:
            skip -= 1
        else:
            difference = cv2.absdiff(image_now, image_pre)  # difference between this frame and the previous one
            ret, thresh = cv2.threshold(difference, 25, 255,  # threshold value
                                        cv2.THRESH_BINARY)
            # NOTE(review): cv2.findContours returns 3 values on OpenCV 3.x
            # and 2 on 2.x/4.x — confirm the installed version returns 2.
            contours, _ = cv2.findContours(thresh,  # find the contours
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
            if contours:
                print('偵測到移動')
                cv2.drawContours(image, contours, -1, (255, 255, 255), 2)
                message = monitor_module.get_mime_image('小偷入侵', '鷹眼防盜監視器', '警察局', image)
                if message:
                    monitor_module.send_gmail(gmail_address, gmail_password, to_address, message)
                    monitor_module.send_sms('小偷入侵', sid, token, us_phone, tw_phone)
                skip = 200  # 0.05 s (waitKey) * 200 = 10 s with comparisons paused
            else:
                print('靜止畫面')
        cv2.imshow('frame', image)
        image_pre = image_now.copy()
        keyboard = cv2.waitKey(50)
        if keyboard in (ord('q'), ord('Q')):
            print('退出系統')
            cv2.destroyAllWindows()
            capture.release()
            break
|
import requests
import urllib3
import json
import errors
import xml.dom.minidom
import logging
import configparser
import threading
import collect
from datetime import datetime
from urllib3.exceptions import InsecureRequestWarning
urllib3.disable_warnings(InsecureRequestWarning)
# Load the request-timeout limit (seconds) from the config file.
config = configparser.ConfigParser(interpolation=None)
config.read('configs/config.ini')
timeout = config['DEFAULT']['Timeout_limit']
# FIX: str.isnumeric() is False for decimal strings like "2.5" (and for
# signed or exponent forms), which silently disabled valid float timeouts.
# Parse with float() and fall back to None (= no timeout) on bad values.
try:
    timeout_limit = float(timeout)
except ValueError:
    timeout_limit = None
def rest_get_json(baseURL, uri, user, password):
    """GET baseURL+uri as JSON with basic auth.

    Returns the pretty-printed JSON text on HTTP 200; on any other status
    the API error message is logged and None is returned.
    """
    proxies = {
        "http": None,
        "https": None,
    }
    appformat = 'application/json'
    headers = {'content-type': appformat, 'accept': appformat}
    restURI = baseURL + uri
    try:
        r = requests.get(restURI, headers=headers, proxies=proxies,
                         auth=(user, password), verify=False,
                         timeout=timeout_limit)
        collect.thread_data.logger.debug('The API response for URL {} is:\n{}'.format(restURI, json.dumps(r.json(), separators=(",",":"), indent=4)))
        if r.status_code != 200:
            body = json.loads(json.dumps(r.json(), indent=2))
            errormessage = body.get('rc.errors').get('error').get('error-message')
            collect.thread_data.logger.info('error message is: ' + errormessage)
            raise errors.InputError(restURI, "HTTP status code: " + str(r.status_code), "Error message returned: " + errormessage)
        return json.dumps(r.json(), indent=2)
    except errors.InputError as err:
        collect.thread_data.logger.error('Exception raised: ' + str(type(err)) + '\nURL: {}\n{}\n{}'.format(err.expression, err.statuscode, err.message))
        return
class Circuit_breaker:
    """Circuit breaker around authenticated REST GET calls.

    After ``failure_threshold`` recorded failures the circuit opens and
    requests are refused; once ``reset_call`` seconds have elapsed since the
    last failure the circuit goes half-closed and a fresh round of trial
    requests is permitted.

    FIXES versus the original:
    - ``get_state`` compared a timedelta to a float (TypeError); it now
      compares elapsed seconds.
    - ``response.keys()[0]`` is not subscriptable on Python 3; replaced with
      ``next(iter(response))``, and the empty-dict case (which raised
      IndexError) is handled.
    - ``raise("Circuit is open")`` raised a string (TypeError on Python 3);
      a RuntimeError is raised instead.
    - in the half-closed state the retry loop never ran (failed_tries was
      already at the threshold); the counters are now reset to allow the
      trial round.
    """

    def __init__(self, failure_threshold = 3, timeout_limit = timeout_limit):
        self.failure_threshold = failure_threshold
        self.timeout_limit = timeout_limit
        # Cool-down, in seconds, before a half-closed retry is allowed.
        self.reset_call = 20.2
        self.failed_tries = 0
        self.last_failure_time = 0
        self.state = "closed"

    def reset(self):
        """Forget all recorded failures (closes the circuit)."""
        self.failed_tries = 0
        self.last_failure_time = 0

    def get_state(self):
        """Recompute ``self.state`` from the failure counters."""
        if self.failed_tries >= self.failure_threshold:
            if self.last_failure_time:
                elapsed = (datetime.now() - self.last_failure_time).total_seconds()
            else:
                elapsed = 0
            self.state = "half-closed" if elapsed > self.reset_call else "open"
        else:
            self.state = "closed"

    def record_failure(self):
        """Note one more failure at the current time."""
        self.failed_tries += 1
        self.last_failure_time = datetime.now()

    def request(self, baseURL, uri, user, password):
        """GET baseURL+uri, retrying up to the failure threshold.

        Returns the JSON text on success, a JSON-encoded empty list once the
        threshold is exhausted, and raises RuntimeError while the circuit is
        open.
        """
        proxies = {
            "http": None,
            "https": None,
        }
        appformat = 'application/json'
        headers = {'content-type': appformat, 'accept': appformat}
        restURI = baseURL + uri
        self.get_state()
        if self.state == "open":
            collect.thread_data.logger.error('Exception raised: Circuit is open')
            raise RuntimeError("Circuit is open")
        if self.state == "half-closed":
            # Cool-down elapsed: allow a fresh round of trial requests.
            self.reset()
        while self.failed_tries < self.failure_threshold:
            try:
                check_not_empty = True
                r = requests.get(restURI, headers=headers, proxies=proxies, auth=(user, password), verify=False, timeout=self.timeout_limit, allow_redirects=True)
                collect.thread_data.logger.debug('The API response for URL {} is:\n{}'.format(restURI, json.dumps(r.json(), separators=(",",":"), indent=4)))
                # Treat an empty body, or a single-key wrapper around an
                # empty collection, as a failed response.
                response_uni = json.dumps(r.json(), indent=2)
                response = json.loads(response_uni)
                if len(response) == 0:
                    check_not_empty = False
                elif len(response) == 1:
                    key = next(iter(response))
                    if not response[key]:
                        check_not_empty = False
                if r.status_code == 200 and check_not_empty:
                    self.reset()
                    return response_uni
                else:
                    thejson = json.loads(json.dumps(r.json(), indent=2))
                    errormessage = thejson.get('rc.errors').get('error').get('error-message')
                    collect.thread_data.logger.info('error message is: ' + errormessage)
                    raise errors.InputError(restURI, "HTTP status code: " + str(r.status_code), "Error message returned: " + errormessage)
            except errors.InputError as err:
                collect.thread_data.logger.error('Exception raised: ' + str(type(err)) + '\nURL: {}\n{}\n{}'.format(err.expression, err.statuscode, err.message))
                self.record_failure()
                if self.failed_tries >= self.failure_threshold:
                    # Threshold reached: hand back an empty JSON list.
                    return json.dumps([])
def rest_get_xml(baseURL, uri, user, password):
    """GET ``baseURL + uri`` and return the body as pretty-printed XML.

    On a non-200 response the error message is pulled out of the JSON body,
    logged, and the function returns None.
    """
    mime = 'application/xml'
    request_kwargs = {
        'headers': {'content-type': mime, 'accept': mime},
        'proxies': {"http": None, "https": None},
        'auth': (user, password),
        'verify': False,
        'timeout': timeout_limit,
    }
    restURI = baseURL + uri
    try:
        resp = requests.get(restURI, **request_kwargs)
        if resp.status_code != 200:
            body = json.loads(json.dumps(resp.json(), indent=2))
            errormessage = body.get('rc.errors').get('error').get('error-message')
            collect.thread_data.logger.info('error message is: ' + errormessage)
            raise errors.InputError(restURI, "HTTP status code: " + str(resp.status_code), "Error message returned: " + errormessage)
        return xml.dom.minidom.parseString(resp.content).toprettyxml()
    except errors.InputError as err:
        collect.thread_data.logger.error('Exception raised: ' + str(type(err)) + '\nURL: {}\n{}\n{}'.format(err.expression, err.statuscode, err.message))
        return
def rest_post_xml(baseURL, uri, thexml, user, password):
    """POST an XML payload to ``baseURL + uri``.

    Returns the response pretty-printed as XML; on error the message from
    the JSON error body is logged and None is returned.
    """
    mime = 'application/xml'
    post_kwargs = {
        'data': thexml,
        'headers': {'content-type': mime, 'accept': mime},
        'proxies': {"http": None, "https": None},
        'auth': (user, password),
        'verify': False,
    }
    restURI = baseURL + uri
    try:
        resp = requests.post(restURI, **post_kwargs)
        if resp.status_code != 200:
            body = json.loads(json.dumps(resp.json(), indent=2))
            errormessage = body.get('rc.errors').get('error').get('error-message')
            collect.thread_data.logger.info('error message is: ' + errormessage)
            raise errors.InputError(restURI, "HTTP status code: " + str(resp.status_code), "Error message returned: " + errormessage)
        return xml.dom.minidom.parseString(resp.content).toprettyxml()
    except errors.InputError as err:
        collect.thread_data.logger.error('Exception raised: ' + str(type(err)) + '\nURL: {}\n{}\n{}'.format(err.expression, err.statuscode, err.message))
        return
def rest_post_json(baseURL, uri, thejson, user, password):
    """POST a JSON payload to ``baseURL + uri``.

    Returns the response body re-serialised with 2-space indentation, or
    None after logging the error message from a non-200 response.
    """
    mime = 'application/json'
    restURI = baseURL + uri
    try:
        resp = requests.post(restURI, data=thejson,
                             headers={'content-type': mime, 'accept': mime},
                             proxies={"http": None, "https": None},
                             auth=(user, password), verify=False)
        collect.thread_data.logger.debug('The API response for URL {} is:\n{}'.format(restURI, json.dumps(resp.json(), separators=(",",":"), indent=4)))
        if resp.status_code == 200:
            return json.dumps(resp.json(), indent=2)
        body = json.loads(json.dumps(resp.json(), indent=2))
        errormessage = body.get('rc.errors').get('error').get('error-message')
        collect.thread_data.logger.info('error message is: ' + errormessage)
        raise errors.InputError(restURI, "HTTP status code: " + str(resp.status_code), "Error message returned: " + errormessage)
    except errors.InputError as err:
        collect.thread_data.logger.error('Exception raised: ' + str(type(err)) + '\nURL: {}\n{}\n{}'.format(err.expression, err.statuscode, err.message))
        return
#!/usr/bin/env python3
# BUGFIX: the original first line "#/!bin/python3.x" was a malformed shebang
# (wrong character order and a non-existent interpreter name); the duplicate
# "import initialize" was also removed and imports grouped per PEP 8.
import os

import initialize
import syncInsertUpdate
#########################
# Main program
#########################
if __name__ == '__main__':
    # initialize global variables in initialize.readConfigFile()
    initialize.readConfigFile()
    # get path+filename to dictionary
    dictFilename = os.path.join(initialize.dataDirectory, initialize.dictFile)
    # read dictionary
    y = syncInsertUpdate.readDictionary(dictFilename)
    syncInsertUpdate.initializeSync(y)
    # is this needed?
    # syncTargetTable ('CampaignHistoricalValues', 'DSS', 'DSS', "UpdatedOn", "HistoricalKey", True, '60', prodConn, dscConn)
    # Exception: No updatedOn field in this table
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
from argparse import RawTextHelpFormatter
import unittest, time, re, inspect, os, os.path, json, argparse
# Placeholder token; the code generator substitutes the concrete test-suite
# class name here when this template is instantiated.
TEST_SUITE_CLASS_NAME = "__template_className__"
class __template_className__(unittest.TestCase):
    """Generated Selenium test-case template.

    All ``__template_*__`` tokens are substituted by the test generator;
    the recorded test steps are injected into ``__template_methodName__``.
    """
    def __init__(self, driver, opts):
        # driver: webdriver key (see _get_web_driver); opts: dict with the
        # 'profile' file path and 'wait_time' in seconds.
        super(__template_className__, self).__init__("__template_methodName__")
        self.driver_name = driver
        self.profile = TestProfile(opts['profile'])
        self.wait_time = opts['wait_time']
    def setUp(self):
        """Start the browser and resolve the base URL from the profile."""
        self.driver = self._get_web_driver(self.driver_name)
        self.driver.implicitly_wait(30)
        self.base_url = self.profile.param("base_url", "__template_baseURL__")
        self.verificationErrors = []
        self.accept_next_alert = True
    def tearDown(self):
        """Screenshot, close the browser, persist a first-run profile."""
        self.driver.save_screenshot(self._build_screenshot_name())
        self.driver.quit()
        # Only save the collected defaults after a fully green run.
        if self._is_successful() and not self.profile.file_exists():
            self.profile.save_profile_file()
        self.assertEqual([], self.verificationErrors)
    def __template_methodName__(self):
        """Placeholder test method; generated steps are appended below."""
        __template_param_getter__ = self.profile.param
        __template_receiver__ = self.driver
        # Footer
    def is_element_present(self, how, what):
        """Return True when the (how, what) locator finds an element."""
        try: self.driver.find_element(by=how, value=what)
        except NoSuchElementException as e: return False
        return True
    def is_alert_present(self):
        """Return True when a JavaScript alert is currently shown."""
        # NOTE(review): switch_to_alert() is deprecated in newer Selenium
        # (driver.switch_to.alert) -- confirm the pinned selenium version.
        try: self.driver.switch_to_alert()
        except NoAlertPresentException as e: return False
        return True
    def close_alert_and_get_its_text(self):
        """Accept or dismiss the current alert and return its text."""
        try:
            alert = self.driver.switch_to_alert()
            alert_text = alert.text
            if self.accept_next_alert:
                alert.accept()
            else:
                alert.dismiss()
            return alert_text
        finally: self.accept_next_alert = True
    @staticmethod
    def _get_web_driver(driver_name):
        """Map a driver key to a webdriver instance; ValueError if unknown."""
        if driver_name == 'chrome':
            return webdriver.Chrome()
        elif driver_name == 'firefox':
            return webdriver.Firefox()
        elif driver_name == 'ie':
            return webdriver.Ie()
        elif driver_name == 'edge':
            return webdriver.Edge()
        elif driver_name == 'safari':
            return webdriver.Safari()
        else:
            raise ValueError
    def _is_successful(self):
        # NOTE(review): relies on unittest's private _resultForDoCleanups
        # attribute; may break on newer Python versions.
        result = self._resultForDoCleanups
        return len(result.failures) == 0 and len(result.errors) == 0
    def _build_screenshot_name(self):
        """Return <cwd>/<ClassName>_<driver>_<timestamp>.png."""
        png_root = os.getcwd() + os.sep
        local_time = time.strftime('%Y%m%d%H%M%S', time.localtime())
        screenshot_id = '_'.join((self.__class__.__name__, self.driver_name, local_time))
        return png_root + screenshot_id + '.png'
class TestProfile:
    """JSON-file-backed store of named test parameters.

    While the backing file does not exist yet, every requested parameter
    records its default value so the accumulated defaults can be written out
    after a successful run; once the file exists, values are served from it.
    """
    def __init__(self, profile_filename):
        self.profile_filename = profile_filename
        self.test_dict = {}
        if self.file_exists():
            self.load_profile_file()
    def param(self, param, default_value):
        # First run (no file yet): remember the default before returning it.
        if not self.file_exists():
            self.test_dict[param] = default_value
        return self.test_dict[param]
    @staticmethod
    def profile_file_exists_with(filename):
        return os.path.isfile(filename)
    def file_exists(self):
        return self.profile_file_exists_with(filename=self.profile_filename)
    def save_profile_file_with(self, filename):
        with open(filename, 'w') as handle:
            json.dump(self.test_dict, handle)
    def save_profile_file(self):
        self.save_profile_file_with(filename=self.profile_filename)
    def load_profile_file_with(self, filename):
        with open(filename) as handle:
            self.test_dict = json.load(handle)
    def load_profile_file(self):
        self.load_profile_file_with(filename=self.profile_filename)
if __name__ == "__main__":
    # Command-line entry point: parse options, then run this suite once with
    # the selected webdriver.
    parser = argparse.ArgumentParser(
        description='Python-Based Selenium Web Test Case for ' + TEST_SUITE_CLASS_NAME,
        formatter_class=RawTextHelpFormatter)
    group = parser.add_argument_group('options')
    group.add_argument('-p', '--profile',
                       dest='profile',
                       type=str,
                       default=TEST_SUITE_CLASS_NAME + '_profile.json',
                       help='''determine a file path of test profile;
                       a default profile will be generated after execution
                       if this option not specified''')
    group.add_argument('-d', '--driver',
                       dest='web_driver',
                       type=str,
                       required=True,
                       help='''specify a webdriver you would like to test with,
                       including:
                       - chrome: Google Chrome
                       - firefox: Mozilla Firefox
                       - ie: Microsoft Internet Explorer
                       - edge: Microsoft Edge (not tested yet)
                       - safari: Apple Safari (not tested yet)''')
    group.add_argument('-t', '--time-wait',
                       dest='time_in_sec',
                       type=float,
                       default=0,
                       help='''determine the waiting time in seconds (decimal)
                       between two steps (default: 0)''')
    args = parser.parse_args()
    opts = {
        'profile': args.profile,
        'wait_time': args.time_in_sec
    }
    # TEST_SUITE_CLASS_NAME is generator-substituted, so eval() resolves the
    # concrete class object defined above in this module.
    test_suite_class = eval(TEST_SUITE_CLASS_NAME)
    suite = unittest.TestSuite()
    suite.addTest(test_suite_class(args.web_driver, opts))
    unittest.TextTestRunner().run(suite)
|
from lxml import etree
import os.path
DATA_DIR="./data"
F1='index1.html'
F2='index2.html'
def xhtml_file(name):
    """Open a document from DATA_DIR; the caller owns the handle.

    NOTE(review): the handle opened here is never explicitly closed after
    parsing below -- acceptable for a one-shot script, but worth tightening.
    """
    return open(os.path.join(DATA_DIR, name))
parser = etree.HTMLParser()
T1=etree.parse(xhtml_file(F1), parser)
#T2=etree.parse(xhtml_file(F2))
# BUGFIX: parenthesised print() runs on both Python 2 and 3; the original
# bare print statement is a SyntaxError under Python 3.
print(etree.tostring(T1.getroot(), pretty_print=True, method="html"))
|
class Trabajo():
    """Node of a doubly-linked list describing one assembly-line job.

    State codes held in ``estado``:
      0 -> wait / do nothing
      1 -> moving
      2 -> assembling
    """
    def __init__(self, numero_linea, numero_componente, tiempo, indice_producto, nombre_producto, tiempo_total, contador_posicion, estado):
        # Identification of the job within the assembly line.
        self.numero_linea = numero_linea
        self.numero_componente = numero_componente
        self.indice_producto = indice_producto
        self.nombre_producto = nombre_producto
        # Timing bookkeeping; tiempo_total = time this product took.
        self.tiempo = tiempo
        self.tiempo_total = tiempo_total
        self.contador_posicion = contador_posicion
        self.estado = estado
        # Doubly-linked-list pointers, wired up by the owning list.
        self.siguiente = None
        self.anterior = None
    def get_numero_linea(self):
        return self.numero_linea
    def get_numero_componente(self):
        return self.numero_componente
    def get_indice_producto(self):
        return self.indice_producto
    def get_nombre_producto(self):
        return self.nombre_producto
    def get_tiempo(self):
        return self.tiempo
    def set_tiempo(self, nuevo_tiempo):
        self.tiempo = nuevo_tiempo
    def get_tiempo_total(self):
        return self.tiempo_total
    def set_tiempo_total(self, tiempo_total):
        self.tiempo_total = tiempo_total
    def get_contador_posicion(self):
        return self.contador_posicion
    def set_contador_posicion(self, contador_posicion):
        self.contador_posicion = contador_posicion
    def get_estado(self):
        return self.estado
    def set_estado(self, estado):
        self.estado = estado
import matplotlib.pyplot as plt
import math
import scipy.constants
import numpy as np
def zadanie1(GT, GR, lam, d):
    """Friis free-space power ratio for each distance in ``d``.

    GT, GR -- antenna gains (linear); lam -- wavelength [m];
    d -- iterable of distances [m].  Returns a list of linear power ratios.
    """
    # Idiomatic comprehension replaces the manual append loop.
    return [GT * GR * (lam / (4 * math.pi * dist)) ** 2 for dist in d]
def zadanie2(dane, c):
    """Propagation delay for each distance: distance / propagation speed.

    dane -- array of distances; c -- propagation speed (same length unit).
    Returns a float numpy array of delays.
    """
    # Vectorised division replaces the per-element loop; also accepts plain
    # sequences, which broadens (but does not break) the original contract.
    return np.asarray(dane, dtype=float) / c
def zadanie3(GT, GR, f, c, d, h1, h2):
    """Two-ray ground-reflection model: received power ratio per distance.

    GT, GR -- antenna gains; f -- frequency [Hz]; c -- wave speed [m/s];
    d -- array of horizontal distances [m]; h1, h2 -- antenna heights [m].
    Returns a float numpy array of linear power ratios.
    """
    lam = c / f
    d = np.asarray(d, dtype=float)
    # Path lengths of the direct and ground-reflected rays (vectorised;
    # replaces the three per-element loops of the original).
    d1 = np.sqrt((h1 - h2) ** 2 + d ** 2)
    d2 = np.sqrt((h1 + h2) ** 2 + d ** 2)
    # Phase accumulated along each path.
    phi1 = -2 * np.pi * f * (d1 / c)
    phi2 = -2 * np.pi * f * (d2 / c)
    # The reflected ray combines with coefficient -1 (difference of terms),
    # exactly as in the original formula.
    field = (np.exp(1j * phi1) / d1) - (np.exp(1j * phi2) / d2)
    return GT * GR * (lam / (4 * np.pi)) ** 2 * np.abs(field) ** 2
def zad1():
    """Plot free-space path loss in dB over 1-100 m and 1-10 km ranges."""
    c = 300000000
    f = 900000000
    lam = c / f
    ratios_short = zadanie1(1.6, 1.6, lam, list(range(1, 101)))
    ratios_long = zadanie1(1.6, 1.6, lam, list(range(1, 10001)))
    # Convert the linear power ratios to decibels.
    db_short = [10 * math.log10(v) for v in ratios_short]
    db_long = [10 * math.log10(v) for v in ratios_long]
    plt.figure()
    plt.title("Zad1 1-100m")
    plt.plot(db_short)
    plt.xlabel('Dystans[m]')
    plt.ylabel('Spadek mocy[Db]')
    plt.savefig('Zad1_1-100m.png', dpi=600)
    plt.show()
    plt.figure()
    plt.title("Zad1 1-10km")
    plt.plot(db_long)
    plt.xlabel('Dystans[m]')
    plt.ylabel('Spadek mocy[Db]')
    plt.savefig('Zad1_1-10km.png', dpi=600)
    plt.show()
def zad2():
    """Plot propagation delay versus distance for 1-100 m and 1-10 km."""
    # np.arange(1, N+1) replaces the original fill-by-index loops.
    dist_short = np.arange(1, 101, dtype=float)
    dist_long = np.arange(1, 10001, dtype=float)
    delay_short = zadanie2(dist_short, 3000000)
    delay_long = zadanie2(dist_long, 3000000)
    plt.figure()
    plt.title("zad2 1-100m")
    plt.plot(delay_short)
    plt.xlabel('Dystans[m]')
    plt.ylabel('Opóźnienie')
    plt.savefig('zad2_1-100m.png', dpi=600)
    plt.show()
    plt.figure()
    plt.title("zad2 1-10km")
    plt.plot(delay_long)
    plt.xlabel('Dystans')
    plt.ylabel('Opóźnienie')
    plt.savefig('zad2_1-10km.png', dpi=600)
    plt.show()
def zad3():
    """Plot the two-ray model power loss for short and long distances."""
    # Multiples of 10 up to 10000 (kept, with its prints, as before).
    listaDo3b = list(range(10, 10001, 10))
    print(listaDo3b)
    print("ilosc: ", len(listaDo3b))
    #zadanie3a od 1 do 100 krok 1
    #zadanie3b od 10 do 10000 krok 10
    c = scipy.constants.c
    f = 900000000
    Gt = 1.6
    Gr = 1.6
    h1 = 30
    h2 = 3
    power_short = zadanie3(Gt, Gr, f, c, np.arange(1.0, 100.1, 0.1), h1, h2)
    power_long = zadanie3(Gt, Gr, f, c, np.arange(1.0, 10000.1, 0.1), h1, h2)
    # Convert linear power ratios to decibels.
    db_short = [10 * math.log10(p) for p in power_short]
    db_long = [10 * math.log10(p) for p in power_long]
    plt.figure()
    plt.title("zad3 1-100m")
    plt.plot(db_short)
    plt.xlabel('Dystans[m]')
    plt.ylabel('Spadek mocy[Db]')
    plt.savefig('zad3_1-100m.png', dpi=600)
    plt.show()
    plt.figure()
    plt.title("zad3 1-10km")
    plt.plot(db_long)
    plt.xscale("log")
    plt.xlabel('Dystans')
    plt.ylabel('Spadek mocy[Db]')
    plt.savefig('zad3_1-10km.png', dpi=600)
    plt.show()
# Entry point: uncomment the exercise you want to run.
if __name__ == "__main__":
    #zad1()
    #zad2()
    zad3()
    # Quick sanity checks of Python's complex-number support.
    print("COMPLEX: ", complex(0, 1))
    print(np.imag(1j))
|
import numpy as np
#Define number of molecules and parameter values for the model
nl = 500            # total number of ligand molecules
nr1 = 30            # receptors of type 1
nr2 = 50            # receptors of type 2
kr1 = pow(10,-3)    # unbinding rate, type 1
kr2 = pow(10,-3)    # unbinding rate, type 2
kd1 = 100           # dissociation constant, type 1
kd2 = 100           # dissociation constant, type 2
kf1 = kr1/kd1       # binding rate, type 1
# NOTE(review): uses kr1 rather than kr2 -- numerically identical here since
# kr1 == kr2, but it looks like a typo; confirm intent.
kf2 = kr1/kd2
N1 = min(nr1,nl)    # maximum possible number of type-1 complexes
N2 = min(nr2,nl)    # maximum possible number of type-2 complexes
#Define the form of the block diagonal matrices used in the algorithm
def Build_QkkMinus1(QkkMinus1,k):
    """Fill the diagonal block of downward (level k -> k-1) transitions:
    every state of level k loses a type-2 complex at rate kr2*k."""
    rate = kr2 * k
    top = min(N1, nl - k)
    for row in range(top + 1):
        QkkMinus1[row, row] = rate
def Build_QkkPlus1(QkkPlus1,k):
    """Fill the diagonal block of upward (level k -> k+1) transitions:
    formation of a new type-2 complex from free receptors and ligands."""
    top = min(N1, nl - (k + 1))
    for row in range(top + 1):
        QkkPlus1[row, row] = kf2 * (nr2 - k) * (nl - row - k)
def Build_Qkk(Qkk,k):
    """Fill the within-level generator block (tri-diagonal in i) for level k.

    Row i couples state (i, k) to (i-1, k) and (i+1, k); the diagonal holds
    the negative total outflow rate Qik.  The boundary rows i = 0 and
    i = min(N1, nl-k) omit the neighbour that would fall outside the level.
    """
    if min(N1,nl-k) != 0:
        # Interior rows: both neighbours exist.
        for i in range(1,min(N1,nl-k)):
            Qik = kf1*(nr1-i)*(nl-i-k) + kr1*i + kf2*(nr2-k)*(nl-i-k) + kr2*k
            Qkk[i,i] = -Qik
            Qkk[i,i-1] = kr1*i
            Qkk[i,i+1] = kf1*(nr1-i)*(nl-i-k)
        # First row: no i-1 neighbour.
        i = 0
        Qik = kf1*(nr1-i)*(nl-i-k) + kr1*i + kf2*(nr2-k)*(nl-i-k) + kr2*k
        Qkk[i,i] = -Qik
        Qkk[i,i+1] = kf1*(nr1-i)*(nl-i-k)
        # Last row: no i+1 neighbour.
        i = min(N1,nl-k)
        Qik = kf1*(nr1-i)*(nl-i-k) + kr1*i + kf2*(nr2-k)*(nl-i-k) + kr2*k
        Qkk[i,i] = -Qik
        Qkk[i,i-1] = kr1*i
    else:
        # Degenerate level containing a single state.
        i = 0
        Qik = kf1*(nr1-i)*(nl-i-k) + kr1*i + kf2*(nr2-k)*(nl-i-k) + kr2*k
        Qkk[0,0] = -Qik
#Function to compute the EMA steady state distribution
def EMA_steady_state_dist():
    """Compute the steady-state distribution of the EMA model by block
    elimination of its tri-diagonal (by level k) generator (Algorithm 2).

    Returns an (N2+1) x (max level size) array ``pi_EMA`` whose [k, i]
    entry is the stationary probability of i type-1 and k type-2 complexes;
    rows are zero-padded on the right to a common length.
    """
    #Compute the number of states per level L(k)
    size = []
    for k in range(N2+1):
        count=0
        for r in range(min(N1,nl-k)+1):
            count+=1
        size.append(count)
    #Implement Algorithm 2 for the steady state distribution
    # H[k] is the Schur complement at level k; invH[k] its inverse.
    H = [np.matrix(np.zeros((size[i],size[i]))) for i in range(N2+1)]
    invH = [ np.matrix(np.zeros((size[i],size[i]))) for i in range(N2+1)]
    k = 0
    Qkk=np.matrix(np.zeros((size[k],size[k])))
    Build_Qkk(Qkk,k)
    H[k] = Qkk
    invH[k] = (H[k]).I
    # Forward sweep: fold each lower level into the next Schur complement.
    for k in range(1,N2+1):
        Qkk=np.matrix(np.zeros((size[k],size[k])))
        Build_Qkk(Qkk,k)
        QkkMinus1=np.matrix(np.zeros((size[k],size[k-1])))
        Build_QkkMinus1(QkkMinus1,k)
        QkkPlus1=np.matrix(np.zeros((size[k-1],size[k])))
        Build_QkkPlus1(QkkPlus1,k-1)
        H[k]=Qkk-QkkMinus1*invH[k-1]*QkkPlus1
        if k != N2:
            invH[k] = (H[k]).I
        else:
            # Top level: impose normalisation by replacing the last column
            # with ones; f is the matching unit vector used below.
            zeros = np.zeros(min(N1,int(nl-N2)))
            one = np.array((1))
            f = np.hstack((one,zeros))
            H[k][:,min(N1,int(nl-N2))] = np.matrix((np.ones((len(H[k]))))).T
            invH[k] = (H[k]).I
    piaux=[np.matrix(np.zeros((1,size[i]))) for i in range(N2+1)]
    pi=[np.matrix(np.zeros((1,size[i]))) for i in range(N2+1)]
    # NOTE: f is defined in the k == N2 branch above, which always executes
    # because the forward loop ends at k = N2.
    piaux[N2] = (np.matrix((f[::-1]))*invH[N2])
    # Backward substitution for the unnormalised level distributions.
    for j in reversed(range(N2)):
        QkkMinus1=np.matrix(np.zeros((size[j+1],size[j])))
        Build_QkkMinus1(QkkMinus1,j+1)
        piaux[j]=-piaux[j+1]*QkkMinus1*invH[j]
    # Normalise so the probabilities sum to one.
    tot_sum = []
    for i in range(N2+1):
        tot_sum.append(np.sum(piaux[i]))
    totsum = np.sum(tot_sum)
    lengths = []
    for i in range(N2+1):
        pi[i] = piaux[i]/totsum
        length = len(pi[i].T)
        lengths.append(length)
    max_len = max(lengths)
    # Zero-pad each level's row vector to a common length and stack.
    pi_full = []
    for i in range(N2+1):
        add_zeros = max_len - lengths[i]
        if add_zeros == 0:
            pi_full.append(np.asarray((pi[i])).flatten())
        else:
            pi_new = np.hstack((np.asarray((pi[i])).flatten(),np.zeros(add_zeros)))
            pi_full.append(pi_new)
    pi_EMA = np.stack(pi_full)
    return pi_EMA
#Compute the expected number of monomers of type 1 and 2 in steady state
def EMA_expected_m1():
    """Expected steady-state number of type-1 complexes: sum over m1 of
    m1 times its marginal probability from the global ``pi_EMA``."""
    contributions = [
        m1 * np.sum([pi_EMA[m2, m1] for m2 in range(min(N2, nl - m1) + 1)])
        for m1 in range(N1 + 1)
    ]
    return np.sum(contributions)
def EMA_expected_m2():
    """Expected steady-state number of type-2 complexes: sum over m2 of
    m2 times its marginal probability from the global ``pi_EMA``."""
    contributions = [
        m2 * np.sum([pi_EMA[m2, m1] for m1 in range(min(N1, nl - m2) + 1)])
        for m2 in range(N2 + 1)
    ]
    return np.sum(contributions)
#Run the code to compute the steady state distribution and expected values of m1 and m2 in steady state
# Executed at import time; results live in module-level globals that the
# EMA_expected_* helpers read.
pi_EMA = EMA_steady_state_dist()
exp_m1_EMA = EMA_expected_m1()
exp_m2_EMA = EMA_expected_m2()
|
# Read a number written in base A (digits on the third input line,
# most-significant first) and print its digits in base B.
A, B = map(int, input().split())
m = int(input())
numbers = list(map(int, input().split()))
numbers.reverse()
# After the reverse, numbers[i] is the coefficient of A**i.
num = 0
for i in range(m):
    num += numbers[i] * (A ** i)
# Repeated division yields the base-B digits, least-significant first.
numbers_B = []
while num > 0:
    numbers_B.append(str(num%B))
    num //= B
# BUGFIX: the original printed an empty line when the value was zero;
# zero has the single digit "0" in every base.
if not numbers_B:
    numbers_B.append('0')
numbers_B.reverse()
print(' '.join(numbers_B))
from cart.views import get_cart
from decimal import Decimal
def cart(request):
    """Context processor exposing the session cart's running total.

    Sums Decimal(price) * quantity across every cart item so templates can
    render ``cart_total_price`` on any page.
    """
    items = get_cart(request).values()
    total = sum(Decimal(entry['price']) * entry['quantity'] for entry in items)
    return {
        'cart_total_price': total
    }
import torch.nn as nn
from torch import optim
import torch
from collections import OrderedDict
def training_loop(epochs, optimizer, model, loss_fn, train_t_x, val_t_x, train_t_y, val_t_y):
for epoch in range(1, epochs + 1):
t_p_train = model(train_t_x)
loss_train = loss_fn(t_p_train, train_t_y)
t_p_val = model(val_t_x)
loss_val = loss_fn(t_p_val, val_t_y)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
if epoch == 1 or epoch % 1000 == 0:
print('Epoch {}, Training loss {}, Validation loss {}'.format(epoch, float(loss_train), float(loss_val)))
# Build a small 1-hidden-layer MLP (1 -> 8 -> 1) with named layers so they
# can be addressed as seq_model.hidden_linear etc. below.
seq_model = nn.Sequential(OrderedDict([
    ('hidden_linear', nn.Linear(1, 8)),
    ('hidden_activation', nn.Tanh()),
    ('output_linear', nn.Linear(8, 1))
]))
optimizer = optim.SGD(seq_model.parameters(), lr=1e-3)
# Input values are scaled by 0.1 so the network sees a friendlier range.
t_x = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]
t_x = torch.tensor(t_x).unsqueeze(1) * 0.1
t_y = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0]
t_y = torch.tensor(t_y).unsqueeze(1)
# Random 80/20 train/validation split (unseeded -- varies run to run).
samples = t_x.shape[0]
vals = int(0.2 * samples)
shuffled_indices = torch.randperm(samples)
train_indices = shuffled_indices[:-vals]
val_indices = shuffled_indices[-vals:]
train_t_x = t_x[train_indices]
train_t_y = t_y[train_indices]
val_t_x = t_x[val_indices]
val_t_y = t_y[val_indices]
training_loop(epochs = 3000, optimizer = optimizer, model = seq_model, loss_fn = nn.MSELoss(),
              train_t_x = train_t_x, val_t_x = val_t_x, train_t_y = train_t_y, val_t_y = val_t_y)
# Inspect the hidden layer's gradient left over from the last step.
print('hidden', seq_model.hidden_linear.weight.grad)
# print(ordered_dict_seq_model.output_linear.bias)
# example is as below
# seq_model = nn.Sequential(
#     nn.Linear(1, 13),
#     nn.Tanh(),
#     nn.Linear(13, 1)
# )
# print(seq_model)
# for name, param in seq_model.named_parameters():
#     print(name, param.shape)
import requests
from bs4 import BeautifulSoup
from datetime import datetime, timedelta
import locale
locale.setlocale(locale.LC_ALL, 'ID')
import re
import pandas as pd
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
from requests.exceptions import ConnectionError
import unicodedata
import mysql.connector
class Liputan6:
    """Scraper for liputan6.com category indexes.

    Walks the paginated article lists of one category, extracts each
    article's details and stores previously-unseen articles in MySQL.
    """
    def getAllBerita(self, details, page, cat_link, category, date=datetime.strftime(datetime.today(), '%Y/%m/%d')):
        """Collect every article on the index of ``cat_link`` for ``date``.

        date format : YYYY/mm/dd
        Recurses through pagination; inserted articles are appended to
        ``details``.  Returns a status string when done.
        """
        print("page ", page)
        url = "https://www.liputan6.com/"+cat_link+"/indeks/"+date+"?page="+str(page)
        print(url)
        # Make the request and create the response object: response
        try:
            response = requests.get(url)
        except ConnectionError:
            print("Connection Error, but it's still trying...")
            time.sleep(5)
            # BUGFIX: retry via recursion and return its result.  The
            # original fell through with `response` unbound, raising
            # NameError immediately after a failed request.
            return self.getAllBerita(details, page, cat_link, category, date)
        # Extract HTML texts contained in Response object: html
        html = response.text
        # Create a BeautifulSoup object from the HTML: soup
        soup = BeautifulSoup(html, "html5lib")
        contentDiv = soup.find('div', class_="articles--list articles--list_rows")
        if contentDiv:
            for post in contentDiv.findAll('figure'):
                link = [post.find('a', href=True)['href'], category]
                detail = self.getDetailBerita(link)
                if detail:
                    # Keep only articles that were newly inserted.
                    if self.insertDB(detail):
                        details.append(detail)
        el_page = soup.find('div', class_="simple-pagination__container")
        if el_page:
            a_page = el_page.find('ul').findAll('li', class_="simple-pagination__page-number")[-1].find('span')
            if el_page.find('ul').findAll('li', class_="simple-pagination__page-number")[-1].find('span', class_="simple-pagination__page-number-link simple-pagination__page-number-link_active"):
                # The last page marker is active: we are on the final page.
                max_page = page
            else:
                max_page = el_page.find('ul').findAll('li', class_="simple-pagination__page-number")[-1]
                max_page = int(max_page['data-page'].replace('\n', '').strip(' '))
            if page < max_page:
                time.sleep(5)
                details = self.getAllBerita(details, page+1, cat_link, category, date)
        return 'berhasil ambil semua berita'
    def getDetailBerita(self, link):
        """Fetch one article page and return its fields as a dict.

        Returns False for pages that should be skipped (photo/video titles,
        missing breadcrumb or article body).
        """
        time.sleep(5)
        articles = {}
        #link
        url = link[0]
        response = requests.get(url)
        html = response.text
        # Create a BeautifulSoup object from the HTML: soup
        soup = BeautifulSoup(html, "html5lib")
        #articleid
        article_id = soup.find('article', class_='hentry main read-page--core-article')
        articles['id'] = int(article_id['data-article-id']) if article_id else ''
        #extract title
        title = soup.find('meta', attrs={"property":"og:title"})['content']
        articles['title'] = title
        if ("foto:" in title.lower()) or "video:" in title.lower():
            return False
        bc = soup.find('ul', class_="read-page--breadcrumb")
        if not bc:
            return False
        cat = bc.findAll('a')[-2].get_text(strip=True)
        sub = bc.findAll('a')[-1].get_text(strip=True)
        #category
        articles['category'] = cat
        articles['subcategory'] = sub
        #article_url
        articles['url'] = url
        #article
        article = soup.find("div", class_="article-content-body__item-content")
        if not article:
            return False
        #extract date
        pubdate = soup.find('p', class_="read-page--header--author__datetime-wrapper").find('time')['datetime']
        pubdate = pubdate.strip(' \t\n\r')
        # pubdate = pubdate.replace(' WIB','').replace('Ags', 'Agt')
        # NOTE(review): the formatted value below is immediately overwritten
        # with the raw (already "%Y-%m-%d %H:%M:%S") string; the round-trip
        # only serves to validate the format.
        articles['pubdate']= datetime.strftime(datetime.strptime(pubdate, "%Y-%m-%d %H:%M:%S"), "%Y-%m-%d %H:%M:%S")
        articles['pubdate'] = pubdate
        #extract author
        author = soup.find('a', class_="read-page--header--author__link url fn").find('span', class_="read-page--header--author__name fn").get_text(strip=True)
        articles['author'] = author
        #source
        articles['source'] = 'Liputan6'
        #extract comments count
        comments = soup.find('li', class_="read-page--social-share__list-item js-social-share-comment").find('a')
        comments = int(comments.find('span', class_="read-page--social-share__comment-total").get_text(strip=True))
        articles['comments'] = comments
        #extract tags
        tags = soup.findAll('span', class_="tags--snippet__name")
        articles['tags'] = ','.join([x.get_text(strip=True) for x in tags]) if tags else ''
        #extract images
        image = soup.find('picture', class_="read-page--photo-gallery--item__picture")
        articles['images'] = image.find('img')['src'] if image else ''
        # remove inline "read also" link boxes from the body
        for link in article.findAll('div', class_="baca-juga"):
            link.decompose()
        # strip trailing "watch the video below" promo paragraphs
        for vid in article.findAll('p'):
            if "saksikan video di bawah" in vid.get_text(strip=True).lower():
                vid.decompose()
        for vid2 in article.findAll('p'):
            if "saksikan video pilihan di bawah" in vid2.get_text(strip=True).lower():
                vid2.decompose()
        for vid3 in article.findAll('p'):
            if "saksikan cuplikan pertandingan dari liga inggris" in vid3.get_text(strip=True).lower():
                vid3.decompose()
        #hapus all script
        #for script in article.findAll('script'):
            #script.decompose()
        #extract content
        detail = BeautifulSoup(article.decode_contents().replace('<br/>', ' '), "html5lib")
        content = re.sub(r'\n|\t|\b|\r','',unicodedata.normalize("NFKD",detail.get_text(strip=True)))
        articles['content'] = content
        #print('memasukkan berita id ', articles['id'])
        return articles
    def insertDB(self, articles):
        """Insert an article row unless its URL is already stored.

        Returns True when a new row was inserted, False when the URL exists.
        """
        con = mysql.connector.connect(user='root', password='', host='127.0.0.1', database='news_db')
        print("Insert berita ", articles['title'])
        cursor = con.cursor()
        # SECURITY FIX: parameterised query -- the URL comes from scraped,
        # untrusted HTML and was previously concatenated into the SQL text.
        query = "SELECT count(*) FROM article WHERE url like %s"
        cursor.execute(query, (articles['url'],))
        result = cursor.fetchone()
        if result[0] <= 0:
            add_article = ("INSERT INTO article (post_id, author, pubdate, category, subcategory, content, comments, images, title, tags, url, source) VALUES (%(id)s, %(author)s, %(pubdate)s, %(category)s, %(subcategory)s, %(content)s, %(comments)s, %(images)s, %(title)s, %(tags)s, %(url)s, %(source)s)")
            # Insert article
            cursor.execute(add_article, articles)
            con.commit()
            print('masuk')
            cursor.close()
            con.close()
            return True
        else:
            cursor.close()
            print('salah2')
            con.close()
            return False
|
from .Step import Step
from lib.Logger import Log
import pathlib
import os
import glob
import pathlib
import shutil
import glob
class CopyFiles(Step):
    """Build step that copies files/folders matching glob patterns from a
    source directory into a destination directory."""
    def __init__(self):
        Step.__init__(self)
        self._targets = None
        self._from = None
        self._to = None
        self._force = False
    def serialize(self, jsonNode):
        """Read the step configuration; 'to' is resolved to an absolute path."""
        self._targets = jsonNode["targets"]
        self._to = jsonNode["to"]
        self._from = jsonNode["from"]
        self._force = jsonNode["force"]
        self._to = pathlib.Path(self._to).resolve().__str__()
    def run(self):
        """Copy everything matching each target pattern; False on bad config."""
        if not os.path.exists(self._from):
            Log.error("Can't find 'from' path: '{0}'".format(self._from))
            return False
        if not os.path.isdir(self._from):
            Log.error("The 'from' path: '{0}' is not a dir".format(self._from))
            return False
        for pattern in self._targets:
            for match in glob.glob("{0}/{1}".format(self._from, pattern)):
                self._copySingleFile(match)
        return True
    def _copySingleFile(self, fileName):
        """Copy one file or directory into self._to, honouring self._force."""
        if not os.path.exists(self._to):
            os.makedirs(self._to)
        destination = "{0}/{1}".format(self._to, pathlib.Path(fileName).name)
        if os.path.exists(destination):
            if not self._force:
                Log.debug("Skip copy because target already exist in destination: {0}".format(destination))
                return True
            if os.path.isdir(destination):
                Log.debug("Remove target before coping: {0}".format(destination))
                shutil.rmtree(destination)
        if os.path.isdir(fileName):
            Log.debug("Copy folder: {0}, to folder: {1}".format(fileName, self._to))
            shutil.copytree(fileName, destination)
        else:
            Log.debug("Copy file : {0}, to folder: {1}".format(fileName, self._to))
            shutil.copy(fileName, destination)
from flask import Flask, url_for, render_template
from flask_restful import Api, Resource, reqparse, inputs
import pymysql
app = Flask(__name__)
api = Api(app)
# Single shared connection/cursor to the local MySQL "weather" database,
# created at import time and reused by every view (rows come back as dicts).
db = pymysql.connect("localhost","root","123456","weather")
cursor = db.cursor(pymysql.cursors.DictCursor)
class LoginView(Resource):
    """POST endpoint returning the stored weather rows for one city/month."""
    # Example/default query values (kept for reference; post() reads its own
    # arguments from the request).
    address = '咸阳'
    year = '2017'
    months = '11'
    def post(self):
        parser = reqparse.RequestParser()
        parser.add_argument("address",required=True)
        parser.add_argument("year",required=True)
        parser.add_argument("months",required=True)
        args = parser.parse_args()
        address = args.get("address")
        year = args.get("year")
        months = args.get("months")
        print(address, year, months)
        # SECURITY FIX: parameterised query instead of str.format -- the
        # previous version was vulnerable to SQL injection via the request
        # parameters.  (Column name 'yer' matches the existing schema;
        # presumably a historical typo -- confirm before renaming.)
        sql = "SELECT * FROM APP_weather WHERE city=%s and yer=%s and month=%s"
        # sql = "DESC APP_weather"
        print(sql)
        try:
            cursor.execute(sql, (address, year, months))
            results = cursor.fetchall()
            print("当月天气的列表:",results)
            return {"weather_list":results}
        except Exception:
            # Narrowed from a bare except so Ctrl-C still works.
            import traceback  # print the full error details
            traceback.print_exc()
            return "Error: unable to fetch data"
# Expose LoginView at /login/ (POST only).
api.add_resource(LoginView, "/login/")
@app.route('/')
def hello_world():
    """Landing page: list every distinct city that has weather records."""
    sql = "SELECT DISTINCT city FROM APP_weather;"
    # sql = "DESC APP_weather"
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
        print("第一个页面的访问数据:",results)
        return render_template("index.html", results=results)
    except:
        import traceback  # print the full error details
        traceback.print_exc()
        return "没有当天气"
    # return render_template("index.html")
if __name__ == '__main__':
    # Development server on port 8887; debug mode enables auto-reload.
    app.run(debug=True, port=8887)
import re
#maximum munch
#regex should match the biggest string that it can not small parts
# BUGFIX: use the print() function so this runs on Python 3 as well as
# Python 2 (the original print statement is a SyntaxError on Python 3).
print(re.findall(r"[0-9]+", "13 from 1 in 1776"))
#>>> ["13","1","1776"]
import numpy as np
import pandas as pd
import datetime
from sqlalchemy import create_engine
# import matplotlib.pyplot as plt
import tensorflow as tf
import keras
from keras.models import Model, Sequential, load_model
from keras.layers import Input, Dense, LSTM
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
# Notebook-style scratch: each assignment overwrites the previous one, so
# only the LAST uncommented table_name is in effect here. The trailing
# comments record observed anomalies in each sensor table.
table_name = 'grow_data_0a05p06e'
# table_name = 'grow_data_m65ex1jn' # flatline of light, and wonky sporadic soil_moisture readings ????
table_name = 'grow_data_1zh4hdzr' # flatline of soil moisture @ end
table_name = 'grow_data_4bcqg5xe' # flatline soil moisture @ end
table_name = 'grow_data_8gbfyzmp' # 2 flatline soil moistures
table_name = 'grow_data_8em4zdze' # all flatline light&soil moisture, temp ok
table_name = 'grow_data_63egxgj1' # light highlines and goes funky then returns to normalcy
table_name = 'grow_data_566xkgq2' # light square peaking at end
table_name = 'grow_data_cyza0e03' # light & soil go funky, temp also weird
table_name = 'grow_data_9ggnsrj8' # all good
table_name = 'grow_data_5rfs4y7n' # all good
table_name = 'grow_data_5pga25ec' # all good
table_name = 'grow_data_5nrha8hq' # all good
table_name = 'grow_data_5kc81f8r' # all good
table_name = 'grow_data_02krq5q5' # all good
table_name = 'grow_data_0srkxe23' # all good
# Known-clean tables used for training.
# NOTE(review): 'grow_data_5pga25ec' appears twice — confirm whether the
# duplicate is intentional or one entry should be 'grow_data_5rfs4y7n'.
good_tables = ['grow_data_5pga25ec','grow_data_5pga25ec','grow_data_5kc81f8r','grow_data_02krq5q5','grow_data_0srkxe23']
# took 775.566321 Sec. (12min) to train above list of tables on 3 variables
# took 772.855652 Sec. (12min) to train above list of table on 1 variable (!!)
# Empty DSN: presumably relies on libpq defaults/env vars — TODO confirm.
conn = create_engine('postgresql+psycopg2://')
######## FOR PREDICT TABLE ########
# Scratch selection: only the LAST table_name below is used.
table_name = 'grow_data_4bcqg5xe' # flatline soil moisture @ end
table_name = 'grow_data_8em4zdze' # all flatline light&soil moisture, temp ok
table_name = 'grow_data_63egxgj1' # light highlines and goes funky then returns to normalcy
table_name = 'grow_data_566xkgq2' # light square peaking at end
table_name = 'grow_data_cyza0e03' # light & soil go funky
######## FOR SINGLE TABLE ########
sql = f"""SELECT * FROM {table_name}"""
df = pd.read_sql(sql, conn, parse_dates=['datetime'])
# Dataframe is not ordered, sort by datetime
df = df.sort_values(axis=0, by=['datetime'])
scaler = MinMaxScaler()
# Trim the leading remainder so the row count is divisible by 96
# (96 observations per day).
remainder = len(df) % 96
predict_df = df[remainder:] # df divisible by 96
##################################
######### PREDICT DFS ###########
predict_df_2 = predict_df.copy(deep=True)
predict_df_3 = predict_df.copy(deep=True)
predict_df_4 = predict_df.copy(deep=True)
# BUG FIX: this assignment was commented out, but predict_dates is consumed
# below (predict_dates.values), which raised a NameError.
predict_dates = predict_df['datetime']
predict_dates_2 = predict_df_4.drop(['sensor_id'], axis=1).drop(['soil_moisture'], axis=1).drop(['light'], axis=1).drop(['air_temperature'], axis=1)
# One single-column frame per GROW variable.
predict_df_soil = predict_df.drop(['sensor_id'], axis=1).drop(['datetime'], axis=1).drop(['light'], axis=1).drop(['air_temperature'], axis=1)
predict_df_light = predict_df_2.drop(['sensor_id'], axis=1).drop(['datetime'], axis=1).drop(['soil_moisture'], axis=1).drop(['air_temperature'], axis=1)
predict_df_air = predict_df_3.drop(['sensor_id'], axis=1).drop(['datetime'], axis=1).drop(['soil_moisture'], axis=1).drop(['light'], axis=1)
predict_df_soil_scaled = scaler.fit_transform(predict_df_soil)
predict_df_light_scaled = scaler.fit_transform(predict_df_light)
predict_df_air_scaled = scaler.fit_transform(predict_df_air)
# Reshape to (days, 96 timesteps, 1 feature) for the LSTM autoencoders.
timesteps = 96
dim = 1
samples = len(predict_df_soil_scaled)
predict_df_soil_scaled.shape = (int(samples/timesteps),timesteps,dim)
predict_df_light_scaled.shape = (int(samples/timesteps),timesteps,dim)
predict_df_air_scaled.shape = (int(samples/timesteps),timesteps,dim)
predict_dates_array = predict_dates.values
predict_dates_array.shape = (int(samples/timesteps),timesteps,dim)
# NOTE(review): model_soil/model_light/model_air are defined further down in
# this file — this section assumes notebook-style execution order; running the
# file top-to-bottom would raise NameError here.
predictions_soil = model_soil.predict(predict_df_soil_scaled)
mse_soil = np.mean(np.power(predict_df_soil_scaled - predictions_soil, 2), axis=1)
predictions_light = model_light.predict(predict_df_light_scaled)
mse_light = np.mean(np.power(predict_df_light_scaled - predictions_light, 2), axis=1)
predictions_air = model_air.predict(predict_df_air_scaled)
mse_air = np.mean(np.power(predict_df_air_scaled - predictions_air, 2), axis=1)
######## FOR LIST OF TABLES ########
# Concatenate every known-good table into one training dataframe.
big_df = pd.DataFrame()
for table in good_tables:
    df = pd.read_sql(f"SELECT * FROM {table}", conn, parse_dates=['datetime'])
    df = df.sort_values(axis=0, by=['datetime'])
    big_df = pd.concat([big_df, df])
# Make big_df divisible by 96, 96 observations per day
scaler = MinMaxScaler()
remainder = len(big_df) % 96
new_df = big_df[remainder:]
####################################
# Copy 2 new dfs for light and air temp GROW variables
new_df_2 = new_df.copy(deep=True)
new_df_3 = new_df.copy(deep=True)
# Drop extra columns so each df has one column each
new_dates = new_df['datetime']
new_df_soil = new_df.drop(['sensor_id'], axis=1).drop(['datetime'], axis=1).drop(['light'], axis=1).drop(['air_temperature'], axis=1)
new_df_light = new_df_2.drop(['sensor_id'], axis=1).drop(['datetime'], axis=1).drop(['soil_moisture'], axis=1).drop(['air_temperature'], axis=1)
new_df_air = new_df_3.drop(['sensor_id'], axis=1).drop(['datetime'], axis=1).drop(['soil_moisture'], axis=1).drop(['light'], axis=1)
# new_df_soil.columns
# Index(['soil_moisture'], dtype='object')
new_df_soil_scaled = scaler.fit_transform(new_df_soil)
new_df_light_scaled = scaler.fit_transform(new_df_light)
new_df_air_scaled = scaler.fit_transform(new_df_air)
# Reshape to (days, 96 timesteps, 1 feature).
timesteps = 96
dim = 1
samples = len(new_df_soil_scaled)
new_df_soil_scaled.shape = (int(samples/timesteps),timesteps,dim)
new_df_light_scaled.shape = (int(samples/timesteps),timesteps,dim)
new_df_air_scaled.shape = (int(samples/timesteps),timesteps,dim)
# NOTE(review): `model` is never defined in this file (only model_soil /
# model_light / model_air are) — this line raises NameError when run as-is.
predictions5 = model.predict(new_df_soil_scaled)
mse5 = np.mean(np.power(new_df_soil_scaled - predictions5, 2), axis=1)
# Plot the 3 variables to visualize
# NOTE(review): the matplotlib import is commented out at the top of the
# file, so `plt` is undefined here unless run in a notebook that imported it.
size = len(df)
fig, ax = plt.subplots(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')
ax.plot(range(0,size), df['soil_moisture'], 'o', color='blue', linewidth=1, label='soil_moisture')
ax.plot(range(0,size), df['light'], 'o', color='red', linewidth=1, label='light')
ax.plot(range(0,size), df['air_temperature'], 'o', color='green', linewidth=1, label='air_temperature')
legend = ax.legend(loc='upper center', shadow=True, fontsize='x-large')
# Stateful LSTM autoencoder variant (superseded by the stateless version
# immediately below, which rebinds model_soil).
# NOTE(review): batch_size is only assigned further down in the file — this
# stateful block relies on notebook execution order and raises NameError if
# the file is run top-to-bottom.
model_soil = Sequential()
model_soil.add(LSTM(50,batch_input_shape=(batch_size,timesteps,dim),return_sequences=True,stateful=True))
model_soil.add(LSTM(50,batch_input_shape=(batch_size,timesteps,dim),return_sequences=True,stateful=True))
model_soil.add(LSTM(50,batch_input_shape=(batch_size,timesteps,dim),return_sequences=True,stateful=True))
model_soil.add(LSTM(50,batch_input_shape=(batch_size,timesteps,dim),return_sequences=True,stateful=True))
model_soil.add(Dense(1))
model_soil.compile(loss='mse', optimizer='adam')
# Stateless 4-layer LSTM autoencoder actually used for soil moisture.
model_soil = Sequential()
model_soil.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_soil.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_soil.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_soil.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_soil.add(Dense(1))
model_soil.compile(loss='mse', optimizer='adam') # starts and finishes with less val_loss & loss
# model.compile(loss='mae', optimizer='adam') # compared to mae loss parameter
# prediction mse is slightly less with 'mse' loss parameter
########
# model & predictions performed MUCH better when the train&test datasets were
# divisible by 96 and there was no observation gap between them
# Same architecture for the light variable.
model_light = Sequential()
model_light.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_light.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_light.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_light.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_light.add(Dense(1))
model_light.compile(loss='mse', optimizer='adam')
# Same architecture for the air temperature variable.
model_air = Sequential()
model_air.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_air.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_air.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_air.add(LSTM(50,input_shape=(timesteps,dim),return_sequences=True))
model_air.add(Dense(1))
model_air.compile(loss='mse', optimizer='adam')
# Hyperparameter scratch: the later assignments win (nb_epoch=100, batch_size=1).
nb_epoch = 100
batch_size = 32
nb_epoch = 100
batch_size = 1
# Model trained on soil moisture data
# Each model is trained as an autoencoder: input == target.
start_time = datetime.datetime.now()
history_soil = model_soil.fit(new_df_soil_scaled, new_df_soil_scaled,
                    epochs=nb_epoch,
                    batch_size=batch_size,
                    shuffle=True,
                    validation_split=0.1,
                    verbose=0
                    )
end_time = datetime.datetime.now()
print('Time to run the model: {} Sec.'.format((end_time - start_time).total_seconds()))
df_history_soil = pd.DataFrame(history_soil.history)
# Model trained on light data
start_time = datetime.datetime.now()
history_light = model_light.fit(new_df_light_scaled, new_df_light_scaled,
                    epochs=nb_epoch,
                    batch_size=batch_size,
                    shuffle=True,
                    validation_split=0.1,
                    verbose=0
                    )
end_time = datetime.datetime.now()
print('Time to run the model: {} Sec.'.format((end_time - start_time).total_seconds()))
df_history_light = pd.DataFrame(history_light.history)
# Model trained on air temperature data
start_time = datetime.datetime.now()
history_air = model_air.fit(new_df_air_scaled, new_df_air_scaled,
                    epochs=nb_epoch,
                    batch_size=batch_size,
                    shuffle=True,
                    validation_split=0.1,
                    verbose=0
                    )
end_time = datetime.datetime.now()
print('Time to run the model: {} Sec.'.format((end_time - start_time).total_seconds()))
df_history_air = pd.DataFrame(history_air.history)
# NOTE(review): `model` is undefined in this file — presumably model_soil was
# meant. This line raises NameError when run top-to-bottom.
predictions = model.predict(new_df_soil_scaled)
mse = np.mean(np.power(new_df_soil_scaled - predictions, 2), axis=1)
# Plot training and validation loss to check for over/underfitting
loss = df_history_air['loss']
val_loss = df_history_air['val_loss']
epochs = range(nb_epoch)
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# NOTE(review): X_train_scaled / X_test_scaled are never defined in this file;
# this tail appears to be residue from an earlier train/test-split experiment.
predictions = model.predict(X_train_scaled)
# Reconstruction Error
mse = np.mean(np.power(X_train_scaled - predictions, 2), axis=1)
predictions2 = model.predict(X_test_scaled)
prediction2_diff = X_test_scaled - predictions2
mse2 = np.mean(np.power(X_test_scaled - predictions2, 2), axis=1)
# Conclusion after 2 sets of runs 32 versus 96 batch size:
# 32 takes twice as long, but model performs better and predicts better
# batch_size = 32 (5 96/2/2 LSTM layers) took 81.720021 Sec.
# 80.21589 Sec.
# batch_size = 32 (5 50 LSTM layers) took 85.752646 Sec.
# batch_size = 32 (5 50 LSTM layers) 55k df took 363.238711 Sec.
# batch_size = 32 (4 50 LSTM layers) took 63.534043 Sec.
# 66.289958 Sec.
# 62.486536 Sec.
# batch_size = 96 (4 50 LSTM layers) took 30.50511 Sec.
# 33.106675 Sec.
# df_history - 5 96/2/2 LSTM layers
# 32 val_loss loss
# 0 0.068745 0.126029
# 99 0.002147 0.003971
# mse is a bit bigger than that of 4 50 LSTM layers
# df_history - 5 96/2/2 LSTM layers
# 32 val_loss loss
# 0 0.061319 0.122805
# 99 0.001662 0.003950
# df_history - 5 50 LSTM layers
# 32 val_loss loss
# 0 0.064096 0.124795
# 99 0.001596 0.004357
# df_history - 4 50 LSTM layers
# 32 val_loss loss
# 0 0.055516 0.115401
# 99 0.001224 0.003543
# df_history3 - 4 50 LSTM layers
# 32 val_loss loss
# 0 0.065078 0.125813
# 99 0.001556 0.004012
# mse generally less for soil_moisture, air_temp, about equal for light
# df_history5 - 4 50 LSTM layers
# 32 val_loss loss
# 0 0.062452 0.128161
# 99 0.001678 0.004139
# mse6-mse5 : mse5 generally less for soil_moisture, air_temp, equal for light
# df_history4 - 4 50 LSTM layers
# 96 val_loss loss
# 0 0.083342 0.131676
# 99 0.003545 0.007538
# df_history6 - 4 50 LSTM layers
# 96 val_loss loss
# 0 0.078225 0.129254
# 99 0.003618 0.007467
|
def connect2():
    """Interactive OAuth1 dance; returns an authorized Twython client.

    Prints an authorization URL, asks the operator for the PIN displayed by
    Twitter, then exchanges it for access tokens. Python 2 only (print
    statement, raw_input).
    """
    from twython import Twython
    # SECURITY(review): API credentials hard-coded in source control — these
    # should be revoked and loaded from the environment instead.
    APP_KEY = 'pIHTSqoX7QzW4HhFPJauhNglA'
    APP_SECRET = 'hR4x7xDdWkGkX3NafcXCTeP8Mlk5pH0J9OODKD4T7vPT1LJoQM'
    twitter = Twython(APP_KEY, APP_SECRET)
    auth = twitter.get_authentication_tokens()
    # Example of the structure returned by get_authentication_tokens():
    #{u'oauth_token_secret': u'EZCPEc5x6bncqnl4z1cyDz5136UPCN9a',
    #'auth_url': 'https://api.twitter.com/oauth/authenticate?oauth_token=NltG6gAAAAAAydthAAABWaMwqEg',
    #u'oauth_token': u'NltG6gAAAAAAydthAAABWaMwqEg',
    #u'oauth_callback_confirmed': u'true'}
    OAUTH_TOKEN = auth['oauth_token']
    OAUTH_TOKEN_SECRET = auth['oauth_token_secret']
    OAUTH_URL = auth["auth_url"]
    print OAUTH_URL
    variable = raw_input('input PIN CODE!: ')
    # Request-token client used to trade the PIN for access tokens.
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    # NOTE(review): the verifier is usually passed as a string; int() works
    # only because Twitter PINs are numeric — confirm against Twython docs.
    final_step = twitter.get_authorized_tokens(int(variable))
    OAUTH_TOKEN = final_step['oauth_token']
    OAUTH_TOKEN_SECRET = final_step['oauth_token_secret']
    # Fully authorized client.
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    return twitter
def load(filename,retdef):
    """Unpickle and return the contents of `filename`.

    Returns `retdef` (caller-supplied default) when the file does not exist.
    """
    import os
    import pickle
    if(not os.path.isfile(filename)):
        print "File "+filename+ " does not exist"
        return retdef
    with open(filename, "rb") as binFile:
        return pickle.load(binFile)
def main():
    """Follow every Twitter user id pickled in tables/users2follow.bin."""
    from twython import TwythonError, TwythonRateLimitError, TwythonAuthError
    twitter= connect2()
    path = "tables/users2follow.bin"
    # Default to an empty list when the pickle file is missing.
    vUsers = load(path,[])
    for user in vUsers:
        print user
        try:
            twitter.create_friendship(user_id=user,follow=True)
        except TwythonError as e:
            # Best effort: log the failure and keep following remaining users.
            print e
if __name__ == "__main__":
    main()
# Sample user object returned by the Twitter API for a follow request; this
# module-level string literal is inert and kept only as a reference.
'''{u'follow_request_sent': True
u'has_extended_profile': False
u'profile_use_background_image': True
u'default_profile_image': False
u'id': 774314204828499969
u'profile_background_image_url_https': None
u'verified': False
u'translator_type': u'none'
u'profile_text_color': u'333333'
u'muting': False
u'profile_image_url_https': u'https://pbs.twimg.com/profile_images/819567389230698498/Z-I6SnNw_normal.jpg'
u'profile_sidebar_fill_color': u'DDEEF6'
u'entities': {u'description': {u'urls': []}}
u'followers_count': 29
u'profile_sidebar_border_color': u'C0DEED'
u'id_str': u'774314204828499969'
u'profile_background_color': u'F5F8FA'
u'listed_count': 1
u'is_translation_enabled': False
u'utc_offset': None
u'statuses_count': 6240
u'description': u'weno'
u'friends_count': 97
u'location': u' Conchinchina'
u'profile_link_color': u'1DA1F2'
u'profile_image_url': u'http://pbs.twimg.com/profile_images/819567389230698498/Z-I6SnNw_normal.jpg'
u'following': False
u'geo_enabled': False
u'profile_banner_url': u'https://pbs.twimg.com/profile_banners/774314204828499969/1483411260'
u'profile_background_image_url': None
u'screen_name': u' '
u'lang': u'es'
u'profile_background_tile': False
u'favourites_count': 506
u'name': u'Max Power'
u'notifications': False
u'url': None
u'created_at': u'Fri Sep 09 18:31:07 +0000 2016'
u'contributors_enabled': False
u'time_zone': None
u'protected': True
u'default_profile': True
u'is_translator': False}
'''
# For each queried seat, print the number of the seat facing it and the berth
# type: WS = window side, MS = middle side, AS = aisle side.
# Seats repeat in groups of 12; the facing seat is a fixed offset per residue.
_FACING = {
    0: (-11, 'WS'),
    1: (11, 'WS'),
    2: (9, 'MS'),
    3: (7, 'AS'),
    4: (5, 'AS'),
    5: (3, 'MS'),
    6: (1, 'WS'),
    7: (-1, 'WS'),
    8: (-3, 'MS'),
    9: (-5, 'AS'),
    10: (-7, 'AS'),
    11: (-9, 'MS'),
}
for _ in range(int(input())):
    seat = int(input())
    offset, berth = _FACING[seat % 12]
    print(seat + offset, berth)
#!/usr/bin/env python3
import sys
import os.path
# NATO/ICAO spelling alphabet plus standard radio digit pronunciations
# ("Tree", "Fife", "Niner") and prosigns for dash and full stop.
# NOTE(review): the name `hash` shadows the builtin of the same name.
hash = {
    'A': 'Alpha',
    'B': 'Bravo',
    'C': 'Charlie',
    'D': 'Delta',
    'E': 'Echo',
    'F': 'Foxtrot',
    'G': 'Golf',
    'H': 'Hotel',
    'I': 'India',
    'J': 'Juliett',
    'K': 'Kilo',
    'L': 'Lima',
    'M': 'Mike',
    'N': 'November',
    'O': 'Oscar',
    'P': 'Papa',
    'Q': 'Quebec',
    'R': 'Romeo',
    'S': 'Sierra',
    'T': 'Tango',
    'U': 'Uniform',
    'V': 'Victor',
    'W': 'Whiskey',
    'X': 'Xray',
    'Y': 'Yankee',
    'Z': 'Zulu',
    '0': 'Zero',
    '1': 'One',
    '2': 'Two',
    '3': 'Tree',
    '4': 'Fower',
    '5': 'Fife',
    '6': 'Six',
    '7': 'Seven',
    '8': 'Eight',
    '9': 'Niner',
    '-': 'Dash',
    '.': 'Stop',
}
# German spelling alphabet, selected when the program is invoked as 'natode'
# (see the argv[0] check below).
de = {
    'A': 'Anton',
    'Ä': 'Ärger',
    'B': 'Berta',
    'C': 'Cäsar',
    'D': 'Dora',
    'E': 'Emil',
    'F': 'Friedrich',
    'G': 'Gustav',
    'H': 'Heinrich',
    'I': 'Ida',
    'J': 'Julius',
    'K': 'Kaufmann',
    'L': 'Ludwig',
    'M': 'Martha',
    'N': 'Nordpol',
    'O': 'Otto',
    'Ö': 'Ökonom',
    'P': 'Paula',
    'Q': 'Quelle',
    'R': 'Richard',
    'S': 'Samuel',
    'ß': 'Eszett',
    'T': 'Theodor',
    'U': 'Ulrich',
    'Ü': 'Übermut',
    'V': 'Viktor',
    'W': 'Wilhelm',
    'X': 'Xanthippe',
    'Y': 'Ypsilon',
    'Z': 'Zacharias',
    '-': 'Strich',
    '.': 'Punkt',
}
# Use the German table when invoked via the 'natode' entry point.
if os.path.basename(sys.argv[0]) == 'natode':
    hash = de
# Input comes from stdin when no argument (or "-") is given, otherwise
# each command-line argument is one word to spell out.
if len(sys.argv) < 2 or sys.argv[1] == '-':
    text = [line.strip() for line in sys.stdin.readlines()]
else:
    text = sys.argv[1:]
for index, word in enumerate(text):
    for char in word.upper():
        if char in hash:
            print(hash[char])
        elif char == ' ':
            print()
        else:
            # Characters without a spelling are echoed with a marker.
            print(">> " + char)
    # Separator between words, but not after the last one.
    if index != len(text) - 1:
        print()
        print("===")
        print()
|
from ariadne import MutationType, QueryType
from ariadne_token_auth.api import resolvers
from ariadne_token_auth.decorators import login_required
# Root query/mutation bindables for the schema.
query = QueryType()
mutation = MutationType()
# Auth-token lifecycle mutations, wired to the packaged resolvers.
auth_mutation = MutationType()
auth_mutation.set_field("getAuthToken", resolvers.get_auth_token)
auth_mutation.set_field("deleteAuthToken", resolvers.delete_auth_token)
# SDL snippets contributed by ariadne_token_auth.
auth_type_definitions = [
    resolvers.type_defs,
]
@query.field("testQuery")
@login_required
def test_query(self, info, *args, **kwargs):
    """Login-guarded resolver that echoes the authenticated request user."""
    request = info.context.get("request")
    return {"user": request.user}
@mutation.field("testMutation")
@login_required
def test_mutation(self, info, *args, **kwargs):
    """Login-guarded mutation that echoes the authenticated request user."""
    request = info.context.get("request")
    return {"user": request.user}
|
from db import db
from flask import session
from datetime import datetime
from sqlalchemy import func, asc, select
import json
import random
class Model:
    """Mixin adding naive validate/save helpers to the db.Model subclasses
    below.
    """

    # Populated by validate(); an empty list means the instance is valid.
    # NOTE(review): class-level mutable default — validate() rebinds it per
    # instance, so sharing only matters before the first validate() call.
    errors = []

    def validate(self):
        """Reset the error list and report validity.

        Subclasses are expected to append messages to self.errors; the base
        implementation therefore always returns True.
        """
        self.errors = []
        return len(self.errors) == 0

    def save(self):
        """Validate and persist the instance.

        Returns True on success, False when validation failed.
        """
        if not self.validate():
            return False
        # Only freshly created rows (no primary key yet) need session.add();
        # changes to already-tracked instances are flushed by commit() alone.
        # (Stray debug prints removed.)
        if self.id is None:
            db.session.add(self)
        db.session.commit()
        return True
class Pupil(db.Model, Model):
    """A pupil account with a JSON-encoded per-subject marks structure."""
    id = db.Column(db.Integer, primary_key=True, unique=True, autoincrement=True)
    name = db.Column(db.String)
    surname = db.Column(db.String)
    # Login-derived access token; uniqueness is enforced by the column, but
    # see the collision note in __init__.
    permit = db.Column(db.String, unique=True)
    login = db.Column(db.String, unique=True)
    # SECURITY(review): passwords are stored and compared in plain text.
    password = db.Column(db.String)
    position = ""
    # JSON list of {"name": <subject>, "arr_marks": [...]} entries.
    marks = db.Column(db.JSON)
    clas = db.Column(db.String)
    def __init__(self, name, surname, login, password, clas):
        self.name = name
        self.surname = surname
        self.login = login
        self.password = password
        self.position = "Pupil"
        # NOTE(review): random suffix does not guarantee uniqueness; a
        # collision would surface as a unique-constraint error on commit.
        self.permit = self.login + str(random.randint(1, 10000000))
        self.marks = Pupil.init_marks(clas)
        self.clas = clas
    @staticmethod
    def init_marks(clas):
        """Build the initial (empty) marks JSON for every subject of `clas`."""
        arr_marks = []
        arr_subject_unique = db.session.query(Subject).filter(Subject.school_class_name == clas).all()
        print(arr_subject_unique)
        for subject in arr_subject_unique:
            arr_marks.append({
                "arr_marks": [],
                "name": subject.name
            })
        print(arr_marks)
        return json.dumps(arr_marks)
    def get_marks(self):
        # Returns the raw JSON string, not the decoded structure.
        return self.marks
    def add_marks(self, name_subject, new_mark):
        """Append `new_mark` to the subject named `name_subject` and persist."""
        subjects_data_mark = json.loads(self.marks)
        for i in range(0, len(subjects_data_mark)):
            if name_subject == subjects_data_mark[i].get('name'):
                subjects_data_mark[i].get("arr_marks").append(new_mark)
                break
        self.marks = json.dumps(subjects_data_mark)
        self.save()
    @staticmethod
    def auth(login, password):
        """Return the pupil matching login/password, or None."""
        return db.session.query(Pupil).filter(Pupil.login == login).filter(Pupil.password == password).first()
class Teacher(db.Model, Model):
    """A teacher account; is_admin grants administrative rights."""
    id = db.Column(db.Integer, primary_key=True, unique=True, autoincrement=True)
    permit = db.Column(db.String, unique=True)
    position = ""
    qualification = db.Column(db.String)
    name = db.Column(db.String)
    surname = db.Column(db.String)
    phone = db.Column(db.Integer)
    # NOTE(review): email declared as Integer — looks like it should be
    # String; confirm against existing data before changing.
    email = db.Column(db.Integer)
    login = db.Column(db.String, unique=True)
    # SECURITY(review): passwords are stored and compared in plain text.
    password = db.Column(db.String)
    is_admin = db.Column(db.Boolean)
    def __init__(self, login, password, name, surname, qualification, phone, email, is_admin):
        self.name = name
        self.surname = surname
        self.login = login
        self.password = password
        self.qualification = qualification
        self.phone = phone
        self.email = email
        self.position = "Teacher"
        # Login-derived token; same collision caveat as Pupil.permit.
        self.permit = self.login + str(random.randint(1, 10000000))
        self.is_admin = is_admin
    def set_admin(self, is_ad):
        """Toggle the admin flag and persist."""
        self.is_admin = is_ad
        self.save()
    @staticmethod
    def auth(login, password):
        """Return the teacher matching login/password, or None."""
        return db.session.query(Teacher).filter(Teacher.login == login).filter(Teacher.password == password).first()
class Subject(db.Model, Model):
    """A taught subject/section/elective, bound to a teacher and a class."""
    id = db.Column(db.Integer, primary_key=True, unique=True)
    type = db.Column(db.String) #subject || section || elective
    # Space-separated pupil ids (see get_students_list).
    students_list = db.Column(db.String)
    teacher_id = db.Column(db.Integer, db.ForeignKey(Teacher.id))
    homework = db.Column(db.String)
    name = db.Column(db.String)
    classroom = db.Column(db.String)
    school_class_name = db.Column(db.String)
    def __init__(self, type_, name, students_list, teacher_id, room, school_class_name):
        self.type = type_
        self.name = name
        self.students_list = students_list
        self.homework = ""
        self.teacher_id = teacher_id
        self.classroom = str(room)
        self.school_class_name = school_class_name
    # Relationship declared mid-class (after __init__); still a class
    # attribute, just unconventional placement.
    teacher = db.relationship(Teacher)
    def get_id(self):
        return self.id
    def get_type(self):
        return self.type
    def get_students_list(self):
        # Decode the space-separated id string into a list of strings.
        return list(str(self.students_list).split(" "))
    def get_teacher_id(self):
        return self.teacher_id
    def set_homework(self, new_homework):
        """Replace the homework text and persist."""
        self.homework = new_homework
        self.save()
    def get_hw(self):
        return self.homework
    def get_name(self):
        return self.name
    def get_class(self):
        return self.classroom
    def set_class(self, cl):
        """Replace the classroom and persist."""
        self.classroom = cl
        self.save()
    def get_school_class_name(self):
        return self.school_class_name
    @staticmethod
    def get_subject(name, teacher_id, school_class_name):
        """Look a subject up by its (name, teacher, class) triple."""
        return db.session.query(Subject)\
            .filter(Subject.name == name)\
            .filter(Subject.teacher_id == teacher_id)\
            .filter(Subject.school_class_name == school_class_name)\
            .first()
class Marks(db.Model, Model):
    """Marks for one subject, stored as a single space-separated string."""

    id = db.Column(db.Integer, unique=True, autoincrement=True, primary_key=True)
    id_subject = db.Column(db.Integer, db.ForeignKey(Subject.id))
    # Each mark is separated by a single space.
    marks = db.Column(db.String)

    def __init__(self, id_subject, marks):
        self.id_subject = id_subject
        self.marks = marks

    def add_mark(self, new_mark):
        """Append one mark to the space-separated list and persist."""
        self.marks = self.marks + " " + new_mark
        self.save()
class Parent(db.Model, Model):
    """A parent account linked to exactly one pupil."""
    id = db.Column(db.Integer, primary_key=True, unique=True, autoincrement=True)
    name = db.Column(db.String)
    surname = db.Column(db.String)
    child_id = db.Column(db.Integer, db.ForeignKey(Pupil.id))
    login = db.Column(db.String, unique=True)
    # SECURITY(review): passwords are stored and compared in plain text.
    password = db.Column(db.String)
    # NOTE(review): unlike Pupil/Teacher, position is never assigned in
    # __init__, so it stays NULL — confirm whether that is intended.
    position = db.Column(db.String)
    def __init__(self, name, surname, child_id, login, password):
        self.name = name
        self.surname = surname
        self.login = login
        self.password = password
        self.child_id = child_id
    child = db.relationship(Pupil)
    @staticmethod
    def auth(login, password):
        """Return the parent matching login/password, or None."""
        return db.session.query(Parent).filter(Parent.login == login).filter(Parent.password == password).first()
class TimetableDay(db.Model, Model):
    """One school day: up to eight lesson slots, each a Subject foreign key
    (nullable — a None slot means a free period).
    """
    id = db.Column(db.Integer, primary_key=True, unique=True, autoincrement=True)
    id_first_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    id_second_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    id_third_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    id_fourth_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    id_fifth_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    id_sixth_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    id_seventh_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    id_eighth_lesson = db.Column(db.Integer, db.ForeignKey(Subject.id))
    def __init__(self, id1, id2, id3, id4, id5, id6, id7, id8):
        self.id_first_lesson = id1
        self.id_second_lesson = id2
        self.id_third_lesson = id3
        self.id_fourth_lesson = id4
        self.id_fifth_lesson = id5
        self.id_sixth_lesson = id6
        self.id_seventh_lesson = id7
        self.id_eighth_lesson = id8
    # One getter per slot; each resolves the stored id to a Subject (or None).
    def get_first_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_first_lesson).first()
    def get_second_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_second_lesson).first()
    def get_third_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_third_lesson).first()
    def get_fourth_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_fourth_lesson).first()
    def get_fifth_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_fifth_lesson).first()
    def get_sixth_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_sixth_lesson).first()
    def get_seventh_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_seventh_lesson).first()
    def get_eighth_lesson(self):
        return db.session.query(Subject).filter(Subject.id == self.id_eighth_lesson).first()
    # One setter per slot; each stores the new subject id and persists.
    def set_first(self, f):
        self.id_first_lesson = f
        self.save()
    def set_second(self, s):
        self.id_second_lesson = s
        self.save()
    def set_third(self, t):
        self.id_third_lesson = t
        self.save()
    def set_fourth(self, f):
        self.id_fourth_lesson = f
        self.save()
    def set_fifth(self, f):
        self.id_fifth_lesson = f
        self.save()
    def set_sixth(self, s):
        self.id_sixth_lesson = s
        self.save()
    def set_seventh(self, s):
        self.id_seventh_lesson = s
        self.save()
    def set_eighth(self, e):
        self.id_eighth_lesson = e
        self.save()
    def get_all_lesson(self):
        """Return the day's eight slots as Subject objects (None for free)."""
        first_lesson = self.get_first_lesson()
        second_lesson = self.get_second_lesson()
        third_lesson = self.get_third_lesson()
        fourth_lesson = self.get_fourth_lesson()
        fifth_lesson = self.get_fifth_lesson()
        sixth_lesson = self.get_sixth_lesson()
        seventh_lesson = self.get_seventh_lesson()
        eighth_lesson = self.get_eighth_lesson()
        return [
            first_lesson,
            second_lesson,
            third_lesson,
            fourth_lesson,
            fifth_lesson,
            sixth_lesson,
            seventh_lesson,
            eighth_lesson
        ]
    def get_list_lesson(self):
        """Return the day's slots as JSON-friendly dicts ("" for free slots)."""
        id_arr = self.get_all_lesson()
        subject_arr = []
        for subject in id_arr:
            if subject is None:
                subject_arr.append("")
                continue
            dictionary = {
                "id": subject.get_id(),
                "type": subject.get_type(),
                "students_list": subject.get_students_list(),
                "teacher_id": subject.get_teacher_id(),
                "homework": subject.get_hw(),
                "name": subject.get_name(),
                "classroom": subject.get_class(),
                "school_class_name": subject.get_school_class_name()
            }
            subject_arr.append(dictionary)
        print(subject_arr)
        return subject_arr
    @staticmethod
    def get_by_id(id_day):
        """Look a day up by its primary key, or None."""
        return db.session.query(TimetableDay).filter(TimetableDay.id == id_day).first()
class TimetableClass(db.Model, Model):
    """A week of lessons for one class: one TimetableDay per weekday."""
    id = db.Column(db.Integer, primary_key=True, unique=True, autoincrement=True)
    id_monday = db.Column(db.Integer, db.ForeignKey(TimetableDay.id))
    id_tuesday = db.Column(db.Integer, db.ForeignKey(TimetableDay.id))
    id_wednesday = db.Column(db.Integer, db.ForeignKey(TimetableDay.id))
    id_thursday = db.Column(db.Integer, db.ForeignKey(TimetableDay.id))
    id_friday = db.Column(db.Integer, db.ForeignKey(TimetableDay.id))
    def __init__(self, id1, id2, id3, id4, id5):
        self.id_monday = id1
        self.id_tuesday = id2
        self.id_wednesday = id3
        self.id_thursday = id4
        self.id_friday = id5
    # *_timetable getters return Subject objects (see TimetableDay.get_all_lesson);
    # NOTE(review): each raises AttributeError when the referenced day row is
    # missing (.first() returns None).
    def get_monday_timetable(self):
        return (db.session.query(TimetableDay).filter(TimetableDay.id == self.id_monday).first()).get_all_lesson()
    def get_tuesday_timetable(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_tuesday).first().get_all_lesson()
    def get_wednesday_timetable(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_wednesday).first().get_all_lesson()
    def get_thursday_timetable(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_thursday).first().get_all_lesson()
    def get_friday_timetable(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_friday).first().get_all_lesson()
    # *_timetable_list getters return JSON-friendly dicts
    # (see TimetableDay.get_list_lesson).
    def get_monday_timetable_list(self):
        return (db.session.query(TimetableDay).filter(TimetableDay.id == self.id_monday).first()).get_list_lesson()
    def get_tuesday_timetable_list(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_tuesday).first().get_list_lesson()
    def get_wednesday_timetable_list(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_wednesday).first().get_list_lesson()
    def get_thursday_timetable_list(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_thursday).first().get_list_lesson()
    def get_friday_timetable_list(self):
        return db.session.query(TimetableDay).filter(TimetableDay.id == self.id_friday).first().get_list_lesson()
    # Setters store the new TimetableDay id and persist.
    def set_monday(self, new):
        self.id_monday = new
        self.save()
    def set_tuesday(self, new):
        self.id_tuesday = new
        self.save()
    def set_wednesday(self, new):
        self.id_wednesday = new
        self.save()
    def set_thursday(self, new):
        self.id_thursday = new
        self.save()
    def set_friday(self, new):
        self.id_friday = new
        self.save()
    def get_all_day_timetable(self):
        """Weekday name -> list of Subject objects for the whole week."""
        return {
            'monday': self.get_monday_timetable(),
            'tuesday': self.get_tuesday_timetable(),
            'wednesday': self.get_wednesday_timetable(),
            'thursday': self.get_thursday_timetable(),
            'friday': self.get_friday_timetable()
        }
    def get_week_dictionary_timetable(self):
        """Weekday name -> list of JSON-friendly lesson dicts."""
        return {
            'monday': self.get_monday_timetable_list(),
            'tuesday': self.get_tuesday_timetable_list(),
            'wednesday': self.get_wednesday_timetable_list(),
            'thursday': self.get_thursday_timetable_list(),
            'friday': self.get_friday_timetable_list()
        }
    @staticmethod
    def get_by_id(id_timetable_class):
        """Look a weekly timetable up by its primary key, or None."""
        return db.session.query(TimetableClass).filter(TimetableClass.id == id_timetable_class).first()
class SchoolClass(db.Model, Model):
    """A class (form) with a head teacher, pupils, and a weekly timetable."""
    id = db.Column(db.Integer, primary_key=True, autoincrement=True, unique=True)
    name = db.Column(db.String, unique=True)
    # Space-separated pupil ids.
    students_list = db.Column(db.String)
    teacher_id = db.Column(db.Integer, db.ForeignKey(Teacher.id))
    id_timetable_class = db.Column(db.Integer, db.ForeignKey(TimetableClass.id))
    def __init__(self, name, students_list, teacher_id):
        self.name = name
        self.students_list = students_list
        self.teacher_id = teacher_id
    teacher = db.relationship(Teacher)
    def get_students_list(self):
        # Decode the space-separated id string into a list of strings.
        return list(str(self.students_list).split(" "))
    def get_list_parents(self):
        """Return the Parent row (or None) for each pupil of the class."""
        list_parents = []
        for student in list(str(self.students_list).split(" ")):
            list_parents.append(db.session.query(Parent).filter(Parent.child_id == int(student)).first())
        return list_parents
    def get_timetable_class(self):
        """Weekly timetable as Subject objects (see TimetableClass)."""
        print(self.id_timetable_class)
        timetable_class = db.session.query(TimetableClass).filter(self.id_timetable_class == TimetableClass.id).first()
        return timetable_class.get_all_day_timetable()
    def get_timetable_class_list(self):
        """Weekly timetable as JSON-friendly dicts (see TimetableClass)."""
        timetable_class = db.session.query(TimetableClass).filter(self.id_timetable_class == TimetableClass.id).first()
        return timetable_class.get_week_dictionary_timetable()
    def set_timetable_class(self, new):
        """Point the class at another TimetableClass id and persist."""
        self.id_timetable_class = new
        self.save()
    def set_students_list(self, new):
        # NOTE(review): str() of a list would store "'[...]'" — callers are
        # presumably expected to pass an already space-separated string.
        new = str(new)
        self.students_list = new
        self.save()
    def add_student(self, new):
        """Append one pupil id to the space-separated list and persist."""
        new = " " + str(new)
        self.students_list += new
        self.save()
    @staticmethod
    def get_class_by_name(name):
        """Look a class up by its unique name, or None."""
        return db.session.query(SchoolClass).filter(SchoolClass.name == name).first()
class School(db.Model, Model):
    """A school whose classes/pupils/teachers are space-separated id strings."""

    id = db.Column(db.Integer, primary_key=True, unique=True, autoincrement=True)
    name = db.Column(db.String, unique=True)
    address = db.Column(db.String)
    classes = db.Column(db.String)
    pupils = db.Column(db.String)
    teachers = db.Column(db.String)

    def __init__(self, name, address, classes, teachers, pupils):
        self.name = name
        self.address = address
        self.classes = classes
        self.teachers = teachers
        self.pupils = pupils

    def get_list_teachers(self):
        """Teacher ids as a list of strings."""
        return str(self.teachers).split(" ")

    def get_list_classes(self):
        """Class ids as a list of strings."""
        return str(self.classes).split(" ")

    def get_list_pupils(self):
        """Pupil ids as a list of strings."""
        return str(self.pupils).split(" ")
|
from .isi.hbase import cdr_ad_ids_for_image_hash
from .isi.hbase import image_hash_for_cdr_image_id
from .memex.cdr import cdr_ad_ids_for_cdr_image_ids
from .memex.cdr import cdr_fields_for_cdr_ids
from .memex.cdr import cdr_image_ids_for_cdr_ad_ids
from .lattice.sqlite import df_of_tables_for_cdr_ad_ids
from .uncharted.hbase import image_hash_for_memex_ht_id
import pandas as pd
def cdr_ad_ids_for_hashed_cdr_image_id(cdr_image_id, es=None):
    """
    :param str cdr_image_id: The CDR ID of an image
    :param elasticsearch.Elasticsearch es: CDR Connection (can be omitted)
    :returns: `list` -- A list of the CDR Ad IDs in which the image was used.
    """
    # TODO: Hit Gabriel's table of hashes instead of Svebor's table or ES.
    cur_hash = image_hash(cdr_image_id, es)
    # Svebor's table may already hold this hash along with the ads that
    # used a copy of the image.
    ad_ids = cdr_ad_ids_for_image_hash(cur_hash)
    if ad_ids is not None:
        return ad_ids
    # Fall back to elasticsearch for every parent of this specific image.
    return cdr_ad_ids_for_cdr_image_ids([cdr_image_id], es)[0]
def image_hashes(cdr_image_ids, es=None):
    """
    * Check various MEMEX resources for the SHA1 hashes of `cdr_image_ids`
    * If they can't be found, calculate the hashes
    * Returns an ordered list of the results.
    :param list cdr_image_ids: A list of CDR Image IDs
    :param elasticsearch.Elasticsearch es: CDR Connection (can be omitted)
    :returns: `list(str)` -- A list of hashes of `cdr_image_ids` (an entry may
        stay None when no source could provide a hash)
    """
    import requests
    from hashlib import sha1
    # Populate hash dict with ISI table data
    hash_dict = {cdr_image_id: image_hash_for_cdr_image_id(cdr_image_id)
                 for cdr_image_id in cdr_image_ids}
    missing_image_cdr_ids = [x for x in hash_dict if hash_dict[x] is None]
    if not missing_image_cdr_ids:
        return [hash_dict[cdr_image_id] for cdr_image_id in cdr_image_ids]
    # Hit the CDR for original URLs and lookup ids for image_hash
    data_dict = cdr_fields_for_cdr_ids(missing_image_cdr_ids,
                                       ['crawl_data.image_id',
                                        'obj_stored_url'],
                                       es)
    for cdr_image_id in data_dict:
        fields = data_dict[cdr_image_id]
        if 'crawl_data.image_id' in fields:
            hash_dict[cdr_image_id] = \
                image_hash_for_memex_ht_id(fields['crawl_data.image_id'])
        if hash_dict[cdr_image_id] is None:
            # BUG FIX: this used to test/read 'obj_stored_url' on the outer
            # data_dict instead of this image's own field mapping, so the
            # fallback download never ran correctly.
            if 'obj_stored_url' not in fields:
                # No stored URL, so nothing to be done
                continue
            r = requests.get(fields['obj_stored_url'])
            h = sha1()
            h.update(r.text.encode('utf8'))
            hash_dict[cdr_image_id] = h.hexdigest().upper()
    return [hash_dict[cdr_image_id] for cdr_image_id in cdr_image_ids]
def image_hash(cdr_image_id, es=None):
    """
    Invokes `image_hashes` on a single `cdr_image_id`
    :param str cdr_image_id: A CDR Image ID
    :param elasticsearch.Elasticsearch es: CDR Connection (can be omitted)
    :returns: `str` -- SHA1 hash of `cdr_image_id`
    """
    (single_hash,) = image_hashes([cdr_image_id], es)
    return single_hash
def post_dates_for_cdr_ad_ids(cdr_ad_ids):
    """
    Given a list of CDR IDs of advertisements, find their DD IDs and when they were posted.
    :param list cdr_ad_ids: A list of CDR IDs of escort ads.
    :returns: `pandas.DataFrame` -- DataFrame of CDR IDs, DD IDs, and Post Dates
    """
    # Thin wrapper: only the 'dd_id_to_post_date' lattice table is joined in.
    return df_of_tables_for_cdr_ad_ids(cdr_ad_ids, ['dd_id_to_post_date'])
def post_dates_for_hashed_cdr_image_id(cdr_image_id, es=None):
    """
    Given the ID of an image in the CDR, hash it, and get all of the dates \
    on which it was posted.
    :param str cdr_image_id: An image's CDR ID
    :param elasticsearch.Elasticsearch es: CDR Connection (can be omitted)
    :returns: `pandas.DataFrame` -- DataFrame of CDR IDs, Deep Dive IDs, and \
    Post Dates
    """
    cdr_ad_ids = cdr_ad_ids_for_hashed_cdr_image_id(cdr_image_id, es)
    # BUG FIX: `unicode` does not exist on Python 3 and raised NameError.
    # A lone ID comes back as a plain string; wrap it so the downstream call
    # always receives a list.
    if isinstance(cdr_ad_ids, str):
        cdr_ad_ids = [cdr_ad_ids]
    return post_dates_for_cdr_ad_ids(cdr_ad_ids)
def df_of_tables_for_cdr_image_ids(cdr_image_ids, dd_tables):
    """
    :param list cdr_image_ids: List of CDR IDs for images.
    :param list dd_tables: List of target SQLite / Deep Dive tables.
    :returns: `pandas.DataFrame` -- DataFrame of CDR Image IDS, CDR Ad IDS, DD IDs, and other desired tables.
    """
    cdr_ad_ids = cdr_ad_ids_for_cdr_image_ids(cdr_image_ids)
    # One row per image with the ad it belongs to, then join in the requested
    # Deep Dive tables keyed on the ad's CDR ID.
    image_to_ad = pd.DataFrame({'cdr_id': cdr_ad_ids,
                                'cdr_image_id': cdr_image_ids})
    tables_df = df_of_tables_for_cdr_ad_ids(cdr_ad_ids, dd_tables)
    return image_to_ad.join(tables_df, on='cdr_id')
|
import numpy as np
from IPython.display import HTML
from IPython.display import display
import ast
def softmax(x):
    """Return the softmax of sequence *x* as a list of probabilities.

    Shifts by max(x) before exponentiating so large scores do not overflow:
    the naive exp(p)/sum(exp(p)) form yields inf/inf = NaN for inputs around
    710 and above.  The shift leaves the result mathematically unchanged.
    """
    shift = max(x)
    exps = [np.exp(p - shift) for p in x]
    denom = sum(exps)
    return [e / denom for e in exps]
def cstr(s, color='black'):
    """Wrap *s* in an inline-styled HTML span using *color* as the background."""
    return (f"<text style=color:#000;padding-top:1.5px;padding-bottom:1.5px;"
            f"padding-left:2.5px;padding-right:2.5px;"
            f"background-color:{color}>{s} </text>")
def get_clr(value, mode):
    """Map a probability-like *value* in [0, 1] to a hex colour code.

    :param float value: score in [0, 1]
    :param str mode: 'l' selects the blue-to-red LSTM palette (20 steps);
        anything else selects the white-to-blue connectivity palette (11 steps)
    :returns: str -- '#rrggbb' colour
    """
    if mode == 'l':
        colors = ['#85c2e1', '#89c4e2', '#95cae5', '#99cce6', '#a1d0e8', '#b2d9ec', '#baddee', '#c2e1f0', '#eff7fb', '#f9e8e8', '#f9e8e8', '#f9d4d4', '#f9bdbd', '#f8a8a8', '#f68f8f', '#f47676', '#f45f5f', '#f34343', '#f33b3b', '#f42e2e']
        bucket = int((value * 100) / 5)
    else:
        colors = ['#ffffff', '#ecf7fb', '#daeff7', '#c7e7f3', '#b5dfef', '#a2d7eb', '#90cfe7', '#7dc7e3', '#6abfdf', '#58b7db', '#46afd7']
        bucket = int((value * 100) / 10)
    # BUG FIX: value == 1.0 used to index one past the end of the 'l' palette
    # (bucket 20 of a 20-entry list).  Clamp to the last colour.
    return colors[min(bucket, len(colors) - 1)]
def visualize_c(dec_char, text_colours):
    # Render one decoded character next to its colour-coded source string.
    # text_colours is a list of (char, colour) pairs; the decoded character is
    # appended in bold.  "<e>" (end marker) is spelled out with spacing so it
    # is not swallowed as an HTML tag.
    if (dec_char == "<e>"):
        display(HTML(''.join([cstr(ti, color = ci) for ti, ci in text_colours]) + " <b>   < e > </b>   "))
    else:
        display(HTML(''.join([cstr(ti, color = ci) for ti, ci in text_colours]) + " <b>   {}</b>     ".format(dec_char)))
def visualize_l(dec_seq, prob):
    """Colour each decoded character by its activation and render the line."""
    text_colours = [(ch, get_clr(p, 'l')) for ch, p in zip(dec_seq, prob)]
    display(HTML(''.join(cstr(ch, color=clr) for ch, clr in text_colours)))
def visualize_connectivity(N):
    """Render connectivity visualisations for the first N words in conn_vis.txt.

    File format (tab-separated): a header line per word with the true word and
    the decoded length, followed by one line per decoded character holding the
    raw per-source-character scores; words are separated by lines starting
    with "Next".
    """
    # Reading from conv_vis file
    with open("conn_vis.txt", "r", encoding='utf-8') as filepointer:
        lines = filepointer.readlines()
    i = 0
    words_visualized = 0
    while i < len(lines) and words_visualized< N:
        line = lines[i]
        # "Next" marks the boundary between words.
        if line[:4] == "Next":
            words_visualized += 1
            i += 1
            continue
        if line[:4] != "Next":
            # Header line: true word and the number of decoded characters.
            true_word, dec_char_len = line.split('\t')
            dec_word_len = int(dec_char_len)
            i += 1
            true_word_array = [c for c in true_word]
            for j in range(dec_word_len):
                line = lines[i]
                line = line.split('\t')
                dec_char = line[0]
                text_colours = []
                prob = []
                # Columns 1..len(true_word) hold raw scores for each source char.
                for prob_index in range(1,len(true_word)+1) :
                    p = float(line[prob_index])
                    prob.append(p)
                # Normalise scores to probabilities before colour mapping.
                line = softmax(prob)
                for prob_index in range(len(true_word)) :
                    p = float(line[prob_index])
                    true_char = true_word_array[prob_index]
                    text= (true_char, get_clr(p, 'c') )
                    text_colours.append(text)
                visualize_c(dec_char, text_colours)
                i += 1
    print("\n\n")
def visualize_lstm(N, neuron):
    """Visualise one neuron's activation over the decoded sequence for the
    first *N* lstm_vis_<i>.txt files.

    :param int N: number of visualisation files to read (lstm_vis_0 .. N-1)
    :param int neuron: 1-based index of the neuron whose activation to plot
    """
    for i in range(N):
        # BUG FIX: the file handle was never closed; use a context manager.
        with open("lstm_vis_" + str(i) + ".txt", "r") as vis_file:
            # First line is the input sequence; reading it also advances the
            # cursor past it (the value itself is not used further).
            input_seq = vis_file.readline()[:-1]
            dec_seq = []
            prob = []
            for line in vis_file:
                fields = line.split('\t')
                dec_seq.append(fields[0])
                # fields[1] is a Python-literal list of activations; pick the
                # requested (1-based) neuron.
                prob.append(ast.literal_eval(fields[1][:-1])[neuron - 1])
        visualize_l(dec_seq, prob)
        print()
# Render connectivity visualisations for the first 10 words on import/run.
visualize_connectivity(10)
# visualize_lstm(10, 0)
from django.db import models
from django.contrib.auth.models import User
class Like(models.Model):
    """A directed 'like' edge from one user to another."""
    # Deleting either user cascades and removes the like.
    sender = models.ForeignKey(User, on_delete=models.CASCADE, related_name='likes_given')
    receiver = models.ForeignKey(User, on_delete=models.CASCADE, related_name='likes_received')
class Dislike(models.Model):
    """A directed 'dislike' edge from one user to another."""
    # Deleting either user cascades and removes the dislike.
    sender = models.ForeignKey(User, on_delete=models.CASCADE, related_name='dislikes_given')
    receiver = models.ForeignKey(User, on_delete=models.CASCADE, related_name='dislikes_received')
|
from unittest.mock import Mock
import pytest
from tests.assert_utils import assert_file_exists
from tests.test_scene_rendering.simple_scenes import *
def test_write_to_movie_disables_window(using_temp_opengl_config, disabling_caching):
    """write_to_movie should disable window by default"""
    scene = SquareToCircle()
    renderer = scene.renderer
    # Wrap the real update_frame so call interactions can be inspected.
    renderer.update_frame = Mock(wraps=renderer.update_frame)
    scene.render()
    # Headless by default: no preview window while writing a movie.
    assert renderer.window is None
    assert_file_exists(config["output_file"])
# BUG FIX: pytest.mark.skip's documented keyword is `reason`; an unknown
# `msg` kwarg was silently dropped, so the skip reason never showed up.
@pytest.mark.skip(reason="Temporarily skip due to failing in Windows CI")
def test_force_window_opengl_render_with_movies(
    using_temp_opengl_config,
    force_window_config_write_to_movie,
    disabling_caching,
):
    """force_window creates window when write_to_movie is set"""
    scene = SquareToCircle()
    renderer = scene.renderer
    # Wrap the real update_frame so call interactions can be inspected.
    renderer.update_frame = Mock(wraps=renderer.update_frame)
    scene.render()
    # force_window overrides write_to_movie's default headless behaviour.
    assert renderer.window is not None
    assert_file_exists(config["output_file"])
    renderer.window.close()
def test_force_window_opengl_render_with_format(
    using_temp_opengl_config,
    force_window_config_pngs,
    disabling_caching,
):
    """force_window creates window when format is set"""
    scene = SquareToCircle()
    renderer = scene.renderer
    # Wrap the real update_frame so call interactions can be inspected.
    renderer.update_frame = Mock(wraps=renderer.update_frame)
    scene.render()
    # A window must exist even when rendering to PNGs instead of a movie.
    assert renderer.window is not None
    renderer.window.close()
|
import random
import math
from datetime import datetime
import torch
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch import nn
from torch.nn import functional as F
from dataset import ChessPositionDataset
from transform import transformations
from model import ChessPositionNet
from fen import fen2matrix
from config import target_dim
from config import device
from config import validation_frac
from config import epochs
from config import state_file_name
from config import learning_rate
def run_validation(net, data, ids):
    """Return classification accuracy of *net* over the 64 squares of each
    sampled board.

    :param net: model mapping a square tensor to per-class scores
    :param data: dataset indexable as data[i] -> (board, labels), both 8x8
    :param ids: iterable of dataset indices to evaluate
    :returns: float -- fraction of correctly predicted squares
    """
    hits = 0
    for idx in ids:
        board, labels = data[idx]
        for row in range(8):
            for col in range(8):
                square, target = board[row][col], labels[row][col]
                guess = F.log_softmax(net(square), dim=1).argmax()
                hits += int(target == guess)
    return hits / (len(ids) * 8 * 8)
print('-- TRAINING --')
# Datasets
train_data = ChessPositionDataset(img_dir='dataset/train', target_transform=fen2matrix)
# Dataloaders
train_dataloader = DataLoader(train_data, shuffle=True)
print(f'Loaded {len(train_data)} training samples')
print('Using device', device)
model = ChessPositionNet(target_dim=target_dim).to(device)
try:
    model.load_state_dict(torch.load(state_file_name, map_location=device))
except Exception:
    print('No saved state, model state is new')
optimizer = Adam(model.parameters(), lr=learning_rate)
loss = nn.CrossEntropyLoss()
# the number of samples to train in each epoch
total_samples = 2000
# use for training over all samples in each epoch
# total_samples = len(train_data)
# BUG FIX: the sampling pool used to be stored in `train_ids` and was then
# overwritten by the per-epoch training split, so every epoch after the first
# sampled from a shrinking pool and random.sample() raised ValueError once the
# pool fell below total_samples.  Keep the full index pool in its own name.
all_sample_ids = list(range(len(train_data)))
train_val_split_pos = math.floor(total_samples * validation_frac)
print(f'Training samples: {total_samples - train_val_split_pos}')
print(f'Validation samples: {train_val_split_pos}')
for _e in range(epochs):
    # randomly choose training and validation indices
    sample_ids = random.sample(all_sample_ids, total_samples)
    validation_ids = sample_ids[:train_val_split_pos]
    train_ids = sample_ids[train_val_split_pos:]
    start_time = datetime.now().timestamp()
    epoch_loss = 0.0
    print(f'Epoch: {_e+1}/{epochs}')
    count = 0
    for k in train_ids:
        count += 1
        print(f'{count}/{len(train_ids)}',end='\r')
        board, labels = train_data[k]
        board_loss = 0.0
        # Accumulate cross-entropy over all 64 squares of the board before
        # taking one optimiser step per board.
        for i in range(8):
            for j in range(8):
                y = model(board[i][j]).to(device)
                board_loss += loss(y, labels[i][j].reshape(1))
        # backpropagation
        optimizer.zero_grad()
        board_loss.backward()
        optimizer.step()
        epoch_loss += int(board_loss)
    epoch_duration = datetime.now().timestamp() - start_time
    print(f'Epoch took {epoch_duration:.2f} second')
    print(f'Epoch loss: {epoch_loss}')
    validation_acc = run_validation(model, train_data, validation_ids)
    print(f'Validation accuracy: {validation_acc}')
    # save model state to file
    torch.save(model.state_dict(), state_file_name)
|
import base64
import aes_128
import ecb_mode
import string
import binascii
import my_rand
import ecb_detect
# Random 16-byte key, fixed for the lifetime of the process (AES-128 key size).
key = my_rand.my_rand_str(16)
def encrypt_with_fixed_key(plain):
    """ECB-encrypt *plain* plus a fixed base64-decoded suffix under the module key."""
    #string = "Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK"
    secret_b64 = "Um9sbGluJyBpbiBteSA1LjA="
    return ecb_mode.ecb_encrypt(aes_128.aes_128_encrypt,
                                plain + base64.b64decode(secret_b64), key)
def detect_block_size(encrypt_func):
    """Probe *encrypt_func* with growing plaintexts to learn its geometry.

    Returns (block_len, pad_len): the cipher's block size and the number of
    bytes needed to fill the padded final block of the empty-input cipher.
    """
    probe = ''
    base_len = len(encrypt_func(probe))
    pad_len = 0
    # Grow the input until the ciphertext length jumps: exactly pad_len bytes
    # were needed to complete the current last block.
    while True:
        probe += 'a'
        pad_len += 1
        grown_len = len(encrypt_func(probe))
        if grown_len != base_len:
            base_len = grown_len
            break
    # Keep growing until the next jump: the distance is one full block.
    block_len = 0
    while True:
        probe += 'a'
        block_len += 1
        if len(encrypt_func(probe)) != base_len:
            break
    return block_len, pad_len
def is_ecb_mode(encrypt_func, block_len, pad_len):
    """Heuristically decide whether *encrypt_func* runs in ECB mode by looking
    for repeated ciphertext blocks in a long run of identical plaintext."""
    if block_len == 1:
        return False
    # extra block_len for "xxaa aaaa aaax" pad_len overflow: three blocks of
    # 'a' past the pad guarantee at least two identical aligned blocks.
    cipher = encrypt_func('a' * (pad_len + block_len * 3))
    return ecb_detect.detect_ecb_mode(cipher) > 0
def make_one_byte_short_dic(encrypt_func, one_byte_short, offset_block = 0, pre_pad_len = 0):
    """Build a map from ciphertext block -> final plaintext byte.

    Encrypts *one_byte_short* followed by every possible byte value and
    records the ciphertext block at *offset_block* for each candidate.
    """
    block_len = len(one_byte_short) + 1
    start = offset_block * block_len
    prefix = 'a' * pre_pad_len + one_byte_short
    table = {}
    for code in range(256):
        candidate = chr(code)
        block = encrypt_func(prefix + candidate)[start:start + block_len]
        table[block] = candidate
    return table
def byte_at_a_time_ecb_decryption(encrypt_func):
    # Classic byte-at-a-time ECB decryption (cryptopals set 2 challenge 12):
    # recover the secret suffix the oracle appends, one byte per round, by
    # comparing a one-byte-short block against a dictionary of all 256
    # possible final bytes.  Python 2 code (print statements, int division).
    block_len, pad_len = detect_block_size(encrypt_func)
    print "block size:", block_len, "pad_len:", pad_len
    empty_len = len(encrypt_func(''))
    if empty_len % block_len != 0:
        print "Cannot detect block count"
        return None
    block_cnt = empty_len / block_len
    if not is_ecb_mode(encrypt_func, block_len, pad_len):
        print "Not ECB mode"
        return None
    else:
        print "Is ECB mode"
    r = ''
    # Start one byte short of a whole-ciphertext-sized run of 'a's so the
    # first unknown byte lands at the end of the last block.
    one_byte_short = 'a' * (block_len * block_cnt - 1)
    for i in range(block_cnt * block_len - pad_len):
        # Last block_len-1 known characters: padding plus recovered bytes.
        _one_byte_short = (one_byte_short + r)[1-block_len:]
        #print len(_one_byte_short), _one_byte_short
        dic = make_one_byte_short_dic(encrypt_func, _one_byte_short)
        #print len(one_byte_short), one_byte_short
        cipher = encrypt_func(one_byte_short)
        #print len(cipher)
        #print cipher[: block_cnt*block_len]
        #print (block_cnt-1)*block_len, block_cnt*block_len
        # The last full block now ends with the next unknown secret byte.
        cipher_block = cipher[(block_cnt-1)*block_len : block_cnt*block_len]
        c = dic[cipher_block]
        one_byte_short = one_byte_short[1:]
        r += c
        #print 'r=', r
    return r
def main():
    # Recover the oracle's secret suffix one byte at a time and report.
    decrypted_text = byte_at_a_time_ecb_decryption(encrypt_with_fixed_key)
    if decrypted_text:
        print "succeed:"
        print decrypted_text
    else:
        print "failed"
# Script entry point.
if __name__ == "__main__":
    main()
|
# File: hw4_part2.py
# Author: Mitchell Angelos
# Date: 3/3/19
# Section: 12
# E-mail: a242@umbc.edu
# Description: This program gathers a list of unique superpowers that the user
# enters. When the user's done, it prints the number of powers.
SENTINEL_VALUE = "QUIT"  # input that ends the collection loop
UNDERPOWERED = "You're underpowered!"
PERFECT_HERO = "You're a perfect hero!"
TOO_STRONG = "You're too strong! Wow!"
POWER_THRESHOLD = 3  # power count that makes a "perfect" hero
def main():
    """Collect unique superpowers until 'QUIT', then rate the hero."""
    superpowers = []
    powerAmount = 0  # count of distinct superpowers entered so far
    tempPower = input("Please enter a superpower ('QUIT' to stop): ")
    while tempPower != SENTINEL_VALUE:
        if tempPower in superpowers:
            print("You've already entered that superpower!")
        else:
            superpowers.append(str(tempPower))
            powerAmount += 1
        tempPower = input("Please enter a superpower ('QUIT' to stop): ")
    print("You have " + str(powerAmount) + " superpowers.")
    # Rate the hero against the threshold.
    if powerAmount < POWER_THRESHOLD:
        print(UNDERPOWERED)
    elif powerAmount == POWER_THRESHOLD:
        print(PERFECT_HERO)
    else:
        print(TOO_STRONG)

main()
|
# 3.12 权重衰减
import d2lzh as d2l
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss, nn
# Synthetic high-dimensional regression data: few training samples (20) versus
# many inputs (200) to make overfitting — and hence weight decay — visible.
n_train, n_test, num_inputs = 20, 100, 200
# True model: y = 0.01 * sum(x) + 0.05 + noise
true_w, true_b = nd.ones((num_inputs, 1))*0.01, 0.05
features = nd.random.normal(shape=(n_train+n_test, num_inputs))
labels = nd.dot(features, true_w)+true_b
labels += nd.random.normal(scale=0.01, shape =labels.shape)
train_features, test_features = features[ :n_train, : ], features[n_train: , : ]
train_labels, test_labels = labels[ :n_train], labels[n_train: ]
def init_params():
    """Freshly initialise weight/bias parameters with gradients attached."""
    weight = nd.random.normal(scale=1, shape=(num_inputs, 1))
    bias = nd.zeros(shape=(1, ))
    for param in (weight, bias):
        param.attach_grad()
    return [weight, bias]
def l2_penalty(w):
    """Half the sum of squares of *w* (the L2 regularisation term)."""
    return (w * w).sum() / 2
# Training/test setup: per-sample SGD (batch_size 1) over 100 epochs.
batch_size, num_epochs, lr = 1, 100, 0.003
net, loss = d2l.linreg, d2l.squared_loss
train_iter = gdata.DataLoader(gdata.ArrayDataset(train_features, train_labels),batch_size, shuffle=True)
def fit_and_plot(lambd):
    """Train linear regression with an explicit L2 penalty weighted by lambd,
    then plot train/test loss curves and print the learned weight norm."""
    w, b = init_params()
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                # Data loss plus the weight-decay penalty (bias not decayed).
                l = loss(net(X, w, b), y) + lambd* l2_penalty(w)
            l.backward()
            d2l.sgd([w, b],lr,batch_size)
        train_ls.append(loss(net(train_features, w, b),train_labels).mean().asscalar())
        test_ls.append(loss(net(test_features, w, b),test_labels).mean().asscalar())
    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss', range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('L2 norm of w:', w.norm().asscalar())
def fit_and_plot_gluon(wd):
    """Train a 1-unit Dense net with Gluon's built-in weight decay *wd*
    applied to weights only, then plot losses and print the weight norm."""
    net = nn.Sequential()
    net.add(nn.Dense(1))
    net.initialize(init.Normal(sigma=1))
    # Decay the weight parameters (parameter names typically end in 'weight').
    trainer_w = gluon.Trainer(net.collect_params('.*weight'), 'sgd',
                              {'learning_rate': lr, 'wd': wd})
    # Do not decay the bias parameters (names typically end in 'bias').
    trainer_b = gluon.Trainer(net.collect_params('.*bias'), 'sgd',
                              {'learning_rate': lr})
    train_ls, test_ls = [], []
    for _ in range(num_epochs):
        for X, y in train_iter:
            with autograd.record():
                l = loss(net(X), y)
            l.backward()
            # Step both trainers so weights and biases update separately.
            trainer_w.step(batch_size)
            trainer_b.step(batch_size)
        train_ls.append(loss(net(train_features), train_labels).mean().asscalar())
        test_ls.append(loss(net(test_features),test_labels).mean().asscalar())
    # BUG FIX: 'srange' was a typo for 'range' and raised NameError here.
    d2l.semilogy(range(1, num_epochs + 1), train_ls, 'epochs', 'loss',
                 range(1, num_epochs + 1), test_ls, ['train', 'test'])
    print('L2 norm of w:', net[0].weight.data().norm().asscalar())
import jwt
import pytest
from datetime import timedelta
from flask import Flask
from jwt import ExpiredSignatureError, InvalidSignatureError
from flask_jwt_extended import (
JWTManager, create_access_token, decode_token, create_refresh_token,
get_jti
)
from flask_jwt_extended.config import config
from flask_jwt_extended.exceptions import JWTDecodeError
from tests.utils import get_jwt_manager, encode_token
@pytest.fixture(scope='function')
def app():
    """Fresh Flask app per test with flask-jwt-extended configured."""
    app = Flask(__name__)
    app.config['JWT_SECRET_KEY'] = 'change_me'
    app.config['JWT_TOKEN_LOCATION'] = ['cookies', 'headers']
    # CSRF double-submit protection on, so decode paths exercise csrf checks.
    app.config['JWT_COOKIE_CSRF_PROTECT'] = True
    JWTManager(app)
    return app
@pytest.fixture(scope='function')
def default_access_token(app):
    """Minimal claim set for a fresh access token, used as an encoding base."""
    with app.test_request_context():
        return {
            'jti': '1234',
            config.identity_claim_key: 'username',
            'type': 'access',
            'fresh': True,
            'csrf': 'abcd'
        }
@pytest.mark.parametrize("user_loader_return", [{}, None])
def test_no_user_claims(app, user_loader_return):
    """Empty/None user claims are omitted from the raw JWT but still surface
    (as an empty mapping) through decode_token."""
    jwtM = get_jwt_manager(app)
    @jwtM.user_claims_loader
    def empty_user_loader_return(identity):
        return user_loader_return
    # Identity should not be in the actual token, but should be in the data
    # returned via the decode_token call
    with app.test_request_context():
        token = create_access_token('username')
        pure_decoded = jwt.decode(token, config.decode_key, algorithms=[config.algorithm])
        assert config.user_claims_key not in pure_decoded
        extension_decoded = decode_token(token)
        assert config.user_claims_key in extension_decoded
# NOTE(review): despite the name, this is parametrized over *every* required
# claim, not just 'jti' — each missing claim must raise JWTDecodeError.
@pytest.mark.parametrize("missing_claim", ['jti', 'type', 'identity', 'fresh', 'csrf'])
def test_missing_jti_claim(app, default_access_token, missing_claim):
    del default_access_token[missing_claim]
    missing_jwt_token = encode_token(app, default_access_token)
    with pytest.raises(JWTDecodeError):
        with app.test_request_context():
            decode_token(missing_jwt_token, csrf_value='abcd')
def test_bad_token_type(app, default_access_token):
    """A token whose 'type' claim is neither access nor refresh must be rejected."""
    default_access_token['type'] = 'banana'
    bad_type_token = encode_token(app, default_access_token)
    with pytest.raises(JWTDecodeError):
        with app.test_request_context():
            decode_token(bad_type_token)
def test_expired_token(app):
    """Tokens created with a negative lifetime must fail signature validation
    with ExpiredSignatureError for both access and refresh tokens."""
    with app.test_request_context():
        # Negative delta: the token is already expired at creation time.
        delta = timedelta(minutes=-5)
        access_token = create_access_token('username', expires_delta=delta)
        refresh_token = create_refresh_token('username', expires_delta=delta)
        with pytest.raises(ExpiredSignatureError):
            decode_token(access_token)
        with pytest.raises(ExpiredSignatureError):
            decode_token(refresh_token)
def test_never_expire_token(app):
    """expires_delta=False must produce tokens without any 'exp' claim."""
    with app.test_request_context():
        access_token = create_access_token('username', expires_delta=False)
        refresh_token = create_refresh_token('username', expires_delta=False)
        for token in (access_token, refresh_token):
            decoded = decode_token(token)
            assert 'exp' not in decoded
def test_alternate_identity_claim(app, default_access_token):
    """JWT_IDENTITY_CLAIM should rename the claim that carries the identity."""
    app.config['JWT_IDENTITY_CLAIM'] = 'sub'
    # Ensure decoding fails if the claim isn't there
    token = encode_token(app, default_access_token)
    with pytest.raises(JWTDecodeError):
        with app.test_request_context():
            decode_token(token)
    # Ensure the claim exists in the decoded jwt
    del default_access_token['identity']
    default_access_token['sub'] = 'username'
    token = encode_token(app, default_access_token)
    with app.test_request_context():
        decoded = decode_token(token)
        assert 'sub' in decoded
        assert 'identity' not in decoded
def test_get_jti(app, default_access_token):
    """get_jti must return the encoded token's 'jti' claim unchanged."""
    token = encode_token(app, default_access_token)
    with app.test_request_context():
        assert default_access_token['jti'] == get_jti(token)
def test_encode_decode_callback_values(app, default_access_token):
    """Key callbacks default to the configured secret and are replaced by
    user-registered encode/decode key loaders."""
    jwtM = get_jwt_manager(app)
    app.config['JWT_SECRET_KEY'] = 'foobarbaz'
    with app.test_request_context():
        # Default callbacks fall back to JWT_SECRET_KEY.
        assert jwtM._decode_key_callback({}) == 'foobarbaz'
        assert jwtM._encode_key_callback({}) == 'foobarbaz'
    @jwtM.decode_key_loader
    def get_decode_key_1(claims):
        return 'different secret'
    @jwtM.encode_key_loader
    def get_decode_key_2(identity):
        return 'different secret'
    # After registration the loaders take precedence over the config value.
    assert jwtM._decode_key_callback({}) == 'different secret'
    assert jwtM._encode_key_callback('') == 'different secret'
def test_custom_encode_decode_key_callbacks(app, default_access_token):
    """Encoding with a loader key while decoding with the config key must fail;
    registering a matching decode loader makes decoding succeed again."""
    jwtM = get_jwt_manager(app)
    app.config['JWT_SECRET_KEY'] = 'foobarbaz'
    @jwtM.encode_key_loader
    def get_encode_key_1(identity):
        assert identity == 'username'
        return 'different secret'
    # Tokens are signed with 'different secret' but verified against the
    # config secret, so the signature check must fail.
    with pytest.raises(InvalidSignatureError):
        with app.test_request_context():
            token = create_access_token('username')
            decode_token(token)
    with pytest.raises(InvalidSignatureError):
        with app.test_request_context():
            token = create_refresh_token('username')
            decode_token(token)
    @jwtM.decode_key_loader
    def get_decode_key_1(claims):
        assert claims['identity'] == 'username'
        return 'different secret'
    # With both loaders registered the keys match and decoding succeeds.
    with app.test_request_context():
        token = create_access_token('username')
        decode_token(token)
        token = create_refresh_token('username')
        decode_token(token)
|
# -*- coding: utf-8 -*-
from PIL import Image
from keras.utils.vis_utils import plot_model
from keras.models import *
from keras.layers import *
import glob
import pickle
import numpy as np
import tensorflow.gfile as gfile
import os
# Make the Graphviz binaries reachable for keras' plot_model on this machine.
os.environ["PATH"] += os.pathsep + 'D:\Graphviz 2.44.1\\bin'
##################################################################################
# Constants: character sets and hyperparameters
NUMBER = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
LOWERCASE = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
             'v', 'w', 'x', 'y', 'z']
UPPERCASE = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U',
             'V', 'W', 'X', 'Y', 'Z']
CAPTCHA_CHARSET = NUMBER  # captcha character set (digits only)
CAPTCHA_LEN = 4  # captcha length in characters
CAPTCHA_HEIGHT = 60  # captcha image height in pixels
CAPTCHA_WIDTH = 160  # captcha image width in pixels
TRAIN_DATA_DIR = './train-dataset/'  # training captcha image directory
TEST_DATA_DIR = './test-dataset/'
BATCH_SIZE = 100
EPOCHS = 10
OPT = 'adam'
LOSS = 'binary_crossentropy'
MODEL_DIR = './model/'
MODEL_FORMAT = '.h5'
HISTORY_DIR = './history/'
HISTORY_FORMAT = '.history'
filename_str = "{}captcha_{}_{}_bs_{}_epochs_{}{}"
# Model architecture diagram file
MODEL_VIS_FILE = './Data/captcha_classfication' + '.png'
# Saved model file
MODEL_FILE = filename_str.format(MODEL_DIR, OPT, LOSS, str(BATCH_SIZE), str(EPOCHS), MODEL_FORMAT)
# Training history file
HISTORY_FILE = filename_str.format(HISTORY_DIR, OPT, LOSS, str(BATCH_SIZE), str(EPOCHS), HISTORY_FORMAT)
# 将 RGB 验证码图像转为灰度图
def rgb2gray(img):
    """Convert RGB image array(s) to grayscale using the ITU-R 601 luma weights.

    Y' = 0.299 R + 0.587 G + 0.114 B
    https://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
    """
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(img[..., :3], luma_weights)
# 对验证码中每个字符进行 one-hot 编码
def text2vec(text, length=CAPTCHA_LEN, charset=CAPTCHA_CHARSET):
    """One-hot encode *text* into a flat (length * len(charset),) vector.

    :raises ValueError: when len(text) != length
    """
    if len(text) != length:
        raise ValueError('Error: length of captcha should be {}, but got {}'.format(length, len(text)))
    # e.g. a 4-digit captcha becomes a (4*10,) vector; each character's hot
    # index is its charset position plus its block offset.
    vec = np.zeros(length * len(charset))
    for i, ch in enumerate(text):
        vec[charset.index(ch) + i * len(charset)] = 1
    return vec
# 将验证码向量解码为对应字符
def vec2text(vector):
    """Decode a one-hot (or score) vector back into its captcha string."""
    arr = np.asarray(vector)
    # One row of charset scores per captcha character position.
    rows = np.reshape(arr, [CAPTCHA_LEN, -1])
    return ''.join(CAPTCHA_CHARSET[np.argmax(row)] for row in rows)
# 适配 Keras 图像数据格式
def fit_keras_channels(batch, rows=CAPTCHA_HEIGHT, cols=CAPTCHA_WIDTH):
    """Reshape a grayscale *batch* to Keras' configured channel ordering.

    Returns (reshaped_batch, input_shape) for single-channel images.
    """
    if K.image_data_format() == 'channels_first':
        input_shape = (1, rows, cols)
    else:
        input_shape = (rows, cols, 1)
    batch = batch.reshape((batch.shape[0],) + input_shape)
    return batch, input_shape
def create_model(input_shape):
    """Build and compile the 3-conv captcha classifier with four 10-way heads."""
    # Input layer
    inputs = Input(shape=input_shape, name="inputs")
    # Convolution block 1
    conv1 = Conv2D(32, (3, 3), name="conv1")(inputs)
    relu1 = Activation('relu', name="relu1")(conv1)
    # Convolution block 2
    conv2 = Conv2D(32, (3, 3), name="conv2")(relu1)
    relu2 = Activation('relu', name="relu2")(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), padding='same', name="pool2")(relu2)
    # Convolution block 3
    conv3 = Conv2D(64, (3, 3), name="conv3")(pool2)
    relu3 = Activation('relu', name="relu3")(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), padding='same', name="pool3")(relu3)
    # Flatten the pooled feature map for the dense heads.
    x = Flatten()(pool3)
    # Dropout
    x = Dropout(0.25)(x)
    # Four 10-way softmax heads, one per captcha character.
    x = [Dense(10, activation='softmax', name='fc%d' % (i + 1))(x) for i in range(4)]
    # Concatenate the four heads so the output matches the label vector layout.
    outs = Concatenate()(x)
    # Define the model's inputs and outputs, then compile.
    model = Model(inputs=inputs, outputs=outs)
    model.compile(optimizer=OPT, loss=LOSS, metrics=['accuracy'])
    return model
if __name__ == '__main__':
    ##################################################################################
    # Load the training set
    X_train = []
    Y_train = []
    for filename in glob.glob(TRAIN_DATA_DIR + '*.png'):
        X_train.append(np.array(Image.open(filename)))
        # BUG FIX: str.lstrip/rstrip treat the argument as a *character set*,
        # not a prefix/suffix, so labels containing any of those characters
        # would be silently mangled.  Derive the label from the basename.
        Y_train.append(os.path.splitext(os.path.basename(filename))[0])
    # Process training images: list -> rgb(numpy)
    X_train = np.array(X_train, dtype=np.float32)
    # rgb -> gray
    X_train = rgb2gray(X_train)
    # normalize
    X_train = X_train / 255
    # Fit keras channels
    X_train, input_shape = fit_keras_channels(X_train)
    print(X_train.shape, type(X_train))
    print(input_shape)
    # One-hot encode the training labels.
    Y_train = list(Y_train)
    for i in range(len(Y_train)):
        Y_train[i] = text2vec(Y_train[i])
    Y_train = np.asarray(Y_train)
    print(Y_train.shape, type(Y_train))
    ##################################################################################
    # Load the test set
    X_test = []
    Y_test = []
    for filename in glob.glob(TEST_DATA_DIR + '*.png'):
        X_test.append(np.array(Image.open(filename)))
        # Same basename-derived label extraction as for the training set.
        Y_test.append(os.path.splitext(os.path.basename(filename))[0])
    # list -> rgb -> gray -> normalization -> fit keras
    X_test = np.array(X_test, dtype=np.float32)
    X_test = rgb2gray(X_test)
    X_test = X_test / 255
    X_test, _ = fit_keras_channels(X_test)
    # One-hot encode the test labels.
    Y_test = list(Y_test)
    for i in range(len(Y_test)):
        Y_test[i] = text2vec(Y_test[i])
    Y_test = np.asarray(Y_test)
    print(X_test.shape, type(X_test))
    print(Y_test.shape, type(Y_test))
    ##################################################################################
    # Build the captcha recognition model
    model = create_model(input_shape)
    # Print a layer summary
    model.summary()
    # Model visualisation.  If `pydot` fails to call GraphViz, install
    # GraphViz (https://www.graphviz.org/), pip install pydot_ng/graphviz,
    # and ensure the executables are on PATH (see
    # https://www.programmersought.com/article/3044448710/).
    plot_model(model, to_file=MODEL_VIS_FILE, show_shapes=True)
    # Train the model
    history = model.fit(X_train,
                        Y_train,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCHS,
                        verbose=2,
                        validation_data=(X_test, Y_test))
    # Sanity-check a single prediction (change index to inspect others).
    print(vec2text(Y_test[11]))
    yy = model.predict(X_test[11].reshape(1, 60, 160, 1))
    print(vec2text(yy))
    # Save the model
    if not gfile.Exists(MODEL_DIR):
        gfile.MakeDirs(MODEL_DIR)
    model.save(MODEL_FILE)
    print('Saved trained model at %s ' % MODEL_FILE)
    # Save the training history.
    # NOTE(review): newer Keras versions key this as 'accuracy' rather than
    # 'acc' -- confirm against the installed version.
    print(history.history['acc'])
    print(history.history.keys())
    if not gfile.Exists(HISTORY_DIR):
        gfile.MakeDirs(HISTORY_DIR)
    with open(HISTORY_FILE, 'wb') as f:
        pickle.dump(history.history, f)
    print(HISTORY_FILE)
|
def fib2(n, a=0, b=1, i=1):
    """Return the n-th Fibonacci number (1-indexed: fib2(1) == 0), or None
    for n < 1.  Iterative accumulator form of the original tail recursion."""
    if n < 1:
        return None
    while i != n:
        a, b, i = a + b, a, i + 1
    return a
import matplotlib.pyplot as plt
import numpy as np
import math
# Projectile parameters (golf-ball-like mass, launch at 45 degrees).
m = 0.04593
g = 9.81
v_0 = 50
angle = math.pi / 4
vy_0 = 50* math.sin(angle)
# NOTE(review): the horizontal component conventionally uses cos(angle);
# sin is used here too.  At 45 degrees the values coincide, so the plot is
# unaffected, but this breaks for any other angle -- confirm intent.
vx_0 = 50 * math.sin(angle)
x_0 = 0
y_0 = 0
# Drag-related constants (air density, cross-section, drag coefficient).
ro = 1.2
A = 0.427
c_D = 0.3
# F_D = 0.5 * ro * v * v * c_D * A
# F_G = m*g
# Sample the full flight time (2*vy_0/g is the vacuum time of flight).
time = np.linspace(0,2*vy_0/g,100)
y = -0.5*time*time*g + vy_0*time + y_0
x = vx_0 * time + x_0
v = -1 *time*g
# NOTE(review): computed but never plotted, and it mixes a position formula
# with a force term -- looks like an unfinished drag experiment.
x_with_F_D = -0.5*time*time*g + v_0*time + x_0 - 0.5 * ro * v * v * c_D * A
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
# Put the axes through the origin and hide the top/right spines.
ax.spines['left'].set_position('zero')
ax.spines['bottom'].set_position('zero')
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
plt.plot(x,y, 'r')
plt.show()
|
# global variables
game_still_going = True  # flipped to False by the win/tie checkers
# board
# Flat list of 9 cells in row-major order; "-" marks an empty cell.
game_board = ["-", "-", "-",
              "-", "-", "-",
              "-", "-", "-"]
winner = None  # "X", "O", or None (tie / game in progress)
current_player = "X"  # X always moves first
# display board
def display_board():
print(game_board[0] + " | " + game_board[1] + " | " + game_board[2])
print(game_board[3] + " | " + game_board[4] + " | " + game_board[5])
print(game_board[6] + " | " + game_board[7] + " | " + game_board[8])
# playing tic tac toe
def play_game():
# print the initial board
display_board()
while game_still_going:
# manage the moves of current player
handle_turn(current_player)
# check game status
check_if_game_over()
# change to other player
flip_player()
# ending the game
if winner == "X" or winner == "O":
print(winner + " won!")
elif winner is None:
print("Tie game!")
# function for moves of a player
def handle_turn(player):
print(player + "'s turn.")
position = input("choose a position from 1 to 9: ")
valid = False
while not valid:
while position not in ["1", "2", "3", "4", "5", "6", "7", "8", "9"]:
position = input("Choose a position from 1-9: ")
position = int(position) - 1
if game_board[position] == "-":
valid = True
else:
print("You can't go there. Go again!")
game_board[position] = player
display_board()
def check_if_game_over():
    # Update `winner` and `game_still_going` via the two checkers; a win takes
    # effect through check_for_winner, a full board through check_if_tie.
    check_for_winner()
    check_if_tie()
def check_for_winner():
    """Set the global `winner` to the winning mark, or None when nobody won."""
    global winner
    # All three checkers must run (each one clears game_still_going on a win),
    # so collect their results eagerly instead of short-circuiting.
    candidates = (check_rows(), check_columns(), check_diagonals())
    winner = next((mark for mark in candidates if mark), None)
    return
# checks if three in a column
def check_columns():
global game_still_going
# check if value of columns is same but not -
column_1 = game_board[0] == game_board[3] == game_board[6] != "-"
column_2 = game_board[1] == game_board[4] == game_board[7] != "-"
column_3 = game_board[2] == game_board[5] == game_board[8] != "-"
if column_1 or column_2 or column_3:
game_still_going = False
# return winning player
if column_1:
return game_board[0]
elif column_2:
return game_board[1]
elif column_3:
return game_board[2]
return
# checks if three in a column
def check_rows():
global game_still_going
# check if value of rows is same but not -
row_1 = game_board[0] == game_board[1] == game_board[2] != "-"
row_2 = game_board[3] == game_board[4] == game_board[5] != "-"
row_3 = game_board[6] == game_board[7] == game_board[8] != "-"
if row_1 or row_2 or row_3:
game_still_going = False
# return winning player
if row_1:
return game_board[0]
elif row_2:
return game_board[3]
elif row_3:
return game_board[6]
return
# checks if three diagonally
def check_diagonals():
    """Return the winning mark if either diagonal holds three identical non-blank marks."""
    global game_still_going
    # (corner, step): 0 with step 4 -> 0,4,8; 2 with step 2 -> 2,4,6.
    for corner, step in ((0, 4), (2, 2)):
        a, b, c = game_board[corner], game_board[corner + step], game_board[corner + 2 * step]
        if a == b == c != "-":
            game_still_going = False
            return a
    return None
def check_if_tie():
    """Stop the game when no blank cell remains on the board."""
    global game_still_going
    board_full = all(cell != "-" for cell in game_board)
    if board_full:
        game_still_going = False
    return None
def flip_player():
    """Swap the global `current_player` between "X" and "O".

    Any other value is left unchanged, matching the original if/elif chain.
    """
    global current_player
    swap = {"X": "O", "O": "X"}
    current_player = swap.get(current_player, current_player)
    return None
play_game() |
import math
def pow(x):
    """Return x raised to itself (x**x).

    NOTE(review): shadows the builtin ``pow`` and, despite the name,
    computes the self-power rather than a two-argument power.
    """
    result = x ** x
    return result
def print_graph(function, width, height):
    """Print an ASCII graph of `function` sampled at x = 0 .. width-1.

    Samples are bucketed into `height` horizontal bands scaled by the
    maximum sample value; each row prints " *" for samples falling in that
    row's band and "  " otherwise. A column of '|' draws the y axis and a
    row of underscores draws the x axis. Output is identical to the
    original nested-loop version.
    """
    # Build the sample list in one pass instead of pre-sizing with [None]*width.
    data = [function(x) for x in range(width)]
    magnitude = max(data)
    # Hoist the loop-invariant band size: one row spans [band*h, band*(h+1)).
    band = magnitude / height
    for h in range(height, 0, -1):
        print("|", end="")
        for value in data:
            if band * h <= value < band * (h + 1):
                print(" *", end="")
            else:
                print("  ", end="")
        print("")
    # x axis: two underscores per sample column.
    print("|", end="")
    for _ in range(width):
        print("__", end="")
    print("")
def f(x):
    """Demo curve for the plot: 0.3 * sin(x); falls back to 0 on ValueError."""
    try:
        value = 0.3 * math.sin(x)
    except ValueError:
        return 0
    return value
print_graph(f, 80, 40)
|
import logging
# Module-level logger for the environment-layer classes; DEBUG so all
# layer activity is visible during development.
logger = logging.getLogger('iSDM.environment')
logger.setLevel(logging.DEBUG)
class EnvironmentalLayer(object):
    """Base class for a gridded environmental data layer.

    Stores the grid resolution so a layer can later be re-aggregated to a
    coarser or finer grid ("back and forth", per the original comment).
    """

    def __init__(self, **kwargs):
        """Initialize the layer.

        :param kwargs: must contain 'resolution'; a missing key raises
            KeyError (unchanged from the original behaviour).
        """
        # you want to be able to aggregate at a different resolution
        # and back/forth, right?
        self.resolution = kwargs['resolution']

    def scale_resolution(self, degrees):
        """Re-aggregate the layer to a grid of `degrees` resolution (stub).

        BUG FIX: the original signature lacked `self`, so calling this on an
        instance raised TypeError; it is now a proper instance method.
        """
        pass

    def load_data(self, file_path=None):
        """Load layer data from `file_path` (stub; subclasses override)."""
        pass
class ClimateLayer(EnvironmentalLayer):
    """Environmental layer holding climate data (stub, no behaviour yet)."""
    pass
class LandCoverlayer(EnvironmentalLayer):
    """Environmental layer holding land-cover data (stub).

    NOTE(review): the class name uses a lowercase 'l' in 'layer', unlike the
    sibling classes — renaming would break importers, so it is left as-is.
    """
    pass
class LandUseLayer(EnvironmentalLayer):
    """Environmental layer holding land-use data (stub, no behaviour yet)."""
    pass
|
from functools import wraps
from flask import abort, redirect, url_for
from flask_login import current_user
from sqlalchemy import func
def roles_required(roles):
    """Restrict a view to users holding at least one of the given roles.

    Unauthenticated users are redirected to the login page; authenticated
    users lacking every listed role receive a 403.

    :param roles: list of acceptable roles
    :return: decorator wrapping the view function
    """
    def decorator(view):
        @wraps(view)
        def guarded(*args, **kwargs):
            # Guard clauses: authentication first, then authorization.
            if not current_user.is_authenticated:
                return redirect(url_for('users.login'))
            if not current_user.allowed(roles):
                abort(403)
            return view(*args, **kwargs)
        return guarded
    return decorator
def get_count(q):
    """Count the rows a SQLAlchemy query would return, without fetching them.

    :param q: SQLAlchemy Query
    :return: number of matching rows (int)
    """
    # Replace the select list with COUNT(*) and drop ordering, which is
    # irrelevant to the count and may block the rewrite on some backends.
    # NOTE(review): passing a list to with_only_columns() is deprecated in
    # SQLAlchemy 1.4+ — confirm the project's pinned version.
    counting_stmt = q.statement.with_only_columns([func.count()]).order_by(None)
    return q.session.execute(counting_stmt).scalar()
|
#!/usr/bin/python
import os
import sys
# Run numbers for each data-taking mode (alternate sets kept in comments).
xenonList = [2, 3, 4, 5, 7]  # [8,9,10]
sourceList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]  # [13,14,15,16,17,18,19,20,21,22,23,24]
octetRange = [30, 59]
# Alternate run modes, kept for reference:
# for index in range(octetRange[0], octetRange[1], 1):
#     os.system("./findADCthreshold.exe octet %i" % index)
# for index in xenonList:
#     os.system("./findADCthreshold.exe xenon %i" % index)
for run_number in sourceList:
    os.system("./findADCthreshold.exe source %i" % run_number)
|
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import logging
import settings
import ephem
from datetime import datetime, date, time
# Log to bot.log with timestamps; INFO level covers the handlers below.
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                    level=logging.INFO,
                    filename='bot.log'
                    )
# NOTE(review): captured once at import time and never used below —
# presumably leftover; const() takes fresh datetime.now() per request.
now = datetime.now()
def const(bot, update):
    """Reply with the constellation the named planet is currently in.

    Expects a message like "/planet Mars": the second word is resolved to a
    PyEphem body class and evaluated for today's date. Unknown planet names
    and messages with no planet argument both get the friendly error reply.
    """
    user_text = update.message.text
    user_text_list = user_text.split()
    try:
        # getattr raises AttributeError for unknown names;
        # user_text_list[1] raises IndexError for a bare "/planet".
        planet_class = getattr(ephem, user_text_list[1].capitalize())
        planet_instance = planet_class(datetime.now().strftime("%Y/%m/%d"))
        logging.info("User: %s, Chat id: %s, Message: %s", update.message.chat.username,
                     update.message.chat.id, update.message.text)
        # Compute the reply once instead of re-running ephem.constellation().
        reply = f'Планета находится в созвездии: {ephem.constellation(planet_instance)}'
        update.message.reply_text(reply)
        logging.info(reply)
    except (AttributeError, IndexError):
        # BUG FIX: IndexError (missing planet argument) previously crashed
        # the handler; now it gets the same "unknown planet" reply.
        logging.info("Не знаю такой планеты :(")
        update.message.reply_text("Не знаю такой планеты :(")
def greet_user(bot, update):
    """Handle /start: log the call and echo a fixed greeting."""
    greeting = 'Вызван /start'
    logging.info(greeting)
    update.message.reply_text(greeting)
def talk_to_me(bot, update):
    """Echo any text message back to the sender, prefixed with a greeting."""
    message = update.message
    reply = "Привет {}! Ты написал: {}".format(message.chat.first_name, message.text)
    logging.info("User: %s, Chat id: %s, Message: %s", message.chat.username,
                 message.chat.id, message.text)
    message.reply_text(reply)
def main():
    """Create the bot, register handlers, and run long polling until stopped."""
    mybot = Updater(settings.API_KEY, request_kwargs=settings.PROXY)
    logging.info('Бот запускается')
    dp = mybot.dispatcher
    dp.add_handler(CommandHandler("start", greet_user))
    # BUG FIX: register /planet BEFORE the generic text handler. Handlers in
    # the same group are tried in registration order, and in the old
    # python-telegram-bot API used here (bot, update handler signature)
    # Filters.text also matched commands — so "/planet ..." was swallowed by
    # talk_to_me and the planet handler never ran.
    dp.add_handler(CommandHandler("planet", const))
    dp.add_handler(MessageHandler(Filters.text, talk_to_me))
    mybot.start_polling()
    mybot.idle()
main() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.