index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
992,300 | 7a31ab4e4cdc315aca0656e73b8b65337bd0eef9 | number=sum=0
k=int(input("Enter the value"))
for i in range(0,k):
number=i
sum=0
while number>0:
digit = number%10
sum=sum+pow(digit,3)
number= int(number/10)
if i==sum:
print(i) |
992,301 | 303de2fdf813e4e17bd3888a4cc2f0d92a147140 | #!/usr/bin/env python
# encoding: utf-8
import npyscreen
class TinyForm(npyscreen.FormBaseNew):
DEFAULT_NEXTRELY = 0
BLANK_LINES_BASE = 0
class TestApp(npyscreen.NPSApp):
def main(self):
F = TinyForm(name = "Welcome to Npyscreen",
framed=False,
lines=1,
columns=0,
minimum_lines = 1)
ms = F.add(npyscreen.TitleText, name='Test', )
F.edit()
if __name__ == "__main__":
App = TestApp()
App.run()
|
992,302 | 7dbfca13ced1fb25d1363fca634bbd45560025a6 | def binary_search(arr, ele):
start = 0
end = len(arr) - 1
while start <= end:
mid = (start + end) // 2
if (arr[mid] == ele):
return mid
elif (arr[mid] > ele):
end = mid - 1
else:
start = mid + 1
return -1
print("Enter The Size Of The Array : ", end="\n")
size = int(input())
arr = []
print("Enter ", size, "Elements In Ascending Order: ")
for i in range(size):
ele = int(input())
arr.append(ele)
print("Enter The Element To Do Binary Search : ", end="\n")
ele = int(input())
res = binary_search(arr, ele)
if (res == -1):
print("The Element " , ele, "Is Not Founded In The Array ")
else:
print("The Element " , ele, " Is present At Index - ", res)
|
992,303 | 1c7212d85d47218566c97c05cf41a248346c7a32 | #!/usr/bin/env python
#$ python setup.py build_ext --inplace
from numpy.distutils.command import build_src
# a bit of monkeypatching ...
import Cython.Compiler.Main
build_src.Pyrex = Cython
build_src.have_pyrex = True
def have_pyrex():
import sys
try:
import Cython.Compiler.Main
sys.modules['Pyrex'] = Cython
sys.modules['Pyrex.Compiler'] = Cython.Compiler
sys.modules['Pyrex.Compiler.Main'] = Cython.Compiler.Main
return True
except ImportError:
return False
build_src.have_pyrex = have_pyrex
##########################
# BEGIN additionnal code #
##########################
from numpy.distutils.misc_util import appendpath
from numpy.distutils import log
from os.path import join as pjoin, dirname
from distutils.dep_util import newer_group
from distutils.errors import DistutilsError
def generate_a_pyrex_source(self, base, ext_name, source, extension):
''' Monkey patch for numpy build_src.build_src method
Uses Cython instead of Pyrex.
Assumes Cython is present
'''
if self.inplace:
target_dir = dirname(base)
else:
target_dir = appendpath(self.build_src, dirname(base))
target_file = pjoin(target_dir, ext_name + '.c')
depends = [source] + extension.depends
if self.force or newer_group(depends, target_file, 'newer'):
import Cython.Compiler.Main
log.info("cythonc:> %s" % (target_file))
self.mkpath(target_dir)
options = Cython.Compiler.Main.CompilationOptions(
defaults=Cython.Compiler.Main.default_options,
include_path=extension.include_dirs,
output_file=target_file)
cython_result = Cython.Compiler.Main.compile(source, options=options)
if cython_result.num_errors != 0:
raise DistutilsError("%d errors while compiling %r with Cython" % (cython_result.num_errors, source))
return target_file
build_src.build_src.generate_a_pyrex_source = generate_a_pyrex_source
########################
# END additionnal code #
########################
def configuration(parent_package='',top_path=None):
INCLUDE_DIRS = []
LIBRARY_DIRS = []
LIBRARIES = []
# PETSc
import os
PETSC_DIR = os.environ['PETSC_DIR']
PETSC_ARCH = os.environ.get('PETSC_ARCH', '')
from os.path import join, isdir
if PETSC_ARCH and isdir(join(PETSC_DIR, PETSC_ARCH)):
INCLUDE_DIRS += [join(PETSC_DIR, PETSC_ARCH, 'include'),
join(PETSC_DIR, 'include')]
LIBRARY_DIRS += [join(PETSC_DIR, PETSC_ARCH, 'lib')]
else:
if PETSC_ARCH: pass # XXX should warn ...
INCLUDE_DIRS += [join(PETSC_DIR, 'include')]
LIBRARY_DIRS += [join(PETSC_DIR, 'lib')]
LIBRARIES += [#'petscts', 'petscsnes', 'petscksp',
#'petscdm', 'petscmat', 'petscvec',
'petsc']
# PETSc for Python
import petsc4py
INCLUDE_DIRS += [petsc4py.get_include()]
# Configuration
from numpy.distutils.misc_util import Configuration
config = Configuration('', parent_package, top_path)
config.add_extension('CPSmooth',
sources = ['CPSmooth.pyx',
'CPSmoothimpl.c'],
depends = ['CPSmoothimpl.h'],
include_dirs=INCLUDE_DIRS + [os.curdir],
libraries=LIBRARIES,
library_dirs=LIBRARY_DIRS,
runtime_library_dirs=LIBRARY_DIRS)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
992,304 | 09888e1e35fe7b5cecf8bf075a79715672a15cb3 | from CardDetector import CardDetector
import cv2
import pyfirmata
import time
import VideoStream
import time
import CardSort
cardDetector = CardDetector()
#cap = cv2.VideoCapture(0)
#for i in(range(10)):
# Capture frame-by-frame
# ret, frame = cap.read()
IM_WIDTH = 1280
IM_HEIGHT = 720
FRAME_RATE = 10
# videostream = VideoStream.VideoStream((IM_WIDTH,IM_HEIGHT),FRAME_RATE,2,0).start()
time.sleep(1) # Give the camera time to warm up
cap = cv2.VideoCapture(0)
cap.set(15, -7.0)
board=pyfirmata.Arduino('COM7')#need to change to make work
iter8 = pyfirmata.util.Iterator(board)
iter8.start()
arm=board.get_pin('d:10:s')
vert=board.get_pin('d:3:s')
succ=board.get_pin('d:11:s')
arm.write(90)
vert.write(45)
succ.write(20)#20 is resting
time.sleep(3)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cards = cardDetector.detectImage(frame)
if cv2.waitKey(1) & 0xFF == ord('s'):
if len(cards) == 0:
print("No cards detected")
continue
cards_sorted, ranks_sorted = CardSort.sort_cards_by_x(cards)
swap_sequence = CardSort.get_swap_sequence(ranks_sorted)
print("Card swaps", swap_sequence)
#swap_sequence = [(0, 3)]
#swap_sequence = [(0, 1), (3, 4), (0, 3)]
CardSort.execute_swap_sequence(swap_sequence, arm,vert,succ)
#CardSort.move_card(0, 3, arm, vert, succ)
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
992,305 | e5d67862453e2e3496baac2d0ea16133b8dc38cc | import numpy as np
## Accuracy calc
def accuracy(y_test, y_pred):
return np.sum(y_test == y_pred) / y_pred.shape[0]
|
992,306 | 54e8a219141ca080f2e574fc92626f00b0cb19b4 | import requests
import urllib
import urllib2
import json
from urllib2 import urlopen
import bs4 as BeautifulSoup
import sys
import re
community = open("list-stable-community-larbi.txt", "w")
stable = []
with open("list-community-larbi.txt") as f:
userBlog = f.readlines()
userBlog = [x.lower() for x in userBlog]
with open("list-followers-larbi_org.txt") as f:
userTwitter = f.readlines()
userTwitter = [x.lower() for x in userTwitter]
print "Blog : " + str(len(userBlog))
for user in userBlog:
for foll in userTwitter:
if user in foll or foll in user:
stable.append(user)
uniqUsers = set(stable)
print "Stable : " + str(len(uniqUsers))
print uniqUsers
for s in uniqUsers:
community.write(s + "\n") |
992,307 | 699b0fb6c6a85a7e9da85563d0550dc3ee914619 | """
=========================
Project -> File: leetcode -> 1306_Jump_Game_III.py
Author: zhangchao
=========================
Given an array of non-negative integers arr,
you are initially positioned at start index of the array.
When you are at index i, you can jump to i + arr[i] or i - arr[i],
check if you can reach to any index with value 0.
Notice that you can not jump outside of the array at any time.
Example 1:
Input:
arr = [4,2,3,0,3,1,2], start = 5
Output:
true
Explanation:
All possible ways to reach at index 3 with value 0 are:
index 5 -> index 4 -> index 1 -> index 3
index 5 -> index 6 -> index 4 -> index 1 -> index 3
Example 2:
Input:
arr = [4,2,3,0,3,1,2], start = 0
Output:
true
Explanation:
One possible way to reach at index 3 with value 0 is:
index 0 -> index 4 -> index 1 -> index 3
Example 3:
Input:
arr = [3,0,2,1,2], start = 2
Output:
false
Explanation:
There is no way to reach at index 1 with value 0.
Constraints:
1 <= arr.length <= 5 * 10^4
0 <= arr[i] < arr.length
0 <= start < arr.length
"""
class Solution(object):
def canReach(self, arr, start):
"""
:type arr: List[int]
:type start: int
:rtype: bool
"""
cache = {}
q = [start]
while q:
tmp = []
for v in q:
if arr[v] == 0:
return True
l, r = v - arr[v], v + arr[v]
if 0 <= l < len(arr) and l not in cache:
tmp.append(l)
cache[l] = 1
if 0 <= r < len(arr) and r not in cache:
tmp.append(r)
cache[r] = 1
q = tmp
return False
examples = [
{
"input": {
"arr": [4, 2, 3, 0, 3, 1, 2],
"start": 5
},
"output": True
}, {
"input": {
"arr": [4, 2, 3, 0, 3, 1, 2],
"start": 0
},
"output": True
}, {
"input": {
"arr": [3, 0, 2, 1, 2],
"start": 2
},
"output": False
},
]
import time
if __name__ == '__main__':
solution = Solution()
for n in dir(solution):
if not n.startswith('__'):
func = getattr(solution, n)
print(func)
for example in examples:
print '----------'
start = time.time()
v = func(**example['input'])
end = time.time()
print v, v == example['output'], end - start
|
992,308 | fab655e8b21b0caaf6f20f95d14e058815d4e94f | import time
import random
def maxHeap(arr, n, i):
largest = i
l = 2 * i + 1
r = 2 * i + 2
if l < n and arr[i] < arr[l]:
largest = l
if r < n and arr[largest] < arr[r]:
largest = r
if largest != i:
arr[i],arr[largest] = arr[largest],arr[i]
maxHeap(arr, n, largest)
def heapSort(arr):
for i in range(len(arr), -1, -1):
maxHeap(arr, len(arr), i)
for i in range(len(arr)-1, 0, -1):
arr[i], arr[0] = arr[0], arr[i]
maxHeap(arr, i, 0)
def main():
cases=50
size=150
int_max=2146483647
f=open("normal.txt",'w')
for i in range(cases):
arr=[0]*size
f.write(str(size)+"\n")
for j in range(size):
arr[j]=int(random.randrange(0,int_max))
start=time.time()
heapSort(arr)
end=time.time()
f.write(str(end-start)+"\n")
size+=150
f.close()
main()
|
992,309 | dfff9db720145f45efec953a5b0c334f38bf592c | # 349. 两个数组的交集
# 给定两个数组,编写一个函数来计算它们的交集。
# 示例 1:
# 输入: nums1 = [1,2,2,1], nums2 = [2,2]
# 输出: [2]
# 示例 2:
# 输入: nums1 = [4,9,5], nums2 = [9,4,9,8,4]
# 输出: [9,4]
# 说明:
# 输出结果中的每个元素一定是唯一的。
# 我们可以不考虑输出结果的顺序。
class Solution(object):
def intersection(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
res = []
nums1 = list(set(nums1))
nums2 = list(set(nums2))
if len(nums1) <= len(nums2):
for i in nums1:
if nums2.count(i) != 0:
res.append(i)
else:
for i in nums2:
if nums1.count(i) != 0:
res.append(i)
return res
sol = Solution()
print(sol.intersection([4,9,5],[9,4,9,8,4])) |
992,310 | ccdefc8e1d51f8fe07a9326e0a7417f6bdc1a2c6 | import numpy as np
import os
import random
import pandas as pd
import configargparse as argparse
import itertools
from group_robustness_fairness.prediction_utils.util import yaml_write
from sklearn.model_selection import ParameterGrid
from group_robustness_fairness.omop.train_model import filter_cohort
parser = argparse.ArgumentParser(
config_file_parser_class=argparse.YAMLConfigFileParser,
)
parser.add_argument(
"--data_path", type=str, default="", help="The root data path",
)
parser.add_argument(
"--cohort_path",
type=str,
default="",
help="File name for the file containing metadata",
)
parser.add_argument(
"--experiment_name_prefix",
type=str,
default="scratch",
help="The name of the experiment",
)
parser.add_argument(
"--grid_size",
type=int,
default=None,
help="The number of elements in the random grid",
)
parser.add_argument(
"--attributes",
type=str,
nargs="*",
required=False,
default=["race_eth", "gender_concept_name", "age_group"],
)
parser.add_argument(
"--tasks",
type=str,
nargs="*",
required=False,
default=["hospital_mortality", "LOS_7", "readmission_30"],
)
parser.add_argument(
"--num_folds", type=int, required=False, default=10,
)
parser.add_argument("--seed", type=int, default=234)
def generate_grid(
global_tuning_params_dict,
model_tuning_params_dict=None,
experiment_params_dict=None,
grid_size=None,
seed=None,
):
the_grid = list(ParameterGrid(global_tuning_params_dict))
if model_tuning_params_dict is not None:
local_grid = []
for i, pair_of_grids in enumerate(
itertools.product(the_grid, list(ParameterGrid(model_tuning_params_dict)))
):
local_grid.append({**pair_of_grids[0], **pair_of_grids[1]})
the_grid = local_grid
if grid_size is not None:
if seed is not None:
random.seed(seed)
np.random.seed(seed)
np.random.shuffle(the_grid)
the_grid = the_grid[:grid_size]
if experiment_params_dict is not None:
outer_grid = list(ParameterGrid(experiment_params_dict))
final_grid = []
for i, pair_of_grids in enumerate(itertools.product(outer_grid, the_grid)):
final_grid.append({**pair_of_grids[0], **pair_of_grids[1]})
return final_grid
else:
return the_grid
if __name__ == "__main__":
args = parser.parse_args()
cohort = pd.read_parquet(args.cohort_path)
cohort = filter_cohort(cohort)
common_grid = {
"global": {
"lr": [1e-4, 1e-5],
"batch_size": [512],
"num_epochs": [150],
"gamma": [1.0],
"early_stopping": [True],
"early_stopping_patience": [25],
},
"model_specific": {
"feedforward_net": {
"drop_prob": [0.25, 0.75],
"num_hidden": [1, 3],
"hidden_dim": [128, 256],
},
"logistic_regression": {"num_hidden": [0], "weight_decay": [0, 1e-2, 1e-1]},
},
"experiment": {
"label_col": args.tasks,
"fold_id": [str(i + 1) for i in range(args.num_folds)],
},
}
grids = {
"erm_tuning": {
"global": common_grid["global"],
"model_specific": common_grid["model_specific"]["feedforward_net"],
"experiment": {
**common_grid["experiment"],
**{
"group_objective_type": ["standard"],
"balance_groups": [False],
"selection_metric": ["loss"],
},
},
},
"erm_group_aware": {
"global": common_grid["global"],
"model_specific": common_grid["model_specific"]["feedforward_net"],
"experiment": [
{
**common_grid["experiment"],
**{
"group_objective_type": ["standard"],
"sensitive_attribute": args.attributes,
"balance_groups": [False],
"selection_metric": ["auc_min", "loss_bce_max"],
},
},
{
**common_grid["experiment"],
**{
"group_objective_type": ["standard"],
"sensitive_attribute": args.attributes,
"balance_groups": [True],
"selection_metric": ["loss", "auc_min", "loss_bce_max"],
},
},
],
},
"erm_subset_tuning": {
"global": common_grid["global"],
"model_specific": [
common_grid["model_specific"]["feedforward_net"],
common_grid["model_specific"]["logistic_regression"],
],
"experiment": [
{
**common_grid["experiment"],
**{"group_objective_type": ["standard"]},
"subset_attribute": [attribute],
"subset_group": list(cohort[attribute].unique()),
}
for attribute in args.attributes
],
},
}
for experiment_name, value in grids.items():
print(experiment_name)
the_grid = generate_grid(
grids[experiment_name]["global"],
grids[experiment_name]["model_specific"],
grids[experiment_name]["experiment"],
)
print("{}, length: {}".format(experiment_name, len(the_grid)))
experiment_dir_name = "{}_{}".format(
args.experiment_name_prefix, experiment_name
)
grid_df = pd.DataFrame(the_grid)
config_path = os.path.join(
args.data_path, "experiments", experiment_dir_name, "config"
)
os.makedirs(config_path, exist_ok=True)
grid_df.to_csv(os.path.join(config_path, "config.csv"), index_label="id")
for i, config_dict in enumerate(the_grid):
yaml_write(config_dict, os.path.join(config_path, "{}.yaml".format(i)))
|
992,311 | 0736931053f71ca3d102281a00ba983d825760ba | # Generated by Django 2.0.5 on 2018-11-26 08:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('bo', '0010_auto_20181114_1356'),
]
operations = [
migrations.CreateModel(
name='Absence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('JUSTI', 'Justifié'), ('PASJU', 'Pas justifié')], default='PASJU', max_length=5)),
('cours', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bo.Cours')),
('eleve', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bo.Eleve')),
],
),
migrations.CreateModel(
name='Devoir',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('NOTE', 'Noté'), ('PANO', 'Pas noté')], default='PANO', max_length=4)),
('bareme', models.CharField(blank=True, choices=[('VIN', '20'), ('DIX', '10')], default='VIN', max_length=3, null=True)),
('cours', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='bo.Cours')),
],
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.FloatField()),
('cours', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bo.Cours')),
('eleve', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bo.Eleve')),
],
),
]
|
992,312 | 632d988fb2320e48ff844c499b4713c7eb643e42 | from django.apps import AppConfig
class AppmoviesapiConfig(AppConfig):
name = 'appmoviesapi'
|
992,313 | a8875ad54ab6dab1a18855c2c817e40465322cd7 | import pytest
from isic_challenge_scoring.classification import ClassificationMetric, ClassificationScore
@pytest.mark.parametrize(
'target_metric',
[
ClassificationMetric.AUC,
ClassificationMetric.BALANCED_ACCURACY,
ClassificationMetric.AVERAGE_PRECISION,
],
)
def test_score(classification_truth_file_path, classification_prediction_file_path, target_metric):
score = ClassificationScore.from_file(
classification_truth_file_path,
classification_prediction_file_path,
target_metric,
)
assert isinstance(score.validation, float)
|
992,314 | 65cb5506cc12818421e62b5bf41d8d51bc849ec7 | /home/hanh/anaconda3/lib/python3.7/genericpath.py |
992,315 | 138dda6a37a0e52fdc19fcfb94d61ff3d7b375fd |
#Driver Path
driver_path = '/Users/[yourusername]/Documents/Scraping/linkedin/chromedriver'
#Login
email = '[yourusername]@gmail.com'
password = '[yourpassword]'
#Input Filenames
company_url_path = '/Users/[yourusername]/Documents/Scraping/Linkedin/company_urls.csv'
profile_url_path = '/Users/[yourusername]/Documents/Scraping/Linkedin/profile_urls.csv'
#records to run each launch
#headless?
headless = False
|
992,316 | 63e5806ecf0695a781b6acd639cd70b9b1d67a52 | '''
Created on Jun 3, 2013
@author: sumanravuri
'''
import sys
import numpy as np
import scipy.io as sp
import copy
import argparse
from Vector_Math import Vector_Math
import datetime
from scipy.special import expit
from RNNLM_Weight import RNNLM_Weight
class Recurrent_Neural_Network_Language_Model(object, Vector_Math):
"""features are stored in format max_seq_len x nseq x nvis where n_max_obs is the maximum number of observations per sequence
and nseq is the number of sequences
weights are stored as nvis x nhid at feature level
biases are stored as 1 x nhid
rbm_type is either rbm_gaussian_bernoulli, rbm_bernoulli_bernoulli, logistic"""
def __init__(self, config_dictionary): #completed
"""variables for Neural Network: feature_file_name(read from)
required_variables - required variables for running system
all_variables - all valid variables for each type"""
self.feature_file_name = self.default_variable_define(config_dictionary, 'feature_file_name', arg_type='string')
self.features, self.feature_sequence_lens = self.read_feature_file()
self.model = RNNLM_Weight()
self.output_name = self.default_variable_define(config_dictionary, 'output_name', arg_type='string')
self.required_variables = dict()
self.all_variables = dict()
self.required_variables['train'] = ['mode', 'feature_file_name', 'output_name']
self.all_variables['train'] = self.required_variables['train'] + ['label_file_name', 'num_hiddens', 'weight_matrix_name',
'initial_weight_max', 'initial_weight_min', 'initial_bias_max', 'initial_bias_min', 'save_each_epoch',
'do_pretrain', 'pretrain_method', 'pretrain_iterations',
'pretrain_learning_rate', 'pretrain_batch_size',
'do_backprop', 'backprop_method', 'backprop_batch_size', 'l2_regularization_const',
'num_epochs', 'num_line_searches', 'armijo_const', 'wolfe_const',
'steepest_learning_rate', 'momentum_rate', 'max_num_decreases',
'conjugate_max_iterations', 'conjugate_const_type',
'truncated_newton_num_cg_epochs', 'truncated_newton_init_damping_factor',
'krylov_num_directions', 'krylov_num_batch_splits', 'krylov_num_bfgs_epochs', 'second_order_matrix',
'krylov_use_hessian_preconditioner', 'krylov_eigenvalue_floor_const',
'fisher_preconditioner_floor_val', 'use_fisher_preconditioner',
'structural_damping_const',
'validation_feature_file_name', 'validation_label_file_name',
'use_maxent', 'seed']
self.required_variables['test'] = ['mode', 'feature_file_name', 'weight_matrix_name', 'output_name']
self.all_variables['test'] = self.required_variables['test'] + ['label_file_name']
def dump_config_vals(self):
no_attr_key = list()
print "********************************************************************************"
print "Neural Network configuration is as follows:"
for key in self.all_variables[self.mode]:
if hasattr(self,key):
print key, "=", eval('self.' + key)
else:
no_attr_key.append(key)
print "********************************************************************************"
print "Undefined keys are as follows:"
for key in no_attr_key:
print key, "not set"
print "********************************************************************************"
def default_variable_define(self,config_dictionary,config_key, arg_type='string',
default_value=None, error_string=None, exit_if_no_default=True,
acceptable_values=None):
#arg_type is either int, float, string, int_comma_string, float_comma_string, boolean
try:
if arg_type == 'int_comma_string':
return self.read_config_comma_string(config_dictionary[config_key], needs_int=True)
elif arg_type == 'float_comma_string':
return self.read_config_comma_string(config_dictionary[config_key], needs_int=False)
elif arg_type == 'int':
return int(config_dictionary[config_key])
elif arg_type == 'float':
return float(config_dictionary[config_key])
elif arg_type == 'string':
return config_dictionary[config_key]
elif arg_type == 'boolean':
if config_dictionary[config_key] == 'False' or config_dictionary[config_key] == '0' or config_dictionary[config_key] == 'F':
return False
elif config_dictionary[config_key] == 'True' or config_dictionary[config_key] == '1' or config_dictionary[config_key] == 'T':
return True
else:
print config_dictionary[config_key], "is not valid for boolean type... Acceptable values are True, False, 1, 0, T, or F... Exiting now"
sys.exit()
else:
print arg_type, "is not a valid type, arg_type can be either int, float, string, int_comma_string, float_comma_string... exiting now"
sys.exit()
except KeyError:
if error_string != None:
print error_string
else:
print "No", config_key, "defined,",
if default_value == None and exit_if_no_default:
print "since", config_key, "must be defined... exiting now"
sys.exit()
else:
if acceptable_values != None and (default_value not in acceptable_values):
print default_value, "is not an acceptable input, acceptable inputs are", acceptable_values, "... Exiting now"
sys.exit()
if error_string == None:
print "setting", config_key, "to", default_value
return default_value
def read_feature_file(self, feature_file_name = None): #completed
if feature_file_name is None:
feature_file_name = self.feature_file_name
try:
feature_data = sp.loadmat(feature_file_name)
features = feature_data['features'].astype(np.int32)
sequence_len = feature_data['feature_sequence_lengths']
sequence_len = np.reshape(sequence_len, (sequence_len.size,))
return features, sequence_len#in MATLAB format
except IOError:
print "Unable to open ", feature_file_name, "... Exiting now"
sys.exit()
def read_label_file(self, label_file_name = None): #completed
"""label file is a two-column file in the form
sent_id label_1
sent_id label_2
...
"""
if label_file_name is None:
label_file_name = self.label_file_name
try:
label_data = sp.loadmat(label_file_name)['labels'].astype(np.int32)
return label_data#[:,1], label_data[:,0]#in MATLAB format
except IOError:
print "Unable to open ", label_file_name, "... Exiting now"
sys.exit()
def batch_size(self, feature_sequence_lens):
return np.sum(feature_sequence_lens)
def read_config_comma_string(self,input_string,needs_int=False):
output_list = []
for elem in input_string.split(','):
if '*' in elem:
elem_list = elem.split('*')
if needs_int:
output_list.extend([int(elem_list[1])] * int(elem_list[0]))
else:
output_list.extend([float(elem_list[1])] * int(elem_list[0]))
else:
if needs_int:
output_list.append(int(elem))
else:
output_list.append(float(elem))
return output_list
def levenshtein_string_edit_distance(self, string1, string2): #completed
dist = dict()
string1_len = len(string1)
string2_len = len(string2)
for idx in range(-1,string1_len+1):
dist[(idx, -1)] = idx + 1
for idx in range(-1,string2_len+1):
dist[(-1, idx)] = idx + 1
for idx1 in range(string1_len):
for idx2 in range(string2_len):
if string1[idx1] == string2[idx2]:
cost = 0
else:
cost = 1
dist[(idx1,idx2)] = min(
dist[(idx1-1,idx2)] + 1, # deletion
dist[(idx1,idx2-1)] + 1, # insertion
dist[(idx1-1,idx2-1)] + cost, # substitution
)
if idx1 and idx2 and string1[idx1]==string2[idx2-1] and string1[idx1-1] == string2[idx2]:
dist[(idx1,idx2)] = min (dist[(idx1,idx2)], dist[idx1-2,idx2-2] + cost) # transposition
return dist[(string1_len-1, string2_len-1)]
def check_keys(self, config_dictionary): #completed
print "Checking config keys...",
exit_flag = False
config_dictionary_keys = config_dictionary.keys()
if self.mode == 'train':
correct_mode = 'train'
incorrect_mode = 'test'
elif self.mode == 'test':
correct_mode = 'test'
incorrect_mode = 'train'
for req_var in self.required_variables[correct_mode]:
if req_var not in config_dictionary_keys:
print req_var, "needs to be set for", correct_mode, "but is not."
if exit_flag == False:
print "Because of above error, will exit after checking rest of keys"
exit_flag = True
for var in config_dictionary_keys:
if var not in self.all_variables[correct_mode]:
print var, "in the config file given is not a valid key for", correct_mode
if var in self.all_variables[incorrect_mode]:
print "but", var, "is a valid key for", incorrect_mode, "so either the mode or key is incorrect"
else:
string_distances = np.array([self.levenshtein_string_edit_distance(var, string2) for string2 in self.all_variables[correct_mode]])
print "perhaps you meant ***", self.all_variables[correct_mode][np.argmin(string_distances)], "\b*** (levenshtein string edit distance", np.min(string_distances), "\b) instead of ***", var, "\b***?"
if exit_flag == False:
print "Because of above error, will exit after checking rest of keys"
exit_flag = True
if exit_flag:
print "Exiting now"
sys.exit()
else:
print "seems copacetic"
def check_labels(self): #want to prune non-contiguous labels, might be expensive
#TODO: check sentids to make sure seqences are good
print "Checking labels..."
if len(self.labels.shape) != 2 :
print "labels need to be in (n_samples,2) format and the shape of labels is ", self.labels.shape, "... Exiting now"
sys.exit()
if self.labels.shape[0] != sum(self.feature_sequence_lens):
print "Number of examples in feature file: ", sum(self.feature_sequence_lens), " does not equal size of label file, ", self.labels.shape[0], "... Exiting now"
sys.exit()
# if [i for i in np.unique(self.labels)] != range(np.max(self.labels)+1):
# print "Labels need to be in the form 0,1,2,....,n,... Exiting now"
sys.exit()
# label_counts = np.bincount(np.ravel(self.labels[:,1])) #[self.labels.count(x) for x in range(np.max(self.labels)+1)]
# print "distribution of labels is:"
# for x in range(len(label_counts)):
# print "#", x, "\b's:", label_counts[x]
print "labels seem copacetic"
def forward_layer(self, inputs, weights, biases, weight_type, prev_hiddens = None, hidden_hidden_weights = None): #completed
if weight_type == 'logistic':
if hidden_hidden_weights is None:
return self.softmax(self.weight_matrix_multiply(inputs, weights, biases))
else:
return self.softmax(self.weight_matrix_multiply(inputs, weights, biases) + hidden_hidden_weights[(inputs),:])
elif weight_type == 'rbm_gaussian_bernoulli' or weight_type == 'rbm_bernoulli_bernoulli':
# layer = weights[(inputs),:] + self.weight_matrix_multiply(prev_hiddens, hidden_hidden_weights, biases)
# np.clip(layer, a_min = 0.0, out = layer)
# return layer
return self.sigmoid(weights[(inputs),:] + self.weight_matrix_multiply(prev_hiddens, hidden_hidden_weights, biases))
#added to test finite differences calculation for pearlmutter forward pass
elif weight_type == 'linear': #only used for the logistic layer
return self.weight_matrix_multiply(inputs, weights, biases)
else:
print "weight_type", weight_type, "is not a valid layer type.",
print "Valid layer types are", self.model.valid_layer_types,"Exiting now..."
sys.exit()
# def forward_pass_linear(self, inputs, verbose=True, model=None):
# #to test finite differences calculation for pearlmutter forward pass, just like forward pass, except it spits linear outputs
# if model == None:
# model = self.model
# architecture = self.model.get_architecture()
# max_sequence_observations = inputs.shape[0]
# num_hiddens = architecture[1]
# num_sequences = inputs.shape[2]
# num_outs = architecture[2]
# hiddens = np.zeros((max_sequence_observations, num_sequences, num_hiddens))
# outputs = np.zeros((max_sequence_observations, num_sequences, num_outs))
#
# #propagate hiddens
# hiddens[0,:,:] = self.forward_layer(inputs[0,:,:], self.model.weights['visible_hidden'], self.model.bias['hidden'],
# self.model.weight_type['visible_hidden'], self.model.init_hiddens,
# self.model.weights['hidden_hidden'])
# outputs[0,:,:] = self.forward_layer(hiddens[0,:,:], self.model.weights['hidden_output'], self.model.bias['output'],
# self.model.weight_type['hidden_output'])
# for sequence_index in range(1, max_sequence_observations):
# sequence_input = inputs[sequence_index,:,:]
# hiddens[sequence_index,:,:] = self.forward_layer(sequence_input, self.model.weights['visible_hidden'], self.model.bias['hidden'],
# self.model.weight_type['visible_hidden'], hiddens[sequence_index-1,:,:],
# self.model.weights['hidden_hidden'])
# outputs[sequence_index,:,:] = self.forward_layer(hiddens[sequence_index,:,:], self.model.weights['hidden_output'], self.model.bias['output'],
# 'linear')
# #find the observations where the sequence has ended,
# #and then zero out hiddens and outputs, so nothing horrible happens during backprop, etc.
# zero_input = np.where(self.feature_sequence_lens >= sequence_index)
# hiddens[sequence_index,:,zero_input] = 0.0
# outputs[sequence_index,:,zero_input] = 0.0
#
# del hiddens
# return outputs
def forward_pass_single_batch(self, inputs, model = None, return_hiddens = False, linear_output = False):
"""forward pass for single batch size. Mainly for speed in this case
"""
if model == None:
model = self.model
num_observations = inputs.size
hiddens = model.weights['visible_hidden'][(inputs),:]
hiddens[:1,:] += self.weight_matrix_multiply(model.init_hiddens, model.weights['hidden_hidden'], model.bias['hidden'])
# np.clip(hiddens[0, :], a_min = 0.0, out = hiddens[0, :])
expit(hiddens[0,:], hiddens[0,:])
for time_step in range(1, num_observations):
hiddens[time_step:time_step+1,:] += self.weight_matrix_multiply(hiddens[time_step-1:time_step,:], model.weights['hidden_hidden'], model.bias['hidden'])
# np.clip(hiddens[time_step, :], a_min = 0.0, out = hiddens[time_step, :])
expit(hiddens[time_step,:], hiddens[time_step,:]) #sigmoid
if 'visible_output' in model.weights:
outputs = self.forward_layer(hiddens, model.weights['hidden_output'], model.bias['output'], model.weight_type['hidden_output'],
model.weights['visible_output'])
else:
outputs = self.forward_layer(hiddens, model.weights['hidden_output'], model.bias['output'], model.weight_type['hidden_output'])
if return_hiddens:
return outputs, hiddens
else:
del hiddens
return outputs
def forward_pass(self, inputs, feature_sequence_lens, model=None, return_hiddens=False, linear_output=False): #completed
"""forward pass each layer starting with feature level
inputs in the form n_max_obs x n_seq x n_vis"""
if model == None:
model = self.model
architecture = self.model.get_architecture()
max_sequence_observations = inputs.shape[0]
num_sequences = inputs.shape[1]
num_hiddens = architecture[1]
num_outs = architecture[2]
hiddens = np.zeros((max_sequence_observations, num_sequences, num_hiddens))
outputs = np.zeros((max_sequence_observations, num_sequences, num_outs))
#propagate hiddens
hiddens[0,:,:] = self.forward_layer(inputs[0,:], model.weights['visible_hidden'], model.bias['hidden'],
model.weight_type['visible_hidden'], model.init_hiddens,
model.weights['hidden_hidden'])
if linear_output:
if 'visible_output' in model.weights:
outputs[0,:,:] = self.forward_layer(hiddens[0,:,:], model.weights['hidden_output'], model.bias['output'],
'linear', model.weights['visible_output'])
else:
outputs[0,:,:] = self.forward_layer(hiddens[0,:,:], model.weights['hidden_output'], model.bias['output'],
'linear')
else:
if 'visible_output' in model.weights:
outputs[0,:,:] = self.forward_layer(hiddens[0,:,:], model.weights['hidden_output'], model.bias['output'],
model.weight_type['hidden_output'], model.weights['visible_output'])
else:
outputs[0,:,:] = self.forward_layer(hiddens[0,:,:], model.weights['hidden_output'], model.bias['output'],
model.weight_type['hidden_output'])
for sequence_index in range(1, max_sequence_observations):
sequence_input = inputs[sequence_index,:]
hiddens[sequence_index,:,:] = self.forward_layer(sequence_input, model.weights['visible_hidden'], model.bias['hidden'],
model.weight_type['visible_hidden'], hiddens[sequence_index-1,:,:],
model.weights['hidden_hidden'])
if linear_output:
if 'visible_output' in model.weights:
outputs[sequence_index,:,:] = self.forward_layer(hiddens[sequence_index,:,:], model.weights['hidden_output'], model.bias['output'],
'linear', model.weights['visible_output'])
else:
outputs[sequence_index,:,:] = self.forward_layer(hiddens[sequence_index,:,:], model.weights['hidden_output'], model.bias['output'],
'linear')
else:
if 'visible_output' in model.weights:
outputs[sequence_index,:,:] = self.forward_layer(hiddens[sequence_index,:,:], model.weights['hidden_output'], model.bias['output'],
model.weight_type['hidden_output'], model.weights['visible_output'])
else:
outputs[sequence_index,:,:] = self.forward_layer(hiddens[sequence_index,:,:], model.weights['hidden_output'], model.bias['output'],
model.weight_type['hidden_output'])
#find the observations where the sequence has ended,
#and then zero out hiddens and outputs, so nothing horrible happens during backprop, etc.
zero_input = np.where(feature_sequence_lens <= sequence_index)
hiddens[sequence_index,zero_input,:] = 0.0
outputs[sequence_index,zero_input,:] = 0.0
if return_hiddens:
return outputs, hiddens
else:
del hiddens
return outputs
def flatten_output(self, output, feature_sequence_lens=None):
"""outputs in the form of max_obs_seq x n_seq x n_outs get converted to form
n_data x n_outs, so we can calculate classification accuracy and cross-entropy
"""
if feature_sequence_lens == None:
feature_sequence_lens = self.feature_sequence_lens
num_outs = output.shape[2]
# num_seq = output.shape[1]
flat_output = np.zeros((self.batch_size(feature_sequence_lens), num_outs))
cur_index = 0
for seq_index, num_obs in enumerate(feature_sequence_lens):
flat_output[cur_index:cur_index+num_obs, :] = copy.deepcopy(output[:num_obs, seq_index, :])
cur_index += num_obs
return flat_output
def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled
"""calculates perplexity with flat labels
"""
return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])
def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled
"""calculates perplexity with flat labels
"""
return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])
def calculate_classification_accuracy(self, flat_output, labels): #completed, possibly expensive
prediction = flat_output.argmax(axis=1).reshape(labels.shape)
classification_accuracy = sum(prediction == labels) / float(labels.size)
return classification_accuracy[0]
class RNNLM_Tester(Recurrent_Neural_Network_Language_Model): #completed
def __init__(self, config_dictionary): #completed
"""runs DNN tester soup to nuts.
variables are
feature_file_name - name of feature file to load from
weight_matrix_name - initial weight matrix to load
output_name - output predictions
label_file_name - label file to check accuracy
required are feature_file_name, weight_matrix_name, and output_name"""
self.mode = 'test'
super(RNNLM_Tester,self).__init__(config_dictionary)
self.check_keys(config_dictionary)
self.weight_matrix_name = self.default_variable_define(config_dictionary, 'weight_matrix_name', arg_type='string')
self.model.open_weights(self.weight_matrix_name)
self.label_file_name = self.default_variable_define(config_dictionary, 'label_file_name', arg_type='string',error_string="No label_file_name defined, just running forward pass",exit_if_no_default=False)
if self.label_file_name != None:
self.labels = self.read_label_file()
# self.labels, self.labels_sent_id = self.read_label_file()
self.check_labels()
else:
del self.label_file_name
self.dump_config_vals()
self.classify()
self.write_posterior_prob_file()
# self.classify_log_perplexity()
# self.write_log_perplexity_file()
def classify(self): #completed
self.posterior_probs = self.forward_pass(self.features, self.feature_sequence_lens)
self.flat_posterior_probs = self.flatten_output(self.posterior_probs)
try:
avg_cross_entropy = self.calculate_cross_entropy(self.flat_posterior_probs, self.labels) / self.labels.size
print "Average cross-entropy is", avg_cross_entropy
print "Classification accuracy is %f%%" % self.calculate_classification_accuracy(self.flat_posterior_probs, self.labels) * 100
except AttributeError:
print "no labels given, so skipping classification statistics"
def classify_log_perplexity(self):
start_frame = 0
self.log_perplexity = np.empty((len(self.feature_sequence_lens),))
for batch_index, feature_sequence_len in enumerate(self.feature_sequence_lens):
end_frame = start_frame + feature_sequence_len
batch_features = self.features[:feature_sequence_len, batch_index]
batch_labels = self.labels[start_frame:end_frame,:]
self.log_perplexity[batch_index] = -self.calculate_cross_entropy(self.forward_pass_single_batch(batch_features), batch_labels) / batch_labels.size
start_frame = end_frame
def classify_log_perplexity_discount_factor(self, discount_factor = 1.0, unknown_word_index = None):
if unknown_word_index is None:
print "WARNING: not unknown_word_index index included... will give bad results if features have no unknown words"
unknown_word_index = np.max(self.features)
start_frame = 0
self.log_perplexity = np.empty((len(self.feature_sequence_lens),))
for batch_index, feature_sequence_len in enumerate(self.feature_sequence_lens):
end_frame = start_frame + feature_sequence_len
batch_features = self.features[:feature_sequence_len, batch_index]
batch_labels = self.labels[start_frame:end_frame,:]
outputs = self.forward_pass_single_batch(batch_features)
self.log_perplexity[batch_index] = -self.calculate_cross_entropy(outputs, batch_labels) / batch_labels.size
num_unknown_words = np.where(batch_labels == unknown_word_index)[0].size
if num_unknown_words > 0:
self.log_perplexity[batch_index] -= np.log(discount_factor) / batch_labels.size * num_unknown_words
start_frame = end_frame
def write_log_perplexity_file(self):
try:
print "Writing to", self.output_name
sp.savemat(self.output_name,{'targets' : self.log_perplexity}, oned_as='column') #output name should have .mat extension
except IOError:
print "Unable to write to ", self.output_name, "... Exiting now"
sys.exit()
def write_posterior_prob_file(self): #completed
try:
print "Writing to", self.output_name
sp.savemat(self.output_name,{'targets' : self.posterior_probs, 'sequence_lengths' : self.feature_sequence_lens}, oned_as='column') #output name should have .mat extension
except IOError:
print "Unable to write to ", self.output_name, "... Exiting now"
sys.exit()
class RNNLM_Trainer(Recurrent_Neural_Network_Language_Model):
def __init__(self,config_dictionary): #completed
"""variables in NN_trainer object are:
mode (set to 'train')
feature_file_name - inherited from Neural_Network class, name of feature file (in .mat format with variable 'features' in it) to read from
features - inherited from Neural_Network class, features
label_file_name - name of label file (in .mat format with variable 'labels' in it) to read from
labels - labels for backprop
architecture - specified by n_hid, n_hid, ..., n_hid. # of feature dimensions and # of classes need not be specified
weight_matrix_name - initial weight matrix, if specified, if not, will initialize from random
initial_weight_max - needed if initial weight matrix not loaded
initial_weight_min - needed if initial weight matrix not loaded
initial_bias_max - needed if initial weight matrix not loaded
initial_bias_min - needed if initial weight matrix not loaded
do_pretrain - set to 1 or 0 (probably should change to boolean values)
pretrain_method - not yet implemented, will either be 'mean_field' or 'sampling'
pretrain_iterations - # of iterations per RBM. Must be equal to the number of hidden layers
pretrain_learning_rate - learning rate for each epoch of pretrain. must be equal to # hidden layers * sum(pretrain_iterations)
pretrain_batch_size - batch size for pretraining
do_backprop - do backpropagation (set to either 0 or 1, probably should be changed to boolean value)
backprop_method - either 'steepest_descent', 'conjugate_gradient', or '2nd_order', latter two not yet implemented
l2_regularization_constant - strength of l2 (weight decay) regularization
steepest_learning_rate - learning rate for steepest_descent backprop
backprop_batch_size - batch size for backprop
output_name - name of weight file to store to.
********************************************************************************
At bare minimum, you'll need these variables set to train
feature_file_name
output_name
this will run logistic regression using steepest descent, which is a bad idea"""
#Raise error if we encounter under/overflow during training, because this is bad... code should handle this gracefully
old_settings = np.seterr(over='raise',under='raise',invalid='raise')
self.mode = 'train'
super(RNNLM_Trainer,self).__init__(config_dictionary)
self.num_training_examples = self.batch_size(self.feature_sequence_lens)
self.num_sequences = self.features.shape[1]
self.check_keys(config_dictionary)
#read label file
self.label_file_name = self.default_variable_define(config_dictionary, 'label_file_name', arg_type='string', error_string="No label_file_name defined, can only do pretraining",exit_if_no_default=False)
if self.label_file_name != None:
self.labels = self.read_label_file()
# self.labels, self.labels_sent_id = self.read_label_file()
self.check_labels()
# self.unflattened_labels = self.unflatten_labels(self.labels, self.labels_sent_id)
else:
del self.label_file_name
self.validation_feature_file_name = self.default_variable_define(config_dictionary, 'validation_feature_file_name', arg_type='string', exit_if_no_default = False)
if self.validation_feature_file_name is not None:
self.validation_features, self.validation_fsl = self.read_feature_file(self.validation_feature_file_name)
self.validation_label_file_name = self.default_variable_define(config_dictionary, 'validation_label_file_name', arg_type='string', exit_if_no_default = False)
if self.validation_label_file_name is not None:
self.validation_labels = self.read_label_file(self.validation_label_file_name)
#initialize weights
self.weight_matrix_name = self.default_variable_define(config_dictionary, 'weight_matrix_name', exit_if_no_default=False)
self.use_maxent = self.default_variable_define(config_dictionary, 'use_maxent', arg_type='boolean', default_value=False)
self.nonlinearity = self.default_variable_define(config_dictionary, 'nonlinearity', arg_type='string', default_value='sigmoid',
acceptable_values = ['sigmoid', 'relu', 'tanh'])
if self.weight_matrix_name != None:
print "Since weight_matrix_name is defined, ignoring possible value of hiddens_structure"
self.model.open_weights(self.weight_matrix_name)
else: #initialize model
del self.weight_matrix_name
self.num_hiddens = self.default_variable_define(config_dictionary, 'num_hiddens', arg_type='int', exit_if_no_default=True)
architecture = [np.max(self.features)+1, self.num_hiddens] #+1 because index starts at 0
if hasattr(self, 'labels'):
architecture.append(np.max(self.labels[:,1])+1) #will have to change later if I have soft weights
self.seed = self.default_variable_define(config_dictionary, 'seed', 'int', '0')
self.initial_weight_max = self.default_variable_define(config_dictionary, 'initial_weight_max', arg_type='float', default_value=0.1)
self.initial_weight_min = self.default_variable_define(config_dictionary, 'initial_weight_min', arg_type='float', default_value=-0.1)
self.initial_bias_max = self.default_variable_define(config_dictionary, 'initial_bias_max', arg_type='float', default_value=0.1)
self.initial_bias_min = self.default_variable_define(config_dictionary, 'initial_bias_max', arg_type='float', default_value=-0.1)
self.use_maxent = self.default_variable_define(config_dictionary, 'use_maxent', arg_type='boolean', default_value=False)
self.model.init_random_weights(architecture,
# self.initial_bias_max, self.initial_bias_min,
# self.initial_weight_min, self.initial_weight_max,
maxent=self.use_maxent, seed = self.seed)
# one_prob = float(self.labels[:,1].sum()) / self.labels.shape[0]
# self.model.bias['output'][0,0] = np.log(1.0 - one_prob)
# self.model.bias['output'][0,1] = np.log(one_prob)
del architecture #we have it in the model
#
self.save_each_epoch = self.default_variable_define(config_dictionary, 'save_each_epoch', arg_type='boolean', default_value=False)
#pretraining configuration
self.do_pretrain = self.default_variable_define(config_dictionary, 'do_pretrain', default_value=False, arg_type='boolean')
if self.do_pretrain:
self.pretrain_method = self.default_variable_define(config_dictionary, 'pretrain_method', default_value='mean_field', acceptable_values=['mean_field', 'sampling'])
self.pretrain_iterations = self.default_variable_define(config_dictionary, 'pretrain_iterations', default_value=[5] * len(self.hiddens_structure),
error_string="No pretrain_iterations defined, setting pretrain_iterations to default 5 per layer",
arg_type='int_comma_string')
weight_last_layer = ''.join([str(self.model.num_layers-1), str(self.model.num_layers)])
if self.model.weight_type[weight_last_layer] == 'logistic' and (len(self.pretrain_iterations) != self.model.num_layers - 1):
print "given layer type", self.model.weight_type[weight_last_layer], "pretraining iterations length should be", self.model.num_layers-1, "but pretraining_iterations is length ", len(self.pretrain_iterations), "... Exiting now"
sys.exit()
elif self.model.weight_type[weight_last_layer] != 'logistic' and (len(self.pretrain_iterations) != self.model.num_layers):
print "given layer type", self.model.weight_type[weight_last_layer], "pretraining iterations length should be", self.model.num_layers, "but pretraining_iterations is length ", len(self.pretrain_iterations), "... Exiting now"
sys.exit()
self.pretrain_learning_rate = self.default_variable_define(config_dictionary, 'pretrain_learning_rate', default_value=[0.01] * sum(self.pretrain_iterations),
error_string="No pretrain_learning_rate defined, setting pretrain_learning_rate to default 0.01 per iteration",
arg_type='float_comma_string')
if len(self.pretrain_learning_rate) != sum(self.pretrain_iterations):
print "pretraining learning rate should have ", sum(self.pretrain_iterations), " learning rate iterations but only has ", len(self.pretrain_learning_rate), "... Exiting now"
sys.exit()
self.pretrain_batch_size = self.default_variable_define(config_dictionary, 'pretrain_batch_size', default_value=256, arg_type='int')
#backprop configuration
self.do_backprop = self.default_variable_define(config_dictionary, 'do_backprop', default_value=True, arg_type='boolean')
if self.do_backprop:
if not hasattr(self, 'labels'):
print "No labels found... cannot do backprop... Exiting now"
sys.exit()
self.backprop_method = self.default_variable_define(config_dictionary, 'backprop_method', default_value='steepest_descent',
acceptable_values=['steepest_descent', 'adagrad', 'krylov_subspace', 'truncated_newton'])
self.backprop_batch_size = self.default_variable_define(config_dictionary, 'backprop_batch_size', default_value=16, arg_type='int')
self.l2_regularization_const = self.default_variable_define(config_dictionary, 'l2_regularization_const', arg_type='float', default_value=0.0, exit_if_no_default=False)
if self.backprop_method == 'steepest_descent':
self.steepest_learning_rate = self.default_variable_define(config_dictionary, 'steepest_learning_rate', default_value=0.08, arg_type='float_comma_string')
self.momentum_rate = self.default_variable_define(config_dictionary, 'momentum_rate', arg_type='float_comma_string', default_value = 0.0, exit_if_no_default=False)
self.max_num_decreases = self.default_variable_define(config_dictionary, 'max_num_decreases', default_value=2, arg_type='int')
if hasattr(self, 'momentum_rate'):
assert(len(self.momentum_rate) == len(self.steepest_learning_rate))
if self.backprop_method == 'adagrad':
self.steepest_learning_rate = self.default_variable_define(config_dictionary, 'steepest_learning_rate', default_value=[0.08, 0.04, 0.02, 0.01], arg_type='float_comma_string')
else: #second order methods
self.num_epochs = self.default_variable_define(config_dictionary, 'num_epochs', default_value=20, arg_type='int')
self.use_fisher_preconditioner = self.default_variable_define(config_dictionary, 'use_fisher_preconditioner', arg_type='boolean', default_value=False)
self.second_order_matrix = self.default_variable_define(config_dictionary, 'second_order_matrix', arg_type='string', default_value='gauss-newton',
acceptable_values=['gauss-newton', 'hessian', 'fisher'])
self.structural_damping_const = self.default_variable_define(config_dictionary, 'structural_damping_const', arg_type='float', default_value=0.0, exit_if_no_default=False)
if self.use_fisher_preconditioner:
self.fisher_preconditioner_floor_val = self.default_variable_define(config_dictionary, 'fisher_preconditioner_floor_val', arg_type='float', default_value=1E-4)
if self.backprop_method == 'krylov_subspace':
self.krylov_num_directions = self.default_variable_define(config_dictionary, 'krylov_num_directions', arg_type='int', default_value=20,
acceptable_values=range(2,2000))
self.krylov_num_batch_splits = self.default_variable_define(config_dictionary, 'krylov_num_batch_splits', arg_type='int', default_value=self.krylov_num_directions,
acceptable_values=range(2,2000))
self.krylov_num_bfgs_epochs = self.default_variable_define(config_dictionary, 'krylov_num_bfgs_epochs', arg_type='int', default_value=self.krylov_num_directions)
self.krylov_use_hessian_preconditioner = self.default_variable_define(config_dictionary, 'krylov_use_hessian_preconditioner', arg_type='boolean', default_value=True)
if self.krylov_use_hessian_preconditioner:
self.krylov_eigenvalue_floor_const = self.default_variable_define(config_dictionary, 'krylov_eigenvalue_floor_const', arg_type='float', default_value=1E-4)
self.num_line_searches = self.default_variable_define(config_dictionary, 'num_line_searches', default_value=20, arg_type='int')
self.armijo_const = self.default_variable_define(config_dictionary, 'armijo_const', arg_type='float', default_value=0.0001)
self.wolfe_const = self.default_variable_define(config_dictionary, 'wolfe_const', arg_type='float', default_value=0.9)
elif self.backprop_method == 'truncated_newton':
self.truncated_newton_num_cg_epochs = self.default_variable_define(config_dictionary, 'truncated_newton_num_cg_epochs', arg_type='int', default_value=20)
self.truncated_newton_init_damping_factor = self.default_variable_define(config_dictionary, 'truncated_newton_init_damping_factor', arg_type='float', default_value=0.1)
self.dump_config_vals()
def calculate_gradient_single_batch(self, batch_inputs, batch_labels, gradient_weights, hiddens = None, outputs = None, check_gradient=False,
model=None, l2_regularization_const = 0.0, dropout = 0.0, return_cross_entropy = False):
#need to check regularization
#TO DO: fix gradient when there is only a single word (empty?)
#calculate gradient with particular Neural Network model. If None is specified, will use current weights (i.e., self.model)
batch_size = batch_labels.size
if model == None:
model = self.model
if hiddens == None or outputs == None:
outputs, hiddens = self.forward_pass_single_batch(batch_inputs, model, return_hiddens=True)
#derivative of log(cross-entropy softmax)
batch_indices = np.arange(batch_size)
gradient_weights *= 0.0
backward_inputs = outputs
if dropout < 0.0 or dropout > 1.0:
print "dropout must be between 0 and 1 but is", dropout
raise ValueError
if dropout != 0.0:
hiddens *= (np.random.rand(hiddens.shape[0], hiddens.shape[1]) > dropout)
# print batch_inputs
# print batch_labels
# print batch_indices
if return_cross_entropy:
cross_entropy = -np.sum(np.log2(backward_inputs[batch_indices, batch_labels]))
backward_inputs[batch_indices, batch_labels] -= 1.0
# print backward_inputs.shape
# gradient_weights = RNNLM_Weight()
# gradient_weights.init_zero_weights(self.model.get_architecture(), verbose=False)
# gradient_weights.bias['output'][0] = np.sum(backward_inputs, axis=0)
np.sum(backward_inputs, axis=0, out = gradient_weights.bias['output'][0])
np.dot(hiddens.T, backward_inputs, out = gradient_weights.weights['hidden_output'])
if 'visible_output' in model.weights:
gradient_weights.weights['visible_output'][batch_inputs] += backward_inputs
# backward_inputs = outputs - batch_unflattened_labels
pre_nonlinearity_hiddens = np.dot(backward_inputs[batch_size-1,:], model.weights['hidden_output'].T)
# pre_nonlinearity_hiddens *= hiddens[batch_size-1,:] > 0.0
pre_nonlinearity_hiddens *= hiddens[batch_size-1,:]
pre_nonlinearity_hiddens *= 1 - hiddens[batch_size-1,:]
# if structural_damping_const > 0.0:
# pre_nonlinearity_hiddens += structural_damping_const * hidden_deriv[n_obs-1,:,:]
# output_model.weights['visible_hidden'] += np.dot(visibles[n_obs-1,:,:].T, pre_nonlinearity_hiddens)
if batch_size > 1:
gradient_weights.weights['visible_hidden'][batch_inputs[batch_size-1]] += pre_nonlinearity_hiddens
gradient_weights.weights['hidden_hidden'] += np.outer(hiddens[batch_size-2,:], pre_nonlinearity_hiddens)
gradient_weights.bias['hidden'][0] += pre_nonlinearity_hiddens
for observation_index in range(batch_size-2,0,-1):
pre_nonlinearity_hiddens = ((np.dot(backward_inputs[observation_index,:], model.weights['hidden_output'].T) +
np.dot(pre_nonlinearity_hiddens, model.weights['hidden_hidden'].T))
# * (hiddens[observation_index] > 0.0))
* hiddens[observation_index,:] * (1 - hiddens[observation_index,:]))
# np.dot(backward_inputs[observation_index,:], model.weights['hidden_output'].T, out = pre_nonlinearity_hiddens)
# pre_nonlinearity_hiddens += np.dot(pre_nonlinearity_hiddens, model.weights['hidden_hidden'].T)
# pre_nonlinearity_hiddens *= hiddens[observation_index,:]
# pre_nonlinearity_hiddens *= (1 - hiddens[observation_index,:])
# print pre_nonlinearity_hiddens.shape
# if structural_damping_const > 0.0:
# pre_nonlinearity_hiddens += structural_damping_const * hidden_deriv[observation_index,:,:]
gradient_weights.weights['visible_hidden'][batch_inputs[observation_index]] += pre_nonlinearity_hiddens #+= np.dot(visibles[observation_index,:,:].T, pre_nonlinearity_hiddens)
gradient_weights.weights['hidden_hidden'] += np.outer(hiddens[observation_index-1,:], pre_nonlinearity_hiddens)
gradient_weights.bias['hidden'][0] += pre_nonlinearity_hiddens
if batch_size > 1:
pre_nonlinearity_hiddens = ((np.dot(backward_inputs[0,:], model.weights['hidden_output'].T)
+ np.dot(pre_nonlinearity_hiddens, model.weights['hidden_hidden'].T))
* hiddens[0,:] * (1 - hiddens[0,:]))
gradient_weights.weights['visible_hidden'][batch_inputs[0]] += pre_nonlinearity_hiddens# += np.dot(visibles[0,:,:].T, pre_nonlinearity_hiddens)
gradient_weights.weights['hidden_hidden'] += np.outer(model.init_hiddens, pre_nonlinearity_hiddens) #np.dot(np.tile(model.init_hiddens, (pre_nonlinearity_hiddens.shape[0],1)).T, pre_nonlinearity_hiddens)
gradient_weights.bias['hidden'][0] += pre_nonlinearity_hiddens
gradient_weights.init_hiddens[0] = np.dot(pre_nonlinearity_hiddens, model.weights['hidden_hidden'].T)
# gradient_weights = self.backward_pass(backward_inputs, hiddens, batch_inputs, model)
backward_inputs[batch_indices, batch_labels] += 1.0
gradient_weights /= batch_size
if l2_regularization_const > 0.0:
gradient_weights += model * l2_regularization_const
if return_cross_entropy and not check_gradient:
return cross_entropy
# if not check_gradient:
# if not return_cross_entropy:
# if l2_regularization_const > 0.0:
# return gradient_weights / batch_size + model * l2_regularization_const
# return gradient_weights / batch_size
# else:
# if l2_regularization_const > 0.0:
# return gradient_weights / batch_size + model * l2_regularization_const, cross_entropy
# return gradient_weights / batch_size, cross_entropy
### below block checks gradient... only to be used if you think the gradient is incorrectly calculated ##############
else:
if l2_regularization_const > 0.0:
gradient_weights += model * (l2_regularization_const * batch_size)
sys.stdout.write("\r \r")
print "checking gradient..."
finite_difference_model = RNNLM_Weight()
finite_difference_model.init_zero_weights(self.model.get_architecture(), verbose=False)
direction = RNNLM_Weight()
direction.init_zero_weights(self.model.get_architecture(), verbose=False)
epsilon = 1E-5
print "at initial hiddens"
for index in range(direction.init_hiddens.size):
direction.init_hiddens[0][index] = epsilon
forward_loss = -np.sum(np.log(self.forward_pass_single_batch(batch_inputs, model = model + direction)[batch_indices, batch_labels]))
backward_loss = -np.sum(np.log(self.forward_pass_single_batch(batch_inputs, model = model - direction)[batch_indices, batch_labels]))
finite_difference_model.init_hiddens[0][index] = (forward_loss - backward_loss) / (2 * epsilon)
direction.init_hiddens[0][index] = 0.0
for key in direction.bias.keys():
print "at bias key", key
for index in range(direction.bias[key].size):
direction.bias[key][0][index] = epsilon
#print direction.norm()
forward_loss = -np.sum(np.log(self.forward_pass_single_batch(batch_inputs, model = model + direction)[batch_indices, batch_labels]))
backward_loss = -np.sum(np.log(self.forward_pass_single_batch(batch_inputs, model = model - direction)[batch_indices, batch_labels]))
finite_difference_model.bias[key][0][index] = (forward_loss - backward_loss) / (2 * epsilon)
direction.bias[key][0][index] = 0.0
for key in direction.weights.keys():
print "at weight key", key
for index0 in range(direction.weights[key].shape[0]):
for index1 in range(direction.weights[key].shape[1]):
direction.weights[key][index0][index1] = epsilon
forward_loss = -np.sum(np.log(self.forward_pass_single_batch(batch_inputs, model = model + direction)[batch_indices, batch_labels]))
backward_loss = -np.sum(np.log(self.forward_pass_single_batch(batch_inputs, model = model - direction)[batch_indices, batch_labels]))
finite_difference_model.weights[key][index0][index1] = (forward_loss - backward_loss) / (2 * epsilon)
direction.weights[key][index0][index1] = 0.0
print "calculated gradient for initial hiddens"
print gradient_weights.init_hiddens
print "finite difference approximation for initial hiddens"
print finite_difference_model.init_hiddens
print "calculated gradient for hidden bias"
print gradient_weights.bias['hidden']
print "finite difference approximation for hidden bias"
print finite_difference_model.bias['hidden']
print "calculated gradient for output bias"
print gradient_weights.bias['output']
print "finite difference approximation for output bias"
print finite_difference_model.bias['output']
print "calculated gradient for visible_hidden layer"
print gradient_weights.weights['visible_hidden']
print "finite difference approximation for visible_hidden layer"
print finite_difference_model.weights['visible_hidden']
print np.sum((finite_difference_model.weights['visible_hidden'] - gradient_weights.weights['visible_hidden']) ** 2)
print "calculated gradient for hidden_hidden layer"
print gradient_weights.weights['hidden_hidden']
print "finite difference approximation for hidden_hidden layer"
print finite_difference_model.weights['hidden_hidden']
print "calculated gradient for hidden_output layer"
print gradient_weights.weights['hidden_output']
print "finite difference approximation for hidden_output layer"
print finite_difference_model.weights['hidden_output']
sys.exit()
##########################################################
def calculate_gradient(self, batch_inputs, batch_labels, feature_sequence_lens, hiddens = None, outputs = None,
check_gradient=False, model=None, l2_regularization_const = 0.0, return_cross_entropy = False):
#need to check regularization
#calculate gradient with particular Neural Network model. If None is specified, will use current weights (i.e., self.model)
excluded_keys = {'bias':['0'], 'weights':[]} #will have to change this later
if model == None:
model = self.model
if hiddens == None or outputs == None:
outputs, hiddens = self.forward_pass(batch_inputs, feature_sequence_lens, model, return_hiddens=True)
#derivative of log(cross-entropy softmax)
batch_size = self.batch_size(feature_sequence_lens)
batch_indices = np.zeros((batch_size,), dtype=np.int)
cur_frame = 0
for seq_len in feature_sequence_lens:
batch_indices[cur_frame:cur_frame+seq_len] = np.arange(seq_len)
cur_frame += seq_len
backward_inputs = copy.deepcopy(outputs)
# print batch_labels
# print batch_indices
if return_cross_entropy:
cross_entropy = -np.sum(np.log2(backward_inputs[batch_indices, batch_labels[:,0], batch_labels[:,1]]))
backward_inputs[batch_indices, batch_labels[:,0], batch_labels[:,1]] -= 1.0
# backward_inputs = outputs - batch_unflattened_labels
gradient_weights = self.backward_pass(backward_inputs, hiddens, batch_inputs, model)
if not check_gradient:
if not return_cross_entropy:
if l2_regularization_const > 0.0:
return gradient_weights / batch_size + model * l2_regularization_const
return gradient_weights / batch_size
else:
if l2_regularization_const > 0.0:
return gradient_weights / batch_size + model * l2_regularization_const, cross_entropy
return gradient_weights / batch_size, cross_entropy
### below block checks gradient... only to be used if you think the gradient is incorrectly calculated ##############
else:
if l2_regularization_const > 0.0:
gradient_weights += model * (l2_regularization_const * batch_size)
sys.stdout.write("\r \r")
print "checking gradient..."
finite_difference_model = RNNLM_Weight()
finite_difference_model.init_zero_weights(self.model.get_architecture(), verbose=False)
direction = RNNLM_Weight()
direction.init_zero_weights(self.model.get_architecture(), verbose=False)
epsilon = 1E-5
print "at initial hiddens"
for index in range(direction.init_hiddens.size):
direction.init_hiddens[0][index] = epsilon
forward_loss = self.calculate_cross_entropy(self.flatten_output(self.forward_pass(batch_inputs, feature_sequence_lens, model = model + direction),
feature_sequence_lens), batch_labels)
backward_loss = self.calculate_cross_entropy(self.flatten_output(self.forward_pass(batch_inputs, feature_sequence_lens, model = model - direction),
feature_sequence_lens), batch_labels)
finite_difference_model.init_hiddens[0][index] = (forward_loss - backward_loss) / (2 * epsilon)
direction.init_hiddens[0][index] = 0.0
for key in direction.bias.keys():
print "at bias key", key
for index in range(direction.bias[key].size):
direction.bias[key][0][index] = epsilon
#print direction.norm()
forward_loss = self.calculate_cross_entropy(self.flatten_output(self.forward_pass(batch_inputs, feature_sequence_lens, model = model + direction),
feature_sequence_lens), batch_labels)
backward_loss = self.calculate_cross_entropy(self.flatten_output(self.forward_pass(batch_inputs, feature_sequence_lens, model = model - direction),
feature_sequence_lens), batch_labels)
finite_difference_model.bias[key][0][index] = (forward_loss - backward_loss) / (2 * epsilon)
direction.bias[key][0][index] = 0.0
for key in direction.weights.keys():
print "at weight key", key
for index0 in range(direction.weights[key].shape[0]):
for index1 in range(direction.weights[key].shape[1]):
direction.weights[key][index0][index1] = epsilon
forward_loss = self.calculate_cross_entropy(self.flatten_output(self.forward_pass(batch_inputs, feature_sequence_lens, model = model + direction),
feature_sequence_lens), batch_labels)
backward_loss = self.calculate_cross_entropy(self.flatten_output(self.forward_pass(batch_inputs, feature_sequence_lens, model = model - direction),
feature_sequence_lens), batch_labels)
finite_difference_model.weights[key][index0][index1] = (forward_loss - backward_loss) / (2 * epsilon)
direction.weights[key][index0][index1] = 0.0
print "calculated gradient for initial hiddens"
print gradient_weights.init_hiddens
print "finite difference approximation for initial hiddens"
print finite_difference_model.init_hiddens
print "calculated gradient for hidden bias"
print gradient_weights.bias['hidden']
print "finite difference approximation for hidden bias"
print finite_difference_model.bias['hidden']
print "calculated gradient for output bias"
print gradient_weights.bias['output']
print "finite difference approximation for output bias"
print finite_difference_model.bias['output']
print "calculated gradient for visible_hidden layer"
print gradient_weights.weights['visible_hidden']
print "finite difference approximation for visible_hidden layer"
print finite_difference_model.weights['visible_hidden']
print "calculated gradient for hidden_hidden layer"
print gradient_weights.weights['hidden_hidden']
print "finite difference approximation for hidden_hidden layer"
print finite_difference_model.weights['hidden_hidden']
print "calculated gradient for hidden_output layer"
print gradient_weights.weights['hidden_output']
print "finite difference approximation for hidden_output layer"
print finite_difference_model.weights['hidden_output']
sys.exit()
##########################################################
def backward_pass(self, backward_inputs, hiddens, visibles, model=None, structural_damping_const = 0.0, hidden_deriv = None): #need to test
if model == None:
model = self.model
output_model = RNNLM_Weight()
output_model.init_zero_weights(self.model.get_architecture(), verbose=False)
n_obs = backward_inputs.shape[0]
n_outs = backward_inputs.shape[2]
n_hids = hiddens.shape[2]
n_seq = backward_inputs.shape[1]
#backward_inputs - n_obs x n_seq x n_outs
#hiddens - n_obs x n_seq x n_hids
flat_outputs = np.reshape(np.transpose(backward_inputs, axes=(1,0,2)),(n_obs * n_seq,n_outs))
flat_hids = np.reshape(np.transpose(hiddens, axes=(1,0,2)),(n_obs * n_seq,n_hids))
#average layers in batch
output_model.bias['output'][0] = np.sum(flat_outputs, axis=0)
output_model.weights['hidden_output'] = np.dot(flat_hids.T, flat_outputs)
#HERE is where the fun begins... will need to store gradient updates in the form of dL/da_i
#(where a is the pre-nonlinear layer for updates to the hidden hidden and input hidden weight matrices
pre_nonlinearity_hiddens = np.dot(backward_inputs[n_obs-1,:,:], model.weights['hidden_output'].T) * hiddens[n_obs-1,:,:] * (1 - hiddens[n_obs-1,:,:])
if structural_damping_const > 0.0:
pre_nonlinearity_hiddens += structural_damping_const * hidden_deriv[n_obs-1,:,:]
# output_model.weights['visible_hidden'] += np.dot(visibles[n_obs-1,:,:].T, pre_nonlinearity_hiddens)
output_model.weights['visible_hidden'][visibles[n_obs-1,:]] += pre_nonlinearity_hiddens
output_model.weights['hidden_hidden'] += np.dot(hiddens[n_obs-2,:,:].T, pre_nonlinearity_hiddens)
output_model.bias['hidden'][0] += np.sum(pre_nonlinearity_hiddens, axis=0)
for observation_index in range(1,n_obs-1)[::-1]:
pre_nonlinearity_hiddens = ((np.dot(backward_inputs[observation_index,:,:], model.weights['hidden_output'].T) + np.dot(pre_nonlinearity_hiddens, model.weights['hidden_hidden'].T))
* hiddens[observation_index,:,:] * (1 - hiddens[observation_index,:,:]))
if structural_damping_const > 0.0:
pre_nonlinearity_hiddens += structural_damping_const * hidden_deriv[observation_index,:,:]
output_model.weights['visible_hidden'][visibles[observation_index,:]] += pre_nonlinearity_hiddens #+= np.dot(visibles[observation_index,:,:].T, pre_nonlinearity_hiddens)
output_model.weights['hidden_hidden'] += np.dot(hiddens[observation_index-1,:,:].T, pre_nonlinearity_hiddens)
output_model.bias['hidden'][0] += np.sum(pre_nonlinearity_hiddens, axis=0)
pre_nonlinearity_hiddens = ((np.dot(backward_inputs[0,:,:], model.weights['hidden_output'].T) + np.dot(pre_nonlinearity_hiddens, model.weights['hidden_hidden'].T))
* hiddens[0,:,:] * (1 - hiddens[0,:,:]))
output_model.weights['visible_hidden'][visibles[0,:]] += pre_nonlinearity_hiddens# += np.dot(visibles[0,:,:].T, pre_nonlinearity_hiddens)
output_model.weights['hidden_hidden'] += np.dot(np.tile(model.init_hiddens, (pre_nonlinearity_hiddens.shape[0],1)).T, pre_nonlinearity_hiddens)
output_model.bias['hidden'][0] += np.sum(pre_nonlinearity_hiddens, axis=0)
output_model.init_hiddens[0] = np.sum(np.dot(pre_nonlinearity_hiddens, model.weights['hidden_hidden'].T), axis=0)
return output_model
def calculate_loss(self, inputs, feature_sequence_lens, labels, batch_size, model = None, l2_regularization_const = None):
#differs from calculate_cross_entropy in that it also allows for regularization term
#### THIS FUNCTION DOES NOT WORK!!!!!
if model == None:
model = self.model
if l2_regularization_const == None:
l2_regularization_const = self.l2_regularization_const
excluded_keys = {'bias':['0'], 'weights':[]}
outputs = self.flatten_output(self.forward_pass(inputs, feature_sequence_lens, model = model), feature_sequence_lens)
if self.l2_regularization_const == 0.0:
return self.calculate_cross_entropy(outputs, labels)
else:
return self.calculate_cross_entropy(outputs, labels) + (model.norm(excluded_keys) ** 2) * l2_regularization_const / 2. * batch_size
def calculate_classification_statistics(self, features, flat_labels, feature_sequence_lens, model=None):
if model == None:
model = self.model
excluded_keys = {'bias': ['0'], 'weights': []}
if self.do_backprop == False:
classification_batch_size = 1024
else:
classification_batch_size = max(self.backprop_batch_size, 1024)
batch_index = 0
end_index = 0
cross_entropy = 0.0
log_perplexity = 0.0
num_correct = 0
num_sequences = features.shape[1]
num_examples = self.batch_size(feature_sequence_lens)
# print features.shape
print "Calculating Classification Statistics"
while end_index < num_sequences: #run through the batches
per_done = float(batch_index)/num_sequences*100
# sys.stdout.write("\r \r") #clear line
# sys.stdout.write("\rCalculating Classification Statistics: %.1f%% done " % per_done), sys.stdout.flush()
end_index = min(batch_index+classification_batch_size, num_sequences)
max_seq_len = max(feature_sequence_lens[batch_index:end_index])
# print batch_index, max_seq_len
output = self.flatten_output(self.forward_pass(features[:max_seq_len,batch_index:end_index], feature_sequence_lens[batch_index:end_index], model=model),
feature_sequence_lens[batch_index:end_index])
start_frame = np.where(flat_labels[:,0] == batch_index)[0][0]
end_frame = np.where(flat_labels[:,0] == end_index-1)[0][-1] + 1
label = flat_labels[start_frame:end_frame]
cross_entropy += self.calculate_cross_entropy(output, label)
log_perplexity += self.calculate_log_perplexity(output, label)
#don't use calculate_classification_accuracy() because of possible rounding error
prediction = output.argmax(axis=1)
num_correct += np.sum(prediction == label[:,1]) #- (prediction.size - num_examples) #because of the way we handle features, where some observations are null, we want to remove those examples for calculating accuracy
batch_index += classification_batch_size
# sys.stdout.write("\r \r") #clear line
loss = cross_entropy
if self.l2_regularization_const > 0.0:
loss += (model.norm(excluded_keys) ** 2) * self.l2_regularization_const
# cross_entropy /= np.log(2) * num_examples
loss /= np.log(2) * num_examples
log_perplexity /= num_examples
perplexity = 2 ** log_perplexity
return cross_entropy, perplexity, num_correct, num_examples, loss
    def backprop_mikolov_steepest_descent(self, bptt = 4, bptt_block = 4, independent = True):
        """Online (per-word) steepest descent in the style of Mikolov's rnnlm toolkit.

        The output layer is updated after every word; the recurrent and input
        weights are updated by truncated BPTT every `bptt_block` words, using a
        rolling window of `bptt + bptt_block` hidden states / errors.

        Parameters:
        bptt - extra time steps unrolled past each bptt_block boundary
        bptt_block - number of words between recurrent-weight updates
        independent - unused here (TODO confirm: presumably meant to reset the
            hidden state at sentence boundaries)

        Side effects: mutates self.model in place; zeroes the bias terms and
        sets the initial hidden state to all ones before training.
        """
        print "Starting backprop using Mikolov steepest descent"
        hidden_hidden_gradient = np.zeros(self.model.weights['hidden_hidden'].shape)
        # reset biases and force init_hiddens to 1.0 (matches the toolkit-style setup)
        self.model.bias['visible'] *= 0.0
        self.model.bias['hidden'] *= 0.0
        self.model.bias['output'] *= 0.0
        self.model.init_hiddens *= 0.0
        self.model.init_hiddens += 1.0
        excluded_keys = {'bias':['0'], 'weights':[]}
        num_words = self.model.weights['hidden_output'].shape[1]
        # rolling BPTT buffers: index -1 is the current time step
        bptt_hiddens = np.zeros((bptt + bptt_block, self.num_hiddens))
        word_history = -np.ones((bptt + bptt_block,), dtype=np.int)  # -1 marks "no word"
        bptt_error = np.zeros((bptt + bptt_block, self.num_hiddens))
        cur_hiddens = np.zeros((self.num_hiddens,))
        prev_hiddens = np.ones((self.num_hiddens,))
        hidden_hidden_buffer = np.empty((self.num_hiddens,))
        output_layer = np.zeros((num_words,))
        num_sequences = self.features.shape[1]
        cur_entropy = 0.0
        one_perc_num_sequences = max(num_sequences / 100, 1)
        for epoch_num in range(len(self.steepest_learning_rate)):
            print "At epoch", epoch_num+1, "of", len(self.steepest_learning_rate), "with learning rate", self.steepest_learning_rate[epoch_num]
            cur_entropy = 0.0
            counter = 0
            for seq_index in range(num_sequences):
                word_history[:] = -1
                bptt_hiddens *= 0.0
                features = self.features[:self.feature_sequence_lens[seq_index], seq_index]
                if (seq_index + 1) % one_perc_num_sequences == 0:
                    per_done = float(seq_index) / num_sequences * 100
                    # NOTE(review): counter can be 0 on the very first report when
                    # num_sequences < 100 -- division by zero; verify.
                    print "Finished %.2f%% of training with average entropy: %f" % (per_done, cur_entropy / counter)
                for feature in features:
                    label = self.labels[counter,1]
                    word_history[-1] = feature
                    # forward step: h_t = sigmoid(W_hh h_{t-1} + W_vh[word])
                    np.dot(prev_hiddens, self.model.weights['hidden_hidden'], out = cur_hiddens) #Tn-1 to Tn
                    cur_hiddens += self.model.weights['visible_hidden'][feature,:]
                    np.clip(cur_hiddens, -50.0, 50.0, out=cur_hiddens)  # guard sigmoid overflow
                    cur_hiddens[:] = self.sigmoid(cur_hiddens)
                    bptt_hiddens[-1,:] = cur_hiddens
                    # softmax over the vocabulary (max-subtracted for stability)
                    np.dot(cur_hiddens, self.model.weights['hidden_output'], out = output_layer)
                    output_layer += self.model.bias['output'].reshape((output_layer.size,))
                    output_layer -= np.max(output_layer)
                    np.exp(output_layer, out = output_layer)
                    output_layer /= np.sum(output_layer)
                    cur_entropy -= np.log2(output_layer[label])
                    # turn softmax output into the output-layer error (y - onehot)
                    output_layer[label] -= 1.0
                    # immediate update of the output layer for every word
                    self.model.bias['output'] -= self.steepest_learning_rate[epoch_num] * output_layer.reshape((output_layer.size,))
                    self.model.weights['hidden_output'] -= self.steepest_learning_rate[epoch_num] * np.outer(cur_hiddens, output_layer)
                    # error at the (post-sigmoid) hidden layer for the current step
                    bptt_error[-1,:] = np.dot(output_layer, self.model.weights['hidden_output'].T)
                    bptt_error[-1,:] *= cur_hiddens
                    bptt_error[-1,:] *= (1 - cur_hiddens)
                    if (counter+1) % bptt_block == 0: #or label == (num_words-1): #encounters end of sentence
                        # truncated BPTT through the buffered window
                        for time_index in range(bptt+bptt_block-1):
                            word_hist = word_history[-time_index-1]
                            if word_hist == -1:
                                break
                            self.model.weights['visible_hidden'][word_hist,:] -= self.steepest_learning_rate[epoch_num] * bptt_error[-time_index-1,:]
                            hidden_hidden_gradient += np.outer(bptt_hiddens[-time_index-2,:], bptt_error[-time_index-1,:])
                            np.dot(bptt_error[-time_index-1,:], self.model.weights['hidden_hidden'].T, out = hidden_hidden_buffer)
                            #now BPTT a step
                            hidden_hidden_buffer *= bptt_hiddens[-time_index-2,:]
                            hidden_hidden_buffer *= (1 - bptt_hiddens[-time_index-2,:])
                            # NOTE(review): every other window index here is negative
                            # (-time_index-1 / -time_index-2); this positive time_index-2
                            # coincides with -time_index-2 only at time_index == 0 --
                            # verify it should not be bptt_error[-time_index-2,:].
                            bptt_error[time_index-2,:] += hidden_hidden_buffer
                        self.model.weights['visible_hidden'][word_history[0],:] -= self.steepest_learning_rate[epoch_num] * bptt_error[0,:]
                        word_history[:] = -1
                        bptt_error *= 0.0
                        # NOTE(review): hidden_hidden_gradient is never reset after being
                        # applied, so earlier blocks' contributions are re-applied on every
                        # update -- verify this accumulation is intended.
                        self.model.weights['hidden_hidden'] -= self.steepest_learning_rate[epoch_num] * hidden_hidden_gradient
                    # shift the rolling window one step forward
                    prev_hiddens[:] = bptt_hiddens[-1,:]
                    bptt_hiddens[:-1,:] = bptt_hiddens[1:,:]
                    bptt_error[:-1,:] = bptt_error[1:,:]
                    word_history[:-1] = word_history[1:]
                    counter += 1
            if self.validation_feature_file_name is not None:
                # NOTE(review): evaluates on the training set here, not the validation
                # set (cf. the other backprop_* methods) -- verify intended.
                cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.features, self.labels, self.feature_sequence_lens, self.model)
                print "cross-entropy at the end of the epoch is", cross_entropy
                print "perplexity before steepest descent is", perplexity
                if self.l2_regularization_const > 0.0:
                    print "regularized loss is", loss
                print "number correctly classified is", num_correct, "of", num_examples
            sys.stdout.write("\r100.0% done \r")
            sys.stdout.write("\r                                                                \r") #clear line
            if self.save_each_epoch:
                self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
    def backprop_steepest_descent_single_batch(self):
        """Steepest-descent training, one sequence per update.

        Iterates sequences in order; for each, computes the gradient via
        calculate_gradient_single_batch (which fills `gradient` in place) and
        applies a scaled step, optional L2 weight decay, and optional momentum.
        Mutates self.model in place; optionally evaluates on the validation
        set each epoch and writes per-epoch weight files.
        """
        print "Starting backprop using steepest descent"
        start_time = datetime.datetime.now()
        print "Training started at", start_time
        prev_step = RNNLM_Weight()
        prev_step.init_zero_weights(self.model.get_architecture(), maxent = self.use_maxent)
        gradient = RNNLM_Weight()
        gradient.init_zero_weights(self.model.get_architecture(), maxent = self.use_maxent)
        # NOTE(review): dropout is forced off here, making the dropout rescaling
        # branches below dead code -- confirm whether that is intended.
        self.dropout = 0.0
        if self.validation_feature_file_name is not None:
            cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
            print "cross-entropy before steepest descent is", cross_entropy
            print "perplexity is", perplexity
            if self.l2_regularization_const > 0.0:
                print "regularized loss is", loss
            print "number correctly classified is", num_correct, "of", num_examples
        for epoch_num in range(len(self.steepest_learning_rate)):
            print "At epoch", epoch_num+1, "of", len(self.steepest_learning_rate), "with learning rate", self.steepest_learning_rate[epoch_num]
            print "Training for epoch started at", datetime.datetime.now()
            start_frame = 0
            end_frame = 0
            cross_entropy = 0.0
            num_examples = 0
            if hasattr(self, 'momentum_rate'):
                momentum_rate = self.momentum_rate[epoch_num]
                print "momentum is", momentum_rate
            else:
                momentum_rate = 0.0
            if self.dropout != 0.0:
                # inverted-dropout style rescaling before training (dead while self.dropout == 0.0)
                print "dropout rate is", self.dropout
                self.model.weights['hidden_output'] /= (1 - self.dropout)
                self.model.weights['hidden_hidden'] /= (1 - self.dropout)
            for batch_index, feature_sequence_len in enumerate(self.feature_sequence_lens):
                end_frame = start_frame + feature_sequence_len
                batch_features = self.features[:feature_sequence_len, batch_index]
                batch_labels = self.labels[start_frame:end_frame,1]
                # fills `gradient` in place and returns the sequence cross-entropy
                cur_xent = self.calculate_gradient_single_batch(batch_features, batch_labels, gradient, return_cross_entropy = True, 
                                                                check_gradient = False, dropout = self.dropout)
                cross_entropy += cur_xent
                # scale the gradient by -lr once, then reuse it as both the step
                # and (via prev_step) the momentum term for the next iteration
                gradient *= -self.steepest_learning_rate[epoch_num]
                if self.l2_regularization_const > 0.0:
                    self.model *= (1-self.l2_regularization_const) #l2 regularization_const
                self.model += gradient #/ batch_size
                if momentum_rate > 0.0:
                    prev_step *= momentum_rate
                    self.model += prev_step
                # prev_step stores the already -lr-scaled gradient
                prev_step.assign_weights(gradient)
                start_frame = end_frame
            print "Training for epoch finished at", datetime.datetime.now()
            if self.dropout != 0.0:
                # undo the pre-training rescaling (dead while self.dropout == 0.0)
                self.model.weights['hidden_output'] *= (1 - self.dropout)
                self.model.weights['hidden_hidden'] *= (1 - self.dropout)
            if self.validation_feature_file_name is not None:
                cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
                print "cross-entropy at the end of the epoch is", cross_entropy
                print "perplexity is", perplexity
                if self.l2_regularization_const > 0.0:
                    print "regularized loss is", loss
                print "number correctly classified is", num_correct, "of", num_examples
            if self.save_each_epoch:
                self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
            print "Epoch finished at", datetime.datetime.now()
        end_time = datetime.datetime.now()
        print "Training finished at", end_time, "and ran for", end_time - start_time
    def backprop_steepest_descent(self):
        """Mini-batch steepest-descent training.

        Walks the sequences in chunks of self.backprop_batch_size, computes the
        batch gradient via calculate_gradient, and applies a scaled step with
        optional L2 weight decay and momentum.  Mutates self.model in place;
        optionally evaluates on the validation set each epoch and writes
        per-epoch weight files.
        """
        print "Starting backprop using steepest descent"
        prev_step = RNNLM_Weight()
        prev_step.init_zero_weights(self.model.get_architecture())
        if self.validation_feature_file_name is not None:
            cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
            print "cross-entropy before steepest descent is", cross_entropy
            print "perplexity is", perplexity
            if self.l2_regularization_const > 0.0:
                print "regularized loss is", loss
            print "number correctly classified is", num_correct, "of", num_examples
        for epoch_num in range(len(self.steepest_learning_rate)):
            print "At epoch", epoch_num+1, "of", len(self.steepest_learning_rate), "with learning rate", self.steepest_learning_rate[epoch_num]
            batch_index = 0
            end_index = 0
            cross_entropy = 0.0
            num_examples = 0
            if hasattr(self, 'momentum_rate'):
                momentum_rate = self.momentum_rate[epoch_num]
                print "momentum is", momentum_rate
            else:
                momentum_rate = 0.0
            while end_index < self.num_sequences: #run through the batches
                per_done = float(batch_index)/self.num_sequences*100
                sys.stdout.write("\r                                                                \r") #clear line
                sys.stdout.write("\r%.1f%% done " % per_done), sys.stdout.flush()
                if num_examples > 0:
                    ppp = cross_entropy / num_examples
                    sys.stdout.write("train X-ent: %f " % ppp), sys.stdout.flush()
                end_index = min(batch_index+self.backprop_batch_size,self.num_sequences)
                max_seq_len = max(self.feature_sequence_lens[batch_index:end_index])
                batch_inputs = self.features[:max_seq_len,batch_index:end_index]
                # labels are flat (sequence_index, label) rows; slice this batch's
                # rows and rebase the sequence index to start at 0
                start_frame = np.where(self.labels[:,0] == batch_index)[0][0]
                end_frame = np.where(self.labels[:,0] == end_index-1)[0][-1] + 1
                batch_labels = copy.deepcopy(self.labels[start_frame:end_frame,:])
                batch_labels[:,0] -= batch_labels[0,0]
                batch_fsl = self.feature_sequence_lens[batch_index:end_index]
                batch_size = self.batch_size(self.feature_sequence_lens[batch_index:end_index])
                num_examples += batch_size
                gradient, cur_xent = self.calculate_gradient(batch_inputs, batch_labels, batch_fsl, model=self.model, check_gradient = False, return_cross_entropy = True)
                cross_entropy += cur_xent
                if self.l2_regularization_const > 0.0:
                    self.model *= (1-self.l2_regularization_const) #l2 regularization_const
                self.model -= gradient * self.steepest_learning_rate[epoch_num] #/ batch_size
                if momentum_rate > 0.0:
                    self.model += prev_step * momentum_rate
                # prev_step keeps the -lr-scaled gradient for the next momentum term
                prev_step.assign_weights(gradient)
                prev_step *= -self.steepest_learning_rate[epoch_num] #/ batch_size
                del batch_labels
                batch_index += self.backprop_batch_size
            if self.validation_feature_file_name is not None:
                cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
                print "cross-entropy at the end of the epoch is", cross_entropy
                print "perplexity is", perplexity
                if self.l2_regularization_const > 0.0:
                    print "regularized loss is", loss
                print "number correctly classified is", num_correct, "of", num_examples
            sys.stdout.write("\r100.0% done \r")
            sys.stdout.write("\r                                                                \r") #clear line
            if self.save_each_epoch:
                self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
    def backprop_adagrad_single_batch(self):
        """Adagrad training, one sequence per update.

        Keeps a running sum of squared gradients (adagrad_weight, initialized
        to fudge_factor to avoid division by zero) and divides each gradient
        by its element-wise square root before the -lr step.  Mutates
        self.model in place and writes the final weights to self.output_name.
        """
        print "Starting backprop using adagrad"
        adagrad_weight = RNNLM_Weight()
        adagrad_weight.init_zero_weights(self.model.get_architecture())
        buffer_weight = RNNLM_Weight()
        buffer_weight.init_zero_weights(self.model.get_architecture())
        fudge_factor = 1.0
        adagrad_weight = adagrad_weight + fudge_factor
        gradient = RNNLM_Weight()
        gradient.init_zero_weights(self.model.get_architecture())
        for epoch_num in range(len(self.steepest_learning_rate)):
            print "At epoch", epoch_num+1, "of", len(self.steepest_learning_rate), "with learning rate", self.steepest_learning_rate[epoch_num]
            start_frame = 0
            end_frame = 0
            cross_entropy = 0.0
            num_examples = 0
            for batch_index, feature_sequence_len in enumerate(self.feature_sequence_lens):
                end_frame = start_frame + feature_sequence_len
                batch_features = self.features[:feature_sequence_len, batch_index]
                batch_labels = self.labels[start_frame:end_frame,1]
                # fills `gradient` in place and returns the sequence cross-entropy
                cur_xent = self.calculate_gradient_single_batch(batch_features, batch_labels, gradient, return_cross_entropy = True, 
                                                                check_gradient = False)
                if self.l2_regularization_const > 0.0:
                    # add the L2 term to the raw gradient before accumulating squares
                    buffer_weight.assign_weights(self.model)
                    buffer_weight *= self.l2_regularization_const
                    gradient += buffer_weight
                # adagrad accumulator update: adagrad_weight += gradient ** 2
                # (buffer_weight is reused as scratch to avoid allocations)
                buffer_weight.assign_weights(gradient)
                buffer_weight **= 2.0
                adagrad_weight += buffer_weight
                # normalize: gradient /= sqrt(adagrad_weight)
                buffer_weight.assign_weights(adagrad_weight)
                buffer_weight **= 0.5
                gradient /= buffer_weight
                cross_entropy += cur_xent
                per_done = float(batch_index)/self.num_sequences*100
                sys.stdout.write("\r                                                                \r") #clear line
                sys.stdout.write("\r%.1f%% done " % per_done), sys.stdout.flush()
                # running average cross-entropy per frame seen so far this epoch
                ppp = cross_entropy / end_frame
                sys.stdout.write("train X-ent: %f " % ppp), sys.stdout.flush()
                gradient *= -self.steepest_learning_rate[epoch_num]
                self.model += gradient #/ batch_size
                start_frame = end_frame
            if self.validation_feature_file_name is not None:
                cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
                print "cross-entropy at the end of the epoch is", cross_entropy
                print "perplexity is", perplexity
                if self.l2_regularization_const > 0.0:
                    print "regularized loss is", loss
                print "number correctly classified is", num_correct, "of", num_examples
            sys.stdout.write("\r100.0% done \r")
            sys.stdout.write("\r                                                                \r") #clear line
            if self.save_each_epoch:
                self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
        self.model.write_weights(self.output_name)
    def backprop_adagrad(self):
        """Mini-batch adagrad training.

        Same batching scheme as backprop_steepest_descent, but each step
        divides the gradient element-wise by the square root of a running sum
        of squared gradients (initialized to fudge_factor to avoid division by
        zero).  Mutates self.model in place; optionally evaluates on the
        validation set each epoch and writes per-epoch weight files.
        """
        print "Starting backprop using adagrad"
        adagrad_weight = RNNLM_Weight()
        adagrad_weight.init_zero_weights(self.model.get_architecture())
        fudge_factor = 1.0
        adagrad_weight = adagrad_weight + fudge_factor
        if self.validation_feature_file_name is not None:
            cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
            print "cross-entropy before adagrad is", cross_entropy
            print "perplexity is", perplexity
            if self.l2_regularization_const > 0.0:
                print "regularized loss is", loss
            print "number correctly classified is", num_correct, "of", num_examples
        for epoch_num in range(len(self.steepest_learning_rate)):
            print "At epoch", epoch_num+1, "of", len(self.steepest_learning_rate), "with learning rate", self.steepest_learning_rate[epoch_num]
            batch_index = 0
            end_index = 0
            cross_entropy = 0.0
            num_examples = 0
            while end_index < self.num_sequences: #run through the batches
                per_done = float(batch_index)/self.num_sequences*100
                sys.stdout.write("\r                                                                \r") #clear line
                sys.stdout.write("\r%.1f%% done " % per_done), sys.stdout.flush()
                if num_examples > 0:
                    ppp = cross_entropy / num_examples
                    sys.stdout.write("train X-ent: %f " % ppp), sys.stdout.flush()
                end_index = min(batch_index+self.backprop_batch_size,self.num_sequences)
                max_seq_len = max(self.feature_sequence_lens[batch_index:end_index])
                batch_inputs = self.features[:max_seq_len,batch_index:end_index]
                # labels are flat (sequence_index, label) rows; slice this batch's
                # rows and rebase the sequence index to start at 0
                start_frame = np.where(self.labels[:,0] == batch_index)[0][0]
                end_frame = np.where(self.labels[:,0] == end_index-1)[0][-1] + 1
                batch_labels = copy.deepcopy(self.labels[start_frame:end_frame,:])
                batch_labels[:,0] -= batch_labels[0,0]
                batch_fsl = self.feature_sequence_lens[batch_index:end_index]
                batch_size = self.batch_size(self.feature_sequence_lens[batch_index:end_index])
                num_examples += batch_size
                gradient, cur_xent = self.calculate_gradient(batch_inputs, batch_labels, batch_fsl, model=self.model, check_gradient = False, return_cross_entropy = True)
                cross_entropy += cur_xent
                if self.l2_regularization_const > 0.0:
                    gradient += (self.model * self.l2_regularization_const) #l2 regularization_const
                # adagrad: accumulate squared gradients, then scale the step
                adagrad_weight += gradient ** 2
                self.model -= (gradient / (adagrad_weight ** 0.5)) * self.steepest_learning_rate[epoch_num] #/ batch_size
                del batch_labels
                batch_index += self.backprop_batch_size
            if self.validation_feature_file_name is not None:
                cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
                print "cross-entropy at the end of the epoch is", cross_entropy
                print "perplexity is", perplexity
                if self.l2_regularization_const > 0.0:
                    print "regularized loss is", loss
                print "number correctly classified is", num_correct, "of", num_examples
            sys.stdout.write("\r100.0% done \r")
            sys.stdout.write("\r                                                                \r") #clear line
            if self.save_each_epoch:
                self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
def pearlmutter_forward_pass(self, inputs, unflattened_labels, feature_sequence_lens, direction, batch_size, hiddens=None, outputs=None, model=None, check_gradient=False, stop_at='output'): #need to test
"""let f be a function from inputs to outputs
consider the weights to be a vector w of parameters to be optimized, (and direction d to be the same)
pearlmutter_forward_pass calculates d' \jacobian_w f
stop_at is either 'linear', 'output', or 'loss' """
if model == None:
model = self.model
if hiddens == None or outputs == None:
outputs, hiddens = self.forward_pass(inputs, feature_sequence_lens, model, return_hiddens=True)
architecture = self.model.get_architecture()
max_sequence_observations = inputs.shape[0]
num_hiddens = architecture[1]
num_sequences = inputs.shape[1]
num_outs = architecture[2]
hidden_deriv = np.zeros((max_sequence_observations, num_sequences, num_hiddens))
output_deriv = np.zeros((max_sequence_observations, num_sequences, num_outs))
# if stop_at == 'loss':
# loss_deriv = np.zeros(output_deriv.shape)
#propagate hiddens
# print model.init_hiddens.shape
hidden_deriv[0,:,:] = (self.forward_layer(inputs[0,:], direction.weights['visible_hidden'], direction.bias['hidden'],
model.weight_type['visible_hidden'], prev_hiddens=model.init_hiddens,
hidden_hidden_weights=direction.weights['hidden_hidden'])
+ np.dot(direction.init_hiddens, model.weights['hidden_hidden'])) * hiddens[0,:,:] * (1 - hiddens[0,:,:])
linear_layer = (self.weight_matrix_multiply(hiddens[0,:,:], direction.weights['hidden_output'],
direction.bias['output']) +
np.dot(hidden_deriv[0,:,:], model.weights['hidden_output']))
if stop_at == 'linear':
output_deriv[0,:,:] = linear_layer
elif stop_at == 'output':
output_deriv[0,:,:] = linear_layer * outputs[0,:,:] - outputs[0,:,:] * np.sum(linear_layer * outputs[0,:,:], axis=1)[:,np.newaxis]
# if stop_at == 'loss':
# output_deriv[model.num_layers+1] = -np.array([(hidden_deriv[model.num_layers][index, labels[index]] / hiddens[model.num_layers][index, labels[index]])[0] for index in range(batch_size)])
for sequence_index in range(1, max_sequence_observations):
sequence_input = inputs[sequence_index,:]
hidden_deriv[sequence_index,:,:] = (self.forward_layer(sequence_input, direction.weights['visible_hidden'], direction.bias['hidden'],
model.weight_type['visible_hidden'], prev_hiddens=model.init_hiddens,
hidden_hidden_weights=direction.weights['hidden_hidden'])
+ np.dot(hidden_deriv[sequence_index-1,:,:], model.weights['hidden_hidden'])) * hiddens[sequence_index,:,:] * (1 - hiddens[sequence_index,:,:])
linear_layer = (self.weight_matrix_multiply(hiddens[sequence_index,:,:], direction.weights['hidden_output'],
direction.bias['output']) +
np.dot(hidden_deriv[sequence_index,:,:], model.weights['hidden_output']))
#find the observations where the sequence has ended,
#and then zero out hiddens and outputs, so nothing horrible happens during backprop, etc.
zero_input = np.where(feature_sequence_lens <= sequence_index)
hidden_deriv[sequence_index,zero_input,:] = 0.0
output_deriv[sequence_index,zero_input,:] = 0.0
if stop_at == 'linear':
output_deriv[sequence_index,:,:] = linear_layer
else:
output_deriv[sequence_index,:,:] = linear_layer * outputs[sequence_index,:,:] - outputs[sequence_index,:,:] * np.sum(linear_layer * outputs[sequence_index,:,:], axis=1)[:,np.newaxis]
# if stop_at == 'loss':
# loss_deriv[sequence_index,:,:] = -np.array([(hidden_deriv[model.num_layers][index, labels[index]] / hiddens[model.num_layers][index, labels[index]])[0] for index in range(batch_size)])
if not check_gradient:
return output_deriv, hidden_deriv
#compare with finite differences approximation
else:
epsilon = 1E-5
if stop_at == 'linear':
calculated = output_deriv
finite_diff_forward = self.forward_pass(inputs, model = model + direction * epsilon, linear_output=True)
finite_diff_backward = self.forward_pass(inputs, model = model - direction * epsilon, linear_output=True)
elif stop_at == 'output':
calculated = output_deriv
finite_diff_forward = self.forward_pass(inputs, model = model + direction * epsilon)
finite_diff_backward = self.forward_pass(inputs, model = model - direction * epsilon)
# elif stop_at == 'loss':
# calculated = hidden_deriv[model.num_layers + 1]
# finite_diff_forward = -np.log([max(self.forward_pass(inputs, model = model + direction * epsilon).item((x,labels[x])),1E-12) for x in range(labels.size)])
# finite_diff_backward = -np.log([max(self.forward_pass(inputs, model = model - direction * epsilon).item((x,labels[x])),1E-12) for x in range(labels.size)])
for seq in range(num_sequences):
finite_diff_approximation = ((finite_diff_forward - finite_diff_backward) / (2 * epsilon))[:,seq,:]
print "At sequence", seq
print "pearlmutter calculation"
print calculated[:,seq,:]
print "finite differences approximation, epsilon", epsilon
print finite_diff_approximation
sys.exit()
def calculate_per_example_cross_entropy(self, example_output, example_label):
if example_label.size > 1:
return -np.sum(np.log(np.clip(example_output, a_min=1E-12, a_max=1.0)) * example_label)
else:
return -np.log(np.clip(example_output[example_label], a_min=1E-12, a_max=1.0))
    def calculate_second_order_direction(self, inputs, unflattened_labels, feature_sequence_lens, batch_size, direction = None, model = None, second_order_type = None,
                                         hiddens = None, outputs=None, check_direction = False, structural_damping_const = 0.0): #need to test
        """Return a curvature-matrix/vector product (Gauss-Newton, Hessian, or Fisher).

        Parameters:
            inputs, unflattened_labels, feature_sequence_lens -- the minibatch
            batch_size -- number of observations, used to normalize the product
            direction -- RNNLM_Weight direction d; defaults to the gradient
            model -- RNNLM_Weight; defaults to self.model
            second_order_type -- 'gauss-newton' (default), 'hessian', or 'fisher'
            hiddens, outputs -- cached forward-pass activations (recomputed if None)
            check_direction -- if True, print a finite-difference comparison of
                the product and exit the process (debug only)
            structural_damping_const -- structural damping weight forwarded to
                the backward pass for the Gauss-Newton product

        Returns an RNNLM_Weight holding (curvature matrix) * direction / batch_size,
        plus direction * l2_regularization_const when l2 regularization is on.
        """
        #given an input direction direction, the function returns H*d, where H is the Hessian of the weight vector
        #the function does this efficient by using the Pearlmutter (1994) trick
        excluded_keys = {'bias': ['0'], 'weights': []}
        if model == None:
            model = self.model
        if direction == None:
            direction = self.calculate_gradient(inputs, unflattened_labels, feature_sequence_lens, check_gradient = False, model = model)
        if second_order_type == None:
            second_order_type='gauss-newton' #other option is 'hessian'
        if hiddens == None or outputs == None:
            outputs, hiddens = self.forward_pass(inputs, feature_sequence_lens, model = model, return_hiddens=True)
        if second_order_type == 'gauss-newton':
            # Gv: R-forward pass yields J*d, then the backward pass applies J'H_L
            output_deriv, hidden_deriv = self.pearlmutter_forward_pass(inputs, unflattened_labels, feature_sequence_lens, direction, batch_size, hiddens, outputs, model, stop_at='output') #nbatch x nout
            second_order_direction = self.backward_pass(output_deriv, hiddens, inputs, model, structural_damping_const, hidden_deriv)
        elif second_order_type == 'hessian':
            output_deriv, hidden_deriv = self.pearlmutter_forward_pass(inputs, unflattened_labels, feature_sequence_lens, direction, batch_size, hiddens, outputs, model, stop_at='output') #nbatch x nout
            second_order_direction = self.pearlmutter_backward_pass(hidden_deriv, unflattened_labels, hiddens, model, direction)
        elif second_order_type == 'fisher':
            # NOTE(review): the stop_at='loss' path in pearlmutter_forward_pass is
            # partially commented out, so this Fisher product looks unfinished
            output_deriv, hidden_deriv = self.pearlmutter_forward_pass(inputs, unflattened_labels, feature_sequence_lens, direction, batch_size, hiddens, outputs, model, stop_at='loss')#nbatch x nout
            weight_vec = output_deriv - unflattened_labels
            weight_vec *= hidden_deriv[model.num_layers+1][:, np.newaxis] #TODO: fix this line
            second_order_direction = self.backward_pass(weight_vec, hiddens, inputs, model)
        else:
            print second_order_type, "is not a valid type. Acceptable types are gauss-newton, hessian, and fisher... Exiting now..."
            sys.exit()
        if not check_direction:
            if self.l2_regularization_const > 0.0:
                return second_order_direction / batch_size + direction * self.l2_regularization_const
            return second_order_direction / batch_size
        ##### check direction only if you think there is a problem #######
        else:
            # Finite-difference verification of the curvature-vector product;
            # prints both versions and exits. Extremely slow -- debug only.
            finite_difference_model = RNNLM_Weight()
            finite_difference_model.init_zero_weights(self.model.get_architecture(), verbose=False)
            epsilon = 1E-5
            if second_order_type == 'gauss-newton':
                #assume that pearlmutter forward pass is correct because the function has a check_gradient flag to see if it's is
                sys.stdout.write("\r                                                                \r")
                sys.stdout.write("checking Gv\n"), sys.stdout.flush()
                linear_out = self.forward_pass(inputs, model = model, linear_output=True)
                num_examples = self.batch_size(feature_sequence_lens)
                finite_diff_forward = self.forward_pass(inputs, model = model + direction * epsilon, linear_output=True)
                finite_diff_backward = self.forward_pass(inputs, model = model - direction * epsilon, linear_output=True)
                finite_diff_jacobian_vec = (finite_diff_forward - finite_diff_backward) / (2 * epsilon)
                flat_finite_diff_jacobian_vec = self.flatten_output(finite_diff_jacobian_vec, feature_sequence_lens)
                flat_linear_out = self.flatten_output(linear_out, feature_sequence_lens)
                flat_labels = self.flatten_output(unflattened_labels, feature_sequence_lens)
                flat_finite_diff_HJv = np.zeros(flat_finite_diff_jacobian_vec.shape)
                num_outputs = flat_linear_out.shape[1]
                collapsed_hessian = np.zeros((num_outputs,num_outputs))
                for example_index in range(num_examples):
                    #calculate collapsed Hessian
                    # second-order finite difference of the per-example loss w.r.t.
                    # the linear (pre-softmax) outputs
                    direction1 = np.zeros(num_outputs)
                    direction2 = np.zeros(num_outputs)
                    for index1 in range(num_outputs):
                        for index2 in range(num_outputs):
                            direction1[index1] = epsilon
                            direction2[index2] = epsilon
                            example_label = np.array(flat_labels[example_index])
                            loss_plus_plus = self.calculate_per_example_cross_entropy(self.softmax(np.array([flat_linear_out[example_index] + direction1 + direction2])), example_label)
                            loss_plus_minus = self.calculate_per_example_cross_entropy(self.softmax(np.array([flat_linear_out[example_index] + direction1 - direction2])), example_label)
                            loss_minus_plus = self.calculate_per_example_cross_entropy(self.softmax(np.array([flat_linear_out[example_index] - direction1 + direction2])), example_label)
                            loss_minus_minus = self.calculate_per_example_cross_entropy(self.softmax(np.array([flat_linear_out[example_index] - direction1 - direction2])), example_label)
                            collapsed_hessian[index1,index2] = (loss_plus_plus + loss_minus_minus - loss_minus_plus - loss_plus_minus) / (4 * epsilon * epsilon)
                            direction1[index1] = 0.0
                            direction2[index2] = 0.0
                    # print collapsed_hessian
                    out = self.softmax(flat_linear_out[example_index:example_index+1])
                    # print np.diag(out[0]) - np.outer(out[0], out[0])
                    flat_finite_diff_HJv[example_index] += np.dot(collapsed_hessian, flat_finite_diff_jacobian_vec[example_index])
                obs_so_far = 0
                for sequence_index, num_obs in enumerate(feature_sequence_lens):
                    print "at sequence index", sequence_index
                    #calculate J'd = J'HJv
                    # perturb one parameter at a time to get rows of J, then dot
                    # with the per-example HJv computed above
                    update = RNNLM_Weight()
                    update.init_zero_weights(self.model.get_architecture(), verbose=False)
                    for index in range(direction.init_hiddens.size):
                        update.init_hiddens[0][index] = epsilon
                        #print direction.norm()
                        forward_loss = self.forward_pass(inputs[:,sequence_index:sequence_index+1,:], model = model + update, linear_output=True)
                        backward_loss = self.forward_pass(inputs[:,sequence_index:sequence_index+1,:], model = model - update, linear_output=True)
                        for obs_index in range(num_obs):
                            example_index = obs_so_far + obs_index
                            finite_difference_model.init_hiddens[0][index] += np.dot((forward_loss[obs_index,0,:] - backward_loss[obs_index,0,:]) / (2 * epsilon),
                                                                                     flat_finite_diff_HJv[example_index])
                        update.init_hiddens[0][index] = 0.0
                    for key in direction.bias.keys():
                        print "at bias key", key
                        for index in range(direction.bias[key].size):
                            update.bias[key][0][index] = epsilon
                            #print direction.norm()
                            forward_loss = self.forward_pass(inputs[:,sequence_index:sequence_index+1,:], model = model + update, linear_output=True)
                            backward_loss = self.forward_pass(inputs[:,sequence_index:sequence_index+1,:], model = model - update, linear_output=True)
                            for obs_index in range(num_obs):
                                example_index = obs_so_far + obs_index
                                finite_difference_model.bias[key][0][index] += np.dot((forward_loss[obs_index,0,:] - backward_loss[obs_index,0,:]) / (2 * epsilon),
                                                                                      flat_finite_diff_HJv[example_index])
                            update.bias[key][0][index] = 0.0
                    for key in direction.weights.keys():
                        print "at weight key", key
                        for index0 in range(direction.weights[key].shape[0]):
                            for index1 in range(direction.weights[key].shape[1]):
                                update.weights[key][index0][index1] = epsilon
                                forward_loss = self.forward_pass(inputs[:,sequence_index:sequence_index+1,:], model= model + update, linear_output=True)
                                backward_loss = self.forward_pass(inputs[:,sequence_index:sequence_index+1,:], model= model - update, linear_output=True)
                                for obs_index in range(num_obs):
                                    example_index = obs_so_far + obs_index
                                    finite_difference_model.weights[key][index0][index1] += np.dot((forward_loss[obs_index,0,:] - backward_loss[obs_index,0,:]) / (2 * epsilon),
                                                                                                   flat_finite_diff_HJv[example_index])
                                update.weights[key][index0][index1] = 0.0
                    obs_so_far += num_obs
            elif second_order_type == 'hessian':
                sys.stdout.write("\r                                                                \r")
                sys.stdout.write("checking Hv\n"), sys.stdout.flush()
                # Hv via central differences of the gradient along each coordinate.
                # NOTE(review): batch_size is passed as the 4th positional argument
                # to calculate_gradient here, while other call sites pass
                # check_gradient / feature_sequence_lens in that slot -- confirm
                # against the calculate_gradient signature.
                for batch_index in range(batch_size):
                    #assume that gradient calculation is correct
                    print "at batch index", batch_index
                    update = RNNLM_Weight()
                    update.init_zero_weights(self.model.get_architecture(), verbose=False)
                    current_gradient = self.calculate_gradient(inputs[:,batch_index:batch_index+1,:], unflattened_labels[:,batch_index:batch_index+1,:], batch_size, model=model, l2_regularization_const = 0.)
                    for key in finite_difference_model.bias.keys():
                        for index in range(direction.bias[key].size):
                            update.bias[key][0][index] = epsilon
                            forward_loss = self.calculate_gradient(inputs[:,batch_index:batch_index+1,:], unflattened_labels[:,batch_index:batch_index+1,:], batch_size,
                                                                   model = model + update, l2_regularization_const = 0.)
                            backward_loss = self.calculate_gradient(inputs[:,batch_index:batch_index+1,:], unflattened_labels[:,batch_index:batch_index+1,:], batch_size,
                                                                    model = model - update, l2_regularization_const = 0.)
                            finite_difference_model.bias[key][0][index] += direction.dot((forward_loss - backward_loss) / (2 * epsilon), excluded_keys)
                            update.bias[key][0][index] = 0.0
                    for key in finite_difference_model.weights.keys():
                        for index0 in range(direction.weights[key].shape[0]):
                            for index1 in range(direction.weights[key].shape[1]):
                                update.weights[key][index0][index1] = epsilon
                                forward_loss = self.calculate_gradient(inputs[:,batch_index:batch_index+1,:], unflattened_labels[:,batch_index:batch_index+1,:], batch_size,
                                                                       model = model + update, l2_regularization_const = 0.)
                                backward_loss = self.calculate_gradient(inputs[:,batch_index:batch_index+1,:], unflattened_labels[:,batch_index:batch_index+1,:], batch_size,
                                                                        model = model - update, l2_regularization_const = 0.)
                                finite_difference_model.weights[key][index0][index1] += direction.dot((forward_loss - backward_loss) / (2 * epsilon), excluded_keys)
                                update.weights[key][index0][index1] = 0.0
            elif second_order_type == 'fisher':
                sys.stdout.write("\r                                                                \r")
                sys.stdout.write("checking Fv\n"), sys.stdout.flush()
                # Fv as sum over examples of g (g . d) using per-example gradients
                for batch_index in range(batch_size):
                    #assume that gradient calculation is correct
                    print "at batch index", batch_index
                    current_gradient = self.calculate_gradient(inputs[:,batch_index:batch_index+1,:], unflattened_labels[:,batch_index:batch_index+1,:], batch_size, model = model, l2_regularization_const = 0.)
                    finite_difference_model += current_gradient * current_gradient.dot(direction, excluded_keys)
            print "calculated second order direction for init hiddens"
            print second_order_direction.init_hiddens
            print "finite difference approximation for init hiddens"
            print finite_difference_model.init_hiddens
            for bias_cur_layer in direction.bias.keys():
                print "calculated second order direction for bias", bias_cur_layer
                print second_order_direction.bias[bias_cur_layer]
                print "finite difference approximation for bias", bias_cur_layer
                print finite_difference_model.bias[bias_cur_layer]
            for weight_cur_layer in finite_difference_model.weights.keys():
                print "calculated second order direction for weights", weight_cur_layer
                print second_order_direction.weights[weight_cur_layer]
                print "finite difference approximation for weights", weight_cur_layer
                print finite_difference_model.weights[weight_cur_layer]
            sys.exit()
##########################################################
    def backprop_truncated_newton(self):
        """Train self.model with truncated-Newton (Hessian-free) optimization.

        For each minibatch: compute the gradient, run (preconditioned) linear
        conjugate gradient against the damped curvature matrix to get a step,
        apply the step, and adjust the damping factor with a Levenberg-
        Marquardt style trust-region heuristic (reduction ratio < 0.25 ->
        more damping, > 0.75 -> less). Uses instance state: features, labels,
        feature_sequence_lens, backprop_batch_size, num_epochs, model, etc.
        """
        print "Starting backprop using truncated newton"
        # cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.features, self.labels, self.feature_sequence_lens, self.model)
        # print "cross-entropy before steepest descent is", cross_entropy
        # print "perplexity before steepest descent is", perplexity
        # if self.l2_regularization_const > 0.0:
        #     print "regularized loss is", loss
        # print "number correctly classified is", num_correct, "of", num_examples
        excluded_keys = {'bias':['0'], 'weights':[]}
        damping_factor = self.truncated_newton_init_damping_factor
        preconditioner = None
        model_update = None
        cur_done = 0.0
        for epoch_num in range(self.num_epochs):
            print "Epoch", epoch_num+1, "of", self.num_epochs
            batch_index = 0
            end_index = 0
            while end_index < self.num_sequences: #run through the batches
                per_done = float(batch_index)/self.num_sequences*100
                sys.stdout.write("\r                                                                \r") #clear line
                sys.stdout.write("\r%.1f%% done " % per_done), sys.stdout.flush()
                # if per_done > cur_done + 1.0:
                #     cur_done = per_done
                #     cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.features, self.labels, self.feature_sequence_lens, self.model)
                #     print "cross-entropy before steepest descent is", cross_entropy
                #     print "perplexity before steepest descent is", perplexity
                #     if self.l2_regularization_const > 0.0:
                #         print "regularized loss is", loss
                #     print "number correctly classified is", num_correct, "of", num_examples
                # sys.stdout.write("\r                                                                \r") #clear line
                # sys.stdout.write("\rdamping factor is %f\r" % damping_factor), sys.stdout.flush()
                end_index = min(batch_index+self.backprop_batch_size,self.num_sequences)
                max_seq_len = max(self.feature_sequence_lens[batch_index:end_index])
                batch_inputs = self.features[:max_seq_len,batch_index:end_index]
                # labels are stored flat with the sentence id in column 0;
                # slice out the frames belonging to this batch of sentences
                start_frame = np.where(self.labels[:,0] == batch_index)[0][0]
                end_frame = np.where(self.labels[:,0] == end_index-1)[0][-1] + 1
                batch_unflattened_labels = copy.deepcopy(self.labels[start_frame:end_frame,:])
                # re-base the sentence ids so the first sentence in the batch is 0
                batch_unflattened_labels[:,0] -= batch_unflattened_labels[0,0]
                batch_fsl = self.feature_sequence_lens[batch_index:end_index]
                # batch_inputs = self.features[:,batch_index:end_index]
                # batch_unflattened_labels = self.unflattened_labels[:,batch_index:end_index,:]
                batch_size = self.batch_size(self.feature_sequence_lens[batch_index:end_index])
                # sys.stdout.write("\r                                                                \r") #clear line
                # sys.stdout.write("\rcalculating gradient\r"), sys.stdout.flush()
                gradient = self.calculate_gradient(batch_inputs, batch_unflattened_labels, batch_fsl, model=self.model, check_gradient = False)
                old_loss = self.calculate_loss(batch_inputs, batch_fsl, batch_unflattened_labels, batch_size, model=self.model)
                # NOTE(review): the Fisher preconditioner branch below is disabled
                # by the hard-coded False -- confirm whether it should honor
                # self.use_fisher_preconditioner
                if False: #self.use_fisher_preconditioner:
                    sys.stdout.write("\r                                                                \r")
                    sys.stdout.write("calculating diagonal Fisher matrix for preconditioner"), sys.stdout.flush()
                    preconditioner = self.calculate_fisher_diag_matrix(batch_inputs, batch_unflattened_labels, False, self.model, l2_regularization_const = 0.0)
                    # add regularization
                    #preconditioner = preconditioner + alpha / preconditioner.size(excluded_keys) * self.model.norm(excluded_keys) ** 2
                    preconditioner = (preconditioner + self.l2_regularization_const + damping_factor) ** (3./4.)
                    preconditioner = preconditioner.clip(preconditioner.max(excluded_keys) * self.fisher_preconditioner_floor_val, float("Inf"))
                model_update, model_vals = self.conjugate_gradient(batch_inputs, batch_unflattened_labels, batch_fsl, batch_size, self.truncated_newton_num_cg_epochs,
                                                                   model=self.model, damping_factor=damping_factor, preconditioner=preconditioner,
                                                                   gradient=gradient, second_order_type=self.second_order_matrix,
                                                                   init_search_direction=None, verbose = False,
                                                                   structural_damping_const = self.structural_damping_const)
                # predicted reduction from the CG quadratic model
                model_den = model_vals[-1] #- model_vals[0]
                self.model += model_update
                new_loss = self.calculate_loss(batch_inputs, batch_fsl, batch_unflattened_labels, batch_size, model=self.model)
                # actual reduction; ratio to predicted drives the damping update
                model_num = (new_loss - old_loss) / batch_size
                # sys.stdout.write("\r                                                                \r") #clear line
                # print "model ratio is", model_num / model_den,
                if model_num / model_den < 0.25:
                    damping_factor *= 1.5
                elif model_num / model_den > 0.75:
                    damping_factor *= 2./3.
                batch_index += self.backprop_batch_size
            cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.features, self.labels, self.feature_sequence_lens, self.model)
            print "cross-entropy before steepest descent is", cross_entropy
            print "perplexity before steepest descent is", perplexity
            if self.l2_regularization_const > 0.0:
                print "regularized loss is", loss
            print "number correctly classified is", num_correct, "of", num_examples
            sys.stdout.write("\r100.0% done \r")
            if self.save_each_epoch:
                self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
    def conjugate_gradient(self, batch_inputs, batch_unflattened_labels, batch_feature_sequence_lens, batch_size, num_epochs, model = None, damping_factor = 0.0, #seems to be correct, compare with conjugate_gradient.py
                           verbose = False, preconditioner = None, gradient = None, second_order_type='gauss-newton',
                           init_search_direction = None, structural_damping_const = 0.0):
        """minimizes function q_x(p) = \grad_x f(x)' p + 1/2 * p'Gp (where x is fixed) use linear conjugate gradient

        Runs at most num_epochs preconditioned CG iterations against the
        (damped) curvature matrix G supplied by calculate_second_order_direction,
        starting from init_search_direction if given. Terminates early on
        non-positive curvature or when the Martens relative-progress test over
        a trailing window is satisfied.

        Returns (model_update, model_vals) where model_update is the summed
        step (an RNNLM_Weight) and model_vals is the list of quadratic-model
        values after each iteration.
        """
        if verbose:
            print "preconditioner is", preconditioner
        excluded_keys = {'bias':['0'], 'weights':[]}
        if model == None:
            model = self.model
        tolerance = 5E-4
        gap_ratio = 0.1
        min_gap = 10
        #max_test_gap = int(np.max([np.ceil(gap_ratio * num_epochs), min_gap]) + 1)
        model_vals = list()
        model_update = RNNLM_Weight()
        model_update.init_zero_weights(model.get_architecture())
        # NOTE(review): other call sites invoke forward_pass as
        # (inputs, feature_sequence_lens, model, ...); here model is passed in
        # the feature_sequence_lens position -- confirm the signature, this
        # looks like it should be (batch_inputs, batch_feature_sequence_lens, model, ...)
        outputs, hiddens = self.forward_pass(batch_inputs, model, return_hiddens=True)
        if gradient == None:
            # NOTE(review): batch_size is passed as the 4th positional argument,
            # but at other call sites the 4th argument is check_gradient -- verify
            gradient = self.calculate_gradient(batch_inputs, batch_unflattened_labels, batch_feature_sequence_lens, batch_size, model = model, hiddens = hiddens, outputs = outputs)
        if init_search_direction == None:
            model_vals.append(0)
            residual = gradient
        else:
            # warm start: residual r = g + G p0 and model value 0.5 p0'(g + r)
            second_order_direction = self.calculate_second_order_direction(batch_inputs, batch_unflattened_labels, batch_feature_sequence_lens, batch_size, init_search_direction,
                                                                           model, second_order_type=second_order_type, hiddens = hiddens,
                                                                           structural_damping_const = structural_damping_const * damping_factor)
            residual = gradient + second_order_direction
            model_val = 0.5 * init_search_direction.dot(gradient + residual, excluded_keys)
            model_vals.append(model_val)
            model_update += init_search_direction
        if verbose:
            print "model val at end of epoch is", model_vals[-1]
        if preconditioner != None:
            preconditioned_residual = residual / preconditioner
        else:
            preconditioned_residual = residual
        search_direction = -preconditioned_residual
        residual_dot = residual.dot(preconditioned_residual, excluded_keys)
        for epoch in range(num_epochs):
            # print "\r                                                                \r", #clear line
            # sys.stdout.write("\rconjugate gradient epoch %d of %d\r" % (epoch+1, num_epochs)), sys.stdout.flush()
            if damping_factor > 0.0:
                #TODO: check to see if ... + search_direction * damping_factor is correct with structural damping
                second_order_direction = self.calculate_second_order_direction(batch_inputs, batch_unflattened_labels, batch_feature_sequence_lens, batch_size, search_direction, model, second_order_type=second_order_type, hiddens = hiddens,
                                                                               structural_damping_const = damping_factor * structural_damping_const) + search_direction * damping_factor
            else:
                second_order_direction = self.calculate_second_order_direction(batch_inputs, batch_unflattened_labels, batch_feature_sequence_lens, batch_size, search_direction, model, second_order_type=second_order_type, hiddens = hiddens)
            curvature = search_direction.dot(second_order_direction,excluded_keys)
            if curvature <= 0:
                # CG requires positive definiteness along the search direction
                print "curvature must be positive, but is instead", curvature, "returning current weights"
                break
            step_size = residual_dot / curvature
            if verbose:
                print "residual dot search direction is", residual.dot(search_direction, excluded_keys)
                print "residual dot is", residual_dot
                print "curvature is", curvature
                print "step size is", step_size
            model_update += search_direction * step_size
            residual += second_order_direction * step_size
            # quadratic model value phi(p) = 0.5 p'(g + r)
            model_val = 0.5 * model_update.dot(gradient + residual, excluded_keys)
            model_vals.append(model_val)
            if verbose:
                print "model val at end of epoch is", model_vals[-1]
            # Martens-style relative progress termination over a trailing window
            test_gap = int(np.max([np.ceil(epoch * gap_ratio), min_gap]))
            if epoch > test_gap: #checking termination condition
                previous_model_val = model_vals[-test_gap]
                if (previous_model_val - model_val) / model_val <= tolerance * test_gap and previous_model_val < 0:
                    print "\r                                                                \r", #clear line
                    sys.stdout.write("\rtermination condition satisfied for conjugate gradient, returning step\r"), sys.stdout.flush()
                    break
            if preconditioner != None:
                preconditioned_residual = residual / preconditioner
            else:
                preconditioned_residual = residual
            new_residual_dot = residual.dot(preconditioned_residual, excluded_keys)
            conjugate_gradient_const = new_residual_dot / residual_dot
            search_direction = -preconditioned_residual + search_direction * conjugate_gradient_const
            residual_dot = new_residual_dot
        return model_update, model_vals
def unflatten_labels(self, labels, sentence_ids):
num_frames_per_sentence = np.bincount(sentence_ids)
num_outs = len(np.unique(labels))
max_num_frames_per_sentence = np.max(num_frames_per_sentence)
unflattened_labels = np.zeros((max_num_frames_per_sentence, np.max(sentence_ids) + 1, num_outs)) #add one because first sentence starts at 0
current_sentence_id = 0
observation_index = 0
for label, sentence_id in zip(labels,sentence_ids):
if sentence_id != current_sentence_id:
current_sentence_id = sentence_id
observation_index = 0
unflattened_labels[observation_index, sentence_id, label] = 1.0
observation_index += 1
return unflattened_labels
    def backprop_steepest_descent_single_batch_semi_newbob(self):
        """SGD trainer, one sentence per update, with a newbob-style schedule.

        Each epoch runs one gradient step per sentence (with optional momentum
        and l2 weight decay), then evaluates on the validation set. If the
        number correctly classified improved, the weights are saved as the new
        best; otherwise training reloads the previous best weights and counts
        a decrease. After repeated decreases the learning and momentum rates
        are halved, and training stops once the decrease budget is exhausted.
        Requires validation_feature_file_name to be set.
        """
        print "Starting backprop using steepest descent newbob"
        start_time = datetime.datetime.now()
        print "Training started at", start_time
        prev_step = RNNLM_Weight()
        prev_step.init_zero_weights(self.model.get_architecture(), maxent = self.use_maxent, nonlinearity = self.nonlinearity)
        gradient = RNNLM_Weight()
        gradient.init_zero_weights(self.model.get_architecture(), maxent = self.use_maxent, nonlinearity = self.nonlinearity)
        if self.validation_feature_file_name is not None:
            cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
            print "cross-entropy before steepest descent is", cross_entropy
            print "perplexity is", perplexity
            if self.l2_regularization_const > 0.0:
                print "regularized loss is", loss
            print "number correctly classified is %d of %d (%.2f%%)" % (num_correct, num_examples, 100.0 * num_correct / num_examples)
        # NOTE(review): if validation_feature_file_name is None, cross_entropy and
        # num_correct below are unbound and this raises NameError rather than the
        # explicit ValueError used later in the loop -- confirm intent
        learning_rate = self.steepest_learning_rate[0]
        if hasattr(self, 'momentum_rate'):
            momentum_rate = self.momentum_rate[0]
            print "momentum is", momentum_rate
        else:
            momentum_rate = 0.0
        num_decreases = 0
        prev_cross_entropy = cross_entropy
        prev_num_correct = num_correct
        for epoch_num in range(1000):
            print "At epoch", epoch_num+1, "with learning rate", learning_rate, "and momentum", momentum_rate
            print "Training for epoch started at", datetime.datetime.now()
            start_frame = 0
            end_frame = 0
            cross_entropy = 0.0
            num_examples = 0
            # one update per sentence; labels are flat with the word id in column 1
            for batch_index, feature_sequence_len in enumerate(self.feature_sequence_lens):
                end_frame = start_frame + feature_sequence_len
                batch_features = self.features[:feature_sequence_len, batch_index]
                batch_labels = self.labels[start_frame:end_frame,1]
                cur_xent = self.calculate_gradient_single_batch(batch_features, batch_labels, gradient, return_cross_entropy = True,
                                                                check_gradient = False)
                cross_entropy += cur_xent
                # per_done = float(batch_index)/self.num_sequences*100
                # sys.stdout.write("\r                                                                \r") #clear line
                # sys.stdout.write("\r%.1f%% done " % per_done), sys.stdout.flush()
                # ppp = cross_entropy / end_frame
                # sys.stdout.write("train X-ent: %f " % ppp), sys.stdout.flush()
                # gradient is scaled in place and reused as this step's delta
                gradient *= -learning_rate
                if self.l2_regularization_const > 0.0:
                    self.model *= (1-self.l2_regularization_const) #l2 regularization_const
                self.model += gradient #/ batch_size
                if momentum_rate > 0.0:
                    prev_step *= momentum_rate
                    self.model += prev_step
                prev_step.assign_weights(gradient)
                # prev_step *= -self.steepest_learning_rate[epoch_num]
                start_frame = end_frame
            print "Training for epoch finished at", datetime.datetime.now()
            if self.validation_feature_file_name is not None:
                cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
                print "cross-entropy at the end of the epoch is", cross_entropy
                print "perplexity is", perplexity
                if self.l2_regularization_const > 0.0:
                    print "regularized loss is", loss
                print "number correctly classified is %d of %d (%.2f%%)" % (num_correct, num_examples, 100.0 * num_correct / num_examples)
            else:
                raise ValueError("validation feature file must exist")
            # accept the epoch only if validation accuracy improved
            if prev_num_correct < num_correct:
                prev_cross_entropy = cross_entropy
                prev_num_correct = num_correct
                self.model.write_weights(''.join([self.output_name, '_best_weights']))
                if self.save_each_epoch:
                    self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
                print "num_decreases so far is", num_decreases
                # NOTE(review): the decrease budget is hard-coded to 2 here,
                # while the sibling backprop_steepest_descent_semi_newbob uses
                # self.max_num_decreases -- confirm whether they should agree
                if num_decreases == 2:
                    learning_rate /= 2.0
                    momentum_rate /= 2.0
            else:
                num_decreases += 1
                print "cross-entropy did not decrease, so using previous best weights"
                self.model.open_weights(''.join([self.output_name, '_best_weights']))
                if num_decreases > 2: break
                learning_rate /= 2.0
                momentum_rate /= 2.0
            # sys.stdout.write("\r100.0% done \r")
            # sys.stdout.write("\r                                                                \r") #clear line
            print "Epoch finished at", datetime.datetime.now()
        self.model.write_weights(self.output_name)
        end_time = datetime.datetime.now()
        print "Training finished at", end_time, "and ran for", end_time - start_time
    def backprop_steepest_descent_semi_newbob(self):
        """Minibatch SGD trainer with a newbob-style learning-rate schedule.

        Each epoch runs one gradient step per minibatch (optional momentum and
        l2 weight decay), then evaluates on the validation set. An epoch is
        accepted only if validation cross-entropy improved by more than 1%;
        otherwise the previous best weights are reloaded and a decrease is
        counted (decreases during the initial learning-rate search are counted
        separately and capped at 15). After max_num_decreases the rates are
        halved each epoch, and training stops once the budget is exceeded.
        Requires validation_feature_file_name to be set.
        """
        print "Starting backprop using steepest descent newbob"
        start_time = datetime.datetime.now()
        print "Training started at", start_time
        prev_step = RNNLM_Weight()
        prev_step.init_zero_weights(self.model.get_architecture(), maxent = self.use_maxent, nonlinearity = self.nonlinearity)
        gradient = RNNLM_Weight()
        gradient.init_zero_weights(self.model.get_architecture(), maxent = self.use_maxent, nonlinearity = self.nonlinearity)
        # seed the best-weights file so the reload path below always has a target
        self.model.write_weights(''.join([self.output_name, '_best_weights']))
        if self.validation_feature_file_name is not None:
            cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
            print "cross-entropy before steepest descent is", cross_entropy
            print "perplexity is", perplexity
            if self.l2_regularization_const > 0.0:
                print "regularized loss is", loss
            print "number correctly classified is %d of %d (%.2f%%)" % (num_correct, num_examples, 100.0 * num_correct / num_examples)
        # NOTE(review): as in the single-batch variant, cross_entropy/num_correct
        # are unbound here when no validation file is configured
        learning_rate = self.steepest_learning_rate[0]
        if hasattr(self, 'momentum_rate'):
            momentum_rate = self.momentum_rate[0]
        else:
            momentum_rate = 0.0
        num_decreases = 0
        prev_cross_entropy = cross_entropy
        prev_num_correct = num_correct
        # is_init tracks the initial learning-rate search phase
        is_init = True
        init_decreases = 0
        for epoch_num in range(1000):
            batch_index = 0
            end_index = 0
            cross_entropy = 0.0
            num_examples = 0
            print "The learning rate is", learning_rate, "and momentum is", momentum_rate
            while end_index < self.num_sequences: #run through the batches
                per_done = float(batch_index)/self.num_sequences*100
                sys.stdout.write("\r                                                                \r") #clear line
                sys.stdout.write("\r%.1f%% done " % per_done), sys.stdout.flush()
                if num_examples > 0:
                    ppp = cross_entropy / num_examples
                    sys.stdout.write("train X-ent: %f " % ppp), sys.stdout.flush()
                end_index = min(batch_index+self.backprop_batch_size,self.num_sequences)
                max_seq_len = max(self.feature_sequence_lens[batch_index:end_index])
                batch_inputs = self.features[:max_seq_len,batch_index:end_index]
                # labels are flat with the sentence id in column 0; slice out the
                # frames for this batch and re-base the sentence ids to 0
                start_frame = np.where(self.labels[:,0] == batch_index)[0][0]
                end_frame = np.where(self.labels[:,0] == end_index-1)[0][-1] + 1
                batch_labels = copy.deepcopy(self.labels[start_frame:end_frame,:])
                batch_labels[:,0] -= batch_labels[0,0]
                batch_fsl = self.feature_sequence_lens[batch_index:end_index]
                batch_size = self.batch_size(self.feature_sequence_lens[batch_index:end_index])
                num_examples += batch_size
                # sys.stdout.write("\r                                                                \r") #clear line
                # sys.stdout.write("\rcalculating gradient\r"), sys.stdout.flush()
                gradient, cur_xent = self.calculate_gradient(batch_inputs, batch_labels, batch_fsl, model=self.model, check_gradient = False, return_cross_entropy = True)
                # print np.max(np.abs(gradient.weights['hidden_output']))
                cross_entropy += cur_xent
                if self.l2_regularization_const > 0.0:
                    self.model *= (1-self.l2_regularization_const) #l2 regularization_const
                self.model -= gradient * learning_rate #/ batch_size
                if momentum_rate > 0.0:
                    self.model += prev_step * momentum_rate
                # remember this step (scaled) for the next iteration's momentum
                prev_step.assign_weights(gradient)
                prev_step *= -learning_rate #/ batch_size
                del batch_labels
                batch_index += self.backprop_batch_size
            print ""
            print "Training for epoch finished at", datetime.datetime.now()
            if self.validation_feature_file_name is not None:
                cross_entropy, perplexity, num_correct, num_examples, loss = self.calculate_classification_statistics(self.validation_features, self.validation_labels, self.validation_fsl, self.model)
                print "cross-entropy at the end of the epoch is", cross_entropy
                print "perplexity is", perplexity
                if self.l2_regularization_const > 0.0:
                    print "regularized loss is", loss
                print "number correctly classified is %d of %d (%.2f%%)" % (num_correct, num_examples, 100.0 * num_correct / num_examples)
            else:
                raise ValueError("validation feature file must exist")
            if self.save_each_epoch:
                self.model.write_weights(''.join([self.output_name, '_epoch_', str(epoch_num+1)]))
            # accept only if validation cross-entropy improved by more than 1%
            if 1.01 * cross_entropy < prev_cross_entropy:
                is_init = False
                prev_cross_entropy = cross_entropy
                prev_num_correct = num_correct
                self.model.write_weights(''.join([self.output_name, '_best_weights']))
                print "num_decreases so far is", num_decreases
                if num_decreases == self.max_num_decreases:
                    learning_rate /= 2.0
                    momentum_rate /= 2.0
            else:
                if is_init: init_decreases += 1
                if init_decreases == 15:
                    print "Tried to find initial learning rate, but failed, quitting"
                    break
                if not is_init: num_decreases += 1 #don't count num_decreases when trying to find initial learning rate
                print "cross-entropy did not decrease, so using previous best weights"
                self.model.open_weights(''.join([self.output_name, '_best_weights']))
                if num_decreases > self.max_num_decreases: break
                learning_rate /= 2.0
                momentum_rate /= 2.0
            # sys.stdout.write("\r100.0% done \r")
            # sys.stdout.write("\r                                                                \r") #clear line
            print "Epoch finished at", datetime.datetime.now()
        self.model.write_weights(self.output_name)
        end_time = datetime.datetime.now()
        print "Training finished at", end_time, "and ran for", end_time - start_time
def init_arg_parser():
    """Build the command-line parser for the RNNLM train/test script.

    Exposes --mode and --config_file plus one optional flag per training or
    testing configuration variable; every flag is optional so a config file
    may supply values instead.
    """
    train_required = ['feature_file_name', 'output_name']
    train_all = train_required + ['label_file_name', 'num_hiddens', 'weight_matrix_name',
                                  'save_each_epoch',
                                  'l2_regularization_const',
                                  'steepest_learning_rate', 'momentum_rate',
                                  'validation_feature_file_name', 'validation_label_file_name',
                                  'use_maxent', 'nonlinearity',
                                  'backprop_batch_size', 'max_num_decreases',
                                  'seed']
    test_required = ['feature_file_name', 'weight_matrix_name', 'output_name']
    test_all = test_required + ['label_file_name']

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', help='mode for DNN, either train or test', required=False)
    parser.add_argument('--config_file', help='configuration file to read in you do not want to input arguments via command line', required=False)
    for flag_name in train_all:
        parser.add_argument('--' + flag_name, required=False)
    # register any test-only flags not already added by the training loop
    for flag_name in test_all:
        if flag_name not in train_all:
            parser.add_argument('--' + flag_name, required=False)
    return parser
if __name__ == '__main__':
    # Entry point: collect configuration either from a config file or from
    # command-line flags, then dispatch to RNNLM training or testing.
    #script_name, config_filename = sys.argv
    #print "Opening config file: %s" % config_filename
    script_name = sys.argv[0]
    parser = init_arg_parser()
    config_dictionary = vars(parser.parse_args())
    if config_dictionary['config_file'] != None :
        # a config file overrides every other command-line argument
        config_filename = config_dictionary['config_file']
        print "Since", config_filename, "is specified, ignoring other arguments"
        try:
            config_file=open(config_filename)
        except IOError:
            print "Could open file", config_filename, ". Usage is ", script_name, "<config file>... Exiting Now"
            sys.exit()
        del config_dictionary
        #read lines into a configuration dictionary, skipping lines that begin with #
        config_dictionary = dict([line.replace(" ", "").strip(' \n\t').split('=') for line in config_file
                                  if not line.replace(" ", "").strip(' \n\t').startswith('#') and '=' in line])
        config_file.close()
    else:
        #remove empty keys
        config_dictionary = dict([(arg,value) for arg,value in config_dictionary.items() if value != None])
    try:
        mode=config_dictionary['mode']
    except KeyError:
        print 'No mode found, must be train or test... Exiting now'
        sys.exit()
    else:
        # mode must be exactly 'train' or 'test'
        if (mode != 'train') and (mode != 'test'):
            print "Mode", mode, "not understood. Should be either train or test... Exiting now"
            sys.exit()
    if mode == 'test':
        test_object = RNNLM_Tester(config_dictionary)
    else: #mode ='train'
        train_object = RNNLM_Trainer(config_dictionary)
        train_object.backprop_steepest_descent_semi_newbob()
    print "Finished without Runtime Error!"
992,317 | 9bddbea9642f9c090a2a077aa3827db849532e9d | # Generated by Django 2.2.6 on 2019-10-21 17:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the polls app (auto-generated by makemigrations).

    Creates the survey/question/choice models, a Person profile linked
    one-to-one with the configured auth user model, and the mapping tables
    MapUserSurvey / MapSurveyQuestion.
    """

    initial = True

    dependencies = [
        # Person and MapUserSurvey reference the swappable user model
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # a reusable, named collection of answer choices
        migrations.CreateModel(
            name='ChoiceList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Survey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField()),
            ],
        ),
        # a question; range_* fields apply to NUMBER_RANGE questions and
        # choice_list to the multiple-choice data types
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('data_type', models.CharField(choices=[('TEXT', 'Text'), ('NUMBER', 'Number'), ('NUMBER_RANGE', 'Numeric Range'), ('MSMC', 'Multi Select Multiple Choice'), ('SSMC', 'Single Select Multiple Choice')], default='SSMC', max_length=50)),
                ('range_min', models.IntegerField(blank=True, null=True)),
                ('range_max', models.IntegerField(blank=True, null=True)),
                ('range_step', models.IntegerField(blank=True, null=True)),
                ('choice_list', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='polls.ChoiceList')),
            ],
        ),
        # per-user profile data attached to the auth user
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name_first', models.CharField(blank=True, max_length=100, null=True)),
                ('name_second', models.CharField(blank=True, max_length=100, null=True)),
                ('name_last', models.CharField(blank=True, max_length=100, null=True)),
                ('birth_date', models.DateField(blank=True, null=True)),
                ('sex', models.CharField(blank=True, choices=[('M', 'М'), ('F', 'Ж')], max_length=10, null=True)),
                ('title', models.CharField(blank=True, max_length=500, null=True)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # one row per user's pass through a survey, with timing info
        migrations.CreateModel(
            name='MapUserSurvey',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time_start', models.DateTimeField(blank=True, null=True)),
                ('time_end', models.DateTimeField(blank=True, null=True)),
                ('time_visit', models.DateTimeField(blank=True, null=True)),
                ('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='users', to='polls.Survey')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='surveys', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # ordered many-to-many between surveys and questions
        migrations.CreateModel(
            name='MapSurveyQuestion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField()),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='surveys', to='polls.Question')),
                ('survey', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='polls.Survey')),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=500)),
                ('choice_list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='polls.ChoiceList')),
            ],
        ),
        # one answer per (survey instance, question); raw value kept as text
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', models.CharField(blank=True, max_length=2000, null=True)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='polls.Question')),
                ('survey_instance', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='polls.MapUserSurvey')),
            ],
        ),
    ]
|
992,318 | 14d83079095c5e1fec9c2b16c4f1b8ad58c5e28d | #!/usr/bin/env python3
import signal
import sys
import time
import wiringpi as pi
# GPIO pin assignments (BCM numbering, per wiringPiSetupGpio() in main):
# trigger is driven as OUTPUT, echo read as INPUT — presumably an
# HC-SR04-style ultrasonic ranger; confirm wiring before changing.
trigger_pin = 17
echo_pin = 23
def send_trigger_pulse() -> None:
    # Pulse the trigger pin high for ~100 us to start one measurement.
    pi.digitalWrite(trigger_pin, pi.HIGH)
    time.sleep(.0001)
    pi.digitalWrite(trigger_pin, pi.LOW)
def wait_for_echo(value, timeout) -> None:
    """Busy-wait until echo_pin reads *value* or *timeout* polls have elapsed.

    The pin is sampled once per loop iteration; there is no sleep, so
    *timeout* is a poll count, not a duration.
    """
    polls_left = timeout
    while pi.digitalRead(echo_pin) != value and polls_left > 0:
        polls_left -= 1
def get_distance() -> float:
    """Measure one distance sample.

    Times the echo pulse and divides by 58 us; for an HC-SR04-style
    sensor that yields centimetres — confirm against the datasheet.
    """
    send_trigger_pulse()
    wait_for_echo(True, 10000)
    echo_start = time.time()
    wait_for_echo(False, 10000)
    echo_duration = time.time() - echo_start
    return echo_duration / .000058
def exit(signal, frame):
    """Signal handler: report which signal fired, then terminate with status 1.

    NOTE: intentionally named like the builtin exit(); kept for
    compatibility with the registrations in main().
    """
    message = "signal: {}. frame: {}".format(signal, frame)
    print(message)
    sys.exit(1)
def main():
    """ entrypoint
    wiringpi needs sudo
    """
    # install the same terminating handler for every signal we care about
    for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT):
        signal.signal(sig, exit)
    # BCM pin numbering; trigger drives the sensor, echo is read back
    pi.wiringPiSetupGpio()
    pi.pinMode(trigger_pin, pi.OUTPUT)
    pi.pinMode(echo_pin, pi.INPUT)
    while True:
        print("{}cm".format(get_distance()))
        time.sleep(1)
if __name__ == "__main__":
    # run the measurement loop only when executed directly
    main()
|
992,319 | 3db0c1f7f8c6e7cddce7ab9c2fda4382caec4e2b | import AllModules as am
import json as js
################################################################################################################################################################################################################
################################################################################################################################################################################################################
#########################################  These functions parse the run table and perform operations such as record query, record update, and record addition.  ###################################################
################################################################################################################################################################################################################
################################################################################################################################################################################################################
################### Unicode Operations to form CURL commands ###################
def QueryAllow():
    """Rate-limit API queries to at most 5 per rolling minute.

    The file at am.QueryFilePath holds alternating lines of
    (query count, timestamp).  Returns (AllowQuery, TimeToWait) where
    TimeToWait is -1 when no waiting is needed, otherwise the number of
    seconds the caller should sleep before retrying.

    Fixes vs. the original:
      * QueryNumberList[len(QueryNumberList - 1)] raised TypeError
        (list minus int); now uses [-1].
      * ScanLines.index(entry) returned the FIRST occurrence, so duplicate
        lines broke the even/odd partition; now uses enumerate().
      * datetime/os were unresolved names (module only imports am/js);
        imported locally here.
      * the open handle is closed before the log file is deleted.
    """
    import os
    from datetime import datetime
    # "a+" creates the log file on first use so the read below cannot fail
    QueryFile = open(am.QueryFilePath, "a+")
    ScanLines = [line.rstrip('\n') for line in open(am.QueryFilePath)]
    QueryNumberList = []
    QueryTimeList = []
    TimeToWait = -1
    if ScanLines:
        for line_index, entry in enumerate(ScanLines):
            if line_index % 2 == 0:
                QueryNumberList.append(int(entry))
            else:
                QueryTimeList.append(entry)  # absolute timestamp string
    else:
        QueryNumberList.append(0)
    LastQueryNumber = QueryNumberList[-1]
    if LastQueryNumber < 5:
        AllowQuery = True
        QueryFile.write(str(LastQueryNumber + 1) + "\n")
        QueryFile.write(str(datetime.now()) + "\n")
        QueryFile.close()
    elif LastQueryNumber == 5:
        TimeSinceFirstQuery = (datetime.now() - datetime.strptime(QueryTimeList[0], "%Y-%m-%d %H:%M:%S.%f")).total_seconds()
        if TimeSinceFirstQuery > 60:
            # window expired: reset the log and count this query as the first
            AllowQuery = True
            QueryFile.close()  # release the handle before deleting the file
            os.system("rm %s" % am.QueryFilePath)
            QueryFile = open(am.QueryFilePath, "a+")
            QueryFile.write(str(1) + "\n")
            QueryFile.write(str(datetime.now()) + "\n")
            QueryFile.close()
        else:
            TimeToWait = 65 - TimeSinceFirstQuery
            AllowQuery = False
    else:
        # defensive: a corrupt log (count > 5) previously left AllowQuery
        # unbound (NameError); treat it as "wait a full window"
        QueryFile.close()
        AllowQuery = False
        TimeToWait = 65
    return AllowQuery, TimeToWait
def QueryGreenSignal(Bypass):
    """Block until a query is permitted by QueryAllow(); return True.

    If Bypass == True the rate limiter is skipped entirely.  The original
    loop had unreachable `break` statements after each `return`; they are
    removed.  `time` was an unresolved name (the module only imports am/js),
    so it is imported locally.
    """
    import time
    if Bypass == True:  # keep the original's exact comparison semantics
        return True
    while True:
        IsQueryAllowed, TimeToWait = QueryAllow()
        if IsQueryAllowed:
            return True
        time.sleep(TimeToWait)
def DoubleQuotes(string):
    """Wrap *string* in URL-encoded double quotes (%22...%22)."""
    return '%22{0}%22'.format(string)
def Curly(string):
    """Wrap *string* in URL-encoded curly braces (%7B...%7D)."""
    return '%7B{0}%7D'.format(string)
def EqualToFunc(string1, string2):
    """Join two encoded operands with a URL-encoded '=' (%3D)."""
    return '{0}%3D{1}'.format(string1, string2)
def ANDFunc(AttributeNameList, AttributeStatusList):
    """Build an AND(...) filter formula from parallel name/status lists.

    Each pair becomes an URL-encoded {name}="status" condition; the lists
    are expected to be the same length.
    """
    conditions = []
    for idx in range(len(AttributeNameList)):
        conditions.append(EqualToFunc(Curly(AttributeNameList[idx]), DoubleQuotes(AttributeStatusList[idx])))
    return 'AND(' + ','.join(conditions) + ')'
def ORFunc(AttributeNameList, AttributeStatusList):
    """Build an OR(...) filter formula from parallel name/status lists.

    Mirror image of ANDFunc: each pair becomes an URL-encoded
    {name}="status" condition.
    """
    conditions = []
    for idx in range(len(AttributeNameList)):
        conditions.append(EqualToFunc(Curly(AttributeNameList[idx]), DoubleQuotes(AttributeStatusList[idx])))
    return 'OR(' + ','.join(conditions) + ')'
def DownloadRuntable(Debug, MyKey):
    """Fetch the full run table, cache it locally, and return it as a dict.

    Debug is accepted for signature symmetry with the other downloaders but
    is unused here (as in the original).
    """
    auth_header = {'Authorization': 'Bearer %s' % MyKey, }
    response = am.requests.get(am.CurlBaseCommandRunTable, headers=auth_header)
    # NOTE(review): ast.literal_eval parses the JSON-ish payload; kept as-is
    # to preserve behavior, though json.loads would be conventional.
    RunTableDict = am.ast.literal_eval(response.text)
    with open(am.LocalConfigPath + "RunTable.txt", "w") as cache_file:
        js.dump(RunTableDict, cache_file)
    return RunTableDict
def DownloadConfigs(Debug, MyKey):
    """Download every configuration table and cache each one locally.

    Returns (ConfigDict, LecroyDict, KeySightDict, TOFHIRDict, CAENDict,
    SensorDict) — one parsed dict per table, in the same order as before.
    The six copy-pasted fetch/dump stanzas (which also reused the `kfile`
    handle variable) are collapsed into a single loop.
    """
    headers = {'Authorization': 'Bearer %s' % MyKey, }

    def _fetch(url):
        # GET one table and parse its body (ast.literal_eval kept for
        # compatibility with the rest of this module)
        return am.ast.literal_eval(am.requests.get(url, headers=headers).text)

    def _cache(file_name, table_dict):
        # persist the parsed table beside the other local config snapshots
        with open(am.LocalConfigPath + file_name, "w") as out_file:
            js.dump(table_dict, out_file)

    tables = [
        (am.CurlBaseCommandConfig, "Configurations.txt"),
        (am.CurlBaseCommandLecroy, "LecroyConfigurations.txt"),
        (am.CurlBaseCommandKeySight, "KeySightConfigurations.txt"),
        (am.CurlBaseCommandTOFHIR, "TOFHIRConfigurations.txt"),
        (am.CurlBaseCommandCAEN, "CAENConfigurations.txt"),
        (am.CurlBaseCommandSensor, "SensorConfigurations.txt"),
    ]
    results = []
    for url, file_name in tables:
        table_dict = _fetch(url)
        _cache(file_name, table_dict)
        results.append(table_dict)
    return tuple(results)
def getSensorById(SensorDict, idnum):
    """Return the sensor name for record id *idnum*, or None if not found."""
    for record in SensorDict['records']:
        if record['id'] == idnum:
            return record['fields']['Name no commas allowed']
def getConfigsByGConf(ConfigDict, gconf):
    """Return (lecroy record id, caen record id) for configuration *gconf*.

    Returns None when no record matches.
    """
    for record in ConfigDict['records']:
        fields = record['fields']
        if fields['Configuration number'] == gconf:
            return fields['ConfigurationLecroyScope'][0], fields['ConfigurationCAENHV'][0]
def getConfigsByGConfTOFHIR(ConfigDict, gconf):
    """Return the TOFHIR config record id for configuration *gconf*.

    Returns None when no record matches.
    """
    for record in ConfigDict['records']:
        fields = record['fields']
        if fields['Configuration number'] == gconf:
            return fields['ConfigurationTOFHIR'][0]
def getSimpleLecroyDict(LecroyDict, SensorDict, lecroyConfID):
    """Return the Lecroy config fields for *lecroyConfID*.

    Every field whose name contains "Sensor" has its linked-record id
    replaced in place by the sensor's human-readable name.  Returns None
    when the id is not found.
    """
    for record in LecroyDict['records']:
        if record['id'] == lecroyConfID:
            fields = record['fields']
            for field_name in fields:
                if "Sensor" in field_name:
                    fields[field_name] = getSensorById(SensorDict, fields[field_name][0])
            return fields
def getSimpleCAENDict(CAENDict, SensorDict, caenConfID):
    """Return the CAEN config fields for *caenConfID*.

    Mirror image of getSimpleLecroyDict: Sensor-linked fields are replaced
    in place by the sensor's human-readable name.  Returns None when the id
    is not found.
    """
    for record in CAENDict['records']:
        if record['id'] == caenConfID:
            fields = record['fields']
            for field_name in fields:
                if "Sensor" in field_name:
                    fields[field_name] = getSensorById(SensorDict, fields[field_name][0])
            return fields
##################### Main Run Table Operaton functions #########################
def ParsingQuery(NumberOfConditions, ConditionAttributeNames, ConditionAttributeStatus, QueryAttributeName, Debug, MyKey):
    """Query the run table for records matching all given conditions.

    Returns (Output, FieldID): the QueryAttributeName values and the record
    ids of every matching record.
    """
    Output = []
    FieldID = []
    FilterByFormula = ''
    headers = {'Authorization': 'Bearer %s' % MyKey, }
    # build an AND() of URL-encoded {name}="status" conditions
    for i in range (0, NumberOfConditions):
        if i > 0: FilterByFormula = FilterByFormula + ','
        FilterByFormula = FilterByFormula + EqualToFunc(Curly(ConditionAttributeNames[i]), DoubleQuotes(ConditionAttributeStatus[i]))
    if NumberOfConditions > 1: FilterByFormula = 'AND(' + FilterByFormula + ')'
    response = am.requests.get(am.CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug: print FilterByFormula
    for i in ResponseDict["records"]: Output.append(i['fields'][QueryAttributeName])
    for i in ResponseDict["records"]: FieldID.append(i['id'])
    return Output, FieldID
def ParsingQuery2(NumberOfConditions, ConditionAttributeNames, ConditionAttributeStatus, QueryAttributeName, Debug, MyKey):
    """Query the configuration table and return the FIRST matching value.

    When Debug is truthy, returns (ResponseDict, FilterByFormula) instead.
    Raises IndexError when no record matches (unchanged from the original).
    The original also accumulated a FieldID list that was never returned;
    that dead code is removed.
    """
    Output = []
    FilterByFormula = ''
    headers = {'Authorization': 'Bearer %s' % MyKey, }
    # build an AND() of URL-encoded {name}="status" conditions
    for i in range(0, NumberOfConditions):
        if i > 0:
            FilterByFormula = FilterByFormula + ','
        FilterByFormula = FilterByFormula + EqualToFunc(Curly(ConditionAttributeNames[i]), DoubleQuotes(ConditionAttributeStatus[i]))
    if NumberOfConditions > 1:
        FilterByFormula = 'AND(' + FilterByFormula + ')'
    response = am.requests.get(am.CurlBaseCommandConfig + '?filterByFormula=' + FilterByFormula, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict, FilterByFormula
    for record in ResponseDict["records"]:
        Output.append(record['fields'][QueryAttributeName])
    return Output[0]
def ParsingQuery3(NumberOfConditions, ConditionAttributeNameList, ConditionAttributeStatusList, QueryAttributeNameList, Debug, MyKey):
    """Like ParsingQuery, but collects several attributes per matching record.

    Returns a dict mapping each index of QueryAttributeNameList to the list
    of that attribute's values over all matching records.
    """
    OutputDict = {x: [] for x in range(len(QueryAttributeNameList))}
    FieldID = []
    FilterByFormula = ''
    headers = {'Authorization': 'Bearer %s' % MyKey, }
    # build an AND() of URL-encoded {name}="status" conditions
    for i in range (0, NumberOfConditions):
        if i > 0: FilterByFormula = FilterByFormula + ','
        FilterByFormula = FilterByFormula + EqualToFunc(Curly(ConditionAttributeNameList[i]), DoubleQuotes(ConditionAttributeStatusList[i]))
    if NumberOfConditions > 1: FilterByFormula = 'AND(' + FilterByFormula + ')'
    response = am.requests.get(am.CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug: print FilterByFormula
    for key,value in OutputDict.items():
        for i in ResponseDict["records"]:
            value.append(i['fields'][QueryAttributeNameList[key]])
    return OutputDict
def GetDigiFromConfig(ConfigurationNumber, Debug, MyKey):
    """Return the list of digitizer names referenced by a configuration.

    A digitizer from am.DigitizerDict is included when any field name of the
    first matching configuration record mentions it.
    """
    headers = {'Authorization': 'Bearer %s' % MyKey}
    FilterByFormula = EqualToFunc(Curly('Configuration number'), DoubleQuotes(ConfigurationNumber))
    response = am.requests.get(am.CurlBaseCommandConfig + '?filterByFormula=' + FilterByFormula, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict, FilterByFormula
    field_names = ResponseDict["records"][0]['fields'].keys()
    DigitizerList = []
    for _, digitizer in am.DigitizerDict.items():
        if any(digitizer in field for field in field_names):
            DigitizerList.append(digitizer)
    return DigitizerList
def GetConfigNumberFromConfig(RunNumber, Debug, MyKey):
    """Look up the configuration number linked to a given run number."""
    headers = {'Authorization': 'Bearer %s' % MyKey}
    # first resolve the linked Configuration record id via the run table
    config_ids, _record_ids = ParsingQuery(1, ["Run number"], [RunNumber], "Configuration", False, MyKey)
    config_record_id = config_ids[0][0]
    response = am.requests.get(am.CurlBaseCommandConfig + '/' + config_record_id, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict
    return ResponseDict["fields"]['Configuration number']
def GetFieldID(ConditionAttributeName, ConditionAttributeStatus, Debug, MyKey):
    """Return the run-table record ids where {name} equals "status"."""
    FilterByFormula = EqualToFunc(Curly(ConditionAttributeName), DoubleQuotes(ConditionAttributeStatus))
    headers = {'Authorization': 'Bearer %s' % MyKey, }
    response = am.requests.get(am.CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict, FilterByFormula
    return [record['id'] for record in ResponseDict["records"]]
def UpdateAttributeStatus(FieldID, UpdateAttributeName, UpdateAttributeStatus, Debug, MyKey):
    """PATCH one record's field to a single-element list value (link/select style)."""
    headers = {
        'Authorization': 'Bearer %s' % MyKey,
        'Content-Type': 'application/json',
    }
    payload = '{"fields":{"%s": ["%s"]}}' % (UpdateAttributeName, UpdateAttributeStatus)
    response = am.requests.patch(am.CurlBaseCommand + '/' + FieldID, headers=headers, data=payload)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict
def UpdateAttributeStatus2(FieldID, UpdateAttributeName, UpdateAttributeStatus, Debug, MyKey):
    """PATCH one record's field to an integer value (numeric-field variant)."""
    headers = {
        'Authorization': 'Bearer %s' % MyKey,
        'Content-Type': 'application/json',
    }
    payload = '{"fields":{"%s": %d}}' % (UpdateAttributeName, UpdateAttributeStatus)
    response = am.requests.patch(am.CurlBaseCommand + '/' + FieldID, headers=headers, data=payload)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict
def GetFieldIDOtherTable(TableName, ConditionAttributeName, ConditionAttributeStatus, Debug, MyKey):
    """Return matching record ids from the 'Sensor' or 'Config' table.

    Raises ValueError for any other TableName — previously an unknown table
    name fell through and crashed later with an unhelpful NameError on the
    unassigned CurlBaseCommand.
    """
    if TableName == 'Sensor':
        CurlBaseCommand = am.CurlBaseCommandSensor
    elif TableName == 'Config':
        CurlBaseCommand = am.CurlBaseCommandConfig
    else:
        raise ValueError("TableName must be 'Sensor' or 'Config', got %r" % (TableName,))
    FilterByFormula = EqualToFunc(Curly(ConditionAttributeName), DoubleQuotes(ConditionAttributeStatus))
    headers = {'Authorization': 'Bearer %s' % MyKey, }
    response = am.requests.get(CurlBaseCommand + '?filterByFormula=' + FilterByFormula, headers=headers)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict, FilterByFormula
    return [record['id'] for record in ResponseDict["records"]]
def NewRunRecord(RunNumber, StartTime, Duration, DigitizerList, Tracking, ConversionSampic, ConversionTekScope, ConversionKeySightScope, TimingDAQVME, TimingDAQSampic, TimingDAQTekScope, TimingDAQKeySightScope, TimingDAQDT5742, TimingDAQNoTracksVME, TimingDAQNoTracksSampic, TimingDAQNoTracksTekScope, TimingDAQNoTracksKeySightScope, TimingDAQNoTracksDT5742, LabviewRecoVME, LabviewRecoDT5742, LabviewRecoKeySightScope, LabviewRecoSampic, LabviewRecoTekScope, ConfigID, Debug, MyKey):
    """POST a new run record to the run table.

    Generalized: the original only handled 1-5 digitizers via five
    copy-pasted branches and crashed with NameError (`data` unbound) for any
    other length.  The "Digitizer" JSON array is now built dynamically for
    any number of entries; the rest of the payload is unchanged.
    When Debug is truthy, returns (ResponseDict, data).
    """
    headers = {
        'Authorization': 'Bearer %s' % MyKey,
        'Content-Type': 'application/json',
    }
    # e.g. '"VME","Sampic"' — same element formatting as the old branches
    digitizer_json = ','.join(['"%s"' % digitizer for digitizer in DigitizerList])
    data = '{"fields":{"Run number": %d,"Start time": "%s", "Duration": "%s", "Digitizer": [%s], "Tracking": ["%s"], "ConversionSampic": ["%s"], "ConversionTekScope": ["%s"], "ConversionKeySightScope": ["%s"], "TimingDAQVME": ["%s"], "TimingDAQSampic": ["%s"], "TimingDAQTekScope": ["%s"], "TimingDAQKeySightScope": ["%s"], "TimingDAQDT5742": ["%s"],"TimingDAQNoTracksVME": ["%s"], "TimingDAQNoTracksSampic": ["%s"], "TimingDAQNoTracksTekScope": ["%s"], "TimingDAQNoTracksKeySightScope": ["%s"], "TimingDAQNoTracksDT5742": ["%s"], "LabviewRecoVME": ["%s"], "LabviewRecoDT5742": ["%s"], "LabviewRecoKeySightScope": ["%s"], "LabviewRecoSampic": ["%s"], "LabviewRecoTekScope": ["%s"], "Configuration": ["%s"]}}' % (RunNumber, StartTime, Duration, digitizer_json, Tracking, ConversionSampic, ConversionTekScope, ConversionKeySightScope, TimingDAQVME, TimingDAQSampic, TimingDAQTekScope, TimingDAQKeySightScope, TimingDAQDT5742, TimingDAQNoTracksVME, TimingDAQNoTracksSampic, TimingDAQNoTracksTekScope, TimingDAQNoTracksKeySightScope, TimingDAQNoTracksDT5742, LabviewRecoVME, LabviewRecoDT5742, LabviewRecoKeySightScope, LabviewRecoSampic, LabviewRecoTekScope, ConfigID[0])
    response = am.requests.post(am.CurlBaseCommand, headers=headers, data=data)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug:
        return ResponseDict, data
def NewRunRecord2(RunNumber, StartTime, Duration, DigitizerList, Tracking, ConversionSampic, ConversionTekScope, ConversionKeySightScope, TimingDAQVME, TimingDAQSampic, TimingDAQTekScope, TimingDAQKeySightScope, TimingDAQDT5742, TimingDAQTOFHIR, TimingDAQNoTracksVME, TimingDAQNoTracksSampic, TimingDAQNoTracksTekScope, TimingDAQNoTracksKeySightScope, TimingDAQNoTracksDT5742, TimingDAQNoTracksTOFHIR, LabviewRecoVME, LabviewRecoDT5742, LabviewRecoKeySightScope, LabviewRecoSampic, LabviewRecoTekScope, BoxTemp, x_stage, y_stage, BoxVoltage, BarCurrent, z_rotation, BoxHum, BoxCurrent, BarVoltage, OverVoltageBTL, VTHBTL, ConfigID, Debug, MyKey):
    """Create a new run record in the run table with processing-status flags
    and environment/stage readings.

    Previously the ~1KB JSON template was duplicated five times, one copy per
    supported digitizer-list length (1-5), and lists of any other length were
    silently not logged.  The digitizer names are now joined into a single
    template, which removes the duplication and generalizes to any non-empty
    DigitizerList.  An empty DigitizerList still posts nothing.

    When Debug is truthy, returns (ResponseDict, data); otherwise returns None.
    """
    headers = {
        'Authorization': 'Bearer %s' % MyKey,  # API bearer token
        'Content-Type': 'application/json',
    }
    data = ""
    if len(DigitizerList) > 0:
        # e.g. ['VME', 'Sampic'] -> '"VME","Sampic"' (goes inside the JSON array)
        digitizer_json = ','.join('"%s"' % d for d in DigitizerList)
        data = '{"fields":{"Run number": %d,"Start time": "%s", "Duration": "%s", "Digitizer": [%s], "Tracking": ["%s"], "ConversionSampic": ["%s"], "ConversionTekScope": ["%s"], "ConversionKeySightScope": ["%s"], "TimingDAQVME": ["%s"], "TimingDAQSampic": ["%s"], "TimingDAQTekScope": ["%s"], "TimingDAQKeySightScope": ["%s"], "TimingDAQDT5742": ["%s"], "TimingDAQTOFHIR": ["%s"], "TimingDAQNoTracksVME": ["%s"], "TimingDAQNoTracksSampic": ["%s"], "TimingDAQNoTracksTekScope": ["%s"], "TimingDAQNoTracksKeySightScope": ["%s"], "TimingDAQNoTracksDT5742": ["%s"], "TimingDAQNoTracksTOFHIR": ["%s"], "LabviewRecoVME": ["%s"], "LabviewRecoDT5742": ["%s"], "LabviewRecoKeySightScope": ["%s"], "LabviewRecoSampic": ["%s"], "LabviewRecoTekScope": ["%s"], "Configuration": ["%s"],"BoxTempOlmo": "%s","x_stageOlmo": "%s","y_stageOlmo": "%s","BoxVoltageOlmo": "%s","BarCurrentOlmo": "%s","z_rotationOlmo": "%s","BoxHumOlmo": "%s","BoxCurrentOlmo": "%s", "BarVoltageOlmo": "%s","OverVoltageBTL": "%s", "VTHBTL": "%s"}}' % (RunNumber, StartTime, Duration, digitizer_json, Tracking, ConversionSampic, ConversionTekScope, ConversionKeySightScope, TimingDAQVME, TimingDAQSampic, TimingDAQTekScope, TimingDAQKeySightScope, TimingDAQDT5742, TimingDAQTOFHIR, TimingDAQNoTracksVME, TimingDAQNoTracksSampic, TimingDAQNoTracksTekScope, TimingDAQNoTracksKeySightScope, TimingDAQNoTracksDT5742, TimingDAQNoTracksTOFHIR, LabviewRecoVME, LabviewRecoDT5742, LabviewRecoKeySightScope, LabviewRecoSampic, LabviewRecoTekScope, ConfigID[0], BoxTemp, x_stage, y_stage, BoxVoltage, BarCurrent, z_rotation, BoxHum, BoxCurrent, BarVoltage, OverVoltageBTL, VTHBTL)
    if data != "":
        response = am.requests.post(am.CurlBaseCommand, headers=headers, data=data)
        ResponseDict = am.ast.literal_eval(response.text)
        if Debug: return ResponseDict, data
    else:
        print('Nothing to log in the run table')
def NewRunRecordSimple(run_info, ConfigID, Debug, MyKey):
    """Post a run record whose fields come straight from the run_info dict.

    run_info: mapping of field name -> value; serialized as the "fields" object.
    ConfigID: accepted for signature compatibility; not used by this function.
    Debug: when truthy, return (ResponseDict, run_info) for inspection.
    MyKey: bearer token for the run-table API.
    """
    auth_headers = {
        'Authorization': 'Bearer %s' % MyKey,
        'Content-Type': 'application/json',
    }
    payload = js.dumps({"fields": run_info})
    reply = am.requests.post(am.CurlBaseCommand, headers=auth_headers, data=payload)
    parsed = am.ast.literal_eval(reply.text)
    print(payload)
    print(parsed)
    if Debug:
        return parsed, run_info
def NewRunRecord4(RunNumber, StartTime, Duration, DigitizerList, Tracking, ConversionSampic, ConversionTekScope,ETROC_baseline, ETROC_config,xrdcpRawKeySightScope,xrdcpRawLecroyScope,ConversionKeySightScope,ConversionLecroyScope, TimingDAQVME, TimingDAQSampic, TimingDAQTekScope, TimingDAQKeySightScope,TimingDAQLecroyScope, TimingDAQDT5742, TimingDAQNoTracksVME, TimingDAQNoTracksSampic, TimingDAQNoTracksTekScope, TimingDAQNoTracksKeySightScope,TimingDAQNoTracksLecroyScope, TimingDAQNoTracksDT5742, LabviewRecoVME, LabviewRecoDT5742, LabviewRecoKeySightScope, LabviewRecoSampic, LabviewRecoTekScope, BoxTemp, x_stage, y_stage, BoxVoltage, BarCurrent, z_rotation, BoxHum, BoxCurrent, BarVoltage, Temp13ETL, Temp14ETL, Temp15ETL, Temp16ETL, Temp17ETL, Temp18ETL, Temp19ETL, Temp20ETL, LowVoltage1ETL, Current1ETL, LowVoltage2ETL, Current2ETL, LowVoltage3ETL, Current3ETL, ConfigID, Debug, MyKey):
    """Create a run record including ETROC settings, scope processing flags and
    ETL temperature/voltage telemetry.

    Posts nothing and returns None when DigitizerList is empty.  When Debug is
    truthy, returns (ResponseDict, data).  The hand-rolled loop that built the
    '"Digitizer": [...]' JSON fragment is replaced with a single str.join.
    """
    headers = {
        'Authorization': 'Bearer %s' % MyKey,  # API bearer token
        'Content-Type': 'application/json',
    }
    if len(DigitizerList)==0: return
    # Build the JSON fragment '"Digitizer": ["a","b",...]' in one pass.
    digitizerListString = '"Digitizer": [%s]' % ','.join('"%s"' % d for d in DigitizerList)
    data = '{"fields":{"Run number": %d,"Start time": "%s", "Duration": "%s", %s, "ETROC baseline": "%s", "ETROC config": "%s" ,"xrdcpRawKeySightScope":["%s"],"xrdcpRawLecroyScope":["%s"], "ConversionSampic": ["%s"], "ConversionTekScope": ["%s"], "ConversionKeySightScope": ["%s"], "ConversionLecroyScope": ["%s"], "TimingDAQVME": ["%s"], "TimingDAQSampic": ["%s"], "TimingDAQTekScope": ["%s"], "TimingDAQKeySightScope": ["%s"], "TimingDAQLecroyScope": ["%s"], "TimingDAQDT5742": ["%s"],"TimingDAQNoTracksVME": ["%s"], "TimingDAQNoTracksSampic": ["%s"], "TimingDAQNoTracksTekScope": ["%s"], "TimingDAQNoTracksKeySightScope": ["%s"], "TimingDAQNoTracksLecroyScope": ["%s"], "TimingDAQNoTracksDT5742": ["%s"], "LabviewRecoVME": ["%s"], "LabviewRecoDT5742": ["%s"], "LabviewRecoKeySightScope": ["%s"], "LabviewRecoSampic": ["%s"], "LabviewRecoTekScope": ["%s"], "Configuration": ["%s"],"BoxTempOlmo": "%s","x_stageOlmo": "%s","y_stageOlmo": "%s","BoxVoltageOlmo": "%s","BarCurrentOlmo": "%s","z_rotationOlmo": "%s","BoxHumOlmo": "%s","BoxCurrentOlmo": "%s", "BarVoltageOlmo": "%s", "Temp13ETL" : "%s", "Temp14ETL" : "%s", "Temp15ETL" : "%s", "Temp16ETL" : "%s", "Temp17ETL" : "%s", "Temp18ETL" : "%s", "Temp19ETL" : "%s", "Temp20ETL" : "%s", "LowVoltage1ETL" : "%s", "LowVoltage2ETL" : "%s", "LowVoltage3ETL" : "%s", "Current1ETL" : "%s", "Current2ETL" : "%s", "Current3ETL" : "%s"}}' % (RunNumber, StartTime, Duration, digitizerListString, ETROC_baseline, ETROC_config, xrdcpRawKeySightScope, xrdcpRawLecroyScope, ConversionSampic, ConversionTekScope, ConversionKeySightScope, ConversionLecroyScope, TimingDAQVME, TimingDAQSampic, TimingDAQTekScope, TimingDAQKeySightScope, TimingDAQLecroyScope, TimingDAQDT5742, TimingDAQNoTracksVME, TimingDAQNoTracksSampic, TimingDAQNoTracksTekScope, TimingDAQNoTracksKeySightScope, TimingDAQNoTracksLecroyScope, TimingDAQNoTracksDT5742, LabviewRecoVME, LabviewRecoDT5742, LabviewRecoKeySightScope, LabviewRecoSampic, LabviewRecoTekScope, ConfigID[0], BoxTemp, x_stage, y_stage, BoxVoltage, BarCurrent, z_rotation, BoxHum, BoxCurrent, BarVoltage, Temp13ETL, Temp14ETL, Temp15ETL, Temp16ETL, Temp17ETL, Temp18ETL, Temp19ETL, Temp20ETL, LowVoltage1ETL, LowVoltage2ETL, LowVoltage3ETL, Current1ETL, Current2ETL, Current3ETL)
    print(data)  # left in for operator visibility during data taking
    response = am.requests.post(am.CurlBaseCommand, headers=headers, data=data)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug: return ResponseDict, data
def NewRunRecord5(RunNumber, MyKey, ConfigID):
    """Log a minimal KeySightScope run record for a manual (non-autopilot) run.

    All processing-status fields start out as "Not started".  Always returns
    the tuple (ResponseDict, data) where data is the JSON payload that was
    posted.
    """
    headers = {
        'Authorization': 'Bearer %s' % MyKey,
        'Content-Type': 'application/json',
    }
    status = "Not started"  # initial state for every processing stage
    data = '{"fields":{"Run number": %d, "Digitizer": ["%s"], "Tracking": ["%s"], "ConversionKeySightScope": ["%s"], "TimingDAQKeySightScope": ["%s"], "TimingDAQNoTracksKeySightScope": ["%s"], "Configuration": ["%s"], "Notes": "%s"}}' % (
        RunNumber, "KeySightScope", status, status, status, status, ConfigID[0], "Without Autopilot")
    response = am.requests.post(am.CurlBaseCommand, headers=headers, data=data)
    ResponseDict = am.ast.literal_eval(response.text)
    return ResponseDict, data
def NewRunRecord3(RunNumber, BoxTemp, x_stage, y_stage, BoxVoltage, BarCurrent, z_rotation, BoxHum, BoxCurrent, BarVoltage, Debug, MyKey):
    """Log environment and stage telemetry for an existing run number.

    Fixes the duplicated assignment (`data = data = ...`) in the original and
    drops the large blocks of dead commented-out templates.  When Debug is
    truthy, returns (ResponseDict, data); otherwise returns None.

    NOTE(review): the signature lists BoxCurrent between BoxHum and BarVoltage
    while earlier parameters already cover box readings -- kept as-is because
    callers pass positionally.
    """
    headers = {
        'Authorization': 'Bearer %s' % MyKey,  # API bearer token
        'Content-Type': 'application/json',
    }
    data = '{"fields":{"Run number": %d,"BoxTemp": "%s","x_stage": "%s","y_stage": "%s","BoxVoltage": "%s","BarCurrent": "%s","z_rotation": "%s","BoxHum": "%s","BoxCurrent": "%s", "BarVoltage": "%s"}}' % (RunNumber, BoxTemp, x_stage, y_stage, BoxVoltage, BarCurrent, z_rotation, BoxHum, BoxCurrent, BarVoltage)
    response = am.requests.post(am.CurlBaseCommand, headers=headers, data=data)
    ResponseDict = am.ast.literal_eval(response.text)
    if Debug: return ResponseDict, data
992,320 | e269f6746fa57fb0a45e6cc8eec06be3bed7db1e |
import sys
from PyQt5.QtWidgets import *
class LabelItem(QLabel):
    """Thin QLabel subclass used as the display widget for list rows."""

    def __init__(self, *args):
        # Forward everything to QLabel unchanged; exists only as a named hook
        # for styling/extension.
        super().__init__(*args)
class Widget(QListWidget):
    """List widget that installs a single LabelItem row on construction."""

    def __init__(self):
        super().__init__()
        row = QListWidgetItem(self)  # item is parented to this list
        self.setItemWidget(row, LabelItem("ttttttt"))
if __name__ == '__main__':
    # Minimal demo: one list window with a single labelled row.
    app = QApplication(sys.argv)
    # app.setStyleSheet(open('./etc/{0}.qss'.format('style'), "r").read())
    window = Widget()
    window.show()
    sys.exit(app.exec_())
|
992,321 | 28c7565cb588438c403d9c51a49e43678b3ac228 | # binary search, O(log(n)) for each call pick(), overhead in __init__() is O(n)
# space O(n)
import random, bisect
class Solution(object):
    """Uniform random integer-point picker over disjoint axis-aligned rects.

    Strategy: weight each rectangle by its count of integer points (edges
    inclusive), keep a running cumulative total, and binary-search it in
    pick().  __init__ is O(n); each pick() is O(log n).
    """

    def __init__(self, rects):
        """
        :type rects: List[List[int]]
        """
        self.bottomleft = []  # [x1, y1, width-in-points] per rectangle
        self.areas = []       # cumulative integer-point counts
        running = 0
        for x1, y1, x2, y2 in rects:
            w = x2 - x1 + 1  # +1: both edges carry integer points
            self.bottomleft.append([x1, y1, w])
            running += w * (y2 - y1 + 1)
            self.areas.append(running)

    def pick(self):
        """
        :rtype: List[int]
        """
        if not self.bottomleft:
            return []
        # Draw a 1-based index into the flattened sequence of all points,
        # locate its rectangle, then convert the local offset to (x, y).
        target = random.randint(1, self.areas[-1])
        idx = bisect.bisect_left(self.areas, target)
        offset = target - (self.areas[idx - 1] if idx else 0) - 1
        x0, y0, w = self.bottomleft[idx]
        return [x0 + offset % w, y0 + offset // w]
# Your Solution object will be instantiated and called as such:
# obj = Solution(rects)
# param_1 = obj.pick()
"""
Given a list of non-overlapping axis-aligned rectangles rects, write a function pick which randomly and uniformly picks an integer point in the space covered by the rectangles.
Note:
An integer point is a point that has integer coordinates.
A point on the perimeter of a rectangle is included in the space covered by the rectangles.
ith rectangle = rects[i] = [x1,y1,x2,y2], where [x1, y1] are the integer coordinates of the bottom-left corner, and [x2, y2] are the integer coordinates of the top-right corner.
length and width of each rectangle does not exceed 2000.
1 <= rects.length <= 100
pick return a point as an array of integer coordinates [p_x, p_y]
pick is called at most 10000 times.
Example 1:
Input:
["Solution","pick","pick","pick"]
[[[[1,1,5,5]]],[],[],[]]
Output:
[null,[4,1],[4,1],[3,3]]
Example 2:
Input:
["Solution","pick","pick","pick","pick","pick"]
[[[[-2,-2,-1,-1],[1,0,3,0]]],[],[],[],[],[]]
Output:
[null,[-1,-2],[2,0],[-2,-1],[3,0],[-2,-2]]
Explanation of Input Syntax:
The input is two lists: the subroutines called and their arguments. Solution's constructor has one argument, the array of rectangles rects. pick has no arguments. Arguments are always wrapped with a list, even if there aren't any.
"""
|
992,322 | e5f452013bbbd470954fb2ef231b0192c631406f | while True:
try:
a = int(input('請輸入第一位數字:'))
b = int(input('請輸入第二位數字:'))
c = int(input('請選擇何種運算符號?\n1.加, 2.減, 3.乘, 4.除'))
if c == 1:
print('{} 和 {} 相加後的答案為:{}'.format(a, b, a+b))
d = input('要繼續嗎?按Q離開')
if d == 'q' or d == 'Q':
break
elif c == 2:
print('{} 和 {} 相減後的答案為:{}'.format(a, b, a-b))
d = input('要繼續嗎?按Q離開')
if d == 'q' or d == 'Q':
break
elif c == 3:
print('{} 和 {} 相乘後的答案為:{}'.format(a, b, a*b))
d = input('要繼續嗎?按Q離開')
if d == 'q' or d == 'Q':
break
elif c == 4:
print('{} 和 {} 相除後的答案為:{}'.format(a, b, a/b))
d = input('要繼續嗎?按Q離開')
if d == 'q' or d == 'Q':
break
else:
print('輸入錯誤')
except ValueError:
print("輸入格式有誤")
except:
print("程式出現其它異常") |
992,323 | 56b48600364b8c820f3cedcbb45ea520684e0f59 | class SMSBackend(object):
"""Base interface for an SMS backend."""
    def send(self, to, from_, body):
        """Send an SMS message.

        to: destination address (presumably a phone number -- confirm with a
            concrete backend).
        from_: sender address; trailing underscore avoids the `from` keyword.
        body: text content of the message.

        Raises NotImplementedError always -- concrete backends must override.
        """
        raise NotImplementedError
|
992,324 | d5030bc033d3d3126dbe419cc6c686d0bffc3802 | # -*- encoding: utf-8 -*-
from enum import Enum, unique
@unique
class EntityScope(Enum):
    '''
    Categories (scopes) used to classify a business entity.

    (Originally documented bilingually as <AmbitoEntidad>: "identifica la
    categoría, ámbito o alcance de una entidad".)

    Members
    -------
    ECONOMIC_ACTIVITY   -- Actividad Económica (economic activity)
    SCOPE_OF_OPERATION  -- Ámbito de Operación (scope of operation)
    CAPITAL_COMPOSITION -- Composición del Capital (capital composition)
    LEGAL_FORM          -- Forma Jurídica (legal form)
    SIZE                -- Tamaño (size)

    Values are the Spanish display strings; @unique guarantees no two members
    share a value.
    '''
    # NOTE(review): 'Econónica' looks like a typo for 'Económica', but the
    # value is kept verbatim because stored records may already reference it.
    ECONOMIC_ACTIVITY = 'Actividad Econónica'
    SCOPE_OF_OPERATION = 'Ámbito de Operación'
    CAPITAL_COMPOSITION = 'Composición del Capital'
    LEGAL_FORM = 'Forma Jurídica'
    SIZE = 'Tamaño'
992,325 | d6f3c46be4efbfcb402b600c336affc3803dbb1d | import math as m
import numpy as np
import ik, macros, hlsockets, helpers
import gait_alg_test as gait_alg
from time import sleep, time
# 0-2 are leg 1, 3-5 are leg 2, 6-8 are leg 3, 9-11 are leg4
# 0 -> hip, 1 -> elbow, 2 -> knee
# NOTE(review): check_angles() below labels index % 3 == 1 as the knee, which
# contradicts the mapping above -- confirm which one the hardware uses.
angles = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]  # last commanded joint angles
claws, body = ik.make_standard_bot()  # kinematic model pieces from ik
HEIGHT = 13  # default body height -- not referenced below; TODO confirm still needed
t = 0  # global gait clock (seconds), advanced by gait_alg.timestep
times = {}  # per-section timing accumulators (see helpers.dict_timer)
was_still = True  # whether the robot was stationary on the previous timestep
# default position (robot laying on ground, splayed out): stands for time seconds
def reset(time=10):
    # NOTE(review): the parameter name `time` shadows the imported time()
    # function; harmless here (only sleep is used), but rename when callers allow.
    client.send(hlsockets.SERVO, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])  # all 12 joints to 0
    sleep(time)  # hold the pose so the servos settle
# Walk with planar velocity (vx, vy) and turn rate omega for `time` seconds,
# streaming gait-computed servo angles to the controller.
def walk(vx, vy, omega, height=12, pitch=0, roll=0, yaw=0, time=10):
    # NOTE(review): this calls gait_alg.timestep with 9 args and unpacks 2
    # return values, while the main loop at the bottom of the file passes 13
    # args and unpacks 5 -- one of the two call sites is likely stale; confirm
    # against gait_alg_test.timestep's current signature.
    t = 0  # local gait clock in seconds (shadows the module-level t)
    while (t < time):
        sleeptime, angles = gait_alg.timestep(body, vx, vy, omega, height, pitch, roll, yaw, t)
        t += sleeptime
        client.send(hlsockets.SERVO, quick_fix_order(angles))
        sleep(sleeptime)
def test_leg_order():
    """Hardware check: wiggle each leg's hip in turn (+/-10, six sends at
    10 Hz) so the physical leg order can be verified against the indices."""
    pose = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    for leg in range(4):
        hip = leg * 3  # hip servo index for this leg
        pose[hip] = 10
        for _ in range(6):
            client.send(hlsockets.SERVO, pose)
            pose[hip] = -pose[hip]  # alternate +10 / -10 each send
            sleep(.1)
        pose[hip] = 0  # leave the leg centered before moving on
def quick_fix_order(angles):
    """Rotate the 12-entry angle list left by one leg (3 servos) so the leg
    order matches the servo wiring; returns a new list."""
    first_leg, remaining = angles[:3], angles[3:]
    return remaining + first_leg
def quick_fix_angles(angles):
    """Negate every entry at index % 3 == 1 (middle joint of each leg),
    mutating the list in place, and return it."""
    for middle in range(1, len(angles), 3):
        angles[middle] = -angles[middle]
    return angles
def check_angles(angles):
    """Clamp every joint angle to its servo's allowed range (from macros),
    in place, and return the list.  A single-letter debug tag is printed
    whenever a value is clamped (a/b hip, c/d middle joint, e/f last joint),
    matching the original diagnostics."""
    # (low limit, high limit, tag-when-clamped-low, tag-when-clamped-high)
    # keyed by joint position within each leg triplet.
    limits = {
        0: (macros.HIP_MIN, macros.HIP_MAX, "a", "b"),
        1: (macros.KNEE_MIN, macros.KNEE_MAX, "c", "d"),
        2: (macros.ELB_MIN, macros.ELB_MAX, "e", "f"),
    }
    for idx in range(len(angles)):
        lo, hi, lo_tag, hi_tag = limits[idx % 3]
        if angles[idx] < lo:
            print(lo_tag)
            angles[idx] = lo
        elif angles[idx] > hi:
            angles[idx] = hi
            print(hi_tag)
    return angles
ctr = 1          # loop iteration counter (only used by the disabled profiling block)
num_iters = 100  # profiling window size for the disabled block below
client = hlsockets.UDSClient()
client.open(hlsockets.CONTROLLER)
# Main control loop: hold position (vx = vy = omega = 0) while oscillating
# yaw sinusoidally, streaming gait-computed angles to the servo controller.
while True:
    tv_update_robot = time()  # start of the gait-computation timing window
    vx = 0
    vy = 0
    omega = 0
    height = 8
    pitch = 0
    roll = 0
    yaw = 20 * m.sin(t)  # +/-20 yaw sweep driven by the gait clock
    home_wid = 9
    home_len = 9
    sleeptime, angles, t, was_still, times = gait_alg.timestep(body, vx, vy, omega, height, pitch, roll, yaw, t, home_wid, home_len, was_still, times)
    times = helpers.dict_timer("Cont.update_robot", times, time()-tv_update_robot)
    # Only forward a full 12-servo frame; partial frames are dropped.
    if (len(angles) == 12):
        tv_servosend = time()
        client.send(hlsockets.SERVO, quick_fix_angles(angles))
        times = helpers.dict_timer("Cont.servosend", times, time()-tv_servosend)
    sleep(sleeptime)
    # Disabled periodic profiling printout (averages per-section timings):
    # if (ctr > num_iters):
    #     ctr = 0
    #     for k in times.keys():
    #         print(k, "time: ", times[k]/num_iters)
    #         times[k]=0
    #     print("\n")
    ctr += 1
# NOTE(review): unreachable -- the while True above never breaks, so the
# socket is only released on process exit.
client.close()
992,326 | 7987316a1a4bb1203b0bcac3f5aeaa9372be5f7f | ##########################
# Author: PZmyslony
# Module Name: Rectangle
# Date: 10/02/2020
# Version: 0.1a
##########################
import math
import doctest
class Rectangle:
    """An axis-aligned rectangle with a width, a height and an RGB color.

    The arithmetic operators combine rectangle *areas*: ``+`` returns a
    square holding the summed area, ``-``/``*``/``/`` return the numeric
    difference/product/ratio of the areas, and the comparison operators
    order rectangles by area.
    """
    def __init__(self, width, height, color=(255,0,0)):
        '''
        (num, num, tuple) -> None
        Create a rectangle from a width, a height and an RGB color tuple.
        '''
        self.width = width
        self.height = height
        self.color = color
    def __repr__(self):
        '''
        (Rectangle) -> str
        Return the string representation of a Rectangle.
        '''
        return f'Rectangle({self.width}, {self.height}, {self.color})'
    def area(self):
        '''
        (Rectangle) -> num
        Return the area, width * height.
        BUGFIX: the original returned width + height.
        >>> Rect_01 = Rectangle(100, 100)
        >>> Rect_01.area()
        10000
        '''
        return self.width * self.height
    def perimeter(self):
        '''
        (Rectangle) -> num
        Return the perimeter, 2 * (width + height).
        BUGFIX: the original returned width**2 + height**2.
        >>> Rect_01 = Rectangle(100, 100)
        >>> Rect_01.perimeter()
        400
        '''
        return 2 * (self.width + self.height)
    def __add__(self, other):
        '''
        (Rectangle, Rectangle) -> Rectangle
        Return a square rectangle whose area is the sum of both areas.
        Raises TypeError if *other* is not a Rectangle.
        >>> Rectangle(8, 8) + Rectangle(6, 6)
        Rectangle(10.0, 10.0, (255, 0, 0))
        '''
        if isinstance(other, Rectangle):
            # Side of the square carrying the combined area.
            side = math.sqrt(self.area() + other.area())
            return Rectangle(side, side)
        else:
            raise TypeError("Cannot add rect with non-rect type")
    def __sub__(self, other):
        '''
        (Rectangle, Rectangle) -> num
        Return the difference of the two areas.
        Raises TypeError if *other* is not a Rectangle.
        >>> Rectangle(100, 100) - Rectangle(100, 100)
        0
        >>> Rectangle(50, 50) - Rectangle(100, 100)
        -7500
        '''
        if isinstance(other, Rectangle):
            return self.area() - other.area()
        else:
            raise TypeError("Cannot subtract rect with non-rect type")
    def __mul__(self, other):
        '''
        (Rectangle, Rectangle) -> num
        Return the product of the two areas.
        Raises TypeError if *other* is not a Rectangle.
        >>> Rectangle(10, 10).__mul__(Rectangle(10, 10))
        10000
        >>> Rectangle(5, 10).__mul__(Rectangle(10, 10))
        5000
        '''
        if isinstance(other, Rectangle):
            return self.area() * other.area()
        else:
            raise TypeError("Cannot multiply rect with non-rect type")
    def __truediv__(self, other):
        '''
        (Rectangle, Rectangle) -> float
        Return the ratio of the two areas.
        Raises TypeError if *other* is not a Rectangle.
        >>> Rectangle(100, 100) / Rectangle(100, 100)
        1.0
        >>> Rectangle(50, 50) / Rectangle(100, 100)
        0.25
        '''
        if isinstance(other, Rectangle):
            return self.area() / other.area()
        else:
            raise TypeError("Cannot divide rect with non-rect type")
    def __eq__(self, other):
        '''
        (Rectangle, Rectangle) -> bool
        Compare two rectangles for equality based on area.
        >>> Rectangle(100, 100) == Rectangle(100, 100)
        True
        >>> Rectangle(50, 50) == Rectangle(100, 100)
        False
        '''
        if isinstance(other, Rectangle):
            return self.area() == other.area()
        else:
            raise TypeError("Cannot compare rect with non-rect type")
    def __ne__(self, other):
        '''
        (Rectangle, Rectangle) -> bool
        Compare two rectangles for inequality based on area.
        >>> Rectangle(100, 100) != Rectangle(100, 100)
        False
        >>> Rectangle(50, 90) != Rectangle(100, 100)
        True
        '''
        if isinstance(other, Rectangle):
            return not self.area() == other.area()
        else:
            raise TypeError("Cannot compare rectangle with non-rectangle type")
    def __gt__(self, other):
        '''
        (Rectangle, Rectangle) -> bool
        Check whether this rectangle's area is larger than *other*'s.
        >>> Rectangle(50, 100) > Rectangle(100, 100)
        False
        >>> Rectangle(100, 200) > Rectangle(100, 100)
        True
        '''
        if isinstance(other, Rectangle):
            return self.area() > other.area()
        else:
            raise TypeError("Cannot compare Rectangle with non-Rectangle type")
    def __lt__(self, other):
        '''
        (Rectangle, Rectangle) -> bool
        Check whether this rectangle's area is smaller than *other*'s.
        >>> Rectangle(100, 200) < Rectangle(100, 100)
        False
        >>> Rectangle(100, 100) < Rectangle(100, 300)
        True
        '''
        if isinstance(other, Rectangle):
            return self.area() < other.area()
        else:
            raise TypeError("Cannot compare Rect with non-Rectangle type")
    def draw(self):
        '''
        (Rectangle) -> None
        Draw the rectangle on an arcade canvas (blocks until the window
        is closed).
        '''
        # NOTE(review): requires the third-party `arcade` package;
        # imported lazily so the rest of the class works without it.
        import arcade
        SCREEN_WIDTH = 600
        SCREEN_HEIGHT = 600
        X_OFFSET = SCREEN_WIDTH // 2
        Y_OFFSET = SCREEN_HEIGHT // 2
        arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "Drawing Rectangle") # create window
        arcade.set_background_color((200, 255, 200)) # set bg light green
        arcade.start_render() # start rendering- required
        # Rectangle drawn centered; NOTE(review): fixed 300x300 size, the
        # instance's own width/height are not used here — confirm intent.
        arcade.draw_rectangle_filled(300, 300, 300, 300, self.color, 0)
        arcade.finish_render() # Finish render
        arcade.run() # runs all above code until [x] clicked
        return None
if __name__ == "__main__":
    # Run the embedded doctests only when executed as a script, not on
    # import (the original called testmod() unconditionally at import time).
    doctest.testmod()
|
992,327 | c535e1069d079f25233f9ce9cff6056101fc1053 | import os
import cPickle as pickle
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import numpy as np
from scipy import sparse
import util
import xgboost as xgb
def main():
    """Train a multi-class XGBoost model on precomputed sparse features
    and write argmax predictions for the test set to CSV.

    (Python 2 source: print statements, cPickle import at file top.)
    """
    print "# Loading features..."
    # Each pickle holds (feature_matrix, labels, ids); labels are unused
    # for the test split and ids unused for train.
    X_train, t_train, _ = pickle.load(open("../../features/all_tags/train.pickle"))
    X_test, _, test_ids = pickle.load(open("../../features/all_tags/test.pickle"))
    dtrain = xgb.DMatrix(X_train, label=t_train)
    print "# Training XGBoost on training data..."
    # Softmax probabilities over 15 classes; multi-class error rate is
    # tracked on the training set only (no validation split here).
    param = {'bst:max_depth':30, 'eta':0.1, 'silent':2, 'objective':'multi:softprob', 'num_class': 15 }
    param['eval_metric'] = 'merror'
    param['min_child_weight'] = 3
    param['nthread'] = 16
    param['colsample_bytree'] = 0.5
    evallist = [(dtrain,'train')]
    bst = xgb.train(param, dtrain, 500, evallist)
    print "# Predicting test data..."
    dout = xgb.DMatrix(X_test)
    t_probs = bst.predict(dout)
    # argmax over the per-class probability rows.
    t_pred = [prob.tolist().index(max(prob)) for prob in t_probs]
    util.write_predictions(t_pred, test_ids, "../../predictions/xgboost_predictions.csv")
    print "# Done!"

if __name__ == "__main__":
    main()
|
992,328 | f1a2526606ec957bac901d645742d346f82687ae | import math
from utils import rgx
from config import config
### CONTEXT #######################################################################
class Context:
    """A weighted, L2-normalised term-frequency vector over *tokens*.

    (Python 2 source: list-returning dict.keys() in __add__, __div__.)
    """

    def __init__(self, tokens, weight):
        self.tokens = tokens
        self.weight = weight
        self.ctx = {}
        # Raw term counts.
        for token in self.tokens:
            if token in self.ctx:
                self.ctx[token] += 1
            else:
                self.ctx[token] = 1.0
        # L2-normalise the count vector, then scale it by *weight*.
        # NOTE(review): non-empty token lists only — an empty *tokens*
        # makes norm 0 (the loop below then never runs, so no crash, but
        # the vector stays empty).
        norm = math.sqrt(sum([pow(self.ctx[token], 2) for token in self.ctx]))
        for token in self.ctx:
            self.ctx[token] = weight * (self.ctx[token] / norm)

    def __getitem__(self, key):
        # Tokens absent from the vector have weight 0.
        if key in self.ctx:
            return self.ctx[key]
        else:
            return 0.0

    def __add__(self, other):
        # Element-wise sum over the union of both token sets.
        # NOTE: `keys() + keys()` relies on Python 2 list-returning keys().
        new_ctx = {}
        all_tokens = set(self.ctx.keys() + other.ctx.keys())
        for token in all_tokens:
            new_ctx[token] = self[token] + other[token]
        return AbstractContext(new_ctx)

    def __mul__(self, other):
        # Dot product of the two weighted vectors.
        total = 0.0
        for token in self.ctx:
            total += self[token] * other[token]
        return total

    def __div__(self, factor):
        # Python 2 division operator: scale every component by 1/factor.
        new_ctx = {}
        for token in self.ctx:
            new_ctx[token] = self[token] / factor
        return AbstractContext(new_ctx)

    def __eq__(self, other):
        return isinstance(other, Context) and self.tokens == other.tokens and self.weight == other.weight

    def __repr__(self):
        return "Context(tokens=%r, weight=%r)" % (self.tokens, self.weight)
class AbstractContext(Context):
    """A Context built directly from an already-weighted token->value map.

    Used for sums and averages of contexts; deliberately bypasses
    Context.__init__ (no tokenisation or re-normalisation happens).
    """

    def __init__(self, ctx):
        # The weighted vector is supplied ready-made.
        self.ctx = ctx

    def __eq__(self, other):
        return isinstance(other, AbstractContext) and self.ctx == other.ctx

    def __repr__(self):
        return "AbstractContext(ctx=%r)" % self.ctx
### PATTERN #######################################################################
class Pattern:
    """A <left, tag_one, middle, tag_two, right> extraction pattern.

    The three context slots support + (merge), * (dot product) and /
    (scaling), which lets whole patterns be summed, averaged and compared.
    """

    def __init__(self, left_ctx, tag_one, middle_ctx, tag_two, right_ctx):
        self.left_ctx = left_ctx
        self.tag_one = tag_one
        self.middle_ctx = middle_ctx
        self.tag_two = tag_two
        self.right_ctx = right_ctx

    def matching_tags(self, other):
        # Patterns are comparable only when both entity-tag slots agree.
        return (self.tag_one, self.tag_two) == (other.tag_one, other.tag_two)

    def match(self, other):
        # Similarity = sum of the three per-slot context dot products;
        # incompatible tag pairs score zero.
        if not self.matching_tags(other):
            return 0.0
        return (self.left_ctx * other.left_ctx
                + self.middle_ctx * other.middle_ctx
                + self.right_ctx * other.right_ctx)

    def __radd__(self, other):  # to utilize sum()
        # sum() starts from 0; ignore that implicit operand.
        return self

    def __add__(self, other):
        if not self.matching_tags(other):
            # Mismatched tags should never be summed; leave self unchanged.
            return self
        return Pattern(self.left_ctx + other.left_ctx,
                       self.tag_one,
                       self.middle_ctx + other.middle_ctx,
                       self.tag_two,
                       self.right_ctx + other.right_ctx)

    def __mul__(self, other):
        return self.match(other)

    def __div__(self, factor):
        # Python 2 division operator: scale every context slot down.
        return Pattern(self.left_ctx / factor,
                       self.tag_one,
                       self.middle_ctx / factor,
                       self.tag_two,
                       self.right_ctx / factor)

    def __eq__(self, other):
        if not isinstance(other, Pattern):
            return False
        return (self.left_ctx == other.left_ctx
                and self.tag_one == other.tag_one
                and self.middle_ctx == other.middle_ctx
                and self.tag_two == other.tag_two
                and self.right_ctx == other.right_ctx)

    def __repr__(self):
        return "Pattern(left_ctx=%r, tag_one=%r, middle_ctx=%r, tag_two=%r, right_ctx=%r)" % (self.left_ctx,
                                                                                              self.tag_one,
                                                                                              self.middle_ctx,
                                                                                              self.tag_two,
                                                                                              self.right_ctx)
class RawPattern(Pattern):
    """A Pattern extracted from a concrete sentence, remembering which
    page and sentence index it came from."""

    def __init__(self, left_ctx, tag_one, middle_ctx, tag_two, right_ctx, page, index):
        Pattern.__init__(self, left_ctx, tag_one, middle_ctx, tag_two, right_ctx)
        self.page = page
        self.index = index

    def __eq__(self, other):
        # BUGFIX: the original compared self.page against other.index,
        # so patterns from different pages could compare equal.
        return (isinstance(other, RawPattern)
                and self.left_ctx == other.left_ctx
                and self.tag_one == other.tag_one
                and self.middle_ctx == other.middle_ctx
                and self.tag_two == other.tag_two
                and self.right_ctx == other.right_ctx
                and self.page == other.page
                and self.index == other.index)

    def __repr__(self):
        return "RawPattern(left_ctx=%r, tag_one=%r, middle_ctx=%r, tag_two=%r, right_ctx=%r, page=%r, index=%r)" % (self.left_ctx,
                                                                                                                    self.tag_one,
                                                                                                                    self.middle_ctx,
                                                                                                                    self.tag_two,
                                                                                                                    self.right_ctx,
                                                                                                                    self.page,
                                                                                                                    self.index)
class SnowballPattern(Pattern):
    """A Pattern averaged from a cluster of supporting patterns, carrying
    a smoothed positive/negative match confidence (Snowball-style)."""

    def __init__(self, support, pos, neg, update_factor=config.SNOWBALL_PATTERN_CONFIDENCE_UPDATE_FACTOR, old_conf=1.0):
        # Representative pattern = mean of the supporting patterns
        # (relies on Pattern.__radd__/__add__/__div__).
        avg_pattern = sum(support) / len(support)
        Pattern.__init__(self,
                         avg_pattern.left_ctx,
                         avg_pattern.tag_one,
                         avg_pattern.middle_ctx,
                         avg_pattern.tag_two,
                         avg_pattern.right_ctx)
        self.support = support
        self.pos = pos
        self.neg = neg
        self.update_factor = update_factor
        self.old_conf = old_conf

    def confidence(self):
        # Exponentially smoothed blend of fresh and previous confidence.
        return self.update_factor*self._confidence() + (1-self.update_factor)*self.old_conf

    def _confidence(self):
        return self.rlogf_confidence()

    def rlogf_confidence(self):
        # Precision weighted by log2 of positive support.
        # NOTE(review): pos == 0 would make math.log raise here — confirm
        # callers guarantee pos >= 1.
        return self.raw_confidence() * math.log(self.pos, 2)

    def raw_confidence(self):
        # Plain precision: pos / (pos + neg).
        return float(self.pos) / (self.pos + self.neg)

    def update_confidence(self, tup, tuples):
        # Re-score against known tuples: same subject with the same object
        # counts as a positive match, with a different object as negative.
        self.old_conf = self.confidence()
        for t in tuples:
            if tup.subj == t.subj:
                if tup.obj == t.obj:
                    self.pos += 1
                else:
                    self.neg += 1

    def __eq__(self, other):
        return isinstance(other, SnowballPattern) and self.left_ctx == other.left_ctx and self.tag_one == other.tag_one and self.middle_ctx == other.middle_ctx and self.tag_two == other.tag_two and self.right_ctx == other.right_ctx and self.support == other.support and self.pos == other.pos and self.neg == other.neg and self.update_factor == other.update_factor and self.old_conf == other.old_conf

    def __repr__(self):
        return "SnowballPattern(support=%r, pos=%r, neg=%r, update_factor=%r, old_conf=%r)" % (self.support,
                                                                                               self.pos,
                                                                                               self.neg,
                                                                                               self.update_factor,
                                                                                               self.old_conf)
### TUPLE #########################################################################
class Tuple:
    """An extracted relation instance rel(subj, obj), with entity tags."""

    def __init__(self, rel, subj, obj, subj_tag, obj_tag):
        self.rel = rel
        self.subj = subj
        self.obj = obj
        self.subj_tag = subj_tag
        self.obj_tag = obj_tag

    def as_tuple(self):
        # Bare (subject, object) pair, handy for membership tests.
        return (self.subj, self.obj)

    def subj_string(self):
        # Subject wrapped in its entity tag, as it appears in tagged text.
        return u"<%s>%s</%s>" % (self.subj_tag, self.subj, self.subj_tag)

    def obj_string(self):
        # Object wrapped in its entity tag.
        return u"<%s>%s</%s>" % (self.obj_tag, self.obj, self.obj_tag)

    def __eq__(self, other):
        if not isinstance(other, Tuple):
            return False
        return ((self.rel, self.subj, self.obj, self.subj_tag, self.obj_tag)
                == (other.rel, other.subj, other.obj, other.subj_tag, other.obj_tag))

    def __hash__(self):
        # Hash over exactly the fields used for equality.
        return hash((self.rel,
                     self.subj,
                     self.obj,
                     self.subj_tag,
                     self.obj_tag))

    def __repr__(self):
        return "Tuple(rel=%r, subj=%r, obj=%r, subj_tag=%r, obj_tag=%r)" % (self.rel,
                                                                            self.subj,
                                                                            self.obj,
                                                                            self.subj_tag,
                                                                            self.obj_tag)
class CandidateTuple(Tuple):
    """A Tuple extracted by pattern matching, carrying a confidence score
    and the patterns that produced it.  (Python 2: builtin reduce.)"""

    def __init__(self, rel, subj, obj, subj_tag, obj_tag, conf, update_factor=config.SNOWBALL_TUPLE_CONFIDENCE_UPDATE_FACTOR, patterns=None):
        Tuple.__init__(self, rel, subj, obj, subj_tag, obj_tag)
        self.conf = conf
        self.update_factor = update_factor
        # None default avoids the shared-mutable-default pitfall.
        self.patterns = patterns if patterns else []

    def add_pattern(self, pattern):
        self.patterns.append(pattern)

    def add_patterns(self, patterns):
        for p in patterns:
            self.add_pattern(p)

    def confidence(self):
        return self.conf

    def update_confidence(self, matches, snowball_patterns):
        # Snowball tuple confidence: 1 - prod(1 - sim * relative pattern
        # confidence), then exponentially smoothed with the old value.
        vals = []
        max_conf = max([p.confidence() for p in snowball_patterns])
        for (similarity, pattern) in matches:
            vals.append(1 - similarity*(pattern.confidence() / max_conf))
        new_conf = 1 - reduce(lambda x, y: x*y, vals, 1.0)
        self.conf = self.update_factor*new_conf + (1-self.update_factor)*self.conf

    def __eq__(self, other):
        # NOTE: patterns are deliberately excluded from equality.
        return isinstance(other, CandidateTuple) and self.rel == other.rel and self.subj == other.subj and self.obj == other.obj and self.subj_tag == other.subj_tag and self.obj_tag == other.obj_tag and self.conf == other.conf and self.update_factor == other.update_factor

    def __repr__(self):
        return "CandidateTuple(rel=%r, subj=%r, obj=%r, subj_tag=%r, obj_tag=%r, conf=%r, update_factor=%r, patterns=%r)" % (self.rel,
                                                                                                                             self.subj,
                                                                                                                             self.obj,
                                                                                                                             self.subj_tag,
                                                                                                                             self.obj_tag,
                                                                                                                             self.conf,
                                                                                                                             self.update_factor,
                                                                                                                             self.patterns)
### SENTENCE ######################################################################
class Sentence:
    """One tokenised sentence from a page, holding both the raw token
    stream and the entity-tagged token stream.

    (Python 2 source: xrange, print statements, bytes.decode on file
    lines.)
    """

    def __init__(self, page, index, tokens, tagged_tokens, title):
        self.page = page
        self.index = index
        self.tokens = tokens
        self.tagged_tokens = tagged_tokens
        self.title = title

    def index_combinations_by_tuple(self, tup):
        # All (subject-index, object-index) pairs for a known tuple.
        # NOTE(review): the subject token lookup is commented out; -1 is
        # used as a sentinel, apparently meaning "subject is the page
        # title" (see extract_candidate_tuples) — confirm.
        combos = []
        s1 = tup.subj_string()
        s2 = tup.obj_string()
        # indices1 = [i for i in xrange(len(self.tagged_tokens)) if self.tagged_tokens[i] == s1]
        indices1 = [-1]
        indices2 = [i for i in xrange(len(self.tagged_tokens)) if self.tagged_tokens[i] == s2]
        for i1 in indices1:
            for i2 in indices2:
                if i1 != i2:
                    combos.append((i1, i2))
        return combos

    def index_combinations_by_tags(self, subj_tag, obj_tag):
        # Like index_combinations_by_tuple, but matches any token carrying
        # the requested entity tags (via regexes from utils.rgx).
        combos = []
        r1 = rgx.create_template_rgx(subj_tag)
        r2 = rgx.create_template_rgx(obj_tag)
        # indices1 = [i for i in xrange(len(self.tagged_tokens)) if r1.match(self.tagged_tokens[i])]
        indices1 = [-1]
        indices2 = [i for i in xrange(len(self.tagged_tokens)) if r2.match(self.tagged_tokens[i])]
        for i1 in indices1:
            for i2 in indices2:
                if i1 != i2:
                    combos.append((i1, i2))
        return combos

    def preprocess_tokens(self, tokens):
        # Split underscore-joined tokens back into their component words.
        new_tokens = []
        for token in tokens:
            new_tokens.extend(token.split(u'_'))
        return new_tokens

    def postprocess_tokens(self, tokens, dict, cat):
        # Duplicate tokens found in the keyword table (category *cat*) or
        # in its 'punctuation' entry, doubling their weight in a Context.
        # NOTE(review): shadows the builtin name `dict`; also assumes the
        # table has a 'punctuation' key — confirm against the keyword file.
        new_tokens = []
        for token in tokens:
            new_tokens.append(token)
            if token in dict[cat]:
                new_tokens.append(token)
            if token in dict['punctuation']:
                new_tokens.append(token)
        return new_tokens

    def loadKeyWordsFromFile(self, fileName):
        # Parse a TSV of "subject<TAB>cat1,cat2,..." lines into a
        # {subject: [categories]} dict; underscores in subjects become
        # spaces.  Returns None for an empty file name.
        if len(fileName) <= 0:
            print "Nothing to read for file", fileName
            return
        #print "Text is read from file: " + fileName
        #print ("---------------------------------------------")
        retDic = {}
        f = open(fileName, 'rb')
        rdfLine = f.readline()
        i = 0
        while (rdfLine != ""):
            i= i + 1
            rdfLine = rdfLine.strip()
            item = rdfLine.split("\t")
            subject = item[0].decode('utf-8')
            subject = subject.replace("_", " ")
            Catgories = []
            for cat in item[1].split(","):
                Catgories.append(cat.decode('utf-8'))
            if not subject in retDic:
                retDic[subject] = Catgories
            else:
                for cat in Catgories:
                    retDic[subject].append(cat)
            rdfLine = f.readline()
        f.close()
        return retDic

    def extract_raw_patterns(self, tup, key_words):
        # Build a RawPattern for every index combination of *tup* in this
        # sentence, using keyword-boosted left/right contexts.
        #key_words = self.loadKeyWordsFromFile(config.SNOWBALL_KEYWORDS_FILE)
        patterns = []
        combos = self.index_combinations_by_tuple(tup)
        for (i1, i2) in combos:
            i = i1 if i1 < i2 else i2
            j = i2 if i1 < i2 else i1
            tag_one = tup.subj_tag if i1 < i2 else tup.obj_tag
            tag_two = tup.obj_tag if i1 < i2 else tup.subj_tag
            # left = self.tokens[:i]
            # middle = self.tokens[i+1:j]
            # right = self.tokens[j+1:]
            left = self.tokens[:j]
            middle = []
            right = self.tokens[j+1:]
            # left_ctx = Context(self.preprocess_tokens(left)[-config.SNOWBALL_LR_MAX_WINDOW:],
            #                    config.SNOWBALL_LEFT_CTX_WEIGHT)
            # middle_ctx = Context(self.preprocess_tokens(middle),
            #                      config.SNOWBALL_MIDDLE_CTX_WEIGHT)
            # right_ctx = Context(self.preprocess_tokens(right)[:config.SNOWBALL_LR_MAX_WINDOW],
            #                     config.SNOWBALL_RIGHT_CTX_WEIGHT)
            left_ctx = Context(self.postprocess_tokens(self.preprocess_tokens(left)[-config.SNOWBALL_LR_MAX_WINDOW:],key_words,'cause'),
                               config.SNOWBALL_LEFT_CTX_WEIGHT)
            middle_ctx = Context(self.preprocess_tokens(middle),
                                 config.SNOWBALL_MIDDLE_CTX_WEIGHT)
            # NOTE(review): right_ctx is built from `left`, not `right` —
            # looks like a copy/paste slip; preserved as-is in this pass.
            right_ctx = Context(self.postprocess_tokens(self.preprocess_tokens(left)[:config.SNOWBALL_LR_MAX_WINDOW],key_words,'cause'),
                                config.SNOWBALL_RIGHT_CTX_WEIGHT)
            pattern = RawPattern(left_ctx, tag_one, middle_ctx, tag_two, right_ctx, self.page, self.index)
            patterns.append(pattern)
        return patterns

    def extract_candidate_tuples(self, rel, subj_tag, obj_tag):
        # Generate (CandidateTuple, RawPattern) pairs for every tag-based
        # index combination.  The page title is used as the subject.
        candidates = []
        combos = self.index_combinations_by_tags(subj_tag, obj_tag)
        key_words = self.loadKeyWordsFromFile(config.SNOWBALL_KEYWORDS_FILE)
        for (i1, i2) in combos:
            i = i1 if i1 < i2 else i2
            j = i2 if i1 < i2 else i1
            tag_one = subj_tag if i1 < i2 else obj_tag
            tag_two = obj_tag if i1 < i2 else subj_tag
            left = self.tokens[:j]
            middle = []
            right = self.tokens[j+1:]
            # left_ctx = Context(self.preprocess_tokens(left)[-config.SNOWBALL_LR_MAX_WINDOW:],
            #                    config.SNOWBALL_LEFT_CTX_WEIGHT)
            # middle_ctx = Context(self.preprocess_tokens(middle),
            #                      config.SNOWBALL_MIDDLE_CTX_WEIGHT)
            # right_ctx = Context(self.preprocess_tokens(right)[:config.SNOWBALL_LR_MAX_WINDOW],
            #                     config.SNOWBALL_RIGHT_CTX_WEIGHT)
            left_ctx = Context(self.postprocess_tokens(self.preprocess_tokens(left)[-config.SNOWBALL_LR_MAX_WINDOW:],key_words,'cause'),
                               config.SNOWBALL_LEFT_CTX_WEIGHT)
            middle_ctx = Context(self.preprocess_tokens(middle),
                                 config.SNOWBALL_MIDDLE_CTX_WEIGHT)
            # NOTE(review): right_ctx built from `left` here too — same
            # apparent copy/paste slip as in extract_raw_patterns.
            right_ctx = Context(self.postprocess_tokens(self.preprocess_tokens(left)[:config.SNOWBALL_LR_MAX_WINDOW],key_words,'cause'),
                                config.SNOWBALL_RIGHT_CTX_WEIGHT)
            pattern = RawPattern(left_ctx, tag_one, middle_ctx, tag_two, right_ctx, self.page, self.index)
            subj = self.title
            obj = self.tokens[i2]
            tup = CandidateTuple(rel, subj, obj, subj_tag, obj_tag, 1.0, config.SNOWBALL_TUPLE_CONFIDENCE_UPDATE_FACTOR)
            candidates.append((tup, pattern))
        return candidates

    def __eq__(self, other):
        # NOTE: title is deliberately excluded from equality.
        return isinstance(other, Sentence) and self.page == other.page and self.index == other.index and self.tokens == other.tokens and self.tagged_tokens == other.tagged_tokens

    def __repr__(self):
        return "Sentence(page=%r, index=%r, tokens=%r, tagged_tokens=%r)" % (self.page,
                                                                             self.index,
                                                                             self.tokens,
                                                                             self.tagged_tokens)
### CLUSTERING ####################################################################
class SinglePassClusteringAlgorithm:
    """Greedy single-pass clustering of patterns: each pattern joins the
    best-matching existing cluster when similarity >= threshold, and
    otherwise seeds a new cluster.  (Python 2: list-returning map().)"""

    def __init__(self, patterns, threshold):
        self.patterns = patterns
        self.threshold = threshold
        self.clusters = []

    def calculate_rep(self, members):
        # Cluster representative = mean pattern (Pattern __radd__/__div__).
        return sum(members) / len(members)

    def prepare(self):
        # Seed the first cluster with the first pattern.
        # NOTE(review): cluster() assumes prepare() has run; with no
        # clusters, the max() there would raise on an empty sequence.
        if len(self.patterns) != 0:
            first_cluster = {}
            first_cluster[u'members'] = [self.patterns[0]]
            first_cluster[u'rep'] = self.calculate_rep(first_cluster['members'])
            self.clusters.append(first_cluster)
            self.patterns = self.patterns[1:]

    def cluster(self):
        for pattern in self.patterns:
            # Score the pattern against every cluster representative.
            cluster_matches = map(lambda c: (pattern.match(c['rep']), c),
                                  self.clusters)
            best_cluster_match = max(cluster_matches,
                                     key=lambda m: m[0])
            if best_cluster_match[0] >= self.threshold:
                # NOTE: the cluster's 'rep' is not recomputed on append.
                best_cluster_match[1]['members'].append(pattern)
            else:
                new_cluster = {}
                new_cluster[u'members'] = [pattern]
                new_cluster[u'rep'] = self.calculate_rep(new_cluster[u'members'])
                self.clusters.append(new_cluster)

    def get_snowball_patterns(self):
        # Promote clusters with enough supporting members to
        # SnowballPatterns (initial pos = support size, neg = 0).
        snowball_patterns = []
        for cluster in self.clusters:
            if len(cluster['members']) >= config.SNOWBALL_MIN_PATTERN_SUPPORT:
                pattern = SnowballPattern(cluster['members'],
                                          len(cluster['members']),
                                          0,
                                          config.SNOWBALL_PATTERN_CONFIDENCE_UPDATE_FACTOR)
                snowball_patterns.append(pattern)
        return snowball_patterns
### MAIN ##########################################################################
def main():
    # Placeholder entry point; this module is meant to be imported.
    pass

if __name__ == "__main__":
    main()
|
992,329 | d0e99451068a133ae07f762552a77746cfc051fe | import string, random
def make_word(string_pattern):
    """Return a random pseudo-word following *string_pattern*.

    Each 'c' (or 'C') in the pattern becomes a random consonant; every
    other pattern character becomes a random vowel.
    """
    vowels = ('a', 'e', 'i', 'o', 'u')
    # BUGFIX: random.sample() on a set was deprecated in Python 3.9 and
    # removed in 3.11; sample from ordered sequences instead.
    consonants = tuple(sorted(set(string.ascii_lowercase) - set(vowels)))
    return ''.join(
        random.choice(consonants) if ch.lower() == 'c' else random.choice(vowels)
        for ch in string_pattern
    )
print(make_word('ccccvvcvv')) |
992,330 | c30a493a709e0db5eeaa2c3292d23182ac80b752 | from pyspark.sql import SparkSession
import logging.config
if __name__ == "__main__":
    # Local two-core Spark session for this demo job.
    spark = SparkSession \
        .builder \
        .master("local[2]") \
        .appName("WelcomeToPySparkSQLExample") \
        .getOrCreate()
    logging.config.fileConfig('logging.conf')
    logger = logging.getLogger('WelcomeToPySparkSQLExample')
    # Read the CSV with a header row and schema inference, then expose it
    # to Spark SQL under a temp view name.
    src_df = spark.read \
        .option("header", "true") \
        .option("inferSchema", "true") \
        .csv("data/country.csv")
    src_df.createOrReplaceTempView("SOURCE_TBL")
    logger.info("*** Finish reading source file ***")
    # Count rows under age 40, grouped per country.
    result = spark.sql("""select country, count(*) as count
                from SOURCE_TBL where Age<40 group by country""")
    logger.info(f"***Final dataframe count: {result.count()} ***")
    result.show()
|
992,331 | bdcc4be519fefbc1d4771237ca8da6b2475ccb0e | #!/usr/bin/env python
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))

# The long description comes straight from the README.
with open(os.path.join(here, "README.md")) as f:
    long_description = f.read()

# Pinned runtime dependencies.
requires = [
    'astroid==2.2.0',
    'click==6.7',
    'falcon==1.4.1',
    'gunicorn==19.9.0',
    'jsonschema==2.6.0',
    'pycodestyle==2.3.1',
    'pylint==2.3.0',
    'psycopg2-binary==2.7.6.1',
    'simple_json_log_formatter==0.5.3',
    'SQLAlchemy==1.3.0',
    'SQLAlchemy-Utils==0.33.10'
]

# Extra dependencies needed only by the test suite.
tests_require = [
    'docker-compose',
    'gunicorn==19.9.0',
    'pytest==4.0.2',
    'pytest-codestyle==1.4.0',
    'pytest-cov==2.6.0',
    'pytest-pylint==0.13.0'
]

setup_requires = ['pytest-runner']

setup(
    name=u"Falcon Base Project",
    version="1.0.0",
    description=u"This is a base backend project using the falcon webframework",
    long_description=long_description,
    classifiers=[
        'Programming Language :: Python',
        'Framework :: Falcon',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
    ],
    author='Felippe Costa',
    author_email='felippemsc@gmail.com',
    python_requires='>=3.7',
    packages=find_packages(exclude='tests'),
    install_requires=requires,
    test_suite='tests',
    setup_requires=setup_requires,
    tests_require=tests_require
)
992,332 | 64770cffb0ad3aceb76a14780e7edd53a57fef03 | #export PYTHONPATH=/usr/local/lib/python2.7/site-packages:$PYTHONPATH
import sys
import cv2
import math
import numpy as np
import struct
import pickle
from joblib import Parallel, delayed
def read_imagelist(fname, imagelist):
    # Append every line of *fname* (newline characters included) onto the
    # caller-supplied *imagelist*.
    with open(fname, 'r') as handle:
        imagelist.extend(handle)
    return
#extracting position from binary mask
def extracting_mask(mask_img, mask_position):
    # Collect [row, col] positions where the mask's first channel is
    # exactly 255, in row-major order, mutating *mask_position* in place.
    rows, cols, _channels = mask_img.shape
    for r in range(rows):
        for c in range(cols):
            if mask_img[r, c, :][0] == 255:
                mask_position.append([r, c])
    return mask_position
#R, G, B -> R/(R+G+B), G/(R+G+B), B/(R+G+B)
def extracting_feature(features, mask_position, img):
    """Append one chromaticity triple [R, G, B] / (R + G + B) per masked
    pixel onto *features* and return the list.

    BUGFIX: the original reused a single `color_float` list for every
    append, so all stored features aliased the same object (they all
    ended up equal to the last pixel's values), and pixels whose channel
    sum was zero silently re-appended stale data.
    """
    for (l, c) in mask_position:
        color = img[l, c, :]
        total = int(color[0]) + int(color[1]) + int(color[2])
        if total > 0:
            features.append([float(color[0]) / float(total),
                             float(color[1]) / float(total),
                             float(color[2]) / float(total)])
        else:
            # All-black pixel: chromaticity is undefined; record zeros.
            features.append([0.0, 0.0, 0.0])
    return features
#creating tensor from mean color vector normalized
def creating_tensor_series(features, tensor_series, name_img):
    # Accumulate the 3x3 colour co-occurrence (outer-product) matrix over
    # all features, L2-normalise it, append it to *tensor_series* and
    # checkpoint the whole series to a pickle named after the image.
    w, h = 3, 3;
    matrix = [[0 for x in range(w)] for y in range(h)]
    matrix2 = [[0 for x in range(w)] for y in range(h)]
    for i in range(0,3):
        for j in range(0,3):
            matrix[i][j] = 0.0
            matrix2[i][j] = 0.0
    # Sum of outer products feature x feature over all features.
    for f in range(0,len(features)):
        for i in range(0,3):
            for j in range(0,3):
                matrix[i][j] += features[f][i]*features[f][j]
    for i in range(0,3):
        for j in range(0,3):
            matrix2[i][j] += matrix[i][j]
    #normalizing l2 - each image has a tensor
    # NOTE(review): all-zero features would make the Frobenius norm 0 and
    # divide by zero below — confirm callers never pass an empty mask.
    mean = 0.0
    for i in range(0,3):
        for j in range(0,3):
            mean += matrix2[i][j]*matrix2[i][j]
    for i in range(0,3):
        for j in range(0,3):
            matrix2[i][j] /= math.sqrt(mean)
    tensor_series.append(matrix2)
    # NOTE(review): assumes the image path has at least four '/'-separated
    # components; index 3 is taken as the file-name part — confirm.
    name_img2 = name_img.split('/')
    name_file = 'tensor_from' + name_img2[3] + '.pkl'
    with open(name_file, 'wb') as file:
        pickle.dump({'tensor_series': tensor_series}, file)
    return tensor_series;
#accumulate temporal information
def creating_final_tensor(tensor_series, final_tensor, mask, year, listpickle):
    # Sum the per-image tensors loaded from the pickle files listed in
    # *listpickle*, L2-normalise the sum in place in *final_tensor*, and
    # write it row-by-row to "<mask><year>.tensor".
    # (Python 2 source: print statements.)
    mask = mask + year + ".tensor"
    print mask
    file = open(mask, "w")
    mean = 0.0
    for i in range(0,3):
        for j in range(0,3):
            final_tensor[i][j] = 0.0
    i = 0
    series = []
    with open(listpickle, 'r') as l:
        for line in l:
            pos = line.index('\n')
            with open(line[0:pos], 'rb') as t:
                series.append(pickle.load(t)['tensor_series'])
    print len(series)
    # NOTE(review): series[f][i][j] treats each pickled 'tensor_series'
    # entry as a single 3x3 matrix, but creating_tensor_series pickles a
    # growing LIST of matrices — confirm the pickles' actual shape.
    for f in range(0,len(series)):
        for i in range(0,3):
            for j in range(0,3):
                final_tensor[i][j] += float(series[f][i][j]) #tensor_series[f][i][j]
    #normalizing with l2
    for i in range(0,3):
        for j in range(0,3):
            mean += final_tensor[i][j]*final_tensor[i][j]
    for i in range(0,3):
        for j in range(0,3):
            final_tensor[i][j] /= math.sqrt(mean)
            file.write(str(final_tensor[i][j]) + ' ')
        file.write('\n')
    file.close()
    print final_tensor
    return;
def process(imagelist, i, features, mask_position, tensor_series):
    # Per-image worker: read image *i* (list entries keep their trailing
    # newline, hence the '\n' split), append its masked-pixel colour
    # features, and checkpoint the accumulated tensor series.
    # NOTE(review): *features* is never cleared between images, so every
    # tensor integrates all pixels seen so far — confirm this is intended.
    print imagelist[i]
    pos = imagelist[i].index('\n')
    img = cv2.imread(imagelist[i][0:pos])
    #extracting colors
    extracting_feature(features, mask_position, img)
    #creating tensor from mean color vector normalized
    creating_tensor_series(features, tensor_series,imagelist[i][0:pos])
# Usage: script <mask_image> <imagelist_file> <year> <pickle_list_file>
#read mask
#read imagelist
mask = str(sys.argv[1])
images = str(sys.argv[2])
year = str(sys.argv[3])
listpickle = str(sys.argv[4])
print 'Working on mask', mask
print 'Working on observations', images
mask_img = cv2.imread(mask)
#cv2.imshow('image', mask_img)
imagelist = []
read_imagelist(images, imagelist)
print len(imagelist)
#separate mask
mask_position = []
extracting_mask(mask_img, mask_position)
features = []
tensor_series = []
w, h = 3, 3;
final_tensor = [[0 for x in range(w)] for y in range(h)]
#extract feature and create tensor
#for i in range(0, 100):
#for i in range(0,len(imagelist)):
#    process(imagelist, i, features, mask_position, tensor_series)
# NOTE(review): n_jobs=1 runs serially in-process, so the shared lists
# mutated by process() stay visible; with n_jobs > 1 joblib would fork
# and the accumulated state would be lost — confirm before parallelising.
Parallel(n_jobs=1)(delayed(process)(imagelist, i, features, mask_position, tensor_series) for i in range(len(imagelist)))
#accumulate temporal information
creating_final_tensor(tensor_series, final_tensor, mask,year, listpickle)
|
992,333 | 8d27703042f206f35e496b61f249f2aea84ce770 | students = [1,2,3,4,5]
print(students)
students = [i+100 for i in students]
print(students)
students = ["Iron man","Thor","groot"]
students = [len(i) for i in students]
print(students) |
992,334 | b89c460755f6678558c93101a0ee67713edfde5c | import task3_2 as task3_2 # UNCOMENT THIS to change running file
import task3_lyap as task3_lyap
import task3_3 as task3_3
import timeit
import sys
from copy import deepcopy
import numpy as np
#================time test!====
# Simulation parameters shared by all timing runs below; each run gets a
# deepcopy so the timed calls cannot contaminate one another.
params = {
    "args" : [],
    "kwargs": {
        "Omega": 0.06066,
        "K": 1,
    },
    "iter_number": 100,
    "dt": 0.01,
    "time_limits": [0, 20., 0.01],
    "skip": 1000,
}

# Initial state handed to compute_parameter_map (also deep-copied).
state_d = {
    "x_array": np.array( [0.01, ] ),
    "w": [0,],
}
def task3_A1():
    """Time one full parameter-map computation from the task3_2 module."""
    def _run():
        # Deep copies keep the timed call from mutating the shared globals.
        task3_2.compute_parameter_map(deepcopy(state_d), deepcopy(params))
    print("time of compute_map: ", timeit.timeit(_run, number=1))
def task3_A2():
    """Time one full parameter-map computation from the task3_lyap module."""
    def _run():
        # Deep copies keep the timed call from mutating the shared globals.
        task3_lyap.compute_parameter_map(deepcopy(state_d), deepcopy(params))
    print("time of compute_map: ", timeit.timeit(_run, number=1))
def task3_C():
    """Time one full parameter-map computation from the task3_3 module."""
    def _run():
        # Deep copies keep the timed call from mutating the shared globals.
        task3_3.compute_parameter_map(deepcopy(state_d), deepcopy(params))
    print("time of compute_map: ", timeit.timeit(_run, number=1))
# def test_all_units_origin():
# print()
# def test_list_append():
# evaluate(state_d, params)#(circular_map_kwargs, deepcopy(state_d), deepcopy(params))
#
# # print("time of all list appends: ",timeit.timeit(test_list_append, number=100)/100, " - list append")
#
# def _one_cell_time():
#
# global state_d, params
# state_d, params = evaluate(deepcopy(state_d), deepcopy(params))
# detect_periods(state_d, params)
#
# # print("time of the one cell: ", timeit.timeit(_one_cell_time, number=1))
#
# def evaluate_time():
# global state_d, params
# state_d, params = evaluate(deepcopy(state_d), deepcopy(params))
#
# # print("time of one evaluation: ", timeit.timeit(evaluate_time, number=1))
#
# def compute_map():
# global state_d, params
# compute_parameter_map(deepcopy(state_d), deepcopy(params))
#
# print("time of compute_map: ", timeit.timeit(compute_map, number=1))
if __name__ == '__main__':
    # Only the task3_3 timing is enabled; uncomment to time the others.
    # task3_A1()
    # task3_A2()
    task3_C()
992,335 | c007894cc61feee745f50d90c668acaf7e29f2ef | ###################################################################
### Import Python Libraries
###################################################################
import sys, glob, os, re, math, time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pandas as pd
import pylab as pl
import scipy.optimize as optim
import scipy as sp
from sklearn.metrics import mean_squared_error
from math import sqrt
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
from math import radians, cos, sin, asin, sqrt
from scipy.stats.stats import pearsonr
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.stats import zscore
from scipy import stats
###################################################################
### Useful Functions
###################################################################
def pearsonr_ci(x, y, alpha=0.05):
    """Pearson correlation of *x* and *y* with a (1 - alpha) confidence
    interval.

    Parameters
    ----------
    x, y : array-like
        Input samples for the correlation.
    alpha : float
        Significance level (default 0.05).

    Returns
    -------
    (r, pval, lo, hi) : floats
        Correlation coefficient, its p value, and the lower/upper CI
        bounds from the Fisher z-transform approximation.
    """
    r, pval = stats.pearsonr(x, y)
    # Fisher z-transform: arctanh(r) is ~normal with SE = 1/sqrt(n - 3).
    z_r = np.arctanh(r)
    stderr = 1 / np.sqrt(x.size - 3)
    z_crit = stats.norm.ppf(1 - alpha / 2)
    # Back-transform the z-interval endpoints with tanh.
    lo, hi = np.tanh((z_r - z_crit * stderr, z_r + z_crit * stderr))
    return r, pval, lo, hi
def save(path, ext='png', close=True, verbose=True):
    """Save a figure from pyplot.
    Parameters
    ----------
    path : string
        The path (and filename, without the extension) to save the
        figure to.
    ext : string (default='png')
        The file extension. This must be supported by the active
        matplotlib backend (see matplotlib.backends module). Most
        backends support 'png', 'pdf', 'ps', 'eps', and 'svg'.
    close : boolean (default=True)
        Whether to close the figure after saving. If you want to save
        the figure multiple times (e.g., to multiple formats), you
        should NOT close it in between saves or you will have to
        re-plot it.
    verbose : boolean (default=True)
        whether to print information about when and where the image
        has been saved.
    """
    # Extract the directory and filename from the given path
    directory = os.path.split(path)[0]
    filename = "%s.%s" % (os.path.split(path)[1], ext)
    if directory == '':
        directory = '.'
    #If the directory does not exist, create it
    if not os.path.exists(directory):
        os.makedirs(directory)
    # The final path to save to
    savepath = os.path.join(directory, filename)
    if verbose:
        # NOTE(review): the trailing comma is a Python 2 leftover; under
        # Python 3 it just builds a throwaway (None,) tuple.
        print("Saving figure to '%s'..." % savepath),
    # Actually save the figure
    plt.savefig(savepath)
    # Close it
    if close:
        plt.close()
    if verbose:
        print("Done")
def ZscoreM(x):
    """Standardize every column of *x* to monthly z-scores, in place.

    For each calendar month, each column is centred on that month's mean and
    scaled by that month's sample standard deviation, removing the seasonal
    cycle so anomalies are comparable across months.

    Parameters
    ----------
    x : pandas.DataFrame with a DatetimeIndex.

    Returns
    -------
    The same DataFrame, standardized in place.
    """
    for month in range(1, 13):
        in_month = (x.index.month == month)
        for name in x.columns:
            vals = x.loc[in_month, name]
            # .loc replaces the long-removed pandas .ix indexer and avoids
            # the unreliable chained assignment x[name].ix[...] = ...
            x.loc[in_month, name] = (vals - vals.mean()) / vals.std()
    return x
def ZscoreY(x):
    """Standardize *x* (Series or DataFrame) to monthly z-scores, in place.

    Same transform as ZscoreM but applied to the whole object at once:
    each calendar month is centred and scaled by its own statistics.

    Parameters
    ----------
    x : pandas Series (or DataFrame) with a DatetimeIndex.

    Returns
    -------
    The same object, standardized in place.
    """
    for month in range(1, 13):
        in_month = (x.index.month == month)
        # .loc replaces the removed pandas .ix indexer.
        x.loc[in_month] = (x.loc[in_month] - x.loc[in_month].mean()) / x.loc[in_month].std()
    return x
### Import entire folders of yearly data
numbers = re.compile(r'(\d+)')
def numericalSort(value):
    """Sort key giving natural (numeric) ordering of embedded digit runs.

    Splits *value* on digit runs and converts the digit parts to int so that
    'file2' sorts before 'file10'.  The previous version returned the digit
    parts as strings, which sorted lexicographically ('10' < '2') -- the
    opposite of what the function's name promises.
    """
    parts = numbers.split(value)
    # Odd indices of re.split with one capture group are the digit runs.
    parts[1::2] = [int(p) for p in parts[1::2]]
    return parts
def rmse(y_actual, y_predicted):
    """Root-mean-square error between two equal-length sequences.

    Computed directly with numpy (sqrt of the mean squared difference),
    which is numerically equivalent to sqrt(mean_squared_error(...)) but
    removes the scikit-learn dependency.
    """
    diff = np.asarray(y_actual, dtype=float) - np.asarray(y_predicted, dtype=float)
    return float(np.sqrt(np.mean(diff ** 2)))
###################################################################
### Import Data
###################################################################
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/observations/sfx-trip/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
obs = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ol/LAI_20*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_2/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol2 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_3/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol3 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_4/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol4 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_5/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol5 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_6/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol6 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_7/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol7 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_8/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol8 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_9/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol9 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_10/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol10 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_11/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol11 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_12/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol12 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_13/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol13 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_14/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ol14 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekf/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_2/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf2 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_3/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf3 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_4/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf4 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_5/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf5 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_6/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf6 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_7/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf7 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_8/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf8 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_9/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf9 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_10/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf10 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_11/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf11 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_12/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf12 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_13/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf13 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_14/LAI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
ekf14 = pd.concat(df)
print "LAI DATA IMPORT: COMPLETE"
### Testing Raw SWI vs SWI
### Raw SWI makes some crazy bad correlations - Raw SWI is far, far too high
### RAW SWI
#file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/observations/sfx-trip/SWI*.PData'),key=numericalSort)
#df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
#sobs = pd.concat(df)
### CDF Matched SWI
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/observations/sfx-trip/tmp/SWI*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sobs = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ol/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_2/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol2 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_4/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol4 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_6/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol6 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_8/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol8 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_10/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol10 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_12/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol12 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/olfc_14/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sol14 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekf/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_2/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf2 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_4/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf4 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_6/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf6 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_8/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf8 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_10/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf10 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_12/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf12 = pd.concat(df)
file = sorted(glob.glob('/cnrm/vegeo/muciaa/ldas_chain_python/ldas_curr/US_FC15_SG/results/ekffc_14/WG2*.PData'),key=numericalSort)
df = [pd.read_pickle(file[0]),pd.read_pickle(file[1])]
sekf14 = pd.concat(df)
print "SWI DATA IMPORT: COMPLETE"
###################################################################
### Data Processing
####################################################################
tmp = ol2.copy()*np.nan
tmp.ix[obs.index] = obs
#### Use the following for graphing time-series
t_obs = obs.mean(axis=1)
t_ol = ol[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol2 = ol2[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol3 = ol3[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol4 = ol4[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol5 = ol5[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol6 = ol6[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol7 = ol7[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol8 = ol8[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol9 = ol9[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol10 = ol10[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol11 = ol11[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol12 = ol12[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol13 = ol13[~np.isnan(tmp)].mean(axis=1).dropna()
t_ol14 = ol14[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf = ekf[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf2 = ekf2[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf3 = ekf3[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf4 = ekf4[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf5 = ekf5[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf6 = ekf6[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf7 = ekf7[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf8 = ekf8[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf9 = ekf9[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf10 = ekf10[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf11 = ekf11[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf12 = ekf12[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf13 = ekf13[~np.isnan(tmp)].mean(axis=1).dropna()
t_ekf14 = ekf14[~np.isnan(tmp)].mean(axis=1).dropna()
print "LAI TIME SERIES PROCESSING: COMPLETE"
sobs.index = sobs.index.shift(-1,freq='D')
tmp = sol2.copy()*np.nan
tmp.ix[sobs.index] = sobs
### Use the following for graphing time-series
st_obs = sobs.mean(axis=1)
st_ol = sol[~np.isnan(tmp)].mean(axis=1).dropna()
st_ol2 = sol2[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ol3 = sol3[~np.isnan(tmp)].mean(axis=1).dropna()
st_ol4 = sol4[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ol5 = sol5[~np.isnan(tmp)].mean(axis=1).dropna()
st_ol6 = sol6[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ol7 = sol7[~np.isnan(tmp)].mean(axis=1).dropna()
st_ol8 = sol8[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ol9 = sol9[~np.isnan(tmp)].mean(axis=1).dropna()
st_ol10 = sol10[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ol11 = sol11[~np.isnan(tmp)].mean(axis=1).dropna()
st_ol12 = sol12[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ol13 = sol13[~np.isnan(tmp)].mean(axis=1).dropna()
st_ol14 = sol14[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf = sekf[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf2 = sekf2[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ekf3 = sekf3[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf4 = sekf4[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ekf5 = sekf5[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf6 = sekf6[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ekf7 = sekf7[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf8 = sekf8[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ekf9 = sekf9[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf10 = sekf10[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ekf11 = sekf11[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf12 = sekf12[~np.isnan(tmp)].mean(axis=1).dropna()
#st_ekf13 = sekf13[~np.isnan(tmp)].mean(axis=1).dropna()
st_ekf14 = sekf14[~np.isnan(tmp)].mean(axis=1).dropna()
print "SWI TIME SERIES PROCESSING: COMPLETE"
### Use the following for mapping yearly results
m_obs = obs.mean(axis=0)
m_ol = ol[~np.isnan(tmp)].dropna()
m_ol2 = ol2[~np.isnan(tmp)].dropna()
m_ol3 = ol3[~np.isnan(tmp)].dropna()
m_ol4 = ol4[~np.isnan(tmp)].dropna()
m_ol5 = ol5[~np.isnan(tmp)].dropna()
m_ol6 = ol6[~np.isnan(tmp)].dropna()
m_ol7 = ol7[~np.isnan(tmp)].dropna()
m_ol8 = ol8[~np.isnan(tmp)].dropna()
m_ol9 = ol9[~np.isnan(tmp)].dropna()
m_ol10 = ol10[~np.isnan(tmp)].dropna()
m_ol11 = ol11[~np.isnan(tmp)].dropna()
m_ol12 = ol12[~np.isnan(tmp)].dropna()
m_ol13 = ol13[~np.isnan(tmp)].dropna()
m_ol14 = ol14[~np.isnan(tmp)].dropna()
m_ekf = ekf[~np.isnan(tmp)].dropna()
m_ekf2 = ekf2[~np.isnan(tmp)].dropna()
m_ekf3 = ekf3[~np.isnan(tmp)].dropna()
m_ekf4 = ekf4[~np.isnan(tmp)].dropna()
m_ekf5 = ekf5[~np.isnan(tmp)].dropna()
m_ekf6 = ekf6[~np.isnan(tmp)].dropna()
m_ekf7 = ekf7[~np.isnan(tmp)].dropna()
m_ekf8 = ekf8[~np.isnan(tmp)].dropna()
m_ekf9 = ekf9[~np.isnan(tmp)].dropna()
m_ekf10 = ekf10[~np.isnan(tmp)].dropna()
m_ekf11 = ekf11[~np.isnan(tmp)].dropna()
m_ekf12 = ekf12[~np.isnan(tmp)].dropna()
m_ekf13 = ekf13[~np.isnan(tmp)].dropna()
m_ekf14 = ekf14[~np.isnan(tmp)].dropna()
print "MAP SERIES PROCESSING: COMPLETE"
###################################################################
### Compute Correlations
###################################################################
### Convert all Series into DataFrames in order to do 'corrwith' correlations
### that omit missing values from either data source.
d_obs = pd.DataFrame(t_obs)
# LAI runs exist for every forecast length 2..14 plus the no-forecast run
# (empty suffix); wrap each t_* series in a matching d_* DataFrame.
for _sfx in [''] + [str(_d) for _d in range(2, 15)]:
    globals()['d_ol' + _sfx] = pd.DataFrame(globals()['t_ol' + _sfx])
    globals()['d_ekf' + _sfx] = pd.DataFrame(globals()['t_ekf' + _sfx])
sd_obs = pd.DataFrame(st_obs)
# Only the even-day SWI runs were processed above.
for _sfx in [''] + [str(_d) for _d in (2, 4, 6, 8, 10, 12, 14)]:
    globals()['sd_ol' + _sfx] = pd.DataFrame(globals()['st_ol' + _sfx])
    globals()['sd_ekf' + _sfx] = pd.DataFrame(globals()['st_ekf' + _sfx])
# Correlate each model run's CONUS-mean series against the observations.
# Empty suffix is the no-forecast run; LAI uses every forecast length 2..14.
for _sfx in [''] + [str(_d) for _d in range(2, 15)]:
    globals()['cor_ol%s_obs' % _sfx] = d_obs.corrwith(globals()['d_ol' + _sfx], axis=0, drop=True)
    globals()['cor_ekf%s_obs' % _sfx] = d_obs.corrwith(globals()['d_ekf' + _sfx], axis=0, drop=True)
##
# SWI: only the even-day runs exist.
for _sfx in [''] + [str(_d) for _d in (2, 4, 6, 8, 10, 12, 14)]:
    globals()['scor_ol%s_obs' % _sfx] = sd_obs.corrwith(globals()['sd_ol' + _sfx], axis=0, drop=True)
    globals()['scor_ekf%s_obs' % _sfx] = sd_obs.corrwith(globals()['sd_ekf' + _sfx], axis=0, drop=True)
### LAI and SWI
### [LAI_Correlation, LAI_RMSD, SWI_Correlation, SWI_RMSD]
### EVAP
# NOTE(review): the hard-coded lists below OVERWRITE the cor_*_obs Series
# just computed with corrwith above -- presumably precomputed scores pasted
# in for plotting; confirm that shadowing the live computation is intended.
# Each list is [LAI correlation, LAI RMSD, SWI correlation, SWI RMSD].
cor_ol_obs = [0.686,1.187,0.864,0.045]
cor_ol2_obs = [0.683,1.193,0.856,0.046]
cor_ol3_obs = [0.685,1.196,0.849,0.047]
cor_ol4_obs = [0.685,1.196,0.841,0.048]
cor_ol5_obs = [0.689,1.188,0.828,0.050]
cor_ol6_obs = [0.682,1.202,0.815,0.052]
cor_ol7_obs = [0.690,1.169,0.805,0.053]
cor_ol8_obs = [0.685,1.210,0.792,0.055]
cor_ol9_obs = [0.702,1.180,0.777,0.057]
cor_ol10_obs = [0.689,1.207,0.766,0.058]
cor_ol11_obs = [0.686,1.215,0.757,0.059]
cor_ol12_obs = [0.683,1.219,0.746,0.061]
cor_ol13_obs = [0.685,1.210,0.740,0.061]
cor_ol14_obs = [0.689,1.213,0.736,0.062]
cor_ekf_obs = [0.870,0.716,0.879,0.042]
cor_ekf2_obs = [0.811,0.858,0.864,0.044]
cor_ekf3_obs = [0.811,0.864,0.855,0.046]
cor_ekf4_obs = [0.814,0.864,0.845,0.047]
cor_ekf5_obs = [0.814,0.861,0.831,0.049]
cor_ekf6_obs = [0.811,0.860,0.818,0.051]
cor_ekf7_obs = [0.813,0.870,0.807,0.053]
cor_ekf8_obs = [0.815,0.866,0.793,0.054]
cor_ekf9_obs = [0.822,0.847,0.778,0.056]
cor_ekf10_obs = [0.816,0.857,0.768,0.058]
cor_ekf11_obs = [0.781,0.945,0.758,0.059]
cor_ekf12_obs = [0.772,0.961,0.747,0.060]
cor_ekf13_obs = [0.771,0.965,0.741,0.061]
cor_ekf14_obs = [0.776,0.965,0.736,0.061]
# The ecor_* lists duplicate the cor_* values above (even days only, odd
# days commented out); they do not appear to be referenced again below.
ecor_ol_obs = [0.686,1.187,0.864,0.045]
ecor_ol2_obs = [0.683,1.193,0.856,0.046]
#ecor_ol3_obs = [0.685,1.196,0.849,0.047]
ecor_ol4_obs = [0.685,1.196,0.841,0.048]
#ecor_ol5_obs = [0.689,1.188,0.828,0.050]
ecor_ol6_obs = [0.682,1.202,0.815,0.052]
#ecor_ol7_obs = [0.690,1.169,0.805,0.053]
ecor_ol8_obs = [0.685,1.210,0.792,0.055]
#ecor_ol9_obs = [0.702,1.180,0.777,0.057]
ecor_ol10_obs = [0.689,1.207,0.766,0.058]
#ecor_ol11_obs = [0.686,1.215,0.757,0.059]
ecor_ol12_obs = [0.683,1.219,0.746,0.061]
#ecor_ol13_obs = [0.685,1.210,0.740,0.061]
ecor_ol14_obs = [0.689,1.213,0.736,0.062]
ecor_ekf_obs = [0.870,0.716,0.879,0.042]
ecor_ekf2_obs = [0.811,0.858,0.864,0.044]
#ecor_ekf3_obs = [0.811,0.864,0.855,0.046]
ecor_ekf4_obs = [0.814,0.864,0.845,0.047]
#ecor_ekf5_obs = [0.814,0.861,0.831,0.049]
ecor_ekf6_obs = [0.811,0.860,0.818,0.051]
#ecor_ekf7_obs = [0.813,0.870,0.807,0.053]
ecor_ekf8_obs = [0.815,0.866,0.793,0.054]
#ecor_ekf9_obs = [0.822,0.847,0.778,0.056]
ecor_ekf10_obs = [0.816,0.857,0.768,0.058]
#ecor_ekf11_obs = [0.781,0.945,0.758,0.059]
ecor_ekf12_obs = [0.772,0.961,0.747,0.060]
#ecor_ekf13_obs = [0.771,0.965,0.741,0.061]
ecor_ekf14_obs = [0.776,0.965,0.736,0.061]
# Report the (hard-coded) LAI correlations; index [0] of each list.
print("LAI Correlations")
print("------------------------------------------------------------------")
print("Correlation - OL vs Obs: " + str(round(cor_ol_obs[0],4)))
print("Correlation - OL 2 Day FC vs Obs: " + str(round(cor_ol2_obs[0],4)))
print("Correlation - OL 3 Day FC vs Obs: " + str(round(cor_ol3_obs[0],4)))
print("Correlation - OL 4 Day FC vs Obs: " + str(round(cor_ol4_obs[0],4)))
print("Correlation - OL 5 Day FC vs Obs: " + str(round(cor_ol5_obs[0],4)))
print("Correlation - OL 6 Day FC vs Obs: " + str(round(cor_ol6_obs[0],4)))
print("Correlation - OL 7 Day FC vs Obs: " + str(round(cor_ol7_obs[0],4)))
print("Correlation - OL 8 Day FC vs Obs: " + str(round(cor_ol8_obs[0],4)))
print("Correlation - OL 9 Day FC vs Obs: " + str(round(cor_ol9_obs[0],4)))
print("Correlation - OL 10 Day FC vs Obs: " + str(round(cor_ol10_obs[0],4)))
print("Correlation - OL 11 Day FC vs Obs: " + str(round(cor_ol11_obs[0],4)))
print("Correlation - OL 12 Day FC vs Obs: " + str(round(cor_ol12_obs[0],4)))
print("Correlation - OL 13 Day FC vs Obs: " + str(round(cor_ol13_obs[0],4)))
print("Correlation - OL 14 Day FC vs Obs: " + str(round(cor_ol14_obs[0],4)))
print("------------------------------------------------------------------")
print("Correlation - EKF vs Obs: " + str(round(cor_ekf_obs[0],4)))
print("Correlation - EKF 2 Day FC vs Obs: " + str(round(cor_ekf2_obs[0],4)))
print("Correlation - EKF 3 Day FC vs Obs: " + str(round(cor_ekf3_obs[0],4)))
print("Correlation - EKF 4 Day FC vs Obs: " + str(round(cor_ekf4_obs[0],4)))
print("Correlation - EKF 5 Day FC vs Obs: " + str(round(cor_ekf5_obs[0],4)))
print("Correlation - EKF 6 Day FC vs Obs: " + str(round(cor_ekf6_obs[0],4)))
print("Correlation - EKF 7 Day FC vs Obs: " + str(round(cor_ekf7_obs[0],4)))
print("Correlation - EKF 8 Day FC vs Obs: " + str(round(cor_ekf8_obs[0],4)))
print("Correlation - EKF 9 Day FC vs Obs: " + str(round(cor_ekf9_obs[0],4)))
print("Correlation - EKF 10 Day FC vs Obs: " + str(round(cor_ekf10_obs[0],4)))
print("Correlation - EKF 11 Day FC vs Obs: " + str(round(cor_ekf11_obs[0],4)))
print("Correlation - EKF 12 Day FC vs Obs: " + str(round(cor_ekf12_obs[0],4)))
print("Correlation - EKF 13 Day FC vs Obs: " + str(round(cor_ekf13_obs[0],4)))
print("Correlation - EKF 14 Day FC vs Obs: " + str(round(cor_ekf14_obs[0],4)))
print("------------------------------------------------------------------")
# SWI correlations come from the live corrwith computation (scor_*), which
# is why only even forecast days are printed.
print("SWI Correlations")
print("------------------------------------------------------------------")
print("Correlation - OL vs Obs: " + str(round(scor_ol_obs[0],4)))
print("Correlation - OL 2 Day FC vs Obs: " + str(round(scor_ol2_obs[0],4)))
#print("Correlation - OL 3 Day FC vs Obs: " + str(round(scor_ol3_obs[0],4)))
print("Correlation - OL 4 Day FC vs Obs: " + str(round(scor_ol4_obs[0],4)))
#print("Correlation - OL 5 Day FC vs Obs: " + str(round(scor_ol5_obs[0],4)))
print("Correlation - OL 6 Day FC vs Obs: " + str(round(scor_ol6_obs[0],4)))
#print("Correlation - OL 7 Day FC vs Obs: " + str(round(scor_ol7_obs[0],4)))
print("Correlation - OL 8 Day FC vs Obs: " + str(round(scor_ol8_obs[0],4)))
#print("Correlation - OL 9 Day FC vs Obs: " + str(round(scor_ol9_obs[0],4)))
print("Correlation - OL 10 Day FC vs Obs: " + str(round(scor_ol10_obs[0],4)))
#print("Correlation - OL 11 Day FC vs Obs: " + str(round(scor_ol11_obs[0],4)))
print("Correlation - OL 12 Day FC vs Obs: " + str(round(scor_ol12_obs[0],4)))
#print("Correlation - OL 13 Day FC vs Obs: " + str(round(scor_ol13_obs[0],4)))
print("Correlation - OL 14 Day FC vs Obs: " + str(round(scor_ol14_obs[0],4)))
print("------------------------------------------------------------------")
print("Correlation - EKF vs Obs: " + str(round(scor_ekf_obs[0],4)))
print("Correlation - EKF 2 Day FC vs Obs: " + str(round(scor_ekf2_obs[0],4)))
#print("Correlation - EKF 3 Day FC vs Obs: " + str(round(scor_ekf3_obs[0],4)))
print("Correlation - EKF 4 Day FC vs Obs: " + str(round(scor_ekf4_obs[0],4)))
#print("Correlation - EKF 5 Day FC vs Obs: " + str(round(scor_ekf5_obs[0],4)))
print("Correlation - EKF 6 Day FC vs Obs: " + str(round(scor_ekf6_obs[0],4)))
#print("Correlation - EKF 7 Day FC vs Obs: " + str(round(scor_ekf7_obs[0],4)))
print("Correlation - EKF 8 Day FC vs Obs: " + str(round(scor_ekf8_obs[0],4)))
#print("Correlation - EKF 9 Day FC vs Obs: " + str(round(scor_ekf9_obs[0],4)))
print("Correlation - EKF 10 Day FC vs Obs: " + str(round(scor_ekf10_obs[0],4)))
#print("Correlation - EKF 11 Day FC vs Obs: " + str(round(scor_ekf11_obs[0],4)))
print("Correlation - EKF 12 Day FC vs Obs: " + str(round(scor_ekf12_obs[0],4)))
#print("Correlation - EKF 13 Day FC vs Obs: " + str(round(scor_ekf13_obs[0],4)))
print("Correlation - EKF 14 Day FC vs Obs: " + str(round(scor_ekf14_obs[0],4)))
print("------------------------------------------------------------------")
#lai_ol=[cor_ol_obs[0], cor_ol2_obs[0], cor_ol4_obs[0],cor_ol6_obs[0],cor_ol8_obs[0],cor_ol10_obs[0],cor_ol12_obs[0],cor_ol14_obs[0]]
#lai_ekf=[cor_ekf_obs[0], cor_ekf2_obs[0], cor_ekf4_obs[0],cor_ekf6_obs[0],cor_ekf8_obs[0],cor_ekf10_obs[0],cor_ekf12_obs[0],cor_ekf14_obs[0]]
#swi_ol=[scor_ol_obs[0], scor_ol2_obs[0], scor_ol4_obs[0],scor_ol6_obs[0],scor_ol8_obs[0],scor_ol10_obs[0],scor_ol12_obs[0],scor_ol14_obs[0]]
#swi_ekf=[scor_ekf_obs[0], scor_ekf2_obs[0], scor_ekf4_obs[0],scor_ekf6_obs[0],scor_ekf8_obs[0],scor_ekf10_obs[0],scor_ekf12_obs[0],scor_ekf14_obs[0]]
# Assemble per-forecast-day curves from the hard-coded lists for plotting:
# indices 0/2 are the LAI/SWI correlations, 1/3 the LAI/SWI RMSDs.
lai_ol=[cor_ol_obs[0], cor_ol2_obs[0], cor_ol4_obs[0],cor_ol6_obs[0],cor_ol8_obs[0],cor_ol10_obs[0],cor_ol12_obs[0],cor_ol14_obs[0]]
lai_ekf=[cor_ekf_obs[0], cor_ekf2_obs[0], cor_ekf4_obs[0],cor_ekf6_obs[0],cor_ekf8_obs[0],cor_ekf10_obs[0],cor_ekf12_obs[0],cor_ekf14_obs[0]]
swi_ol=[cor_ol_obs[2], cor_ol2_obs[2], cor_ol4_obs[2],cor_ol6_obs[2],cor_ol8_obs[2],cor_ol10_obs[2],cor_ol12_obs[2],cor_ol14_obs[2]]
swi_ekf=[cor_ekf_obs[2], cor_ekf2_obs[2], cor_ekf4_obs[2],cor_ekf6_obs[2],cor_ekf8_obs[2],cor_ekf10_obs[2],cor_ekf12_obs[2],cor_ekf14_obs[2]]
rlai_ol=[cor_ol_obs[1], cor_ol2_obs[1], cor_ol4_obs[1],cor_ol6_obs[1],cor_ol8_obs[1],cor_ol10_obs[1],cor_ol12_obs[1],cor_ol14_obs[1]]
rlai_ekf=[cor_ekf_obs[1], cor_ekf2_obs[1], cor_ekf4_obs[1],cor_ekf6_obs[1],cor_ekf8_obs[1],cor_ekf10_obs[1],cor_ekf12_obs[1],cor_ekf14_obs[1]]
rswi_ol=[cor_ol_obs[3], cor_ol2_obs[3], cor_ol4_obs[3],cor_ol6_obs[3],cor_ol8_obs[3],cor_ol10_obs[3],cor_ol12_obs[3],cor_ol14_obs[3]]
rswi_ekf=[cor_ekf_obs[3], cor_ekf2_obs[3], cor_ekf4_obs[3],cor_ekf6_obs[3],cor_ekf8_obs[3],cor_ekf10_obs[3],cor_ekf12_obs[3],cor_ekf14_obs[3]]
###################################################################
### Graphing
###################################################################
# The triple-quoted block below is plotting code deliberately disabled by
# wrapping it in a bare string literal (time-series plots of LAI and SWI
# over CONUS); kept verbatim for easy re-enabling.
'''
plt.title('LAI over CONUS',fontsize=24)
plt.plot(t_obs,label='Observations',marker='*',color='green',linewidth=0,markersize=10)
plt.plot(t_ol,label='OL',color='blue',linewidth=1)
#plt.plot(t_ol2,label='OL 2 Day FC',linestyle='--',linewidth=2,color='cyan')
#plt.plot(t_ol4,label='OL 4 Day FC',linestyle='--')
#plt.plot(t_ol6,label='OL 6 Day FC',linestyle='--',linewidth=2,color='yellow')
#plt.plot(t_ol8,label='OL 8 Day FC',linestyle='--')
plt.plot(t_ol10,label='OL 10 Day FC',linestyle='--',linewidth=1,color='blue')
#plt.plot(t_ol12,label='OL 12 Day FC',linestyle='--')
#plt.plot(t_ol14,label='OL 14 Day FC',linestyle='--',linewidth=2,color='black')
plt.plot(t_ekf,label='EKF',color='red',linewidth=1)
#plt.plot(t_ekf2,label='EKF 2 Day FC',linestyle=':',linewidth=2,color='cyan')
#plt.plot(t_ekf4,label='EKF 4 Day FC',linestyle=':')
#plt.plot(t_ekf6,label='EKF 6 Day FC',linestyle=':',linewidth=2,color='yellow')
#plt.plot(t_ekf8,label='EKF 8 Day FC',linestyle=':')
plt.plot(t_ekf10,label='EKF 10 Day FC',linestyle=':',linewidth=1,color='red')
#plt.plot(t_ekf12,label='EKF 12 Day FC',linestyle=':')
#plt.plot(t_ekf14,label='EKF 14 Day FC',linestyle=':',linewidth=2,color='black')
#plt.axhline(color='black',linewidth=.5)
plt.legend(loc='upper left')
#plt.ylim([0,1])
plt.ylabel('Leaf Area Index [$m^2$/$m^2$]',fontsize=24)
plt.yticks(fontsize=22)
plt.xticks(fontsize=22)
plt.locator_params(axis='y', nbins=4)
#plt.ylabel('',fontsize=24)
#plt.rcParams.update({'font.size': 10})
plt.show()
plt.title('SWI over CONUS',fontsize=24)
plt.plot(st_obs,label='Observations',marker='*',color='green',linewidth=0,markersize=10)
plt.plot(st_ol,label='OL',color='blue',linewidth=1)
#plt.plot(st_ol2,label='OL 2 Day FC',linestyle='--',linewidth=2,color='cyan')
#plt.plot(st_ol4,label='OL 4 Day FC',linestyle='--')
#plt.plot(st_ol6,label='OL 6 Day FC',linestyle='--',linewidth=2,color='yellow')
#plt.plot(st_ol8,label='OL 8 Day FC',linestyle='--')
plt.plot(st_ol10,label='OL 10 Day FC',linestyle='--',linewidth=1,color='blue')
#plt.plot(st_ol12,label='OL 12 Day FC',linestyle='--')
#plt.plot(st_ol14,label='OL 14 Day FC',linestyle='--',linewidth=2,color='black')
plt.plot(st_ekf,label='EKF',color='red',linewidth=1)
#plt.plot(st_ekf2,label='EKF 2 Day FC',linestyle=':',linewidth=2,color='cyan')
#plt.plot(st_ekf4,label='EKF 4 Day FC',linestyle=':')
#plt.plot(st_ekf6,label='EKF 6 Day FC',linestyle=':',linewidth=2,color='yellow')
#plt.plot(st_ekf8,label='EKF 8 Day FC',linestyle=':')
plt.plot(st_ekf10,label='EKF 10 Day FC',linestyle=':',linewidth=1,color='red')
#plt.plot(st_ekf12,label='EKF 12 Day FC',linestyle=':')
#plt.plot(st_ekf14,label='EKF 14 Day FC',linestyle=':',linewidth=2,color='black')
#plt.axhline(color='black',linewidth=.5)
plt.legend(loc='upper left')
plt.ylim([0.2,0.35])
#plt.ylabel('Leaf Area Index [$m^2$/$m^2$]',fontsize=24)
plt.ylabel('SWI',fontsize=24)
#plt.rcParams.update({'font.size': 10})
plt.yticks(fontsize=22)
plt.xticks(fontsize=22)
plt.locator_params(axis='y', nbins=4)
plt.show()
'''
# Active plot: correlation (R) of each run against the satellite product as
# a function of forecast length, OL vs EKF, using the lai_ol/lai_ekf curves
# assembled above.
#ollai = ['OL','OL FC2','OL FC4', 'OL FC6', 'OL FC8', 'OL FC10', 'OL FC12', 'OL FC14']
#ekflai = ['EKF','EKF FC2','EKF FC4', 'EKF FC6', 'EKF FC8', 'EKF FC10', 'EKF FC12', 'EKF FC14']
ticks = ['NO FC','FC2','FC4', 'FC6', 'FC8', 'FC10', 'FC12', 'FC14']
fig, ax1 = plt.subplots()
plt.title('LAI Forecast Satellite Correlations over CONUS',fontsize=24)
ax1.set_ylabel('R',fontsize=24)
ax1.set_xlabel('Forecast Day',fontsize=24)
plt.xticks(np.arange(8),ticks,fontsize=20)
plt.yticks(fontsize=20)
#ax1.yaxis.set_major_locator(plt.MaxNLocator(5))
#plt.locator_params(axis='y', nbins=4)
ax1.set_yticks([0.7,0.75,0.8,0.85,0.9])
ax1.plot(lai_ol,markersize=20,marker='.',linewidth=1,label='OL',color='b',linestyle='--')
ax1.plot(lai_ekf,markersize=20,marker='.',linewidth=1,label='EKF',color='r',linestyle='--')
#ax1.plot(swi_ol,markersize=20,marker='.',linewidth=1,label='OL',color='b',linestyle='--')
#ax1.plot(swi_ekf,markersize=20,marker='.',linewidth=1,label='EKF',color='r',linestyle ='--')
ax1.margins(0.05)
ax1.legend(loc='upper right',fontsize=24)
plt.show()
#ax2=ax1.twinx()
#ax2.set_ylabel('RMSD',fontsize=16)
#ax2.plot(rlai_ol,markersize=12,marker='*',linewidth=0.5,label='OL LAI RMSD',color='b',linestyle='-')
#ax2.plot(rlai_ekf,markersize=12,marker='*',linewidth=0.5,label='EKF LAI RMSD',color='r',linestyle='-')
##ax2.plot(rswi_ol,markersize=12,marker='*',linewidth=0.5,label='OL SWI RMSD',color='c',linestyle='--')
##ax2.plot(rswi_ekf,markersize=12,marker='*',linewidth=0.5,label='EKF SWI RMSD',color='m',linestyle='--')
#ax2.margins(0.05)
#plt.legend(loc='upper right')
#plt.title('SWI Correlations',fontsize=16)
#plt.plot(scor_ol_obs,label='OL',color='blue',linewidth=1)
###################################################################
### Mapping
###################################################################
###################################################################
### MatPlotLib Finishing Touches
###################################################################
### for 'add_axes' it is [left,bottom,witdth,height], plt.figure(#) is seperate image, or on the same plot
#fig = plt.figure(0)
#cbaxes = fig.add_axes([0.08,0.85,0.7,0.025])
#fig.colorbar(im,ax=axes.ravel().tolist())
#cbaxes = matplotlib.colorbar.make_axes(location='bottom')
#cbar = fig.colorbar(im,cax=cbaxes,orientation='horizontal',ticks=[-.75,0,.75])
#cbar.ax.set_xticklabels(['-0.75','0','0.75'])
#save('../vegeoFigures/Yield-RainF', ext='ps', close=True, verbose=True)
#plt.show()
|
992,336 | 478ec1da0cb46ce076c624cd059d86d88ab35e8c | '''
Классы
Домашнее задание
Вопросы по лекциям
1.
Напишите название функции, которая является конструктором класса.
Ответ:
__init__(self)
2.
На что указывает переменная self?
Ответ:
На сам объект(instance)
3.
С помощью какой функции можно проверить, что некая строка является именем одного из атрибутов объекта?
Ответ:
hasattr()
4.
Когда вызывается метод __del__? (относительно события удаления объекта)
Ответ:
Метод __del__ вызывается каждый раз, когда на объект в коде не остается ни одной ссылки. Можно вызвать __del__
явно для удаления объекта в конце выполнения функции, например.
5.
Верно ли, что атрибут класса перекрывает атрибут объекта?
Ответ:
Если объекту присваивается атрибут с таким же именем как в классе, то он перекрывает, или переопределяет, атрибут класса.
6.
Можно ли атрибуты базового класса вызывать в дочернем классе? Если да, то напишите, нет ли исключений?
Ответ:
Вызывать атрибуты базового класса можно в дочернем, если только они не системные и не начинаются с двойного подчеркивания.
7.
Объясните своими словами для чего нужен метод super.
Ответ:
Метод super позволяет вызывать базовый метод и на его основе дополнять нужную функциональность в методе дочернего класса.
Практика
Напишите класс Fraction для работы с дробями. Пусть дробь в нашем классе предстает в виде числитель/знаменатель.
Дробное число должно создаваться по запросу Fraction(a, b), где a – это числитель, а b – знаменатель дроби.
Добавьте возможность сложения (сложения через оператор сложения) для дроби. Предполагается, что операция сложения
может проводиться как только между дробями, так и между дробью и целым числом. Результат операции должен быть
представлен в виде дроби.
Добавьте возможность взятия разности (вычитания через оператор вычитания) для дробей. Предполагается, что операция
вычитания может проводиться как только для двух дробей, так и для дроби и целого числа. Результат операции должен быть
представлен в виде дроби.
Добавьте возможность умножения (умножения через оператор умножения) для дробей. Предполагается, что операция умножения
может проводиться как только для двух дробей, так и для дроби и целого числа. Результат операции должен быть представлен
в виде дроби.
Добавьте возможность приведения дроби к целому числу через стандартную функцию int().
Добавьте возможность приведения дроби к числу с плавающей точкой через стандартную функцию float().
Создайте дочерний класс OperationsOnFraction и добавьте туда собственные методы getint и getfloat, которые будут
возвращять целую часть дроби и представление дроби в виде числа с плавающей точкой соответственно.
### YOUR CODE HERE ###
'''
class Fraction:
    """A rational number numerator_a/denominator_b (denominator defaults to 1).

    Supports +, -, * with other Fractions and with plain ints (as the task
    docstring requires), int()/float() conversion, and reduces every
    arithmetic result by the greatest common divisor.
    """

    def __init__(self, numerator_a, denominator_b=1):
        # Validate that both parts are convertible to int; report and re-raise.
        try:
            numerator_a = int(numerator_a)
        except ValueError:
            print(f'Numerator {numerator_a} is not an integer!')
            raise
        try:
            denominator_b = int(denominator_b)
        except ValueError:
            print(f'Denominator {denominator_b} is not an integer!')
            raise
        self.numerator_a = numerator_a
        self.denominator_b = denominator_b
        self.fraction = str(numerator_a) + '/' + str(denominator_b)

    def __str__(self):
        return f'{self.__class__.__name__} {self.fraction}'

    @staticmethod
    def _gcd(first, second):
        """Greatest common divisor (Euclid's algorithm), non-negative."""
        first, second = abs(first), abs(second)
        while second:
            first, second = second, first % second
        return first

    def least_common_multiple_func(self, other_denominator) -> int:
        """Return the least common multiple of the two denominators.

        Uses lcm(a, b) == a * b // gcd(a, b) instead of the previous
        brute-force increment loop.
        """
        return (self.denominator_b * other_denominator
                // self._gcd(self.denominator_b, other_denominator))

    def common_divisor(self, numerator_a, denominator_b):
        """Return the greatest common divisor (> 1) of the values, or None.

        Bug fix: the previous version returned the *smallest* common
        divisor, so a single division left results only partially reduced
        (e.g. 4/8 instead of 1/2).
        """
        divisor = self._gcd(numerator_a, denominator_b)
        return divisor if divisor > 1 else None

    def _coerce(self, other):
        """Allow plain ints wherever a Fraction operand is expected."""
        if isinstance(other, Fraction):
            return other
        return Fraction(other)

    def _reduced(self, numerator, denominator):
        """Build a Fraction reduced by the greatest common divisor."""
        divisor = self.common_divisor(numerator, denominator)
        if divisor is not None:
            numerator //= divisor
            denominator //= divisor
        return Fraction(numerator, denominator)

    def _combine(self, other, sign):
        """Shared implementation of __add__/__sub__; sign is +1 or -1."""
        other = self._coerce(other)
        lcm = self.least_common_multiple_func(other.denominator_b)
        numerator = (lcm // self.denominator_b * self.numerator_a
                     + sign * (lcm // other.denominator_b * other.numerator_a))
        return self._reduced(numerator, lcm)

    def __add__(self, other):
        """Fraction + Fraction or Fraction + int, result fully reduced."""
        return self._combine(other, 1)

    def __sub__(self, other):
        """Fraction - Fraction or Fraction - int, result fully reduced."""
        return self._combine(other, -1)

    def __mul__(self, other):
        """Fraction * Fraction or Fraction * int, result fully reduced."""
        other = self._coerce(other)
        return self._reduced(self.numerator_a * other.numerator_a,
                             self.denominator_b * other.denominator_b)

    def __int__(self) -> int:
        """Return (and print) the integer part of the fraction."""
        integer_number = self.numerator_a // self.denominator_b
        print(f'Fraction {self.fraction} integer number is {integer_number}')
        return integer_number

    def __float__(self) -> float:
        """Return (and print) the fraction as a floating point number."""
        float_number = self.numerator_a / self.denominator_b
        print(f'Fraction {self.fraction} float number is {float_number}')
        return float_number
class OperationsOnFraction(Fraction):
    """Helper subclass exposing explicit int/float conversions for fractions."""

    def __init__(self, numerator_a=0, denominator_b=1):
        # Bug fix: the denominator used to default to 0, which produced the
        # meaningless fraction "0/0" and a ZeroDivisionError on conversion.
        super().__init__(numerator_a=numerator_a, denominator_b=denominator_b)

    def _adopt(self, fraction):
        """Copy the state of *fraction* into self before converting."""
        self.numerator_a = fraction.numerator_a
        self.denominator_b = fraction.denominator_b
        self.fraction = str(self.numerator_a) + '/' + str(self.denominator_b)

    def getint(self, fraction) -> int:
        """Return the integer part of *fraction* via Fraction.__int__()."""
        self._adopt(fraction)
        return super().__int__()

    def getfloat(self, fraction) -> float:
        """Return *fraction* as a float via Fraction.__float__()."""
        self._adopt(fraction)
        return super().__float__()
# Demo: exercise Fraction arithmetic and the conversion helpers.
my_fraction = Fraction(9, 2)
other_fraction = Fraction(8, 3)
# print(my_fraction)
# print(other_fraction)
summ = my_fraction + other_fraction
# print('summ:', summ)
subs = my_fraction - other_fraction
# print('subs:', subs)
# mult = my_fraction * other_fraction
# print('mult:', mult)
#
# print()
# integer_summ = int(summ)
# integer_subs = int(subs)
# integer_mult = int(mult)
#
# print()
# float_summ = float(summ)
# float_subs = float(subs)
# float_mult = float(mult)
print()
# getint/getfloat both print the converted value and return it.
operations = OperationsOnFraction()
get_integer_summ = operations.getint(summ)
get_float_subs = operations.getfloat(subs)
|
# Read the total number of desks and report per-class desk counts, then
# the number of students left over in each class.
# Bug fix: every f-string below lacked placeholders, so the computed
# values were never shown; the desk messages were also never printed.
desks = int(input("Enter the number of desks"))
Cls1 = desks // 2
deskcls1 = f"The remaining number of desk cls1: {Cls1}"
print(deskcls1)
Cls2 = desks // 2
deskcls2 = f"The remaining number of desk in cls2: {Cls2}"
print(deskcls2)
Cls3 = desks // 2
deskcls3 = f"The remaining number of desk in cls3: {Cls3}"
print(deskcls3)
stucls1 = int(input("Enter the number of students in cls1"))
stucls2 = int(input("Enter the number of students in cls2"))
stucls3 = int(input("Enter the number of students in cls3"))
# NOTE(review): students % 2 suggests two students sit per desk — confirm.
remcls1 = stucls1 % 2
print(f"The remaining number of stu in cls1: {remcls1}")
remcls2 = stucls2 % 2
print(f"The remaining number of stu in cls2: {remcls2}")
remcls3 = stucls3 % 2
print(f"The remaining number of stu in cls3: {remcls3}")
|
992,338 | ada3c42cd76b997d8d5b95da993d6a47ef3f9597 | import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage.feature import hog
import matplotlib.image as mpimg
from scipy.ndimage.measurements import label
import pickle
# Feature-extraction configuration shared by the functions below.
color_space = 'YCrCb'  # color space used for all feature extraction
orient = 9  # HOG orientation bins
pix_per_cell = 8  # HOG pixels per cell
cell_per_block = 2  # HOG cells per block
hog_channel = 'ALL'  # channel(s) fed to HOG: 'ALL' or a channel index
spatial_size = (32, 32)  # downsample size for spatially binned features
hist_bins = 32  # bins per channel for color histograms
spatial_feat = True  # include spatially binned raw pixels
hist_feat = True  # include color histograms
hog_feat = True  # include HOG features
# Define a function to return HOG features and visualization
def get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
    """Compute HOG features for a single-channel image.

    :param img: 2D image channel
    :param orient: number of orientation bins
    :param pix_per_cell: pixels per HOG cell (square cells)
    :param cell_per_block: cells per HOG block (square blocks)
    :param vis: if True, also return the HOG visualisation image
    :param feature_vec: if True, return the features as a flat vector
    :return: features, or (features, hog_image) when vis is True
    """
    # skimage's hog() already switches its return shape on `visualise`,
    # so the previous duplicated if/else branches collapse to one call.
    return hog(img,
               orientations=orient,
               pixels_per_cell=(pix_per_cell, pix_per_cell),
               cells_per_block=(cell_per_block, cell_per_block),
               visualise=vis,
               feature_vector=feature_vec)
# Downsamples the image
def bin_spatial(img, size=(32, 32)):
    """Resize each channel of a 3-channel image to `size` and return the
    flattened channels concatenated into one vector."""
    flattened = [cv2.resize(img[:, :, channel], size).ravel()
                 for channel in range(3)]
    return np.hstack(flattened)
def color_hist(img, nbins=32):
    """Concatenate the per-channel histograms of a 3-channel image.

    Only the bin counts are kept; np.histogram's bin edges are dropped.
    """
    channel_hists = [np.histogram(img[:, :, channel], bins=nbins)[0]
                     for channel in range(3)]
    return np.concatenate(channel_hists)
def single_img_features(img, color_space='RGB', spatial_size=(32, 32),
                        hist_bins=32, orient=9,
                        pix_per_cell=8, cell_per_block=2, hog_channel=0,
                        spatial_feat=True, hist_feat=True, hog_feat=True,
                        vis=False):
    """Build the feature vector for one image (spatial + histogram + HOG).

    :param img: RGB image
    :param color_space: 'RGB', 'HSV', 'LUV', 'HLS', 'YUV' or 'YCrCb'
    :param hog_channel: channel index for HOG, or 'ALL' for every channel
    :param vis: if True (single-channel HOG only), also return the HOG image
    :return: 1-D feature vector, or (features, hog_image) when vis is True

    NOTE(review): with vis=True and hog_channel == 'ALL', hog_image is
    never assigned and the final return raises NameError — confirm callers
    only use vis=True with a single channel.
    """
    # 1) Define an empty list to receive features
    img_features = []
    # 2) Apply color conversion if other than 'RGB'
    if color_space != 'RGB':
        if color_space == 'HSV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
        elif color_space == 'LUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
        elif color_space == 'HLS':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        elif color_space == 'YUV':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
        elif color_space == 'YCrCb':
            feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
    else:
        feature_image = np.copy(img)
    # 3) Compute spatial features if flag is set
    if spatial_feat == True:
        spatial_features = bin_spatial(feature_image, size=spatial_size)
        # 4) Append features to list
        img_features.append(spatial_features)
    # 5) Compute histogram features if flag is set
    if hist_feat == True:
        hist_features = color_hist(feature_image, nbins=hist_bins)
        # 6) Append features to list
        img_features.append(hist_features)
    # 7) Compute HOG features if flag is set
    if hog_feat == True:
        if hog_channel == 'ALL':
            hog_features = []
            for channel in range(feature_image.shape[2]):
                hog_features.extend(get_hog_features(feature_image[:, :, channel],
                                                     orient, pix_per_cell, cell_per_block,
                                                     vis=False, feature_vec=True))
        else:
            # Fix: removed a stray bare `print` expression (debug leftover,
            # evaluated but never called).
            if vis:
                hog_features, hog_image = get_hog_features(feature_image[:, :, hog_channel], orient,
                                                           pix_per_cell, cell_per_block, vis=True, feature_vec=True)
            else:
                hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,
                                                pix_per_cell, cell_per_block, vis=False, feature_vec=True)
        # 8) Append features to list
        img_features.append(hog_features)
    # 9) Return concatenated array of features
    if vis:
        return np.concatenate(img_features), hog_image
    else:
        return np.concatenate(img_features)
def extract_features(imgs, color_space='RGB', spatial_size=(32, 32),
                     hist_bins=32, orient=9,
                     pix_per_cell=8,
                     cell_per_block=2,
                     hog_channel=0,
                     spatial_feat=True,
                     hist_feat=True,
                     hog_feat=True):
    """Read every file in `imgs` and extract one feature vector per image.

    All keyword arguments are forwarded unchanged to single_img_features().
    :return: list of feature vectors, one per input file
    """
    return [single_img_features(mpimg.imread(fname),
                                color_space=color_space,
                                spatial_size=spatial_size,
                                hist_bins=hist_bins,
                                orient=orient,
                                pix_per_cell=pix_per_cell,
                                cell_per_block=cell_per_block,
                                hog_channel=hog_channel,
                                spatial_feat=spatial_feat,
                                hist_feat=hist_feat,
                                hog_feat=hog_feat,
                                vis=False)
            for fname in imgs]
def slide_window(img, x_start_stop=[None, None], y_start_stop=[None, None],
                 xy_window=(64, 64), xy_overlap=(0.5, 0.5)):
    """Return the corner coordinates of every sliding-search window.

    :param img: image, used only for its shape
    :param x_start_stop: [min, max] x search range; None means full width
    :param y_start_stop: [min, max] y search range; None means full height
    :param xy_window: window size in pixels (width, height)
    :param xy_overlap: fractional overlap between adjacent windows
    :return: list of ((x1, y1), (x2, y2)) window corner tuples

    Bug fixes: the original wrote the resolved bounds back into the
    mutable default lists, so a second call silently reused the previous
    image's bounds; np.int was also removed in NumPy 1.24+, so it is
    replaced with the builtin int.
    """
    # Resolve None bounds into locals instead of mutating the defaults.
    x_start = 0 if x_start_stop[0] is None else x_start_stop[0]
    y_start = 0 if y_start_stop[0] is None else y_start_stop[0]
    x_stop = img.shape[1] if x_start_stop[1] is None else x_start_stop[1]
    y_stop = img.shape[0] if y_start_stop[1] is None else y_start_stop[1]
    # Step size in pixels derived from the requested overlap.
    x_step = int((1 - xy_overlap[0]) * xy_window[0])
    y_step = int((1 - xy_overlap[1]) * xy_window[1])
    window_list = []
    x = x_start
    while x + xy_window[0] <= x_stop:
        y = y_start
        while y + xy_window[1] <= y_stop:
            window_list.append(((x, y), (x + xy_window[0], y + xy_window[1])))
            y += y_step
        x += x_step
    return window_list
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of `img` with one rectangle drawn per bounding box.

    :param bboxes: iterable of ((x1, y1), (x2, y2)) corner pairs
    """
    annotated = np.copy(img)
    for top_left, bottom_right in bboxes:
        cv2.rectangle(annotated, top_left, bottom_right, color, thick)
    return annotated
def search_windows(img, windows, clf, scaler, color_space='RGB',
                   spatial_size=(32, 32), hist_bins=32,
                   hist_range=(0, 256), orient=9,
                   pix_per_cell=8, cell_per_block=2,
                   hog_channel=0, spatial_feat=True,
                   hist_feat=True, hog_feat=True):
    """Classify every candidate window and return those predicted positive.

    Each window region is resized to 64x64, featurized with
    single_img_features(), scaled with `scaler`, and classified with
    `clf`; windows predicted 1 are kept. The remaining keyword arguments
    are forwarded unchanged to single_img_features().

    NOTE(review): hist_range is accepted but never used below — confirm
    whether it was meant to be forwarded to the feature extractor.
    """
    # 1) Create an empty list to receive positive detection windows
    on_windows = []
    # 2) Iterate over all windows in the list
    for window in windows:
        # 3) Extract the test window from original image
        test_img = cv2.resize(img[window[0][1]:window[1][1], window[0][0]:window[1][0]], (64, 64))
        # 4) Extract features for that window using single_img_features()
        features = single_img_features(test_img, color_space=color_space,
                                       spatial_size=spatial_size, hist_bins=hist_bins,
                                       orient=orient, pix_per_cell=pix_per_cell,
                                       cell_per_block=cell_per_block,
                                       hog_channel=hog_channel, spatial_feat=spatial_feat,
                                       hist_feat=hist_feat, hog_feat=hog_feat)
        # 5) Scale extracted features to be fed to classifier
        test_features = scaler.transform(np.array(features).reshape(1, -1))
        # 6) Predict using your classifier
        prediction = clf.predict(test_features)
        # 7) If positive (prediction == 1) then save the window
        if prediction == 1:
            on_windows.append(window)
    # 8) Return windows for positive detections
    return on_windows
def visualize(fig, rows, cols, imgs, titles):
    """Lay `imgs` out on a rows x cols subplot grid with per-image titles.

    :param fig: figure object (unused; kept for interface compatibility)
    :param rows: subplot grid rows
    :param cols: subplot grid columns
    :param imgs: images; 2D arrays are drawn with the 'hot' colormap
    :param titles: one title per image
    """
    for i, img in enumerate(imgs):
        plt.subplot(rows, cols, i + 1)
        # Fix: removed the interim plt.title(i + 1) call, which was dead
        # code — it was always overwritten by plt.title(titles[i]) below.
        img_dims = len(img.shape)
        if img_dims < 3:
            # Single-channel image (e.g. a heatmap) gets a colormap.
            plt.imshow(img, cmap='hot')
        else:
            plt.imshow(img)
        plt.title(titles[i])
def convert_color(img, conv='RGB2YCrCb'):
    """Convert `img` between color spaces; `conv` names the conversion.

    Raises KeyError for an unsupported conversion name.
    """
    conversion_codes = {
        'RGB2YCrCb': cv2.COLOR_RGB2YCrCb,
        'BGR2YCrCb': cv2.COLOR_BGR2YCrCb,
        'RGB2LUV': cv2.COLOR_RGB2LUV,
    }
    return cv2.cvtColor(img, conversion_codes[conv])
def find_cars(img, scale, X_scaler, svc, heatmaplist=[]):
    """Scan the lower strip of a frame for cars with HOG subsampling.

    HOG is computed once per channel for the whole search strip and then
    subsampled per window, instead of re-running HOG for every window.

    :param img: RGB frame (uint8, 0-255 range implied by the /255 below)
    :param scale: window scale factor; the strip is shrunk by 1/scale
    :param X_scaler: fitted feature scaler with a transform() method
    :param svc: trained classifier with a predict() method
    :param heatmaplist: WARNING — mutable default used deliberately as
        cross-call state: each call appends its heatmap so the last five
        frames can be summed for temporal smoothing; passing a fresh list
        disables that smoothing.
    :return: (annotated copy of the frame, detection heatmap)

    NOTE(review): np.int was removed in NumPy 1.24+, so this fails on
    modern NumPy. The cv2.resize below also scales only the width
    (imshape[0] is not divided by scale) — confirm whether intentional.
    """
    ystart = 400  # search strip: rows 400-656 of the frame
    ystop = 656
    img_boxes = []
    # scale = 1.5
    draw_img = np.copy(img)
    heatmap = np.zeros_like(img[:, :, 0])
    # Normalize to 0-1 floats to match the training image value range.
    img = img.astype(np.float32) / 255
    img_tosearch = img[ystart:ystop, :, :]
    ctrans_tosearch = convert_color(img_tosearch, conv='RGB2YCrCb')
    if scale != 1:
        imshape = ctrans_tosearch.shape
        ctrans_tosearch = cv2.resize(ctrans_tosearch, (np.int(imshape[1] / scale), np.int(imshape[0])))
    ch0 = ctrans_tosearch[:, :, 0]
    ch1 = ctrans_tosearch[:, :, 1]
    ch2 = ctrans_tosearch[:, :, 2]
    # Number of HOG blocks that fit in each direction.
    nxblocks = (ch0.shape[1] // pix_per_cell) - 1
    nyblocks = (ch0.shape[0] // pix_per_cell) - 1
    nfeat_per_block = orient * cell_per_block ** 2
    window = 64  # native window size in pixels
    nblocks_per_window = (window // pix_per_cell) - 1
    cells_per_step = 2  # step in cells rather than a fractional overlap
    nxsteps = (nxblocks - nblocks_per_window) // cells_per_step
    nysteps = (nyblocks - nblocks_per_window) // cells_per_step
    # One whole-strip HOG pass per channel; windows subsample from these.
    hog0 = get_hog_features(ch0, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog1 = get_hog_features(ch1, orient, pix_per_cell, cell_per_block, feature_vec=False)
    hog2 = get_hog_features(ch2, orient, pix_per_cell, cell_per_block, feature_vec=False)
    for xb in range(nxsteps):
        for yb in range(nysteps):
            ypos = yb * cells_per_step
            xpos = xb * cells_per_step
            # Subsample this window's HOG blocks from the precomputed grids.
            hog_feat_0 = hog0[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_feat_1 = hog1[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_feat_2 = hog2[ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel()
            hog_features = np.hstack((hog_feat_0, hog_feat_1, hog_feat_2))
            xleft = xpos * pix_per_cell
            ytop = ypos * pix_per_cell
            subimg = cv2.resize(ctrans_tosearch[ytop:ytop + window, xleft:xleft + window], (window, window))
            # Spatial and color-histogram features for this patch.
            spatial_features = bin_spatial(subimg, size=spatial_size)
            hist_features = color_hist(subimg, nbins=hist_bins)
            test_features_raw = np.hstack((spatial_features, hist_features, hog_features)).reshape(1, -1)
            test_features = X_scaler.transform(test_features_raw)
            test_prediction = svc.predict(test_features)
            if test_prediction == 1:
                # Map window coordinates back to full-frame pixels.
                xbox_left = np.int(xleft * scale)
                ybox_top = np.int(ytop * scale)
                win_draw = np.int(window * scale)
                cv2.rectangle(draw_img, (xbox_left, ybox_top + ystart),
                              (xbox_left + win_draw, ybox_top + win_draw + ystart),
                              (0, 0, 255))
                img_boxes.append(((xbox_left, ybox_top + ystart), (xbox_left + win_draw, ybox_top + win_draw + ystart)))
                heatmap[ybox_top + ystart:ybox_top + win_draw + ystart, xbox_left:xbox_left + win_draw] += 1
    # print("shape before = {}".format(heatmap.shape))
    # Binarize this frame's heatmap before temporal accumulation.
    heatmap[heatmap > 0] = 1
    # print("shape after = {}".format(heatmap.shape))
    # print("Max heatmap = {}, min heatmap = {}".format(np.max(heatmap), np.min(heatmap)))
    heatmaplist.append(heatmap)
    if len(heatmaplist) >= 5:
        # Sum the last five frames' binarized heatmaps for smoothing.
        heatmap = np.sum(np.stack(heatmaplist)[-5:, :, :], axis=0)
    return draw_img, heatmap
def apply_threshold(heatmap, threshold):
    """Zero out (in place) every heatmap value at or below `threshold`
    and return the same array."""
    below_threshold = heatmap <= threshold
    heatmap[below_threshold] = 0
    return heatmap
def draw_labeled_bboxes(img, labels):
    """Draw one bounding box per labelled car region onto `img` (in place).

    :param labels: (label_map, n_labels) as returned by
        scipy.ndimage.measurements.label
    :return: the same image with boxes drawn
    """
    label_map, n_cars = labels[0], labels[1]
    for car_number in range(1, n_cars + 1):
        region = (label_map == car_number).nonzero()
        ys = np.array(region[0])
        xs = np.array(region[1])
        # Tight box around every pixel carrying this label.
        bbox = ((np.min(xs), np.min(ys)), (np.max(xs), np.max(ys)))
        cv2.rectangle(img, bbox[0], bbox[1], (0, 0, 255), 6)
    return img
def process_image(img, scale=1.5):
    """Per-frame pipeline: detect cars, label the heatmap, draw boxes.

    :param img: RGB video frame
    :param scale: search-window scale passed to find_cars()
    :return: copy of the frame with detected vehicles boxed

    Fixes: the original re-read both pickles on every frame and leaked the
    file handles (open() without close()); the models are now loaded once
    and memoized on the function object.
    """
    if not hasattr(process_image, "_models"):
        with open("X_scaler.pkl", "rb") as scaler_file:
            scaler = pickle.load(scaler_file)
        with open("svc.pkl", "rb") as svc_file:
            classifier = pickle.load(svc_file)
        process_image._models = (scaler, classifier)
    X_scaler, svc = process_image._models
    out_img, heatmap = find_cars(img, scale, X_scaler, svc)
    labels = label(heatmap)
    draw_img = draw_labeled_bboxes(np.copy(img), labels)
    return draw_img
|
992,339 | cd74d1745c1b37c63edeebe5456e56caac583938 | """
Functions to check FASTA and GFF files for coding sequence (CDS) features.
"""
import bisect
import csv
import os
import warnings
from Bio import SeqIO
import gffutils
from pyfaidx import Fasta
from pyfaidx import FastaIndexingError
from riboviz.fasta_gff import CDS_FEATURE_FORMAT
from riboviz.fasta_gff import START_CODON
from riboviz.fasta_gff import STOP_CODONS
from riboviz.get_cds_codons import get_feature_id
from riboviz.get_cds_codons import sequence_to_codons
from riboviz import provenance
# Column names, issue identifiers and header tags used when reporting
# FASTA/GFF consistency issues as TSV.
SEQUENCE = "Sequence"
""" FASTA-GFF column name (sequence ID). """
FEATURE = "Feature"
""" FASTA-GFF column name (feature ID). """
WILDCARD = "*"
"""
Name for sequences or features in issues that apply to multiple
sequences or features.
"""
NOT_APPLICABLE = ""
"""
Name for sequences or features in issues that apply exclusively
to features or sequences only.
"""
ISSUE_TYPE = "Issue"
""" FASTA-GFF column name (issue type). """
ISSUE_DATA = "Data"
""" FASTA-GFF column name (issue data). """
INCOMPLETE_FEATURE = "IncompleteFeature"
""" FASTA-GFF issue column value. """
NO_START_CODON = "NoStartCodon"
""" FASTA-GFF issue column value. """
NO_STOP_CODON = "NoStopCodon"
""" FASTA-GFF issue column value. """
INTERNAL_STOP_CODON = "InternalStopCodon"
""" FASTA-GFF issue column value. """
NO_ID_NAME = "NoIdName"
""" FASTA-GFF issue column value. """
DUPLICATE_FEATURE_ID = "DuplicateFeatureId"
""" FASTA-GFF issue column value. """
DUPLICATE_FEATURE_IDS = "DuplicateFeatureIds"
""" FASTA-GFF issue column value. """
MULTIPLE_CDS = "MultipleCDS"
""" FASTA-GFF issue column value. """
SEQUENCE_NOT_IN_FASTA = "SequenceNotInFASTA"
""" FASTA-GFF issue column value. """
SEQUENCE_NOT_IN_GFF = "SequenceNotInGFF"
""" FASTA-GFF issue column value. """
ISSUE_TYPES = {
    INCOMPLETE_FEATURE, NO_START_CODON, NO_STOP_CODON,
    INTERNAL_STOP_CODON, NO_ID_NAME, DUPLICATE_FEATURE_ID,
    MULTIPLE_CDS, SEQUENCE_NOT_IN_FASTA, SEQUENCE_NOT_IN_GFF,
    DUPLICATE_FEATURE_IDS
}
""" List of possible FASTA-GFF issues. """
# The {sequence}, {feature} and {data} placeholders match the tuple fields
# produced by get_issues().
ISSUE_FORMATS = {
    INCOMPLETE_FEATURE: "Sequence {sequence} feature {feature} has length not divisible by 3",
    NO_START_CODON: "Sequence {sequence} feature {feature} doesn't start with a recognised start codon but with {data}",
    NO_STOP_CODON: "Sequence {sequence} feature {feature} doesn't end with a recognised stop codon but with {data}",
    INTERNAL_STOP_CODON: "Sequence {sequence} feature {feature} has an internal stop codon",
    NO_ID_NAME: "Sequence {sequence} feature {feature} has no 'ID' or 'Name' attribute",
    DUPLICATE_FEATURE_ID: "Sequence {sequence} has non-unique 'ID' attribute {feature}",
    MULTIPLE_CDS: "Sequence {sequence} has multiple CDS ({data} occurrences)",
    SEQUENCE_NOT_IN_FASTA: "Sequence {sequence} in GFF file is not in FASTA file",
    SEQUENCE_NOT_IN_GFF: "Sequence {sequence} in FASTA file is not in GFF file",
    DUPLICATE_FEATURE_IDS: "Non-unique 'ID' attribute {feature} ({data} occurrences)"
}
""" Format strings for printing issues. """
FASTA_FILE = "fasta_file"
""" TSV file header tag. """
GFF_FILE = "gff_file"
""" TSV file header tag. """
START_CODONS = "start_codons"
""" TSV file header tag. """
NUM_SEQUENCES = "NumSequences"
""" TSV file header tag. """
NUM_FEATURES = "NumFeatures"
""" TSV file header tag. """
NUM_CDS_FEATURES = "NumCDSFeatures"
""" TSV file header tag. """
def get_fasta_sequence_ids(fasta):
    """
    Get the set of unique IDs of sequences in a FASTA file.

    :param fasta: FASTA file
    :type fasta: str or unicode
    :return: Unique sequence IDs
    :rtype: set(str or unicode)
    :raises FileNotFoundError: If the FASTA file cannot be found
    """
    if not (os.path.exists(fasta) and os.path.isfile(fasta)):
        raise FileNotFoundError(fasta)
    with open(fasta, "r") as handle:
        # 'fasta' is https://biopython.org/wiki/SeqIO file type.
        return {record.id for record in SeqIO.parse(handle, "fasta")}
def get_issues(fasta,
               gff,
               feature_format=CDS_FEATURE_FORMAT,
               use_feature_name=False,
               start_codons=[START_CODON]):
    """
    Check FASTA and GFF files for coding sequence (CDS) features and
    return a list of issues for relating to coding sequences, ``CDS``,
    features. A list of tuples of form (sequence ID, feature ID ('' if
    not applicable to the issue), issue type, issue data) is
    returned.

    The sequence ID is one of:

    * Value of sequence ID.
    * :py:const:`WILDCARD` if the issue relates to multiple
      sequences.

    The feature ID is one of:

    * :py:const:`NOT_APPLICABLE` if the issue relates to the sequence,
      not the feature.
    * :py:const:`WILDCARD` if the issue relates to multiple features.
    * Value of ``ID`` attribute for feature, if defined and if
      ``Name`` is not defined, or ``Name`` is defined and
      ``use_feature_name`` is ``False``.
    * Value of ``Name`` attribute for feature, if defined, and if
      ``ID`` is undefined or if ``ID`` is defined and ``use_feature_name``
      is ``True``.
    * Sequence ID formatted using ``feature_format`` (default
      :py:const:`riboviz.fasta_gff.CDS_FEATURE_FORMAT`)
      if both ``ID`` and ``Name`` are undefined.

    The following issue types are reported for every CDS annotated
    in the GFF:

    * :py:const:`INCOMPLETE_FEATURE`: The CDS has a length not
      divisible by 3.
    * :py:const:`NO_START_CODON`: The CDS does not start
      with a start codon (``ATG`` or those in ``start_codons``). The
      supplementary issue data is the actual codon found.
    * :py:const:`NO_STOP_CODON`: The CDS does not end with a stop
      codon (``TAG``, ``TGA``, ``TAA``). The supplementary
      issue data is the actual codon found.
    * :py:const:`INTERNAL_STOP_CODON`: The CDS has internal
      stop codons.
    * :py:const:`NO_ID_NAME`: The CDS has no ``ID`` or ``Name``
      attribute.
    * :py:const:`DUPLICATE_FEATURE_ID`: The CDS has a non-unique ``ID``
      attribute (attributes are expected to be unique within the scope
      of a GFF file).
    * :py:const:`DUPLICATE_FEATURE_IDS`: Related to the above,
      multiple CDSs have non-unique ``ID`` attributes. This summarises
      the count of all CDSs that share a common ``ID`` attribute. For
      this issue, the sequence ``ID`` attribute is
      :py:const:`WILDCARD`. The supplementary issue data is a
      count of the number of features with the same ID.

    The following issues are reported for sequences defined in the GFF file:

    * :py:const:`MULTIPLE_CDS`: The sequence has multiple CDS.
      For this issue, the feature ``ID`` attribute is
      :py:const:`WILDCARD`. The supplementary issue data is a count
      of the number of CDSs found.
    * :py:const:`SEQUENCE_NOT_IN_FASTA`: The sequence has a feature in
      the GFF file but the sequence is not in the FASTA file. For this
      issue, the feature ``ID`` attribute is
      :py:const:`NOT_APPLICABLE`.
    * :py:const:`SEQUENCE_NOT_IN_GFF`: The sequence is in the FASTA
      file but has no features in the GFF file. For this issue, the
      feature ``ID`` attribute is :py:const:`NOT_APPLICABLE`.

    Issue data is supplementary data relating to the issue. Unless
    already noted above this will be ``None``.

    :param fasta: FASTA file
    :type fasta: str or unicode
    :param gff: GFF file
    :type gff: str or unicode
    :param feature_format: Feature name format for features which \
    do not define ``ID`` or ``Name`` attributes. This format is \
    applied to the sequence ID to create a feature name.
    :type feature_format: str or unicode
    :param use_feature_name: If a feature defines both ``ID`` and \
    ``Name`` attributes then use ``Name`` in reporting, otherwise use \
    ``ID``.
    :type use_feature_name: bool
    :param start_codons: Allowable start codons.
    :type start_codons: list(str or unicode)
    :return: Number of FASTA sequences, number of GFF features, \
    number of GFF CDS features, list of unique sequence IDs in GFF \
    file and list of issues for sequences and features.
    :rtype: tuple(int, int, int, list(tuple(str or unicode, \
    str or unicode, str or unicode, object))
    :raises FileNotFoundError: If the FASTA or GFF files \
    cannot be found
    :raises pyfaidx.FastaIndexingError: If the FASTA file has badly \
    formatted sequences
    :raises ValueError: If GFF file is empty
    :raises Exception: Exceptions specific to gffutils.create_db \
    (these are undocumented in the gffutils documentation)
    """
    for f in [fasta, gff]:
        if not os.path.exists(f) or (not os.path.isfile(f)):
            raise FileNotFoundError(f)
    # NOTE(review): the GFF database is written to 'gff.db' in the
    # current working directory and overwritten (force=True) each run.
    try:
        gffdb = gffutils.create_db(gff,
                                   dbfn='gff.db',
                                   force=True,
                                   keep_order=True,
                                   merge_strategy='merge',
                                   sort_attribute_values=True)
    except ValueError as e:
        # Wrap and rethrow exception so file name is included
        raise ValueError("{} ({})".format(e, gff)) from e
    issues = []
    # Track IDs of features encountered. Each ID must be unique within
    # a GFF file. See http://gmod.org/wiki/GFF3.
    feature_ids = {}
    # Track sequences encountered and counts of features for
    # each.
    sequence_features = {}
    fasta_genes = Fasta(fasta)
    for feature in gffdb.features_of_type('CDS'):
        if feature.seqid not in sequence_features:
            sequence_features[feature.seqid] = 0
        sequence_features[feature.seqid] += 1
        feature_id = None
        if "ID" in feature.attributes:
            # Record every sequence that uses this ID so duplicates can
            # be reported after the loop.
            feature_id = feature.attributes["ID"][0].strip()
            if feature_id in feature_ids:
                feature_ids[feature_id].append(feature.seqid)
            else:
                feature_ids[feature_id] = [feature.seqid]
        feature_id_name = get_feature_id(feature, use_feature_name)
        if feature_id_name is None:
            # No ID/Name attribute: synthesise a name from the sequence ID.
            feature_id_name = feature_format.format(feature.seqid)
            issues.append((feature.seqid, feature_id_name,
                           NO_ID_NAME, None))
        try:
            sequence = feature.sequence(fasta_genes)
        except KeyError as e:  # Missing sequence.
            issues.append((feature.seqid,
                           NOT_APPLICABLE,
                           SEQUENCE_NOT_IN_FASTA,
                           None))
            continue
        except FastaIndexingError as e:
            raise e
        except Exception as e:
            # Any other extraction failure is reported but not fatal.
            warnings.warn(str(e))
            continue
        seq_len_remainder = len(sequence) % 3
        if seq_len_remainder != 0:
            issues.append((feature.seqid, feature_id_name,
                           INCOMPLETE_FEATURE, None))
            # Pad so the codon checks below still see whole codons.
            sequence += ("N" * (3 - seq_len_remainder))
        sequence_codons = sequence_to_codons(sequence)
        if sequence_codons[0] not in start_codons:
            issues.append((feature.seqid, feature_id_name,
                           NO_START_CODON, sequence_codons[0]))
        if not sequence_codons[-1] in STOP_CODONS:
            issues.append((feature.seqid, feature_id_name,
                           NO_STOP_CODON, sequence_codons[-1]))
        if any([codon in STOP_CODONS
                for codon in sequence_codons[:-1]]):
            issues.append((feature.seqid, feature_id_name,
                           INTERNAL_STOP_CODON, None))
    for sequence, count in list(sequence_features.items()):
        if count > 1:
            # Insert issue, respect ordering by sequence ID.
            bisect.insort(issues, (sequence, WILDCARD, MULTIPLE_CDS, count))
    for feature_id, seq_ids in feature_ids.items():
        if len(seq_ids) > 1:
            # Summary row for the duplicated ID, then one row per sequence.
            issues.append((WILDCARD, feature_id,
                           DUPLICATE_FEATURE_IDS, len(seq_ids)))
            for seq_id in seq_ids:
                # Insert issue, respect ordering by sequence ID.
                bisect.insort(issues, (seq_id, feature_id,
                                       DUPLICATE_FEATURE_ID, None))
    gff_seq_ids = set(sequence_features.keys())
    fasta_seq_ids = get_fasta_sequence_ids(fasta)
    # Sequences present in the FASTA file but never referenced by the GFF.
    fasta_only_seq_ids = fasta_seq_ids - gff_seq_ids
    for seq_id in fasta_only_seq_ids:
        issues.append((seq_id, NOT_APPLICABLE, SEQUENCE_NOT_IN_GFF, None))
    num_sequences = len(fasta_seq_ids)
    num_features = len(list(gffdb.all_features()))
    num_cds_features = len(list(gffdb.features_of_type('CDS')))
    return num_sequences, num_features, num_cds_features, issues
def write_issues_to_csv(issues, csv_file, header=None, delimiter="\t"):
    """
    Write a list of issues for features into a CSV file, including a
    header.

    The CSV file has columns:

    * :py:const:`SEQUENCE`: sequence ID.
    * :py:const:`FEATURE`: feature ID.
    * :py:const:`ISSUE_TYPE`: issue type.
    * :py:const:`ISSUE_DATA`: issue data or ``None``.

    :param issues: List of tuples of form (sequence ID, feature ID ('' if \
    not applicable to the issue), issue type, issue data).
    :type issues: list(tuple(str or unicode, str or unicode, \
    str or unicode, object))
    :param csv_file: CSV file name
    :type csv_file: str or unicode
    :param header: Tags-values to be put into a header, prefixed by `#` \
    (default: no extra header lines)
    :type header: dict or None
    :param delimiter: Delimiter
    :type delimiter: str or unicode
    """
    # Avoid a mutable default argument: a shared dict default can leak
    # state between calls if a caller ever mutates it.
    if header is None:
        header = {}
    provenance.write_provenance_header(__file__, csv_file)
    # Append the "# tag: value" header lines and the issue rows in a
    # single open; the previous implementation opened the file twice.
    with open(csv_file, "a") as f:
        for key, value in header.items():
            f.write("# {}: {}\n".format(key, value))
        writer = csv.writer(f, delimiter=delimiter, lineterminator='\n')
        writer.writerow([SEQUENCE, FEATURE, ISSUE_TYPE, ISSUE_DATA])
        for (sequence_id, feature_id, issue_type, issue_data) in issues:
            writer.writerow([sequence_id, feature_id, issue_type,
                             issue_data])
def count_issues(issues):
    """
    Count how many issues of each known type occur in ``issues``.

    Every type in :py:const:`ISSUE_TYPES` appears in the result, even
    with a zero count.

    :param issues: List of tuples of form (sequence ID, feature ID \
    ('' if not applicable to the issue), issue type, issue data).
    :type issues: list(tuple(str or unicode, str or unicode, \
    str or unicode, object))
    :return: List of tuples of form (issue type, count) sorted by \
    count, descending
    :rtype: list(tuple(str or unicode, int))
    """
    # Seed every known issue type with zero so absent types still show up.
    tallies = dict.fromkeys(ISSUE_TYPES, 0)
    for issue in issues:
        tallies[issue[2]] += 1
    return sorted(tallies.items(), key=lambda pair: pair[1], reverse=True)
def run_fasta_gff_check(fasta,
                        gff,
                        feature_format=CDS_FEATURE_FORMAT,
                        use_feature_name=False,
                        start_codons=None):
    """
    Check FASTA and GFF files for coding sequence (CDS) features
    and get a list of issues for each sequence and coding sequence,
    ``CDS``, feature.

    See :py:func:`get_issues` for information on sequences, features,
    issue types and related data.

    The following is also returned:

    * Configuration information - a dictionary with:
        - :py:const:`FASTA_FILE`: ``fasta`` value.
        - :py:const:`GFF_FILE`: ``gff`` value.
        - :py:const:`START_CODONS`: ``start_codons`` value.
    * Metadata:
        - :py:const:`NUM_SEQUENCES`: number of sequences in ``fasta``.
        - :py:const:`NUM_FEATURES`: number of features in ``gff``.
        - :py:const:`NUM_CDS_FEATURE`: number of ``CDS`` features in
          ``gff``.

    :param fasta: FASTA file
    :type fasta: str or unicode
    :param gff: GFF file
    :type gff: str or unicode
    :param feature_format: Feature name format for features which \
    do not define ``ID`` or ``Name`` attributes. This format is \
    applied to the sequence ID to create a feature name.
    :type feature_format: str or unicode
    :param use_feature_name: If a feature defines both ``ID`` and \
    ``Name`` attributes then use ``Name`` in reporting, otherwise use \
    ``ID``.
    :type use_feature_name: bool
    :param start_codons: Allowable start codons \
    (default: ``[START_CODON]``).
    :type start_codons: list(str or unicode) or None
    :return: Configuration, metadata, issues
    :raises FileNotFoundError: If the FASTA or GFF files \
    cannot be found
    :raises pyfaidx.FastaIndexingError: If the FASTA file has badly \
    formatted sequences
    :raises ValueError: If GFF file is empty
    :raises Exception: Exceptions specific to gffutils.create_db \
    (these are undocumented in the gffutils documentation)
    """
    # None sentinel instead of a mutable default list, so a shared
    # default cannot be mutated across calls.
    if start_codons is None:
        start_codons = [START_CODON]
    num_sequences, num_features, num_cds_features, issues = \
        get_issues(fasta,
                   gff,
                   feature_format=feature_format,
                   use_feature_name=use_feature_name,
                   start_codons=start_codons)
    config = {}
    config[FASTA_FILE] = fasta
    config[GFF_FILE] = gff
    config[START_CODONS] = start_codons
    metadata = {}
    metadata[NUM_SEQUENCES] = num_sequences
    metadata[NUM_FEATURES] = num_features
    metadata[NUM_CDS_FEATURES] = num_cds_features
    return config, metadata, issues
def check_fasta_gff(fasta,
                    gff,
                    issues_file,
                    feature_format=CDS_FEATURE_FORMAT,
                    use_feature_name=False,
                    start_codons=None,
                    is_verbose=False,
                    delimiter="\t"):
    """
    Check FASTA and GFF files for coding sequence (CDS) features
    and both print and save a list of issues for each sequence and
    coding sequence, ``CDS``, feature.

    See :py:func:`run_fasta_gff_check`.

    A tab-separated values file of the issues identified is saved.
    See :py:func:`get_issues` for information on
    sequences, features, issue types and related data.
    See :py:func:`write_issues_to_csv` for tab-separated values
    file columns.

    :param fasta: FASTA file
    :type fasta: str or unicode
    :param gff: GFF file
    :type gff: str or unicode
    :param issues_file: Feature issues file
    :type issues_file: str or unicode
    :param feature_format: Feature name format for features which \
    do not define ``ID`` or ``Name`` attributes. This format is \
    applied to the sequence ID to create a feature name.
    :type feature_format: str or unicode
    :param use_feature_name: If a feature defines both ``ID`` and \
    ``Name`` attributes then use ``Name`` in reporting, otherwise use \
    ``ID``.
    :type use_feature_name: bool
    :param start_codons: Allowable start codons \
    (default: ``[START_CODON]``).
    :type start_codons: list(str or unicode) or None
    :param is_verbose: Print information on each issue (if ``false`` \
    only issue counts are printed)
    :type is_verbose: bool
    :param delimiter: Delimiter
    :type delimiter: str or unicode
    :raises FileNotFoundError: If the FASTA or GFF files \
    cannot be found
    :raises pyfaidx.FastaIndexingError: If the FASTA file has badly \
    formatted sequences
    :raises ValueError: If GFF file is empty
    :raises Exception: Exceptions specific to gffutils.create_db \
    (these are undocumented in the gffutils documentation)
    """
    # None sentinel instead of a mutable default list; resolve here so
    # the effective default is unchanged for existing callers.
    if start_codons is None:
        start_codons = [START_CODON]
    config, metadata, issues = run_fasta_gff_check(
        fasta, gff, feature_format, use_feature_name, start_codons)
    issue_counts = count_issues(issues)
    # The CSV header combines configuration, metadata and per-type counts.
    header = dict(config)
    header.update(metadata)
    header.update(issue_counts)
    write_issues_to_csv(issues, issues_file, header, delimiter)
    print("Configuration:")
    for (tag, value) in config.items():
        print("{}\t{}".format(tag, value))
    print("\nMetadata:")
    for (tag, value) in metadata.items():
        print("{}\t{}".format(tag, value))
    print("\nIssue summary:")
    print("{}\t{}".format("Issue", "Count"))
    for (tag, value) in issue_counts:
        print("{}\t{}".format(tag, value))
    if is_verbose:
        print("\nIssue details:")
        for (sequence_id, feature_id, issue_type, issue_data) in issues:
            if issue_type in ISSUE_FORMATS:
                print(ISSUE_FORMATS[issue_type].format(sequence=sequence_id,
                                                       feature=feature_id,
                                                       data=issue_data))
|
992,340 | aa6355685afa0ee66ff1388001106681ce8930d5 | import os, re
import errno
from django.db import models
from django.contrib.auth.models import User, Group
from django.template.defaultfilters import slugify
from datetime import datetime
from django.conf import settings
# Create your models here.
class Category(models.Model):
    """A report category; owns the report, BIRT and archive directories
    on disk that are created when the category is saved."""
    name = models.CharField(max_length=500, unique=True)
    slug = models.SlugField(unique=True)
    dir = models.CharField(max_length=1500, unique=True)
    birt_dir = models.CharField(max_length=1500, unique=True)
    archive_dir = models.CharField(max_length=1500, unique=True)
    def save(self, *args, **kwargs):
        """Derive the slug and the three directory paths from the name,
        create the directories, then persist the model."""
        slug = slugify(self.name)
        self.slug = slug
        # Directory names use underscores instead of the slug's hyphens.
        dir_name = re.sub('-', '_', slug)
        report_dir = '{0}/{1}'.format(settings.REPORTS_DIR, dir_name)
        birt_dir = '{0}/birt/{1}'.format(settings.REPORTS_DIR, dir_name)
        archive_dir = '{0}/archive/{1}'.format(settings.REPORTS_DIR, dir_name)
        self.dir = report_dir
        self.birt_dir = birt_dir
        self.archive_dir = archive_dir
        # Create each directory independently. Previously a single try
        # block meant that if the first directory already existed the
        # remaining two were never created.
        for path in (report_dir, birt_dir, archive_dir):
            try:
                os.makedirs(path)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
                print('Warning: directory already exists exception')
        super(Category, self).save(*args, **kwargs)
    def __str__(self):
        return self.name
class Report(models.Model):
    """A report available to users, either pre-generated or produced on
    demand (see ``type``)."""
    title = models.CharField(max_length=500, unique=True)
    # Filesystem location of the report file.
    path = models.CharField(default=settings.REPORTS_DIR, max_length=1000)
    #path = models.FilePathField(path=settings.REPORTS_DIR, recursive=True, max_length=1000)
    # Number of times the report has been viewed.
    views = models.IntegerField(default=0)
    pub_date = models.DateTimeField('date published', default=datetime.now)
    creator = models.CharField(max_length=50)
    # File size; unit is not stated here -- presumably bytes or MB, confirm at call sites.
    size = models.FloatField()
    # Users granted access, via the UserReport through model.
    users = models.ManyToManyField(User, through='UserReport')
    # groups= models.ManyToManyField(User, through='GroupReport')
    category = models.ForeignKey(Category)
    # 'P' = already-generated file on disk, 'R' = generated manually in real time.
    type = models.CharField(max_length=1,
                            choices=(('P', 'Already Generated Report'), ('R', 'Real Time Manually Generated Report')),
                            default='P')
    comment = models.TextField(max_length=4000)
    def __str__(self):
        return self.title
class ReportArchive(models.Model):
    """An archived report; mirrors ``Report`` but without the unique
    title constraint, and with access tracked via ``UserReportArch``."""
    title = models.CharField(max_length=500)
    # Filesystem location of the archived report file.
    path = models.CharField(default=settings.REPORTS_DIR, max_length=1000)
    #path = models.FilePathField(path=settings.REPORTS_DIR, recursive=True, max_length=1000)
    views = models.IntegerField(default=0)
    pub_date = models.DateTimeField('date published', default=datetime.now)
    creator = models.CharField(max_length=50)
    # File size; unit is not stated here -- confirm against Report.size usage.
    size = models.FloatField()
    users = models.ManyToManyField(User, through='UserReportArch')
    category = models.ForeignKey(Category)
    # 'P' = already-generated file on disk, 'R' = generated manually in real time.
    type = models.CharField(max_length=1,
                            choices=(('P', 'Already Generated Report'), ('R', 'Real Time Manually Generated Report')),
                            default='P')
    comment = models.TextField(max_length=4000)
    def __str__(self):
        return self.title
class UserReport(models.Model):
    """Through model linking a user to a report they may access."""
    user = models.ForeignKey(User)
    report = models.ForeignKey(Report)
    def __str__(self):
        return '%s - %s ' % (self.user, self.report)
'''
class GroupReport(models.Model):
group = models.ForeignKey(Group)
report = models.ForeignKey(Report)
def __str__(self):
return '{} - {} '.format(self.group, self.report)
'''
class UserReportArch(models.Model):
    """Through model linking a user to an archived report they may access."""
    user = models.ForeignKey(User)
    report = models.ForeignKey(ReportArchive)
    def __str__(self):
        return '%s - %s ' % (self.user, self.report)
class RequestReport(models.Model):
    """A user's request for a new report, tracked through a simple
    approval workflow (see ``status``)."""
    user = models.ForeignKey(User)
    title = models.CharField(max_length=500)
    description = models.TextField()
    # Optional supporting file for the request.
    attachment = models.FileField(upload_to='attachments', blank=True)
    # 'O' = one-off report, 'R' = recurring report.
    frequency = models.CharField(max_length=1,
                                 choices=(('O', 'One Time'), ('R', 'Regular')))
    url = models.URLField(max_length=500, blank=True)
    # Workflow state; null/blank until the request is triaged.
    status = models.CharField(max_length=1, null=True, blank=True,
                              choices=(('A', 'Accepted'), ('R', 'Rejected'), ('I', 'In Progress'), ('D', 'Done')))
    def __str__(self):
        return self.title
|
992,341 | 886c9277354006c5511299bf2b9fee396040660e | #encoding=utf8
import sys
import getopt
import time
import re
from selenium import webdriver
import requests
import json
import openpyxl
import codecs
import pymysql
# Log into the mall admin site with a Chrome session, then capture the
# session cookies for use in direct HTTP requests below.
url = 'http://mall.mengguochengzhen.cn'
driver = webdriver.Chrome()
driver.get(url+'/mgcz/system/index')
# NOTE(review): admin credentials are hard-coded; move them to
# configuration / environment variables before sharing this script.
driver.find_element_by_id('form-username').send_keys('root')
driver.find_element_by_id('form-password').send_keys('123123')
driver.find_element_by_xpath('//*[@id="login-form"]/button').click()
time.sleep(5) # wait, then grab the freshest cookies after login
# Fetch the cookie information.
cookies = driver.get_cookies()
# assumes cookies[1] is 'sid' and cookies[0] is 'JSESSIONID' -- TODO
# confirm; cookie ordering from the driver is not guaranteed.
sid=cookies[1]['value']
JSESSIONID=cookies[0]['value']
def sjk(sql):
    """Run *sql* against the mgcz database, print and return the result
    tuple as a string stripped of tuple punctuation ("(',)" characters).

    Intended for read-only SELECT statements: the original code placed
    ``connection.commit()`` after ``return``, so it was unreachable and
    has been removed (behaviour is unchanged -- nothing was ever
    committed).

    :param sql: SQL statement to execute
    :return: first-row result rendered as a stripped string
    """
    # NOTE(review): database credentials are hard-coded; move them to
    # configuration before sharing this script.
    connection = pymysql.connect(
        host="101.132.106.107",
        port=3306,
        user="lidawei",
        password="G1EoqS$7Jgf9l6!a",
        db="mgcz",
        charset="utf8")
    try:
        with connection.cursor() as cursor:
            cursor.execute(sql)
            results = cursor.fetchall()
            results1 = str(results)
            print(results1.strip("(',)"))
            return results1.strip("(',)")
    finally:
        # Always close the connection, even if the query raises.
        connection.close()
# For each of 31 iterations: pick a random in-stock, non-promotion
# commodity, attach it to the fixed promotion, then approve it.
# NOTE(review): the SQL below is built by string concatenation from a
# previous query's result -- use parameterized queries to avoid SQL
# injection if any of these values can ever come from user input.
for i in range(0,31):
    vv=sjk('SELECT cId FROM mgcz.commodity where state=1 and isPromotion = 0 order by rand() limit 1 ')
    re1=requests.post('http://mall.mengguochengzhen.cn/mgcz/admin/promotion/savePromotionCommodity',data={'promotionId':'a879a3f653d24adfbeea8c06dad66d63','cId':str(vv),'isCommit':'1','commodityDiscount':'10'},headers={'Cookie': 'JSESSIONID=' + JSESSIONID + '; sid=' + sid})
    print(re1.text)
    v1=sjk('SELECT pc_id FROM mgcz.promotion_commodity where promotion_id="a879a3f653d24adfbeea8c06dad66d63"and cId="'+vv+'"')
    re5 =requests.post('http://mall.mengguochengzhen.cn/mgcz/admin/promotion/updateAuditingState',data={'promotionId':'a879a3f653d24adfbeea8c06dad66d63','pcId':str(v1),'cId':str(vv),'discount':'1.00','auditingState':1,'sort':'a.create_date DESC'},headers={'Cookie': 'JSESSIONID=' + JSESSIONID + '; sid=' + sid})
    print(re5.text)
992,342 | 7d383029a8753c3318c0126a75b7f41929eba45a | import sys
# N: コマ数
# M: 地点数
# Read N (piece count), M (spot count) and the M coordinates from stdin.
values = list(map(int, open(0).read().split()))
piece_count, spot_count = values[0], values[1]
spots = values[2:]
# With at least as many pieces as spots, every spot gets its own piece
# and zero moves are needed.
if piece_count >= spot_count:
    print(0)
    sys.exit()
spots.sort()
# Total travel is the full span minus the (N-1) largest gaps, which can
# be skipped by starting a new piece after each of them.
gaps = sorted((spots[i + 1] - spots[i] for i in range(spot_count - 1)),
              reverse=True)
print(spots[-1] - spots[0] - sum(gaps[:piece_count - 1]))
992,343 | a198cf51c19261b84b0181468e7dfb0649a62e5c | #!/usr/bin/env python
from __future__ import print_function, division
import os
import time
import numpy as np
import theano
import theano.tensor as T
import lasagne
import argparse
import matplotlib.pyplot as plt
from os.path import join
from scipy.io import loadmat
from utils import compressed_sensing as cs
from utils.metric import complex_psnr
from cascadenet.network.model import build_d2_c2_s, build_d5_c10_s
from cascadenet.util.helpers import from_lasagne_format
from cascadenet.util.helpers import to_lasagne_format
#LOADING DATA
def prep_input(im, acc=4):
    """Undersample the batch, then reformat it into network inputs.

    Parameters
    ----------
    acc: float - acceleration factor controlling the undersampling rate;
        the higher the value, the more undersampling.

    Returns
    -------
    tuple of (undersampled image, undersampled k-space, mask, ground
    truth image), each in Lasagne format.
    """
    und_mask = cs.cartesian_mask(im.shape, acc, sample_n=8)
    im_und, k_und = cs.undersample(im, und_mask, centred=False, norm='ortho')
    return (to_lasagne_format(im_und),
            to_lasagne_format(k_und),
            to_lasagne_format(und_mask, mask=True),
            to_lasagne_format(im))
def iterate_minibatch(data, batch_size, shuffle=True):
    """Yield successive batches of ``batch_size`` samples from ``data``.

    The final batch may be smaller when ``len(data)`` is not a multiple
    of ``batch_size``.

    :param data: indexable collection of samples (e.g. a numpy array)
    :param batch_size: number of samples per batch
    :param shuffle: if True, iterate over a shuffled copy of ``data``
        (``np.random.permutation`` returns a copy; the input is untouched)
    """
    n = len(data)
    if shuffle:
        data = np.random.permutation(data)
    # range (not the Python-2-only xrange) keeps this portable;
    # behaviour is identical under Python 2.
    for i in range(0, n, batch_size):
        yield data[i:i + batch_size]
#DEFINING HYPERPARAMETERS
def create_dummy_data():
    """Create small lungs data based on patches for demo.

    Note that in practice, at test time the method will need to be applied to
    the whole volume. In addition, one would need more data to prevent
    overfitting.

    Relies on the module-level ``project_root`` being set before this is
    called (done in the ``__main__`` block).

    Returns (train, validate, test) arrays of patches.
    """
    data = loadmat(join(project_root, './data/lungs.mat'))['seq']
    nx, ny, nt = data.shape
    ny_red = 8
    sl = ny//ny_red
    data_t = np.transpose(data, (2, 0, 1))
    # Synthesize data by extracting patches.
    # (Removed a no-op slicing statement and three unused *_slice locals
    # that shadowed these comprehensions.)
    train = np.array([data_t[..., i:i+sl] for i in np.random.randint(0, sl*3, 20)])
    validate = np.array([data_t[..., i:i+sl] for i in (sl*4, sl*5)])
    test = np.array([data_t[..., i:i+sl] for i in (sl*6, sl*7)])
    return train, validate, test
#PREPROCESSING
def compile_fn(network, net_config, args):
    """
    Create Training function and validation function.

    :param network: output Lasagne layer of the reconstruction network
    :param net_config: dict of named layers; must contain 'input',
        'mask' and 'kspace_input' entries whose input_var are used
    :param args: parsed CLI arguments providing ``lr`` and ``l2``
        (each a one-element list of strings)
    :return: (train_fn, val_fn) compiled Theano functions
    """
    # Hyper-parameters
    base_lr = float(args.lr[0])
    l2 = float(args.l2[0])

    # Theano variables
    input_var = net_config['input'].input_var
    mask_var = net_config['mask'].input_var
    kspace_var = net_config['kspace_input'].input_var
    target_var = T.tensor5('targets')

    # Objective
    pred = lasagne.layers.get_output(network)
    # complex valued signal has 2 channels, which counts as 1.
    loss_sq = lasagne.objectives.squared_error(target_var, pred).mean() * 2
    # Fix: previously `loss` was only assigned inside `if l2:`, so a
    # zero l2 weight (--l2 0) raised NameError below.
    loss = loss_sq
    if l2:
        l2_penalty = lasagne.regularization.regularize_network_params(
            network, lasagne.regularization.l2)
        loss = loss_sq + l2_penalty * l2

    update_rule = lasagne.updates.adam
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = update_rule(loss, params, learning_rate=base_lr)

    print(' Compiling ... ')
    t_start = time.time()
    train_fn = theano.function([input_var, mask_var, kspace_var, target_var],
                               [loss], updates=updates,
                               on_unused_input='ignore')
    val_fn = theano.function([input_var, mask_var, kspace_var, target_var],
                             [loss, pred],
                             on_unused_input='ignore')
    t_end = time.time()
    print(' ... Done, took %.4f s' % (t_end - t_start))
    return train_fn, val_fn
#DEFINING NETWORK
#Fragments were attributed from https://github.com/js3611/Deep-MRI-Reconstruction
#Activation function = ReLU
if __name__ == '__main__':
    # Parse hyper-parameters; numeric options arrive as one-element
    # lists of strings and are converted below.
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_epoch', metavar='int', nargs=1, default=['10'],
                        help='number of epochs')
    parser.add_argument('--batch_size', metavar='int', nargs=1, default=['1'],
                        help='batch size')
    parser.add_argument('--lr', metavar='float', nargs=1,
                        default=['0.001'], help='initial learning rate')
    parser.add_argument('--l2', metavar='float', nargs=1,
                        default=['1e-6'], help='l2 regularisation')
    parser.add_argument('--acceleration_factor', metavar='float', nargs=1,
                        default=['4.0'],
                        help='Acceleration factor for k-space sampling')
    parser.add_argument('--debug', action='store_true', help='debug mode')
    parser.add_argument('--savefig', action='store_true',
                        help='Save output images and masks')
    args = parser.parse_args()
    # Project config
    model_name = 'd5_c10_s'
    acc = float(args.acceleration_factor[0]) # undersampling rate
    num_epoch = int(args.num_epoch[0])
    batch_size = int(args.batch_size[0])
    # Volume dimensions (Nx, Ny, Nt) with Ny reduced by Ny_red for the demo patches.
    Nx, Ny, Nt = 256, 256, 30
    Ny_red = 8
    save_fig = args.savefig
    save_every = 5
    # Configure directory info
    project_root = '.'
    save_dir = join(project_root, 'models/%s' % model_name)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    # Create dataset
    train, validate, test = create_dummy_data()
    # Test creating mask and compute the acceleration rate
    dummy_mask = cs.cartesian_mask((10, Nx, Ny//Ny_red), acc, sample_n=8)
    sample_und_factor = cs.undersampling_rate(dummy_mask)
    print('Undersampling Rate: {:.2f}'.format(sample_und_factor))
    # Specify network, preprocessing data
    # Input has 2 channels (real/imaginary parts of the complex signal).
    input_shape = (batch_size, 2, Nx, Ny//Ny_red, Nt)
    net_config, net, = build_d2_c2_s(input_shape)
    # # build D5-C10(S) with pre-trained parameters
    # net_config, net, = build_d5_c10_s(input_shape)
    # with np.load('./models/pretrained/d5_c10_s.npz') as f:
    #     param_values = [f['arr_{0}'.format(i)] for i in range(len(f.files))]
    #     lasagne.layers.set_all_param_values(net, param_values)
    # Compile function
    train_fn, val_fn = compile_fn(net, net_config, args)
    #TRAINING
    # NOTE: xrange requires Python 2, consistent with the rest of this script.
    for epoch in xrange(num_epoch):
        t_start = time.time()
        # Training
        train_err = 0
        train_batches = 0
        for im in iterate_minibatch(train, batch_size, shuffle=True):
            im_und, k_und, mask, im_gnd = prep_input(im, acc)
            err = train_fn(im_und, mask, k_und, im_gnd)[0]
            train_err += err
            train_batches += 1
            if args.debug and train_batches == 20:
                break
        # Validation pass (no parameter updates).
        validate_err = 0
        validate_batches = 0
        for im in iterate_minibatch(validate, batch_size, shuffle=False):
            im_und, k_und, mask, im_gnd = prep_input(im, acc)
            err, pred = val_fn(im_und, mask, k_und, im_gnd)
            validate_err += err
            validate_batches += 1
            if args.debug and validate_batches == 20:
                break
        # Test pass: track loss and PSNR of the undersampled baseline
        # versus the network's reconstruction.
        vis = []
        test_err = 0
        base_psnr = 0
        test_psnr = 0
        test_batches = 0
        for im in iterate_minibatch(test, batch_size, shuffle=False):
            im_und, k_und, mask, im_gnd = prep_input(im, acc)
            err, pred = val_fn(im_und, mask, k_und, im_gnd)
            test_err += err
            for im_i, und_i, pred_i in zip(im,
                                           from_lasagne_format(im_und),
                                           from_lasagne_format(pred)):
                base_psnr += complex_psnr(im_i, und_i, peak='max')
                test_psnr += complex_psnr(im_i, pred_i, peak='max')
            if save_fig and test_batches % save_every == 0:
                vis.append((im[0],
                            from_lasagne_format(pred)[0],
                            from_lasagne_format(im_und)[0],
                            from_lasagne_format(mask, mask=True)[0]))
            test_batches += 1
            if args.debug and test_batches == 20:
                break
        t_end = time.time()
        # Average metrics over batches (PSNR per sample).
        train_err /= train_batches
        validate_err /= validate_batches
        test_err /= test_batches
        base_psnr /= (test_batches*batch_size)
        test_psnr /= (test_batches*batch_size)
        #INFERENCE
        # Then we print the results for this epoch:
        print("Epoch {}/{}".format(epoch+1, num_epoch))
        print(" time: {}s".format(t_end - t_start))
        print(" training loss:\t\t{:.6f}".format(train_err))
        print(" validation loss:\t{:.6f}".format(validate_err))
        print(" test loss:\t\t{:.6f}".format(test_err))
        print(" base PSNR:\t\t{:.6f}".format(base_psnr))
        print(" test PSNR:\t\t{:.6f}".format(test_psnr))
        #OUTPUT
        # save the model
        if epoch in [1, 2, num_epoch-1]:
            if save_fig:
                i = 0
                for im_i, pred_i, und_i, mask_i in vis:
                    im = abs(np.concatenate([und_i[0], pred_i[0], im_i[0], im_i[0] - pred_i[0]], 1))
                    plt.imsave(join(save_dir, 'im{0}_x.png'.format(i)), im, cmap='gray')
                    im = abs(np.concatenate([und_i[..., 0], pred_i[..., 0],
                                             im_i[..., 0], im_i[..., 0] - pred_i[..., 0]], 0))
                    plt.imsave(join(save_dir, 'im{0}_t.png'.format(i)), im, cmap='gray')
                    plt.imsave(join(save_dir, 'mask{0}.png'.format(i)),
                               np.fft.fftshift(mask_i[..., 0]), cmap='gray')
                    i += 1
            name = '%s_epoch_%d.npz' % (model_name, epoch)
            np.savez(join(save_dir, name),
                     *lasagne.layers.get_all_param_values(net))
            print('model parameters saved at %s' % join(os.getcwd(), name))
        print('')
|
992,344 | d63afae209cb80e8d1ebb1921d6d1af3694004ea |
# coding: utf-8
# In[54]:
import lda
import lda.datasets
from __future__ import division, print_function
import numpy as np
import string
from nltk.tokenize import word_tokenize
from collections import defaultdict
import unicodedata
from collections import Counter
import math
# In[62]:
#function to load abstracts and cleaning them, make them into list of words,
#and put them in groups according to the group file
def load_data(DIR, groupfile, abstractfile):
    """Load abstracts, clean them into word lists, and bucket the words
    by group according to the groups file.

    NOTE: this function requires Python 2 -- it indexes ``dict.keys()``
    directly (``abstracts_new.keys()[i]``), which fails on Python 3.

    :param DIR: directory containing the two input files
    :param groupfile: tab-separated file mapping paper ID -> group number
    :param abstractfile: tab-separated file mapping paper ID -> abstract
    :return: defaultdict(list) mapping group number (string) to the
        cleaned, ASCII-normalized words of all its abstracts
    """
    # import groups.txt
    group = [line.strip() for line in open(DIR + groupfile)]
    group.pop(0)
    groups = {} #make groups into a dictionary {pid:group#}
    for row in group:
        row = row.split('\t')
        groups[row[0]] = row[1]
    # import abstracts and remove the rows with null, seperate key and value
    abstract = [line.strip() for line in open(DIR + abstractfile)]
    abstracts = []
    for row in abstract:
        row = row.split('\t')
        if row[1] != 'null':
            abstracts.append(row)
    abstracts.pop(0) #pop the column names
    #make a new dictionary of abstracts stopwords removed then with pids as keys
    abstracts_new = {}
    for i in range(len(abstracts)):
        abstracts_new[abstracts[i][0]] = rmStopWords(abstracts[i][1])
    # allocate paragraphs into groups by the groups dictionary
    # NOTE(review): relies on .keys() and .values() returning items in
    # the same order, which holds in Python 2 as long as the dict is not
    # mutated between the two calls.
    group = defaultdict(list)
    for i in range(len(abstracts_new.keys())):
        key = groups[abstracts_new.keys()[i]]
        for word in abstracts_new.values()[i]:
            group[key].append(unicodedata.normalize('NFKD', word).encode('ascii','ignore'))
    # remove column names for group
    group.pop('0', None)
    return group
# In[57]:
#function to remove stop words from a paragraph is texts, return list of words
def rmStopWords(text_paragraph):
    """Tokenize a paragraph and return its words with punctuation tokens,
    stop words and words of length <= 2 removed (lower-cased).

    NOTE: the ``str(...).decode('utf-8')`` call requires Python 2;
    ``str`` has no ``decode`` on Python 3.

    :param text_paragraph: raw paragraph text
    :return: list of cleaned, lower-cased tokens
    """
    stopwords = ["all","just","being","over","both","through","yourselves","its",
                 "before","herself","had","should","to","only","under","ours","has",
                 "do","them","his","very","they","not","during","now","him","nor",
                 "did","this","she","each","further","where","few","because","doing",
                 "some","are","our","ourselves","out","what","for","while","does",
                 "above","between","t","be","we","who","were","here","hers","by","on",
                 "about","of","against","s","or","own","into","yourself","down","your",
                 "from","her","their","there","been","whom","too","themselves","was",
                 "until","more","himself","that","but","don","with","than","those",
                 "he","me","myself","these","up","will","below","can","theirs","my",
                 "and","then","is","am","it","an","as","itself","at","have","in","any",
                 "if","again","no","when","same","how","other","which","you","after",
                 "most","such","why","a","off","i","yours","so","the","having","once"]
    stopset = set(stopwords) #make stop words
    exclude = set(string.punctuation) #save punctuations
    #load text file and remove stop words
    tokens = word_tokenize(str(text_paragraph).decode('utf-8'))
    # Drop tokens that are exactly one punctuation character (punctuation
    # embedded inside a token is kept).
    tokens = [w for w in tokens if not w in exclude]
    tokens = [w.lower() for w in tokens if not w.lower() in stopset]
    tokens = [w for w in tokens if len(w)>2]
    tokens = list(tokens)
    return tokens
# for H_x for each group
def shannon(group):
    """Return the running Shannon entropy (base 2) of the word list *group*.

    The i-th element of the returned list is the entropy accumulated over
    the first i+1 vocabulary words visited; the final element is the full
    Shannon entropy H(X) of the group's word distribution.

    NOTE(review): the append happens inside the loop, so callers that use
    ``result[0]`` (see ``JD``) only see the contribution of a single,
    arbitrarily ordered word -- confirm this is intentional.

    :param group: list of words (tokens)
    :return: list of partial entropy values; empty list for empty input
    """
    entropies = []
    counts = Counter(group)
    vocab = list(set(group))
    h = 0
    # .values() instead of the Python-2-only .itervalues(); the sum is
    # identical, and this also works on Python 3.
    total = sum(counts.values())
    for word in vocab:
        p = counts[word] / total
        if p > 0:
            h += - p * math.log(p, 2)
        entropies.append(h)
    return entropies
# for Q(pi||pj)
# function to calculate Q(pi||pj)
def q_entropy(text_all, group1, group2):
    """Return the smoothed cross entropy Q(p1||p2) of group1 against group2.

    Each word probability is smoothed towards the combined-corpus
    distribution (99% group probability + 1% corpus probability), which
    guarantees p_s2 is nonzero for every word of group1 (all of group1's
    words appear in the combined corpus).

    :param text_all: combined word list of both groups
    :param group1: word list of the first group
    :param group2: word list of the second group
    :return: cross entropy in bits (float)
    """
    count1 = Counter(group1)     # word counts for group1
    vocab1 = list(set(group1))   # vocabulary of group1
    count2 = Counter(group2)     # word counts for group2
    s_count = Counter(text_all)  # word counts for the combined corpus
    # .values() instead of the Python-2-only .itervalues(); identical
    # sums, and this also works on Python 3.  (Removed unused locals
    # vocab2 and s_vocab.)
    sum1 = sum(count1.values())
    sum2 = sum(count2.values())
    s_sum = sum(s_count.values())
    q = 0
    for w in vocab1:
        p_1 = count1[w] / sum1
        p_2 = count2[w] / sum2
        s_x = s_count[w] / s_sum
        p_s1 = 0.99 * p_1 + 0.01 * s_x
        p_s2 = 0.99 * p_2 + 0.01 * s_x
        q += - p_s1 * math.log(p_s2, 2)
    return q
# function to calculate the culture hole
def JD(H1, group1, group2):
    """Return the 'culture hole' C_ij between two word groups.

    C_ij = 1 - H1[0] / Q(p_i||p_j), where Q is the smoothed cross
    entropy of the two groups over their combined corpus.

    :param H1: running-entropy list of group1 (as returned by shannon)
    :param group1: word list of the first group
    :param group2: word list of the second group
    """
    combined = group1 + group2            # corpus combining both groups
    cross = q_entropy(combined, group1, group2)
    return 1 - H1[0] / cross
# In[58]:
def jargan_distance(C, group, length):
    """Fill the length x length matrix C with pairwise jargon distances.

    Group keys are the 1-based strings '1'..'length'; C[i][j] holds the
    JD value of group i+1 against group j+1.
    """
    for row in range(length):
        row_entropy = shannon(group[str(row + 1)])
        for col in range(length):
            C[row][col] = JD(row_entropy, group[str(row + 1)], group[str(col + 1)])
    return C
def calculateJD(DIR, groupfile, abstractfile):
    """Load the grouped abstracts and return the full pairwise
    jargon-distance matrix between groups."""
    groups = load_data(DIR, groupfile, abstractfile)
    size = len(groups)
    matrix = [[0 for _ in range(size)] for _ in range(size)]
    return jargan_distance(matrix, groups, size)
# In[61]:
#results for the large set
# Profile the jargon-distance computation on the large dataset.
from matplotlib.pyplot import show
import cProfile
#file locations
DIR = '/Users/feismacbookpro/Desktop/INFX575/HW3/big/'
groupfile = 'groups2.txt'
abstractfile = 'abstracts2.txt'
import cProfile
cProfile.run( 'calculateJD(DIR, groupfile, abstractfile)' )
# C = calculateJD(DIR, groupfile, abstractfile)
# Z = linkage(C)
# dendo = dendrogram(Z)
# import json
# with open('/Users/feismacbookpro/Desktop/INFX575/HW3/big/dendo.jason', 'w') as f:
#     json.dump(dendo, f)
# # show()
# In[60]:
#results for the small test set
#file locations
DIR = '/Users/feismacbookpro/Desktop/INFX575/HW3/final/'
groupfile = 'group1.txt'
abstractfile = 'abstract1.txt'
import cProfile
cProfile.run( 'calculateJD(DIR, groupfile, abstractfile)' )
C_final = calculateJD(DIR, groupfile, abstractfile)
# C_final
group = load_data(DIR, groupfile, abstractfile)
group
#calculating the q for trouble shooting
# Build the raw cross-entropy matrix Q for all group pairs (notebook
# residue: bare `group` / `Q` / `Z` expressions displayed cell output).
length = len(group)
H1 = []
Q = [[0 for x in range(length)] for x in range(length)]
for i in range(length):
#     H1.append(shannon(group[str(i+1)]))
    for j in range(length):
        group1 = group[str(i+1)]
        group2 = group[str(j+1)]
        text_all = group1 + group2
        Q[i][j] = q_entropy(text_all, group1, group2)
#         C[i][j] = JD(H1, group[str(i+1)],group[str(j+1)])
Q
# In[40]:
# Hierarchical clustering of the final distance matrix.
from scipy.cluster.hierarchy import dendrogram
from scipy.cluster.hierarchy import linkage
Z = linkage(C_final)
# dendrogram(Z)
Z
992,345 | 7f75922ee64ec696d14374388cd7a615625dc3db | from django.conf.urls import include, url
from . import views
# URL routes for the reports application.
# NOTE(review): except for the first entry, these patterns are
# unanchored regexes (no ^...$); Django matches the pattern anywhere in
# the path, so e.g. 'inicio' also matches any URL containing that
# substring, and route order matters. Consider anchoring each pattern.
urlpatterns = [
    url(r'^$', views.post_list),
    url('inicio', views.inicio, name='inicio'),
    url('login', views.login, name='login'),
    url('registro', views.registro, name='registro'),
    # Dashboards per role.
    url('client_dashboard', views.client_dashboard, name='client_dashboard'),
    url('admin_dashboard', views.admin_dashboard, name='admin_dashboard'),
    # Role CRUD.
    url('admin_rol', views.admin_rol, name='admin_rol'),
    url('admin_list_rol', views.admin_list_rol, name='admin_list_rol'),
    url('admin_edit_rol/(?P<id>[0-9]+)/', views.admin_edit_rol, name='admin_edit_rol'),
    url('admin_delete_rol/(?P<id>[0-9]+)/', views.admin_delete_rol, name='admin_delete_rol'),
    # Vehicle catalog (brand / model / vehicle).
    url('admin_marca', views.admin_marca, name='admin_marca'),
    url('admin_modelo', views.admin_modelo, name='admin_modelo'),
    url('admin_vehiculo', views.admin_vehiculo, name='admin_vehiculo'),
    url('admin_list_marca', views.admin_list_marca, name='admin_list_marca'),
    url('admin_list_modelo', views.admin_list_modelo, name='admin_list_modelo'),
    url('admin_list_vehiculo', views.admin_list_vehiculo, name='admin_list_vehiculo'),
    # Driver CRUD.
    url('admin_list_conductor', views.admin_list_conductor, name='admin_list_conductor'),
    url('admin_conductor', views.admin_conductor, name='admin_conductor'),
    url('admin_edit_conductor/(?P<id>[0-9]+)/', views.admin_edit_conductor, name='admin_edit_conductor'),
    url('admin_delete_conductor/(?P<id>[0-9]+)/', views.admin_delete_conductor, name='admin_delete_conductor'),
    # Client CRUD.
    url('admin_list_cliente', views.admin_list_cliente, name='admin_list_cliente'),
    url('admin_cliente', views.admin_cliente, name='admin_cliente'),
    url('admin_edit_cliente/(?P<id>[0-9]+)/', views.admin_edit_cliente, name='admin_edit_cliente'),
    url('admin_delete_cliente/(?P<id>[0-9]+)/', views.admin_delete_cliente, name='admin_delete_cliente'),
    # Employee CRUD.
    url('admin_list_empleado', views.admin_list_empleado, name='admin_list_empleado'),
    url('admin_employee', views.admin_employee, name='admin_employee'),
    url('admin_edit_employee/(?P<id>[0-9]+)/', views.admin_edit_employee, name='admin_edit_employee'),
    url('admin_delete_empleado/(?P<id>[0-9]+)/', views.admin_delete_empleado, name='admin_delete_empleado'),
    # Reservation CRUD.
    url('admin_reserva', views.admin_reserva, name='admin_reserva'),
    url('admin_list_reserva', views.admin_list_reserva, name='admin_list_reserva'),
    url('admin_edit_reserva/(?P<id>[0-9]+)/', views.admin_edit_reserva, name='admin_edit_reserva'),
    url('admin_delete_reserva/(?P<id>[0-9]+)/', views.admin_delete_reserva, name='admin_delete_reserva'),
    url('admin_settings', views.admin_settings, name='admin_settings'),
    url('client_settings', views.client_settings, name='client_settings'),
    # 'call_dashboard' deliberately reuses the admin dashboard view.
    url('call_dashboard', views.admin_dashboard, name='call_dashboard'),
]
|
992,346 | 18345b886f01a6f1de955e823111994bb5de8253 | import torch
import torch.nn as nn
TOTAL_TIME_STEP = 3

# The block below is a message-passing / gated-graph-network design
# sketch, not runnable Python: it previously caused a SyntaxError on
# import (e.g. `->` inside expressions, `Matrix((...))` pseudo-types),
# which made the whole module unusable. It is preserved verbatim as a
# comment so the file can at least be imported.
#
# FEATURE_LENGTH = annotation(1) + edge_type(C52) + FEATURE_LENGTH
# node_vectors = Matrix((batch_size, num_of_edges -> CN2(num_of_atom), FEATURE_LENGTH)) # input
# edge_type_matrix = Matrix((edge_type, num_of_edges)) # input
# # message passing (MP)
# mp_matrix = Matrix((FEATURE_LENGTH, FEATURE_LENGTH, edge_type)) # learnable
# for t in range(TOTAL_TIME_STEP):
#     new_node_vector = Matrix((batch_size, num_of_edges, FEATURE_LENGTH))
#     select_mp_matrix = mp_matrix * edge_type_matrix -> array of shape (FEATURE_LENGTH, FEATURE_LENGTH, num_of_edges)
#     update_matrix = node_vectors
#     update_matrix = node_vectors * select_mp_matrix -> array of shape (batch_size, num_of_edges, FEATURE_LENGTH)
#     for edge_self in range(num_of_edges):
#         message = empty -> array of shape (FEATURE_LENGTH, )
#         for edge_other in range(num_of_edges - 1):
#             edge_state = node_vectors[b, edge_other, :] -> array of shape (FEATURE_LENGTH, )
#             message += message_passing_matrix * edge_state
#         new_feature = Gru_update(message, node_vecotr(b, edge_self, :))
#         new_node_vector[b, edge_self, :] = new_feature
#     node_vector = new_node_vector[:, :, :]
# result = ReadOutFunction(node_vector)
if __name__ == "__main__":
x = torch.randn((3, 3), requires_grad=True)
y = x * x
g = y + 5
out = g.sum()
out.backward(retain_graph=True)
print(x)
print(x.grad)
print(g)
out.backward(retain_graph=True)CH
print(x)
print(x.grad)
print(g)
|
992,347 | 294471587b29122e3ac9611a04e5090621e99ad3 | n = 3
while n > 0:
soma = 0
while True:
s = input()
if s == 'caw caw':
print(soma)
break
else:
s = s.replace('*', '1')
s = s.replace('-', '0')
soma += int(s,2)
n -= 1 |
992,348 | c25683dc416f23473be7cfdb029f03f5eaf7329c | __all__ = ()
from scarletio import ScarletLock, RichAttributeErrorBaseType, copy_docs
from ...discord.core import KOKORO
class RateLimitContextBase(RichAttributeErrorBaseType):
    """
    Base asynchronous context manager used to handle static rate limits when
    communicating with top.gg.
    
    Attributes
    ----------
    acquired : `bool`
        Whether the lock is acquired.
    """
    __slots__ = ('acquired', )
    
    def __new__(cls):
        """
        Creates a new, not yet acquired, rate limit context.
        """
        self = object.__new__(cls)
        self.acquired = False
        return self
    
    async def __aenter__(self):
        """
        Enters the rate limit context, blocking till acquiring it.
        
        This method is a coroutine.
        """
        self.acquired = True
        return self
    
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """
        Exits the rate limit context, releasing it. Never suppresses exceptions.
        
        This method is a coroutine.
        """
        self.release()
        return False
    
    def __del__(self):
        """Releases the rate limit context if it was not yet released."""
        self.release()
    
    def release(self):
        """Marks the rate limit context as released."""
        self.acquired = False
class RateLimitContext(RateLimitContextBase):
    """
    Rate limit context used to handle static rate limits when communicating with top.gg.
    Attributes
    ----------
    acquired : `bool`
        Whether the lock is acquired.
    rate_limit_group : ``RateLimitGroup``
        The parent rate limit group.
    """
    __slots__ = ('rate_limit_group',)
    def __new__(cls, rate_limit_group):
        """
        Creates a new rate limit context instance.
        Parameters
        ----------
        rate_limit_group : ``RateLimitGroup``
            The parent rate limit group.
        """
        self = object.__new__(cls)
        self.rate_limit_group = rate_limit_group
        self.acquired = False
        return self
    @copy_docs(RateLimitContextBase.__aenter__)
    async def __aenter__(self):
        # Blocks until the group's lock has a free slot.
        await self.rate_limit_group.lock.acquire()
        self.acquired = True
        return self
    @copy_docs(RateLimitContextBase.release)
    def release(self):
        if self.acquired:
            rate_limit_group = self.rate_limit_group
            # The slot is not freed immediately: it stays held for
            # `reset_after` seconds — this is what enforces the static
            # rate limit window.
            KOKORO.call_after(rate_limit_group.reset_after, rate_limit_group.lock.release)
            self.acquired = False
class StackedRateLimitContext(RateLimitContextBase):
    """
    Rate limit context used to handle multiple static rate limits when communicating with top.gg.
    Attributes
    ----------
    acquired : `bool`
        Whether the lock is acquired.
    rate_limit_groups : `tuple` of ``RateLimitGroup``
        The parent rate limit groups.
    """
    __slots__ = ('rate_limit_groups',)
    def __new__(cls, rate_limit_groups):
        """
        Creates a new rate limit context instance.
        Parameters
        ----------
        rate_limit_groups : `tuple` of ``RateLimitGroup``
            The parent rate limit groups.
        """
        self = object.__new__(cls)
        self.rate_limit_groups = rate_limit_groups
        self.acquired = False
        return self
    @copy_docs(RateLimitContextBase.__aenter__)
    async def __aenter__(self):
        # Use linear acquiring, so a failure can only happen on a lock that
        # we do not hold yet.
        for rate_limit_group in self.rate_limit_groups:
            try:
                await rate_limit_group.lock.acquire()
            except:
                # On failure (e.g. cancellation) roll back every lock
                # acquired so far, then propagate.
                # BUGFIX: the original had a stray `continue` here, which
                # made the `raise` unreachable and silently resumed
                # acquiring the remaining groups after releasing the held
                # ones — leaving the context "acquired" without the locks.
                for rate_limit_group_to_cancel in self.rate_limit_groups:
                    if rate_limit_group is rate_limit_group_to_cancel:
                        break
                    rate_limit_group_to_cancel.lock.release()
                raise
        self.acquired = True
        return self
    @copy_docs(RateLimitContextBase.release)
    def release(self):
        if self.acquired:
            # Each group keeps its slot held for its own reset window.
            for rate_limit_group in self.rate_limit_groups:
                KOKORO.call_after(rate_limit_group.reset_after, rate_limit_group.lock.release)
            self.acquired = False
class RateLimitHandlerBase(RichAttributeErrorBaseType):
    """
    Rate limit handler which can be entered. This base variant produces
    no-op contexts that acquire nothing.
    """
    __slots__ = ()

    def __new__(cls):
        """
        Creates a new rate limit handler instance.
        """
        return object.__new__(cls)

    def ctx(self):
        """
        Enters the rate limit handler, allowing it to be used as an asynchronous context manager.

        Returns
        -------
        rate_limit_context : ``RateLimitContextBase``
        """
        return RateLimitContextBase()
class RateLimitHandler(RateLimitHandlerBase):
    """
    Rate limit handler which can be entered. Wraps a single rate limit
    group and hands out contexts bound to it.

    Attributes
    ----------
    rate_limit_group : ``RateLimitGroup``
        The wrapped rate limit group.
    """
    __slots__ = ('rate_limit_group',)

    def __new__(cls, rate_limit_group):
        """
        Creates a new rate limit handler instance with the given rate limit group.

        Parameters
        ----------
        rate_limit_group : ``RateLimitGroup``
            The parent rate limit group.
        """
        new = object.__new__(cls)
        new.rate_limit_group = rate_limit_group
        return new

    @copy_docs(RateLimitHandlerBase.ctx)
    def ctx(self):
        return RateLimitContext(self.rate_limit_group)
class StackedRateLimitHandler(RateLimitHandlerBase):
    """
    Rate limit handler which can be entered. Wraps several rate limit
    groups at once; its contexts acquire all of them.

    Attributes
    ----------
    rate_limit_groups : `tuple` of ``RateLimitGroup``
        The wrapped rate limit group.
    """
    __slots__ = ('rate_limit_groups',)

    def __new__(cls, *rate_limit_groups):
        """
        Creates a new rate limit handler instance with the given rate limit group.

        Parameters
        ----------
        *rate_limit_groups : ``RateLimitGroup``
            The parent rate limit groups.
        """
        new = object.__new__(cls)
        new.rate_limit_groups = rate_limit_groups
        return new

    @copy_docs(RateLimitHandlerBase.ctx)
    def ctx(self):
        return StackedRateLimitContext(self.rate_limit_groups)
class RateLimitGroup(RichAttributeErrorBaseType):
    """
    Static rate limit handler group implementation.
    Attributes
    ----------
    lock : ``ScarletLock``
        Lock used to block requests.
    reset_after : `float`
        Duration to release a rate limit after.
    """
    __slots__ = ('lock', 'reset_after', )
    def __new__(cls, size, reset_after):
        """
        Creates a new rate limit handler instance.
        Parameters
        ----------
        size : `int`
            Rate limit size.
        reset_after : `float`
            The time to reset rate limit after.
        """
        self = object.__new__(cls)
        self.reset_after = reset_after
        # `size` holders may acquire concurrently before further acquires block.
        self.lock = ScarletLock(KOKORO, size)
        return self
|
992,349 | 2b8bc6f425b0a5a58140e4837694f3443174f3e2 | """
---Fibonacci---
f(n) = f(n-1) + f(n-2)
for n => Natural numbers
start values:
f(0) = 0
f(1) = 1
"""
def fibonacci(n, fib_arr=None):
    """Return the n-th Fibonacci number with f(0) = 0 and f(1) = 1.

    Parameters
    ----------
    n : int
        Index into the sequence; any n <= 0 yields f(0) = 0.
    fib_arr : list of int, optional
        Optional seed sequence (defaults to ``[0, 1]``). A fresh list is
        created per call — the original used a mutable default argument,
        so all calls shared (and corrupted) one list.

    Returns
    -------
    int
        The n-th Fibonacci number. The original indexed ``fib_arr[n-1]``,
        returning f(n-1) instead of the documented f(n), and returned the
        whole list (not a number) for n <= 0.
    """
    # Copy the seed so repeated calls never share state.
    seq = [0, 1] if fib_arr is None else list(fib_arr)
    if n <= 0:
        return seq[0]
    # Iterative extension of the documented recurrence f(n) = f(n-1) + f(n-2);
    # avoids the original's exponential double recursion and debug print.
    while len(seq) <= n:
        seq.append(seq[-1] + seq[-2])
    return seq[n]
fibonacci(8)
992,350 | 6918c92b2a4cd0869be614360d57f3943352fce5 | #coding: utf8
from django.contrib import admin
from xuezhangshuo.account.models import xzsUser
# Register the custom user model with the default ModelAdmin options.
admin.site.register(xzsUser)
992,351 | e01c8990e3eb9bb6319e49b565540f9028dfd3cb | import datetime
from sqlalchemy import Column, Integer, String, DateTime, ForeignKey
from models import db
class SiteToRoom(db.Model):
    """Association table mapping a site hostname to a room."""
    __tablename__ = "site_to_room"
    id = db.Column(db.Integer, primary_key=True)
    # Uses the plain SQLAlchemy `ForeignKey` import rather than db.ForeignKey;
    # both work under Flask-SQLAlchemy.
    room_id = db.Column(db.Integer, ForeignKey("room.id"))
    hostname = db.Column(db.String(50))
    def __repr__(self):
        return "<SiteToRoom %r>" % self.id
    def to_dict(self):
        # Plain-dict form, e.g. for JSON serialization.
        return {
            "id": self.id,
            "hostname": self.hostname,
            "room_id": self.room_id
        }
992,352 | 08d2fcaec0f15fa9629aee1395d4084b1cafe14d | import xadmin as admin
from orderapp.models import OrderRecord, Order
# Register your models here.
@admin.sites.register(OrderRecord)
class OrderRecordAdmin(object):
    # xadmin options for daily order statistics.
    def avg_count(self, obj):
        # NOTE(review): computes order_cancel / order_count — a cancel ratio,
        # though the name says "avg" and the label says "Sum Count". It also
        # reads `obj.order_cancel` while the list columns use
        # `order_cancel_count`, and raises ZeroDivisionError when
        # order_count == 0 — confirm the intended field and semantics.
        return int(obj.order_cancel / obj.order_count)
    avg_count.short_description = "Sum Count"
    avg_count.allow_tags = True
    avg_count.is_column = True
    list_display = ("date", "order_count", "order_cancel_count", 'order_price', 'order_cancel_price')
    list_display_links = ("date",)
    list_filter = ["date", "order_count", "order_cancel_count"]
    actions = None
    # Per-column aggregates shown by xadmin below the change list.
    aggregate_fields = {"order_count": "sum", 'order_price': 'sum', "order_cancel_count": "sum",
                        'order_cancel_price': 'sum'}
    # Auto-refresh intervals (seconds) offered in the UI.
    refresh_times = (3, 5, 10)
    data_charts = {
        "order": {'title': u"订单统计",
                  "x-field": "date",
                  "y-field": ("order_count", "order_cancel_count"),
                  "order": ('date', )},
        "order_cancel": {'title': u"订单金额(万元)",
                         "x-field": "date",
                         "y-field": ('order_price', 'order_cancel_price'), "order": ('date',)},
        "per_month": {'title': u"月度统计",
                      "x-field": "_chart_month",
                      "y-field": ("order_count",),
                      "option": {
                          "series": {"bars": {"align": "center", "barWidth": 0.8, 'show': True}},
                          "xaxis": {"aggregate": "sum", "mode": "categories"},
                      },
                      },
    }
    def _chart_month(self, obj):
        # Buckets rows by month name for the per_month chart.
        # NOTE(review): debug print left in; consider removing.
        print('--------->', obj.date)
        return obj.date.strftime("%B")
@admin.sites.register(Order)
class OrderAdmin(object):
    # NOTE(review): 'pay_statue' looks like a typo for 'pay_status' — it must
    # match the Order model's field name; confirm against the model.
    list_display = ('title', 'price', 'pay_statue', 'pay_type')
992,353 | e2584ab4c7523b58c1511ee43294585aea134ecd | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial auto-generated migration: creates the Event table
    # (free-form `data` payload plus an optional `tag`).
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Event',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('data', models.CharField(null=True, max_length=255, blank=True)),
                ('tag', models.CharField(null=True, max_length=45, blank=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
992,354 | 0f804cd0db25db91c6e23beb983f9233a227e524 | import numpy as np
from gpmap.gpm import GenotypePhenotypeMap
from gpmap import utils
# Local imports
from epistasis.mapping import (EpistasisMap, mutations_to_sites, assert_epistasis)
from epistasis.matrix import get_model_matrix
from epistasis.utils import extract_mutations_from_genotypes
from epistasis.models.utils import XMatrixException
class BaseSimulation(GenotypePhenotypeMap):
    """ Base class for simulating genotype-phenotype maps built from epistatic
    interactions.
    Parameters
    ----------
    wildtype : str
        wildtype sequence.
    mutations : dict
        dictionary mapping each site the possible mutations
    """
    def __init__(self, wildtype, mutations,
                 model_type="global",
                 **kwargs
                 ):
        self.model_type = model_type
        # Cache of built X (model) matrices, keyed by name.
        self.Xbuilt = {}
        # Enumerate every genotype reachable from the mutation table.
        genotypes = np.array(
            utils.mutations_to_genotypes(mutations, wildtype=wildtype))
        # Placeholder phenotypes; subclasses overwrite them via build().
        phenotypes = np.ones(len(genotypes))
        # Initialize a genotype-phenotype map
        super(BaseSimulation, self).__init__(
            wildtype,
            genotypes,
            phenotypes,
            mutations=mutations,
            **kwargs)
    def add_epistasis(self):
        """Add an EpistasisMap to model.
        """
        # Build epistasis interactions as columns in X matrix.
        sites = mutations_to_sites(self.order, self.mutations)
        # Map those columns to epistasis dataframe.
        self.epistasis = EpistasisMap(
            sites, order=self.order, model_type=self.model_type)
    def add_X(self, X="complete", key=None):
        """Add X to Xbuilt
        Parameters
        ----------
        X :
            see above for details.
        key : str
            name for storing the matrix.
        Returns
        -------
        X_builts : numpy.ndarray
            newly built 2d array matrix
        """
        if type(X) is str and X in ['obs', 'complete']:
            # Create a list of epistatic interaction for this model.
            if hasattr(self, "epistasis"):
                columns = self.epistasis.sites
            else:
                self.add_epistasis()
                columns = self.epistasis.sites
            # Use desired set of genotypes for rows in X matrix.
            index = self.binary
            # Build numpy array
            x = get_model_matrix(index, columns, model_type=self.model_type)
            # Set matrix with given key.
            if key is None:
                key = X
            self.Xbuilt[key] = x
        # NOTE(review): requires pandas imported as `pd` at module scope.
        elif type(X) == np.ndarray or type(X) == pd.DataFrame:
            # Set key
            if key is None:
                raise Exception("A key must be given to store.")
            # Store Xmatrix.
            self.Xbuilt[key] = X
        else:
            raise XMatrixException("X must be one of the following: 'obs',"
                                   " 'complete', "
                                   "numpy.ndarray, or pandas.DataFrame.")
        X_built = self.Xbuilt[key]
        return X_built
    def set_coefs_order(self, order):
        """Construct a set of epistatic coefficients given the epistatic
        order."""
        # Attach an epistasis model.
        self.order = order
        self.add_epistasis()
        # Zero everything except the wildtype/intercept coefficient.
        self.epistasis.data.values = np.zeros(self.epistasis.n)
        self.epistasis.data.values[0] = 1
        return self
    def set_coefs_sites(self, sites):
        """Construct a set of epistatic coefficients given a list of
        coefficient sites."""
        # Highest order is the longest site tuple.
        self.order = max([len(s) for s in sites])
        self.add_epistasis()
        return self
    def set_coefs(self, sites, values):
        """Set the epistatic coefs
        Parameters
        ----------
        sites : List
            List of epistatic coefficient sites.
        values : List
            list of floats representing to epistatic coefficients.
        """
        self.set_coefs_sites(sites)
        self.epistasis.data.values = values
        # Rebuild phenotypes from the new coefficients (subclass hook).
        self.build()
        return self
    @assert_epistasis
    def set_wildtype_phenotype(self, value):
        """Set the wildtype phenotype."""
        self.epistasis.data.values[0] = value
        self.build()
    @assert_epistasis
    def set_coefs_values(self, values):
        """Set coefficient values.
        """
        self.epistasis.data.values = values
        self.build()
        return self
    @assert_epistasis
    def set_coefs_random(self, coef_range):
        """Set coefs to values drawn from a random, uniform distribution between
        coef_range.
        Parameters
        ----------
        coef_range : 2-tuple
            low and high bounds for coeff values.
        """
        # Add values to epistatic interactions
        self.epistasis.data.values = np.random.uniform(
            coef_range[0], coef_range[1], size=len(self.epistasis.sites))
        self.build()
        return self
    @assert_epistasis
    def set_coefs_decay(self):
        """Use a decaying exponential model to choose random epistatic coefficient
        values that decay/shrink with increasing order.
        """
        wt_phenotype = self.epistasis.values[0]
        for order in range(1, self.epistasis.order + 1):
            # Get epistasis map for this order.
            em = self.epistasis.get_orders(order)
            index = em.index
            # Randomly choose values for the given order
            # (magnitude shrinks by a factor of 10 per order).
            vals = 10**(-order) * np.random.uniform(-wt_phenotype,
                                                    wt_phenotype,
                                                    size=len(index))
            # Map to epistasis object.
            # NOTE(review): assumes each order's indices are contiguous —
            # confirm against EpistasisMap.get_orders.
            self.epistasis.data.values[index[0]: index[-1] + 1] = vals
        self.build()
        return self
    @classmethod
    def from_length(cls, length, **kwargs):
        """Constructs genotype from binary sequences with given length and
        phenotypes from epistasis with a given order.
        Parameters
        ----------
        length : int
            length of the genotypes
        order : int
            order of epistasis in phenotypes.
        Returns
        -------
        GenotypePhenotypeMap
        """
        wildtype = "0" * length
        mutations = utils.genotypes_to_mutations([wildtype, "1" * length])
        return cls(wildtype, mutations, **kwargs)
    @classmethod
    def from_coefs(cls, wildtype, mutations, sites, coefs, model_type="global",
                   *args, **kwargs):
        """Construct a genotype-phenotype map from epistatic coefficients.
        Parameters
        ----------
        wildtype : str
            wildtype sequence
        mutations : dict
            dictionary mapping each site to their possible mutations.
        order : int
            order of epistasis
        coefs : list or array
            epistatic coefficients
        model_type : str
            epistatic model to use in composition matrix.
            (`'global'` or `'local'`)
        Returns
        -------
        GenotypePhenotypeMap
        """
        order = max([len(l) for l in sites])
        self = cls(wildtype, mutations,
                   model_type=model_type, *args, **kwargs)
        if len(coefs) != len(sites):
            raise Exception(
                """Number of betas does not match order/mutations given.""")
        self.set_coefs(sites, coefs)
        return self
    def build(self, values=None, **kwargs):
        """ Method for construction phenotypes from model. """
        # Subclasses must implement phenotype construction.
        raise Exception("""Must be implemented in subclass. """)
    def set_stdeviations(self, sigma):
        """Add standard deviations to the simulated phenotypes, which can then
        be used for sampling error in the genotype-phenotype map.
        Parameters
        ----------
        sigma : float or array-like
            Adds standard deviations to the phenotypes. If float, all
            phenotypes are given the same stdeviations. Else, array must be
            same length as phenotypes and will be assigned to each phenotype.
        """
        # Broadcasts a scalar sigma; an array-like must match len(phenotypes).
        stdeviations = np.ones(len(self.phenotypes)) * sigma
        self.data['stdeviations'] = stdeviations
        return self
|
992,355 | 561c8c0f432bf386ad4e48a07da525cf3f850d81 | # Generated by Django 2.2.7 on 2019-11-08 12:18
import django.contrib.sites.managers
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import thumbnails.fields
import uuid
class Migration(migrations.Migration):
    # Auto-generated initial migration for the LivingstoneWebpage app:
    # creates ContactMessage, PositionName, TeamMember, SeoMetaData,
    # OurWeapon, GalleryImage and ConstantElement. Several models are
    # per-site (FK to sites.Site) and expose a CurrentSiteManager.
    initial = True
    dependencies = [
        ('sites', '0002_alter_domain_unique'),
    ]
    operations = [
        migrations.CreateModel(
            name='ContactMessage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(blank=True, default=None, max_length=250, null=True, verbose_name='First and last name')),
                ('email', models.EmailField(max_length=254, verbose_name='Email address')),
                ('message', models.TextField(verbose_name='Message')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='PositionName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(help_text='Position name', max_length=100)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='TeamMember',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(upload_to='', verbose_name='Avatar')),
                ('name', models.CharField(max_length=250, verbose_name='First and last name')),
                ('order', models.IntegerField(default=0)),
                ('span', models.IntegerField(default=4, help_text='Digit between 1-12. How much space avatar should take. See bootstrap grid', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)])),
                ('uuid', models.UUIDField(default=uuid.uuid4)),
                ('position', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='LivingstoneWebpage.PositionName')),
                ('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
            ],
            options={
                'ordering': ['order'],
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', django.contrib.sites.managers.CurrentSiteManager()),
            ],
        ),
        migrations.CreateModel(
            name='SeoMetaData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=65, verbose_name='Head title')),
                ('description', models.CharField(max_length=160, verbose_name='Meta description')),
                ('keywords', models.CharField(max_length=1000, verbose_name='Keywords (comma separated)')),
                ('site', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='meta', to='sites.Site', verbose_name='Site')),
            ],
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', django.contrib.sites.managers.CurrentSiteManager()),
            ],
        ),
        migrations.CreateModel(
            name='OurWeapon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(upload_to='', verbose_name='Picture')),
                ('name', models.CharField(max_length=250, verbose_name='Tool name')),
                ('order', models.IntegerField(default=0)),
                ('span', models.IntegerField(default=4, help_text='Digit between 1-12. How much space avatar should take. See bootstrap grid', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)])),
                ('uuid', models.UUIDField(default=uuid.uuid4)),
                ('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
            ],
            options={
                'ordering': ['order'],
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', django.contrib.sites.managers.CurrentSiteManager()),
            ],
        ),
        migrations.CreateModel(
            name='GalleryImage',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', thumbnails.fields.ImageField(help_text='size 900x700 px (square size 300x175px)', upload_to='', verbose_name='Picture')),
                ('name', models.CharField(help_text='Max 25 characters', max_length=25, verbose_name='Picture name')),
                ('description', models.TextField(blank=True, default=None, help_text='Max 250 characters', max_length=250, null=True, verbose_name='Description')),
                ('span', models.IntegerField(default=4, help_text='Digit between 1-12. How much space image should take. See bootstrap grid', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(12)])),
                ('order', models.IntegerField(default=0)),
                ('uuid', models.UUIDField(default=uuid.uuid4)),
                ('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
            ],
            options={
                'ordering': ['order'],
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', django.contrib.sites.managers.CurrentSiteManager()),
            ],
        ),
        migrations.CreateModel(
            name='ConstantElement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(max_length=50, verbose_name='Key')),
                ('text', models.TextField(blank=True, default=None, null=True, verbose_name='Text')),
                ('image', models.ImageField(blank=True, default=None, help_text='About image: 1900x710px', null=True, upload_to='', verbose_name='Picture')),
                ('link', models.URLField(blank=True, default=None, null=True, verbose_name='Link')),
                ('uuid', models.UUIDField(default=uuid.uuid4)),
                ('site', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sites.Site')),
            ],
            options={
                'unique_together': {('key', 'uuid', 'site')},
            },
            managers=[
                ('objects', django.db.models.manager.Manager()),
                ('on_site', django.contrib.sites.managers.CurrentSiteManager()),
            ],
        ),
    ]
|
992,356 | 67e2dce70002bd99f0d1bbb4d9c83fdc59bba7d0 |
from pymongo import MongoClient
import random
# CRUD
class MongoDB:
    """Thin CRUD wrapper around the local `ip_proxy` MongoDB database."""

    def __init__(self):
        # MongoClient connects lazily to the default local instance.
        client = MongoClient('localhost', 27017)
        self.database = client['ip_proxy']
        self.collection = self.database['ips']

    def get(self):
        """Return a random stored proxy host, or None when the table is empty."""
        proxies = self.get_all()
        return random.choice(proxies) if proxies else None

    def get_all(self):
        """Return every stored proxy host as a list of strings."""
        return [doc['host'] for doc in self.collection.find()]

    def delete(self, value):
        """Delete the document whose host equals `value`.

        Uses delete_one() instead of the legacy remove(), which was removed
        in pymongo 4 (this file already uses the pymongo-3 insert_one/find_one
        API). add() keeps hosts unique, so at most one document can match.
        """
        self.collection.delete_one({'host': value})

    def delete_all(self):
        # Intentionally unimplemented stub, kept for interface parity.
        pass

    def change_table(self, name):
        """Switch the active collection within the ip_proxy database."""
        self.collection = self.database[name]

    def add(self, value):
        """Insert `value` as a proxy host unless it is already stored."""
        if not self.collection.find_one({'host': value}):
            self.collection.insert_one({'host': value})

    def pop(self):
        """Remove and return a random proxy host (None when empty)."""
        value = self.get()
        if value:
            self.delete(value)
        return value
if __name__ == '__main__':
    # Ad-hoc manual smoke test of the wrapper (requires a local mongod).
    # `not None` is always True, so 'f' always prints — leftover debug check.
    if not None:
        print('f')
    m = MongoDB()
    print(m.add('2'))
    # print(m.pop())
    print(m.delete('2'))
    print(m.get_all())
    # print(m.delete(7))
|
992,357 | d5feee75c9a10a6b6d656dd1a7926e462054fb0c | import csv
import sys
import random
from os import listdir
from os.path import isfile, join, abspath
import webbrowser
# from goose import Goose
# import eatiht
import dragnet
import libextract.api
from newspaper import Article
import justext
# try:
# from boilerpipe.extract import Extractor
# except:
# from boilerpipe3.extract import Extractor
DATA_PATH = './data/htmlfiles/'
RESULT_FILE = './data/justextvsdragnet.csv'
def rank(input):
    """Map a reviewer's grade key to a numeric score.

    Grades are typed on an AZERTY keyboard's top row:
    '&' (1) -> 1, 'é' (2) -> 0.8, '"' (3) -> 0.5, "'" (4) -> 0.

    Parameters
    ----------
    input : str
        One of the four grade keys. (Name kept for interface stability,
        although it shadows the builtin `input`.)

    Raises
    ------
    ValueError
        For any other value — the original fell through every branch and
        crashed with UnboundLocalError instead.
    """
    scores = {'&': 1, 'é': 0.8, '"': 0.5, "'": 0}
    try:
        return scores[input]
    except KeyError:
        raise ValueError("unexpected grade key: %r" % (input,))
def random_html_file():
    """Return a random filename from DATA_PATH (regular files only)."""
    candidates = [name for name in listdir(DATA_PATH)
                  if isfile(join(DATA_PATH, name))]
    # random.choice already picks uniformly; the original also computed an
    # unused randint index and a directory-inclusive file count.
    return random.choice(candidates)
def benchmark(extract_size=800):
    """Pick a random html file, run each extractor, and print result extracts.

    Parameters
    ----------
    extract_size : int
        Number of characters shown from the head and tail of each result.

    Returns
    -------
    str or None
        The source URL of the sampled file, or None when the urls.csv lookup
        fails (the original left `url` unbound and crashed with NameError).
    """
    random_file = random_html_file()
    with open(join(DATA_PATH, random_file), 'r') as f:
        html_string = f.read()
    # GOOSE — the import is commented out above, so this always hits except.
    try:
        g = Goose({'browser_user_agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0',
                   'enable_image_fetching': False})
        goose_article = g.extract(raw_html=html_string)
        goose_result = goose_article.cleaned_text
    except:
        goose_result = ' Goose error.'
    # EATIHT — import commented out above, always hits except.
    try:
        eatiht_result = eatiht.extract(html_string)
    except:
        eatiht_result = ' Eatiht error.'
    # DRAGNET
    try:
        dragnet_result = dragnet.extract_content(html_string)
    except Exception as e:
        dragnet_result = ' Dragnet error: ' + str(e)
    # LIBEXTRACT
    try:
        textnodes = list(libextract.api.extract(html_string))
        libextract_result = textnodes[0].text_content()
    except:
        libextract_result = ' Libextract error.'
    # BOILERPIPE (CanolaExtractor) — import commented out above, always hits except.
    try:
        extractor = Extractor(
            extractor='CanolaExtractor', html=html_string)
        boilerpipe_result = extractor.getText()
    except:
        boilerpipe_result = ' Boilerpipe error.'
    # NEWSPAPER
    try:
        article = Article('url')
        article.download(input_html=html_string)
        article.parse()
        print('Auteurs:', article.authors)
        print('Date de publication:', article.publish_date)
        newspaper_result = article.text
    except:
        newspaper_result = ' Newspaper error.'
    # JUSTEXT
    try:
        paragraphs = justext.justext(
            html_string, justext.get_stoplist("French"))
        print('PARAGRAPHS')
        for p in paragraphs:
            if not p.is_boilerplate:
                print(p.text)
        justext_result = '\n'.join(
            paragraph.text for paragraph in paragraphs if not paragraph.is_boilerplate)
        print('JUSTEXT_RESULT', justext_result)
    except Exception as e:
        justext_result = ' Justext error: ' + str(e)
        print(justext_result)
    # Results
    # BUGFIX: initialize `url` so the function cannot raise NameError at the
    # return below when the lookup fails.
    url = None
    try:
        # finds the url associated with the file in a "filename-url" csv
        with open('./data/urls.csv', 'r') as csvfile:
            urls = dict((line['id'], line['url'])
                        for line in csv.DictReader(csvfile))
            url = urls[random_file[:-5]]
            print('\n\n >>> URL n.' + random_file[:-5] + ' : ' + url)
    except:
        print('\n\n (URL of the html file not found. To print the associated URL, please provide a urls.csv file featuring filename & url in /data)')
    # webbrowser.open(url, autoraise=False)
    # Re-save the sampled html locally and open it for visual comparison.
    path = abspath('temp.html')
    local_url = 'file://' + path
    with open(path, 'w') as f:
        f.write(html_string)
    webbrowser.open(local_url)
    # print('\n\n /// GOOSE /// \n')
    # print(goose_result[:extract_size])
    # print('\n\n /// EATIHT /// \n')
    # print(eatiht_result[:extract_size])
    print('\n ------ [[DRAGNET]] ------',
          len(dragnet_result), 'caractères\n')
    print(dragnet_result[:extract_size] +
          '\n...\n' + dragnet_result[-extract_size:])
    print('\n ------ [[NEWSPAPER]] ------',
          len(newspaper_result), 'caractères\n')
    print(newspaper_result[:extract_size] +
          '\n...\n' + newspaper_result[-extract_size:])
    print('\n ------ [[JUSTEXT]] ------',
          len(justext_result), 'caractères\n')
    print(justext_result[:extract_size] +
          '\n...\n' + justext_result[-extract_size:])
    # print('\n\n /// LIBEXTRACT /// \n')
    # print(libextract_result[:extract_size])
    # print('\n\n /// BOILERPIPE (CanolaExtractor) /// \n\n')
    # print(boilerpipe_result[:extract_size])
    # print('\n\n')
    return url
def prompt_score(tool_name):
    """Prompt until the reviewer enters a valid grade ('&','é','"',"'") or 'p'/'q'."""
    while True:
        choice = input(
            '\n >> ' + tool_name +
            ' result (from 1 for success to 4 for fail, p for pass, q for quit): ')
        if choice not in ('&', 'é', '"', "'", 'q', 'p'):
            print("Choice must be either between 1 and 4 or 'q'")
        else:
            return choice


if __name__ == '__main__':
    benchmarking = True
    fieldnames = ['Dragnet_result',
                  'Newspaper_result', 'Justext_result', 'url']
    # Create the result csv with a header row only on the first run.
    if not isfile(RESULT_FILE):
        with open(RESULT_FILE, 'w') as csv_file:
            writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
            writer.writeheader()
    with open(RESULT_FILE, 'a') as result_file:
        writer = csv.DictWriter(result_file, fieldnames=fieldnames)
        while benchmarking:
            url = benchmark()
            # The three identical validation loops are factored into
            # prompt_score(); behavior per prompt is unchanged.
            dragnet_input = prompt_score('Dragnet')
            if dragnet_input == 'p':
                continue
            elif dragnet_input == 'q':
                benchmarking = False
                continue
            newspaper_input = prompt_score('Newspaper')
            justext_input = prompt_score('Justext')
            print(dragnet_input, newspaper_input)
            # BUGFIX: the original never checked justext_input for 'q'/'p',
            # so those answers fell through into rank() and crashed.
            if newspaper_input == 'q' or justext_input == 'q':
                benchmarking = False
                continue
            elif newspaper_input == 'p' or justext_input == 'p':
                continue
            else:
                dragnet_score = rank(dragnet_input)
                newspaper_score = rank(newspaper_input)
                justext_score = rank(justext_input)
                writer.writerow({'Dragnet_result': dragnet_score,
                                 'Newspaper_result': newspaper_score,
                                 'Justext_result': justext_score,
                                 'url': url})
992,358 | 4342664cd81ef0ab0d49d166f07ccd1da8130f45 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Reference implementation registry server WSGI controller
"""
import json
import logging
import routes
from webob import exc
from glance.common import wsgi
from glance.common import exception
from glance.registry.db import api as db_api
logger = logging.getLogger('glance.registry.server')
DISPLAY_FIELDS_IN_INDEX = ['id', 'name', 'size',
'disk_format', 'container_format',
'checksum']
SUPPORTED_FILTERS = ['name', 'status', 'container_format', 'disk_format',
'size_min', 'size_max']
MAX_ITEM_LIMIT = 25
class Controller(wsgi.Controller):
"""Controller for the reference implementation registry server"""
def __init__(self, options):
self.options = options
db_api.configure_db(options)
def index(self, req):
"""Return a basic filtered list of public, non-deleted images
:param req: the Request object coming from the wsgi layer
:retval a mapping of the following form::
dict(images=[image_list])
Where image_list is a sequence of mappings::
{
'id': <ID>,
'name': <NAME>,
'size': <SIZE>,
'disk_format': <DISK_FORMAT>,
'container_format': <CONTAINER_FORMAT>,
'checksum': <CHECKSUM>
}
"""
params = {
'filters': self._get_filters(req),
'limit': self._get_limit(req),
}
if 'marker' in req.str_params:
params['marker'] = self._get_marker(req)
images = db_api.image_get_all_public(None, **params)
results = []
for image in images:
result = {}
for field in DISPLAY_FIELDS_IN_INDEX:
result[field] = image[field]
results.append(result)
return dict(images=results)
def detail(self, req):
"""Return a filtered list of public, non-deleted images in detail
:param req: the Request object coming from the wsgi layer
:retval a mapping of the following form::
dict(images=[image_list])
Where image_list is a sequence of mappings containing
all image model fields.
"""
params = {
'filters': self._get_filters(req),
'limit': self._get_limit(req),
}
if 'marker' in req.str_params:
params['marker'] = self._get_marker(req)
images = db_api.image_get_all_public(None, **params)
image_dicts = [make_image_dict(i) for i in images]
return dict(images=image_dicts)
def _get_filters(self, req):
"""Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
filters = {}
properties = {}
for param in req.str_params:
if param in SUPPORTED_FILTERS:
filters[param] = req.str_params.get(param)
if param.startswith('property-'):
_param = param[9:]
properties[_param] = req.str_params.get(param)
if len(properties) > 0:
filters['properties'] = properties
return filters
def _get_limit(self, req):
"""Parse a limit query param into something usable."""
try:
limit = int(req.str_params.get('limit', MAX_ITEM_LIMIT))
except ValueError:
raise exc.HTTPBadRequest("limit param must be an integer")
if limit < 0:
raise exc.HTTPBadRequest("limit param must be positive")
return min(MAX_ITEM_LIMIT, limit)
def _get_marker(self, req):
"""Parse a marker query param into something usable."""
try:
marker = int(req.str_params.get('marker', None))
except ValueError:
raise exc.HTTPBadRequest("marker param must be an integer")
return marker
def show(self, req, id):
"""Return data about the given image id."""
try:
image = db_api.image_get(None, id)
except exception.NotFound:
raise exc.HTTPNotFound()
return dict(image=make_image_dict(image))
def delete(self, req, id):
"""
Deletes an existing image with the registry.
:param req: Request body. Ignored.
:param id: The opaque internal identifier for the image
:retval Returns 200 if delete was successful, a fault if not.
"""
context = None
try:
db_api.image_destroy(context, id)
except exception.NotFound:
return exc.HTTPNotFound()
def create(self, req):
"""
Registers a new image with the registry.
:param req: Request body. A JSON-ified dict of information about
the image.
:retval Returns the newly-created image information as a mapping,
which will include the newly-created image's internal id
in the 'id' field
"""
image_data = json.loads(req.body)['image']
# Ensure the image has a status set
image_data.setdefault('status', 'active')
context = None
try:
image_data = db_api.image_create(context, image_data)
return dict(image=make_image_dict(image_data))
except exception.Duplicate:
msg = ("Image with identifier %s already exists!" % id)
logger.error(msg)
return exc.HTTPConflict(msg)
except exception.Invalid, e:
msg = ("Failed to add image metadata. Got error: %(e)s" % locals())
logger.error(msg)
return exc.HTTPBadRequest(msg)
def update(self, req, id):
    """Updates an existing image with the registry.

    :param req: Request body.  A JSON-ified dict of information about
                the image.  This will replace the information in the
                registry about this image
    :param id: The opaque internal identifier for the image

    :retval Returns the updated image information as a mapping,
    """
    image_data = json.loads(req.body)['image']
    # Header set by the API server to request that all existing custom
    # properties be dropped before applying the new metadata.
    purge_props = req.headers.get("X-Glance-Registry-Purge-Props", "false")
    context = None
    try:
        logger.debug("Updating image %(id)s with metadata: %(image_data)r"
                     % locals())
        if purge_props == "true":
            updated_image = db_api.image_update(context, id, image_data,
                                                True)
        else:
            updated_image = db_api.image_update(context, id, image_data)
        return dict(image=make_image_dict(updated_image))
    except exception.Invalid, e:
        msg = ("Failed to update image metadata. "
               "Got error: %(e)s" % locals())
        logger.error(msg)
        return exc.HTTPBadRequest(msg)
    except exception.NotFound:
        raise exc.HTTPNotFound(body='Image not found',
                               request=req,
                               content_type='text/plain')
class API(wsgi.Router):
    """WSGI entry point for all Registry requests."""

    def __init__(self, options):
        # Wire up the standard REST routes for images, plus a
        # /images/detail collection view and a root index.
        controller = Controller(options)
        mapper = routes.Mapper()
        mapper.resource("image", "images", controller=controller,
                        collection={'detail': 'GET'})
        mapper.connect("/", controller=controller, action="index")
        super(API, self).__init__(mapper)
def make_image_dict(image):
    """
    Create a dict representation of an image which we can use to
    serialize the image.
    """
    def _fetch_attrs(d, attrs):
        # Keep only the whitelisted keys that are actually present.
        return dict((a, d[a]) for a in attrs if a in d.keys())

    # TODO(sirp): should this be a dict, or a list of dicts?
    # A plain dict is more convenient, but list of dicts would provide
    # access to created_at, etc
    properties = dict((p['name'], p['value'])
                      for p in image['properties'] if not p['deleted'])

    image_dict = _fetch_attrs(image, db_api.IMAGE_ATTRS)
    image_dict['properties'] = properties
    return image_dict
def app_factory(global_conf, **local_conf):
    """
    paste.deploy app factory for creating Glance reference implementation
    registry server apps
    """
    # Local (per-app) settings override the global paste config.
    conf = dict(global_conf)
    conf.update(local_conf)
    return API(conf)
|
class Company:
    """Read-only formatting facade over one dataframe row of company data.

    Each property renders one labelled fragment of the report; `bal`
    builds the multi-line financials table via the module-level screen().
    """

    def __init__(self, row):
        # row: a dataframe row with title/org/region/inn/ok1..ok3 and
        # the financial columns used in `bal`.
        self.row = row

    @property
    def name(self):
        return f"{self.row.title} ({self.row.org})"

    @property
    def region(self):
        return f"Регион: {self.row.region}"

    @property
    def inn(self):
        return f"ИНН: {self.row.inn}"

    @property
    def okved(self):
        # OKVED code is three zero-padded two-digit groups joined by dots
        # (resolves the old "FIXME: refactor" triple f-string).
        parts = (self.row.ok1, self.row.ok2, self.row.ok3)
        return "ОКВЭД: " + ".".join(str(p).zfill(2) for p in parts)

    @property
    def bal(self):
        # One line per balance-sheet / P&L / cash-flow item.
        return "\n".join([screen("Активы всего ", self.row.ta),
                          screen("    оборотные ", self.row.ta_nonfix),
                          screen("    внеоборотные ", self.row.ta_fix),
                          screen("Пассивы всего ", self.row.tp),
                          screen("    капитал ", self.row.tp_capital),
                          screen("    краткосрочные ", self.row.tp_short),
                          screen("    долгосрочные ", self.row.tp_long),
                          screen("Выручка ", self.row.sales),
                          screen("Прибыль ", self.row.profit_before_tax),
                          screen("Денежный поток ", self.row.cf),
                          screen("    опер. ", self.row.cf_oper),
                          screen("    фин. ", self.row.cf_fin),
                          screen("    инв. ", self.row.cf_inv),
                          ])

    def __str__(self):
        ids = " ".join([self.inn, self.okved])
        return "\n".join([self.name, ids, self.bal])

    def __repr__(self):
        return str(self)
def screen(name, x):
    """Render *name* plus *x* right-aligned in 12 chars, with space-grouped thousands."""
    grouped = format(x, '_').replace("_", " ")
    return f"{name} {grouped:>12}"
from random import randint

# A throwaway random index; presumably notebook leftovers — it is not
# used by ith()/rnd() below, which draw their own indices.
i = randint(0, 10000)
def ith(df, i):
    """Return positional row *i* of *df* as a Series."""
    return df.iloc[i]
def rnd(df):
    """Return a uniformly random row of *df* as a Series.

    Bug fix: random.randint's upper bound is inclusive, so the previous
    randint(0, df.shape[0]) could yield an out-of-range index and raise
    IndexError roughly once every len(df)+1 calls.
    """
    i = randint(0, df.shape[0] - 1)
    return df.iloc[i, :]
# Demo: print a random company that has a non-zero cash flow.
# NOTE(review): `df` is presumably a global loaded elsewhere (notebook
# context?) — confirm before running this file as a standalone script.
if __name__ == "__main__":
    z = Company(rnd(df[df.cf != 0]))
    print(z)
"""
5409110820
ПРИХОД В ЧЕСТЬ ИКОНЫ БОЖИЕЙ МАТЕРИ "КАЗАНСКАЯ" ГОРОДА НОВОСИБИРСКА (ПЕРВОМАЙСКИЙ РАЙОН)" НОВОСИБИРСКОЙ ЕПАРХИИ РУССКОЙ ПРАВОСЛАВНОЙ ЦЕРКВИ (МЕСТНАЯ ПРАВОСЛАВНАЯ РЕЛИГИОЗНАЯ ОРГАНИЗАЦИЯ)
Ликвидационная комиссия
КЛЮЧЕВОЙ ЭЛЕМЕНТ (Ликвидационная комиссия ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ФЕДЕРАЦИЯ БОКСА ГОРОДА САЯНСКА (ОБЩЕСТВЕННАЯ ОРГАНИЗАЦИЯ)
БЕЗОТКАТНЫЕ ПЕРЕВОЗКИ (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
Регион 22
ИНН 2221219242
КЛЕВЕР (Общество с ограниченной ответственностью)
Регион 59
ИНН 5902014988
РОМАШКА (САДОВОДЧЕСКОЕ И ОГОРОДНИЧЕСКОЕ НЕКОММЕРЧЕСКОЕ ТОВАРИЩЕСТВО ГРАЖДАН)
ИНН 0269009842
АВТОПРОМ (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ИНН 7106068192
Регион 71
ОКВЭД 45.11.2
Активы всего 453673
Пассивы всего 453673
Выручка 99319
Прибьль 858
Денежный поток -6122
опер. 0
фин. -14266
инв. 8144
КОМПАНИЯ ВИТА-ЛАЙН (АКЦИОНЕРНОЕ ОБЩЕСТВО)
ИНН 7710416207
Регион 77
ОКВЭД 64.99.1
Активы всего 128558
Пассивы всего 128558
Выручка 0
Прибьль -603
Денежный поток -10
опер. -509
фин. -299
инв. 798
ОКИНАВА (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ИНН 5612085088
Регион 56
ОКВЭД 45.1.0
Активы всего 119835
Пассивы всего 119835
Выручка 238374
Прибьль -7975
Денежный поток 1723
опер. -9766
фин. 11170
инв. 319
ЭВЕРЕСТ (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ИНН 7727707469
Регион 77
ОКВЭД 93.29.0
Активы всего 47216
Пассивы всего 47216
Выручка 30981
Прибьль 2869
Денежный поток 4062
опер. 4062
фин. 0
инв. 0
ДОМОСТРОИТЕЛЬНЫЙ КОМБИНАТ № 1 (АКЦИОНЕРНОЕ ОБЩЕСТВО)
ИНН 7714046959
Регион 77
ОКВЭД 41.20.0
Активы всего 33502133
Пассивы всего 33502133
Выручка 30942195
Прибьль 84777
Денежный поток 327902
опер. -56369
фин. -492808
инв. 877079
АРХАНГЕЛЬСКИЙ РЕГИОНАЛЬНЫЙ РАДИОТЕЛЕВИЗИОННЫЙ ПЕРЕДАЮЩИЙ ЦЕНТР (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ИНН 2926011074
Регион 29
ОКВЭД 60.10.0
Активы всего 60002
Пассивы всего 60002
Выручка 0
Прибьль -6
Денежный поток -75
опер. -453
фин. 0
инв. 378
ОКТОБЛУ (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ИНН 5029086747
Регион 50
ОКВЭД 47.19.0
Активы всего 7855867
Пассивы всего 7855867
Выручка 18834367
Прибьль 783023
Денежный поток 116329
опер. 976737
фин. -399077
инв. -461331
СПЕЦИАЛИЗИРОВАННАЯ ПЕРЕДВИЖНАЯ МЕХАНИЗИРОВАННАЯ КОЛОННА-ЗЕЯ (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ИНН: 2815014979 ОКВЭД: 52.29.0
Активы всего 2 412
внеоборотные 0
оборотные 2 412
Пассивы всего 2 412
долгосрочные 0
краткосрочные 2 412
Выручка 6 724
Прибыль 760
Денежный поток 301
опер. 216
фин. 85
инв. 0
ВТОРМАТЕРИАЛ (ОБЩЕСТВО С ОГРАНИЧЕННОЙ ОТВЕТСТВЕННОСТЬЮ)
ИНН: 7724404000 ОКВЭД: 46.77.0
Активы всего 864
оборотные 457
внеоборотные 407
Пассивы всего 864
капитал -409
краткосрочные 93
долгосрочные 1 180
Выручка 85
Прибыль -420
Денежный поток 76
опер. -1 109
фин. 1 185
инв. 0
Научно-производственный Центр развития технологий природно-техногенной безопасности - ГЕОТЕХНОЛОГИИ (Общество с ограниченной ответственностью)
ИНН: 3666125368 ОКВЭД: 26.51.05
Активы всего 199 760
оборотные 199 733
внеоборотные 27
Пассивы всего 199 760
капитал 38 762
краткосрочные 160 998
долгосрочные 0
Выручка 0
Прибыль -12
Денежный поток -1 098
опер. -2 542
фин. 1 444
инв. 0
"""
# TODO: Что такое okpo, okopf, okfs? Чем полезны в анализе?
# TODO: словарь ОПУ, ОДДС
|
992,360 | b09675e02ef9422054809da8b520f4ea86749fc0 | '''
Author: Ashutosh Srivastava
Python3 solution
'''
import math
# Memoized primality results: dict_main[N] == 1 if N is prime, else 0.
dict_main = {}


def cal(N):
    """Return 1 if N is prime, else 0, memoizing results in dict_main.

    Fixes two edge cases of the original trial-division test:
    1 was wrongly reported prime, and 2 was wrongly reported composite.
    """
    if N in dict_main:
        return dict_main[N]
    if N < 2:
        result = 0
    elif N == 2:
        result = 1
    elif N % 2 == 0:
        result = 0
    else:
        # trial division by odd candidates up to sqrt(N)
        result = 1
        for i in range(3, int(math.sqrt(N)) + 1, 2):
            if N % i == 0:
                result = 0
                break
    dict_main[N] = result
    return result
def sexy_pairs(A, B):
    """Count odd p in [A, B-6] such that both p and p+6 are prime ("sexy primes")."""
    return sum(1 for p in range(A, B - 5, 2) if cal(p) and cal(p + 6))
# Read the inclusive range [A, B] and print the number of sexy-prime
# pairs; an even A is bumped to the next odd number since the scan
# steps through odd candidates only.
A,B=map(int,input().split())
if(A%2==0):
    print(sexy_pairs(A+1,B))
else:
    print(sexy_pairs(A,B))
|
992,361 | 5b9b476b92732adc319721a7ef50cdf5be4859b0 | #!/usr/bin/env python3
# -*- mode: python -*-
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
MIN_PYTHON_VERSION = "3.8.0"  # FIXME duplicated from setup.py
_min_python_version_tuple = tuple(map(int, (MIN_PYTHON_VERSION.split("."))))
# Fail fast with a clear message on unsupported interpreters.
if sys.version_info[:3] < _min_python_version_tuple:
    sys.exit("Error: Electrum requires Python version >= %s..." % MIN_PYTHON_VERSION)


import warnings
import asyncio
from typing import TYPE_CHECKING, Optional


# Figure out how this script is being run: frozen binary, Android APK,
# AppImage, local source checkout, or installed package.
script_dir = os.path.dirname(os.path.realpath(__file__))
is_pyinstaller = getattr(sys, 'frozen', False)
is_android = 'ANDROID_DATA' in os.environ
is_appimage = 'APPIMAGE' in os.environ
is_binary_distributable = is_pyinstaller or is_android or is_appimage
# is_local: unpacked tar.gz but not pip installed, or git clone
is_local = (not is_binary_distributable
            and os.path.exists(os.path.join(script_dir, "electrum.desktop")))
is_git_clone = is_local and os.path.exists(os.path.join(script_dir, ".git"))

if is_git_clone:
    # developers should probably see all deprecation warnings.
    warnings.simplefilter('default', DeprecationWarning)

# local checkouts and Android ship vendored deps in ./packages
if is_local or is_android:
    sys.path.insert(0, os.path.join(script_dir, 'packages'))


if is_pyinstaller:
    # Keep an open file handle for the binary that started us. On Windows, this
    # prevents users from moving or renaming the exe file while running (doing which
    # causes ImportErrors and other runtime failures). (see #4072)
    _file = open(sys.executable, 'rb')
def check_imports():
    """Verify that required pure-python dependencies are importable.

    Exits the process with an install hint when one is missing, and
    raises RuntimeError on an incompatible aiorpcX version.
    """
    # pure-python dependencies need to be imported here for pyinstaller
    try:
        import dns
        import certifi
        import qrcode
        import google.protobuf
        import aiorpcx
    except ImportError as e:
        sys.exit(f"Error: {str(e)}. Try 'sudo python3 -m pip install <module-name>'")
    if not ((0, 22, 0) <= aiorpcx._version < (0, 23)):
        raise RuntimeError(f'aiorpcX version {aiorpcx._version} does not match required: 0.22.0<=ver<0.23')
    # the following imports are for pyinstaller
    from google.protobuf import descriptor
    from google.protobuf import message
    from google.protobuf import reflection
    from google.protobuf import descriptor_pb2
    # make sure that certificates are here
    assert os.path.exists(certifi.where())
# On Android the APK bundles its dependencies; elsewhere verify them now.
if not is_android:
    check_imports()

# flag consumed by electrum's logging module to detect this entry point
sys._ELECTRUM_RUNNING_VIA_RUNELECTRUM = True  # used by logging.py
from electrum.logging import get_logger, configure_logging # import logging submodule first
from electrum import util
from electrum.payment_identifier import PaymentIdentifier
from electrum import constants
from electrum import SimpleConfig
from electrum.wallet_db import WalletDB
from electrum.wallet import Wallet
from electrum.storage import WalletStorage
from electrum.util import print_msg, print_stderr, json_encode, json_decode, UserCancelled
from electrum.util import InvalidPassword
from electrum.commands import get_parser, known_commands, Commands, config_variables
from electrum import daemon
from electrum import keystore
from electrum.util import create_and_start_event_loop
from electrum.i18n import set_language
if TYPE_CHECKING:
import threading
from electrum.plugin import Plugins
_logger = get_logger(__name__)
# get password routine
def prompt_password(prompt, confirm=True):
    """Interactively read a password without echoing it.

    When *confirm* is truthy, the password must be typed twice and the
    process exits on a mismatch.  Empty input is returned as None.
    """
    import getpass
    pw = getpass.getpass(prompt, stream=None)
    if pw and confirm:
        if getpass.getpass("Confirm: ") != pw:
            sys.exit("Error: Passwords do not match.")
    return pw or None
def init_cmdline(config_options, wallet_path, server, *, config: 'SimpleConfig'):
    """Prepare config_options for a command-line command.

    Validates wallet presence, prints key-export warnings, and prompts
    for (or propagates) the wallet password into config_options.
    *server* is True when the command will be forwarded to a running daemon.
    """
    cmdname = config.get('cmd')
    cmd = known_commands[cmdname]

    # an unsigned payto/paytomany needs no password; a broadcast one needs network
    if cmdname in ['payto', 'paytomany'] and config.get('unsigned'):
        cmd.requires_password = False

    if cmdname in ['payto', 'paytomany'] and config.get('broadcast'):
        cmd.requires_network = True

    # instantiate wallet for command-line
    storage = WalletStorage(wallet_path)

    if cmd.requires_wallet and not storage.file_exists():
        print_msg("Error: Wallet file not found.")
        print_msg("Type 'electrum create' to create a new wallet, or provide a path to a wallet with the -w option")
        sys_exit(1)

    # important warning
    if cmd.name in ['getprivatekeys']:
        print_stderr("WARNING: ALL your private keys are secret.")
        print_stderr("Exposing a single private key can compromise your entire wallet!")
        print_stderr("In particular, DO NOT use 'redeem private key' services proposed by third parties.")

    # commands needing password
    if ((cmd.requires_wallet and storage.is_encrypted() and server is False)\
            or (cmdname == 'load_wallet' and storage.is_encrypted())\
            or cmd.requires_password):
        if storage.is_encrypted_with_hw_device():
            # this case is handled later in the control flow
            password = None
        elif config.get('password') is not None:
            password = config.get('password')
            if password == '':
                password = None
        else:
            password = prompt_password('Password:', None)
    else:
        password = None

    config_options['password'] = config_options.get('password') or password

    if cmd.name == 'password' and 'new_password' not in config_options:
        new_password = prompt_password('New password:')
        config_options['new_password'] = new_password
def get_connected_hw_devices(plugins: 'Plugins'):
    """Scan all hardware-wallet plugins; return a list of (plugin_name, device_info)."""
    devices = []
    devmgr = plugins.device_manager
    # scan devices
    for splugin in plugins.get_hardware_support():
        name, plugin = splugin.name, splugin.plugin
        if not plugin:
            # plugin failed to initialise; log and keep scanning the rest
            e = splugin.exception
            _logger.error(f"{name}: error during plugin init: {repr(e)}")
            continue
        try:
            infos = devmgr.list_pairable_device_infos(handler=None, plugin=plugin)
        except Exception as e:
            _logger.error(f'error getting device infos for {name}: {repr(e)}')
            continue
        devices.extend((name, info) for info in infos)
    return devices
def get_password_for_hw_device_encrypted_storage(plugins: 'Plugins') -> str:
    """Derive the storage-encryption password from a connected hardware wallet.

    Exits the process when no device is connected or the user cancels
    on-device.  With multiple devices, the first one found is used.
    """
    devices = get_connected_hw_devices(plugins)
    if len(devices) == 0:
        print_msg("Error: No connected hw device found. Cannot decrypt this wallet.")
        sys.exit(1)
    elif len(devices) > 1:
        print_msg("Warning: multiple hardware devices detected. "
                  "The first one will be used to decrypt the wallet.")
    # FIXME we use the "first" device, in case of multiple ones
    name, device_info = devices[0]
    devmgr = plugins.device_manager
    try:
        client = devmgr.client_by_id(device_info.device.id_)
        return client.get_password_for_storage_encryption()
    except UserCancelled:
        sys.exit(0)
async def run_offline_command(config, config_options, plugins: 'Plugins'):
    """Execute a single CLI command without a running daemon.

    Opens (and, if needed, decrypts) the wallet, verifies the password,
    builds the command arguments from config, runs the command coroutine,
    saves the wallet, and returns the command's result.
    """
    cmdname = config.get('cmd')
    cmd = known_commands[cmdname]
    password = config_options.get('password')
    if 'wallet_path' in cmd.options and config_options.get('wallet_path') is None:
        config_options['wallet_path'] = config.get_wallet_path()
    if cmd.requires_wallet:
        storage = WalletStorage(config.get_wallet_path())
        if storage.is_encrypted():
            if storage.is_encrypted_with_hw_device():
                password = get_password_for_hw_device_encrypted_storage(plugins)
                config_options['password'] = password
            storage.decrypt(password)
        db = WalletDB(storage.read(), storage=storage, manual_upgrades=False)
        wallet = Wallet(db, config=config)
        config_options['wallet'] = wallet
    else:
        wallet = None
    # check password
    if cmd.requires_password and wallet.has_password():
        try:
            wallet.check_password(password)
        except InvalidPassword:
            print_msg("Error: This password does not decode this wallet.")
            sys.exit(1)
    if cmd.requires_network:
        print_msg("Warning: running command offline")
    # arguments passed to function
    args = [config.get(x) for x in cmd.params]
    # decode json arguments
    if cmdname not in ('setconfig',):
        args = list(map(json_decode, args))
    # options
    kwargs = {}
    for x in cmd.options:
        kwargs[x] = (config_options.get(x) if x in ['wallet_path', 'wallet', 'password', 'new_password'] else config.get(x))
    cmd_runner = Commands(config=config)
    func = getattr(cmd_runner, cmd.name)
    result = await func(*args, **kwargs)
    # save wallet
    if wallet:
        wallet.save_db()
    return result
def init_plugins(config, gui_name):
    """Instantiate the plugin subsystem for the given frontend (e.g. 'cmdline')."""
    from electrum.plugin import Plugins
    return Plugins(config, gui_name)
# Shared asyncio event-loop state, populated by main() and torn down
# by sys_exit().
loop = None  # type: Optional[asyncio.AbstractEventLoop]
stop_loop = None  # type: Optional[asyncio.Future]
loop_thread = None  # type: Optional[threading.Thread]


def sys_exit(i):
    """Stop the global event loop (when one is running) and exit with code *i*."""
    if loop:
        # resolve the stop-future from the loop's own thread, then give
        # the loop thread a moment to wind down
        loop.call_soon_threadsafe(stop_loop.set_result, 1)
        loop_thread.join(timeout=1)
    sys.exit(i)
def main():
    """Entry point: parse argv, build the SimpleConfig, select the chain,
    optionally daemonize, then dispatch to handle_cmd()."""
    global loop, stop_loop, loop_thread

    # The hook will only be used in the Qt GUI right now
    util.setup_thread_excepthook()
    # on macOS, delete Process Serial Number arg generated for apps launched in Finder
    sys.argv = list(filter(lambda x: not x.startswith('-psn'), sys.argv))

    # old 'help' syntax
    if len(sys.argv) > 1 and sys.argv[1] == 'help':
        sys.argv.remove('help')
        sys.argv.append('-h')

    # old '-v' syntax
    # Due to this workaround that keeps old -v working,
    # more advanced usages of -v need to use '-v='.
    # e.g. -v=debug,network=warning,interface=error
    try:
        i = sys.argv.index('-v')
    except ValueError:
        pass
    else:
        sys.argv[i] = '-v*'

    # read arguments from stdin pipe and prompt
    for i, arg in enumerate(sys.argv):
        if arg == '-':
            if not sys.stdin.isatty():
                sys.argv[i] = sys.stdin.read()
                break
            else:
                raise Exception('Cannot get argument from stdin')
        elif arg == '?':
            sys.argv[i] = input("Enter argument:")
        elif arg == ':':
            sys.argv[i] = prompt_password('Enter argument (will not echo):', False)

    # parse command line
    parser = get_parser()
    args = parser.parse_args()

    # config is an object passed to the various constructors (wallet, interface, gui)
    if is_android:
        import importlib.util
        android_gui = 'kivy' if importlib.util.find_spec('kivy') else 'qml'
        config_options = {
            'verbosity': '*' if util.is_android_debug_apk() else '',
            'cmd': 'gui',
            SimpleConfig.GUI_NAME.key(): android_gui,
            SimpleConfig.WALLET_USE_SINGLE_PASSWORD.key(): True,
        }
        if util.get_android_package_name() == "org.electrum.testnet.electrum":
            # ~hack for easier testnet builds. pkgname subject to change.
            config_options['testnet'] = True
    else:
        # drop unset args and command-specific variables handled elsewhere
        config_options = args.__dict__
        f = lambda key: config_options[key] is not None and key not in config_variables.get(args.cmd, {}).keys()
        config_options = {key: config_options[key] for key in filter(f, config_options.keys())}
        if config_options.get(SimpleConfig.NETWORK_SERVER.key()):
            config_options[SimpleConfig.NETWORK_AUTO_CONNECT.key()] = False

    config_options['cwd'] = cwd = os.getcwd()

    # fixme: this can probably be achieved with a runtime hook (pyinstaller)
    if is_pyinstaller and os.path.exists(os.path.join(sys._MEIPASS, 'is_portable')):
        config_options['portable'] = True

    if config_options.get('portable'):
        if is_local:
            # running from git clone or local source: put datadir next to main script
            datadir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'electrum_data')
        else:
            # Running a binary or installed source. The most generic but still reasonable thing
            # is to use the current working directory. (see #7732)
            # note: The main script is often unpacked to a temporary directory from a bundled executable,
            #       and we don't want to put the datadir inside a temp dir.
            # note: Re the portable .exe on Windows, when the user double-clicks it, CWD gets set
            #       to the parent dir, i.e. we will put the datadir next to the exe.
            datadir = os.path.join(os.path.realpath(cwd), 'electrum_data')
        config_options['electrum_path'] = datadir

    if not config_options.get('verbosity'):
        warnings.simplefilter('ignore', DeprecationWarning)

    config = SimpleConfig(config_options)
    cmdname = config.get('cmd')

    # set language as early as possible
    # Note: we are already too late for strings that are declared in the global scope
    #       of an already imported module. However, the GUI and the plugins at least have
    #       not been imported yet. (see #4621)
    # Note: it is ok to call set_language() again later, but note that any call only applies
    #       to not-yet-evaluated strings.
    if cmdname == 'gui':
        gui_name = config.GUI_NAME
        lang = config.LOCALIZATION_LANGUAGE
        if not lang:
            try:
                from electrum.gui.default_lang import get_default_language
                lang = get_default_language(gui_name=gui_name)
                _logger.info(f"get_default_language: detected default as {lang=!r}")
            except ImportError as e:
                _logger.info(f"get_default_language: failed. got exc={e!r}")
        set_language(lang)

    # select the chain constants before anything touches the network
    if config.get('testnet'):
        constants.set_testnet()
    elif config.get('regtest'):
        constants.set_regtest()
    elif config.get('simnet'):
        constants.set_simnet()
    elif config.get('signet'):
        constants.set_signet()

    # check if we received a valid payment identifier
    uri = config_options.get('url')
    if uri and not PaymentIdentifier(None, uri).is_valid():
        print_stderr('unknown command:', uri)
        sys.exit(1)

    if cmdname == 'daemon' and config.get("detach"):
        # detect lockfile.
        # This is not as good as get_file_descriptor, but that would require the asyncio loop
        lockfile = daemon.get_lockfile(config)
        if os.path.exists(lockfile):
            print_stderr("Daemon already running (lockfile detected).")
            print_stderr("Run 'electrum stop' to stop the daemon.")
            sys.exit(1)
        # Initialise rpc credentials to random if not set yet. This would normally be done
        # later anyway, but we need to avoid the two sides of the fork setting conflicting random creds.
        daemon.get_rpc_credentials(config)  # inits creds as side-effect
        # fork before creating the asyncio event loop
        try:
            pid = os.fork()
        except AttributeError as e:
            print_stderr(f"Error: {e!r}")
            print_stderr("Running daemon in detached mode (-d) is not supported on this platform.")
            print_stderr("Try running the daemon in the foreground (without -d).")
            sys.exit(1)
        if pid:
            # parent: wait until the child daemon is ready, then exit
            print_stderr("starting daemon (PID %d)" % pid)
            loop, stop_loop, loop_thread = create_and_start_event_loop()
            ready = daemon.wait_until_daemon_becomes_ready(config=config, timeout=5)
            if ready:
                sys_exit(0)
            else:
                print_stderr("timed out waiting for daemon to get ready")
                sys_exit(1)
        else:
            # redirect standard file descriptors
            sys.stdout.flush()
            sys.stderr.flush()
            si = open(os.devnull, 'r')
            so = open(os.devnull, 'w')
            se = open(os.devnull, 'w')
            os.dup2(si.fileno(), sys.stdin.fileno())
            os.dup2(so.fileno(), sys.stdout.fileno())
            os.dup2(se.fileno(), sys.stderr.fileno())

    loop, stop_loop, loop_thread = create_and_start_event_loop()
    try:
        handle_cmd(
            cmdname=cmdname,
            config=config,
            config_options=config_options,
        )
    except Exception:
        _logger.exception("")
        sys_exit(1)
def handle_cmd(*, cmdname: str, config: 'SimpleConfig', config_options: dict):
    """Dispatch on *cmdname*: launch the GUI, run the daemon, or execute a
    CLI command (via a running daemon, or offline) and print its result."""
    if cmdname == 'gui':
        configure_logging(config)
        fd = daemon.get_file_descriptor(config)
        if fd is not None:
            # we were able to grab the lockfile: become the daemon + GUI
            d = daemon.Daemon(config, fd, start_network=False)
            try:
                d.run_gui()
            except BaseException as e:
                _logger.exception('daemon.run_gui errored')
                sys_exit(1)
            else:
                sys_exit(0)
        else:
            # a daemon already runs: ask it to open a GUI window
            result = daemon.request(config, 'gui', (config_options,))
    elif cmdname == 'daemon':
        configure_logging(config)
        fd = daemon.get_file_descriptor(config)
        if fd is not None:
            # run daemon
            d = daemon.Daemon(config, fd)
            d.run_daemon()
            sys_exit(0)
        else:
            # FIXME this message is lost in detached mode (parent process already exited after forking)
            print_msg("Daemon already running")
            sys_exit(1)
    else:
        # command line
        configure_logging(config, log_to_file=False)  # don't spam logfiles for each client-side RPC, but support "-v"
        cmd = known_commands[cmdname]
        wallet_path = config.get_wallet_path()
        if not config.NETWORK_OFFLINE:
            # forward the command to a running daemon
            init_cmdline(config_options, wallet_path, True, config=config)
            timeout = config.CLI_TIMEOUT
            try:
                result = daemon.request(config, 'run_cmdline', (config_options,), timeout)
            except daemon.DaemonNotRunning:
                print_msg("Daemon not running; try 'electrum daemon -d'")
                if not cmd.requires_network:
                    print_msg("To run this command without a daemon, use --offline")
                if cmd.name == "stop":  # remove lockfile if it exists, as daemon looks dead
                    lockfile = daemon.get_lockfile(config)
                    if os.path.exists(lockfile):
                        print_msg("Found lingering lockfile for daemon. Removing.")
                        daemon.remove_lockfile(lockfile)
                sys_exit(1)
            except Exception as e:
                print_stderr(str(e) or repr(e))
                sys_exit(1)
        else:
            # run the command in-process, without a daemon
            if cmd.requires_network:
                print_msg("This command cannot be run offline")
                sys_exit(1)
            lockfile = daemon.get_lockfile(config)
            if os.path.exists(lockfile):
                print_stderr("Daemon already running (lockfile detected)")
                print_stderr("Run 'electrum stop' to stop the daemon.")
                print_stderr("Run this command without --offline to interact with the daemon")
                sys_exit(1)
            init_cmdline(config_options, wallet_path, False, config=config)
            plugins = init_plugins(config, 'cmdline')
            coro = run_offline_command(config, config_options, plugins)
            fut = asyncio.run_coroutine_threadsafe(coro, loop)
            try:
                try:
                    result = fut.result()
                finally:
                    plugins.stop()
                    plugins.stopped_event.wait(1)
            except Exception as e:
                print_stderr(str(e) or repr(e))
                sys_exit(1)
        # print the command's result (string, error dict, or JSON)
        if isinstance(result, str):
            print_msg(result)
        elif type(result) is dict and result.get('error'):
            print_stderr(result.get('error'))
            sys_exit(1)
        elif result is not None:
            print_msg(json_encode(result))
        sys_exit(0)
# Script entry point.
if __name__ == '__main__':
    main()
|
992,362 | 4f246b7e3263063472565f06eb940556f037e3ab | # -*- encoding:utf-8 -*-
# ==============================================================================
# Filename: VWAPQuantAnalysisUnitTest.py
# Author: Xiaofu Huang
# E-mail: lanecatm@sjtu.edu.cn
# Last modified: 2016-11-04 16:00
# Description: VWAPQuantAnalysis单元测试
# ==============================================================================
import time, sys
import datetime
import numpy as np
import unittest
sys.path.append("../tool")
from Log import Log
from LinearVWAPQuantAnalysis import LinearVWAPQuantAnalysis
class repoTest:
    """Stub repository: returns a fixed 5x4 amount matrix and matching timestamps."""

    def __init__(self):
        self.log = Log()

    def get_amount(self, stockId, startDate, endDate, startTime, endTime):
        self.log.info("repo input: " + str(startDate) + " " + str(endDate) + " " + str(startTime) + " " + str(endTime))
        day = startDate.strftime("%Y-%m-%d")
        stamps = [day + " 10:00:00", day + " 10:01:00",
                  day + " 10:02:00", day + " 10:03:00"]
        timeArray = np.array([stamps] * 5)
        amounts = np.array([[1, 2, 3, 4],
                            [4, 3, 2, 1],
                            [0, 0, 0, 0],
                            [0, 0, 0, 0],
                            [0, 0, 0, 0]])
        return amounts, timeArray
class LinearVWAPQuantAnalysisUnitTest(unittest.TestCase):
    """Unit tests for LinearVWAPQuantAnalysis.get_recommend_order_weight.

    NOTE(review): Python 2 only — uses dict.iteritems(); switch to
    items() if the project migrates to Python 3.
    """

    def setUp(self):
        self.log = Log()

    def tearDown(self):
        pass

    def test_get_recommend_order_weight(self):
        # Drive the analysis with the stubbed repo defined above.
        repoEngine = repoTest()
        LinearVWAPAnalysis = LinearVWAPQuantAnalysis(repoEngine)
        startDate=datetime.datetime.strptime("2016-12-22 10:00:00" , "%Y-%m-%d %H:%M:%S")
        endDate = datetime.datetime.strptime("2016-12-23 09:40:00", "%Y-%m-%d %H:%M:%S")
        # window spanning into the next trading day -> 8 slots expected
        ansDict = LinearVWAPAnalysis.get_recommend_order_weight(600000, startDate, endDate, 5)
        self.assertEqual(len(ansDict), 8)
        # same-day 10-hour window -> 4 slots expected
        ansDict = LinearVWAPAnalysis.get_recommend_order_weight(600000, startDate, startDate + datetime.timedelta(hours = 10), 5)
        self.assertEqual(len(ansDict), 4)
        # sorted chronologically, the first slot's weight must be below the second's
        ansDictPair = sorted(ansDict.iteritems(), key=lambda keyValue: datetime.datetime.strptime(keyValue[0], "%Y-%m-%d %H:%M:%S"), reverse = False)
        self.log.info("ans:" + str(ansDictPair))
        self.assertTrue(ansDictPair[0][1] < ansDictPair[1][1])
        return
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
|
class MapRepository:
    """Owns the drone map object and loads its initial contents on construction."""

    # Default map loaded when the caller does not specify one; previously
    # this filename was hard-coded inside initializeMap().
    DEFAULT_MAP_FILE = "test1.map"

    def __init__(self, droneMap, mapFile=None):
        """Wrap *droneMap* and load *mapFile* (default: DEFAULT_MAP_FILE) into it."""
        self._map = droneMap
        self._mapFile = mapFile if mapFile is not None else self.DEFAULT_MAP_FILE
        self.initializeMap()

    def initializeMap(self):
        """Load the configured map file into the wrapped map object."""
        self._map.loadMap(self._mapFile)

    def getMap(self):
        """Return the wrapped map object."""
        return self._map
|
992,364 | 76dcff4e9197c12eb469cefc015a91ce40c49f32 | import numpy as np
import re
import sys
import os
import pickle
from sklearn.metrics.pairwise import cosine_similarity
import time
import io
from gensim import models
import threading
import Queue
'''
stopWord
stopWords read from file, because the sklearn one is not enough. Things added:
handled by NER
'january','february','march','april','june','july','august','september','october','november','december',
too harsh
"every", "never", "whenever", "wherever", "whatever", "whoever", "anyhow", "anyway", "anywhere", "any", "always"
neg
'no', 'not'
'''
# apostrophes, hyphens and double quotes (contraction/quote characters)
contractions = re.compile(r"'|-|\"")
# all non alphanumeric
symbols = re.compile(r'(\W+)', re.U)
# single character removal
singles = re.compile(r'(\s\S\s)', re.I|re.U)
# separators (any whitespace)
seps = re.compile(r'\s+')
# sentence-final punctuation marks (! or ?), captured for re-insertion
alteos = re.compile(r'([!\?])')
class relatedSnippetsExtractor(object):
"""docstring for ClassName"""
def __init__(self, overlapThreshold, glovePath=None, doc2vecPath=None):
self.overlapThreshold = overlapThreshold
self.stopWords = []
'''
try:
f = io.open("data/stopword.txt")
except FileNotFoundError:
f = io.open("../data/stopword.txt")
self.stopWords = f.readlines()
self.stopWords = [x.strip() for x in self.stopWords]
f.close()
'''
if glovePath is not None:
self.glove = pickle.load(io.open(glovePath, 'rb'))
self.doc2vec = None
if doc2vecPath is not None:
self.doc2vec = models.Doc2Vec.load(doc2vecPath)
self.glove = None
print ("overlapThreshold = %f" %self.overlapThreshold)
def extract(self, claim, article, label=None):
    """Select snippets of *article* whose embedding is similar to *claim*.

    Returns a 5-tuple (claimX, relatedSnippetsX, relatedSnippets,
    relatedSnippetLabels, similarity-score pairs), or five Nones when no
    snippet clears self.overlapThreshold.
    NOTE(review): relies on self._cleanText / self._embed, defined
    elsewhere in this class — embedding dimensionality not visible here.
    """
    #from sklearn.feature_extraction.text import CountVectorizer
    # empty string can be taken as all 0 vectors
    # using both uni- and bi-grams
    '''
    vectorizer = CountVectorizer(analyzer = "word", \
                                 preprocessor = None, \
                                 # watch out stop words, should not extract named entities!
                                 # possible number entities like sixty
                                 stop_words = 'english', \
                                 ngram_range=(1, 2))
                                 #max_features = 5000)
    '''
    # print (article)
    claim = self._cleanText(claim, minLen=0)
    claimX = self._embed(claim.split())
    claimX = claimX.reshape(1, claimX.size)
    #t1 = time.time()
    snippets, snippetsX = self._extractSnippets(article)
    #print (time.time() - t1)
    if (snippets == [] or snippetsX is None):
        return None, None, None, None, None
    # cosine similarity of the claim vector against every snippet vector
    similarityScore = cosine_similarity(claimX, snippetsX)[0]
    #del claimX
    #del snippetsX
    #print (similarityScore)
    if (np.count_nonzero(similarityScore) == 0):
        # bad and weird thing happens
        return None, None, None, None, None
    minSimilarityScore = np.max(similarityScore[np.nonzero(similarityScore)])
    if (minSimilarityScore < self.overlapThreshold):
        return None, None, None, None, None
    # print (minSimilarityScore)
    # indices of snippets that clear the threshold
    overlapIdx = np.where(similarityScore > self.overlapThreshold)[0]
    #print (overlapIdx)
    #snippets = np.array([[snippet] for snippet in snippets])
    #print (snippets.shape)
    # from vector back to sentence to later use them in the same feature space
    #print (snippets)
    relatedSnippets = np.array(snippets)[overlapIdx].tolist()
    #relatedSnippets = [' '.join(snippet) for snippet in np.array(snippets)[overlapIdx].tolist()]
    relatedSnippetsX = snippetsX[overlapIdx]
    del snippets
    # relatedSnippets = self._clean(relatedSnippets)
    relatedSnippetLabels = None
    if label is not None:
        # replicate the article-level label onto each retained snippet
        relatedSnippetLabels = [label for i in range(len(overlapIdx))]
    # return a list of related snippets (str)
    # corresponding to a claim and an article
    return claimX, relatedSnippetsX , relatedSnippets, relatedSnippetLabels, np.vstack((similarityScore[overlapIdx], similarityScore[overlapIdx])).T
    #print(relatedSnippets)
    #print(relatedSnippetLabels)
def _extractSnippets(self, article):
    """Chop *article* into snippets of NSS cleaned sentences each.

    Returns a pair (snippets, snippetsX):
      snippets  -- list of raw-text snippets (symbols space-padded), to be
                   reused later in the same feature space as the claim,
      snippetsX -- 2-D np.ndarray with one embedding row per snippet, or
                   None when no sentence survived cleaning.

    Cleanup: removed the large commented-out threading/map experiments and
    the unreachable block that followed the return statement; the live
    logic below is unchanged.
    """
    snippets = []
    snippetsX = None
    # Sentences per snippet. Original note: should be tuned for stance
    # classification, but the best distribution happened at 3 for the
    # Google-crawled data.
    NSS = 3
    # Pad the marks captured by `alteos` with ' . ' so the subsequent
    # split('.') treats them as sentence boundaries.
    articleSentences = alteos.sub(r' \1 .', article).rstrip("(\.)*\n").split('.')
    ctr = 0
    snippet = ''      # cleaned text, feeds the embedding
    rawSnippet = ''   # lightly normalized raw text, returned to the caller
    for sen in articleSentences:
        cleanedSen = self._cleanText(sen, 3)
        if cleanedSen is None:
            # sentence too short after cleaning
            continue
        if ctr < NSS:
            rawSnippet += (' ' + symbols.sub(r' \1 ', sen))
            snippet += (' ' + cleanedSen)
            ctr += 1
        if ctr == NSS:
            snippets.append(rawSnippet)
            if snippetsX is None:
                snippetsX = self._embed(snippet.split())
                snippetsX = snippetsX.reshape(1, snippetsX.size)
            else:
                snippetsX = np.vstack((snippetsX, self._embed(snippet.split())))
            ctr = 0
            snippet = ''
            rawSnippet = ''
    if ctr != 0:
        # flush the trailing partial snippet (< NSS sentences)
        snippets.append(rawSnippet)
        if snippetsX is None:
            snippetsX = self._embed(snippet.split())
            snippetsX = snippetsX.reshape(1, snippetsX.size)
        else:
            snippetsX = np.vstack((snippetsX, self._embed(snippet.split())))
    return snippets, snippetsX
def _extractVocab(self, claims, snippets, vectorizer):
result = {}
try:
raw1 = vectorizer.fit(claims).vocabulary_
raw2 = vectorizer.fit(snippets).vocabulary_
except ValueError:
return {}
# print (raw2)
nextValue = 0
for key, value in raw1.items():
if key not in result.keys():
result[key] = nextValue
nextValue += 1
# print (key)
for key, value in raw2.items():
if key not in result.keys():
result[key] = nextValue
nextValue += 1
# print (key)
return result
'''
def _clean(self, snippets):
import spacy
nlp = spacy.load('en')
# remove NE
cleanedSnippets = []
for s in snippets:
neToRemove = set()
#print (s)
# stToRemove = set()
doc = nlp(s)
for ent in doc.ents:
#print (ent.text)
neToRemove.add(ent.text)
neWordToRemove = set()
for word in neToRemove:
# cannot clean when left right space are other cases.
# s = s.replace(' '+word+' ', ' ')
if (len(word.split())>1):
s = s.replace(word, '')
s = s.replace(word+'\'s', '')
else:
neWordToRemove.add(word)
#for word in neWordToRemove:
#print (word)
#print(s)
sList = s.split(' ')
cleanedSList = []
# needs debug
for word in sList:
# deal with Obama's
if word.lower() not in self.stopWords \
and word.replace('\'s', '') not in neWordToRemove:
cleanedSList.append(word)
cleanedSnippet = ' '.join(cleanedSList)
# print (cleanedSnippet)
cleanedSnippets.append(cleanedSnippet)
return cleanedSnippets
'''
# Bag-of-words (tf-idf) features over the related snippets.
def extractFeatures(self, relatedSnippets, MIN_DF, MAX_DF):
    """Vectorize *relatedSnippets* with tf-idf uni-/bi-grams.

    MIN_DF / MAX_DF are the document-frequency cut-offs — the first real
    feature-engineering step (e.g. .005 keeps only terms appearing in at
    least that fraction of documents, roughly 486 docs in the original
    corpus). Empty strings simply become all-zero rows.

    Returns (dense feature matrix, list of feature names).
    """
    from sklearn.feature_extraction.text import TfidfVectorizer

    tfidf = TfidfVectorizer(
        analyzer="word",
        stop_words="english",
        tokenizer=None,
        preprocessor=None,
        min_df=MIN_DF,
        max_df=MAX_DF,
        ngram_range=(1, 2),
    )
    matrix = tfidf.fit_transform(relatedSnippets).toarray()
    names = tfidf.get_feature_names()
    assert matrix.shape[1] == len(names)
    return matrix, names
#relatedSnippetMarkNumberX = np.array(relatedSnippetMarkNumbers)
#np.save('relatedSnippetMarkNumberX', relatedSnippetMarkNumberX)
# print("relatedSnippetX dim and relatedSnippet_y dim: ")
# print(relatedSnippetX.shape, relatedSnippet_y.shape)
def _cleanText(self, text, minLen=3, queue=None):
    """Lower-case and regex-clean *text*; None when too short.

    Rejects inputs shorter than *minLen* words before cleaning and
    shorter than 3 words after cleaning. When *queue* is given the
    cleaned text is additionally pushed onto it (thread-pool usage).
    """
    if len(text.split()) < minLen:
        return None
    cleaned = text.lower()
    # Order matters: strip contractions before padding symbols, then
    # drop single characters and collapse separators.
    for pattern, repl in ((contractions, ''),
                          (symbols, r' \1 '),
                          (singles, ' '),
                          (seps, ' ')):
        cleaned = pattern.sub(repl, cleaned)
    if len(cleaned.split()) < 3:
        return None
    if queue is not None:
        queue.put(cleaned)
    return cleaned
# take in a list of words
def _embed(self, sentenceList, queue=None):
if self.glove is not None:
vec = np.zeros((1,200))
ctr = 0
for word in sentenceList:
if word in self.glove:
vec += self.glove[word]
ctr += 1
if ctr != 0:
return vec / ctr
else:
return vec
elif self.doc2vec is not None:
# defined by that paper
start_alpha=0.01
infer_epoch=100
# shape: (300,)
vec = self.doc2vec.infer_vector(sentenceList, alpha=start_alpha, steps=infer_epoch)
if queue is None:
return vec
else:
queue.put(vec)
else:
#BoW goes here!
pass
|
992,365 | 14239218848b9fc7e17e2444da99697ede89b91d | from eregex.test.data import code
# Dart test fixture: the lazySet() helper from flutter_widget_from_html,
# used as parser input by eregex tests. Content is verbatim Dart source.
lazy_set_func="""
NodeMetadata lazySet(
NodeMetadata meta, {
BuildOp buildOp,
Color color,
bool decoOver,
bool decoStrike,
bool decoUnder,
TextDecorationStyle decorationStyle,
CssBorderStyle decorationStyleFromCssBorderStyle,
String fontFamily = null,
String fontSize,
bool fontStyleItalic = false,
FontWeight fontWeight,
double size = 1.0,
bool isBlockElement,
bool isNotRenderable,
Iterable<BuildOp> parentOps,
Iterable<String> styles,
Iterable<String> stylesPrepend = null,
}) {
meta ??= NodeMetadata();
if (buildOp != null) {
meta._buildOps ??= [];
final ops = meta._buildOps as List<BuildOp>;
if (ops.indexOf(buildOp) == -1) {
ops.add(buildOp);
}
}
if (color != null) meta.color = color;
if (decoStrike != null) meta.decoStrike = decoStrike;
if (decoOver != null) meta.decoOver = decoOver;
if (decoUnder != null) meta.decoUnder = decoUnder;
if (decorationStyle != null) meta.decorationStyle = decorationStyle;
if (decorationStyleFromCssBorderStyle != null) {
switch (decorationStyleFromCssBorderStyle) {
case CssBorderStyle.dashed:
meta.decorationStyle = TextDecorationStyle.dashed;
break;
case CssBorderStyle.dotted:
meta.decorationStyle = TextDecorationStyle.dotted;
break;
case CssBorderStyle.double:
meta.decorationStyle = TextDecorationStyle.double;
break;
case CssBorderStyle.solid:
meta.decorationStyle = TextDecorationStyle.solid;
break;
}
}
if (fontFamily != null) meta.fontFamily = fontFamily;
if (fontSize != null) meta.fontSize = fontSize;
if (fontStyleItalic != null) meta.fontStyleItalic = fontStyleItalic;
if (fontWeight != null) meta.fontWeight = fontWeight;
if (isBlockElement != null) meta._isBlockElement = isBlockElement;
if (isNotRenderable != null) meta.isNotRenderable = isNotRenderable;
if (parentOps != null) {
assert(meta._parentOps == null);
meta._parentOps = parentOps;
}
if (stylesPrepend != null) {
styles = stylesPrepend;
}
if (styles != null) {
assert(styles.length % 2 == 0);
assert(!meta._stylesFrozen);
meta._styles ??= [];
if (styles == stylesPrepend) {
meta._styles.insertAll(0, styles);
} else {
meta._styles.addAll(styles);
}
}
return meta;
}
"""
# Dart test fixture: a class with a named-parameter constructor and
# initializer list — exercises constructor/arrow-function parsing.
build_op_class="""
class BuildOp {
final bool isBlockElement;
// op with lower priority will run first
final int priority;
final BuildOpDefaultStyles _defaultStyles;
final BuildOpOnChild _onChild;
final BuildOpOnPieces _onPieces;
final BuildOpOnWidgets _onWidgets;
BuildOp({
BuildOpDefaultStyles defaultStyles,
bool isBlockElement,
BuildOpOnChild onChild,
BuildOpOnPieces onPieces,
BuildOpOnWidgets onWidgets,
this.width,
this.size = 1.0,
this.offset = -5.0,
this.name = "FirstOp",
this.priority = 10,
}) : _defaultStyles = defaultStyles,
this.isBlockElement = isBlockElement ?? onWidgets != null,
_onChild = onChild,
_onPieces = onPieces,
_onWidgets = onWidgets;
bool get hasOnChild => _onChild != null;
List<String> defaultStyles(NodeMetadata meta, dom.Element e) =>
_defaultStyles != null ? _defaultStyles(meta, e) : null;
NodeMetadata onChild(NodeMetadata meta, dom.Element e) =>
_onChild != null ? _onChild(meta, e) : meta;
Iterable<BuiltPiece> onPieces(
NodeMetadata meta,
Iterable<BuiltPiece> pieces,
) =>
_onPieces != null ? _onPieces(meta, pieces) : pieces;
Iterable<Widget> onWidgets(NodeMetadata meta, Iterable<Widget> widgets) =>
(_onWidgets != null ? _onWidgets(meta, widgets) : null) ?? widgets;
}
"""
# Dart test fixture: a class with getters/setters and function-typed
# parameters — exercises get/set and callback-parameter parsing.
node_meta_data_class="""
class NodeMetadata {
Iterable<BuildOp> _buildOps;
dom.Element _domElement;
Iterable<BuildOp> _parentOps;
TextStyleBuilders _tsb;
Color color;
bool decoOver;
bool decoStrike;
bool decoUnder;
TextDecorationStyle decorationStyle;
String fontFamily;
String fontSize;
bool fontStyleItalic;
FontWeight fontWeight;
bool _isBlockElement;
bool isNotRenderable;
List<String> _styles;
bool _stylesFrozen = false;
dom.Element get domElement => _domElement;
bool get hasOps => _buildOps != null;
bool get hasParents => _parentOps != null;
Iterable<BuildOp> get ops => _buildOps;
Iterable<BuildOp> get parents => _parentOps;
TextStyleBuilders get tsb => _tsb;
set domElement(dom.Element e) {
assert(_domElement == null);
_domElement = e;
if (_buildOps != null) {
final ops = _buildOps as List;
ops.sort((a, b) => a.priority.compareTo(b.priority));
_buildOps = List.unmodifiable(ops);
}
}
set tsb(TextStyleBuilders tsb) {
assert(_tsb == null);
_tsb = tsb;
}
bool get isBlockElement {
if (_isBlockElement == true) return true;
return _buildOps?.where((o) => o.isBlockElement)?.length?.compareTo(0) == 1;
}
void styles(void f(String key, String value)) {
_stylesFrozen = true;
if (_styles == null) return;
final iterator = _styles.iterator;
while (iterator.moveNext()) {
final key = iterator.current;
if (!iterator.moveNext()) return;
f(key, iterator.current);
}
}
void f({void g(int a) = null, int b}) {
}
void h(int o(), {void g(int a) = null, int b}) {
}
}
"""
# Dart test fixture: class headers combining extends/implements/with and
# generic parameters — exercises class-declaration parsing.
classes="""
abstract class MyClass extends BaseClass {
}
class MySecondClass<T> extends BaseClass<T> {
}
class MyThirdClass<T,K> implements MyClass, MySecondClass<T>
with BaseClass {
}
abstract class MyFourthClass<T,K>
extends MyThirdClass<T,K>
implements MyClass, MySecondClass<T>
with BaseClass {
}
"""
|
992,366 | 04a2f4f12cc2bd23777b07a4274a8ba76a5437cb | #!/usr/bin/env python
# encoding: utf-8
# Copyright 2012 Herve BREDIN (bredin@limsi.fr)
# This file is part of PyAnnote.
#
# PyAnnote is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyAnnote is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyAnnote. If not, see <http://www.gnu.org/licenses/>.
import sklearn.metrics
from pyannote.base.association import Mapping, OneToOneMapping, NoMatch
def __get_labels_true_pred(hypothesis, reference=None):
    """Return (labels_true, labels_pred) for clustering metrics.

    *hypothesis* must be a Mapping; *reference* is an optional Mapping —
    when omitted, the hypothesis' own expected partition is used as truth.
    """
    if not isinstance(hypothesis, Mapping):
        raise TypeError('Hypothesis must be a Mapping, not %s.' % type(hypothesis).__name__)
    if reference and not isinstance(reference, Mapping):
        raise TypeError('Reference must be either None or a Mapping, not %s.' % type(reference).__name__)
    partition = hypothesis.to_partition()
    expected = (reference.to_partition() if reference
                else hypothesis.to_expected_partition())
    labels_true, labels_pred = [], []
    # Iterate the expected partition once so both label lists are aligned.
    for element in expected:
        labels_true.append(expected[element])
        labels_pred.append(partition[element])
    return labels_true, labels_pred
# -------------------------------------- #
# Many-to-many mapping evaluation metric #
# -------------------------------------- #
def homogeneity_completeness_v_measure(hypothesis, reference=None):
    """Return sklearn homogeneity/completeness/V-measure of a mapping as a dict."""
    true_labels, pred_labels = __get_labels_true_pred(hypothesis, reference=reference)
    scores = sklearn.metrics.homogeneity_completeness_v_measure(true_labels, pred_labels)
    return dict(zip(('homogeneity', 'completeness', 'v_measure'), scores))
def homogeneity(hypothesis, reference=None):
    """Homogeneity score of *hypothesis* against *reference*."""
    scores = homogeneity_completeness_v_measure(hypothesis, reference=reference)
    return scores['homogeneity']
def completeness(hypothesis, reference=None):
    """Completeness score of *hypothesis* against *reference*."""
    scores = homogeneity_completeness_v_measure(hypothesis, reference=reference)
    return scores['completeness']
def v_measure(hypothesis, reference=None):
    """V-measure score of *hypothesis* against *reference*."""
    scores = homogeneity_completeness_v_measure(hypothesis, reference=reference)
    return scores['v_measure']
def adjusted_rand_index(hypothesis, reference=None):
    """Adjusted Rand index of the mapping's partition vs the expected one."""
    true_labels, pred_labels = __get_labels_true_pred(hypothesis, reference=reference)
    return sklearn.metrics.adjusted_rand_score(true_labels, pred_labels)
def adjusted_mutual_info(hypothesis, reference=None):
    """Adjusted mutual information of the mapping's partition vs the expected one."""
    true_labels, pred_labels = __get_labels_true_pred(hypothesis, reference=reference)
    return sklearn.metrics.adjusted_mutual_info_score(true_labels, pred_labels)
# ------------------------------------ #
# One-to-one mapping evaluation metric #
# ------------------------------------ #
def __one_way_accuracy(proposed, expected):
    """Tally one direction of a one-to-one mapping against the expected one.

    Only elements whose key is a real label (not NoMatch) are counted.
    Per element: 'correct' when the proposal matches the expectation
    (including matching NoMatch), 'false alarm' when NoMatch was proposed
    but a label was expected, 'error' otherwise.
    """
    elements = [e for e in expected if not isinstance(e, NoMatch)]
    counts = {'total': len(elements),
              'correct': 0,
              'false alarm': 0,
              'error': 0}
    for e in elements:
        if isinstance(proposed[e], NoMatch):
            key = 'correct' if isinstance(expected[e], NoMatch) else 'false alarm'
        else:
            key = 'correct' if proposed[e] == expected[e] else 'error'
        counts[key] += 1
    return counts
def __get_dict_proposed_expected(hypothesis, reference=None, reverse=False):
    """Return the (proposed, expected) single-valued dicts of a mapping.

    *hypothesis* must be a OneToOneMapping; with no *reference* the
    hypothesis' own expected dict serves as ground truth. *reverse*
    flips the mapping direction.
    """
    if not isinstance(hypothesis, OneToOneMapping):
        raise TypeError('Hypothesis must be a OneToOneMapping, not %s.' %
                        type(hypothesis).__name__)
    if reference and not isinstance(reference, OneToOneMapping):
        # fix: original concatenation dropped the space before "not"
        raise TypeError('Reference must be either None or a OneToOneMapping, '
                        'not %s.' % type(reference).__name__)
    proposed = hypothesis.to_dict(reverse=reverse, single=True)
    if reference:
        expected = reference.to_dict(reverse=reverse, single=True)
    else:
        expected = hypothesis.to_expected_dict(reverse=reverse)
    return proposed, expected
def accuracy(hypothesis, reference=None, detailed=False):
    """Accuracy of a one-to-one mapping, averaged over both directions.

    Returns the correct-rate in [0, 1] (1.0 for an empty mapping), or —
    when *detailed* is true — a dict with 'error rate', 'confusion',
    'false alarm' and 'total'.
    """
    per_direction = []
    for reverse in (False, True):
        proposed, expected = __get_dict_proposed_expected(hypothesis,
                                                          reference=reference,
                                                          reverse=reverse)
        per_direction.append(__one_way_accuracy(proposed, expected))
    l2r, r2l = per_direction
    total = l2r['total'] + r2l['total']
    rate = 1. if total == 0 else 1. * (l2r['correct'] + r2l['correct']) / total
    if not detailed:
        return rate
    return {'error rate': 1. - rate,
            'confusion': l2r['error'] + r2l['error'],
            'false alarm': l2r['false alarm'] + r2l['false alarm'],
            }
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
992,367 | f1415f0dc433b222ae31b641d91d89e7e9dcfbf8 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 21:34:08 2020
@author: Moritz
"""
"""Fig 6D
This script reproduces the plots seen in Fig 6D of "The biophysical basis underlying the maintenance of early phase long-term potentiation".
This script requires that `numpy`,`matplotlib` and `seaborn` are installed within the Python environment you are running this script in.
"""
import sys
sys.path.append('../')
import numpy as np
import matplotlib.pyplot as plt
from ampartrafficking.frap import FRAP
import seaborn as sns
sns.set_style("ticks")
from matplotlib.font_manager import FontProperties
fontLgd = FontProperties()
fontLgd.set_size('small')
from read_roi import read_roi_zip
#%%
# Digitize published FRAP curves from ImageJ ROI archives: the axis ROIs
# give the pixel->data-unit scale, then the data-point ROI is converted.
# Constant offsets (-1.5, +0.1, -3.5) are manual alignment corrections.
roi = read_roi_zip('Data\\Tatavarty2013_FRAP_Fig3D_RoiSet.zip')
xAxis=80/(roi['xAxis']['x2']-roi['xAxis']['x1'])
yAxis=100/(roi['yAxis']['y2']-roi['yAxis']['y1'])
dataPoints_x=((np.array(roi['dataPoints']['x'])-roi['yAxis']['x1'])*xAxis)-1.5
dataPoints_y=((np.array(roi['dataPoints']['y'])-roi['yAxis']['y1'])*yAxis)
# Same procedure for the Makino 2009 GluR1 FRAP panel.
roi_M = read_roi_zip('Data\\Makino2009_FRAP-GluR1_Fig1D_RoiSet.zip')
xAxis=30/(roi_M['xAxis']['x2']-roi_M['xAxis']['x1'])
yAxis=120/(roi_M['yAxis']['y2']-roi_M['yAxis']['y1'])
dataPoints_x_M=((np.array(roi_M['dataPoints']['x'])-roi_M['yAxis']['x1'])*xAxis)+0.1
dataPoints_y_M=((np.array(roi_M['dataPoints']['y'])-roi_M['yAxis']['y1'])*yAxis)-3.5
#%%
#Cooperative binding; FRAP dependence on mobile receptor concentration U/Aspine:
SaveFig=0#1
duration=15000#Duration in s
Nr_Trials=100              # number of stochastic trials averaged
N_List=[12]#[9]            # PSD grid side length(s); P = N**2 slots
UFP_List=[10,30,60]        # mobile receptor pool sizes to compare
beta=1                     # cooperativity parameters of the binding model
alpha=16
kUB=0.0005                 # unbound -> bound rate
kBU=0.1                    # bound -> unbound rate
A_spine_basal=0.898        # basal spine area (um^2) for a 70-slot PSD
B_N, U_N, B_notBleached_N, U_notBleached_N, PSD, Time=FRAP(N_List, UFP_List, beta, alpha, kUB, kBU, duration, Nr_Trials)
# quick visual sanity-check of the final PSD occupancy
plt.figure()
plt.imshow(PSD)
plt.colorbar()
#%%
# FRAP recovery curves (not-bleached fraction) vs. the digitized data.
col=sns.color_palette('colorblind')[3::]
fig=plt.figure(figsize=(3.5,2), dpi=150)
for i,N in enumerate(N_List):
    A_spine=N**2/70*A_spine_basal
    for j,UFP_0 in enumerate(UFP_List):
        # steady-state means over the post-bleach half of the trace
        BMean=np.mean(np.mean((B_N[i][j]+B_notBleached_N[i][j]), axis=0)[int(len(Time)/2)::])
        UMean=np.mean(np.mean((U_N[i][j]+U_notBleached_N[i][j]), axis=0)[int(len(Time)/2)::])
        print(BMean)
        print(UMean)
        # recovery (%): not-bleached receptors normalized to steady state
        Y=(np.mean(B_notBleached_N[i][j], axis=0)+np.mean(U_notBleached_N[i][j], axis=0))/(BMean+UMean)*100
        if j==0 and i==0:
            plt.plot(Time-duration/2/60,Y, color=col[j], linewidth=2, label=r'$\langle B \rangle={:1.0f}$, '.format(BMean)+r'$\langle U/A_{spine} \rangle$='+r'{0:1.1f} $\#/ \mu m^2$'.format(UMean/A_spine))
        else:
            plt.plot(Time-duration/2/60,Y, color=col[j], linewidth=2, label=r'$={:1.0f}$, '.format(BMean)+r'$=$'+r'{0:1.1f} $\#/ \mu m^2$'.format(UMean/A_spine))
# NOTE(review): source indentation was lost; plt.text may belong inside the
# outer loop — with a single entry in N_List the output is identical.
plt.text(0.3,0.55,r'P={0:1.0f}'.format(N**2), transform=fig.axes[0].transAxes)
sns.lineplot(dataPoints_x,dataPoints_y, color='k', alpha=0.6, linewidth=2, marker='s', label='Tatavarty 2013')
#sns.lineplot(dataPoints_x_M,dataPoints_y_M, color='k', marker='d', label='Makino 2009')
plt.axhline(100, color='k', zorder=0)
plt.xlim(0,35)
plt.ylim(0,120)
plt.xlabel('Time (min)')
plt.ylabel('AMPAR \n Recovery (%)')
plt.legend(ncol=1, prop=fontLgd)
sns.despine()
fig.tight_layout()
if SaveFig==1:
    print('Save')
    fig.savefig('Figures\\Fig6D_bottomLeft.png', bbox_inches="tight", dpi=400)
    fig.savefig('Figures\\Fig6D_bottomLeft.svg', bbox_inches="tight", dpi=400)
#%%
# Control figure: bleached + not-bleached together should stay at ~100%
# (mass conservation check of the simulation).
fig=plt.figure(figsize=(3.5,2), dpi=150)
for i,N in enumerate(N_List):
    A_spine=N**2/70*A_spine_basal
    for j,UFP_0 in enumerate(UFP_List):
        BMean=np.mean(np.mean((B_N[i][j]+B_notBleached_N[i][j]), axis=0)[int(len(Time)/2)::])
        UMean=np.mean(np.mean((U_N[i][j]+U_notBleached_N[i][j]), axis=0)[int(len(Time)/2)::])
        print(BMean)
        print(UMean)
        # not-bleached and bleached fractions, each normalized to steady state
        Y=(np.mean(B_notBleached_N[i][j], axis=0)+np.mean(U_notBleached_N[i][j], axis=0))/(BMean+UMean)*100
        Y2=(np.mean(B_N[i][j], axis=0)+np.mean(U_N[i][j], axis=0))/(BMean+UMean)*100
        if j==0 and i==0:
            plt.plot(Time-duration/2/60,Y+Y2, color=col[j], linewidth=2, label=r'$\langle B \rangle={:1.0f}$, '.format(BMean)+r'$\langle U/A_{spine} \rangle$='+r'{0:1.1f} $\#/ \mu m^2$'.format(UMean/A_spine))
        else:
            plt.plot(Time-duration/2/60,Y+Y2, color=col[j], linewidth=2, label=r'$={:1.0f}$, '.format(BMean)+r'$=$'+r'{0:1.1f} $\#/ \mu m^2$'.format(UMean/A_spine))
# NOTE(review): indentation lost in this copy; see the first figure block.
plt.text(0.3,0.55,r'P={0:1.0f}'.format(N**2), transform=fig.axes[0].transAxes)
plt.axhline(100, color='k', zorder=0)
# plt.xlim(0,35)
plt.ylim(0,120)
plt.xlabel('Time (min)')
plt.ylabel('AMPAR \n bleached+not bleached (%)')
plt.legend(ncol=1, prop=fontLgd)
sns.despine()
fig.tight_layout()
992,368 | c3b33bb54f03adddfaa1530d7f49c69acd364d83 | """
All data in a Python program is
represented by objects (every type is a class)."""
things = [1, 0.2, "hi", (1, "a"), {1: 4}]
# Example of str.format: right-align the repr in an 8-character field.
for thing in things:
    print("{:>8} is: {}".format(repr(thing), type(thing)))
print("{} is: {}".format(repr(things), type(things)))
|
992,369 | de0b0ce968fb2e84e8bba99f8bfb2c4cba1af38d | import logging
import json
import re
from datetime import datetime, timedelta
import time
from . import (CHANNEL_KITCHEN, BUTTON_STOP, CHANNEL_BIG_WINDOWS, BUTTON_DOWN,
BUTTON_UP)
import simu
LOGFILE = '/var/log/simu.log'
DARKNESS_JSON = '/home/mmrazik/weather/data/live_darkness_json.txt'
TIME_THRESHOLD = timedelta(seconds=30 * 60)
DARKNESS_THRESHOLD_DOWN = 60.0
DARKNESS_THRESHOLD_UP = 60.0
DARKNESS_THRESHOLDS = {
'kitchen': {
'threshold_up': 200.0,
'threshold_down': 200.0,
'channel': CHANNEL_KITCHEN,
'operation_down': BUTTON_STOP,
},
# only in winter
# 'big_windows': {
# 'threshold_up': 330.0,
# 'threshold_down': 330.0,
# 'channel': CHANNEL_BIG_WINDOWS,
# 'operation_down': BUTTON_DOWN,
# },
}
logging.basicConfig(filename=LOGFILE,
format='%(levelname)s:%(asctime)s:%(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)
def get_last_action(channel):
    """Scan LOGFILE backwards for the most recent up/down action on *channel*.

    Returns (time_since_event, 'up'|'down'), or (None, None) when the log
    cannot be read or contains no matching INFO entry.

    Fixes: the log file handle is now closed (with-statement) and the
    regular expression is built once instead of per log line.
    """
    try:
        with open(LOGFILE, 'r') as log:
            lines = log.readlines()
    except IOError as error:
        logging.error(error)
        return None, None
    regexp = '^INFO:(.*):%s (up|down)' % channel
    # newest entries are at the end of the file
    for line in reversed(lines):
        match = re.search(regexp, line)
        if match:
            event_time = datetime.strptime(match.group(1), '%Y-%m-%d %H:%M:%S')
            return datetime.now() - event_time, match.group(2)
    return None, None
def get_current_darkness():
    """Read the latest darkness value from DARKNESS_JSON.

    Returns the 'Darkness' field as float, or None when the file cannot
    be read. Fix: the file handle is now closed (with-statement).
    """
    try:
        with open(DARKNESS_JSON, 'r') as source:
            raw_data = source.read()
    except IOError as detail:
        logging.error("Unable to open darkness file: %s" % detail)
        return None
    data = json.loads(raw_data)
    return float(data['Darkness'])
def check_status():
    """Drive each configured channel up/down based on current darkness.

    Returns False when no darkness reading is available; otherwise walks
    DARKNESS_THRESHOLDS and issues at most one operation per channel,
    rate-limited by TIME_THRESHOLD and de-duplicated against the last
    logged action.
    """
    darkness = get_current_darkness()
    logging.debug('Current darkness: %s' % darkness)
    if darkness is None:
        logging.error("Unable to acquire darkness. Giving up.")
        return False
    for channel_name in DARKNESS_THRESHOLDS:
        time_delta, event_type = get_last_action(channel_name)
        if time_delta is None:
            # no logged action: pretend the threshold has just elapsed
            logging.debug('No timedelta. Ignoring it.')
            time_delta = TIME_THRESHOLD + timedelta(0, 1)
        if time_delta > timedelta(days=2):
            # stale log entry: do not let it suppress a fresh action
            logging.debug(
                'Last time_delta is too large. Ignoring last action.')
            event_type = 'ignore_last_event_from_logs'
        channel_config = DARKNESS_THRESHOLDS[channel_name]
        if darkness > channel_config['threshold_down']:
            # dark enough: move down unless we already did recently
            if time_delta > TIME_THRESHOLD and event_type != 'down':
                logging.info(
                    '%s down (darkness %s)' % (channel_name, darkness))
                simu.channel_operation(channel_config['channel'],
                                       channel_config['operation_down'])
        elif darkness < channel_config['threshold_up']:
            # bright enough: move up (always BUTTON_UP; no per-channel op)
            if time_delta > TIME_THRESHOLD and event_type != 'up':
                logging.info(
                    '%s up (darkness %s)' % (channel_name, darkness))
                simu.channel_operation(channel_config['channel'],
                                       BUTTON_UP)
        # brief pause between channels
        # NOTE(review): source indentation was lost; this sleep might
        # originally sit after the loop — confirm against history.
        time.sleep(0.5)
992,370 | 28e821759b6e49dee1c0f2048beae00aeb15bdef | import re
# Build a one-file, human-readable QC summary from the pipeline stage outputs.
input = snakemake.input  # (shadows the builtin, kept for snakemake convention)


def _read_int(path):
    """Parse a whole text file as a single integer (closes the handle)."""
    with open(path) as handle:
        return int(handle.read())


with open(snakemake.output[0], "w") as out:
    out.write("Barcode matching: ")
    with open(input.barcode_matching, "r") as barcode_file:
        # readline() keeps the stats file's trailing newline
        out.write(barcode_file.readline())

    with open(input.adapter_stats) as stats_file:
        adapter_stats = stats_file.read()
    # the trimmer counts individual reads; // 2 converts to read pairs
    adapter_pass = int(re.search(r"reads passed filter: (\d+)", adapter_stats).group(1)) // 2
    adapter_trimmed = int(re.search(r"reads with adapter trimmed: (\d+)", adapter_stats).group(1)) // 2
    adapter_percent = adapter_trimmed / adapter_pass * 100
    out.write(f"Adapter trimming: {adapter_trimmed}/{adapter_pass} reads trimmed ({adapter_percent:.2f}%)\n")

    total_align = _read_int(input.total_align)
    align_percent = total_align / adapter_pass * 100
    out.write(f"Total high-quality alignments: {total_align}/{adapter_pass} ({align_percent:.2f}%)\n")

    unique_align = _read_int(input.unique_align)
    unique_percent = unique_align / total_align * 100
    out.write(f"Total unique fragments: {unique_align}/{total_align} ({unique_percent:.2f}%)\n")

    chrM_count = unique_align - _read_int(input.no_mito)
    chrM_percent = chrM_count / unique_align * 100
    out.write(f"Mitochondrial fragments: {chrM_count}/{unique_align} ({chrM_percent:.2f}%)\n")

    # TotalReadPairs\tDistinctReadPairs\tOneReadPair\tTwoReadPairs\tNRF=Distinct/Total\tPBC1=OnePair/Distinct\tPBC2=OnePair/TwoPair
    with open(input.duplicate_stats) as dup_file:
        total_reads, unique_reads, _, _, NRF, PCB1, PCB2 = dup_file.readlines()[1].strip().split("\t")
    total_reads = int(total_reads)
    unique_reads = int(unique_reads)

    blacklist_reads = unique_align - chrM_count - unique_reads
    # BUG FIX: was missing "* 100" — the report printed a fraction with a
    # '%' suffix, unlike every other percentage above.
    blacklist_percent = blacklist_reads / (unique_align - chrM_count) * 100
    out.write(f"Blacklist fragments: {blacklist_reads}/{unique_align - chrM_count} ({blacklist_percent:.2f}%)\n")
    out.write(f"PCR bottleneck coefficients: PCB1 {float(PCB1):.2f}, PCB2 {float(PCB2):.2f}\n")
992,371 | b238c115f3efdb8e6efa02cc78d7bfe5722b01b1 | from schema import Schema, And
import time
import re
from asylum.web.core import names
def validate(schema, json):
    """Return True iff *json* passes *schema*.validate().

    Any exception from validation counts as failure. Fix: narrowed the
    original bare ``except:`` to ``except Exception`` so SystemExit and
    KeyboardInterrupt are no longer swallowed.
    """
    try:
        schema.validate(json)
        return True
    except Exception:
        return False
def validate_register(json):
    """Validate a registration payload (login/name/password/role).

    Returns True only when the schema passes and the two password fields
    match. Fixes: bare ``except:`` narrowed to ``except Exception``;
    ``if not a == b: return False / return True`` collapsed to a direct
    comparison.
    """
    try:
        Schema(
            {
                'login': lambda x: re.match('^[a-zA-Z0-9]{3,20}$', x) is not None,
                'name': lambda x: re.match('^[a-zA-Z0-9]{3,20}$', x) is not None,
                # at least 8 non-space chars with a letter and a digit
                'password': lambda x: re.match('^(?=.*[A-Za-z])(?=.*\d)[\S]{8,}$', x) is not None,
                'repassword': str,
                'role': lambda x: x in ['admin', 'user', 'guest']
            }
        ).validate(json)
        # both password fields must match exactly
        return json['password'] == json['repassword']
    except Exception:
        return False
# Schedule one-shot blinds tasks: device list, future unix timestamp, action.
add_blinds_tasks_schema = Schema(
    {
        'devices_ids': And(lambda x: len(x) > 0,
                           [
                               And(int, lambda x: names.devices.get(x))
                           ]),
        'unix_time': And(int, lambda x: x > time.time()),
        'action_id': And(int, lambda x: names.actions.get(x))
    }
)
# Recurring schedule relative to a named hour type (e.g. sunrise) + offset.
add_blinds_schedule_schema = Schema(
    {
        'devices_ids': And(lambda x: len(x) > 0,
                           [
                               And(int, lambda x: names.devices.get(x))
                           ]),
        'action_id': And(int, lambda x: names.actions.get(x)),
        'hour_type': And(int, lambda x: names.hour_types.get(x)),
        'time_offset': int
    }
)
delete_task_schema = Schema(
    {
        'task_id': int
    }
)
delete_schedule_schema = Schema(
    {
        'schedule_id': int
    }
)
# Immediate blinds action.
# NOTE(review): key is 'device_ids' here but 'devices_ids' in the two
# schemas above — confirm which spelling the clients actually send.
blind_instant_action_schema = Schema(
    {
        'device_ids': And(lambda x: len(x) > 0,
                          [
                              And(int, lambda x: names.devices.get(x))
                          ]),
        'action_id': And(int, lambda x: names.actions.get(x)),
    }
)
login_schema = Schema(
    {
        'login': str,
        'password': str
    }
)
# MAC address in colon-separated form, e.g. 'aa:bb:cc:dd:ee:ff'.
add_mac_address_schema = Schema(
    {
        'mac_address': lambda x: re.match('^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})$', x) is not None
    }
)
delete_mac_address_schema = Schema(
    {
        'mac_address_id': int
    }
)
992,372 | 400b77f527944825dcf6a8fcf885d8abbf4e8c79 | #coding:utf-8
import time
# Read a text file.
def fileRead(filename):
    """Return the entire contents of *filename* as one string."""
    with open(filename, "r") as source:
        return source.read()
# Save content to a file, printing start/end timestamps.
def fileSave(filename, content):
    """Overwrite *filename* with *content*, logging progress to stdout."""
    with open(filename, "w") as f:
        startTime = time.time()
        print("开始写内容到%s文件中!--start:%s" % (filename, startTime))
        f.write(content)
        endTime = time.time()
        print("文件写入结束!---End:%s" % endTime)
        # fix: removed the redundant f.close() — the with-block already
        # closes the handle on exit.
if __name__ == "__main__":
    # Load the raw call-duration export: pipe-delimited lines, one call
    # record each (field 0 = phone number, field 3 = date).
    content = fileRead("139邮箱-通话时长查询aspire_14113.txt")
    listDate_Telephone = []
    listDate = []
    listTelephone = []
    listTelephoneTimes = {}
    print(1,time.time())
    # drop empty lines
    listContent = [x for x in content.split("\n") if x != ""]
    print(2,time.time())
    totle = len(listContent)
    for count,line in enumerate(listContent):
        if count % 100000 == 0:
            # progress in percent
            print("进度%.5f%%" %(count / totle * 100))
        listline = line.split('|')
        listDate.append(listline[3])
        listTelephone.append(listline[0])
        listDate_Telephone.append (listline[3] + '_' + listline[0])
    # de-duplicate (date, phone) pairs, then keep just the phone part
    listDate_Telephone = list(set(listDate_Telephone))
    listTotle = [x.split('_')[1] for x in listDate_Telephone]
    listDate = list(set(listDate))
    listTelephone = list(set(listTelephone))
    strTotle = '\n'.join(listTotle)
    strDate = '\n'.join(listDate)
    strTelephone = '\n'.join(listTelephone)
    fileSave("Totle.txt",strTotle)
    fileSave("Date.txt",strDate)
    fileSave("Telephone.txt",strTelephone)
    print("len(listTelephone) = %s\nlen(listDate) = %s\nlen(listDate_Telephone) = %s\nlen(listContent) = %s" %(len(listTelephone),len(listDate),len(listDate_Telephone),len(listContent)))
    # Disabled experiment: count per-phone occurrences via substring search
    # and write numbers seen on >= 3 distinct days (kept for reference).
    # string = ""
    # init = time.time()
    # totle = len(listDate_Telephone)
    # for k,v in enumerate(listDate_Telephone):
    # string = string + v
    # print("拼接-进度%.5f%%" %(k / totle * 100))
    # print("拼接-进度%.5f%%" %(k / totle * 100))
    # if k % 100000 == 0:
    # end = time.time()
    # print("拼接字符串完成!",end - init)
    # totle_1 = len(listTelephone)
    # for k,telephone in enumerate(listTelephone):
    # listTelephoneTimes[telephone] = string.count(telephone)
    # if k % 100000 == 0:
    # print("查找-进度%.5f%%" %(k / totle_1 * 100))
    # listTimesOver3 = []
    # e2nd = time.time()
    # print(3,e2nd - end)
    # # print(listTelephoneTimes)
    # # print(listTimesOver2)
    # for k,v in listTelephoneTimes.items():
    # if v >= 3:
    # listTimesOver3.append(k)
    # stringUnion = ''
    # for x in listTimesOver3:
    # stringUnion = x + "\n" + stringUnion
    # print(stringUnion)
    # fileSave("Over3.txt",stringUnion)
|
992,373 | 8bcf20536a6b3acad3df09191d422d2ab28e020f | fullBat = int(input("輸入一數字:"))
# NOTE(review): looks like the classic "3 empty batteries -> 1 fresh"
# exchange puzzle for the fullBat read above, but only two exchange
# rounds are computed instead of looping until fewer than 3 empties
# remain — confirm the intended semantics before relying on `total`.
recoverBat = 0
emptyBat = 0
# c pads fullBat up to the next multiple of 3
c = 3-fullBat%3
if fullBat %3 !=0:
    recoverBat = (fullBat+c)//3 #16//3 =5
    emptyBat = fullBat%3 #16%3 = 1
else:
    # when divisible by 3 no padding is applied (c would be 3 here)
    recoverBat = fullBat//3 #16//3 =5
    emptyBat = fullBat%3 #16%3 = 1
b = recoverBat + emptyBat
a = 0
if b >=3:
    # second (and final) exchange round
    a = b//3
total = fullBat + recoverBat +a
print(total)
|
992,374 | 8342388820009783a92394d2da383a64bae19aa3 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.incron as incron
@skipIf(NO_MOCK, NO_MOCK_REASON)
class IncronTestCase(TestCase, LoaderModuleMockMixin):
'''
Test cases for salt.modules.incron
'''
def setup_loader_modules(self):
    # No loader dunders need pre-seeding; tests patch __salt__/__grains__
    # themselves as required.
    return {incron: {}}
# 'write_incron_file' function tests: 1
def test_write_incron_file(self):
    '''
    Test if it writes the contents of a file to a user's crontab
    '''
    # cmd.retcode == 0 means the incrontab call succeeded
    mock = MagicMock(return_value=0)
    with patch.dict(incron.__salt__, {'cmd.retcode': mock}), \
            patch('salt.modules.incron._get_incron_cmdstr',
                  MagicMock(return_value='incrontab')):
        self.assertTrue(incron.write_incron_file('cybage',
                                                 '/home/cybage/new_cron'))
# 'write_cron_file_verbose' function tests: 1
def test_write_cron_file_verbose(self):
    '''
    Test if it writes the contents of a file to a user's crontab and
    return error message on error
    '''
    # verbose variant returns the whole cmd.run_all result
    mock = MagicMock(return_value=True)
    with patch.dict(incron.__salt__, {'cmd.run_all': mock}), \
            patch('salt.modules.incron._get_incron_cmdstr',
                  MagicMock(return_value='incrontab')):
        self.assertTrue(incron.write_incron_file_verbose
                        ('cybage', '/home/cybage/new_cron'))
# 'raw_system_incron' function tests: 1
def test_raw_system_incron(self):
    '''
    Test if it return the contents of the system wide incrontab
    '''
    # the system table is read straight from a file, no incrontab call
    with patch('salt.modules.incron._read_file',
               MagicMock(return_value='salt')):
        self.assertEqual(incron.raw_system_incron(), 'salt')
# 'raw_incron' function tests: 1
def test_raw_incron(self):
    '''
    Test if it return the contents of the user's incrontab
    '''
    # os_family grain drives the incrontab command construction
    mock = MagicMock(return_value='incrontab')
    with patch.dict(incron.__grains__, {'os_family': mock}):
        mock = MagicMock(return_value='salt')
        with patch.dict(incron.__salt__, {'cmd.run_stdout': mock}):
            self.assertEqual(incron.raw_incron('cybage'), 'salt')
# 'list_tab' function tests: 1
def test_list_tab(self):
    '''
    Test if it return the contents of the specified user's incrontab
    '''
    mock = MagicMock(return_value='incrontab')
    with patch.dict(incron.__grains__, {'os_family': mock}):
        mock = MagicMock(return_value='salt')
        with patch.dict(incron.__salt__, {'cmd.run_stdout': mock}):
            # a non-cron line lands in 'pre', no parsed cron entries
            self.assertDictEqual(incron.list_tab('cybage'),
                                 {'pre': ['salt'], 'crons': []})
# 'set_job' function tests: 1

def test_set_job(self):
    '''
    Test if it sets a cron job up for a specified user.
    '''
    # An unknown mask name is rejected up front.
    self.assertEqual(incron.set_job('cybage', '/home/cybage', 'TO_MODIFY',
                                    'echo "$$ $@ $# $% $&"'),
                     'Invalid mask type: TO_MODIFY')

    # Identical path/mask/cmd already in the tab -> 'present'.
    val = {'pre': [], 'crons': [{'path': '/home/cybage',
                                 'mask': 'IN_MODIFY',
                                 'cmd': 'echo "SALT"', 'comment': ''}]}
    with patch.object(incron, 'list_tab',
                      MagicMock(return_value=val)):
        self.assertEqual(incron.set_job('cybage', '/home/cybage',
                                        'IN_MODIFY',
                                        'echo "SALT"'), 'present')

    # Write failure (truthy retcode) surfaces the stderr message.
    with patch.object(incron, 'list_tab',
                      MagicMock(return_value={'pre': ['salt'],
                                              'crons': []})):
        mock = MagicMock(return_value='incrontab')
        with patch.dict(incron.__grains__, {'os_family': mock}):
            with patch.object(incron, '_write_incron_lines',
                              MagicMock(return_value={'retcode': True,
                                                      'stderr': 'error'})):
                self.assertEqual(incron.set_job('cybage', '/home/cybage',
                                                'IN_MODIFY',
                                                'echo "SALT"'), 'error')

    # Successful write of a previously absent entry -> 'new'.
    with patch.object(incron, 'list_tab',
                      MagicMock(return_value={'pre': ['salt'],
                                              'crons': []})):
        mock = MagicMock(return_value='incrontab')
        with patch.dict(incron.__grains__, {'os_family': mock}):
            with patch.object(incron, '_write_incron_lines',
                              MagicMock(return_value={'retcode': False,
                                                      'stderr': 'error'})):
                self.assertEqual(incron.set_job('cybage', '/home/cybage',
                                                'IN_MODIFY',
                                                'echo "SALT"'), 'new')

    # Entry exists but with a different mask set -> 'updated'.
    val = {'pre': [], 'crons': [{'path': '/home/cybage',
                                 'mask': 'IN_MODIFY,IN_DELETE',
                                 'cmd': 'echo "SALT"', 'comment': ''}]}
    with patch.object(incron, 'list_tab',
                      MagicMock(return_value=val)):
        mock = MagicMock(return_value='incrontab')
        with patch.dict(incron.__grains__, {'os_family': mock}):
            with patch.object(incron, '_write_incron_lines',
                              MagicMock(return_value={'retcode': False,
                                                      'stderr': 'error'})):
                self.assertEqual(incron.set_job('cybage', '/home/cybage',
                                                'IN_DELETE',
                                                'echo "SALT"'), 'updated')
# 'rm_job' function tests: 1

def test_rm_job(self):
    '''
    Test if it remove a cron job for a specified user. If any of the
    day/time params are specified, the job will only be removed if
    the specified params match.
    '''
    # An unknown mask name is rejected up front.
    self.assertEqual(incron.rm_job('cybage', '/home/cybage', 'TO_MODIFY',
                                   'echo "$$ $@ $# $% $&"'),
                     'Invalid mask type: TO_MODIFY')

    # Write failure (truthy retcode) surfaces the stderr message.
    with patch.object(incron, 'list_tab',
                      MagicMock(return_value={'pre': ['salt'],
                                              'crons': []})):
        mock = MagicMock(return_value='incrontab')
        with patch.dict(incron.__grains__, {'os_family': mock}):
            with patch.object(incron, '_write_incron_lines',
                              MagicMock(return_value={'retcode': True,
                                                      'stderr': 'error'})):
                self.assertEqual(incron.rm_job('cybage', '/home/cybage',
                                               'IN_MODIFY',
                                               'echo "SALT"'), 'error')

    # Removing an entry that isn't in the tab -> 'absent'.
    with patch.object(incron, 'list_tab',
                      MagicMock(return_value={'pre': ['salt'],
                                              'crons': []})):
        mock = MagicMock(return_value='incrontab')
        with patch.dict(incron.__grains__, {'os_family': mock}):
            with patch.object(incron, '_write_incron_lines',
                              MagicMock(return_value={'retcode': False,
                                                      'stderr': 'error'})):
                self.assertEqual(incron.rm_job('cybage', '/home/cybage',
                                               'IN_MODIFY',
                                               'echo "SALT"'), 'absent')
|
992,375 | a9c3ff639f21c239319ee252b158ba90c65b693e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import os
import re
from collections import OrderedDict
from functools import update_wrapper
from urllib.parse import urlencode, urljoin
from uuid import uuid4
import django
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.files.storage import FileSystemStorage
from django.core.files.uploadedfile import UploadedFile
from django.core.urlresolvers import reverse
from django.db import models
from django.db import transaction
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.views.decorators.csrf import csrf_protect
from django_admin_rq.models import JobStatus
csrf_protect_m = method_decorator(csrf_protect)

# Model instances are serialized into the session as
# 'contenttype://<app_label>.<model>:<pk>' strings and re-hydrated later.
_CONTENT_TYPE_PREFIX = 'contenttype://'
_CONTENT_TYPE_RE_PATTERN = 'contenttype://([a-zA-Z-_]+)\.([a-zA-Z]+):(\d{1,10})'

# Names of the workflow steps, in their fixed order.
FORM_VIEW = 'form'
PREVIEW_RUN_VIEW = 'preview_run'
MAIN_RUN_VIEW = 'main_run'
COMPLETE_VIEW = 'complete'

# File uploads submitted through a job form are persisted under
# MEDIA_ROOT/django_admin_rq; the directory is created at import time.
_fs = FileSystemStorage(os.path.join(settings.MEDIA_ROOT, 'django_admin_rq'))
if not os.path.isdir(_fs.location):
    os.makedirs(_fs.location)
class JobAdminMixin(object):
    """ModelAdmin mixin that adds an asynchronous (django-rq) job workflow.

    A workflow consists of up to four admin views, always in this order:
    form -> preview_run -> main_run -> complete. Form data, the current
    job id and JobStatus references are carried between views in the
    Django session.
    """

    def get_urls(self):
        # Prepend the job workflow URLs to the admin's default URLs so
        # they take precedence over the catch-all change view pattern.
        from django.conf.urls import url
        urls = super(JobAdminMixin, self).get_urls()
        info = self.model._meta.app_label, self.model._meta.model_name

        def wrap(view):
            # Wrap each view in admin_site.admin_view (permission check).
            def wrapper(*args, **kwargs):
                return self.admin_site.admin_view(view)(*args, **kwargs)
            return update_wrapper(wrapper, view)

        job_urls = [
            url(
                r'^job/(?P<job_name>[a-zA-Z-_]+)/form/(?P<object_id>\d{1,10})?$',
                wrap(self.job_form),
                name='%s_%s_job_form' % info
            ),
            url(
                r'^job/(?P<job_name>[a-zA-Z-_]+)/(?P<view_name>(preview_run|main_run))/(?P<object_id>\d{1,10})?$',
                wrap(self.job_run),
                name='%s_%s_job_run' % info
            ),
            url(
                r'^job/(?P<job_name>[a-zA-Z-_]+)/complete/(?P<object_id>\d{1,10})?$',
                wrap(self.job_complete),
                name='%s_%s_job_complete' % info
            ),
        ]
        return job_urls + urls

    def get_workflow_url(self, view_name, job_name, object_id=None):
        """Reverse the admin URL for the given workflow step of job_name."""
        info = self.model._meta.app_label, self.model._meta.model_name
        url_kwargs = {'job_name': job_name}
        if object_id:
            url_kwargs['object_id'] = object_id
        if FORM_VIEW == view_name:
            url = reverse('admin:%s_%s_job_form' % info, kwargs=url_kwargs, current_app=self.admin_site.name)
        elif view_name in (PREVIEW_RUN_VIEW, MAIN_RUN_VIEW):
            url_kwargs['view_name'] = view_name
            url = reverse('admin:%s_%s_job_run' % info, kwargs=url_kwargs, current_app=self.admin_site.name)
        else:
            url = reverse('admin:%s_%s_job_complete' % info, kwargs=url_kwargs, current_app=self.admin_site.name)
        return url

    def get_job_names(self):
        """
        Returns an iterable of strings that represent all the asyncronous jobs this ModelAdmin can handle.
        These names act as identifying attributes and are not user visible.
        :func:`~django_admin_rq.admin.JobAdminMixin.get_job_titles` returns the user visible portion for these identifiers.
        """
        return []

    def get_job_form_class(self, job_name, request=None, object_id=None, view_name=None, extra_context=None):
        """
        Returns the form class for this job's start page.
        """
        return None

    def get_job_form_initial(self, request, job_name, object_id=None, view_name=None, extra_context=None):
        """
        Returns the job form's initial data for this job's start page.
        """
        return {}

    def get_job_title(self, job_name):
        """
        Returns the user visible title for this job identified by job_name
        """
        return ''

    def get_changelist_link_css(self, job_name):
        """
        Returns an iterable of css classes which are added to the changelist link that points to the job's start page
        """
        return ['addlink']

    def get_changeform_link_css(self, job_name):
        """
        Returns an iterable of css classes which are added to the changeform link that points to the job's start page
        """
        return ['addlink']

    def show_job_on_changelist(self, job_name):
        """
        Returns boolean whether or not this job should be shown on the changelist
        """
        return True

    def show_job_on_changeform(self, job_name):
        """
        Returns boolean whether or not this job should be shown on the changeform
        """
        return True

    def get_job_form_template(self, job_name, request=None, object_id=None, view_name=None, extra_context=None):
        """
        Returns the template for this job's start page
        """
        return 'django_admin_rq/job_form.html'

    def get_job_run_template(self, job_name, preview=True, request=None, object_id=None, view_name=None,
                             extra_context=None):
        """
        Returns the template for this job's run page.
        """
        return 'django_admin_rq/job_run.html'

    def get_job_complete_template(self, job_name, request=None, object_id=None, view_name=None,
                                  extra_context=None):
        """
        Returns the template for this job's complete page
        """
        return 'django_admin_rq/job_complete.html'

    def get_job_callable(self, job_name, preview=True, request=None, object_id=None, view_name=None):
        """
        Returns the function decorated with :func:`~django_rq.job` that runs the run async job for this view.
        view_name is either 'preview' or 'main'
        """
        return None

    def get_job_callable_extra_context(self, request, job_name, preview=True, object_id=None):
        """
        Extra context that is passed to the job callable. Must be pickleable
        """
        return {}

    def get_job_media(self, job_name, request=None, object_id=None, view_name=None):
        """
        Returns an instance of :class:`django.forms.widgets.Media` used to inject extra css and js into the workflow
        templates.
        return forms.Media(
            js = (
                'app_label/js/app_labe.js',
            ),
            css = {
                'all': (
                    'app_label/css/app_label.css',
                ),
            }
        )
        """
        return None

    # Predicates identifying the current workflow step.
    def is_form_view(self, view_name):
        return view_name == FORM_VIEW

    def is_preview_run_view(self, view_name):
        return view_name == PREVIEW_RUN_VIEW

    def is_main_run_view(self, view_name):
        return view_name == MAIN_RUN_VIEW

    def is_complete_view(self, view_name):
        return view_name == COMPLETE_VIEW

    def get_workflow_views(self, job_name):
        """
        Returns the view names included in the workflow for this job.
        At minimum the main view has to be part of the workflow. All other views can be ommitted.
        The order of the views is immutable: form, preview, main, complete
        """
        return FORM_VIEW, PREVIEW_RUN_VIEW, MAIN_RUN_VIEW, COMPLETE_VIEW

    def get_workflow_start_url(self, job_name, object_id=None):
        # Entry point of the workflow: the first configured step.
        if FORM_VIEW in self.get_workflow_views(job_name):
            url = self.get_workflow_url(FORM_VIEW, job_name, object_id)
        else:
            if PREVIEW_RUN_VIEW in self.get_workflow_views(job_name):
                url = self.get_workflow_url(PREVIEW_RUN_VIEW, job_name, object_id)
            else:
                url = self.get_workflow_url(MAIN_RUN_VIEW, job_name, object_id)
        # A short random job-id distinguishes a fresh workflow run from a
        # stale session (see check_job_id).
        params = {
            'job-id': uuid4().hex[:6]
        }
        return urljoin(url, '?{}'.format(urlencode(params)))

    @csrf_protect_m
    def changelist_view(self, request, extra_context=None):
        # Inject links to the start pages of all changelist-visible jobs.
        if extra_context is None:
            extra_context = {}
        jobs = []
        for job_name in self.get_job_names():
            if self.show_job_on_changelist(job_name):
                jobs.append({
                    'title': self.get_job_title(job_name),
                    'url': self.get_workflow_start_url(job_name),
                    'css': ' '.join(self.get_changelist_link_css(job_name))
                })
        extra_context.update({
            'changelist_jobs': jobs
        })
        return super(JobAdminMixin, self).changelist_view(request, extra_context)

    @csrf_protect_m
    @transaction.atomic
    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):
        # Inject per-object job links; only meaningful for existing objects.
        if extra_context is None:
            extra_context = {}
        jobs = []
        for job_name in self.get_job_names():
            if self.show_job_on_changeform(job_name) and object_id:
                jobs.append({
                    'title': self.get_job_title(job_name),
                    'url': self.get_workflow_start_url(job_name, object_id=object_id),
                    'css': ' '.join(self.get_changeform_link_css(job_name))
                })
        extra_context.update({
            'changeform_jobs': jobs
        })
        return super(JobAdminMixin, self).changeform_view(request, object_id=object_id, form_url=form_url, extra_context=extra_context)

    def serialize_form(self, form):
        """
        Given this job's bound form return the form's data as a session serializable object
        The field order is preserved from the original form
        """
        data = []
        for field_name, field in form.fields.items():
            if field_name in form.cleaned_data:
                form_value = form.cleaned_data[field_name]
                display_value = None
                if isinstance(form_value, models.Model):
                    # Model instances become 'contenttype://app.model:pk'
                    # references so the session stays JSON/pickle friendly.
                    ctype = ContentType.objects.get_for_model(form_value)
                    form_value = '{0}{1}.{2}:{3}'.format(
                        _CONTENT_TYPE_PREFIX,
                        ctype.app_label,
                        ctype.model,
                        form_value.pk
                    )
                elif isinstance(form_value, UploadedFile):
                    # Uploads are written to disk; only the path is stored.
                    file_name = _fs.get_available_name(form_value.name)
                    file_path = _fs.path(file_name)
                    with open(file_path, 'wb+') as destination:
                        for chunk in form_value.chunks():
                            destination.write(chunk)
                    form_value = file_path
                    display_value = file_name
                data.append({
                    'name': field_name,
                    'label': force_text(field.label) if field.label else None,
                    'value': form_value,
                    'display_value': display_value,
                })
        return data

    def _get_job_session_key(self, job_name):
        # Session key namespace for this job's workflow state.
        return 'django_admin_rq_{}'.format(job_name)

    def _start_job_session(self, request, job_name):
        # Reset this job's session state to an empty dict.
        request.session[self._get_job_session_key(job_name)] = {}

    def get_session_data(self, request, job_name):
        # Lazily create the per-job session dict on first access.
        key = self._get_job_session_key(job_name)
        if key not in request.session:
            self._start_job_session(request, job_name)
        return request.session[key]

    def get_session_form_data_as_list(self, request, job_name):
        """
        Retrieve form data that was serialized to the session in :func:`~django_admin_rq.admin.JobAdminMixin.job_form`
        Values prefixed with 'contenttype:' are replace with the instantiated Model versions.
        """
        form_data = []
        serialized_data = self.get_session_data(request, job_name).get('form_data', [])
        for field_data in copy.deepcopy(serialized_data):  # Don't modify the serialized session data
            value = field_data['value']
            if isinstance(value, six.string_types):
                if value.startswith(_CONTENT_TYPE_PREFIX):
                    match = re.search(_CONTENT_TYPE_RE_PATTERN, value)
                    if match:
                        field_data['value'] = ContentType.objects.get(
                            app_label=match.group(1),
                            model=match.group(2)
                        ).get_object_for_this_type(**{
                            'pk': match.group(3)
                        })
            form_data.append(field_data)
        return form_data

    def get_session_form_data_as_dict(self, request, job_name):
        """
        Convenience method to have the form data like form.cleaned_data
        """
        data_dict = OrderedDict()
        for value_dict in self.get_session_form_data_as_list(request, job_name):
            data_dict[value_dict['name']] = value_dict['value']
        return data_dict

    def set_session_job_status(self, request, job_name, job_status, view_name):
        """
        Serializes the given :class:`~django_admin_rq.models.JobStatus` to this job's session.
        """
        if isinstance(job_status, JobStatus) and job_status.pk:
            ctype = ContentType.objects.get_for_model(job_status)
            status = '{0}{1}.{2}:{3}'.format(_CONTENT_TYPE_PREFIX, ctype.app_label, ctype.model, job_status.pk)
            session_data = self.get_session_data(request, job_name)
            session_data['{}_job_status'.format(view_name)] = status
            # Mutating a nested dict doesn't mark the session dirty, so
            # flag it explicitly.
            request.session.modified = True
        else:
            raise ValueError('job_status must be an instance of {} that has a valid pk.'.format(JobStatus.__class__.__name__))

    def get_session_job_status(self, request, job_name, view_name):
        """
        Returns an instance of :class:`~django_admin_rq.models.JobStatus` representing the status for the given view.
        Returns None if the reference is missing from the session.
        """
        session_data = self.get_session_data(request, job_name)
        status = session_data.get('{}_job_status'.format(view_name), None)
        if status is not None and isinstance(status, six.string_types) and status.startswith(_CONTENT_TYPE_PREFIX):
            match = re.search(_CONTENT_TYPE_RE_PATTERN, status)
            if match:
                return ContentType.objects.get(
                    app_label=match.group(1),
                    model=match.group(2)
                ).get_object_for_this_type(**{
                    'pk': match.group(3)
                })
        return None

    def get_job_context(self, request, job_name, object_id, view_name):
        """
        Returns the context for all django-admin-rq views (form|preview_run|main_run|complete)
        """
        info = self.model._meta.app_label, self.model._meta.model_name
        preview = self.is_preview_run_view(view_name)
        request.current_app = self.admin_site.name
        context = dict(
            self.admin_site.each_context(request),
            opts=self.model._meta,
            app_label=self.model._meta.app_label,
            title=self.get_job_title(job_name),
            job_name=job_name,
            view_name=view_name,
            form_view=FORM_VIEW,
            preview_run_view=PREVIEW_RUN_VIEW,
            main_run_view=MAIN_RUN_VIEW,
            complete_view=COMPLETE_VIEW,
            form_data_list=self.get_session_form_data_as_list(request, job_name),
            form_data_dict=self.get_session_form_data_as_dict(request, job_name),
            preview=preview,
            job_media=self.get_job_media(job_name, request=request, object_id=object_id, view_name=view_name),
        )
        # The admin-bundled jquery moved in Django 1.9.
        if django.VERSION > (1, 8):
            jquery = static('admin/js/vendor/jquery/jquery.min.js')
        else:
            jquery = static('admin/js/jquery.min.js')
        context['jquery'] = jquery
        if object_id:
            try:
                obj = self.model.objects.get(pk=object_id)
                context['original'] = obj
                context['original_change_url'] = reverse(
                    'admin:%s_%s_change' % info, args=[object_id], current_app=self.admin_site.name
                )
            # NOTE(review): bare except silently ignores a missing/invalid
            # object_id (and any other error) -- consider narrowing.
            except:
                pass
        else:
            context['original_changelist_url'] = reverse(
                'admin:%s_%s_changelist' % info, current_app=self.admin_site.name
            )
        if view_name in (PREVIEW_RUN_VIEW, MAIN_RUN_VIEW):
            job_status = self.get_session_job_status(request, job_name, view_name)
            if job_status is None:
                # job_status is None when no job has been started
                job_callable = self.get_job_callable(job_name, preview, request=request, object_id=object_id,
                                                     view_name=view_name)
                if callable(job_callable):
                    job_status = JobStatus()
                    job_status.save()
                    self.set_session_job_status(request, job_name, job_status, view_name)
                    context.update({
                        'job_status': job_status,
                        'job_status_url': job_status.url()  # The frontend starts polling the status url if it's present
                    })
                    # Enqueue the rq job with the serialized form data.
                    job_callable.delay(
                        job_status,
                        self.get_session_form_data_as_dict(request, job_name),
                        self.get_job_callable_extra_context(request, job_name, preview, object_id)
                    )
            else:
                context['job_status'] = job_status
                # do not set job_status_url in this case otherwise it'll be an endless redirect loop
            if COMPLETE_VIEW in self.get_workflow_views(job_name):
                context['complete_view_url'] = self.get_workflow_url(COMPLETE_VIEW, job_name, object_id)
            else:
                context['complete_view_url'] = None
        return context

    def check_job_id(self, request, job_name):
        # A new 'job-id' query parameter means a fresh workflow run was
        # started; wipe any stale session state for this job.
        if 'job-id' in request.GET:
            job_id = request.GET['job-id']
            stored_job_id = self.get_session_data(request, job_name).get('job-id', None)
            if job_id != stored_job_id:
                self._start_job_session(request, job_name)
                self.get_session_data(request, job_name)['job-id'] = job_id
                request.session.modified = True

    def job_form(self, request, job_name='', object_id=None):
        # Entry view for the 'form' workflow step.
        self.check_job_id(request, job_name)
        return self.job_serve(request, job_name, object_id, FORM_VIEW)

    def job_run(self, request, job_name='', object_id=None, view_name=None):
        # Entry view for 'preview_run' and 'main_run' steps.
        self.check_job_id(request, job_name)
        return self.job_serve(request, job_name, object_id, view_name)

    def job_complete(self, request, job_name='', object_id=None):
        # Entry view for the 'complete' workflow step.
        self.check_job_id(request, job_name)
        return self.job_serve(request, job_name, object_id, COMPLETE_VIEW)

    def job_serve(self, request, job_name='', object_id=None, view_name=None, extra_context=None):
        """Dispatch to the appropriate template/form handling per step."""
        context = self.get_job_context(request, job_name, object_id, view_name)
        if extra_context:
            context.update(extra_context)
        if FORM_VIEW == view_name:
            if request.method == 'GET':
                form_class = self.get_job_form_class(job_name, request=request, object_id=object_id,
                                                     view_name=view_name, extra_context=extra_context,)
                initial = self.get_job_form_initial(request, job_name, object_id=object_id, view_name=view_name,
                                                    extra_context=extra_context)
                form = form_class(initial=initial)
            else:
                form_class = self.get_job_form_class(job_name, request=request, object_id=object_id,
                                                     view_name=view_name, extra_context=extra_context,)
                form = form_class(request.POST, request.FILES)
                if form.is_valid():
                    # Save the serialized form data to the session
                    session_data = self.get_session_data(request, job_name)
                    session_data['form_data'] = self.serialize_form(form)
                    request.session.modified = True
                    # Advance to the next configured step.
                    if PREVIEW_RUN_VIEW in self.get_workflow_views(job_name):
                        url = self.get_workflow_url(PREVIEW_RUN_VIEW, job_name, object_id)
                    else:
                        url = self.get_workflow_url(MAIN_RUN_VIEW, job_name, object_id)
                    return HttpResponseRedirect(url)
            context['form'] = form
            return TemplateResponse(
                request,
                self.get_job_form_template(
                    job_name,
                    request=request,
                    object_id=object_id,
                    view_name=view_name,
                    extra_context=extra_context
                ),
                context
            )
        elif view_name in (PREVIEW_RUN_VIEW, MAIN_RUN_VIEW):
            preview = self.is_preview_run_view(view_name)
            return TemplateResponse(
                request,
                self.get_job_run_template(
                    job_name,
                    preview=preview,
                    request=request,
                    object_id=object_id,
                    view_name=view_name,
                    extra_context=extra_context
                ),
                context
            )
        else:
            return TemplateResponse(request,
                                    self.get_job_complete_template(
                                        job_name,
                                        request=request,
                                        object_id=object_id,
                                        view_name=view_name,
                                        extra_context=extra_context,
                                    ),
                                    context
                                    )
|
992,376 | 9db4cc3451f8fe7e339fad74bddb5c7a1bce3ef3 | from clarifai.rest import ClarifaiApp, Image
from config_reader import CLARIFAI_KEY, CLARIFAI_SECRET
from hashlib import md5
def chunks(iterator, size):
    """
    Split the image url list to n lists of size 128
    :param iterator: List
    :param size: Integer
    :return: List[List]
    """
    start = 0
    total = len(iterator)
    while start < total:
        yield iterator[start:start + size]
        start += size
def get_clarifai_app_object(urls):
    """
    Initialize indexing of images and return clarifai api object
    :param urls: List
    :return: ClarifaiApp
    """
    c_app = ClarifaiApp(
        CLARIFAI_KEY,
        CLARIFAI_SECRET)
    # Clarifai's bulk endpoint is limited to batches of 128 images.
    for arrays in chunks(urls, 128):
        try:
            c_app.inputs.bulk_create_images(
                [Image(url=url, image_id=md5(url).hexdigest()) for url in arrays])
        except Exception:
            # Best-effort indexing: a duplicate image_id fails the whole
            # batch; log and continue with the remaining batches.
            # Previously a bare `except:` was used, which also swallowed
            # SystemExit/KeyboardInterrupt.
            # NOTE(review): md5(url) requires bytes on Python 3 -- this
            # module appears to target Python 2; confirm before porting.
            print("Trying to index duplicate images")
    return c_app
|
992,377 | ff0489eb13368ae1a1bdaad92629a3f4e6866903 | #display the output
# NOTE(review): `display` is not a Python builtin -- it is only available
# inside IPython/Jupyter (IPython.display.display). Outside that runtime
# this line raises NameError; confirm the intended environment.
display("new python file new new new")
|
992,378 | adc84bd6bf67344aec1a8439b3015672d8a9b6e0 | import os
# Absolute paths for the project directory layout.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(BASE_PATH, 'data')  # yaml test-case data files
CASE_PATH = os.path.join(BASE_PATH, "testcase")  # test case modules
REPORT_PATH = os.path.join(BASE_PATH, 'report')  # generated test reports
TEMP_PATH = os.path.join(BASE_PATH, 'testcase/temp')  # temporary generated cases
LOG_PATH = os.path.join(BASE_PATH, 'log')  # log files
TEMPLATE_PATH = os.path.join(BASE_PATH, 'template')  # case-generation templates

# Base URL of the system under test.
URL = "xx"

# pymysql connection settings.
ip = 'xx'
port = 'yy'  # test database port
# port = '5442'  # dev database port
user = 'ss'
password = '123456'
database = 'dd'

# redis configuration (not set yet)

# Whether to send the report after a run.
SWITCH = True
# SWITCH = False

# DingTalk webhook URL.
DINGDINGURL = ""

# WeCom (WeChat Work) webhook URL.
QIWEIXINURL = "xx"

# Result delivery channel (DingDing, QiWeiXin, Email).
SENDTYPE = "DingDing"
# SENDTYPE = "QiWeiXin"
# SENDTYPE = "Email"
|
992,379 | d9db88a593269be511c7d1cce3c1bba9b7956ec3 | import matplotlib
matplotlib.use('Agg') # This must be done before importing matplotlib.pyplot
import matplotlib.pyplot as plt
import numpy as np
import math
def freq(dirr, freq_orig, iters):
    """Save one log-scaled node-frequency heatmap per input file.

    Writes ``freq_<iteration>.png`` into *dirr* for each entry of
    *freq_orig* (one B x D frequency matrix per file).
    """
    num_files = len(freq_orig)
    orig_maxBD = len(freq_orig[0])
    # Cap the plotted benefit/damage range at 21 bins.
    maxBD = min(21, len(freq_orig[0]))
    freq = np.zeros((num_files, maxBD, maxBD))
    for i in range(num_files):
        for B in range(maxBD):
            for D in range(maxBD):
                if (B < orig_maxBD and D < orig_maxBD): freq[i][B][D] = freq_orig[i][B][D]

    # Scale fractions up before rounding so small values survive the
    # log-normalized color scale (values become "per 10000").
    multiplier = 10000
    for i in range(num_files):
        for B in range(maxBD):
            for D in range(maxBD):
                freq [i][B][D] *= multiplier
                freq[i][B][D] = round(freq[i][B][D])

    zmin = 1  # np.min(freq[np.nonzero(freq)])
    print("BD_plots(): freq min = " + str(zmin))
    zmax = multiplier  # np.max(freq[:,:,:])

    for i in range(num_files):
        xydata = freq[i,:maxBD,:maxBD]
        # TODO: log normalize
        plt.matshow(xydata, cmap=plt.get_cmap('plasma'), origin="lower", norm=matplotlib.colors.LogNorm(vmin = zmin, vmax = zmax))  # , vmin=zmin,vmax=zmax) # , norm=matplotlib.colors.LogNorm())
        ax = plt.gca()
        ax.xaxis.tick_bottom()
        ax.set_ylabel("Benefits", fontsize=12)
        ax.set_xlabel("Damages", fontsize=12)
        ax.xaxis.set_label_position('bottom')
        plt.title("Node Frequency", fontsize=15)
        cbar = plt.colorbar(label=str("Percent of nodes"))
        # TODO: add cbar log normz'd labels
        cbar.set_ticks([1, zmax/1000, zmax/100, zmax/10 , zmax])
        cbar.set_ticklabels(["$0$","$10^{-2}$","$10^{-1}$","$10^1$","$10^2$"])
        plt.savefig(dirr + "freq_" + str(iters[i]) + ".png")
        # Reset pyplot's global state before the next figure.
        plt.clf()
        plt.cla()
        plt.close()
def probability(dirr, Pr):
    """Save a heatmap of the BD-pair probability matrix to probability.png."""
    plt.matshow(Pr, cmap=plt.get_cmap('plasma'), origin="lower")
    ax = plt.gca()
    ax.xaxis.tick_bottom()
    ax.set_ylabel("Benefits", fontsize=12)
    ax.set_xlabel("Damages", fontsize=12)
    ax.xaxis.set_label_position('bottom')
    plt.title("Probability of BD Pairs", fontsize=15)
    cbar = plt.colorbar(label=str("probability"))
    # TODO: add cbar labels
    # cbar.set_ticks([0,.1, 1, 10,100 , 1000])
    # maxx = math.ceil(np.ndarray.max(freq[:,:,:]))
    # cbar.set_ticklabels([0,maxx/1000, maxx/100, maxx/10, maxx])
    # plt.xaxis.set_ticks_position('bottom')
    plt.savefig(dirr + "probability.png")
    # Reset pyplot's global state.
    plt.clf()
    plt.cla()
    plt.close()
def leaf_fitness(dirr, leaf_fitness):
    """Save a heatmap of the BD-pair leaf-fitness matrix to leaf_fitness.png.

    NOTE: the parameter shadows this function's own name, so recursive
    use is impossible (harmless here, but worth knowing).
    """
    plt.matshow(leaf_fitness, cmap=plt.get_cmap('plasma'), origin="lower")
    ax = plt.gca()
    ax.xaxis.tick_bottom()
    ax.set_ylabel("Benefits", fontsize=12)
    ax.set_xlabel("Damages", fontsize=12)
    ax.xaxis.set_label_position('bottom')
    plt.title("Leaf Fitness of BD Pairs", fontsize=15)
    cbar = plt.colorbar(label=str("probability"))
    # TODO: add cbar labels
    # cbar.set_ticks([0,.1, 1, 10,100 , 1000])
    # maxx = math.ceil(np.ndarray.max(freq[:,:,:]))
    # cbar.set_ticklabels([0,maxx/1000, maxx/100, maxx/10, maxx])
    # plt.xaxis.set_ticks_position('bottom')
    plt.savefig(dirr + "leaf_fitness.png")
    # Reset pyplot's global state.
    plt.clf()
    plt.cla()
    plt.close()
def Pr_leaf_fitness(dirr, Pr, leaf_fitness):
    """Save a heatmap of the elementwise product Pr * leaf_fitness.

    Both inputs are assumed to be square matrices of the same size
    (indexed [B][D]).
    """
    size = len(Pr)
    # BUG FIX: the original comprehension iterated the outer loop over j
    # and the inner over i while indexing [i][j], which produced the
    # TRANSPOSE of the intended elementwise product. Loop order now
    # matches the indexing.
    Pr_fitness = [[Pr[i][j] * leaf_fitness[i][j] for j in range(size)] for i in range(size)]
    plt.matshow(Pr_fitness, cmap=plt.get_cmap('plasma'), origin="lower")
    ax = plt.gca()
    ax.xaxis.tick_bottom()
    ax.set_ylabel("Benefits", fontsize=12)
    ax.set_xlabel("Damages", fontsize=12)
    ax.xaxis.set_label_position('bottom')
    plt.title("Leaf Fitness * Probability of BD Pairs", fontsize=15)
    cbar = plt.colorbar(label=str("probability"))
    # TODO: add cbar labels
    # cbar.set_ticks([0,.1, 1, 10,100 , 1000])
    # maxx = math.ceil(np.ndarray.max(freq[:,:,:]))
    # cbar.set_ticklabels([0,maxx/1000, maxx/100, maxx/10, maxx])
    # plt.xaxis.set_ticks_position('bottom')
    plt.savefig(dirr + "probability_leaf_fitness.png")
    # Reset pyplot's global state.
    plt.clf()
    plt.cla()
    plt.close()
def ETB(dirr, ETB_score, iters):
    """Save one hub-fitness heatmap per input file as ETB_<iteration>.png.

    ETB_score is indexed with numpy-style [:,:,:], so it is assumed to be
    a 3-D numpy array (file x B x D) -- TODO confirm at the call site.
    """
    num_files = len(ETB_score)
    # multiplier = 1000
    # Shared color scale across all files so plots are comparable.
    zmin = 0
    zmax = np.max(ETB_score[:,:,:])

    for i in range(num_files):
        xydata = ETB_score[i]
        plt.matshow(xydata, cmap=plt.get_cmap('plasma'), origin="lower", vmin=zmin, vmax=zmax)
        ax = plt.gca()
        ax.xaxis.tick_bottom()
        ax.set_ylabel("Benefits", fontsize=12)
        ax.set_xlabel("Damages", fontsize=12)
        ax.xaxis.set_label_position('bottom')
        plt.title("Hub Fitness", fontsize=15)
        cbar = plt.colorbar(label=str("Average Contribution"))
        cbar.ax.tick_params(labelsize=10)
        plt.savefig(dirr + "ETB_" + str(iters[i]) + ".png")
        # Reset pyplot's global state before the next figure.
        plt.clf()
        plt.cla()
        plt.close()
|
992,380 | 0994bf7baafd895958a133d70a4249c8ee382ddb | from django.shortcuts import render
from django.views.generic import DetailView, ListView
from datetime import datetime, timedelta
from trader.models import Trade
from ticker.strategies.simple import SimpleStrategy
import logging
class TradeList(ListView):
    """Paginated list of Trade objects, backtesting SimpleStrategy on GET."""
    logger = logging.getLogger(__name__)
    model = Trade
    queryset = Trade.objects.all()
    context_object_name = 'object_list'
    template_name = 'trader/trade/list.html'
    paginate_by = 10

    def get_context_data(self, **kwargs):
        # Expose the trader produced in get() to the template.
        kwargs['btc_trader'] = self.btc_trader
        return super(TradeList, self).get_context_data(**kwargs)

    def get(self, request, *args, **kwargs):
        # Run a 30-day BTC_ETH backtest on every page load and keep its
        # trader for the context. NOTE(review): this runs synchronously
        # per request -- confirm that is intended.
        dt_from = datetime.utcnow()
        s = SimpleStrategy('BTC_ETH')
        s.start_from(dt_from, days=30)
        self.btc_trader = s.trader
        return super(TradeList, self).get(request, *args, **kwargs)
|
992,381 | 0a97994893e1f1c9bea9eb6bd31ce6509ceda50e | import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader
from torchvision import datasets, transforms
# Prefer the first CUDA device when available, otherwise fall back to CPU.
dev = "cpu"
if torch.cuda.is_available():
    dev = "cuda:0"
device = torch.device(dev)
class Model(torch.nn.Module):
    """Five-layer fully connected classifier for flattened 28x28 MNIST digits."""

    def __init__(self):
        super(Model, self).__init__()
        # Layer widths: 784 -> 520 -> 320 -> 240 -> 120 -> 10.
        self.l1 = torch.nn.Linear(784, 520)
        self.l2 = torch.nn.Linear(520, 320)
        self.l3 = torch.nn.Linear(320, 240)
        self.l4 = torch.nn.Linear(240, 120)
        self.l5 = torch.nn.Linear(120, 10)

    def forward(self, x):
        # Flatten to (N, 784) and run the ReLU-activated hidden layers;
        # the last layer emits raw logits (CrossEntropyLoss adds softmax).
        hidden = x.view(-1, 784)
        for layer in (self.l1, self.l2, self.l3, self.l4):
            hidden = F.relu(layer(hidden))
        return self.l5(hidden)
BATCH_SIZE = 64

# MNIST is downloaded into ./data/ on first use.
train_dataset = datasets.MNIST(root="./data/",
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)

test_dataset = datasets.MNIST(root="./data/",
                              train=False,
                              transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=BATCH_SIZE,
                          shuffle=True)

# BUG FIX: the test loader previously iterated train_dataset, so the
# reported "test" metrics were actually computed on the training set.
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=BATCH_SIZE,
                         shuffle=False)

model = Model().to(device)

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(model.parameters(), lr=0.01)
def train(epoch):
    """Run one training epoch over train_loader, logging every 10 batches."""
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # torch.autograd.Variable has been a deprecated no-op wrapper
        # since PyTorch 0.4; moving tensors to the device is sufficient.
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} | Batch Status: {}/{} ({:.0f}%) | Loss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test():
    """Evaluate on test_loader; print average loss and accuracy."""
    model.eval()
    test_loss = 0
    correct = 0
    for data, target in test_loader:
        with torch.no_grad():
            # torch.autograd.Variable has been a deprecated no-op wrapper
            # since PyTorch 0.4; .to(device) is sufficient.
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Sum of per-batch mean losses, averaged over samples below.
            test_loss += criterion(output, target).item()
            pred = torch.max(output.data, 1)[1]  # 0th index is value, 1st index is class
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print(f'===========================\nTest set: Average loss: {test_loss:.4f}, Accuracy: {correct}/{len(test_loader.dataset)} '
          f'({100. * correct / len(test_loader.dataset):.0f}%)')
def main():
    """Train for a single epoch, then report test-set performance."""
    num_epochs = 1
    for current_epoch in range(num_epochs):
        train(current_epoch)
    test()


if __name__ == "__main__":
    main()
|
992,382 | d6f8d4589af7dbcdf37a85e9af726d9a2dd3f270 | """Process, blend and store images.
Published under the MIT License. See the file LICENSE for details.
"""
import logging
import os
from PIL import Image, ImageEnhance
from image_optimize import recursive_optimize
import utils
# Log INFO and above to a file in the working directory.
logging.basicConfig(filename='logfile.log', level=logging.INFO)
def iteration_layers(model, speedup, session, indepth_layer=None):
    """Select the layer tensors whose activations will be amplified.

    With ``speedup`` enabled only a fixed subset of five layers is used,
    trading coverage for runtime; otherwise every layer tensor of the
    model is returned.

    Parameters:
        model: Inception5h model
        speedup: selects subset of layers to give results faster
        session: tensorflow session used to resolve tensor names
    Returns: layer tensors to iterate through
    """
    if speedup is True:
        reduced_names = ['conv2d1',
                         'conv2d2',
                         'mixed3b',
                         'mixed4b',
                         'mixed5b']
        return [session.graph.get_tensor_by_name(name + ":0")
                for name in reduced_names]
    return model.layer_tensors
def process_and_save_img(input_name, category, output_path, image, model,
                         session, num_repeats, rescale_factor,
                         step_size, speedup=True):
    """
    Function to process and save images to file.
    Parameters
        input_name: filename of input image
        category: category of provided image, also folder where image is stored
        output_path: path to save the output image
        image: loaded image using utils.load_image
        model: deep neural net to use, default inception 5h
        session: tensorflow session
        num_repeats: number of times to downscale the image
        rescale_factor: downscaling factor for the image
        step_size: scale for each step of the gradient ascent
    Returns: image_properties

    NOTE(review): `category` is documented but never used in the body --
    confirm whether it should influence output_path.
    """
    # Fewer gradient-ascent iterations when speed is preferred.
    if speedup is True:
        num_iterations = 2
    else:
        num_iterations = 5

    image_properties = {}
    layer_tensors = iteration_layers(model, speedup, session)
    logging.info('The following layers will be used for exploration: %s',
                 layer_tensors)

    # Iterate through layer tensors that will be maximized
    for layer_tensor in layer_tensors:
        # Blend factors 0.0, 0.2, 0.4, 0.6, 0.8.
        steps = [x * 0.2 for x in range(0, 5)]
        steps_rounded = [round(x, 2) for x in steps]
        # adjust how much the previous image is blended with current version
        for blend_number in steps_rounded:
            img_result = recursive_optimize(layer_tensor=layer_tensor,
                                            image=image,
                                            model=model,
                                            session=session,
                                            num_iterations=num_iterations,
                                            step_size=step_size,
                                            rescale_factor=rescale_factor,
                                            num_repeats=num_repeats,
                                            blend=blend_number)
            # create unique filename to not overwrite already created files
            # (layer name + blend factor are encoded into the name).
            input_name_wo_extension = os.path.splitext(input_name)[0]
            filename = input_name_wo_extension + \
                layer_tensor.name.replace(':', '_') + str(blend_number)\
                .replace('.', '_') + '.jpg'
            logging.info('saving image: %s', filename)
            file = os.path.join(output_path, filename)
            if not os.path.exists(output_path):
                os.mkdir(output_path)
            utils.save_image(img_result, filename=file)
            # store image properties to dict
            image_properties[filename] = {}
            image_properties[filename]['filename'] = filename
            image_properties[filename]['layer'] = layer_tensor.name
            image_properties[filename]['blend'] = blend_number
    return image_properties
def resize_secondary_image(primary_image, secondary_image):
    """Resize *secondary_image* to the exact pixel dimensions of *primary_image*.

    Parameters:
        primary_image: path (or file object) of the image whose size is copied
        secondary_image: path (or file object) of the image to resize

    Returns: the secondary image resized to the primary image's size.
    WARNING: the secondary image's aspect ratio is not preserved.
    """
    target_size = Image.open(primary_image).size
    # resample=0 selects nearest-neighbour resampling
    return Image.open(secondary_image).resize(target_size, resample=0)
def blend_images(primary_image, secondary_image, alpha, saturation_enhance,
                 contrast_enhance):
    """
    Blend two images together and adjust saturation and contrast.
    The secondary image is resized to the primary image's size before
    blending, so the inputs may have different dimensions.
    Parameters:
        primary_image: first image (path or file object)
        secondary_image: second image (path or file object)
        alpha: interpolation factor; if alpha is 0.0,
               a copy of the primary image is returned
        saturation_enhance: adjust image color balance (1.0 = unchanged)
        contrast_enhance: adjust image contrast (1.0 = unchanged)
    Returns: blended_image
    """
    im_primary = Image.open(primary_image)
    resized_secondary_image = resize_secondary_image(primary_image,
                                                     secondary_image)
    # TODO add a smarter way to change color saturation of single images
    # Desaturate the secondary image (enhance(0.0) -> grayscale) before blending.
    saturation = ImageEnhance.Color(resized_secondary_image)
    resized_secondary_image = saturation.enhance(0.0)
    blended_image = Image.blend(im_primary, resized_secondary_image, alpha)
    # BUG FIX: the original built the Contrast enhancer from the image BEFORE
    # the saturation step, so the saturation adjustment was silently discarded
    # by the final contrast call.  Apply saturation first, then construct the
    # contrast enhancer from the already-saturated result.
    blended_image = ImageEnhance.Color(blended_image).enhance(saturation_enhance)
    blended_image = ImageEnhance.Contrast(blended_image).enhance(contrast_enhance)
    return blended_image
|
992,383 | 47aaf8487010553565e9e573cdeff7e63eeb4585 | import unittest
from second_project import Counter
class EasyTestCase(unittest.TestCase):
    """Checks that a fresh (or cleared) Counter reports a value of 0."""
    def setUp(self):
        # A new Counter instance for every test method.
        self.counter = Counter()
    def test_easy_input(self):
        self.assertEqual(self.counter.get_value(), 0)
    def test_easy_input_two(self):
        # clear() on an already-empty counter must leave it at 0.
        self.counter.clear()
        self.assertEqual(self.counter.get_value(), 0)
    def tearDown(self):
        self.counter = None
class MediumTestCase(unittest.TestCase):
    """Checks add()/remove() arithmetic on the Counter."""
    def setUp(self):
        self.counter = Counter()
    def test_medium_input(self):
        # Three increments -> value 3.
        self.counter.add()
        self.counter.add()
        self.counter.add()
        self.assertEqual(self.counter.get_value(), 3)
    def test_medium_input_two(self):
        # Three increments followed by two decrements -> value 1.
        self.counter.add()
        self.counter.add()
        self.counter.add()
        self.counter.remove()
        self.counter.remove()
        self.assertEqual(self.counter.get_value(), 1)
    def tearDown(self):
        self.counter = None
class HardTestCase(unittest.TestCase):
    """Boundary behaviour: underflow clamping and a large number of adds."""
    def setUp(self):
        self.counter = Counter()
    def test_hard_input(self):
        # remove() on an empty counter must clamp at 0, never go negative.
        self.counter.remove()
        self.counter.remove()
        self.counter.remove()
        self.counter.remove()
        self.assertEqual(self.counter.get_value(), 0)
    def test_hard_input_two(self):
        # 1000 increments -> value 1000.
        for _ in range(0, 1000):
            self.counter.add()
        self.assertEqual(self.counter.get_value(), 1000)
    def tearDown(self):
        self.counter = None
# Run the test suite when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
|
992,384 | 8a83da82b58a3988377202e7d8c0a04876d120e9 | print('Prueba Funcion')
# Print 0..9 on one line, each digit followed by a single space, then a
# closing message on the next line and a final blank line.
print(*range(10), end=' ')
print('\n Fin de Programa')
print()
|
992,385 | 6995ee8949f21fab5f95b0940730e2b2c7c18c69 | from __future__ import print_function
import PyTorch
class FloatTensor(PyTorch._FloatTensor):
    """Thin Python-side subclass of the Cython _FloatTensor wrapper."""
    pass
    # def __cinit__(self):
    #     print('floattensor.__cinit__')
    # def __init__(self, tensor, _allocate=True):
    #     print('floattensor.__init__')
    #     if isinstance(tensor, PyTorch._FloatTensor):
    #         self.native = tensor.native
    #     else:
    #         raise Exception('unknown type ' + type(tensor))
class DoubleTensor(PyTorch._DoubleTensor):
    """Thin Python-side subclass of the Cython _DoubleTensor wrapper."""
    pass
    # def __init__(self, tensor, _allocate=True):
    #     print('doubletensor.__init__')
    #     if isinstance(tensor, PyTorch._DoubleTensor):
    #         self.native = tensor.native
    #     else:
    #         raise Exception('unknown type ' + type(tensor))
class LongTensor(PyTorch._LongTensor):
    """Thin Python-side subclass of the Cython _LongTensor wrapper."""
    pass
class ByteTensor(PyTorch._ByteTensor):
    """Thin Python-side subclass of the Cython _ByteTensor wrapper."""
    pass
#class Linear(PyTorch.CyLinear):
# pass
class FloatStorage(PyTorch._FloatStorage):
    """Thin Python-side subclass of the Cython _FloatStorage wrapper."""
    pass
class DoubleStorage(PyTorch._DoubleStorage):
    """Thin Python-side subclass of the Cython _DoubleStorage wrapper."""
    pass
class LongStorage(PyTorch._LongStorage):
    """Thin Python-side subclass of the Cython _LongStorage wrapper."""
    pass
class ByteStorage(PyTorch._ByteStorage):
    """Thin Python-side subclass of the Cython _ByteStorage wrapper."""
    pass
def asDoubleTensor(myarray):
    """Wrap *myarray* as a DoubleTensor via the Cython conversion helper."""
    wrapped = PyTorch._asDoubleTensor(myarray)
    return DoubleTensor(wrapped)
def asFloatTensor(myarray):
    """Wrap *myarray* as a FloatTensor via the Cython conversion helper."""
    wrapped = PyTorch._asFloatTensor(myarray)
    return FloatTensor(wrapped)
def asByteTensor(myarray):
    """Wrap *myarray* as a ByteTensor via the Cython conversion helper."""
    wrapped = PyTorch._asByteTensor(myarray)
    return ByteTensor(wrapped)
|
992,386 | 5d14db43acc4bbca543aa80e9098df1ae65b8e5d | #!/usr/bin/env python
# coding: utf-8
# In[1]:
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
    # HackerRank-style classification: odd -> Weird; even 2-5 -> Not Weird;
    # even 6-20 -> Weird; even above 20 -> Not Weird.
    N = int(input())
    if N % 2 == 1:
        print('Weird')
    elif 2 <= N <= 5:
        print('Not Weird')
    elif 6 <= N <= 20:
        print('Weird')
    elif N > 20:
        print('Not Weird')
# In[ ]:
|
992,387 | 3fc17b4828677a19025ecf618135a79f58cf061a | ten_things = "Apples Oranges Crows Telephone Light Sugar" # creates a variable ten_things with the assigned value of "Apples Oranges Crows Telephone Light Sugar"
# NOTE: Python 2 syntax (print statement); this will not run under Python 3.
print "Wait there are not 10 things in that list. Let's fix that."
stuff = ten_things.split(' ')  # split the space-separated string into a list of words
more_stuff = ["Day", "Night", "Song", "Frisbee", "Corn", "Banana", "Girl", "Boy"]  # extra items to draw from
while len(stuff) != 10:  # keep going until stuff holds exactly 10 items
    next_one = more_stuff.pop()  # take the LAST item from more_stuff
    print "Adding:", next_one
    stuff.append(next_one)  # append it to the end of stuff
    print "There are %d items now." % len(stuff)
print "There we go: ", stuff
print "Let's do some things with stuff."
print stuff[1]  # second item (index 1)
print stuff[-1]  # whoa! fancy  # last item
print stuff.pop()  # removes and prints the last item
print ' '.join(stuff)  # what? cool!  # all items joined with single spaces
print '#'.join(stuff[3:5])  # super stellar  # items at index 3 and 4 joined by '#'
992,388 | 12e5ed0b76580cba7a30f8805ae557c850faa1dc | import numpy as np
# global parameters -------------------------------------------------------------------------------- #
# These module-level globals are read directly by every generator below.
num_graphs = 1000 # number of samples to be generated (per geometry)
N = 50 # number of nodes (per graph)
Radius = 1 # radius of the reference Euclidean circle used to fix the point density
pointDensity = N / (np.pi * Radius**2) # density of points (to be kept fixed across geometries)
thresholdFrac = 0.4 # used to compute the fraction of the total area within which to connect nodes
# -------------------------------------------------------------------------------------------------- #
"""
=========================================================================================
NOTE ON POINT DENSITY:
----------------------
In each case (spherical, planar, hyperbolic) the appropriate radius is chosen to keep the
ratio of the number of nodes (N) to the sampled area fixed. We want the model to learn the
difference between different geometries, not just artifacts related to the point density,
such as the average degree. We fix the point density by choosing a radius (declared as 'Radius')
of a circle in the Euclidean plane.
Each radius below comes from the condition that (N / Area) = pointDensity:
sphericalRadius = sqrt(N / 4 * pi * pointDensity) <-- Area = 4 pi radius^2
planarRadius = sqrt(N / pi * pointDensity) <-- Area = pi radius^2
hyperbolicRadius = arccosh(1 + N / (2 * pi * pointDensity)) <-- Area = 2 pi (cosh radius - 1)
=========================================================================================
"""
""" spherical graphs """
# generates positions for each node
def sample_spherical(N, radius, ndim=3):
    """Return an (ndim, N) array of points uniformly distributed on a sphere
    of the given radius (Gaussian samples normalised column-wise)."""
    gauss = np.random.randn(ndim, N)
    col_norms = np.linalg.norm(gauss, axis=0)
    return gauss * radius / col_norms
def sphere_generator():
    """
    Generate `num_graphs` random geometric graphs on a sphere; nodes are
    connected when their arclength distance is below `sphericalThreshold`.

    We compute sphericalThreshold by regarding thresholdFrac as the ratio of area around a node (where
    other nodes can connect to it) to the total area of the surface.
    In this case, the area around a node forms a circular sector, and we solve for the arclength "radius"
    of that circular sector using:
        thresholdFrac = Area_sector / Total_area = (1 - cos(s / sphericalRadius)) / 2 where s is the arclength
        "radius" of the circular sector

    :return: list of length num_graphs of N x N adjacency matrices
    """
    sphericalRadius = np.sqrt(N / (4 * np.pi * pointDensity))
    sphericalThreshold = sphericalRadius * np.arccos(1 - 2 * thresholdFrac)
    data_sphere = []
    # np.random.seed(2020)
    for r in range(num_graphs):
        coords = sample_spherical(N, sphericalRadius, 3)
        # computes the adjacency matrix (symmetric; diagonal entries are 1
        # because a node's distance to itself is 0 < threshold)
        Adj_Matrix = np.zeros((N, N))
        for i in range(N):
            for j in range(N):
                a = coords[:, i]
                b = coords[:, j]
                dot_prod = np.dot(a, b)/sphericalRadius**2
                dot_prod = min(dot_prod, 1) # <-- sometimes np.dot returns 1.00000000002, messing up np.arccos()
                """ note that when np.arrcos gets 1, it returns a nan """
                theta = np.arccos(dot_prod) # gets the angle between a and b (in radians)
                # ij_dist = np.linalg.norm(a-b) # calculate euclidean distance
                ij_dist = sphericalRadius * theta # arclength distance
                if ij_dist < sphericalThreshold:
                    Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix
        data_sphere.append(Adj_Matrix)
    return data_sphere
""" planar graphs """
def plane_generator():
    """
    Generate `num_graphs` random geometric graphs on a flat disc.

    We compute planarThreshold by regarding thresholdFrac as the ratio of area around a node (where
    other nodes can connect to it) to the total area of the surface.
    In this case, the area around a node forms a circle, and we solve for the radius of that circle using:
        thresholdFrac = Area_circle / Total_area = s ** 2 / planarRadius ** 2 where s is the radius of the
        surrounding circle

    NOTE(review): radii are drawn as uniform * planarRadius, which concentrates
    points near the centre of the disc; area-uniform sampling would use
    sqrt(uniform) * planarRadius (analogous to the inversion sampling used in
    hyperbolic_generator) — confirm whether this is intended.

    :return: list of length num_graphs of N x N adjacency matrices
    """
    planarRadius = np.sqrt(N / (np.pi * pointDensity)) # <-- convert pointDensity into radius
    planarThreshold = planarRadius * np.sqrt(thresholdFrac)
    # distance function (law of cosines)
    def dist(rTheta1, rTheta2): # rThetai is a coordinate tuple: (r, theta)
        a, b = rTheta1[0], rTheta2[0]
        theta1, theta2 = rTheta1[1], rTheta2[1]
        return np.sqrt(a ** 2 + b ** 2 - 2 * a * b * np.cos(theta1 - theta2)) # <-- law of cosines
    # computes the adjacency matrices
    data_plane = []
    for r in range(num_graphs):
        # generates dictionary of positions for each node: node_pos = {node_i: (radius, theta)}
        node_pos = {}
        for i in range(N):
            rnd_angle = np.random.random() * 2 * np.pi
            rnd_radii = np.random.random() * planarRadius
            node_pos.update({i: (rnd_radii, rnd_angle)})
        Adj_Matrix = np.zeros((N, N))
        for i in range(N):
            for j in range(N):
                ij_dist = dist(node_pos[i], node_pos[j])
                if ij_dist < planarThreshold:
                    Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix
        data_plane.append(Adj_Matrix)
    return data_plane
""" hyperbolic graphs """
def hyp_dist(rTheta1, rTheta2):
"""
Takes in Hyperbolic polar (native) coordinates and returns the corresponding hyperbolic distance
We compute hyperbolic distance using the "hyperbolic law of cosines" (see "Hyperbolic
Geometry of Complex Networks" by Krioukov et al)
:param rTheta1: tuple, (radius1, theta1) (note that the radius is a Hyperbolic distance)
:param rTheta2: tuple, (radius2, theta2)
:return: hyperbolic distance between two points in H^2, the hyperbolic plane with curvature -1
"""
# Euclidean polar coordinates:
a, b = rTheta1[0], rTheta2[0]
theta1, theta2 = rTheta1[1], rTheta2[1]
# hyperbolic distance according to "Hyperbolic Geometry of Complex Networks" by Krioukov et al
cosh1, cosh2 = np.cosh(a), np.cosh(b)
sinh1, sinh2 = np.sinh(a), np.sinh(b)
input = cosh1 * cosh2 - sinh1 * sinh2 * np.cos(theta1 - theta2)
input = max(1, input) # sometimes input = 0.99999.. and this messes up np.arccosh()
h_dist = np.arccosh(input) # hyperbolic law of cosines
return h_dist
def hyperbolic_generator():
    """
    Generate `num_graphs` random geometric graphs on the hyperbolic plane H^2.

    Note that we use 'inversion sampling' below, taken from "Gradient Descent in Hyperbolic Space" by
    B. Wilson & M. Leimeister (arxiv: 1805.08207).
    Inversion sampling draws radii inside a hyperbolic disc of radius
    `hyperbolicRadius` with a density consistent with uniform sampling of
    points on the hyperbolic plane.
    We generate a dict of the form node_pos = {node_i: (radius, theta)}, where
    radius is a hyperbolic distance from the origin (it is fed to hyp_dist).
    We compute hyperbolicThreshold by regarding thresholdFrac as the ratio of area around a node (where
    other nodes can connect to it) to the total area of the surface.
    In this case, the area around a node forms a hyperbolic circle, and we solve for the radius of that
    circle using:
        thresholdFrac = Area_sector / Total_area = (cosh(s) - 1) / (cosh(hyperbolicRadius) - 1)
    :return: list of length num_graphs of adjacency matrices, each matrix has size N x N
    """
    hyperbolicRadius = np.arccosh(1 + N / (2 * np.pi * pointDensity))
    hyperbolicThreshold = np.arccosh(1 + thresholdFrac * (np.cosh(hyperbolicRadius) - 1))
    data_hyperbolic = []
    for r in range(num_graphs):
        # generates dictionary of positions (polar coordinates) for each node:
        # node_pos = {node_i: (radius, theta)}; radii via inversion sampling
        node_pos = {}
        for i in range(N):
            rnd_angle = np.random.random() * 2 * np.pi
            p = np.random.random() # random float between 0 and 1
            rnd_radii = np.arccosh(1 + p * (np.cosh(hyperbolicRadius) - 1)) # <-- inversion sampling
            node_pos.update({i: (rnd_radii, rnd_angle)})
        # computes the adjacency matrix
        Adj_Matrix = np.zeros((N, N))
        for i in range(N):
            for j in range(N):
                ij_dist = hyp_dist(node_pos[i], node_pos[j])
                if ij_dist < hyperbolicThreshold:
                    Adj_Matrix[i, j] = 1 # nodes that are connected are assigned a 1 in the matrix
        data_hyperbolic.append(Adj_Matrix)
    return data_hyperbolic
""" generates all graphs """
def generate_all_graphs():
    """
    Generate spherical, planar and hyperbolic graph samples plus class labels.

    Labels use the encoding sphere=0, plane=1, hyperbolic=2 (for one-hot encoding).

    :return: tuple (all_graphs, all_labels): a list of 3 * num_graphs
             N x N adjacency matrices and the matching list of integer labels
    """
    # NOTE: the radius/threshold/area computations of the original version
    # were dead code (used only by commented-out debug prints) and have been
    # removed; each generator recomputes its own radius and threshold.
    sphere_graphs = sphere_generator()
    planar_graphs = plane_generator()
    hyperbolic_graphs = hyperbolic_generator()
    sphere_labels = [0] * num_graphs
    planar_labels = [1] * num_graphs
    hyperbolic_labels = [2] * num_graphs
    all_graphs = sphere_graphs + planar_graphs + hyperbolic_graphs
    all_labels = sphere_labels + planar_labels + hyperbolic_labels
    return (all_graphs, all_labels)
|
992,389 | 14841bd7749fb54e7ffcce8385f1b920e93909c4 | import subprocess
import json
import os
from multiprocessing import Pool
import string
import random
def getAccounts(node="eos"):
    """Return the list of account names for the given chain.

    Reads ``eosaccount.json`` or ``bosaccount.json`` from the directory of
    this module and returns its ``account_names`` entry.

    :param node: "eos" or "bos"
    :raises ValueError: for an unknown *node* (the original failed later with
        an UnboundLocalError instead)
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))
    if node == "eos":
        accountFile = os.path.join(current_dir, "eosaccount.json")
    elif node == "bos":
        accountFile = os.path.join(current_dir, "bosaccount.json")
    else:
        raise ValueError("unknown node type: %r" % (node,))
    with open(accountFile) as f:
        # json.load reads straight from the file object (was json.loads(f.read()))
        return json.load(f)["account_names"]
def unlock(password):
    """Unlock the local cleos wallet with *password*.

    WARNING: the command line (including the wallet password) is echoed to
    stdout below and is visible in the process list while cleos runs —
    consider piping the password via stdin instead.
    """
    cmd = [
        # "docker",
        # "exec",
        # "hungry_cori",
        "cleos",
        "wallet",
        "unlock",
        "--password",
        password,
    ]
    print(" ".join(cmd))
    # Result intentionally ignored (the original bound it to an unused local).
    subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def runcleos(cmd):
    """Run *cmd*, capturing stdout and stderr together; return the raw bytes."""
    completed = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return completed.stdout
def buyram(f, t, ram):
    """Buy *ram* kbytes of RAM for account *t*, paid for by account *f*
    (EOS mainnet via api.eosbeijing.one)."""
    cmd = ["cleos", "-u", "http://api.eosbeijing.one",
           "system", "buyram", f, t, f"{ram}", "--kbytes", "-p", f]
    return runcleos(cmd)
def delegatebw(f, t, net, cpu):
    """Stake NET and CPU bandwidth from account *f* to account *t*.

    Staking to oneself means passing the same account for both *f* and *t*.
    Amounts are formatted with 4 decimal places as required for EOS assets.
    """
    net_amount = "%.4f EOS" % net
    cpu_amount = "%.4f EOS" % cpu
    cmd = ["cleos", "-u", "http://api.eosbeijing.one",
           "system", "delegatebw", f, t, net_amount, cpu_amount, "-p", f]
    return runcleos(cmd)
def pushaction(contract, action, data, f, node="eos"):
    """Push *action* on *contract* with JSON payload *data*, signed by account *f*.

    :param node: "eos" (mainnet) or "bos" — selects the API endpoint
    """
    if node == "eos":
        apiurl = "http://api.eosbeijing.one"
    elif node == "bos":
        apiurl = "http://bospush.mytokenpocket.vip"
    cmd = ["cleos", "-u", apiurl, "push", "action",
           contract, action, json.dumps(data), "-p", f]
    return runcleos(cmd)
def runPool(f, accounts):
    # Apply *f* to every item of *accounts* in parallel using a process pool;
    # results are discarded (used for its side effects).
    with Pool() as pool:
        pool.map(f, accounts)
def gettable(code, scope, table, node="eos"):
    """Fetch a contract table via ``cleos get table`` and return the parsed JSON.

    :param node: "eos" or "bos" — selects the API endpoint
    """
    if node == "eos":
        apiurl = "http://api.eosbeijing.one"
    elif node == "bos":
        apiurl = "https://api.boscore.io"
    raw = runcleos(["cleos", "-u", apiurl, "get", "table", code, scope, table])
    return json.loads(raw)
def genrateRandomN(k=12):
    """Return a random lowercase ASCII string of length *k* (default 12,
    the length of an EOS account name)."""
    letters = random.choices(string.ascii_lowercase, k=k)
    return "".join(letters)
|
992,390 | c71b8c764adb911b7ffff63850369db507e2432b | import datetime
import matplotlib
from matplotlib.pyplot import plot
from skimage.color.rgb_colors import green, red, blue
from tables.idxutils import infinity
from win32timezone import now
from code.classifiers.MultiLOF import MultiLOF
from code.classifiers.MultiMultiLOF import MultiMultiLOF
from code.classifiers.OfflineLOF import OfflineLOF
from code.log.Print import *
from code.Evaluator import evaluate_classifier, evaluate_ids, train_classifier, evaluate_classifier_in_range, \
benign_and_fraud_sets_to_x_y
from code._definitions import VERBOSITY_general
from code.classifiers.ClassifierGenerator import *
from code.data_handles.DataCenter import DataCenter
from code.features.FeatureSelection import split_information_gain, select_k_best_features_fisher_score
from code.features.FetureExtractorUtil import *
from code.ids.AccumulativeOnesIDS import AccumulativeOnesIDS
from code.ids.ContiguousOnesIDS import ContiguousOnesIDS
import code.plotting.scatter_plot as scatplot
def vectorize(list_of_numbers):
    """Wrap each scalar in its own single-element list (column-vector form)."""
    return [[value] for value in list_of_numbers]
def do_something():
    """Run the full per-user experiment: feature extraction and selection,
    autoencoder-based dimensionality reduction, classifier training, and IDS
    evaluation over every user in data collection 3v2.

    NOTE: `print` here is the project's own styled print (imported via
    `from code.log.Print import *`), which accepts a style argument.
    NOTE(review): indentation was reconstructed from a whitespace-mangled
    source; verify that the final summary print sits inside the per-user loop.
    """
    # run_feature_extraction_tests()
    dc = DataCenter()
    dc.load_data_collection3v2()
    print("(finished loading)")
    for h in dc.user_hashes:
        print("USER {}".format(h), HEADER)
        print("extracting features...", HEADER)
        agg_win_size, agg_slide_size = 1, 10
        training_set = extract_features(dc.users_training[h], agg_win_size, agg_slide_size)
        testing_benign, testing_theft = dc.users_testing[h][0][0], dc.users_testing[h][0][1]
        testing_benign, testing_theft = extract_features_tests_separated(testing_benign, testing_theft,
                                                                         agg_win_size, agg_slide_size)
        print("selecting features...", HEADER)
        # Feature selection is fit on the FIRST test split of this user.
        selected_feature_indexes = select_k_best_features_fisher_score(k=15, label1_data=testing_benign, label2_data=testing_theft)
        training_set = remove_all_columns_except(training_set, selected_feature_indexes)
        testing_benign = remove_all_columns_except(testing_benign, selected_feature_indexes)
        testing_theft = remove_all_columns_except(testing_theft, selected_feature_indexes)
        print("generating classifiers...", HEADER)
        classifiers = list()
        # classifiers.append(generate_one_class_svm_linear())
        # classifiers.append(generate_one_class_svm_sigmoid())
        # classifiers.append(generate_one_class_svm_rbf())
        # classifiers.append(generate_one_class_svm_poly())
        encoder_decoder, encoder = generate_autoencoder(len(training_set[0]), hidden_to_input_ratio=0.3)
        # classifiers.append(encoder_decoder)
        # classifiers.append(generate_lstm_autoencoder(len(training_set[0]), 5))
        classifiers.append(MultiMultiLOF(num_of_mlofs=1000, num_of_samples_per_lof=3, k=3))
        # The autoencoder acts as a compressor: training data is replaced by
        # its scalar reconstruction score before classifier training.
        train_classifier(encoder_decoder, training_set)
        training_set = vectorize(encoder_decoder.predict_raw(training_set))
        # testing_benign = vectorize(encoder_decoder.predict_raw(testing_benign))
        # testing_theft = vectorize(encoder_decoder.predict_raw(testing_theft))
        # scatplot.plot1d(testing_benign, testing_theft, color1=blue, color2=red, offset_dim=0)
        print("training classifiers...", HEADER)
        for c in classifiers:
            train_classifier(c, training_set)
        distances = list()
        for i in range(1, len(dc.users_testing[h])):
            # load test data
            testing_benign, testing_theft = dc.users_testing[h][i][0], dc.users_testing[h][i][1]
            testing_benign, testing_theft = extract_features_tests_separated(testing_benign, testing_theft, agg_win_size, agg_slide_size)
            # filter out features based on previous feature selection
            testing_benign = remove_all_columns_except(testing_benign, selected_feature_indexes)
            testing_theft = remove_all_columns_except(testing_theft, selected_feature_indexes)
            testing_benign = vectorize(encoder_decoder.predict_raw(testing_benign))
            testing_theft = vectorize(encoder_decoder.predict_raw(testing_theft))
            best_dist = evaluate(classifiers, testing_benign, testing_theft)
            distances.append(best_dist)
            print("best distance (for user {}, test {}): {}".format(h, i, best_dist), BOLD + OKBLUE)
            print("plotting...", COMMENT)
            # scatplot.plot1d(testing_benign, testing_theft, color1=blue, color2=red, offset_dim=0)
            print("_____________")
        print("\ndistances: {}\n".format(distances), BOLD + OKBLUE)
    return
def extract_features_tests_separated(testing_benign, testing_theft, agg_win_size, agg_slide_size):
    """Extract features for benign and theft test data in one pass, then split
    the combined result back into (benign, theft).

    The two sets are concatenated so that windowed feature extraction sees one
    continuous sequence; the split point is the benign portion's length after
    aggregation (its raw length divided by agg_slide_size).
    """
    testing = list()
    testing_benign, testing_theft = cleanup(testing_benign), cleanup(testing_theft)
    testing.extend(testing_benign)
    testing.extend(testing_theft)
    testing = extract_features(testing, agg_win_size, agg_slide_size)
    # BUG FIX: compute the split index ONCE from the pre-aggregation benign
    # length.  The original divided by agg_slide_size a second time when
    # slicing the theft portion (using the already-shrunk benign list), so
    # benign windows leaked into the theft set.
    split_index = int(len(testing_benign) / agg_slide_size)
    testing_benign = testing[:split_index]
    testing_theft = testing[split_index:]
    return testing_benign, testing_theft
def extract_features(list_of_samples, agg_win_size=10, agg_slide_size=10):
    """Clean, (optionally) window-aggregate, normalise and finalise raw samples.

    Aggregation is skipped only when BOTH the window size and the slide size
    are 1.

    :param agg_win_size: sliding-window size for aggregation
    :param agg_slide_size: sliding-window step
    :return: processed list of feature vectors
    """
    list_of_samples = cleanup(list_of_samples)
    if agg_win_size != 1 or agg_slide_size != 1:
        list_of_samples = aggregate_samples_using_sliding_windows(list_of_samples, agg_win_size, agg_slide_size)
    list_of_samples = normalize_feature_vector_to_unit_size(list_of_samples)
    # list_of_samples = derivate_samples(list_of_samples)
    list_of_samples = finish(list_of_samples)
    return list_of_samples
def evaluate(classifiers, testing_benign, testing_theft):
    """Tune classifier thresholds, wrap each classifier in IDS variants, and
    return the best (smallest-magnitude) benign/fraud distance found.

    Classifiers exposing a threshold get it optimised by a grid search over
    [0, 0.1]; the rest are evaluated directly.  Each classifier is then
    combined with ContiguousOnes/AccumulativeOnes IDS wrappers over a range
    of thresholds, and the distance closest to zero wins.
    """
    print("evaluating...", HEADER)
    for c in classifiers:
        if c.has_threshold():
            opt_threshold = evaluate_classifier_in_range(classifier=c, test_set_benign=testing_benign,
                                                         test_set_fraud=testing_theft, threshold_begin=0,
                                                         threshold_end=0.1, num_of_steps=10000,
                                                         verbosity=0)
            c.set_threshold(opt_threshold)
            print("{} has been set threshold {}".format(c.get_name(), opt_threshold), COMMENT)
        else:
            print("Normal Evaluation (no threshold):", UNDERLINE + OKGREEN)
            evaluate_classifier(classifier=c, test_set_benign=testing_benign, test_set_fraud=testing_theft, verbosity=VERBOSITY_general)
    # Build IDS wrappers with thresholds 1, 4, 7, ..., 88 for each classifier.
    IDSs = list()
    for c in classifiers:
        for j in range(0, 90, 3):
            IDSs.append(ContiguousOnesIDS(classifier=c, threshold=j + 1))
            IDSs.append(AccumulativeOnesIDS(classifier=c, threshold=j + 1))
    print("EVALUATING ANOMALY DETECTORS", UNDERLINE + OKBLUE)
    print("")
    best_dist = infinity
    for ids in IDSs:
        current_dist = evaluate_ids(ids=ids, test_set_benign=testing_benign, test_set_fraud=testing_theft,
                                    verbosity=0)
        print("{}: distance={}".format(ids.get_name(), current_dist))
        # Keep the distance with the smallest absolute value (closest to zero).
        if abs(current_dist) < abs(best_dist):
            best_dist = current_dist
    return best_dist
def main():
    """Entry point: run the experiment and dump the collected log to a
    timestamped file in the working directory."""
    # with open("log.txt", "r") as text_file:
    #     print("Printing LOG:____________________________________________", HEADER + BOLD + UNDERLINE)
    #     blank_line()
    #     print(text_file.read())
    print("Welcome to Awesomoriarty!", HEADER)
    do_something()
    print("bye bye", HEADER)
    # save log
    date = datetime.datetime.now()
    filename = "log_{}-{}-{}.{}-{}-{}.{}.txt".format(date.day, date.month, date.year,
                                                     date.hour, date.minute, date.second,
                                                     date.microsecond)
    with open(filename, "w") as text_file:
        text_file.write(LogSingleton.get_singleton().get_log_string())
    return
# Script entry point.
if __name__ == "__main__":
    main()
|
992,391 | 745526f794af6a085c4ee0e2e1c377462c2e8127 | import cv2
import numpy as np
# Load the image, flood-fill starting from the seed point (350, 250) on an
# inverted binary mask, recolour the regions, and display the result.
img = cv2.imread("re_esq.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h, w = img.shape[:2]
# floodFill requires a mask 2 px larger than the image in each dimension.
mask = np.zeros((h+2, w+2), np.uint8)
# Inverted binary threshold: pixels <= 10 become 255, everything else 0.
ret, thresh2 = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY_INV)
cv2.floodFill(thresh2, mask, (350, 250), (255, 255, 0))
thresh2 = cv2.cvtColor(thresh2, cv2.COLOR_GRAY2RGB)
# Paint remaining [0, 0, 0] pixels with a constant colour ...
thresh2[np.where((thresh2 == [0, 0, 0]).all(axis=2))] = [0, 255, 255]
# ... and copy the original image back into the white (255, 255, 255) region.
thresh2[np.where((thresh2 == [255, 255, 255]).all(axis=2))
        ] = img[np.where((thresh2 == [255, 255, 255]).all(axis=2))]
cv2.imshow("img", thresh2)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
992,392 | 9c0dd85afcff05e9b7cdd2f8471a9bab5752a185 | from typing import List, Callable, Union, Any, TypeVar, Tuple, Dict
# Type variables used as loose aliases throughout the project.
# NOTE(review): by convention TypeVar's first argument should match the
# variable name ('Tensor', 'JSONType', 'ClassType'); these use descriptive
# strings instead — harmless at runtime but confusing to type checkers.
Tensor = TypeVar('torch.tensor')
JSONType = TypeVar('json')
ClassType = TypeVar('Class')
992,393 | 45dd1680bbd7b510de9c4bb6026d467802f5276c | def make_jump(loc, element, max):
    # Collect the in-range landing positions reached by jumping `element`
    # steps forward or backward from index `loc`.  Valid positions satisfy
    # 0 < pos < max (index 0 holds a sentinel, so it is excluded).
    # NOTE: the parameter named `max` shadows the builtin inside this function.
    result = []
    if 0 < loc + element < max:
        result.append(loc + element)
    if 0 < loc - element < max:
        result.append(loc - element)
    return result
def find_nearest(seq, check, loc, places, jumps, round=1):
    """Walk the jump graph from index *loc*, looking for the nearest value
    whose parity differs from *check*.

    :param seq: list of jump lengths indexed by position (index 0 is a sentinel)
    :param check: value whose parity we are trying to escape
    :param loc: current position
    :param places: already-visited positions (mutated in place)
    :param jumps: memo of already-computed answers per position (0 = unknown)
    :param round: number of jumps taken so far
    :return: minimal number of jumps to reach an opposite-parity value, or -1
        when no unvisited position is reachable
    """
    # (Removed from the original: a dead debug branch `if loc == 87: print()`
    # and two unused locals reading seq[i]/jumps[i].)
    possiblities = make_jump(loc, seq[loc], len(seq))
    found_one = False
    cand = 1000  # best memoised answer among reachable neighbours
    for i in possiblities:
        if i in places:
            continue
        found_one = True
        if seq[i] % 2 != check % 2:
            # Opposite parity reached directly: the current jump count answers it.
            return round
        elif jumps[i] != 0 and cand > jumps[i] + 1:
            cand = jumps[i] + 1
    for i in possiblities:
        if i not in places:
            places.append(i)
            if cand < round + 1:
                return cand
            return find_nearest(seq, check, i, places, jumps, round + 1)
    if not found_one:
        return -1
def populate_jump(seq):
    """Compute, for every position except the sentinel at index 0, the minimal
    number of jumps needed to reach a value of opposite parity."""
    jumps = [-1] + [0] * (len(seq) - 1)
    for pos in range(1, len(jumps)):
        jumps[pos] = find_nearest(seq, seq[pos], pos, [pos], jumps)
    return jumps
# Read the element count (unused), read the sequence, prepend a "-1" sentinel,
# compute the per-position jump answers and print them space-separated.
useless = input()
numbers = ["-1"] + input().split()
for i in range(len(numbers)):
    numbers[i] = int(numbers[i])
answer = populate_jump(numbers)
for i in range(len(answer)):
    answer[i] = str(answer[i])
print(" ".join(answer))
992,394 | af6c6c0846f77a4f1ef011e56abdaab3c7511bbd | # ActivitySim
# See full license in LICENSE.txt.
import logging
from activitysim.core.util import assign_in_place
logger = logging.getLogger(__name__)
def failed_trip_cohorts(trips, failed):
    """Return a boolean Series flagging every trip that belongs to a tour
    containing at least one failed trip."""
    # now that trips['earliest'] gets reset after outbound trips are scheduled,
    # it becomes clear that cohorts need to be defined over the entire tour
    # rather than the tour-leg
    failed_tours = trips.tour_id[failed]
    return trips.tour_id.isin(failed_tours)
def flag_failed_trip_leg_mates(trips_df, col_name):
    """
    set boolean flag column of specified name to identify failed trip leg_mates in place

    A "leg mate" here is a NON-failed trip that shares its tour with a failed
    trip; the failed trips themselves are excluded from the flag.
    """
    failed_trip_leg_mates = failed_trip_cohorts(trips_df, trips_df.failed) & ~trips_df.failed
    trips_df.loc[failed_trip_leg_mates, col_name] = True
    # handle outbound and inbound legs independently
    # for ob in [True, False]:
    #     same_leg = (trips_df.outbound == ob)
    #     # tour_ids of all tours with a failed trip in this (outbound or inbound) leg direction
    #     bad_tours = trips_df.tour_id[trips_df.failed & same_leg].unique()
    #     # not-failed leg_mates of all failed trips in this (outbound or inbound) leg direction
    #     failed_trip_leg_mates = same_leg & (trips_df.tour_id.isin(bad_tours)) & ~trips_df.failed
    #     # set the flag column
    #     trips_df.loc[failed_trip_leg_mates, col_name] = True
def cleanup_failed_trips(trips):
    """
    drop failed trips and cleanup fields in leg_mates:

    trip_num        assign new ordinal trip num after failed trips are dropped
    trip_count      assign new count of trips in leg, sans failed trips
    first           update first flag as we may have dropped first trip (last trip can't fail)
    next_trip_id    assign id of next trip in leg after failed trips are dropped
    """
    if trips.failed.any():
        logger.warning("cleanup_failed_trips dropping %s failed trips" % trips.failed.sum())
        # mark the non-failed trips that share a tour with a failed trip
        trips['patch'] = False
        flag_failed_trip_leg_mates(trips, 'patch')
        # drop the original failures
        trips = trips[~trips.failed]
        # increasing trip_id order
        patch_trips = trips[trips.patch].sort_index()
        # recompute fields dependent on trip_num sequence
        grouped = patch_trips.groupby(['tour_id', 'outbound'])
        patch_trips['trip_num'] = grouped.cumcount() + 1
        # trip_count = position in leg + number of trips remaining after it
        patch_trips['trip_count'] = patch_trips['trip_num'] + grouped.cumcount(ascending=False)
        # write the recomputed columns back onto the surviving trips
        assign_in_place(trips, patch_trips[['trip_num', 'trip_count']])
        del trips['patch']
    del trips['failed']
    return trips
|
992,395 | 5f5bc8d952b5aec380501fdf626be7e1d398590c | #!/usr/bin/python
import sys, os
from subprocess import call
def get_current_folder_size(folder):
    """Recursively compute the total size in bytes of *folder*: the directory
    entry itself, its files, and all subdirectories."""
    total = os.path.getsize(folder)
    for name in os.listdir(folder):
        full_path = os.path.join(folder, name)
        if os.path.isfile(full_path):
            total += os.path.getsize(full_path)
        elif os.path.isdir(full_path):
            total += get_current_folder_size(full_path)
    return total
# NOTE: Python 2 syntax (print statement).
# NOTE(review): "~" is passed literally — os.path.getsize does NOT expand it,
# so this call will fail with FileNotFoundError/OSError; wrap the path in
# os.path.expanduser().  The shell command below DOES expand "~" (shell=True),
# so the two paths refer to different locations as written.
file_size = get_current_folder_size("~/Dropbox/.dropbox.cache")
if(file_size > 1024**3): # 1G cache size limit
    call('rm -r ~/Dropbox/.dropbox.cache', shell=True)
else:
    print "No need to clean cache"
992,396 | c6b54693893a44bfd5f2ec463a2af4dda2457f5d | from db.db import Base
from sqlalchemy import Column, String, DateTime, Integer, Float
from sqlalchemy.orm import relationship
import time
from .associate_table import Domain_Port, User_Domain
class Data(Base):
    """Scan-result row: one (domain, subdomain) pair with a discovery
    timestamp, linked many-to-many to ports and users."""
    __tablename__ = "data"
    id = Column(Integer, primary_key=True, index=True)
    domain = Column(String)
    subdomain = Column(String)
    # BUG FIX: the original used default=time.time(), which is evaluated ONCE
    # at import time, stamping every row with the module-load timestamp.
    # Passing the callable makes SQLAlchemy invoke it per INSERT.
    time = Column(Float, default=time.time)
    ports = relationship("Port", secondary=Domain_Port, back_populates = "domains")
    users = relationship("User", secondary=User_Domain, back_populates = "domains")
    # ports = relationship("Domain_Port", back_populates="")
992,397 | 2f4afc3c8bffdd2b53ea44ac87a17a91d8ac9ece | import random as rnd
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import beta
import argparse
import numpy as np
# Command-line interface; defaults model a single Bayesian learner observing
# batches of 10 productions.
parser = argparse.ArgumentParser(description='Batch frequency learning simulation using a single Bayesian agent')
parser.add_argument('--observations', '-o', default=5, type=int,
                    help="An integer representing the starting count of 1s")
parser.add_argument('--alpha', '-a', default=1, type=float,
                    help="A float representing the prior bias (alpha)")
parser.add_argument('--runs', '-r', default=1000, type=int,
                    help="An integer representing the number of runs wanted")
parser.add_argument('--learning', '-l', default="sample", type=str,
                    help="Learning strategy, can be max, avg or sample")
parser.add_argument('--production', '-p', default="sample", type=str,
                    help="Production strategy, can be max softmax or sample")
# NOTE: the default is the string "2"; argparse applies type=float to string
# defaults, so args.exponent still comes out as 2.0.
parser.add_argument('--exponent', '-e', default="2", type=float,
                    help="Exponent used in softmax")
args = parser.parse_args()
# Module-level globals read by produce() and iterate() below.
#starting_count_w1=args.observations
production=args.production
alpha = args.alpha
#expt=args.exponent
def generate(starting_count_w1, n_productions):
    """Return a batch of observations: *starting_count_w1* ones followed by
    zeros, *n_productions* items in total."""
    zeros_needed = n_productions - starting_count_w1
    return [1] * starting_count_w1 + [0] * zeros_needed
#output
def produce(p):
    """Emit one token (1 or 0) given probability *p* of producing a 1,
    using the module-level ``production`` strategy.

    Strategies: 'sample' (probability matching), 'max' (always pick the
    majority variant) or 'softmax' (exaggerate p with the global exponent
    ``expt`` before sampling).
    """
    p0 = 1 - p
    if production == "sample":
        return 1 if rnd.random() < p else 0
    elif production == "max":
        # Maximization: deterministic majority choice.
        return 1 if p >= 0.5 else 0
    elif production == "softmax":
        # Soft maximization: sharpen (or flatten) p via the exponent.
        p1 = p ** expt / (p ** expt + p0 ** expt)
        return 1 if rnd.random() < p1 else 0
#----
# Hypothesis choice
# every run counts the occurrences of 1s
def iterate(number_of_ones, alpha):
    """Simulate ``args.runs`` learn/produce episodes.

    Each run infers a language (a probability of producing a 1) from
    *number_of_ones* observed 1s out of 10 under a symmetric Beta(alpha,
    alpha) prior — by sampling, the posterior mode ('max') or the posterior
    mean ('avg') depending on ``args.learning`` — then produces 10 new
    tokens with it via produce().

    Returns a list of (count_of_ones, probability) pairs: the empirical
    distribution of produced 1-counts over all runs.
    """
    from collections import Counter

    ones = []  # count of 1s produced in every run
    runs = args.runs
    learning = args.learning
    for r in range(runs):
        if learning == "sample":
            # Draw one hypothesis from the Beta posterior.
            language = beta.rvs(alpha + number_of_ones, alpha + (10 - number_of_ones))
        elif learning == "max":
            # Posterior mode (MAP estimate); alpha is a float, so this
            # divides exactly even under Python 2 division rules.
            language = (alpha + number_of_ones - 1) / (alpha * 2 + 10 - 2)
        elif learning == "avg":
            # Posterior mean.
            language = (alpha + number_of_ones) / (alpha * 2 + 10)
        data = [produce(language) for _ in range(10)]  # one list of 0/1 tokens
        ones.append(float(data.count(1)))
    # Perf fix: the original called ones.count(c) for every element (O(n^2));
    # a single Counter pass yields identical frequencies in O(n).
    d = Counter(ones)
    # Empirical probabilities as (count, prob) tuples.
    prob = [(n, float(freq) / len(ones)) for n, freq in d.items()]
    return prob
#print "probabilities: ",prob[1:10]
#----
#starting input ratio and number of productions
# 9x6 grid of subplots: one row per (alpha, exponent) pair, one column per
# observation count; each cell shows the distribution of produced 1-counts.
fig, axes = plt.subplots(nrows=9, ncols=6, figsize=(14, 8)) #
fs = 10
alphas = [0.01,1,100]
exponents= [1.5,5,15]
obs = range(0,6)
row = -1
bigprob = []
for a in alphas:
    # NOTE(review): this loop variable IS the module-level ``expt`` that
    # produce() reads for the softmax strategy — do not rename it.
    for expt in exponents:
        row += 1
        print "- outer loop - exponent={0},row={1},".format(expt,row)
        for i in obs: ##populate one row
            print "--inner loop - i={0},".format(i)
            prob=iterate(i+5,a)
            print "-- inner loop - prob={0},".format(prob)
            bigprob.append(prob)
            print "-- inner loop - bigprob={0},".format(bigprob)
            print "-- inner loop - xarray={0},".format([x[0] for x in bigprob[i]])
            axes[row, i].bar([x[0] for x in bigprob[i]],[x[1] for x in bigprob[i]],align='center',width=0.3,color='b')
            axes[row, i].set_title("exponent: "+str(expt), fontsize=fs)
            # Red dashed line marks the input count of 1s for this column.
            axes[row, i].axvline(x=i+5, color='r', linestyle='dashed', linewidth=2)
            axes[row, i].xaxis.set_ticks(np.arange(0, 10, 1))
            axes[row, i].yaxis.set_ticks(np.arange(0, 1, 0.5))
        bigprob = []
#plots
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(16, 8)
plt.show()
|
992,398 | 7b6ca0c925f40b6b5ce47f6c0b92ca2a1ed5b200 | from __future__ import unicode_literals
from pytest_cagoule.git_parser import get_diff_changes
# Fixture: a single-hunk diff — one blank line appended after line 3 of
# README.rst.
diff1 = """diff --git a/README.rst b/README.rst
index 8902dc2..9de8bf2 100644
--- README.rst
+++ README.rst
@@ -3,0 +4 @@ pytest-cagoule
+
"""
# Fixture: a multi-file diff — two hunks in README.rst, one in setup.py,
# and a brand-new file (MANIFEST.in) which the parser is expected to skip.
diff2 = """diff --git a/README.rst b/README.rst
index 8902dc2..f39170e 100644
--- README.rst
+++ README.rst
@@ -3,0 +4 @@ pytest-cagoule
+
@@ -29,4 +28,0 @@ Install **cagoule** using ``pip``::
-License
--------
-
-MIT. See ``LICENSE`` for details
diff --git a/setup.py b/setup.py
index bbe47a8..e83a1a7 100644
--- setup.py
+++ setup.py
@@ -0,0 +1 @@
+
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..0c73842
--- /dev/null
+++ MANIFEST.in
@@ -0,0 +1 @@
+include README.rst LICENSE
"""
def test_get_diff_changes_simple():
    """A one-hunk diff yields a single (filename, start, end) change."""
    changes = tuple(get_diff_changes(diff1))
    assert changes == (("README.rst", 3, 3),)
def test_get_diff_changes_multiple():
    """Changes across several files come back in order; newly added files
    (MANIFEST.in in the fixture) are not reported."""
    changes = tuple(get_diff_changes(diff2))
    assert changes == (
        ("README.rst", 3, 3),
        ("README.rst", 29, 33),
        ("setup.py", 0, 0),
    )
def test_malformed_diff():
    """Input that merely starts like a diff produces no changes at all."""
    bogus = "diff foo\nbar"
    assert tuple(get_diff_changes(bogus)) == ()
|
992,399 | 25a9dcb1675cc87c50e0adaf25becaa134837e2d | """Test module for ip fabric snat for k8s
this module contains the restart and reboot scenario based
testcases to verify the behavior of the ip fabric snat with k8s
"""
from common.k8s.base import BaseK8sTest
from tcutils.wrappers import preposttest_wrapper
from tcutils.util import get_random_name, skip_because
import test
import time
from tcutils.contrail_status_check import ContrailStatusChecker
class TestFabricSNATRestarts(BaseK8sTest):
    """Restart/reboot resiliency tests for ip-fabric SNAT with k8s.

    Each test sets up SNAT-enabled namespaces plus pods, verifies fabric
    reachability, disrupts one service or node, and re-verifies.
    """

    @classmethod
    def setUpClass(cls):
        super(TestFabricSNATRestarts, cls).setUpClass()
        # Fabric-side target for reachability pings: the k8s master when
        # kubernetes runs as a slave orchestrator, else the first control node.
        if cls.inputs.slave_orchestrator == 'kubernetes':
            cls.ip_to_ping = cls.inputs.k8s_clusters[0]['master_ip']
        else:
            cls.ip_to_ping = cls.inputs.bgp_control_ips[0]

    @classmethod
    def tearDownClass(cls):
        super(TestFabricSNATRestarts, cls).tearDownClass()

    def parallel_cleanup(self):
        # Tear down pod fixtures in parallel to shorten cleanup time.
        parallelCleanupCandidates = ["PodFixture"]
        self.delete_in_parallel(parallelCleanupCandidates)

    def setup_common_namespaces_pods(self, isolation=False, ip_fabric_snat=False,
                                     ip_fabric_forwarding=False):
        """ common routine to create the namespaces and the pods by enabling the fabric snat
            and fabric forwarding
            1.create 3 namespaces (ns1:enable snat, ns2:enable fabric forwarding and snat, ns3:enable snat)
            2.create pods in each namespace and verify (ns1:pod1,pod2, ns2:pod1, ns3:pod1, default:pod1)
            Returns four client groups: (ns1 pods+ns, ns2 pod+ns, ns3 pod+ns, [default-ns pod])
        """
        namespace1_name = get_random_name("ns1")
        namespace2_name = get_random_name("ns2")
        namespace3_name = get_random_name("ns3")
        namespace1 = self.setup_namespace(name = namespace1_name, isolation = isolation,
                                          ip_fabric_snat = ip_fabric_snat,
                                          ip_fabric_forwarding = False)
        # ns2 is the only namespace that also enables ip-fabric forwarding.
        namespace2 = self.setup_namespace(name = namespace2_name, isolation = isolation,
                                          ip_fabric_snat = ip_fabric_snat,
                                          ip_fabric_forwarding = ip_fabric_forwarding)
        namespace3 = self.setup_namespace(name = namespace3_name, isolation = isolation,
                                          ip_fabric_snat = ip_fabric_snat,
                                          ip_fabric_forwarding = False)
        assert namespace1.verify_on_setup()
        assert namespace2.verify_on_setup()
        assert namespace3.verify_on_setup()
        label1 = "snat"
        label2 = "snatfabric"
        #create a pod in default namespace
        pod1_in_default_ns = self.setup_ubuntuapp_pod()
        #create two pods in snat enabled namespace
        pod1_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                               labels={'app': label1})
        pod2_in_ns1 = self.setup_ubuntuapp_pod(namespace=namespace1_name,
                                               labels={'app': label1})
        #create a pod in snat and ip fabric enabled namespace
        pod1_in_ns2 = self.setup_ubuntuapp_pod(namespace=namespace2_name,
                                               labels={'app': label2})
        #create a pod in snat enabled namespace
        pod1_in_ns3 = self.setup_ubuntuapp_pod(namespace=namespace3_name,
                                               labels={'app': label1})
        assert pod1_in_default_ns.verify_on_setup()
        assert pod1_in_ns1.verify_on_setup()
        assert pod2_in_ns1.verify_on_setup()
        assert pod1_in_ns2.verify_on_setup()
        assert pod1_in_ns3.verify_on_setup()
        client1 = [pod1_in_ns1, pod2_in_ns1, namespace1]
        client2 = [pod1_in_ns2, namespace2]
        client3 = [pod1_in_ns3, namespace3]
        client4 = [pod1_in_default_ns]
        return (client1, client2, client3, client4)
    #end setup_common_namespaces_pods

    def verify_ping_between_pods_across_namespaces_and_public_network(self, client1, client2,
                                                                      client3, client4):
        """
        1.verifies the ping between pods in the snat enabled namespace
        2.verifies the ping between pods across the snat enabled namespaces
        3.verifies the ping between pods across snat and fabric forwarding enabled namespaces
        4.verifies the ping between pods across snat enabled and default namespaces
        5.verifies the public reachability from the pods in snat enabled namespace
        """
        assert client1[0].ping_to_ip(self.ip_to_ping)
        assert client1[1].ping_to_ip(self.ip_to_ping)
        #assert client2[0].ping_to_ip(self.ip_to_ping)#ip fabric forwaring takes precedence
        # Pods within the same SNAT namespace can reach each other.
        assert client1[0].ping_to_ip(client1[1].pod_ip)
        #verifying pods in isolated/default namespaces should not reach each other when snat is enabled
        assert client1[0].ping_to_ip(client2[0].pod_ip, expectation=False)
        assert client1[0].ping_to_ip(client3[0].pod_ip, expectation=False)
        assert client1[0].ping_to_ip(client4[0].pod_ip, expectation=False)

    @test.attr(type=['k8s_sanity'])
    @preposttest_wrapper
    def test_snat_with_kube_manager_restart(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.restart the kube manager service
        3.re verify pods can reach to public network when snat is enabled
        """
        self.addCleanup(self.invalidate_kube_manager_inspect)
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        #perform the kube manager restart
        self.restart_kube_manager()
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_kube_manager_restart

    @test.attr(type=['k8s_sanity'])
    @preposttest_wrapper
    def test_snat_with_vrouter_agent_restart(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.restart the vrouter agent on nodes
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        #perform the vrouter agent restart
        self.restart_vrouter_agent()
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_vrouter_agent_restart

    @preposttest_wrapper
    def test_snat_pod_restart(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.restart the pods which are created in snat enabled namespaces
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2, client3, client4)
        assert self.restart_pod(client1[0])
        assert self.restart_pod(client2[0])
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_pod_restart

    @test.attr(type=['k8s_sanity'])
    @skip_because(slave_orchestrator='kubernetes')
    @preposttest_wrapper
    def test_snat_with_docker_restart(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.restart the container runtime service on the slave nodes
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        # NOTE(review): despite the test name, this restarts containerd
        # (not the docker daemon) on the slave nodes.
        self.inputs.restart_service(service_name = "containerd",
                                    host_ips = self.inputs.k8s_slave_ips)
        time.sleep(60) # Wait timer for all contrail service to come up.
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_docker_restart

    @preposttest_wrapper
    def test_snat_with_kubelet_restart_on_slave(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.restart the kubelet service on the slave nodes
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        self.inputs.restart_service(service_name = "kubelet",
                                    host_ips = self.inputs.k8s_slave_ips)
        time.sleep(30) # Wait timer for all kubernetes pods to stablise.
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_kubelet_restart_on_slave

    @preposttest_wrapper
    def test_snat_with_kubelet_restart_on_master(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.restart the kubelet service on the master node
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        self.inputs.restart_service(service_name = "kubelet",
                                    host_ips = [self.inputs.k8s_master_ip])
        time.sleep(30) # Wait timer for all kubernetes pods to stablise.
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_kubelet_restart_on_master

    @preposttest_wrapper
    def test_snat_with_docker_restart_on_master(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.restart the docker service on master
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        self.inputs.restart_service(service_name = "docker",
                                    host_ips = [self.inputs.k8s_master_ip])
        time.sleep(30)
        # Restarting docker on the master bounces contrail containers too;
        # wait for the whole cluster to stabilise before re-verifying.
        cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()
        assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (
            error_nodes)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_docker_restart

    @preposttest_wrapper
    def test_snat_with_master_reboot(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.reboot the master node
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        self.inputs.reboot(self.inputs.k8s_master_ip)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_master_reboot

    @preposttest_wrapper
    def test_snat_with_nodes_reboot(self):
        """
        1.verifies pods can reach to public network when snat is enabled
        2.reboot the master node and all the slave nodes
        3.re verify pods can reach to public network when snat is enabled
        """
        client1, client2, client3, client4 = self.setup_common_namespaces_pods(isolation=True,
                                                                               ip_fabric_snat=True,
                                                                               ip_fabric_forwarding=True)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
        self.inputs.reboot(self.inputs.k8s_master_ip)
        for node in self.inputs.k8s_slave_ips:
            self.inputs.reboot(node)
        self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                           client3, client4)
    #end test_snat_with_nodes_reboot
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.