text stringlengths 38 1.54M |
|---|
class ClientSocket:
    """Thin wrapper around an already-connected socket for simple text exchanges."""

    def __init__(self, socket):
        # Keep a reference to the connected socket object.
        self.socket = socket

    def send(self, message):
        """Placeholder for sending a message (not implemented)."""
        pass

    def receive(self):
        """Placeholder for receiving a message (not implemented)."""
        pass

    def prompt(self, prompt_message):
        """Send *prompt_message* to the peer and return its reply, UTF-8 decoded."""
        encoded = prompt_message.encode('utf-8')
        self.socket.sendall(encoded)
        reply = self.socket.recv(1024)
        return reply.decode('utf-8')
|
# -*- coding: utf-8 -*-
"""
Utilities related to optimisation.
"""
import logging
from typing import Optional
import numpy as np
from scipy.optimize import minimize
from scipy.special import logsumexp
logger = logging.getLogger(__name__)
def optimise_meta_proposal_weights(
    samples: np.ndarray,
    log_q: np.ndarray,
    method: str = "SLSQP",
    options: Optional[dict] = None,
    initial_weights: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Optimise the weights of the meta proposal.

    Uses :code:`scipy.optimize.minimize`.

    Parameters
    ----------
    samples
        Structured array of samples drawn from the initial meta proposal.
        Must contain the fields :code:`it` (proposal index), :code:`logL`
        and :code:`logQ`.
    log_q
        Array of log probabilities for each proposal for each sample,
        shape (# samples, # proposals).
    method
        Optimisation method to use. See scipy docs for details.
    options
        Dictionary of options for :code:`scipy.optimize.minimize`. Defaults
        to :code:`{'ftol': 1e-10}` when using SLSQP.
    initial_weights
        Optional starting point for the optimisation. Normalised to sum to
        one; defaults to the empirical fraction of samples per proposal.

    Returns
    -------
    np.ndarray
        Optimised weights, one per proposal, each bounded to [0, 1] and
        constrained to sum to one.
    """
    if options is None and method == "SLSQP":
        # Tighter tolerance than the SLSQP default.
        options = dict(ftol=1e-10)
    n_prop = log_q.shape[-1]
    counts = np.unique(samples["it"], return_counts=True)[1]
    if initial_weights is None:
        initial_weights = counts / counts.sum()
    else:
        initial_weights = initial_weights / initial_weights.sum()
    # Evidence estimate used to normalise the target log-probabilities.
    log_Z = logsumexp(samples["logL"] - samples["logQ"]) - np.log(len(samples))
    log_pr = samples["logL"] - log_Z
    log_pr -= logsumexp(log_pr)
    pr = np.exp(log_pr)

    def loss(weights):
        """Negative mean of target-weighted log meta-proposal density (KL up to a constant)."""
        log_Q = logsumexp(log_q, b=weights, axis=1)
        return -np.mean(pr * log_Q)

    # Weights must sum to one
    constraint = {"type": "eq", "fun": lambda x: 1 - x.sum()}
    logger.info("Starting optimisation")
    result = minimize(
        loss,
        initial_weights,
        constraints=constraint,
        bounds=n_prop * [(0, 1)],
        method=method,
        options=options,
    )
    logger.info("Finished optimisation")
    # Lazy %-formatting: the string is only built when DEBUG is enabled.
    logger.debug("Final weights: %s", result.x)
    return np.array(result.x)
|
from api.PersonalCenter.PersonalCenter import *
class Pam():
    """Facade that wires up the personal-centre API client."""

    def __init__(self, api_url, **kwargs):
        """Remember the API base URL and build the PersonalCenter client with it."""
        self.api_url = api_url
        # NOTE: attribute name 'personnal' (sic) kept for backward compatibility.
        self.personnal = PersonalCenter(api_url, **kwargs)
|
from BanahawApp import Session,Mini_func
from BanahawApp.table import T_Facial_Services
class Facialmodel(Mini_func):
    """Data-access helpers for the facial-services table."""

    def __init__(self, **kwargs):
        # One ORM session per model instance; extra kwargs are kept for later use.
        self.__session = Session()
        self.__args = kwargs

    def get_services(self):
        """Return all facial services as a list of JSON-able dicts, or None if empty."""
        result = self.__session.query(T_Facial_Services).all()
        temp_list = [row.toJSONExcept() for row in result]
        return temp_list if temp_list else None

    def insert_facial_service(self, **kwargs):
        """Create a new facial-service row from keyword arguments and commit it."""
        fservices = T_Facial_Services()
        for key, value in kwargs.items():
            try:
                setattr(fservices, key, value)
            except TypeError:
                # Skip attributes the ORM model rejects.
                continue
        self.__session.add(fservices)
        self.__session.commit()

    def del_facial_service(self, **kwargs):
        """Delete the facial service identified by kwargs['id'], rolling back on failure."""
        fsid = kwargs.get('id', 0)
        resdata = self.__session.query(T_Facial_Services).filter(
            T_Facial_Services.facial_services_id == fsid).first()
        if resdata:
            self.__session.delete(resdata)
        try:
            self.__session.commit()
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows KeyboardInterrupt / SystemExit.
            self.__session.rollback()

    def edit_facial_services(self, **kwargs):
        """Update price/duration fields of an existing facial service."""
        facialid = kwargs.get('facial_services_id', 0)
        result = self.__session.query(T_Facial_Services).filter(
            T_Facial_Services.facial_services_id == facialid).first()
        if result is None:
            # Bug fix: previously setattr/commit ran on a missing row and raised.
            return
        update_param = ['member_price', 'non_member_price', 'duration']
        for param in update_param:
            data = kwargs.get(param, None)
            if data:
                try:
                    setattr(result, param, data)
                except TypeError:
                    continue
        self.__session.commit()
|
import json
import dateutil.parser
from typing import Dict
class ReferenceError(Exception):
    """Raised when a citation template cannot be parsed.

    NOTE(review): this shadows the builtin ``ReferenceError``; the name is
    kept for backward compatibility with existing callers.
    """
class References(object):
    """Container mapping reference numbers to parsed Reference objects."""

    def __init__(self, references: Dict[int, 'Reference']) -> None:
        self.references = references

    def to_json(self):
        """Serialise every contained reference, keyed by its number."""
        serialised = {}
        for number, reference in self.references.items():
            serialised[number] = reference.to_json()
        return serialised
class Reference(object):
    """A single citation parsed from a wikitext template blob."""

    def __init__(self, id_, template):
        self.id = id_
        self._parse_type(template)
        self._parse_params(template)
        self._parse_title()
        self._parse_url()
        self._parse_date()

    def _parse_type(self, template):
        """Extract the template name (e.g. 'cite web'); raise ReferenceError if absent."""
        try:
            target = template['parts'][0]['template']['target']['wt']
        except (KeyError, IndexError):
            raise ReferenceError(template)
        self.type = target.strip().lower()

    def _parse_params(self, template):
        """Collect the template's wikitext parameters into self.params."""
        self.params = {}
        try:
            params = template['parts'][0]['template']['params']
        except (KeyError, IndexError):
            return
        for name, value in params.items():
            try:
                self.params[name] = value['wt'].strip()
            except KeyError:
                pass

    def _parse_title(self):
        """Take the 'title' parameter when present."""
        self.title = None
        if 'title' in self.params:
            self.title = self.params['title'].strip()

    def _parse_url(self):
        """Prefer archived URLs over the live one."""
        self.url = None
        for key in ('archiveurl', 'archive-url', 'url'):
            if key in self.params:
                self.url = self.params[key].strip()
                break

    def _parse_date(self):
        """Take the first parseable date, preferring archive/access dates."""
        self.date = None
        for key in ('archivedate', 'archive-date', 'accessdate', 'access-date', 'date'):
            value = self.params.get(key)
            if key not in self.params or not value:
                continue
            try:
                self.date = dateutil.parser.parse(value)
                break
            except (ValueError, OverflowError):
                pass

    def to_json(self):
        """Return the non-empty fields as a plain dict."""
        data = {}
        for field in ('type', 'title', 'url'):
            value = getattr(self, field)
            if value:
                data[field] = value
        if self.date:
            data['date'] = str(self.date)
        return data
def parse_references(tree):
    """Build a References container from the rendered reference list in *tree*."""
    references = {}
    nodes = tree.xpath("//ol[contains(@class, 'mw-references')]/li")
    for position, reference_node in enumerate(nodes, start=1):
        children = reference_node.xpath('span[@class="mw-reference-text"]/span[@data-mw]')
        if not children:
            # This a raw text citation with no template
            continue
        # Sometimes there may be more than one valid child (see Obama #136)
        # For now, we take the first valid child as the reference.
        template = json.loads(children[0].get('data-mw'))
        try:
            references[position] = Reference(position, template)
        except ReferenceError:
            pass
    return References(references)
|
from utils import *
class Config(object):
    """Hyper-parameters and shared resources for the BERT classifier."""

    def __init__(self):
        self.model_name = 'bert'
        # Fall back to CPU when no GPU is available.
        use_cuda = torch.cuda.is_available()
        self.device = torch.device('cuda' if use_cuda else 'cpu')
        self.num_classes = 2
        self.bert_path = './Model'
        self.hidden_size = 768
        self.tokenizer = BertTokenizer.from_pretrained(self.bert_path)
        self.batch_size = 16
        self.num_epochs = 5
class Model(nn.Module):
    """Three-branch BERT classifier.

    Encodes three inputs (cc, td, msg) with a shared BERT encoder, then
    classifies the concatenation of the three pooled outputs through a
    small feed-forward stack.
    """

    def __init__(self, config):
        super(Model, self).__init__()
        self.bert = BertModel.from_pretrained(config.bert_path)
        # Fine-tune the whole BERT encoder.
        for param in self.bert.parameters():
            param.requires_grad = True
        self.fc0 = nn.Linear(3 * config.hidden_size, 512)
        self.fc1 = nn.Linear(512, 128)
        self.fc2 = nn.Linear(128, config.num_classes)

    def forward(self, cc_input, td_input, msg_input):
        """Each *_input is a (input_ids, attention_mask, token_type_ids) triple."""
        def encode(triple):
            # Pooled [CLS]-style representation of one input branch.
            outputs = self.bert(input_ids=triple[0],
                                attention_mask=triple[1],
                                token_type_ids=triple[2])
            return outputs.pooler_output

        combined = torch.cat((encode(cc_input), encode(td_input), encode(msg_input)), dim=1)
        hidden = self.fc0(combined)
        hidden = self.fc1(hidden)
        return self.fc2(hidden)
|
import os
import pdb
import numpy as np
import pandas as pd
from numpy import savetxt
from sklearn.model_selection import train_test_split
from load_data import LoadData
# USE MAP DICT TO READ INPUT FROM DEEPLEARNING
# drug -[drug_map]-> drug_name(drug_i, drug_j)
# celllinename -[celllinemap]-> cellline_name
# -> gene_name -[drug_map][drug_target]-> (RNA, drug_i, drug_j)
class ParseFile():
    """File-based preprocessing pipeline for the drug-synergy deep-learning input.

    Maps used downstream:
        drug -[drug_map]-> drug_name(drug_i, drug_j)
        celllinename -[celllinemap]-> cellline_name
        -> gene_name -[drug_map][drug_target]-> (RNA, drug_i, drug_j)

    All paths are built as '.' + dir_opt + '/...'; each method reads and/or
    writes files under that root.
    """

    def __init__(self, dir_opt):
        # Data-directory suffix, e.g. '/datainfo2'.
        self.dir_opt = dir_opt

    # FIND THE DUPLICATE ROWS[Drug A, Drug B, Cell Line Name] THEN AVERAGE SCORE
    def input_condense(self):
        """Collapse duplicate (Drug A, Drug B, Cell Line Name) rows to their mean score."""
        dir_opt = self.dir_opt
        dl_input_df = pd.read_csv('.' + dir_opt + '/init_data/DeepLearningInput.csv')
        dl_input_df = dl_input_df.groupby(['Drug A', 'Drug B', 'Cell Line Name']).agg({'Score': 'mean'}).reset_index()
        dl_input_df.to_csv('.' + dir_opt + '/mid_data/DeepLearningInput.txt', index=False, header=True)

    # REMOVE INPUT ROWS WITH NO MAPPED DRUG NAME (48953 POINTS INPUT)
    def input_drug_condense(self):
        """Drop rows whose Drug A or Drug B has no mapped drug name."""
        dir_opt = self.dir_opt
        dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/DeepLearningInput.txt', delimiter=',')
        drug_map_dict = self.drug_map_dict()
        deletion_list = []
        for row in dl_input_df.itertuples():
            # row[0] is the (reset) index; row[1]/row[2] are Drug A / Drug B.
            if pd.isna(drug_map_dict[row[1]]) or pd.isna(drug_map_dict[row[2]]):
                deletion_list.append(row[0])
        mid_dl_input_df = dl_input_df.drop(dl_input_df.index[deletion_list]).reset_index(drop=True)
        mid_dl_input_df.to_csv('.' + dir_opt + '/mid_data/MidDeepLearningInput.txt', index=False, header=True)

    # REMOVE INPUT ROWS WITH NO CORRESPONDING CELLLINE NAME ([, 37355] POINTS INPUT)
    def input_cellline_condense(self, RNA_seq_filename):
        """Drop rows whose mapped cell line has no RNA-seq expression column."""
        dir_opt = self.dir_opt
        cellline_gene_df = pd.read_csv('.' + dir_opt + '/filtered_data/' + RNA_seq_filename + '.csv')
        # Set for O(1) membership tests (columns 0-1 are gene metadata).
        cellline_name_set = set(cellline_gene_df.columns[2:])
        mid_dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/MidDeepLearningInput.txt', delimiter=',')
        cellline_map_dict = self.cellline_map_dict()
        deletion_list = []
        for row in mid_dl_input_df.itertuples():
            if cellline_map_dict[row[3]] not in cellline_name_set:
                deletion_list.append(row[0])
        final_dl_input_df = mid_dl_input_df.drop(mid_dl_input_df.index[deletion_list]).reset_index(drop=True)
        final_dl_input_df.to_csv('.' + dir_opt + '/mid_data/FinalDeepLearningInput.txt', index=False, header=True)

    # REMOVE INPUT ROWS WITH ALL ZEROS ON DRUG TARGET GENE CONNECTION,
    # WHICH MEANS NO INPUT POINT(VECTOR) HAS ALL ZERO ON 1634 GENES
    def input_drug_gene_condense(self, RNA_seq_filename):
        """Drop rows where either drug hits none of the candidate target genes."""
        dir_opt = self.dir_opt
        deletion_list = []
        final_dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/FinalDeepLearningInput.txt', delimiter=',')
        drug_map_dict, cellline_map_dict, drug_dict, gene_target_num_dict = LoadData(dir_opt, RNA_seq_filename).pre_load_dict()
        target_index_list = gene_target_num_dict.values()
        drug_target_matrix = np.load('.' + dir_opt + '/filtered_data/drug_target_matrix.npy')

        def drug_effects(drug_name):
            # Effect of one drug on every candidate gene; an index of -1
            # marks genes absent from the drug-target matrix.
            drug_index = drug_dict[drug_name]
            effects = []
            for target_index in target_index_list:
                if target_index == -1:
                    effects.append(0)
                else:
                    effects.append(drug_target_matrix[drug_index, target_index])
            return effects

        for row in final_dl_input_df.itertuples():
            drug_a = drug_map_dict[row[1]]
            drug_b = drug_map_dict[row[2]]
            drug_a_target_list = drug_effects(drug_a)
            drug_b_target_list = drug_effects(drug_b)
            if all(a == 0 for a in drug_a_target_list) or all(b == 0 for b in drug_b_target_list):
                deletion_list.append(row[0])
        zero_final_dl_input_df = final_dl_input_df.drop(final_dl_input_df.index[deletion_list]).reset_index(drop=True)
        zero_final_dl_input_df.to_csv('.' + dir_opt + '/filtered_data/ZeroFinalDeepLearningInput.txt', index=False, header=True)

    # RANDOMIZE THE DL INPUT
    def input_random_condense(self):
        """Shuffle the final input rows and persist the randomised order."""
        dir_opt = self.dir_opt
        zero_final_dl_input_df = pd.read_table('.' + dir_opt + '/filtered_data/ZeroFinalDeepLearningInput.txt', delimiter=',')
        random_final_dl_input_df = zero_final_dl_input_df.sample(frac=1)
        random_final_dl_input_df.to_csv('.' + dir_opt + '/filtered_data/RandomFinalDeepLearningInput.txt', index=False, header=True)
        print(random_final_dl_input_df)

    # CALCULATE NUMBER OF UNIQUE DRUG IN RANDOMFINAL_INPUT
    def random_final_drug_count(self):
        """Print the sorted unique drugs appearing in the randomised input."""
        dir_opt = self.dir_opt
        random_final_dl_input_df = pd.read_table('.' + dir_opt + '/filtered_data/RandomFinalDeepLearningInput.txt', delimiter=',')
        # Set union is O(n); the previous list-membership loop was O(n^2).
        unique_drugs = set(random_final_dl_input_df['Drug A']) | set(random_final_dl_input_df['Drug B'])
        random_final_drug_list = sorted(unique_drugs)
        print(random_final_drug_list)
        print(len(random_final_drug_list))

    # SPLIT DEEP LEARNING INPUT INTO TRAINING AND TEST
    def split_k_fold(self, k, place_num):
        """Split the randomised input into train/test, testing on fold *place_num* of *k*."""
        dir_opt = self.dir_opt
        random_final_dl_input_df = pd.read_table('.' + dir_opt + '/filtered_data/RandomFinalDeepLearningInput.txt', delimiter=',')
        print(random_final_dl_input_df)
        num_points = random_final_dl_input_df.shape[0]
        num_div = int(num_points / k)
        num_div_list = [i * num_div for i in range(0, k)]
        # The last fold absorbs the remainder rows.
        num_div_list.append(num_points)
        low_idx = num_div_list[place_num - 1]
        high_idx = num_div_list[place_num]
        print('\n--------TRAIN-TEST SPLIT WITH TEST FROM ' + str(low_idx) + ' TO ' + str(high_idx) + '--------')
        train_input_df = random_final_dl_input_df.drop(random_final_dl_input_df.index[low_idx : high_idx])
        print(train_input_df)
        test_input_df = random_final_dl_input_df[low_idx : high_idx]
        print(test_input_df)
        train_input_df.to_csv('.' + dir_opt + '/filtered_data/TrainingInput.txt', index=False, header=True)
        test_input_df.to_csv('.' + dir_opt + '/filtered_data/TestInput.txt', index=False, header=True)

    # FIND UNIQUE DRUG NAME FROM DATAFRAME AND MAP
    def drug_map(self):
        """Write sorted unique input/mapped drug names and their left-join map."""
        dir_opt = self.dir_opt
        dl_input_df = pd.read_table('.' + dir_opt + '/mid_data/DeepLearningInput.txt', delimiter=',')
        drug_target_df = pd.read_table('.' + dir_opt + '/init_data/drug_tar_drugBank_all.txt')
        drug_list = sorted(set(dl_input_df['Drug A']) | set(dl_input_df['Drug B']))
        drug_df = pd.DataFrame(data=drug_list, columns=['Drug Name'])
        drug_df.to_csv('.' + dir_opt + '/init_data/input_drug_name.txt', index=False, header=True)
        mapped_drug_list = sorted(set(drug_target_df['Drug']))
        mapped_drug_df = pd.DataFrame(data=mapped_drug_list, columns=['Mapped Drug Name'])
        mapped_drug_df.to_csv('.' + dir_opt + '/init_data/mapped_drug_name.txt', index=False, header=True)
        # LEFT JOIN the two name lists to seed the manual mapping step.
        drug_map_df = pd.merge(drug_df, mapped_drug_df, how='left', left_on='Drug Name', right_on='Mapped Drug Name')
        drug_map_df.to_csv('.' + dir_opt + '/init_data/drug_map.csv', index=False, header=True)

    # AFTER AUTO MAP -> MANUAL MAP
    # FROM MANUAL MAP TO DRUG MAP DICT
    def drug_map_dict(self):
        """Load the manually-corrected drug map and return it as {input name: mapped name}."""
        dir_opt = self.dir_opt
        drug_map_df = pd.read_csv('.' + dir_opt + '/mid_data/drug_map.csv')
        drug_map_dict = {}
        for row in drug_map_df.itertuples():
            drug_map_dict[row[1]] = row[2]
        if not os.path.exists('.' + dir_opt + '/filtered_data'):
            os.mkdir('.' + dir_opt + '/filtered_data')
        np.save('.' + dir_opt + '/filtered_data/drug_map_dict.npy', drug_map_dict)
        return drug_map_dict

    # FORM ADAJACENT MATRIX (DRUG x TARGET) (LIST -> SORTED -> DICT -> MATRIX) (ALL 5435 DRUGS <-> ALL 2775 GENES)
    def drug_target(self):
        """Build and save the drug-target adjacency matrix and its index dicts."""
        dir_opt = self.dir_opt
        drug_target_df = pd.read_table('.' + dir_opt + '/init_data/drug_tar_drugBank_all.txt')
        # GET UNIQUE SORTED DRUG LIST AND TARGET(GENE) LIST
        drug_list = sorted(set(drug_target_df['Drug']))
        target_list = sorted(set(drug_target_df['Target']))
        # CONVERT THE SORTED LISTS TO DICTS: name -> index and index -> name.
        drug_dict = {drug_list[i]: i for i in range(len(drug_list))}
        drug_num_dict = {i: drug_list[i] for i in range(len(drug_list))}
        target_dict = {target_list[i]: i for i in range(len(target_list))}
        target_num_dict = {i: target_list[i] for i in range(len(target_list))}
        # ITERATE THE DATAFRAME TO DEFINE CONNECTIONS BETWEEN DRUG AND TARGET(GENE)
        drug_target_matrix = np.zeros((len(drug_list), len(target_list))).astype(int)
        for index, drug_target in drug_target_df.iterrows():
            # BUILD ADJACENT MATRIX
            drug_target_matrix[drug_dict[drug_target['Drug']], target_dict[drug_target['Target']]] = 1
        np.save('.' + dir_opt + '/filtered_data/drug_target_matrix.npy', drug_target_matrix)
        np.save('.' + dir_opt + '/filtered_data/drug_dict.npy', drug_dict)
        np.save('.' + dir_opt + '/filtered_data/drug_num_dict.npy', drug_num_dict)
        np.save('.' + dir_opt + '/filtered_data/target_dict.npy', target_dict)
        np.save('.' + dir_opt + '/filtered_data/target_num_dict.npy', target_num_dict)
        return drug_dict, drug_num_dict, target_dict, target_num_dict

    # FROM MANUAL CELLLINE NAME MAP TO DICT
    def cellline_map_dict(self):
        """Load the manual cell-line name map and return it as a dict."""
        dir_opt = self.dir_opt
        cellline_name_df = pd.read_table('.' + dir_opt + '/init_data/nci60-ccle_cell_name_map1.txt')
        cellline_map_dict = {}
        for row in cellline_name_df.itertuples():
            cellline_map_dict[row[1]] = row[2]
        np.save('.' + dir_opt + '/filtered_data/cellline_map_dict.npy', cellline_map_dict)
        return cellline_map_dict

    # FILTER DUPLICATED AND SPARSE GENES (FINALLY [1118, 1684] GENES)
    def filter_cellline_gene(self, RNA_seq_filename):
        """Drop duplicate gene symbols and genes that are zero in > 1/3 of cell lines."""
        dir_opt = self.dir_opt
        cellline_gene_df = pd.read_table('.' + dir_opt + '/init_data/' + RNA_seq_filename + '.txt')
        cellline_gene_df = cellline_gene_df.drop_duplicates(subset=['geneSymbol'],
                    keep='first').sort_values(by=['geneSymbol']).reset_index(drop=True)
        # A gene is "sparse" when more than a third of its expression values are zero.
        threshold = int((len(cellline_gene_df.columns) - 3) / 3)
        deletion_list = []
        for row in cellline_gene_df.itertuples():
            if list(row[3:]).count(0) > threshold:
                deletion_list.append(row[0])
        cellline_gene_df = cellline_gene_df.drop(cellline_gene_df.index[deletion_list]).reset_index(drop=True)
        cellline_gene_df.to_csv('.' + dir_opt + '/mid_data/' + RNA_seq_filename + '.csv', index=False, header=True)
        print(cellline_gene_df)

    # FILTER GENES NOT EXIST IN EDGES FILE(FINALLY [, 1634] GENES)
    # AND FORM TUPLES GENE CONNECTION AS EDGES
    def filter_form_edge_cellline_gene(self, RNA_seq_filename, gene_filename, form_data_path):
        """Cross-filter genes between the expression table and the edge list,
        then write the gene-connection edges as 1-based index pairs."""
        dir_opt = self.dir_opt
        cellline_gene_df = pd.read_csv('.' + dir_opt + '/mid_data/' + RNA_seq_filename + '.csv')
        # Set for O(1) membership tests against the edge endpoints.
        cellline_gene_set = set(cellline_gene_df['geneSymbol'])
        # DELETE GENES NOT EXIST IN [cellline_gene_df] WRT FILE [Selected_Kegg_Pathways_edges_1.txt]
        gene_connection_df = pd.read_table('.' + dir_opt + '/init_data/' + gene_filename + '.txt')
        gene_connection_df = gene_connection_df.drop(columns=['src_type', 'dest_type', 'direction', 'type'])
        gene_connection_deletion_list = []
        for row in gene_connection_df.itertuples():
            if row[1] not in cellline_gene_set or row[2] not in cellline_gene_set:
                gene_connection_deletion_list.append(row[0])
        gene_connection_df = gene_connection_df.drop(gene_connection_df.index[gene_connection_deletion_list]).reset_index(drop=True)
        # SORT DELETED [gene_connection_df] IN ALPHA-BETA ORDER
        gene_connection_df = gene_connection_df.sort_values(by=['src', 'dest']).reset_index(drop=True)
        # FETCH ALL UNIQUE GENES IN [gene_connection_df] [1634 genes]
        gene_connection_genes = set()
        for row in gene_connection_df.itertuples():
            gene_connection_genes.add(row[1])
            gene_connection_genes.add(row[2])
        print(len(gene_connection_genes))
        # DELETE GENES NOT EXIST IN [gene_connection_genes] WRT [cellline_gene_df]
        cellline_gene_deletion_list = []
        for row in cellline_gene_df.itertuples():
            if row[2] not in gene_connection_genes:
                cellline_gene_deletion_list.append(row[0])
        print(len(cellline_gene_deletion_list))
        cellline_gene_df = cellline_gene_df.drop(cellline_gene_df.index[cellline_gene_deletion_list]).reset_index(drop=True)
        cellline_gene_df.to_csv('.' + dir_opt + '/filtered_data/' + RNA_seq_filename + '.csv', index=False, header=True)
        print(cellline_gene_df)
        # FORM [cellline_gene_dict] TO MAP GENES WITH INDEX NUM !!! START FROM 1 INSTEAD OF 0 !!!
        cellline_gene_list = list(cellline_gene_df['geneSymbol'])
        cellline_gene_dict = {cellline_gene_list[i - 1]: i for i in range(1, len(cellline_gene_list) + 1)}
        # FORM TUPLES GENE CONNECTION ACCORDING TO NUM INDEX OF GENE IN [cellline_gene_dict]
        src_gene_list = []
        dest_gene_list = []
        for row in gene_connection_df.itertuples():
            src_gene_list.append(cellline_gene_dict[row[1]])
            dest_gene_list.append(cellline_gene_dict[row[2]])
        src_dest = {'src': src_gene_list, 'dest': dest_gene_list}
        gene_connection_num_df = pd.DataFrame(src_dest)
        if not os.path.exists(form_data_path):
            os.mkdir(form_data_path)
        gene_connection_num_df.to_csv(form_data_path + '/gene_connection_num.txt', index=False, header=True)

    # BUILD GENES MAP BETWEEN [cellline_gene_df] AND [target_dict]
    # [CCLE GENES : DRUG_TAR GENES] KEY : VALUE (1634 <-MAP-> 2775)
    def gene_target_num_dict(self, RNA_seq_filename):
        """Map each expression-table gene row to its drug-target index (-1 when absent)."""
        dir_opt = self.dir_opt
        drug_dict, drug_num_dict, target_dict, target_num_dict = self.drug_target()
        cellline_gene_df = pd.read_csv('.' + dir_opt + '/filtered_data/' + RNA_seq_filename + '.csv')
        gene_target_num_dict = {}
        for row in cellline_gene_df.itertuples():
            if row[2] not in target_dict.keys():
                map_index = -1
            else:
                map_index = target_dict[row[2]]
            gene_target_num_dict[row[0]] = map_index
        np.save('.' + dir_opt + '/filtered_data/gene_target_num_dict.npy', gene_target_num_dict)
        return gene_target_num_dict

    # FORM ADAJACENT MATRIX (GENE x PATHWAY) (LIST -> SORTED -> DICT -> MATRIX) (ALL 1298 GENES <-> 16 PATHWAYS)
    def gene_pathway(self, pathway_filename):
        """Build and save the gene-pathway adjacency matrix and its index dicts."""
        dir_opt = self.dir_opt
        gene_pathway_df = pd.read_table('.' + dir_opt + '/init_data/' + pathway_filename + '.txt')
        gene_list = sorted(list(gene_pathway_df['AllGenes']))
        gene_pathway_df = gene_pathway_df.drop(['AllGenes'], axis=1).sort_index(axis=1)
        pathway_list = list(gene_pathway_df.columns)
        # CONVERT SORTED LISTS TO DICTS: name -> index and index -> name.
        gene_dict = {gene_list[i]: i for i in range(len(gene_list))}
        gene_num_dict = {i: gene_list[i] for i in range(len(gene_list))}
        pathway_dict = {pathway_list[i]: i for i in range(len(pathway_list))}
        pathway_num_dict = {i: pathway_list[i] for i in range(len(pathway_list))}
        # ITERATE THE DATAFRAME TO DEFINE CONNECTIONS BETWEEN GENES AND PATHWAYS
        gene_pathway_matrix = np.zeros((len(gene_list), len(pathway_list))).astype(int)
        print(gene_pathway_matrix.shape)
        for gene_row in gene_pathway_df.itertuples():
            pathway_index = 0
            for gene in gene_row[1:]:
                # 'test' appears to be the placeholder for an empty cell in the
                # pathway file -- TODO(review): confirm against the source data.
                if gene != 'test':
                    gene_pathway_matrix[gene_dict[gene], pathway_index] = 1
                pathway_index += 1
        np.save('.' + dir_opt + '/filtered_data/gene_pathway_matrix.npy', gene_pathway_matrix)
        np.save('.' + dir_opt + '/filtered_data/gene_dict.npy', gene_dict)
        np.save('.' + dir_opt + '/filtered_data/gene_num_dict.npy', gene_num_dict)
        np.save('.' + dir_opt + '/filtered_data/pathway_dict.npy', pathway_dict)
        np.save('.' + dir_opt + '/filtered_data/pathway_num_dict.npy', pathway_num_dict)
        return gene_dict, gene_num_dict, pathway_dict, pathway_num_dict
def pre_manual():
    """First-stage preprocessing: condense the raw input and build the auto drug map.

    Note: the unused local RNA_seq_filename was removed; neither called
    method takes it.
    """
    dir_opt = '/datainfo2'
    ParseFile(dir_opt).input_condense()
    ParseFile(dir_opt).drug_map()
# AFTER GET [/init_data/drug_map.csv] WITH AUTO MAP -> MANUAL MAP
def pre_parse():
    """Build all stable dictionaries plus the filtered gene/cell-line files."""
    dir_opt = '/datainfo2'
    parser = ParseFile(dir_opt)
    # STABLE DICTIONARY NOT CHANGE WITH FILES
    parser.drug_map_dict()
    parser.drug_target()
    parser.cellline_map_dict()
    RNA_seq_filename = 'nci60-ccle_RNAseq_tpm2'
    parser.filter_cellline_gene(RNA_seq_filename)
    # FILTER GENES NOT IN CELLLINE FOR EDGES
    # FILTER GENES NOT IN EDGES FOR CELLLINE
    gene_filename = 'Selected_Kegg_Pathways_edges_1'
    form_data_path = '.' + dir_opt + '/form_data'
    parser.filter_form_edge_cellline_gene(RNA_seq_filename, gene_filename, form_data_path)
    parser.gene_target_num_dict(RNA_seq_filename)
    pathway_filename = 'Selected_Kegg_Pathways2'
    parser.gene_pathway(pathway_filename)
def pre_input():
    """Run the full input-condensing chain down to the zero-filtered file."""
    dir_opt = '/datainfo2'
    RNA_seq_filename = 'nci60-ccle_RNAseq_tpm2'
    parser = ParseFile(dir_opt)
    parser.input_condense()
    parser.input_drug_condense()
    parser.input_cellline_condense(RNA_seq_filename)
    parser.input_drug_gene_condense(RNA_seq_filename)
def k_fold_split(random_mode, k, place_num):
    """Optionally re-randomise the input, then write the k-fold train/test split."""
    dir_opt = '/datainfo2'
    parser = ParseFile(dir_opt)
    if random_mode:
        parser.input_random_condense()
        parser.random_final_drug_count()
    parser.split_k_fold(k, place_num)
def pre_form():
    """Load the filtered files and form the numpy training tensors."""
    dir_opt = '/datainfo2'
    RNA_seq_filename = 'nci60-ccle_RNAseq_tpm2'
    form_data_path = '.' + dir_opt + '/form_data'
    batch_size = 256
    loader = LoadData(dir_opt, RNA_seq_filename)
    xTr, yTr, xTe, yTe, x, y = loader.load_all(batch_size, form_data_path)
if __name__ == "__main__":
    # Full preprocessing pipeline: build dictionaries/filtered files, then
    # condense the deep-learning input.
    pre_parse()
    pre_input()
    # DOING K-FOLD VALIDATION IN 100% DATASET
    random_mode = False
    k = 5
    place_num = 1
    k_fold_split(random_mode, k, place_num)
    # FORM NUMPY FILES TO BE LOADED
    pre_form()
|
"""
Created By : Nikesh
Created On :
Reviewed By :
Reviewed On :
Version :
"""
import datetime
from PIL import Image
from fuzzywuzzy import fuzz
class Validation:
    """Simple result object describing whether a validation check passed."""

    def __init__(self, is_valid=True, validation_message="Not a valid Object", validation_object=None):
        # The object under validation, the verdict, and a human-readable reason.
        self.validation_object = validation_object
        self.is_valid = is_valid
        self.validation_message = validation_message
class IntHelper:
    """Null-safe conversion from strings to integers."""

    @staticmethod
    def string_to_int(string_value, default_value=None):
        """Convert *string_value* to int; empty string or None yields *default_value*."""
        if string_value is None or string_value == "":
            return default_value
        return int(string_value)
class FloatHelper:
    """Null-safe conversion from strings to floats."""

    @staticmethod
    def string_to_float(string_value, default_value=None):
        """Convert *string_value* to float; empty string or None yields *default_value*."""
        if string_value is None or string_value == "":
            return default_value
        return float(string_value)
class StringHelper:
    """Null-safe helpers for common string operations.

    Methods return None when the input fails :meth:`isvalid` (empty string,
    None, or a non-str value), matching the original defensive style.
    """

    @staticmethod
    def cast_string(string_value):
        """Cast any value to str; None stays None."""
        return None if string_value is None else str(string_value)

    @staticmethod
    def isvalid(string_value):
        """Return True only for a non-empty str instance."""
        if string_value == '' or string_value is None or type(string_value) != str:
            return False
        else:
            return True

    @staticmethod
    def length(string_value):
        """Length of a valid string, else None."""
        if StringHelper.isvalid(string_value):
            return len(string_value)
        return None

    @staticmethod
    def toUpper(string_value):
        """Upper-cased copy of a valid string, else None."""
        if StringHelper.isvalid(string_value):
            return string_value.upper()
        return None

    @staticmethod
    def toLower(string_value):
        """Lower-cased copy of a valid string, else None."""
        if StringHelper.isvalid(string_value):
            return string_value.lower()
        return None

    @staticmethod
    def replace(string_value, source_str, replace_str):
        """Replace occurrences of *source_str* with *replace_str*.

        Note: only the search/replacement strings are validated (so '' is
        rejected for either), matching the original behaviour.
        """
        if StringHelper.isvalid(source_str) and StringHelper.isvalid(replace_str):
            return string_value.replace(source_str, replace_str)
        return None

    @staticmethod
    def trim(string_value, left=True, right=True):
        """Strip whitespace from the selected sides of a valid string.

        Bug fix: the original's branch order made the right-only case
        unreachable, so trim(s, left=False, right=True) returned s unchanged.
        """
        if not StringHelper.isvalid(string_value):
            return None
        if left and right:
            return string_value.strip()
        if left:
            return string_value.lstrip()
        if right:
            return string_value.rstrip()
        return string_value

    @staticmethod
    def remove_spl_characters(x):
        """Lower-case *x* and drop spaces and the characters . / - : , ( )."""
        return x \
            .replace(' ', '') \
            .replace('.', '') \
            .replace('/', '') \
            .replace('-', '') \
            .replace(':', '') \
            .replace(',', '') \
            .replace('(', '') \
            .replace(')', '') \
            .lower()

    @staticmethod
    def find_similarity_percentage(s1, s2):
        """Fuzzy similarity ratio (0-100) between two strings via fuzzywuzzy."""
        return fuzz.ratio(s1, s2)
class DateHelper:
    """Date/time helpers built on the stdlib datetime module."""

    def __init__(self):
        pass

    # ------------------------------------------------------------------ #
    @staticmethod
    def get_current_time(with_millisec=False):
        """Current wall-clock time.

        Microseconds are kept when *with_millisec* is True, otherwise zeroed.
        Returns a datetime.time object either way.
        """
        now = datetime.datetime.now().time()
        if with_millisec:
            return now
        return now.replace(microsecond=0)

    # ------------------------------------------------------------------ #
    @staticmethod
    def get_current_date(with_millisec=False):
        """Current date; returns a full datetime when *with_millisec* is True."""
        now = datetime.datetime.now()
        return now if with_millisec else now.date()

    # ------------------------------------------------------------------ #
    # Supported formats: "%Y-%m-%d" (2019-04-08), "%d-%m-%Y" (04-08-2018),
    # "%d-%m-%y" (04-08-18).
    @staticmethod
    def convert_string_to_date_object(input_date_as_string, date_format):
        """Parse a date string into a datetime.date; None when parsing fails."""
        try:
            return datetime.datetime.strptime(input_date_as_string, date_format).date()
        except ValueError:
            return None

    # ------------------------------------------------------------------ #
    @staticmethod
    def convert_date_object_to_string(date, output_format):
        """Format a date object via ISO-string slicing.

        Only the three whitelisted formats are supported; anything else
        returns None (matching the original behaviour).
        """
        iso = str(date)
        year, month, day = iso[0:4], iso[5:7], iso[8:10]
        if output_format == "%Y-%m-%d":
            return iso
        if output_format == "%d-%m-%Y":
            return day + '-' + month + '-' + year
        if output_format == "%d-%m-%y":
            return day + '-' + month + '-' + year[2:4]
        return None

    # ------------------------------------------------------------------ #
    @staticmethod
    def date_add(input_date_time, number_of_days):
        """Add *number_of_days* to a datetime and return the resulting date object."""
        shifted = input_date_time + datetime.timedelta(days=number_of_days)
        return shifted.date()

    # ------------------------------------------------------------------ #
    @staticmethod
    def is_Valid(input_date, input_format):
        """Return True when *input_date* parses under *input_format*, else False."""
        try:
            datetime.datetime.strptime(input_date, input_format).date()
            x = True
        except ValueError:
            x = False
        return x
class ImageHelper:
    """Image-file helpers built on Pillow."""

    @staticmethod
    def compress_image(imagepath, quality=25):
        """Re-save the image at *imagepath* in place with lossy compression.

        Parameters
        ----------
        imagepath
            Path to the image file; it is overwritten in place.
        quality
            Encoder quality setting. The default of 25 preserves the
            original hard-coded behaviour; callers may now override it.
        """
        im = Image.open(imagepath)
        im.save(imagepath, optimize=True, quality=quality)
|
import logging
from os import path
import itertools
from typing import Any, Dict, List, Optional
import torch
from torch.nn import functional as F
from overrides import overrides
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, Seq2SeqEncoder
from allennlp.nn import util, InitializerApplicator, RegularizerApplicator
from allennlp.modules import TimeDistributed
from allennlp.modules.token_embedders import Embedding
from allennlp.modules.matrix_attention.bilinear_matrix_attention import BilinearMatrixAttention
from dygie.training.relation_metrics import RelationMetrics, CandidateRecall
from dygie.training.event_metrics import EventMetrics, ArgumentStats
from dygie.models.shared import fields_to_batches
from dygie.models.one_hot import make_embedder
from dygie.models.entity_beam_pruner import make_pruner
from dygie.models.span_prop import SpanProp
# TODO(dwadden) rename NERMetrics
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
# TODO(dwadden) add tensor dimension comments.
# TODO(dwadden) Different sentences should have different number of relation candidates depending on
# length.
class EventExtractor(Model):
    """
    Event extraction for DyGIE.

    Scores every token as a potential event trigger and every enumerated span
    as a potential event argument, prunes both with learned (or entity-beam)
    pruners, and classifies each surviving (trigger, argument) pair.
    """
    # TODO(dwadden) add option to make `mention_feedforward` be the NER tagger.
    def __init__(self,
                 vocab: Vocabulary,
                 trigger_feedforward: FeedForward,
                 trigger_candidate_feedforward: FeedForward,
                 mention_feedforward: FeedForward,  # Used if entity beam is off.
                 argument_feedforward: FeedForward,
                 context_attention: BilinearMatrixAttention,
                 trigger_attention: Seq2SeqEncoder,
                 span_prop: SpanProp,
                 cls_projection: FeedForward,
                 feature_size: int,
                 trigger_spans_per_word: float,
                 argument_spans_per_word: float,
                 loss_weights,
                 trigger_attention_context: bool,
                 event_args_use_trigger_labels: bool,
                 event_args_use_ner_labels: bool,
                 event_args_label_emb: int,
                 shared_attention_context: bool,
                 label_embedding_method: str,
                 event_args_label_predictor: str,
                 event_args_gold_candidates: bool = False,  # If True, use gold argument candidates.
                 context_window: int = 0,
                 softmax_correction: bool = False,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 positive_label_weight: float = 1.0,
                 entity_beam: bool = False,
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        """Build all sub-modules: trigger scorer, pruners, argument scorer,
        label embedders, distance embeddings, CLS-based auxiliary heads, and
        (optionally) span propagation and shared attention context."""
        super(EventExtractor, self).__init__(vocab, regularizer)
        self._n_ner_labels = vocab.get_vocab_size("ner_labels")
        self._n_trigger_labels = vocab.get_vocab_size("trigger_labels")
        self._n_argument_labels = vocab.get_vocab_size("argument_labels")
        # Embeddings for trigger labels and ner labels, to be used by argument scorer.
        # These will be either one-hot encodings or learned embeddings, depending on "kind".
        self._ner_label_emb = make_embedder(kind=label_embedding_method,
                                            num_embeddings=self._n_ner_labels,
                                            embedding_dim=event_args_label_emb)
        self._trigger_label_emb = make_embedder(kind=label_embedding_method,
                                                num_embeddings=self._n_trigger_labels,
                                                embedding_dim=event_args_label_emb)
        self._label_embedding_method = label_embedding_method
        # Weight on trigger labeling and argument labeling.
        self._loss_weights = loss_weights
        # Trigger candidate scorer.
        null_label = vocab.get_token_index("", "trigger_labels")
        assert null_label == 0  # If not, the dummy class won't correspond to the null label.
        self._trigger_scorer = torch.nn.Sequential(
            TimeDistributed(trigger_feedforward),
            TimeDistributed(torch.nn.Linear(trigger_feedforward.get_output_dim(),
                                            self._n_trigger_labels - 1)))
        self._trigger_attention_context = trigger_attention_context
        if self._trigger_attention_context:
            self._trigger_attention = trigger_attention
        # Make pruners. If `entity_beam` is true, use NER and trigger scorers to construct the beam
        # and only keep candidates that the model predicts are actual entities or triggers.
        self._mention_pruner = make_pruner(mention_feedforward, entity_beam=entity_beam,
                                           gold_beam=event_args_gold_candidates)
        self._trigger_pruner = make_pruner(trigger_candidate_feedforward, entity_beam=entity_beam,
                                           gold_beam=False)
        # Argument scorer.
        self._event_args_use_trigger_labels = event_args_use_trigger_labels  # If True, use trigger labels.
        self._event_args_use_ner_labels = event_args_use_ner_labels  # If True, use ner labels to predict args.
        assert event_args_label_predictor in ["hard", "softmax", "gold"]  # Method for predicting labels at test time.
        self._event_args_label_predictor = event_args_label_predictor
        self._event_args_gold_candidates = event_args_gold_candidates
        # If set to True, then construct a context vector from a bilinear attention over the trigger
        # / argument pair embeddings and the text.
        self._context_window = context_window  # If greater than 0, concatenate context as features.
        self._argument_feedforward = argument_feedforward
        self._argument_scorer = torch.nn.Linear(argument_feedforward.get_output_dim(), self._n_argument_labels)
        # Distance embeddings.
        self._num_distance_buckets = 10  # Just use 10 which is the default.
        self._distance_embedding = Embedding(self._num_distance_buckets, feature_size)
        # Class token projection.
        self._cls_projection = cls_projection
        # Auxiliary heads on the CLS token: predict the number of triggers
        # (truncated at 4, hence 5 classes) and which event types are present.
        self._cls_n_triggers = torch.nn.Linear(self._cls_projection.get_output_dim(), 5)
        self._cls_event_types = torch.nn.Linear(self._cls_projection.get_output_dim(),
                                                self._n_trigger_labels - 1)
        self._trigger_spans_per_word = trigger_spans_per_word
        self._argument_spans_per_word = argument_spans_per_word
        # Context attention for event argument scorer.
        self._shared_attention_context = shared_attention_context
        if self._shared_attention_context:
            self._shared_attention_context_module = context_attention
        # Span propagation object.
        # TODO(dwadden) initialize with `from_params` instead if this ends up working.
        self._span_prop = span_prop
        self._span_prop._trig_arg_embedder = self._compute_trig_arg_embeddings
        self._span_prop._argument_scorer = self._compute_argument_scores
        # Softmax correction parameters.
        self._softmax_correction = softmax_correction
        self._softmax_log_temp = torch.nn.Parameter(
            torch.zeros([1, 1, 1, self._n_argument_labels]))
        self._softmax_log_multiplier = torch.nn.Parameter(
            torch.zeros([1, 1, 1, self._n_argument_labels]))
        # TODO(dwadden) Add metrics.
        self._metrics = EventMetrics()
        self._argument_stats = ArgumentStats()
        self._trigger_loss = torch.nn.CrossEntropyLoss(reduction="sum")
        # TODO(dwadden) add loss weights.
        self._argument_loss = torch.nn.CrossEntropyLoss(reduction="sum", ignore_index=-1)
        initializer(self)

    @overrides
    def forward(self,  # type: ignore
                trigger_mask,
                trigger_embeddings,
                spans,
                span_mask,
                span_embeddings,  # TODO(dwadden) add type.
                cls_embeddings,
                sentence_lengths,
                output_ner,  # Needed if we're using entity beam approach.
                trigger_labels,
                argument_labels,
                ner_labels,
                metadata: Optional[List[Dict[str, Any]]] = None) -> Dict[str, torch.Tensor]:
        """
        TODO(dwadden) Write documentation.
        The trigger embeddings are just the contextualized token embeddings, and the trigger mask is
        the text mask. For the arguments, we consider all the spans.

        Returns a dict with pruned trigger/argument indices, raw scores,
        hard predictions, and (when gold labels are given) the combined
        trigger + argument + auxiliary loss under ``"loss"``.
        """
        # NOTE(review): shapes are presumably (batch, seq, emb) for
        # trigger_embeddings and (batch, n_spans, emb) for span_embeddings --
        # confirm against the caller.
        cls_projected = self._cls_projection(cls_embeddings)
        auxiliary_loss = self._compute_auxiliary_loss(cls_projected, trigger_labels, trigger_mask)
        ner_scores = output_ner["ner_scores"]
        predicted_ner = output_ner["predicted_ner"]
        # Compute trigger scores.
        trigger_scores = self._compute_trigger_scores(trigger_embeddings, cls_projected, trigger_mask)
        _, predicted_triggers = trigger_scores.max(-1)
        # Get trigger candidates for event argument labeling.
        # Keep between 1 and 15 trigger candidates per sentence.
        num_trigs_to_keep = torch.floor(
            sentence_lengths.float() * self._trigger_spans_per_word).long()
        num_trigs_to_keep = torch.max(num_trigs_to_keep,
                                      torch.ones_like(num_trigs_to_keep))
        num_trigs_to_keep = torch.min(num_trigs_to_keep,
                                      15 * torch.ones_like(num_trigs_to_keep))
        (top_trig_embeddings, top_trig_mask,
         top_trig_indices, top_trig_scores, num_trigs_kept) = self._trigger_pruner(
             trigger_embeddings, trigger_mask, num_trigs_to_keep, trigger_scores)
        top_trig_mask = top_trig_mask.unsqueeze(-1)
        # Compute the number of argument spans to keep.
        # Keep between 1 and 30 argument span candidates per sentence.
        num_arg_spans_to_keep = torch.floor(
            sentence_lengths.float() * self._argument_spans_per_word).long()
        num_arg_spans_to_keep = torch.max(num_arg_spans_to_keep,
                                          torch.ones_like(num_arg_spans_to_keep))
        num_arg_spans_to_keep = torch.min(num_arg_spans_to_keep,
                                          30 * torch.ones_like(num_arg_spans_to_keep))
        # If we're using gold event arguments, include the gold labels.
        gold_labels = ner_labels if self._event_args_gold_candidates else None
        (top_arg_embeddings, top_arg_mask,
         top_arg_indices, top_arg_scores, num_arg_spans_kept) = self._mention_pruner(
             span_embeddings, span_mask, num_arg_spans_to_keep, ner_scores, gold_labels)
        top_arg_mask = top_arg_mask.unsqueeze(-1)
        top_arg_spans = util.batched_index_select(spans,
                                                  top_arg_indices)
        # Collect trigger and ner labels, in case they're included as features to the argument
        # classifier.
        # At train time, use the gold labels. At test time, use the labels predicted by the model,
        # or gold if specified.
        if self.training or self._event_args_label_predictor == "gold":
            top_trig_labels = trigger_labels.gather(1, top_trig_indices)
            top_ner_labels = ner_labels.gather(1, top_arg_indices)
        else:
            # Hard predictions.
            if self._event_args_label_predictor == "hard":
                top_trig_labels = predicted_triggers.gather(1, top_trig_indices)
                top_ner_labels = predicted_ner.gather(1, top_arg_indices)
            # Softmax predictions.
            else:
                softmax_triggers = trigger_scores.softmax(dim=-1)
                top_trig_labels = util.batched_index_select(softmax_triggers, top_trig_indices)
                softmax_ner = ner_scores.softmax(dim=-1)
                top_ner_labels = util.batched_index_select(softmax_ner, top_arg_indices)
        # Make a dict of all arguments that are needed to make trigger / argument pair embeddings.
        trig_arg_emb_dict = dict(cls_projected=cls_projected,
                                 top_trig_labels=top_trig_labels,
                                 top_ner_labels=top_ner_labels,
                                 top_trig_indices=top_trig_indices,
                                 top_arg_spans=top_arg_spans,
                                 text_emb=trigger_embeddings,
                                 text_mask=trigger_mask)
        # Run span graph propagation, if asked for
        if self._span_prop._n_span_prop > 0:
            top_trig_embeddings, top_arg_embeddings = self._span_prop(
                top_trig_embeddings, top_arg_embeddings, top_trig_mask, top_arg_mask,
                top_trig_scores, top_arg_scores, trig_arg_emb_dict)
            # Scatter the propagated trigger embeddings back into the full
            # token-embedding tensor, then re-score triggers.
            top_trig_indices_repeat = (top_trig_indices.unsqueeze(-1).
                                       repeat(1, 1, top_trig_embeddings.size(-1)))
            updated_trig_embeddings = trigger_embeddings.scatter(
                1, top_trig_indices_repeat, top_trig_embeddings)
            # Recompute the trigger scores.
            trigger_scores = self._compute_trigger_scores(updated_trig_embeddings, cls_projected, trigger_mask)
            _, predicted_triggers = trigger_scores.max(-1)
        trig_arg_embeddings = self._compute_trig_arg_embeddings(
            top_trig_embeddings, top_arg_embeddings, **trig_arg_emb_dict)
        argument_scores = self._compute_argument_scores(
            trig_arg_embeddings, top_trig_scores, top_arg_scores, top_arg_mask)
        _, predicted_arguments = argument_scores.max(-1)
        predicted_arguments -= 1  # The null argument has label -1.
        output_dict = {"top_trigger_indices": top_trig_indices,
                       "top_argument_spans": top_arg_spans,
                       "trigger_scores": trigger_scores,
                       "argument_scores": argument_scores,
                       "predicted_triggers": predicted_triggers,
                       "predicted_arguments": predicted_arguments,
                       "num_triggers_kept": num_trigs_kept,
                       "num_argument_spans_kept": num_arg_spans_kept,
                       "sentence_lengths": sentence_lengths}
        # Evaluate loss and F1 if labels were provided.
        if trigger_labels is not None and argument_labels is not None:
            # Compute the loss for both triggers and arguments.
            trigger_loss = self._get_trigger_loss(trigger_scores, trigger_labels, trigger_mask)
            gold_arguments = self._get_pruned_gold_arguments(
                argument_labels, top_trig_indices, top_arg_indices, top_trig_mask, top_arg_mask)
            argument_loss = self._get_argument_loss(argument_scores, gold_arguments)
            # Compute F1.
            predictions = self.decode(output_dict)["decoded_events"]
            assert len(predictions) == len(metadata)  # Make sure length of predictions is right.
            self._metrics(predictions, metadata)
            self._argument_stats(predictions)
            loss = (self._loss_weights["trigger"] * trigger_loss +
                    self._loss_weights["arguments"] * argument_loss +
                    0.05 * auxiliary_loss)
            output_dict["loss"] = loss
        return output_dict

    @overrides
    def decode(self, output_dict):
        """
        Take the output and convert it into a list of dicts. Each entry is a sentence. Each key is a
        pair of span indices for that sentence, and each value is the relation label on that span
        pair.
        """
        outputs = fields_to_batches({k: v.detach().cpu() for k, v in output_dict.items()})
        res = []
        # Collect predictions for each sentence in minibatch.
        for output in outputs:
            decoded_trig = self._decode_trigger(output)
            decoded_args, decoded_args_with_scores = self._decode_arguments(output, decoded_trig)
            entry = dict(trigger_dict=decoded_trig, argument_dict=decoded_args,
                         argument_dict_with_scores=decoded_args_with_scores)
            res.append(entry)
        output_dict["decoded_events"] = res
        return output_dict

    @overrides
    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Merge event F1 metrics and argument statistics into one dict."""
        f1_metrics = self._metrics.get_metric(reset)
        argument_stats = self._argument_stats.get_metric(reset)
        res = {}
        res.update(f1_metrics)
        res.update(argument_stats)
        return res

    def _decode_trigger(self, output):
        """Map token index -> trigger label name for every token predicted as
        a (non-null) trigger in a single sentence."""
        trigger_dict = {}
        for i in range(output["sentence_lengths"]):
            trig_label = output["predicted_triggers"][i].item()
            if trig_label > 0:
                trigger_dict[i] = self.vocab.get_token_from_index(trig_label, namespace="trigger_labels")
        return trigger_dict

    def _decode_arguments(self, output, decoded_trig):
        """Map (trigger index, argument span) -> argument label name for every
        kept pair with a non-null prediction whose trigger survived decoding.
        Also returns a variant that carries the argument score, for debugging."""
        argument_dict = {}
        argument_dict_with_scores = {}
        for i, j in itertools.product(range(output["num_triggers_kept"]),
                                      range(output["num_argument_spans_kept"])):
            trig_ix = output["top_trigger_indices"][i].item()
            arg_span = tuple(output["top_argument_spans"][j].tolist())
            arg_label = output["predicted_arguments"][i, j].item()
            # Only include the argument if its putative trigger is predicted as a real trigger.
            if arg_label >= 0 and trig_ix in decoded_trig:
                # +1 because the score matrix has the null class prepended at index 0.
                arg_score = output["argument_scores"][i, j, arg_label + 1].item()
                label_name = self.vocab.get_token_from_index(arg_label, namespace="argument_labels")
                argument_dict[(trig_ix, arg_span)] = label_name
                # Keep around a version with the predicted labels and their scores, for debugging
                # purposes.
                argument_dict_with_scores[(trig_ix, arg_span)] = (label_name, arg_score)
        return argument_dict, argument_dict_with_scores

    def _compute_auxiliary_loss(self, cls_projected, trigger_labels, trigger_mask):
        """Auxiliary CLS-token losses: cross-entropy on the (truncated) number
        of triggers in the sentence, plus binary cross-entropy on which event
        types occur. Returns the summed loss."""
        num_triggers = ((trigger_labels > 0) * trigger_mask.bool()).sum(dim=1)
        # Truncate at 4.
        num_triggers = torch.min(num_triggers, 4 * torch.ones_like(num_triggers))
        predicted_num_triggers = self._cls_n_triggers(cls_projected)
        # Up-weight the non-zero trigger counts, which are presumably rarer.
        num_trigger_loss = F.cross_entropy(
            predicted_num_triggers, num_triggers,
            weight=torch.tensor([1, 3, 3, 3, 3], device=trigger_labels.device, dtype=torch.float),
            reduction="sum")
        label_present = [torch.any(trigger_labels == i, dim=1).unsqueeze(1)
                         for i in range(1, self._n_trigger_labels)]
        label_present = torch.cat(label_present, dim=1)
        if cls_projected.device.type != "cpu":
            label_present = label_present.cuda(cls_projected.device)
        predicted_event_type_logits = self._cls_event_types(cls_projected)
        trigger_label_loss = F.binary_cross_entropy_with_logits(
            predicted_event_type_logits, label_present.float(), reduction="sum")
        return num_trigger_loss + trigger_label_loss

    def _compute_trigger_scores(self, trigger_embeddings, cls_projected, trigger_mask):
        """
        Compute trigger scores for all tokens.

        A zero-score dummy column is prepended so that index 0 corresponds to
        the null (non-trigger) label.
        """
        cls_repeat = cls_projected.unsqueeze(dim=1).repeat(1, trigger_embeddings.size(1), 1)
        trigger_embeddings = torch.cat([trigger_embeddings, cls_repeat], dim=-1)
        if self._trigger_attention_context:
            context = self._trigger_attention(trigger_embeddings, trigger_mask)
            trigger_embeddings = torch.cat([trigger_embeddings, context], dim=2)
        trigger_scores = self._trigger_scorer(trigger_embeddings)
        # Give large negative scores to masked-out elements.
        mask = trigger_mask.unsqueeze(-1)
        trigger_scores = util.replace_masked_values(trigger_scores, mask, -1e20)
        dummy_dims = [trigger_scores.size(0), trigger_scores.size(1), 1]
        dummy_scores = trigger_scores.new_zeros(*dummy_dims)
        trigger_scores = torch.cat((dummy_scores, trigger_scores), -1)
        # Give large negative scores to the masked-out values.
        return trigger_scores

    def _compute_trig_arg_embeddings(self,
                                     top_trig_embeddings, top_arg_embeddings, cls_projected,
                                     top_trig_labels, top_ner_labels, top_trig_indices,
                                     top_arg_spans, text_emb, text_mask):
        """
        Create trigger / argument pair embeddings, consisting of:
        - The embeddings of the trigger and argument pair.
        - Optionally, the embeddings of the trigger and argument labels.
        - Optionally, embeddings of the words surrounding the trigger and argument.
        """
        trig_emb_extras = []
        arg_emb_extras = []
        if self._context_window > 0:
            # Include words in a window around trigger and argument.
            # For triggers, the span start and end indices are the same.
            trigger_context = self._get_context(top_trig_indices, top_trig_indices, text_emb)
            argument_context = self._get_context(
                top_arg_spans[:, :, 0], top_arg_spans[:, :, 1], text_emb)
            trig_emb_extras.append(trigger_context)
            arg_emb_extras.append(argument_context)
        # TODO(dwadden) refactor this. Way too many conditionals.
        if self._event_args_use_trigger_labels:
            if self._event_args_label_predictor == "softmax" and not self.training:
                if self._label_embedding_method == "one_hot":
                    # If we're using one-hot encoding, just return the scores for each class.
                    top_trig_embs = top_trig_labels
                else:
                    # Otherwise take the average of the embeddings, weighted by softmax scores.
                    top_trig_embs = torch.matmul(top_trig_labels, self._trigger_label_emb.weight)
                trig_emb_extras.append(top_trig_embs)
            else:
                trig_emb_extras.append(self._trigger_label_emb(top_trig_labels))
        if self._event_args_use_ner_labels:
            if self._event_args_label_predictor == "softmax" and not self.training:
                # Same deal as for trigger labels.
                if self._label_embedding_method == "one_hot":
                    top_ner_embs = top_ner_labels
                else:
                    top_ner_embs = torch.matmul(top_ner_labels, self._ner_label_emb.weight)
                arg_emb_extras.append(top_ner_embs)
            else:
                # Otherwise, just return the embeddings.
                arg_emb_extras.append(self._ner_label_emb(top_ner_labels))
        # Tile triggers along the argument axis and vice versa to form the
        # (batch, n_trigs, n_args, emb) pairwise grid.
        num_trigs = top_trig_embeddings.size(1)
        num_args = top_arg_embeddings.size(1)
        trig_emb_expanded = top_trig_embeddings.unsqueeze(2)
        trig_emb_tiled = trig_emb_expanded.repeat(1, 1, num_args, 1)
        arg_emb_expanded = top_arg_embeddings.unsqueeze(1)
        arg_emb_tiled = arg_emb_expanded.repeat(1, num_trigs, 1, 1)
        distance_embeddings = self._compute_distance_embeddings(top_trig_indices, top_arg_spans)
        cls_repeat = (cls_projected.unsqueeze(dim=1).unsqueeze(dim=2).
                      repeat(1, num_trigs, num_args, 1))
        pair_embeddings_list = [trig_emb_tiled, arg_emb_tiled, distance_embeddings, cls_repeat]
        pair_embeddings = torch.cat(pair_embeddings_list, dim=3)
        if trig_emb_extras:
            trig_extras_expanded = torch.cat(trig_emb_extras, dim=-1).unsqueeze(2)
            trig_extras_tiled = trig_extras_expanded.repeat(1, 1, num_args, 1)
            pair_embeddings = torch.cat([pair_embeddings, trig_extras_tiled], dim=3)
        if arg_emb_extras:
            arg_extras_expanded = torch.cat(arg_emb_extras, dim=-1).unsqueeze(1)
            arg_extras_tiled = arg_extras_expanded.repeat(1, num_trigs, 1, 1)
            pair_embeddings = torch.cat([pair_embeddings, arg_extras_tiled], dim=3)
        if self._shared_attention_context:
            attended_context = self._get_shared_attention_context(pair_embeddings, text_emb, text_mask)
            pair_embeddings = torch.cat([pair_embeddings, attended_context], dim=3)
        return pair_embeddings

    def _compute_distance_embeddings(self, top_trig_indices, top_arg_spans):
        """Embed the bucketed token distance between each trigger and each
        argument span, plus two binary features: trigger-before-span and
        trigger-inside-span."""
        top_trig_ixs = top_trig_indices.unsqueeze(2)
        arg_span_starts = top_arg_spans[:, :, 0].unsqueeze(1)
        arg_span_ends = top_arg_spans[:, :, 1].unsqueeze(1)
        dist_from_start = top_trig_ixs - arg_span_starts
        dist_from_end = top_trig_ixs - arg_span_ends
        # Distance from trigger to arg.
        dist = torch.min(dist_from_start.abs(), dist_from_end.abs())
        # When the trigger is inside the arg span, also set the distance to zero.
        trigger_inside = (top_trig_ixs >= arg_span_starts) & (top_trig_ixs <= arg_span_ends)
        dist[trigger_inside] = 0
        dist_buckets = util.bucket_values(dist, self._num_distance_buckets)
        dist_emb = self._distance_embedding(dist_buckets)
        trigger_before_feature = (top_trig_ixs < arg_span_starts).float().unsqueeze(-1)
        trigger_inside_feature = trigger_inside.float().unsqueeze(-1)
        res = torch.cat([dist_emb, trigger_before_feature, trigger_inside_feature], dim=-1)
        return res

    def _get_shared_attention_context(self, pair_embeddings, text_emb, text_mask):
        """For each (trigger, argument) pair, attend over the sentence tokens
        with the shared bilinear attention and return the weighted sum of
        token embeddings, reshaped back to (batch, n_trigs, n_args, emb)."""
        batch_size, n_triggers, n_args, emb_dim = pair_embeddings.size()
        pair_emb_flat = pair_embeddings.view([batch_size, -1, emb_dim])
        attn_unnorm = self._shared_attention_context_module(pair_emb_flat, text_emb)
        attn_weights = util.masked_softmax(attn_unnorm, text_mask)
        context = util.weighted_sum(text_emb, attn_weights)
        context = context.view(batch_size, n_triggers, n_args, -1)
        return context

    def _get_context(self, span_starts, span_ends, text_emb):
        """
        Given span start and end (inclusive), get the context on either side.
        """
        # The text_emb are already zero-padded on the right, which is correct.
        assert span_starts.size() == span_ends.size()
        batch_size, seq_length, emb_size = text_emb.size()
        num_candidates = span_starts.size(1)
        padding = torch.zeros(batch_size, self._context_window, emb_size, device=text_emb.device)
        # [batch_size, seq_length + 2 x context_window, emb_size]
        padded_emb = torch.cat([padding, text_emb, padding], dim=1)
        pad_batch = []
        for batch_ix, (start_ixs, end_ixs) in enumerate(zip(span_starts, span_ends)):
            pad_entry = []
            for start_ix, end_ix in zip(start_ixs, end_ixs):
                # The starts are inclusive, ends are exclusive.
                # Indices below are relative to padded_emb, so `start_ix` in the
                # padded tensor already points `context_window` tokens left of
                # the original span start.
                left_start = start_ix
                left_end = start_ix + self._context_window
                right_start = end_ix + self._context_window + 1
                right_end = end_ix + 2 * self._context_window + 1
                left_pad = padded_emb[batch_ix, left_start:left_end]
                right_pad = padded_emb[batch_ix, right_start:right_end]
                pad = torch.cat([left_pad, right_pad], dim=0).view(-1).unsqueeze(0)
                pad_entry.append(pad)
            pad_entry = torch.cat(pad_entry, dim=0).unsqueeze(0)
            pad_batch.append(pad_entry)
        pad_batch = torch.cat(pad_batch, dim=0)
        return pad_batch

    def _compute_argument_scores(self, pairwise_embeddings, top_trig_scores, top_arg_scores,
                                 top_arg_mask, prepend_zeros=True):
        """Score every (trigger, argument) pair with the argument feedforward,
        add the candidate scores from the pruners, optionally apply the
        learned softmax correction, and (by default) prepend a zero-score
        null-class column."""
        batch_size = pairwise_embeddings.size(0)
        max_num_trigs = pairwise_embeddings.size(1)
        max_num_args = pairwise_embeddings.size(2)
        feature_dim = self._argument_feedforward.input_dim
        embeddings_flat = pairwise_embeddings.view(-1, feature_dim)
        arguments_projected_flat = self._argument_feedforward(embeddings_flat)
        argument_scores_flat = self._argument_scorer(arguments_projected_flat)
        argument_scores = argument_scores_flat.view(batch_size, max_num_trigs, max_num_args, -1)
        # Add the mention scores for each of the candidates.
        argument_scores += (top_trig_scores.unsqueeze(-1) +
                            top_arg_scores.transpose(1, 2).unsqueeze(-1))
        # Softmax correction to compare arguments.
        if self._softmax_correction:
            the_temp = torch.exp(self._softmax_log_temp)
            the_multiplier = torch.exp(self._softmax_log_multiplier)
            softmax_scores = util.masked_softmax(argument_scores / the_temp, mask=top_arg_mask, dim=2)
            argument_scores = argument_scores + the_multiplier * softmax_scores
        shape = [argument_scores.size(0), argument_scores.size(1), argument_scores.size(2), 1]
        dummy_scores = argument_scores.new_zeros(*shape)
        if prepend_zeros:
            argument_scores = torch.cat([dummy_scores, argument_scores], -1)
        return argument_scores

    @staticmethod
    def _get_pruned_gold_arguments(argument_labels, top_trig_indices, top_arg_indices,
                                   top_trig_masks, top_arg_masks):
        """
        Loop over each slice and get the labels for the spans from that slice.
        All labels are offset by 1 so that the "null" label gets class zero. This is the desired
        behavior for the softmax. Labels corresponding to masked relations keep the label -1, which
        the softmax loss ignores.
        """
        arguments = []
        zipped = zip(argument_labels, top_trig_indices, top_arg_indices,
                     top_trig_masks.bool(), top_arg_masks.bool())
        for sliced, trig_ixs, arg_ixs, trig_mask, arg_mask in zipped:
            entry = sliced[trig_ixs][:, arg_ixs].unsqueeze(0)
            mask_entry = trig_mask & arg_mask.transpose(0, 1).unsqueeze(0)
            entry[mask_entry] += 1
            entry[~mask_entry] = -1
            arguments.append(entry)
        return torch.cat(arguments, dim=0)

    def _get_trigger_loss(self, trigger_scores, trigger_labels, trigger_mask):
        """Summed cross-entropy over all unmasked tokens' trigger labels."""
        trigger_scores_flat = trigger_scores.view(-1, self._n_trigger_labels)
        trigger_labels_flat = trigger_labels.view(-1)
        mask_flat = trigger_mask.view(-1).bool()
        loss = self._trigger_loss(trigger_scores_flat[mask_flat], trigger_labels_flat[mask_flat])
        return loss

    def _get_argument_loss(self, argument_scores, argument_labels):
        """
        Compute cross-entropy loss on argument labels.
        """
        # Need to add one for the null class.
        scores_flat = argument_scores.view(-1, self._n_argument_labels + 1)
        # Need to add 1 so that the null label is 0, to line up with indices into prediction matrix.
        labels_flat = argument_labels.view(-1)
        # Compute cross-entropy loss.
        loss = self._argument_loss(scores_flat, labels_flat)
        return loss
|
import requests
from bs4 import BeautifulSoup
from lxml import html
from requests.compat import quote_plus
def ins(name):
    """Search instructables.com for `name` and scrape the result tiles.

    Fixes: in the original, `img_name`/`img_url` were referenced before
    assignment (NameError on the first hit without an anchor, stale values
    from the previous iteration otherwise), and `aa['href']` was evaluated
    before the `if aa:` guard, so a missing anchor crashed instead of
    producing the `final_id=None` fallback.

    :param name: free-text search query
    :returns: list of (img_name, img_url, final_id) tuples; fields are None
        when the corresponding element/attribute is missing
    """
    features = []
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36'}
    instructables = 'https://www.instructables.com/howto/{}'
    instractables_id = "https://www.instructables.com"
    final_url = instructables.format(quote_plus(name))
    response = requests.get(final_url, headers=headers)
    soup = BeautifulSoup(response.content, features='html.parser')
    try:
        found = soup.findAll('div', attrs={'class': 'desktop-search-feed-ible'})
    except Exception:
        # Best-effort scraper: return whatever was collected so far.
        return features
    for tile in found:
        # Reset per tile so values never leak from the previous iteration.
        img_name = None
        img_url = None
        final_id = None
        aa = tile.find('a')
        if aa is not None and aa.get('href') is not None:
            final_id = instractables_id + aa['href']
            img = aa.find('img')
            if img is not None:
                alt = img.get('alt')
                img_name = alt.lower() if alt is not None else None
                img_url = img.get('data-src')
        features.append((img_name, img_url, final_id))
    return features
# -*- coding: utf-8 -*-
#
# Original code from https://github.com/manelromero/checkpoint
# A library for communicating with Check Point's management server using
# its web-services API.
# written by: Check Point software technologies inc.
# tested with Check Point R80 (tested with take hero2 198)
#
# Code updated to python 3.X compatibility
#
import http.client
import hashlib
import ssl
import json
import os.path
import time
from http.client import HTTPResponse
# api response structure
class APIResponse:
    """Wraps an HTTP response from the management server, or a client-side
    error message, exposing `success`, `data`, and `error_message`."""

    def __repr__(self):
        return "cp_mgmt_api::Response"

    def __init__(self, response_object, err_message=""):
        """Parse an HTTPResponse into status/data/success fields.

        :param response_object: http.client.HTTPResponse (required when
            err_message is empty)
        :param err_message: when non-empty, marks a client-side failure and
            no response is parsed

        Fixes: when the server returns a JSON error body, `response_body` is
        already a dict, so the original `json.loads(response_body)` raised
        TypeError, fell into a bare `except:`, and dropped the server's
        "message" field. The bare `except:` is also narrowed.
        """
        if err_message == "":
            assert isinstance(response_object, HTTPResponse)
            self.status_code = response_object.status
            try:
                response_body = json.loads(response_object.read())
            except json.JSONDecodeError:
                # Non-JSON body: fall back to the response headers' message.
                response_body = str(response_object.msg)
            self.res_obj = {
                "status_code": response_object.status,
                "data": response_body}
            if self.status_code == 200:  # success
                self.data = response_body
                self.success = True
            else:
                self.success = False
                try:
                    # response_body is already a dict when the server sent
                    # JSON; only parse it when it is still a string.
                    self.data = response_body if isinstance(response_body, dict) \
                        else json.loads(response_body)
                    self.error_message = self.data["message"]
                except json.JSONDecodeError:
                    self.data = {'message': response_body}
                    self.error_message = response_object.reason
                except (TypeError, KeyError):
                    self.data = response_body
                    self.error_message = response_object.reason
        else:
            self.success = False
            self.error_message = err_message
            self.res_obj = {}
#
#
# APIClient encapsulates everything that the user needs to do for communicating
# with a Check Point management server
#
#
class APIClient:
#
# initialize class
#
def __init__(self, http_debug_level=0):
# port on management server
self.port = 443
# management server fingerprint
self.fingerprint = None
# session-id
self.sid = None
# management server name or IP-address
self.server = None
# debug level
self.http_debug_level = http_debug_level
# an array with all the api calls (for debug purposes)
self.api_calls = []
# name of debug file. If left empty, debug data will not be saved
self.debug_file = "debug.txt"
    def __enter__(self):
        # Context-manager entry: supports `with APIClient() as client:`.
        # Logout and the debug dump happen in __exit__.
        return self
#
# destructor
#
def __exit__(self, exc_type, exc_value, traceback):
# if sid is not empty (the login api was called), then call logout
if self.sid:
self.api_call("logout")
# save debug data with api calls to disk
if self.debug_file != "":
print(("\nSaving data to debug file {}\n".format(self.debug_file)))
out_file = open(self.debug_file, 'w+')
out_file.write(json.dumps(
self.api_calls, indent=4, sort_keys=True))
#
# login
# ----------------------------------------------------
# performs a 'login' API call to the management server
#
# arguments:
# server - the IP address or name of the Check Point managemenet
# server
# user - Check Point admin name
# password - Check Point admin password
# continue-last-session - [optional] it is possible to conitue the last
# Check Point session or to create a new one
#
# return: apiresponse object
# side-effects: updates the class's uid and server variables
#
#
def login(self, server, user, password, continue_last_session=False, **kwargs):
"""
Log in to the server with username and password. The server shows your session unique identifier.
Enter this session unique identifier in the 'X-chkp-sid' header of each request.
https://sc1.checkpoint.com/documents/R80/APIs/#gui-cli/login
:param server: (IP or hostname)
:param user: login id
:param password: password
:param continue_last_session: boolean (default False)
:param kwargs: domain (name or UID), session-comments (str), session-description (str), session-name (str),
session-timeout (int, default 600sec)
:return: APIResponse object
"""
credentials = {
"user": user,
"password": password,
"continue-last-session": continue_last_session}
credentials.update(kwargs)
login_res = self.api_call("login", credentials, server)
if login_res.success:
self.sid = login_res.data["sid"]
self.server = server
return login_res
#
# api_call
# ----------------------------------------------------
# performs a web-service API request to the management server
#
# arguments:
# command - the command is placed in the URL field
# payload - a JSON object (or a string representing a JSON object)
# with the command arguments
# server - [optional]. The Check Point management server. when
# omitted use self.server.
# sid - [optional]. The Check Point session-id. when omitted
# use self.sid.
# wait_for_task - dertermines the behavior when the API server responds
# with a "task-id".
# by default, the function will periodically check the
# status of the task
# and will not return until the task is completed.
# when wait_for_task=False, it is up to the user to call
# the "show-task" API and check the status of the
# command.
#
# return: apiresponse object
# side-effects: updates the class's uid and server variables
#
#
def api_call(self, command, payload={}, server=None, sid=None, wait_for_task=True):
# convert the json payload to a string if needed
if isinstance(payload, str):
_data = payload
else:
_data = json.dumps(payload, sort_keys=False)
# update class members if needed.
if server is None:
server = self.server
if sid is None:
sid = self.sid
# set headers
_headers = {
"User-Agent": "python-api-wrapper",
"Accept": "*/*",
"Content-Type": "application/json",
"Content-Length": len(_data)}
# in all API calls (except for 'login') a header containing the
# CheckPoint session-id is required.
if sid is not None:
_headers["X-chkp-sid"] = sid
# create ssl context with no ssl verification, we do it by ourselves
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
# create https connection
conn = HTTPSConnection(server, self.port, context=context)
# set fingerprint
conn.fingerprint = self.fingerprint
# set debug level
conn.set_debuglevel(self.http_debug_level)
url = "/web_api/" + command
try:
# send the data to the Check Point server
conn.request("POST", url, _data, _headers)
# get the response from the Check Point server
response = conn.getresponse()
res = APIResponse(response)
except ValueError as err:
if (err.args[0] == "fingerprint value mismatch"):
err_message = "Error: Fingerprint value mismatch:\n" + " Expecting : {}\n".format(err.args[1]) + " Got : {}\n".format(err.args[2]) + "if you trust the new fingerprint, edit the 'fingerprints.txt' file."
res = APIResponse("", err_message)
else:
res = APIResponse("", err)
except Exception as inst:
res = APIResponse("", inst)
# when the command is 'login' we'd like to convert the password to
# "****" so that it would not appear in the debug file.
if command == "login":
json_data = json.loads(_data)
json_data["password"] = "****"
_data = json.dumps(json_data)
# store the request and the response (for debug purpose).
_api_log = {}
_api_log["request"] = {
"url": url,
"payload": json.loads(_data),
"headers": _headers}
_api_log["response"] = res.res_obj
if self.debug_file != "":
self.api_calls.append(_api_log)
# FOR MY DEBUG save debug data with all api calls to disk
# out_file = open(self.debug_file, 'w+')
# out_file.write(json.dumps(self.api_calls, indent=4, sort_keys=True))
# If we want to wait for the task to end, wait for it
if wait_for_task is True and res.success and "task-id" in res.data:
res = self.__wait_for_task(res.data["task-id"])
return res
#
# get_server_fingerprint
# ----------------------------------------------------
# initiates an HTTPS connection to the server and extracts the SHA1
# fingerprint from the server's certificate.
#
# arguments:
# server - the IP address or name of the Check Point managemenet server
#
# return: string with SHA1 fingerprint (all uppercase letters)
#
def get_server_fingerprint(self, server=None):
server = self.server if server is None else server
context = ssl.create_default_context()
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
conn = HTTPSConnection(server, self.port, context=context)
return conn.get_fingerprint_hash()
#
# __wait_for_task
# ----------------------------------------------------
# When the server needs to perfom an API call that may take a long time
# (e.g. run-script, install-policy, publish), the server responds with a
# 'task-id'.
# Using the show-task API it is possible to check on the status of this
# task until its completion.
# Every two seconds, this function will check for the status of the task.
# The function will return when the task (and its subtasks) are no longer
# in-progress.
#
# arguments:
# task-id - the task identifier
#
def __wait_for_task(self, task_id):
task_complete = False
task_result = None
# as long as there is a task in progress
while not task_complete:
# check the status of the task
task_result = self.api_call("show-task", {"task-id": task_id})
# count the number of tasks that are not in-progress
completed_tasks = sum(1 for task in task_result.data["tasks"] if task["status"] != "in progress") # "INPROGRESS")
# get the total number of tasks
total_tasks = len(task_result.data["tasks"])
# are we done?
if completed_tasks == total_tasks:
task_complete = True
else:
time.sleep(2) # wait 2 sec
return task_result
#
# api_query
# ----------------------------------------------------
# The APIs that return a list of objects are limitted by the number of
# objects that they return.
# To get the full list of objects, there's a need to make repeated API
# calls each time using a different offset until all the objects are
# returned.
# This API makes such repeated API calls and return the full list objects.
#
#
# arguments:
# command - name of API command. This command should be an API
# that returns an array of objects (for example:
# show-hosts, show netowrks, ...)
# details-level - query APIs always take a details-level argument,
# possible values are "standard", "full", "uid"
#
# return: an array of objects.
#
def api_query(self, command, details_level="standard"):
limit = 50 # each time get no more than 50 objects
finished = False # will become true after getting all the data
errors_found = False # will become true in case we get an error
all_objects = [] # accumulate all objects from all the API calls
iterations = 0 # number of times we've made an API call
api_res = {} # API call response object
# are we done?
while not finished:
# make the API call, offset should be increased by 'limit' with
# each iteration
api_res = self.api_call(command, {
"limit": limit,
"offset": iterations*limit,
"details-level": details_level})
iterations = iterations + 1
if api_res.success is True:
# total number of objects
total_objects = api_res.data["total"]
# number of objects we got so far
received_objects = api_res.data["to"]
all_objects = all_objects + api_res.data["objects"]
# did we get all the objects that we're supposed to get
if received_objects == total_objects:
finished = True
else:
return api_res
# replace the data from the last API call with the array of all objects
api_res.data = all_objects
return api_res
#
# check_fingerprint
# ----------------------------------------------------
# This function checks if the server's certificate is stored in the local
# fingerprints file.
# If the server fingerprint is not found, it makes an https connection to
# the server and asks the user if he accepts the server fingerprint.
# If the fingerprint is trusted, then it is stored in the fingerprint file.
#
#
# arguments:
# server - IP address / name of the Check Point management server
#
# return: false if the user does not accept the server certificate, 'true'
# in all other cases.
#
def check_fingerprint(self, server):
# read the fingerprint from the local file
fingerprint = self.read_fingerprint_from_file(server)
# if the fingerprint is not stored on the local file
if fingerprint == "":
# Get the server's fingerprint with a socket.
fingerprint = self.get_server_fingerprint(server)
if fingerprint == "":
return False
print(("Server's fingerprint: {}".format(fingerprint)))
if self.ask_yes_no_question("Do you accept this fingerprint?"):
# Save it
if self.save_fingerprint_to_file(server, fingerprint):
print("Fingerprint saved.")
else:
print("Could not save fingerprint to file. Continuing anyway.")
else:
return False
# set the actual fingerprint in the class instance
self.fingerprint = fingerprint
return True
#
# ask_yes_no_question
# ----------------------------------------------------
# helper function. Present a question to the user with Y/N options.
#
# arguments:
# question - the question to display to the user
#
# return: 'True' if the user typed 'Y'. 'False' is the user typed 'N'
#
@staticmethod
def ask_yes_no_question(question):
answer = input(question + " [y/n] ")
if answer.lower() == "y":
return True
else:
return False
#
# save_fingerprint_to_file
# ----------------------------------------------------
# store a server's fingerprint into a local file.
#
# arguments:
# server - the IP address/name of the Check Point management server
# fingerprint - A SHA1 fingerprint of the server's certificate.
# filename - The file in which to store the certificates. The file
# will hold a JSON structure in which the key is the
# server and the value is its fingerprint.
#
# return: 'True' if everything went well. 'False' if there was some kind of
# error storing the fingerprint.
#
@staticmethod
def save_fingerprint_to_file(server, fingerprint, filename="fingerprints.txt"):
if not fingerprint:
return False
if os.path.isfile(filename):
try:
file = open(filename)
buf = file.read()
json_dict = json.loads(buf)
file.close()
except ValueError as e:
if e.message == "No JSON object could be decoded":
print(("Corrupt JSON file: " + filename))
else:
print((e.message))
return False
except Exception as e:
print(e)
return False
else:
if server in json_dict and json_dict[server] == fingerprint:
return True
else:
json_dict[server] = fingerprint
else:
json_dict = {server: fingerprint}
try:
with open(filename, 'w') as filedump:
json.dump(json_dict, filedump)
filedump.close()
return True
except Exception as e:
print(e)
return False
#
# read_fingerprint_from_file
# ----------------------------------------------------
# reads a server's fingerprint from a local file.
#
# arguments:
# server - the IP address/name of the Check Point management server.
# filename - The file in which to store the certificates. The file
# will hold a JSON structure in which the key is the server
# and the value is its fingerprint.
#
# return: A SHA1 fingerprint of the server's certificate.
#
@staticmethod
def read_fingerprint_from_file(server, filename="fingerprints.txt"):
assert isinstance(server, str)
if os.path.isfile(filename):
file = open(filename)
buf = file.read()
try:
json_dict = json.loads(buf)
file.close()
except ValueError as e:
if e.message == "No JSON object could be decoded":
print(("Corrupt JSON file: " + filename))
else:
print((e.message))
except Exception as e:
print(e)
else:
# file is ok and readable.
if server in json_dict:
return json_dict[server]
return ""
#
#
# HTTPSConnection
# ----------------------------------------------------
# A class for making HTTPS connections that overrides the default HTTPS checks
# (e.g. not accepting self-signed-certificates) and replaces them with a server
# fingerprint check
#
#
class HTTPSConnection(http.client.HTTPSConnection):
    """HTTPS connection that skips the default certificate-authority checks
    (so self-signed certificates are accepted) and instead validates the
    server against an expected SHA1 certificate fingerprint.

    NOTE: ssl.wrap_socket is deprecated and removed in Python 3.12; porting
    to SSLContext.wrap_socket is recommended.
    """

    def connect(self):
        """Open the TCP connection, wrap it in TLS without CA validation,
        and — when a fingerprint is expected — verify the peer certificate.

        Raises ValueError('fingerprint value mismatch', actual, expected)
        when the certificate does not match.
        """
        http.client.HTTPConnection.connect(self)
        self.sock = ssl.wrap_socket(
            self.sock, self.key_file, self.cert_file,
            cert_reqs=ssl.CERT_NONE)
        # Bug fix: getattr without a default raises AttributeError when the
        # 'fingerprint' attribute was never assigned, defeating the
        # 'is not None' guard below; supply None as the default.
        expected = getattr(self, 'fingerprint', None)
        if expected is not None:
            actual = hashlib.new(
                "SHA1", self.sock.getpeercert(True)).hexdigest().upper()
            normalized = expected.replace(':', '').upper()
            if actual != normalized:
                raise ValueError(
                    'fingerprint value mismatch',
                    actual,
                    normalized)

    def get_fingerprint_hash(self):
        """Return the SHA1 hex digest (uppercase) of the peer certificate,
        or "" when the connection cannot be established."""
        try:
            http.client.HTTPConnection.connect(self)
            self.sock = ssl.wrap_socket(
                self.sock, self.key_file, self.cert_file,
                cert_reqs=ssl.CERT_NONE)
        except Exception:
            return ""
        return hashlib.new(
            "SHA1", self.sock.getpeercert(True)).hexdigest().upper()
|
class Solution(object):
    def simplifyPath(self, path):
        """Simplify a Unix-style absolute path.

        '.' and empty segments are ignored, '..' pops one directory (never
        past the root). An empty input is returned unchanged.

        :type path: str
        :rtype: str
        """
        # Bug fix: the original used 'path is ""' — an identity comparison
        # that only works due to CPython string interning; use truthiness.
        if not path:
            return path
        stack = []
        # Note: str.split("/") can never yield a "/" element, so the
        # original's explicit "/" branch was dead code.
        for segment in path.split("/"):
            if segment == "" or segment == ".":
                continue
            if segment == "..":
                if stack:
                    stack.pop()
            else:
                stack.append(segment)
        return "/" + "/".join(stack)
import unittest
class TestCase(unittest.TestCase):
    # Unit tests for Solution.simplifyPath.
    def setUp(self):
        # self.widget = Widget("The widget")
        self.sol = Solution()
    def tearDown(self):
        # self.widget.dispose()
        # self.widget = None
        pass
    def testSimlifyPath(self):
        # Trailing slashes are dropped, '..' pops a segment (but never past
        # the root) and '.' segments are ignored.
        self.assertEqual(self.sol.simplifyPath("/home/"), "/home")
        self.assertEqual(self.sol.simplifyPath("/../"), "/")
        self.assertEqual(self.sol.simplifyPath("/../../../../"), "/")
        self.assertEqual(self.sol.simplifyPath("/a/./b/../../c/"), "/c")
    # def testDefaultSize(self):
    #     assert self.widget.size() == (50,50), 'incorrect default size'
if __name__ == '__main__':
    unittest.main()
from nose.plugins.attrib import attr
from pages.ATG_login_page import ATGLoginPage
from utility.drivermanager import DriverManager
import logging
from utility.services import Services
from pages.post_detail_page import PostDetailPage
#task 10
@attr(website=['party', 'world'])
class PostDetailShareTest(DriverManager):
    """Checks that each share button on the post detail page opens the
    expected share target in a new tab."""

    def login(self):
        """Log in via ATG, open a post detail page and prepare helpers."""
        atg_login_page = ATGLoginPage(self.driver)
        atg_login_page.wait_for_homepage_to_load()
        atg_login_page.login()
        atg_login_page.validate_user_on_homepage()
        self.post = PostDetailPage(self.driver)
        self.post.navigate_to_post_detail_page()
        self.service = Services(self.driver)

    def _share_and_verify(self, click_share, tab_title):
        """Log in, trigger a share control and assert the tab opened.

        Refactors the duplicated bodies of the four tests; also fixes the
        anti-idiom 'PostDetailShareTest.login(self)' -> 'self.login()'.
        """
        self.login()
        click_share(self.post)
        check = self.service.check_tab(tab_title)
        assert check
        logging.info("sharing link works")

    def test_facebook(self):
        self._share_and_verify(lambda post: post.click_share_facebook(), 'Facebook')

    def test_twitter(self):
        self._share_and_verify(lambda post: post.click_share_twitter(), 'Share a link on Twitter')

    def test_reddit(self):
        self._share_and_verify(lambda post: post.click_share_reddit(), 'reddit.com: Log in')

    def test_linkedin(self):
        self._share_and_verify(lambda post: post.click_share_linkedin(), 'LinkedIn')
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
'''
@author: yerik
@contact: xiangzz159@qq.com
@time: 2018/6/26 9:38
@desc: poloniex 数据抓取
'''
import json
import os
import time
import urllib
import urllib.request

import numpy as np
import pandas as pd
# Start-of-year Unix timestamps for each year to download.
ts = {
    '2015': 1420041600,
    '2016': 1451577600,
    '2017': 1483200000,
    '2018': 1514736000,
}
t = 31536000  # one (non-leap) year in seconds
period = 7200  # candle width; valid values: 300, 900, 1800, 7200, 14400, 86400
t1 = time.time()
# Ensure the output directory exists so to_csv does not fail.
os.makedirs('./data', exist_ok=True)
for key in ts:
    try:
        # Bug fix: the query string had been mangled by an HTML-entity round
        # trip ('&curren' rendered as '¤'); restore '&currencyPair='.
        url = ('https://poloniex.com/public?command=returnChartData'
               '&currencyPair=USDT_BTC&start=%d&end=%d&period=%d'
               % (ts[key], ts[key] + t, period))
        print('URL: %s' % url)
        # Bug fix: 'import urllib' alone does not provide urllib.request on
        # Python 3 — the explicit submodule import was added at the top.
        openUrl = urllib.request.urlopen(url)
        r = openUrl.read()
        d = json.loads(r.decode())
        df = pd.DataFrame(d)
        original_columns = [u'date', u'high', u'low', u'open', u'close', u'volume', u'quoteVolume', u'weightedAverage']
        # 'WeighteAverage' typo kept so existing CSV consumers keep working.
        new_columns = ['Timestamp', 'High', 'Low', 'Open', 'Close', 'Volume', 'QuoteVolume', 'WeighteAverage']
        df = df.loc[:, original_columns]
        df.columns = new_columns
        fileName = './data/bitcoin%sto%s.csv' % (key, str(int(key) + 1))
        df.to_csv(fileName, index=None)
    except Exception as e:
        # Narrowed from BaseException so Ctrl-C can still abort the loop.
        print(e)
print(time.time() - t1)
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
# Launch Chrome in incognito mode for a clean session.
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
# NOTE(review): hard-coded, machine-specific chromedriver path.
driver = webdriver.Chrome(executable_path = '/Users/drarn/Documents/Code/Study/BigData/WebScraping/chromedriver', options = options)
# LATAM flight-search results page (BOG -> CTG round trip, fixed dates).
url = 'https://www.latam.com/es_co/apps/personas/booking?fecha1_dia=13&fecha1_anomes=2020-03&fecha2_dia=21&fecha2_anomes=2020-03&from_city2=CTG&to_city2=BOG&auAvailability=1&ida_vuelta=ida_vuelta&vuelos_origen=Bogot%C3%A1&from_city1=BOG&vuelos_destino=Cartagena%20de%20Indias&to_city1=CTG&flex=1&vuelos_fecha_salida_ddmmaaaa=13/03/2020&vuelos_fecha_regreso_ddmmaaaa=21/03/2020&cabina=Y&nadults=1&nchildren=0&ninfants=0&cod_promo=&stopover_outbound_days=0&stopover_inbound_days=0#/'
# Only open the page in the browser when it is reachable.
if requests.get(url).status_code == 200:
    driver.get(url)
def obtenerInfo(indices, precios):
    """Scrape the stop details of the flight modal currently open.

    Arguments:
        indices: 1-based index of the flight in the results list.
        precios: list of price strings, indexed by flight (indices - 1).

    Returns a list of dicts, one per stop, with city, time, airport,
    layover duration and the flight's price. Reads the selenium ``driver``
    global.
    """
    lista_escalas = driver.find_elements_by_xpath('//span[@class="sc-hzDkRC ehGSeR"]')
    lista_esperas = driver.find_elements_by_xpath('//span[@class="sc-cvbbAY kxLrTU"]')
    total_viajes = []
    for idx, escalas in enumerate(lista_escalas):
        datos_viaje = {}
        datos_viaje['Indice'] = indices
        datos_viaje['Ciudad'] = escalas.find_element_by_xpath('.//abbr').text
        datos_viaje['Hora'] = escalas.find_element_by_xpath('.//time').text
        datos_viaje['Aeropuerto'] = escalas.find_element_by_xpath('.//span[@class="sc-csuQGl ktjiAI"]').text
        # Bug fix: the first AND last stops have no layover. The original
        # compared idx == len(lista_escalas), which is never true for an
        # enumerate index, so the last stop wrongly read lista_esperas.
        if idx == 0 or idx == len(lista_escalas) - 1:
            datos_viaje['Tiempo'] = ''
        else:
            datos_viaje['Tiempo'] = lista_esperas[idx - 1].find_element_by_xpath('.//time').get_attribute('datetime')
        datos_viaje['Precio'] = precios[indices - 1]
        total_viajes.append(datos_viaje)
    return total_viajes
# Wait for the result list to render, then collect the expand buttons and
# the per-flight prices.
driver.implicitly_wait(100)
lista_botones = driver.find_elements_by_xpath('//div[@class="flight-summary-stops-description"]/button')
lista_precios = driver.find_elements_by_xpath('//section[@class="container flight-list"]//span[@class="price"]/span[@class="value"]')
vuelos_precios = [precio.text for precio in lista_precios]
vuelos_data= []
# Open each flight's stops modal, scrape it, then close the modal.
for indices, boton in enumerate(lista_botones):
    boton.click()
    driver.implicitly_wait(15)
    vuelos_data.append(obtenerInfo(indices+1,vuelos_precios))
    cerrar = driver.find_element_by_xpath('//div[@class="modal-header sc-dnqmqq cGfTsx"]/button[@class="close"]')
    cerrar.click()
    driver.implicitly_wait(15)
driver.close()
# Flatten the per-flight stop lists into one DataFrame and export.
# NOTE(review): DataFrame.append is deprecated in modern pandas; pd.concat
# would be the replacement.
df = pd.DataFrame(vuelos_data[0])
for i in range(1,len(vuelos_data)):
    df = df.append(vuelos_data[i])
df = df.set_index('Indice')
df.to_csv('VuelosLatam.csv')
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Create your views here.
from django.db import models
from django import forms
from django.forms import ModelForm
from django.db.models import Q
from django.core.context_processors import csrf
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response, get_object_or_404
from django.template import Template, Context, RequestContext
from models import *
from forms import *
|
from django import forms
from django.contrib import admin
from django.urls import reverse
from django.db.models import Count
from django.utils.html import format_html
from adminsortable2.admin import SortableInlineAdminMixin, SortableAdminBase
from core.forms import RichTextField
from eligibility.models import EligibilityTest, EligibilityQuestion
from eligibility.tasks import export_eligibility_tests_stats_as_csv
from exporting.utils import get_admin_export_message
from accounts.admin import AuthorFilter
class EligibilityTestForm(forms.ModelForm):
    # Admin form for EligibilityTest: overrides the long-text fields with the
    # project's rich-text widget.
    introduction = RichTextField(label="Une introduction", required=False)
    conclusion_success = RichTextField(
        label="Une conclusion si le test est positif", required=False
    )
    conclusion_failure = RichTextField(
        label="Une conclusion si le test est négatif", required=False
    )
    conclusion = RichTextField(label="Une conclusion générale", required=False)
    class Meta:
        model = EligibilityTest
        fields = "__all__"
class EligibilityQuestionInline(SortableInlineAdminMixin, admin.TabularInline):
    # Inline on the through model so questions can be attached to a test and
    # reordered by drag & drop (adminsortable2).
    model = EligibilityTest.questions.through
    extra = 1
class EligibilityTestAdmin(admin.ModelAdmin, SortableAdminBase):
    # Admin for eligibility tests: annotated counters, sortable question
    # inline, and CSV export actions (synchronous and background).
    # NOTE(review): adminsortable2 documents the base order as
    # (SortableAdminBase, admin.ModelAdmin); with ModelAdmin first its
    # overrides take MRO precedence — confirm the inline sorting still works.
    list_display = ["id", "name", "author", "nb_aids", "nb_results", "date_created"]
    search_fields = ["name"]
    list_filter = [AuthorFilter]
    actions = ["export_csv", "export_csv_background"]
    form = EligibilityTestForm
    inlines = [EligibilityQuestionInline]
    readonly_fields = ["author", "date_created", "date_updated", "display_related_aids"]
    def get_queryset(self, request):
        # Annotate the aid and result counts once per row so the list
        # columns below do not trigger per-row queries.
        qs = super().get_queryset(request)
        qs = (
            qs.select_related("author")
            .annotate(aid_count=Count("aids", distinct=True))
            .annotate(result_count=Count("aideligibilitytestevent", distinct=True))
        )  # noqa
        return qs
    def save_model(self, request, obj, form, change):
        # Stamp the creating user as author on first save only.
        if not change:
            obj.author = request.user
        super().save_model(request, obj, form, change)
    def nb_aids(self, eligibility_test):
        # Backed by the aid_count annotation from get_queryset.
        return eligibility_test.aid_count
    nb_aids.short_description = "Nombre d'aides"
    nb_aids.admin_order_field = "aid_count"
    def nb_results(self, eligibility_test):
        # Backed by the result_count annotation from get_queryset.
        return eligibility_test.result_count
    nb_results.short_description = "Nombre de résultats"
    nb_results.admin_order_field = "result_count"
    def display_related_aids(self, obj):
        # Render a small HTML table linking to every aid using this test.
        related_aid_html = format_html(
            "<table> \
            <thead><tr> \
            <th>Auteur</th> \
            <th>Aide</th> \
            </tr></thead> \
            <tbody>"
        )
        related_aids = obj.aids.all().order_by("name").select_related("author")  # noqa
        for aid in related_aids:
            url = reverse("admin:aids_aid_change", args=(aid.pk,))
            related_aid_html += format_html(
                '<tr> \
                <td>{author}</td> \
                <td><a href="{url}">{name} (ID : {id})</a></td> \
                </tr>',
                url=url,
                name=aid.name,
                id=aid.pk,
                author=aid.author,
            )
        related_aid_html += format_html("</tbody></table>")
        return related_aid_html
    display_related_aids.short_description = "Aides associées"
    def show_export_message(self, request):
        # Inform the admin user that the export runs in the background.
        self.message_user(request, get_admin_export_message())
    def export_csv(self, request, queryset):
        # Synchronous export: returns the CSV response directly.
        eligibility_tests_id_list = list(queryset.values_list("id", flat=True))
        return export_eligibility_tests_stats_as_csv(
            eligibility_tests_id_list, request.user.id, background=False
        )  # noqa
    export_csv.short_description = "Exporter le premier Test sélectionné en CSV"  # noqa
    def export_csv_background(self, request, queryset):
        # Asynchronous export: queues a task and shows a notice.
        eligibility_tests_id_list = list(queryset.values_list("id", flat=True))
        export_eligibility_tests_stats_as_csv.delay(
            eligibility_tests_id_list, request.user.id
        )  # noqa
        self.show_export_message(request)
    export_csv_background.short_description = (
        "Exporter le premier Test sélectionné en CSV en tâche de fond"  # noqa
    )
    class Media:
        # Trumbowyg rich-text editor assets for the RichTextField widgets.
        css = {
            "all": (
                "css/admin.css",
                "/static/trumbowyg/dist/ui/trumbowyg.css",
            )
        }
        js = [
            "admin/js/jquery.init.js",
            "/static/js/shared_config.js",
            "/static/js/plugins/softmaxlength.js",
            "/static/js/search/enable_softmaxlength.js",
            "/static/trumbowyg/dist/trumbowyg.js",
            "/static/trumbowyg/dist/langs/fr.js",
            "/static/trumbowyg/dist/plugins/upload/trumbowyg.upload.js",
            "/static/jquery-resizable-dom/dist/jquery-resizable.js",
            "/static/trumbowyg/dist/plugins/resizimg/trumbowyg.resizimg.js",
            "/static/js/enable_rich_text_editor.js",
        ]
class EligibilityQuestionAdmin(admin.ModelAdmin):
    # Admin for eligibility questions with an annotated usage counter and a
    # read-only table of the tests each question belongs to.
    list_display = ["text", "author", "nb_tests", "date_created"]
    search_fields = ["text"]
    list_filter = [AuthorFilter]
    readonly_fields = [
        "author",
        "date_created",
        "date_updated",
        "display_related_tests",
    ]
    def get_queryset(self, request):
        # Annotate the test count once per row to avoid per-row queries.
        qs = super().get_queryset(request)
        qs = (
            qs.select_related("author")
            .prefetch_related("eligibility_tests")
            .annotate(test_count=Count("eligibility_tests"))
        )
        return qs
    def save_model(self, request, obj, form, change):
        # Stamp the creating user as author on first save only.
        if not change:
            obj.author = request.user
        super().save_model(request, obj, form, change)
    def nb_tests(self, eligibility_question):
        # Backed by the test_count annotation from get_queryset.
        return eligibility_question.test_count
    nb_tests.short_description = "Nombre de tests"
    nb_tests.admin_order_field = "test_count"
    def display_related_tests(self, obj):
        # Render a small HTML table linking to every test using this question.
        related_test_html = format_html(
            "<table> \
            <thead><tr> \
            <th>Auteur</th> \
            <th>Test d’éligibilité</th> \
            </tr></thead> \
            <tbody>"
        )
        related_tests = (
            obj.eligibility_tests.all().order_by("name").select_related("author")
        )  # noqa
        for test in related_tests:
            url = reverse(
                "admin:eligibility_eligibilitytest_change", args=(test.pk,)
            )  # noqa
            related_test_html += format_html(
                '<tr> \
                <td>{author}</td> \
                <td><a href="{url}">{name} (ID : {id})</a></td> \
                </tr>',
                url=url,
                name=test.name,
                id=test.pk,
                author=test.author,
            )
        related_test_html += format_html("</tbody></table>")
        return related_test_html
    display_related_tests.short_description = "Tests associés"
admin.site.register(EligibilityTest, EligibilityTestAdmin)
admin.site.register(EligibilityQuestion, EligibilityQuestionAdmin)
|
#coding=utf-8
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn.functional as F
from word2vec import SkipGramModel
import dataset
def get_input_layer(word_idx):
    # Build a one-hot vector for a word index.
    # NOTE(review): 'vocabulary_size' is not defined at module level in this
    # file (main() uses a local 'vocab_size'), and this helper is never
    # called here — calling it as-is raises NameError; confirm intent.
    x = torch.zeros(vocabulary_size).float()
    x[word_idx] = 1.0
    return x
def main():
    """Train the skip-gram word2vec model with negative sampling.

    Pairs come from dataset.ProcessData: each pair is (centre word id,
    context word id). Prints running loss every 80 batches and writes the
    learned embeddings to data/embedding.txt.
    """
    pd = dataset.ProcessData()
    pd.init_sample_table()
    vocab_size = 60
    word2vec_model = SkipGramModel(vocab_size, 8)
    optimizer = optim.SGD(word2vec_model.parameters(), lr=0.01)
    for epoch in range(10):
        running_loss = 0.0
        batch = 300
        for i in range(batch):
            res = pd.get_batch_pairs(3, 3)
            pos_pairs, neg_pairs = pd.get_pairs_by_neg_sample(res, 5)
            pos_w = [int(pair[0]) for pair in pos_pairs]
            pos_c = [int(pair[1]) for pair in pos_pairs]
            neg_w = [int(pair[0]) for pair in neg_pairs]
            # Bug fix: negative *context* ids come from pair[1]; the
            # original copied pair[0] (the centre word) into neg_c.
            neg_c = [int(pair[1]) for pair in neg_pairs]
            pos_w = Variable(torch.LongTensor(pos_w))
            pos_c = Variable(torch.LongTensor(pos_c))
            neg_w = Variable(torch.LongTensor(neg_w))
            neg_c = Variable(torch.LongTensor(neg_c))
            optimizer.zero_grad()
            loss = word2vec_model(pos_w, pos_c, neg_w, neg_c)
            loss.backward()
            optimizer.step()
            running_loss += loss.data
            if i % 80 == 79:
                # print-as-function works on both Python 2 and 3.
                print("epoch=%d, batch=%d, loss=%.4f" % (epoch + 1, i, running_loss))
                running_loss = 0.0
    print(word2vec_model.in_embeddings.weight.data[0])
    print("end of word2vec")
    word2vec_model.save_embedding(pd.id2word, "data/embedding.txt")
if __name__ == '__main__':
    main()
|
#homeview.py
from django.shortcuts import render
from models import Bulletin, UserData
from django.http import HttpResponseRedirect
from ajaxviews import pullfeed
import datetime
def home(request):
    """Render the home feed for the logged-in user (pk kept in the session).

    Bug fix: the original fetched the same UserData row three times and left
    the first result ('me') unused; fetch once and reuse it.
    """
    me = UserData.objects.get(pk=request.session['pk'])
    helps = pullfeed(me, True)   # offers of help
    wants = pullfeed(me, False)  # requests for help
    return render(request, 'home.html', {'helps': helps, 'wants': wants})
def redirecthome(request):
    """Send any request on to the relative 'home/' URL."""
    destination = 'home/'
    return HttpResponseRedirect(destination)
|
class Solution:
    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (positive integers whose only prime
        factors are 2, 3 and 5); the sequence starts 1, 2, 3, 4, 5, 6, 8...

        Classic three-pointer merge. The original looped count <= n and so
        generated two values past the answer; this builds exactly n values.
        n < 1 falls back to 1, matching the original behaviour.
        """
        if n < 1:
            return 1
        ugly = [1]
        i2 = i3 = i5 = 0
        while len(ugly) < n:
            nxt = min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5)
            ugly.append(nxt)
            # Advance every pointer that produced nxt, so duplicates
            # (e.g. 6 = 2*3 = 3*2) are emitted only once.
            if nxt == ugly[i2] * 2:
                i2 += 1
            if nxt == ugly[i3] * 3:
                i3 += 1
            if nxt == ugly[i5] * 5:
                i5 += 1
        return ugly[n - 1]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Timothée Lecomte
# This file is part of Friture.
#
# Friture is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# Friture is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Friture. If not, see <http://www.gnu.org/licenses/>.
from numpy import linspace, log2, floor, log10, cos, arange, pi
from numpy.fft import rfft
DEFAULT_FFT_SIZE = 7  # exponent index; yields 4096 points via fft_size below
DEFAULT_FREQ_SCALE = 1  # log10
SAMPLING_RATE = 44100  # Hz
DEFAULT_MAXFREQ = SAMPLING_RATE / 2  # Nyquist frequency
DEFAULT_MINFREQ = 20  # Hz
fft_size = 2 ** DEFAULT_FFT_SIZE * 32  # 2**7 * 32 = 4096 points
class audioproc():
    """Spectrum-analyser helper: decimation, Hann windowing and FFT of
    audio blocks, plus cached A/B/C psychoacoustic weighting curves.

    Py3 fix: this code predates Python 3, where '/' between ints yields a
    float — the original produced float array shapes and float linspace
    counts, which raise TypeError. Sizes are now computed with '//'/int();
    all quantities involved are powers of two, so the values are identical
    on Python 2.
    """
    def __init__(self):
        # Placeholder frequency axis / weighting curves until the first
        # call to update_freq_cache().
        self.freq = linspace(0, SAMPLING_RATE / 2, 10)
        self.A = 0. * self.freq
        self.B = 0. * self.freq
        self.C = 0. * self.freq
        self.maxfreq = 1.
        self.decimation = 1  # power-of-two downsampling factor (kept int)
        self.window = arange(0, 1)
        self.size_sq = 1.
        self.fft_size = 10
    def analyzelive(self, samples):
        """Return the normalised power spectrum of one block of samples."""
        samples = self.decimate(samples)
        # uncomment the following to disable the decimation altogether
        # decimation = 1
        # FFT for a linear transformation in frequency scale
        fft = rfft(samples * self.window)
        spectrum = self.norm_square(fft)
        return spectrum
    def norm_square(self, fft):
        """Squared magnitude, normalised by the squared block size."""
        return (fft.real**2 + fft.imag**2) / self.size_sq
        # This is done in Cython, too costly in numpy
        #return pyx_norm_square(fft, 1. / self.size_sq)
    def decimate(self, samples):
        """Cheap decimation: keep every decimation-th sample."""
        if self.decimation > 1:
            # Bug fix: shape entries must be ints ('//' instead of '/').
            samples.shape = len(samples) // self.decimation, self.decimation
            # the full way
            # samples = samples.mean(axis=1)
            # the simplest way
            samples = samples[:, 0]
        return samples
    def set_fftsize(self, fft_size):
        """Change the FFT size and refresh all derived caches."""
        if fft_size != self.fft_size:
            self.fft_size = fft_size
            self.update_freq_cache()
            self.update_window()
            self.update_size()
    def set_maxfreq(self, maxfreq):
        """Set the analysis bandwidth; picks a power-of-two decimation."""
        if maxfreq != self.maxfreq:
            self.maxfreq = maxfreq
            decimation = SAMPLING_RATE / (2 * maxfreq)
            if decimation < 1:
                self.decimation = 1
            else:
                # Bug fix: floor()/2** return floats; keep the factor int.
                self.decimation = int(2 ** floor(log2(decimation)))
            self.update_freq_cache()
            self.update_window()
            self.update_size()
    def get_freq_scale(self):
        """Frequency axis matching the spectra from analyzelive()."""
        return self.freq
    def get_freq_weighting(self):
        """Cached (A, B, C) psychoacoustic weighting curves in dB."""
        return self.A, self.B, self.C
    def update_size(self):
        # Normalisation constant for norm_square (block size squared).
        self.size_sq = float(self.fft_size // self.decimation) ** 2
    def update_window(self):
        N = self.fft_size // self.decimation
        n = arange(0, N)
        # Hann window : better frequency resolution than the rectangular window
        self.window = 0.5 * (1. - cos(2 * pi * n / (N - 1)))
    def update_freq_cache(self):
        n_bins = self.fft_size // (2 * self.decimation) + 1
        if len(self.freq) != n_bins:
            self.freq = linspace(0, SAMPLING_RATE / (2 * self.decimation), n_bins)
        # compute psychoacoustic weighting. See http://en.wikipedia.org/wiki/A-weighting
        f = self.freq
        Rc = 12200. ** 2 * f ** 2 / ((f ** 2 + 20.6 ** 2) * (f ** 2 + 12200. ** 2))
        Rb = 12200. ** 2 * f ** 3 / ((f ** 2 + 20.6 ** 2) * (f ** 2 + 12200. ** 2) * ((f ** 2 + 158.5 ** 2) ** 0.5))
        Ra = 12200. ** 2 * f ** 4 / ((f ** 2 + 20.6 ** 2) * (f ** 2 + 12200. ** 2) * ((f ** 2 + 107.7 ** 2) ** 0.5) * ((f ** 2 + 737.9 ** 2) ** 0.5))
        # eps avoids log10(0) at DC where the response is zero.
        eps = 1e-50
        self.C = 0.06 + 20. * log10(Rc + eps)
        self.B = 0.17 + 20. * log10(Rb + eps)
        self.A = 2.0 + 20. * log10(Ra + eps)
# above is done a FFT of the signal. This is ok for linear frequency scale, but
# not satisfying for logarithmic scale, which is much more adapted to voice or music
# analysis
# Instead a constant Q transform should be used
# Alternatively, we could use a ear/cochlear model : logarithmic
# frequency scale, 4000 logarithmic-spaced bins, quality factors
# determined from mechanical model, and 50 ms smoothing afterwards
# for the sensor cell response time. The problem here comes from the
# implementation: how to do it cleverly ?
# on top of that, we could add the reponse of the middle ear, which is
# a roughly band-pass filter centered around 1 kHz (see psychoacoustic
# models)
# def analyzelive_cochlear(self, samples, num_channels, lowfreq, maxfreq):
# samples -= samples.mean()
#
# fs = 16000.
# [ERBforward, ERBfeedback] = MakeERBFilters(SAMPLING_RATE, num_channels, lowfreq)
# filtered_samples = ERBFilterBank(ERBforward, ERBfeedback, samples)
# spectrum = (abs(filtered_samples)**2).mean(axis=1)
# self.freq = frequencies(SAMPLING_RATE, num_channels, lowfreq)
#
# return spectrum[::-1], self.freq[::-1]
|
import requests
from time import sleep
# Target site and per-request timeout from the user.
url = input('url: ')
timeout = int(input('timeout: '))
try:
    # Resolve any redirect once; resp_url is used as the base to detect
    # off-site redirects in request() below.
    resp_url = requests.get(url,timeout=timeout).url
except:
    print('Could not connect to the site')
    quit()
def request(url, timeout):
    """Probe one ?author=N URL and print the username it redirects to.

    WordPress redirects /?author=N to /author/<name>/; the name is the 5th
    path segment of the redirected URL. Exits the whole scan (quit()) on a
    network error, when the URL no longer redirects, when the redirect
    leaves the site (reads the resp_url global), or when the author id is
    exhausted.
    """
    try:
        resp = requests.get(url, timeout=timeout)
    except requests.RequestException:
        # Bug fix: narrowed from a bare 'except:' which also swallowed
        # KeyboardInterrupt, making the scan impossible to abort.
        quit()
    if resp.url == url or resp_url not in resp.url:
        quit()
    if 'author' in resp.url:
        try:
            # /author/<name>/ -> segment 4 of the redirected URL.
            print(resp.url.split('/')[4])
        except IndexError:
            quit()
# Enumerate author ids until request() calls quit() (no more redirects or
# an off-site redirect).
count = 1
while True:
    check_url = url + '?author=' + str(count)
    count += 1
    request(check_url,timeout)
|
import numpy as np
from os.path import isfile
import random
# Total number of optimisation iterations the driver will run.
total_iter = 100
# Lower/upper bounds for each design variable (bar cross-section area).
# NOTE(review): these names shadow the built-in min()/max() in this module.
min = 0.06
max = 1.5
# Number of design variables (bars in the truss).
bars = 18
class partical:
    # One grey-wolf "particle": a design vector A of bar areas plus its
    # fitness and personal best, persisted through per-index text files.
    def __init__(self, A=np.zeros((1, bars))):
        # NOTE(review): mutable (ndarray) default argument is shared across
        # instances created without A — confirm no instance mutates it.
        self.fits = 0
        self.A = A
        self.density = 0.1  # material density used in the weight term
        self.pbest = None
        self.pbest_fit = None
    def start(self):
        # Random initial design uniformly inside [min, max] per bar.
        wolf = [min + np.random.random() * (max - min) for _ in range(bars)]
        self.A = np.array(wolf)
    def get_fit(self, count):
        # Read the FEM results (displacement + stress) for particle *count*
        # and combine weight and constraint penalties into self.fits.
        file_disp = open('DISP_{}.txt'.format(str(count)), 'r')
        file_strs = open('fre{}.txt'.format(str(count)), 'r')
        disp = file_disp.read()
        strs = file_strs.read()
        file_disp.close()
        file_strs.close()
        u = float(disp)
        strs = float(strs)
        #read the bars lenth
        length = []
        with open('Length.txt', 'r') as file:
            lines = file.readlines()
            for line in lines:
                length.append(line)
        tempfit = 0
        # Expand each of the 3 design values into groups of 4/8/6 bars.
        # NOTE(review): this also overwrites self.A with the expanded list —
        # confirm downstream code expects the expanded representation.
        le = [4, 8, 6] #檢查此處!!!!!!!!!!!!!!!!!!!!!!!!!!!!! (review this!)
        A = []
        for i, a in enumerate(self.A):
            A += [a]*le[i%3]
        self.A = A
        # Structure weight = density * area * length summed over bars.
        for x, y in zip(self.A, length):
            y = float(y)
            tempfit += self.density * x * y
        # Penalties: displacement target 0.5 and stress limit 29.
        self.fits = tempfit + 1000 * (0.5 - np.abs(u))**2 + (np.abs(strs) - 29) * 100
    def get_pbest_and_pbestfit(self, count):
        # Load the stored personal best and keep the better of it and the
        # current fitness.
        # NOTE(review): the existence check uses 'pbest0.txt' but the read
        # uses 'pbest{count}.txt' — confirm all pbest files exist together.
        if isfile('pbest0.txt'):
            with open('pbest{}.txt'.format(str(count)), 'r') as file:
                lines = file.readlines()
                lines = list(map(lambda x:float(x), lines))#let list(string) to list(float)
                # Last line holds the stored fitness; the rest is the vector.
                if self.fits < lines[-1]:
                    self.pbest = self.A
                    self.pbest_fit = self.fits
                else:
                    self.pbest_fit = lines[-1]
                    self.pbest = np.array(lines[: -1])
        else:
            self.pbest = self.A
            self.pbest_fit = self.fits
    def write_best_file(self, count):
        # Persist pbest vector followed by its fitness, one value per line.
        with open('pbest{}.txt'.format(str(count)), 'w') as file:
            for xp in self.pbest:
                file.write('{}\n'.format(xp))
            file.write('{}\n'.format(self.pbest_fit))
    def write_new_partical_file(self, count):
        # Persist the current design vector, one value per line.
        with open('partical{}.txt'.format(str(count)), 'w+') as file:
            for x in self.A:
                file.write('{}\n'.format(x))
def bad_fit(wolfs, wolf):
    """Exploration move for a below-average wolf: step towards three
    randomly chosen wolves (GWO position update) and clip to [min, max].

    Returns a new partical; *wolf* itself is not modified.
    """
    # Pick three random leaders (duplicates allowed).
    leaders = [random.randint(0, len(wolfs) - 1) for _ in range(3)]
    # Randomised distance to each leader (shape (1, bars) each).
    distances = [
        np.abs(np.random.random((1, bars)) * 2 * [wolfs[k].A] - [wolf.A])
        for k in leaders
    ]
    span = 2
    coeffs = [span * 2 * random.random() - span for _ in range(3)]
    # Candidate positions proposed by each leader.
    moved = [
        wolfs[k].A - c * d for k, c, d in zip(leaders, coeffs, distances)
    ]
    # Plain average of the three proposals, flattened to a list of floats.
    candidate = ((moved[0] + moved[1] + moved[2]) / 3)[0].tolist()
    # Clamp each coordinate into the allowed design range.
    clipped = [min if v < min else (max if v > max else v) for v in candidate]
    return partical(np.array(clipped))
#partical
def nice_fit(wolfs, wolf, iter_times):
    """Exploitation move for an above-average wolf: step towards the three
    best wolves (alpha/beta/delta) with a step size that shrinks as the
    iteration count grows, then clip to [min, max].

    Returns a new partical; *wolf* itself is not modified.
    """
    # Randomised distance to each of the three leading wolves.
    distances = [
        np.abs(np.random.random((1, bars)) * 2 * [wolfs[k].A] - [wolf.A])
        for k in range(3)
    ]
    # Step span decays quadratically over the run.
    span = 2 * (1 - (iter_times**2) / total_iter**2)
    coeffs = [span * 2 * random.random() - span for _ in range(3)]
    moved = [wolfs[k].A - coeffs[k] * distances[k] for k in range(3)]
    # Weighted average: alpha counts most, then beta, then delta.
    candidate = ((1.5 * moved[0] + 1.3 * moved[1] + moved[2]) / (1.5 + 1.3 + 1))[0].tolist()
    # Clamp each coordinate into the allowed design range.
    clipped = [min if v < min else (max if v > max else v) for v in candidate]
    return partical(np.array(clipped))
#partical
def update(wolfs):
    # Produce the next generation: keep the top three wolves as-is, move the
    # rest with nice_fit/bad_fit depending on their fitness vs the average.
    # Expects *wolfs* sorted ascending by fitness (best first).
    #檢查迭代次數 (check the iteration counter)
    # The iteration count is persisted in iter_times.txt between runs.
    if not isfile('iter_times.txt'):
        with open('iter_times.txt', 'w') as file:
            iter_times = 1
            file.write(str(iter_times))
    else:
        with open('iter_times.txt', 'r+') as file:
            iter_times = int(file.read()) + 1
        with open('iter_times.txt', 'w') as file:
            file.write(str(iter_times))
    # Elitism: the three best wolves survive unchanged.
    temp_wolfs = wolfs[0: 3]
    #temp_wolfs = []
    total_fit = 0
    for w in wolfs:
        total_fit += w.fits
    average_fit = total_fit / len(wolfs)
    for w in wolfs[3: ]:
    #for w in wolfs:
        # Higher fitness is worse here: above-average wolves explore,
        # below-average wolves exploit towards the leaders.
        if w.fits > average_fit:
            temp_wolf = bad_fit(wolfs, w)
        else:
            temp_wolf = nice_fit(wolfs, w, iter_times)
        temp_wolfs.append(temp_wolf)
    return temp_wolfs
def getkey(w):
    """Sort key: a wolf's fitness value."""
    return w.fits
def main():
    """Run one generation of the file-driven grey-wolf optimisation.

    Reads partical0..partical29 from disk, evaluates fitness and personal
    bests, sorts the pack, records the best particles, evolves the pack via
    update(), and writes the new particles back to disk.
    """
    number = 30
    wolfs = []
    # read particles back from their per-index text files
    for x in range(number):
        with open('partical{}.txt'.format(str(x)), 'r') as file:
            lines = file.readlines()
        lines = list(map(float, lines))  # one float per line
        wolfs.append(partical(np.array(lines)))
    for count, wolf in enumerate(wolfs):
        wolf.get_fit(count)  # calculate fitness
        wolf.get_pbest_and_pbestfit(count)
    wolfs.sort(key=getkey)  # sort by fitness, ascending
    for count, wolf in enumerate(wolfs):
        wolf.write_best_file(count)
    wolfs = update(wolfs)
    for count, wolf in enumerate(wolfs):
        # fixed: the call parentheses were missing, so the bare attribute
        # access did nothing and the new particles were never written
        wolf.write_new_partical_file(count)
if __name__ == '__main__':
    main()
from django.db import models
from django.utils.translation import ugettext_lazy as _
from users.models import Profile
from .validators import phone_regex
from .custom_fields import ListField
from PIL import Image
# WEEKDAYS = [
# (1, _("Monday")),
# (2, _("Tuesday")),
# (3, _("Wednesday")),
# (4, _("Thursday")),
# (5, _("Friday")),
# (6, _("Saturday")),
# (7, _("Sunday")),
# ]
# (number, name) pairs for OpeningHours.weekday choices; 1 = Monday .. 7 = Sunday.
# NOTE(review): these labels are not localised (a ugettext_lazy variant exists
# only in commented-out form) — confirm whether translation is required.
WEEKDAYS = [
    (1, "Monday"),
    (2, "Tuesday"),
    (3, "Wednesday"),
    (4, "Thursday"),
    (5, "Friday"),
    (6, "Saturday"),
    (7, "Sunday"),
]
class Category(models.Model):
    """A menu category with a URL slug and a display image."""
    name = models.CharField(max_length=50)
    slug = models.SlugField()
    # falls back to default.jpg when no image is uploaded
    image = models.ImageField(default='default.jpg', upload_to='category_images')
    def __str__(self):
        return self.name
class Location(models.Model):
    """A geographic point (decimal latitude/longitude)."""
    latitude = models.DecimalField(max_digits=22, decimal_places=16)
    longitude = models.DecimalField(max_digits=22, decimal_places=16)
    def __str__(self):
        return "lat:{}, lng: {}".format(self.latitude, self.longitude)
class PaymentMethod(models.Model):
    """A payment option a restaurant can accept (e.g. cash, card)."""
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
class OpeningHours(models.Model):
    """One weekly opening interval for a restaurant."""
    weekday = models.IntegerField(choices=WEEKDAYS)  # 1=Monday .. 7=Sunday
    from_hour = models.TimeField()
    to_hour = models.TimeField()
    restaurant = models.ForeignKey('app.Restaurant', on_delete=models.DO_NOTHING)
    class Meta:
        # chronological ordering within the week; no duplicate intervals
        ordering = ('weekday', 'from_hour')
        unique_together = ('weekday', 'from_hour', 'to_hour')
def upload_menu_image_destination(instance, filename):
    """Storage path for a MenuItem image: <restaurant name>/menu_images/<original filename>."""
    # fixed: the path ended in a literal '(unknown)' and ignored the uploaded filename
    return f'{instance.restaurant.name}/menu_images/{filename}'
def upload_cover_image_to(instance, filename):
    """Storage path for a Restaurant cover image: <restaurant name>/menu_images/<original filename>.

    NOTE(review): the directory is 'menu_images', the same as menu-item
    uploads — confirm whether 'cover_images' was intended.
    """
    # fixed: the path ended in a literal '(unknown)' and ignored the uploaded filename
    return f'{instance.name}/menu_images/{filename}'
class Restaurant(models.Model):
    """A restaurant with its location, accepted payment methods and cover image."""
    name = models.CharField(max_length=60)
    slogan = models.CharField(max_length=150, blank=True, null=True)
    description = models.TextField()
    address = models.TextField()
    city = models.CharField(max_length=60)
    location = models.OneToOneField(Location, on_delete=models.DO_NOTHING)
    payment_methods = models.ManyToManyField(PaymentMethod)
    cover_image = models.ImageField(
        upload_to=upload_cover_image_to,
        blank=True,
        null=True
    )
    def __str__(self):
        return self.name
    @property
    def categories(self):
        """Distinct Categories referenced by this restaurant's menu items."""
        cat_ids = self.menuitem_set.all().values_list('category').distinct()
        return Category.objects.filter(pk__in=cat_ids)
class Order(models.Model):
    """A reservation/order request placed by a user profile at a restaurant."""
    # TODO: handle ListField Problem
    date = models.DateField()
    time = models.TimeField()
    number_of_people = models.IntegerField()
    name = models.CharField(max_length=150)
    # optional; format enforced by phone_regex
    telephone_number = models.CharField(validators=[phone_regex],
                                        max_length=17,
                                        blank=True)
    email = models.EmailField()
    order_restaurant = models.ForeignKey(Restaurant, on_delete=models.DO_NOTHING)
    order_profile = models.ForeignKey(Profile, on_delete=models.DO_NOTHING)
# TODO: Add options like (big pizza) +2.34, (whole grain) +1.22
class MenuItem(models.Model):
    """A dish on a restaurant's menu."""
    name = models.CharField(max_length=255)
    description = models.TextField()
    category = models.ForeignKey(Category, on_delete=models.DO_NOTHING)
    restaurant = models.ForeignKey(Restaurant, on_delete=models.DO_NOTHING)
    image = models.ImageField(upload_to=upload_menu_image_destination, blank=True, null=True)
    price = models.DecimalField(max_digits=5, decimal_places=2)
    is_active = models.BooleanField(default=False)
    is_healthy = models.BooleanField(default=False)
    def __str__(self):
        return self.name
class OrderItem(models.Model):
    """Line item linking a MenuItem to an Order with a quantity."""
    # NOTE(review): 'name' is a ForeignKey to MenuItem, not a text field —
    # consider renaming (e.g. 'menu_item') in a future migration.
    name = models.ForeignKey(MenuItem, on_delete=models.DO_NOTHING)
    user_order = models.ForeignKey(Order, on_delete=models.DO_NOTHING)
    number_ordered = models.IntegerField()
    def __str__(self):
        return f'Product: {self.name}, Number: {str(self.number_ordered)}'
|
class Atm(object):
    """Toy ATM bound to a single card; methods only print canned output."""

    def __init__(self, card_number, pin):
        self.card_number = card_number
        self.pin = pin

    def wth(self):
        # demo only: announces a withdrawal, returns nothing
        print("Money withdrawn...")

    def statement(self):
        # demo only: prints a canned, empty statement
        print("Account statement:-")
        print("Sorry you dont have any money in your account...")
atm = Atm(21212121,1223)
# fixed: the methods print their own output and return None, so wrapping the
# calls in print() appended a stray "None" line after each one
atm.wth()
atm.statement()
|
# Квадратная матрица поменять минимальный элемент и дигональный элемент строки.
def getMatrix():
    """Prompt for a size, read that many rows of floats, return (matrix, size)."""
    size = int(input('Введите размер квадрвтной матрицы: '))
    print('Построчно введите элементы матрицы:')
    rows = [list(map(float, input('> ').split())) for _ in range(size)]
    return rows, size
def printMatrix(matrix):
    """Print the matrix, one row (as a Python list) per line."""
    for row in matrix:
        print(row)
matrix, size = getMatrix()
# Swap each row's minimum element with that row's diagonal element.
for i in range(size):
    min_idx = 0
    for j in range(size):
        if matrix[i][j] < matrix[i][min_idx]:
            min_idx = j
    matrix[i][min_idx], matrix[i][i] = matrix[i][i], matrix[i][min_idx]
# fixed: printMatrix prints itself and returns None, so wrapping it in
# print() emitted a trailing "None" line
printMatrix(matrix)
#------------------------------------------------------------------------------
# Remove the main diagonal from a square matrix by shifting the lower
# triangle up one row (translated from Russian).
n = int(input('Введите размер квадрвтной матрицы: '))
mat = []
for i in range(n):
    x = list(map(int, input().split()))
    mat.append(x)
print(mat)
# fixed: the row loop ran to n, so mat[i+1] raised IndexError on the last
# row; only the first n-1 rows need to pull values up from below
for i in range(n-1):
    for j in range(n-1):
        if i >= j:
            mat[i][j] = mat[i+1][j]
for i in range(n-1):
    print(mat[i])
#------------------------------------------------------------------------------
# Build a square matrix of the form (translated from Russian):
# 1 2 3 4 5
# 2 1 2 3 4
# 3 2 1 2 3
# 4 3 2 1 2
# 5 4 3 2 1
n = int(input('Введите размер квадрвтной матрицы: '))
a=[n*[0] for i in range(n)]
# first row is 1..n
for i in range(n):
    a[0][i] = i+1
    print(a[0][i], end=' ')
# each later row: +1 left of the diagonal, -1 from the diagonal onward
for i in range(1,n):
    # fixed: print('\n') ended the line AND emitted a blank line between
    # rows; print() terminates the previous row cleanly
    print()
    for j in range(i):
        a[i][j] = a[i-1][j]+1
        print(a[i][j], end=' ')
    for j in range(i,n):
        a[i][j] = a[i-1][j]-1
        print(a[i][j], end=' ')
# or (alternative)
M = int(input('\nВведите размер строки'))
# Row 0 is 0..M-1; each later row is the previous row rotated right by one.
matrix = [[i*1 for i in range(M)] for i in range(M)]
for i in range(1,M):
    for j in range(M):
        matrix[i][j] = matrix[i-1][(j + M - 1)% M]
# NOTE(review): this produces a circulant (rotated) matrix starting at 0,
# which does NOT match the |i-j|+1 pattern shown above (row 0 would at
# least need i+1 instead of i*1) — confirm intent.
# NOTE(review): list comprehension used purely for its print side effect.
[print(*i) for i in matrix]
# Find the sum of matrix elements under the main and secondary diagonals,
# inclusive (translated from Russian).
M = int(input('\nВведите размер строки'))
matrix = [0]*M
for i in range(M):
    matrix[i] = list(map(int,input().split()))
for i in range(M):
    print(matrix[i])
S = 0
for i in range(M):
    for j in range(M):
        # NOTE(review): this condition selects the LEFT triangle (on/below
        # main diag AND on/above anti-diag) plus the RIGHT triangle — i.e.
        # everything except the top and bottom wedges.  If "under both
        # diagonals" means the bottom triangle only, the condition should be
        # (i >= j and j >= M - i - 1) — confirm against the task statement.
        if (i >= j and j <= M - i - 1) or (i <= j and j >= M - i - 1):
            S += matrix[i][j]
print(S)
|
# fixed: the bare open() leaked the file handle; "with" guarantees closure
with open("./Day8/day08.input") as _f:
    data = _f.read().splitlines()
# sign operators for the instruction arguments ("+3" / "-3")
ops = {"+": lambda x: x, "-": lambda x: -x}
def execute_game(data, score, step, steps_completed):
    """Run the handheld-console program from `step`.

    Returns the accumulator (`score`) either when an instruction is about to
    execute a second time (infinite loop detected) or when `step` moves past
    the end of the program (normal termination).

    `steps_completed` is mutated in place to record visited instructions.
    """
    # fixed: terminate cleanly when execution runs off the end — the old code
    # indexed data[step] unconditionally and raised IndexError on halting
    # programs; the loop check now also runs before parsing the instruction
    if step >= len(data) or step in steps_completed:
        return score
    steps_completed.append(step)
    operation, argument = data[step].split(" ")
    delta = ops[argument[:1]](int(argument[1:]))
    if operation == "acc":
        score += delta
    if operation == "jmp":
        return execute_game(data, score, step + delta, steps_completed)
    # nop and acc both fall through to the next instruction
    return execute_game(data, score, step + 1, steps_completed)
result = execute_game(data, 0, 0, [])
print(result)
# coding=utf-8
import os
from pytoolbox.util import pmc_config
from pytoolbox.util.pmc_config import read_string
class App:
    """Flask application settings (development defaults)."""
    ADMINS = ['liufan@lvye.com', 'yiwang@lvye.com', 'mengyu@lvye.com', 'zhoushiwei@lvye.com']
    JSON_AS_ASCII = False  # allow non-ASCII (Chinese) characters in JSON responses
    # NOTE(review): falls back to a hard-coded secret when SECRET_KEY is unset
    SECRET_KEY = os.environ.get('SECRET_KEY') or '.yek eyvl'
    TESTING = False
    DEBUG = True
    LOGIN_DISABLED = False
    WTF_CSRF_SECRET_KEY = 'a random string'
    # NOTE(review): DB credentials are embedded in the connection URI in source
    SQLALCHEMY_DATABASE_URI = 'mysql://lvye_pay:p@55word@127.0.0.1:3306/lvye_pay_pub'
    SQLALCHEMY_ECHO = False
    SQLALCHEMY_POOL_SIZE = 30
    SQLALCHEMY_POOL_TIMEOUT = 60
    SQLALCHEMY_MAX_OVERFLOW = 60
    SQLALCHEMY_POOL_RECYCLE = 3600
class UserCenter:
    """Endpoints and cookie name for the passport/account user-center service."""
    PASSPORT_LOGIN_URL = 'http://api.passport.lvye.com/oauth/toLogin/'
    AUTH_COOKIE = '4a7e7f902968e79c8b4e4975f316eb65'  # session cookie name
    IS_PASSPORT_LOGIN_URL = 'http://api.passport.lvye.com/header/islogin.shtml'
    LOGIN_URL = 'http://account.lvye.cn/accounts/login/'
    LOGOUT_URL = 'http://account.lvye.cn/accounts/logout/'
IS_PROD = False  # this is the non-production configuration
HOST_URL = 'http://pay.lvye.com'
# Lvye corporate domain and user
LVYE_CORP_DOMAIN_NAME = 'lvye_corp'
LVYE_ADVERTISING_USER_NAME = 'lvye_advertising'
# TODO: keep this setting in sync (translated from Chinese)
IS_ALL_OPENED = True
class Checkout:
    """Checkout/payment-page settings."""
    ZYT_MAIN_PAGE = 'http://pay.lvye.com/main'
    VALID_NETLOCS = ['pay.lvye.com']  # hosts allowed for checkout redirects
    # NOTE(review): AES key is hard-coded in source — consider an env/secret store
    AES_KEY = "SGGN8L8LXO0FV00K0AVO7F9HYEU6HMH4"
    PAYMENT_CHECKOUT_VALID_SECONDS = 5 * 60  # checkout link lifetime
    WEIXIN_AUTH_REDIRECT_URI = "http://account.lvye.cn/weixin/auth_redirect/"
# # pay api
class LvyePaySitePayClientConfig:
    """Pay-API client credentials/endpoints for the lvye_pay_site channel (test keys)."""
    MD5_KEY = read_string('conf/test/md5_key.txt')
    CHANNEL_PRI_KEY = read_string('conf/test/channel_pri_key.txt')
    CHANNEL_NAME = 'lvye_pay_site'
    ROOT_URL = "http://pay.lvye.com/api/__"
    CHECKOUT_URL = 'http://pay.lvye.com/checkout/{sn}'  # {sn} = order serial number
class LvyeCorpPaySitePayClientConfig:
    """Pay-API client credentials/endpoints for the lvye_corp_pay_site channel (test keys)."""
    MD5_KEY = read_string('conf/test/md5_key.txt')
    CHANNEL_PRI_KEY = read_string('conf/test/channel_pri_key.txt')
    CHANNEL_NAME = 'lvye_corp_pay_site'
    ROOT_URL = "http://pay.lvye.com/api/__"
    CHECKOUT_URL = 'http://pay.lvye.com/checkout/{sn}'  # {sn} = order serial number
# channel used when the caller does not specify one
DEFAULT_CHANNEL = LvyePaySitePayClientConfig.CHANNEL_NAME
# #### data #######
def load_provinces_and_cities(file_path):
    """Parse a tab-separated 'province code city code' file under the project root.

    Returns (provinces, cities): `provinces` maps province_code -> province
    name; `cities` maps province_code -> OrderedDict of city_code -> city
    name.  Both preserve the file's row order.
    """
    import io
    from os import path
    from collections import OrderedDict
    provinces = OrderedDict()
    cities = OrderedDict()
    full_path = path.join(pmc_config.get_project_root(), file_path)
    # io.open decodes UTF-8 directly (works on Python 2 and 3); replaces the
    # older codecs.getreader wrapper around a byte-mode open()
    with io.open(full_path, encoding='utf-8') as fin:
        for line in fin:
            province, province_code, city, city_code = line.strip().split('\t')
            provinces.setdefault(province_code, province)
            cities.setdefault(province_code, OrderedDict()).setdefault(city_code, city)
    return provinces, cities
class Data:
    # NOTE: this reads the code file from disk at module import time
    PROVINCES, CITIES = load_provinces_and_cities('conf/province_and_city_code.txt')
class SMSConfig:
    """SMS gateway endpoint and credentials."""
    URL = 'http://sdk999ws.eucp.b2m.cn:8080/sdkproxy/sendsms.action'
    # NOTE(review): credentials are hard-coded in source
    CD_KEY = '9SDK-EMY-0999-JBQOO'
    PASSWORD = '506260'
|
# ANI2102A19/sitecustomize.py | Programmation Python avec Maya | coding=utf-8
# Exemple d'un script de configuration lancé à l'initialisation de l'interpréteur Python, avant le démarrage de Maya.
# Pour être exécuté, le fichier doit absolument s'appeler 'sitecustomize.py' et se trouver au bon emplacement :
# Windows: <drive>:\Program Files\Autodesk\Maya<Version>\Python\lib\site-packages
# MacOSX: Applications/Autodesk/maya<Version>/Maya.app/Contents/Frameworks/Python.framework/Versions/Current/lib/python<version>/site-packages
# Linux: /usr/autodesk/maya/lib/python<version>/site-packages
import os
import sys
# 1. read the 'HOME' and 'USER' environment variables
home = os.getenv('HOME')
user = os.getenv('USER')
# 2. add the user's 'Desktop' directory to the paths where Maya can load Python scripts
# NOTE(review): home[:home.find(user)] assumes USER occurs verbatim inside HOME
# (e.g. /home/<user>); find() returning -1 would corrupt the path — confirm.
sys.path.append(
    os.path.join(
        home[:home.find(user)], user, 'Desktop'))
# 3. add the user's 'Document' directory to the same script search path
# NOTE(review): the standard folder is usually named 'Documents' — confirm
# that 'Document' is intentional.
sys.path.append(
    os.path.join(
        home[:home.find(user)], user, 'Document'))
|
from pprint import pprint
# читаем адресную книгу в формате CSV в список contacts_list
import csv
import re
def opening_file():
    """Read phonebook_raw.csv and return all rows as a list of lists."""
    with open("phonebook_raw.csv", encoding='utf-8') as f:
        rows = csv.reader(f, delimiter=",")
        contacts_list = list(rows)
    return contacts_list
# NOTE: removed a stray module-level opening_file() call here — its return
# value was discarded, so it only re-read the CSV for nothing.
def forming_correct_file_format():
    """Normalise the raw phonebook rows.

    - splits the combined FIO field across the surname/name/patronymic columns
    - rewrites phone numbers to +7(XXX)XXX-XX-XX, keeping 'доб.' extensions
    - merges duplicate people (same surname+name, compatible patronymic),
      keeping the lexicographically larger value per column

    Returns the deduplicated rows, header first.
    """
    rows = opening_file()  # fixed: was re-read from disk on every use
    new_contact_list = [rows[0]]  # header row
    # fixed: the phone pattern was recompiled for every contact; compile once
    pattern_number = re.compile(r"(\+7|8)\s*\(?(\d{3})[\)\-]?\s*(\d{3})\-?(\d{2})\-?(\d{2})(\s*\(?(доб\.\s*\d{4})\)?)?")
    sub_pattern_phone = r"+7(\2)\3-\4-\5 \7"
    for contact in rows:
        fio = ' '.join([contact[0], contact[1], contact[2]])
        # raw string avoids the invalid-escape warning on "\w"
        name_parts = re.findall(r"\w+", fio)
        while len(name_parts) < 3:
            name_parts.append('')
        contact[0], contact[1], contact[2] = name_parts
        contact[5] = pattern_number.sub(sub_pattern_phone, contact[5]).rstrip()
        for new_contact in new_contact_list:
            if new_contact[0] == contact[0] and \
                    new_contact[1] == contact[1] and \
                    (new_contact[2] == contact[2] or
                     new_contact[2] == '' or contact[2] == ''):
                # merge: keep the larger (usually non-empty) value per column
                for i in range(7):
                    new_contact[i] = max(new_contact[i], contact[i])
                break
        else:
            new_contact_list.append(contact)
    return new_contact_list
# pprint (forming_correct_file_format())
def writing_new_file():
    """Write the normalised contact list out as new_formated_phonebook.csv."""
    rows = forming_correct_file_format()
    with open("new_formated_phonebook.csv", "w", newline='', encoding='utf-8') as f:
        csv.writer(f, delimiter=',').writerows(rows)
writing_new_file()
import multiprocessing
import random
import time
def f(q):
    """Producer: forever push [random 1-100, None, 'hello'] onto q, 5x/second."""
    while True:
        value = random.randint(1, 100)
        q.put([value, None, 'hello'])
        time.sleep(0.2)
def g(q):
    """Producer: forever push [random 1-100, None, 'goodbye'] onto q every 1.2 s."""
    while True:
        value = random.randint(1, 100)
        q.put([value, None, 'goodbye'])
        time.sleep(1.2)
if __name__ == '__main__':
    # fan-in demo: two producer processes feed one shared queue
    q = multiprocessing.Queue()
    producers = [
        multiprocessing.Process(target=f, args=(q,)),
        multiprocessing.Process(target=g, args=(q,)),
    ]
    for proc in producers:
        proc.start()
    # consume forever, printing each tagged value as it arrives
    while True:
        msg = q.get()
        tag = msg[2]
        if tag in ('hello', 'goodbye'):
            print('{} = {}'.format(tag, msg[0]))
# python ./backend/tests/queueing.py
# -*- coding: utf-8 -*-
from tree import xmlTree
from Node import node
import time
import sys
from GUI import *
from computeTree import *
#create a tree from scratch
def test1():
    """Build a tree from scratch: addNode must return the same instance that
    getChild(0) later yields."""
    root = node()
    #attach a parallel composite as the first child
    newChild = root.addNode("par")
    #the child list is zero-based
    child = root.getChild(0)
    #compare by instance identity
    if newChild.comparTo(child) == True:
        print("test 1: success!")
    else:
        print("test 1: failed :-(")
#provide a tree xml file
def test2():
    """Load tests/test2.xml and check the root's first child has 5 children."""
    tree = xmlTree("tests/test2.xml")
    # root is always a plan node
    firstChild = tree.getRoot().getChild(0)
    total = 0
    for _ in firstChild.getChildren():
        total += 1
    if total == 5:
        print("test 2: success!")
    else:
        print("test 2: failed :-(")
#this test check the node-type contratur and thir oo
def test3():
    """Create one child of each node type under a seq node, tag each with a
    probability attribute, and verify all six children were attached.

    Writes the resulting tree to tests/test3.xml (tests 4, 5, 8, 10 read it back).
    """
    root = node()
    firstChild = root.addNode("seq")
    if firstChild == None:
        print ("error creating seq node")
        print("test 3: failed :-(")
        return None
    # fixed: six copy-pasted create/setAttrib stanzas replaced by a
    # data-driven loop; (tag, label used in the original error messages)
    node_types = [
        ("seq", "seq"),
        ("seq", "seq"),
        ("loop", "loop"),
        ("par", "parallel"),
        ("tsk", "tsk"),
        ("sel", "selector"),
    ]
    for tag, label in node_types:
        tempN = firstChild.addNode(tag)
        if tempN == None:
            print ("error creating {} node".format(label))
        else:
            tempN.setAttrib("probability","0.1 0.5")
    #iterate over firstChild children:
    count = 0
    for childNode in firstChild.getChildren():
        count += 1
    if count == 6:
        print("test 3: success! please check the file test3.xml - every tag need to have the same attrib.")
    else:
        print("test 3: failed :-(")
    #print the tree we built from scratch to xml file.
    #please check the file- every tag need to have the same attrib.
    root.treeToXml("tests/test3.xml")
#please run test 3 before test 4:
def test4():
    """Re-read tests/test3.xml (run test 3 first) and verify every node
    reports its expected type via boolWhoAmI."""
    tree = xmlTree("tests/test3.xml")
    #remember- root is always type/tag- plan
    root = tree.getRoot()
    firstChild = root.getChild(0)
    ans = [root.boolWhoAmI("plan"), firstChild.boolWhoAmI("seq")]
    for index, tag in enumerate(["seq", "seq", "loop", "par", "tsk", "sel"]):
        ans.append(firstChild.getChild(index).boolWhoAmI(tag))
    # fixed: the old loop checked range(0, 7) and silently skipped the last
    # of the 8 answers, and "success!" was printed even after a failure
    if all(ans):
        print("test 4: success!")
    else:
        print("test 4: failed :-(")
#please run test 3 before test 5: - check attrib func/method
def test5():
    """Check probability attributes on the tree written by test 3: root and
    its first child carry none, the six grandchildren carry '0.1 0.5'."""
    tree = xmlTree("tests/test3.xml")
    #remember- root is always type/tag- plan
    root = tree.getRoot()
    firstChild = root.getChild(0)
    ans = [root.getAttrib("probability"), firstChild.getAttrib("probability")]
    for index in range(6):
        ans.append(firstChild.getChild(index).getAttrib("probability"))
    ok = True
    #ans[0] and ans[1] have no attribute - must be None
    if ans[0] != None or ans[1] != None:
        ok = False
        print("test 5: failed :-(")
    # fixed: the old loop stopped at index 6, never checking the last child,
    # and "success!" was printed even after a failure
    for index in range(2, 8):
        if ans[index] != "0.1 0.5":
            ok = False
            print("test 5: failed :-(")
            print (index)
    if ok:
        print("test 5: success!")
#check the monitor set/get func/method
def test6():
    """Clear the monitor flag on every grandchild of the root and verify
    isMonitored reports False for each."""
    tree = xmlTree("tests/test2.xml")
    children = tree.getRoot().getChild(0).getChildren()
    for child in children:
        child.setMonitor(False)
    for child in children:
        if child.isMonitored() != False:
            print("test 6: failed :-(")
            return None
    print("test 6: success!")
#empty test - will be implemented- feeling creative? :-)
def test7():
    """Build a par node with three seq children (mostly tsk leaves, plus one
    nested seq at j==1/i==2), attach uniform success/failure distributions,
    run the tree five times, then dump it to output/test4.xml."""
    tree = node()
    root = tree
    #first child
    firstChild = root.addNode("par")
    if firstChild == None:
        print ("error creating seq node")
        print("test 7: failed :-(")
        return None
    dist_succ = _createUniformDist(2,5)
    dist_fail = _createUniformDist(6,10)
    for j in range(3):
        tempN = firstChild.addNode("seq")
        if tempN == None:
            print ("error creating seq node")
        # NOTE: the nested loops below reuse the name `i`, masking this index
        for i in range(5):
            if ((j==1) and (i==2)):
                # one branch gets a nested seq holding four tsk leaves
                tempN1 = tempN.addNode("seq")
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    for i in range(4):
                        tempN2 = tempN1.addNode("tsk")
                        if tempN2 == None:
                            print ("error creating seq node")
                        else:
                            tempN2.setAttrib("time","1")
                            tempN2.setAttrib("succ","T")
                            #tempN2.setTime(0)
                            #tempN2.setSucc(False)
                            tempN2.setProbTable([0.8, 0.5])
                            for i in range(2):
                                tempN2.addDistToSuccTable(dist_succ)
                                tempN2.addDistToFailTable(dist_fail)
                            tempN2.setAttrib("Successdistribution",tempN2.distTableSucc)
                            tempN2.setAttrib("Failuredistribution",tempN2.distTableFail)
            else:
                # ordinary tsk leaf
                tempN1 = tempN.addNode("tsk")
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    tempN1.setAttrib("time","1")
                    tempN1.setAttrib("succ","T")
                    #tempN1.setTime(0)
                    #tempN1.setSucc(False)
                    tempN1.setProbTable([0.7, 0.5])
                    for i in range(2):
                        tempN1.addDistToSuccTable(dist_succ)
                        tempN1.addDistToFailTable(dist_fail)
                    tempN1.setAttrib("Successdistribution",tempN1.distTableSucc)
                    tempN1.setAttrib("Failuredistribution",tempN1.distTableFail)
    #iterate over firstChild children:
    firstChildList = firstChild.getChildren()
    for i in range(5):
        firstChild.run(0)
    count = 0
    for childNode in firstChildList:
        count += 1
    if count == 3:
        print("test 7: success! please check the file output/test4.xml - every tag need to have the same attrib.")
    else:
        print("test 7: failed :-(")
    #print the tree we built from scratch to xml file.
    #please check the file- every tag need to have the same attrib.
    root.treeToXml("output/test4.xml")
def test8():
    """Attach normal success/failure distributions to a node, read them back
    and compare parameters, then exercise the computed-distribution counters."""
    tree = xmlTree("tests/test3.xml")
    root = tree.getRoot()
    #this child is type- tsk
    child = root.getChild(0)
    ### create new distributions
    dist_succ = _createNormalDist(5,2)
    dist_fail = _createNormalDist(4,1)
    #add to succ table
    child.addDistToSuccTable(dist_succ)
    #add to fail table
    child.addDistToFailTable(dist_fail)
    #get distribution from the node by its index (p1,p2,p3..)
    dist_get_succ = child.getSuccDistAtIndex(0)
    dist_get_fail = child.getFailDistAtIndex(0)
    #check that the distributions round-trip with the same parameters
    if (dist_get_succ != None and dist_get_succ.parmM == float(5) and dist_get_succ.parmG == float(2) and dist_get_fail != None and dist_get_fail.parmM == float(4) and dist_get_fail.parmG == float(1)):
        print ("test 8.1: success!")
    else:
        # fixed: the failure message was a bare string expression, never printed
        print ("test 8.1: failed :-(")
    # computed distribution: per-time-value counters
    dist = _createComputedDist()
    dist.setValueToTime(0.1,1)
    dist.setValueToTime(0.1, dist.getCountByTime(0.1)+1 )
    dist.setValueToTime(0.2,1)
    dist.setValueToTime(0.05,1)
    if (dist.getCountByTime(0.1) == 2 and dist.getCountByTime(0.2) == 1 and dist.getCountByTime(0.05) == 1):
        print ("test 8.2: success!")
    else:
        # fixed: same missing-print defect as above
        print ("test 8.2: failed :-( - check computed dist")
#this test read test9.xml and create distributaion as needed for tskNode
def test9():
    """Load tests/test9.xml and verify the task node's three success
    distributions deserialise as Computed, Uniform and Normal."""
    tree = xmlTree("tests/test9.xml")
    #root is node type plan
    root = tree.getRoot()
    #this child is type- seq
    child = root.getChild(0)
    #this child is type- tsk
    tskChild = child.getChild(0)
    #fetch each distribution from the success table
    distC = tskChild.getSuccDistAtIndex(2)
    distU = tskChild.getSuccDistAtIndex(1)
    distN = tskChild.getSuccDistAtIndex(0)
    if( distC.whoAmI() == "Computed" and float(distC.getCountByTime(0.1)) == 5 and float(distC.getCountByTime(257)) == 977):
        print ("test 9.1: success!")
    else:
        # fixed: the three failure messages below were bare string
        # expressions, never printed (also renumbered "test 9:" -> "test 9.1:")
        print ("test 9.1: failed :-( - check computed dist")
    if( distU.whoAmI() == "Uniform" and float(distU.parmA) == 0 and float(distU.parmB) == 5 ):
        print ("test 9.2: success!")
    else:
        print ("test 9.2: failed :-( - check uniform dist")
    if ( distN.whoAmI() == "Normal"):
        print ("test 9.3: success!")
    else:
        print ("test 9.3: failed :-( - check normal dist")
def test10():
    """Same round-trip checks as test 8, plus printMe/calcProb smoke calls on
    computed, normal and uniform distributions."""
    tree = xmlTree("tests/test3.xml")
    root = tree.getRoot()
    #this child is type- tsk
    child = root.getChild(0)
    ### create new distributions
    dist_succ = _createNormalDist(5,2)
    dist_fail = _createNormalDist(4,1)
    dist_fail1 = _createUniformDist(5, 8)
    #add to succ table
    child.addDistToSuccTable(dist_succ)
    #add to fail table
    child.addDistToFailTable(dist_fail)
    #get distribution from the node by its index (p1,p2,p3..)
    dist_get_succ = child.getSuccDistAtIndex(0)
    dist_get_fail = child.getFailDistAtIndex(0)
    #check that the distributions round-trip with the same parameters
    if (dist_get_succ != None and dist_get_succ.parmM == float(5) and dist_get_succ.parmG == float(2) and dist_get_fail != None and dist_get_fail.parmM == float(4) and dist_get_fail.parmG == float(1)):
        print ("test 10.1: success!")
    else:
        # fixed: the failure message was a bare string expression, never printed
        print ("test 10.1: failed :-(")
    # computed distribution: per-time-value counters
    dist = _createComputedDist()
    dist.setValueToTime(0.1,1)
    dist.setValueToTime(0.1, dist.getCountByTime(0.1)+1 )
    dist.setValueToTime(0.2,1)
    dist.setValueToTime(0.05,1)
    dist.printMe()
    # fixed: parenthesised the remaining Python-2 print statements for
    # consistency with the rest of the file (single-argument form is
    # equivalent on Python 2 and valid on Python 3)
    print(dist.calcProb())
    print("-----------")
    dist_succ.printMe()
    print(dist_succ.calcProb())
    dist_fail1.printMe()
    print(dist_fail1.calcProb())
    if (dist.getCountByTime(0.1) == 2 and dist.getCountByTime(0.2) == 1 and dist.getCountByTime(0.05) == 1):
        print ("test 10.2: success!")
    else:
        # fixed: same missing-print defect as above
        print ("test 10.2: failed :-( - check computed dist")
#def test11():
# #in AdiEvent2- I removed x,y and id attribue. so we can see easily the decorator not,loop (L!)
# tree = xmlTree("AdiEvent2.xml")
# root= tree.getRoot()
# seqChild = root.getChild(0)
# if seqChild == None:
# print("test 11: failed :-( ")
# else:
# #check debug reading from the file
# #seqChild. getDEBUGtime() = 5
# if seqChild.getDEBUGsucc() == True :#and seqChild.DEBUG[1] == 5 :
# print("test 11.1: success")
# else:
# print ("test 11.1: failed :-( ")
# seqChild.setDEBUGresult("True 100")
# if seqChild.getDEBUGsucc() == True :#and seqChild.DEBUG[1] == 5 :
# print("test 11.2: success")
# else:
# print ("test 11.2: failed :-( ")
def test12():
    """Round-trip tests/event1.xml through the tree writer."""
    xmlTree("tests/event1.xml").treeToXml("output/test12.xml")
    print("test 12: success- check output/test12.xml file")
def test14():
    """Build a par node with seq/sel/loop children (the loop gets a single
    child, broken out early), attach uniform distributions and DEBUG flags,
    run five times with node.debugMode off and five with it on, writing the
    results to output/test14a.xml and output/test14b.xml."""
    tree = node()
    root = tree
    #first child
    firstChild = root.addNode("par")
    if firstChild == None:
        print ("error creating seq node")
        print("test 14: failed :-(")
        return None
    dist_succ = _createUniformDist(2,5)
    dist_fail = _createUniformDist(6,10)
    firstChild.DEBUGchild = True
    for j in range(3):
        if j==0:
            tempN = firstChild.addNode("seq")
            if tempN == None:
                print ("error creating seq node")
        if j==1:
            tempN = firstChild.addNode("sel")
            if tempN == None:
                print ("error creating seq node")
        if j==2:
            tempN = firstChild.addNode("loop")
            if tempN == None:
                print ("error creating seq node")
        for i in range(5):
            if ((j==1) and (i==2)):
                tempN1 = tempN.addNode("seq")
                tempN.DEBUGchild = True
                tempN1.DEBUGchild = True
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    for i in range(4):
                        tempN2 = tempN1.addNode("tsk")
                        if tempN2 == None:
                            print ("error creating seq node")
                        else:
                            tempN2.setProbTable([0.1, 0.5])
                            for i in range(2):
                                dist_fail = _createUniformDist(6,10-i)
                                tempN2.addDistToSuccTable(dist_succ)
                                tempN2.addDistToFailTable(dist_fail)
                            tempN2.setAttrib("Successdistribution",tempN2._distTableToString(tempN2.distTableSucc))
                            tempN2.setAttrib("Failuredistribution",tempN2._distTableToString(tempN2.distTableFail))
                            tempN2.setDebug("True 100")
            else:
                tempN1 = tempN.addNode("tsk")
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    tempN1.setProbTable([0.3, 0.5])
                    for i in range(2):
                        tempN1.addDistToSuccTable(dist_succ)
                        tempN1.addDistToFailTable(dist_fail)
                    tempN1.setAttrib("Successdistribution",tempN1._distTableToString(tempN1.distTableSucc))
                    tempN1.setAttrib("Failuredistribution",tempN1._distTableToString(tempN1.distTableFail))
            # the loop decorator (j==2) takes exactly one child
            if j==2:
                break
    #run without debug, then with debug
    node.debugMode = False
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test14a.xml")
    print("test 14.1: success! please check the file test14a.xml - every tag need to have the same attrib.")
    # fixed: was a Python-2-only bare print statement; the call form is
    # equivalent on Python 2 and consistent with the rest of the file
    print("phase 2")
    node.debugMode = True
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test14b.xml")
    print("test 14.2: success! please check the file test14b.xml - every tag need to have the same attrib.")
#empty test - will be implemented- feeling creative? :-)
def test15():
    """Build a par node with three seq children (mostly tsk leaves, plus one
    nested seq at j==1/i==2), attach uniform distributions and DEBUG flags,
    run five times with node.debugMode off and five with it on, writing the
    results to output/test15a.xml and output/test15b.xml."""
    tree = node()
    root = tree
    #first child
    firstChild = root.addNode("par")
    if firstChild == None:
        print ("error creating seq node")
        print("test 15: failed :-(")
        return None
    dist_succ = _createUniformDist(2,5)
    dist_fail = _createUniformDist(6,10)
    firstChild.DEBUGchild = True
    for j in range(3):
        tempN = firstChild.addNode("seq")
        if tempN == None:
            print ("error creating seq node")
        # NOTE: the nested loops below reuse the name `i`, masking this index
        for i in range(5):
            if ((j==1) and (i==2)):
                tempN1 = tempN.addNode("seq")
                tempN.DEBUGchild = True
                tempN1.DEBUGchild = True
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    for i in range(4):
                        tempN2 = tempN1.addNode("tsk")
                        if tempN2 == None:
                            print ("error creating seq node")
                        else:
                            tempN2.setProbTable([0.8, 0.5])
                            for i in range(2):
                                dist_fail = _createUniformDist(6,10-i)
                                tempN2.addDistToSuccTable(dist_succ)
                                tempN2.addDistToFailTable(dist_fail)
                            tempN2.setAttrib("Successdistribution",tempN2._distTableToString(tempN2.distTableSucc))
                            tempN2.setAttrib("Failuredistribution",tempN2._distTableToString(tempN2.distTableFail))
                            tempN2.setDebug("True 100")
            else:
                tempN1 = tempN.addNode("tsk")
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    tempN1.setProbTable([0.7, 0.5])
                    for i in range(2):
                        tempN1.addDistToSuccTable(dist_succ)
                        tempN1.addDistToFailTable(dist_fail)
                    tempN1.setAttrib("Successdistribution",tempN1._distTableToString(tempN1.distTableSucc))
                    tempN1.setAttrib("Failuredistribution",tempN1._distTableToString(tempN1.distTableFail))
    #run without debug, then with debug
    node.debugMode = False
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test15a.xml")
    print("test 15.1: success! please check the file test15a.xml - every tag need to have the same attrib.")
    node.debugMode = True
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test15b.xml")
    print("test 15.2: success! please check the file test15b.xml - every tag need to have the same attrib.")
    #print the tree we built from scratch to xml file.
    #please check the file- every tag need to have the same attrib.
#check the monitor set/get func/method
def test16():
    """Like test 14 (par with seq/sel/loop children, loop gets one child),
    but also verifies the par node ends up with exactly three children;
    writes output/test16a.xml and output/test16b.xml."""
    tree = node()
    root = tree
    #first child
    firstChild = root.addNode("par")
    if firstChild == None:
        print ("error creating par node")
        print("test 16: failed :-(")
        return None
    dist_succ = _createUniformDist(2,5)
    dist_fail = _createUniformDist(6,10)
    firstChild.DEBUGchild = True
    for j in range(3):
        if j==0:
            tempN = firstChild.addNode("seq")
            if tempN == None:
                print ("error creating seq node")
        if j==1:
            tempN = firstChild.addNode("sel")
            if tempN == None:
                print ("error creating sel node")
        if j==2:
            tempN = firstChild.addNode("loop")
            if tempN == None:
                print ("error creating seq node")
        for i in range(5):
            if ((j==1) and (i==2)):
                tempN1 = tempN.addNode("seq")
                tempN.DEBUGchild = True
                tempN1.DEBUGchild = True
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    for i in range(4):
                        tempN2 = tempN1.addNode("tsk")
                        if tempN2 == None:
                            print ("error creating seq node")
                        else:
                            tempN2.setProbTable([0.1, 0.5])
                            for i in range(2):
                                dist_fail = _createUniformDist(6,10-i)
                                tempN2.addDistToSuccTable(dist_succ)
                                tempN2.addDistToFailTable(dist_fail)
                            tempN2.setAttrib("Successdistribution",tempN2._distTableToString(tempN2.distTableSucc))
                            tempN2.setAttrib("Failuredistribution",tempN2._distTableToString(tempN2.distTableFail))
                            tempN2.setDebug("True 100")
            else:
                tempN1 = tempN.addNode("tsk")
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    tempN1.setProbTable([0.3, 0.5])
                    for i in range(2):
                        tempN1.addDistToSuccTable(dist_succ)
                        tempN1.addDistToFailTable(dist_fail)
                    tempN1.setAttrib("Successdistribution",tempN1._distTableToString(tempN1.distTableSucc))
                    tempN1.setAttrib("Failuredistribution",tempN1._distTableToString(tempN1.distTableFail))
            # the loop decorator (j==2) takes exactly one child
            if j==2:
                break
    #iterate over firstChild children:
    firstChildList = firstChild.getChildren()
    node.debugMode = False
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test16a.xml")
    # fixed: was a Python-2-only bare print statement; the call form is
    # equivalent on Python 2 and consistent with the rest of the file
    print("phase 2")
    node.debugMode = True
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test16b.xml")
    count = 0
    for childNode in firstChildList:
        count += 1
    if count == 3:
        print("test 16: success! please check the file test4.xml - every tag need to have the same attrib.")
    else:
        print("test 16: failed :-(")
#empty test - will be implemented- feeling creative? :-)
def test17():
    """Like test 15 (par with three seq children) but also verifies the par
    node has exactly three children; both the debug-off and debug-on runs
    write to output/test17.xml (the second write overwrites the first)."""
    tree = node()
    root = tree
    #first child
    firstChild = root.addNode("par")
    if firstChild == None:
        print ("error creating parallel node")
        print("test 17: failed :-(")
        return None
    dist_succ = _createUniformDist(2,5)
    dist_fail = _createUniformDist(6,10)
    firstChild.DEBUGchild = True
    for j in range(3):
        tempN = firstChild.addNode("seq")
        if tempN == None:
            print ("error creating seq node")
        for i in range(5):
            if ((j==1) and (i==2)):
                tempN1 = tempN.addNode("seq")
                tempN.DEBUGchild = True
                tempN1.DEBUGchild = True
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    for i in range(4):
                        tempN2 = tempN1.addNode("tsk")
                        if tempN2 == None:
                            print ("error creating seq node")
                        else:
                            tempN2.setProbTable([0.8, 0.5])
                            for i in range(2):
                                dist_fail = _createUniformDist(6,10-i)
                                tempN2.addDistToSuccTable(dist_succ)
                                tempN2.addDistToFailTable(dist_fail)
                            tempN2.setAttrib("Successdistribution",tempN2._distTableToString(tempN2.distTableSucc))
                            tempN2.setAttrib("Failuredistribution",tempN2._distTableToString(tempN2.distTableFail))
                            tempN2.setDebug("True 100")
            else:
                tempN1 = tempN.addNode("tsk")
                if tempN1 == None:
                    print ("error creating seq node")
                else:
                    tempN1.setProbTable([0.7, 0.5])
                    for i in range(2):
                        tempN1.addDistToSuccTable(dist_succ)
                        tempN1.addDistToFailTable(dist_fail)
                    tempN1.setAttrib("Successdistribution",tempN1._distTableToString(tempN1.distTableSucc))
                    tempN1.setAttrib("Failuredistribution",tempN1._distTableToString(tempN1.distTableFail))
    #iterate over firstChild children:
    firstChildList = firstChild.getChildren()
    node.debugMode = False
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test17.xml")
    # fixed: was a Python-2-only bare print statement; the call form is
    # equivalent on Python 2 and consistent with the rest of the file
    print("phase 2")
    node.debugMode = True
    for i in range(5):
        firstChild.run(0)
    root.treeToXml("output/test17.xml")
    count = 0
    for childNode in firstChildList:
        count += 1
    if count == 3:
        print("test 17: success! please check the file test4.xml - every tag need to have the same attrib.")
    else:
        print("test 17: failed :-(")
    #print the tree we built from scratch to xml file.
    #please check the file- every tag need to have the same attrib.
    # root.treeToXml("test4.xml")
def test18():
    """Read tests/test3.xml, attach normal success/failure distributions to
    the first child and verify their parameters (18.1); then exercise a
    computed distribution's per-time counters (18.2).

    Bug fix: both failure messages were bare string expressions and were
    never printed.
    """
    tree = xmlTree("tests/test3.xml")
    root = tree.getRoot()
    #this child is type- tsk
    child = root.getChild(0)
    #create new normal distributions
    dist_succ = _createNormalDist(5, 2)
    dist_fail = _createNormalDist(4, 1)
    #add to succ table
    child.addDistToSuccTable(dist_succ)
    #add to fail table
    child.addDistToFailTable(dist_fail)
    #get distribution from the node by its index (p1,p2,p3..)
    dist_get_succ = child.getSuccDistAtIndex(0)
    dist_get_fail = child.getFailDistAtIndex(0)
    #check that it has the same parms
    if (dist_get_succ != None and dist_get_succ.parmM == float(5) and dist_get_succ.parmG == float(2) and dist_get_fail != None and dist_get_fail.parmM == float(4) and dist_get_fail.parmG == float(1)):
        print("test 18.1: success!")
    else:
        print("test 18.1: failed :-(")
    # try to create computed dist.
    dist = _createComputedDist()
    dist.setValueToTime(0.1, 1)
    dist.setValueToTime(0.1, dist.getCountByTime(0.1) + 1)
    dist.setValueToTime(0.2, 1)
    dist.setValueToTime(0.05, 1)
    if (dist.getCountByTime(0.1) == 2 and dist.getCountByTime(0.2) == 1 and dist.getCountByTime(0.05) == 1):
        print("test 18.2: success!")
    else:
        print("test 18.2: failed :-( - check computed dist")
#this test reads test9.xml and checks the distributions created for a tskNode
def test19():
    """Verify the three success distributions (Normal, Uniform, Computed)
    parsed from tests/test9.xml for the first tsk node.

    Bug fix: the three failure messages were bare string expressions and
    were never printed.
    """
    tree = xmlTree("tests/test9.xml")
    #root is node type plan
    root = tree.getRoot()
    #this child is type- seq
    child = root.getChild(0)
    #this child is type- tsk
    tskChild = child.getChild(0)
    #get dist from the distTable
    distC = tskChild.getSuccDistAtIndex(2)
    distU = tskChild.getSuccDistAtIndex(1)
    distN = tskChild.getSuccDistAtIndex(0)
    if (distC.whoAmI() == "Computed" and float(distC.getCountByTime(0.1)) == 5 and float(distC.getCountByTime(257)) == 977):
        print("test 19.1: success!")
    else:
        print("test 19.1: failed :-( - check computed dist")
    if (distU.whoAmI() == "Uniform" and float(distU.parmA) == 0 and float(distU.parmB) == 5):
        print("test 19.2: success!")
    else:
        print("test 19.2: failed :-( - check uniform dist")
    if (distN.whoAmI() == "Normal"):
        print("test 19.3: success!")
    else:
        print("test 19.3: failed :-( - check normal dist")
def test20():
    """Like test18 but also prints printMe()/calcProb() for the created
    distributions.

    Bug fix: failure messages were bare string expressions and were never
    printed; print statements converted to print() calls.
    """
    tree = xmlTree("tests/test3.xml")
    root = tree.getRoot()
    #this child is type- tsk
    child = root.getChild(0)
    #create new distributions
    dist_succ = _createNormalDist(5, 2)
    dist_fail = _createNormalDist(4, 1)
    dist_fail1 = _createUniformDist(5, 8)
    #add to succ table
    child.addDistToSuccTable(dist_succ)
    #add to fail table
    child.addDistToFailTable(dist_fail)
    #get distribution from the node by its index (p1,p2,p3..)
    dist_get_succ = child.getSuccDistAtIndex(0)
    dist_get_fail = child.getFailDistAtIndex(0)
    #check that it has the same parms
    if (dist_get_succ != None and dist_get_succ.parmM == float(5) and dist_get_succ.parmG == float(2) and dist_get_fail != None and dist_get_fail.parmM == float(4) and dist_get_fail.parmG == float(1)):
        print("test 20.1: success!")
    else:
        print("test 20.1: failed :-(")
    # try to create computed dist.
    dist = _createComputedDist()
    dist.setValueToTime(0.1, 1)
    dist.setValueToTime(0.1, dist.getCountByTime(0.1) + 1)
    dist.setValueToTime(0.2, 1)
    dist.setValueToTime(0.05, 1)
    dist.printMe()
    print(dist.calcProb())
    print("-----------")
    dist_succ.printMe()
    print(dist_succ.calcProb())
    dist_fail1.printMe()
    print(dist_fail1.calcProb())
    if (dist.getCountByTime(0.1) == 2 and dist.getCountByTime(0.2) == 1 and dist.getCountByTime(0.05) == 1):
        print("test 20.2: success!")
    else:
        print("test 20.2: failed :-( - check computed dist")
#provide a tree xml file
def test21():
    """Run tests/event1.xml for 100 plans per world state, first in offline
    mode then in debug mode, reporting probabilities and timings.

    Fixes: duplicated `elapsed` computation removed; print statements
    converted to print() calls for consistency with the rest of the file.
    """
    start = time.time()
    tree = xmlTree("tests/event1.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/testE211.xml")
    print("test 21.1: success!, testE211.xml")
    node.debugMode = False
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE212.xml")
    print("test 21.2: success!, testE212.xml")
    print("Success probability in offline mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-------Debug mode-------")
    node.debugMode = True
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE213.xml")
    print("test 21.3: success!, testE213.xml")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test22():
    """Run tests/event2.xml offline then in debug mode (100 plans per world
    state each), reporting probabilities and timings.

    Fixes: duplicated `elapsed` computation removed; print statements
    converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/event2.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/testE221.xml")
    print("test 22.1: success!")
    node.debugMode = False
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE222.xml")
    print("test 22.2: success!")
    print("Success probability in offline mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-------Debug mode-------")
    node.debugMode = True
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE223.xml")
    print("test 22.3: success!")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test23():
    """Run tests/event3.xml offline then in debug mode (100 plans per world
    state each), reporting probabilities and timings.

    Fixes: duplicated `elapsed` computation removed; print statements
    converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/event3.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/testE231.xml")
    print("test 23.1: success!")
    node.debugMode = False
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE232.xml")
    print("test 23.2: success!")
    print("Success probability in offline mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-------Debug mode-------")
    node.debugMode = True
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE233.xml")
    print("test 23.3: success!")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test24():
    """Run tests/small_test.xml offline (1000 plans per world state) then in
    debug mode (100 each), reporting probabilities and timings.

    Bug fix: the success labels said "test 4.x" (copy-paste); print
    statements converted to print() calls.
    """
    print("-------TEST 24-------")
    start = time.time()
    tree = xmlTree("tests/small_test.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/small_test_before_run.xml")
    print("test 24.1: success!")
    node.debugMode = False
    for i in range(1000):
        root.runPlan(0)
    for i in range(1000):
        root.runPlan(1)
    root.treeToXml("output/small_test_no_debug.xml")
    print("test 24.2: success!")
    print("Success probability in offline mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-------Debug mode-------")
    node.debugMode = True
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/small_test_debug_mode.xml")
    print("test 24.3: success!")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-----------------------")
#run offline monte-carlo, then re-read the written file for the debug run.
def test25():
    """Gather offline statistics on tests/small_test.xml, write them out,
    re-load the written file, and run it in debug mode.

    Bug fix: the success label said "test 4.3" (copy-paste from test24);
    print statements converted to print() calls.
    """
    print("-------TEST 25-------")
    start = time.time()
    tree = xmlTree("tests/small_test.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    node.debugMode = False
    for i in range(1000):
        root.runPlan(0)
    for i in range(1000):
        root.runPlan(1)
    root.treeToXml("output/small_test_after_offline.xml")
    print("Finished gathering offline statistics.")
    print("-------Debug mode-------")
    node.debugMode = True
    #debug mode consumes the statistics written by the offline phase
    tree = xmlTree("output/small_test_after_offline.xml")
    root = tree.getRoot()
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/small_test_debug_mode.xml")
    print("test 25: success!")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-----------------------")
def test26():
    """Offline run with the tsk attribs supplied in a separate file, followed
    by a debug-mode run on the same in-memory tree.

    Fix: print statements converted to print() calls for consistency.
    """
    print("-------TEST 26-------")
    start = time.time()
    tree = xmlTree("tests/small_test_no_tsk_attrib.xml", None, "tests/small_test_tsk_attrib.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    node.debugMode = False
    for i in range(1000):
        root.runPlan(0)
    for i in range(1000):
        root.runPlan(1)
    root.treeToXml("output/small_test_after_offline_tsk.xml")
    print("Finished gathering offline statistics.")
    print("-------Debug mode-------")
    #tree = xmlTree("output/small_test_after_offline_tsk.xml")
    node.debugMode = True
    root = tree.getRoot()
    for i in range(1000):
        root.runPlan(0)
    for i in range(1000):
        root.runPlan(1)
    root.treeToXml("output/small_test_debug_mode_tsk.xml")
    print("test 26: success!")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-----------------------")
def test27():
    """Like test21 but with tsk attribs supplied in a separate file.

    Fixes: label "test 21.2" corrected to 27.2; duplicated `elapsed`
    computation removed; print statements converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/event1_no_tsk_attrib.xml", None, "tests/event1_tsk_attrib.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/testE271.xml")
    print("test 27.1: success!, testE271.xml")
    node.debugMode = False
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE272.xml")
    print("test 27.2: success!, testE272.xml")
    print("Success probability in offline mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-------Debug mode-------")
    node.debugMode = True
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE273.xml")
    print("test 27.3: success!, testE273.xml")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test28():
    """Like test22 but with tsk attribs supplied in a separate file.

    Fixes: duplicated `elapsed` computation removed; print statements
    converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/event2_no_tsk_attrib.xml", None, "tests/event2_tsk_attrib.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/testE281.xml")
    print("test 28.1: success!")
    node.debugMode = False
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE282.xml")
    print("test 28.2: success!")
    print("Success probability in offline mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-------Debug mode-------")
    node.debugMode = True
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE283.xml")
    print("test 28.3: success!")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test29():
    """Like test23 but with tsk attribs supplied in a separate file.

    Fixes: duplicated `elapsed` computation removed; print statements
    converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/event3_no_tsk_attrib.xml", None, "tests/event3_tsk_attrib.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/testE291.xml")
    print("test 29.1: success!")
    node.debugMode = False
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE292.xml")
    print("test 29.2: success!")
    print("Success probability in offline mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
    print("-------Debug mode-------")
    node.debugMode = True
    for i in range(100):
        root.runPlan(0)
    for i in range(100):
        root.runPlan(1)
    root.treeToXml("output/testE293.xml")
    print("test 29.3: success!")
    print("Success probability in debug mode: Clear sky = %f, Cloudy = %f" % (root.getChild(0).getProbAtIndex(0), root.getChild(0).getProbAtIndex(1)))
    print("Average success time in debug mode with clear sky = %f" % (root.getChild(0).getAverageSuccTime(0)))
    print("Average success time in debug mode when Cloudy = %f" % (root.getChild(0).getAverageSuccTime(1)))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test30():
    """Offline run (1000 plans, world state 0 only) of the integration test
    tree; reports success probability and mean success time of the first
    child.  Print statements converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/small_test_integration.xml", None, "tests/small_test_integration_tsk_attrib.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    print("test 30.1: success!")
    node.debugMode = False
    for i in range(1000):
        root.runPlan(0)
    root.treeToXml("output/small_test_integration.xml")
    print("test 30.2: success!")
    print("Success probability in offline mode: %f" % root.getChild(0).getProbAtIndex(0))
    print("Average success time = %f" % root.getChild(0).getAverageSuccTime(0))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test31():
    """Offline run (1000 plans, world state 0 only) of event3_m.xml with
    external tsk attribs; reports statistics for the first child.
    Print statements converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/event3_m.xml", None, "tests/event3_m_tsk_attribs.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/small_test_event3_m.xml")
    print("test 31.1: success!")
    node.debugMode = False
    for i in range(1000):
        root.runPlan(0)
    root.treeToXml("output/small_test_event3_m_after_run.xml")
    print("test 31.2: success!")
    print("Success probability in offline mode: %f" % root.getChild(0).getProbAtIndex(0))
    print("Average success time = %f" % root.getChild(0).getAverageSuccTime(0))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test32():
    """Run skill4.xml offline and report statistics for two monitored nodes
    looked up by their "id" attribute, plus the root's first child.

    Fixes: the SD line of the second monitor said "Mission" instead of
    "ExitFromCar"; print statements converted to print() calls.
    """
    start = time.time()
    tree = xmlTree("tests/skill4.xml", None, "tests/skill4_tsk_attribs.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/skill4.xml")
    print("test 32.1: success!")
    node.debugMode = False
    for i in range(1000):
        root.runPlan(0)
    #build the id -> node lookup map after the runs
    tree.createWrapperTreeMap("id")
    monitordNode = tree.getWrappedNode("ae9c53ae-b42c-4f21-ba6a-7b1fa8741c2d")
    if monitordNode:
        print("Success probability in offline mode monitor: Mission: %f" % monitordNode.getProbAtIndex(0))
        print("Average success time monitor: Mission= %f" % monitordNode.getAverageSuccTime(0))
        print("SD success time monitor: Mission= %f" % monitordNode.getSDSuccTime(0))
    monitordNode = tree.getWrappedNode("b9e18714-4869-422c-bc38-cb2dca88c530")
    if monitordNode:
        print("Success probability in offline mode monitor: ExitFromCar: %f" % monitordNode.getProbAtIndex(0))
        print("Average success time monitor: ExitFromCar= %f" % monitordNode.getAverageSuccTime(0))
        print("SD success time monitor: ExitFromCar= %f" % monitordNode.getSDSuccTime(0))
    root.treeToXml("output/skill4_after_run.xml")
    print("test 32.2: success!")
    print("Success probability in offline mode - root: %f" % root.getChild(0).getProbAtIndex(0))
    print("Average success time - root= %f" % root.getChild(0).getAverageSuccTime(0))
    elapsed = (time.time() - start)
    print("Time: %f" % elapsed)
def test33():
    """Run asmall.xml offline (10000 plans), re-load the written tree with a
    debug flag set and run it in debug mode, then repeat the offline run
    with a second tsk-attrib file.

    Fixes: sub-test labels said "test 32.x" inside test33; the debug-phase
    report labels said "offline mode"; print statements converted to
    print() calls.
    """
    tree = xmlTree("tests/asmall.xml", None, "tests/asmall_tsk_attrib.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    tree.createWrapperTreeMap("id")
    root.treeToXml("output/asmall.xml")
    print("test 33.1: success!")
    node.debugMode = False
    for i in range(10000):
        root.runPlan(0)
    if root:
        print("Success probability in offline mode monitor: Mission: %f" % root.getChild(0).getLessThenTProb(0, 10))
        print("Average success time monitor: Mission= %f" % root.getChild(0).getAverageSuccTime(0))
        print("SD success time monitor: Mission= %f" % root.getChild(0).getSDSuccTime(0))
        root.treeToXml("output/asmall_after_run.xml")
        print("test 33.2: success!")
    finished_node = tree.getWrappedNode("1")
    finished_node.setDebug("True" + " " + "2.0")
    #NOTE(review): Ctree is not defined in this function - presumably a
    #module-level tree defined earlier in the file; confirm it is the
    #intended source of filePath here
    tree.treeToXml("output/"+Ctree.filePath[6:-4]+"11"+"_after_run_debug_true.xml")
    tree = xmlTree("output/"+Ctree.filePath[6:-4]+"11"+"_after_run_debug_true.xml")
    root = tree.getRoot()
    node.debugMode = True
    for i in range(1000):
        root.runPlan(0)
    if root:
        print("Success probability in debug mode monitor: Mission: %f" % root.getChild(0).getLessThenTProb(0, 10))
        print("Average success time monitor: Mission= %f" % root.getChild(0).getAverageSuccTime(0))
        print("SD success time monitor: Mission= %f" % root.getChild(0).getSDSuccTime(0))
        root.treeToXml("output/asmall_after_run.xml")
        print("test 33.3: success!")
    tree = xmlTree("tests/asmall.xml", None, "tests/asmall_tsk_attrib2.xml")
    root = tree.getRoot()
    node.parmetersInTheWorld = 1
    root.treeToXml("output/asmall2.xml")
    print("test 33.4: success!")
    node.debugMode = False
    for i in range(1000):
        root.runPlan(0)
    if root:
        print("Success probability in offline mode monitor: Mission: %f" % root.getChild(0).getLessThenTProb(0, 10))
        print("Average success time monitor: Mission= %f" % root.getChild(0).getAverageSuccTime(0))
        print("SD success time monitor: Mission= %f" % root.getChild(0).getSDSuccTime(0))
        root.treeToXml("output/asmall_after_run2.xml")
        print("test 33.5: success!")
#distribution factory; imports lazily from the distributions package (the
#directory ships an __init__.py, so python treats it as a module)
def _createComputedDist(string = None):
    """Return a new Computed distribution; *string* is accepted for
    interface compatibility but ignored."""
    from distributions.computed import Computed as _cls
    return _cls()
#distribution factory; imports lazily from the distributions package
def _createNormalDist(parmM,parmG):
    """Return a Normal distribution built from *parmM* and *parmG*
    (both coerced to float)."""
    from distributions.normal import Normal as _cls
    m, g = float(parmM), float(parmG)
    return _cls(m, g)
#distribution factory; imports lazily from the distributions package
def _createUniformDist(parmA,parmB):
    """Return a Uniform distribution built from *parmA* and *parmB*
    (both coerced to float)."""
    from distributions.uniform import Uniform as _cls
    a, b = float(parmA), float(parmB)
    return _cls(a, b)
if __name__ == "__main__":
    #entry point: only test33 runs at the moment; the argv-driven suite
    #selection below ("all" / "events" / "demo" / "integration") is kept
    #commented out for manual use
    test33()
#
#    if len(sys.argv) == 2 and sys.argv[1] == "all":
#        test1()
#        test2()
#        test3()
#        test4()
#        test5()
#        test6()
#        test7()
#        test8()
#        test9()
#        test10()
##        test11()
#        test12()
#        test14()
#        test15()
#        test16()
#        test17()
#        test18()
#        test19()
#        test20()
#        test21()
#        test22()
#        test23()
#        test24()
#        test25()
#        test26()
#        test27()
#        test28()
#        test29()
#        test31()
#
#    elif len(sys.argv) == 2 and sys.argv[1] == "events":
#        test21()
#        test22()
#        test23()
#        test27()
#        test28()
#        test29()
#    elif len(sys.argv) == 2 and sys.argv[1] == "demo":
#        test24()
#        test25()
#    elif len(sys.argv) == 2 and sys.argv[1] == "integration":
#        test30()
#    else:
#        print "please provide one of the following command line arguments: [all,events,demo]"
|
"""
Adapted from:
Modification by: Gurkirt Singh
Modification started: 2nd April 2019
large parts of this files are from many github repos
mainly adopted from
https://github.com/gurkirt/realtime-action-detection
Please don't remove above credits and give star to these repos
Licensed under The MIT License [see LICENSE for details]
"""
import os
import pdb
import time, json
import socket
import getpass
import argparse
import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data_utils
from torch.optim.lr_scheduler import MultiStepLR
from modules import utils
from modules.utils import str2bool
from modules.evaluation import evaluate_detections
from modules.box_utils import decode, nms
from modules import AverageMeter
from data import DetectionDataset, custum_collate
from models.retinanet_shared_heads import build_retinanet_shared_heads
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from torchvision import transforms
from data.transforms import Resize
from train import validate
## Command-line interface.  Defaults are post-processed by
## utils.set_args(args, 'test') below before use.
parser = argparse.ArgumentParser(description='Training single stage FPN with OHEM, resnet as backbone')
# anchor type used in the experiment
parser.add_argument('--anchor_type', default='kmeans', help='kmeans or default')
# name of the backbone network: resnet18/34/50/101/152 are supported
parser.add_argument('--basenet', default='resnet50', help='pretrained base model')
# whether the output heads share features: 0 means no sharing
parser.add_argument('--multi_scale', default=False, type=str2bool,help='perfrom multiscale training')
parser.add_argument('--shared_heads', default=0, type=int,help='4 head layers')
parser.add_argument('--num_head_layers', default=4, type=int,help='0 mean no shareding more than 0 means shareing')
parser.add_argument('--use_bias', default=True, type=str2bool,help='0 mean no bias in head layears')
# dataset name; only voc or coco are supported
parser.add_argument('--dataset', default='coco', help='pretrained base model')
# input image size bounds for the FPN
parser.add_argument('--min_size', default=600, type=int, help='Input Size for FPN')
parser.add_argument('--max_size', default=1000, type=int, help='Input Size for FPN')
# data loading arguments
parser.add_argument('--batch_size', default=16, type=int, help='Batch size for training')
# number of workers that load data in parallel
parser.add_argument('--num_workers', '-j', default=8, type=int, help='Number of workers used in dataloading')
# optimiser hyperparameters
parser.add_argument('--optim', default='SGD', type=str, help='Optimiser type')
parser.add_argument('--loss_type', default='mbox', type=str, help='loss_type')
parser.add_argument('--lr', '--learning-rate', default=0.01, type=float, help='initial learning rate')
# NOTE(review): default is the string '90000' and main() iterates
# args.eval_iters -- presumably utils.set_args converts this to a list of
# ints (iterating the raw string would yield characters); confirm
parser.add_argument('--eval_iters', default='90000', type=str, help='Chnage the lr @')
# freeze batch-normalisation layers or not
# NOTE(review): type=bool parses any non-empty CLI string as True (even
# "False"); str2bool is used for the other boolean flags -- verify intent
parser.add_argument('--fbn', default=True, type=bool, help='if less than 1 mean freeze or else any positive values keep updating bn layers')
parser.add_argument('--freezeupto', default=1, type=int, help='if 0 freeze or else keep updating bn layers')
# evaluation hyperparameters
parser.add_argument('--iou_thresh', default=0.5, type=float, help='Evaluation threshold')
parser.add_argument('--conf_thresh', default=0.05, type=float, help='Confidence threshold for evaluation')
parser.add_argument('--nms_thresh', default=0.5, type=float, help='NMS threshold')
parser.add_argument('--topk', default=100, type=int, help='topk for evaluation')
# progress logging
parser.add_argument('--log_iters', default=True, type=str2bool, help='Print the loss at each iteration')
parser.add_argument('--log_step', default=10, type=int, help='Log after k steps for text/Visdom/tensorboard')
parser.add_argument('--tensorboard', default=False, type=str2bool, help='Use tensorboard for loss/evalaution visualization')
parser.add_argument('--visdom', default=False, type=str2bool, help='Use visdom for loss/evalaution visualization')
parser.add_argument('--vis_port', default=8098, type=int, help='Port for Visdom Server')
# program arguments
parser.add_argument('--man_seed', default=1, type=int, help='manualseed for reproduction')
parser.add_argument('--multi_gpu', default=1, type=int, help='If more than then use all visible GPUs by default only one GPU used ')
# source and destination directories
parser.add_argument('--data_root', default='/mnt/mercury-fast/datasets/', help='Location to root directory fo dataset') # /mnt/mars-fast/datasets/
parser.add_argument('--save_root', default='/mnt/mercury-fast/datasets/', help='Location to save checkpoint models') # /mnt/sun-gamma/datasets/
parser.add_argument('--model_dir', default='', help='Location to where imagenet pretrained models exists') # /mnt/mars-fast/datasets/
## parse arguments and let the project helper fill in derived fields
## (directories, dataset subsets, means/stds, ...)
args = parser.parse_args()
args = utils.set_args(args, 'test') # set directories and subsets fo datasets
## set random seeds and global settings for reproducibility
np.random.seed(args.man_seed)
torch.manual_seed(args.man_seed)
torch.cuda.manual_seed_all(args.man_seed)
torch.set_default_tensor_type('torch.FloatTensor')
def main() -> None:
    """Evaluate saved RetinaNet checkpoints on the validation split.

    For each iteration listed in ``args.eval_iters`` this loads the
    corresponding ``model_{iter:06d}.pth`` checkpoint from ``args.save_root``,
    runs validation (COCO-style or the generic ``validate`` from train.py),
    and writes per-class AP strings plus the mean AP to a timestamped log.
    """
    args.exp_name = utils.create_exp_name(args)
    args.save_root += args.dataset+'/'
    args.save_root = args.save_root+'cache/'+args.exp_name+'/'
    # validation-time preprocessing: resize, tensorise, normalise
    # (args.means/args.stds are presumably filled in by utils.set_args)
    val_transform = transforms.Compose([
                        Resize(args.min_size, args.max_size),
                        transforms.ToTensor(),
                        transforms.Normalize(mean=args.means,std=args.stds)])
    val_dataset = DetectionDataset(args, train=False, image_sets=args.val_sets,
                            transform=val_transform, full_test=False)
    print('Done Loading Dataset Validation Dataset :::>>>\n',val_dataset.print_str)
    args.data_dir = val_dataset.root
    # +1 for the background class
    args.num_classes = len(val_dataset.classes) + 1
    args.classes = val_dataset.classes
    args.head_size = 256
    net = build_retinanet_shared_heads(args).cuda()
    if args.multi_gpu>0:
        print('\nLets do dataparallel\n')
        net = torch.nn.DataParallel(net)
    net.eval()
    # NOTE(review): args.eval_iters defaults to the string '90000'; iterating
    # it here would yield characters and '{it:06d}' below requires ints --
    # presumably utils.set_args converts it to a list of ints; confirm
    for iteration in args.eval_iters:
        args.det_itr = iteration
        # line-buffered-ish log file (buffer size 10) per checkpoint
        log_file = open("{pt:s}/testing-{it:06d}-{date:%m-%d-%Hx}.log".format(pt=args.save_root, it=iteration, date=datetime.datetime.now()), "w", 10)
        log_file.write(args.exp_name + '\n')
        args.model_path = args.save_root + 'model_{:06d}.pth'.format(iteration)
        log_file.write(args.model_path+'\n')
        net.load_state_dict(torch.load(args.model_path))
        print('Finished loading model %d !' % iteration)
        # Load dataset
        val_data_loader = data_utils.DataLoader(val_dataset, int(args.batch_size), num_workers=args.num_workers,
                                 shuffle=False, pin_memory=True, collate_fn=custum_collate)
        # evaluation: synchronise before timing so GPU work is included
        torch.cuda.synchronize()
        tt0 = time.perf_counter()
        log_file.write('Testing net \n')
        net.eval() # switch net to evaluation mode
        # COCO uses its own evaluation path; everything else goes through
        # the generic validate() imported from train.py
        if args.dataset != 'coco':
            mAP, ap_all, ap_strs , det_boxes = validate(args, net, val_data_loader, val_dataset, iteration, iou_thresh=args.iou_thresh)
        else:
            mAP, ap_all, ap_strs , det_boxes = validate_coco(args, net, val_data_loader, val_dataset, iteration, log_file, iou_thresh=args.iou_thresh)
        for ap_str in ap_strs:
            print(ap_str)
            log_file.write(ap_str+'\n')
        ptr_str = '\nMEANAP:::=>'+str(mAP)+'\n'
        print(ptr_str)
        log_file.write(ptr_str)
        torch.cuda.synchronize()
        print('Complete set time {:0.2f}'.format(time.perf_counter() - tt0))
        log_file.close()
def validate_coco(args, net, val_data_loader, val_dataset, iteration_num, resFile_txt, iou_thresh=0.5):
    """Test a FPN network on an image database.

    Runs the network over ``val_data_loader``, applies per-class NMS,
    writes detections as a COCO-format JSON file and evaluates them with
    both the local ``evaluate_detections`` helper and the official
    ``COCOeval`` API.

    Parameters:
        args           -- run configuration (paths, thresholds, class count).
        net            -- detection network returning (decoded_boxes, conf_data).
        val_data_loader-- yields (images, targets, batch_counts, img_indexs, wh).
        val_dataset    -- provides ``classes``, ``idlist`` and ``ids``.
        iteration_num  -- training iteration being evaluated (logging only).
        resFile_txt    -- open text file receiving the printed results.
        iou_thresh     -- IoU threshold for the local AP computation.

    Returns:
        (mAP, ap_all, ap_strs, det_boxes) as produced by evaluate_detections.
    """
    print('Validating at ', iteration_num)
    annFile='{}/instances_{}.json'.format(args.data_dir,args.val_sets[0])
    cocoGT=COCO(annFile)
    coco_dets = []
    resFile = args.save_root + 'detections-{:05d}.json'.format(args.det_itr)
    # resFile_txt = open(args.save_root + 'detections-{:05d}.txt'.format(args.det_itr), 'w')
    num_images = len(val_dataset)
    num_classes = args.num_classes
    det_boxes = [[] for _ in range(num_classes-1)]  # per-class detections (background excluded)
    gt_boxes = []
    print_time = True
    val_step = 50  # print timing every val_step batches
    count = 0
    torch.cuda.synchronize()
    ts = time.perf_counter()
    activation = nn.Sigmoid().cuda()
    if args.loss_type == 'mbox':
        activation = nn.Softmax(dim=2).cuda()
    idlist = val_dataset.idlist
    all_ids = val_dataset.ids
    with torch.no_grad():
        for val_itr, (images, targets, batch_counts, img_indexs, wh) in enumerate(val_data_loader):
            torch.cuda.synchronize()
            t1 = time.perf_counter()
            batch_size = images.size(0)
            height, width = images.size(2), images.size(3)
            images = images.cuda(0, non_blocking=True)
            decoded_boxes, conf_data = net(images)
            conf_scores_all = activation(conf_data).clone()
            if print_time and val_itr%val_step == 0:
                torch.cuda.synchronize()
                tf = time.perf_counter()
                print('Forward Time {:0.3f}'.format(tf-t1))
            for b in range(batch_size):
                # NOTE(review): assumes the COCO image id is encoded after the
                # first 8 characters of ids[...][1] — confirm against dataset.
                coco_image_id = int(all_ids[img_indexs[b]][1][8:])
                # wh[b] = (net-input w, net-input h, original w, original h);
                # this deliberately shadows the tensor width/height above.
                width, height = wh[b][0], wh[b][1]
                o_width, o_height = wh[b][2], wh[b][3]
                # print(wh[b])
                gt = targets[b, :batch_counts[b]].numpy()
                gt_boxes.append(gt)
                decoded_boxes_b = decoded_boxes[b]
                conf_scores = conf_scores_all[b].clone()
                #Apply nms per class and obtain the results
                for cl_ind in range(1, num_classes):
                    # pdb.set_trace()
                    scores = conf_scores[:, cl_ind].squeeze()
                    if args.loss_type == 'yolo':
                        # yolo: class score is conditioned on objectness (col 0)
                        scores = conf_scores[:, cl_ind].squeeze() * conf_scores[:, 0].squeeze() * 5.0
                    # scoresth, _ = torch.sort(scores, descending=True)
                    c_mask = scores.gt(args.conf_thresh) # greater than minmum threshold
                    scores = scores[c_mask].squeeze()
                    # print('scores size',scores.size())
                    if scores.dim() == 0:
                        # no detection above threshold for this class
                        det_boxes[cl_ind - 1].append(np.asarray([]))
                        continue
                    boxes = decoded_boxes_b.clone()
                    l_mask = c_mask.unsqueeze(1).expand_as(boxes)
                    boxes = boxes[l_mask].view(-1, 4)
                    # idx of highest scoring and non-overlapping boxes per class
                    ids, counts = nms(boxes, scores, args.nms_thresh, args.topk*10) # idsn - ids after nms
                    scores = scores[ids[:counts]].cpu().numpy()
                    pick = min(scores.shape[0], args.topk)
                    scores = scores[:pick]
                    boxes = boxes[ids[:counts]].cpu().numpy()
                    boxes = boxes[:pick, :]
                    cls_id = cl_ind-1
                    if len(idlist)>0:
                        # map contiguous class index back to dataset category id
                        cls_id = idlist[cl_ind-1]
                    for ik in range(boxes.shape[0]):
                        # clip to the network-input image, then rescale to the
                        # original image and convert to COCO [x, y, w, h].
                        boxes[ik, 0] = max(0, boxes[ik, 0])
                        boxes[ik, 2] = min(width, boxes[ik, 2])
                        boxes[ik, 1] = max(0, boxes[ik, 1])
                        boxes[ik, 3] = min(height, boxes[ik, 3])
                        box_ = [round(boxes[ik, 0]*o_width/width,1), round(boxes[ik, 1]*o_height/height,1), round(boxes[ik, 2]*o_width/width,1), round(boxes[ik, 3]*o_height/height,1)]
                        box_[2] = round(box_[2] - box_[0], 1)
                        box_[3] = round(box_[3] - box_[1], 1)
                        box_ = [float(b) for b in box_]
                        coco_dets.append({"image_id" : int(coco_image_id), "category_id" : int(cls_id),
                                          "bbox" : box_, "score" : float(scores[ik]),
                                          })
                    cls_dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=True)
                    det_boxes[cl_ind-1].append(cls_dets)
                count += 1
            if print_time and val_itr%val_step == 0:
                torch.cuda.synchronize()
                te = time.perf_counter()
                print('im_detect: {:d}/{:d} time taken {:0.3f}'.format(count, num_images, te-ts))
                torch.cuda.synchronize()
                ts = time.perf_counter()
                print('NMS stuff Time {:0.3f}'.format(ts - tf))
    # Local AP evaluation.
    mAP, ap_all, ap_strs , det_boxes = evaluate_detections(gt_boxes, det_boxes, val_dataset.classes, iou_thresh=iou_thresh)
    for ap_str in ap_strs:
        print(ap_str)
        resFile_txt.write(ap_str+'\n')
    ptr_str = '\nMEANAP:::=>'+str(mAP)+'\n'
    print(ptr_str)
    resFile_txt.write(ptr_str)
    print('saving results :::::')
    with open(resFile,'w') as f:
        json.dump(coco_dets, f)
    cocoDt=cocoGT.loadRes(resFile)
    # Official COCO evaluation on the dumped detections.
    cocoEval = COCOeval(cocoGT, cocoDt, 'bbox')
    # cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    resFile_txt.write(ptr_str)
    eval_strings = utils.eval_strings()
    ptr_str = ''
    for sid, val in enumerate(cocoEval.stats):
        ptr_str += eval_strings[sid] + str(val) + '\n'
    print('\n\nPrintning COCOeval Generated results\n\n ')
    print(ptr_str)
    resFile_txt.write(ptr_str)
    return mAP, ap_all, ap_strs , det_boxes
if __name__ == '__main__':
main()
|
from celery import Celery

# Celery application for the "celery_back" package.
# NOTE(review): the broker URL embeds a plain-text password ("package_thief");
# prefer loading it from configuration/environment instead of source control.
# NOTE(review): pickle serialization is unsafe if the broker can receive
# messages from untrusted producers.
app = Celery('celery_back', broker='redis://:package_thief@localhost:6379',
             task_serializer='pickle', result_serializer='pickle',
             include=['celery_back.tasks'])
app.conf.task_serializer = 'pickle'
app.conf.result_serializer = 'pickle'

if __name__=='__main__':
    app.start()
|
import numpy as np
from time import clock
from sklearn import svm
from dataloader import get_dataset
from argparse import ArgumentParser
from multi_kernels import MultiKernelheuristic as mkh
from multi_kernels import MultiKernelfixedrules as mkfr
from sklearn.model_selection import StratifiedKFold
# Command-line options for the multi-kernel SVM experiment.
parser = ArgumentParser()
parser.add_argument('--dataroot', required=True, help='root folder for the dataset')
parser.add_argument('--kernel_set', required=True, help='kernel_set : fixed_rules | heuristic')
parser.add_argument('--gamma0', type=float, help='parameter for linear kernel')
parser.add_argument('--gamma1', type=float, help='parameter for polynomial kernel')
parser.add_argument('--gamma2', type=float, help='parameter for gaussian kernel')
parser.add_argument('--normalize', action='store_true', help='toggle for normalizing the input data')
opt = parser.parse_args()

# Last column of `train` is the label; all preceding columns are features.
train, test_x = get_dataset(root=opt.dataroot, normalize=opt.normalize)
train_x, train_y = train[ : , 0 : train.shape[1] - 1], train[ : , train.shape[1] - 1 ]
# 5-fold stratified cross-validation of a precomputed-kernel SVM.
cross_valid = StratifiedKFold(n_splits=5)
tr_accuracies = []  # per-fold training accuracy
va_accuracies = []  # per-fold validation accuracy
tr_times = []       # per-fold Gram-matrix + fit time (seconds)
for tr, va in cross_valid.split(train_x, train_y):
    print('Started cross validation split: {0}'.format(len(tr_accuracies) + 1))
    print('Ratio: {0}/{1} :: TR/VA'.format(tr.shape[0], va.shape[0]))
    tr_x, va_x = train_x[tr], train_x[va]
    tr_y, va_y = train_y[tr], train_y[va]
    svm_classifier = svm.SVC(kernel='precomputed')
    if opt.kernel_set == 'fixed_rules':
        multi_k = mkfr(gammas={'linear': opt.gamma0, 'polynomial': opt.gamma1, 'gaussian': opt.gamma2},
                       hyperparameters={'polynomial': 3, 'gaussian': 0.2})
    elif opt.kernel_set == 'heuristic':
        multi_k = mkh(hyperparameters={'polynomial': 3, 'gaussian': 0.2}, X=tr_x, Y=tr_y)
    else:
        # Previously an unknown value crashed later with NameError on multi_k.
        raise ValueError('unknown kernel_set: {0}'.format(opt.kernel_set))
    t_start = clock()
    tr_gram_matrix = multi_k.gram_matrix(tr_x, tr_x)
    svm_classifier.fit(tr_gram_matrix, tr_y)
    t_stop = clock()
    va_gram_matrix = multi_k.gram_matrix(va_x, tr_x)
    tr_times.append((t_stop - t_start))
    tr_predictions = svm_classifier.predict(tr_gram_matrix)
    va_predictions = svm_classifier.predict(va_gram_matrix)
    tr_accuracy = 1 - np.abs(tr_predictions - tr_y).sum()/tr_y.shape[0]
    va_accuracy = 1 - np.abs(va_predictions - va_y).sum()/va_y.shape[0]
    tr_accuracies.append(float(round(tr_accuracy, 5)))
    va_accuracies.append(float(round(va_accuracy, 5)))
# BUG FIX: the summary previously aggregated `tr_accuracy`/`va_accuracy`
# (scalars from the LAST fold only), so the reported mean was just the last
# fold and the std was always 0.  Aggregate the per-fold lists instead, and
# close the results file deterministically with a context manager.
with open('results.txt', 'a') as file_results:
    file_results.write('{0}\t{1}\t{2}\t{3}\t{4}\t{5}\n'.format(
        opt.kernel_set,
        round(np.mean(tr_accuracies), 4), round(np.std(tr_accuracies), 4),
        round(np.mean(va_accuracies), 4), round(np.std(va_accuracies), 4),
        round(np.mean(tr_times), 4)))
|
import re
from functools import partial
from common import file_to_lines
class ValidatorsFactory:
    """Builds small predicate functions (``value -> bool``) for field rules.

    NOTE: the public factory parameter names ``min``/``max``/``map`` shadow
    builtins; they are kept for backward compatibility with keyword callers.
    """

    def create_min_max_validator(self, min, max):
        """Return a validator checking ``min <= int(value) <= max``."""
        return partial(self._check_minmax, min, max)

    def create_min_max_map_validator(self, map):
        """Return a validator that picks a (min, max) range from ``map``
        keyed by the value's unit suffix (e.g. ``"180cm"`` -> key ``"cm"``)."""
        return partial(self._check_minmax_map, map)

    def create_regex_validator(self, pattern):
        """Return a validator matching ``pattern`` against the value.

        The pattern is compiled once here instead of on every call.
        """
        return partial(self._check_regex, re.compile(pattern))

    def create_includes_validator(self, array):
        """Return a validator checking membership in ``array``."""
        return partial(self._check_contains, array)

    def _check_minmax_map(self, map, value):
        # Split "<digits><unit>" and validate the numeric part against the
        # range registered for the unit.
        search = re.search(r"\d*(\D*)", value)
        if not search:
            return False
        key = search.group(1)
        if key not in map:
            return False
        return self._check_minmax(map[key][0], map[key][1], value.rstrip(key))

    @staticmethod
    def _check_contains(array, value):
        return value in array

    @staticmethod
    def _check_minmax(min, max, value):
        try:
            return min <= int(value) <= max
        except ValueError:
            # Non-numeric input now fails validation instead of raising.
            return False

    @staticmethod
    def _check_regex(re_pattern, value):
        # Return a bool (the original returned the Match object / None).
        return re_pattern.match(value) is not None
class PassportValidator:
    """Validates passport credential dictionaries against the field rules."""

    def __init__(self):
        self.validators = ValidatorsFactory()
        factory = self.validators
        eye_colors = ["amb", "blu", "brn", "gry", "grn", "hzl", "oth"]
        height_ranges = {
            "cm": (150, 193),
            "in": (59, 76),
        }
        # One predicate per required field.
        self.rules = {
            "byr": factory.create_min_max_validator(1920, 2002),
            "iyr": factory.create_min_max_validator(2010, 2020),
            "eyr": factory.create_min_max_validator(2020, 2030),
            "hgt": factory.create_min_max_map_validator(height_ranges),
            "hcl": factory.create_regex_validator(r"^#[a-f0-9]{6}$"),
            "ecl": factory.create_includes_validator(eye_colors),
            "pid": factory.create_regex_validator(r"^[0-9]{9}$"),
        }

    def validate_presence(self, credentials):
        """True when every required field is present in ``credentials``."""
        return all(key in credentials for key in self.rules)

    def validate(self, credentials):
        """True when all fields are present and each passes its rule."""
        if not self.validate_presence(credentials):
            return False
        return all(
            validator(credentials[key]) for key, validator in self.rules.items()
        )
def row_to_creds(row):
    """Turn a whitespace-separated "k1:v1 k2:v2 ..." row into a dict."""
    creds = {}
    for pair in row.split():
        key, value = pair.split(':')
        creds[key] = value
    return creds
def first(rows):
    """Count rows whose credentials contain all required fields."""
    validator = PassportValidator()
    return sum(1 for row in rows if validator.validate_presence(row_to_creds(row)))
def second(rows):
    """Count rows whose credentials are present and pass every field rule."""
    validator = PassportValidator()
    return sum(1 for row in rows if validator.validate(row_to_creds(row)))
def main():
    # Records are separated by blank lines in the puzzle input file.
    rows = file_to_lines("inputs/day04.txt", separate_with_empty=True)
    print(first(rows))   # part 1: fields merely present
    print(second(rows))  # part 2: fields present and valid
if __name__ == "__main__":
main()
|
def ascci3(n):
i=0
for k in range(n):
print "%"*n
i+=1
#output:
# >>> ascci3(6)
# %%%%%%
# %%%%%%
# %%%%%%
# %%%%%%
# %%%%%%
# %%%%%%
# >>>
def createdonerow(width, height):
A=''
row=''
for row in range(height):
row=''
for col in range(width):
row+='*'
A+='*'
print row
#OUTPUT:
# >>> createdonerow(10,6)
# **********
# **********
# **********
# **********
# **********
# **********
|
from modules.color import Color
from modules.vector import Vector
from modules.image import read_ppm
class Texture:
    """Texture map loaded from a PPM file and sampled by planar projection."""

    def __init__(self, texture_file, u_vector=Vector(0,0.1,0), v_vector=Vector(0,0,0)):
        # NOTE(review): the default Vector instances are created once at
        # definition time and shared by every Texture relying on the defaults
        # — fine only if Vector is immutable; confirm.
        self.map = read_ppm(texture_file)
        self.width = self.map.width
        self.height = self.map.height
        self.u_vector = u_vector  # projection axis mapped to texture u
        self.v_vector = v_vector  # projection axis mapped to texture v

    def get_texel(self, point):
        """Return the pixel for a 3D point, tiling the texture with modulo."""
        u = abs(point.dot_product(self.u_vector)) * self.width
        v = abs(point.dot_product(self.v_vector)) * self.height
        # Wrap out-of-range coordinates so any point samples a valid pixel.
        v_bound = round(v) % self.height
        u_bound = round(u) % self.width
        return self.get_pixel(u_bound, v_bound)

    def get_pixel(self, u, v):
        # Pixel rows are indexed by v, columns by u.
        return self.map.pixels[v][u]
class Material:
    """Surface color and lighting properties."""

    def __init__(
        self,
        color=Color.from_hex("#FFFFFF"),
        ambient=0.0005,
        diffuse=1.0,
        specular=1.0,
        exp_specular=20,
        reflection=0.9,
        diff_reflection=0,
        refraction=0.0,
        ior=0,
        texture=None
    ):
        self.color = color
        self.ambient = ambient  # ka
        self.diffuse = diffuse  # kd
        self.specular = specular  # ks
        self.exp_specular = exp_specular  # alpha
        self.reflection = reflection  # kr
        self.diff_reflection = diff_reflection  # diffuse reflection
        self.refraction = refraction  # kt
        self.ior = ior  # refraction-index ratio
        self.texture = texture

    def color_at(self, position):
        """Return the material color at ``position`` (uniform here)."""
        return self.color

    def get_texel(self, point):
        # NOTE(review): raises AttributeError when ``texture`` is None —
        # callers appear expected to check for a texture first; confirm.
        return self.texture.get_texel(point)

    def set_acabamento(self, ka, kd, ks, alpha, kr, kt, ior):
        """Set the finish (lighting) coefficients.

        Parameters:
            ka    - ambient light coefficient
            kd    - diffuse light coefficient
            ks    - specular light coefficient
            alpha - specular reflection exponent
            kr    - reflection coefficient
            kt    - transmission (refraction) coefficient
            ior   - ratio between ambient and material refraction indices (n1/n2)
        """
        self.ambient = ka
        self.diffuse = kd
        self.specular = ks
        self.exp_specular = alpha
        self.reflection = kr
        self.refraction = kt
        self.ior = ior
class ChequeredMaterial:
    """Checkerboard material alternating two colors in a grid pattern.

    ``up`` selects the plane the pattern lives in: Vector(1,0,0) uses the
    x/z axes with a narrower stripe rule, Vector(0,0,1) uses x/y, anything
    else falls back to an x/z checkerboard.
    """

    def __init__(
        self, color1=Color.from_hex("#FFFFFF"),
        color2=Color.from_hex("#FFFFFF"),
        ambient=0.0005,
        diffuse=1.0,
        specular=1.0,
        reflection=0.5,
        diff_reflection=0,
        refraction=0.0,
        ior=0,
        exp_specular=20,
        tamanho=15,
        up=Vector(0,1,0)
    ):
        self.color1 = color1
        self.color2 = color2
        self.up = up
        self.ambient = ambient  # ka
        self.diffuse = diffuse  # kd
        self.specular = specular  # ks
        self.exp_specular = exp_specular  # alpha
        self.reflection = reflection  # kr
        self.diff_reflection = diff_reflection  # diffuse reflection
        self.refraction = refraction  # kt
        self.ior = ior  # refraction-index ratio
        self.texture = None
        # Checker square size ("tamanho"), clamped to at least 2.
        self.tamanho = max(tamanho,2)

    def set_acabamento(self, ka, kd, ks, alpha, kr, kt, ior):
        """Set the finish (lighting) coefficients.

        Parameters:
            ka    - ambient light coefficient
            kd    - diffuse light coefficient
            ks    - specular light coefficient
            alpha - specular reflection exponent
            kr    - reflection coefficient
            kt    - transmission (refraction) coefficient
            ior   - ratio between ambient and material refraction indices (n1/n2)
        """
        self.ambient = ka
        self.diffuse = kd
        self.specular = ks
        self.exp_specular = alpha
        self.reflection = kr
        self.refraction = kt
        self.ior = ior

    def color_at(self, position):
        """Pick color1 or color2 from the checker cell containing ``position``."""
        if (self.up == Vector(1,0,0)):
            # x/z plane variant with asymmetric stripe widths.
            modX = int(position.x) % (self.tamanho * 2.5)
            modZ = int(position.z) % (self.tamanho * 2.5)
            def corX(modX):
                return (modX <= self.tamanho/4 or modX > self.tamanho * 1.5)
            def corZ(modZ):
                return (modZ <= self.tamanho/4 or modZ > self.tamanho * 1.5)
            # XOR of the two stripe tests yields the checker alternation.
            if (corX(modX) != corZ(modZ)):
                return self.color1
            else:
                return self.color2
        elif (self.up == Vector(0,0,1)):
            # x/y plane; negative coordinates are shifted so the pattern
            # does not mirror at the origin.
            if position.x < 0:
                x = abs(round(position.x)) + self.tamanho
            else:
                x = round(position.x)
            if position.y < 0:
                y = abs(round(position.y)) + self.tamanho
            else:
                y = round(position.y)
            mult = 2 if (self.tamanho >= 30) else 3
            modX = x % (self.tamanho * mult)
            modY = y % (self.tamanho * mult)
            def corX(modX):
                return (modX <= self.tamanho)
            def corY(modY):
                return (modY <= self.tamanho)
            if (corX(modX) != corY(modY)):
                return self.color1
            else:
                return self.color2
        else:
            # Default: x/z plane checkerboard.
            if position.x < 0:
                x = abs(round(position.x)) + self.tamanho
            else:
                x = round(position.x)
            if position.z < 0:
                z = abs(round(position.z)) + self.tamanho
            else:
                z = round(position.z)
            mult = 2.1 if (self.tamanho >= 20) else 3
            modX = x % (self.tamanho * mult)
            modZ = z % (self.tamanho * mult)
            def corX(modX):
                return (modX <= self.tamanho)
            def corZ(modZ):
                return (modZ <= self.tamanho)
            if (corX(modX) != corZ(modZ)):
                return self.color1
            else:
                return self.color2
|
import socket
import threading
import urlparse
import select
BUF_LEN = 8192
BUFLEN=8192
class MyProxy1(threading.Thread):
    """One proxied HTTP connection (Python 2): reads the client's request,
    forwards it to the origin server, and streams the response back."""

    def __init__(self,conn,addr):
        threading.Thread.__init__(self)
        self.source = conn    # client socket accepted by the server
        self.request = ""     # bytes following the request line
        self.headers = {}     # parsed request line: method/path/protocol
        self.destnation = socket.socket(socket.AF_INET,socket.SOCK_STREAM)

    def get_headers(self):
        """Read from the client until at least the request line (first
        newline) has arrived, then split it into method/path/protocol."""
        header = ''
        while True:
            header += self.source.recv(BUFLEN)
            index = header.find('\n')
            if index > 0:
                break
        firstLine = header[:index]
        self.request = header[index+1:]
        self.headers['method'], self.headers['path'], self.headers['protocol'] = firstLine.split()

    def conn_destnation(self):
        """Resolve the target host from the absolute request path and forward
        the request line plus whatever else was already buffered."""
        url = urlparse.urlparse(self.headers['path'])
        hostname = url.netloc
        port = "80"  # default HTTP port, as a string; converted below
        if hostname.find(':') > 0:
            addr,port = hostname.split(':')
        else:
            addr = hostname
        port = int(port)
        print (addr)
        ip = socket.gethostbyname(addr)
        self.destnation.connect((ip,port))
        data = "%s %s %s\r\n" %(self.headers['method'],self.headers['path'],self.headers['protocol'])
        self.destnation.send(data+self.request)
        print ("send \n" + data + self.request)

    def renderto(self):
        """Relay origin response bytes to the client until an empty read.

        NOTE(review): when select() times out with nothing readable, the
        loop spins forever instead of breaking — probably unintended.
        """
        readsocket = [self.destnation]
        while True:
            data = ''
            (rlist,wlist,elist)=select.select(readsocket,[],[],3)
            if rlist:
                data = rlist[0].recv(BUFLEN)
                if len(data) > 0:
                    self.source.send(data)
                else:
                    break

    def run(self):
        self.get_headers()
        self.conn_destnation()
        self.renderto()
class MyProxy(threading.Thread):
    """Alternative proxy handler (Python 2) that splits on the first '\\r'."""

    def __init__(self, conn, addr):
        threading.Thread.__init__(self)
        self.source = conn
        self.headers = {}
        self.request = ""
        self.destnation = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # socket.setdefaulttimeout()

    def get_header(self):
        """Read until the first '\\r', then parse the request line."""
        header = ''
        while True:
            header += self.source.recv(BUF_LEN)
            index = header.find('\r')
            if index > 0:
                break
        first_line = header[:index]
        # NOTE(review): self.request starts at index+1, i.e. with the '\n'
        # that followed the '\r'; the '\r' itself is dropped (see the
        # forwarded-request note in conn_destnation).
        self.request = header[index+1:]
        self.headers['method'], self.headers['path'], self.headers['protocol'] = first_line.split()

    def conn_destnation(self):
        """Connect to the origin host and forward the buffered request."""
        try:
            result = urlparse.urlparse(self.headers['path'])
        except KeyError as e:
            # get_header did not produce a usable request line; give up.
            self.destnation.close()
            return
        port = "80"  # default HTTP port, as a string; converted below
        hostname = result.netloc
        if hostname.find(':') > 0:
            addr, port = hostname.split(':')
        else:
            addr = hostname
        port = int(port)
        dest_ip = socket.gethostbyname(addr)
        print (type(dest_ip))
        print (type(port))
        self.destnation.connect((dest_ip, port))
        # NOTE(review): no '\r\n' is inserted after the protocol — only the
        # leftover '\n' at the start of self.request separates the request
        # line from the headers, so the forwarded request is missing a '\r'.
        data = "%s %s %s%s" % (self.headers['method'], \
                               self.headers['path'], \
                               self.headers['protocol'], \
                               self.request)
        self.destnation.send(data)
        print ("send \n" + data)

    def render_to(self):
        # Relay response bytes until an empty read.  NOTE(review): a bare
        # select timeout (rlist empty) loops forever, as in MyProxy1.
        readsocket=[self.destnation]
        while True:
            data=''
            (rlist, wlist, elist) = select.select(readsocket,[],[],3)
            if rlist:
                data = rlist[0].recv(BUF_LEN)
                if len(data)>0:
                    print ("recv \n" + data)
                    self.source.send(data)
                else:
                    break

    def run(self):
        self.get_header()
        self.conn_destnation()
        self.render_to()
class MyServer(object):
    """Minimal threaded proxy server: accepts connections and hands each
    one to a handler thread class."""

    rec = []  # class-level registry of spawned handler threads

    def __init__(self, host, port, handler=MyProxy):
        self.host = host
        self.port = port
        self.handler = handler
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server.bind((host,port))
        self.server.listen(5)

    def start(self):
        """Accept loop; KeyboardInterrupt closes all handler sockets."""
        while True:
            try:
                conn, addr = self.server.accept()
                # BUG FIX: the handler passed to __init__ was ignored and
                # MyProxy1 was hard-coded here; honour self.handler instead.
                # (The script below passes MyProxy1 explicitly, so its
                # behavior is unchanged.)
                t = self.handler(conn, addr)
                self.rec.append(t)
                t.start()
            except KeyboardInterrupt:
                print ("shit happens")
                for t in self.rec:
                    t.destnation.close()
                exit(0)
if "__main__" == __name__:
host = '127.0.0.1'
port = 8008
MyServer(host, port, MyProxy1).start()
|
import numba
from numpy import *
from plucked_map import *
@numba.jit(nopython=True)
def accumulate(nbins, n, m, s):
    """Histogram the invariant density of the iterated ``osc_tent`` map.

    Iterates the map ``n`` times from a random start in [0, 2) and builds a
    normalised density over ``nbins`` equal bins on [0, 2).  ``m`` and ``s``
    are forwarded to ``osc_tent`` (semantics defined in plucked_map —
    presumably mode/parameter; confirm there).
    """
    density = zeros(nbins)
    dx = 2.0/nbins
    # Random start in [0, 2); ``rand`` comes from the star imports above.
    x = 2*rand()
    for i in range(n):
        x = osc_tent(x, s, m)
        bno = int(x//dx)
        # Each visit contributes 1/(n*dx) so the histogram integrates to 1.
        density[bno] += 1/n/dx
    return density
@numba.jit(nopython=True)
def analytical_rho(x,s):
    """Closed-form invariant density, piecewise constant on [0, 2)."""
    # The two "flat" intervals [0.5, 1) and [1.5, inf) share one value.
    if x >= 1.5 or (0.5 <= x < 1):
        return (1-s*s)/2
    if x < 0.5:
        return (1-s)**2/2
    # Remaining case: 1 <= x < 1.5.
    return (1+s)**2/2
# --- Driver: estimate the invariant density and plot it ----------------------
nbins = 2**12   # histogram resolution over [0, 2)
nrep = 5        # independent repetitions averaged together
density = zeros(nbins)
s = 0.1         # map parameter
n = 3           # passed as accumulate's ``m`` argument (osc_tent parameter)
for m in range(nrep):
    print("repeat {}".format(m))
    # 1e9 iterations per repetition; each repetition contributes 1/nrep.
    density += accumulate(nbins, 1000000000, n, s)/nrep
# Bin centers (endpoints excluded).
x = array(linspace(0, 2, nbins+2)[1:-1])
#for i,xi in enumerate(x):
#    density[i] = analytical_rho(xi,s)
# NOTE(review): ``figure`` is not imported here — it must come from the
# star import of plucked_map (matplotlib); confirm.
fig = figure(figsize=(8,5))
ax = fig.add_subplot(111)
ax.fill_between(x, 0, density)
ax.xaxis.set_tick_params(labelsize=40)
ax.yaxis.set_tick_params(labelsize=40)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def product_of_array_except_self(nums):
    """Return a list whose i-th entry is the product of all elements of
    ``nums`` except ``nums[i]`` (no division used)."""
    size = len(nums)
    res = [1] * size
    # Forward sweep: res[i] becomes the product of nums[0..i-1].
    running = 1
    for i in range(size):
        res[i] = running
        running *= nums[i]
    # Backward sweep: fold in the product of nums[i+1..].
    running = 1
    for i in range(size - 1, -1, -1):
        res[i] *= running
        running *= nums[i]
    return res
def product_of_array_except_self1(nums):
    """Given an array of n integers, return an array whose i-th element is
    the product of every element of the original array except the i-th.

    :param nums: List[int]
    :return: List[int]
    """
    size = len(nums)
    result = [1] * size
    # Forward pass: result[i] holds the product of nums[0..i-1].
    for i in range(1, size):
        result[i] = result[i-1] * nums[i-1]
    # Backward pass: multiply in the suffix product on the fly.
    suffix = 1
    for i in range(size - 2, -1, -1):
        suffix *= nums[i+1]
        result[i] *= suffix
    return result
if __name__ == "__main__":
nums = [1,2,3,4]
print(product_of_array_except_self1(nums)) |
import Data_Utils1
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler, normalize
from sklearn import svm
from sklearn.neural_network import MLPClassifier
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.model_selection import StratifiedKFold
from sklearn import preprocessing
import numpy as np
from sklearn.model_selection import cross_val_score
import random
# Load the CASIS character-unigram dataset: CU_XX is the feature matrix,
# Y the author labels.
CU_XX, Y = Data_Utils1.Get_Casis_CUDataset("results")
print(Y, CU_XX)
evaluated = 0         # global fitness-evaluation budget counter
fcount = len(CU_XX[0])  # gene-vector length = number of features
def evaluate(child):
    """Fitness of a feature mask: mean 4-fold CV accuracy of a LinearSVC
    trained on CU_XX with features masked (element-wise multiplied) by the
    0/1 vector ``child``.

    Increments the module-level ``evaluated`` counter as a side effect.
    """
    global evaluated
    evaluated += 1
    lsvm = svm.LinearSVC()
    skf = StratifiedKFold(n_splits=4, shuffle=True, random_state=0)
    fold_accuracy = []
    CU_X = CU_XX * child
    scaler = StandardScaler()
    tfidf = TfidfTransformer(norm=None)
    # BUG FIX: this read ``Data_Utils.DenseTransformer()`` but only
    # ``Data_Utils1`` is imported, so evaluate() raised NameError on entry.
    dense = Data_Utils1.DenseTransformer()
    for train, test in skf.split(CU_X, Y):
        # train/test split
        CU_train_data = CU_X[train]
        train_labels = Y[train]
        CU_eval_data = CU_X[test]
        eval_labels = Y[test]
        # tf-idf (fit on the training split only, then densify)
        tfidf.fit(CU_train_data)
        CU_train_data = dense.transform(tfidf.transform(CU_train_data))
        CU_eval_data = dense.transform(tfidf.transform(CU_eval_data))
        # standardization
        scaler.fit(CU_train_data)
        CU_train_data = scaler.transform(CU_train_data)
        CU_eval_data = scaler.transform(CU_eval_data)
        # normalization
        CU_train_data = normalize(CU_train_data)
        CU_eval_data = normalize(CU_eval_data)
        # evaluation
        lsvm.fit(CU_train_data, train_labels)
        fold_accuracy.append(lsvm.score(CU_eval_data, eval_labels))
    return np.mean(fold_accuracy, axis=0)
# fold_accuracy = []
# fold_accuracy.append((lsvm_acc, rbfsvm_acc, mlp_acc))
#
# print(np.mean(fold_accuracy, axis = 0))
def generatePopuplation(size):
    """Create ``size`` individuals: [random 0/1 gene vector, fitness = -1]."""
    return [[np.random.randint(2, size=fcount), -1] for _ in range(size)]
def BestFit(population):
    """Index of the individual with the highest fitness (first on ties)."""
    best = 0
    for idx in range(1, len(population)):
        if population[idx][1] > population[best][1]:
            best = idx
    return best
def WorstFit(population):
    """Index of the individual with the lowest fitness (first on ties)."""
    worst = 0
    for idx in range(1, len(population)):
        if population[idx][1] < population[worst][1]:
            worst = idx
    return worst
def Crossover(parents):
    """Uniform crossover: each gene is copied from a uniformly random parent.

    FIX: ``np.random.random_integers`` was deprecated in NumPy 1.11 and
    removed in 1.25; ``np.random.randint(parentCount)`` draws the same
    uniform index over 0..parentCount-1 (random_integers(k) - 1).
    """
    parentCount = len(parents)
    child = [0] * fcount
    for i in range(fcount):
        child[i] = parents[np.random.randint(parentCount)][i]
    return child
def Mutation(child, rate):
    """Flip each 0/1 gene independently with probability ``rate`` percent,
    mutating ``child`` in place and returning it."""
    for idx, gene in enumerate(child):
        if random.randint(0, 100) >= rate:
            continue
        child[idx] = (gene + 1) % 2
    return child
# GA hyper-parameters.  ``opt == 1`` selects the larger-tournament variant.
opt = 1
tSelect = 2     # tournament size
pSize = 2       # number of parents combined per crossover
mRate = [5, 5]  # [min, max] mutation rate in percent
if opt == 1:
    tSelect = 4
    pSize = 5
    mRate = [2, 6]
def SteadyState(population):
    """Steady-state GA loop: tournament selection, uniform crossover,
    adaptive mutation, then replacement of the worst individual.  Runs
    until the global ``evaluated`` budget reaches 5000.
    """
    while evaluated < 5000:
        # --- tournament selection of pSize parents --------------------------
        Selected_Parents = []
        for _ in range(pSize):
            tournament = [population[random.randrange(len(population))]
                          for _ in range(tSelect)]
            # BUG FIX: the original called sorted() and discarded its result,
            # so a random tournament entrant (not the winner) was selected.
            # Sort in place, best first — fitness is accuracy, higher is
            # better, consistent with BestFit().
            tournament.sort(key=lambda ind: float(ind[1]), reverse=True)
            Selected_Parents.append(tournament[0][0])
        child = Crossover(Selected_Parents)
        # --- adaptive mutation rate -----------------------------------------
        # Mean fitness of the population excluding one copy of the worst.
        worstOne = population[WorstFit(population)][1]
        popValues = [indi[1] for indi in population]
        for i, val in enumerate(popValues):
            if(val == worstOne):
                del popValues[i]
                break
        popValues = np.array(popValues)
        avg = worstOne
        if len(popValues) > 0:
            avg = popValues.mean()
        bestWorstDifference = (population[BestFit(population)][1] - avg) * 100
        selectedRate = mRate[0]
        if mRate[0] == mRate[1]:
            selectedRate = mRate[0]
        elif bestWorstDifference < 10:
            # Interpolate between the mRate bounds while the spread is small.
            selectedRate = ( (mRate[1]-mRate[0])*bestWorstDifference/10 )+mRate[0]
        print("Diff: ",bestWorstDifference, ", selected rate", selectedRate)
        child = Mutation(child, selectedRate)
        # --- replacement: child evicts the current worst --------------------
        worst = WorstFit(population)
        population[worst] = [child, evaluate(child)]
        print( population[worst][1], " - Best fit on", evaluated, " | ", BestFit(population), " with ", population[BestFit(population)][1])
# all_features = [1] * fcount
# gains = [0] * fcount
# all_fit = evaluate(all_features)
# mustDisabled = []
# mustEnabled = []
# for i, _ in enumerate(all_features):
# copy = all_features[:]
# copy[i] = 0
# gains[i] = evaluate(copy) - all_fit
# if(gains[i] > 0):
# mustDisabled.append(i)
# if(gains[i] < 0):
# mustEnabled.append(i)
# print(i, "-", gains[i])
#
# print(mustEnabled)
# print(mustDisabled)
#
# Hand-picked features from the (commented-out) ablation study above.
mustEnabled = [ ]
mustDisabled = [ ]
if opt == 1:
    mustEnabled = [311, 378]
    mustDisabled = [140, 262, 273, 385]
population = generatePopuplation(25)
print(len(population))
for i, individual in enumerate(population):
    if(i < 4):
        # BUG FIX: the original wrote ``individual[0][i]`` — indexing by the
        # *population* index instead of the feature index — so the forced
        # genes were never actually applied.
        for feature in mustDisabled:
            individual[0][feature] = 0
        for feature in mustEnabled:
            individual[0][feature] = 1
    population[i][1] = evaluate(individual[0])
print("Moving to steady state")
SteadyState(population)
print(population)
popValues = np.array([indi[1] for indi in population])
avg = popValues.mean()
print("AVG: ", avg, " | BestFit: ", population[BestFit(population)][1])
|
# Definition for an interval.
# class Interval:
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
    """LeetCode 436 "Find Right Interval": for each interval, return the
    index of the interval whose start is the smallest value >= its end,
    or -1 when none exists."""

    def findRightInterval(self, intervals):
        """Variant for Interval objects (``.start`` / ``.end`` attributes)."""
        lookup = []
        for idx, val in enumerate(intervals):
            lookup.append((val.start, idx))
        lookup.sort()
        n = len(lookup)

        def helper(x):
            # Lower bound: leftmost position whose start is >= x.
            lo = 0
            hi = n
            while lo < hi:
                # BUG FIX: the original computed ``(lo + n) // 2``, which
                # ignores ``hi`` and probes outside the shrinking interval,
                # producing wrong answers / degenerate behavior.
                mid = (lo + hi) // 2
                if lookup[mid][0] < x:
                    lo = mid + 1
                else:
                    hi = mid
            return lo

        res = []
        for interval in intervals:
            idx = helper(interval.end)
            # helper returns n when no start >= end exists; otherwise
            # lookup[idx] is already the leftmost valid interval (this also
            # covers idx == 0, which the original re-checked redundantly).
            res.append(-1 if idx == n else lookup[idx][1])
        return res

    def findRightInterval1(self, intervals):
        """Variant for list intervals ``[start, end]`` using bisect."""
        import bisect
        lookup = sorted((val[0], idx) for idx, val in enumerate(intervals))
        n = len(lookup)
        res = []
        for interval in intervals:
            # (end,) sorts below every (end, idx) tuple, so bisect_left
            # yields the leftmost entry whose start is >= end.
            idx = bisect.bisect_left(lookup, (interval[1],))
            # BUG FIX: the old idx == 0 branch dereferenced ``interval.end``
            # on a plain list (AttributeError); the extra comparison was
            # redundant anyway because lookup[0][0] >= end whenever idx == 0.
            # A stray debug print was removed as well.
            res.append(-1 if idx == n else lookup[idx][1])
        return res
a = Solution()
print(a.findRightInterval1([[3, 4], [2, 3], [1, 2]]))
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the ``urlauth_authkey`` table."""

    def forwards(self, orm):
        # Adding model 'AuthKey'
        db.create_table('urlauth_authkey', (
            ('id', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
            ('uid', self.gf('django.db.models.fields.PositiveIntegerField')()),
            ('expired', self.gf('django.db.models.fields.DateTimeField')()),
            ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('onetime', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('data', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('urlauth', ['AuthKey'])

    def backwards(self, orm):
        # Deleting model 'AuthKey'
        db.delete_table('urlauth_authkey')

    # Frozen ORM snapshot used by South while running this migration.
    models = {
        'urlauth.authkey': {
            'Meta': {'object_name': 'AuthKey'},
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {}),
            'expired': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'onetime': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'uid': ('django.db.models.fields.PositiveIntegerField', [], {})
        }
    }

    complete_apps = ['urlauth']
|
#!/usr/bin/env python
import ADC0832
import time
import math
def init():
    # One-time setup of the ADC0832 A/D converter pins.
    ADC0832.setup()
def loop():
    """Continuously read the thermistor and print the temperature (Python 2)."""
    while True:
        analogVal = ADC0832.getResult()
        # 8-bit ADC reading -> voltage on a 5 V scale.
        Vr = 5 * float(analogVal) / 255
        # Thermistor resistance from the voltage divider with a 10 kOhm leg.
        Rt = 10000 * Vr / (5 - Vr)
        # Beta model (B = 3950, R0 = 10 kOhm at 25 C): temperature in Kelvin.
        temp = 1/(((math.log(Rt / 10000)) / 3950) + (1 / (273.15+25)))
        temp = temp - 273.15  # Kelvin -> Celsius
        print 'temperature = %d C' % temp
        time.sleep(0.2)
if __name__ == '__main__':
init()
try:
loop()
except KeyboardInterrupt:
ADC0832.destroy()
print 'The end !'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Connection settings (presumably a Tinkerforge brick daemon — confirm).
HOST = "192.168.10.120"
PORT = 4223
GPSUID = "cPA"            # UID of the GPS device
SERVOUID = "9oVKfGxvXL7"  # UID of the servo controller
# Servo channel assignments.
motor = 5
steeringsrv = 1
# Servo command values (units per the servo API — confirm).
stop = 400
mid = 0
speed = 3000
earthRadius = 6371140 #in meters
# Reference waypoint as a (latitude, longitude) tuple.
point1 = 48.799332, 9.051900
|
import sys
import collections
# 4-neighbour offsets applied to (row, col) positions.
x = [1, 0, -1, 0]
y = [0, 1, 0, -1]
while True:
    # Each test case starts with "W H"; "0 ..." terminates the input.
    W, H = map(int, sys.stdin.readline().split())
    if W == 0:
        break
    # Grid with a one-cell border of 0s so neighbour checks never go
    # out of bounds.
    tile = [[0 for i in xrange(W + 2)] for j in xrange(H + 2)]
    visited = set()
    next = set()
    for i in xrange(H):
        line = raw_input().strip()
        for j in xrange(W):
            # '.' is passable; '@' is passable and the flood-fill seed.
            if line[j] == '.':
                tile[i + 1][j + 1] = 1
            if line[j] == '@':
                tile[i + 1][j + 1] = 1
                next.add((i + 1, j + 1))
    # BFS flood fill from '@': count every reachable passable cell.
    while len(next) > 0:
        temp_next = set()
        for n in next:
            visited.add(n)
            for i in xrange(4):
                temp = (n[0] + x[i], n[1] + y[i])
                if temp not in visited and tile[temp[0]][temp[1]] == 1:
                    temp_next.add(temp)
        next = temp_next
    print len(visited)
|
import pytest
from .pages.basket_page import BasketPage
from .pages.login_page import LoginPage
from .pages.main_page import MainPage
from .pages.product_page import ProductPage
from .pages.search_page import SearchPage
link = "http://selenium1py.pythonanywhere.com/"
class TestSearchFromMainPage():
    """Guest search scenarios starting from the shop's main page."""

    def test_guest_can_go_to_search_page(self, browser):
        # Data
        link = "http://selenium1py.pythonanywhere.com"
        book_name = "The shellcoder's handbook"
        # Arrange
        page = MainPage(browser, link)
        page.open()
        # Act
        page = SearchPage(browser, link)
        page.go_to_search_page(book_name)
        # Assert
        page.should_be_search_url()
        page.should_be_search_page()
        page.should_be_search_book_name(book_name)

    @pytest.mark.parametrize('book_name',
                             ["The shellcoder's handbook",
                              "Hacking Exposed Wireless",
                              "Coders at Work",
                              pytest.param("Studyguide for Counter Hack", marks=pytest.mark.xfail),
                              "Gray Hat Hacking",
                              "Reversing",
                              "Applied cryptography",
                              "Hacker's Delight",
                              "Silence On The Wire",
                              "Google Hacking"])
    def test_guest_can_go_to_search_many_page(self, browser, book_name):
        """Search for each parametrized title and add it to the basket."""
        # Data
        link = "http://selenium1py.pythonanywhere.com"
        book_name = f"{book_name}"  # NOTE(review): no-op re-wrap of the parametrized value
        template = "{} has been added to your basket."
        # Arrange
        page = MainPage(browser, link)
        page.open()
        # Act
        page = SearchPage(browser, link)
        page.go_to_search_page(book_name)
        page.should_be_search_page()
        page.should_be_search_book_name(book_name)
        page.add_to_basket_from_search_page()
        # Assert
        page = ProductPage(browser, link)
        page.check_add_to_basket_notification(book_name, template)

    def test_guest_can_add_to_basket_from_search_page(self, browser):
        # Data
        link = "http://selenium1py.pythonanywhere.com"
        book_name = "The shellcoder's handbook"
        template = "{} has been added to your basket."
        # Arrange
        page = MainPage(browser, link)
        page.open()
        # Act
        page = SearchPage(browser, link)
        page.go_to_search_page(book_name)
        page.should_be_search_url()
        page.should_be_search_page()
        page.should_be_search_book_name(book_name)
        # Assert
        page.add_to_basket_from_search_page()
        page = ProductPage(browser, link)
        page.check_add_to_basket_notification(book_name, template)
class TestUserAddToBasketFromSearchPage:
    """Basket scenarios executed as a logged-in user (autouse login fixture)."""

    @pytest.fixture(scope="function", autouse=True)
    def setup(self, browser):
        # Data
        # NOTE(review): credentials are hard-coded in source; prefer
        # environment variables or a secrets store even for a demo site.
        link_login = "http://selenium1py.pythonanywhere.com/en-gb/accounts/login/"
        email = "ishelter@mail.ru"
        password = "zfdR88gMcgwxKD9"
        # Arrange
        page = LoginPage(browser, link_login)
        page.open()
        # Act
        page.login_user(email, password)
        # Assert
        page.should_be_authorized_user()

    def test_user_can_add_and_clear_into_basket_found_products_(self, browser):
        # Data
        link = "http://selenium1py.pythonanywhere.com"
        book_name = "The shellcoder's handbook"
        # Arrange
        page = MainPage(browser, link)
        page.open()
        # Act
        page = SearchPage(browser, link)
        page.go_to_search_page(book_name)
        page.should_be_search_url()
        page.should_be_search_page()
        page.should_be_search_book_name(book_name)
        page.add_to_basket_from_search_page()
        page.go_to_basket()
        basket_page = BasketPage(browser, browser.current_url)
        basket_page.clear_basket()
        # Assert
        basket_page.basket_has_no_product()
        basket_page.mess_about_basket_is_empty()

    def test_user_can_change_quantity_products_into_basket(self, browser):
        # Data
        link = "http://selenium1py.pythonanywhere.com"
        book_name = "The shellcoder's handbook"
        # Arrange
        page = MainPage(browser, link)
        page.open()
        # Act
        page = SearchPage(browser, link)
        page.go_to_search_page(book_name)
        page.should_be_search_url()
        page.should_be_search_page()
        page.should_be_search_book_name(book_name)
        page.add_to_basket_from_search_page()
        page.go_to_basket()
        basket_page = BasketPage(browser, browser.current_url)
        # Assert
        basket_page.addition_products()
        basket_page.clear_basket()
        basket_page.basket_has_no_product()
        basket_page.mess_about_basket_is_empty()
|
from selenium import webdriver
import requests
url = 'http://www.shanyaoo.com/_account/login.shtml'
driver = webdriver.Chrome()
# When the login page has no captcha, Selenium could run headless instead
# of opening a visible browser window:
# option_chrome = webdriver.ChromeOptions()
# option_chrome.add_argument('--headless')
# driver = webdriver.Chrome(chrome_options=option_chrome)
driver.get(url)
# Fill the login form (the xpath placeholders are Chinese for
# "username" / "login password").
driver.find_element_by_xpath('//input[@placeholder="用户名"]').send_keys('xs25391')
driver.find_element_by_xpath('//input[@placeholder="登录密码"]').send_keys('123456')
driver.find_element_by_xpath('//button[@class="loginBtn"]').click()
# Capture the browser session's cookies and rebuild them as a single
# Cookie header for plain requests calls.
cookies = driver.get_cookies()
print(cookies)
cookies_list= []
for cookie_dict in cookies:
    cookie =cookie_dict['name']+'='+cookie_dict['value']
    cookies_list.append(cookie)
header_cookie = ';'.join(cookies_list)
headers = {
    'cookie':header_cookie,
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
}
# Authenticated search-results request made with the captured cookies.
fin_url = 'http://www.shanyaoo.com/_shop/pazd/search.shtml?rt=ProductOfShop&sv=%E4%B8%AD%E8%8D%AF&total=691&ffs=&sn=2'
r = requests.get(fin_url,headers=headers)
print(r.text)
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 4 01:13:25 2020
@author: ASUS
"""
import shapefile  # pyshp: reader/writer for ESRI shapefiles

# Create a new polygon shapefile called "soal8" with a two-column DBF.
writer = shapefile.Writer("soal8", shapeType=shapefile.POLYGON)
writer.shapeType  # shape type was already fixed by the constructor
writer.field("kolom1", "C")  # first attribute column, character type
writer.field("kolom2", "C")  # second attribute column, character type
writer.record("ngek", "satu")  # one attribute record for the shape
# NOTE(review): the ring [1,3] -> [5,3] -> [1,2] -> [5,2] -> [1,3] is
# self-intersecting (a "bowtie"); confirm the intended vertex order.
writer.poly([[[1, 3], [5, 3], [1, 2], [5, 2], [1, 3]]])
writer.close()  # flush .shp/.shx/.dbf to disk
##############################################################################
#
# Copyright (c) 2002, 2018 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Tests for logging configuration via ZConfig."""
import doctest
import logging
import os
import sys
import unittest
from io import StringIO
from sys import maxsize
import ZConfig
import ZConfig.components.logger.tests.support
import ZConfig.tests.support
from ZConfig.components.logger import datatypes
from ZConfig.components.logger import handlers
from ZConfig.components.logger import loghandler
class CustomFormatter(logging.Formatter):
    """Formatter that appends reassuring advice to formatted tracebacks."""

    def formatException(self, ei):
        """Format and return the exception information as a string.
        This adds helpful advice to the end of the traceback.
        """
        import traceback
        rendered = ''.join(traceback.format_exception(ei[0], ei[1], ei[2]))
        return rendered + "... Don't panic!"
def read_file(filename):
    """Return the entire text contents of *filename*."""
    with open(filename) as stream:
        contents = stream.read()
    return contents
class TestConfig(ZConfig.components.logger.tests.support.LoggingTestHelper,
                 unittest.TestCase):
    """Tests for <eventlog> configuration via ZConfig.

    Each test builds a logger (or logger factory) from a configuration
    snippet and checks that the resulting handlers are set up as requested.
    """

    _schematext = """
      <schema>
        <import package='ZConfig.components.logger'/>
        <section type='eventlog' name='*' attribute='eventlog'/>
      </schema>
    """
    def test_config_without_logger(self):
        conf = self.get_config("")
        self.assertTrue(conf.eventlog is None)
    def test_config_without_handlers(self):
        logger = self.check_simple_logger("<eventlog/>")
        # Make sure there's a NullHandler, since a warning gets
        # printed if there are no handlers:
        self.assertEqual(len(logger.handlers), 1)
        self.assertTrue(isinstance(logger.handlers[0], loghandler.NullHandler))
        # And it does nothing
        logger.handlers[0].emit(None)
        logger.handlers[0].handle(None)
    def test_factory_without_stream(self):
        factory = self.check_simple_logger_factory("<eventlog>\n"
                                                   " <logfile>\n"
                                                   " path STDERR\n"
                                                   " </logfile>\n"
                                                   " <logfile>\n"
                                                   " path STDERR\n"
                                                   " level info\n"
                                                   " </logfile>\n"
                                                   " <logfile>\n"
                                                   " path STDERR\n"
                                                   " level debug\n"
                                                   " </logfile>\n"
                                                   "</eventlog>")
        factory.startup()
        logger = factory.instance
        factory.level = logging.NOTSET
        self.assertEqual(factory.getLowestHandlerLevel(), logging.DEBUG)
        logger.handlers[0].reopen = lambda: None
        factory.reopen()
    def test_with_logfile(self):
        fn = self.mktemp()
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <logfile>\n"
                                          " path %s\n"
                                          " level debug\n"
                                          " </logfile>\n"
                                          "</eventlog>" % fn)
        logfile = logger.handlers[0]
        self.assertEqual(logfile.level, logging.DEBUG)
        self.assertTrue(isinstance(logfile, loghandler.FileHandler))
        self.assertFalse(logfile.delay)
        self.assertIsNotNone(logfile.stream)
        logger.removeHandler(logfile)
        logfile.close()
    def test_with_encoded(self):
        fn = self.mktemp()
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <logfile>\n"
                                          " path %s\n"
                                          " level debug\n"
                                          " encoding shift-jis\n"
                                          " </logfile>\n"
                                          "</eventlog>" % fn)
        logfile = logger.handlers[0]
        self.assertEqual(logfile.level, logging.DEBUG)
        self.assertTrue(isinstance(logfile, loghandler.FileHandler))
        self.assertFalse(logfile.delay)
        self.assertIsNotNone(logfile.stream)
        self.assertEqual(logfile.stream.encoding, "shift-jis")
        logger.removeHandler(logfile)
        logfile.close()
    def test_with_logfile_delayed(self):
        fn = self.mktemp()
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <logfile>\n"
                                          " path %s\n"
                                          " level debug\n"
                                          " delay true\n"
                                          " </logfile>\n"
                                          "</eventlog>" % fn)
        logfile = logger.handlers[0]
        self.assertEqual(logfile.level, logging.DEBUG)
        self.assertTrue(isinstance(logfile, loghandler.FileHandler))
        self.assertTrue(logfile.delay)
        # With delay, the stream only appears at the first emit:
        self.assertIsNone(logfile.stream)
        logger.info("this is a test")
        self.assertIsNotNone(logfile.stream)
        logger.removeHandler(logfile)
        logfile.close()
    def test_with_stderr(self):
        self.check_standard_stream("stderr")
    def test_with_stdout(self):
        self.check_standard_stream("stdout")
    def test_delayed_stderr(self):
        self.check_standard_stream_cannot_delay("stderr")
    def test_delayed_stdout(self):
        self.check_standard_stream_cannot_delay("stdout")
    def test_encoded_stderr(self):
        self.check_standard_stream_cannot_encode("stderr")
    def test_encoded_stdout(self):
        self.check_standard_stream_cannot_encode("stdout")
    def test_with_rotating_logfile(self):
        fn = self.mktemp()
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <logfile>\n"
                                          " path %s\n"
                                          " level debug\n"
                                          " max-size 5mb\n"
                                          " old-files 10\n"
                                          " </logfile>\n"
                                          "</eventlog>" % fn)
        logfile = logger.handlers[0]
        self.assertEqual(logfile.level, logging.DEBUG)
        self.assertEqual(logfile.backupCount, 10)
        self.assertEqual(logfile.maxBytes, 5*1024*1024)
        self.assertTrue(isinstance(logfile, loghandler.RotatingFileHandler))
        logger.removeHandler(logfile)
        logfile.close()
    def test_with_timed_rotating_logfile(self):
        fn = self.mktemp()
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <logfile>\n"
                                          " path %s\n"
                                          " level debug\n"
                                          " when D\n"
                                          " interval 3\n"
                                          " old-files 11\n"
                                          " </logfile>\n"
                                          "</eventlog>" % fn)
        logfile = logger.handlers[0]
        self.assertEqual(logfile.level, logging.DEBUG)
        self.assertEqual(logfile.backupCount, 11)
        self.assertEqual(logfile.interval, 86400*3)
        self.assertIsInstance(logfile, loghandler.TimedRotatingFileHandler)
        logger.removeHandler(logfile)
        logfile.close()
    def test_with_timed_rotating_logfile_and_size_should_fail(self):
        fn = self.mktemp()
        self.assertRaises(
            ZConfig.DataConversionError,
            self.check_simple_logger_factory,
            "<eventlog>\n"
            " <logfile>\n"
            " path %s\n"
            " level debug\n"
            " max-size 5mb\n"
            " when D\n"
            " old-files 10\n"
            " </logfile>\n"
            "</eventlog>" % fn)
        # Missing old-files
        self.assertRaisesRegex(
            ZConfig.DataConversionError,
            "old-files must be set",
            self.check_simple_logger_factory,
            "<eventlog>\n"
            " <logfile>\n"
            " path %s\n"
            " level debug\n"
            " max-size 5mb\n"
            " when D\n"
            " </logfile>\n"
            "</eventlog>" % fn)
        self.assertRaisesRegex(
            ZConfig.DataConversionError,
            "max-bytes or when must be set",
            self.check_simple_logger_factory,
            "<eventlog>\n"
            " <logfile>\n"
            " path %s\n"
            " level debug\n"
            " interval 1\n"
            " old-files 10\n"
            " </logfile>\n"
            "</eventlog>" % fn)
    def test_with_rotating_logfile_and_STD_should_fail(self):
        for path in ('STDERR', 'STDOUT'):
            for param in ('old-files 10', 'max-size 5mb'):
                self.assertRaises(
                    ZConfig.DataConversionError,
                    self.check_simple_logger_factory,
                    "<eventlog>\n"
                    " <logfile>\n"
                    " path %s\n"
                    " level debug\n"
                    " when D\n"
                    " %s\n"
                    " </logfile>\n"
                    "</eventlog>" % (path, param))
    def check_standard_stream(self, name):
        """Verify that path STDOUT/STDERR binds the handler to sys.<name>."""
        old_stream = getattr(sys, name)
        conf = self.get_config("""
            <eventlog>
              <logfile>
                level info
                path %s
              </logfile>
            </eventlog>
            """ % name.upper())
        self.assertTrue(conf.eventlog is not None)
        # The factory has already been created; make sure it picks up
        # the stderr we set here when we create the logger and
        # handlers:
        sio = StringIO()
        setattr(sys, name, sio)
        try:
            logger = conf.eventlog()
        finally:
            setattr(sys, name, old_stream)
        logger.warning("woohoo!")
        self.assertIs(logger.handlers[0].stream, sio)
        self.assertTrue(sio.getvalue().find("woohoo!") >= 0)
    def check_standard_stream_cannot_delay(self, name):
        """Verify that 'delay true' is rejected for STDOUT/STDERR paths."""
        with self.assertRaises(ZConfig.DataConversionError) as cm:
            self.get_config("""
                <eventlog>
                  <logfile>
                    level info
                    path %s
                    delay true
                  </logfile>
                </eventlog>
                """ % name.upper())
        self.assertIn("cannot delay opening %s" % name.upper(),
                      str(cm.exception))
    def check_standard_stream_cannot_encode(self, name):
        """Verify that 'encoding' is rejected for STDOUT/STDERR paths."""
        with self.assertRaises(ZConfig.DataConversionError) as cm:
            self.get_config("""
                <eventlog>
                  <logfile>
                    level info
                    path %s
                    encoding utf-8
                  </logfile>
                </eventlog>
                """ % name.upper())
        self.assertIn("cannot specify encoding for %s" % name.upper(),
                      str(cm.exception))
    def test_custom_formatter(self):
        clsname = __name__ + '.CustomFormatter'
        old_stream = sys.stdout
        sio = StringIO()
        sys.stdout = sio
        try:
            conf = self.get_config("""
                <eventlog>
                  <logfile>
                    formatter %s
                    level info
                    path STDOUT
                  </logfile>
                </eventlog>
                """ % clsname)
            logger = conf.eventlog()
        finally:
            sys.stdout = old_stream
        try:
            raise KeyError
        except KeyError:
            logger.exception("testing a KeyError")
        self.assertTrue(sio.getvalue().find("KeyError") >= 0)
        self.assertTrue(sio.getvalue().find("Don't panic") >= 0)
    def test_with_syslog(self):
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <syslog>\n"
                                          " level error\n"
                                          " facility local3\n"
                                          " </syslog>\n"
                                          "</eventlog>")
        syslog = logger.handlers[0]
        self.assertEqual(syslog.level, logging.ERROR)
        self.assertTrue(isinstance(syslog, loghandler.SysLogHandler))
        syslog.close()  # avoid ResourceWarning
    def test_with_http_logger_localhost(self):
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <http-logger>\n"
                                          " level error\n"
                                          " method post\n"
                                          " </http-logger>\n"
                                          "</eventlog>")
        handler = logger.handlers[0]
        self.assertEqual(handler.host, "localhost")
        # XXX The "url" attribute of the handler is misnamed; it
        # really means just the selector portion of the URL.
        self.assertEqual(handler.url, "/")
        self.assertEqual(handler.level, logging.ERROR)
        self.assertEqual(handler.method, "POST")
        self.assertTrue(isinstance(handler, loghandler.HTTPHandler))
    def test_with_http_logger_remote_host(self):
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <http-logger>\n"
                                          " method get\n"
                                          " url http://example.com/log/\n"
                                          " </http-logger>\n"
                                          "</eventlog>")
        handler = logger.handlers[0]
        self.assertEqual(handler.host, "example.com")
        # XXX The "url" attribute of the handler is misnamed; it
        # really means just the selector portion of the URL.
        self.assertEqual(handler.url, "/log/")
        self.assertEqual(handler.level, logging.NOTSET)
        self.assertEqual(handler.method, "GET")
        self.assertTrue(isinstance(handler, loghandler.HTTPHandler))
    def test_with_email_notifier(self):
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <email-notifier>\n"
                                          " to sysadmin@example.com\n"
                                          " to sa-pager@example.com\n"
                                          " from zlog-user@example.com\n"
                                          " level fatal\n"
                                          " </email-notifier>\n"
                                          "</eventlog>")
        handler = logger.handlers[0]
        self.assertEqual(handler.toaddrs, ["sysadmin@example.com",
                                           "sa-pager@example.com"])
        self.assertEqual(handler.fromaddr, "zlog-user@example.com")
        self.assertEqual(handler.level, logging.FATAL)
    def test_with_email_notifier_with_credentials(self):
        logger = self.check_simple_logger("<eventlog>\n"
                                          " <email-notifier>\n"
                                          " to sysadmin@example.com\n"
                                          " from zlog-user@example.com\n"
                                          " level fatal\n"
                                          " smtp-server foo:487\n"
                                          " smtp-username john\n"
                                          " smtp-password johnpw\n"
                                          " </email-notifier>\n"
                                          "</eventlog>")
        handler = logger.handlers[0]
        self.assertEqual(handler.toaddrs, ["sysadmin@example.com"])
        # (Fix: this assertion was duplicated in the original.)
        self.assertEqual(handler.fromaddr, "zlog-user@example.com")
        self.assertEqual(handler.level, logging.FATAL)
        self.assertEqual(handler.username, 'john')
        self.assertEqual(handler.password, 'johnpw')
        self.assertEqual(handler.mailhost, 'foo')
        self.assertEqual(handler.mailport, 487)
    def test_with_email_notifier_with_invalid_credentials(self):
        # smtp-username without smtp-password
        with self.assertRaises(ZConfig.DataConversionError) as cm:
            self.check_simple_logger_factory(
                "<eventlog>\n"
                " <email-notifier>\n"
                " to sysadmin@example.com\n"
                " from zlog-user@example.com\n"
                " level fatal\n"
                " smtp-username john\n"
                " </email-notifier>\n"
                "</eventlog>")
        self.assertIn(
            'both smtp-username and smtp-password or none must be given',
            str(cm.exception))
        # smtp-password without smtp-username
        with self.assertRaises(ZConfig.DataConversionError) as cm:
            self.check_simple_logger_factory(
                "<eventlog>\n"
                " <email-notifier>\n"
                " to sysadmin@example.com\n"
                " from zlog-user@example.com\n"
                " level fatal\n"
                " smtp-password john\n"
                " </email-notifier>\n"
                "</eventlog>")
        self.assertIn(
            'both smtp-username and smtp-password or none must be given',
            str(cm.exception))
    def check_simple_logger_factory(self, text, level=logging.INFO):
        """Parse *text* and return the (not yet called) eventlog factory."""
        conf = self.get_config(text)
        self.assertTrue(conf.eventlog is not None)
        self.assertEqual(conf.eventlog.level, level)
        return conf.eventlog
    def check_simple_logger(self, text, level=logging.INFO):
        """Parse *text* and return a configured logger with one handler."""
        logger = self.check_simple_logger_factory(text, level)()
        self.assertTrue(isinstance(logger, logging.Logger))
        self.assertEqual(len(logger.handlers), 1)
        return logger
# Though log files can be closed and re-opened on Windows, these tests
# expect to be able to move the underlying files out from underneath the
# logger while open.  That's not possible on Windows, so there the rotation
# tests get a base class that is not a TestCase and are never collected.
#
# Different tests are needed that only test that close/re-open operations
# are performed by the handler; those can be run on any platform.
_RotateTestBase = object if os.name == 'nt' else unittest.TestCase
class TestReopeningRotatingLogfiles(
        ZConfig.components.logger.tests.support.LoggingTestHelper,
        _RotateTestBase):
    """Reopen behaviour of size/time-based rotating file handlers."""
    # These tests should not be run on Windows.
    # Handler class under test; the subclass below swaps in FileHandler.
    handler_factory = loghandler.RotatingFileHandler
    _schematext = """
      <schema>
        <import package='ZConfig.components.logger'/>
        <multisection type='logger' name='*' attribute='loggers'/>
      </schema>
    """
    _sampleconfig_template = """
      <logger>
        name foo.bar
        <logfile>
          path %(path0)s
          level debug
          max-size 1mb
          old-files 10
        </logfile>
        <logfile>
          path %(path1)s
          level info
          max-size 1mb
          old-files 3
        </logfile>
        <logfile>
          path %(path1)s
          level info
          when D
          old-files 3
        </logfile>
      </logger>
      <logger>
        name bar.foo
        <logfile>
          path %(path2)s
          level info
          max-size 10mb
          old-files 10
        </logfile>
      </logger>
    """
    def test_filehandler_reopen(self):
        """Messages written after move+reopen land in the new file."""
        def mkrecord(msg):
            # Minimal ERROR-level LogRecord for the foo.bar logger.
            args = ["foo.bar", logging.ERROR, __file__, 42, msg, (), ()]
            return logging.LogRecord(*args)
        # This goes through the reopening operation *twice* to make
        # sure that we don't lose our handle on the handler the first
        # time around.
        fn = self.mktemp()
        h = self.handler_factory(fn)
        h.handle(mkrecord("message 1"))
        nfn1 = self.move(fn)
        h.handle(mkrecord("message 2"))
        h.reopen()
        h.handle(mkrecord("message 3"))
        nfn2 = self.move(fn)
        h.handle(mkrecord("message 4"))
        h.reopen()
        h.handle(mkrecord("message 5"))
        h.close()
        # Check that the messages are in the right files::
        text1 = read_file(nfn1)
        text2 = read_file(nfn2)
        text3 = read_file(fn)
        self.assertTrue("message 1" in text1)
        self.assertTrue("message 2" in text1)
        self.assertTrue("message 3" in text2)
        self.assertTrue("message 4" in text2)
        self.assertTrue("message 5" in text3)
    def test_logfile_reopening(self):
        #
        # This test only applies to the simple logfile reopening; it
        # doesn't work the same way as the rotating logfile handler.
        #
        paths = self.mktemp(), self.mktemp(), self.mktemp()
        d = {
            "path0": paths[0],
            "path1": paths[1],
            "path2": paths[2],
        }
        text = self._sampleconfig_template % d
        conf = self.get_config(text)
        self.assertEqual(len(conf.loggers), 2)
        # Build the loggers from the configuration, and write to them:
        conf.loggers[0]().info("message 1")
        conf.loggers[1]().info("message 2")
        #
        # We expect this to re-open the original filenames, so we'll
        # have six files instead of three.
        #
        loghandler.reopenFiles()
        #
        # Write to them again:
        conf.loggers[0]().info("message 3")
        conf.loggers[1]().info("message 4")
        #
        # We expect this to re-open the original filenames, so we'll
        # have nine files instead of six.
        #
        loghandler.reopenFiles()
        #
        # Write to them again:
        conf.loggers[0]().info("message 5")
        conf.loggers[1]().info("message 6")
        #
        # We should now have all nine files:
        for fn in paths:
            fn1 = fn + ".1"
            fn2 = fn + ".2"
            self.assertTrue(os.path.isfile(fn), "%r must exist" % fn)
            self.assertTrue(os.path.isfile(fn1), "%r must exist" % fn1)
            self.assertTrue(os.path.isfile(fn2), "%r must exist" % fn2)
        #
        # Clean up:
        for logger in conf.loggers:
            logger = logger()
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
                handler.close()
class TestReopeningLogfiles(TestReopeningRotatingLogfiles):
    """Same reopen tests, but with the plain (non-rotating) FileHandler."""
    handler_factory = loghandler.FileHandler
    _sampleconfig_template = """
      <logger>
        name foo.bar
        <logfile>
          path %(path0)s
          level debug
        </logfile>
        <logfile>
          path %(path1)s
          level info
        </logfile>
      </logger>
      <logger>
        name bar.foo
        <logfile>
          path %(path2)s
          level info
        </logfile>
      </logger>
    """
    def test_logfile_reopening(self):
        #
        # This test only applies to the simple logfile reopening; it
        # doesn't work the same way as the rotating logfile handler.
        #
        paths = self.mktemp(), self.mktemp(), self.mktemp()
        d = {
            "path0": paths[0],
            "path1": paths[1],
            "path2": paths[2],
        }
        text = self._sampleconfig_template % d
        conf = self.get_config(text)
        self.assertEqual(len(conf.loggers), 2)
        # Build the loggers from the configuration, and write to them:
        conf.loggers[0]().info("message 1")
        conf.loggers[1]().info("message 2")
        npaths1 = [self.move(fn) for fn in paths]
        #
        # We expect this to re-open the original filenames, so we'll
        # have six files instead of three.
        #
        loghandler.reopenFiles()
        #
        # Write to them again:
        conf.loggers[0]().info("message 3")
        conf.loggers[1]().info("message 4")
        npaths2 = [self.move(fn) for fn in paths]
        #
        # We expect this to re-open the original filenames, so we'll
        # have nine files instead of six.
        #
        loghandler.reopenFiles()
        #
        # Write to them again:
        conf.loggers[0]().info("message 5")
        conf.loggers[1]().info("message 6")
        #
        # We should now have all nine files:
        for fn in paths:
            self.assertTrue(os.path.isfile(fn), "%r must exist" % fn)
        for fn in npaths1:
            self.assertTrue(os.path.isfile(fn), "%r must exist" % fn)
        for fn in npaths2:
            self.assertTrue(os.path.isfile(fn), "%r must exist" % fn)
        #
        # Clean up:
        for logger in conf.loggers:
            logger = logger()
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)
                handler.close()
    def test_filehandler_reopen_thread_safety(self):
        # The reopen method needs to do locking to avoid a race condition
        # with emit calls. For simplicity we replace the "acquire" and
        # "release" methods with dummies that record calls to them.
        fn = self.mktemp()
        h = self.handler_factory(fn)
        calls = []
        h.acquire = lambda: calls.append("acquire")
        h.release = lambda: calls.append("release")
        h.reopen()
        self.assertEqual(calls, ["acquire", "release"])
        del calls[:]
        # FileHandler.close() does acquire/release, and invokes
        # StreamHandler.flush(), which does the same. Since the lock is
        # recursive, that's ok.
        #
        h.close()
        self.assertEqual(calls, ["acquire", "acquire", "release", "release"])
    def test_with_logfile_delayed_reopened(self):
        """A delayed handler stays closed across reopen() until next emit."""
        fn = self.mktemp()
        conf = self.get_config("<logger>\n"
                               " <logfile>\n"
                               " path %s\n"
                               " level debug\n"
                               " delay true\n"
                               " encoding shift-jis\n"
                               " </logfile>\n"
                               "</logger>" % fn)
        logger = conf.loggers[0]()
        logfile = logger.handlers[0]
        self.assertTrue(logfile.delay)
        self.assertIsNone(logfile.stream)
        logger.info("this is a test")
        self.assertIsNotNone(logfile.stream)
        self.assertEqual(logfile.stream.encoding, "shift-jis")
        # After reopening, we expect the stream to be reset, to be
        # opened at the next event to be logged:
        logfile.reopen()
        self.assertIsNone(logfile.stream)
        logger.info("this is another test")
        self.assertIsNotNone(logfile.stream)
        self.assertEqual(logfile.stream.encoding, "shift-jis")
        logger.removeHandler(logfile)
        logfile.close()
class TestFunctions(ZConfig.tests.support.TestHelper, unittest.TestCase):
    """Tests for the datatype/conversion helpers in the handlers module."""
    def test_log_format_bad_syntax_1(self):
        with self.assertRaises(ValueError) as cm:
            # Really, disallowed character following '%':
            handlers.log_format('%{no-such-key}s')
        self.assertEqual(str(cm.exception),
                         'Invalid log format string %{no-such-key}s')
    def test_log_format_bad_syntax_2(self):
        with self.assertRaises(ValueError) as cm:
            # Missing trailing 's':
            handlers.log_format('%(levelname)')
        self.assertEqual(str(cm.exception),
                         'Invalid log format string %(levelname)')
    def test_log_format_unknown_key(self):
        with self.assertRaises(ValueError) as cm:
            handlers.log_format('%(no-such-key)s')
        self.assertEqual(str(cm.exception),
                         'Invalid log format string %(no-such-key)s')
    def test_log_format_ok(self):
        # Backslash escapes in the config value are decoded:
        fmt = handlers.log_format(r'\n\t\b\f\r')
        self.assertEqual(fmt, '\n\t\b\f\r')
    def test_log_format_func_name(self):
        fmt = '%(funcName)s'
        self.assertEqual(handlers.log_format(fmt), fmt)
    def test_log_format_levelno_integer(self):
        fmt = '%(levelno)2d'
        self.assertEqual(handlers.log_format(fmt), fmt)
    def test_log_format_msecs_float(self):
        fmt = '%(msecs)03.0f'
        self.assertEqual(handlers.log_format(fmt), fmt)
    def test_log_format_relative_created_float(self):
        fmt = '%(relativeCreated)+.3f'
        self.assertEqual(handlers.log_format(fmt), fmt)
    def test_resolve_deep(self):
        # Force resolve() to actually import the nested module by
        # removing it first; restore everything afterwards.
        old_mod = None
        if hasattr(logging, 'handlers'):
            # This module is nested so it hits our coverage target,
            # and it doesn't alter any state
            # on import, so a "reimport" is fine
            del logging.handlers
            old_mod = sys.modules['logging.handlers']
            del sys.modules['logging.handlers']
        try:
            handlers.resolve('logging.handlers')
        finally:
            if old_mod is not None:
                logging.handlers = old_mod
                sys.modules['logging.handlers'] = old_mod
    def test_http_handler_url(self):
        self.assertRaisesRegex(ValueError,
                               'must be an http',
                               handlers.http_handler_url, 'file://foo/baz')
        self.assertRaisesRegex(ValueError,
                               'must specify a location',
                               handlers.http_handler_url, 'http://')
        self.assertRaisesRegex(ValueError,
                               'must specify a path',
                               handlers.http_handler_url, 'http://server')
        v = handlers.http_handler_url("http://server/path;param?q=v#fragment")
        self.assertEqual(v, ('server', '/path;param?q=v#fragment'))
    def test_close_files(self):
        # closeFiles() must close each registered handler exactly once.
        class F:
            closed = 0
            def close(self):
                self.closed += 1
        f = F()
        def wr():
            # Stand-in for a weakref callable returning the handler.
            return f
        loghandler._reopenable_handlers.append(wr)
        loghandler.closeFiles()
        loghandler.closeFiles()
        self.assertEqual(1, f.closed)
    def test_reopen_files_missing_wref(self):
        # simulate concurrent iteration that pops the ref
        def wr():
            loghandler._reopenable_handlers.remove(wr)
        loghandler._reopenable_handlers.append(wr)
        loghandler.reopenFiles()
    def test_logging_level(self):
        # Make sure the expected names are supported; it's not clear
        # how to check the values in a meaningful way.
        # Just make sure they're case-insensitive.
        convert = datatypes.logging_level
        for name in ["notset", "all", "trace", "debug", "blather",
                     "info", "warn", "warning", "error", "fatal",
                     "critical"]:
            self.assertEqual(convert(name), convert(name.upper()))
        self.assertRaises(ValueError, convert, "hopefully-not-a-valid-value")
        self.assertEqual(convert('10'), 10)
        self.assertRaises(ValueError, convert, '-1000')
        self.assertRaises(ValueError, convert, '-1')
        self.assertRaises(ValueError, convert, '51')
        self.assertRaises(ValueError, convert, '100')
    def test_http_method(self):
        convert = handlers.get_or_post
        self.assertEqual(convert("get"), "GET")
        self.assertEqual(convert("GET"), "GET")
        self.assertEqual(convert("post"), "POST")
        self.assertEqual(convert("POST"), "POST")
        self.assertRaises(ValueError, convert, "")
        self.assertRaises(ValueError, convert, "foo")
    def test_syslog_facility(self):
        convert = handlers.syslog_facility
        for name in ["auth", "authpriv", "cron", "daemon", "kern",
                     "lpr", "mail", "news", "security", "syslog",
                     "user", "uucp", "local0", "local1", "local2",
                     "local3", "local4", "local5", "local6", "local7"]:
            self.assertEqual(convert(name), name)
            self.assertEqual(convert(name.upper()), name)
        self.assertRaises(ValueError, convert, "hopefully-never-a-valid-value")
class TestStartupHandler(unittest.TestCase):
    """Tests for loghandler.StartupHandler's unbounded record buffering."""
    def test_buffer(self):
        # StartupHandler buffers everything (capacity sys.maxsize) and
        # never decides to flush on its own.
        handler = loghandler.StartupHandler()
        self.assertFalse(handler.shouldFlush(None))
        self.assertEqual(maxsize, handler.capacity)
        records = []
        def handle(record):
            records.append(record)
        # flushBufferTo() calls target.handle(record); give the function
        # itself a ``handle`` attribute so it can act as the target.
        handle.handle = handle
        handler.flushBufferTo(handle)
        self.assertEqual([], records)
        handler.buffer.append(1)
        handler.flushBufferTo(handle)
        self.assertEqual([1], records)
        del handle.handle
def test_logger_convenience_function_and_ommiting_name_to_get_root_logger():
    """
    The ZConfig.loggers function can be used to configure one or more loggers.
    We'll configure the root logger and a non-root logger.
    >>> old_level = logging.getLogger().getEffectiveLevel()
    >>> old_handler_count = len(logging.getLogger().handlers)
    >>> ZConfig.configureLoggers('''
    ... <logger>
    ...    level INFO
    ...    <logfile>
    ...       PATH STDOUT
    ...       format root %(levelname)s %(name)s %(message)s
    ...    </logfile>
    ... </logger>
    ...
    ... <logger>
    ...    name ZConfig.TEST
    ...    level DEBUG
    ...    <logfile>
    ...       PATH STDOUT
    ...       format test %(levelname)s %(name)s %(message)s
    ...    </logfile>
    ... </logger>
    ... ''')
    >>> logging.getLogger('ZConfig.TEST').debug('test message')
    test DEBUG ZConfig.TEST test message
    root DEBUG ZConfig.TEST test message
    >>> logging.getLogger().getEffectiveLevel() == logging.INFO
    True
    >>> len(logging.getLogger().handlers) == old_handler_count + 1
    True
    >>> logging.getLogger('ZConfig.TEST').getEffectiveLevel() == logging.DEBUG
    True
    >>> len(logging.getLogger('ZConfig.TEST').handlers) == 1
    True
    .. cleanup
    >>> logging.getLogger('ZConfig.TEST').setLevel(logging.NOTSET)
    >>> logging.getLogger('ZConfig.TEST').removeHandler(
    ...     logging.getLogger('ZConfig.TEST').handlers[-1])
    >>> logging.getLogger().setLevel(old_level)
    >>> logging.getLogger().removeHandler(logging.getLogger().handlers[-1])
    """
def test_suite():
    """Combine this module's unittest cases with its doctests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.defaultTestLoader.loadTestsFromName(__name__))
    suite.addTest(doctest.DocTestSuite())
    return suite
# Allow running this module directly; delegate to the combined suite above.
if __name__ == '__main__':
    unittest.main(defaultTest="test_suite")
|
#
# Copyright (c) 2020 Cord Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import dataclasses
import logging
from typing import TypeVar, Type, List
import requests
import requests.exceptions
from requests import Session, Timeout
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util import Retry
from cord.configs import BaseConfig
from cord.exceptions import *
from cord.http.error_utils import check_error_response
from cord.http.query_methods import QueryMethods
from cord.http.request import Request
class Querier:
    """ Querier for DB get/post requests. """

    T = TypeVar('T')

    def __init__(self, config: BaseConfig):
        self._config = config

    def basic_getter(self, db_object_type: Type[T], uid=None, payload=None) -> T:
        """ Single DB object getter.

        Raises ResourceNotFoundError when the server returns no object.
        """
        request = self.request(
            QueryMethods.GET,
            db_object_type,
            uid,
            self._config.read_timeout,
            payload=payload
        )
        res = self.execute(request)
        if res:
            # Dataclasses are rebuilt from keyword fields; any other type
            # receives the raw response as a single positional argument.
            if dataclasses.is_dataclass(db_object_type):
                return db_object_type(**res)
            else:
                return db_object_type(res)
        else:
            raise ResourceNotFoundError("Resource not found.")

    def get_multiple(self, object_type: Type[T], uid=None, payload=None) -> List[T]:
        """ Fetch a list of DB objects, one ``object_type`` per result item.

        Raises ResourceNotFoundError when the server returns no result list.
        """
        request = self.request(
            QueryMethods.GET,
            object_type,
            uid,
            self._config.read_timeout,
            payload=payload
        )
        result = self.execute(request)
        if result is not None:
            return [object_type(**item) for item in result]
        else:
            raise ResourceNotFoundError(f"[{object_type}] not found for query with uid=[{uid}] and payload=[{payload}]")

    def basic_delete(self, db_object_type: Type[T], uid=None):
        """ Single DB object deleter. """
        request = self.request(
            QueryMethods.DELETE,
            db_object_type,
            uid,
            self._config.read_timeout,
        )
        self.execute(request)

    def basic_setter(self, db_object_type: Type[T], uid, payload):
        """ Single DB object setter. """
        request = self.request(
            QueryMethods.POST,
            db_object_type,
            uid,
            self._config.write_timeout,
            payload=payload,
        )
        res = self.execute(request)
        if res:
            return res
        else:
            raise RequestException("Setting %s with uid %s failed." % (db_object_type, uid))

    def basic_put(self, db_object_type, uid, payload):
        """ Single DB object put request. """
        request = self.request(
            QueryMethods.PUT,
            db_object_type,
            uid,
            self._config.write_timeout,
            payload=payload,
        )
        res = self.execute(request)
        if res:
            return res
        else:
            raise RequestException("Setting %s with uid %s failed." % (db_object_type, uid))

    def request(self, method, db_object_type: Type[T], uid, timeout, payload=None):
        """ Build a Request with auth headers derived from its body. """
        request = Request(method, db_object_type, uid, timeout, self._config.connect_timeout, payload)
        request.headers = self._config.define_headers(request.data)
        return request

    def execute(self, request):
        """ Execute a request and return the "response" field of the JSON body.

        Raises TimeOutError, RequestException, UnknownException, or
        CordException depending on the failure mode.
        """
        logging.info("Request: %s", (request.data[:100] + '..') if len(request.data) > 100 else request.data)
        req = requests.Request(
            method=request.http_method,
            url=self._config.endpoint,
            headers=request.headers,
            data=request.data,
        ).prepare()
        timeouts = (request.connect_timeout, request.timeout)
        # Fix: use the session as a context manager so the connection pool
        # is released on every path -- the original only closed the session
        # on success, and only after check_error_response() could raise.
        with Session() as session:
            session.mount("https://", HTTPAdapter(max_retries=Retry(connect=0)))
            try:
                res = session.send(req, timeout=timeouts)
            except Timeout as e:
                raise TimeOutError(str(e))
            except RequestException as e:
                raise RequestException(str(e))
            except Exception as e:
                raise UnknownException(str(e))
        try:
            res_json = res.json()
        except Exception:
            raise CordException("Error parsing JSON response: %s" % res.text)
        if res_json.get("status") != requests.codes.ok:
            response = res_json.get("response")
            extra_payload = res_json.get("payload")
            # Raises a typed error for the server-reported failure.
            check_error_response(response, extra_payload)
        return res_json.get("response")
|
from stellar_sdk import operation
def add_payment_op(source_account, destination_account, asset, amount):
    """Build a Stellar ``Payment`` operation.

    :param source_account: account id the payment is sent from
    :param destination_account: account id receiving the payment
    :param asset: the ``Asset`` to transfer
    :param amount: amount to transfer, as a string or Decimal
    :return: a ``stellar_sdk.operation.Payment`` instance
    """
    # Fix: ``operation.payment`` is the submodule, not a callable; the
    # operation class is ``operation.Payment(destination, asset, amount,
    # source)``.
    op = operation.Payment(destination_account, asset, amount, source_account)
    return op
|
import itertools as it
import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def make_clf(*args, **kwargs):
    """Build a classifier pipeline: pairwise cross-terms, then CV logistic
    regression.  (*args/**kwargs are accepted but unused.)
    """
    steps = [FunctionTransformer(crossterm), LogisticRegressionCV()]
    return make_pipeline(*steps)
def crossterm(X):
    """Return a matrix whose columns are the products of every pair of
    columns of X (n_samples x C(n_features, 2))."""
    pairs = it.combinations(X.T, 2)
    return np.column_stack([u * v for u, v in pairs])
|
"""Handles reading the dictionary of ItemTypes from the XML file.
This is largely copied from the original author's repository, just a bit
stripped down. In particular, this stripped down version requires the XML file
to be topologically sorted.
https://github.com/Omnifarious/factorio_calc/blob/master/factorio_calc.py
"""
import os
import sys
import re
from xml.etree import ElementTree
from fractions import Fraction as _F
from models import ItemType
def _checkXMLHasNoText(xmlel):
    """Return True when the element carries no non-whitespace text or tail."""
    for chunk in (xmlel.text, xmlel.tail):
        if chunk is not None and chunk.strip() != '':
            return False
    return True
class ItemTypeDb(set):
    """A limited dictionary-like structure with all item types.

    Limited means that it does not work exactly like a dictionary. E.g. you
    cannot check for key's existance using "in" statement.
    """
    def __init__(self, *args, **kargs):
        super().__init__(*args, **kargs)
        # Lazy name -> item cache, filled on the first lookup of each name.
        self._by_name = dict()
    def __getitem__(self, name):
        """Return the item whose ``_name`` equals *name*; raise KeyError if absent."""
        item = self._by_name.get(name)
        if item is not None:
            return item
        for item in self:
            if item._name == name:
                self._by_name[item._name] = item
                return item
        raise KeyError(name)
    @staticmethod
    def _itemId(item):
        """Derive an XML id from the item's name (lowercased, spaces -> '_')."""
        idstr = item._name.lower()
        idstr = re.sub(r'\s+', '_', idstr)
        return idstr
    @staticmethod
    def createFromXML(infile):
        """Parse the items XML database from *infile* and return an ItemTypeDb.

        The file must be topologically sorted: every ingredient is defined
        before the items that use it.

        :raises ValueError: on a malformed or unsupported database.
        """
        newdb = ItemTypeDb()
        ET = ElementTree
        parser = ET.XMLParser()
        # Feed the parser in 4 MiB chunks to bound memory for large files.
        block = infile.read(4 * 1024 * 1024)
        while len(block) > 0:
            parser.feed(block)
            block = infile.read(4 * 1024 * 1024)
        block = None
        tree = parser.close()
        parser = None
        if tree.tag != 'factorio_calc_item_db':
            raise ValueError("Not an XML item database.")
        if tree.attrib.get('version', '1.0') != '1.0':
            raise ValueError(f"Do not know how to handle version "
                             f"{tree.attrib['version']}.")
        if not _checkXMLHasNoText(tree):
            raise ValueError("Invalid XML database.")
        item_idmap = {}
        # Fix: Element.getchildren() was removed in Python 3.9 — iterate
        # the element directly instead.
        for itemel in tree:
            itemid, item = ItemTypeDb.itemFromXML(item_idmap, itemel)
            item_idmap[itemid] = item
            newdb.add(item)
        return newdb
    @staticmethod
    def itemFromXML(item_idmap, itemel):
        """Build one ``(itemid, ItemType)`` pair from an <item> element.

        *item_idmap* maps already-seen ids to their ItemType, and is used to
        resolve ingredient references (hence the topological-sort requirement).
        """
        if itemel.tag != 'item':
            raise ValueError(f"Got element '{itemel.tag}', expecting 'item'.")
        itemid = itemel.attrib['id']
        if not _checkXMLHasNoText(itemel):
            raise ValueError(f"Invalid item {itemid}")
        if itemid in item_idmap:
            raise ValueError(f"Item {itemid} defined twice.")
        name = itemel.attrib.get('name', itemid)
        time = itemel.attrib.get('time', None)
        produced = itemel.attrib.get('produced', None)
        # 'time' and 'produced' must be given together or not at all.
        if (produced is None) != (time is None):
            raise ValueError(f"Invalid item '{itemid}'.")
        if time is not None:
            time = _F(time)
            produced = int(produced)
        ingredients = []
        # Fix: getchildren() removed in Python 3.9 — iterate directly.
        for ingredientel in itemel:
            if ingredientel.tag != 'ingredient':
                raise ValueError(f"Item {itemid} has {ingredientel.tag}")
            ingid = ingredientel.attrib['idref']
            if not _checkXMLHasNoText(ingredientel):
                raise ValueError(f"Invalid ingredient '{ingid}' in '{itemid}'")
            ingcount = int(ingredientel.attrib['count'])
            if ingid not in item_idmap:
                raise ValueError(f"Item '{itemid}' mentions ingredient "
                                 f"'{ingid}' before it's defined.")
            ingredients.append((ingcount, item_idmap[ingid]))
        if (len(ingredients) > 0) and (time is None):
            raise ValueError(f"Item '{itemid}' has ingredients but "
                             "no production time.")
        return (itemid,
                ItemType(name, time, tuple(ingredients), produced))
def load():
    """Load the item database from ``items.xml`` next to this module.

    :return: an ItemTypeDb parsed from the XML file.
    :raises FileNotFoundError: when items.xml does not exist.
    """
    dbdir = os.path.dirname(__file__)
    xml_fname = os.path.join(dbdir, 'items.xml')
    if not os.path.exists(xml_fname):
        # FileNotFoundError is more precise than a bare Exception and is
        # still caught by any existing "except Exception" callers.
        raise FileNotFoundError("No items.xml found")
    # Explicit encoding so parsing does not depend on the locale default.
    with open(xml_fname, 'r', encoding='utf-8') as _item_f:
        return ItemTypeDb.createFromXML(_item_f)
|
"""
Generate regions
"""
import csv
import os
from .base import BaseGen
class Regions(BaseGen):
    """Generates the regions JSON file from the regions CSV."""

    def __init__(self, config):
        super().__init__(config)

    def run(self):
        """Generate regions jsons"""
        csv_path = os.path.join(self.config.csv.base, self.config.csv.path.regions)
        regions = []
        with open(csv_path, encoding="utf8") as csv_file:
            for index, row in enumerate(csv.DictReader(csv_file)):
                if index == 0:
                    # NOTE(review): the first data row is skipped on purpose
                    # in the original — confirm why (duplicate header row?).
                    continue
                regions.append({
                    'id': 57000000 + index - 1,
                    'key': row.get('Name'),
                    'name': row.get('DisplayName'),
                    'isCountry': row.get('IsCountry') == 'TRUE',
                })
        json_path = os.path.join(self.config.json.base, self.config.json.regions)
        self.save_json(regions, json_path)
|
# Filename: ps2_controller.py
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""PyCalc is a simple calculator built using Python and the MVC pattern."""
from functools import partial
from pycalc import ERROR_MSG
# Create a Controller class to connect the GUI and the model
class PyCalcCtrl:
    """PyCalc's Controller: wires the view's widgets to the model."""

    def __init__(self, model, view):
        """Controller initializer."""
        self._model = model
        self._view = view

    def run(self):
        """Run the controller by connecting signals and slots."""
        self._connectSignals()

    def _calculateResult(self):
        """Evaluate the displayed expression and show the result."""
        result = self._model.evaluate(self._view.displayText())
        self._view.setDisplayText(result)

    def _buildExpression(self, sub_exp):
        """Append *sub_exp* to the display, clearing a previous error first."""
        if self._view.displayText() == ERROR_MSG:
            self._view.clearDisplay()
        self._view.setDisplayText(self._view.displayText() + sub_exp)

    def _connectSignals(self):
        """Connect every button (and the Enter key) to its handler."""
        buttons = self._view.buttons
        for label, button in buttons.items():
            if label not in {"=", "C"}:
                button.clicked.connect(partial(self._buildExpression, label))
        buttons["="].clicked.connect(self._calculateResult)
        self._view.display.returnPressed.connect(self._calculateResult)
        buttons["C"].clicked.connect(self._view.clearDisplay)
|
# regular printing: print appends a newline by default
greeting = "Hello, world"
print(greeting)
# printing with a different line-end character: suppress the newline,
# then finish the line with a second call
print(greeting[:7], end='')
print(greeting[7:])
|
#1 import
try:
import configparser
except:
from six.moves import configparser
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import requests
#2 variable related to weather API
# Maps ClimaCell weather_code values to human-readable alert text; only the
# codes listed here are treated as "abnormal" weather worth emailing about.
weather_dict = {'freezing_rain_heavy': 'Heavy rain and snow', 'freezing_rain': 'Rain and snow', 'freezing_rain_light': 'Light rain and snow', 'freezing_drizzle': 'Light drizzle and snow', 'ice_pellets_heavy': 'Heavy ice pellets', 'ice_pellets': 'Normal ice pellets', 'ice_pellets_light': 'Light ice pellets', 'snow_heavy': 'Heavy snow', 'snow': 'Normal snow', 'snow_light': 'Light snow', 'tstorm': 'Thunder storm', 'rain_heavy': 'Heavy rain', 'rain': 'Normal rain', 'rain_light': 'Light rain'}
url = "https://api.climacell.co/v3/weather/nowcast"
# Hourly nowcast for a fixed Singapore location.
# NOTE(review): apikey is a placeholder ("xxxx") — must be configured before use.
querystring = {"lat":"1.29027","lon":"103.851959","unit_system":"si","timestep":"60","start_time":"now","fields":"temp,humidity,weather_code","apikey":"xxxx"}
#3 class
class EmailSender():
    """Reads SMTP credentials from config.ini and emails weather alerts."""
    #4 initialization
    def __init__(self):
        # SMTP settings come from the [email] section of ./config.ini.
        self.cf = configparser.ConfigParser()
        self.cf.read('./config.ini')
        self.sec = 'email'
        self.email = self.cf.get(self.sec, 'email')
        self.host = self.cf.get(self.sec, 'host')
        self.port = self.cf.get(self.sec, 'port')
        self.password = self.cf.get(self.sec, 'password')
    #5 main function to send email
    def SendEmail(self, recipient):
        """Send a weather-alert email to *recipient* (a list of addresses).

        Checks every entry of the hourly nowcast and emails the first
        abnormal (rain/snow) condition found.

        :return: "Success" after sending, "Failed" when no entry is abnormal.
        """
        title = "Home Sweet Home"
        #6 create a new multipart mime object
        msg = MIMEMultipart()
        msg['Subject'] = '[Weather Notification]'
        msg['From'] = self.email
        msg['To'] = ', '.join(recipient)
        #7 call weather API using requests
        response = requests.request("GET", url, params=querystring)
        json_data = response.json()
        #8 loop over each data and check for abnormal weather (rain, snow)
        # Bug fix: the old code returned "Failed" from inside the loop, so
        # only the first forecast entry was ever inspected, and a trailing
        # "break" after the returns was unreachable.
        for i, entry in enumerate(json_data):
            if entry['weather_code']['value'] not in weather_dict:
                continue
            condition = weather_dict[entry['weather_code']['value']]
            if i == 0:
                result = "%s at the moment. Current temperature is " % (condition)
            else:
                result = "%s in %s hour(s) time. Forecasted temperature is " % (condition, i)
            result += '%s%s while the humidity is about %s%s' % (
                entry['temp']['value'], entry['temp']['units'],
                entry['humidity']['value'], entry['humidity']['units'])
            msgText = MIMEText('<b>%s</b><p>%s</p>' % (title, result), 'html')
            msg.attach(msgText)
            #9 authenticate and send email
            # int(): configparser returns the port as a string.
            with smtplib.SMTP(self.host, int(self.port)) as smtpObj:
                smtpObj.ehlo()
                smtpObj.starttls()
                smtpObj.login(self.email, self.password)
                smtpObj.sendmail(self.email, recipient, msg.as_string())
            return "Success"
        return "Failed"
|
# List-indexing demo on a small grocery list.
spożywka = ["chleb", "mleko", "ser", "szynka", "masło"]
print(spożywka)
first = spożywka[0]
last = spożywka[4]
print(first)
print(last)
# Negative index: last element.
print(spożywka[-1])
# Slice with step 4: first and last element.
print(spożywka[::4])
print(first)
print(last)
# Two ways of printing both: concatenation vs. multiple print arguments.
print(first + " " + last)
print(first, last)
#coding=utf-8
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
# br=webdriver.Chrome()
# br.get("https://www.baidu.com")
# br.maximize_window()
# element=br.find_element_by_link_text("新闻")
#Click the element
# ActionChains(br).click(element).perform()
#Press and hold the left mouse button on the element
# element=br.find_element_by_id('s-usersetting-top')
# ActionChains(br).click_and_hold(element).perform()
#Right-click (context-click) the element
# ActionChains(br).context_click(element).perform()
#Double-click the element
# ActionChains(br).double_click(element).perform()
# Drag-and-drop demo against the runoob jQuery UI "droppable" example page.
br=webdriver.Chrome()
br.get("https://www.runoob.com/try/try.php?filename=jqueryui-api-droppable")
br.maximize_window()
# The draggable widgets live inside the result iframe.
br.switch_to.frame('iframeResult')
# NOTE(review): find_element_by_id was removed in Selenium 4 — confirm the
# pinned selenium version, or migrate to find_element(By.ID, ...).
source=br.find_element_by_id('draggable')
target=br.find_element_by_id('droppable')
# Drag the source widget onto the target and perform the queued action.
action=ActionChains(br)
action.drag_and_drop(source,target).perform()
from threading import Thread
import serial, time
class Drill(Thread):
    """Background worker thread that drives drilling hardware over serial.

    Polls every 200 ms: requests work via the get_next_block callback,
    drills each hole listed in the current block, then reports completion
    via block_done. emit_state publishes status strings ('waiting',
    'drilling', 'stopped', 'idle') back to the caller.
    """
    def __init__(self, port, get_next_block, block_done, emit_state):
        Thread.__init__(self)
        self.port = port                      # serial port name
        self.current_block = None             # dict with 'id', 'holes', 'depth'
        self.get_next_block = get_next_block  # callback: request more work
        self.block_done = block_done          # callback: report finished block
        self.emit_state = emit_state          # callback: publish state string
        self.drill_on = True                  # master enable flag
        self.ser = None
        self.baudrate = 9600
        self.connected = False
    # def ser_send(self, command):
    #     try:
    #         ser_send
    def connect(self):
        # Open the serial connection.
        # NOTE(review): self.connected is never set to True, so repeated
        # calls re-open the port — confirm whether that is intended.
        if not self.connected:
            self.ser = serial.Serial(self.port, baudrate=self.baudrate)
    def run(self):
        # Main polling loop; runs for the lifetime of the thread.
        while True:
            time.sleep(0.2)
            if not self.drill_on:
                continue
            if self.current_block == None:
                if not self.drill_on:
                    continue
                # Ask for work; the answer arrives via got_next_block().
                self.get_next_block()
                continue
            if not self.block_on_slide():
                if not self.drill_on:
                    self.emit_state('waiting')
                continue
            if self.current_block["holes"] == []:
                # All holes drilled: report the block and clear it.
                self.emit_state('waiting')
                self.block_done({'id' : self.current_block["id"]})
                self.current_block = None
                continue
            if not self.drill_on:
                continue
            self.emit_state('drilling')
            # Drill the next remaining hole of the current block.
            next_hole = self.current_block["holes"].pop(0)
            self.drill_hole(next_hole, self.current_block["depth"])
    def drill_hole(self, position, depth):
        # Hardware stub: drill one hole at *position* to *depth*.
        print("drilled hole")
        pass
    def block_on_slide(self):
        # Hardware stub: sensor check for whether a block is present.
        return True
    def drill_extend(self):
        print("drill extend")
    def drill_retract(self):
        print("drill retract")
    def drill_moveto(self, pos):
        print("drill moveto " + str(pos))
    def linear_down(self):
        print("linear down")
    def linear_up(self):
        print("linear up")
    def linear_moveto(self, pos):
        print("linear moveto " + str(pos))
    def got_next_block(self, block):
        # Callback target: install the block handed to us by the scheduler.
        self.current_block = block
    def stop_drill(self):
        self.drill_on = False
        self.emit_state('stopped')
        print("stop drill")
    def start_drill(self):
        self.emit_state('idle')
        self.drill_on = True
        print("start drill")
#%%
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates
from numpy.polynomial import Polynomial as P
from numpy.polynomial import Chebyshev as T
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import seaborn as sns
from sklearn.metrics import r2_score
#%%
#%%
# Load mean-error ("lost") measurements for window size w=5 at several λ values.
# NOTE(review): the variable suffixes (_5, _7, _22) do not match the λ value in
# the file names (30t, 15t, 25t) — the names look stale; the file name appears
# to be authoritative (the selections used later match x_axis = 1,2,10,25,30,48).
# NOTE(review): absolute, machine-specific paths — confirm before reuse.
df5_1 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=5/T_egal_t.csv')
df5_2 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=5/T_egal_2t.csv')
df5_5 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=5/T_egal_30t.csv')
df5_7 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=5/T_egal_15t.csv')
df5_10 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=5/T_egal_10t.csv')
df5_22 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=5/T_egal_25t.csv')
df5_48 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=5/T_egal_48t.csv')
#%%
# Same set of λ files for w=6.
df6_1 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=6/T_egal_t.csv')
df6_2 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=6/T_egal_2t.csv')
df6_5 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=6/T_egal_30t.csv')
df6_7 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=6/T_egal_15t.csv')
df6_10 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=6/T_egal_10t.csv')
df6_22 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=6/T_egal_25t.csv')
df6_48 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=6/T_egal_48t.csv')
#%%
# Same set of λ files for w=7.
df7_1 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=7/T_egal_t.csv')
df7_2 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=7/T_egal_2t.csv')
df7_5 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=7/T_egal_30t.csv')
df7_7 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=7/T_egal_15t.csv')
df7_10 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=7/T_egal_10t.csv')
df7_22 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=7/T_egal_25t.csv')
df7_48 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=7/T_egal_48t.csv')
#%%
# Same set of λ files for w=8.
df8_1 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=8/T_egal_t.csv')
df8_2 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=8/T_egal_2t.csv')
df8_5 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=8/T_egal_30t.csv')
df8_7 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=8/T_egal_15t.csv')
df8_10 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=8/T_egal_10t.csv')
df8_22 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=8/T_egal_25t.csv')
df8_48 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=8/T_egal_48t.csv')
#%%
# Same set of λ files for w=9.
df9_1 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=9/T_egal_t.csv')
df9_2 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=9/T_egal_2t.csv')
df9_5 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=9/T_egal_30t.csv')
df9_7 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=9/T_egal_15t.csv')
df9_10 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=9/T_egal_10t.csv')
df9_22 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=9/T_egal_25t.csv')
df9_48 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=9/T_egal_48t.csv')
#%%
# Same set of λ files for w=10.
df10_1 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=10/T_egal_t.csv')
df10_2 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=10/T_egal_2t.csv')
df10_5 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=10/T_egal_30t.csv')
df10_7 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=10/T_egal_15t.csv')
df10_10 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=10/T_egal_10t.csv')
df10_22 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=10/T_egal_25t.csv')
df10_48 = pd.read_csv('/Users/admin/Documents/ML/Thesis/save/Paper/w=10/T_egal_48t.csv')
#%%
def _sorted_means(*frames):
    """Mean of the 'lost' column for each frame, in ascending order."""
    return sorted(frame["lost"].mean() for frame in frames)

# One sorted mean-error series per window size; the selected frames
# correspond to λ = 1, 2, 10, 25, 30, 48 (matching x_axis below).
df5 = _sorted_means(df5_1, df5_2, df5_10, df5_22, df5_5, df5_48)
df6 = _sorted_means(df6_1, df6_2, df6_10, df6_22, df6_5, df6_48)
df7 = _sorted_means(df7_1, df7_2, df7_10, df7_22, df7_5, df7_48)
df8 = _sorted_means(df8_1, df8_2, df8_10, df8_22, df8_5, df8_48)
df9 = _sorted_means(df9_1, df9_2, df9_10, df9_22, df9_5, df9_48)
df10 = _sorted_means(df10_1, df10_2, df10_10, df10_22, df10_5, df10_48)
x_axis = [1, 2, 10, 25, 30, 48]
#%%
# Print each window size's sorted mean-error series.
for series in (df5, df6, df7, df8, df9, df10):
    print(series)
#%%
# Line plot of mean error vs λ for every window size.
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["font.size"] = "12"
line_series = ((df5, "w=5"), (df6, "w=6"), (df7, "w=7"),
               (df8, "w=8"), (df9, "w=9"), (df10, "w=10"))
for series, label in line_series:
    plt.plot(x_axis, series, label=label)
plt.legend(loc='lower right')
plt.xticks(rotation=45, ha='right')
plt.xlabel("λ values")
plt.ylabel("error")
plt.show()
#%%
# Scatter plot of mean error vs λ for every window size.
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["font.size"] = "12"
scatter_series = ((df5, "w=5"), (df6, "w=6"), (df7, "w=7"),
                  (df8, "w=8"), (df9, "w=9"), (df10, "w=10"))
for series, label in scatter_series:
    plt.scatter(x_axis, series, label=label)
plt.legend(loc='lower right')
plt.xticks(rotation=45, ha='right')
plt.xlabel("λ values")
plt.ylabel("error")
plt.show()
#%%
# Single-series plot: mean error vs λ for window size w=8 only.
plt.rcParams["font.family"] = "Times New Roman"
plt.rcParams["font.size"] = "12"
plt.plot(x_axis, df8)
plt.xticks(rotation=45, ha='right')
plt.xlabel("λ value")
plt.ylabel("mean error")
plt.show()
# %%
|
# Nested catalogue: game title -> character class -> character names.
cart={
    '天谕':{'近战':["光刃","圣堂","业刹"],
        '远程':["炎天","玉虚","灵珑","流光"]},
    '阴阳师':{'SSR':["一目连","荒","茨木童子"],
        'SR':["姑获鸟","妖狐","夜叉"],
        'R':["椒图","山兔","雨女"]},
    '王者荣耀':{'法师':["诸葛亮","貂蝉","妲己"],
        '刺客':["兰陵王","荆轲","李白"],
        '射手':["李元芳","马可波罗","百里守约"]}
}
# Three-level interactive menu. At each level: typing a listed key drills
# down, 'q' goes back up one level (break), and 'n' quits the whole menu
# by setting the shared `count` flag checked by every while-loop.
count=False
while not count:
    for i in cart:
        print(i)
    choose1=input('请输入:')
    if choose1 in cart:
        # Second level: categories of the chosen game.
        while not count:
            for i in cart[choose1]:
                print(i)
            choose2=input('请输入:')
            if choose2 in cart[choose1]:
                # Third level: characters of the chosen category.
                while not count:
                    for i in cart[choose1][choose2]:
                        print(i)
                    choose3 = input('请输入:')
                    if choose3=='q':
                        break
                    elif choose3=='n':
                        count=True
            elif choose2 == 'q':
                break
            elif choose2 == 'n':
                count = True
    elif choose1 == 'q':
        break
    elif choose1 == 'n':
        count = True
|
# Fix: converted Python-2 print statements to Python 3 calls and removed a
# stray merge-conflict marker ("<<<<<<< HEAD") that made the file unparseable.
# NOTE(review): verify nothing from the other merge branch was lost.
print("hi")
print("helo")
#this is a brach
# master commit
# test changes
#git hub edit
|
from selenium import webdriver
class Driver_Factory():
    """Factory for locally-started Selenium WebDriver instances."""

    def __init__(self, browser='ff', browser_version=None, os_name=None):
        # browser_version / os_name are accepted for interface
        # compatibility but are not used yet.
        self.browser = browser

    def get_web_driver(self, browser):
        """Return a WebDriver for *browser*, started on this machine."""
        return self.run_local(browser)

    def run_local(self, browser):
        """Start and return the local WebDriver matching *browser*
        (case-insensitive); returns None for unrecognised names."""
        name = browser.lower()
        local_driver = None
        if name in ("ff", "firefox"):
            local_driver = webdriver.Firefox()
            print("")
        elif name == "ie":
            local_driver = webdriver.Ie()
        elif name == "chrome":
            # NOTE(review): '/opt/geckodriver' is Firefox's driver binary —
            # confirm this path really holds a chromedriver.
            local_driver = webdriver.Chrome('/opt/geckodriver')
        elif name == "opera":
            local_driver = webdriver.Opera()
        elif name == "safari":
            local_driver = webdriver.Safari()
        return local_driver
|
#!/usr/bin/env python
import json

# Load the raw toolshed dump and the category id -> name mapping.
infname = 'toolshed_data.json'
with open(infname, 'r') as inf:
    data = json.load(inf)
with open('categories.json', 'r') as catf:
    cats = json.load(catf)

catdict = {cat['id']: cat['name'] for cat in cats}

# Group the names of unrestricted tools by category name.
tools = {}
for entry in data:
    if entry['type'] == 'unrestricted':
        for c in entry['category_ids']:
            tools.setdefault(catdict[c], []).append(entry['name'])

# Python-2 "print >> output" replaced with an explicit write (keeping the
# trailing newline that print added); files are now closed via `with`.
with open('toolshed_categories.json', 'w') as output:
    output.write(json.dumps(tools) + '\n')
|
# Problem: Given the scores of N athletes,
# return their relative ranks, awarding medals to the top 3 scores
class Solution(object):
    def findRelativeRanks(self, nums):
        """
        :type nums: List[int]
        :rtype: List[str]

        Map each score to "Gold/Silver/Bronze Medal" for the top three and
        to its 1-based rank otherwise, preserving the input order.
        """
        medals = ("Gold Medal", "Silver Medal", "Bronze Medal")
        # Rank lookup built from the scores sorted descending; replaces the
        # original sort+reverse dance and drops the unused counter `k`.
        ranks = {}
        for position, score in enumerate(sorted(nums, reverse=True)):
            ranks[score] = medals[position] if position < 3 else position + 1
        return [str(ranks[score]) for score in nums]
# Ad-hoc driver: print each test case alongside its expected answer.
driver = Solution()
t1 = [5, 4, 3, 2, 1]
t2 = [1, 2, 3, 4, 5]
t3 = [15,4,18,19,20,25,99,80,76,10001,0,12]
expected1 = '["Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"]'
expected2 = '["5","4","Bronze Medal","Silver Medal","Gold Medal"]'
expected3 = ["9","11","8","7","6","5","Silver Medal","Bronze Medal","4","Gold Medal","12","10"]
print('\n')
print('atheletes:',t1, '\nranks:',driver.findRelativeRanks(t1),'\nExpected Answer:',expected1)
print('atheletes:',t2, '\nranks:',driver.findRelativeRanks(t2),'\nExpected Answer:',expected2)
print('atheletes:',t3, '\nranks:',driver.findRelativeRanks(t3),'\nExpected Answer:',expected3)
from heapq import heapify, heappop, heappush
# Competitive-programming solution. Input: N followed by 3N integers `a`.
# b[i] = max possible sum of N values kept from the first N+i elements,
# maintained with a min-heap holding the N currently-kept values.
N, *a = map(int, open(0).read().split())
q = a[:N]
heapify(q)
b = [0] * (N + 1)
b[0] = sum(q)
for i in range(N):
    x = q[0]
    # Replace the smallest kept value with a[N+i] when that raises the sum.
    y = max(heappop(q), a[N + i])
    heappush(q, y)
    b[i + 1] = b[i] - x + y
# c[i] = min possible sum of N values kept from the suffix, maintained with
# a max-heap simulated by storing negated values.
q = [-x for x in a[2 * N:]]
heapify(q)
c = [0] * (N + 1)
c[N] = -sum(q)
for i in range(N):
    x = -q[0]
    y = min(-heappop(q), a[2 * N - 1 - i])
    heappush(q, -y)
    c[N - i - 1] = c[N - i] - x + y
# Answer: best split point maximizing (prefix max-sum) - (suffix min-sum).
print(max(b[i] - c[i] for i in range(N + 1)))
|
import requests
import time
import json
from datetime import date, datetime
import re
# VK API access token; empty here — must be filled in before running.
accessToken = ''
api_url = 'https://api.vk.com/method/'
# Cap on how many group members get their full profiles fetched.
NUM_OF_USERS = 50
# Parameters sent with every API call (token + API version).
app_params = {
    'access_token' : accessToken,
    'v' : '5.131'
}
def process_raw_str(string):
    """Replace every non-word character with a space and lowercase the result."""
    cleaned = re.sub(r'[\W]', ' ', string)
    return cleaned.lower()
'''
First, fetch all users of the group
'''
def fetchUsers():
    """Return the member ids of the target VK group."""
    method = 'groups.getMembers'
    local_params = {
        # 'group_id' : 'beryozoviy_markova',
        'group_id' : '174834195'
    }
    response = requests.get(api_url + method, params={**app_params, **local_params})
    return response.json()['response']['items']
'''
Collect profile data for them
'''
def parsePages(ids):
    """Fetch profile data for the first NUM_OF_USERS ids via users.get.

    Adds an 'age' field to every profile: birth year when the full date is
    given, a guess of 20 for university students, otherwise None.
    """
    method = 'users.get'
    local_params = {
        'user_ids' : ','.join(map(str, ids[:NUM_OF_USERS])),
        'fields' : ', '.join(['activities', 'interests', 'bdate', 'occupation', 'sex'])
    }
    res = requests.get(api_url + method, params={**app_params, **local_params})
    data = res.json()['response']
    for item in data:
        if 'bdate' in item:
            parts = item['bdate'].split('.')
            if len(parts) == 3:
                item['age'] = datetime.now().year - int(parts[2])
            else:
                item['age'] = None
        elif 'occupation' in item and item['occupation']['type'] == 'university':
            item['age'] = 20
        else:
            item['age'] = None
    return data
'''
Find the ids of the groups they belong to
'''
def fetchGroups(people):
    """Attach a 'groups' list (id/name/activity/description) to every open
    profile in *people*; closed profiles get an empty list. Mutates in place."""
    for p in people:
        p['groups'] = []
        if 'is_closed' in p and not p['is_closed']:
            method = 'groups.get'
            local_params = {
                'user_id' : p['id']
            }
            res = requests.get(api_url + method, params={**app_params, **local_params})
            groups = res.json()
            # API errors (private lists etc.) simply lack 'response'.
            if 'response' in groups:
                groups = groups['response']['items']
                for group in groups:
                    # One getById call per group to pull description/activity.
                    method = 'groups.getById'
                    local_params = {
                        'group_id': group,
                        'fields' : ','.join(['description', 'activity'])
                    }
                    res = requests.get(api_url + method, params={**app_params, **local_params})
                    g_data = res.json()
                    if 'response' in g_data:
                        r = g_data['response'][0]
                        g = {}
                        if 'activity' in r:
                            g['activity'] = r['activity']
                        if 'description' in r:
                            g['description'] = r['description']
                        g['id'] = r['id']
                        g['name'] = r['name']
                        p['groups'].append(g)
                    # Throttle requests — presumably to stay under the VK
                    # API rate limit (~3 requests/second). TODO confirm.
                    time.sleep(0.6)
# Pipeline: member ids -> enriched profiles -> per-user group lists.
ids = fetchUsers()
people = parsePages(ids)
fetchGroups(people)
print(people)
# Aggregation buckets; the code that fills them is commented out below.
aggregates = {
    'age_agg' : {},
    'groups_agg' : {},
    'working_agg': 0,
    'activities' : {},
    'interests': {}
}
# for item in people:
#     if 'age' in item and item['age'] in aggregates['age_agg']:
#         aggregates['age_agg'][item['age']] += 1
#     elif 'age' in item and item['age'] not in aggregates['age_agg']:
#         aggregates['age_agg'][item['age']] = 1
#     if 'interests' in item and item['interests'] != '':
#         interests = item['interests'].split(', ')
#         for intrst in interests:
#             if intrst in aggregates['interests']:
#                 aggregates['interests'][intrst] += 1
#             else:
#                 aggregates['interests'][intrst] = 1
#     if 'groups' in item and len(item['groups']) != 0:
#         for group in item['groups']:
#             g = process_raw_str(group['name'])
#             if g in aggregates['groups_agg']:
#                 aggregates['groups_agg'][g] += 1
#             else:
#                 aggregates['groups_agg'][g] = 1
#     if 'occupation' in item and item['occupation']['type'] == 'work':
#         aggregates['working_agg'] += 1
#     if 'activities' in item and len(item['activities']) != 0:
#         activities = item['activities'].split(', ')
#         for a in activities:
#             if a in aggregates['activities']:
#                 aggregates['actvities'][a] += 1
#             else:
#                 aggregates['activities'][a] = 1
# aggregates['working_agg'] /= len(people)
# print(aggregates['activities'])
# for p in people:
#     if 'activities' in p:
#         print(p['activities'])
#         print()
#     if 'interests' in p:
#         f.write(p['interests'])
#     if 'groups' in p:
#         print(p['groups'])
#         print()
# Persist the enriched profiles to disk as JSON.
data = json.dumps(people)
f = open("murino.json", "w")
f.write(data)
f.close()
|
# Prompt/greet demo; the name is asked twice on purpose in the original.
name = input('What is your name? ')
print(f'Hi {name}')
main_name = input('What is your name? ')
print(f'Hi {main_name}')
color = input('What is your favourite colour? ')
print(f'Hi {main_name} you like colour {color}')
# for/else demo: print the first multiple of 5, or "not found" if the loop
# finishes without breaking.
nums = [12, 13, 14, 15, 10, 18]
for candidate in nums:
    if candidate % 5 == 0:
        print(candidate)
        break
else:
    print("not found")
print("not found") |
#! /use/bin/python
class Solution:
    def binarySearch(self, A, target):
        """Return an index of *target* in sorted list A, or -1 if absent.

        Fixes from the original: Python-3 syntax (integer division with //,
        no print statement), debug output removed, the duplicate
        ``== target`` / ``else`` branches collapsed, and ``end`` initialised
        to ``len(A) - 1`` — the old ``end = len(A)`` made ``A[end]`` raise
        IndexError when target exceeded every element.
        """
        if not A:
            return -1
        start, end = 0, len(A) - 1
        while start + 1 < end:
            mid = (start + end) // 2
            if A[mid] > target:
                end = mid
            else:
                # A[mid] <= target: move the lower bound up.
                start = mid
        if A[start] == target:
            return start
        if A[end] == target:
            return end
        return -1
if __name__ == "__main__":
    # Python 3: print is a function (the original used Python-2 syntax).
    s = Solution()
    print(s.binarySearch([1, 1, 3, 4, 5, 6], 1))
|
import pickle
import time
import numpy as np
import tensorflow as tf
from collections import Counter
from sklearn.utils import shuffle
from imblearn.under_sampling import RandomUnderSampler
from tensorflow.contrib.layers import flatten
# Load the pickled (train, validation) splits: dataset[0] = (x_train, y_train),
# dataset[1] = (x_val, y_val).
with open('data_us.pickle', mode = 'rb') as f:
    dataset = pickle.load(f)
x_train = dataset[0][0]
y_train = dataset[0][1]
print('x_train type: ', type(x_train))
print('x_train shape: ', np.shape(x_train))
print('y_train type and shape: ', type(y_train), ' ', np.shape(y_train))
x_val = dataset[1][0]
y_val = dataset[1][1]
print('x_val type: ', type(x_val))
print('x_val shape: ', np.shape(x_val))
print('y_val type and shape: ', type(y_val), ' ', np.shape(y_val))
# Number of training examples
n_train = len(y_train)
# Number of validation examples
n_validation = len(y_val)
# Single input vector dimension (assumes x_train is 2-D: (N, d)).
inp_n = np.shape(x_train)[1]
#model parameters
#learning_rate = 0.001
# Learning-rate decay bounds used by lr_fn (decays from lrmax toward lrmin).
lrmin = 0.001
lrmax = 0.003
epochs = 2
batch_size = 9000
print('Initial shape of training set: ',np.shape(x_train))
# In[21]:
#reshaping the array to be given as input to
#for val set only traing set reshaping is done inside the sess object
# Add singleton height/channel axes: (N, d) -> (N, 1, d, 1) for conv2d.
x_val = x_val[:,np.newaxis,:,np.newaxis]
# In[8]:
# Graph inputs: x = feature tensor, y_ = integer labels (one-hot into y),
# l = learning rate fed once per epoch.
x = tf.placeholder(tf.float32, [None,1,inp_n,1])
y_ = tf.placeholder(tf.int32, [None])
y = tf.one_hot(y_, 2)
l = tf.placeholder(tf.float32) #learning rate placeholder
# In[3]:
def lr_fn(i):
    """Learning rate for epoch *i*: exponential decay from lrmax toward lrmin."""
    decay = np.exp(-i / epochs)
    return lrmin + (lrmax - lrmin) * decay
def conv_(x, wts, bias, stride=1, padding='VALID'):
    """2-D convolution (striding along the feature axis) + bias + ReLU."""
    out = tf.nn.conv2d(x, wts, [1, 1, stride, 1], padding)
    out = tf.nn.bias_add(out, bias)
    return tf.nn.relu(out)
def NN_lay(x, wts, bias):
    """Fully-connected layer: x @ wts + bias with ReLU activation."""
    pre_act = tf.add(tf.matmul(x, wts), bias)
    return tf.nn.relu(pre_act)
# In[4]:
#first conv layer variables
cnw_1 = tf.Variable(tf.truncated_normal([1,3,1,8], mean=0, stddev=0.1)) #stride 1
cnb_1 = tf.Variable(tf.zeros([8]))
#second conv layer variables
cnw_2 = tf.Variable(tf.truncated_normal([1,3,8,16], mean=0, stddev=0.1)) #stride 2
cnb_2 = tf.Variable(tf.zeros([16]))
#third conv layer variables
cnw_3 = tf.Variable(tf.truncated_normal([1,3,16,24], mean=0, stddev=0.1)) #stride 3
cnb_3 = tf.Variable(tf.zeros([24]))
#NN wts and bias
# Fully-connected head: 120 -> 96 -> 64 -> 32 -> 2 logits.
# NOTE(review): the 120 input width must equal the flattened conv output,
# which depends on inp_n — confirm if inp_n changes.
nnwts_1 = tf.Variable(tf.truncated_normal([120, 96], mean=0, stddev=0.1))
nnb_1 = tf.Variable(tf.zeros([96]))
nnwts_2 = tf.Variable(tf.truncated_normal([96, 64], mean=0, stddev=0.1))
nnb_2 = tf.Variable(tf.zeros([64]))
nnwts_3 = tf.Variable(tf.truncated_normal([64, 32], mean=0, stddev=0.1))
nnb_3 = tf.Variable(tf.zeros([32]))
nnwts_4 = tf.Variable(tf.truncated_normal([32, 2], mean=0, stddev=0.1))
nnb_4 = tf.Variable(tf.zeros([2]))
# In[5]:
# Conv stack: conv -> conv -> maxpool -> strided conv -> maxpool.
logits = conv_(x, cnw_1, cnb_1)
#c1 = tf.shape(logits)
logits = conv_(logits, cnw_2, cnb_2, padding='VALID')
logits = tf.nn.max_pool(logits, [1,1,3,1], [1,1,3,1], 'VALID')
logits = conv_(logits, cnw_3, cnb_3,stride=2,padding='VALID')
#padding logits
#logits = tf.pad(logits, tf.convert_to_tensor([[0,0],[0,0],[1,1],[0,0]]))
#after_pad = tf.shape(logits)
logits = tf.nn.max_pool(logits, [1,1,3,1], [1,1,3,1], 'VALID')
#m2 = tf.shape(logits)
# In[6]:
# Classifier head; the final layer is linear (softmax applied in the loss).
logits = flatten(logits)
logits = NN_lay(logits, nnwts_1, nnb_1)
logits = NN_lay(logits, nnwts_2, nnb_2)
logits = NN_lay(logits, nnwts_3, nnb_3)
logits = tf.add(tf.matmul(logits, nnwts_4), nnb_4)
# In[9]:
#cross entropy loss is objective function
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
# Optimizer
optimizer = tf.train.AdamOptimizer(learning_rate=l).minimize(cost)
# In[10]:
# Evaluation ops: accuracy plus streaming recall/precision (the latter two
# use local variables, hence init_l below).
correct_pred = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
rec = tf.metrics.recall(tf.argmax(y, 1), tf.argmax(logits,1))
prec = tf.metrics.precision(tf.argmax(y, 1), tf.argmax(logits,1))
# In[11]:
init = tf. global_variables_initializer()
init_l = tf.local_variables_initializer()
#saving the model
save_file = 'New_wts_1/model'
saver = tf.train.Saver()
# In[25]:
with tf.Session() as sess:
    sess.run(init)
    sess.run(init_l)
    st_time = time.time()
    for epoch in range(epochs):
        strt_tym = time.time()
        # Shuffle, then rebalance classes by random undersampling each epoch.
        x_t, y_t = shuffle(x_train, y_train)
        print('------------------------------------------------------')
        print('Epoch: ', epoch)
        us = RandomUnderSampler(ratio=0.7)
        x_t, y_t = us.fit_sample(x_t, y_t)
        print('Resampled dataset composition {}'.format(Counter(y_t)))
        # (N, d) -> (N, 1, d, 1) to match the conv2d input placeholder.
        x_t = x_t[:,np.newaxis,:,np.newaxis]
        no_of_batches = int(len(y_t)/batch_size)
        #print('No of batches: ', no_of_batches)
        # NOTE(review): batches are drawn with replacement via randint, so
        # within an epoch some examples repeat and some are never seen.
        for offset in range(no_of_batches):
            idx = np.random.randint(0, high=len(y_t), size=batch_size)
            batch_x, batch_y = x_t[idx], y_t[idx]
            sess.run(optimizer, feed_dict={x:batch_x, y_:batch_y, l:lr_fn(epoch)})
            loss = sess.run(cost, feed_dict={x:batch_x, y_:batch_y})
        print('Training loss: ', sess.run(cost, feed_dict={x:x_t, y_:y_t}))
        print('Validation Training loss: ', sess.run(cost, feed_dict={x:x_val, y_:y_val}))
        print(' ')
        print('Precision on Validation Set: ', sess.run(prec, feed_dict={x: x_val, y_: y_val}))
        print('Recall on Validation Set: ', sess.run(rec, feed_dict={x: x_val, y_: y_val}))
        print('Validation Accuracy: ', sess.run(accuracy, feed_dict={x:x_val, y_:y_val}))
        # Checkpoint after every epoch.
        saver.save(sess, save_file)
    et_time = time.time()
    print('Total training time (mins): ', (et_time-st_time)/60)
|
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'ui_main.ui'
##
## Created by: Qt User Interface Compiler version 6.1.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
class Ui_MainWindow(object):
    """Auto-generated (Qt UIC) widget layout for the Task Monitor window.

    Builds a title bar, a CPU panel and a MEMORY panel side by side, and a
    credits footer.  The header above warns this file is regenerated from
    'ui_main.ui' — do not hand-edit widget construction here.
    """

    def setupUi(self, MainWindow):
        """Create and lay out every widget on *MainWindow* (called once)."""
        if not MainWindow.objectName():
            MainWindow.setObjectName(u"MainWindow")
        MainWindow.resize(670, 500)
        MainWindow.setMinimumSize(QSize(670, 500))
        MainWindow.setMaximumSize(QSize(16777215, 16777215))
        MainWindow.setStyleSheet(u"background-color: rgba(28, 29, 73, 255);")
        self.centralwidget = QWidget(MainWindow)
        self.centralwidget.setObjectName(u"centralwidget")
        self.verticalLayout = QVBoxLayout(self.centralwidget)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setObjectName(u"verticalLayout")
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        # --- main gradient background frame ---
        self.frame = QFrame(self.centralwidget)
        self.frame.setObjectName(u"frame")
        self.frame.setStyleSheet(u"border: none;\n"
            "background-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:1, stop:0 rgba(42, 44, 111, 255), stop:0.555012 rgba(28, 29, 73, 255));")
        self.frame.setFrameShape(QFrame.StyledPanel)
        self.frame.setFrameShadow(QFrame.Raised)
        self.verticalLayout_2 = QVBoxLayout(self.frame)
        self.verticalLayout_2.setSpacing(0)
        self.verticalLayout_2.setObjectName(u"verticalLayout_2")
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        # --- title bar ---
        self.frame_title = QFrame(self.frame)
        self.frame_title.setObjectName(u"frame_title")
        self.frame_title.setMaximumSize(QSize(16777215, 50))
        self.frame_title.setStyleSheet(u"border:none;\n"
            "background-color:none;")
        self.frame_title.setFrameShape(QFrame.StyledPanel)
        self.frame_title.setFrameShadow(QFrame.Raised)
        self.verticalLayout_3 = QVBoxLayout(self.frame_title)
        self.verticalLayout_3.setObjectName(u"verticalLayout_3")
        self.label = QLabel(self.frame_title)
        self.label.setObjectName(u"label")
        font = QFont()
        font.setPointSize(20)
        font.setBold(False)
        self.label.setFont(font)
        self.label.setStyleSheet(u"color:rgb(60, 231, 195)")
        self.verticalLayout_3.addWidget(self.label)
        self.verticalLayout_2.addWidget(self.frame_title)
        # --- contents row (CPU panel | MEMORY panel) ---
        self.frame_contents = QFrame(self.frame)
        self.frame_contents.setObjectName(u"frame_contents")
        self.frame_contents.setStyleSheet(u"border:none;\n"
            "background-color:none;")
        self.frame_contents.setFrameShape(QFrame.StyledPanel)
        self.frame_contents.setFrameShadow(QFrame.Raised)
        self.horizontalLayout = QHBoxLayout(self.frame_contents)
        self.horizontalLayout.setSpacing(10)
        self.horizontalLayout.setObjectName(u"horizontalLayout")
        self.horizontalLayout.setContentsMargins(5, 0, 5, 5)
        # --- CPU panel ---
        self.frame_cpu = QFrame(self.frame_contents)
        self.frame_cpu.setObjectName(u"frame_cpu")
        self.frame_cpu.setMinimumSize(QSize(0, 0))
        self.frame_cpu.setStyleSheet(u"QFrame {\n"
            "	background-color: none;\n"
            "	border: 5px solid rgb(60, 231, 195);\n"
            "	border-radius: 10px;\n"
            "}\n"
            "\n"
            "QFrame:hover{\n"
            "	border: 5px solid rgb(105, 95, 148);\n"
            "}")
        self.frame_cpu.setFrameShape(QFrame.StyledPanel)
        self.frame_cpu.setFrameShadow(QFrame.Raised)
        self.verticalLayout_4 = QVBoxLayout(self.frame_cpu)
        self.verticalLayout_4.setSpacing(10)
        self.verticalLayout_4.setObjectName(u"verticalLayout_4")
        self.verticalLayout_4.setContentsMargins(10, 50, 10, 0)
        self.label_cpu_title = QLabel(self.frame_cpu)
        self.label_cpu_title.setObjectName(u"label_cpu_title")
        self.label_cpu_title.setMinimumSize(QSize(0, 30))
        self.label_cpu_title.setMaximumSize(QSize(16777215, 30))
        font1 = QFont()
        font1.setPointSize(14)
        font1.setBold(True)
        self.label_cpu_title.setFont(font1)
        self.label_cpu_title.setStyleSheet(u"color:rgb(60, 231, 195);\n"
            "border:none;")
        self.label_cpu_title.setAlignment(Qt.AlignCenter)
        self.verticalLayout_4.addWidget(self.label_cpu_title)
        # Big percentage readout (updated at runtime by the application).
        self.label_cpu_usage_per = QLabel(self.frame_cpu)
        self.label_cpu_usage_per.setObjectName(u"label_cpu_usage_per")
        self.label_cpu_usage_per.setMinimumSize(QSize(0, 80))
        self.label_cpu_usage_per.setMaximumSize(QSize(16777215, 80))
        font2 = QFont()
        font2.setPointSize(50)
        self.label_cpu_usage_per.setFont(font2)
        self.label_cpu_usage_per.setStyleSheet(u"border:none;\n"
            "color: rgb(220, 220, 220);")
        self.label_cpu_usage_per.setAlignment(Qt.AlignCenter)
        self.verticalLayout_4.addWidget(self.label_cpu_usage_per)
        self.label_cpu_model = QLabel(self.frame_cpu)
        self.label_cpu_model.setObjectName(u"label_cpu_model")
        self.label_cpu_model.setMinimumSize(QSize(0, 20))
        self.label_cpu_model.setMaximumSize(QSize(16777215, 20))
        font3 = QFont()
        font3.setPointSize(10)
        self.label_cpu_model.setFont(font3)
        self.label_cpu_model.setStyleSheet(u"border:none;\n"
            "color: rgb(128, 102, 168);")
        self.label_cpu_model.setAlignment(Qt.AlignCenter)
        self.verticalLayout_4.addWidget(self.label_cpu_model)
        # CPU detail area: a column of right-aligned titles next to a column
        # of left-aligned values (speed, physical cores, logical cores).
        self.frame_cpu_detail = QFrame(self.frame_cpu)
        self.frame_cpu_detail.setObjectName(u"frame_cpu_detail")
        self.frame_cpu_detail.setMinimumSize(QSize(0, 0))
        self.frame_cpu_detail.setStyleSheet(u"border: none;")
        self.frame_cpu_detail.setFrameShape(QFrame.StyledPanel)
        self.frame_cpu_detail.setFrameShadow(QFrame.Raised)
        self.horizontalLayout_2 = QHBoxLayout(self.frame_cpu_detail)
        self.horizontalLayout_2.setSpacing(0)
        self.horizontalLayout_2.setObjectName(u"horizontalLayout_2")
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.frame_cpu_detail_column = QFrame(self.frame_cpu_detail)
        self.frame_cpu_detail_column.setObjectName(u"frame_cpu_detail_column")
        self.frame_cpu_detail_column.setFrameShape(QFrame.StyledPanel)
        self.frame_cpu_detail_column.setFrameShadow(QFrame.Raised)
        self.verticalLayout_5 = QVBoxLayout(self.frame_cpu_detail_column)
        self.verticalLayout_5.setObjectName(u"verticalLayout_5")
        self.label_freq_title = QLabel(self.frame_cpu_detail_column)
        self.label_freq_title.setObjectName(u"label_freq_title")
        self.label_freq_title.setMinimumSize(QSize(0, 0))
        self.label_freq_title.setMaximumSize(QSize(16777215, 16777215))
        self.label_freq_title.setFont(font3)
        self.label_freq_title.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_freq_title.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.verticalLayout_5.addWidget(self.label_freq_title)
        self.label_core_phy_title = QLabel(self.frame_cpu_detail_column)
        self.label_core_phy_title.setObjectName(u"label_core_phy_title")
        self.label_core_phy_title.setMaximumSize(QSize(16777215, 16777215))
        self.label_core_phy_title.setFont(font3)
        self.label_core_phy_title.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_core_phy_title.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.verticalLayout_5.addWidget(self.label_core_phy_title)
        self.label_core_log_title = QLabel(self.frame_cpu_detail_column)
        self.label_core_log_title.setObjectName(u"label_core_log_title")
        self.label_core_log_title.setMaximumSize(QSize(16777215, 16777215))
        self.label_core_log_title.setFont(font3)
        self.label_core_log_title.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_core_log_title.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.verticalLayout_5.addWidget(self.label_core_log_title)
        self.horizontalLayout_2.addWidget(self.frame_cpu_detail_column, 0, Qt.AlignTop)
        self.frame_cpu_detail_data = QFrame(self.frame_cpu_detail)
        self.frame_cpu_detail_data.setObjectName(u"frame_cpu_detail_data")
        self.frame_cpu_detail_data.setFrameShape(QFrame.StyledPanel)
        self.frame_cpu_detail_data.setFrameShadow(QFrame.Raised)
        self.verticalLayout_6 = QVBoxLayout(self.frame_cpu_detail_data)
        self.verticalLayout_6.setObjectName(u"verticalLayout_6")
        self.label_freq = QLabel(self.frame_cpu_detail_data)
        self.label_freq.setObjectName(u"label_freq")
        self.label_freq.setFont(font3)
        self.label_freq.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_freq.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.verticalLayout_6.addWidget(self.label_freq)
        self.label_core_phy = QLabel(self.frame_cpu_detail_data)
        self.label_core_phy.setObjectName(u"label_core_phy")
        self.label_core_phy.setFont(font3)
        self.label_core_phy.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_core_phy.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.verticalLayout_6.addWidget(self.label_core_phy)
        self.label_core_log = QLabel(self.frame_cpu_detail_data)
        self.label_core_log.setObjectName(u"label_core_log")
        self.label_core_log.setFont(font3)
        self.label_core_log.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_core_log.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.verticalLayout_6.addWidget(self.label_core_log)
        self.horizontalLayout_2.addWidget(self.frame_cpu_detail_data, 0, Qt.AlignTop)
        self.verticalLayout_4.addWidget(self.frame_cpu_detail)
        self.horizontalLayout.addWidget(self.frame_cpu)
        # --- MEMORY panel (mirrors the CPU panel structure) ---
        self.frame_memory = QFrame(self.frame_contents)
        self.frame_memory.setObjectName(u"frame_memory")
        self.frame_memory.setMinimumSize(QSize(0, 0))
        self.frame_memory.setStyleSheet(u"QFrame {\n"
            "	background-color: none;\n"
            "	border: 5px solid rgb(60, 231, 195);\n"
            "	border-radius: 10px;\n"
            "}\n"
            "\n"
            "QFrame:hover{\n"
            "	border: 5px solid rgb(105, 95, 148);\n"
            "}")
        self.frame_memory.setFrameShape(QFrame.StyledPanel)
        self.frame_memory.setFrameShadow(QFrame.Raised)
        self.verticalLayout_9 = QVBoxLayout(self.frame_memory)
        self.verticalLayout_9.setSpacing(10)
        self.verticalLayout_9.setObjectName(u"verticalLayout_9")
        self.verticalLayout_9.setContentsMargins(10, 50, 10, 0)
        self.label_memory_title = QLabel(self.frame_memory)
        self.label_memory_title.setObjectName(u"label_memory_title")
        self.label_memory_title.setMinimumSize(QSize(0, 30))
        self.label_memory_title.setMaximumSize(QSize(16777215, 30))
        self.label_memory_title.setFont(font1)
        self.label_memory_title.setStyleSheet(u"color:rgb(60, 231, 195);\n"
            "border:none;")
        self.label_memory_title.setAlignment(Qt.AlignCenter)
        self.verticalLayout_9.addWidget(self.label_memory_title)
        self.label_memory_usage_per = QLabel(self.frame_memory)
        self.label_memory_usage_per.setObjectName(u"label_memory_usage_per")
        self.label_memory_usage_per.setMinimumSize(QSize(0, 80))
        self.label_memory_usage_per.setMaximumSize(QSize(16777215, 80))
        self.label_memory_usage_per.setFont(font2)
        self.label_memory_usage_per.setStyleSheet(u"border:none;\n"
            "color: rgb(220, 220, 220);")
        self.label_memory_usage_per.setAlignment(Qt.AlignCenter)
        self.verticalLayout_9.addWidget(self.label_memory_usage_per)
        # Memory detail area: titles column + values column (total/free/used).
        self.frame_memory_detail = QFrame(self.frame_memory)
        self.frame_memory_detail.setObjectName(u"frame_memory_detail")
        self.frame_memory_detail.setMinimumSize(QSize(0, 0))
        self.frame_memory_detail.setStyleSheet(u"border: none;")
        self.frame_memory_detail.setFrameShape(QFrame.StyledPanel)
        self.frame_memory_detail.setFrameShadow(QFrame.Raised)
        self.horizontalLayout_3 = QHBoxLayout(self.frame_memory_detail)
        self.horizontalLayout_3.setSpacing(0)
        self.horizontalLayout_3.setObjectName(u"horizontalLayout_3")
        self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.frame_memory_detail_column = QFrame(self.frame_memory_detail)
        self.frame_memory_detail_column.setObjectName(u"frame_memory_detail_column")
        self.frame_memory_detail_column.setFrameShape(QFrame.StyledPanel)
        self.frame_memory_detail_column.setFrameShadow(QFrame.Raised)
        self.verticalLayout_7 = QVBoxLayout(self.frame_memory_detail_column)
        self.verticalLayout_7.setObjectName(u"verticalLayout_7")
        self.label_memory_total_title = QLabel(self.frame_memory_detail_column)
        self.label_memory_total_title.setObjectName(u"label_memory_total_title")
        self.label_memory_total_title.setFont(font3)
        self.label_memory_total_title.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_memory_total_title.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.verticalLayout_7.addWidget(self.label_memory_total_title)
        self.label_memory_free_title = QLabel(self.frame_memory_detail_column)
        self.label_memory_free_title.setObjectName(u"label_memory_free_title")
        self.label_memory_free_title.setFont(font3)
        self.label_memory_free_title.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_memory_free_title.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.verticalLayout_7.addWidget(self.label_memory_free_title)
        self.label_memory_used_title = QLabel(self.frame_memory_detail_column)
        self.label_memory_used_title.setObjectName(u"label_memory_used_title")
        self.label_memory_used_title.setFont(font3)
        self.label_memory_used_title.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_memory_used_title.setAlignment(Qt.AlignRight|Qt.AlignTrailing|Qt.AlignVCenter)
        self.verticalLayout_7.addWidget(self.label_memory_used_title)
        self.horizontalLayout_3.addWidget(self.frame_memory_detail_column, 0, Qt.AlignTop)
        self.frame_memory_detail_data = QFrame(self.frame_memory_detail)
        self.frame_memory_detail_data.setObjectName(u"frame_memory_detail_data")
        self.frame_memory_detail_data.setFrameShape(QFrame.StyledPanel)
        self.frame_memory_detail_data.setFrameShadow(QFrame.Raised)
        self.verticalLayout_8 = QVBoxLayout(self.frame_memory_detail_data)
        self.verticalLayout_8.setObjectName(u"verticalLayout_8")
        self.label_memory_total = QLabel(self.frame_memory_detail_data)
        self.label_memory_total.setObjectName(u"label_memory_total")
        self.label_memory_total.setFont(font3)
        self.label_memory_total.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_memory_total.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.verticalLayout_8.addWidget(self.label_memory_total)
        self.label_memory_free = QLabel(self.frame_memory_detail_data)
        self.label_memory_free.setObjectName(u"label_memory_free")
        self.label_memory_free.setFont(font3)
        self.label_memory_free.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_memory_free.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.verticalLayout_8.addWidget(self.label_memory_free)
        self.label_memory_used = QLabel(self.frame_memory_detail_data)
        self.label_memory_used.setObjectName(u"label_memory_used")
        self.label_memory_used.setFont(font3)
        self.label_memory_used.setStyleSheet(u"border:none;\n"
            "color: rgb(60, 231, 195);")
        self.label_memory_used.setAlignment(Qt.AlignLeading|Qt.AlignLeft|Qt.AlignVCenter)
        self.verticalLayout_8.addWidget(self.label_memory_used)
        self.horizontalLayout_3.addWidget(self.frame_memory_detail_data, 0, Qt.AlignTop)
        self.verticalLayout_9.addWidget(self.frame_memory_detail)
        self.horizontalLayout.addWidget(self.frame_memory)
        self.verticalLayout_2.addWidget(self.frame_contents)
        self.verticalLayout.addWidget(self.frame)
        # --- credits footer ---
        self.frame_credits = QFrame(self.centralwidget)
        self.frame_credits.setObjectName(u"frame_credits")
        self.frame_credits.setMinimumSize(QSize(0, 25))
        self.frame_credits.setMaximumSize(QSize(16777215, 25))
        self.frame_credits.setStyleSheet(u"border: none;\n"
            "background-color:none;")
        self.frame_credits.setFrameShape(QFrame.StyledPanel)
        self.frame_credits.setFrameShadow(QFrame.Raised)
        self.verticalLayout_10 = QVBoxLayout(self.frame_credits)
        self.verticalLayout_10.setSpacing(0)
        self.verticalLayout_10.setObjectName(u"verticalLayout_10")
        self.verticalLayout_10.setContentsMargins(0, 0, 0, 0)
        self.label_2 = QLabel(self.frame_credits)
        self.label_2.setObjectName(u"label_2")
        self.label_2.setMinimumSize(QSize(0, 0))
        self.label_2.setMaximumSize(QSize(16777215, 25))
        font4 = QFont()
        font4.setBold(True)
        self.label_2.setFont(font4)
        self.label_2.setStyleSheet(u"color: rgb(128, 102, 168);")
        self.verticalLayout_10.addWidget(self.label_2)
        self.verticalLayout.addWidget(self.frame_credits)
        # --- menu bar and final wiring ---
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QMenuBar(MainWindow)
        self.menubar.setObjectName(u"menubar")
        self.menubar.setGeometry(QRect(0, 0, 670, 22))
        MainWindow.setMenuBar(self.menubar)
        self.retranslateUi(MainWindow)
        QMetaObject.connectSlotsByName(MainWindow)
    # setupUi

    def retranslateUi(self, MainWindow):
        """Set all user-visible (translatable) texts; placeholder values are
        replaced at runtime by the application."""
        MainWindow.setWindowTitle(QCoreApplication.translate("MainWindow", u"Task Monitor", None))
        self.label.setText(QCoreApplication.translate("MainWindow", u"Task Monitor", None))
        self.label_cpu_title.setText(QCoreApplication.translate("MainWindow", u"CPU", None))
        self.label_cpu_usage_per.setText(QCoreApplication.translate("MainWindow", u"50%", None))
        self.label_cpu_model.setText(QCoreApplication.translate("MainWindow", u"Intel(R) Core(TM) i7-8700 CPU @ 3.20GHz", None))
        self.label_freq_title.setText(QCoreApplication.translate("MainWindow", u"Speed :", None))
        self.label_core_phy_title.setText(QCoreApplication.translate("MainWindow", u"Core (Physical) :", None))
        self.label_core_log_title.setText(QCoreApplication.translate("MainWindow", u"Core (logical) :", None))
        self.label_freq.setText(QCoreApplication.translate("MainWindow", u"3.1920 GHz", None))
        self.label_core_phy.setText(QCoreApplication.translate("MainWindow", u"6", None))
        self.label_core_log.setText(QCoreApplication.translate("MainWindow", u"12", None))
        self.label_memory_title.setText(QCoreApplication.translate("MainWindow", u"MEMORY", None))
        self.label_memory_usage_per.setText(QCoreApplication.translate("MainWindow", u"50%", None))
        self.label_memory_total_title.setText(QCoreApplication.translate("MainWindow", u"Total :", None))
        self.label_memory_free_title.setText(QCoreApplication.translate("MainWindow", u"Free :", None))
        self.label_memory_used_title.setText(QCoreApplication.translate("MainWindow", u"Used :", None))
        self.label_memory_total.setText(QCoreApplication.translate("MainWindow", u"32 GB", None))
        self.label_memory_free.setText(QCoreApplication.translate("MainWindow", u"19 GB", None))
        self.label_memory_used.setText(QCoreApplication.translate("MainWindow", u"13 GB", None))
        self.label_2.setText(QCoreApplication.translate("MainWindow", u"Create By Overload", None))
    # retranslateUi
|
# -*- coding: utf-8 -*-
'''
:Author: stransky
'''
import morphjongleur.util.auto_string
import numpy
class Compartment(object):
    '''
    One node of a neuron morphology tree: id, parent id, radius and xyz
    position.  Tree links (parent/children) are filled in later by
    Morphology._create_tree().

    @see http://web.mit.edu/neuron_v7.1/doc/help/neuron/neuron/classes/python.html#Section
    '''

    def __init__(self, compartment_id, compartment_parent_id, radius=1.0, x=float('nan'), y=float('nan'), z=float('nan'), morphology=None):  # , compartment_key=None
        '''
        Store ids and geometry; missing coordinates default to NaN.
        '''
        self.compartment_id = int(compartment_id)
        self.compartment_parent_id = int(compartment_parent_id)
        self.radius = float(radius)
        self.xyz = numpy.array([float(x), float(y), float(z)])
        self.parent = None      # parent is mapped automatically by the morphology
        self.children = []      # necessary for Morphology._create_tree()
        self._morphology = morphology
        self._info = [None]     # one-slot list: lazily created info record
        self._groups = []
        self.synapse = None

    def neuron_create(self, parent, parent_location=1, self_location=0):
        '''
        Create this compartment's NEURON section and connect it to *parent*.

        @see http://web.mit.edu/neuron_v7.1/doc/help/neuron/neuron/geometry.html
        '''
        import neuron
        self.neuron_h_Section = neuron.h.Section()
        # ??? must be defined BEFORE too many connections !!!
        if self.length == 0:
            # BUGFIX: `length` is a read-only property; write the `_length`
            # cache directly (assigning to the property raised AttributeError).
            self._length = numpy.finfo(self.length).tiny
            import sys
            sys.stderr.write("distance from %s to its parent %s = 0\n" % (self.__repr__(), self.parent.__repr__()))  # or re-attach the compartment instead
        self.neuron_h_Section.L = self.length
        self.neuron_h_Section.diam = self.radius + parent.radius  # 2 * self.radius
        # self.neuron_h_Section.Ra =
        # self.neuron_h_Section.ri =
        # TODO: if not ( numpy.isnan(self.x) and numpy.isnan(self.y) and numpy.isnan(self.z) ) :
        #    self.neuron_h_Section.x3d = self.x
        #    self.neuron_h_Section.y3d = self.y
        #    self.neuron_h_Section.z3d = self.z
        self.neuron_h_Section.connect(parent.neuron_h_Section, parent_location, self_location)  # connect self(0) with parent(1)

    @property
    def info(self):
        # Lazily create the associated Compartment_info record.
        if self._info[0] is None:
            self._info[0] = Compartment_info()
        return self._info[0]

    # x/y/z expose the components of the xyz array as scalar attributes.
    @property
    def x(self):
        return self.xyz[0]

    @x.setter
    def x(self, x):
        self.xyz[0] = x

    @property
    def y(self):
        return self.xyz[1]

    @y.setter
    def y(self, y):
        self.xyz[1] = y

    @property
    def z(self):
        return self.xyz[2]

    @z.setter
    def z(self, z):
        self.xyz[2] = z

    @staticmethod
    def huge():
        # Sentinel compartment whose radius compares bigger than any real one.
        return Compartment(-7, -7, radius=float('+inf'))

    @staticmethod
    def tiny():
        # Sentinel compartment whose radius compares smaller than any real one.
        return Compartment(-7, -7, radius=float('-inf'))

    def distance_euclid(self, compartment):
        """Euclidean distance between the two compartments' centres."""
        return (((compartment.x - self.x) ** 2)
                + ((compartment.y - self.y) ** 2)
                + ((compartment.z - self.z) ** 2)
                ) ** 0.5

    @property
    def length(self):
        """
        parent_distance: Euclidean distance to the parent compartment
        (0 for the root).  Cached in `_length`.
        """
        if '_length' not in vars(self) or self._length is None:
            if self.parent is None:
                self._length = 0
            else:
                self._length = self.distance_euclid(self.parent)
        return self._length

    def lca(self, compartment):
        '''
        Lowest common ancestor of self and *compartment*.

        Walks both paths towards the root in lock-step, remembering visited
        ids, and returns at the first id seen from both sides.
        '''
        left = self
        right = compartment
        leftp = {left.compartment_id: True}
        rightp = {right.compartment_id: True}
        while left != right:
            if left.compartment_parent_id > 0:
                left = left.parent
                leftp[left.compartment_id] = True
                if rightp.get(left.compartment_id) is not None:
                    return left
            if right.compartment_parent_id > 0:
                right = right.parent
                rightp[right.compartment_id] = True
                if leftp.get(right.compartment_id) is not None:
                    return right
        return left

    def distance_path(self, compartment):
        """Path length (sum of segment lengths) between self and *compartment*."""
        ancestor = self.lca(compartment)
        left = self
        right = compartment
        dist = 0
        while left != ancestor:
            dist += left.length
            left = left.parent
        while right != ancestor:
            dist += right.length
            right = right.parent
        return dist

    @staticmethod
    def write_svg(svg_file, compartment_iterables, colors=['#000000'], x=0, y=1, name="morphology"):
        '''
        Write the compartments as filled circles into a Scalable Vector
        Graphics file; *x*/*y* select which coordinates become the SVG axes.
        '''
        import itertools
        import math
        # Duplicate every iterable: one pass for the bounding box, one for drawing.
        compartment_iterables1 = []
        compartment_iterables2 = []
        for compartment_iterable in compartment_iterables:
            compartment_iterable1, compartment_iterable2 = itertools.tee(compartment_iterable)
            compartment_iterables1.append(compartment_iterable1)
            compartment_iterables2.append(compartment_iterable2)
        compartment_iterables = compartment_iterables1
        stream = open(svg_file, "w")
        stream.write('<?xml version="1.0"?>\n')
        stream.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
        xyz_min = [float("+inf"), float("+inf"), float("+inf")]
        xyz_max = [float("-inf"), float("-inf"), float("-inf")]
        radius_max = float("-inf")
        for compartment_iterable in compartment_iterables2:
            for c in compartment_iterable:
                for d in [x, y]:  # range(3):
                    if xyz_min[d] > c.xyz[d]:
                        xyz_min[d] = c.xyz[d]
                    if xyz_max[d] < c.xyz[d]:
                        xyz_max[d] = c.xyz[d]
                if radius_max < c.radius:
                    radius_max = c.radius
        # Shift all coordinates into the positive quadrant, with a margin of
        # the largest radius so no circle is clipped.
        xyz_shift = [0, 0, 0]
        for d in [x, y]:  # range(3):
            xyz_shift[d] = -xyz_min[d] + radius_max
        width = int(xyz_max[x] - xyz_min[x] + 2 * radius_max + 0.5)
        text_width = 28.46 + (math.log10(0.9 * width)) * 7.65  # empirical label width
        height = int(xyz_max[y] - xyz_min[y] + 2 * radius_max + 0.5)
        stream.write('<svg xmlns="http://www.w3.org/2000/svg" width="%i" height="%i">\n' % (width, height))
        stream.write('<g id="%s">\n' % name)
        for i in range(len(compartment_iterables)):
            color = colors[i % len(colors)]
            for c in compartment_iterables[i]:  # id="%i" c.compartment_id,
                stream.write('<circle cx="%f" cy="%f" r="%f" fill="%s" />\n' % (c.xyz[x] + xyz_shift[x], c.xyz[y] + xyz_shift[y], c.radius, color))
        bar_width = 10 ** int(math.log10(0.9 * width))  # scale bar: largest fitting power of ten
        stream.write('</g>\n')
        stream.write('<g id="legend">')
        stream.write('<line x1="%i" y1="%i" x2="%i" y2="%i" style="stroke:black;stroke-width:1"/>\n' % (width - bar_width, height - 1, width, height - 1))
        stream.write('<text x="%i" y="%i">%i µm</text>\n' % (width - text_width, height - 2, bar_width))
        stream.write('</g>\n')
        stream.write('</svg>\n')
        stream.close()

    @staticmethod
    def plot(compartment_iterables, colors, x=0, y=1, picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        """Scatter-plot several compartment collections in 2-D (axes picked by x/y)."""
        import matplotlib.pyplot  # 00ff00 00800 00ff80 008080
        import numpy
        for i in range(len(compartment_iterables)):
            # BUGFIX: wrap around the colour list (like write_svg) instead of
            # raising IndexError when there are fewer colours than series.
            color = colors[i % len(colors)]
            xs = []
            ys = []
            ss = []
            for c in compartment_iterables[i]:
                xyz = (c.x, c.y, c.z)
                xs.append(xyz[x])
                ys.append(xyz[y])
                ss.append(numpy.pi * c.radius ** 2)
            # Convert the areas to display-unit marker sizes.
            ss = numpy.diff(matplotlib.pyplot.axes().transData.transform(list(zip([0] * len(ss), ss))))
            matplotlib.pyplot.scatter(xs, ys, s=ss, c=color, marker='.', edgecolors=color)  # '. o
        matplotlib.pyplot.axes().set_aspect('equal', 'datalim')
        if picture_file is not None:
            for picture_format in picture_formats:
                try:
                    matplotlib.pyplot.savefig(picture_file + '.' + picture_format, format=picture_format, dpi=600, transparent=True)
                except Exception:
                    import traceback
                    print(picture_format)
                    print(traceback.format_exc())
        else:
            matplotlib.pyplot.show()
        matplotlib.pyplot.close()

    @staticmethod
    def plot3d(compartment_iterable, color='black', ratio=None, picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        """3-D scatter plot of one compartment collection.

        Compartments carrying a `color` attribute are coloured individually
        (numeric value); otherwise the uniform *color* argument is used.
        """
        import mpl_toolkits.mplot3d
        import matplotlib.pyplot
        import numpy
        fig = matplotlib.pyplot.figure()
        ax = fig.add_subplot(111, projection='3d')
        xs = []
        ys = []
        zs = []
        cs = []
        ss = []
        colorbar = True
        for c in compartment_iterable:
            xs.append(c.x)
            ys.append(c.y)
            zs.append(c.z)
            if 'color' in vars(c):
                cs.append(float(c.color))
            else:
                colorbar = False
                cs.append(color)
            ss.append(numpy.pi * c.radius ** 2)
        ss = numpy.diff(ax.transData.transform(list(zip([0] * len(ss), ss))))
        p = ax.scatter(xs, ys, zs, c=cs, marker='.', s=ss)
        if ratio is not None:  # matplotlib.figure.figaspect(arg)
            fig = matplotlib.pyplot.gcf()
            fig.set_size_inches(ratio[0], ratio[1])
        if False and colorbar:  # colour bar deliberately disabled
            matplotlib.pyplot.figure().colorbar(p)
        ax.set_aspect('equal')
        if picture_file is not None:
            for picture_format in picture_formats:
                try:
                    matplotlib.pyplot.savefig(picture_file + '.' + picture_format, format=picture_format, dpi=600, transparent=True)
                except Exception:
                    import traceback
                    print(picture_format)
                    print(traceback.format_exc())
        else:
            matplotlib.pyplot.show()
        matplotlib.pyplot.close()

    @staticmethod
    def plot_color(compartment_iterables, x=0, y=1, picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        '''
        2-D plot where every compartment must carry a numeric `color` attribute.
        '''
        import matplotlib.pyplot  # 00ff00 00800 00ff80 008080
        import matplotlib.patches  # BUGFIX: Circle below needs the patches module imported explicitly
        fig = matplotlib.pyplot.figure()
        for i in range(len(compartment_iterables)):
            xs = []
            ys = []
            ss = []
            cs = []
            for c in compartment_iterables[i]:
                xyz = (c.x, c.y, c.z)
                xs.append(xyz[x])
                ys.append(xyz[y])
                ss.append(c.radius)
                cs.append(float(c.color))
                matplotlib.pyplot.axes().add_artist(
                    matplotlib.patches.Circle(
                        xy=(c.x, c.y),
                        radius=c.radius,
                        color=c.color
                    )
                )
            p = matplotlib.pyplot.scatter(xs, ys, s=ss, c=cs, marker='.', edgecolors=cs)  # '. o
        matplotlib.pyplot.axes().set_aspect('equal', 'datalim')
        if picture_file is not None:
            for picture_format in picture_formats:
                try:
                    matplotlib.pyplot.savefig(picture_file + '.' + picture_format, format=picture_format, transparent=True)  # , dpi=600
                except Exception:
                    import traceback
                    print(picture_format)
                    print(traceback.format_exc())
        else:
            matplotlib.pyplot.show()
        matplotlib.pyplot.close()

    def plot_distance(self, compartment_iterable, name='', xlim=None, ylim=None, color='#000000', ratio=None, picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        """Convenience wrapper: radius-vs-path-distance plot around this compartment."""
        Compartment.plot_distances(compartment_iterables=[compartment_iterable], centers=[self], names=[name], xlim=xlim, ylim=ylim, colors=[color], ratio=ratio, picture_file=picture_file, picture_formats=picture_formats)

    @staticmethod
    def plot_distances(compartment_iterables, centers, names, xlim=None, ylim=None, colors=['#000000'], ratio=None, picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        """Scatter radius against path distance to *centers*, one series per iterable."""
        import itertools
        import matplotlib.pyplot
        legends = []
        for (compartment_iterable, center, color, name) in zip(compartment_iterables, centers, colors, names):
            compartment_iterable, compartment_iterable2 = itertools.tee(compartment_iterable)
            radii = [c.radius for c in compartment_iterable]
            distances = [c.distance_path(center) for c in compartment_iterable2]
            matplotlib.pyplot.scatter(distances, radii, c=color, marker='.', edgecolors=color)
            legends.append("%s" % (name))
            if False:
                # Statistics overlay (deliberately disabled); scipy is only
                # imported here so the method works without it.
                import scipy.stats.stats
                r_mean = numpy.mean(radii)
                r_std = numpy.std(radii)
                r_gmean = scipy.stats.stats.gmean(radii)
                r_hmean = scipy.stats.stats.hmean(radii)
                r_median = numpy.median(radii)
                matplotlib.pyplot.axhline(y=r_mean, color='blue')
                xmin, xmax, ymin, ymax = matplotlib.pyplot.axis()
                width = r_std / (ymax - ymin)
                center = (r_mean - ymin) / (ymax - ymin)
                matplotlib.pyplot.axvline(x=.5 * (xmax - xmin), ymin=center - .5 * width, ymax=center + .5 * width, color='red')
                matplotlib.pyplot.axhline(y=r_gmean, color='violet')
                matplotlib.pyplot.axhline(y=r_hmean, color='orange')
                matplotlib.pyplot.axhline(y=r_median, color='black')
                d_mean = numpy.mean(distances)
                d_std = numpy.std(distances)
                if numpy.min(distances) <= 0:
                    # geometric/harmonic means are undefined for non-positive values
                    d_gmean = 0
                    d_hmean = 0
                else:
                    d_gmean = scipy.stats.stats.gmean(distances)
                    d_hmean = scipy.stats.stats.hmean(distances)
                d_median = numpy.median(distances)
                matplotlib.pyplot.axvline(x=d_mean, color='blue')
                xmin, xmax, ymin, ymax = matplotlib.pyplot.axis()
                width = d_std / (xmax - xmin)
                center = (d_mean - xmin) / (xmax - xmin)
                matplotlib.pyplot.axhline(y=.5 * (xmax - xmin), xmin=center - .5 * width, xmax=center + .5 * width, color='red')
                matplotlib.pyplot.axvline(x=d_gmean, color='violet')
                matplotlib.pyplot.axvline(x=d_hmean, color='orange')
                matplotlib.pyplot.axvline(x=d_median, color='black')
                matplotlib.pyplot.legend( [
                    'r mean %f' % ( r_mean ),
                    'r std %f' % ( r_std ),
                    'r gmean %f' % ( r_gmean ),
                    'r hmean %f' % ( r_hmean ),
                    'r median %f' % ( r_median ),
                    'd mean %f' % ( d_mean ),
                    'd std %f' % ( d_std ),
                    'd gmean %f' % ( d_gmean ),
                    'd hmean %f' % ( d_hmean ),
                    'd median %f' % ( d_median ),
                    'compartments'
                    ] )
        matplotlib.pyplot.ylabel(u'radius [µm]')
        if ylim is not None:
            matplotlib.pyplot.ylim((0, ylim))
        matplotlib.pyplot.xlabel(u'distance [µm]')
        if xlim is not None:
            matplotlib.pyplot.xlim((0, xlim))
        # matplotlib.pyplot.legend( legends )
        if ratio is not None:  # matplotlib.figure.figaspect(arg)
            fig = matplotlib.pyplot.gcf()
            fig.set_size_inches(ratio[0], ratio[1])
        if picture_file is not None:
            for picture_format in picture_formats:
                try:
                    matplotlib.pyplot.savefig(picture_file + '.' + picture_format, format=picture_format, transparent=True)
                except Exception:
                    # BUGFIX: traceback was not imported here -> NameError on failure.
                    import traceback
                    print(picture_format)
                    print(traceback.format_exc())
        else:
            matplotlib.pyplot.show()
        matplotlib.pyplot.close()

    def __repr__(self):
        return "Compartment(%i, %i, %f, %f,%f,%f)" % (
            self.compartment_id, self.compartment_parent_id, self.radius, self.x, self.y, self.z)

    def __str__(self):
        return """<Compartment(
    compartment_key = '%s'
    compartment_id = %i
    compartment_parent_id = %i
    radius = %f
    %s groups = [
    %s ]
)>""" % (
            str(self.compartment_key if 'compartment_key' in vars(self) else ''),
            int(self.compartment_id),
            int(self.compartment_parent_id),
            float(self.radius),
            str(self.info if self.info != None else ''),
            " ,\n".join(map(str, self._groups)),
        )
class Morphology(object):
    '''
    Model of a simulatable Neuron.

    Wraps a tree of Compartment objects plus recording metadata. The tree
    structure (parent/children links, id->compartment map, root, biggest
    compartment, leaf and branch-point caches) is built lazily by
    _create_tree() on first access of the corresponding property.

    To check the morphology with NEURON gui:
    >>> from neuron import gui

    NOTE(review): Python 2 code (has_key, print statements).
    '''
    # Compartment class used for members of this morphology.
    _type_compartment = Compartment
    def __init__(self, name, file_origin, description, datetime_recording, compartments=[]):
        '''
        Constructor.

        name                identifier of the recording/model
        file_origin         path of the file this morphology was parsed from
        description         free-text description
        datetime_recording  timestamp of the recording
        compartments        optional list of Compartment objects

        NOTE(review): mutable default argument; guarded by the
        `compartments == []` check below so the shared default list itself is
        never stored on the instance.
        '''
        #assert len(compartments) == 0
        #self.morphology_key = morphology_key
        self.name = name
        self.file_origin = file_origin
        self.description = description
        self.datetime_recording = datetime_recording
        if compartments == []:#or list(compartments) to prevent static list
            self._compartments = []
        else:
            self._compartments = compartments
        # One-element list, presumably for ORM mapping — TODO confirm.
        self._info = [None]
        self._groups = []
        #TODO: map orm
        self._root = None
        self._biggest = None # biggest compartment (probable part of soma)
        self._leafs = []
        self._branch_points = []
        #not orm mapped
        self._compartments_map = {}
    @property
    def info(self):
        # Lazily created MorphologyInfo record.
        if self._info[0] == None:
            self._info[0] = MorphologyInfo()
        return self._info[0]
    def _create_tree(self):
        # Build parent/children links, the id->compartment map, the root and
        # the biggest (non-root) compartment. Idempotent: a populated map
        # short-circuits.
        if vars(self).has_key('_compartments_map') and self._compartments_map != {}:
            return
        self._compartments_map = {}
        # Seed 'biggest' with the first compartment that is not the root.
        self._biggest = self._compartments[0] if self._compartments[0].compartment_parent_id != -1 else self._compartments[1]
        for c in self.compartments:
            self._compartments_map[ c.compartment_id ] = c
            c.children = []
            if(c.radius > self._biggest.radius and c.compartment_parent_id != -1):
                self._biggest = c
        for c in self.compartments:
            # Guard against ids parsed as strings instead of ints.
            assert c.compartment_parent_id != "-1"
            if c.compartment_parent_id != -1:
                c.parent = self._compartments_map[ c.compartment_parent_id ]
                c.parent.children.append( c )
            else:
                # Root candidate; warn if more than one root is present.
                if vars(self).has_key('_root') and self._root != None:
                    import sys
                    print >> sys.stderr, "multiple roots: self.root %s != %s" % (self.root , c)
                self._root = c
    def neuron_create(self):
        # Instantiate NEURON h.Section objects for the whole tree. The root
        # and its first child share one section (soma-like); remaining root
        # children attach at parent location 0, deeper compartments at the
        # parent's location 1. Idempotent via the neuron_h_Section check.
        import neuron
        if vars(self.root.children[0]).has_key('neuron_h_Section'):
            return
        todo_stack = []
        root = self.root
        first_child = root.children[0]
        root.neuron_h_Section = neuron.h.Section()
        first_child.neuron_h_Section = root.neuron_h_Section
        root.neuron_h_Section.L = first_child.length
        root.neuron_h_Section.diam = root.radius + first_child.radius # 2 * first_child.radius
        todo_stack.extend( first_child.children )
        for c in root.children[1:]:
            c.neuron_create( first_child, parent_location=0, self_location=0 ) #connect c 0 with parent(0)
            todo_stack.extend( c.children )
        #deepth-first
        while len( todo_stack ) > 0:
            c = todo_stack.pop()
            c.neuron_create( c.parent, parent_location=1, self_location=0 ) #connect c 0 with parent(1)
            todo_stack.extend( c.children )
    @property
    def compartments_map(self):
        # id -> Compartment dictionary, built on demand.
        if not vars(self).has_key('_compartments_map') or self._compartments_map == {}:
            self._create_tree()
        return self._compartments_map
    @property
    def root(self):
        # The compartment whose parent id is -1.
        if not vars(self).has_key('_root') or self._root == None:
            self._create_tree()
        return self._root
    @property
    def biggest(self):
        # Largest-radius non-root compartment (probable part of the soma).
        if not vars(self).has_key('_biggest') or self._biggest == None:
            self._create_tree()
        return self._biggest
    @property
    def root_biggest_child(self):
        # Root child with the largest radius.
        # NOTE(review): the comparison below pits a radius against a
        # Compartment object (`c.radius > self._root_biggest_child`); this
        # looks like a bug — probably `.radius` was intended on the right.
        if not vars(self).has_key('_root_biggest_child') or self._root_biggest_child == None:
            self._create_tree()
            self._root_biggest_child = self._root.children[0]
            for c in self._root.children:
                if(c.radius > self._root_biggest_child):
                    self._root_biggest_child = c
        return self._root_biggest_child
    @property
    def terminal_tips(self):
        # Generator over leaf compartments: no children, or a root (no
        # parent) with exactly one child. Results cached in self._leafs.
        if not vars(self).has_key('_leafs'):
            self._leafs = []
            #self._branch_points = []
        if self._leafs == []:
            self._create_tree()
            for c in self.compartments:
                if len(c.children) == 0 or len(c.children) == 1 and c.parent == None:
                    self._leafs.append(c)
#                elif len(c.children) > 1:
#                    self.branch_points.append(c)
        for leaf in self._leafs:
            yield(leaf)
    @property
    def number_of_terminal_tips(self):
        # Exhaust the terminal_tips generator to populate the cache, then count.
        if not vars(self).has_key('_leafs') or self._leafs == []:
            for l in self.terminal_tips:
                pass
        return len(self._leafs)
    @property
    def terminal_tips_biggest(self):
        # Terminal tip with the largest radius (seeded with a tiny dummy).
        tb = Compartment.tiny()
        for t in self.terminal_tips:
            if tb.radius < t.radius:
                tb = t
        return tb
    @property
    def branch_points(self):
        # Generator over branch points: more than two children, or more than
        # one child for a non-root compartment. Cached in self._branch_points.
        if not vars(self).has_key('_branch_points'):
            self._branch_points = []
            #self._leafs = []
        if self._branch_points == []:
            self._create_tree()
            for c in self.compartments:
#                if len(c.children) == 0:
#                    self._leafs.append(c)
                if len(c.children) > 2 or len(c.children) > 1 and c.parent != None:
                    self._branch_points.append(c)
        for branch_point in self._branch_points:
            yield(branch_point)
    @property
    def branches(self):
        # Map each terminal tip / branch point to the chain of compartments
        # between it and the next branch point towards the root (inclusive);
        # also stores that branch point as leaf.parent_node. Cached.
        if not vars(self).has_key('_branches') or self._branches == None or self._branches == []:
            self._branches = {}
            for leafset in [self.terminal_tips,self.branch_points]:
                for leaf in leafset:
                    if leaf.parent == None:
                        continue
                    compartment = leaf
                    cs = []
                    #upper vertexdistances
                    cs.append(leaf)
                    c = compartment.parent
                    # Walk down the unbranched chain towards the next branch
                    # point. NOTE(review): appends the fixed `compartment`
                    # (== leaf) each iteration rather than `c` — looks
                    # suspicious; confirm intent.
                    while len(c.children) == 1 and c.parent != None:# or len(c.children) = 2 and c.parent == None:
                        cs.append(compartment)
                        c = c.parent
                    #lower vertex
                    branch_point = c
                    cs.append(branch_point)
                    leaf.parent_node = branch_point
                    self._branches[leaf] = cs
            # (two earlier commented-out prototype implementations of this
            # traversal were removed here; see version-control history)
        return self._branches
    @property
    def number_of_branch_points(self):
        # Exhaust the branch_points generator to populate the cache, then count.
        if not vars(self).has_key('_branch_points') or self._branch_points == []:
            for b in self.branch_points:
                pass
        return len(self._branch_points)
    @property
    def plebs(self):
        # Branch points with more than two children.
        # NOTE(review): reads self._branch_points directly, so this yields
        # nothing unless branch_points was iterated first.
        for branch_point in self._branch_points:
            if len(branch_point.children) > 2:
                yield(branch_point)
    @property
    def compartments(self):
        '''
        generator over compartments
        '''
        for compartment in self._compartments:
            yield(compartment)
    @property
    def number_of_compartments(self):
        # Total compartment count.
        return len(self._compartments)
    @property
    def non_root_compartments(self):
        '''
        generator over compartments that have a parent (everything but the root)
        '''
        if not vars(self).has_key('_root') or self._root == None:
            self._create_tree()
        for compartment in self.compartments:
            if compartment.parent != None:
                yield(compartment)
    def get_compartment(self, compartment_id):
        '''
        in order to be conform with Model definition, it is possible to get a compartment by id.
        '''
        return self.compartments_map[ compartment_id ];
    def add_compartment(self, compartment):
        """
        Append a compartment to this morphology.
        NOTE(review): tree caches are NOT invalidated/refreshed here.
        """
        self._compartments.append(compartment)
        #compartment = Compartment(parent, id, length)
        #parent.children[id] = compartment
    def subtree(self, compartment):
        # Generator over `compartment` and all of its descendants
        # (depth-first). Accepts either a Compartment or an integer id.
        if not vars(self).has_key('_compartments_map') or self._compartments_map == {}:
            self._create_tree()
        if type(compartment) == type(1):
            compartment = self.get_compartment(compartment)
        todo_stack = []
        yield( compartment )
        todo_stack.append( compartment )
        while len( todo_stack ) > 0:
            c = todo_stack.pop()
            todo_stack.extend( c.children )
            for cc in c.children:
                yield( cc )
    def plot_distance_distribution(self, center, name='', xlim=None, ylim=None, color='#000000', mcolors='#000000', bins=20, ratio=None, picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        # Convenience wrapper: histogram of path distances of all
        # compartments from `center` (delegates to the static variant).
        Morphology.plot_distance_distributions(compartment_iterables=[self.compartments], centers=[center], mcolors=[mcolors], names=[name], colors=[color], bins=bins, xlim=xlim, ylim=ylim, ratio=ratio, picture_file=picture_file, picture_formats=picture_formats)
    @staticmethod
    def plot_distance_distributions(compartment_iterables, centers, names=[''], colors=['#000000'], mcolors=['#000000'], bins=20, xlim=None, ylim=None, ratio=None, picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        # Overlayed histograms of compartment path distances from `centers`.
        # Each iterable is zipped with a center, color, mean-line color and
        # name. Saves picture_file.<fmt> for each format if given, otherwise
        # shows the figure interactively.
        import matplotlib
        #matplotlib.rc('text', usetex=True): error with names
        legends = []
        for (compartment_iterable,center,color,mcolor,name) in zip(compartment_iterables,centers,colors,mcolors,names):
            x = [c.distance_path(center) for c in compartment_iterable]
            if len(x) == 0:
                import sys
                print >> sys.stderr, "iterable list has 0 elements"
                continue
            mean = numpy.mean(x)
            std = numpy.std(x)
            legends.append(u"µ %i µm\nσ %i µm" % (round(mean),round(std)))
            matplotlib.pyplot.axvline(x=mean, color=mcolor, label='mean'+name)
            if xlim != None:#TODO: isnumber
                matplotlib.pyplot.hist(x, bins=range(0,xlim,bins), normed=0, color=color, edgecolor=color, alpha=0.6)
            else:
                matplotlib.pyplot.hist(x, bins, normed=0, color=color, edgecolor=color, alpha=0.6)
        #matplotlib.pyplot.title('Endpoints of %s' % (name.replace('_',' ')) )
        #print 'distribution of %s : mean=%f, std=%f' % (name, mean, std)
        matplotlib.pyplot.grid(True, color='lightgrey')
        matplotlib.pyplot.ylabel('#')#%
        if ylim != None:
            matplotlib.pyplot.ylim((0,ylim))
        matplotlib.pyplot.xlabel(u'distance [µm]')
        if xlim != None:
            matplotlib.pyplot.xlim((0,xlim))
        matplotlib.pyplot.legend( legends )
        if ratio != None:#matplotlib.figure.figaspect(arg)
            fig = matplotlib.pyplot.gcf()
            fig.set_size_inches(ratio[0],ratio[1])
        if(picture_file != None):
            for picture_format in picture_formats:
                matplotlib.pyplot.savefig(picture_file+'.'+picture_format, format=picture_format, transparent=True)
        else:
            matplotlib.pyplot.show()
        matplotlib.pyplot.close('all')
    def plot(self, x=0, y=1, color='#000000', picture_file=None, picture_formats=['png', 'pdf', 'svg']):
        # 2D projection plot of all compartments (axes chosen by x/y indices).
        Compartment.plot([self.compartments], colors=[color], x=x, y=y, picture_file=picture_file, picture_formats=picture_formats)
    def write_svg(self, svg_file, color='#000000', x=0, y=1):
        '''
        write Scalable Vector Graphics file
        '''
        Compartment.write_svg(svg_file=svg_file, compartment_iterables=[self.compartments], colors=[color], x=x, y=y)
    def __repr__(self):
        # Constructor-style representation.
        return "Morphology('%s', '%s', '%s', '%s')" % (
            str(self.name), str(self.file_origin), str(self.description), str(self.datetime_recording) )
    def __str__(self):
        # Verbose multi-line dump including optional key, info, groups and
        # analysis attributes (defaulting to '' when absent).
        return """<Morphology(
morphology_key = '%s'
name = '%s'
file_origin = '%s'
description = '%s'
datetime_recording = '%s'
%s groups = [
%s ]
%s
)>""" % (
            str(self.morphology_key if vars(self).has_key('morphology_key') else ''),
            str(self.name),
            str(self.file_origin),
            str(self.description),
            str(self.datetime_recording),
            str( self.info if self.info != None else ''),
#            " ,\n".join(map(str, self._groups)) if self.__dict__.has_key('_groups') else '',
            " ,\n".join(map(str, self._groups)),
            str( self.analysis if self.__dict__.has_key('analysis') else '')
        )
class Star(Morphology):
    '''
    n-dendrites = n+1-Compartments Model
    This class will produce Neuron with simple star formation
    with a standard soma (L=40 um, diam=20 um) with identical dendrites connected on opposite sites of the
    soma.
    For the dendrites the following parameters can be changed:
    * dendrite_Ra:
    * dendrite_length: length of each dendrite
    * dendrite_diameter: diameter of each dendrite
    * dendrite_nseg:
    '''
    def __init__(self, medials=1, laterals=1, use_passive_channes=True, gp=0.004, E=-60, Ra=200,
                 soma_Ra=1, soma_length=40, soma_diameter=20, soma_nseg=10,
                 dendrite_Ra=1, dendrite_length=150, dendrite_diameter=3, dendrite_nseg=int(150/10)):
        '''
        consisting of `medials` medial dendrite and `laterals` lateral dendrite
        soma_Ra: Axial Resistivity (Ra): 200 [Ohm * cm]
        soma_L in µm; stored as a float number
        soma_diam diameter (soma.diam): [µm]
        soma_nseg: stored as an integer

        NOTE(review): use_passive_channes, gp, E, Ra and the soma_* geometry
        parameters are accepted but never used below — confirm whether
        channel insertion / soma sizing was intended.
        '''
        self.name = "Star_%i_%i" % (medials, laterals)
        import neuron
        #Morphology.__init__(self);
        # soma (compartment: neuron.h.Section() )
        self.soma = Compartment(1,-1, radius=soma_diameter/2.0);
        self.medial_dendrites = [];
        self.lateral_dendrites = [];
        for k in xrange(int(medials)): # medial dendrites (even ids 2, 4, ...)
            self.medial_dendrites.append( Compartment(2*k+2, 1) )
        assert len(self.medial_dendrites) == medials
        for k in xrange(int(laterals)): # lateral dendrites (odd ids 3, 5, ...)
            self.lateral_dendrites.append( Compartment(2*k+3, 1) )
        assert len(self.lateral_dendrites) == laterals
        #http://code.activestate.com/recipes/52235-calling-a-superclasss-implementation-of-a-method/
        self._compartments = []
        self._compartments.extend(self.medial_dendrites)
        self._compartments.extend(self.lateral_dendrites)
        self.soma.neuron_h_Section = neuron.h.Section()
        # Configure every dendrite section. The soma is appended only AFTER
        # this loop, so it deliberately keeps its own (unconfigured) section.
        for c in self.compartments:
            c.neuron_h_Section = neuron.h.Section()
            c.neuron_h_Section.L = dendrite_length;
            c.neuron_h_Section.diam = dendrite_diameter
            c.neuron_h_Section.nseg = dendrite_nseg
            c.neuron_h_Section.Ra = dendrite_Ra
        self._compartments.append(self.soma)
        for medial_dendrite in self.medial_dendrites:
            medial_dendrite.neuron_h_Section.connect(self.soma.neuron_h_Section, 1, 0) # connect soma(1) with medial_dendrite(0)
        for lateral_dendrite in self.lateral_dendrites:
            lateral_dendrite.neuron_h_Section.connect(self.soma.neuron_h_Section, 0, 0) # connect soma(0) with lateral_dendrites(0)
    def get_compartment(self, compartment_id):
        '''
        in order to be conform with Model definition, it is possible to get a compartment by id.
        compartment_id = 0 returns soma
        compartment_id =-1 returns lateral dendrite
        compartment_id = 1 returns medial dendrite
        '''
        if(compartment_id < 0):
            return self.lateral_dendrites[0];
        if(compartment_id > 0):
            return self.medial_dendrites[0];
        if(compartment_id == 0):
            return self.soma;
    def __repr__(self):
        # Constructor-style representation (extra parameters intentionally omitted).
        return 'Star(medials=%i, laterals=%i)' % ( #, use_passive_channes=%s, gp=%f, E=%f, Ra=%f, soma_Ra=%f, soma_L=%f, soma_diam=%f, soma_nseg=%f, dendrite_Ra=%f, dendrite_length=%f, dendrite_diameter=%f, dendrite_nseg=%i)' % (
            len(self.medial_dendrites), len(self.lateral_dendrites)
        );
    def __str__(self):
        # Human-readable summary.
        return '<Star has %i medial dendrites and %i lateral dendrites>' % (
            len(self.medial_dendrites), len(self.lateral_dendrites)
        );
@morphjongleur.util.auto_string.auto_string
# Empty ORM-mapped record type; the auto_string decorator presumably
# generates its string representation — TODO confirm decorator semantics.
class MorphologyGroups(object):
    pass
@morphjongleur.util.auto_string.auto_string
# Empty ORM-mapped record type (see MorphologyGroups); attributes are
# attached externally, presumably by the ORM mapper — TODO confirm.
class CompartmentGroups(object):
    pass
@morphjongleur.util.auto_string.auto_string
# Container for derived morphology statistics; instantiated lazily by
# Morphology.info. Attributes are attached externally.
class MorphologyInfo(object):
    pass
@morphjongleur.util.auto_string.auto_string
# Container for derived per-compartment statistics; attributes are attached
# externally (compare Compartment_info below for the property-based variant).
class CompartmentInfo(object):
    pass
@morphjongleur.util.auto_string.auto_string
# Read-only, lazily-defaulted geometric metrics of a compartment. Each
# property returns NaN until some other component has filled the backing
# `_<name>` attribute; setters always refuse assignment (values are
# calculated elsewhere); deleters drop the cached backing attribute.
# NOTE(review): the class docstring below contains %f/%i placeholders and is
# presumably consumed by the auto_string decorator as a format template —
# do not edit it without confirming the decorator's behavior.
class Compartment_info(object):
    """
    parent_radius = %f,
    length = %f,
    cylindric_volume = %f,
    frustum_volume = %f,
    cylindric_lateral_area = %f,
    frustum_lateral_area = %f,
    frustum_length = %f,
    #children = %i
    """
    @property
    def parent_radius(self):
        # Radius of the parent compartment; NaN until computed.
        if not vars(self).has_key('_parent_radius') or self._parent_radius == None:
            self._parent_radius = float('nan')
        return self._parent_radius
    @parent_radius.setter
    def parent_radius(self, value):raise AttributeError("cannot change calculated information")
    @parent_radius.deleter
    def parent_radius(self): del self._parent_radius
    @property
    def length(self):
        # Distance to the parent compartment; NaN until computed.
        if not vars(self).has_key('_length') or self._length == None:
            self._length = float('nan')
        return self._length
    @length.setter
    def length(self, value):raise AttributeError("cannot change calculated information")
    @length.deleter
    def length(self): del self._length
    @property
    def cylindric_volume(self):
        # Volume when modelled as a cylinder; NaN until computed.
        if not vars(self).has_key('_cylindric_volume') or self._cylindric_volume == None:
            self._cylindric_volume = float('nan')
        return self._cylindric_volume
    @cylindric_volume.setter
    def cylindric_volume(self, value):raise AttributeError("cannot change calculated information")
    @cylindric_volume.deleter
    def cylindric_volume(self): del self._cylindric_volume
    @property
    def frustum_volume(self):
        # Volume when modelled as a conical frustum; NaN until computed.
        if not vars(self).has_key('_frustum_volume') or self._frustum_volume == None:
            self._frustum_volume = float('nan')
        return self._frustum_volume
    @frustum_volume.setter
    def frustum_volume(self, value):raise AttributeError("cannot change calculated information")
    @frustum_volume.deleter
    def frustum_volume(self): del self._frustum_volume
    @property
    def cylindric_lateral_area(self):
        # Lateral surface area of the cylinder model; NaN until computed.
        if not vars(self).has_key('_cylindric_lateral_area') or self._cylindric_lateral_area == None:
            self._cylindric_lateral_area = float('nan')
        return self._cylindric_lateral_area
    @cylindric_lateral_area.setter
    def cylindric_lateral_area(self, value):raise AttributeError("cannot change calculated information")
    @cylindric_lateral_area.deleter
    def cylindric_lateral_area(self): del self._cylindric_lateral_area
    @property
    def frustum_lateral_area(self):
        # Lateral surface area of the frustum model; NaN until computed.
        if not vars(self).has_key('_frustum_lateral_area') or self._frustum_lateral_area == None:
            self._frustum_lateral_area = float('nan')
        return self._frustum_lateral_area
    @frustum_lateral_area.setter
    def frustum_lateral_area(self, value):raise AttributeError("cannot change calculated information")
    @frustum_lateral_area.deleter
    def frustum_lateral_area(self): del self._frustum_lateral_area
    @property
    def frustum_length(self):
        # Slant length of the frustum model; NaN until computed.
        if not vars(self).has_key('_frustum_length') or self._frustum_length == None:
            self._frustum_length = float('nan')
        return self._frustum_length
    @frustum_length.setter
    def frustum_length(self, value):raise AttributeError("cannot change calculated information")
    @frustum_length.deleter
    def frustum_length(self): del self._frustum_length
    @property
    def children(self):
        # Number of child compartments.
        # NOTE(review): defaults to NaN rather than 0 for a count — confirm.
        if not vars(self).has_key('_children') or self._children == None:
            self._children = float('nan')
        return self._children
    @children.setter
    def children(self, value):raise AttributeError("cannot change calculated information")
    @children.deleter
    def children(self): del self._children
if __name__ == '__main__':
    # Demo driver: load four bee ("mitsubachi") morphologies from the
    # project's PostgreSQL database via the ORM mapper, then plot each
    # morphology's endpoint histogram and a combined property overview.
#    import morphjongleur.io.swc
    morphologies = []
    # Alternative: parse SWC files directly instead of using the database:
#    for swc in ['../../data/test.swc','../../data/H060602DB_10_2_zentai_.swc','../../data/H060602VB_10_2_zentai_.swc','../../data/H060607DB_10_2(zentai).swc','../../data/H060607VB_10_2(zentai).swc']:
#        print swc
#        morphology = morphjongleur.model.morphology.Morphology.swc_parse(swc)
    from morphjongleur.io.database import Database
    import morphjongleur.orm.morphology
    db = Database(
        db_name='postgresql://hal08.g-node.pri/morphjongleur',
        exec_role='morphjokey_admin',#TODO: select does not find table!
        exec_path='mitsubachi'
    )
    # must be mapped before Object is created
    mapping = morphjongleur.orm.morphology.Mapper( db )
    mapping.orm_map()
    for i in [3,5,4,6]:#[H060602DB_10_2_zentai_','H060607DB_10_2(zentai)','H060602VB_10_2_zentai_','H060607VB_10_2(zentai)']
        morphology = mapping.load_morphology(i)
        morphologies.append(morphology)
        morphology.plot_endpoints_histogramm(xlim=(0, 1000), ylim=(0, 100), picture_file='/tmp/mitsubachi_endpoints_'+str(morphology.name), picture_formats=['svg'])#
    # Combined plot of all loaded morphologies (uses the last instance).
    morphology.plot_all_properties(morphologies=morphologies, picture_file='/tmp/mitsubachi_', picture_formats=['svg'])
# ---- (separator between concatenated source files) ----
import re
from lxml import etree
import time
class Post(object):
    # Draft helper for forum/group posting; all actions are unimplemented
    # stubs. `s` is presumably an authenticated HTTP session and `uid` the
    # user id — TODO confirm against the caller.
    def __init__(self,uid,s):
        self.s=s
        self.uid=uid
    # Publish a post to the selected group (stub).
    def posting(self,group,uid,s):
        pass
    # Fetch the posts already published by this user (stub).
    def readyPosts(self,uid,s):
        pass
    # Bump a post back to the top (stub).
    def topPost(self):
        pass
#!/usr/bin/python
"""
Starter code for exploring the Enron dataset (emails + finances);
loads up the dataset (pickled dict of dicts).
The dataset has the form:
enron_data["LASTNAME FIRSTNAME MIDDLEINITIAL"] = { features_dict }
{features_dict} is a dictionary of features associated with that person.
You should explore features_dict as part of the mini-project,
but here's an example to get you started:
enron_data["SKILLING JEFFREY K"]["bonus"] = 5600000
"""
import pickle

# Load the dataset. Pickle files must be opened in binary mode ("rb"); the
# original used text mode "r" and leaked the file handle from an inline
# open() — the context manager closes it deterministically.
with open("../final_project/final_project_dataset.pkl", "rb") as dataset_file:
    enron_data = pickle.load(dataset_file)

# How many people, and how many features does one record carry?
print(len(enron_data))
print(len(enron_data[next(iter(enron_data))]))

# Count persons of interest (POI).
num_poi = 0
for person in enron_data:
    if enron_data[person]['poi']:
        num_poi += 1
print(num_poi)

# Spot checks of individual records.
print(enron_data["Prentice james".upper()]["total_stock_value"])
print(enron_data["Colwell Wesley".upper()]["from_this_person_to_poi"])
print(enron_data["Skilling Jeffrey K".upper()]["exercised_stock_options"])
print("total payments")
print(enron_data["skilling jeffrey k".upper()]["total_payments"])
print(enron_data["lay kenneth l".upper()]["total_payments"])
print(enron_data["fastow andrew s".upper()]["total_payments"])

# How many people have a known (non-"NaN") salary / email address?
num_valid_salary = 0
num_valid_email = 0
for person in enron_data:
    if str(enron_data[person]["salary"]) != "NaN":
        num_valid_salary += 1
    if str(enron_data[person]["email_address"]) != "NaN":
        num_valid_email += 1
print("Num valid salary:", num_valid_salary)
print("Num valid email:", num_valid_email)

# Fraction of people whose total_payments is unknown.
num_invalid_total_payment = 0
for person in enron_data:
    if str(enron_data[person]["total_payments"]) == "NaN":
        num_invalid_total_payment += 1
print("Invalid total payment:", num_invalid_total_payment)
print("%:", float(num_invalid_total_payment) / len(enron_data))

# Same fraction, restricted to POIs.
num_invalid_total_payment_poi = 0
for person in enron_data:
    if enron_data[person]["poi"] and str(enron_data[person]["total_payments"]) == "NaN":
        num_invalid_total_payment_poi += 1
print("Invalid total payments for poi %:", float(num_invalid_total_payment_poi) / num_poi)
import math
import hashlib
def leftRotate(n, d):
    """Rotate the 8-bit value ``n`` left by ``d`` bits (result masked to one byte)."""
    width = 8
    rotated = ((n << d) & 0xFF) | (n >> (width - d))
    return rotated
def rightRotate(n, d):
    """Rotate the 8-bit value ``n`` right by ``d`` bits (result masked to one byte)."""
    width = 8
    rotated = (n >> d) | ((n << (width - d)) & 0xFF)
    return rotated
def ByteIntArrayToHex(byteintarray):
    """Render a sequence of byte values (ints 0-255) as a lowercase hex string.

    Each byte becomes exactly two hex digits ('0f' for 15). Replaces the
    original quadratic string concatenation and manual zero-padding with a
    single join over '%02x'-formatted bytes.
    """
    return ''.join('%02x' % byteint for byteint in byteintarray)
def HexToByteIntArray(hexString):
    """Parse a hex string into a list of byte values (two digits per byte).

    Inverse of ByteIntArrayToHex. A trailing odd digit is ignored, matching
    the original implementation's len // 2 pairing.
    """
    return [int(hexString[2 * i:2 * i + 2], 16) for i in range(len(hexString) // 2)]
def StringToByteIntArray(string):
    """Convert a string to a list of integer code points (bytes for ASCII).

    Input : string
    Output: list of ints, one per character, via ord().
    """
    return [ord(char) for char in string]
def ByteIntArrayToString (byteint_array):
    """Convert a list of integer code points back into a string.

    Input : list of ints (bytes for ASCII)
    Output: string, one character per value, via chr().
    """
    return "".join(map(chr, byteint_array))
def OpenFileAsByteIntArray(filename):
    """Read the file ``filename`` and return its contents as a list of ints (0-255).

    Input : filename
    Output: list of integer byte values of the file's contents

    Reads the whole file in one call instead of the original one-byte-at-a-
    time loop, and uses a context manager so the handle is always closed.
    Iterating a bytes object yields ints, matching the original per-byte
    int.from_bytes(..., byteorder='little').
    """
    with open(filename, "rb") as input_file:
        data = input_file.read()
    return list(data)
def EncryptImage(image_byteintarray):
    """Encrypt a byte array with a chained XOR + left-rotate(3) stream cipher.

    Each output byte is leftRotate(prev ^ byte, 3), where prev starts at 255
    and then carries the previous ciphertext byte (CBC-like chaining).
    """
    previous = 255
    ciphertext = []
    for plain_byte in image_byteintarray:
        cipher_byte = leftRotate(previous ^ plain_byte, 3)
        ciphertext.append(cipher_byte)
        previous = cipher_byte
    return ciphertext
def DecryptText(text_byteintarray):
    """Invert EncryptImage: right-rotate(3) then XOR with the chained value.

    prev starts at 255 and then carries the previous *ciphertext* byte,
    mirroring the encryption chaining exactly.
    """
    previous = 255
    plaintext = []
    for cipher_byte in text_byteintarray:
        unrotated = rightRotate(cipher_byte, 3)
        plaintext.append(previous ^ unrotated)
        previous = cipher_byte
    return plaintext
# Algorithm for non-continuous blobbing
from lucidreader import LucidFile
import numpy as np
import math
class BlobFinder:
    """Flood-fill blob detector over a 256x256 pixel frame.

    A blob is the set of non-zero pixels reachable through overlapping
    search squares of side ``search_radius``. Pixels are zeroed as they are
    consumed, so a frame can only be scanned once.
    """
    def square(self, x, y, size):
        """Return the in-bounds (x, y) pairs of the size x size square centred on (x, y)."""
        # BUG FIX: integer floor division. Under Python 3, '/' produced a
        # float half_size, making every coordinate a float and breaking the
        # frame[x][y] indexing in add().
        half_size = (size - 1) // 2
        x, y = x - half_size, y - half_size
        pixels = []
        for i in range(size):
            for j in range(size):
                if (x + i < 0 or y + j < 0) or (x + i > 255 or y + j > 255):
                    continue  # Can't have out of bounds coordinates
                else:
                    pixels.append((x + i, y + j))
        return pixels
    def add(self, x, y):
        """Recursively absorb every non-zero neighbour of (x, y) into self.blob."""
        self.frame[x][y] = 0  # Pixel has already been processed so can be set to 0
        close_region = self.square(x, y, self.SQUARE_SIZE)
        for pixel in close_region:
            if self.frame[pixel[0]][pixel[1]] > 0:
                self.blob.append((pixel[0], pixel[1]))
                self.add(pixel[0], pixel[1])
    def find_blobs(self):
        """Scan the whole frame and return the list of Blob objects found."""
        blobs = []
        self.blob = None  # Holds currently active blob
        for x in range(256):
            for y in range(256):
                active_pixel = self.frame[x][y]
                if active_pixel > 0:
                    # Create new blob seeded at this pixel.
                    self.blob = [(x, y)]
                    self.add(x, y)
                    self.blob = Blob(self.blob)
                    blobs.append(self.blob)
                    self.frame[x][y] = 0
        return blobs
    def write_blob_file(self, filename):
        """Write one blob pixel list per line to *filename*."""
        file_obj = open(filename, 'w')
        try:
            for blob in self.find_blobs():
                file_obj.write(str(blob.pixel_list) + "\n")
        finally:
            # Guarantee the handle is released even if find_blobs() raises.
            file_obj.close()
    def __init__(self, frame, search_radius):
        """frame: 256x256 indexable of numbers; search_radius: side of the search square."""
        self.SQUARE_SIZE = search_radius
        self.frame = frame
# Class for storing blobs, and calculating their attributes
class Blob:
    """A detected blob: its pixel list, its centroid and an enclosing radius."""
    def __init__(self, pixels):
        self.pixel_list = pixels
        # Centroid: arithmetic mean of the pixel coordinates.
        xs = [p[0] for p in pixels]
        ys = [p[1] for p in pixels]
        self.centroid = (float(sum(xs)) / len(xs), float(sum(ys)) / len(ys))
        # Radius: distance from the centroid to the farthest pixel, padded by
        # half a pixel (so single-pixel blobs never get radius 0), rounded up.
        farthest = 0
        for p in pixels:
            dist = math.hypot(abs(p[0] - self.centroid[0]), abs(p[1] - self.centroid[1]))
            if dist > farthest:
                farthest = dist
        self.radius = math.ceil(farthest + 0.5)
    def relativise(self):
        """Translate the pixel list so its minimum coordinates map to (1, 1)."""
        min_x, min_y = 256, 256
        for px, py in self.pixel_list:
            if px < min_x:
                min_x = px
            if py < min_y:
                min_y = py
        self.pixel_list = [((px - min_x) + 1, (py - min_y) + 1) for px, py in self.pixel_list]
# ---- (separator between concatenated source files) ----
import numpy as np
import matplotlib.pyplot as plt
def get_solidblockage(aircraft,tunnel,bool):
    """Total solid-blockage factor epsilon_sb for the model in the tunnel.

    Sums per-component solid blockage K * tau * V / C**1.5 over the wing,
    fuselage, the two support struts (hard-coded rig volumes) and — for the
    tail-on case — both tail surfaces.

    bool: 'w' for the tail-off configuration, 'wt' for wing + tail.
    tunnel.C is the test-section cross-sectional area.
    """
    # Shared denominator: tunnel area to the 3/2 power.
    c_pow = (tunnel.C) ** (3 / 2.)
    # Component terms: (K1 or K3) * tau1 * volume / C^1.5.
    esb_w = 1.02 * 0.87 * aircraft.wing.V / c_pow
    esb_f = 0.915 * 0.86 * aircraft.fuselage.V / c_pow
    esb_ss = 0.90 * 0.86 * 0.0035296 / c_pow   # sting/support strut (rig volume)
    esb_ms = 0.86 * 0.90 * 0.0004491 / c_pow   # model strut (rig volume)
    esb_th = 0.86 * 1.035 * aircraft.tailh.V / c_pow
    esb_tv = 0.86 * 1.035 * aircraft.tailv.V / c_pow
    # NOTE: an unknown flag leaves esb unbound (UnboundLocalError), exactly
    # like the original if/elif without else.
    if bool == 'w':
        esb = esb_f + esb_w + esb_ms + esb_ss
    elif bool == 'wt':
        esb = esb_f + esb_w + esb_ss + esb_ms + esb_th + esb_tv
    return esb
def get_wakeblockage(polar,aircraft,tunnel):
    """Per-point wake-blockage factor epsilon_wb for a measured polar.

    Splits uncorrected drag into profile drag CD0 (the polar minimum),
    induced drag (slope of the CD-vs-CL^2 fit over 0 < CL^2 < 0.7) and the
    separated-drag remainder, clipped at zero. Returns a numpy array aligned
    with polar.points.
    """
    CLu = np.array([point.CFl for point in polar.points])
    CDu = np.array([point.CFd for point in polar.points])
    CD0 = np.min(CDu)
    CLu_2 = CLu**2
    CLu_2_linear = []
    CDu_linear = []
    # Restrict the induced-drag fit to the linear lift region.
    for i,CL in enumerate(CLu_2):
        if 0.0 < CL < 0.7:
            CLu_2_linear.append(CL)
            CDu_linear.append(CDu[i])
    m,c = np.polyfit(CLu_2_linear,CDu_linear,1)
    CDi = m*CLu_2
    CDs_t = CDu - CD0 - CDi
    # Separated drag cannot be negative.
    CDs = np.array([max(CD, 0) for CD in CDs_t])
    ewb = (aircraft.wing.S/(4*tunnel.C)) * CD0 + ((5 * aircraft.wing.S)/(4 * tunnel.C)) * CDs
    return ewb
def correct_blockage(polar,aircraft,tunnel,bool):
    """Apply total (solid + wake) blockage corrections to a polar in place.

    bool: configuration flag forwarded to get_solidblockage ('w' or 'wt').
    Each point's dynamic pressure and velocity are scaled by (1 + et), then
    the dependent coefficient sets are recomputed via the point's methods.
    """
    esb = get_solidblockage(aircraft, tunnel, bool)
    ewb = get_wakeblockage(polar,aircraft, tunnel)
    for i,point in enumerate(polar.points):
        point.et = esb + ewb[i]
        point.qInf = point.qInf*((1+point.et)**2)
        point.U = point.U*(1+point.et)
        point.get_coeffs(aircraft)
        point.get_modelaxiscoeffs(aircraft)
        point.get_conventionalcoeffs(aircraft)
def get_streamline_wing(CLu,alphau,aircraft,tunnel):
    """Streamline-curvature (lift-interference) corrections for the wing.

    CLu: uncorrected lift coefficients; alphau: uncorrected angles of attack
    in degrees. The lift slope is fitted over the linear region
    -1 < alpha < 7 deg. Returns an Nx3 array of per-point
    [delta_alpha (deg), delta_CD, delta_CM].
    """
    bv = 0.76 * aircraft.wing.b       # vortex span
    be = (aircraft.wing.b + bv) / 2   # effective span (computed but unused below)
    # delta: boundary-correction factor — presumably from the standard
    # wind-tunnel correction tables for this facility; confirm source.
    delta = 0.108
    delta_alpha_wing_sc = delta * (aircraft.wing.S / tunnel.C) * (180 / np.pi) * CLu
    t2w = 0.11
    delta_alpha_wing = (1 + t2w) * delta_alpha_wing_sc
    delta_drag_wing = delta * (aircraft.wing.S / tunnel.C) * CLu ** 2
    CLu_linear = []
    alphau_linear = []
    # Keep only the linear lift region for the CL-alpha slope fit.
    for i, alpha_i in enumerate(alphau):
        if -1 < alpha_i < 7:
            alphau_linear.append(alpha_i)
            CLu_linear.append(CLu[i])
    CLu_linear = np.array(CLu_linear)
    alphau_linear = np.array(alphau_linear)
    CLalpha, c = np.polyfit(alphau_linear, CLu_linear, 1)
    delta_moment_wing = 0.125 * delta_alpha_wing * CLalpha
    delta_array_wing = np.array([[delta_alpha_wing[i],delta_drag_wing[i],delta_moment_wing[i]] for i in range(len(delta_alpha_wing))])
    return delta_array_wing
def correct_notail(polar,aircraft,tunnel,bool):
    """Full tunnel correction for a tail-off polar, applied in place.

    Applies blockage (solid + wake) to q and U, recomputes Reynolds number,
    then adds the streamline-curvature increments to alpha, CD and CM; all
    coefficients are rescaled by 1/(1+et)^2 for the corrected dynamic pressure.
    """
    esb = get_solidblockage(aircraft,tunnel, bool)
    ewb = get_wakeblockage(polar,aircraft, tunnel)
    CLu = np.array([point.CFl for point in polar.points])
    alphau = np.array([point.alpha for point in polar.points])
    delta_array_wing = get_streamline_wing(CLu, alphau, aircraft, tunnel)
    for i,point in enumerate(polar.points):
        point.et = esb + ewb[i]
        point.qInf = point.qInf * ((1 + point.et) ** 2)
        point.U = point.U * (1 + point.et)
        point.get_reynolds(aircraft)
        # Columns of delta_array_wing: [delta_alpha, delta_CD, delta_CM].
        point.alpha = point.alpha + delta_array_wing[i, 0]
        point.CFd = point.CFd*(1/(1+point.et)**2) + delta_array_wing[i, 1]
        point.CMp = point.CMp*(1/(1+point.et)**2) + delta_array_wing[i, 2]
        point.CFl = point.CFl*(1/(1+point.et)**2)
def get_streamline_tail(CMu,CMwu,CLwu,alphau,aircraft,tunnel):
    """Streamline-curvature corrections for the horizontal tail.

    CMu: net (wing+tail) moment; CMwu: tail-off moment at matched alphas;
    CLwu: tail-off lift; alphau: uncorrected alpha in degrees. The tail
    moment slope CMh_alpha is fitted over -1 < alpha < 7 deg. Returns an
    Nx2 array of per-point [delta_alpha_tail (deg), delta_CM_tail].
    """
    CMh = CMu - CMwu    # tail contribution to the pitching moment
    CMh_linear = []
    alpha_linear = []
    for i, alpha in enumerate(alphau):
        if -1 < alpha < 7:
            alpha_linear.append(alpha)
            CMh_linear.append(CMh[i])
    CMhalpha, c = np.polyfit(alpha_linear,CMh_linear,1)
    #print(CMhalpha)
    lth = 3.22*aircraft.wing.cmac    # tail moment arm (computed but unused below)
    delta = 0.108
    t2h = 0.88
    #print(CLwu)
    delta_alpha_tail = delta*t2h*(aircraft.wing.S/tunnel.C)*CLwu*(180/np.pi)
    delta_moment_tail = delta_alpha_tail*CMhalpha
    delta_array_tail = np.array([[delta_alpha_tail[i], delta_moment_tail[i]] for i in range(len(delta_alpha_tail))])
    return delta_array_tail
def correct_tail(polar,polar_tailOff_uncorr,aircraft,tunnel,bool):
    """Full tunnel correction for a tail-on polar, applied in place.

    Uses the uncorrected tail-off polar to separate wing and tail
    contributions: tail-off points are matched to the nearest alpha of each
    tail-on point, wing corrections come from get_streamline_wing and tail
    corrections from get_streamline_tail. Stores corrected wing-only
    (CFL_w/CFd_w/CMp_w), net (CFl/CFd/CMp) and tail-only (CFl_t/CFd_t/CMp_t)
    coefficients plus a tail-corrected alpha_t on each point.

    NOTE(review): the `bool` parameter is accepted but solid blockage is
    always computed with 'wt' below — confirm intent.
    """
    esb = get_solidblockage(aircraft,tunnel,'wt')
    ewb = get_wakeblockage(polar,aircraft,tunnel)
    'Uncorrected tail off'
    CLwu = np.array([point.CFl for point in polar_tailOff_uncorr.points])
    alphawu = np.array([point.alpha for point in polar_tailOff_uncorr.points])
    CMwu = np.array([point.CMp for point in polar_tailOff_uncorr.points])
    CDwu = np.array([point.CFd for point in polar_tailOff_uncorr.points])
    delta_array_wing = get_streamline_wing(CLwu, alphawu, aircraft, tunnel)
    'Uncorrected net'
    CLu = np.array([point.CFl for point in polar.points])
    CDu = np.array([point.CFd for point in polar.points])
    CMu = np.array([point.CMp for point in polar.points])
    alphau = np.array([point.alpha for point in polar.points])
    'Get same alphas for uncorrected wing'
    # Nearest-alpha matching between tail-on and tail-off measurements.
    CMwu_t = []
    CLwu_t = []
    alphawu_t = []
    CDwu_t = []
    delta_array_wing_tail = []
    for i, alpha in enumerate(alphau):
        min_index = np.argmin(np.abs(alphawu - alpha))
        CMwu_t.append(CMwu[min_index])
        CLwu_t.append(CLwu[min_index])
        alphawu_t.append(alphawu[min_index])
        CDwu_t.append(CDwu[min_index])
        delta_array_wing_tail.append(delta_array_wing[min_index,:])
    CMwu_t = np.array(CMwu_t)
    CLwu_t = np.array(CLwu_t)
    CDwu_t = np.array(CDwu_t)
    alphawu_t = np.array(alphawu_t)
    delta_array_wing_tail = np.array(delta_array_wing_tail)
    delta_array_tail = get_streamline_tail(CMu,CMwu_t,CLwu_t,alphau,aircraft,tunnel)
    for i,point in enumerate(polar.points):
        point.et = esb + ewb[i]
        point.qInf = point.qInf * ((1 + point.et) ** 2)
        point.U = point.U * (1 + point.et)
        point.get_reynolds(aircraft)
        # Wing correction shifts alpha; tail correction shifts alpha_t.
        alpha_temp = point.alpha
        point.alpha = point.alpha + delta_array_wing_tail[i, 0]
        point.alpha_t = alpha_temp + delta_array_tail[i, 0]
        # Wing-only coefficients (rescaled + streamline increments).
        point.CFL_w = CLwu_t[i] * (1 / (1 + point.et) ** 2)
        point.CFd_w = CDwu_t[i] * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 1]
        point.CMp_w = CMwu_t[i] * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 2]
        # Net coefficients (wing increments plus tail moment increment).
        point.CFd = point.CFd * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 1]
        point.CMp = point.CMp * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 2] + delta_array_tail[i, 1]
        point.CFl = point.CFl * (1 / (1 + point.et) ** 2)
        # Tail contribution = net minus wing.
        point.CFl_t = point.CFl - point.CFL_w
        point.CFd_t = point.CFd - point.CFd_w
        point.CMp_t = point.CMp - point.CMp_w
def get_listfrompolar(polar,bool):
    """Extract an Nx2 (alpha, coefficient) array from a polar.

    bool selects the coefficient: 'CL'/'CD'/'CM' (net, vs model alpha),
    'CLw'/'CDw'/'CMw' (wing contribution, vs model alpha) or
    'CLt'/'CDt'/'CMt' (tail contribution, vs tail alpha alpha_t).
    An unknown key yields an empty array, as in the original elif chain.
    """
    # (alpha attribute, value attribute) looked up per point.
    selectors = {
        'CL': ('alpha', 'CFl'),
        'CD': ('alpha', 'CFd'),
        'CM': ('alpha', 'CMp'),
        'CLw': ('alpha', 'CFL_w'),
        'CDw': ('alpha', 'CFd_w'),
        'CMw': ('alpha', 'CMp_w'),
        'CLt': ('alpha_t', 'CFl_t'),
        'CDt': ('alpha_t', 'CFd_t'),
        'CMt': ('alpha_t', 'CMp_t'),
    }
    rows = []
    chosen = selectors.get(bool)
    if chosen is not None:
        alpha_attr, var_attr = chosen
        rows = [[getattr(point, alpha_attr), getattr(point, var_attr)] for point in polar.points]
    return np.array(rows)
def get_thrust_curve(polar,aircraft):
    """Estimate thrust from a linear CT(J) calibration and store it per point.

    Fits a straight line through the hard-coded (J, CT) calibration pairs,
    then writes CT, dimensional thrust T, and the thrust coefficient Tc
    (thrust over qInf times propeller disc area) onto every polar point.
    """
    # Linear fit of thrust coefficient versus advance ratio (calibration data).
    J_fit = np.array([1.756, 1.873, 1.982, 2.073, 2.194, 2.299])
    CT_fit = np.array([0.236, 0.195, 0.153, 0.119, 0.075, 0.028])
    grad, intercept = np.polyfit(J_fit, CT_fit, 1)
    disc_area = np.pi * (aircraft.prop.D / 2) ** 2
    for point in polar.points:
        point.CT = grad * point.J + intercept
        point.T = point.CT * point.rho * (point.n ** 2) * (aircraft.prop.D ** 4)
        point.Tc = point.T / (point.qInf * disc_area)
# def get_thrust_zero(polar,polar_propOff,aircraft):
# index_0 = np.argmin(np.abs(np.array([point.alpha for point in polar.points])))
# index_0_propOff = np.argmin(np.abs(np.array([point.alpha for point in polar_propOff.points])))
# Fx = polar.points[index_0].F1
# Fx_propOff = polar_propOff.points[index_0_propOff].F1
# Fz = polar.points[index_0].F3
# Fz_propOff = polar_propOff.points[index_0_propOff].F3
# #print(index_0,index_0_propOff)
# L = Fz_propOff
# N = Fz - L
# D = Fx_propOff
# T = Fx_propOff - Fx
# CT = (T/2)/(polar.points[index_0].rho*polar.points[index_0].n**2*aircraft.prop.D**4)
# return L, N, D, T, CT
def get_slipstream(Tc, aircraft):
    """Dynamic-pressure ratio at the tail due to the propeller slipstream.

    Returns 1 for zero thrust; grows with the thrust coefficient Tc and
    with the ratio of propeller diameter to horizontal-tail span.
    """
    root_term = np.sqrt(1 + Tc)
    thrust_factor = Tc * np.sqrt((root_term + 1) / (2 * root_term))
    geometry_factor = aircraft.prop.Nblades * 0.6 * (aircraft.prop.D / aircraft.tailh.b)
    return 1 + geometry_factor * thrust_factor
def get_CT_alpha(fname, alpha_i):
    """Ratio of thrust coefficient at incidence alpha_i to that at alpha = 0.

    Reads a two-column comma-separated file where each line is
    "alpha,CT", linearly interpolates CT at alpha_i and at 0, and
    returns their quotient.

    Parameters
    ----------
    fname : str
        Path of the calibration file.
    alpha_i : float
        Incidence angle (same units as the file's first column).
    """
    alpha = []
    CT = []
    # "with" guarantees the handle is closed even if a line fails to parse
    # (the original open/readlines/close left it open on a parse error),
    # and iterating the file avoids loading it all at once.
    with open(fname, 'r') as f:
        for line in f:
            fields = line.strip().split(',')
            alpha.append(float(fields[0]))
            CT.append(float(fields[1]))
    return np.interp(alpha_i, alpha, CT) / np.interp(0, alpha, CT)
def correct_CT_alpha(polar, Jfname, aircraft):
    """Scale each point's thrust data by the CT(alpha)/CT(0) ratio.

    For every polar point, the thrust coefficient is multiplied by the
    incidence correction from get_CT_alpha, then the dimensional thrust
    and the thrust coefficient Tc are recomputed consistently.
    """
    disc_area = np.pi * (aircraft.prop.D / 2) ** 2
    diameter4 = aircraft.prop.D ** 4
    for point in polar.points:
        ratio = get_CT_alpha(Jfname, point.alpha)
        point.CT *= ratio
        thrust = point.CT * point.rho * (point.n ** 2) * diameter4
        point.T = thrust
        point.Tc = thrust / (point.qInf * disc_area)
def get_tail_CLalpha(CLtc_tailOn, aircraft):
    """Tail lift-curve slope, referenced to the tail area.

    CLtc_tailOn is an (N, 2) array of (alpha, tail CL on wing area); the
    CL column is rescaled by S_wing / S_tail before the linear fit.
    """
    alphas = CLtc_tailOn[:, 0]
    scaled_CL = CLtc_tailOn[:, 1] * (aircraft.wing.S / aircraft.tailh.S)
    slope, _intercept = np.polyfit(alphas, scaled_CL, 1)
    return slope
def get_tail_CDCL2(CDtc_tailOn, CLtc_tailOn, aircraft):
    """Slope of tail drag versus tail lift squared (induced-drag factor).

    Only points whose CL lies in the open interval (-0.1, 0.2) enter the
    linear fit of CD against CL**2. The aircraft argument is unused but
    kept for signature compatibility with the other helpers.
    """
    CL2 = []
    CD = []
    for idx in range(len(CLtc_tailOn)):
        cl = CLtc_tailOn[idx, 1]
        if -0.1 < cl < 0.2:
            CL2.append(cl ** 2)
            CD.append(CDtc_tailOn[idx, 1])
    slope, _offset = np.polyfit(CL2, CD, 1)
    return slope
def correct_thrust(polar,polar_tailOn,polar_tailOff,dCD_CL2_tail,aircraft):
    """Solve for the thrust coefficient that balances the measured axial
    force at one reference condition, then assign it to every point.

    Fixed-point iteration with under-relaxation (0.75 old + 0.25 new),
    stopping when the thrust residual falls below 0.001 or after 20 steps.

    NOTE(review): the reference points are hard-coded (polar.points[1],
    polar_tailOn.points[2], polar_tailOff.points[5]) -- presumably chosen
    to share the same test condition; confirm against the run matrix.
    """
    diff = 1
    iter = 1
    point_0 = polar.points[1]
    point_0_propOff = polar_tailOn.points[2]
    point_0_tailOff = polar_tailOff.points[5]
    Tc = point_0.Tc  # initial guess from the current thrust coefficient
    while diff > 0.001 and iter < 20:
        Fx = point_0.F1  # measured axial force, powered run
        Dwt = point_0_propOff.F1  # axial force, prop-off (wing + tail drag)
        Sp = np.pi * (aircraft.prop.D / 2) ** 2  # propeller disc area
        T_calc = Tc * point_0.qInf * Sp  # thrust implied by current Tc
        q_ratio_0 = get_slipstream(Tc, aircraft)
        Lwt = point_0_propOff.F3
        Lw = point_0_tailOff.F3
        Lt = Lwt - Lw  # tail lift = (wing+tail) - wing-only
        Fz = point_0.F3
        delta_Lt = Fz - Lwt  # extra tail lift due to the slipstream
        # delta_Lt_slipstream = Lt * (q_ratio_0 - 1)
        # delta_CLt = (delta_Lt_slipstream/(aircraft.wing.S * point_0.qInf))*(aircraft.wing.S/aircraft.tailh.S)
        delta_CLt = (delta_Lt / (aircraft.wing.S * point_0.qInf)) * (aircraft.wing.S / aircraft.tailh.S)
        # extra tail lift causes extra induced drag via the CD ~ CL^2 slope
        delta_CDt = (delta_CLt ** 2) * dCD_CL2_tail
        delta_Dt = delta_CDt * point_0.qInf * aircraft.tailh.S
        T = Dwt + delta_Dt - Fx
        T = T / 2  # per-propeller thrust (two props assumed -- confirm)
        diff = np.abs(T_calc - T)
        iter = iter + 1
        # under-relaxed update of the thrust coefficient
        Tc_old = Tc
        Tc_new = T / (point_0.qInf * Sp)
        Tc = Tc_old * 0.75 + 0.25 * Tc_new
    # Apply the converged Tc (and matching T, CT) to every polar point.
    for point in polar.points:
        point.Tc = Tc
        point.T = Tc*(point_0.qInf*Sp)
        point.CT = point.T/(point.rho * (point.n ** 2) * (aircraft.prop.D ** 4))
def get_slipstreamblockage(polar, aircraft, tunnel):
    """Slipstream blockage factor per polar point (numpy array).

    Combines each point's thrust coefficient with the propeller disc area
    and tunnel cross-section; zero thrust gives zero blockage.
    """
    thrust_coeffs = np.array([p.Tc for p in polar.points])
    excess = np.sqrt(1 + (8 / np.pi) * thrust_coeffs) - 1
    disc_area = np.pi * (aircraft.prop.D / 2) ** 2
    tunnel_area = tunnel.b * tunnel.h
    return -(disc_area * excess) / (4 * np.pi * tunnel_area)
def get_uncorrected_drag(polar, aircraft):
    """Reconstruct uncorrected drag from balance axial force plus thrust.

    The axial-force reading F1 has the thrust component added back
    (factor of two: presumably twin propellers -- confirm), and the drag
    coefficient is referenced to the wing area.
    """
    deg2rad = np.pi / 180
    ref_area = aircraft.wing.S
    for point in polar.points:
        total_thrust = 2 * point.T
        point.D = point.F1 + total_thrust * np.cos(point.alpha * deg2rad)
        point.CFd = point.D / (point.qInf * ref_area)
def get_uncorrect_powered(polar, aircraft):
    """Reconstruct uncorrected lift/drag coefficients for powered runs.

    Thrust is recovered from each point's Tc via the propeller disc area,
    doubled (presumably two propellers -- confirm), and removed from the
    balance forces: added to axial force for drag, subtracted from normal
    force for lift. Coefficients are referenced to the wing area.
    """
    disc_area = np.pi * (aircraft.prop.D / 2) ** 2
    deg2rad = np.pi / 180
    for point in polar.points:
        total_thrust = 2 * (point.Tc * point.qInf * disc_area)
        angle = point.alpha * deg2rad
        drag = point.F1 + total_thrust * np.cos(angle)
        lift = point.F3 - total_thrust * np.sin(angle)
        point.CFd = drag / (point.qInf * aircraft.wing.S)
        point.CFl = lift / (point.qInf * aircraft.wing.S)
def correct_powered(polar,polar_tailOff,aircraft,tunnel):
    """Apply wind-tunnel blockage and streamline-curvature corrections to a
    powered, tail-on polar, splitting results into wing and tail shares.

    Blockage: solid + wake + slipstream factors combine into point.et and
    rescale qInf, U and all force/moment coefficients by 1/(1+et)^2.
    Streamline curvature: alpha/drag/moment increments for the wing come
    from the tail-off polar (matched by nearest alpha), tail increments
    from get_streamline_tail. Mutates every point of `polar` in place.
    """
    esb = get_solidblockage(aircraft, tunnel, 'wt')
    ewb = get_wakeblockage(polar, aircraft, tunnel)
    #Tc = np.array([point.Tc for point in polar.points])
    #print('Tc')
    ess = get_slipstreamblockage(polar, aircraft, tunnel)
    'Uncorrected tail off'
    CLwu = np.array([point.CFl for point in polar_tailOff.points])
    alphawu = np.array([point.alpha for point in polar_tailOff.points])
    CMwu = np.array([point.CMp for point in polar_tailOff.points])
    CDwu = np.array([point.CFd for point in polar_tailOff.points])
    delta_array_wing = get_streamline_wing(CLwu, alphawu, aircraft, tunnel)
    'Uncorrected net'
    CLu = np.array([point.CFl for point in polar.points])
    CDu = np.array([point.CFd for point in polar.points])
    CMu = np.array([point.CMp for point in polar.points])
    alphau = np.array([point.alpha for point in polar.points])
    'Get same alphas for uncorrected wing'
    # Match each powered point to the nearest-alpha tail-off point so the
    # wing-only data line up with the powered polar.
    CMwu_t = []
    CLwu_t = []
    alphawu_t = []
    CDwu_t = []
    delta_array_wing_tail = []
    for i, alpha in enumerate(alphau):
        min_index = np.argmin(np.abs(alphawu - alpha))
        CMwu_t.append(CMwu[min_index])
        CLwu_t.append(CLwu[min_index])
        alphawu_t.append(alphawu[min_index])
        CDwu_t.append(CDwu[min_index])
        delta_array_wing_tail.append(delta_array_wing[min_index, :])
    CMwu_t = np.array(CMwu_t)
    CLwu_t = np.array(CLwu_t)
    CDwu_t = np.array(CDwu_t)
    alphawu_t = np.array(alphawu_t)
    delta_array_wing_tail = np.array(delta_array_wing_tail)
    delta_array_tail = get_streamline_tail(CMu, CMwu_t, CLwu_t, alphau, aircraft, tunnel)
    for i,point in enumerate(polar.points):
        # total blockage: solid + wake + slipstream
        point.et = esb + ewb[i] + ess[i]
        point.qInf = point.qInf * ((1 + point.et) ** 2)
        point.U = point.U * (1 + point.et)
        point.get_reynolds(aircraft)
        point.get_advanceratio(aircraft)
        # wing alpha gets the wing curvature increment; tail alpha keeps the
        # original incidence plus the tail increment
        alpha_temp = point.alpha
        point.alpha = point.alpha + delta_array_wing_tail[i, 0]
        point.alpha_t = alpha_temp + delta_array_tail[i, 0]
        # wing-only coefficients, blockage-rescaled plus curvature increments
        point.CFL_w = CLwu_t[i] * (1 / (1 + point.et) ** 2)
        point.CFd_w = CDwu_t[i] * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 1]
        point.CMp_w = CMwu_t[i] * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 2]
        # total coefficients
        point.CFd = point.CFd * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 1]
        point.CMp = point.CMp * (1 / (1 + point.et) ** 2) + delta_array_wing_tail[i, 2] + delta_array_tail[i, 1]
        point.CFl = point.CFl * (1 / (1 + point.et) ** 2)
        # tail share = total minus wing share
        point.CFl_t = point.CFl - point.CFL_w
        point.CFd_t = point.CFd - point.CFd_w
        point.CMp_t = point.CMp - point.CMp_w
        # tail lift corrected back to slipstream-free dynamic pressure
        q_ratio = np.sqrt(get_slipstream(point.Tc,aircraft))
        delta_CL = point.CFl_t * ((1/q_ratio)**2 -1)
        point.CFl_t_0 = point.CFl_t + delta_CL
def interpolateTc(polar):
    """Assign interpolated thrust coefficients at alpha = 3 deg to a polar.

    Hard-coded calibration: Tc was measured at alpha = 0 and 4 deg for the
    advance ratios J = 1.6, 2.0 and 2.4; each pair is linearly interpolated
    to alpha = 3 deg. Assumes polar.points holds exactly three points
    ordered J = 1.6, 2.0, 2.4 (IndexError for more points).

    Removed the dead, unused `Juc` array from the original.
    """
    # (alpha, Tc) calibration pairs per advance ratio, interpolated to 3 deg.
    J16_Tc_fit = np.interp(3, [0.0, 4.0], [0.318, 0.324])
    J20_Tc_fit = np.interp(3, [0.0, 4.0], [0.1015, 0.1127])
    J24_Tc_fit = np.interp(3, [0.0, 4.0], [-0.0117, -0.0056])
    Tc = np.array([J16_Tc_fit, J20_Tc_fit, J24_Tc_fit])
    for i, point in enumerate(polar.points):
        point.Tc = Tc[i]
def correct_deflection(polar,polar_tailOff,polar_J16u,polar_J20u,polar_J24u,polar_J16,polar_J20,polar_J24,aircraft,tunnel):
    """Correct an elevator-deflection polar at the fixed alpha = 3 deg.

    Each of the three points of `polar` corresponds to one advance ratio
    (J = 1.6, 2.0, 2.4 in order); its uncorrected tail-on polar and the
    matching already-corrected polar (for the blockage factor et) are
    selected by index. Wing quantities come from the tail-off polar
    interpolated to 3 deg. Mutates the points of `polar` in place.

    NOTE(review): the inner `for i, alpha in enumerate(alphau)` reuses the
    outer loop variable `i`; harmless today because `i` is re-bound at the
    top of each outer iteration, but fragile. Points beyond index 2 silently
    reuse the J = 2.4 data.
    """
    'Uncorrected tail off'
    CLwu = np.array([point.CFl for point in polar_tailOff.points])
    alphawu = np.array([point.alpha for point in polar_tailOff.points])
    CMwu = np.array([point.CMp for point in polar_tailOff.points])
    CDwu = np.array([point.CFd for point in polar_tailOff.points])
    delta_array_wing = get_streamline_wing(CLwu, alphawu, aircraft, tunnel)
    # Wing curvature increments and wing coefficients at alpha = 3 deg.
    delta_wing_alpha = np.interp(3.0,alphawu,delta_array_wing[:,0])
    delta_wing_drag = np.interp(3.0, alphawu, delta_array_wing[:,1])
    delta_wing_moment = np.interp(3.0, alphawu, delta_array_wing[:,2])
    CLwu_val = np.interp(3.0,alphawu,CLwu)
    CDwu_val = np.interp(3.0,alphawu,CDwu)
    CMwu_val = np.interp(3.0,alphawu,CMwu)
    for i, point in enumerate(polar.points):
        # Pick the uncorrected tail-on polar and the et-source polar for
        # this point's advance ratio.
        if i == 0:
            polar_tailOn = polar_J16u
            et_polar = polar_J16
        elif i == 1:
            polar_tailOn= polar_J20u
            et_polar = polar_J20
        elif i == 2:
            polar_tailOn = polar_J24u
            et_polar = polar_J24
        'Uncorrected net'
        CLu = np.array([point.CFl for point in polar_tailOn.points])
        CDu = np.array([point.CFd for point in polar_tailOn.points])
        CMu = np.array([point.CMp for point in polar_tailOn.points])
        alphau = np.array([point.alpha for point in polar_tailOn.points])
        'Get same alphas for uncorrected wing'
        # Match tail-on points to the nearest-alpha tail-off points.
        CMwu_t = []
        CLwu_t = []
        alphawu_t = []
        CDwu_t = []
        delta_array_wing_tail = []
        for i, alpha in enumerate(alphau):
            min_index = np.argmin(np.abs(alphawu - alpha))
            CMwu_t.append(CMwu[min_index])
            CLwu_t.append(CLwu[min_index])
            alphawu_t.append(alphawu[min_index])
            CDwu_t.append(CDwu[min_index])
            delta_array_wing_tail.append(delta_array_wing[min_index, :])
        CMwu_t = np.array(CMwu_t)
        CLwu_t = np.array(CLwu_t)
        CDwu_t = np.array(CDwu_t)
        alphawu_t = np.array(alphawu_t)
        delta_array_tail = get_streamline_tail(CMu, CMwu_t, CLwu_t, alphau, aircraft, tunnel)
        delta_tail_moment = np.interp(3.0,alphau,delta_array_tail[:,1])
        # Blockage factor at 3 deg taken from the already-corrected polar.
        et_alpha = np.array([point.alpha for point in et_polar.points])
        et_value = np.array([point.et for point in et_polar.points])
        point.et = np.interp(3.0,et_alpha,et_value)
        point.qInf = point.qInf * ((1 + point.et) ** 2)
        point.U = point.U * (1 + point.et)
        point.get_reynolds(aircraft)
        point.get_advanceratio(aircraft)
        alpha_temp = point.alpha
        point.alpha = point.alpha + delta_wing_alpha
        point.alpha_t = alpha_temp
        # Wing-only, total, and tail-share coefficients (blockage-rescaled).
        point.CFL_w = CLwu_val * (1 / (1 + point.et) ** 2)
        point.CFd_w = CDwu_val * (1 / (1 + point.et) ** 2) + delta_wing_drag
        point.CMp_w = CMwu_val * (1 / (1 + point.et) ** 2) + delta_wing_moment
        point.CFd = point.CFd * (1 / (1 + point.et) ** 2) + delta_wing_drag
        point.CMp = point.CMp * (1 / (1 + point.et) ** 2) + delta_wing_moment + delta_tail_moment
        point.CFl = point.CFl * (1 / (1 + point.et) ** 2)
        point.CFl_t = point.CFl - point.CFL_w
        point.CFd_t = point.CFd - point.CFd_w
        point.CMp_t = point.CMp - point.CMp_w
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib import pyplot as plt
from tkinter import filedialog
from tkinter import *
# Ask the user to pick an image with a file dialog, then load it as
# grayscale (imread flag 0). The hidden Tk root is destroyed afterwards.
root = Tk()
root.withdraw()
root.filename = filedialog.askopenfilename(initialdir = "/",title = "Select file",filetypes = (("all files",".*"),("jpg files",".jpg")))
img = cv2.imread(root.filename,0)
root.destroy()
""" cv2.imshow("Ventana de imagen seleccionada",img)
cv2.waitKey(0) """
###### Adaptive histogram equalization (CLAHE)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
cl = clahe.apply(img)
cv2.imshow("Ventana de imagen ",cl)
cv2.imwrite('clk.png',cl)
##### Global histogram equalization
equ = cv2.equalizeHist(img)
res = np.hstack((img,equ)) #stacking images side-by-side
cv2.imshow("Ventana de imagen seleccionada",res)
###### High-pass filter (Laplacian-like 3x3 kernel)
kernel=np.array([[-1,-1,-1],[-1,-8,-1],[-1,-1,-1]])
dst = cv2.filter2D(equ,-1,kernel)
#### Rotation and scale (rotate the equalized image -15 deg about its center)
rows,cols = equ.shape
M = cv2.getRotationMatrix2D((cols/2,rows/2),-15,1)
Rotada = cv2.warpAffine(equ,M,(cols,rows))
cv2.imshow("Ventana de imagen rotada",dst)
cv2.waitKey(0)
""" dst2 = cv2.filter2D(cl,-1,kernel)
cv2.imshow("Ventana",dst2) """
##############
|
from typing import Tuple
from torch import cuda
import torch.nn as nn
import torch as th
class ResNet(nn.Module):
    """Residual wrapper: output is the wrapped module's result plus its input."""

    def __init__(self, module):
        super().__init__()
        self.module = module

    def forward(self, inputs):
        residual = self.module(inputs)
        return residual + inputs
class ActorCriticNetwork(nn.Module):
    """Convolutional actor-critic: shared residual trunk feeding a policy
    head (raw and legality-masked softmax) and a scalar value head."""

    def __init__(self, num_input_channels: int, num_filters: int = 32):
        super(ActorCriticNetwork, self).__init__()

        def conv_unit(in_ch, out_ch, kernel):
            # conv -> ReLU -> BatchNorm, stride 1, 'same' padding
            return [
                nn.Conv2d(in_ch, out_ch, kernel_size=kernel, stride=1, padding='same'),
                nn.ReLU(),
                nn.BatchNorm2d(out_ch),
            ]

        def residual_block():
            # two conv units inside a skip connection
            return ResNet(nn.Sequential(
                *conv_unit(num_filters, num_filters, 3),
                *conv_unit(num_filters, num_filters, 3),
            ))

        self.features_extractor = nn.Sequential(
            *conv_unit(num_input_channels, num_filters, 3),
            residual_block(),
            residual_block(),
            residual_block(),
        )
        self.policy_net = nn.Sequential(
            *conv_unit(num_filters, 2, 1),
            nn.Conv2d(2, 1, kernel_size=1, stride=1, padding='same'),
            nn.Flatten(),
        )
        self.value_net = nn.Sequential(
            *conv_unit(num_filters, num_filters, 1),
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(num_filters, 1),
            nn.ReLU(),
        )

    def masked(self, x, mask):
        """Return a copy of x with entries where mask == 0 set to -inf."""
        out = x.clone()
        out[mask == 0] = -float("inf")
        return out

    def forward(self, observations: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
        """Return (raw policy logits, masked policy softmax, value).

        Channel 1 of the observation is treated as the legal-action mask
        and flattened to (batch, H*W) -- TODO confirm against the caller.
        """
        shape = observations.shape
        legal_action = th.reshape(observations[:, 1, :, :], (-1, shape[2] * shape[3]))
        feature = self.features_extractor(observations)
        pi = self.policy_net(feature)
        # NOTE: the policy head is intentionally evaluated a second time
        # here, mirroring the original implementation.
        pi_masked = self.masked(self.policy_net(feature), legal_action)
        pi_masked = nn.functional.softmax(pi_masked, dim=1)
        value = self.value_net(feature)
        return pi, pi_masked, value
if __name__ == '__main__':
    # Smoke test: repeatedly run a forward pass on the second GPU.
    import torch.multiprocessing as mp
    try:
        # 'spawn' is required for CUDA tensors with multiprocessing; a
        # RuntimeError just means the start method was already set.
        mp.set_start_method('spawn')
    except RuntimeError:
        pass
    model = ActorCriticNetwork(num_input_channels=4).cuda('cuda:1')
    print("test = ", next(model.parameters()).is_cuda)
    b = th.randn(1, 4, 16, 16).cuda('cuda:1')
    for _ in range(100000):
        # BUG FIX: forward() returns (pi, pi_masked, value); the original
        # two-name unpacking raised ValueError on the first iteration.
        pi, pi_masked, value = model(b)
|
import random
import time
from webapi.awebapi import AHTTP
# HTTP is synchronous, AHTTP is asynchronous. The former is used for functional testing, the latter for load testing.
from webapi.webapi import HTTP
class User:
    '''Represents a user of the app.

    Creating an object of this class with no parametres creates a random user.

    pre_defined_user -- name of an entry in the module-level ``users`` list;
        its fields seed the new user's attributes.
    attributes -- dict of explicit attribute values (keys from ATTRIBUTES);
        ignored when pre_defined_user is given. Missing attributes are
        filled with random/default values.
    '''
    # order of attributes matters
    ATTRIBUTES = ['username', 'email', 'password', 'retypedPassword',
                  'firstName', 'lastName', 'internalName', "bio", "website", "tagline"]
    # name pool for randomly generated first/last names
    NAMES = ['Josh', 'David', 'Kyle', 'Emmanuel', 'Rohan', 'Russel', 'Goddard',
             'Olton', 'Spencer', 'Phillips', 'Amadio', 'Calydris', "Smith", "Fong",
             "Judy", "Linda", "Jai", "Bin", "Lucy", "Matthew", "Leigh", "Nick",
             "Birnie", "James", "Erin", "Bernard", "Jeniffer", "Deb", "Jessica"]
    PASSWORD = 'oryx*967'  # shared default password for generated users
    MAX_USERNAME = 18  # NOTE(review): unused here -- presumably a server-side limit

    def __init__(self, pre_defined_user = None, attributes = None):
        if pre_defined_user is not None:
            attributes = get_user(pre_defined_user)
        else:
            # BUG FIX: the original unconditionally reset `attributes` to {},
            # silently discarding a caller-supplied dict; keep it when given.
            attributes = attributes or {}
        for attribute in User.ATTRIBUTES:
            try:
                setattr(self, attribute, attributes[attribute])
            except KeyError:
                # attribute not supplied -- generate a random/default value
                setattr(self, attribute, self._getValueFor(attribute))
        self.fullName = ('%s %s'%(self.firstName, self.lastName)).strip()
        # Split a multi-word first name: the first token stays firstName,
        # the remaining tokens become lastName.
        # NOTE(review): this overwrites an explicitly supplied lastName and
        # joins tokens without spaces ("Dr Frank" -> lastName "Frank") --
        # confirm this is intended.
        firstNames = self.firstName.split()
        if len(firstNames) > 0:
            self.firstName = self.firstName.split()[0]
        if len(firstNames) > 1:
            self.lastName = "".join(firstNames[1::])
        # These attributes allows a user to interact with the website via http requests.
        self.http = HTTP(self)
        self.ahttp = AHTTP(self)

    def _getRandomUsername(self):
        '''Unique-ish username: current unix epoch plus a random number.'''
        current_unix_epoch = int(time.time())
        return '%s%s'%(current_unix_epoch, self._getRandomNumber())

    def _getRandomEmail(self):
        return 'joshua+%s@magic.al'%(self._getRandomNumber())

    def _getPassword(self):
        return User.PASSWORD

    def _getRandomFirstName(self):
        return random.choice(User.NAMES)

    def _getRandomLastName(self):
        return random.choice(User.NAMES)

    def _getInternalName(self):
        '''This is how your name appears to yourself in the app'''
        return 'Me'

    def _getValueFor(self, value_for):
        '''Dispatch to the generator/default for the named attribute.'''
        switch = {'email' : self._getRandomEmail,
                  'username' : self._getRandomUsername,
                  'password' : self._getPassword,
                  'retypedPassword' : self._getPassword,
                  'firstName' : self._getRandomFirstName,
                  'lastName' : self._getRandomLastName,
                  'internalName' : self._getInternalName,
                  'bio' :self._getBio,
                  'website' :self._getWebsite,
                  'tagline' :self._getTagline,
                  }
        return switch[value_for]()

    def _getRandomNumber(self):
        return int(random.random()*99999999)

    def setFirstName(self, name, shouldTrim = True):
        '''Set firstName and keep fullName in sync (optionally trimmed).'''
        self.firstName = name
        fullName = ('%s %s'%(self.firstName, self.lastName))
        if shouldTrim:
            fullName = fullName.strip()
        self.fullName = fullName

    def setLastName(self, name, shouldTrim = True):
        '''Set lastName and keep fullName in sync (optionally trimmed).'''
        self.lastName = name
        fullName = ('%s %s'%(self.firstName, self.lastName))
        if shouldTrim:
            fullName = fullName.strip()
        self.fullName = fullName

    def _getWebsite(self):
        return "dev.magic.al"

    def _getBio(self):
        return "I was created to test magical"

    def _getTagline(self):
        return "What's a tagline?"
# pre-defined users
# add more here if you need to...
# Each entry is a dict of User attribute overrides keyed by a unique "name"
# (looked up via get_user); attributes not listed are randomised by User.
users = [
    {"name": "extantUser", "email": "joshua@magic.al", "password": "oryx*967", 'username' : 'joshgoddard',
     "firstName" : "j" , "lastName" : "goddard"},
    {"name": "emptyEmail", "email": ""},
    {"name": "emptyPassword", "email": "joshua@magic.al", "password": ""},
    {"name": "nonExistentUser", "email": "joshuaz@magic.al", "password": "oryx*967"},
    {"name": "longEmail", "email": "joshuaz"*15+"@magic.al", "password": "oryx*967"},
    {"name": "emptyEmailAndPassword", "email": "", "password": ""},
    {"name": "usernameTooLong", "username": "abc123abc123abc123z"},
    {"name": "emptyPasswords", "password": "", "retypedPassword" : ""},
    {"name": "passwordsDontMatch", "password": "password1", "retypedPassword": "password2"},
    {"name": "emptyPassword", "password": ""},
    {"name": "emptyUsername", "username": ""},
    {"name": "emptyFirstName", "firstName": ""},
    {"name": "emptyLastName", "lastName": ""},
    {"name": "invalidEmail", "email": "aaaaazasaasadasf"},
    {"name": "emptyRetypedPassword", "retypedPassword": ""},
    {"name": "emptyLastNameEmptyEmail", "lastName": "", "email":""},
    {"name": "random"},
    {"name":"funnyCharInFirstName", "firstName" : "Pökémön"},
    {"name": "funnyCharInLastName", "lastName": "Pökémön"},
    {"name": "emptyName", "lastName": "", "firstName":""},
    {"name": "davidGoddard", "firstName" : "David", "lastName": "Goddard"}, #may need to recreate this user if the account gets deleted
    {"name": "threePartname", "lastName": "Butz", "firstName": "Dr Frank"},
    ]
def get_user(name):
    '''Look up a pre-defined user dict by its "name" key.

    Raises KeyError when no entry in the module-level ``users`` list
    carries that name.
    '''
    match = next((entry for entry in users if entry['name'] == name), None)
    if match is None:
        raise KeyError("\n User %s is not defined, enter a valid user.\n" %name)
    return match
|
import subprocess as sp
import os
import glob

if __name__ == '__main__':
    # --- build configuration -----------------------------------------------
    Model_dir = '/home/zzhzhao/Model'
    test_dir = os.path.join(Model_dir, 'tests')
    source_name = 'original-WRF3.9.1'
    target_name = 'original-WRF3.9.1-YW_Lake-comp4'
    WRF_version = 'WRFV3'
    Modified_wrf_files_path = '/home/zzhzhao/code/python/compile_wrf/Modified_WRF_Files'
    geogrid_tbl_source_path = '/home/zzhzhao/Model/tests/original-comp/WPS/geogrid/GEOGRID.TBL.ARW'
    wrf_compile_option = 15  # ./configure menu choice for WRF
    wps_compile_option = 17  # ./configure menu choice for WPS
    WRF_path = os.path.join(test_dir, target_name, WRF_version)
    WPS_path = os.path.join(test_dir, target_name, 'WPS')

    ### Copy the pristine source tree to the build directory
    print('>>>> Copy source file <<<<')
    os.chdir(test_dir)
    sp.run(f'cp -r {source_name} {target_name}', shell=True)

    ### Drop the modified (YangW lake-model) sources into the WRF tree
    print('>>>> Copy Wuyang files <<<<')
    modified_files = [
        ('YangW_Registry.EM_COMMON', 'Registry/Registry.EM_COMMON'),
        ('YangW_module_sf_lake.F', 'phys/module_sf_lake.F'),
        ('YangW_registry.lake', 'Registry/registry.lake'),
        ('YangW_registry.dimspec', 'Registry/registry.dimspec'),
        ('YangW_module_surface_driver.F', 'phys/module_surface_driver.F'),
        ('YangW_module_first_rk_step_part1.F', 'dyn_em/module_first_rk_step_part1.F'),
        ('YangW_module_physics_init.F', 'phys/module_physics_init.F'),
        ('YangW_start_em.F', 'dyn_em/start_em.F'),
    ]
    for src_file, dest_rel in modified_files:
        sp.run(f'cp {Modified_wrf_files_path}/{src_file} {WRF_path}/{dest_rel}', shell=True)

    ### Configure and compile WRF
    os.chdir(WRF_path)
    print('>>>> Configure WRF <<<<')
    sp.run(f'echo {wrf_compile_option} | ./configure > log.configure', shell=True)
    print('>>>> Compile WRF <<<<')
    sp.run('./compile em_real >& log.compile', shell=True)
    # BUG FIX: list.sort() returns None, so the original compared
    # None == None and always reported success. Additionally the glob
    # pattern 'main, *.exe' never matched ('main' is a path component),
    # and glob returns full paths which must be reduced to basenames
    # before comparing against the expected executable names.
    wrf_exefile = sorted(['wrf.exe', 'real.exe', 'ndown.exe', 'tc.exe'])
    wrf_built = sorted(os.path.basename(p)
                       for p in glob.glob(os.path.join(WRF_path, 'main', '*.exe')))
    if wrf_built == wrf_exefile:
        print('>>>> WRF Compile Success <<<<')
    else:
        print('xxxx Error: WRF Compile Fail xxxx')
        os._exit(0)  # NOTE(review): exits with status 0 on failure -- confirm intended

    ### Configure and compile WPS
    os.chdir(WPS_path)
    print('>>>> Configure WPS <<<<')
    sp.run(f'echo {wps_compile_option} | ./configure > log.configure', shell=True)
    print('>>>> Compile WPS <<<<')
    sp.run('./compile >& log.compile', shell=True)
    wps_exefile = sorted(['geogrid.exe', 'ungrib.exe', 'metgrid.exe'])
    wps_built = sorted(os.path.basename(p)
                       for p in glob.glob(os.path.join(WPS_path, '*.exe')))
    if wps_built == wps_exefile:
        print('>>>> WPS Compile Success <<<<')
    else:
        print('xxxx Error: WPS Compile Fail xxxx')
        os._exit(0)

    ### Install GEOGRID.TBL.ARW
    print('>>>> Copy GEOGRID.TBL.ARW <<<<')
    # BUG FIX: the original passed a shell command string without
    # shell=True, which makes subprocess treat the whole string as a
    # single executable name and fail with FileNotFoundError.
    sp.run(f"cp {geogrid_tbl_source_path} {os.path.join(WPS_path, 'geogrid/GEOGRID.TBL.ARW')}", shell=True)
    print('**** Compile Success! ****')
import timeit
from lib2to3.fixer_util import Number
def principal_period(s): #numerical repetition finder
    """Return the shortest repeating unit of s, or None if s is aperiodic.

    Classic doubling trick: if s occurs inside (s + s) at an offset other
    than 0 or len(s), that offset is the period length.
    """
    doubled = s + s
    offset = doubled.find(s, 1, -1)
    if offset == -1:
        return None
    return s[:offset]
def primeList(n): #primesieve: returns list of primes
    """Return all primes <= n using a sieve of Eratosthenes.

    BUG FIX: removed the stray debug print statements that dumped nroot
    and the entire sieve to stdout on every call.
    """
    nroot = int(n**0.5)+1
    sieve = list(range(n+1))
    sieve[1] = 0  # 1 is not prime (index 0 is already 0)
    for i in range(2, nroot):
        if sieve[i] != 0:
            # zero out the multiples of i from i*i upward
            m = int(n/i - i)
            sieve[i*i: n+1:i] = [0] * (m+1)
    sieve = [x for x in sieve if x !=0]
    return sieve
from math import *; from itertools import count, islice
def isPrime(n): #test if number is prime
    """True iff n is prime, by trial division up to about sqrt(n)."""
    if n <= 1:
        return False
    # same candidate range as the original: 2 .. 2 + int(sqrt(n) - 1) - 1
    for cand in islice(count(2), int(sqrt(n) - 1)):
        if n % cand == 0:
            return False
    return True
def zeventien(vanaf,bereik):
    """Project Euler 17 helper: spell out the integers vanaf..bereik in words.

    Writes the word list (its str() repr) to 'zeventien.txt' and returns a
    flat list of the individual letters of the words from vanaf to bereik.

    Changes from the original: the output file is opened with a context
    manager (the original handle was never closed) and the unused
    "import csv" was dropped.

    NOTE(review): the final character-collection loop indexes from
    vanaf - 1, which only lines up with the word list when vanaf == 1 --
    confirm callers never pass a larger start.
    """
    dicti = {
        1 : "one",
        2 : "two",
        3 : "three",
        4 : "four",
        5 : "five",
        6 : "six",
        7 : "zeven",
        8 : "eight",
        9 : "nine",
        10 : "ten",
        11 : "eleven",
        12 : "twelve",
        13 : "thirteen",
        14 : "fourteen",
        15 : "fifteen",
        16 : "sixteen",
        17 : "seventeen",
        18 : "eighteen",
        19 : "nineteen",
        20 : "twenty",
        30 : "thirty",
        40 : "fourty",
        50 : "fifty",
        60 : "sixty",
        70 : "seventy",
        80 : "eighty",
        90 : "ninety",
        100: "onehundred",
        200: "twohundred",
        300: "threehundred",
        400: "fourhundred",
        500: "fivehundred",
        600: "sixhundred",
        700: "sevenhundred",
        800: "eighthundred",
        900: "ninehundred",
        1000: "onethousand"
        }
    words = []
    for i in range(vanaf, bereik + 1):
        if i in dicti:
            words.append(dicti[i])
        elif i < 100:
            j = i % 10   # units digit
            k = i - j    # tens part
            words.append(dicti[k] + dicti[j])
        else:
            new_i = i % 100
            hontal = floor(i / 100)  # hundreds digit
            if new_i in dicti:
                words.append(dicti[hontal] + "hundredand" + dicti[new_i])
            else:
                e = i % 10
                tiental = (i % 100) - e
                words.append(dicti[hontal] + "hundredand" + dicti[tiental] + dicti[e])
    with open('zeventien.txt', 'w') as file:
        file.write(str(words))
    getallenrij = []
    for plak in range(vanaf - 1, bereik):
        getallenrij += words[plak]  # += on a list splits the word into letters
    return getallenrij
#print(zeventien(1,1000))
def deeltester(getal):
    """Return the sum of the proper divisors of getal (divisors < getal)."""
    som = 0
    for kandidaat in range(1, getal):
        if (getal / kandidaat) % 1 == 0:  # kandidaat divides getal exactly
            som += kandidaat
    return som
#print(deeltester(220))
def eenentwintig(bereik):
    """Project Euler 21: collect amicable pairs below bereik.

    Returns {n: d(n)} recorded at the later-seen member of each pair,
    where d is the proper-divisor sum from deeltester.
    """
    divisor_sums = {}
    amicable = {}
    for n in range(1, bereik):
        divisor_sums[n] = deeltester(n)
        partner = divisor_sums[n]
        # A pair is detected once the partner's divisor sum is already
        # known and points back at n (perfect numbers are excluded).
        if partner != n and divisor_sums.get(partner) == n:
            amicable[n] = partner
    return amicable
#print(eenentwintig(10000))
def twaalf(bereik,doel):
    """Project Euler 12 scratch work: walk the triangular numbers and print
    each one's small-prime factors.

    bereik -- how many triangular numbers to generate
    doel -- target divisor count (only used by the commented-out block)

    NOTE(review): `lijst` is never appended to in the active code, so the
    early break on len(lijst) == doel can only trigger when doel == 0.
    """
    # bereik is the number of triangular numbers
    # doel is the desired number of divisors of that triangular number
    prime=[2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59]
    lijst=[]
    plist=[]
    som = []
    for n in range(1,bereik+1):
        som.append(n)
        trian = sum(som)  # n-th triangular number
        if len(lijst) == doel:
            #print(trian)
            break
        for p in prime:
            if (trian/p)%1 == 0:  # p divides trian
                plist.append(p)
            if trian <= p:
                # candidate primes exhausted for this trian: report and reset
                print(trian)
                print(plist)
                plist=[]
    """for d in range(1,trian+1):
        if (trian/d)%1 == 0:
            lijst.append(d)
        if trian == d:
            print(trian)
            print(lijst)
            print(len(lijst))
            print(plist)
            if len(lijst) == doel:
                print(trian)
                break
            lijst=[]"""
from decimal import Decimal
def zesentwintig(bereik): #decimal repeating sequence length
    """Project Euler 26 helper: scan unit fractions 1/i for i = 3..bereik
    and print those whose repeating decimal cycle exceeds 400 digits.

    BUG FIX: getcontext() was called but never imported (the module only
    does "from decimal import Decimal"), so every call raised NameError;
    it is now imported locally.
    """
    from decimal import getcontext
    getcontext().prec = 4000  # enough digits to expose long cycles
    for i in range(3,bereik+1):
        strFrac = str(Decimal(1)/Decimal(i))  # decimal expansion of 1/i
        if len(strFrac) > 27:  # skip short (terminating) expansions
            for j in range(2,6):          # digits dropped from the front
                for k in range(-1,-500,-1):  # digits dropped from the tail
                    decimals = strFrac[j:k]  # grab decimals
                    repeater = principal_period(decimals)  # find repetition
                    if repeater:
                        if len(repeater) > 400:
                            print("1/" + str(i) + " - " + str(len(repeater)))
                        break
                if repeater:
                    break
def zevenentwintig(a,b,n):
    """Project Euler 27 scratch work: scan quadratics t^2 + i*t + j for
    long runs of primes over consecutive t, logging new records to a file.

    NOTE(review): the i/j ranges always start at -1000 regardless of a/b;
    the log file is opened in append mode (it grows across runs); and
    `highestFormula` is only bound inside the record branch, so parameter
    choices that never reach it raise UnboundLocalError on return.
    """
    highest = 0
    f = open("test6.txt","a") #opens file with name of "test.txt"
    for i in range(-1000,a):
        for j in range(-1000,b):
            for teller in range(1,n):
                # print(i)
                formResult = (teller*teller) + (teller*i) + j
                # if isPrime(formResult):
                # f.write(str(teller) + "^2 + " + str(i) + "*" + str(teller) + " + " + str(j) + " = " + str(formResult)+ " prime :D \n")
                if formResult < 1:
                    # value dropped below 1: stop this (i, j) run
                    break
                if not isPrime(formResult):
                    if teller > highest:
                        # new longest prime run: log it and remember the formula
                        highest = teller
                        f.write(str(teller) + "^2 + " + str(i) + "*" + str(teller) + " + " + str(j) + " = " + str(formResult)+ " no sigar \n")
                        highestFormula = (str(teller) + "^2 + " + str(i) + "*" + str(teller) + " + " + str(j) + " = " + str(formResult) + " wordt: " + str(-i*j))
                    break
    f.close()
    return highestFormula
#print(zevenentwintig(1000,1000,2000))
def Euler31(target): #count how many ways there are to make 2 pounds
    """Project Euler 31: brute-force count of coin combinations summing to
    `target` pence, logging every combination found to coin2.txt.

    NOTE(review): the nested loop bounds are tuned for target == 200
    (2 pounds); for other targets some valid combinations may fall outside
    the hard-coded ranges. The log file is opened in append mode and grows
    across runs.
    """
    counter = 0
    f = open("coin2.txt","a")
    for p100 in [0,1,2]:
        for p50 in [0,1,2,3,4]:
            for p20 in range(0,11):
                for p10 in range(0,(21-2*p20)):
                    for p5 in range(0,(41-2*p10-10*p50)):
                        for p2 in range(0,101-5*p10-10*p20):
                            for p1 in range(0,(201-2*p2-10*p10-20*p20)):
                                if 100*p100+50*p50+20*p20+10*p10+5*p5+2*p2+p1==target:
                                    counter += 1
                                    f.write("€" + str(p100) + " + 50p=" + str(p50) + " + 20p=" + str(p20) + " + 10p=" + str(p10) + " + 5p=" + str(p5) + " + 2p=" + str(p2) + " + 1p=" + str(p1) + " = 200 #" + str(counter) + "\n")
    f.close()
    return counter
# print(Euler31(20))
from fractions import Fraction
def Euler32(): #confusing fraction finder
som = 1
for noemer in range(10,100):
for teller in range(10,noemer):
a=int(str(teller)[0])
b=int(str(teller)[1])
c=int(str(noemer)[0])
d=int(str(noemer)[1])
if d != 0:
if Fraction(teller,noemer) == Fraction(a,d) and b==c: #or Fraction(int(str(teller)[1]), int(str(noemer)[0])) or Fraction(int(str(teller)[0]), int(str(noemer)[1])) or Fraction(int(str(teller)[1]), int(str(noemer)[1]):
print(str(a) + "/" + str(d))
print(str(teller) + "/" + str(noemer) + "\n")
som *= Fraction(teller,noemer)
return som
#print(Euler32())
|
"""Utilities for Grappler autoparallel optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.core.framework import variable_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
FLAGS = tf.flags.FLAGS
def export_state_tuples(state_tuples, name):
  """Add each state tuple's c and h tensors to the collection `name`.

  Tensors are appended in (c, h) order, which import_state_tuples
  relies on when reading them back pairwise.
  """
  for state_tuple in state_tuples:
    tf.add_to_collection(name, state_tuple.c)
    tf.add_to_collection(name, state_tuple.h)
def import_state_tuples(state_tuples, name, num_replicas):
  """Rebuild LSTM state tuples from collection `name` for all replicas.

  Expects the collection to hold two tensors (c then h) per state per
  replica, in the order written by export_state_tuples; returns a tuple
  of len(state_tuples) * num_replicas LSTMStateTuples.
  """
  restored = []
  for i in range(len(state_tuples) * num_replicas):
    c = tf.get_collection_ref(name)[2 * i + 0]
    h = tf.get_collection_ref(name)[2 * i + 1]
    restored.append(tf.contrib.rnn.LSTMStateTuple(c, h))
  return tuple(restored)
def with_prefix(prefix, name):
  """Return name namespaced under prefix, joined with a '/'."""
  return prefix + "/" + name
def with_autoparallel_prefix(replica_id, name):
  """Namespace name under the AutoParallel scope for the given replica."""
  replica_scope = "AutoParallel-Replica-%d" % replica_id
  return with_prefix(replica_scope, name)
class UpdateCollection(object):
  """Update collection info in MetaGraphDef for AutoParallel optimizer."""

  def __init__(self, metagraph, model):
    # Fix up state and variable collections in place so they reference
    # the replicated (namespaced) nodes produced by AutoParallel.
    self._metagraph = metagraph
    self.replicate_states(model.initial_state_name)
    self.replicate_states(model.final_state_name)
    self.update_snapshot_name("variables")
    self.update_snapshot_name("trainable_variables")

  def update_snapshot_name(self, var_coll_name):
    """Prefix each variable's snapshot name with the replica-0 namespace.

    The global step's snapshot is left untouched -- presumably it stays
    shared across replicas (TODO confirm).
    """
    var_list = self._metagraph.collection_def[var_coll_name]
    for i, value in enumerate(var_list.bytes_list.value):
      var_def = variable_pb2.VariableDef()
      var_def.ParseFromString(value)
      if var_def.snapshot_name != "Model/global_step/read:0":
        var_def.snapshot_name = with_autoparallel_prefix(0, var_def.snapshot_name)
      value = var_def.SerializeToString()
      var_list.bytes_list.value[i] = value

  def replicate_states(self, state_coll_name):
    """Duplicate state nodes once per replica and namespace every copy."""
    state_list = self._metagraph.collection_def[state_coll_name]
    num_states = len(state_list.node_list.value)
    # First append copies of the original entries for replicas 1..N-1 ...
    for replica_id in range(1, FLAGS.num_gpus):
      for i in range(num_states):
        state_list.node_list.value.append(state_list.node_list.value[i])
    # ... then rewrite each entry with its replica-specific prefix.
    for replica_id in range(FLAGS.num_gpus):
      for i in range(num_states):
        index = replica_id * num_states + i
        state_list.node_list.value[index] = with_autoparallel_prefix(replica_id, state_list.node_list.value[index])
def auto_parallel(metagraph, model):
  """Rewrite `metagraph` in place with Grappler's AutoParallel optimizer.

  Replicates the graph across FLAGS.num_gpus replicas, copies the
  optimized graph_def back into the metagraph, and patches the state and
  variable collections via UpdateCollection.
  """
  from tensorflow.python.grappler import tf_optimizer
  rewriter_config = rewriter_config_pb2.RewriterConfig()
  rewriter_config.optimizers.append("autoparallel")
  rewriter_config.auto_parallel.enable = True
  rewriter_config.auto_parallel.num_replicas = FLAGS.num_gpus
  optimized_graph = tf_optimizer.OptimizeGraph(rewriter_config, metagraph)
  metagraph.graph_def.CopyFrom(optimized_graph)
  UpdateCollection(metagraph, model)
"""Veles NN workflow benchmark.
"""
from __future__ import division
import gc
import logging
import numpy
import time
from veles import prng
from veles.backends import CUDADevice, OpenCLDevice
from veles.config import root
from veles.dummy import DummyLauncher
from veles.loader import FullBatchLoader, IFullBatchLoader
from veles.mutable import Bool
from veles.plumbing import Repeater
from veles.units import Unit, IUnit
from veles.znicz.standard_workflow import StandardWorkflow
from zope.interface import implementer
# Hyper-parameters shared by every trainable layer.
base_lr = 0.01
wd = 0.0005

# AlexNet-like topology for the benchmark. For each layer, "->" holds the
# forward-unit parameters and "<-" the gradient-descent (training)
# parameters. Normalization/dropout/grouping layers are commented out.
root.alexnet.update({
    "layers": [
        # conv1: 64 kernels 11x11, stride 4
        {"type": "conv_str",
         "->": {"n_kernels": 64, "kx": 11, "ky": 11,
                "padding": (2, 2, 2, 2), "sliding": (4, 4),
                "weights_filling": "gaussian", "weights_stddev": 0.01,
                "bias_filling": "constant", "bias_stddev": 0},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
        {"type": "max_pooling",
         "->": {"kx": 3, "ky": 3, "sliding": (2, 2)}},
        # {"type": "norm", "n": 5, "alpha": 0.0001, "beta": 0.75},
        # {"type": "zero_filter",
        #  "grouping": 2},
        # conv2: 192 kernels 5x5
        {"type": "conv_str",
         "->": {"n_kernels": 192, "kx": 5, "ky": 5,
                "padding": (2, 2, 2, 2), "sliding": (1, 1),
                "weights_filling": "gaussian", "weights_stddev": 0.01,
                "bias_filling": "constant", "bias_stddev": 0.1},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
        {"type": "max_pooling", "->": {"kx": 3, "ky": 3,
                                       "sliding": (2, 2)}},
        # {"type": "norm", "n": 5, "alpha": 0.0001, "beta": 0.75},
        # {"type": "zero_filter", "grouping": 2},
        # conv3: 384 kernels 3x3
        {"type": "conv_str",
         "->": {"n_kernels": 384, "kx": 3, "ky": 3,
                "padding": (1, 1, 1, 1), "sliding": (1, 1),
                "weights_filling": "gaussian", "weights_stddev": 0.01,
                "bias_filling": "constant", "bias_stddev": 0},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
        # {"type": "zero_filter", "grouping": 2},
        # conv4: 256 kernels 3x3
        {"type": "conv_str",
         "->": {"n_kernels": 256, "kx": 3, "ky": 3,
                "padding": (1, 1, 1, 1), "sliding": (1, 1),
                "weights_filling": "gaussian", "weights_stddev": 0.01,
                "bias_filling": "constant", "bias_stddev": 0.1},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
        # {"type": "zero_filter", "grouping": 2},
        # conv5: 256 kernels 3x3
        {"type": "conv_str",
         "->": {"n_kernels": 256, "kx": 3, "ky": 3,
                "padding": (1, 1, 1, 1), "sliding": (1, 1),
                "weights_filling": "gaussian", "weights_stddev": 0.01,
                "bias_filling": "constant", "bias_stddev": 0.1},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
        {"type": "max_pooling",
         "->": {"kx": 3, "ky": 3, "sliding": (2, 2)}},
        # fc6: 4096 units
        {"type": "all2all",
         "->": {"output_sample_shape": 4096,
                "weights_filling": "gaussian", "weights_stddev": 0.005,
                "bias_filling": "constant", "bias_stddev": 1},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
        # {"type": "dropout", "dropout_ratio": 0.5},
        # fc7: 4096 units
        {"type": "all2all",
         "->": {"output_sample_shape": 4096,
                "weights_filling": "gaussian", "weights_stddev": 0.005,
                "bias_filling": "constant", "bias_stddev": 1},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}},
        # {"type": "dropout", "dropout_ratio": 0.5},
        # fc8: 1000-way softmax classifier
        {"type": "softmax",
         "->": {"output_sample_shape": 1000,
                "weights_filling": "gaussian", "weights_stddev": 0.01,
                "bias_filling": "constant", "bias_stddev": 0},
         "<-": {"learning_rate": base_lr,
                "learning_rate_bias": base_lr * 2,
                "weights_decay": wd, "weights_decay_bias": 0,
                "gradient_moment": 0.9, "gradient_moment_bias": 0.9}}
    ]})
@implementer(IFullBatchLoader)
class BenchmarkLoader(FullBatchLoader):
    """Full-batch loader that fabricates a random ImageNet-sized dataset."""

    # Minibatch size used throughout the benchmark workflow.
    BATCH = 128

    def load_data(self):
        """Create max(BATCH, 1000) random 224x224x3 samples.

        Only the last class gets samples; classes 0 and 1 are empty.
        NOTE(review): class_lengths presumably orders (test, validation,
        train) — confirm against FullBatchLoader.
        """
        self.class_lengths[0] = 0
        self.class_lengths[1] = 0
        self.class_lengths[2] = max(self.BATCH, 1000)
        self.create_originals((224, 224, 3))
        # Fill the pixel buffer with pseudo-random values.
        prng.get().fill(self.original_data.mem)
        # Labels are just the sample indices — content is irrelevant here.
        self.original_labels[:] = numpy.arange(
            self.original_data.shape[0], dtype=numpy.int32)
@implementer(IUnit)
class Timer(Unit):
    """Unit that records wall-clock timestamps each run and raises
    `complete` after `n_it` iterations (the first one is a dry run)."""

    def __init__(self, workflow, **kwargs):
        super(Timer, self).__init__(workflow, **kwargs)
        # Iterations at which the device queue is flushed before timing.
        self.sync_iterations = set(kwargs.get("sync_iterations", []))
        self.n_it = kwargs.get("n_it", 11)  # one for dry-run
        self.it = 0
        # Mutable boolean other units can gate on.
        self.complete = Bool(False)
        self.times = [time.time()]

    def initialize(self, **kwargs):
        """Reset the timestamp list and the iteration counter."""
        self.times[:] = [time.time()]
        self.it = 0

    def run(self):
        self.it += 1
        if self.it in self.sync_iterations:
            self.info("%s: Syncing device at iteration %d", self.name, self.it)
            self.workflow.device.sync()
            self.info("%s: Done", self.name)
        if self.it >= self.n_it:
            self.info("%s: Completed, syncing device at iteration %d",
                      self.name, self.it)
            # Sync before the final timestamp so queued kernels count.
            self.workflow.device.sync()
            self.times.append(time.time())
            self.info("%s: Done", self.name)
            self.complete <<= True
        else:
            self.times.append(time.time())
class BenchmarkWorkflow(StandardWorkflow):
    """Workflow that times the forward and backward passes separately by
    looping each half through its own Repeater/Timer pair."""

    def create_workflow(self):
        self.loader = self.real_loader = BenchmarkLoader(
            self, minibatch_size=BenchmarkLoader.BATCH,
            force_numpy=True)  # do not preload all dataset to device
        self.loader.link_from(self.start_point)
        # t0 marks the start of the forward loop; t1 closes it.
        self.t0 = Timer(self, name="Timer 0",
                        sync_iterations=(1,)).link_from(self.loader)
        self.repeater.link_from(self.t0)
        self.link_forwards(("input", "minibatch_data"), self.loader)
        self.forwards[0].unlink_before()
        self.forwards[0].link_from(self.repeater)
        self.t1 = Timer(self, name="Timer 1",
                        sync_iterations=(1,)).link_from(self.forwards[-2])
        self.repeater.link_from(self.t1)
        # Loop forwards until t1 completes, then release the last forward
        # unit and block re-entry into the loop.
        self.forwards[-1].gate_block = ~self.t1.complete
        self.forwards[0].gate_block = self.t1.complete
        self.link_evaluator(self.forwards[-1])
        self.link_decision(self.evaluator)
        self.decision.gate_skip = Bool(True)
        last_gd = self.link_gds(self.decision)
        # t2/t3 bound the backward (gradient-descent) loop the same way.
        self.t2 = Timer(self, name="Timer 2",
                        sync_iterations=(1,)).link_from(self.gds[-1])
        self.repeater2 = Repeater(self).link_from(self.t2)
        self.gds[-2].unlink_before()
        self.gds[-2].link_from(self.repeater2)
        self.t3 = Timer(self, name="Timer 3",
                        sync_iterations=(1,)).link_from(last_gd)
        self.repeater2.link_from(self.t3)
        self.end_point.link_from(self.t3)
        self.end_point.gate_block = ~self.t3.complete
        self.repeater2.gate_block = self.t3.complete

    def initialize(self, device, **kwargs):
        super(BenchmarkWorkflow, self).initialize(device, **kwargs)
        # Re-link the last forward unit into the timing loop after the
        # standard initialization rewired the graph.
        self.forwards[-1].unlink_before()
        self.forwards[-1].link_from(self.repeater)
# Limit training to a single epoch for the benchmark.
# NOTE(review): indentation was lost in this file; module-level placement is
# inferred — confirm it is not meant to live inside a function.
root.decision.update({"max_epochs": 1})
def main():
    """Run the benchmark for every (backend, dtype) combination and log the
    per-pass timings; resources are torn down aggressively between runs."""
    def nothing():
        pass

    def clBLASOff():
        logging.info("\nclBLAS = OFF\n")
        root.common.engine.ocl.clBLAS = False

    def clBLASOn():
        logging.info("\nclBLAS = ON\n")
        root.common.engine.ocl.clBLAS = True

    # Each backend entry pairs a device class with a configuration hook.
    for backend in ((CUDADevice, nothing),
                    (OpenCLDevice, clBLASOff),
                    (OpenCLDevice, clBLASOn),
                    ):
        device_class = backend[0]
        backend[1]()  # apply backend-specific configuration
        for dtype in ("float",
                      "double",
                      ):
            logging.info("\n%s: benchmark started for dtype %s\n",
                         device_class, dtype)
            root.common.precision_type = dtype
            root.common.precision_level = 0
            try:
                device = device_class()
            except Exception as e:
                logging.error("Could not create %s: %s", device_class, e)
                # Device unavailable: skip the remaining dtypes for this
                # backend.
                break
            launcher = DummyLauncher()
            wf = BenchmarkWorkflow(launcher,
                                   loader_name="imagenet_loader",
                                   decision_config=root.decision,
                                   layers=root.alexnet.layers,
                                   loss_function="softmax")
            wf.initialize(device, snapshot=False)
            if device_class is CUDADevice:
                wf.generate_graph("alexnet.svg")
            wf.run()
            # t1 bounds the forward loop, t3 the backward loop; times[1]
            # skips the dry-run interval, hence the (n_it - 1) divisor.
            logging.info("Forward pass: %.2f msec",
                         1000.0 * (wf.t1.times[-1] - wf.t1.times[1]) /
                         (wf.t1.n_it - 1))
            logging.info("Backward pass: %.2f msec",
                         1000.0 * (wf.t3.times[-1] - wf.t3.times[1]) /
                         (wf.t3.n_it - 1))
            logging.info("\n%s: benchmark ended for dtype %s\n",
                         device_class, dtype)
            # Full garbage collection
            del wf
            del launcher
            del device
            pool = Unit.reset_thread_pool()
            gc.collect()
            pool.shutdown()
            del pool
            gc.collect()
            # For pypy which can garbage collect with delay
            for _ in range(100):
                gc.collect()
    logging.info("End of job")
# Script entry point: configure logging and run the whole benchmark suite.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    main()
|
# Read a distance in metres and print its value in the other metric units.
m = float(input('Insira a distancia em metros: '))
print('A medida de {}m equivale a:'.format(m))
# Larger units divide, smaller units multiply — the same arithmetic ops as
# before, so the floating-point output is identical.
for label, divisor in (('Km', 1000), ('Hm', 100), ('dam', 10)):
    print('{}{}'.format(m / divisor, label))
for label, factor in (('dm', 10), ('cm', 100), ('mm', 1000)):
    print('{}{}'.format(m * factor, label))
#! /usr/bin/python
import cgi, os
import cgitb; cgitb.enable()
import csv
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
def read_template(filename):
    """Return the entire text content of *filename*."""
    with open(filename) as template_file:
        return template_file.read()
def blast(from_email, to_email, subject):
    """Send a multipart (plain + HTML) email through SendGrid's SMTP relay.

    The HTML body is loaded from mail_template.html in the working
    directory; the plain part only asks the client to render HTML.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = from_email
    msg['To'] = to_email
    text = "Please allow the Html Version\n"
    html = read_template("mail_template.html")
    # Login credentials
    # SECURITY NOTE(review): placeholder credentials are hard-coded; load
    # them from the environment or a secrets store before deploying.
    username = 'user username here'
    password = "your password here"
    # Record the MIME types of both parts - text/plain and text/html.
    part1 = MIMEText(text, 'plain')
    part2 = MIMEText(html, 'html')
    # Attach parts into message container.
    msg.attach(part1)
    msg.attach(part2)
    # Open a connection to the SendGrid mail server
    s = smtplib.SMTP('smtp.sendgrid.net', 587)
    # Authenticate
    s.login(username, password)
    # sendmail function takes 3 arguments: sender's address, recipient's
    # address, and the message to send - sent here as one string.
    s.sendmail(from_email, to_email, msg.as_string())
    s.quit()
# CGI part: parse the multipart form submitted by the upload page.
form = cgi.FieldStorage()
# Get data from fields
mail_from = form.getvalue('mail_from')
mail_sub = form.getvalue('mail_sub')
# NOTE(review): 'mail_count' is read but never used below — confirm intent.
burst = form.getvalue('mail_count')
# Uploaded CSV of recipients.
fileobject = form['file']
fname = fileobject.filename
def del_file():
    """Best-effort removal of the temporary uploaded file under /tmp.

    Fix: the original shelled out via os.system("rm /tmp/" + fname), which
    is a shell-injection hole when the uploaded filename contains shell
    metacharacters. Remove the file directly instead. Errors are swallowed
    to keep the original best-effort semantics (os.system's exit status was
    discarded too).
    """
    try:
        os.remove("/tmp/" + fname)
    except OSError:
        pass
def read_file():
    """Save the uploaded file's content to /tmp under its original name.

    Fix: the original called f.close() inside the `with` block, which is
    redundant — the context manager already closes the file on exit.
    NOTE(review): content comes from fileobject.file.read(); if uploads can
    be binary, mode 'wb' would be required — 'w' kept to preserve behavior.
    """
    content = fileobject.file.read()
    with open('/tmp/' + fname, 'w') as f:
        f.write(content)
read_file()
# Open the stored CSV and send one mail per row.
with open('/tmp/'+fname) as csvfile:
    spamreader = csv.reader(csvfile, delimiter=",", quotechar='|')
    for line in spamreader:
        # NOTE(review): line[1] assumes every row has at least two columns
        # (recipient address in column 1) — a short row raises IndexError.
        blast(mail_from,line[1],mail_sub)
|
from classmerge import mergesort
from main import sort_given_list
from classsum import sumAll
# NOTE(review): `__init__` is not defined at module scope and is not
# imported above, so this raises NameError at runtime — confirm whether
# `__name__`, a module attribute, or something else was intended.
print(__init__)
# Generated by Django 3.1.5 on 2021-01-09 18:48
from django.db import migrations, models


class Migration(migrations.Migration):
    """Add the `is_verified` flag (default False) to the Benefit model."""

    dependencies = [
        ('benefits', '0002_benefit_img_file'),
    ]

    operations = [
        migrations.AddField(
            model_name='benefit',
            name='is_verified',
            field=models.BooleanField(default=False),
        ),
    ]
|
from django.conf.urls import url
from accounts.views import login_view, register_view, logout_view

app_name = 'accounts'

# URL routes for the accounts app.
# NOTE(review): the patterns lack a trailing '$' anchor (e.g. r'^login/$'),
# so r'^login/' also matches any path that merely starts with "login/" —
# confirm this prefix matching is intended.
urlpatterns = [
    # login page
    url(r'^login/', login_view, name="login"),
    # logout page
    url(r'^logout/', logout_view, name="logout"),
    # register page
    url(r'^register/', register_view, name="register"),
]
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Stdlib
import codecs
import xml
# 3rd party
import pyxb
# D1
import d1_common.types.dataoneTypes as dataoneTypes
import d1_common.types.dataoneErrors as dataoneErrors
import d1_common.util
# Stdlib
import os
# D1
import d1_common.types.dataoneTypes_v2_0 as v2
def get_test_filepath(filename):
    """Return the absolute path of *filename* inside the test_docs dir."""
    test_docs_dir = d1_common.util.abs_path('test_docs')
    return os.path.join(test_docs_dir, filename)
def read_test_file(filename, mode_str='rb'):
    """Read and return the contents of a test document (binary by default)."""
    path = get_test_filepath(filename)
    with open(path, mode_str) as doc_file:
        return doc_file.read()
def read_utf8_to_unicode(filename):
    """Read a UTF-8 encoded test document and return it as unicode text.

    Fix: the original opened the file with codecs.open() and never closed
    it (resource leak); a context manager now releases the handle
    deterministically. Return value is unchanged.
    """
    utf8_path = get_test_filepath(filename)
    with codecs.open(utf8_path, encoding='utf-8', mode='r') as unicode_file:
        return unicode_file.read()
def read_test_xml(filename, mode_str='r'):
    """Parse a test document into a DataONE v2 PyXB object."""
    return v2.CreateFromDocument(read_test_file(filename, mode_str))
def deserialize_and_check(doc, shouldfail=False):
    """Deserialize *doc* and assert the outcome matches *shouldfail*.

    Raises the parse error when an unexpected failure occurs, and raises
    Exception when a parse succeeds that was expected to fail.
    """
    try:
        dataoneTypes.CreateFromDocument(doc)
    except (pyxb.PyXBException, xml.sax.SAXParseException):
        if not shouldfail:
            raise
        return
    if shouldfail:
        raise Exception('Did not receive expected exception')
def deserialize_exception_and_check(doc, shouldfail=False):
    """Deserialize a DataONE error document and return the parsed object.

    When *shouldfail* is True, a parse failure is the expected outcome and
    returns None; a successful parse then raises Exception instead.
    """
    try:
        obj = dataoneErrors.CreateFromDocument(doc)
    except (pyxb.PyXBException, xml.sax.SAXParseException):
        if not shouldfail:
            raise
        return
    if shouldfail:
        raise Exception('Did not receive expected exception')
    return obj
|
#1235. Maximum Profit in Job Scheduling
#We have n jobs, where every job is scheduled to be done from startTime[i] to endTime[i], obtaining a profit of profit[i].
#You're given the startTime, endTime and profit arrays, return the maximum profit you can take such that there are no two jobs in the subset with overlapping time range.
#If you choose a job that ends at time X you will be able to start another job that starts at time X.
#Example 1:
#Input: startTime = [1,2,3,3], endTime = [3,4,5,6], profit = [50,10,40,70]
#Output: 120
#Explanation: The subset chosen is the first and fourth job.
#Time range [1-3]+[3-6] , we get profit of 120 = 50 + 70.
class Solution:
    """LeetCode 1235 — Maximum Profit in Job Scheduling.

    Fixes: the annotations used `List` without `from typing import List`,
    which raises NameError when the method definition is executed; they are
    now string literals so they are never evaluated. The unreachable
    commented-out alternative solutions and analysis strings that followed
    the `return` were removed, and empty input now returns 0 instead of
    raising IndexError.
    """

    def jobScheduling(self, startTime: 'List[int]', endTime: 'List[int]',
                      profit: 'List[int]') -> int:
        """Return the maximum total profit from non-overlapping jobs.

        Jobs are processed in order of end time while maintaining two
        parallel arrays: dp_endtime (strictly increasing end times) and
        dp_profit (best achievable profit by that end time, strictly
        increasing). Each job binary-searches the latest state whose end
        time <= its start and extends it. O(n log n) time, O(n) space.
        """
        jobs = sorted(
            (endTime[i], startTime[i], profit[i])
            for i in range(len(startTime)))
        if not jobs:
            return 0
        dp_endtime, dp_profit = [], []

        def binary_search(arr, target):
            # Index of the last entry <= target, or -1 when none exists.
            left, right = 0, len(arr) - 1
            while left <= right:
                mid = left + (right - left) // 2
                if arr[mid] <= target:
                    left = mid + 1
                else:
                    right = mid - 1
            if right < 0 or arr[right] > target:
                return -1
            return right

        for end, start, cur_profit in jobs:
            if not dp_endtime:
                dp_endtime.append(end)
                dp_profit.append(cur_profit)
                continue
            i = binary_search(dp_endtime, start)
            tot_profit = cur_profit if i == -1 else cur_profit + dp_profit[i]
            last_profit = dp_profit[-1]
            # If this job ends exactly at the current frontier and beats
            # it, replace the frontier entry rather than duplicating it.
            if end == dp_endtime[-1] and tot_profit > last_profit:
                dp_endtime.pop()
                dp_profit.pop()
            # Only keep states that strictly improve the best profit.
            if tot_profit > last_profit:
                dp_endtime.append(end)
                dp_profit.append(tot_profit)
        return dp_profit[-1]
|
from inmmry import Inmmry
# Console menu loop for the contact manager; repeats until the user picks
# any option other than 1-5.
while True:
    print("*"*75)
    print("1.add 2.view 3.update 4.delete 5.search 6.exit")
    print("*"*75)
    # NOTE(review): int() raises ValueError on non-numeric input — confirm
    # whether a crash on bad input is acceptable here.
    ch = int(input("enter choice"))
    if ch == 1:
        Inmmry.addContact()
    elif ch == 2:
        Inmmry.viewContact()
    elif ch == 3:
        Inmmry.updateContact()
    elif ch == 4:
        Inmmry.deleteContact()
    elif ch == 5:
        Inmmry.searchContact()
    else:
        # Any other choice (e.g. 6) exits the loop.
        break
|
import os
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()

# Resolve the master call-report file relative to this script's location.
path = os.path.dirname(__file__)
data_path = os.path.join(path, "..", "..", "data", "master_cr_file.txt")
df = pd.read_csv(data_path, sep='\t', low_memory=False)
df['period'] = pd.to_datetime(df['period'])

# Count distinct bank identifiers (IDRSSD) per year.
number_banks = df.pivot_table(index='year',
                              values=['IDRSSD'],
                              aggfunc=pd.Series.nunique)

# Bar chart of the number of reporting banks per year.
plt.title('Total number of banks')
plt.bar(number_banks.index, number_banks.IDRSSD)
plt.xlabel('Year')
plt.ylabel('Number of Banks')
plt.xticks(number_banks.index, rotation=45)
plt.show()
|
import asyncio
import json
from django.contrib.auth import get_user_model
from channels.consumer import SyncConsumer, AsyncConsumer
from channels.db import database_sync_to_async
from .models import Thread, ChatMessage
User = get_user_model()
class TaskConsumer(AsyncConsumer):
    """Background consumer that delivers a delayed "welcome" chat message."""

    async def welcome_message(self, event):
        """Handle a `welcome_message` event.

        Expects `event` to carry `message`, `sender_id`, `receiver_id` and
        an optional `timeout` in seconds (default 20) to wait before the
        message is created.
        """
        print(event)
        timeout = event.get("timeout", 20)
        await asyncio.sleep(timeout)
        message = event.get("message")
        sender_id = event.get('sender_id')
        receiver_id = event.get('receiver_id')
        sender_user = await self.get_user_by_id(sender_id)
        receiver_user = await self.get_user_by_id(receiver_id)
        thread_obj = await self.get_thread(sender_user, receiver_user.username)
        await self.create_welcome_chat_message(thread_obj, sender_user, message)

    @database_sync_to_async
    def get_user_by_id(self, user_id):
        """Fetch a User by primary key (raises User.DoesNotExist if absent)."""
        return User.objects.get(id=user_id)

    @database_sync_to_async
    def get_thread(self, user, other_username):
        """Get or create the Thread between `user` and `other_username`."""
        return Thread.objects.get_or_new(user, other_username)[0]

    @database_sync_to_async
    def create_welcome_chat_message(self, thread, user, message):
        """Persist the welcome message in the given thread."""
        return ChatMessage.objects.create(thread=thread, user=user, message=message)
class ChatConsumer(AsyncConsumer):
    """Per-connection consumer for a 1:1 chat thread.

    The thread (and its channel-layer group) is resolved from the
    `username` URL kwarg on connect; received messages are persisted and
    fanned out to the group.
    """

    async def websocket_connect(self, event):
        """Join the thread's group and accept the socket."""
        # when the socket connects
        self.other_username = self.scope['url_route']['kwargs']['username']
        user = self.scope['user']
        thread_obj = await self.get_thread(user, self.other_username)
        # Cache the thread so websocket_receive can persist messages to it.
        self.cfe_chat_thread = thread_obj
        self.room_group_name = thread_obj.room_group_name  # group
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name
        )
        # NOTE(review): rando_user is fetched but never read afterwards.
        self.rando_user = await self.get_name()
        await self.send({
            "type": "websocket.accept"
        })

    async def websocket_receive(self, event):  # websocket.receive
        """Persist an incoming message and broadcast it to the group."""
        message_data = json.loads(event['text'])
        user = self.scope['user']
        username = "unknown"
        if user.is_authenticated:
            username = user.username
        message_data["user"] = username
        await self.create_chat_message(user, message_data['msg'])
        final_message_data = json.dumps(message_data)
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': final_message_data
            }
        )

    async def broadcast_message(self, event):
        """Send a placeholder, wait, then deliver the event's message."""
        await self.send({
            "type": "websocket.send",
            "text": json.dumps({'msg': "Loading data please wait...", 'user': 'admin'})
        })
        await asyncio.sleep(15)  # chatbot? API -> another service --> response --> send
        await self.send({
            "type": "websocket.send",
            "text": event['message']
        })

    async def chat_message(self, event):
        """Group handler: relay a broadcast message to this socket."""
        await self.send({
            "type": "websocket.send",
            "text": event['message']
        })

    async def websocket_disconnect(self, event):
        """Leave the thread's group when the socket closes."""
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name
        )

    @database_sync_to_async
    def get_name(self):
        """Return the first user's username (sample/demo helper)."""
        return User.objects.all()[0].username

    @database_sync_to_async
    def get_thread(self, user, other_username):
        """Get or create the Thread between `user` and `other_username`."""
        return Thread.objects.get_or_new(user, other_username)[0]

    @database_sync_to_async
    def create_chat_message(self, user, message):
        """Persist `message` in the thread cached at connect time."""
        thread = self.cfe_chat_thread
        return ChatMessage.objects.create(thread=thread, user=user, message=message)
#numbers2text
def main():
    """Read space-separated character codes and print the decoded text."""
    coded = input("Please enter the coded message: ")
    decoded = "".join(chr(int(token)) for token in coded.split())
    print(decoded)
main()
# Sample input (the codes below decode to "Buenos días!"):
# 66 117 101 110 111 115 32 100 237 97 115 33
|
from shutil import copyfile
from scipy.interpolate import CubicSpline
import datetime
import os
import sys
import matplotlib.pyplot as plt
import numpy.random as rnd
import time
from slant import slant
from data_preprocess import *
from myutil import *
def print_image_in_latex_v4(file_to_write, Image_file_prefix, Image_file_suffix, num_w, num_l, figure_caption):
    """Append one LaTeX figure to *file_to_write* containing 20 numbered
    sub-floats (prefix + index + suffix), three per row, repeated once per
    (num_w, num_l) combination, closed with *figure_caption*."""
    lines = ['\\begin{figure}[!!t]\n', '\\centering\n']
    for _ in range(num_w):
        for _ in range(num_l):
            for ext in range(20):
                plot_file = Image_file_prefix + str(ext) + Image_file_suffix
                lines.append('\\subfloat{ \t\\includegraphics[scale=0.25]{' + plot_file + '}}\n')
                # Break the row after every third image.
                if ext % 3 == 2:
                    lines.append('\\vspace{-3mm}\n')
    lines.append('\\caption{' + figure_caption + '}\n')
    lines.append('\\end{figure}\n')
    with open(file_to_write, 'a') as out:
        out.writelines(lines)
def print_image_in_latex_v1(file_prefix, fr_list, cpk_lm_list, lm_list,
                            file_to_write='../opinion_dynamics_others/write_ups/Plot_files.tex'):
    """Append one LaTeX figure per (fraction, cpk_lambda, lambda) plot file.

    Fixes a NameError: the file name was built with `str(c)`, but the loop
    variable is `cpk_l`. Also generalizes the hard-coded output path into
    the keyword argument `file_to_write` (default unchanged, so existing
    callers are unaffected).
    """
    with open(file_to_write, 'a') as file:
        for f in fr_list:
            for cpk_l in cpk_lm_list:
                for l in lm_list:
                    plot_file = ('../Plots/Plots_with_lambda/Time_vs_MSE/all_combo/'
                                 + file_prefix + '.fraction.' + str(f)
                                 + '.cpk_lambda.' + str(cpk_l)
                                 + '.lambda.' + str(l) + '.png')
                    file.write('\\begin{figure}[h]\n')  # \label{online}
                    file.write('\\includegraphics[width=\\linewidth,keepaspectratio]{' + plot_file + '} ')
                    file.write('\\end{figure}')
def print_image_in_latex_v2(file_prefix, fr_list, lm_list):
    """Append one LaTeX figure per (fraction, lambda) tuned-cherrypick plot."""
    file_to_write = '../opinion_dynamics_others/write_ups/Plot_files.tex'
    plot_dir = '../Plots/Plots_with_lambda/Time_vs_MSE/tuned_cpk/'
    with open(file_to_write, 'a') as out:
        for fraction in fr_list:
            for lamb in lm_list:
                plot_file = (plot_dir + file_prefix + '.fraction.' + str(fraction)
                             + '.lambda.' + str(lamb) + '.png')
                out.write('\\begin{figure}[h]\n')  # \label{online}
                out.write('\\includegraphics[width=\\linewidth,keepaspectratio]{' + plot_file + '}\n ')
                out.write('\\end{figure}\n\n\n')
def print_image_in_latex_v3(file_prefix, fr_list):
    """Append one LaTeX figure per fraction for the lambda-tuned plots."""
    file_to_write = '../opinion_dynamics_others/write_ups/Plot_files.tex'
    plot_dir = '../Plots/Plots_with_lambda/Time_vs_MSE/tuned_over_lambda/'
    with open(file_to_write, 'a') as out:
        for fraction in fr_list:
            plot_file = plot_dir + file_prefix + '.fraction.' + str(fraction) + '.png'
            out.write('\\begin{figure}[h]\n')  # \label{online}
            out.write('\\includegraphics[width=\\linewidth,keepaspectratio]{' + plot_file + '} ')
            out.write('\\end{figure}')
def print_image_in_latex_sequential_way():  # contiguous figures
    """Regenerate Plot_files.tex with one figure per image under Fig/,
    preceded by a section heading per sub-directory (cherrypick vs
    nowcasting, chosen by the directory name's second '_' token)."""
    file_to_write = '../opinion_dynamics_others/write_ups/Plot_files.tex'
    folder = '../opinion_dynamics_others/write_ups/Fig'
    # Start from a clean output file.
    if os.path.exists(file_to_write):
        os.remove(file_to_write)
    with open(file_to_write, 'a') as file:
        for directory in os.listdir(folder):
            if directory.split('_')[1] == 'cpk':
                file.write('\\section{Tune over cherrypick}\n')
            else:
                file.write('\\section{Tune over nowcasting}\n')
            for files in os.listdir(folder + '/' + directory):
                plot_file = 'Fig/' + directory + '/' + files
                file.write('\\begin{figure}[bp!]\n')  # \label{online}
                file.write('\\includegraphics[width=\\linewidth,keepaspectratio]{' + plot_file + '}\n')
                # NOTE(review): str.strip('.png') strips a character SET,
                # not a suffix — captions can lose leading/trailing letters.
                file.write('\\caption{' + files.strip('.png') + '}')
                file.write('\\end{figure}\n')
def print_image_in_latex_using_subfig():  # contiguous figures
    """Regenerate Plot_files.tex using subfigures: one figure per Fig/
    sub-directory with a long shared caption describing the experiment."""
    file_to_write = '../opinion_dynamics_others/write_ups/Plot_files.tex'
    folder = '../opinion_dynamics_others/write_ups/Fig'
    # Start from a clean output file.
    if os.path.exists(file_to_write):
        os.remove(file_to_write)
    with open(file_to_write, 'a') as file:
        for directory in os.listdir(folder):
            file.write('\\begin{figure}[ht!]\n')
            file.write('\\centering\n')
            # Caption shared by every figure, with a per-directory suffix.
            section = 'Sentiment prediction performance using a 10$\\percent$ held-out set for each real-world dataset.'
            section += ' Performance is measured in terms of mean squared error (MSE) on the sentiment value.'
            section += 'For each message in the held-out set, we predict the sentiment value m given the history up to T hours before the time of the message,'
            section += 'for different values of T. Nowcasting corresponds to T = 0 and forecasting to T $ > $ 0. '
            section += 'The sentiment value m $\\in$ (-1, 1) and the sentiment polarity sign (m) $\\in$ \\{-1, 1\\}.'
            if directory.split('_')[1] == 'cpk':
                section += 'We have tune cherrypick results over $\\lambda$ here.'
            else:
                section += 'We have tuned $\\lambda$ for each method based on nowcasting performance.'
            for files in os.listdir(folder + '/' + directory):
                plot_file = 'Fig/' + directory + '/' + files
                file.write('\\begin{subfigure}{.4\\linewidth}\n')  # \label{online}
                file.write('\t\\includegraphics[scale=0.25]{' + plot_file + '}\n')
                # NOTE(review): strip('.png') strips characters, not a suffix.
                file.write('\t\\caption{' + files.strip('.png') + '}\n')
                file.write('\\end{subfigure}\n')
            file.write('\\caption{' + section + '}\n')
            file.write('\\end{figure}\n')
def print_image_in_latex_using_subfig_v1(file_to_write, text, directory):  # contiguous figures
    """Append a LaTeX figure with one subfigure per file in
    `directory + text + '/'`; image paths are emitted relative to `Fig/`
    and the figure caption is `text`.

    Fix: captions were produced with files.strip('.png'), which strips any
    of the characters '.', 'p', 'n', 'g' from BOTH ends of the name (e.g.
    'plot.png' -> 'lot'); os.path.splitext drops only the extension.
    """
    directory += text + '/'
    with open(file_to_write, 'a') as file:
        file.write('\\begin{figure}[ht!]\n')
        file.write('\\centering\n')
        section = text
        for files in os.listdir(directory):
            plot_file = 'Fig/' + text + '/' + files
            caption = os.path.splitext(files)[0]
            file.write('\\begin{subfigure}{.4\\linewidth}\n')  # \label{online}
            file.write('\t\\includegraphics[scale=0.25]{' + plot_file + '}\n')
            file.write('\t\\caption{' + caption + '}\n')
            file.write('\\end{subfigure}\n')
        file.write('\\caption{' + section + '}\n')
        file.write('\\end{figure}\n')
# def print_image_in_latex( file_prefix ):
# fr_list = [0,1]
# cpk_lm_list = [0,1]
# lm_list = [0,1]
# # print_image_in_latex_v1( file_prefix, fr_list, cpk_lm_list, lm_list)
# print_image_in_latex_v2( file_prefix, fr_list, lm_list )
# # print_image_in_latex_v3( file_prefix, fr_list )
def print_for_kile(data, row_titles, column_titles):
    # NOTE: Python 2 code (print statements). Emits a LaTeX tabular of
    # `data` (one row per row_title) to stdout; '_' in titles becomes ' '.
    row_title_with_replacement = []
    for row in row_titles:
        row_title_with_replacement.append(row.replace('_', ' '))
    row_titles = row_title_with_replacement
    col_title_with_replacement = []
    for col in column_titles:
        col_title_with_replacement.append(col.replace('_', ' '))
    column_titles = col_title_with_replacement
    print '\\begin{center}\n\\begin{tabular}{|c|c|c|c|}\n\\hline'
    # Header row: "Dataset" followed by the column titles.
    header_str = 'Dataset '
    for column in column_titles:
        header_str += ' & '+ column
    print header_str + '\\\\'
    print '\\hline \n'
    print_str = ''
    for row, data_row in zip(row_titles, data):
        print_str += (row + ' & ' + ' & '.join(map(str, data_row)))
        print_str += '\\\\\n'
    print print_str + '\\\\'
    # NOTE(review): missing backslash — this prints literal 'hline', not
    # the LaTeX command '\hline'. Confirm before fixing.
    print 'hline'
    print '\\end{tabular}'
    print '\\end{center}'
# def find_index( index, num_of_elm ):
# param_index = np.zeros( len( num_of_elm ))
# for i in range( len( num_of_elm ) -1 ):
# param_index[i] = index/ num_of_elm[i]
# index = index % num_of_elm[i]
# param_index[-1] = index
# return param_index
#************************************************************************
def list_images_for_latex(file_prefix_list, file_to_write, Image_file_prefix, plot_file_suffix1, plot_file_suffix2):
    """Write (overwriting) a LaTeX figure with two sub-float images per
    dataset prefix, skipping a fixed set of excluded datasets and breaking
    the row after every second included dataset."""
    excluded = ['trump_data', 'GTwitter', 'VTwitter', 'MlargeTwitter', 'Twitter']
    included = [prefix for prefix in file_prefix_list if prefix not in excluded]
    with open(file_to_write, 'w') as out:
        out.write('\\begin{figure}[!!t]\n')
        out.write('\\centering\n')
        for position, prefix in enumerate(included, start=1):
            base = Image_file_prefix + prefix
            out.write('\\subfloat{ \t\\includegraphics[scale=0.20]{' + base + plot_file_suffix1 + '}}\n')
            out.write('\\subfloat{ \t\\includegraphics[scale=0.20]{' + base + plot_file_suffix2 + '}}\n')
            # Row break after every second dataset.
            if position % 2 == 0:
                out.write('\\vspace{-3mm}\n')
        out.write('\\caption{Here we plot MSE Vs time span for rest datasets}\n')
        out.write('\\end{figure}\n')
# file.write('\\paragraph')
def write_table_sanitize_test(file_to_read, file_prefix_list):
    # NOTE: Python 2 code (print statements). Prints a LaTeX table to
    # stdout comparing MSE/FR before and after test-set sanitization for
    # the Robust_cherrypick (RCPK) and cherrypick (CPK) methods.

    def get_substr(v):
        # Round values to 3 decimals for display.
        return str(round(v, 3))  # [:6]

    result = load_data(file_to_read)
    print '\\begin{table}[h!]'
    print '\t\\begin{center}'
    print '\t\t\\caption{Performance after sanitizing test sets}'
    print '\t\t\\label{tab:Table1}'
    print '\t\t\\begin{tabular}{|l|l|l|l|l|l|l|l|l|}'
    print '\t\t\\hline'
    print '\t\t\\& \\multicolumn{4}{|l|}{MSE} & \\multicolumn{4}{|l|}{FR}\\'
    print '\t\t\tDataset & RCPK & RCPK-ST & CPK & CPK-ST & RCPK & RCPK-ST & CPK & CPK-ST \\\\'
    print '\t\t\t\\hline'
    for file in file_prefix_list:
        data = []
        # Column order: MSE block then FR block; within each block:
        # RCPK, RCPK-ST, CPK, CPK-ST ('' = original, '_san' = sanitized).
        for measure in ['MSE', 'FR']:
            for method in ['Robust_cherrypick', 'cherrypick']:
                for ext in ['', '_san']:
                    data.append(result[method][file][measure + ext])
        print_str = (file.replace('_', ' ') + ' & ' + ' & '.join(map(get_substr, data)))
        print_str += '\\\\'
        print '\t\t\t' + print_str + '\n\t\t\t' + '\\hline' + '\n'
    print '\t\t\t\\hline'
    print '\t\t\\end{tabular}'
    print '\t\\end{center}'
    print '\\end{table}'
def main():
    """Top-level driver for the LaTeX/report-generation tasks.

    Each boolean flag below enables one independent reporting task.
    With the values as committed, only the sanitized-test-set table is
    written (``write_table_sanitize_test``).  All heavy lifting is
    delegated to module-level helpers.
    """
    file_prefix_list = ['barca', 'british_election', 'GTwitter',
                        'jaya_verdict', 'JuvTwitter', 'MlargeTwitter',
                        'MsmallTwitter', 'real_vs_ju_703', 'trump_data',
                        'Twitter', 'VTwitter']
    # Task-selection flags -- flip one to True to run that task.
    print_slant_result = False
    print_image_in_latex_flag = False
    print_intensity_latex = False
    print_opinion_lambda = False
    print_plots_latex = False
    write_table_sanitize_test_flag = True
    print_combined_variation_fraction = False

    if print_combined_variation_fraction:
        # Datasets excluded from the combined-variation figures.
        excluded = {'GTwitter', 'MlargeTwitter', 'trump_data',
                    'Twitter', 'VTwitter'}
        for file in file_prefix_list:
            if file not in excluded:
                print('\\subfloat{ \t\\includegraphics[scale=0.15]{FIG_new/' + file + '_combined_MSE.jpg' + '}}')
        print('\\vspace{-3mm}')
        for file in file_prefix_list:
            if file not in excluded:
                print('\\subfloat{ \t\\includegraphics[scale=0.15]{FIG_new/' + file + '_combined_FR.jpg' + '}}')

    if write_table_sanitize_test_flag:
        file_to_read = '../result_sanitize_test/f0.8t0.2_MSE_FR'
        # These two datasets are deliberately left out of the table.
        file_prefix_list.remove('MlargeTwitter')
        file_prefix_list.remove('trump_data')
        write_table_sanitize_test(file_to_read, file_prefix_list)

    if print_plots_latex:
        file_to_write = '../../../Dropbox/Others/Paramita/paper/0511expVarFracNew.tex'
        # Start from a clean output file.
        if os.path.exists(file_to_write):
            os.remove(file_to_write)
        Image_file_pre = 'FIG_new/'
        Image_file_post1 = '_cherrypick_MSE.jpg'
        Image_file_post2 = '_Robust_cherrypick_MSE.jpg'
        list_images_for_latex(file_prefix_list, file_to_write,
                              Image_file_pre, Image_file_post1,
                              Image_file_post2)

    if print_slant_result:
        for file_prefix in file_prefix_list:
            print_slant_results(file_prefix)

    if print_image_in_latex_flag:
        print_image_in_latex_using_subfig()

    if print_intensity_latex:
        file_to_write = '../opinion_dynamics_others/write_ups/Intensity.tex'
        if os.path.exists(file_to_write):
            os.remove(file_to_write)
        for file_prefix in file_prefix_list:
            Image_file_prefix = 'Fig/Intensities/' + file_prefix + '_window_'
            Image_file_suffix = '_Intensities.eps'
            num_windows = 20
            figure_caption = file_prefix.replace('_', ' ')
            print_image_in_latex_v4(file_to_write, Image_file_prefix,
                                    Image_file_suffix, num_windows,
                                    figure_caption)

    if print_opinion_lambda:
        # Only the first dataset is rendered for this task.
        file_prefix_list = [file_prefix_list[0]]
        num_w = 1
        num_l = 1
        for file_prefix in file_prefix_list:
            file_to_write = ('../opinion_dynamics_others/write_ups/'
                             'Opinions_w0v0l0_true_opinion_exact_pred_'
                             + file_prefix + '.tex')
            if os.path.exists(file_to_write):
                os.remove(file_to_write)
            Image_file_prefix = 'Fig/w0v0l0_barca/barca_w0v0l0_window_0_Exact_Opinions_ext_'
            Image_file_suffix = '.eps'
            figure_caption = file_prefix.replace('_', ' ')
            # NOTE(review): this call passes 6 positional args while the
            # print_intensity_latex branch above passes 5 -- confirm the
            # signature of print_image_in_latex_v4 accepts both.
            print_image_in_latex_v4(file_to_write, Image_file_prefix,
                                    Image_file_suffix, num_w, num_l,
                                    figure_caption)
# Script entry point: run the driver only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.