index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
20,500 | a45cc9950e682a7bcbd4e88e8cecc92b86234bf3 | #!/usr/bin/env python
# encoding: utf-8
"""
helpers.py
Created by Brant Faircloth on 15 July 2011.
Copyright 2011 Brant C. Faircloth. All rights reserved.
"""
import os
import re
import sys
import glob
import argparse
import shutil
import configparser
from collections import defaultdict
from rich import prompt
from phyluce import lastz
from phyluce.pth import get_all_user_params
# import pdb
class FullPaths(argparse.Action):
    """argparse action that stores the user-supplied path in absolute form."""

    def __call__(self, parser, namespace, values, option_string=None):
        # Expand "~" first, then resolve relative components against the cwd.
        expanded = os.path.expanduser(values)
        setattr(namespace, self.dest, os.path.abspath(expanded))
class CreateDir(argparse.Action):
    """argparse action that (re)creates an output directory.

    Expands the user-supplied path to absolute form; if the directory
    already exists, interactively asks (via rich) whether to remove it,
    and quits the program if the user declines.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # get the full path
        d = os.path.abspath(os.path.expanduser(values))
        # check to see if directory exists
        if os.path.exists(d):
            # destructive: rmtree deletes the directory and everything in it
            if prompt.Confirm.ask(
                "[magenta][WARNING] Output directory exists, REMOVE[/magenta]"
            ):
                shutil.rmtree(d)
            else:
                print("[QUIT]")
                sys.exit()
        # create the new directory
        os.makedirs(d)
        # return the full path
        setattr(namespace, self.dest, d)
class CreateFile(argparse.Action):
    """argparse action that prepares an output file path.

    Expands the path to absolute form; if the file already exists,
    interactively asks (via rich) whether to remove it, and quits the
    program if the user declines. The file itself is not created here.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # get the full path
        f = os.path.abspath(os.path.expanduser(values))
        # check to see if directory exists
        if os.path.exists(f):
            if prompt.Confirm.ask(
                "[magenta][WARNING] Output file exists, REMOVE[/magenta]"
            ):
                os.remove(f)
            else:
                print("[QUIT]")
                sys.exit()
        setattr(namespace, self.dest, f)
def is_dir(dirname):
    """argparse type-checker: return `dirname` unchanged if it is a directory.

    Raises argparse.ArgumentTypeError otherwise.
    """
    if os.path.isdir(dirname):
        return dirname
    raise argparse.ArgumentTypeError("{0} is not a directory".format(dirname))
def is_file(filename):
    """argparse type-checker: return `filename` unchanged if it is a regular file.

    Raises argparse.ArgumentTypeError otherwise.

    Bug fix: the original tested the truthiness of the function object
    `os.path.isfile` instead of calling it, so the check always passed
    and nonexistent paths were silently accepted.
    """
    if not os.path.isfile(filename):
        msg = "{0} is not a file".format(filename)
        raise argparse.ArgumentTypeError(msg)
    else:
        return filename
def get_name(header, splitchar="_", items=2):
    """Return a cleaned locus name from a FASTA header.

    Unlike the similar helper in match_contigs_to_probes, this keeps the
    original letter case. When `splitchar` is falsy, the header is used
    whole; otherwise the first `items` fields are re-joined with "_".
    """
    if not splitchar:
        return header.lstrip(">")
    fields = header.split(splitchar)[:items]
    return "_".join(fields).lstrip(">")
def get_dupe_matches(lastz_file, splitchar="|", pos=1, longfile=False):
    """Map each lastz target name to the list of query names that hit it."""
    hits = defaultdict(list)
    for aln in lastz.Reader(lastz_file, longfile):
        target = get_name(aln.name1, splitchar, pos)
        query = get_name(aln.name2, splitchar, pos)
        hits[target].append(query)
    return hits
def get_dupes(lastz_file, splitchar="|", pos=1, longfile=False):
    """Return the set of probe/locus names involved in duplicate matches.

    A probe matching only itself is fine; any non-self match flags both
    names as duplicates.
    """
    matches = get_dupe_matches(lastz_file, splitchar, pos, longfile)
    dupes = set()
    # see if one probe matches any other probes
    # other than the children of the locus
    for target, hits in matches.items():
        if len(hits) > 1:
            # any non-self hit marks both the target and the hit as dupes
            for hit in hits:
                if hit != target:
                    dupes.add(target)
                    dupes.add(hit)
        elif target != hits[0]:
            # a single hit that is not the probe itself is also suspect
            dupes.add(target)
    return dupes
def get_names_from_config(config, group):
    """Return the option names in a config section, or None if it is missing."""
    try:
        section_items = config.items(group)
    except configparser.NoSectionError:
        return None
    return [name for name, _ in section_items]
def get_file_extensions(ftype):
    """Return the tuple of filename extensions used by an alignment format.

    Raises KeyError for an unrecognized format name.
    """
    extension_map = {
        "fasta": (".fasta", ".fsa", ".aln", ".fa"),
        "nexus": (".nexus", ".nex"),
        "phylip": (".phylip", ".phy"),
        "phylip-relaxed": (".phylip", ".phy", ".phylip-relaxed"),
        "phylip-sequential": (".phylip", ".phy", ".phylip-sequential"),
        "clustal": (".clustal", ".clw"),
        "emboss": (".emboss",),
        "stockholm": (".stockholm",),
    }
    return extension_map[ftype]
def get_alignment_files(log, input_dir, input_format):
    """Collect alignment file paths in `input_dir` matching `input_format`."""
    log.info("Getting alignment files")
    found = []
    for extension in get_file_extensions(input_format):
        pattern = os.path.join(input_dir, "*{}".format(extension))
        found.extend(glob.glob(pattern))
    if not found:
        # An empty result usually means the wrong --input-format was given.
        log.critical(
            "No alignment files found. Check --input-format argument."
        )
    return found
def write_alignments_to_outdir(log, outdir, alignments, output_format):
    """Write trimmed alignments to `outdir`, one file per locus.

    Parameters
    ----------
    log : logger exposing info/warning methods
    outdir : destination directory (assumed to exist)
    alignments : iterable of (locus, aln) tuples; `aln.trimmed` holds the
        trimmed alignment, or None when trimming dropped the locus
    output_format : format name understood by get_file_extensions; the
        first registered extension for that format is used
    """
    log.info("Writing output files")
    for locus, aln in alignments:
        if aln.trimmed is None:
            # Logger.warn() is a deprecated alias; use warning() instead.
            log.warning("DROPPED {0} from output".format(locus))
            continue
        outname = "{}{}".format(
            os.path.join(outdir, locus),
            get_file_extensions(output_format)[0],
        )
        with open(outname, "w") as outf:
            outf.write(format(aln.trimmed, output_format))
def get_contig_header_string():
    """Join the user-configured header parameters into a '|'-separated string."""
    headers = get_all_user_params("headers")
    return "|".join(headers)
|
20,501 | 9d9c0d220d5d9dcb394287a0711eb916f9035886 | # A template for data preprocessing to apply ML
# Import the libraries (matplotlib is imported for downstream plotting)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the data set from the current working directory
dataset = pd.read_csv("Data.csv")
# Split into independent features (X) and the dependent target (y)
# NOTE(review): assumes the target is column index 3 (the last column of a
# 4-column file, matching the `:-1` slice for X) -- confirm for new data
X = dataset.iloc[:,:-1].values
y = dataset.iloc[:,3].values
# Split the dataset into training and test sets (80/20, fixed random seed)
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=0)
'''
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
''' |
20,502 | 600496e9dde95ea971af498fdc54ccaa6f99b165 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import subprocess
import pprint
import os
class State(object):
    """Collects basic host storage information (RAID, disks, filesystem sizes)."""

    def __init__(self, args=None):
        # Preserve original behavior: self.args is only set when args is given.
        # (Original had an empty branch with a note "add default args here".)
        if args:
            self.args = args
        self.dellPath = '/opt/MegaRAID/MegaCli/'   # MegaCli location on Dell hosts
        self.hpPath = '/opt/compaq/hpacucli/bld/'  # hpacucli location on HP hosts
        self.df = '/bin/'                          # preferred location of df

    def search(self, path, cmd, args):
        """Return the full command line for `cmd` found in common bin dirs.

        Returns None when the executable is not found anywhere.
        """
        for prefix in [path, '/sbin/', '/bin/', '/usr/sbin/', '/usr/bin/']:
            if os.access(prefix + cmd, os.X_OK):
                return prefix + cmd + args
        return None

    def getRAID(self):
        # Placeholder: RAID controller probing is not implemented yet.
        return []

    def getDiskInfo(self):
        # Placeholder: physical-disk probing is not implemented yet.
        return []

    def getSize(self):
        """Parse `df -h` output into [{'point': ..., 'size': ...}, ...].

        Only filesystems whose device name starts with 'data' are reported.
        Returns None if df cannot be located or its output cannot be parsed.
        """
        try:
            cmd = self.search(self.df, 'df', ' -h')
            lines = os.popen(cmd).readlines()
            results = []
            for line in lines:
                if line.startswith('data'):
                    fields = line.split()
                    # df -h columns: device size used avail use% mountpoint
                    results.append(
                        {'point': fields[5].strip(), 'size': fields[1].strip()}
                    )
            return results
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return None

    def get(self):
        """Aggregate all storage facts into a single dict."""
        return {
            'size': self.getSize(),
            'raid': self.getRAID(),
            'pdisk': self.getDiskInfo(),
        }
if __name__ == '__main__':
    # Print the collected storage information when run as a script.
    collector = State()
    pprint.pprint(collector.get())
|
20,503 | 8e22c5a9fffe6abb549bac068bd662c47906187b | from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.conf import settings
from django.contrib.auth import get_user_model
from bulletin.models import Bullet
class BulletForm(forms.ModelForm):
    """ModelForm for posting a Bullet; the posting user is set in the view."""

    # label 'Объявление' = "Announcement"; labels are user-facing Russian text
    description = forms.CharField(
        widget=forms.Textarea,
        label='Объявление'
    )

    class Meta:
        model = Bullet
        # user_posted is filled from request.user, not from form input
        exclude = ('user_posted',)
class LoginForm(forms.Form):
    """Simple username/password login form (labels: "Login" / "Password")."""

    login = forms.CharField(label='Логин')
    password = forms.CharField(label='Пароль', widget=forms.PasswordInput)
class RegistrationForm(forms.Form):
    """Self-service user registration form.

    Validates login/email uniqueness and password confirmation, then
    creates the user (with an optional avatar) via the custom user model.
    Labels and error messages are user-facing Russian text.
    """

    login = forms.CharField(label='Логин', min_length=5)
    password = forms.CharField(
        label='Пароль',
        min_length=8,
        widget=forms.PasswordInput
    )
    repeat_password = forms.CharField(
        label='Повторите пароль',
        widget=forms.PasswordInput
    )
    email = forms.CharField(label='Адрес электронной Почты')
    first_name = forms.CharField(label='Имя')
    last_name = forms.CharField(label='Фамилия')
    # avatar is optional; save() substitutes a default image when absent
    avatar = forms.ImageField(required=False)

    def clean_login(self):
        """Reject logins already present in the user table."""
        user_model = get_user_model()
        login = self.cleaned_data['login']
        if user_model.objects.filter(username=login):
            # error text: "This login is already taken"
            raise ValidationError('Этот login уже занят')
        return login

    def clean_email(self):
        """Validate email format and reject already-registered addresses."""
        user_model = get_user_model()
        email = self.cleaned_data['email']
        validate_email(self.cleaned_data['email'])
        if user_model.objects.filter(email=email):
            # error text: "This email is already registered"
            raise ValidationError('Этот email уже зарегистрирован')
        return self.cleaned_data['email']

    def clean(self):
        """Cross-field validation: both password entries must match."""
        cleaned_data = super(RegistrationForm, self).clean()
        if self.cleaned_data.get('password') and self.cleaned_data.get('repeat_password'):
            if self.cleaned_data['password'] != self.cleaned_data['repeat_password']:
                # error text: "Passwords do not match"
                raise ValidationError('Пароли не совпадают')
        return cleaned_data

    def save(self):
        """Create and return the new user, defaulting the avatar if missing."""
        user_model = get_user_model()
        ava_pic = self.cleaned_data['avatar']
        if not ava_pic:
            ava_pic = 'user_images/None/no-avatar.jpg'
        user = user_model.objects.create_user(
            username=self.cleaned_data['login'],
            email=self.cleaned_data['email'],
            password=self.cleaned_data['password'],
            first_name=self.cleaned_data['first_name'],
            last_name=self.cleaned_data['last_name'],
            avatar=ava_pic,
        )
        return user
|
20,504 | 9e93d8e0f20ecef31dc9e4d21245b38c5aff47f8 | #!/usr/bin/env python2.7
''' Calculates the monthly mean one timestep at a time in serial '''
from netCDF4 import Dataset
import numpy as np
def file_avg(mean, filename):
    ''' Return the time-mean of the 'u' field in the netCDF file `filename`.

    `mean` is the starting accumulator (pass an array of zeros for a fresh
    average). Fixes two defects in the original:
      * `(mean + u[t]) / 2` is not a mean -- it weights recent timesteps
        exponentially more than earlier ones; replaced with a proper
        incremental mean.
      * the loop hard-coded 12 timesteps instead of using the file's
        actual time length.
    Also ports the Python 2 print statements and guarantees the file is
    closed even if reading fails.
    '''
    f = Dataset(filename, 'r')
    try:
        timelength = len(f.variables['time'])
        u = f.variables['u']
        for t in range(timelength):
            print(t)  # progress indicator; these files can be slow to read
            # incremental mean: m_t = m_{t-1} + (x_t - m_{t-1}) / (t + 1)
            mean = mean + (u[t, :, :, :] - mean) / (t + 1)
    finally:
        f.close()
    print(mean.shape)
    return mean
def main():
    ''' Average the 'u' field over each monthly file and accumulate the result. '''
    direct = '/group_workspaces/jasmin/hiresgw/mj07/'
    files = ['xjanpa.pj19910301.u.nc']
    # Read the grid dimensions from the first file to size the accumulator.
    f = Dataset(direct + files[0], 'r')
    z = f.variables['z_hybrid_height']
    lat = f.variables['latitude']
    lon = f.variables['longitude']
    mean = np.zeros([len(z), len(lat), len(lon)])
    f.close()  # the original leaked this handle
    for filename in files:
        # file_avg already folds `mean` into its result, so assignment is
        # correct here; the original `mean +=` double-counted the running mean.
        mean = file_avg(mean, direct + filename)
    return mean
if __name__ == '__main__':
    import time
    starttime = time.time()
    main()
    # Python 3 print function; the original used a Python 2 print statement,
    # which is a syntax error under Python 3.
    print('Run time: ', time.time() - starttime)
|
20,505 | 18a762a4d4803297c5119f84575a49cf7a2dd011 | # Copyright (c) 2022 Graphcore Ltd. All rights reserved.
from email.policy import default
from random import choices
from typing import List, Optional
from jsonargparse import ActionConfigFile, ArgumentParser
from jsonargparse.typing import ClosedUnitInterval, PositiveInt
def get_parser():
"""Command line argument parser.
Returns:
jsonargparser: processed arguments into jsonargparse object
"""
parser = ArgumentParser()
parser.add_argument("--config", action=ActionConfigFile)
parser.add_argument("--seed", type=int, default=1984)
# Model inputs
parser.add_argument(
"--inputs",
default=["node_feat", "edge_feat"],
type=list,
help="Inputs to model, must align with the features selected",
)
# Model hyper-parameters
parser.add_argument(
"--model.micro_batch_size",
type=PositiveInt,
default=8,
help='Compute batch size (if using packing this is measured in "packs per batch")',
)
parser.add_argument("--model.valid_batch_size", type=PositiveInt, help="Batch size for use in validation")
parser.add_argument(
"--model.target_total_batch_size",
type=PositiveInt,
default=None,
help="Try and match training batch size to this value.",
)
parser.add_argument("--model.n_nodes_per_pack", default=80, type=PositiveInt, help='nodes per "pack"')
parser.add_argument("--model.n_edges_per_pack", default=160, type=PositiveInt, help='edges per "pack"')
parser.add_argument(
"--model.n_graphs_per_pack", default=16, type=PositiveInt, help='maximum number of graphs per "pack"'
)
parser.add_argument("--model.epochs", default=100, type=PositiveInt, help="Maximum number of epochs to run for")
parser.add_argument("--model.lr", type=float, default=2e-5, help="Learning rate")
parser.add_argument(
"--model.learning_rate_schedule",
default="cosine",
choices=["cosine", "linear", "static"],
help="Learning rate scheduler",
)
parser.add_argument(
"--model.cosine_lr", type=bool, default=False, help="use a cosine lr decay"
) # add this back to accommodate ensembling for previous runs
parser.add_argument("--model.min_lr", default=0, type=float, help="minimum learning rate for the lr scheduler")
parser.add_argument("--model.lr_warmup_epochs", default=0.0, type=float, help="Number of warmup epochs")
parser.add_argument(
"--model.lr_init_prop", default=1.0 / 8.0, type=float, help="Initial scale of lr when warming up"
)
parser.add_argument(
"--model.loss_scaling",
default=16,
type=float,
help="loss scaling factor (to keep gradients representable in IEEE FP16)",
)
parser.add_argument(
"--model.node_latent", default=300, type=PositiveInt, help="number of latent units in the network"
)
parser.add_argument("--model.node_exp_ratio", default=2.0, type=float, help="Ratio between latent and hidden size.")
parser.add_argument(
"--model.node_mlp_layers",
default=2,
type=PositiveInt,
help="total number of layers in the MLPs (including output)",
)
parser.add_argument("--model.node_dropout", default=0.0, type=float, help="dropout for nodes")
parser.add_argument("--model.node_prenorm", default=False, type=bool, help="Add norm+act to start of MLP")
parser.add_argument("--model.use_edges", default=True, type=bool, help="use edges features")
parser.add_argument(
"--model.edge_latent", default=None, type=PositiveInt, help="number of edge latent units in the network"
)
parser.add_argument(
"--model.edge_exp_ratio", default=None, type=float, help="Ratio between latent and hidden size."
)
parser.add_argument(
"--model.edge_mlp_layers",
default=None,
type=PositiveInt,
help="total number of layers in the MLPs (including output)",
)
parser.add_argument("--model.edge_dropout", default=0.0, type=float, help="dropout for edges")
parser.add_argument(
"--model.edge_dna_dropout", default=0.0, type=float, help="dropout for direct neighbour aggregation of edges"
)
parser.add_argument(
"--model.eigv_rand_sign_flip",
default=True,
type=bool,
help="Add random sign flipping to laplacian eigen vectors.",
)
parser.add_argument("--model.edge_prenorm", type=bool, help="Add norm+act to start of MLP")
parser.add_argument(
"--model.edge_dropout_loc",
default="before_residual_add",
type=str,
choices=["before_residual_add", "before_scatter"],
help="Location for edge dropout",
)
parser.add_argument("--model.use_globals", default=False, type=bool, help="Use global features")
parser.add_argument("--model.global_latent", default=None, type=PositiveInt, help="Number of global latents")
parser.add_argument(
"--model.global_exp_ratio", default=None, type=float, help="Ratio between latent and hidden size."
)
parser.add_argument(
"--model.global_mlp_layers",
default=None,
type=PositiveInt,
help="total number of layers in the MLPs (including output)",
)
parser.add_argument("--model.global_dropout", default=0.0, type=float, help="dropout for globals")
parser.add_argument("--model.global_prenorm", type=bool, help="Add norm+act to start of MLP")
parser.add_argument("--model.encoder_latent", default=None, type=PositiveInt, help="Number of global latents")
parser.add_argument(
"--model.encoder_exp_ratio", default=None, type=float, help="Ratio between latent and hidden size."
)
parser.add_argument(
"--model.encoder_mlp_layers",
default=None,
type=PositiveInt,
help="total number of layers in the MLPs (including output)",
)
parser.add_argument("--model.encoder_dropout", default=0.0, type=float, help="dropout for globals")
parser.add_argument("--model.encoder_prenorm", type=bool, help="Add norm+act to start of MLP")
parser.add_argument(
"--model.encoder_norm_pos",
choices=["none", "layer_hidden", "layer_output"],
help="For the MLPs, whether and where to use normalization.",
)
parser.add_argument(
"--model.encoder_act_fn",
choices=["relu", "gelu", "swish"],
help="Activation function used for the encoder MLPs.",
)
parser.add_argument(
"--model.atom_encoder_model",
default="node",
choices=["node", "encoder"],
help="Which model to use for the atom encoder",
)
parser.add_argument(
"--model.bond_encoder_model",
default="edge",
choices=["edge", "encoder"],
help="Which model to use for the bond encoder",
)
parser.add_argument(
"--model.override_encoder_dropout",
type=dict,
default={},
help="Dictionary to override dropout for specific encoder features",
)
parser.add_argument("--model.expand_first_hidden", default=True, type=bool, help="Expand first MLP hidden")
parser.add_argument(
"--model.encoder_concat_mode",
type=str,
default="concat_all",
choices=["concat_all", "sum_all", "sum_extras"],
help="Whether to sum or concat encoders",
)
parser.add_argument(
"--model.n_embedding_channels",
default=100,
type=PositiveInt,
help="how many channels to use for the input embeddings",
)
parser.add_argument(
"--model.n_graph_layers", default=5, type=PositiveInt, help="how many message-passing steps in the model"
)
parser.add_argument(
"--model.opt", default="adam", choices=["SGD", "adam", "tf_adam"], help="which optimizer to use"
)
parser.add_argument("--model.grad_clip_value", default=None, type=float, help="Clipping value for gradients")
parser.add_argument("--model.l2_regularization", default=None, type=float, help="L2 weight regularization scale")
parser.add_argument("--model.use_noisy_nodes", default=False, type=bool, help="Use noisy nodes or not")
parser.add_argument("--model.noisy_nodes_weight", default=1.0, type=float, help="Weight of the noisy nodes loss")
parser.add_argument(
"--model.noisy_nodes_noise_prob", default=0.025, type=float, help="Probability of applying noise"
)
parser.add_argument(
"--model.noisy_node_method",
default="split_softmax",
choices=["combined_softmax", "split_softmax"],
type=str,
help="Method for performing noisy node/edge softmax.",
)
parser.add_argument(
"--model.noisy_node_model",
default="dense",
choices=["dense", "mlp"],
type=str,
help="Use a single dense layer for the noisy node decoder or a multi layer mlp.",
)
parser.add_argument("--model.use_noisy_edges", default=False, type=bool, help="Use noisy edges or not")
parser.add_argument("--model.noisy_edges_weight", default=1.0, type=float, help="Weight of the noisy nodes loss")
parser.add_argument(
"--model.noisy_edges_noise_prob", default=0.025, type=float, help="Probability of applying noise"
)
parser.add_argument("--model.layer_output_scale", default=1.0, type=float, help="Scaling layer outputs")
parser.add_argument(
"--model.adam_m_dtype",
default="float16",
choices=["float16", "float32"],
help="dtype for the m part of the adam optimizer",
)
parser.add_argument(
"--model.adam_v_dtype",
default="float16",
choices=["float16", "float32"],
help="dtype for the v part of the adam optimizer",
)
parser.add_argument(
"--model.dtype", default="float16", choices=["float16", "mixed_float16", "float32"], help="data dtype"
)
parser.add_argument(
"--model.eval_mode", default="ogb", choices=["ogb", "keras", "both"], help="Evaluator to use in inference"
)
# Hybrid args
parser.add_argument(
"--model.layer_specs",
nargs="+",
type=str,
default=["MPNN+MHSA+FFN"],
help="Config of each GPS layer in the model body",
)
parser.add_argument(
"--model.layer_repeats", nargs="+", type=int, default=[], help="Repeat count for each entry in layer_specs"
)
parser.add_argument("--model.n_attn_heads", type=int, default=32, help="Number of self-attention heads")
parser.add_argument(
"--model.ffn_dim", type=int, default=768, help="Hidden dimension in the middle of the FFN (boom) layers"
)
parser.add_argument(
"--model.attention_dropout_rate", type=float, default=0.1, help="Dropout for self-attention mask"
)
parser.add_argument("--model.ffn_dropout_rate", type=float, default=0.1, help="Dropout in the ffn boom layer")
parser.add_argument(
"--model.gnn_output_dropout_rate",
type=float,
default=0.1,
help="Dropout for the output of the gnn in hybrid model",
)
parser.add_argument(
"--model.mhsa_output_dropout_rate",
type=float,
default=0.1,
help="Dropout for the output of the mhsa in hybrid model",
)
parser.add_argument(
"--model.ffn_output_dropout_rate",
type=float,
default=0.1,
help="Dropout for the output of the ffn in hybrid model",
)
parser.add_argument("--model.num_gaussian_kernels", type=int, help="Number of Gaussian basis kernels")
parser.add_argument(
"--model.gaussian_kernel_init_low",
type=float,
default=0.0,
help="Lower bound for gaussian kernel mean/std initialisation",
)
parser.add_argument(
"--model.gaussian_kernel_init_high",
type=float,
default=1.5,
help="Upper bound for gaussian kernel mean/std initialisation",
)
parser.add_argument(
"--model.gaussian_kernel_scale", type=float, default=1.0, help="Static scale for gaussian kernels"
)
parser.add_argument(
"--model.gaussian_kernel_epsilon", type=float, default=1e-5, help="Epsilon for gaussian kernels"
)
parser.add_argument(
"--model.max_path_length",
type=int,
default=5,
help="max edges in a path that contribute to the edge feature encoding",
)
parser.add_argument(
"--model.max_shortest_path_distance",
type=int,
default=100,
help="Maximum SPD in any molecule, should be > max graph diameter in the dataset",
)
parser.add_argument(
"--model.graph_dropout_rate",
type=float,
default=0.0,
help="Dropout whole graphs in the stochastic depth fashion, rather than individual nodes."
" Applied to MHSA, MPNN and FFN in the hybrid model.",
)
parser.add_argument(
"--model.override_graph_dropout_rate.FFN",
type=Optional[float],
default=None,
help="Dropout whole graphs in the FFN rather than individual nodes.",
)
parser.add_argument(
"--model.override_graph_dropout_rate.MHSA",
type=Optional[float],
default=None,
help="Dropout whole graphs in the MHSA rather than individual nodes.",
)
parser.add_argument(
"--model.override_graph_dropout_rate.MPNN",
type=Optional[float],
default=None,
help="Dropout whole graphs in the MPNN rather than individual nodes.",
)
parser.add_argument(
"--model.hybrid_mpnn_extra_node_residual",
type=bool,
default=True,
help="Add the extra residual connection on nodes around the MPNN, even there's already one inside the MPNN",
)
# Dataset + Generated Data
parser.add_argument(
"--dataset.split_path",
type=str,
default="./pcqm4mv2-cross_val_splits/",
help="The path where split files are saved.",
)
parser.add_argument(
"--dataset.split_mode",
type=str,
default="original",
choices=["original", "incl_half_valid", "47_kfold", "train_plus_valid"],
help="Which dataset split to use (options: original, 47_k_fold, add_half_valid, train_plus_valid)",
)
parser.add_argument("--dataset.split_num", type=int, default=0, help="Which dataset split number to use.")
parser.add_argument(
"--dataset.trim_chemical_features", type=bool, default=False, help="Trim chemical input features"
)
parser.add_argument(
"--dataset.chemical_node_features",
type=list,
default=[
"atomic_num",
"chiral_tag",
"degree",
"possible_formal_charge",
"possible_numH",
"possible_number_radical_e",
"possible_hybridization",
"possible_is_aromatic",
"possible_is_in_ring",
],
help="Which chemical node features to use.",
)
parser.add_argument(
"--dataset.chemical_edge_features",
type=list,
default=["possible_bond_type", "possible_bond_stereo", "possible_is_conjugated"],
help="Which chemical edge features to use.",
)
parser.add_argument(
"--dataset.use_periods_and_groups",
default=False,
type=bool,
help="Convert atomic number to groups and periods as additional node input features",
)
parser.add_argument(
"--dataset.do_not_use_atomic_number",
default=False,
type=bool,
help="Option to not use the atomic number as input feature when groups and periods are used",
)
parser.add_argument(
"--dataset.dataset_name",
default="pcqm4mv2",
choices=["generated", "pcqm4mv2", "pcqm4mv2_conformers_28features"],
help="which dataset to use",
)
parser.add_argument("--dataset.cache_path", default=".", type=str, help="Path to download the datasets to.")
parser.add_argument(
"--dataset.generated_data",
default=False,
type=bool,
help="Use randomly generated data instead of a real dataset.",
)
parser.add_argument(
"--dataset.generated_data_n_nodes",
default=24,
type=PositiveInt,
help="nodes per graph for the randomly generated dataset",
)
parser.add_argument(
"--dataset.generated_data_n_edges",
default=50,
type=PositiveInt,
help="edges per graph for the randomly generated dataset",
)
parser.add_argument(
"--dataset.generated_data_n_graphs",
default=2048,
type=PositiveInt,
help="Number of graphs for the randomly generated dataset",
)
parser.add_argument(
"--dataset.normalize_labels",
default=False,
type=bool,
help="Optionally normalize the labels [Only valid for PCQ].",
)
parser.add_argument("--dataset.prop_to_use", default=1.0, type=float, help="Proportion of the dataset to use.")
parser.add_argument(
"--dataset.valid.prop_to_use", default=1.0, type=float, help="Proportion of the dataset to use."
)
parser.add_argument(
"--dataset.clean_train.prop_to_use", default=0.1, type=float, help="Proportion of the dataset to use."
)
parser.add_argument("--dataset.features", default={}, type=dict, help="Which features to use.")
parser.add_argument(
"--dataset.load_from_cache",
default=True,
type=bool,
help="Whether to attempt to load preprocessed dataset from cache.",
)
parser.add_argument(
"--dataset.save_to_cache",
default=True,
type=bool,
help="Whether to attempt to save preprocessed dataset to cache.",
)
parser.add_argument(
"--dataset.packing_strategy",
default="streaming",
choices=["pad_to_max", "streaming"],
help="Which packing strategy to use.",
)
parser.add_argument(
"--dataset.parallel_processes",
default=240,
type=int,
help="How many parallel processes to use when processing smiles.",
)
parser.add_argument(
"--dataset.ogbBL_norm",
default="mean_only",
type=str,
choices=["z_score", "std_only", "mean_only", "None"],
help="What normalization method for OGB bond lengths.",
)
parser.add_argument(
"--dataset.distance_norm",
default="mean_only",
type=str,
choices=["z_score", "std_only", "mean_only", "None"],
help="What normalization method for OGB atom distances.",
)
parser.add_argument(
"--dataset.input_masking_groups",
type=Optional[List[List[str]]],
default=None,
help="Groups of input features to mask together. The first group is always used for inference",
)
parser.add_argument(
"--dataset.input_masking_weights",
type=Optional[List[float]],
default=None,
help="Weights for the input masking groups will use 1:1:... by default",
)
parser.add_argument(
"--dataset.use_distance_sum_feature",
default=True,
type=bool,
help="If atom distances are being used, also generate node features for the sum of distances",
)
# Training + Validation + Test Options
parser.add_argument("--do_training", default=True, type=bool, help="Run training on the dataset")
parser.add_argument("--do_validation", default=True, type=bool, help="Run validation on the dataset")
parser.add_argument(
"--validate_every_n_epochs",
default=10,
type=int,
help="How often to try and validate the training. Important when using moving average as checkpoints saved each epoch.",
)
parser.add_argument("--do_clean_training", default=True, type=bool, help="Evaluate on training data without noise")
parser.add_argument("--do_test", default=False, type=bool, help="Run test on the dataset")
parser.add_argument(
"--inference_fold",
default="valid",
type=str,
help="Run inference on 'valid', 'test-dev' or 'test-challenge' fold.",
)
# Monitoring (wandb, execution profiles, checkpoints)
parser.add_argument(
"--execution_profile", default=False, type=bool, help="Create an execution profile in TensorBoard."
)
parser.add_argument("--wandb", default=True, type=bool, help="Enable logging to Weights & Biases")
parser.add_argument("--upload_final_ckpt", default=True, type=bool, help="Upload the final checkpoint to wandb.")
parser.add_argument("--wandb_entity", default="ogb-lsc-comp", help="WandB entity.")
parser.add_argument("--wandb_project", default="PCQM4Mv2", help="WandB project.")
parser.add_argument("--_note", default=None, help="Add note to config to view in WandB")
parser.add_argument(
"--note", default=None, help="Add note to config to view in WandB"
) # add this back to accommodate ensembling for previous runs
parser.add_argument(
"--checkpoint_dir", default="checkpoints", help="Base directory to save checkpoints to. Usually `checkpoints`."
)
parser.add_argument(
"--submission_results_dir", default="submission_results", help="Base directory to save submission results to."
)
parser.add_argument(
"--save_checkpoints_locally",
default=False,
type=bool,
help="Save the checkpoints to the local dir. Otherwise saved to tmp/",
)
parser.add_argument("--checkpoint_path", default=None, help="Path to checkpoint file if skipping training.")
parser.add_argument("--checkpoint_every_n_epochs", default=1, type=int, help="Create checkpoints every N epochs.")
# ipu options
parser.add_argument(
"--ipu_opts.replicas", default=1, type=int, help="The number of replicas to scale the model over."
)
parser.add_argument(
"--ipu_opts.gradient_accumulation_factor",
default=1,
type=int,
help="The number of times to locally accumulate gradients.",
)
parser.add_argument(
"--ipu_opts.gradient_accumulation_dtype",
default=None,
type=str,
help="Dtype to store accumulated gradients in.",
)
parser.add_argument(
"--ipu_opts.num_pipeline_stages", default=1, type=int, help="The number of pipeline stages to use."
)
parser.add_argument(
"--ipu_opts.pipeline_stages",
type=Optional[List[List[str]]],
default=None,
help="""Pipeline stages, a list of [enc, hid, dec] layers forming the pipeline.""",
)
parser.add_argument(
"--ipu_opts.pipeline_device_mapping", type=List[int], help="""Mapping of pipeline stages to IPU"""
)
parser.add_argument("--ipu_opts.recompute", default=False, type=bool, help="Do recomputation")
parser.add_argument(
"--ipu_opts.offload_optimizer_state",
default=False,
type=bool,
help="Offload optimizer state to external memory",
)
parser.add_argument("--ipu_opts.RTS", default=False, type=bool, help="Turn on replicated optimizer state sharding")
parser.add_argument(
"--ipu_opts.available_memory_proportion",
default=[0.2],
type=List[float],
help="memory proportion to reserve for matmuls",
)
parser.add_argument(
"--ipu_opts.optimization_target",
default="cycles",
choices=["balanced", "cycles", "memory"],
help="optimization target for the planner",
)
parser.add_argument(
"--ipu_opts.scheduling_algorithm",
default="CHOOSE_BEST",
choices=["CHOOSE_BEST", "SHORTEST_PATH", "CLUSTERING", "POST_ORDER", "LOOK_AHEAD"],
help="the schedling algorithm to use.",
)
parser.add_argument(
"--ipu_opts.maximum_cross_replica_sum_buffer_size",
default=1000000,
type=int,
help="max size of the cross-replica sum buffer",
)
parser.add_argument("--ipu_opts.fp_exceptions", default=False, type=bool, help="Turn on floating point exceptions.")
parser.add_argument("--ipu_opts.nanoo", default=False, type=bool, help="Turn on NaN on overflow.")
# Layers Options
parser.add_argument(
"--layer.rn_multiplier", default="none", choices=["constant", "softplus", "none"], help="RN multiplier"
)
parser.add_argument(
"--layer.decoder_mode", default="node_global", choices=["node_global", "global", "node"], help="decoder mode"
)
parser.add_argument("--layer.weight_dtype", choices=["float16", "float32"], help="decoder mode")
parser.add_argument(
"--layer.mlp_norm",
default="layer_hidden",
choices=["none", "layer_hidden", "layer_output"],
help="For the MLPs, whether and where to use normalization.",
)
parser.add_argument(
"--layer.activation_function",
default="relu",
choices=["relu", "gelu", "swish"],
help="Activation function used for the MLPs.",
)
parser.add_argument(
"--layer.gather_scatter",
default="grouped",
choices=["grouped", "debug", "dense"],
help="if `grouped`, use the batch axis to separate packs which cannot speak to each other. This may "
"speed up computation by using grouped gather/scatter underlying implementations. "
"If `dense`, senders/receivers will be one-hot matrices and matmuls will be used. "
"If `debug`, will use a list comprehension over the batch dimension (this is bad and slow "
"but may be useful for debugging",
)
parser.add_argument(
"--layer.one_hot_embeddings", default=False, type=bool, help="Use a one-hot formulation of the embedding lookup"
)
# New Args
parser.add_argument(
"--layer.gather_from",
default="both",
choices=["both", "receivers", "senders"],
help="gather from option in interaction network",
)
parser.add_argument(
"--layer.scatter_to",
default="receivers",
choices=["both", "receivers", "senders"],
help="scatter to option in interaction network",
)
parser.add_argument(
"--layer.concat_globals_to",
default=["nodes", "edges"],
type=list,
# choices=["nodes", "edges", "both"],
help="Which inputs to concat globals to",
)
parser.add_argument(
"--layer.aggregator",
default=["sum"],
type=list,
# choices=["sum", "max", "min", "mean", "var", "std", "sqrtN", "softmax"],
help="aggregation function to use in scatter and pooling layers for GNN",
)
parser.add_argument(
"--layer.direct_neighbour_aggregation",
default=False,
type=bool,
help="Append node representation to outgoing edge message",
)
parser.add_argument(
"--layer.node_combine_method",
default="concat",
choices=["concat", "sum"],
help="How to combine nodes after the gathers from senders/receivers and scatter to senders/receivers",
)
parser.add_argument(
"--debug.last_layer_only",
default=False,
help="Add debug stats for final processing layer only (i.e. just before the decoder)",
)
parser.add_argument("--debug.max_abs", type=bool, default=True, help="Include 'max_abs' in debug stats")
parser.add_argument("--debug.mean_abs", type=bool, default=False, help="Include 'mean_abs' in debug stats")
parser.add_argument("--debug.mean", type=bool, default=False, help="Include 'mean' in debug stats")
parser.add_argument("--debug.var", type=bool, default=False, help="Include 'var' in debug stats")
parser.add_argument(
"--debug.check_data_for_nans", type=bool, default=False, help="When collecting dataset stas also check for NaNs"
)
parser.add_argument(
"--debug.calc_conformer_stats",
type=bool,
default=False,
help="When collecting dataset calculate the conformer position stats",
)
return parser
def parse_args():
    """Build the full CLI argument parser and parse ``sys.argv``."""
    return get_parser().parse_args()
def parse_dict(in_dict):
    """Build the CLI argument parser and populate it from a mapping.

    Mirrors :func:`parse_args` but takes an already-constructed dict
    instead of reading ``sys.argv``.
    """
    return get_parser().parse_object(in_dict)
|
20,506 | c99b84c8ef9e2dd7d04b6057471a7591248d8d94 | from setuptools import setup
# Packaging metadata for the "template" command-line tool.
# Installs a single console script (`template`) backed by template:main.
setup(name='template',
      version='0.0.1',
      # Chinese: "used to fill templates, thereby modifying the
      # frequently-changed parts of a document"
      description='用于填充模板,从而修改文档的常修改项',
      author='Stcoder',
      author_email='stcode98@foxmail.com',
      py_modules=['template'],
      install_requires=['jinja2', 'docopt'],
      entry_points={'console_scripts': [
          'template = template:main']},
      zip_safe=False,
      license='MIT'
      )
|
20,507 | 3a22cd184f25122af82da8956fe649e57c20b61e | import pymongo
import sys
connection = pymongo.Connection("mongodb://localhost", safe=True)
db=connection.students
grades = db.grades
try:
doc=grades.find_one()
except:
print "Unexpected error:", sys.exc_info()[0]
print doc
|
20,508 | 1e657077e6450e8f0dc86e62497a37c00e4292e7 | from .encrypt import Encryption |
20,509 | c9ec0d8c8c669e327be828e0c25ae1fdaa5f73e4 | import requests
from bs4 import BeautifulSoup
# AFI's "100 Years...100 Movies" list page.
URL = "https://www.afi.com/afis-100-years-100-movies/"

response = requests.get(URL)
# Fail loudly on HTTP errors instead of silently parsing an error page.
response.raise_for_status()
website_html = response.text
soup = BeautifulSoup(website_html, "html.parser")

# Titles live in <h6 class="q_title"> elements; keep at most the first 100.
all_movies = [title.getText() for title in soup.find_all("h6", class_="q_title")[:100]]

# Write one title per line.  A plain loop replaces the original
# side-effect-only list comprehension (which built a throwaway list of Nones).
with open("movies.txt", "w") as file:
    for movie in all_movies:
        file.write(f"{movie}\n")
def decrypting(letter, key=None, mod=None):
    """Decrypt one RSA-encrypted integer and return it as a string.

    Args:
        letter: ciphertext value (int or numeric string).
        key: private exponent; defaults to the module-level ``private_key``.
        mod: RSA modulus; defaults to the module-level ``n``.

    Uses three-argument ``pow`` for modular exponentiation, which computes
    the same result as ``(x ** key) % mod`` but without materialising the
    astronomically large intermediate power.
    """
    if key is None:
        key = private_key
    if mod is None:
        mod = n
    return str(pow(int(letter), key, mod))
# Read the public key (e, n), the ciphertext blocks, and the private
# exponent.  `with` blocks replace the original bare open(...).read()
# calls, which leaked file handles.
with open("public_key", "r") as fh:
    open_keys = list(map(int, fh.read().split()))
with open("encrypted", "r") as fh:
    cod = list(map(int, fh.read().split(" ")))
with open("private_key", "r") as fh:
    private_key = int(fh.read())
e, n = open_keys[0], open_keys[1]

# Decrypt every block, then glue the resulting digit strings back together
# two at a time to recover the original character codes.
decrypted_blocks = list(map(decrypting, cod))
splited_cod = list(map(int, map("".join, zip(*[iter(decrypted_blocks)] * 2))))

with open("decrypted", "w") as fh:
    fh.write("".join(map(chr, splited_cod)))
|
20,511 | dc187496194729f21153032ad123498f812a3e9d | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .tools import lazystr
import sys
import platform
import warnings
import re
from threading import RLock
from textwrap import wrap, dedent
warnings.filterwarnings("ignore")
from os.path import splitext
try:
import colorama
except ImportError:
colorama = None
else:
colorama.init()
# Interpreter / platform feature flags.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
WIN = sys.platform.startswith("win")
# Python 2/3 compatibility aliases (same role as the `six` package).
if PY3:
    text_type = str
    binary_type = bytes
    xrange = range  # Py3 has no xrange; alias it to range
    string_types = (str, bytes)
else:
    # NOTE: no xrange alias needed here — Py2's builtin is used directly.
    text_type = unicode
    binary_type = str
    string_types = (basestring,)
class ConsoleHighlighter(object):
    """Base class for regex-driven console syntax highlighters.

    Subclasses define ``styles`` (named-group name -> style string, parsed
    by :func:`style`) and ``highlights`` (regex patterns whose *named
    groups* map into ``styles``).  A ``None`` key in ``styles`` supplies a
    class-wide default style.
    """

    styles = {}
    highlights = []

    @classmethod
    def highlight(cls, text, default_style=""):
        """Return *text* as an :class:`AttrText` with style spans applied."""
        cls.compile()
        text = AttrText(text)
        default_style = style(default_style or cls.styles.get(None, ""))
        if default_style:
            text.add_span(0, len(text), **default_style)
        for highlight in cls._compiled_highlights:
            for match in highlight.finditer(text):
                # Only named groups that actually matched produce spans.
                for k, v in match.groupdict().items():
                    if k in cls._compiled_styles and v:
                        start, end = match.span(k)
                        text.add_span(start, end, **cls._compiled_styles[k])
        return text

    @classmethod
    def compile(cls):
        """Compile patterns and styles once; cached on the class object."""
        if not hasattr(cls, "_compiled_highlights"):
            cls._compiled_highlights = [
                re.compile(h, re.UNICODE | re.DOTALL | re.MULTILINE)
                for h in cls.highlights
            ]
            cls._compiled_styles = {k: style(v) for k, v in cls.styles.items()}
class XMLHighlighter(ConsoleHighlighter):
    """Highlighter for XML source displayed in the console."""

    styles = {
        "xmlns": "italic",
        "tag": "bold blue not dim",
        "attribute": "not bold cyan",
        "string": "yellow",
        "substitution": "bold magenta",
        "templatetag": "bold magenta",
        "braced": "bold",
        "comment": "dim italic not bold",
    }

    # Order matters: later patterns can re-style spans matched earlier
    # (e.g. comments win over the tag pattern).
    highlights = [
        r"(?P<tag>\<[^\!].*?\>)",
        r"(?P<attribute>\s\S*?=\".*?\")",
        r"(?P<string>\".*?\")",
        r"(?P<braced>\{.*?\})",
        r"(?P<substitution>\$\{.*?\})",
        r"(?P<templatetag>\{\%.*?\%\})",
        r"(?P<comment>\<\!\-\-.*?\-\-\>)",
    ]
class TemplateHighlighter(ConsoleHighlighter):
    """Highlighter for HTML template source (tags plus template syntax)."""

    styles = {
        "tag": "bold blue not dim",
        "attr": "cyan not bold",
        "string": "yellow",
        "substitution": "bold magenta",
        "templatetag": "bold",
        "comment": "dim white italic",
    }

    highlights = [
        r"(?P<templatetag>\{\%.*?\%\})",
        r"(?P<tag>\<.*?\>)",
        r"(?P<attr>\s\S*?=\".*?\")",
        r"(?P<string>\".*?\")",
        r"(?P<substitution>\$\{.*?\})",
        r"(?P<comment>\<\!\-\-.*?\-\-\>)",
    ]
class PythonHighlighter(ConsoleHighlighter):
    """Highlighter for Python source.

    Declares styles but no patterns, so only a default style is applied.
    """

    styles = {"comment": "dim white italic", "string": "yellow"}
    highlights = []
class INIHighligher(ConsoleHighlighter):
    """Highlighter for INI-style settings files.

    NOTE(review): class name keeps the original "Highligher" spelling —
    renaming would break external references.
    """

    styles = {
        "section": "bold green",
        "key": "bold",
        "value": "cyan",
        "substitution": "bold magenta",
        "comment": "dim white italic",
    }

    highlights = [
        r"^(?P<key>\S+?)\s*?\=\s*?(?P<value>.*?)$",
        r"^\s+?(?P<value>.+?)$",
        r"(?P<substitution>\$\{.*?\})",
        r"^(?P<section>\[.*?\])",
        r"^(?P<comment>\#.*?)$",
    ]
def style(style_def):
    """Convert a console style definition into a dict of attributes.

    >>> style("bold red on yellow") == {"fg": "red", "bg": "yellow", "bold": True}
    True

    Dicts pass through unchanged; empty/falsy input yields ``{}``.

    Raises:
        ValueError: if an unknown style word is encountered.
    """
    if not style_def:
        return {}
    if isinstance(style_def, dict):
        return style_def
    colors = {"yellow", "magenta", "green", "cyan", "blue", "red", "black", "white"}
    text_styles = {"bold", "underline", "dim", "reverse", "italic"}
    # Renamed the accumulator from `style` to avoid shadowing this
    # function's own name (the original made the function unreachable
    # inside its own body).
    attrs = {}
    foreground = True
    style_set = True
    for word in style_def.split(" "):
        if word == "on":
            foreground = False  # subsequent colors apply to the background
        elif word == "not":
            style_set = False  # subsequent text styles are explicitly disabled
        elif word in colors:
            attrs["fg" if foreground else "bg"] = word
        elif word in text_styles:
            attrs[word] = style_set
        else:
            raise ValueError("unknown style '{}'".format(word))
    return attrs
class AttrText(text_type):
"""A string with associate console attribute information"""
def __init__(self, text, spans=None, *args, **kwargs):
super(AttrText, self).__init__()
self.attr_spans = spans or []
def __repr__(self):
return "AttrText(%r)" % super(AttrText, self).__repr__()
def add_span(self, start, end=None, **attrs):
"""Apply attributes to a span in the string"""
# if end < 0 or start > len(self):
# return
if end is None:
end = len(self)
self.attr_spans.append((max(0, start), min(len(self), end), attrs))
def splitlines(self):
"""Split text in to lines, preserving attributes"""
bucket_shift = 6
lines = [[] for _ in xrange((len(self) >> bucket_shift) + 1)]
pos = 0
new_lines = []
line_count = 0
find = self.find
l = len(self)
while pos < l:
line_end = find("\n", pos)
if line_end == -1:
line_end = len(self) # - 1
new_lines.append(AttrText(self[pos:line_end]))
for line_no in xrange(pos >> bucket_shift, (line_end >> bucket_shift) + 1):
lines[line_no].append((pos, line_end, line_count))
line_count += 1
pos = line_end + 1
for start, end, attrs in self.attr_spans:
for line_list in lines[start >> bucket_shift : (end >> bucket_shift) + 1]:
for line_start, line_end, line_offset in line_list:
line = new_lines[line_offset]
line.attr_spans.append(
(
max(0, start - line_start),
min(len(line), end - line_start),
attrs,
)
)
return new_lines
def __moyaconsole__(self, console):
"""Write to a console (called by Console.text)"""
chars = list(self)
attrs = [{} for c in chars]
for start, end, span_attrs in self.attr_spans:
for r in range(start, end):
attrs[r].update(span_attrs)
last_attrs = {}
accum = []
text = []
accum_append = accum.append
text_append = text.append
for c, c_attrs in zip(chars, attrs):
if c_attrs == last_attrs:
accum_append(c)
else:
if accum:
span_text = "".join(accum)
text_append((span_text, last_attrs))
del accum[:]
accum_append(c)
last_attrs = c_attrs
if accum:
span_text = "".join(accum)
text_append((span_text, last_attrs))
console_out = console.__call__
with console._lock:
for text, attrs in text:
console_out(text, **attrs)
if platform.system() == "Windows":
def getTerminalSize():
try:
## {{{ http://code.activestate.com/recipes/440694/ (r3)
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11from
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(
bufx,
bufy,
curx,
cury,
wattr,
left,
top,
right,
bottom,
maxx,
maxy,
) = struct.unpack(b"hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
else:
sizex, sizey = (
80,
25,
) # can't determine actual size - return default values
return sizex, sizey
except:
return 80, 25
else:
def getTerminalSize():
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
import os
cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
import os
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if cr:
return int(cr[1]), int(cr[0])
try:
h, w = os.popen("stty size", "r").read().split()
return int(w), int(h)
except:
pass
return 80, 25
class Cell(object):
"""A table cell"""
def __init__(self, text, processor=None, **attribs):
self.text = text_type(text)
self.processor = processor or (lambda t: t)
self.attribs = attribs
@classmethod
def create(cls, cell, processor=None):
if isinstance(cell, Cell):
return cell
if cell is None:
return Cell("None", italic=True, dim=True)
try:
text = text_type(cell)
except:
text = repr(cell)
attribs = {}
return Cell(text, processor=processor, **attribs)
def __len__(self):
return 2
def __iter__(self):
yield self.processor(self.text)
yield self.attribs
def __getitem__(self, index):
if index == 0:
return self.processor(self.text)
if index == 1:
return self.attribs
raise IndexError(index)
def __repr__(self):
return "Cell(%r)" % self.text
def get_min_length(self):
try:
return min(
max(len(token) for token in self.text.split(splitter))
for splitter in (" ", ",", "/")
)
except ValueError:
return 1
def get_lines(self, max_length=80, continuation="↪"):
center = self.attribs.get("center", False)
if max_length is None:
return self.text.splitlines()
lines = []
if max_length < 8:
continuation = ""
def add_line(line):
if not line:
lines.append("")
return
new_lines = []
while len(line) > max_length:
broken_line = line[:max_length]
break_pos = max_length
for break_char in " /.-_":
if break_char in broken_line[8:]:
break_pos = broken_line.rindex(break_char)
break
new_line = line[:break_pos]
remaining_line = continuation + line[break_pos:]
new_lines.append(new_line)
line = remaining_line
if line:
new_lines.append(line)
if center:
new_lines = [l.center(max_length) for l in new_lines]
lines.extend(new_lines)
for line in self.text.splitlines():
tokens = line.split(" ")[::-1]
line_tokens = []
while tokens:
token = tokens.pop()
line_tokens.append(token)
new_line = " ".join(line_tokens)
if len(new_line) > max_length:
if len(line_tokens) > 1:
line_tokens.pop()
add_line(" ".join(line_tokens))
del line_tokens[:]
tokens.append(token)
else:
add_line(new_line)
del line_tokens[:]
if line_tokens:
add_line(" ".join(line_tokens))
return lines
def expand(self, max_length=None, continuation="↪"):
lines = self.get_lines(max_length=max_length, continuation=continuation)
expanded_lines = [
Cell(line, processor=self.processor, **self.attribs) for line in lines
]
return expanded_lines
def make_table_header(*headers):
    """Build the bold header row for :meth:`Console.table`.

    Returns a one-row table (list of one list of :class:`Cell`).
    """
    return [[Cell(header, bold=True) for header in headers]]
class _TextOut(object):
    """File-like sink that accumulates written text in memory."""

    def __init__(self):
        self.text = []  # list of text fragments; joined in getvalue()

    def write(self, text):
        # Decode bytes as UTF-8 so the buffer only ever holds text.
        if not isinstance(text, text_type):
            text = text_type(text, "utf-8")
        self.text.append(text)

    def flush(self):
        # Nothing buffered outside self.text; present for file-API parity.
        pass

    def getvalue(self):
        """Return everything written so far as a single string."""
        return "".join(self.text)
class _ConsoleFileInterface(object):
    """A simple writable file-like proxy.

    Forwards every ``write`` to the owning :class:`Console`, applying the
    style keywords captured at construction time.
    """

    def __init__(self, console, **style):
        self._console = console
        self._style = style

    def write(self, text):
        self._console(text, **self._style)
class Console(object):
"""Write output to the console, with styles and color."""
fg_colors = dict(
black=30, red=31, green=32, yellow=33, blue=34, magenta=35, cyan=36, white=37
)
bg_colors = dict(
black=40, red=41, green=42, yellow=43, blue=44, magenta=45, cyan=46, white=47
)
_lock = RLock()
def __init__(
self,
out=None,
nocolors=False,
text=False,
width=None,
html=False,
unicode_borders=True,
):
self.unicode_borders = unicode_borders
if html:
self.html = True
self.encoding = "utf-8"
self.out = _TextOut()
self.terminal_width = width or 120
return
if text:
self.unicode_borders = False
nocolors = True
out = _TextOut()
self.out = out or sys.stdout
self.html = html
self.encoding = getattr(self.out, "encoding", "utf-8") or "utf-8"
if nocolors:
self.terminal_colors = False
else:
self.terminal_colors = self.is_terminal()
if sys.platform.startswith("win") and not colorama:
self.terminal_colors = False
if self.is_terminal():
w, h = getTerminalSize()
self.terminal_width = w
else:
self.terminal_width = width or 80
self.unicode_borders = self.terminal_colors and not WIN
if not self.terminal_width:
self.terminal_width = 80
@property
def width(self):
return self.terminal_width
def make_file_interface(self, **style):
return _ConsoleFileInterface(self)
def flush(self):
with self._lock:
if hasattr(self.out, "flush"):
self.out.flush()
def update_terminal_width(self):
with self._lock:
if self.is_terminal():
w, h = getTerminalSize()
self.terminal_width = w or 80
def get_text(self):
assert isinstance(self.out, _TextOut)
return self.out.getvalue()
def __repr__(self):
return "<console>"
def is_terminal(self):
try:
return self.out.isatty() and not self.html
except AttributeError:
return False
def _html_out(
self,
text,
fg=None,
bg=None,
bold=False,
underline=False,
dim=False,
reverse=False,
italic=False,
nl=False,
):
css_classes = []
append = css_classes.append
if bold:
append("console-bold")
if underline:
append("console-underline")
if dim:
append("console-dim")
if reverse:
append("console-reverse")
if italic:
append("console-italic")
if fg is not None:
append("console-foreground-" + fg)
if bg is not None:
append("console-background-" + bg)
if italic:
append("console-italic")
class_attrib = " ".join(css_classes)
if class_attrib:
tag = '<span class="{}">'.format(class_attrib)
self.out.write(tag)
text = text.replace("&", "&").replace("<", "<").replace(">", ">")
text = text.replace("\n", "<br>")
self.out.write(text.encode("utf-8"))
if class_attrib:
self.out.write("</span>")
if nl:
self.out.write("<br>")
return self
def __call__(
self,
text,
fg=None,
bg=None,
bold=False,
underline=False,
dim=False,
reverse=False,
italic=False,
nl=False,
asstr=False,
center=False,
):
if isinstance(text, AttrText):
return self.text(text)
if self.html:
return self._html_out(
text,
fg=fg,
bg=bg,
bold=bold,
underline=underline,
dim=dim,
reverse=reverse,
italic=italic,
nl=nl,
)
if isinstance(text, lazystr):
text = text_type(text)
if isinstance(text, bytes):
text = text.decode("ascii", "replace")
if PY2:
text = text.encode(self.encoding, "replace")
if nl:
text += "\n"
if not self.terminal_colors:
if asstr:
return text
self.out.write(text)
return self
out = []
fg = self.fg_colors.get(fg, None) if fg is not None else None
bg = self.bg_colors.get(bg, None) if bg is not None else None
attrs = []
if bold:
attrs.append(1)
if dim:
attrs.append(2)
if italic:
attrs.append(3)
if underline:
attrs.append(4)
if reverse:
attrs.append(7)
attrs.append(fg)
attrs.append(bg)
display_attributes = ["%i" % da for da in attrs if da is not None]
if display_attributes:
out.append("\x1b[%sm" % ";".join(display_attributes))
out.append(text)
if display_attributes:
out.append("\x1b[0m")
if asstr:
return "".join(out)
if PY3:
def console_encode(s):
"""Work around a bug with colorama on Windows"""
if self.encoding.lower() != "utf-8":
return s.encode(self.encoding, "replace").decode(self.encoding)
return s
with self._lock:
self.out.write(
"".join(
(
text.decode("utf-8", "replace")
if isinstance(text, bytes)
else console_encode(text)
)
for text in out
)
)
else:
with self._lock:
self.out.write(
b"".join(
(
text.encode(self.encoding, "replace")
if isinstance(text, text_type)
else text
)
for text in out
)
)
return self
def progress(self, msg, num_steps=100, width=12):
"""A context manager to manage progress bars"""
from .progress import Progress, ProgressContext
p = Progress(self, msg, num_steps=num_steps, width=width)
p.render()
return ProgressContext(p)
def nl(self, count=1):
with self._lock:
if self.html:
self.out.write("<br>" * count)
else:
self.out.write("\n" * count)
return self
def div(self, msg=None, **attrs):
"""Inserts a horizontal dividing line"""
if "italic" not in attrs:
attrs["italic"] = True
with self._lock:
self.update_terminal_width()
if msg is None:
self("-" * self.terminal_width, dim=True).nl()
else:
space = self.terminal_width - len(msg)
lspace = space // 2
rspace = space - lspace
self("-" * lspace, dim=True)(msg, **attrs)("-" * rspace, dim=True).nl()
return self
def text(self, text, **params):
with self._lock:
if isinstance(text, AttrText):
text.__moyaconsole__(self)
else:
self(text, **params).nl()
return self
def wraptext(self, text, do_dedent=True, **attribs):
"""Output wrapper text"""
with self._lock:
if do_dedent:
text = dedent(text)
for line in text.splitlines():
wrapped_text = wrap(line, self.terminal_width)
self.text("\n".join(wrapped_text).lstrip(), **attribs)
return self
def xmlsnippet(self, code, lineno=1, colno=None, extralines=3, line_numbers=True):
"""Render a snippet of xml, with a highlighted line"""
with self._lock:
if not code:
return
if colno is not None:
highlight_columns = (colno - 1, colno)
else:
highlight_columns = None
_lineno = max(0, lineno - extralines)
self.snippet(
code,
(_lineno, _lineno + extralines * 2 + 1),
highlight_line=lineno,
highlight_columns=highlight_columns,
line_numbers=True,
)
return self
def pysnippet(self, code, lineno=1, colno=None, extralines=3, line_numbers=True):
"""Render a snippet of xml, with a highlighted line"""
with self._lock:
if not code:
return
if colno is not None:
highlight_columns = (colno - 1, colno)
else:
highlight_columns = None
_lineno = max(0, lineno - extralines)
highlighter = PythonHighlighter()
self.snippet(
code,
(_lineno, _lineno + extralines * 2 + 1),
highlight_line=lineno,
highlight_columns=highlight_columns,
line_numbers=True,
highlighter=highlighter,
)
return self
def ini(self, code):
highlighter = INIHighligher()
self.snippet(code, highlighter=highlighter, line_numbers=False)
def templatesnippet(
self, code, lineno=1, colno=None, endcolno=None, extralines=3, line_numbers=True
):
with self._lock:
if not code:
return
if colno is not None:
highlight_columns = (colno - 1, colno if endcolno is None else endcolno)
else:
highlight_columns = None
_lineno = max(0, lineno - extralines)
self.snippet(
code,
(_lineno, _lineno + extralines * 2 + 1),
highlight_line=lineno,
highlight_columns=highlight_columns,
highlighter=TemplateHighlighter,
line_numbers=line_numbers,
)
return self
def xml(self, code):
code = code.strip("\n") + "\n"
self.snippet(code, line_numbers=False)
return self
# with self._lock:
# self.update_terminal_width()
# if not self.terminal_colors:
# self(code).nl()
# else:
# try:
# from pygments import highlight
# from pygments.lexers import XmlLexer
# from pygments.formatters import TerminalFormatter
# except ImportError:
# self(xml).nl()
# else:
# hcode = highlight(code, XmlLexer(), TerminalFormatter())
# self(hcode)
# return self
def document_error(self, msg, path, code, lineno, colno, diagnosis=None):
with self._lock:
self.div()
if colno is None:
self('File "%s", line %i' % (path, lineno)).nl()
else:
self('File "%s", line %i, column %i' % (path, lineno, colno)).nl()
self(msg, fg="red", bold=True).nl()
if diagnosis:
self.table([[Cell(diagnosis, italic=True)]])
self.xmlsnippet(code, lineno, colno)
return self
def success(self, msg):
with self._lock:
self.wraptext(msg, fg="green", bold=True)
return self
def error(self, msg):
"""Renders a generic error"""
with self._lock:
self.wraptext(msg, fg="red", bold=True)
return self
def exception(self, exc, tb=False):
with self._lock:
self.update_terminal_width()
if not tb:
if hasattr(exc, "get_moya_error"):
exc_text = exc.get_moya_error()
else:
exc_text = text_type(exc)
self(exc_text, fg="red", bold=True).nl()
return self
if isinstance(exc, string_types):
raw_tb = exc
else:
import traceback
raw_tb = traceback.format_exc()
if self.terminal_colors:
from pygments import highlight
from pygments.lexers import PythonTracebackLexer, Python3TracebackLexer
from pygments.formatters import TerminalFormatter
if PY2:
lexer = PythonTracebackLexer
else:
lexer = Python3TracebackLexer
htb = highlight(raw_tb, lexer(), TerminalFormatter())
self(htb)
else:
self(raw_tb)
return self
def table(
self,
table,
header_row=None,
grid=True,
header=True,
divider_attribs=None,
pad=1,
border_style=0,
dividers=True,
cell_processors=None,
):
"""Renders a table of cells with an optional ASCII grid
A table should be a list of lists, where each element is either a string
or a tuple of a string and a dictionary of attributes.
"""
table = list(table)
with self._lock:
if cell_processors is None:
cell_processors = {}
if header_row is not None and header:
table = make_table_header(*header_row) + table[:]
tl = tr = bl = br = ir = it = il = ib = ii = "+"
hor = "-"
ver = "|"
continuation = "..."
if self.unicode_borders:
continuation = "↪ "
if border_style == 0:
tl = "╭"
tr = "╮"
bl = "╰"
br = "╯"
il = "├"
ir = "┤"
it = "┬"
ib = "┴"
hor = "─"
ver = "│"
ii = "┼"
elif border_style == 1:
tl = "╔"
tr = "╗"
bl = "╚"
br = "╝"
il = "╠"
ir = "╣"
it = "╦"
ib = "╩"
hor = "═"
ver = "║"
ii = "╬"
elif border_style == 2:
tl = "┏"
tr = "┓"
bl = "┗"
br = "┛"
il = "┣"
ir = "┫"
it = "┳"
ib = "┻"
hor = "━"
ver = "┃"
ii = "╋"
self.update_terminal_width()
terminal_width = self.terminal_width
if WIN:
terminal_width -= 1
table = [
[
Cell.create(cell, cell_processors.get(rowno))
for rowno, cell in enumerate(row)
]
for row in table
]
def cell_len(cell):
try:
return max(
len(line) for line in cell.get_lines(max_length=terminal_width)
)
except ValueError:
return 0
cell_lengths = []
cell_min_lengths = []
for row_no in xrange(len(table[0])):
cell_lengths.append(max(cell_len(col[row_no]) for col in table))
cell_min_lengths.append(
max(col[row_no].get_min_length() for col in table)
)
num_cols = len(cell_lengths)
table_padding = (num_cols * pad * 2) + (num_cols - 1) + grid * 2
table_width = sum(cell_lengths) + table_padding
if table_width > self.terminal_width:
# make each column its minimum until the table fits, starting with the widest column
for i, (cell_length, min_length) in sorted(
enumerate(zip(cell_lengths, cell_min_lengths)),
key=lambda c: c[1][1],
reverse=True,
):
over_size = table_width - terminal_width
cell_lengths[i] -= min(over_size, (cell_length - min_length))
table_width = sum(cell_lengths) + table_padding
if sum(cell_lengths) <= terminal_width:
break
over_space = table_width - terminal_width
while over_space > 0:
largest_value = 0
largest_index = None
for i, l in enumerate(cell_lengths):
if l > largest_value:
largest_value = l
largest_index = i
if largest_index is None:
break
reduce = min(over_space, int(largest_value - 4))
cell_lengths[largest_index] -= reduce
if reduce <= 0:
break
over_space -= reduce
if grid:
if divider_attribs is None:
divider_attribs = {"dim": True}
top_divider = (tl + "%s" + tr) % it.join(
hor * (l + pad * 2) for l in cell_lengths
)
mid_divider = (il + "%s" + ir) % ii.join(
hor * (l + pad * 2) for l in cell_lengths
)
bot_divider = (bl + "%s" + br) % ib.join(
hor * (l + pad * 2) for l in cell_lengths
)
else:
divider_attribs = {}
divider = ""
top_divider = ""
mid_divider = ""
bot_divider = ""
if grid:
self(top_divider, **divider_attribs).nl()
padding = pad * " "
separator = ver if grid else " "
for row_no, row in enumerate(table):
if row_no == len(table) - 1:
divider = bot_divider
else:
divider = mid_divider
expanded_cells = [
cell.expand(cell_length, continuation=continuation)
for cell_length, cell in zip(cell_lengths, row)
]
max_height = max(len(cell) for cell in expanded_cells)
for cell_line_no in xrange(max_height):
expanded_row = [
ecell[cell_line_no] if cell_line_no < len(ecell) else Cell("")
for ecell in expanded_cells
]
if grid:
r = [(ver, divider_attribs)]
else:
r = []
append = r.append
last_i = len(expanded_row) - 1
for i, (cell, cell_length) in enumerate(
zip(expanded_row, cell_lengths)
):
text, attribs = cell
if grid:
cell_text = padding + text.ljust(cell_length) + padding
else:
cell_text = (
padding * (i > 0)
+ text.ljust(cell_length)
+ padding * (i < last_i)
)
cell_text = cell.processor(cell_text)
append((cell_text, attribs))
if grid or i < last_i:
append((separator, divider_attribs))
for text, attribs in r:
self(text, **attribs)
self.nl()
if grid:
if dividers and not header:
self(divider, **divider_attribs).nl()
elif not dividers:
if row_no == len(table) - 1:
self(divider, **divider_attribs).nl()
elif row_no in (0, len(table) - 1):
self(divider, **divider_attribs).nl()
return self
def cat(self, contents, path):
with self._lock:
self.update_terminal_width()
if not self.terminal_colors:
self(contents).nl()
else:
ext = splitext(path)[-1].lower()
if ext in (".htm", ".html", ".text"):
self(TemplateHighlighter.highlight(contents))
return self
elif ext in (".xml",):
self(XMLHighlighter.highlight(contents))
return self
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_for_filename, guess_lexer
from pygments.util import ClassNotFound
try:
lexer = guess_lexer(contents)
except ClassNotFound:
try:
lexer = get_lexer_for_filename(path)
except ClassNotFound:
lexer = None
if lexer is None:
self(contents).nl()
else:
hcode = highlight(contents, lexer, TerminalFormatter())
self(hcode)
return self
def snippet(
self,
xml,
line_range=None,
line_numbers=True,
highlight_line=None,
highlight_columns=None,
highlighter=None,
):
if isinstance(xml, binary_type):
xml = xml.decode("utf-8", "replace")
with self._lock:
self.update_terminal_width()
if line_range is None:
start = 1
end = None
else:
start, end = line_range
# xml = xml.replace('\r', '\n')
xml = AttrText(xml)
if highlighter is None:
highlighter = XMLHighlighter
xml = highlighter.highlight(xml)
lines = xml.splitlines()
if end is None:
end = len(lines) + 1
if start < 1:
start = 1
if end > len(lines):
end = len(lines) + 1
lines = lines[start - 1 : end - 1]
try:
max_number_length = (
max(len(text_type(n + 1)) for n in range(start, end)) + 1
)
except:
max_number_length = 0
if max_number_length < 6:
max_number_length = 6
if line_numbers:
for i, line in enumerate(lines):
line_no = i + start
if highlight_line is not None and highlight_line == line_no:
if self.terminal_colors:
if self.unicode_borders:
indicator = "\u2022"
else:
indicator = "*"
else:
indicator = "*"
number = (indicator + text_type(line_no)).rjust(
max_number_length, " "
) + " "
if highlight_columns:
col_start, col_end = highlight_columns
line.add_span(col_start, col_end, fg="red", underline=True)
self(number, fg="blue", bold=True)(line).nl()
else:
number = text_type(line_no).rjust(max_number_length) + " "
self(number, fg="blue", dim=False)(line).nl()
else:
for line in lines:
self(line).nl()
return self
def obj(self, context, obj, **kwargs):
"""Writes information regarding an object to the console"""
with self._lock:
if hasattr(obj, "__moyaconsole__"):
try:
obj.__moyaconsole__(self)
except Exception:
pass
else:
return self
if isinstance(obj, string_types):
self.text(context.to_expr(obj, max_size=1000), **kwargs)
elif isinstance(obj, dict):
table = []
for k, v in sorted(obj.items()):
table.append([k, context.to_expr(v, max_size=1000)])
self.table(table, header_row=["key", "value"])
elif isinstance(obj, (list, tuple)):
table = [
(i, context.to_expr(v, max_size=1000)) for i, v in enumerate(obj)
]
self.table(table, header_row=["index", "value"])
elif isinstance(obj, bool):
if obj:
self.text("True", bold=True, fg="green")
else:
self.text("False", bold=True, fg="red")
else:
self.text(context.to_expr(obj, max_size=1000), **kwargs)
return self
def show_cursor(self, show=True):
if not WIN:
if show:
self.out.write("\033[?25h")
else:
self.out.write("\033[?25l")
self.out.flush()
def test_table(console):
long_text = """If you are writing an application of any size, it will most likely require a number of files to run - files which could be stored in a variety of possible locations. Furthermore, you will probably want to be able to change the location of those files when debugging and testing. You may even want to store those files somewhere other than the user's hard drive."""
bold = dict(bold=True)
red = dict(fg="red")
table = [
[("foo", dict(reverse=True, bold=True, dim=True)), ("100", red), "Some text"],
[
("long text!", dict(reverse=True, bold=True, dim=True)),
long_text,
"Some text",
],
[
("bar", bold),
("120", red),
("Some more text\nline 2\nline 3", {"bg": "green"}),
],
[("foo", bold), ("100", red), "Some text"],
[("bar", bold), ("120", red), "Some more text"],
[
("A longer cell...\nwith multiple lines", bold),
(120344, red),
"Some more text",
],
[("foo", bold), ("100", red), "Some text"],
[("bar", bold), ("120", red), "Some more text"],
]
console.table(table, grid=True)
console.nl()
if __name__ == "__main__":
console = Console()
test_table(console)
import sys
# sys.exit()
xml = """<moya xmlns="http://moyaproject.com">
<mountpoint name="main">
<url url="{*path}">
<call py="static.check_hide" dst="hidden" >
<set value="url.path" />
</call>
<if test="url.path $= '/'">
<call py="static.get_dirlist" dst="dirlist">
<set value=".fs[app.settings.fs]" />
<set value="url.path" />
</call>
<servepage template="dirlist.html" withscope="y" if="dirlist" />
</if>
<else>
<serve fs="${app.settings.fs}" path="${url.path}" />
</else>
</url>
</mountpoint>
</moya>
<!-- Commented <b>out</b> -->
"""
console = Console()
# console.xml_trace(xml, (4, 11), highlight_line=8)
console.snippet(xml, (4, 11), highlight_line=7, highlight_columns=(12, 100))
if 0:
console = Console()
console("Hello ", dim=True)("World!", bold=True).nl()
console("Green!", bg="green", fg="white").nl()
xml = (
"""<moya xmlns="http://moyaproject.com">
<mountpoint name="testmount" libname="root">
<url name="article" url="/{year}/{month}/{day}/{slug}/" methods="GET" target="viewpost">
<debug>url main: ${url.year}, ${url.month}, ${url.day}, ${url.slug}</debug>
</url>
<url name="front" url="/" methods="GET">
<debug>Front...</debug>
<return><str>Front</str></return>
</url>
</mountpoint>
<macro docname="viewpost">
<debugIn viewpost</debug>
<return><str>Hello, World</str></return>
<return>
<response template="birthday.html">
<str dst="title">My Birthday</str>
<str dst="body">It was my birthday today!</str>
</response>
</return>
</macro>
<!--
<macro libname="showapp">
<debug>App is ${app}</debug>
</macro>
<macro libname="blogmacro">
<debug>Called blogmacro in blog lib</debug>
</macro>
<macro libname="blogmacro2">
<debug>Called blogmacro2 with app: ${debug:app}</debug>
</macro>
-->
</moya>"""
* 10
)
console.xmlsnippet(xml, 6)
try:
1 / 0
except Exception as e:
console.exception(e, tb=True)
bold = dict(bold=True)
red = dict(fg="red")
long_text = """If you are writing an application of any size, it will most likely require a number of files to run - files which could be stored in a variety of possible locations. Furthermore, you will probably want to be able to change the location of those files when debugging and testing. You may even want to store those files somewhere other than the user's hard drive."""
table = [
[("foo", dict(reverse=True, bold=True, dim=True)), ("100", red), "Some text"],
[
("long text!", dict(reverse=True, bold=True, dim=True)),
long_text,
"Some text",
],
[
("bar", bold),
("120", red),
("Some more text\nline 2\nline 3", {"bg": "green"}),
],
[("foo", bold), ("100", red), "Some text"],
[("bar", bold), ("120", red), "Some more text"],
[
("A longer cell...\nwith multiple lines", bold),
(120344, red),
"Some more text",
],
[("foo", bold), ("100", red), "Some text"],
[("bar", bold), ("120", red), "Some more text"],
]
console.table(table, grid=True)
console.nl()
console.table(table, grid=True, header=True)
console.nl()
console.table(table, grid=False, pad=0)
# for line in Cell("""If you are writing an application of any size, it will most likely require a number of files to run - files which could be stored in a variety of possible locations. Furthermore, you will probably want to be able to change the location of those files when debugging and testing. You may even want to store those files somewhere other than the user's hard drive.""").get_lines():
# print line
|
20,512 | 7d8de01c3e9b12bab702e2b6d75143c5a044ec44 | # Copyright (c) 2021, Hitachi America Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import click
import torch
import transformers
from transformers import AutoConfig, AutoTokenizer
from contract_nli.conf import load_conf
from contract_nli.dataset.dataset import load_and_cache_examples, \
load_and_cache_features
from contract_nli.evaluation import evaluate_all
from contract_nli.model.classification import BertForClassification
from contract_nli.model.identification_classification import \
MODEL_TYPE_TO_CLASS
from contract_nli.postprocess import format_json, compute_prob_calibration_coeff
from contract_nli.predictor import predict, predict_classification
logger = logging.getLogger(__name__)
@click.command()
@click.option('--dev-dataset-path', type=click.Path(exists=True), default=None)
@click.argument('model-dir', type=click.Path(exists=True))
@click.argument('dataset-path', type=click.Path(exists=True))
@click.argument('output-prefix', type=str)
def main(dev_dataset_path, model_dir, dataset_path, output_prefix):
    """Run inference with a trained model and write prediction/metric JSON files.

    Loads the model saved in MODEL-DIR, predicts over DATASET-PATH, then writes
    "<output-prefix>result.json" and "<output-prefix>metrics.json".  When
    --dev-dataset-path is given (identification_classification task only), the
    dev set is first used to fit a probability calibration coefficient.
    """
    conf: dict = load_conf(os.path.join(model_dir, 'conf.yml'))
    device = torch.device("cuda" if torch.cuda.is_available() and not conf['no_cuda'] else "cpu")
    n_gpu = 0 if conf['no_cuda'] else torch.cuda.device_count()
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    transformers.utils.logging.set_verbosity_info()
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    logger.info("Loading models with following conf %s",
                {k: v for k, v in conf.items() if k != 'raw_yaml'})
    # Tokenizer/config are restored from the same directory the model was saved to.
    tokenizer = AutoTokenizer.from_pretrained(
        model_dir,
        do_lower_case=conf['do_lower_case'],
        cache_dir=conf['cache_dir'],
        use_fast=False
    )
    config = AutoConfig.from_pretrained(
        model_dir,
        cache_dir=conf['cache_dir']
    )
    # Pick the model class by task: span identification + NLI vs. plain classification.
    if conf['task'] == 'identification_classification':
        model = MODEL_TYPE_TO_CLASS[config.model_type].from_pretrained(
            model_dir, cache_dir=conf['cache_dir']
        )
    else:
        model = BertForClassification.from_pretrained(
            model_dir, cache_dir=conf['cache_dir'])
    model.to(device)
    # Optional: fit a calibration coefficient on the dev set first.
    if dev_dataset_path is not None:
        if conf['task'] != 'identification_classification':
            raise click.BadOptionUsage(
                '--dev-dataset-path',
                '--dev-dataset-path cannot be used when the task is not identification_classification')
        examples = load_and_cache_examples(
            dev_dataset_path,
            local_rank=-1,
            overwrite_cache=True,
            cache_dir='.'
        )
        dataset, features = load_and_cache_features(
            dev_dataset_path,
            examples,
            tokenizer,
            max_seq_length=conf['max_seq_length'],
            doc_stride=conf.get('doc_stride', None),
            max_query_length=conf['max_query_length'],
            dataset_type=conf['task'],
            symbol_based_hypothesis=conf['symbol_based_hypothesis'],
            threads=None,
            local_rank=-1,
            overwrite_cache=True,
            labels_available=True,
            cache_dir='.'
        )
        all_results = predict(
            model, dataset, examples, features,
            per_gpu_batch_size=conf['per_gpu_eval_batch_size'],
            device=device, n_gpu=n_gpu,
            weight_class_probs_by_span_probs=conf[
                'weight_class_probs_by_span_probs'])
        calibration_coeff = compute_prob_calibration_coeff(
            examples, all_results)
    else:
        calibration_coeff = None
    # Main prediction pass over the target dataset.
    examples = load_and_cache_examples(
        dataset_path,
        local_rank=-1,
        overwrite_cache=True,
        cache_dir='.'
    )
    dataset, features = load_and_cache_features(
        dataset_path,
        examples,
        tokenizer,
        max_seq_length=conf['max_seq_length'],
        doc_stride=conf.get('doc_stride', None),
        max_query_length=conf['max_query_length'],
        dataset_type=conf['task'],
        symbol_based_hypothesis=conf['symbol_based_hypothesis'],
        threads=None,
        local_rank=-1,
        overwrite_cache=True,
        labels_available=True,  # NOTE(review): assumes gold labels ship with the test set (needed by evaluate_all below)
        cache_dir='.'
    )
    if conf['task'] == 'identification_classification':
        all_results = predict(
            model, dataset, examples, features,
            per_gpu_batch_size=conf['per_gpu_eval_batch_size'],
            device=device, n_gpu=n_gpu,
            weight_class_probs_by_span_probs=conf['weight_class_probs_by_span_probs'],
            calibration_coeff=calibration_coeff)
    else:
        all_results = predict_classification(
            model, dataset, features,
            per_gpu_batch_size=conf['per_gpu_eval_batch_size'],
            device=device, n_gpu=n_gpu)
    # Persist predictions, then evaluate them against the raw dataset JSON.
    result_json = format_json(examples, all_results)
    with open(output_prefix + 'result.json', 'w') as fout:
        json.dump(result_json, fout, indent=2)
    with open(dataset_path) as fin:
        test_dataset = json.load(fin)
    metrics = evaluate_all(test_dataset, result_json,
                           [1, 3, 5, 8, 10, 15, 20, 30, 40, 50],
                           conf['task'])
    logger.info(f"Results@: {json.dumps(metrics, indent=2)}")
    with open(output_prefix + 'metrics.json', 'w') as fout:
        json.dump(metrics, fout, indent=2)
if __name__ == "__main__":
    main()
|
20,513 | 7dbcc51e787b7c28da7588b889bad8a8ecc6b9f8 | import jenkins
# Connect to the local Jenkins controller and delete the job named 'myjob'.
# NOTE(review): credentials are hard-coded — load them from config/env instead.
j= jenkins.Jenkins("http://localhost:8080","admin","admin")
j.delete_job('myjob')
|
20,514 | d2754ef413d1b97a5df35b3e3e72f6f889bc19bd | # chaque fichier cree doit finir par .py afin d'etre compris dans l'éditeur de code
# pour ecrire un commentaire ecrire # avant le message ou """ pour commenter plusieurs lignes """
""" on peut commenter une ligne grâce à CTRL + /
on peut commenter plusieurs lignes en sélectionnant les lignes et en faisant ALT + MAJ + A """
"""
Qu'est ce qu'une variable ?
une variable est une valeur que l'on va definir: exemple
a = 1
print(a)
la console n'affichera que 1
principe de programmation:
le typage:
string => un string est une chaine de charactere souvent indiqué grace à " avant et apres, exemple: a = "pomme"
int => un chiffre rond, 1 - 2 - 3 etc..
float => chiffre a virgule, 0.5 - 1.2 - 1.3 etc..
documentation: https://python.doctor/page-apprendre-variables-debutant-python
on retien donc que:
a = 1 -> int (integer en anglais ou entier en francais mais int en python)
a = "1" -> string (chaine de character en francais)
a = 1.2 -> float (nombre a virgule en francais)
pour connaitre le type de a -> type(a)
type() est une fonction qui va te retourner (te repondre) se qu'est la variable mis en condition, dans notre cas la variable a
en python lors que l'on multiplie un string: exemple
a = "pomme"
print(a*3)
cela va retourner pommepommepomme
et si l'on avait dit que a = pomme (sans les ") tu aurais eu une erreur car pomme n'est pas un chiffre
"""
# maintenant passons aux exercices -> je ferai une correction si tu veux
# Pour toute question vous pouvez m'envoyer un mail: thibaut.fourneaux@outlook.com |
20,515 | 86239e86cb29fc66a2104c3ad3fac32d1396a86f | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql import types as T
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format
from pyspark.sql.functions import monotonically_increasing_id
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config.get('AWS', 'AWS_ACCESS_KEY_ID')
os.environ['AWS_SECRET_ACCESS_KEY']=config.get('AWS', 'AWS_SECRET_ACCESS_KEY')
def create_spark_session():
    """Create (or fetch the existing) SparkSession wired with the hadoop-aws package."""
    builder = SparkSession.builder.config(
        "spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0"
    )
    return builder.getOrCreate()
def process_song_data(spark, input_data, output_data):
    """Reads in JSON song data and then writes songs and artists tables to parquet on S3.
    Args:
        spark: The current SparkSession.
        input_data: The S3 bucket to read in the data.
        output_data: The S3 bucket to write to.
    """
    # Read every song JSON file under the nested song-data layout.
    song_path = os.path.join(input_data, "song-data/*/*/*/*.json")
    songs_df = spark.read.json(song_path)
    # Songs dimension: partitioned on year and artist_id for pruning.
    song_cols = ['song_id', 'title', 'artist_id', 'year', 'duration']
    (songs_df.select(*song_cols)
             .write.partitionBy('year', 'artist_id')
             .parquet(output_data + 'songs', mode='overwrite'))
    # Artists dimension: flat parquet, no partitioning.
    artist_cols = ['artist_id', 'artist_name', 'artist_location',
                   'artist_latitude', 'artist_longitude']
    songs_df.select(*artist_cols).write.parquet(output_data + 'artists', mode='overwrite')
def process_log_data(spark, input_data, output_data):
    """Reads in JSON log data and then writes users, time and songplays tables to parquet on S3.
    Args:
        spark: The current SparkSession.
        input_data: The S3 bucket to read in the data.
        output_data: The S3 bucket to write to.
    """
    # get filepath to log data file
    # For working in the workspace: log_data = os.path.join(input_data, "log-data/*.json")
    log_data = os.path.join(input_data, "log-data/*/*/*.json")
    # read log data file
    df = spark.read.json(log_data)
    # filter by actions for song plays
    df = df.filter(df.page == 'NextSong')
    # rename the columns in df (camelCase source names -> snake_case)
    df = (df.withColumnRenamed('userId', 'user_id')
          .withColumnRenamed('firstName', 'first_name')
          .withColumnRenamed('lastName', 'last_name')
          .withColumnRenamed('itemInSession', 'item_in_session')
          .withColumnRenamed('sessionId', 'session_id')
          .withColumnRenamed('userAgent', 'user_agent'))
    # extract columns for users table
    users_table = df.select('user_id', 'first_name', 'last_name', 'gender', 'level').distinct()
    # write users table to parquet files
    users_table.write.parquet(output_data + 'users', mode='overwrite')
    # create timestamp column from original timestamp column (ts is epoch millis)
    # default type is string for UDFs, so we need to switch that by specifying the correct type
    get_timestamp = udf(lambda x: datetime.fromtimestamp(x/1000.0), T.TimestampType())
    df = df.withColumn('start_time', get_timestamp(df.ts))
    # extract columns to create time table
    time_table = df.select('start_time',
                           hour(col('start_time')).alias('hour'),
                           dayofmonth(col('start_time')).alias('day'),
                           weekofyear(col('start_time')).alias('week'),
                           month(col('start_time')).alias('month'),
                           year(col('start_time')).alias('year'),
                           date_format(col('start_time'), 'EEEE').alias('weekday'))
    # write time table to parquet files partitioned by year and month
    time_table.write.partitionBy('year', 'month').parquet(output_data + 'time', mode='overwrite')
    # read in song data to use for songplays table (written earlier by process_song_data)
    song_df = spark.read.parquet(output_data + 'songs/year=*/artist_id=*/*.parquet')
    artist_df = spark.read.parquet(output_data + 'artists/*.parquet')
    # extract columns from joined song and log datasets to create songplays table
    # NOTE(review): match relies on exact title/duration and artist-name equality
    songplays_table = df.join(song_df, (df.song == song_df.title) & (df.length == song_df.duration)).join(artist_df, df.artist == artist_df.artist_name).join(time_table, ['start_time'])
    # create the songplay_id column
    songplays_table = songplays_table.withColumn('songplay_id', monotonically_increasing_id())
    # select the columns of interest ('year'/'month' come from the time_table join)
    songplays_table = songplays_table.select('songplay_id', 'start_time', 'user_id', 'level', 'song_id', 'artist_id', 'session_id', 'location', 'user_agent', 'year', 'month')
    # write songplays table to parquet files partitioned by year and month (I think this is a copy paste error because year and month aren't listed as required cols)
    songplays_table.write.partitionBy('year', 'month').parquet(output_data + 'songplays', mode='overwrite')
def main():
    """Entry point: build the Spark session and run both ETL stages in order."""
    spark_session = create_spark_session()
    source_bucket = "s3a://udacity-dend/"
    target_bucket = "s3a://dend-bucket-cpm/"
    for stage in (process_song_data, process_log_data):
        stage(spark_session, source_bucket, target_bucket)
if __name__ == "__main__":
    main()
|
20,516 | 03726fae05dd05f3c15843749a06b338650efcac | __author__ = 'Boris Breuer'
import pyaudio
import numpy as np
import scipy.signal
# Audio-capture constants.
CHUNK = 1024 * 2          # frames per buffer
WIDTH = 2                 # sample width in bytes (16-bit)
DTYPE = np.int16
MAX_INT = 32768.0         # scale factor for int16 -> [-1, 1) float
CHANNELS = 1
RATE = 11025 * 1          # sample rate in Hz
RECORD_SECONDS = 20
# Imaginary unit as a numpy complex scalar.
# BUG FIX: np.cdouble((0, 1)) builds the 2-element array [0.+0.j, 1.+0.j]
# (sequence -> array), not the complex number 0+1j that was intended.
j = np.cdouble(1j)
pyAudio = pyaudio.PyAudio()
# Full-duplex stream: capture and playback on the same device.
stream = pyAudio.open(format=pyAudio.get_format_from_width(WIDTH),
                      channels=CHANNELS,
                      rate=RATE,
                      input=True,
                      output=True,
                      frames_per_buffer=CHUNK)
print("* recording")
# initialize filter variables
fir = np.zeros(CHUNK * 2)
fir[:(2 * CHUNK)] = 1.  # NOTE(review): fills the whole array -> flat moving-average FIR
fir /= fir.sum()
fir_last = fir
avg_freq_buffer = np.zeros(CHUNK)
obj = -np.inf
t = 10
# initialize sample buffer
buffer = np.zeros(CHUNK * 2)
from scipy.fftpack import fft
# FFT demo: two sine tones at 50 Hz and 80 Hz.
# Number of sample points
N = 600
# sample spacing
T = 1.0 / 800.0
x = np.linspace(0.0, N*T, N)
y = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(80.0 * 2.0*np.pi*x)
yf = fft(y)
yf2 = np.fft.fft(y)
# Frequency axis for the positive half of the spectrum.
xf = np.linspace(0.0, 1.0/(2.0*T), round(N/2))
import matplotlib.pyplot as plt
plt.plot(xf, 2.0/N * np.abs(yf[0:round(N/2)]))
plt.grid()
plt.show()
20,517 | d9100f32257e01ab6a2f7b3c54c3de7cbe884d0d | #some string operations to enjoy
#formating
'{0}, {1}, {2}'.format('a', 'b', 'c')
'Coordinates: {latitude}, {longitude}'.format(latitude='37.24N', longitude='-115.81W')
#output: 'Coordinates: 37.24N, -115.81W'
#comma as a thousands separator
'{:,}'.format(1234567890)
#output: '1,234,567,890'
#indexing into a positional argument (fixed: this example uses a tuple, not a dictionary)
values = (3, 5)
'X: {0[0]}; Y: {0[1]}'.format(values)
#output: 'X: 3; Y: 5'
#string indexing and slicing
print ("Python rocks!") #to print the string including the space
len("Python rocks!") #to get total length of the string above
#output: 13
#positive indexing for "Python rocks!"; 0 1 2 3 4 5 6 7 8 9 10 11 12
#negative indexing for "Python rocks!"; -13 -12 -11 -10 -9 -8 -7 -6 -5 -4 -3 -2 -1
#automatically in python strings are provided with the attributes of some few list methods without the actual use of list()
#except append, pop, insert and others but the main methods are len(), .find(char), .count(char) and slicing
#slicing
#you can change the numbers to get more outputs
a = "Python rocks!"
print(a[:6])
#Output: 'Python'
print(a[5])
#output: 'n'
print(a[7:])
#Output: 'rocks!'  (fixed: the slice runs to the end of the string)
print(a[7:11])
#output: 'rock'  (fixed: the stop index 11 is excluded)
print(a[::-1])
#output: skcor nohtyP
print(a[::-2])
#output: '!ko otP'
print(a[-4:-1])
#output: 'cks'  (fixed: indices -4..-2 are 'c', 'k', 's')
#counting number a characters in a string
a.count("o")
#output: 2
#finding position of a character in a string
b = "fish"
b.find("h")  #fixed: was b.find(h) — bare h is an undefined name; the character must be quoted
#output: 3
20,518 | df9fa6e6eaee49264534d38e0d379dee08229870 |
def findWinner(cardList):
    """Determine the winner from ``cardList``.

    NOTE(review): unimplemented stub — currently returns None for any input.
    """
    pass
20,519 | c409b06c71f51ab6d50beb65ae8e6b393292f2de | import os
# Walk the entries of a fixed (machine-specific) Windows folder and print
# basic metadata for each entry.
for i in os.scandir(r"C:\Users\csx\Desktop\ProcessExcel\excel_combine_connect"):
    print(i.name)       # entry name only
    print(i.path)       # full path
    print(i.is_dir())   # True for sub-directories
    print(i.is_file())  # True for regular files
20,520 | 71172cf821934c58b5882da315ad709bdffee158 | from telegram.ext import CommandHandler, CallbackQueryHandler
from settings import *
from keyboards import *
from game.player import Player
from game.bluffGame import BluffGame
from datetime import datetime
from database.user_db import *
from gamehandler import GameHandler
game_handler = GameHandler.get_instance()  # singleton registry of running games
import logging
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# database things: create missing tables and open one session shared by all handlers
Base.metadata.create_all()
Session = sessionmaker(bind=engine)
session = Session()
def start(bot, update):  # TODO display different messages for users with 0 and non 0 balance
    """Handle /start: greet the user and register them in the DB on first contact."""
    eff_user = update.effective_user
    first_name = eff_user.first_name
    user_id = eff_user.id  # renamed from `id` to stop shadowing the builtin
    msg = 'Hello ' + str(first_name) + '!' + " Let's play!"
    # The greeting is identical for new and known users, so send it exactly once
    # (the original duplicated this call in both branches).
    update.message.reply_text(text=msg,
                              reply_markup=start_keyboard_markup)
    # database: create the user record only when it does not exist yet
    if session.query(User.id).filter_by(id=user_id).scalar() is None:
        u = User(name=first_name, id=user_id)
        session.add(u)
        session.commit()
def menu(bot, update):
    """Redraw the main menu in place of the message that triggered the callback."""
    query_message = update.callback_query.message
    user = update.effective_user
    greeting = 'Hello ' + str(user.first_name) + '!' + " Let's play!"
    bot.edit_message_text(chat_id=update.effective_message.chat_id,
                          message_id=query_message.message_id,
                          text=greeting,
                          reply_markup=start_keyboard_markup)
def start_game(bot, update, job_queue):
    """Create a new BluffGame for the requesting user and wait for an opponent."""
    message_id = update.callback_query.message.message_id
    chat_id = update.effective_message.chat_id
    eff_user = update.effective_user
    user_id = eff_user.id
    first_name = eff_user.first_name
    # Wrap the Telegram user in a game Player bound to this chat/message.
    player = Player(user_id=user_id,
                    chat_id=chat_id,
                    message_id=message_id,
                    first_name=first_name,
                    bot=bot)
    game = BluffGame(game_id=game_handler.generate_id(),
                     created_player=player,
                     job_queue=job_queue,
                     turn_duration=20)  # seconds per turn
    game_handler.add_game(game)
    msg = "Awaiting your opponent"
    bot.edit_message_text(chat_id=player.chat_id,
                          message_id=player.message_id,
                          text=msg)
def list_games(bot, update):
    """Show the lobby: a 'no games' notice, or one button per joinable game."""
    query = update.callback_query
    games = game_handler.game_list
    if not games:
        bot.edit_message_text(chat_id=query.message.chat_id,
                              message_id=query.message.message_id,
                              text='no game available',
                              reply_markup=no_games_markup)
        return
    keyboard = [[InlineKeyboardButton(text=str(game.game_id),
                                      callback_data=str(game.game_id))]
                for game in games]
    bot.edit_message_text(chat_id=query.message.chat_id,
                          message_id=query.message.message_id,
                          text='Chose the game',
                          reply_markup=InlineKeyboardMarkup(keyboard))
def connect_game(bot, update):
    """Join the game whose id arrived as callback data, then start it."""
    message = update.effective_message
    chat_id = message.chat_id
    eff_user = update.effective_user
    user_id = eff_user.id
    first_name = eff_user.first_name
    query_data = update.callback_query.data  # the chosen game id
    if game_handler.get_game_by_id(query_data) is not None:
        game = game_handler.get_game_by_id(query_data)
        player = Player(user_id=user_id,
                        chat_id=chat_id,
                        message_id=update.callback_query.message.message_id,
                        first_name=first_name,
                        bot=bot)
        game.join(player)
        game.start()
def make_bet(bot, update):
    """Apply the chosen bet multiplier for the current player and advance the game.

    Ends the game once balances run out; otherwise either passes the turn or
    re-prompts the same player after a failed bet.
    """
    query_data = update.callback_query.data
    multiplier = multipliers[query_data]
    eff_user = update.effective_user
    user_id = eff_user.id
    game = game_handler.get_game_by_user_id(user_id)
    # method returns flag if bet successfully multiplied
    flag = game.whose_turn.make_bet(100, multiplier)
    if flag and (game.awaiting_player.balance > 0):
        # bet succeeded and the opponent can still play: pass the turn
        game.next_turn(bot, game.job)
    elif flag:
        # bet succeeded but the opponent is broke: game over
        game.stop()
        game_handler.game_list = []  # TODO proper removal
    else:
        if (game.whose_turn.balance <= 0) and (game.awaiting_player.balance <= 0):
            # both players are out of funds: game over
            game.stop()
            game_handler.game_list = []  # TODO proper removal
        elif game.whose_turn.balance <= 0:
            # current player is broke: turn passes to the opponent
            game.next_turn(bot, game.job)
        else:
            # bet failed but the player can retry: show the status and keyboard again
            msg = "Turn " + str(game.turn) + ' Fail! ' + 'Your turn ' + game.whose_turn.first_name + ' ' \
                  + 'Your balance: ' + str(game.whose_turn.balance) + ' Your bid: ' + str(game.whose_turn.bid)
            game.whose_turn.show_message(msg, bet_keyboard_markup)
def time_left(bot, update):
    """Answer the callback with the number of seconds remaining in the current turn."""
    query = update.callback_query
    game = game_handler.get_game_by_user_id(update.effective_user.id)
    elapsed = datetime.now() - game.turn_start
    remaining = game.turn_duration - elapsed.seconds
    bot.answerCallbackQuery(callback_query_id=query.id,
                            text='Time left: ' + str(remaining))
def help(bot, update):  # name kept for callers; note it shadows builtins.help
    """Replace the current message with the game-rules screen."""
    message = update.callback_query.message
    bot.edit_message_text(chat_id=message.chat_id,
                          message_id=message.message_id,
                          text='Game rules',
                          reply_markup=help_keyboard_markup)
def callback_eval(bot, update, job_queue):
    """Dispatch an inline-keyboard callback to the matching handler.

    Check order is preserved from the original: fixed actions first, then
    open game ids, then bet multipliers.
    """
    data = update.callback_query.data
    open_game_ids = [game.game_id for game in game_handler.game_list]
    if data == 'start_game':
        start_game(bot, update, job_queue)
    elif data == 'make_bet':
        make_bet(bot, update)
    elif data == 'game_list':
        list_games(bot, update)
    elif data == 'time_left':
        time_left(bot, update)
    elif data == 'help':
        help(bot, update)
    elif data == 'menu':
        menu(bot, update)
    elif data == 'game_list_refresh':
        list_games(bot, update)
    elif data in open_game_ids:
        connect_game(bot, update)
    elif data in multipliers:
        make_bet(bot, update)
def main():
    """Run bot: register handlers and serve updates via webhook."""
    logging.debug('Main')
    start_handler = CommandHandler('start', start)
    # One callback handler dispatches every inline-button press; job_queue is
    # forwarded so new games can schedule their turn timers.
    callback_handler = CallbackQueryHandler(callback_eval,
                                            pass_job_queue=True)
    # adding handlers
    handlers = [start_handler,
                callback_handler]
    for handler in handlers:
        dispatcher.add_handler(handler)
    # NOTE(review): dispatcher/updater/TOKEN/URL come in via the star imports above.
    updater.start_webhook(listen='0.0.0.0', port=8000, url_path=TOKEN)  # webhook_url=(URL + TOKEN)
    updater.bot.set_webhook(URL + TOKEN)
    updater.idle()
if __name__ == '__main__':
    main()
20,521 | 2f43cc09c1c2770cc277b48332eee8bfb231e257 | import random
import printing
import asciipictures
class Event:
    """A random in-game event.

    An event fires with probability ``prob`` and prints ``description``; it can
    optionally carry a condition with then/else command lists, an enemy to
    fight, and an ASCII picture to display.
    """
    def __init__(self, prob, description):
        self.prob = prob                # chance in [0, 1] that trigger() fires
        self.description = description  # text printed when the event fires
        self.fight = False              # True once a fighter is registered
        self.image = ""                 # ASCII picture name, "" for none
        self.cond = None                # optional condition object with .eval()
        self.thenC = []                 # commands run when cond evaluates true
        self.elseC = []                 # commands run when cond evaluates false
    def add_cond(self, cond):
        """Attach a condition evaluated when the event triggers."""
        self.cond = cond
    def append_then(self, x):
        """Queue a command for the condition's true branch."""
        self.thenC.append(x)
    def append_else(self, x):
        """Queue a command for the condition's false branch."""
        self.elseC.append(x)
    def add_room_fighter(self, fighter):
        """Register an enemy to fight when the event triggers.

        BUG FIX: original signature was ``add_room_fighter(figher)`` — it was
        missing ``self`` and read an undefined name ``fighter``.
        """
        self.fight = True
        self.fighter = fighter
    def add_image(self, image):
        """Attach the name of an ASCII picture shown when the event triggers."""
        self.image = image
    def trigger(self, stdscr):
        """Roll against ``prob`` and, on success, play out the event on *stdscr*."""
        if random.random() <= self.prob:
            printing.print_at_speed(self.description, 100, stdscr)
            if self.cond is not None:
                branch = self.thenC if self.cond.eval() else self.elseC
                for cmd in branch:
                    cmd.execute()
            if self.fight:
                # BUG FIX: was fight(self.ennemy) — ``ennemy`` is never set;
                # add_room_fighter() stores the opponent in ``self.fighter``.
                # NOTE(review): ``fight`` itself must be supplied by the
                # importing module — confirm.
                fight(self.fighter)
            if self.image != "":
                # BUG FIX: was asciipictues.display(picture) — misspelled
                # module and an undefined variable.
                stdscr.addstr(asciipictures.display(self.image) + "\n")
20,522 | 1266102353371e4e7822eb5e3a074392013f94ab | # Starting with the number 1 and moving to the right in a clockwise direction a 5 by 5 spiral is formed as follows:
# 21 22 23 24 25
# 20  7  8  9 10
# 19  6  1  2 11
# 18  5  4  3 12
# 17 16 15 14 13
# It can be verified that the sum of both diagonals is 101.
#
# What is the sum of both diagonals in a 1001 by 1001 spiral formed in the same way?
#
# Ring n (odd, n >= 3) has corners (n-2)^2 + k*(n-1) for k = 1..4, whose sum
# is 4*(n-2)^2 + 10*(n-1).
total = 1          # centre cell of the spiral (renamed from `sum`, which shadowed the builtin)
corner_base = 1    # (n-2)^2, the last number of the previous ring
n = 3
while n <= 1001:
    total += 4 * corner_base + 10 * n - 10
    corner_base = n * n
    n += 2
print(total)  # BUG FIX: Python-2 `print sum` is a SyntaxError on Python 3
20,523 | aed8f768fb85be9400532612734200a875346c44 |
import itertools, operator
from .data import *
import shapely, shapely.ops, shapely.geometry
from shapely.prepared import prep as supershapely
# PY3 fix
try:
basestring
except NameError:
basestring = (bytes,str) # PY3
# TODO: when multiple input, uses all possible combinations, but need a way to use spatial indexes etc
# SQL components
def aggreg(iterable, aggregfuncs, geomfunc=None):
    """Aggregate *iterable* into a single output row (and optionally a geometry).

    Aggregfuncs is a series of 3-tuples: an output column name, a value
    function or value hash index on which to base the aggregation, and a valid
    string or custom function for aggregating the retrieved values.  Returns
    ``(row, geom)`` when *geomfunc* is given, otherwise just ``row``.
    """
    import math  # used by is_missing(); not guaranteed to be re-exported by .data
    def lookup_geomfunc(agg):
        # handle aliases
        if agg == "dissolve":
            agg = "union"
        elif agg == "unique":
            agg = "difference"
        # detect
        if agg == "intersection":
            def _func(fs):
                gs = (f.get_shapely() for f in fs if f.geometry)
                cur = next(gs)
                for g in gs:
                    if not g.is_empty:
                        cur = cur.intersection(g)
                return cur.__geo_interface__
        elif agg == "difference":
            def _func(fs):
                gs = (f.get_shapely() for f in fs if f.geometry)
                cur = next(gs)
                for g in gs:
                    if not g.is_empty:
                        cur = cur.difference(g)
                return cur.__geo_interface__
        elif agg == "union":
            def _func(fs):
                gs = [f.get_shapely() for f in fs if f.geometry]
                if len(gs) > 1:
                    # BUG FIX: removed leftover debug print(gs)
                    from shapely.ops import cascaded_union
                    return cascaded_union(gs).__geo_interface__
                elif len(gs) == 1:
                    return gs[0].__geo_interface__
        elif hasattr(agg, "__call__"):
            # agg is not a string but a custom function
            return agg
        else:
            raise Exception("geomfunc must be a callable function or a valid set geometry string name")
        return _func
    def lookup_aggfunc(agg):
        # handle aliases
        if agg in ("average", "avg"):
            agg = "mean"
        # detect
        if agg == "count": return len
        elif agg == "sum": return sum
        elif agg == "max": return max
        elif agg == "min": return min
        elif agg == "first": return lambda seq: seq.__getitem__(0)
        elif agg == "last": return lambda seq: seq.__getitem__(-1)
        elif agg == "majority": return lambda seq: max(itertools.groupby(sorted(seq)), key=lambda gidgroup: len(list(gidgroup[1])))[0]
        elif agg == "minority": return lambda seq: min(itertools.groupby(sorted(seq)), key=lambda gidgroup: len(list(gidgroup[1])))[0]
        elif agg == "mean": return lambda seq: sum(seq)/float(len(seq))
        elif isinstance(agg, basestring) and agg.endswith("concat"):
            delim = agg[:-6]
            return lambda seq: delim.join((str(v) for v in seq))
        elif hasattr(agg, "__call__"):
            # agg is not a string but a function
            return agg
        else:
            raise Exception("aggfunc must be a callable function or a valid statistics string name")
    def check_valfunc(name, valfunc):
        # BUG FIX: take the field name as a parameter — the original read
        # `name` from a scope where it was never defined, so the error path
        # raised NameError instead of the intended message.
        if hasattr(valfunc, "__call__"):
            pass
        elif isinstance(valfunc, basestring):
            hashindex = valfunc
            valfunc = lambda f: f[hashindex]
        else:
            raise Exception("valfunc for field '%s' must be a callable function or a string of the hash index for retrieving the value" % name)
        return valfunc
    aggregfuncs = [(name, check_valfunc(name, valfunc), aggname, lookup_aggfunc(aggname))
                   for name, valfunc, aggname in aggregfuncs]
    def make_number(value):
        try: return float(value)
        except Exception: return None
    def is_missing(val):
        # None and NaN both count as missing
        return val is None or (isinstance(val, float) and math.isnan(val))
    iterable = list(iterable)
    row = []
    for _, valfunc, aggname, aggfunc in aggregfuncs:
        values = (valfunc(item) for item in iterable)
        # missing values are not considered when calculating stats
        values = [val for val in values if not is_missing(val)]
        if aggname in ("sum", "max", "min", "mean"):
            # only consider number values if numeric stats
            values = [make_number(value) for value in values if make_number(value) != None]
        if values:
            aggval = aggfunc(values)
        else:
            aggval = ""  # or best with None
        row.append(aggval)
    if geomfunc:
        geomfunc = lookup_geomfunc(geomfunc)
        geom = geomfunc(iterable)
        return row, geom
    else:
        return row
def select(iterable, columnfuncs, geomfunc=None):
    """Yield one output row per item.

    Rows are built from the (name, func) pairs in *columnfuncs*; when
    *geomfunc* is supplied, a (row, geometry) pair is yielded instead.
    """
    if geomfunc:
        for item in iterable:
            yield [f(item) for _, f in columnfuncs], geomfunc(item)
    else:
        for item in iterable:
            yield [f(item) for _, f in columnfuncs]
def where(iterable, condition):
    """Yield only the items for which *condition* is truthy."""
    return (item for item in iterable if condition(item))
def groupby(iterable, key):
    """Sort *iterable* on *key* and yield one group iterator per distinct key.

    *key* may be a callable, a hash index (string), or a list/tuple of hash
    indexes (grouping on the tuple of looked-up values).
    """
    if hasattr(key, "__call__"):
        keyfunc = key
    elif isinstance(key, basestring):
        field = key
        keyfunc = lambda f: f[field]
    elif isinstance(key, (list, tuple)) and all(isinstance(v, basestring) for v in key):
        fields = key
        keyfunc = lambda f: tuple(f[h] for h in fields)
    else:
        raise Exception("groupby key must be a callable function or a string or list/tuple of strings of the hash index(es) for retrieving the value(s)")
    ordered = sorted(iterable, key=keyfunc)
    for _, members in itertools.groupby(ordered, key=keyfunc):
        yield members
def limit(iterable, n):
    """Yield at most the first *n* items of *iterable*."""
    seen = 0
    for item in iterable:
        if seen >= n:
            break
        yield item
        seen += 1
def query(_from, _select, _geomselect=None, _where=None, _groupby=None, _limit=None):
    """Takes a series of sql generator components, runs them, and iterates over the resulting feature-geom tuples.
    Arg _from must be a sequence of one or more iterables.
    All combinations of items from the iterables are then tupled together and passed to the remaining _select, _where_, and _groupby args.
    This allows us to involve items from all the iterables in the functions that define our queries.
    The final _select function should return a row list, and the _geomselect should return a geojson dictionary.
    """
    # INSTEAD MAKE INTO CLASS
    # WITH .fields attr
    # AND .__iter__()
    # AND .get_vectordata()
    # AND MAKE EACH YIELDED ROW A VECTOR FEATURE CLASS
    # THIS WAY ALLOWING CHAINED QUERIES
    # parse args
    iterables = _from
    columnfuncs = _select
    geomfunc = _geomselect
    condition = _where
    key = _groupby
    n = _limit
    # first yield header as list of column names
    colnames = [each[0] for each in columnfuncs]
    yield colnames
    # make an iterable that yields every combinaion of all input iterables' items
    if len(iterables) == 1:
        iterable = iterables[0]
    else:
        iterable = itertools.product(*iterables)
    # iterate and add
    if key:
        groups = groupby(iterable, key)
        # limit (when grouping, the limit applies to the number of groups)
        if n:
            groups = limit(groups, n)
        for items in groups:
            # filter (applied inside each group)
            if condition:
                items = where(items, condition)
            # aggregate
            # NOTE: columnfuncs and geomfunc must expect an iterable as input and return a single row,geom pair
            item = aggreg(items, columnfuncs, geomfunc)
            yield item
    else:
        # filter
        if condition:
            iterable = where(iterable, condition)
        # limit
        if n:
            iterable = limit(iterable, n)
        # select
        for item in select(iterable, columnfuncs, geomfunc):
            yield item
def query_to_data(_query):
    """Materialize a query() generator into a VectorData table (header row first, then features)."""
    # create table and columns
    out = VectorData()
    header = next(_query)
    out.fields = [name for name in header]
    # add each feature
    for row,geom in _query:
        if geom: # hack, should find a way to add empty geoms
            out.add_feature(row, geom)
    return out
##########
# EXPERIMENTAL
class Iterable(object):
    """Thin wrapper around an iterable with set-style spatial operations.

    NOTE(review): intersects() relies on ``quick_overlap`` and ``bbox``, which
    are not defined on this class — presumably provided by a subclass; confirm
    before use.
    """
    def __init__(self, iterable):
        # the wrapped iterable of (spatial) items
        self.it = iterable
    def __iter__(self):
        for item in self.it:
            yield item
    def intersects(self, othercol):
        # Yield items from self that intersect an item in othercol; an item is
        # yielded once per intersecting partner, so duplicates are possible.
        for item in self.quick_overlap(othercol.bbox):
            for otheritem in othercol.quick_overlap(self.bbox):
                if item.intersects(otheritem):
                    yield item
|
20,524 | 285d09e44baa9e6256c166a45d4afb6537b3fd43 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from sklearn.model_selection import train_test_split, StratifiedKFold
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Conv2D, Flatten, MaxPooling2D, Dropout
from tensorflow.keras.layers import BatchNormalization, ZeroPadding2D, Activation, Add, GlobalAveragePooling2D
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
train_data = pd.read_csv('../data/mnist_data/train.csv', index_col=0, header=0)
print(train_data)
# Reconstruct the first sample as a 28x28 grayscale image.
img = train_data.iloc[0,2:].values.reshape(28,28).astype(np.uint8)
img_2 = cv2.dilate(img, kernel=np.ones((2,2), np.uint8), iterations=1)
# NOTE(review): the dilation above is discarded — medianBlur reads `img`,
# not `img_2`; confirm whether the dilate result was meant to feed the blur.
img_2 = cv2.medianBlur(src=img, ksize=5)
img_2 = np.where(img_2>=10, img_2, 0)  # zero out faint noise below intensity 10
print(img_2.shape)
print(img_2)
'''
# 그림 확인
cv2.imshow('before',img)
cv2.imshow('after',img_2)
cv2.waitKey(0)
cv2.destroyAllWindows()
plt.imshow(img_2)
plt.show()
'''
# Augmentation generator. NOTE(review): defined but not used in this excerpt.
datagen = ImageDataGenerator(
    rotation_range=360
)
train_letter = train_data['letter'].values
x_train = train_data.drop(['digit', 'letter'], axis=1).values
x_train = x_train.reshape(-1, 28, 28, 1)
x_train = x_train/255  # scale pixel values to [0, 1]
print(x_train.shape) # (2048, 28, 28, 1)
y = train_data['letter']
# Map letters A-Z to class indices 0-25.
alpha_2_num = {'A':0, 'B':1, 'C':2, 'D':3, 'E':4, 'F':5, 'G':6, 'H':7, 'I':8, 'J':9, 'K':10,
               'L':11, 'M':12, 'N':13, 'O':14, 'P':15, 'Q':16, 'R':17, 'S':18, 'T':19, 'U':20,
               'V':21, 'W':22, 'X':23, 'Y':24, 'Z':25}
y = y.map(alpha_2_num)
# One-hot encode the labels.
y_train = np.zeros((len(y), len(y.unique())))
for i, letter in enumerate(y):
    y_train[i, letter] = 1
print(y_train)
print(y_train.shape)
# NOTE(review): stratify is given the one-hot matrix — verify sklearn accepts this shape.
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=42, stratify=y_train)
# Model
input_tensor = Input(shape=x_train.shape[1:], dtype='float32', name='input')
def conv1_layer(x):
    """Stem: pad, 7x7/1 conv to 64 filters, BN, ReLU, then 1-pixel padding for the pool."""
    x = ZeroPadding2D(padding=(3, 3))(x)
    x = Conv2D(64, (7, 7), strides=(1, 1))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1,1))(x)
    return x
def conv2_layer(x):
    """Stage 2: 3x3/2 max-pool, then two 64-filter residual blocks.

    The first block (i == 0) projects the shortcut through its own conv+BN;
    the second uses an identity shortcut.
    """
    x = MaxPooling2D((3, 3), 2)(x)
    shortcut = x
    for i in range(2):
        if (i == 0):
            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            shortcut = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
        else:
            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
    return x
def conv3_layer(x):
    """Stage 3: two 128-filter residual blocks (stride 1 throughout).

    The first block projects the shortcut through its own conv+BN; the second
    uses an identity shortcut.
    """
    shortcut = x
    for i in range(2):
        if(i == 0):
            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            shortcut = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
        else:
            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
    return x
def conv4_layer(x):
    """Stage 4: two 256-filter residual blocks (no spatial downsampling here)."""
    shortcut = x
    for i in range(2):
        if(i == 0):
            # First block: projection shortcut (conv + BN) to reach 256 channels.
            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            shortcut = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
        else:
            # Second block: identity shortcut.
            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
    return x
def conv5_layer(x):
    """Stage 5: two 512-filter residual blocks; the first downsamples (stride 2)."""
    shortcut = x
    for i in range(2):
        if(i == 0):
            # First block: stride-2 conv halves the spatial size, so the
            # shortcut uses the same stride-2 conv + BN to stay addable.
            x = Conv2D(512, (3, 3), strides=(2, 2), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
            shortcut = Conv2D(512, (3, 3), strides=(2, 2), padding='same')(shortcut)
            x = BatchNormalization()(x)
            shortcut = BatchNormalization()(shortcut)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
        else:
            # Second block: identity shortcut at the reduced resolution.
            x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Activation('relu')(x)
            x = Conv2D(512, (3, 3), strides=(1, 1), padding='same')(x)
            x = BatchNormalization()(x)
            x = Add()([x, shortcut])
            x = Activation('relu')(x)
            shortcut = x
    return x
# Assemble the ResNet-style network from the stage functions above.
x = conv1_layer(input_tensor)
x = conv2_layer(x)
x = conv3_layer(x)
x = conv4_layer(x)
x = conv5_layer(x)
x = GlobalAveragePooling2D()(x)
# 26 output classes — presumably one per alphabet letter; TODO confirm.
output_tensor = Dense(26, activation='softmax')(x)
resnet18 = Model(input_tensor, output_tensor)
resnet18.summary()
model = resnet18
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
file_path = './dacon3/data/mnist_alpha_resnet_test.hdf5'
# Callbacks: stop after 80 stagnant epochs, keep only the best checkpoint,
# and shrink the learning rate by 0.8x after 30 stagnant epochs.
es = EarlyStopping(monitor='val_accuracy', patience=80)
cp = ModelCheckpoint(filepath=file_path, monitor='val_accuracy', save_best_only=True)
lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.8, patience=30)
# history = model.fit(x_train, y_train, epochs=5000, batch_size=32, validation_data=(x_val, y_val), verbose=2, callbacks=[es,cp,lr])
hist = model.fit_generator(datagen.flow(x_train, y_train, batch_size=16), epochs=2000,
validation_data=(datagen.flow(x_val, y_val)), verbose=2, callbacks=[es, cp, lr]) |
20,525 | 21af131a23463d2ab27ee1f684994724a9b24531 | import pandas as pd
import numpy as np
from datetime import datetime
# Load the S&P-500 history, parse dates, flag rows after 2015-04-01,
# and put the frame in chronological order.
df = pd.read_csv("sphist.csv")
df["Date"] = pd.to_datetime(df["Date"])
cutoff = datetime(year=2015, month=4, day=1)
df["date_compare"] = df["Date"] > cutoff
df = df.sort_values(by="Date")
print(df.head()) |
20,526 | a7f7b6032d3f1f913ae540f3cb9cc8b0cb515409 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 9 16:14:42 2016
@author: Guest1
"""
import pickle
import sqlite3
import numpy as np
import os
import pdb
# import HashingVectorizer from local dir
from vectorizer import vect
def update_model(db_path, model, batch_size=10000):
    """Incrementally re-train *model* on the reviews stored in a SQLite DB.

    Streams rows from the ``sentiment_review`` table in chunks of
    *batch_size*, vectorizes the text column, and calls ``partial_fit``
    on each chunk.

    :param db_path: path to the SQLite database file
    :param model: a scikit-learn estimator supporting ``partial_fit``
    :param batch_size: rows fetched (and fitted) per chunk
    :returns: None (the model is updated in place)
    """
    conn = sqlite3.connect(db_path)
    try:
        c = conn.cursor()
        c.execute('SELECT * from sentiment_review')
        results = c.fetchmany(batch_size)
        while results:
            data = np.array(results)
            X = data[:, 1]
            y = data[:, 2].astype(int)
            classes = np.array([0, 1, 2])
            X_train = vect.transform(X)
            # BUG FIX: the original called the global ``clf`` here, silently
            # ignoring the ``model`` argument; use the parameter instead.
            model.partial_fit(X_train, y, classes=classes)
            results = c.fetchmany(batch_size)
    finally:
        # Close the connection even if vectorizing/fitting raises.
        conn.close()
    return None
#pdb.set_trace()
"""Update the model"""
# cur_dir = '.'
# Use the following path instead if you embed this code into
# the app.py file
# import os
# Locate the pickled classifier relative to this script's directory.
cur_dir = os.path.dirname(__file__)
clf = pickle.load(open(os.path.join(cur_dir,
                'pkl_objects',
                'classifier.pkl'), 'rb'))
db = os.path.join(cur_dir, 'mydb.sqlite3')
# Re-train the classifier on any reviews accumulated in the SQLite DB.
update_model(db_path=db, model=clf, batch_size=10000)
# Uncomment the following lines to update your classifier.pkl file
# NOTE(review): despite the comment above, the dump below is NOT commented
# out — it overwrites classifier.pkl on every run; confirm that is intended.
pickle.dump(clf, open(os.path.join(cur_dir,
            'pkl_objects', 'classifier.pkl'), 'wb')
            , protocol=4)
20,527 | 0009fef9483cd4e8884f2180d6cdbbb179a4fffb | from django.shortcuts import render, render_to_response
from trees_app.models import Tree
def index_view(request):
    """Render the home page with every Tree in the database."""
    all_trees = Tree.objects.all()
    # render() replaces render_to_response(), which was deprecated in
    # Django 2.0 and removed in 3.0; it also passes the request so
    # context processors run. render is already imported in this module.
    return render(request, 'index.html', {'trees': list(all_trees)})
def data_view(request, capture):
    """Render the detail page for the Tree whose name equals *capture*."""
    captured_tree = Tree.objects.get(name=capture)
    # render() replaces the deprecated/removed render_to_response().
    return render(request, 'data.html', {'tree': captured_tree})
def about_view(request):
    """Render the static about page."""
    # render() replaces the deprecated/removed render_to_response().
    return render(request, 'about.html', {})
def nav_vew(request):
    """Render the navigation fragment.

    NOTE(review): the name looks like a typo for ``nav_view`` but is kept
    as-is because URL configuration elsewhere may reference it.
    """
    # render() replaces the deprecated/removed render_to_response().
    return render(request, 'nav.html', {})
|
20,528 | ce30ec06416c5764ca8ebfeabdd778d55b09a125 | import random
comleft = random.randint(0,2)
comright = random.randint(0,2)
meleft = int(input("meleft ==> 0) 가위 , 1) 바위 , 2) 보 : "))
meright = int(input("meright ==> 0) 가위 , 1) 바위 , 2) 보 : "))
# 문제가 뭔지 모른다.
# 나의 승률 계산이라고 가정
com = [comleft,comright][random.randint(0,1)]
me = [meleft,meright][random.randint(0,1)]
if com == me :
print("무승부")
elif (com < me and not (com == 0 and me == 2)) or com == 2 and me == 0 :
print("승리")
else :
print("패배") |
20,529 | 75f4d304083ae88ec9af68e7202ab7fc3b633d0d | # -*- coding: utf-8 -*-
"""
Dynamic DynamoDB
Auto provisioning functionality for Amazon Web Service DynamoDB tables.
APACHE LICENSE 2.0
Copyright 2013 Sebastian Dahlgren
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sys
import time
import dynamic_dynamodb.core as core
from dynamic_dynamodb.daemon import Daemon
from dynamic_dynamodb.config_handler import CONFIGURATION as configuration
from dynamic_dynamodb.log_handler import LOGGER as logger
class DynamicDynamoDBDaemon(Daemon):
    """ Daemon for Dynamic DynamoDB"""
    def run(self, check_interval=1):
        """ Run the daemon

        :type check_interval: int
        :param check_interval: Delay in seconds between checks
        """
        # Loop forever: each pass re-reads the configured table patterns,
        # matches them against the live table list, and enforces provisioning.
        while True:
            used_keys = set()
            table_names = set()
            configured_tables = configuration['tables'].keys()

            # Treat each configured table name as a regex and collect every
            # live table it matches, remembering which config keys were used.
            for table_name in core.dynamodb.list_tables():
                for key_name in configured_tables:
                    if re.match(key_name, table_name):
                        logger.debug(
                            "Table {0} match with config key {1}".format(
                                table_name, key_name))
                        table_names.add((table_name, key_name))
                        used_keys.add(key_name)

            # Remove used tables
            # NOTE(review): configured_tables is rebuilt on the next loop
            # iteration, so this removal has no lasting effect — confirm.
            for table_name in used_keys:
                configured_tables.remove(table_name)

            # Apply the configured provisioning rules to each matched table.
            for table_name, key_name in sorted(table_names):
                core.ensure_provisioning(table_name, key_name)

            # Sleep between the checks
            time.sleep(check_interval)
def main():
    """ Main function called from dynamic-dynamodb """
    # Daemon mode: the --daemon option selects start/stop/restart/foreground.
    if configuration['global']['daemon']:
        # One pid file per configured instance so several daemons can coexist.
        pid_file = '/tmp/dynamic-dynamodb.{0}.pid'.format(
            configuration['global']['instance'])
        daemon = DynamicDynamoDBDaemon(pid_file)

        if configuration['global']['daemon'] == 'start':
            daemon.start(
                check_interval=configuration['global']['check_interval'])
        elif configuration['global']['daemon'] == 'stop':
            daemon.stop()
        elif configuration['global']['daemon'] == 'restart':
            daemon.restart()
        elif configuration['global']['daemon'] in ['foreground', 'fg']:
            # Run the same loop without detaching from the terminal.
            daemon.run(
                check_interval=configuration['global']['check_interval'])
        else:
            # Python 2 print statement — this module predates Python 3.
            print 'Valid options for --daemon are start, stop and restart'
            sys.exit(1)
    else:
        # One-shot mode: a single provisioning pass (same logic as one
        # iteration of DynamicDynamoDBDaemon.run above).
        table_names = set()
        used_keys = set()
        configured_tables = configuration['tables'].keys()

        # Treat configured names as regexes against the live table list.
        for table_name in core.dynamodb.list_tables():
            for key_name in configured_tables:
                if re.match(key_name, table_name):
                    logger.debug("Table {0} match with config key {1}".format(
                        table_name, key_name))
                    table_names.add((table_name, key_name))
                    used_keys.add(key_name)

        # Remove used tables
        # NOTE(review): the pruned list is never read afterwards — confirm
        # this removal is intentional.
        for table_name in used_keys:
            configured_tables.remove(table_name)

        # Ensure provisioning
        for table_name, key_name in sorted(table_names):
            core.ensure_provisioning(table_name, key_name)
|
20,530 | 2b2f95f96cd351ea7fa8efd9ec5ba1bbc64069cd | {
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 라이브러리 선언"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"### 데이터 처리 라이브러리\n",
"import pandas as pd\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"### 머신러닝 라이브러리\n",
"from sklearn.svm import SVC ## 서포트 vector\n",
"from sklearn.neighbors import KNeighborsClassifier # k neighbors"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"### 데이터 전처리 라이브러리\n",
"from sklearn.model_selection import train_test_split"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"### 클래시피케이션 matricx// 분류에 쓰이는 정확도 지표들 ->\n",
"from sklearn.metrics import accuracy_score, f1_score, recall_score, precision_score"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
    "#### Classification - SVM(support vector machine) 마진에 따라 마진에 있는 데이터만 남기고 계산 ( 계산량이 줄어듬, 단 차원 증가시 계산량 증가)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### 정확도와 속도로 알고리즘끼리 경합을 시킨다"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Classification - KNN 데이터로부터 가장 인접한 이웃개수의 그룹을 확인 후 편성"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### 느리다"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 데이터 불러오기"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"csData = pd.read_csv(\"../dataset/customer.csv\")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>balance</th>\n",
" <th>stock</th>\n",
" <th>label</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>30000000</td>\n",
" <td>22500000</td>\n",
" <td>normal</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>280000000</td>\n",
" <td>48000000</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" balance stock label\n",
"0 30000000 22500000 normal\n",
"1 280000000 48000000 diamond"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"csData.head(2)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### balance -> 예금 보유금액\n",
    "#### stock -> 주식 보유금액\n",
    "#### label -> 고객등급. normal, diamond, vip"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0 normal\n",
"1 diamond\n",
"4 vip\n",
"Name: label, dtype: object"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"csData.label.drop_duplicates()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 1. 타입 통합 / 숫자형 컬럼 추가"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"balance int32\n",
"stock int32\n",
"label object\n",
"dtype: object"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"csData.dtypes"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"csData = csData.astype({\"balance\" : \"int\", \"stock\" : \"int\", \"label\" : \"object\"})"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"balance int32\n",
"stock int32\n",
"label object\n",
"dtype: object"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"csData.dtypes"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2. 특성 선정"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
"### 정답지\n",
"labelColumn = \"label_new\""
]
},
{
"cell_type": "code",
"execution_count": 32,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>balance</th>\n",
" <th>stock</th>\n",
" <th>LABEL_NEW</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>balance</th>\n",
" <td>1.000000</td>\n",
" <td>0.565942</td>\n",
" <td>0.883144</td>\n",
" </tr>\n",
" <tr>\n",
" <th>stock</th>\n",
" <td>0.565942</td>\n",
" <td>1.000000</td>\n",
" <td>0.824174</td>\n",
" </tr>\n",
" <tr>\n",
" <th>LABEL_NEW</th>\n",
" <td>0.883144</td>\n",
" <td>0.824174</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" balance stock LABEL_NEW\n",
"balance 1.000000 0.565942 0.883144\n",
"stock 0.565942 1.000000 0.824174\n",
"LABEL_NEW 0.883144 0.824174 1.000000"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"## 상관계수 생성 함수는 숫자형 컬럼에 대해서만 상관계수를 계산한다. \n",
"csData.corr() ## 관계를 알 수 없다. 라벨이 나오지 않았다..\n"
]
},
{
"cell_type": "code",
"execution_count": 33,
"metadata": {},
"outputs": [],
"source": [
"## 라벨인코더는 원하는 값으로 맵핑하기가 힘들다. 딕셔너리는 원하는 값으로 맵핑 가능"
]
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {},
"outputs": [],
"source": [
"labelMap = {\"normal\" : 0, \"diamond\" : 1, \"vip\" : 2 }"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {},
"outputs": [],
"source": [
"csData[labelColumn] = csData.label.map(labelMap)"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>balance</th>\n",
" <th>stock</th>\n",
" <th>LABEL_NEW</th>\n",
" <th>label_new</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>balance</th>\n",
" <td>1.000000</td>\n",
" <td>0.565942</td>\n",
" <td>0.883144</td>\n",
" <td>0.883144</td>\n",
" </tr>\n",
" <tr>\n",
" <th>stock</th>\n",
" <td>0.565942</td>\n",
" <td>1.000000</td>\n",
" <td>0.824174</td>\n",
" <td>0.824174</td>\n",
" </tr>\n",
" <tr>\n",
" <th>LABEL_NEW</th>\n",
" <td>0.883144</td>\n",
" <td>0.824174</td>\n",
" <td>1.000000</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" <tr>\n",
" <th>label_new</th>\n",
" <td>0.883144</td>\n",
" <td>0.824174</td>\n",
" <td>1.000000</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" balance stock LABEL_NEW label_new\n",
"balance 1.000000 0.565942 0.883144 0.883144\n",
"stock 0.565942 1.000000 0.824174 0.824174\n",
"LABEL_NEW 0.883144 0.824174 1.000000 1.000000\n",
"label_new 0.883144 0.824174 1.000000 1.000000"
]
},
"execution_count": 36,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"csData.corr()"
]
},
{
"cell_type": "code",
"execution_count": 39,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>balance</th>\n",
" <th>stock</th>\n",
" <th>label</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>30000000</td>\n",
" <td>22500000</td>\n",
" <td>normal</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>280000000</td>\n",
" <td>48000000</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>300000000</td>\n",
" <td>40666666</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>54000000</td>\n",
" <td>28000000</td>\n",
" <td>normal</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>768000000</td>\n",
" <td>32000000</td>\n",
" <td>vip</td>\n",
" </tr>\n",
" <tr>\n",
" <th>...</th>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>19995</th>\n",
" <td>628000000</td>\n",
" <td>44666666</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>19996</th>\n",
" <td>276000000</td>\n",
" <td>20000000</td>\n",
" <td>normal</td>\n",
" </tr>\n",
" <tr>\n",
" <th>19997</th>\n",
" <td>652000000</td>\n",
" <td>41333333</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>19998</th>\n",
" <td>676000000</td>\n",
" <td>45333333</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>19999</th>\n",
" <td>732000000</td>\n",
" <td>26000000</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>20000 rows × 3 columns</p>\n",
"</div>"
],
"text/plain": [
" balance stock label\n",
"0 30000000 22500000 normal\n",
"1 280000000 48000000 diamond\n",
"2 300000000 40666666 diamond\n",
"3 54000000 28000000 normal\n",
"4 768000000 32000000 vip\n",
"... ... ... ...\n",
"19995 628000000 44666666 diamond\n",
"19996 276000000 20000000 normal\n",
"19997 652000000 41333333 diamond\n",
"19998 676000000 45333333 diamond\n",
"19999 732000000 26000000 diamond\n",
"\n",
"[20000 rows x 3 columns]"
]
},
"execution_count": 39,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"csData.drop(columns=[\"label_new\"],inplace=True)\n",
"csData"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [],
"source": [
"corrDf = csData.corr()"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>balance</th>\n",
" <th>stock</th>\n",
" <th>label_new</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>balance</th>\n",
" <td>1.000000</td>\n",
" <td>0.565942</td>\n",
" <td>0.883144</td>\n",
" </tr>\n",
" <tr>\n",
" <th>stock</th>\n",
" <td>0.565942</td>\n",
" <td>1.000000</td>\n",
" <td>0.824174</td>\n",
" </tr>\n",
" <tr>\n",
" <th>label_new</th>\n",
" <td>0.883144</td>\n",
" <td>0.824174</td>\n",
" <td>1.000000</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" balance stock label_new\n",
"balance 1.000000 0.565942 0.883144\n",
"stock 0.565942 1.000000 0.824174\n",
"label_new 0.883144 0.824174 1.000000"
]
},
"execution_count": 44,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"corrDf"
]
},
{
"cell_type": "code",
"execution_count": 45,
"metadata": {},
"outputs": [],
"source": [
"corrStd = 0.5"
]
},
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [],
"source": [
"features = list (corrDf.loc[ (abs(corrDf[labelColumn]) > corrStd) & (corrDf[labelColumn] != 1)].index )"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['balance', 'stock']"
]
},
"execution_count": 52,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"features"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 2- 2 데이터 분리"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 특성 선정 시에서는, 정답지와 관련이 있는 피쳐를 설정해야 하기에, 해당 컬럼값을 숫자로 바꿔서 계산을 해서 상관관계 여부를 찾은거고, 이후에 Label_new는 필요없다. \n",
"#### 다시 복구해준다."
]
},
{
"cell_type": "code",
"execution_count": 73,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'label'"
]
},
"execution_count": 73,
"metadata": {},
"output_type": "execute_result"
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": 72,
"metadata": {},
"outputs": [],
"source": [
"labelColumn = \"label\""
]
},
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [],
"source": [
"## loc[:] 는 전체라는 뜻\n",
"trainingDataFeatures, testDataFeatures, trainingDataLabel, testDataLabe = train_test_split( csData.loc[:, features],\n",
" csData.loc[:, labelColumn],\n",
" random_state=1,\n",
" test_size=0.2 )"
]
},
{
"cell_type": "code",
"execution_count": 65,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(16000, 2)"
]
},
"execution_count": 65,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"trainingDataFeatures.shape"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(4000, 2)"
]
},
"execution_count": 66,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"testDataFeatures.shape"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(16000,)"
]
},
"execution_count": 67,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"trainingDataLabel.shape"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(4000,)"
]
},
"execution_count": 68,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"testDataLabe.shape"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 3. 모델 선언 / 학습"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [],
"source": [
"## C는 마진. 하이퍼 파라미터 튜닝으로 자주 사용\n",
"model_svm = SVC(random_state = 5)"
]
},
{
"cell_type": "raw",
"metadata": {},
"source": [
"model_svm.fit(trainingDataFeatures, trainingDataLabel)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"\n",
"## 4. 예측"
]
},
{
"cell_type": "code",
"execution_count": 74,
"metadata": {},
"outputs": [],
"source": [
"predictSvm = model_svm.predict(testDataFeatures)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 5. 데이터 정리"
]
},
{
"cell_type": "code",
"execution_count": 77,
"metadata": {},
"outputs": [],
"source": [
"## 원래 데이터에서 답안지의 데이터 뽑기\n",
"testDataAll = csData.loc [testDataLabe.index]"
]
},
{
"cell_type": "code",
"execution_count": 78,
"metadata": {},
"outputs": [],
"source": [
"testDataAll[\"predict_svm\"] = predictSvm"
]
},
{
"cell_type": "code",
"execution_count": 79,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>balance</th>\n",
" <th>stock</th>\n",
" <th>label</th>\n",
" <th>label_new</th>\n",
" <th>predict_svm</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>11456</th>\n",
" <td>744000000</td>\n",
" <td>38000000</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>16528</th>\n",
" <td>724000000</td>\n",
" <td>32000000</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3253</th>\n",
" <td>704000000</td>\n",
" <td>27333333</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>18614</th>\n",
" <td>240000000</td>\n",
" <td>30500000</td>\n",
" <td>normal</td>\n",
" <td>0</td>\n",
" <td>normal</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1544</th>\n",
" <td>258000000</td>\n",
" <td>28500000</td>\n",
" <td>normal</td>\n",
" <td>0</td>\n",
" <td>normal</td>\n",
" </tr>\n",
" <tr>\n",
" <th>...</th>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" <td>...</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6375</th>\n",
" <td>640000000</td>\n",
" <td>48666666</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>14837</th>\n",
" <td>652000000</td>\n",
" <td>26000000</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3931</th>\n",
" <td>564000000</td>\n",
" <td>45333333</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>18266</th>\n",
" <td>528000000</td>\n",
" <td>46000000</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" <tr>\n",
" <th>12028</th>\n",
" <td>696000000</td>\n",
" <td>36666666</td>\n",
" <td>diamond</td>\n",
" <td>1</td>\n",
" <td>diamond</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"<p>4000 rows × 5 columns</p>\n",
"</div>"
],
"text/plain": [
" balance stock label label_new predict_svm\n",
"11456 744000000 38000000 diamond 1 diamond\n",
"16528 724000000 32000000 diamond 1 diamond\n",
"3253 704000000 27333333 diamond 1 diamond\n",
"18614 240000000 30500000 normal 0 normal\n",
"1544 258000000 28500000 normal 0 normal\n",
"... ... ... ... ... ...\n",
"6375 640000000 48666666 diamond 1 diamond\n",
"14837 652000000 26000000 diamond 1 diamond\n",
"3931 564000000 45333333 diamond 1 diamond\n",
"18266 528000000 46000000 diamond 1 diamond\n",
"12028 696000000 36666666 diamond 1 diamond\n",
"\n",
"[4000 rows x 5 columns]"
]
},
"execution_count": 79,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"testDataAll"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## 6. 정확도 검증"
]
},
{
"cell_type": "code",
"execution_count": 80,
"metadata": {},
"outputs": [],
"source": [
"accuracy = accuracy_score(y_true = testDataAll.label,\n",
" y_pred = testDataAll.predict_svm)"
]
},
{
"cell_type": "code",
"execution_count": 81,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"0.995"
]
},
"execution_count": 81,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"accuracy"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [],
"source": [
"testDataAll.to_csv(\"d:/svc_result.csv\", index=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 해당 모델로 미래 예측"
]
},
{
"cell_type": "code",
"execution_count": 87,
"metadata": {},
"outputs": [],
"source": [
"import pickle\n",
"\n",
"filename = 'finalized_model_svc.sav'\n",
"pickle.dump(model_svm, open(filename, 'wb'))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
|
20,531 | 1564575c7684ec386159924c6aefe2ae13736578 | '''this is for question 3'''
'''pick the number closest to 0.5 for each row of a 10*3 random array in range [0, 1].'''
import numpy as np
class pick_numbers_from_array:
    """Hold a rows x cols random array in [0, 1) and pick, per row, the
    entry closest to 0.5."""

    def __init__(self, rows=10, cols=3):
        """Generate the random array.

        :param rows: number of rows (default 10, matching the original)
        :param cols: number of columns (default 3, matching the original)
        """
        self.array = np.random.rand(rows, cols)

    def print_array(self):
        """Print the underlying array."""
        # print() call form: works on Python 3 (the original used the
        # Python-2-only print statement, a syntax error on Python 3).
        print(self.array)

    def pick_numbers(self):
        """Return, for each row, the entry closest to 0.5."""
        distance = np.abs(self.array - 0.5)  # distance of each entry to 0.5
        # argmin per row: O(cols) instead of a full O(cols log cols) argsort,
        # and it needs only the winning index anyway.
        index_closest = np.argmin(distance, axis=1)
        # Index by actual row count (the original hard-coded arange(0, 10),
        # which would break for any other shape).
        rows = np.arange(self.array.shape[0])
        return self.array[rows, index_closest]
|
20,532 | d5fb8516a619058cb9856f6acde1515bdf8f9927 | import sys
import numpy as np
import argparse
import heapq
import re
import os
ar=os.path.dirname(os.path.realpath(__file__)).split('/')
svtpath='/'.join(ar[0:(len(ar)-1)])
sys.path.insert(1, svtpath)
import svtools.l_bp as l_bp
from svtools.breakpoint import Breakpoint
import svtools.logspace as ls
from svtools.vcf.file import Vcf
from svtools.vcf.variant import Variant
from svtools.utils import parse_bnd_alt_string, InputStream
from svtools.exceptions import MissingProbabilitiesException
def null_format_string(format_string):
    """Build a null sample column matching *format_string*.

    Emits './.' for a leading GT field and '.' for every other
    colon-separated field.
    """
    fields = format_string.split(':')
    if format_string.startswith('GT:'):
        parts = ['./.'] + ['.'] * (len(fields) - 1)
    else:
        parts = ['.'] * len(fields)
    return ':'.join(parts)
def merge_single_bp(BP, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes):
    """Convert a single-member breakpoint cluster into an output Variant.

    Re-parses the lone VCF line in BP, extends the SNAME INFO field with the
    original variant id for provenance, renumbers the variant to v_id, and
    records the merge algorithm tag (ALG).

    NOTE(review): vcf_out is accepted but never used here — confirm.
    """
    A = BP[0].l.rstrip().split('\t')
    var = Variant(A,vcf)
    try:
        # Provenance: append this record's original id to its sample tag.
        sname = var.get_info('SNAME')
        var.set_info('SNAME', sname + ':' + var.var_id)
    except KeyError:
        # No SNAME INFO field present; leave provenance untouched.
        # NOTE(review): in that case `sname` stays unbound, and the
        # gt_dict construction below would raise NameError when
        # include_genotypes is True — confirm SNAME is always present then.
        pass
    var.var_id=str(v_id)

    # Record which merge algorithm produced this variant's coordinates.
    if use_product:
        var.set_info('ALG', 'PROD')
    else:
        var.set_info('ALG', 'SUM')

    GTS = None
    if include_genotypes:
        # One genotype column per sample in sample_order; samples absent
        # from this record get a null column matching its FORMAT string.
        null_string = null_format_string(A[8])
        gt_dict = { sname: A[9] }
        GTS = '\t'.join([gt_dict.get(x, null_string) for x in sample_order])
        var.gts = None
        var.gts_string = GTS

    return var
def order_cliques(BP, C):
    """Greedily partition breakpoints into maximal intersecting cliques.

    Repeatedly sweeps the remaining breakpoints, finds the largest set whose
    members pairwise intersect on both their left and right intervals,
    appends that set's index list to C, removes it, and continues until all
    breakpoints are assigned.

    :param BP: breakpoints (objects with .left/.right intervals exposing
               .start and .end), assumed sorted by left start
    :param C: output list; each appended element is a list of BP indices
    """
    # BUG FIX: wrap range() in list() — Python 3 range objects have no
    # .remove(), so BP_i.remove(c) below crashed; identical on Python 2.
    BP_i = list(range(len(BP)))  # index set of each node in the graph
    while len(BP_i) > 0:
        # min-heap of (left end coord, node id); heapq is a min heap and the
        # end coordinate drives eviction during the sweep
        h_l = []
        max_c = []
        max_c_len = 0
        for i in BP_i:
            # evict heap entries whose left interval ends before this
            # breakpoint's left interval starts
            while (len(h_l) > 0) and (h_l[0][0] < BP[i].left.start):
                heapq.heappop(h_l)
            heapq.heappush(h_l, (BP[i].left.end, i))
            # everything in h_l now intersects on the left; repeat the sweep
            # on the right intervals of those candidates
            h_r = []  # heap keyed by right end coordinate
            h_l_i = [x[1] for x in h_l]
            h_l_i.sort(key=lambda x: BP[x].right.start)
            for j in h_l_i:
                # evict entries that do not intersect on the right end
                while (len(h_r) > 0) and (h_r[0][0] < BP[j].right.start):
                    heapq.heappop(h_r)
                heapq.heappush(h_r, (BP[j].right.end, j))
                if max_c_len < len(h_r):
                    # new max clique: record the member node ids
                    max_c_len = len(h_r)
                    max_c = [y[1] for y in h_r]
        C.append(max_c)
        for c in max_c:
            BP_i.remove(c)
def getCI95( p_L, p_R, max_i_L, max_i_R):
    """Compute 95% confidence intervals around the probability peaks.

    Expands symmetrically outward from each distribution's peak index until
    at least 95% of the mass is covered (or both edges are reached), then
    returns the offsets relative to the peak as two 'start,end' strings:
    [CIPOS95, CIEND95].
    """
    def expand(p, max_i):
        # Grow [lo, hi] one step on each side per iteration, clamped to the
        # array bounds, until the covered mass reaches 0.95.
        lo = max_i
        hi = max_i
        total = p[max_i]
        last = len(p) - 1
        while total < 0.95:
            if lo <= 0 and hi >= last:
                break
            lo = max(0, lo - 1)
            hi = min(last, hi + 1)
            total = sum(p[lo:hi + 1])
        # Report offsets relative to the peak position.
        return str(lo - max_i) + ',' + str(hi - max_i)

    return [expand(p_L, max_i_L), expand(p_R, max_i_R)]
def combine_pdfs(BP, c, use_product, weighting_scheme):
    """Combine the left/right breakpoint PDFs of clique ``c`` (indices
    into ``BP``) into one merged pair of probability arrays.

    Returns ``(new_start_L, new_start_R, p_L, p_R, ALG)`` where the
    ``p_*`` arrays are trimmed, normalized PDFs and ``ALG`` records which
    combination algorithm ('SUM' or 'PROD') was actually used.
    """
    # Gather the left/right intervals of every breakpoint in the clique
    L = []
    R = []
    for b_i in c:
        b = BP[b_i]
        L.append([b.left.start, b.left.end, b.left.p])
        R.append([b.right.start, b.right.end, b.right.p])
    # Align all intervals onto a common coordinate range
    [start_R, end_R, a_R] = l_bp.align_intervals(R)
    [start_L, end_L, a_L] = l_bp.align_intervals(L)
    p_L = [0] * len(a_L[0])
    p_R = [0] * len(a_R[0])
    # Sum the (optionally weighted) aligned PDFs
    for c_i in range(len(c)):
        if weighting_scheme == 'evidence_wt':
            # weight each breakpoint by its supporting evidence (SU tag)
            A = BP[c[c_i]].l.rstrip().split('\t', 10)
            m = l_bp.to_map(A[7])
            wt = int(m['SU'])
            a_L[c_i] = [wt * ali for ali in a_L[c_i]]
            a_R[c_i] = [wt * ari for ari in a_R[c_i]]
        elif weighting_scheme == 'carrier_wt':
            # weight each breakpoint by its number of carrier samples
            A = BP[c[c_i]].l.rstrip().split('\t', 10)
            m = l_bp.to_map(A[7])
            wt = 1
            if 'SNAME' in m:
                wt = len(m['SNAME'].split(','))
            a_L[c_i] = [wt * ali for ali in a_L[c_i]]
            a_R[c_i] = [wt * ari for ari in a_R[c_i]]
        for i in range(len(a_L[c_i])):
            p_L[i] += a_L[c_i][i]
        for i in range(len(a_R[c_i])):
            p_R[i] += a_R[c_i][i]
    ALG = 'SUM'
    if use_product:
        pmax_i_L = p_L.index(max(p_L))
        pmax_i_R = p_R.index(max(p_R))
        # The product algorithm is only valid when every breakpoint has
        # non-zero probability at the summed maximum on both sides
        miss = 0
        for c_i in range(len(c)):
            if (a_L[c_i][pmax_i_L] == 0) or (a_R[c_i][pmax_i_R] == 0):
                miss += 1
        if miss == 0:
            ALG = "PROD"
            # multiply the PDFs in log space to avoid float underflow
            ls_p_L = [ls.get_ls(1)] * len(a_L[0])
            ls_p_R = [ls.get_ls(1)] * len(a_R[0])
            for c_i in range(len(c)):
                for i in range(len(a_L[c_i])):
                    ls_p_L[i] = ls.ls_multiply(ls_p_L[i], ls.get_ls(a_L[c_i][i]))
                for i in range(len(a_R[c_i])):
                    ls_p_R[i] = ls.ls_multiply(ls_p_R[i], ls.get_ls(a_R[c_i][i]))
            ls_sum_L = ls.get_ls(0)
            ls_sum_R = ls.get_ls(0)
            for ls_p in ls_p_L:
                ls_sum_L = ls.ls_add(ls_sum_L, ls_p)
            for ls_p in ls_p_R:
                ls_sum_R = ls.ls_add(ls_sum_R, ls_p)
            p_L = []
            for ls_p in ls_p_L:
                p_L.append(ls.get_p(ls.ls_divide(ls_p, ls_sum_L)))
            p_R = []
            for ls_p in ls_p_R:
                p_R.append(ls.get_p(ls.ls_divide(ls_p, ls_sum_R)))
    # Normalize each side by its own total mass.
    # BUG FIX: p_R was previously divided by sum_L, so the right-side PDF
    # was mis-scaled whenever the two sides had different totals.
    sum_L = sum(p_L)
    sum_R = sum(p_R)
    p_L = [x / sum_L for x in p_L]
    p_R = [x / sum_R for x in p_R]
    # Trim near-zero tails and renormalize what remains
    [clip_start_L, clip_end_L] = l_bp.trim(p_L)
    [clip_start_R, clip_end_R] = l_bp.trim(p_R)
    [new_start_L, new_end_L] = [start_L + clip_start_L, end_L - clip_end_L]
    [new_start_R, new_end_R] = [start_R + clip_start_R, end_R - clip_end_R]
    p_L = p_L[clip_start_L:len(p_L) - clip_end_L]
    p_R = p_R[clip_start_R:len(p_R) - clip_end_R]
    s_p_L = sum(p_L)
    s_p_R = sum(p_R)
    p_L = [x / s_p_L for x in p_L]
    p_R = [x / s_p_R for x in p_R]
    return new_start_L, new_start_R, p_L, p_R, ALG
def create_merged_variant(BP, c, v_id, vcf, use_product, weighting_scheme='unweighted'):
    """Build a single merged Variant for the clique ``c`` of breakpoints.

    Combines the member PDFs, picks the most likely left/right positions,
    and fills in SVTYPE/ALT/END/CI*/PR* INFO tags. Returns the new Variant.
    """
    new_start_L, new_start_R, p_L , p_R, ALG = combine_pdfs(BP, c, use_product, weighting_scheme)
    # Most likely positions are the maxima of the combined PDFs
    max_i_L = p_L.index(max(p_L))
    max_i_R = p_R.index(max(p_R))
    [cipos95, ciend95]=getCI95( p_L, p_R, max_i_L, max_i_R)
    new_pos_L = new_start_L + max_i_L
    new_pos_R = new_start_R + max_i_R
    # Use the first clique member as the template record
    BP0=BP[c[0]]
    A=BP0.l.rstrip().split('\t', 10)
    ALT = ''
    if BP0.sv_type == 'BND':
        # Encode the mate position in VCF breakend ALT notation, oriented
        # by the strand pair of the template breakpoint
        if BP0.strands[:2] == '++':
            ALT = 'N]' + BP0.right.chrom + ':' + str(new_pos_R) + ']'
        elif BP0.strands[:2] == '-+':
            ALT = ']' + BP0.right.chrom + ':' + str(new_pos_R) + ']N'
        elif BP0.strands[:2] == '+-':
            ALT = 'N[' + BP0.right.chrom + ':' + str(new_pos_R) + '['
        elif BP0.strands[:2] == '--':
            ALT = '[' + BP0.right.chrom + ':' + str(new_pos_R) + '[N'
    else:
        ALT = '<' + BP0.sv_type + '>'
    # CHROM POS ID REF ALT QUAL FILTER INFO, then FORMAT+samples from the template
    var_list=[ BP0.left.chrom,
        new_pos_L,
        str(v_id),
        'N',
        ALT,
        0.0,
        '.',
        ''] + A[8:]
    var=Variant(var_list, vcf)
    var.set_info('SVTYPE', BP0.sv_type)
    var.set_info('ALG', ALG)
    if var.get_info('SVTYPE')=='DEL':
        # deletions carry a negative SVLEN
        var.set_info('SVLEN', new_pos_L - new_pos_R)
    elif BP0.left.chrom == BP0.right.chrom:
        var.set_info('SVLEN', new_pos_R - new_pos_L)
    else:
        # inter-chromosomal: SVLEN is intentionally not set
        # (this assignment is a no-op placeholder)
        SVLEN = None
    if var.get_info('SVTYPE') == 'BND':
        var.set_info('EVENT', str(v_id))
    else:
        var.set_info('END', new_pos_R )
    # Confidence intervals: offsets relative to POS/END
    var.set_info('CIPOS95', cipos95)
    var.set_info('CIEND95', ciend95)
    var.set_info('CIPOS', ','.join([str(x) for x in [-1*max_i_L, len(p_L) - max_i_L - 1]]))
    var.set_info('CIEND', ','.join([str(x) for x in [-1*max_i_R, len(p_R) - max_i_R - 1]]))
    # Full probability arrays for downstream re-merging
    var.set_info('PRPOS', ','.join([str(x) for x in p_L]))
    var.set_info('PREND', ','.join([str(x) for x in p_R]))
    return var
def combine_var_support(var, BP, c, include_genotypes, sample_order):
    """Aggregate evidence from the clique ``c`` into the merged ``var``.

    Sums QUAL, SU/PE/SR counts and per-strand counts across members,
    collects source sample names into SNAME, and (optionally) carries the
    per-sample genotype columns over in ``sample_order``.
    """
    strand_map = {}
    qual = 0.0
    [ SU, PE, SR ] = [0,0,0]
    s_name_list = []
    s1_name_list = []
    format_string = var.get_format_string()
    gt_dict = dict()
    for b_i in c:
        A = BP[b_i].l.rstrip().split('\t')
        # QUAL column may be '.'; only sum numeric values
        if A[5].isdigit():
            qual += float(A[5])
        m = l_bp.to_map(A[7])
        # accumulate strand-orientation counts, e.g. '+-:7'
        for strand_entry in m['STRANDS'].split(','):
            s_type,s_count = strand_entry.split(':')
            if s_type not in strand_map:
                strand_map[s_type] = 0
            strand_map[s_type] += int(s_count)
        SU += int(m['SU'])
        PE += int(m['PE'])
        SR += int(m['SR'])
        if 'SNAME' in m:
            # remember which sample/variant this evidence came from
            s_name_list.append(m['SNAME'] + ':' + A[2])
        if include_genotypes:
            if format_string == A[8]:
                # FORMAT matches the merged record; copy the column as-is
                gt_dict[m['SNAME']] = A[9]
            else:
                # FORMAT differs: remap fields, filling missing ones with '.'
                format_dict = dict(zip(A[8].split(':'), A[9].split(':')))
                geno = ':'.join([format_dict.get(i, '.') for i in var.format_list])
                gt_dict[m['SNAME']] = geno
        else:
            var.format_dict=None
    if s_name_list:
        var.set_info('SNAME', ','.join(s_name_list))
    GTS = None
    if include_genotypes:
        # samples without evidence get a null genotype column
        null_string = null_format_string(format_string)
        GTS = '\t'.join([gt_dict.get(x, null_string) for x in sample_order])
        var.gts=None
        var.gts_string=GTS
    strand_types_counts = []
    for strand in strand_map:
        strand_types_counts.append(strand + ':' + str(strand_map[strand]))
    var.set_info('STRANDS', ','.join(strand_types_counts))
    var.qual = qual
    var.set_info('PE', str(PE))
    var.set_info('SU', str(SU))
    var.set_info('SR', str(SR))
def invtobnd(var):
    """Convert an INV variant whose strand evidence is entirely one-sided
    (``++:0`` or ``--:0``) into the equivalent BND representation.

    Mutates ``var`` in place: drops zero-count strand entries, rewrites
    SVTYPE/ALT as a breakend, and swaps the POS-side and END-side
    confidence-interval and probability tags.

    Raises MissingProbabilitiesException if PRPOS or PREND is absent.
    """
    strands = var.get_info('STRANDS')
    strand_dict = dict(x.split(':') for x in strands.split(','))
    # BUG FIX: iterate over a snapshot of the keys — deleting from a dict
    # while iterating its live .keys() view raises RuntimeError on Python 3.
    for o in list(strand_dict.keys()):
        if strand_dict[o] == '0':
            del(strand_dict[o])
    strands = ','.join(['%s:%s' % (o, strand_dict[o]) for o in strand_dict])
    var.set_info('STRANDS', strands)
    # After dropping the zero entry, an inversion leaves only ++ or --
    if strands[:2] == '++':
        ALT = 'N]' + var.chrom + ':' + str(var.get_info('END')) + ']'
    elif strands[:2] == '--':
        ALT = '[' + var.chrom + ':' + str(var.get_info('END')) + '[N'
    var.set_info('SVTYPE', 'BND')
    var.alt = ALT
    # Swap POS-side and END-side interval/probability annotations
    [ tempci, temp95 ] = [var.get_info('CIPOS'), var.get_info('CIPOS95')]
    try:
        temppr = var.get_info('PRPOS')
    except KeyError:
        raise MissingProbabilitiesException('Required tag PRPOS not found.')
    var.set_info('CIPOS', var.get_info('CIEND'))
    var.set_info('CIEND', tempci)
    var.set_info('CIPOS95', var.get_info('CIEND95'))
    var.set_info('CIEND95', temp95 )
    try:
        var.set_info('PRPOS', var.get_info('PREND'))
    except KeyError:
        raise MissingProbabilitiesException('Required tag PREND not found.')
    var.set_info('PREND', temppr )
def write_var(var, vcf_out, include_genotypes=False):
    """Write ``var`` to ``vcf_out`` as one record, or as a mated pair of
    BND records when the ALT is a breakend.

    Sets IMPRECISE from the CI95 tags, converts one-sided inversions to
    BND first, and strips genotype columns unless ``include_genotypes``.
    """
    v_id=var.var_id
    # Any non-zero 95% CI means the call is imprecise
    if var.get_info('CIPOS95') != '0,0' or var.get_info('CIEND95') != '0,0':
        var.set_info('IMPRECISE', True)
    else:
        var.set_info('IMPRECISE', False)
    # Inversions supported on only one strand orientation are re-expressed
    # as breakends before writing
    if var.get_info('SVTYPE') == 'INV' and ('--:0' in var.get_info('STRANDS') or '++:0' in var.get_info('STRANDS')):
        invtobnd(var)
    if var.alt not in ['<DEL>', '<DUP>', '<INV>']:
        # Breakend: emit two mated records (<id>_1 and <id>_2)
        var.var_id=str(v_id)+'_1'
        var.set_info('EVENT', v_id)
        var.set_info('MATEID', str(v_id)+'_2')
        # END/SVLEN are not valid on BND records
        var.info.pop('END', None)
        var.info.pop('SVLEN', None)
        varstring=var.get_var_string(use_cached_gt_string=True)
        if not include_genotypes:
            # keep only the 8 fixed VCF columns
            varstring='\t'.join(varstring.split('\t', 10)[:8])
        vcf_out.write(varstring+'\n')
        # Build the reciprocal ALT for the mate record, pointing back at
        # this record's position with the mirrored bracket orientation
        new_alt = ''
        if var.alt[0] == '[':
            new_alt = '[' + var.chrom + ':' + str(var.pos) + '[N'
        elif var.alt[0] == ']':
            new_alt = 'N[' + var.chrom + ':' + str(var.pos) + '['
        elif var.alt[-1] == '[':
            new_alt = ']' + var.chrom + ':' + str(var.pos) + ']N'
        elif var.alt[-1] == ']':
            new_alt = 'N]' + var.chrom + ':' + str(var.pos) + ']'
        # Move the record to the mate's coordinates
        sep, chrom, pos = parse_bnd_alt_string(var.alt)
        var.chrom = chrom
        var.pos = int(pos)
        var.var_id = str(v_id)+'_2'
        var.set_info('MATEID', str(v_id)+'_1')
        var.set_info('SECONDARY', True)
        var.alt = new_alt
        # The mate views the breakpoint from the other side: swap the
        # POS-side and END-side CI/probability tags
        [ tempci, temp95 ] = [var.get_info('CIPOS'), var.get_info('CIPOS95')]
        try:
            temppr = var.get_info('PRPOS')
        except KeyError:
            raise MissingProbabilitiesException('Required tag PRPOS not found.')
        var.set_info('CIPOS', var.get_info('CIEND'))
        var.set_info('CIEND', tempci)
        var.set_info('CIPOS95', var.get_info('CIEND95'))
        var.set_info('CIEND95', temp95 )
        try:
            var.set_info('PRPOS', var.get_info('PREND'))
        except KeyError:
            raise MissingProbabilitiesException('Required tag PREND not found.')
        var.set_info('PREND', temppr )
        varstring=var.get_var_string(use_cached_gt_string=True)
        if not include_genotypes:
            varstring='\t'.join(varstring.split('\t', 10)[:8])
        vcf_out.write(varstring+'\n')
    else:
        # Symbolic ALT: a single record suffices
        varstring=var.get_var_string(use_cached_gt_string=True)
        if not include_genotypes:
            varstring='\t'.join(varstring.split('\t', 10)[:8])
        vcf_out.write(varstring+'\n')
def merge(BP, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes=False, weighting_scheme='unweighted'):
    """Merge the breakpoints in ``BP`` and write the merged variants.

    A single breakpoint is passed straight through; otherwise the set is
    sorted by left start, partitioned into cliques, and each clique is
    collapsed into one variant. Returns the updated variant-id counter.
    """
    if len(BP) == 1:
        # lone breakpoint: nothing to cluster, emit it directly
        v_id += 1
        variant = merge_single_bp(BP, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes)
        write_var(variant, vcf_out, include_genotypes)
        return v_id
    # several breakpoints: cluster into cliques and merge each one
    BP.sort(key=lambda bp: bp.left.start)
    cliques = []
    order_cliques(BP, cliques)
    for clique in cliques:
        v_id += 1
        variant = create_merged_variant(BP, clique, v_id, vcf, use_product, weighting_scheme)
        combine_var_support(variant, BP, clique, include_genotypes, sample_order)
        write_var(variant, vcf_out, include_genotypes)
    return v_id
def r_cluster(BP_l, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes=False, weighting_scheme='unweighted'):
    """Sub-cluster a left-side cluster by the RIGHT breakpoint side.

    Sweeps the breakpoints in right-coordinate order and sends each run of
    right-overlapping, same-chromosome breakpoints to ``merge``. Returns
    the updated variant-id counter.
    """
    # need to resort based on the right side, then extract clusters
    # (two stable sorts: final order is by right chrom, then right start)
    BP_l.sort(key=lambda x: x.right.start)
    BP_l.sort(key=lambda x: x.right.chrom)
    BP_r = []            # current right-side cluster
    BP_max_end_r = -1    # furthest right end seen in the cluster
    BP_chr_r = ''        # right chromosome of the cluster
    for b in BP_l:
        # extend the cluster while the next breakpoint overlaps it on the
        # right side and stays on the same chromosome
        if (len(BP_r) == 0) or \
           ((b.right.start <= BP_max_end_r) and \
            (b.right.chrom == BP_chr_r)):
            BP_r.append(b)
            BP_max_end_r = max(BP_max_end_r, b.right.end)
            BP_chr_r = b.right.chrom
        else:
            # gap found: merge the finished cluster, start a new one
            v_id = merge(BP_r, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
            BP_r = [b]
            BP_max_end_r = b.right.end
            BP_chr_r = b.right.chrom
    # flush the trailing cluster
    if len(BP_r) > 0:
        v_id = merge(BP_r, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
    return v_id
def l_cluster_by_line(file_name, tempdir, percent_slop=0, fixed_slop=0, use_product=False, include_genotypes=False, weighting_scheme='unweighted'):
    """Stream a left-sorted VCF, cluster breakpoints by their LEFT side,
    and write merged variants to stdout.

    The input must be sorted by left position (svtools lsort output).
    Runs of breakpoints that overlap on the left, share a chromosome and
    an SV type are handed to ``r_cluster`` for right-side clustering.
    ``percent_slop``/``fixed_slop`` widen each breakpoint's confidence
    interval before clustering.
    """
    v_id = 0
    in_header = True
    header = []
    vcf = Vcf()
    vcf_out=sys.stdout
    with InputStream(file_name, tempdir) as vcf_stream:
        BP_l = []             # current left-side cluster
        BP_sv_type = ''       # SV type shared by the cluster
        BP_max_end_l = -1     # furthest left end seen in the cluster
        BP_chr_l = ''         # left chromosome of the cluster
        sample_order = []     # sample names taken from ##SAMPLE header lines
        for line in vcf_stream:
            if in_header:
                if line.startswith('##'):
                    header.append(line)
                    continue
                elif line.startswith('#CHROM'):
                    # column header: finalize our output header
                    v=line.rstrip().split('\t')
                    for headline in header:
                        if headline[:8] == '##SAMPLE':
                            # extract NAME from ##SAMPLE=<ID=NAME>
                            sample_order.append(headline.rstrip()[13:-1])
                    hline=''
                    if include_genotypes :
                        # one genotype column per discovered sample
                        v.extend(sample_order)
                        hline='\t'.join(v)
                    else :
                        # sites-only output: keep the 8 fixed columns
                        v=v[:8]
                        hline='\t'.join(v)
                    header.append(hline)
                    in_header=False
                    vcf.add_header(header)
                    vcf.add_info('ALG', '1', 'String', 'Algorithm used to merge this breakpoint')
                    if include_genotypes:
                        vcf_out.write(vcf.get_header()+'\n')
                    else:
                        vcf_out.write(vcf.get_header(False)+'\n')
                # header lines are never clustered
                continue
            b = Breakpoint(l_bp.parse_vcf_record(line), percent_slop=percent_slop, fixed_slop=fixed_slop)
            # extend the left cluster while this record overlaps it on the
            # left and matches chromosome and SV type
            if (len(BP_l) == 0) or ((b.left.start <= BP_max_end_l) and (b.left.chrom == BP_chr_l) and (b.sv_type == BP_sv_type)):
                BP_l.append(b)
                BP_max_end_l = max(BP_max_end_l, b.left.end)
                BP_chr_l = b.left.chrom
                BP_sv_type = b.sv_type
            else:
                # gap found: cluster the finished run on the right side
                v_id = r_cluster(BP_l, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
                BP_l = [b]
                BP_max_end_l = b.left.end
                BP_sv_type = b.sv_type
                BP_chr_l = b.left.chrom
        # flush the trailing cluster
        if len(BP_l) > 0:
            v_id = r_cluster(BP_l, sample_order, v_id, use_product, vcf, vcf_out, include_genotypes, weighting_scheme)
def description():
    """One-line summary shown in the CLI help for this subcommand."""
    summary = 'merge LUMPY calls inside a single file from svtools lsort'
    return summary
def epilog():
    """Trailing note shown at the bottom of the CLI help."""
    note = 'Note that if both slop parameters are set then the maximum is used.'
    return note
def add_arguments_to_parser(parser):
    """Register this subcommand's CLI options on ``parser`` and wire
    ``run_from_args`` as its entry point."""
    # input file (lsort output); SNAME tags are required for merging
    parser.add_argument('-i', '--inFile', metavar='<FILE>', help='a sorted VCF file generated by svtools lsort. Each INFO field must contain an SNAME tag containing the sample name (e.g. SNAME=SAMPLE_NAME)')
    # slop options widen breakpoint confidence intervals before clustering
    parser.add_argument('-p', '--percent-slop', metavar='<FLOAT>', type=float, default=0.0, help='increase the the breakpoint confidence interval both up and down stream by a given proportion of the original size')
    parser.add_argument('-f', '--fixed-slop', metavar='<INT>', type=int, default=0, help='increase the the breakpoint confidence interval both up and down stream by a given fixed size')
    # note: --sum DISABLES the product algorithm (use_product defaults to True)
    parser.add_argument('--sum', dest='use_product', action='store_false', default=True, help='calculate breakpoint PDF and position using sum algorithm instead of product')
    parser.add_argument('-g', dest='include_genotypes', action='store_true', default=False, help='include original genotypes in output. When multiple variants are merged, the last will dictate the genotype field')
    parser.add_argument('-w', dest='weighting_scheme', metavar='<STRING>', default="unweighted", choices=['carrier_wt', 'evidence_wt'], help='weighting scheme (intended for use in tiered merging), options: unweighted, carrier_wt, evidence_wt')
    parser.add_argument('-t', '--tempdir', metavar='<DIR>', required=False, default=None, help='Directory for temp file downloads')
    parser.set_defaults(entry_point=run_from_args)
def command_parser():
    """Build the stand-alone argument parser for this subcommand."""
    cli = argparse.ArgumentParser(description=description(), epilog=epilog())
    add_arguments_to_parser(cli)
    return cli
def run_from_args(args):
    """Entry point wired into the parser: forward the parsed CLI options
    to ``l_cluster_by_line``."""
    options = {
        'percent_slop': args.percent_slop,
        'fixed_slop': args.fixed_slop,
        'use_product': args.use_product,
        'include_genotypes': args.include_genotypes,
        'weighting_scheme': args.weighting_scheme,
    }
    l_cluster_by_line(args.inFile, args.tempdir, **options)
if __name__ == "__main__":
    # Allow running this module directly as a script; the parser stores
    # the handler in args.entry_point, whose return value becomes the
    # process exit status.
    parser = command_parser()
    args = parser.parse_args()
    sys.exit(args.entry_point(args))
|
20,533 | 157b3e76c22e7ab3bd49b820eccd9635b35bbd15 | """
Created on 21.01.2021
@author: Philipp
"""
import json
import os
import logging
logger = logging.getLogger(__file__)
def merge_params(experiment_config: dict, params: dict, sub_config: str = None):
    """Merge ``params`` into the config's ``"params"`` mapping, in place.

    When ``sub_config`` is given, the ``"params"`` of that sub-section are
    updated instead of the top-level ones. On key collisions the values
    from ``params`` win.
    """
    target = experiment_config[sub_config] if sub_config else experiment_config
    target["params"] = {**target["params"], **params}
def replace_placeholders(experiment_config: dict, placeholders: dict):
    """Substitute every ``$name`` token in the config's string values with
    its mapped value, recursively and in place."""
    for name, value in placeholders.items():
        replace_placeholder_in_dict(name, value, experiment_config)
def replace_placeholder_in_dict(placeholder: str, value: str, parameters: dict):
    """Replace ``$placeholder`` with ``value`` in every string value of
    ``parameters``, descending into nested dicts and lists (in place)."""
    token = "$" + placeholder
    for key, entry in parameters.items():
        if isinstance(entry, dict):
            replace_placeholder_in_dict(placeholder, value, entry)
        if isinstance(entry, list):
            replace_placeholder_in_list(placeholder, value, entry)
        if isinstance(entry, str):
            parameters[key] = entry.replace(token, value)
def replace_placeholder_in_list(placeholder: str, value: str, parameters: list):
    """Replace ``$placeholder`` with ``value`` in every string element of
    ``parameters``, descending into nested dicts and lists (in place)."""
    token = "$" + placeholder
    for position, entry in enumerate(parameters):
        if isinstance(entry, dict):
            replace_placeholder_in_dict(placeholder, value, entry)
        if isinstance(entry, list):
            replace_placeholder_in_list(placeholder, value, entry)
        if isinstance(entry, str):
            parameters[position] = entry.replace(token, value)
class ExperimentConfigLoader:
    """Load experiment configuration JSON files from a config directory.

    Values under the keys in ``ref_words`` may point at other ``.json``
    files, which are loaded and inlined recursively. After loading, a
    comet-ml config is attached, injected params are merged in, and
    ``$placeholder`` tokens are replaced.
    """

    def __init__(self, config_top_dir: str, ref_words: list = None):
        # top directory containing 'experiments/' and 'cometml/' sub-dirs
        self.config_top_dir = config_top_dir
        # keys whose values may reference other .json config files
        self.ref_words = ref_words or ["model", "dataset", "task", "env", "callbacks", "savers"]
        """ Optionals """
        self.experiment_params = dict()
        self.dataset_params = dict()
        self.placeholders = dict()

    def with_experiment_params(self, **params):
        """ Dynamically inject the given params into the config["params"] on load.
        Usually we want to set if to 'resume' or the 'checkpoint_dir' given as a command line argument.
        :param params: to be injected
        """
        self.experiment_params = params
        return self

    def with_dataset_params(self, **params):
        """ Dynamically inject the given params into the config["dataset"]["params"] on load.
        Usually we want to set the 'dataset_directory' given as a command line argument.
        :param params: to be injected
        """
        self.dataset_params = params
        return self

    def with_placeholders(self, **placeholders):
        """ Replace the given placeholder within the config after load.
        :param placeholders: to be replaced with actual values.
        """
        self.placeholders = placeholders
        return self

    def load(self, experiment_name, comet_user=None):
        """
        Load, expand and post-process an experiment configuration.

        :param experiment_name: the name of the experiment (file) to load. Might include a subpath e.g. 'exp-group1/exp1'
        :param comet_user: comet-ml account to load 'cometml/<user>.json' for; defaults to the offline config
        :return: the fully expanded experiment config dict
        """
        experiment_config = self.__load_config(experiment_name)
        cometml_config = self.__load_comet_config(comet_user)
        self.__inject_and_replace(experiment_config, cometml_config)
        return experiment_config

    def __load_config(self, experiment_name):
        # Resolve <top>/experiments[/<subdir>]/<name>.json and load it
        experiments_dir = os.path.join(self.config_top_dir, "experiments")
        experiment_subdir, experiment_name = os.path.split(experiment_name)
        if experiment_subdir:
            logger.info("Experiment sub-directory: %s", experiment_subdir)
            experiments_dir = os.path.join(experiments_dir, experiment_subdir)
        experiment_configs = [file for file in os.listdir(experiments_dir) if file.endswith(".json")]
        json_name = experiment_name + ".json"
        if json_name not in experiment_configs:
            # fail with a listing of what IS available to aid the user
            available_configs = "\n".join(sorted([n.replace(".json", "") for n in experiment_configs]))
            err_msg = "ExperimentConfigurations %s was not found. " \
                      "Available experiment configurations:\n%s" % (json_name, available_configs)
            raise FileNotFoundError(err_msg)
        if experiment_subdir:
            relative_experiment_config_path = os.path.join("experiments", experiment_subdir, json_name)
        else:
            relative_experiment_config_path = os.path.join("experiments", json_name)
        experiment_config = self.__load_json_config_as_dict(self.config_top_dir, relative_experiment_config_path)
        experiment_config["name"] = experiment_name
        return experiment_config

    def __inject_and_replace(self, experiment_config, cometml_config):
        if "series" in experiment_config:
            # NOTE(review): this recursion passes a {name: config} mapping,
            # which the else-branch below then treats as a single config
            # (it has no "params"/"name" keys) — verify that series
            # configs are actually injected per entry as intended.
            self.__inject_and_replace(dict([(c["name"], c) for c in experiment_config["series"]]), cometml_config)
        else:
            if cometml_config:
                # We directly set the cometml params here
                experiment_config["cometml"] = cometml_config
            if len(self.experiment_params) > 0:
                merge_params(experiment_config, self.experiment_params)
            if len(self.dataset_params) > 0:
                merge_params(experiment_config, self.dataset_params, "dataset")
            # Add default placeholders
            self.placeholders["experiment_name"] = experiment_config["name"]
            if self.placeholders:
                replace_placeholders(experiment_config, self.placeholders)

    def __load_comet_config(self, comet_user):
        # per-user comet-ml config, falling back to the offline one
        rel_path = "cometml/offline.json"
        if comet_user:
            rel_path = "cometml/%s.json" % comet_user
        return self.__load_json_config_as_dict(self.config_top_dir, rel_path)

    def __load_json_config_as_dict(self, config_top_directory_or_file, relative_config_file_path=None):
        """
        :param config_top_directory_or_file:
                    the top directory in which other config directories are located
                    or an absolute path to a config file
        :param relative_config_file_path: the path to a config file relative to the config_top_directory_or_file.
                    Can be None, when the other parameter is already pointing to a config file.
        """
        config_path = config_top_directory_or_file
        if os.path.isdir(config_top_directory_or_file):
            config_path = os.path.join(config_top_directory_or_file, relative_config_file_path)
        with open(config_path, "r", encoding="utf8", newline='') as json_file:
            loaded_config = json.load(json_file)
            # resolve any file references before handing the config back
            expanded_config = self.__expand_config_values(config_top_directory_or_file, loaded_config)
        return expanded_config

    def __expand_dict_values(self, config_top_directory_or_file, loaded_value):
        # Recursively replace '<ref_word>: <file>.json' entries with the
        # loaded file content (tagged with its base name as "name")
        if not isinstance(loaded_value, dict):
            return loaded_value
        for key in loaded_value.keys():
            if key in self.ref_words and loaded_value[key].endswith(".json"):
                file_name = os.path.basename(loaded_value[key])[:-len(".json")]
                loaded_value[key] = self.__load_json_config_as_dict(config_top_directory_or_file, loaded_value[key])
                loaded_value[key]["name"] = file_name
            else:  # go deeper if necessary
                loaded_value[key] = self.__expand_dict_values(config_top_directory_or_file, loaded_value[key])
        return loaded_value

    def __expand_config_values(self, config_top_directory_or_file, loaded_config):
        config = dict()
        for key in loaded_config.keys():
            # Note: These are special pointer keys to dict or file configs (which could also be inlined)
            if key in self.ref_words:
                key_value = loaded_config[key]
                if isinstance(key_value, dict):
                    # if the value is a dict with values that refer to configs
                    config[key] = self.__expand_dict_values(config_top_directory_or_file, loaded_config[key])
                elif isinstance(key_value, list):
                    config[key] = key_value  # simply copy
                elif key_value.endswith(".json"):
                    # the value itself is a file reference: load and inline it
                    file_name = os.path.basename(loaded_config[key])[:-len(".json")]
                    config[key] = self.__load_json_config_as_dict(config_top_directory_or_file, loaded_config[key])
                    config[key]["name"] = file_name
                else:
                    raise Exception("Cannot handle key_value for ref_word %s: %s" % (key, key_value))
            else:
                # Note: The key value is a potentially a dict anyway (like 'params')
                config[key] = self.__expand_dict_values(config_top_directory_or_file, loaded_config[key])
        if "series" in loaded_config.keys():  # special case that should only occur once in top level
            series = loaded_config["series"]
            config["series"] = [self.__expand_config_values(config_top_directory_or_file, entry) for entry in series]
            # copy all the values from the "series" config to each actual series entry
            for entry in config["series"]:
                for series_key in loaded_config.keys():
                    if series_key not in ["name", "series", "params"]:  # do now overwrite name or copy the series
                        entry[series_key] = loaded_config[series_key]
                    if series_key == "params":  # merge params if possible
                        if series_key in entry:
                            if isinstance(entry[series_key], dict) and isinstance(loaded_config[series_key], dict):
                                entry[series_key] = {**entry[series_key], **loaded_config[series_key]}
                        else:
                            entry[series_key] = loaded_config[series_key]
        return config
|
20,534 | c11e9fe21c4730882930956b6999b28713bf8895 | #!/usr/bin/env python3.7
# title          : stopwatch_v2.py
# description    : Build a timer with specific modules from (time package)
# author         : Thuan Q Nguyen
# date           : 20190118
# version        : 0.2
# python_version : 3.7.2
# ==============================================================================
from time import localtime, mktime, strftime

# Record the moment the stopwatch starts.
t_start = localtime()
print(f"Timer started at {strftime('%X', t_start)}")

# Block until the user presses Enter, then record the stop time.
input("Press 'Enter' to stop timer ")
t_stop = localtime()

# struct_time has one-second resolution; mktime converts it to epoch seconds.
elapsed = mktime(t_stop) - mktime(t_start)
print(f"Timer stopped at {strftime('%X', t_stop)}")
print(f"Total time: {elapsed} seconds")
|
20,535 | 407e725236e68c97082855203a0c1c5dd385d64f | def total(x,y):
z = x+y;
print(z);
# return z;
print(total(4,5)); #kalo ga direturn none
# print(z); |
20,536 | b45ae0426aa43106eda3961341779709db46d09c | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: packetdt.proto
# plugin: python-betterproto
from dataclasses import dataclass
import betterproto
from . import cmddt
from . import sensordt
@dataclass
class Packet(betterproto.Message):
    """Wire message pairing a device uid and timestamp with one payload.

    ``cmds`` and ``out`` share the protobuf oneof group ``"type"``: a
    packet carries either command input or sensor output, not both.
    NOTE: generated by python-betterproto from packetdt.proto — regenerate
    from the .proto instead of editing by hand.
    """
    uid: str = betterproto.string_field(1)
    timestamp: int = betterproto.int64_field(2)
    cmds: cmddt.Cmdin = betterproto.message_field(3, group="type")
    out: sensordt.Sensorout = betterproto.message_field(4, group="type")
|
20,537 | 815296a0a6d0780dbb534e4297ac4c75275f5754 | from django.contrib import admin
from ModelApp.models import *
class RolePropertiesInline(admin.StackedInline):
    # Edit RoleProperties rows inline on the Role admin page.
    model = RoleProperties
    extra = 1
class RoleAdmin(admin.ModelAdmin):
    # Role admin: searchable by name, with its properties edited inline.
    search_fields = ('name',)
    inlines = [RolePropertiesInline]
class PlatformProductPropertiesInline(admin.StackedInline):
    # Edit PlatformProductProperties inline on the PlatformProduct page.
    model = PlatformProductProperties
class PlatformProductWorkingValue(admin.TabularInline):
    # NOTE(review): this inline shares its name with the model it wraps.
    # 'model = PlatformProductWorkingValue' still resolves to the model
    # (the class name is only bound after the body runs), but the inline
    # shadows the model name for the rest of this module — consider
    # renaming to PlatformProductWorkingValueInline.
    model = PlatformProductWorkingValue
    extra = 2
class PlatformProductAdmin(admin.ModelAdmin):
    # PlatformProduct admin: name search, properties + working values inline.
    search_fields = ('name',)
    inlines = [PlatformProductPropertiesInline, PlatformProductWorkingValue]
class GSSProductPropertiesInline(admin.StackedInline):
    # Edit GSSProductProperties inline on the GSSProduct page.
    model = GSSProductProperties
class GSSProductWorkingValue(admin.TabularInline):
    # NOTE(review): inline class name shadows the wrapped model (see
    # PlatformProductWorkingValue pattern in this module); 'model = ...'
    # still resolves to the model because the class name binds only after
    # the body executes.
    model = GSSProductWorkingValue
    extra = 3
class GSSProductAdmin(admin.ModelAdmin):
    # GSSProduct admin: name search, properties + working values inline.
    search_fields = ('name',)
    inlines = [GSSProductPropertiesInline, GSSProductWorkingValue]
class TimeAndMaterialsPropertiesInline(admin.StackedInline):
    # Edit TimeAndMaterialsProperties inline on the TimeAndMaterials page.
    model = TimeAndMaterialsProperties
class TimeAndMaterialsWorkingValue(admin.TabularInline):
    # NOTE(review): inline class name shadows the wrapped model; 'model'
    # still resolves to the model because the class name binds only after
    # the body executes.
    model = TimeAndMaterialsWorkingValue
    extra = 2
class TimeAndMaterialsAdmin(admin.ModelAdmin):
    # TimeAndMaterials admin: name search, properties + working values inline.
    search_fields = ('name',)
    inlines = [TimeAndMaterialsPropertiesInline, TimeAndMaterialsWorkingValue]
class PlatformProductForecastAdmin(admin.ModelAdmin):
    # Forecasts are searched by the related product's name.
    search_fields = ('product__name',)
# Wire the customized admin classes to their models. Models without a
# custom admin (GDRAT, Year) get the default ModelAdmin.
admin.site.register(Role, RoleAdmin)
admin.site.register(PlatformProduct, PlatformProductAdmin)
admin.site.register(GSSProduct, GSSProductAdmin)
admin.site.register(TimeAndMaterials, TimeAndMaterialsAdmin)
admin.site.register(GDRAT)
admin.site.register(PlatformProductForecast, PlatformProductForecastAdmin)
admin.site.register(Year)
20,538 | ab4e7d5098cf30d80eed27c609ac6ae7f57df7f1 | r"""
Calculate the potential fields of a homogeneous sphere.
**Magnetic**
Calculates the total field anomaly. Uses the formula in Blakely (1995).
* :func:`~fatiando.gravmag.sphere.tf`: calculates the total-field anomaly
Remember that:
The magnetization :math:`\mathbf{M}` and the dipole moment :math:`\mathbf{m}`
are related with the volume V:
.. math::
\mathbf{M} = \dfrac{\mathbf{m}}{V}.
The total-field anomaly is:
.. math::
\Delta T = |\mathbf{T}| - |\mathbf{F}|,
where :math:`\mathbf{T}` is the measured field and :math:`\mathbf{F}` is a
reference (regional) field. The forward modeling functions
:func:`~fatiando.gravmag.sphere.bx`, :func:`~fatiando.gravmag.sphere.by`,
and :func:`~fatiando.gravmag.sphere.bz` calculate the 3 components of the
field perturbation :math:`\Delta\mathbf{F}`
.. math::
\Delta\mathbf{F} = \mathbf{T} - \mathbf{F}.
Then the total-field anomaly caused by the sphere is
.. math::
\Delta T \approx \hat{\mathbf{F}}\cdot\Delta\mathbf{F}.
**Gravity**
Calculates the gravitational acceleration and gravity gradient tensor
components.
* :func:`fatiando.gravmag.sphere.gz`
* :func:`fatiando.gravmag.sphere.gxx`
* :func:`fatiando.gravmag.sphere.gxy`
* :func:`fatiando.gravmag.sphere.gxz`
* :func:`fatiando.gravmag.sphere.gyy`
* :func:`fatiando.gravmag.sphere.gyz`
* :func:`fatiando.gravmag.sphere.gzz`
**References**
Blakely, R. J. (1995), Potential Theory in Gravity and Magnetic Applications,
Cambridge University Press.
----
"""
import numpy
from fatiando.constants import SI2MGAL, G, CM, T2NT, SI2EOTVOS
from fatiando import utils
def tf(xp, yp, zp, spheres, inc, dec, pmag=None):
    """
    Calculate the total-field anomaly of spheres.

    .. note:: Input units are SI. Output is in nT

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the anomaly will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the physical property
        ``'magnetization'``. Spheres without ``'magnetization'`` will be
        ignored.
    * inc : float
        The inclination of the regional field (in degrees)
    * dec : float
        The declination of the regional field (in degrees)
    * pmag : [mx, my, mz], scalar, or None
        A magnetization vector (or intensity along the regional field). If
        not None, will use this value instead of the ``'magnetization'``
        property of the spheres. Use this, e.g., for sensitivity matrix
        building.

    Returns:

    * tf : array
        The total-field anomaly

    """
    # BUG FIX: the old chained test "xp.shape != yp.shape != zp.shape"
    # only compares adjacent pairs, so e.g. xp.shape == yp.shape != zp.shape
    # slipped through. Require all three shapes to be equal.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    tf = numpy.zeros_like(xp)
    # Unit vector along the regional field direction
    fx, fy, fz = utils.dircos(inc, dec)
    if pmag is not None:
        if isinstance(pmag, float) or isinstance(pmag, int):
            # scalar magnetization: intensity along the regional field
            pintensity = pmag
            pmx, pmy, pmz = fx, fy, fz
        else:
            pintensity = numpy.linalg.norm(pmag)
            pmx, pmy, pmz = numpy.array(pmag) / pintensity
    for sphere in spheres:
        if sphere is None or ('magnetization' not in sphere.props
                              and pmag is None):
            continue
        radius = sphere.radius
        # Get the intensity and unit vector from the magnetization
        if pmag is None:
            mag = sphere.props['magnetization']
            if isinstance(mag, float) or isinstance(mag, int):
                intensity = mag
                mx, my, mz = fx, fy, fz
            else:
                intensity = numpy.linalg.norm(mag)
                mx, my, mz = numpy.array(mag) / intensity
        else:
            intensity = pintensity
            mx, my, mz = pmx, pmy, pmz
        # Make the computation point P the origin of the coordinate system
        x = sphere.x - xp
        y = sphere.y - yp
        z = sphere.z - zp
        # Dipole field components (Blakely, 1995)
        dotprod = mx * x + my * y + mz * z
        r_sqr = x ** 2 + y ** 2 + z ** 2
        r5 = r_sqr ** (2.5)
        # dipole moment of a homogeneous sphere: intensity * volume
        moment = intensity * (4. * numpy.pi * (radius ** 3) / 3.)
        bx = moment * (3 * dotprod * x - r_sqr * mx) / r5
        by = moment * (3 * dotprod * y - r_sqr * my) / r5
        bz = moment * (3 * dotprod * z - r_sqr * mz) / r5
        # Project the field perturbation onto the regional field direction
        tf += (fx * bx + fy * by + fz * bz)
    tf *= CM * T2NT
    return tf
def gz(xp, yp, zp, spheres, dens=None):
    """
    Calculates the :math:`g_z` gravity acceleration component.

    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.

    .. note:: All input values in SI and output in mGal!

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the property ``'density'``. Those
        without will be ignored.
    * dens : float or None
        If not None, will use this value instead of the ``'density'`` property
        of the spheres. Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    """
    # BUG FIX: chained "!=" only compares adjacent pairs; require all three
    # shapes to be equal.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    res = numpy.zeros_like(xp)
    for sphere in spheres:
        if sphere is None or ('density' not in sphere.props and dens is None):
            continue
        if dens is None:
            density = sphere.props['density']
        else:
            density = dens
        radius = sphere.radius
        # Vector from the computation points to the sphere center
        dx = sphere.x - xp
        dy = sphere.y - yp
        dz = sphere.z - zp
        r_cb = (dx ** 2 + dy ** 2 + dz ** 2) ** (1.5)
        # Point-mass equivalent of the homogeneous sphere
        mass = density * 4. * numpy.pi * (radius ** 3) / 3.
        res += mass * dz / r_cb
    res *= G * SI2MGAL
    return res
def gxx(xp, yp, zp, spheres, dens=None):
    """
    Calculates the :math:`g_{xx}` gravity gradient component.

    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.

    .. note:: All input values in SI and output in Eotvos!

    Parameters:

    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the property ``'density'``. Those
        without will be ignored.
    * dens : float or None
        If not None, will use this value instead of the ``'density'`` property
        of the spheres. Use this, e.g., for sensitivity matrix building.

    Returns:

    * res : array
        The field calculated on xp, yp, zp

    """
    # BUG FIX: chained "!=" only compares adjacent pairs; require all three
    # shapes to be equal.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    res = numpy.zeros_like(xp)
    for sphere in spheres:
        if sphere is None or ('density' not in sphere.props and dens is None):
            continue
        if dens is None:
            density = sphere.props['density']
        else:
            density = dens
        radius = sphere.radius
        dx = sphere.x - xp
        dy = sphere.y - yp
        dz = sphere.z - zp
        r_2 = (dx ** 2 + dy ** 2 + dz ** 2)
        r_5 = r_2 ** (2.5)
        # Point-mass equivalent of the homogeneous sphere
        mass = density * 4. * numpy.pi * (radius ** 3) / 3.
        res += mass * (((3 * dx ** 2) - r_2) / r_5)
    res *= G * SI2EOTVOS
    return res
def gxy(xp, yp, zp, spheres, dens=None):
    """
    Calculates the :math:`g_{xy}` gravity gradient component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the property ``'density'``. Those
        without will be ignored.
    * dens : float or None
        If not None, will use this value instead of the ``'density'`` property
        of the spheres. Use this, e.g., for sensitivity matrix building.
    Returns:
    * res : array
        The field calculated on xp, yp, zp
    """
    # BUG FIX: chained `!=` only compared adjacent pairs; require all three
    # shapes to be equal before computing.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    res = numpy.zeros_like(xp)
    for sphere in spheres:
        if sphere is None or ('density' not in sphere.props and dens is None):
            continue
        density = sphere.props['density'] if dens is None else dens
        radius = sphere.radius
        dx = sphere.x - xp
        dy = sphere.y - yp
        dz = sphere.z - zp
        r_2 = (dx ** 2 + dy ** 2 + dz ** 2)
        r_5 = r_2 ** (2.5)
        mass = density * 4. * numpy.pi * (radius ** 3) / 3.
        res += mass * (3 * dx * dy) / r_5
    res *= G * SI2EOTVOS
    return res
def gxz(xp, yp, zp, spheres, dens=None):
    """
    Calculates the :math:`g_{xz}` gravity gradient component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the property ``'density'``. Those
        without will be ignored.
    * dens : float or None
        If not None, will use this value instead of the ``'density'`` property
        of the spheres. Use this, e.g., for sensitivity matrix building.
    Returns:
    * res : array
        The field calculated on xp, yp, zp
    """
    # BUG FIX: chained `!=` only compared adjacent pairs; require all three
    # shapes to be equal before computing.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    res = numpy.zeros_like(xp)
    for sphere in spheres:
        if sphere is None or ('density' not in sphere.props and dens is None):
            continue
        density = sphere.props['density'] if dens is None else dens
        radius = sphere.radius
        dx = sphere.x - xp
        dy = sphere.y - yp
        dz = sphere.z - zp
        r_2 = (dx ** 2 + dy ** 2 + dz ** 2)
        r_5 = r_2 ** (2.5)
        mass = density * 4. * numpy.pi * (radius ** 3) / 3.
        res += mass * (3 * dx * dz) / r_5
    res *= G * SI2EOTVOS
    return res
def gyy(xp, yp, zp, spheres, dens=None):
    """
    Calculates the :math:`g_{yy}` gravity gradient component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the property ``'density'``. Those
        without will be ignored.
    * dens : float or None
        If not None, will use this value instead of the ``'density'`` property
        of the spheres. Use this, e.g., for sensitivity matrix building.
    Returns:
    * res : array
        The field calculated on xp, yp, zp
    """
    # BUG FIX: chained `!=` only compared adjacent pairs; require all three
    # shapes to be equal before computing.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    res = numpy.zeros_like(xp)
    for sphere in spheres:
        if sphere is None or ('density' not in sphere.props and dens is None):
            continue
        density = sphere.props['density'] if dens is None else dens
        radius = sphere.radius
        dx = sphere.x - xp
        dy = sphere.y - yp
        dz = sphere.z - zp
        r_2 = (dx ** 2 + dy ** 2 + dz ** 2)
        r_5 = r_2 ** (2.5)
        mass = density * 4. * numpy.pi * (radius ** 3) / 3.
        res += mass * (((3 * dy ** 2) - r_2) / r_5)
    res *= G * SI2EOTVOS
    return res
def gyz(xp, yp, zp, spheres, dens=None):
    """
    Calculates the :math:`g_{yz}` gravity gradient component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the property ``'density'``. Those
        without will be ignored.
    * dens : float or None
        If not None, will use this value instead of the ``'density'`` property
        of the spheres. Use this, e.g., for sensitivity matrix building.
    Returns:
    * res : array
        The field calculated on xp, yp, zp
    """
    # BUG FIX: chained `!=` only compared adjacent pairs; require all three
    # shapes to be equal before computing.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    res = numpy.zeros_like(xp)
    for sphere in spheres:
        if sphere is None or ('density' not in sphere.props and dens is None):
            continue
        density = sphere.props['density'] if dens is None else dens
        radius = sphere.radius
        dx = sphere.x - xp
        dy = sphere.y - yp
        dz = sphere.z - zp
        r_2 = (dx ** 2 + dy ** 2 + dz ** 2)
        r_5 = r_2 ** (2.5)
        mass = density * 4. * numpy.pi * (radius ** 3) / 3.
        res += mass * (3 * dy * dz) / r_5
    res *= G * SI2EOTVOS
    return res
def gzz(xp, yp, zp, spheres, dens=None):
    """
    Calculates the :math:`g_{zz}` gravity gradient component.
    .. note:: The coordinate system of the input parameters is to be
        x -> North, y -> East and z -> Down.
    .. note:: All input values in SI and output in Eotvos!
    Parameters:
    * xp, yp, zp : arrays
        The x, y, and z coordinates where the field will be calculated
    * spheres : list of :class:`fatiando.mesher.Sphere`
        The spheres. Spheres must have the property ``'density'``. Those
        without will be ignored.
    * dens : float or None
        If not None, will use this value instead of the ``'density'`` property
        of the spheres. Use this, e.g., for sensitivity matrix building.
    Returns:
    * res : array
        The field calculated on xp, yp, zp
    """
    # BUG FIX: chained `!=` only compared adjacent pairs; require all three
    # shapes to be equal before computing.
    if not (xp.shape == yp.shape == zp.shape):
        raise ValueError("Input arrays xp, yp, and zp must have same shape!")
    res = numpy.zeros_like(xp)
    for sphere in spheres:
        if sphere is None or ('density' not in sphere.props and dens is None):
            continue
        density = sphere.props['density'] if dens is None else dens
        radius = sphere.radius
        dx = sphere.x - xp
        dy = sphere.y - yp
        dz = sphere.z - zp
        r_2 = (dx ** 2 + dy ** 2 + dz ** 2)
        r_5 = r_2 ** (2.5)
        mass = density * 4. * numpy.pi * (radius ** 3) / 3.
        res += mass * (((3 * dz ** 2) - r_2) / r_5)
    res *= G * SI2EOTVOS
    return res
|
20,539 | b54b0291c40eb6727546c55dddf6324f86db8e98 | """
"""
# Known (input, expected-output) pairs checked by the __main__ self-test.
testcases = [
    {
        'input': (4, 2),  # height=4, maxSteps=2
        'output': 5,
    },
]
def staircaseTraversal(height, maxSteps):
    """Count the distinct ways to climb ``height`` steps taking 1..maxSteps
    steps at a time (ordered compositions of ``height`` into parts <= maxSteps).

    Replaces the original branch-and-count search, which enumerated every
    path and ran in exponential time, with an O(height * maxSteps)
    bottom-up DP that returns identical values (including 0 for height <= 0).
    """
    if height <= 0:
        # Matches the original behaviour: a non-positive staircase has no ways.
        return 0
    # ways[h] = number of ordered step sequences summing exactly to h.
    ways = [0] * (height + 1)
    ways[0] = 1  # one way to stand still at the bottom
    for h in range(1, height + 1):
        ways[h] = sum(ways[h - step] for step in range(1, min(maxSteps, h) + 1))
    return ways[height]
if __name__ == '__main__':
    # Self-check: run every known test case and fail loudly on a mismatch.
    for tc in testcases:
        ret = staircaseTraversal(*tc['input'])
        assert(ret == tc['output'])
|
20,540 | bc68fe81a147b4e9113d88b99d78dd96386711f8 | """twido URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from . import views
# to render full template path
def t(template):
    """Return the template name prefixed with this app's template directory."""
    app_prefix = 'twido/'
    return app_prefix + template
urlpatterns = [
    # BUG FIX: since Django 1.9, passing the 3-tuple ``admin.site.urls`` to
    # include() raises ImproperlyConfigured; mount it directly instead.
    url(r'^admin/', admin.site.urls),
    # (r'^robots\.txt$', 'django.views.generic.simple.direct_to_template', {'template': 'robots.txt', 'mimetype': 'text/plain'}),
    # (r'^favicon\.ico$', 'django.views.generic.simple.redirect_to', {'url': '/static/img/favicon.ico'}),
    # Auth: custom login/register/profile, plus the stock auth URLs (logout,
    # password reset, ...).
    url(r'^login/$', views.LoginView.as_view(template_name='registration/login.html'), name='login'),
    url(r'', include('django.contrib.auth.urls')),
    url(r'^register/$', views.RegisterView.as_view(template_name='registration/register.html'), name='register'),
    url(r'^profile/$', views.ProfileView.as_view(template_name='registration/profile.html'), name='profile'),
    url(r'^$', views.IndexView.as_view(template_name=t('index.html')), name='index'),
    url(r'^home/$', views.HomeView.as_view(template_name=t('home.html')), name='home'),
    url(r'^setting/$', views.SettingView.as_view(template_name=t('setting.html')), name='setting'),
    # The social routes share one view; the optional `action` kwarg selects
    # the link/update/login behaviour.
    url(r'^social/$', views.SocialView.as_view(template_name=t('social.html')), name='social'),
    url(r'^social/(?P<action>link)/$', views.SocialView.as_view(template_name=t('social.html')), name='social'),
    url(r'^social/(?P<action>update)/$', views.SocialView.as_view(template_name=t('social.html')), name='social'),
    url(r'^social/(?P<action>login)/$', views.SocialView.as_view(template_name=t('social.html')), name='social'),
    url(r'^list/$', views.ListView.as_view(template_name=t('list.html')), name='list'),
    url(r'^list/(?P<pk>[0-9]+)/$', views.ListView.as_view(template_name=t('list.html')), name='list'),
    url(r'^task/(?P<pk>[0-9]+)/$', views.TaskView.as_view(template_name=t('task.html')), name='task'),
    url(r'^json/usernames/$', views.ProfileUsernamesJsonView.as_view()),
    # url(r'^', include('user.urls')),
]
# debug & test: extra routes only when DEBUG is on (debug_toolbar is only
# importable in dev environments).
if settings.DEBUG:
    import debug_toolbar
    urlpatterns.extend([
        url(r'^test/$', views.test, name='test'),
        url(r'^test/(?P<pk>[0-9]+)/$', views.test, name='test'),
        url(r'^debug/', include(debug_toolbar.urls)),
    ])
|
20,541 | 680150d71549bab7d5f95e85da477466b5ccd41f | #'game' that asks the user list 5 of the first 20 elements from the periodic table
#gives feedback to the user based on correct answers
def get_answers():
    """Read the accepted element names from alkuaineet.txt, one per line."""
    # Explicit UTF-8 so the Finnish names decode identically on every platform.
    with open('alkuaineet.txt', 'r', encoding='utf-8') as file_1:
        # Strip only the trailing newline of each line.  The `with` block
        # closes the file; the original's explicit close() inside the block
        # was redundant.
        return [line.rstrip('\n') for line in file_1]
def user_input():
    """Prompt until five distinct, non-empty element names are collected.

    Names are stored lowercased; blank lines and duplicates are rejected
    with a message and re-prompted.
    """
    answers = []
    while len(answers) < 5:
        entry = input(f"Syötä aine {len(answers)+1}: ")
        if len(entry) < 1:
            print("Syötit tyhjän rivin!")
            continue
        lowered = entry.lower()
        if lowered in answers:
            print(f"{lowered} on jo syötetty. Duplikaatit eivät ole sallittuja!")
        else:
            answers.append(lowered)
    return answers
def check_answers(user_answers: list, correct_answers: list) -> dict:
    """Split the user's answers into correct ('Oikein') and wrong ('Väärin').

    Each answer is displayed with its first letter upper-cased and the rest
    left untouched.
    """
    score = {'Oikein': [], 'Väärin': []}
    for answer in user_answers:
        pretty = f"{answer[0].upper()}{answer[1:]}"
        bucket = 'Oikein' if answer in correct_answers else 'Väärin'
        score[bucket].append(pretty)
    return score
def format_output(user_score: dict) -> str:
    """Build the summary string: percentage, both answer lists and a rank."""
    n_right = len(user_score['Oikein'])
    n_wrong = len(user_score['Väärin'])
    correct_percentage = n_right / (n_right + n_wrong) * 100
    # Comma-join each list, substituting a placeholder when it is empty.
    correct_string = ", ".join(user_score['Oikein']) if n_right > 0 else 'Ei oikeita vastauksia'
    incorrect_string = ", ".join(user_score['Väärin']) if n_wrong > 0 else "Ei vääriä vastauksia"
    # Rank thresholds: <41% Noob, <75% Average, otherwise Good.
    if correct_percentage < 41:
        rank = 'Noob'
    elif correct_percentage < 75:
        rank = 'Average'
    else:
        rank = 'Good'
    return f"\n{correct_percentage}% oikein. Oikein: {correct_string}. Väärin: {incorrect_string}.\nTaitotaso: {rank}"
###MAIN###
# Load the accepted element names from the data file.
correct = get_answers()
# Collect five distinct guesses from the user.
print('Kerro viisi ensimmäisistä 20:stä alkuaineesta jaksollisessa järjestelmässä:\n')
user = user_input()
# Score the guesses against the accepted names and print the summary.
user_score = check_answers(user, correct)
print(format_output(user_score))
|
20,542 | bd486de78f71830da896f549205cda89fe479db3 | import hashlib
class GCoinBlock:
    """A minimal blockchain block: a transaction summary plus the previous
    block's hash, fingerprinted with SHA-256.

    ``transaction_list`` is expected to hold [sender, receiver, amount].
    """
    def __init__(self, prev_block_hash, transaction_list):
        self.prev_block_hash = prev_block_hash
        self.transaction_list = transaction_list
        sender = self.transaction_list[0]
        receiver = self.transaction_list[1]
        amount = self.transaction_list[2]
        # "Recevier" typo kept byte-for-byte: it is part of the hashed payload.
        parts = (
            f"Sender: {sender}\n",
            f"Recevier: {receiver}\n",
            f"Sum: {amount} G coin\n",
            f"Prev block hash: {self.prev_block_hash}",
        )
        self.block_data = "".join(parts)
        self.hash = hashlib.sha256(self.block_data.encode()).hexdigest()
20,543 | b54f21a78a77347bb51bd99ca33454d928d67af0 | import re
from typing import cast
from flask import current_app
from flask_wtf import FlaskForm
from wtforms import (
Field,
StringField,
BooleanField,
TextAreaField,
IntegerField,
SelectField,
ValidationError)
from wtforms.validators import DataRequired, Optional
from wtforms.widgets.html5 import NumberInput
from amanuensis.config import ReadOnlyOrderedDict, AttrOrderedDict
from amanuensis.models import ModelFactory, UserModel
from amanuensis.server.forms import User
# Parses an index specification such as "char[-1]:pattern": group 1 is the
# index type, group 3 the optional priority (inside literal brackets) and
# group 4 the pattern text.
index_regex = re.compile(
    r'(char|prefix|etc)'  # index type
    r'(\[(-?\d+)\])?'  # index pri
    r':(.+)')  # index pattern
class SettingTranslator():
    """
    Base class for the translation layer between internal config data
    and user-friendly display in the settings form. By default the data
    is returned as-is.
    """
    def load(self, cfg_value):
        """Convert an internal config value into its form representation."""
        return cfg_value
    def save(self, field_data):
        """Convert submitted form data back into the internal config value."""
        return field_data
class UsernameTranslator(SettingTranslator):
    """
    Converts an internal user id to a public-facing username.
    """
    def load(self, cfg_value):
        # cfg_value is a user id; resolve it to the displayable username.
        model_factory: ModelFactory = current_app.config['model_factory']
        user: UserModel = model_factory.user(cfg_value)
        return user.cfg.username
    def save(self, field_data):
        # field_data is the username typed into the form; map it back to a
        # user id.  NOTE(review): falls through to an implicit None when the
        # username does not resolve -- confirm callers handle None.
        model_factory: ModelFactory = current_app.config['model_factory']
        user: UserModel = model_factory.try_user(field_data)
        if user:
            return user.uid
class IndexListTranslator(SettingTranslator):
    """
    Converts internal index representations into the index
    specification format used in the editable list.
    """
    def load(self, cfg_value):
        """Render each stored index as one 'type[pri]:pattern' line."""
        index_list = []
        for index in cfg_value:
            # Priority 0 is the default and is omitted from the display form.
            if index.pri == 0:
                index_list.append('{type}:{pattern}'.format(**index))
            else:
                index_list.append('{type}[{pri}]:{pattern}'.format(**index))
        return '\n'.join(index_list)
    def save(self, field_data):
        """Parse the submitted lines back into index dicts.

        Assumes the field already passed the IndexList validator, so every
        line matches index_regex (`match` is not checked for None here).
        """
        index_list = []
        has_etc = False
        for index in field_data.split('\n'):
            match = index_regex.fullmatch(index)
            itype, _, pri, pattern = match.groups()
            # NOTE(review): pri stays a *string* when given ("[2]" -> "2")
            # but defaults to the *int* 0 -- confirm consumers accept both.
            index_list.append(dict(
                type=itype,
                pri=pri or 0,
                pattern=pattern.strip()))
            if itype == 'etc':
                has_etc = True
        # Guarantee a catch-all 'etc' index so every article sorts somewhere.
        if not has_etc:
            index_list.append(dict(
                type='etc',
                pri=0,
                pattern='&c'))
        return index_list
class TmpAsgnTranslator(SettingTranslator):
    """Round-trips the raw turn-assignment structure through JSON text."""
    def load(self, cfg_value):
        """Serialize the internal assignment object for display in the form."""
        import json
        # Removed a leftover debug print(cfg_value) that spammed stdout on
        # every settings-page render.
        return json.dumps(cfg_value)
    def save(self, field_data):
        """Parse the submitted JSON text back into the internal structure."""
        import json
        return json.loads(field_data)
class Setting():
    """
    Represents a relation between a node in a lexicon config and a
    field in a public-facing form that exposes it to the editor for
    modification.
    """
    def __init__(
            self,
            cfg_key: str,
            field: Field,
            translator: SettingTranslator = SettingTranslator()):
        """
        Creates a setting. Optionally, defines a nontrivial translation
        between internal and public values.
        """
        # Dotted key, e.g. "article.citation.max_total", stored as a path.
        self.cfg_path = cfg_key.split('.')
        self.field = field
        self.translator = translator
    def load(self, cfg: ReadOnlyOrderedDict, field: Field):
        """
        Sets the field's value to the corresponding config node
        """
        # Walk down to the parent of the target node...
        for key in self.cfg_path[:-1]:
            cfg = cast(ReadOnlyOrderedDict, cfg.get(key))
        # ...then read the leaf and translate it for display.
        data = cfg.get(self.cfg_path[-1])
        field.data = self.translator.load(data)
    def save(self, cfg: AttrOrderedDict, field: Field):
        """
        Updates the editable config with this field's value
        """
        for key in self.cfg_path[:-1]:
            cfg = cast(AttrOrderedDict, cfg.get(key))
        data = field.data
        # Translate back to the internal representation before storing.
        cfg[self.cfg_path[-1]] = self.translator.save(data)
def IndexList(form, field):
    """WTForms validator: every line must be a valid index spec, and at most
    one line may use the 'etc' index type."""
    if not field.data:
        raise ValidationError('You must specify an index list.')
    etc_count = 0
    for line in field.data.split('\n'):
        parsed = index_regex.fullmatch(line)
        if parsed is None:
            raise ValidationError(f'Bad index: "{line}"')
        etc_count += 1 if parsed.group(1) == 'etc' else 0
    if etc_count > 1:
        raise ValidationError("Can't have more than one etc index")
class Settings():
    """Declarative catalogue of every editable lexicon setting.

    Each ``s_<name>`` class attribute is a Setting binding a dotted config
    key to a WTForms field (plus an optional translator).  ConfigFormBase
    below iterates settings() to build its form fields dynamically.
    """
    @staticmethod
    def settings():
        # Yield (attr_name, Setting) for every attribute following the
        # "s_" naming convention.
        for name, setting in vars(Settings).items():
            if name.startswith('s_'):
                yield name, setting
    # --- general ---
    s_editor = Setting('editor',
        SelectField('Editor', validators=[DataRequired(), User(True)]),
        translator=UsernameTranslator())
    # --- turn state ---
    s_turnCurrent = Setting('turn.current',
        IntegerField(
            'Current turn',
            widget=NumberInput(),
            validators=[Optional()]))
    s_turnAssignment = Setting('turn.assignment',
        TextAreaField('index assignment raw'),
        translator=TmpAsgnTranslator())
    # --- publishing rules ---
    s_publishDeadlines = Setting('publish.deadlines',
        StringField(
            'Turn deadline, as a crontab specification',
            validators=[Optional()]))
    s_publishAsap = Setting('publish.asap',
        BooleanField(
            'Publish the turn immediately when the last article is accepted'))
    s_publishQuorum = Setting('publish.quorum',
        IntegerField(
            'Quorum to publish incomplete turn',
            widget=NumberInput(),
            validators=[Optional()]))
    s_publishBlockOnReady = Setting('publish.block_on_ready',
        BooleanField(
            'Block turn publish if any articles are awaiting editor review'))
    # --- article citation constraints ---
    s_articleCitationAllowSelf = Setting('article.citation.allow_self',
        BooleanField('Allow players to cite themselves'))
    s_articleCitationMinExtant = Setting('article.citation.min_extant',
        IntegerField(
            'Minimum number of extant articles to cite',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleCitationMaxExtant = Setting('article.citation.max_extant',
        IntegerField(
            'Maximum number of extant articles to cite',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleCitationMinPhantom = Setting('article.citation.min_phantom',
        IntegerField(
            'Minimum number of phantom articles to cite',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleCitationMaxPhantom = Setting('article.citation.max_phantom',
        IntegerField(
            'Maximum number of phantom articles to cite',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleCitationMinTotal = Setting('article.citation.min_total',
        IntegerField(
            'Minimum number of articles to cite in total',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleCitationMaxTotal = Setting('article.citation.max_total',
        IntegerField(
            'Maximum number of articles to cite in total',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleCitationMinChars = Setting('article.citation.min_chars',
        IntegerField(
            'Minimum number of characters to cite articles by',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleCitationMaxChars = Setting('article.citation.max_chars',
        IntegerField(
            'Maximum number of characters to cite articles by',
            widget=NumberInput(),
            validators=[Optional()]))
    # --- article length and addenda ---
    s_articleWordLimitSoft = Setting('article.word_limit.soft',
        IntegerField(
            'Soft word limit',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleWordLimitHard = Setting('article.word_limit.hard',
        IntegerField(
            'Hard word limit',
            widget=NumberInput(),
            validators=[Optional()]))
    s_articleAddendumAllowed = Setting('article.addendum.allowed',
        BooleanField('Allow addendum articles'))
    s_articleAddendumMax = Setting('article.addendum.max',
        IntegerField(
            'Maximum number of addendum articles per character per turn',
            widget=NumberInput(),
            validators=[Optional()]))
class ConfigFormBase(FlaskForm):
    """Base settings form; its fields are attached dynamically from
    Settings by the module-level loop below this class."""
    def __init__(self, lexicon):
        super().__init__()
        # The editor <select> choices are per-lexicon: offer the username of
        # every user who has joined this lexicon.
        editor_field = getattr(self, 'editor', None)
        if editor_field:
            model_factory: ModelFactory = current_app.config['model_factory']
            editor_field.choices = list(map(
                lambda s: (s, s),
                map(
                    lambda uid: model_factory.user(uid).cfg.username,
                    lexicon.cfg.join.joined)))
    def load(self, lexicon):
        """Populate every known form field from the lexicon's config."""
        # name[2:] strips the "s_" prefix to get the form field's name.
        for name, setting in Settings.settings():
            field = getattr(self, name[2:], None)
            if field:
                setting.load(lexicon.cfg, field)
    def save(self, lexicon):
        """Write every known form field back into the lexicon's config."""
        with lexicon.ctx.edit_config() as cfg:
            for name, setting in Settings.settings():
                field = getattr(self, name[2:], None)
                if field:
                    setting.save(cfg, field)
# Attach each Setting's field to the form class under the name without the
# "s_" prefix, so WTForms discovers them as regular form fields.
for k, v in Settings.settings():
    setattr(ConfigFormBase, k[2:], v.field)
|
20,544 | d411d5edf3fb0c9117288cb61835af5c8df54b80 | import theano
import theano.tensor as T
import numpy as np
from neupy.utils import asfloat
from neupy.core.properties import ProperFractionProperty, Property
from .base import MinibatchGradientDescent
from hessian_utils import find_hessian_and_gradient
from neupy.algorithms.utils import (parameters2vector, count_parameters,
iter_parameters, setup_parameter_updates)
__all__ = ('Momentum',)
class Momentum(MinibatchGradientDescent):
    """
    Momentum algorithm for :network:`GradientDescent` optimization.
    Parameters
    ----------
    momentum : float
        Control previous gradient ratio. Defaults to ``0.9``.
    nesterov : bool
        Instead of classic momentum computes Nesterov momentum.
        Defaults to ``False``.
    {MinibatchGradientDescent.Parameters}
    Attributes
    ----------
    {MinibatchGradientDescent.Attributes}
    Methods
    -------
    {MinibatchGradientDescent.Methods}
    Examples
    --------
    Simple example
    >>> import numpy as np
    >>> from neupy import algorithms
    >>>
    >>> x_train = np.array([[1, 2], [3, 4]])
    >>> y_train = np.array([[1], [0]])
    >>>
    >>> mnet = algorithms.Momentum(
    ...     (2, 3, 1),
    ...     verbose=False
    ... )
    >>> mnet.train(x_train, y_train)
    See Also
    --------
    :network:`GradientDescent` : GradientDescent algorithm.
    """
    # Fraction of the previous update carried into the next one.
    momentum = ProperFractionProperty(default=0.9)
    nesterov = Property(default=False, expected_type=bool)
    def init_layers(self):
        # Allocate one shared "previous delta" buffer per trainable
        # parameter; it carries the momentum term between update steps.
        super(Momentum, self).init_layers()
        for layer in self.layers:
            for parameter in layer.parameters:
                parameter_shape = T.shape(parameter).eval()
                parameter.prev_param_delta = theano.shared(
                    name="prev_param_delta_" + parameter.name,
                    value=asfloat(np.zeros(parameter_shape)),
                )
    def init_param_updates(self, layer, parameter):
        # NOTE(review): the Hessian-related code below looks experimental --
        # a full (n x n) Hessian is recomputed for *every* parameter and the
        # shared `hessian` variable is re-created on each call (and the
        # shared var is named 'hessian_inverse' although it stores the
        # Hessian itself).  Confirm this is intentional before relying on it.
        n_parameters = count_parameters(self)
        self.variables.hessian = theano.shared(
            value=asfloat(np.zeros((n_parameters, n_parameters))),
            name='hessian_inverse')
        step = self.variables.step
        gradient = T.grad(self.variables.error_func, wrt=parameter)
        prev_param_delta = parameter.prev_param_delta
        # Classic momentum update; with nesterov=True the momentum factor is
        # applied a second time (lookahead-style correction).
        parameter_delta = self.momentum * prev_param_delta - step * gradient
        if self.nesterov:
            parameter_delta = self.momentum * parameter_delta - step * gradient
        # modified for hessian
        n_parameters = count_parameters(self)
        parameters = list(iter_parameters(self))
        param_vector = parameters2vector(self)
        # penalty_const = asfloat(self.penalty_const)
        hessian_matrix, full_gradient = find_hessian_and_gradient(
            self.variables.error_func, parameters
        )
        # hessian_inverse = T.nlinalg.matrix_inverse(
        #     hessian_matrix + 0.02 * T.eye(n_parameters)
        # )
        # Modified fo hessian
        return [
            (parameter, parameter + parameter_delta),
            (prev_param_delta, parameter_delta),(self.variables.hessian, hessian_matrix)
        ]
|
20,545 | 183ed54a8052d87b6886ca146f032bfa46a13a5d | """Django admin interface for `~watching.models.PageView`.
"""
from watching.models import PageView
from django.contrib import admin
class PageViewAdmin(admin.ModelAdmin):
    """Admin options for PageView: show project, URL and hit count in the
    change-list view."""
    list_display=('project', 'url','count')
# Make PageView manageable through the Django admin with the options above.
admin.site.register(PageView, PageViewAdmin)
|
20,546 | 0be57144a78e4e5d790ba40f9af5d7073e76f66a | class ExtratorArgumentosUrl:
def __init__(self, url):
if self.url_eh_valida(url):
self.url = url
else:
raise LookupError("Url Inválida!!!")
@staticmethod
def url_eh_valida(url):
if url:
return True
else:
return False
def extrai_argumentos(self):
busca_moeda_origem = 'moedaorigem'
busca_moeda_destino = 'moedadestino'
indice_inicial_moeda_destino = self.encontra_indice_inicial(busca_moeda_destino)
indice_inicial_moeda_origem = self.encontra_indice_inicial(busca_moeda_origem)
indice_final_moeda_origem = self.url.find('&')
moeda_origem = self.url[indice_inicial_moeda_origem:indice_final_moeda_origem]
moeda_destino = self.url[indice_inicial_moeda_destino:]
return moeda_origem, moeda_destino
def encontra_indice_inicial(self, moeda_buscada):
return self.url.find(moeda_buscada) + len(moeda_buscada) + 1
def __str__(self):
return self.url
def __len__(self):
return len(self.url) |
20,547 | 4edc251a1de1462d484376c02d52d79df0a9b9f6 | expected_output = {
'vrf': {
'vrf-blue': {
'address_family': {
'ipv4': {
'routes': {
'172.16.10.10/32': {
'active': True,
'metric': 0,
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '10.0.0.2',
'updated': '00:04:47',
'vrf': 'vrf-red',
},
},
},
'route': '172.16.10.10/32',
'route_preference': 200,
'source_protocol': 'bgp',
'source_protocol_codes': 'B',
},
'192.168.1.0/24': {
'active': True,
'next_hop': {
'outgoing_interface': {
'GigabitEthernet2': {
'outgoing_interface': 'GigabitEthernet2',
},
},
},
'route': '192.168.1.0/24',
'source_protocol': 'connected',
'source_protocol_codes': 'C',
},
'192.168.1.1/32': {
'active': True,
'next_hop': {
'outgoing_interface': {
'GigabitEthernet2': {
'outgoing_interface': 'GigabitEthernet2',
},
},
},
'route': '192.168.1.1/32',
'source_protocol': 'local',
'source_protocol_codes': 'L',
},
'192.168.11.11/32': {
'active': True,
'metric': 130816,
'next_hop': {
'next_hop_list': {
1: {
'index': 1,
'next_hop': '192.168.1.2',
'outgoing_interface': 'GigabitEthernet2',
'updated': '01:03:35',
},
},
},
'route': '192.168.11.11/32',
'route_preference': 90,
'source_protocol': 'eigrp',
'source_protocol_codes': 'D',
},
},
},
},
},
},
}
|
20,548 | 462a5705a6783afe2b0d33931cacc71018aeba1c | from back_testing.back_test import TradeSimulator
from back_testing.limit_order_strategies import MarketMaker, MarketMakerSimulator
import back_testing.post_trade as pt
from back_testing.post_trade import get_limit_strategy_statistics
from data_base.data_base import instance as db
import plotter.plotter as pltr
import plotter.back_test_plotting as bpltr
import matplotlib.pyplot as plt
import math
import pandas as pd
import numpy as np
import definitions
# ------- custom inputs ------
bar_size = "100"
period = "morning"
date = "20170131"
ask_reference = definitions.OPEN
bid_reference = definitions.OPEN
threshold = 1
tick_size = 5
time_col = definitions.VOLUME_TIME
position_max_duration = 5
limit_order_standing = 3
take_profit = 5
stop_loss = math.inf
size = 1
# ----- initialize objects -----
bar_df = db.get_vol_bar_wlimit(date, period, bar_size)
trade_sim = TradeSimulator()
mm_sim = MarketMakerSimulator(trade_simulator = trade_sim, ask_reference = ask_reference, bid_reference = bid_reference,
threshold = threshold, tick_size = tick_size, position_max_duration = position_max_duration,
limit_order_standing = limit_order_standing, take_profit = take_profit, stop_loss = stop_loss,
size = size)
# ----- trade ------
mm_sim.trade(bar_df)
trade_df = mm_sim.get_results()
bar_df = pt.process_trade_df(bar_df, trade_df)
result = pt.get_trade_result(bar_df, trade_df)
print(result)
# ------ plot pnl ------
fig, (ax1, ax2) = plt.subplots(2,1)
fig.suptitle(date + " " + period)
ax1.plot(bar_df[time_col], bar_df[definitions.VWAP])
ax1.set_ylabel("VWAP")
ax2.plot(bar_df[time_col], bar_df["pnl"])
ax2.set_ylabel("pnl")
ax2.set_xlabel("volume traded")
plt.show() |
20,549 | 413193df03bf4784fcb20ae7b2dbc0aa8f84a19d | import sys
# Rebind input() to the faster raw stdin reader (competitive-programming I/O).
input = sys.stdin.readline
N = int(input())
solution_list = list(map(int, input().split()))
# Sorted order is required by the two-pointer sweep in binary_search below.
solution_list.sort()
def binary_search(num, idx):
    """Two-pointer sweep over solution_list[idx+1:] for the pair whose sum
    with ``num`` is closest to zero.

    Returns (best |sum|, best [num, a, b] triple).  Despite the name this is
    a two-pointer scan over the sorted list, not a binary search.
    """
    sp = idx + 1
    ep = N - 1
    min_v = float('inf')
    ans = []
    while sp < ep:
        res = num + solution_list[sp] + solution_list[ep]
        # (Removed an unreachable `if sp == ep: break` -- the loop condition
        # already guarantees sp < ep at this point.)
        # `<=` keeps the *latest* triple among equally-close candidates,
        # matching the original tie-breaking behaviour.
        if abs(res) <= min_v:
            ans = [num, solution_list[sp], solution_list[ep]]
            min_v = abs(res)
        if res >= 0:
            ep -= 1  # sum too large: shrink from the big end
        else:
            sp += 1  # sum too small: grow from the small end
    return min_v, ans
def solution():
    """Try each element as the smallest of the triple and keep the triple
    whose sum is closest to zero (ties resolved in favor of later ones)."""
    min_v = float('inf')
    ans = []
    for i in range(N):
        res = binary_search(solution_list[i], i)
        # `>=` lets an equally good later triple replace the current answer.
        if min_v >= res[0]:
            min_v = res[0]
            ans = res[1]
    return ans
# Print the three chosen numbers space-separated.
print(*solution())
|
20,550 | 40099757b16faa29a31b353ec73165abdd472685 | '''Ejercicio 3.
Escribir un programa que guarde en un diccionario los precios de las frutas de la tabla, pregunte al usuario por una fruta, un número de kilos y muestre por pantalla el precio de ese número de kilos de fruta. Si la fruta no está en el diccionario debe mostrar un mensaje informando de ello.'''
def run():
    """Pregunta por una fruta y un peso en kilos y muestra el precio total."""
    frutas = {'Platano': 1.35, 'Manzana': 0.80, 'Pera': 0.85, 'Naranja': 0.70}
    try:
        # capitalize() normalizes user input to the dict's key casing.
        fruta = str(input('que fruta queres saber su precio: ')).capitalize()
        if fruta not in frutas:
            print(f'Lo siento no tengo el precio de la fruta {fruta}.')
        else:
            cantidad = float(input('cuantos kilos son: '))
            precio = frutas[fruta] * cantidad
            print(f'{cantidad}Kg. de {fruta} tiene un precio de: {round(precio, 2)}')
    except ValueError:
        # BUG FIX: the original bare `except:` swallowed *everything*,
        # including KeyboardInterrupt/SystemExit and real bugs.  Only the
        # float() conversion is expected to fail here.
        print('Lo siento los datos que me pides son incorrectos.')


if __name__ == '__main__':
    run()
20,551 | 96d5f68be1f9fc792ffa67b3dd624134de371ebd | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
18 <= input n <= 24
0+4=4 <= print answer
0+11=11
1+10=11
2+2=4
2+7=9
4+0=4
7+2=9
10+1=11
11+0=11
一共可以拼出9个不同的等式
'''
# Quiz url
# http://bbs.codeaha.com/problem-12026.html
# read
# Matchsticks available for digits: the puzzle reserves 4 sticks for '+' and '='.
n = int(input()) - 4
# Matchstick cost of each decimal digit 0-9.
L = [6, 2, 5, 5, 4, 5, 6, 3, 7, 6]
def fun(x):
    """Total matchsticks needed to display the non-negative integer ``x``.

    Sums the per-digit cost from the module-level table L (this script only
    ever passes values in range(2224), so x is always non-negative).
    """
    return sum(L[int(digit)] for digit in str(x))
sum = 0  # count of valid equations (NOTE: shadows the built-in sum)
# Brute-force every a+b=c with a, b in [0, 1111]; count the equations whose
# total matchstick cost (digits of a, b and c) is exactly n.
for a in range(1112):
    for b in range(1112):
        c = a + b
        if fun(a) + fun(b) + fun(c) == n:
            print('%d+%d=%d'%(a, b, c))
            sum += 1
# Re-open fd 1 as UTF-8 so the Chinese summary prints regardless of locale.
utf8stdout = open(1, 'w', encoding='utf-8')
print('一共可以拼出%d个不同的等式' % sum, file=utf8stdout)
|
20,552 | be4edc59ef244812edb3c4f43203ab20abc14ec9 | # Author: Bohua Zhan
from kernel.type import Type, TFun, boolT
from kernel.term import Term, Var, Const
from kernel.macro import global_macros
from logic import nat
from logic import function
from logic import logic
from logic.conv import arg_conv, then_conv, top_conv, beta_conv, binop_conv, \
every_conv, rewr_conv, assums_conv, beta_norm
from logic.proofterm import ProofTerm, ProofTermMacro, ProofTermDeriv
from logic.logic_macro import apply_theorem
from prover import z3wrapper
"""Automation for Hoare logic."""
# Typed constructors for terms of the embedded imperative language (the
# "com" type family) and for the Hoare-logic predicates over it.
# T is always the state type.
def comT(T):
    """Type of commands over state type T."""
    return Type("com", T)
def Skip(T):
    """The do-nothing command."""
    return Const("Skip", comT(T))
def Assign(Ta, Tb):
    """Assignment: updates a function state (Ta => Tb) at one key."""
    return Const("Assign", TFun(Ta, TFun(TFun(Ta, Tb), Tb), comT(TFun(Ta, Tb))))
def Seq(T):
    """Sequential composition of two commands."""
    return Const("Seq", TFun(comT(T), comT(T), comT(T)))
def Cond(T):
    """If-then-else on a boolean state predicate."""
    return Const("Cond", TFun(TFun(T, boolT), comT(T), comT(T), comT(T)))
def While(T):
    """While loop: guard predicate, annotated invariant, body."""
    return Const("While", TFun(TFun(T, boolT), TFun(T, boolT), comT(T), comT(T)))
def Sem(T):
    """Big-step semantics relation: Sem com st st'."""
    return Const("Sem", TFun(comT(T), T, T, boolT))
def Valid(T):
    """Hoare triple validity: Valid P com Q."""
    return Const("Valid", TFun(TFun(T, boolT), comT(T), TFun(T, boolT), boolT))
def Entail(T):
    """Entailment between two state predicates."""
    return Const("Entail", TFun(TFun(T, boolT), TFun(T, boolT), boolT))
# Normalize evaluation of function as well as arithmetic.
norm_cv = then_conv(top_conv(function.fun_upd_eval_conv()), nat.norm_full())
# Normalize a condition: evaluate state lookups/arithmetic, decide
# natural-number equalities, then simplify the resulting boolean expression.
norm_cond_cv = every_conv(
    norm_cv,
    top_conv(nat.nat_eq_conv()),
    logic.norm_bool_expr()
)
def eval_Sem(thy, com, st):
    """Evaluates the effect of program com on state st.

    Returns a proof term whose proposition is ``Sem com st st'`` where st'
    is the computed final state.  Raises NotImplementedError on an unknown
    command head.
    """
    f, args = com.strip_comb()
    T = st.get_type()
    if f.is_const_name("Skip"):
        # Skip leaves the state unchanged.
        return apply_theorem(thy, "Sem_Skip", tyinst={"a": T}, inst={"s": st})
    elif f.is_const_name("Assign"):
        # Assign a b: update the state function at a, then normalize the
        # arithmetic inside the written value.
        a, b = args
        Ta = a.get_type()
        Tb = b.get_type().range_type()
        pt = apply_theorem(thy, "Sem_Assign", tyinst={"a": Ta, "b": Tb}, inst={"a": a, "b": b, "s": st})
        return pt.on_arg(thy, arg_conv(norm_cv))
    elif f.is_const_name("Seq"):
        # Seq c1 c2: evaluate c1, feed its final state into c2.
        c1, c2 = args
        pt1 = eval_Sem(thy, c1, st)
        pt2 = eval_Sem(thy, c2, pt1.prop.arg)
        pt = apply_theorem(thy, "Sem_seq", pt1, pt2)
        return pt.on_arg(thy, function.fun_upd_norm_one_conv())
    elif f.is_const_name("Cond"):
        # Cond b c1 c2: decide the branch by normalizing b applied to st.
        b, c1, c2 = args
        b_st = beta_norm(thy, b(st))
        b_eval = norm_cond_cv.get_proof_term(thy, b_st)
        if b_eval.prop.arg == logic.true:
            b_res = rewr_conv("eq_true", sym=True).apply_to_pt(thy, b_eval)
            pt1 = eval_Sem(thy, c1, st)
            return apply_theorem(thy, "Sem_if1", b_res, pt1, concl=Sem(T)(com, st, pt1.prop.arg))
        else:
            b_res = rewr_conv("eq_false", sym=True).apply_to_pt(thy, b_eval)
            pt2 = eval_Sem(thy, c2, st)
            return apply_theorem(thy, "Sem_if2", b_res, pt2, concl=Sem(T)(com, st, pt2.prop.arg))
    elif f.is_const_name("While"):
        # While b inv c: unroll one iteration when the guard holds (then
        # recurse on the whole loop), otherwise the loop is a no-op.  The
        # recursion terminates only if the program itself does.
        b, inv, c = args
        b_st = beta_norm(thy, b(st))
        b_eval = norm_cond_cv.get_proof_term(thy, b_st)
        if b_eval.prop.arg == logic.true:
            b_res = rewr_conv("eq_true", sym=True).apply_to_pt(thy, b_eval)
            pt1 = eval_Sem(thy, c, st)
            pt2 = eval_Sem(thy, com, pt1.prop.arg)
            pt = apply_theorem(thy, "Sem_while_loop", b_res, pt1, pt2,
                               concl=Sem(T)(com, st, pt2.prop.arg), inst={"s3": pt1.prop.arg})
            return pt.on_arg(thy, function.fun_upd_norm_one_conv())
        else:
            b_res = rewr_conv("eq_false", sym=True).apply_to_pt(thy, b_eval)
            return apply_theorem(thy, "Sem_while_skip", b_res, concl=Sem(T)(com, st, st))
    else:
        raise NotImplementedError
class eval_Sem_macro(ProofTermMacro):
    """Prove a theorem of the form Sem com st st2.

    args is the full statement; the macro evaluates com on st and checks
    that the computed final state matches the claimed st2.
    """
    def __init__(self):
        self.level = 10
        self.sig = Term
    def get_proof_term(self, thy, args, pts):
        # This macro consumes no previously proved theorems.
        assert len(pts) == 0, "eval_Sem_macro"
        f, (com, st, st2) = args.strip_comb()
        pt = eval_Sem(thy, com, st)
        assert st2 == pt.prop.arg, "eval_Sem_macro: wrong result."
        return pt
def compute_wp(thy, T, c, Q):
    """Compute the weakest precondition for the given command
    and postcondition. The computation is by case analysis on
    the form of c. Returns the validity theorem.
    """
    if c.head.is_const_name("Assign"):  # Assign a b
        a, b = c.args
        s = Var("s", T)
        # wp(a := b, Q) is Q applied to the state updated at a.
        P2 = Term.mk_abs(s, Q(function.mk_fun_upd(s, a, b(s).beta_conv())))
        return apply_theorem(thy, "assign_rule", inst={"b": b}, concl=Valid(T)(P2, c, Q))
    elif c.head.is_const_name("Seq"):  # Seq c1 c2
        c1, c2 = c.args
        # Work backwards: wp of c2 against Q first, then wp of c1 against that.
        wp1 = compute_wp(thy, T, c2, Q)  # Valid Q' c2 Q
        wp2 = compute_wp(thy, T, c1, wp1.prop.args[0])  # Valid Q'' c1 Q'
        return apply_theorem(thy, "seq_rule", wp2, wp1)
    elif c.head.is_const_name("While"):  # While b I c
        _, I, _ = c.args
        # Use the loop annotation I as the precondition; while_rule's first
        # premise is left as an assumption and the second is discharged by a
        # recursive vcg call.
        pt = apply_theorem(thy, "while_rule", concl=Valid(T)(I, c, Q))
        pt0 = ProofTerm.assume(pt.assums[0])
        pt1 = vcg(thy, T, pt.assums[1])
        return ProofTerm.implies_elim(pt, pt0, pt1)
    else:
        raise NotImplementedError
def vcg(thy, T, goal):
    """Compute the verification conditions for the goal.

    goal has the form Valid P c Q; the entailment from P to the computed
    weakest precondition is left as an assumption of the returned proof term.
    """
    P, c, Q = goal.args
    pt = compute_wp(thy, T, c, Q)
    entail_P = ProofTerm.assume(Entail(T)(P, pt.prop.args[0]))
    return apply_theorem(thy, "pre_rule", entail_P, pt)
class vcg_macro(ProofTermMacro):
    """Compute the verification conditions for a hoare triple, then
    normalizes the verification conditions.
    """
    def __init__(self):
        self.level = 10
        self.sig = Term
    def get_proof_term(self, thy, goal, pts):
        f, (P, c, Q) = goal.strip_comb()
        T = Q.get_type().domain_type()
        # Generate the proof with its verification conditions as hypotheses,
        # then discharge each hypothesis into an implication.
        pt = vcg(thy, T, goal)
        for A in reversed(pt.hyps):
            pt = ProofTerm.implies_intr(A, pt)
        # Normalize the collected conditions: unfold Entail, beta-reduce, and
        # evaluate function updates.
        return pt.on_assums(thy, rewr_conv("Entail_def"), top_conv(beta_conv()),
                            top_conv(function.fun_upd_eval_conv()))
def vcg_solve(thy, goal):
    """Compute the verification conditions for a hoare triple, then
    solves the verification conditions using SMT.
    """
    # Run the vcg macro, prove each resulting condition with z3, and combine
    # everything via implication elimination.
    pt = ProofTermDeriv("vcg", thy, goal, [])
    vc_pt = [ProofTermDeriv("z3", thy, vc, []) for vc in pt.assums]
    return ProofTerm.implies_elim(pt, *vc_pt)
# Register the macros globally so proof scripts can invoke them by name.
global_macros.update({
    "eval_Sem": eval_Sem_macro(),
    "vcg": vcg_macro(),
})
|
20,553 | 0a9894154bc29d931500af11f4fee8920f7918fc | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\primitives\routing_utils.py
# Compiled at: 2018-08-20 22:30:05
# Size of source mod 2**32: 7207 bytes
from sims4.math import MAX_FLOAT
from sims4.tuning.tunable import Tunable
import operator, routing, sims4.log, build_buy, services
logger = sims4.log.Logger('RoutingUtils')
class DistanceEstimationTuning:
    # Tuning constants for the distance-estimation heuristics below.
    DISTANCE_PER_FLOOR = Tunable(float, 50, description='\n The cost per floor difference in the two points. Ex: if this is tuned to 50 and a Sim is trying to use an object on the third floor of their house while on the first floor, the distance estimate would be 100 meters.')
    DISTANCE_PER_ROOM = Tunable(float, 10, description='\n The cost per room between the points. This should be the average diameter of rooms that people tend to build.')
    # Sentinel cost returned when no route between the points exists.
    UNREACHABLE_GOAL_COST = 100000
def get_block_id_for_node(node):
    """Return the build-buy block id containing the given routing node."""
    # node.position is unpacked into a Vector3; the surface's secondary_id
    # selects the floor level.
    block_id = build_buy.get_block_id(services.current_zone_id(), (sims4.math.Vector3)(*node.position), node.routing_surface_id.secondary_id)
    return block_id
def estimate_distance(obj_a, obj_b, options=routing.EstimatePathDistance_DefaultOptions):
    """Estimate the travel distance between two objects.

    Objects held in an inventory are replaced either by the Sim carrying
    them, or by the set of objects owning the inventory; in the latter case
    the minimum estimate over all owners is returned.
    """
    if obj_a is obj_b:
        return 0.0
    else:
        inv = obj_a.get_inventory()
        if inv is not None:
            if inv.owner.is_sim:
                obj_a = inv.owner
            else:
                # Non-Sim inventory: route to any of its owning objects.
                obj_a_choices = inv.owning_objects_gen()
                obj_a = None
        inv = obj_b.get_inventory()
        if inv is not None:
            if inv.owner.is_sim:
                obj_b = inv.owner
            else:
                obj_b_choices = inv.owning_objects_gen()
                obj_b = None
        best_dist = MAX_FLOAT
        if obj_a is None:
            if obj_b is None:
                # Both are inside object inventories: minimum over all pairs.
                for a in obj_a_choices:
                    for b in obj_b_choices:
                        dist = estimate_distance_helper(a, b, options=options)
                        if dist < best_dist:
                            best_dist = dist
            else:
                for a in obj_a_choices:
                    dist = estimate_distance_helper(a, obj_b, options=options)
                    if dist < best_dist:
                        best_dist = dist
            return best_dist
        if obj_b is None:
            # Recurse so obj_a's resolution (already done) is reused per owner.
            for b in obj_b_choices:
                dist = estimate_distance(obj_a, b, options=options)
                if dist < best_dist:
                    best_dist = dist
            return best_dist
        return estimate_distance_helper(obj_a, obj_b, options=options)
def estimate_distance_helper(obj_a, obj_b, options=routing.EstimatePathDistance_DefaultOptions):
    """Heuristic distance: 2D separation plus a tuned penalty per floor of
    difference between the two objects' routing surfaces."""
    delta_floors = abs(obj_a.intended_routing_surface.secondary_id
                       - obj_b.intended_routing_surface.secondary_id)
    offset = obj_a.intended_position_with_forward_offset - obj_b.intended_position_with_forward_offset
    return offset.magnitude_2d() + delta_floors * DistanceEstimationTuning.DISTANCE_PER_FLOOR
def estimate_distance_between_multiple_points(sources, dests, routing_context=None, allow_permissive_connections=False):
    """Minimum routing estimate between source/dest point sets, falling back
    to UNREACHABLE_GOAL_COST when the router reports no connection."""
    best = routing.estimate_distance_between_multiple_points(sources, dests, routing_context, allow_permissive_connections)
    return best if best is not None else DistanceEstimationTuning.UNREACHABLE_GOAL_COST
def sorted_estimated_distances_between_multiple_handles(source_handles, dest_handles, routing_context=None, allow_permissive_connections=False):
    """Batch-estimate distances between handle sets, sorted ascending by the
    distance field (index 2); returns [] when nothing can be estimated."""
    if not source_handles or not dest_handles:
        return []
    distances = routing.estimate_path_batch(source_handles, dest_handles, routing_context=routing_context, allow_permissive_connections=allow_permissive_connections,
      ignore_objects=True)
    if not distances:
        return []
    distances.sort(key=operator.itemgetter(2))
    return distances
def estimate_distance_between_points(position_a, routing_surface_a, position_b, routing_surface_b, routing_context=None, allow_permissive_connections=False):
    """Single-pair convenience wrapper around the multi-point estimator."""
    sources = ((position_a, routing_surface_a),)
    dests = ((position_b, routing_surface_b),)
    return estimate_distance_between_multiple_points(sources, dests, routing_context, allow_permissive_connections)
20,554 | 62267e57168c285a430ad670b2338f9a99e242d5 | #!/user/bin/python
# coding=utf-8
import sys
import cv2
import torch
import torchvision
import initialize
import atexit
from excutions._train import train
from excutions._test import test, multiscale_test
from os.path import join, split, isfile
from PIL import Image
from torch.utils.data import DataLoader
from torch.optim import lr_scheduler
from models.Fusion import Fusion
from functions.cross_entropy_loss_nCRF import cross_entropy_loss_nCRF
from constant import args, TMP_DIR, TEST_LIST_DIR
from utils.save_checkpoint import save_checkpoint
from utils.Averagvalue import Averagvalue
from utils.Logger import Logger
from utils.weights_init import normal_weights_init, pretrained_weights_init
from dataloaders.BSDLoader import BSDLoader
def main():
    """Train the Fusion edge-detection model, testing after every epoch and
    checkpointing model + optimizer state to TMP_DIR."""
    # dataset
    train_dataset = BSDLoader(root=args.dataset, split="train")
    test_dataset = BSDLoader(root=args.dataset, split="test")
    train_loader = DataLoader(
        train_dataset, batch_size=args.batch_size,
        num_workers=8, drop_last=True, shuffle=True)
    test_loader = DataLoader(
        test_dataset, batch_size=args.batch_size,
        num_workers=8, drop_last=True, shuffle=False)
    with open(TEST_LIST_DIR, 'r') as f:
        test_list = f.readlines()
    # Keep only the filename component of each test path.
    test_list = [split(i.rstrip())[1] for i in test_list]
    assert len(test_list) == len(test_loader), "%d vs %d" % (
        len(test_list), len(test_loader))
    # model
    # model = N_CRF()
    model = Fusion()
    model.cuda()
    if args.enable_pretrain:
        model.apply(pretrained_weights_init)
    else:
        model.apply(normal_weights_init)
    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'"
                  .format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    # For the nCRF network
    optimizer = torch.optim.SGD(model.parameters(
    ), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    scheduler = lr_scheduler.StepLR(
        optimizer, step_size=args.stepsize, gamma=args.gamma)
    # Log: redirect stdout into the log file so prints are captured.
    # NOTE(review): '%d' truncates a fractional learning rate in the log
    # filename -- confirm whether '%f' was intended.
    log = Logger(join(TMP_DIR, '%s-%d-log.txt' % ('sgd', args.lr)))
    sys.stdout = log
    train_loss = []
    train_loss_detail = []
    # Baseline evaluation before any training.
    test(model, test_loader, epoch=None, test_list=test_list,
         save_dir=join(TMP_DIR, 'init-testing-record-view'))
    for epoch in range(args.start_epoch, args.maxepoch):
        tr_avg_loss, tr_detail_loss = train(
            train_loader, model, optimizer, epoch,
            save_dir=join(TMP_DIR, 'epoch-%d-training-record' % epoch))
        test(model, test_loader, epoch=epoch, test_list=test_list,
             save_dir=join(TMP_DIR, 'epoch-%d-testing-record-view' % epoch))
        multiscale_test(model, test_loader, epoch=epoch, test_list=test_list,
                        save_dir=join(TMP_DIR, 'epoch-%d-testing-record' % epoch))
        # write log
        log.flush()
        # Save checkpoint.  BUG FIX: the original called os.path.join, but
        # this module only does `from os.path import join, ...` and never
        # imports `os`, so saving the first checkpoint raised NameError.
        save_file = join(
            TMP_DIR, 'checkpoint_epoch{}.pth'.format(epoch))
        save_checkpoint({
            'epoch': epoch,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict()
        }, filename=save_file)
        scheduler.step()  # will adjust learning rate
        # save train/val loss/accuracy, save every epoch in case of early stop
        train_loss.append(tr_avg_loss)
        train_loss_detail += tr_detail_loss


if __name__ == '__main__':
    main()
|
20,555 | 1929da58da2251578eb89e4d57d08146781a0b26 | import json
import click
from click import echo
from click_spinner import spinner
from funcy import silent
from prettytable import PrettyTable
from steem.amount import Amount
from tabulate import tabulate
from .config import get_config, new_config, set_config
from .feeds import run_price_feeds
from .markets import Markets
from .utils import generate_signing_key
from .watchdog import (
watchdog,
enable_witness,
disable_witness,
is_witness_enabled,
current_signing_key,
total_missed,
get_witness,
witness_create,
witness_set_props,
)
def heading(title):
    """Echo *title* with a trailing colon and a dashed underline."""
    underline = (len(title) + 1) * '-'
    echo(f"{title}:\n" + underline)
def output(data, title=None):
    """Print *data*, optionally under a dashed heading.

    Dicts are pretty-printed as indented JSON; everything else is echoed
    verbatim. A trailing blank line separates consecutive sections.
    """
    if title:
        heading(title)
    # isinstance instead of `type(data) == dict` so dict subclasses
    # (e.g. OrderedDict) are also rendered as JSON.
    if isinstance(data, dict):
        print(json.dumps(data, indent=4))
    else:
        echo(data)
    echo('')
# Click context: accept -h as well as the default --help.
context_settings = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=context_settings)
@click.pass_context
def conductor(ctx):
    """Steem Witness Toolkit."""
    # Every subcommand except these three requires an existing witness
    # config; ensure_witness_hook() exits with instructions otherwise.
    if ctx.invoked_subcommand not in ['init', 'tickers', 'keygen']:
        ensure_witness_hook()
# Config Commands
# ---------------
@conductor.command()
def init():
    """Add your witness account."""
    account = click.prompt('What is your witness account name?', type=str)
    witness = get_witness(account)
    if witness:
        # Witness already exists on-chain: import its settings locally.
        c = new_config()
        c['witness']['name'] = account
        c['witness']['url'] = witness['url']
        c['props'] = witness['props']
        set_config(c)
        echo('Imported a witness %s from its existing settings.' % account)
    else:
        # New witness: gather all properties interactively, save them, then
        # broadcast the witness-create operation.
        click.confirm('Witness %s does not exist. Would you like to create it?' % account, abort=True)
        c = new_config()
        c['witness']['name'] = account
        c['witness']['url'] = click.prompt(
            'What should be your witness URL?',
            default=c['witness']['url'],
        )
        creation_fee = click.prompt(
            'How much do you want the account creation fee to be (STEEM)?',
            default=c['props']['account_creation_fee'],
        )
        # A bare number gets the STEEM symbol appended before Amount parsing.
        if silent(float)(creation_fee):
            creation_fee = "%s STEEM" % float(creation_fee)
        c['props']['account_creation_fee'] = str(Amount(creation_fee))
        c['props']['maximum_block_size'] = click.prompt(
            'What should be the maximum block size?',
            default=c['props']['maximum_block_size'],
        )
        c['props']['sbd_interest_rate'] = click.prompt(
            'What should be the SBD interest rate?',
            default=c['props']['sbd_interest_rate'],
        )
        c['props']['account_subsidy_budget'] = click.prompt(
            'What should be the account subsidy budget?',
            default=c['props']['account_subsidy_budget'],
        )
        c['props']['account_subsidy_decay'] = click.prompt(
            'What should be the account subsidy decay?',
            default=c['props']['account_subsidy_decay'],
        )
        set_config(c)
        witness_create(c)
        echo('Witness %s created!' % account)
@conductor.command()
def update():
    """Update witness properties."""
    # Prompt for every mutable property, defaulting to the stored value.
    c = get_config()
    c['witness']['url'] = click.prompt(
        'What should be your witness URL?',
        default=c['witness']['url'],
    )
    creation_fee = click.prompt(
        'How much do you want the account creation fee to be (STEEM)?',
        default=c['props']['account_creation_fee'],
    )
    # A bare number gets the STEEM symbol appended before Amount parsing.
    if silent(float)(creation_fee):
        creation_fee = "%s STEEM" % float(creation_fee)
    c['props']['account_creation_fee'] = str(Amount(creation_fee))
    c['props']['maximum_block_size'] = click.prompt(
        'What should be the maximum block size?',
        default=c['props']['maximum_block_size'],
    )
    c['props']['sbd_interest_rate'] = click.prompt(
        'What should be the SBD interest rate?',
        default=c['props']['sbd_interest_rate'],
    )
    c['props']['account_subsidy_budget'] = click.prompt(
        'What should be the account subsidy budget?',
        default=c['props']['account_subsidy_budget'],
    )
    c['props']['account_subsidy_decay'] = click.prompt(
        'What should be the account subsidy decay?',
        default=c['props']['account_subsidy_decay'],
    )
    # verify
    output(c, '\nConfiguration')
    click.confirm('Do you want to commit the updated values?', abort=True)
    # update
    set_config(c)
    witness_set_props(c['witness']['url'], c['props'])
    output('Witness %s Updated' % c['witness']['name'])
@conductor.command(name='keygen')
def keygen():
    """Generate a random signing key-pair."""
    pk, pub = generate_signing_key()
    # Show both halves side by side so the operator knows where each goes.
    t = PrettyTable(["Private (install on your witness node)",
                     "Public (publish with 'conductor enable' command)"])
    t.align = "l"
    t.add_row([pk, pub])
    output(t, '')
# Operational Commands
# --------------------
@conductor.command()
@click.option('--sbd-peg/--no-sbd-peg', default=False)
def feed(sbd_peg):
    """Update Price Feeds."""
    # Delegates to the feeds module; --sbd-peg enables peg support.
    run_price_feeds(support_peg=sbd_peg)
@conductor.command()
@click.argument('signing_key')
def enable(signing_key):
    """Enable a witness, or change key."""
    # enable_witness returns a falsy value when the key is already current.
    tx = enable_witness(signing_key) or 'This key is already set'
    output(tx)
@conductor.command()
@click.confirmation_option(help='Are you sure you want to stop the witness?')
def disable():
    """Disable a witness."""
    # disable_witness returns a falsy value when already disabled.
    tx = disable_witness() or 'Witness already disabled'
    output(tx)
@conductor.command(name='kill-switch')
@click.option('--disable-after', '-n', default=5)
@click.option('--keys', '-k', default=None, multiple=True)
def kill_switch(disable_after, keys):
    """Monitor for misses w/ disable."""
    # Delegates miss-monitoring to watchdog(); -k may be repeated to supply
    # multiple signing keys.
    watchdog(disable_after, keys)
# Status Commands
# ---------------
@conductor.command()
def tickers():
    """Print Tickers."""
    # Fetch all three quotes while showing a spinner, then render a table.
    with spinner():
        m = Markets()
        data = {
            "BTC/USD": round(m.btc_usd(), 2),
            "SBD/USD": round(m.sbd_usd_implied(), 3),
            "STEEM/USD": round(m.steem_usd_implied(), 3),
        }
    echo(tabulate(
        data.items(),
        headers=['Symbol', 'Price'],
        numalign="right", tablefmt='orgtbl'))
@conductor.command(name='status')
def status():
    """Print basic witness info."""
    # Gather on-chain state behind a spinner, then print two sections.
    with spinner():
        is_enabled = is_witness_enabled()
        signing_key = current_signing_key()
        misses = total_missed()
    t = PrettyTable(["Enabled", "Misses", "Key"])
    t.align = "l"
    t.add_row([is_enabled, misses, signing_key])
    output(t, 'Status')
    output(get_config(), 'Configuration')
def ensure_witness_hook():
    """ Ensure the config file exists. Sync witness props from steem."""
    try:
        c = get_config()
        # Refresh the local copy with the on-chain URL and properties.
        witness = get_witness(c['witness']['name'])
        c['witness']['url'] = witness['url']
        c['props'] = witness['props']
        set_config(c)
    except FileNotFoundError:
        # No config file yet (presumably raised by get_config).
        print("Your witness has not been setup yet. Please run:\n",
              "conductor init")
        quit(1)
|
20,556 | cbd6abbd08b466a4002b4b1bb0d935167b5494b6 | import sys
sys.path.append('../..')
import re
from collections import defaultdict
from map import Map, Node
USE_EXAMPLE = False
PRINT_DEBUG = False
class Board:
    """The two pawn positions (1..10) on the circular Dirac Dice track."""

    def __init__(self, p1, p2) -> None:
        self.p1 = p1
        self.p2 = p2

    def movep1(self, amt):
        # Advance pawn 1 and wrap back onto the 1..10 track.
        pos = self.p1 + amt
        while pos > 10:
            pos -= 10
        self.p1 = pos

    def movep2(self, amt):
        # Advance pawn 2 and wrap back onto the 1..10 track.
        pos = self.p2 + amt
        while pos > 10:
            pos -= 10
        self.p2 = pos
class Die100:
    """Deterministic d100 rolling 1, 2, ..., 100, 1, ... and counting rolls."""

    def __init__(self) -> None:
        self.pos = 0
        self.count = 0

    def roll(self):
        """Return the next face and bump the total roll count."""
        self.count += 1
        self.pos = 1 if self.pos >= 100 else self.pos + 1
        return self.pos
# Part 1
with open('example.txt' if USE_EXAMPLE else 'input.txt') as file:
    # NOTE(review): [-1] keeps only the final character, so this assumes
    # single-digit starting positions in the input -- confirm.
    p1 = int(file.readline().strip()[-1])
    p2 = int(file.readline().strip()[-1])
scores = [0,0]
b = Board(p1, p2)
d = Die100()
win = None
# Alternate three deterministic rolls per player until someone reaches 1000.
while True:
    diceRoll = d.roll() + d.roll() + d.roll()
    b.movep1(diceRoll)
    scores[0] += b.p1
    if scores[0] >= 1000:
        win = 'p1'
        break
    diceRoll = d.roll() + d.roll() + d.roll()
    b.movep2(diceRoll)
    scores[1] += b.p2
    if scores[1] >= 1000:
        win = 'p2'
        break
# print(win + ' wins with ' + str(scores[0] if win == 'p1' else scores[1]))
# Puzzle answer: losing score times the total number of die rolls.
if win == 'p1':
    print(scores[1] * d.count)
else:
    print(scores[0] * d.count)
# Memo: (p1, p2, s1, s2) -> (wins for the player about to move, wins for the
# other player), counted over all Dirac-die universes.
winnings = {}

# How many of the 27 three-roll universes produce each total (3..9).  Rolling
# the quantum die three times only ever yields 7 distinct sums, so recursing
# once per sum (weighted by its multiplicity) is equivalent to the original
# 27-way recursion but does ~4x less work.
_ROLL_COUNTS = {3: 1, 4: 3, 5: 6, 6: 7, 7: 6, 8: 3, 9: 1}

def takeTurn(p1, p2, s1, s2):
    """Count win universes from this game state.

    p1/s1 belong to the player about to move, p2/s2 to the other player.
    Returns (wins for the player to move, wins for the other player).
    """
    if s1 >= 21:
        return (1, 0)
    if s2 >= 21:
        return (0, 1)
    key = (p1, p2, s1, s2)
    if key in winnings:
        return winnings[key]
    wins_me, wins_other = 0, 0
    for total, universes in _ROLL_COUNTS.items():
        new_pos = p1 + total
        while new_pos > 10:
            new_pos -= 10
        # Roles swap for the recursive call, so its first component is the
        # opponent's wins and its second is ours.
        other_me, other_other = takeTurn(p2, new_pos, s2, s1 + new_pos)
        wins_me += universes * other_other
        wins_other += universes * other_me
    winnings[key] = (wins_me, wins_other)
    return winnings[key]
# Part 2
with open('example.txt' if USE_EXAMPLE else 'input.txt') as file:
    # NOTE(review): [-1] assumes single-digit starting positions -- confirm.
    p1 = int(file.readline().strip()[-1])
    p2 = int(file.readline().strip()[-1])
# takeTurn returns (p1 wins, p2 wins); the answer is the larger count.
results = takeTurn(p1, p2, 0, 0)
print(max(results))
|
20,557 | 39347d6eae39bb17ef9e76804dcfa43ce0f4009b | # Tests for bleparser
|
20,558 | d98950086bead651b1d0724010d4dcb5607cc261 | output_a = "{:15.3f}".format(52.273)
output_b = "{:15.2f}".format(52.273)
output_c = "{:15.1f}".format(52.273)
print(output_a)
print(output_b)
print(output_c) |
20,559 | 2dad602293642873154ca853cea80fcf2ce5c9c8 | from xml.dom import minidom
xml="""<HostipLookupResultSet xmlns:gml="http://www.opengis.net/gml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" version="1.0.1" xsi:noNamespaceSchemaLocation="http://www.hostip.info/api/hostip-1.0.1.xsd">
<gml:description>This is the Hostip Lookup Service</gml:description>
<gml:name>hostip</gml:name>
<gml:boundedBy>
<gml:Null>inapplicable</gml:Null>
</gml:boundedBy>
<gml:featureMember>
<Hostip>
<ip>203.199.146.114</ip>
<gml:name>Pune</gml:name>
<countryName>INDIA</countryName>
<countryAbbrev>IN</countryAbbrev>
<!-- Co-ordinates are available as lng,lat -->
<ipLocation>
<gml:pointProperty>
<gml:Point srsName="http://www.opengis.net/gml/srs/epsg.xml#4326">
<gml:coordinates>73.8667,18.5333</gml:coordinates>
</gml:Point>
</gml:pointProperty>
</ipLocation>
</Hostip>
</gml:featureMember>
</HostipLookupResultSet>"""
def coordinates(xml):
    """Extract the (longitude, latitude) string pair from a hostip.info
    GML response.

    Returns None when no <gml:coordinates> element (or an empty one) is
    present.
    """
    doc = minidom.parseString(xml)
    nodes = doc.getElementsByTagName("gml:coordinates")
    if nodes:
        coordinate = nodes[0].childNodes[0].nodeValue
        if coordinate:
            # BUG FIX: per the API comment the value arrives as "lng,lat",
            # but the original assigned longitude = parts[1] and latitude =
            # parts[0], returning the pair swapped.
            parts = coordinate.split(",")
            longitude = parts[0]
            latitude = parts[1]
            return (longitude, latitude)
print coordinates(xml)
|
20,560 | a8a848f254520a0a2468c953acf8eb78196deeb7 | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# First plot: explore a distribution.  (Translated from Chinese; the original
# comment mentions heroes' eye colours and looks copied from a seaborn
# example -- the code actually plots per-name feature differences.)
f, ax = plt.subplots(figsize=(12, 20))
data = np.load('feature_diff.npy')
with open('V3_namelist.txt', 'r') as f_r:
    extra_names = f_r.readlines()
extra_names = extra_names  # no-op; presumably a leftover from slicing, e.g. [:100]
for i,v in enumerate(extra_names):
    extra_names[i]=extra_names[i].strip('\n')
    extra_names[i]=extra_names[i].replace(' ','-')
print(extra_names)
# sns.barplot(y=[i for i in range(100)], x=data[:100], orient='h')
sns.barplot(y=extra_names, x=data, orient='h')
plt.xticks(rotation='horizontal')
plt.show()
|
20,561 | 0c9aa0856f014c69b4181452c651df96982e6184 | def mymin(a, b):
(x, y) = a
(c, d) = b
if x > c or (x == c and y > d):
return b
else:
return a
bathdata = {}
def bath(n, k):
if (n, k) in bathdata:
return bathdata[(n, k)]
if k == 1:
return (n//2, (n-1)//2)
elif k == 2:
return bath(n//2, 1)
else:
bathdata[(n, k)] = mymin(bath(n//2, k//2), bath((n-1)//2, (k-1)//2))
return bathdata[(n, k)]
import sys

# First line is the number of test cases; each following line is "n k".
# BUG FIX: the original kept T as the raw readline() string, so the
# comparison `T == i+1` (str vs int) was never true and the loop could
# never stop after T cases.
T = int(sys.stdin.readline())
for i, line in enumerate(sys.stdin):
    n, k = line.split(" ")
    n = int(n)
    k = int(k)
    n1, n2 = bath(n, k)
    print("CASE #{}: {} {}".format(i+1, n1, n2))
    if T == i+1:
        break
20,562 | aaaef03ac02a53e6d0c55cc06ce2ef13436a640f | # Generated by Django 3.0.2 on 2020-02-18 04:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.2: creates the personal_info table.
    dependencies = [
        ('account', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='PersonalInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('mobile', models.CharField(max_length=20)),
                ('address', models.CharField(max_length=100)),
                ('nationality', models.CharField(max_length=50)),
                ('gender', models.CharField(max_length=10)),
                ('religion', models.CharField(max_length=20)),
                ('photo', models.ImageField(upload_to='info/')),
                ('email', models.EmailField(max_length=254)),
                ('date_of_birth', models.DateField()),
            ],
            options={
                # Explicit table name instead of the default app_model name.
                'db_table': 'personal_info',
            },
        ),
    ]
|
20,563 | 9ceab1b614e2d4b3d77894e87c622ed9e0b772df | from django.urls import path, include
from deblur.views import DeblurDataList, DeblurDataDetail
app_name = "deblur"
deblur_patterns = [
path("deblur/", DeblurDataList.as_view(), name="deblur_list",),
path("deblur/<int:pk>/", DeblurDataDetail.as_view(), name="deblur_detail",),
]
urlpatterns = [
path("", include(deblur_patterns)),
]
|
20,564 | 2452d3715e46345b2a803d298b5212c06a456253 | from unittest import TestCase
from datetime import timedelta
from django.utils import timezone
from dhisdash.common.data_set_downloader import DataSetDownloader
from dhisdash.models import DataSyncTrackerStatus
from dhisdash.tests.my_test_case import MyTestCaseHelper
class TestDataSetDownloader(TestCase, MyTestCaseHelper):
    def setUp(self):
        # Shared fixtures (e.g. self.data_set) come from MyTestCaseHelper.
        self.setup_environment()
    def test_that_download_url_has_not_lastUpdated_field_at_beginning(self):
        # An UNKNOWN tracker must produce a URL with no lastUpdated filter.
        downloader = DataSetDownloader(self.data_set, 201505, 'xxxx', DataSyncTrackerStatus.UNKNOWN)
        url = downloader.get_download_url()
        self.assertTrue('lastUpdated' not in url)
    def test_that_download_url_has_lastUpdated_if_status_of_tracker_changed(self):
        # NOTE(review): this independently assumes the downloader filters to
        # the last five days -- confirm that window against the implementation.
        five_days_ago = timezone.now() - timedelta(days=5)
        downloader = DataSetDownloader(self.data_set, 201505, 'xxxx', DataSyncTrackerStatus.INIT_DOWNLOAD)
        url = downloader.get_download_url()
        self.assertTrue(('lastUpdated=%s&' % five_days_ago.strftime('%Y-%m-%d')) in url)
20,565 | f0c5b98d4188b3e7ba066d51d6fd3f2334f82395 | from django.db import models
class Team(models.Model):
    # Team record with contact details and an active flag.
    name = models.CharField(max_length=100)
    is_active = models.BooleanField(default=True)
    email = models.CharField(max_length=100)
    slack_name = models.CharField(max_length=100)
class SlackIdentity(models.Model):
    # Maps a display name to a Slack member id (capped at 12 characters here).
    name = models.CharField(max_length=100)
    slack_id = models.CharField(max_length=12)
|
20,566 | 806273c8922ea64dbbf5906356a3425012bf4783 | numeros = []
aux = 0
tamnio = 1
terminar = ''
while True:
aux = (input(f'Introduzca el {tamnio}º número: '))
terminar = str(aux)
if terminar != 'fin':
numeros.append(aux)
tamnio += 1
print(f'Números introducidos: {numeros}')
else:
break
numeros.sort()
print(f'Números introducidos: {numeros}')
mayor = numeros[0]
for x in range(1,tamnio-1):
if numeros[x] > mayor:
mayor = numeros[x]
print(f'Número mayor de la lista: {mayor}') |
20,567 | 70a11ea3382d3ce472d7fa57b6c85192789c4105 | version https://git-lfs.github.com/spec/v1
oid sha256:d51189b5c2b76eb214834870e22395dbc8d9c6b2043243f932733beec7c09dad
size 1515
|
20,568 | e4553ad721d8cb0c72e4b9fa65648785b5e9be68 | class Node:
def __init__(self, d = None):
self.data = d
self.Next = None
def __del__(self):
print("Node is deleted")
class linkedlist:
head = None
def __init__(self, size):
if size > 0:
self.insertAtHead(int(input("Enter the head value: ")))
for i in range(1, size):
self.insertAtEnd(int(input("Enter the Node value: ")))
def __del__(self):
while not self.isEmpty():
self.deleteFromStart()
print("LinkedList is Deleted")
def isEmpty(self):
return self.head==None
def display(self):
first = self.head
while first!=None:
print(first.data,"->", sep='',end='')
first = first.Next
print("Null")
def find(self, n):
first = self.head
while first!=None:
if first.data == n:
return True
first = first.Next
return False
def insertAtHead(self, d):
added = Node(d)
added.Next = self.head
self.head = added
def insertAtEnd(self, d):
if self.head!=None:
first = self.head
while first.Next!=None:
first = first.Next
added = Node(d)
first.Next = added
else:
self.head = Node(d)
def insertNode(self, ind, d):
if ind==0:
self.insertAtHead(d)
else:
first = self.head
for i in range(ind):
second = first
first = first.Next
added = Node(d)
added.Next = second.Next
second.Next = added
def deleteFromStart(self):
if self.head!=None:
d = self.head
self.head = d.Next
del d
def deleteFromEnd(self):
if self.head!=None:
first = self.head
second = None
while first.Next!=None:
second = first
first = first.Next
if second!=None:
ToDel = second.Next
second.Next = None
del ToDel
else:
self.head=None
def deleteNode(self, d):
first = self.head
flag = 1
while first.Next!=None:
flag = 1
if first.Next.data==d:
ToDel = first.Next
first.Next = first.Next.Next
del ToDel
flag = 0
if first.Next!=None and flag:
first = first.Next
if self.head.data==d:
self.deleteFromStart()
def merge(self, l):
first = self.head
while first.Next!=None:
first = first.Next
first.Next = l.head
def remDub(self):
aa = linkedlist(0)
if self.head!=None:
flag = 0
aa.head = Node(self.head.data)
first = self.head.Next
while first!=None:
flag=0
f = aa.head
while f!=None:
if f.data==first.data:
flag = 1
break
f = f.Next
if not flag:
aa.insertAtEnd(first.data)
first = first.Next
return aa
def intersection(self, l):
aa = linkedlist(0)
if self.head!=None or l.head!=None:
flag = 0
first = self.head
while first!=None:
flag=0
f = l.head
while f!=None:
if f.data==first.data:
flag = 1
break
f = f.Next
if flag:
aa.insertAtEnd(first.data)
first = first.Next
return aa
def reverse(self):
if self.head!=None and self.head.Next!=None:
first = None
second = self.head
third = second.Next
while third!=None:
second.Next = first
first = second
second = third
third = second.Next
second.Next = first
self.head = second
def Sort(self):
if self.head!=None and self.head.Next!=None:
first = self.head
while first!=None:
second = first.Next
while second!=None:
if first.data > second.data:
temp = first.data
first.data = second.data
second.data = temp
second = second.Next
first = first.Next
class stackList:
    """LIFO stack backed by a Python list (top of stack = end of list)."""
    def __init__(self):
        self.arr=[]
    def Push(self, x):
        self.arr.append(x)
    def Pop(self):
        """Remove the top element; warn (without raising) when empty.

        BUG FIX: the original printed the warning but fell through to
        self.arr.pop() anyway, raising IndexError on an empty stack.
        """
        if self.isEmpty():
            print("Pop: Stack is Empty...")
            return
        self.arr.pop()
    def Top(self):
        # Returns a message string rather than raising when empty.
        if self.isEmpty():
            return "Top: Stack is Empty..."
        return self.arr[-1]
    def isEmpty(self):
        return len(self.arr)==0
class stackLinkedList:
    """LIFO stack on top of linkedlist, with a cached top element.

    self.flag == 1 means self.top currently holds the top value; 0 means
    the cache is stale and Top() must walk the underlying list.
    """
    def __init__(self, size):
        self.arr = linkedlist(size)
        self.top = None
        self.flag = 0
    def Push(self, x):
        self.arr.insertAtEnd(x)
        self.top = x
        self.flag = 1
    def Pop(self):
        if not self.isEmpty():
            self.arr.deleteFromEnd()
        else:
            print("Pop: Stack is already Empty!!!")
        self.flag = 0
    def Top(self):
        if self.flag:
            return self.top
        if not self.isEmpty():
            # Walk to the tail to recover the top value.
            first = self.arr.head
            while first.Next!=None:
                first=first.Next
            self.top = first.data
            # BUG FIX: the original assigned a local `flag = 1`, so the cache
            # was never marked fresh and every Top() re-walked the list.
            self.flag = 1
            return self.top
        else:
            return "Top: Stack is Empty"
    def isEmpty(self):
        return self.arr.isEmpty()
class QueueList:
    """FIFO queue on a list: enqueue at index 0, dequeue/peek at the tail."""
    def __init__(self):
        self.arr = []
    def EnQueue(self, x):
        self.arr.insert(0, x)
    def deQueue(self):
        # Returns a message string instead of raising when empty.
        if self.isEmpty():
            return "Dequeue: Queue is Empty..."
        self.arr.pop()
    def peek(self):
        # Oldest element lives at the end of the list.
        if self.isEmpty():
            return "Peek: Queue is Empty"
        return self.arr[-1]
    def isFull(self):
        # Unbounded queue: never full.
        return False
    def isEmpty(self):
        return not self.arr
class QueueLinkedList:
    """FIFO queue delegating to linkedlist: enqueue at tail, dequeue at head."""
    def __init__(self, size):
        self.arr = linkedlist(size)
    def EnQueue(self, x):
        self.arr.insertAtEnd(x)
    def deQueue(self):
        if self.isEmpty():
            print("DeQueue: Queue is Empty")
        else:
            self.arr.deleteFromStart()
    def peek(self):
        # Oldest element is the head of the underlying list.
        return "Peek: Queue is Empty" if self.isEmpty() else self.arr.head.data
    def isFull(self):
        # Unbounded queue: never full.
        return False
    def isEmpty(self):
        return self.arr.isEmpty()
def Q1():
    """Interactive walkthrough of every linkedlist operation (uses input())."""
    A = linkedlist(int(input("Enter the length of the linkedlist: ")))
    print("IsNull:", A.isEmpty())
    A.display()
    if(A.find(int(input("Enter the integer you want to find in linkedList: ")))):
        print("The number lies in the linkedList...")
    else:
        print("The number is not found in the linkedList...")
    A.insertAtHead(int(input("Enter the Value to add in the start of the linkedList: ")))
    A.display()
    A.insertAtEnd(int(input("Enter the Value to add in the end of the linkedList: ")))
    A.display()
    A.insertNode(int(input("Enter the index to add value in the linkedList: ")), int(input("Enter the Value to add in the required index of the linkedList: ")))
    A.display()
    print("Deleting From Start")
    A.deleteFromStart()
    A.display()
    print("Deleting From End")
    A.deleteFromEnd()
    A.display()
    A.deleteNode(int(input("Enter number to del from all the list: ")))
    A.display()
    print("removing Duplicates: ", end='')
    A = A.remDub()
    A.display()
    print("Reverese: ", end='')
    A.reverse()
    A.display()
    print("Sorted: ", end='')
    A.Sort()
    A.display()
    print("\nMake a new LinkedList for interaction checks!!!")
    B = linkedlist(int(input("Enter the length of the new linkedList: ")))
    C = A.intersection(B)
    print("Intersection: ", end='')
    C.display()
    A.merge(B)
    print("merge: ", end = '')
    A.display()
def Q2aa():
    """Demo of the list-backed stack (push/pop/top with printed results)."""
    B = stackList()
    print("Pushing '10' in stack...")
    B.Push(10)
    print("\nPrinting Top of the Stack")
    print(B.Top())
    print("\nPopping element from the Stack")
    B.Pop()
    print("\nPrinting Top of the Stack")
    print(B.Top())
    print("\nPushing '3' in the Stack")
    B.Push(3)
    print("\nPrinting Top of the Stack")
    print(B.Top())
    print("\nPopping element from the Stack")
    B.Pop()
    print("\nPrinting Top of the Stack")
    print(B.Top())
    print()
def Q2ab():
    """Demo of the linked-list-backed stack (constructor prompts for 3 values)."""
    A = stackLinkedList(3)
    print("\nPrinting Top of the Stack")
    print(A.Top())
    print("\nPopping element from the Stack")
    A.Pop()
    print("\nPrinting Top of the Stack")
    print(A.Top())
    print("\nPushing '3' in the Stack")
    A.Push(3)
    print("\nPrinting Top of the Stack")
    print(A.Top())
    print("\nPopping element from the Stack")
    A.Pop()
    print("\nPrinting Top of the Stack")
    print(A.Top())
    print()
def Q2ba():
    """Demo of the list-backed queue (enqueue/dequeue/peek with prints)."""
    A = QueueList()
    print("\nEnqueuing '10' from the Queue")
    A.EnQueue(10)
    print("\nPeeking the Queue")
    print(A.peek())
    print("\nDequeuing element from the Queue")
    A.deQueue()
    print("\nPeeking the Queue")
    print(A.peek())
    A.EnQueue(11)
    print("\nPeeking the Queue")
    print(A.peek())
    print("\nDequeuing element from the Queue")
    A.deQueue()
    print("\nPeeking the Queue")
    print(A.peek())
    print()
def Q2bb():
    """Demo of the linked-list-backed queue (constructor prompts for 3 values)."""
    A = QueueLinkedList(3)
    print("\nEnqueuing '10' from the Queue")
    A.EnQueue(10)
    print("\nPeeking the Queue")
    print(A.peek())
    print("\nDequeuing element from the Queue")
    A.deQueue()
    print("\nPeeking the Queue")
    print(A.peek())
    A.EnQueue(11)
    print("\nPeeking the Queue")
    print(A.peek())
    print("\nDequeuing element from the Queue")
    A.deQueue()
    print("\nPeeking the Queue")
    print(A.peek())
    print()
if __name__ == "__main__":
Q1()
print("\n\nStack with List")
print()
Q2aa()
print("\n\nStack with LinkedList")
print()
Q2ab()
print("\n\nQueue with List")
print()
Q2ba()
print("\n\nQueue with LinkedList")
print()
Q2bb() |
20,569 | 648a0d059289ec6cfcedc84459c1f64d77748821 | """Config4Py configuration management utility"""
__version__ = '1.0.0a1'
from config4py.config import Config
from config4py.spec import Configspec
from config4py.parsing import CheckParser
|
20,570 | 99f3eb59903915aae0f8c9e0438652c567510695 | #projection interference for 1/4 fits file and PCP. using matrix instead of loop ,t_sample,f_sample
import numpy as np
from torch._C import device
import config
import torch
import sys
# Sampling grid and data dimensions shared module-wide (defined in config.py).
t_sample = config.t_sample
f_sample = config.f_sample
beam = config.beam
t_shape = config.t_shape
f_shape = config.f_shape
def make_covariance_matrix(data):
    """Build beam-beam correlation matrices from per-beam spectra.

    Returns (D_ms, D): D_ms is the full-resolution matrix transposed to
    (t_shape, f_shape, beam, beam); D is the same quantity block-averaged
    down to the sample grid.
    """
    t_step = int(t_shape/t_sample)
    f_step = int(f_shape/f_sample)
    D=np.zeros((beam,beam,t_shape,f_shape))
    for i in range(beam):
        for j in range(beam):
            # Geometric mean of the two beams' power as the correlation proxy.
            a = np.sqrt(data[i]*data[j])
            D[i][j] = a.reshape(t_shape,f_shape)
    D_ms = np.transpose(D,(2,3,0,1))
    # Block-average over time, then frequency.
    # NOTE(review): the second reshape uses f_shape where f_sample seems
    # intended; as written it only reshapes cleanly when f_step == 1 --
    # verify against the actual config values.
    D = D.reshape(beam,beam,t_sample,t_step,f_shape).mean(axis=3)
    D = D.reshape(beam,beam,t_sample,f_shape,f_step).mean(axis=-1).squeeze()
    D = np.transpose(D,(2,3,0,1))
    print('correlation shape:',D.shape)
    return D_ms,D
def make_matrix(D_ms,correlation):
    """Remove the dominant eigenmode from each pixel's beam-covariance matrix.

    For every (t_sample, f_sample) block an SVD of the block-averaged
    covariance gives the leading singular vector, which is projected out of
    every full-resolution pixel covariance inside that block.  The cleaned
    per-beam spectrum is the diagonal of the projected matrix.

    Parameters
    ----------
    D_ms : ndarray, shape (t_shape, f_shape, beam, beam)
        Full-resolution covariance matrices (first output of
        make_covariance_matrix).
    correlation : ndarray, shape (t_sample, f_sample, beam, beam)
        Block-averaged covariance (second output of make_covariance_matrix).

    Returns
    -------
    data_clean : ndarray, shape (beam, t_shape, f_shape)
        Cleaned spectra, clamped to be non-negative.
    """
    t_step = int(t_shape/t_sample)
    f_step = int(f_shape/f_sample)
    D=D_ms
    data_clean = np.zeros((beam,t_shape,f_shape))
    # NOTE(review): cuda0 is created but unused here; the GPU SVD path below
    # is commented out, so everything runs on CPU.
    cuda0 = torch.device('cuda:0')
    # Batched SVD over all (t_sample * f_sample) block covariance matrices.
    correlation_cpu = torch.tensor(correlation.reshape(-1,beam,beam))
    u_cpu, s_cpu, vh_cpu = torch.svd(correlation_cpu)
    u_cpu = u_cpu.reshape(t_sample,f_sample,beam,beam)
    #correlation_gpu = torch.tensor(correlation.reshape(-1,beam,beam),device=cuda0)
    #u_gpu, s_gpu, vh_gpu = torch.svd(correlation_gpu)
    #u_gpu = u_gpu.reshape(t_sample,f_sample,beam,beam)
    #u_cpu = u_gpu.cpu()
    #torch.cuda.empty_cache()
    u = np.array(u_cpu)
    spectrum = np.zeros((t_shape,f_shape,beam))
    for i in range(t_shape):
        for j in range(f_shape):
            # Pick the singular vectors of the block this pixel belongs to.
            u_sample = u[int(i/t_step)][int(j/f_step)].squeeze()
            # Leading singular vector = presumed RFI mode for this block.
            u_rfi = u_sample[:,:1]
            # Projector onto the RFI subspace.
            P = np.dot(u_rfi,u_rfi.T)
            c=D[i][j]
            # Subtract the component of c lying in the RFI subspace.
            matrix_clean = c - np.dot(P,np.dot(c,P))
            spectrum[i][j]=np.diag(matrix_clean)
    spectrum = np.transpose(spectrum,(2,0,1))
    for k in range(beam):
        data_clean[k]=spectrum[k]
    # Negative powers are unphysical; clamp to zero.
    data_clean[data_clean<0]=0
    return data_clean
|
20,571 | ea02f70c40325c6d73f72a201c1b93b0ac37895c | import pygame
import time
import lucidity
from lucidity.app.delegate import MainDelegate
from lucidity.core.arrangement import Sequence
from lucidity.gui.grid import MainGrid
from lucidity.gui.layout import PanelSizer
from lucidity.gui.skinning import Skin
from lucidity.gui.toolbars import TopToolbar, BottomToolbar
from lucidity.media.library import MediaRequestLoop, MediaRequest, MediaRequestDelegate, MediaFileConverter
from lucidity.media.media import MediaFile
from lucidity.midi.midi import MidiEventLoop
from lucidity.system.log import logger
from lucidity.system.performance import SystemUsageLoop
from lucidity.system.settings import Settings
from lucidity.system.status import StatusLoop, ObtuseStatusProvider
class MainWindow(MediaRequestDelegate):
def __init__(self, delegate:MainDelegate, sequence:Sequence, settings:Settings,
mediaRequestLoop:MediaRequestLoop, midiEventLoop:MidiEventLoop,
statusLoop:StatusLoop, systemUsageLoop:SystemUsageLoop):
# Important variables for delegate, system sequence, settings
self.mainDelegate = delegate
self.mainDelegate.mainWindow = self
self.sequence = sequence
self.settings = settings
# References to system threads
self._mediaRequestLoop = mediaRequestLoop
self._midiEventLoop = midiEventLoop
self._systemUsageLoop = systemUsageLoop
self._systemUsageLoop.fpsProvider = self
self._statusLoop = statusLoop
# Initialize display
pygame.display.init()
pygame.display.set_caption("Lucidity")
# Variables related to display
self.surface = None
self.mainGrid = None
self._ready = False
self._shouldQuit = False
self._resolution = (1440, 900)
self._containers = []
self._skin = Skin(self.settings.getString("gui.skin"), self.settings.getInt("gui.colorInterval"))
self._setStatusTextFunction = None
self._getCurrentItemProvider = self.getCurrentItem
self._framesProcessed = 0
def open(self):
"""
Open up the window for the application. This must, sadly, be done in the main
thread, or else the window will not properly respond to events.
"""
windowFlags = self.getWindowFlags(self.settings)
self.surface = pygame.display.set_mode(self._resolution, windowFlags)
self._printVideoInfo(pygame.display.Info())
logger.info("Initialized display with driver: " + pygame.display.get_driver())
self.surface.fill(self._skin.guiColor("Background"))
self._initializePanels(self._resolution, self._skin)
pygame.display.flip()
self._statusLoop.statusProvider = self.getStatusProvider(self.settings)
def run(self):
maxFps = self.settings.getFloat("gui.maxFps")
frameRenderTimeInSec = 1 / maxFps
while not self._ready:
startTime = time.time()
pygame.event.pump()
self._processFrame(startTime, frameRenderTimeInSec)
while not self._shouldQuit:
startTime = time.time()
for event in pygame.event.get():
self._processEvent(event)
self._processFrame(startTime, frameRenderTimeInSec)
pygame.display.quit()
pygame.quit()
def _processFrame(self, startTime, frameRenderTimeInSec):
self.sequence.tick()
for container in self._containers:
container.draw()
sleepTime = frameRenderTimeInSec - (time.time() - startTime)
if sleepTime > 0:
pygame.time.delay(int(sleepTime * 1000))
self._framesProcessed += 1
def _processEvent(self, event):
eventType = pygame.event.event_name(event.type)
try:
processFunction = getattr(self.mainDelegate, "on" + eventType)
processFunction(event.dict)
except AttributeError as exception:
logger.info("Error handling event '" + eventType + "': " + str(exception))
except pygame.error as exception:
logger.error("Error from pygame: " + str(exception))
def _initializePanels(self, resolution, skin:Skin):
panelSizer = PanelSizer()
self.mainGrid = MainGrid(self.surface,
panelSizer.getMainGridRect(resolution[0], resolution[1]),
skin, self.sequence)
self._containers.append(self.mainGrid)
self.mainDelegate.mainGrid = self.mainGrid
toolbarBackgroundColor = self._skin.guiColor("Toolbar")
topToolbar = TopToolbar(self.surface,
panelSizer.getTopToolbarRect(resolution[0]),
skin, toolbarBackgroundColor, self.mainDelegate)
self._containers.append(topToolbar)
self._setStatusTextFunction = topToolbar.onStatusUpdate
bottomToolbar = BottomToolbar(self.surface,
panelSizer.getBottomToolbarRect(resolution[0], resolution[1]),
skin, toolbarBackgroundColor, self.mainDelegate)
self._containers.append(bottomToolbar)
self._systemUsageLoop.delegate = bottomToolbar
self._statusLoop.delegate = topToolbar
def onReady(self):
self._ready = True
def quit(self):
self._shouldQuit = True
def minimize(self):
logger.debug("Minimizing")
pygame.display.iconify()
# TODO: This should be replaced by a method in the bottom toolbar
def getCurrentItem(self):
file = MediaFile("/foo/bar")
file.title = "Sample Block"
file.timeInSeconds = 30.0
return file
def insert(self):
mediaFile = self._getCurrentItemProvider()
cursorPosition = self.mainGrid.getCursorPosition()
item = MediaFileConverter.getItemForMediaFile(mediaFile, self.mainGrid.getCursorTrack(),
cursorPosition.beats,
self.sequence.getTempo())
self.sequence.addItem(item)
def onRequestComplete(self, request:MediaRequest, args):
if request.type == MediaRequest.Operations.RESCAN:
self.setStatusText(args[0])
elif request.type == MediaRequest.Operations.SEARCH:
pass
def setStatusText(self, text):
self._setStatusTextFunction(text)
def onMouseButtonDown(self, eventDict):
# logger.debug("Down at " + str(eventDict['pos']))
clickPosition = eventDict['pos']
for container in self._containers:
if container.absRect.collidepoint(clickPosition[0], clickPosition[1]):
container.onMouseDown(clickPosition)
def onMouseButtonUp(self, eventDict):
# logger.debug("Up at " + str(eventDict['pos']))
clickPosition = eventDict['pos']
for container in self._containers:
if container.absRect.collidepoint(clickPosition[0], clickPosition[1]):
container.onMouseUp(clickPosition)
def onStartMidiMapping(self):
for container in self._containers:
container.onStartMidiMapping()
def onStopMidiMapping(self):
for container in self._containers:
container.onStopMidiMapping()
def getFramesPerSec(self):
totalTime = self.sequence.getTime() - self.sequence.clock.startTime
if totalTime > 0:
return self._framesProcessed / totalTime
else:
return 0.0
def getStatusProvider(self, settings):
providerName = settings.getString("gui.statusProvider")
if providerName == "system":
return self._systemUsageLoop
elif providerName == "obtuse":
return ObtuseStatusProvider()
elif providerName == "debug":
return lucidity.system.log.statusHandler
else:
return None
def getWindowFlags(self, settings):
windowFlags = 0
for setting in ["fullscreen", "doublebuf", "hwsurface", "opengl"]:
fullSettingName = "gui" + "." + setting
if settings.getInt(fullSettingName) > 0:
pygameWindowFlagAttr = getattr(pygame, setting.upper())
windowFlags |= pygameWindowFlagAttr
return windowFlags
def _printVideoInfo(self, videoInfo):
resolutionWidth = str(videoInfo.current_w)
resolutionHeight = str(videoInfo.current_h)
logger.debug("Current resolution: " + resolutionWidth + "x" + resolutionHeight)
videoInfoAttributes = {'hw': 'Hardware acceleration',
'wm': 'Windowed display',
'bitsize': 'Display depth',
}
for key in videoInfoAttributes.keys():
logger.debug(videoInfoAttributes[key] + ": " + str(getattr(videoInfo, key)))
|
20,572 | 5150179dbc52efd29503b11eb6741f17214ea2d7 |
import pytest
import serverlib.USBevents as USBevents
@pytest.mark.skip(reason="USBlib non-functional under debian-stretch and not used in code")
class Test_Inotify:
    """Smoke tests for serverlib.USBevents (skipped wholesale, see mark above)."""
    def test01(self):
        # get_USB_set() should enumerate currently attached USB devices.
        usb_set = USBevents.get_USB_set()
        assert isinstance(usb_set, set), "set expected!"
        print("USB_SET: {}".format(usb_set))
        # assert False, "Force Fail"
    def test02(self):
        # Vendor/product ids 0x1050:0x0407 -- presumably a Yubico device;
        # confirm against the hardware this test was written for.
        vend, prod = 0x1050, 0x0407
        usbstate = USBevents.USBState((vend, prod))
        for i in range(60):
            print("state is {}".format(usbstate.isPresent()))
            # time.sleep(1)
        # assert False, "Force Fail"
|
20,573 | f5b9bcb307cc036a67c60092b4e9b13120b50162 | from nytimesarticle import articleAPI
class NewsWidgetService():
def __init__(self):
self.__nyt_api = articleAPI('fcc2bc866f604fd6b5f9706f8065f6ef');
def get_news(self):
# Returns top 3 news from nyt
articles = self.__nyt_api.search();
response = articles['response'];
docs = response['docs'];
news = [];
counter = 0;
for doc in docs:
if counter < 3:
print doc
publish_date = doc['pub_date'] if 'pub_date' in doc else None;
headline = doc['headline'] if 'headline' in doc else None;
if headline != None:
headline = headline['main'] if 'main' in headline else None;
web_url = doc['web_url'] if 'web_url' in doc else None;
multimedia = doc['multimedia'] if 'multimedia' in doc else None;
image_url = None;
if multimedia != None and len(multimedia) != 0:
image_url = multimedia[0]['url'] if 'url' in multimedia[0] else None;
if publish_date != None and headline != None and web_url != None and image_url != None:
snippet = {
'headline': headline,
'date': publish_date,
'web_url': web_url,
'image_url': 'http://www.nytimes.com/' + image_url
};
print snippet
news.append(snippet);
counter = counter + 1;
else:
break;
return news;
|
20,574 | e73e2943247656a964752058ec7d8aff69b3c28e | env = Environment(CCFLAGS='-DHAVE_INTTYPES_H -DHAVE_NETINET_IN_H', CPPPATH=['/usr/local/include/thrift', '../gen-cpp'], LIBPATH=['/usr/local/lib'])
targets = {"server" : "server.cpp" , "client" : "client.cpp"}
linkflags = ''
if int(ARGUMENTS.get('static', 0)):
linkflags = '-Wl,-static -static-libgcc'
common_sources = ['../gen-cpp/SharedService.cpp', '../gen-cpp/shared_types.cpp', '../gen-cpp/tutorial_types.cpp', '../gen-cpp/Calculator.cpp']
CommonObjects = env.Object(common_sources)
for target, file in targets.iteritems():
env.Program(target = target, source = [file, CommonObjects], LINKFLAGS=linkflags, LIBS = ['thrift', 'pthread'])
|
20,575 | 2a6f3a69f45cfdcc1c582f4cf3322d9f3980d166 | # 看到下面代码,请不要怂,老规矩,你只需要修改有注释的地方
# 其他未备注的地方,看得懂代码的人,请自行修改,需要需要了解numpy,pyaudio
#conding = utf8
from __future__ import print_function
from __future__ import division
import os
# Which output device drives the LEDs: 'esp8266', 'pi', or 'blinkstick'.
DEVICE = 'esp8266'
if DEVICE == 'esp8266':
    # The IP address you just configured for the ESP8266 in the Arduino
    # sketch -- it must match exactly or the UDP packets go nowhere.
    UDP_IP = '192.168.2.150'
    UDP_PORT = 7777
    SOFTWARE_GAMMA_CORRECTION = False
if DEVICE == 'pi':
    # Raspberry Pi driving the strip directly (GPIO/PWM parameters).
    LED_PIN = 18
    LED_FREQ_HZ = 800000
    LED_DMA = 5
    BRIGHTNESS = 255
    LED_INVERT = True
    SOFTWARE_GAMMA_CORRECTION = True
if DEVICE == 'blinkstick':
    SOFTWARE_GAMMA_CORRECTION = True
USE_GUI = True
DISPLAY_FPS = True
# Number of LEDs on your strip. This must be exact -- a wrong value makes the
# program fail at runtime.
N_PIXELS = 120
GAMMA_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'gamma_table.npy')
MIC_RATE = 44100  # microphone sample rate, Hz
FPS = 60  # target LED refresh rate
# Upper bound on achievable refresh rate -- presumably derived from LED strip
# timing (~30 us per LED plus a 50 us latch); confirm for your hardware.
_max_led_FPS = int(((N_PIXELS * 30e-6) + 50e-6)**-1.0)
assert FPS <= _max_led_FPS, 'FPS must be <= {}'.format(_max_led_FPS)
MIN_FREQUENCY = 200  # Hz, lower edge of the analyzed audio band
MAX_FREQUENCY = 12000  # Hz, upper edge of the analyzed audio band
N_FFT_BINS = 24
N_ROLLING_HISTORY = 2
MIN_VOLUME_THRESHOLD = 1e-7
20,576 | 5f31f7ca60d1df85536ae5a8114227b6adf40201 | ############################################################################
###################### Unit Testing for Log ############################
############################################################################
"""
Expected output:
Interleaved output from multiple threads
Each thread logs
(a) when it is starting
(b) multiple lines of output
(c) when it has ended
There should be NO BLANK LINES in the log and
each line should contain output from a single thread.
The line '....MIDPOINT...' appears half way through.
In the first half lines are about 90 characters long.
In the second half lines are twice as long.
"""
from unittest import TestCase
import threading
from Log import gLog
from time import sleep
class PrintManyLinesToLog(threading.Thread):
    """Worker thread that hammers the shared log with distinctive lines.

    Each thread repeats its `character` tag 80 times per line, so corrupted
    (intra-line interleaved) output from concurrent threads would be
    visually obvious in the log file.
    """
    def __init__(self, character):
        threading.Thread.__init__(self)
        # Tag identifying this thread's lines (may be more than one char).
        self.character = character
        self.lineCount = 0
    def run(self):
        gLog.print("Starting " + self.character)
        # Build the repeated-tag marker string once.
        tempStr = ""
        for i in range(80):
            tempStr = tempStr + self.character
        # Emit 50 numbered lines to stress concurrent logging.
        for i in range(50):
            self.lineCount += 1
            gLog.print(tempStr + ": count = " + str(self.lineCount))
        gLog.print("Ending " + self.character)
class TestLogClass(TestCase):
def test_assertions(self):
gLog.open('1')
self.assertRaises(AssertionError, gLog.print, 3) # trying to print a non string
gLog.close()
self.assertRaises(AssertionError, gLog.open, 1, 40) # name is not a string
self.assertRaises(AssertionError, gLog.open, 'a', 'cat') # flushFrequency is not an integer
self.assertRaises(AssertionError, gLog.open, 'a', 2.5) # flushFrequency is not an integer
self.assertRaises(AssertionError, gLog.open, 'a', -2) # flushFrequency is < 0
def test_log(self):
gLog.open('2', 10)
gLog.print(
"""
Expected output:
Interleaved output from multiple threads
Each thread logs
(a) when it is starting
(b) multiple lines of output
(c) when it has ended
There should be NO BLANK LINES in the log and
each line should contain output from a single thread.
The line '....MIDPOINT...' appears half way through.
In the first half lines are about 90 characters long.
In the second half lines are twice as long.
"""
)
for digit in range(10):
PrintManyLinesToLog(str(digit)).start()
sleep(2)
gLog.flush()
gLog.print('...................... MIDPOINT.........................')
for digit in range(10):
PrintManyLinesToLog(str(digit) + str(digit) + str(digit)).start()
sleep(5)
gLog.close()
raise Exception ('EXCEPTION EXPECTED: Check correctness of output in file log_test.txt') |
20,577 | cdc5aebd2d6cbec1669283ec41eef426d0f20f7a | # -*- coding: utf-8 -*-
###################################################################
##Replicate a dictionary, changing it to string
#print('')
dicta = {'name': 'Peter', 'age': '29', 'Gender': 'Male', 'married': 'Yes'}
dictb = repr(dicta)
print ("dictb\n",type(dictb), dictb [0:10]) # MAKES IT INTO A STRING
print ("dictA\n",type(dicta), dicta)
|
20,578 | 9bc51344ac6788d8337c7add0fede4eb665107e9 | import pytest
from flex.constants import (
PARAMETER_IN_VALUES,
PATH,
BODY,
QUERY,
HEADER,
FORM_DATA,
)
from flex.error_messages import MESSAGES
from flex.exceptions import ValidationError
from flex.loading.definitions.parameters import (
single_parameter_validator,
)
from tests.factories import ParameterFactory
from tests.utils import (
assert_path_not_in_errors,
assert_message_in_errors,
)
def test_name_is_required():
context = {'deferred_references': set()}
with pytest.raises(ValidationError) as err:
single_parameter_validator({}, context=context)
assert_message_in_errors(
MESSAGES['required']['required'],
err.value.detail,
'name',
)
@pytest.mark.parametrize(
'value',
([1, 2], None, {'a': 1}, True, 1, 1.1),
)
def test_in_with_invalid_types(value):
with pytest.raises(ValidationError) as err:
single_parameter_validator({'in': value})
assert_message_in_errors(
MESSAGES['type']['invalid'],
err.value.detail,
'in.type',
)
def test_in_must_be_one_of_valid_values():
with pytest.raises(ValidationError) as err:
single_parameter_validator({'in': 'not-a-valid-in-value'})
assert_message_in_errors(
MESSAGES['enum']['invalid'],
err.value.detail,
'in.enum',
)
@pytest.mark.parametrize(
'value',
PARAMETER_IN_VALUES,
)
def test_in_with_valid_values(value):
try:
single_parameter_validator({'in': value})
except ValidationError as err:
errors = err.detail
else:
errors = {}
assert_path_not_in_errors(
'in.enum',
errors,
)
def test_when_in_value_is_path_required_must_be_true():
with pytest.raises(ValidationError) as err:
single_parameter_validator(ParameterFactory(**{
'in': PATH,
'required': False,
}))
assert_message_in_errors(
MESSAGES['required']['path_parameters_must_be_required'],
err.value.detail,
'^required',
)
def test_when_in_value_is_body_a_schema_is_required():
parameter = ParameterFactory(**{
'in': BODY,
})
parameter.pop('schema', None)
with pytest.raises(ValidationError) as err:
single_parameter_validator(parameter)
assert_message_in_errors(
MESSAGES['schema']['body_parameters_must_include_a_schema'],
err.value.detail,
'^schema',
)
@pytest.mark.parametrize(
'in_',
(QUERY, PATH, HEADER, FORM_DATA),
)
def test_when_in_value_is_not_body_type_is_required(in_):
parameter = ParameterFactory(**{
'in': in_,
})
parameter.pop('type', None)
with pytest.raises(ValidationError) as err:
single_parameter_validator(parameter)
assert_message_in_errors(
MESSAGES['type']['non_body_parameters_must_declare_a_type'],
err.value.detail,
'^type',
)
|
20,579 | 637685ae64cdbf465e092a73ce85304c33897bf4 | """
viscount.api.cytoscape
Workflow rendereding for cytoscape.js json
"""
from flask import jsonify
from ..services import workflows as _workflows, tasks as _tasks
from ..models import Workflow
from ..core import db
from ..core import ViscountException
# exceptions for cytoscape rendering
class CytoscapeException(ViscountException):
    """Error raised while rendering objects to cytoscape.js JSON.

    Carries an HTTP-style status code and an optional payload dict that is
    merged into the serialized error body.
    """

    # Default status code used when the constructor is not given one.
    status_code = 200

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        # Only override the class-level default when explicitly provided.
        if status_code is not None:
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Serialize this error to a dict suitable for jsonify()."""
        body = {}
        if self.payload:
            body.update(self.payload)
        body['error'] = self.message
        return body
def handle_CytoscapeException(error):
    """Flask error handler: serialize a CytoscapeException to a JSON response."""
    response = jsonify(error.to_dict())
    # Propagate the exception's HTTP status (class default is 200).
    response.status_code = error.status_code
    return response
def render_workflow_to_cytoscape(wf, parent=None):
nodes = []
edges = []
for ti in wf.task_instances.all():
nodes.append( { 'data' : { 'id' : 'ti' + str(ti.id), 'name' : ti.task.name, 'description' : ti.description, 'color' : 'gray' } } )
for tif in ti.task.inputs.all():
nodes.append( { 'data' : {
'id' : 'ti' + str(ti.id) + 'tif' + str(tif.id),
'parent' : 'ti' + str(ti.id),
'name' : tif.name,
'description' : tif.description,
'file_type' : tif.file_type.name,
'classes' : 'input',
'color' : 'cyan'
} } )
for tof in ti.task.outputs.all():
nodes.append( { 'data' : {
'id' : 'ti' + str(ti.id) + 'tof' + str(tof.id),
'parent' : 'ti' + str(ti.id),
'name' : tof.name,
'description' : tof.description,
'file_type' : tof.file_type.name,
'classes' : 'output',
'color' : 'magenta'
} } )
for tii in ti.inputs:
edges.append( { 'data' : { 'id' : 'tii' + str(tii.id), 'source' : 'ti' + str(tii.output_task_instance.id) + 'tof' + str(tii.output_task_file_id), 'target' : 'ti' + str(ti.id) + 'tif' + str(tii.input_task_file_id), 'color' : 'purple' } } )
graph = { 'elements' : { 'nodes' : nodes , 'edges' : edges },
'style' : [
{
'selector' : 'node',
'css' : {
'content' : 'data(name)',
'text-valign' : 'center',
'text-halign' : 'center',
'text-outline-width' : 2,
'text-outline-color' : 'data(color)',
'background-color' : 'data(color)',
'color' : '#fff'
},
}, {
'selector' : '$node > node',
'css' : {
'padding-top' : '10px',
'padding-left' : '10px',
'padding-bottom' : '10px',
'padding-right' : '10px',
'text-valign' : 'top',
'text-halign' : 'center',
}
}, {
'selector' : 'edge',
'css' : {
'opacity' : 0.666,
'target-arrow-shape' : 'triangle',
'source-arrow-shape' : 'circle',
'line-color' : 'magenta',
'source-arrow-color' : 'cyan',
'target-arrow-color' : 'magenta',
}
}, {
'selector' : ':selected',
'css' : {
'background-color' : 'black',
'line-color' : 'black',
'target-arrow-color' : 'black',
'source-arrow-color' : 'black',
}
}
],
}
return graph
def render_to_cytoscape(obj):
    """Render an ORM object to a cytoscape.js-compatible graph dict.

    Currently only Workflow instances are supported; everything else
    raises CytoscapeException.
    """
    if isinstance(obj, Workflow):
        return render_workflow_to_cytoscape(obj)
    # NOTE(review): `Project` is not imported in this module, so reaching this
    # branch raises NameError rather than CytoscapeException -- confirm and
    # import it (presumably from ..models) if Project rendering is intended.
    elif isinstance(obj, Project):
        # for workflows, render each workflow with a parent compound node
        raise CytoscapeException('not implemented')
    else:
        raise CytoscapeException('Cytoscape rendering not supported for object type ' + str(type(obj)))
|
20,580 | f73535b860d821a104cd12fba2f8658729f284cf | import argparse
import libvirt
class LibvirtConn:
    """Thin wrapper around a libvirt qemu:///system connection.

    Exposes VM lifecycle operations (create/delete/power on/off/reboot)
    addressed by domain name.
    """
    def __init__(self):
        # Connecting to the system-level daemon generally needs privileges.
        self.conn = libvirt.open("qemu:///system")
    def find_vm_dec(func):
        # Decorator: resolve the VM name to a libvirt domain object and pass
        # it to the wrapped method.  lookupByName raises libvirt.libvirtError
        # when no domain with that name exists.
        def closure(self, name, *args, **kwargs):
            vm = self.conn.lookupByName(name)
            return func(self, name, vm, *args, **kwargs)
        return closure
    def create_vm(self, xml_file):
        """Define a VM from the given libvirt domain XML file."""
        with open(xml_file) as f:
            xml = f.read()
        self.conn.defineXML(xml)
    @find_vm_dec
    def delete_vm(self, name, vm):
        """Force-stop the VM if it is running, then undefine it."""
        if vm.isActive():
            vm.destroy()
        vm.undefine()
    @find_vm_dec
    def power_on(self, name, vm):
        """Start the named VM."""
        vm.create()
    @find_vm_dec
    def power_off(self, name, vm):
        """Request a graceful guest shutdown (asynchronous)."""
        vm.shutdown()
    @find_vm_dec
    def reboot(self, name, vm):
        """Request a guest reboot."""
        vm.reboot()
def lvirt_parse__args():
    """Build and evaluate the command-line interface for the libvirt wrapper."""
    parser = argparse.ArgumentParser(
        description="Pythonic wrapper for libvirt. Author: Ruslan Aliev")
    # (flag, destination, help text): every option names a VM except
    # --create, which takes a domain XML definition file.
    option_table = (
        ('--create', 'xml_file', 'Create VM defined in XML_FILE'),
        ('--delete', 'name_del', 'Delete VM'),
        ('--power-on', 'name_on', 'Power ON VM'),
        ('--power-off', 'name_off', 'Power OFF VM'),
        ('--reboot', 'name_reboot', 'Reboot VM'),
    )
    for flag, dest, help_text in option_table:
        parser.add_argument(flag, dest=dest, metavar='VM_NAME', help=help_text)
    return parser.parse_args()
def lvirt_exec(args):
    """Dispatch exactly one VM operation based on the parsed CLI arguments.

    Maps libvirt and I/O failures onto the process exit status.
    NOTE: this file uses Python 2 print statements and is not Python 3
    compatible as written.
    """
    try:
        lvirt = LibvirtConn()
        # Options are mutually exclusive; the first one set wins.
        if args.xml_file:
            lvirt.create_vm(args.xml_file)
        elif args.name_del:
            lvirt.delete_vm(args.name_del)
        elif args.name_on:
            lvirt.power_on(args.name_on)
        elif args.name_off:
            lvirt.power_off(args.name_off)
        elif args.name_reboot:
            lvirt.reboot(args.name_reboot)
        else:
            print "No options specified. Use -h for help"
    except libvirt.libvirtError as le:
        # Exit with the libvirt error code.
        raise SystemExit(le.get_error_code())
    except IOError as ioe:
        print "IOError:", ioe.filename, ioe.strerror
        raise SystemExit(ioe.errno)
if __name__ == '__main__':
lvirt_exec(lvirt_parse__args())
|
20,581 | 3407ddd30cb29ab5bd6021a70841844716590759 | # Generated by Django 2.2.6 on 2019-11-02 09:46
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the Appointment model's 'category' field to 'department'."""
    dependencies = [
        ('appointments', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='appointment',
            old_name='category',
            new_name='department',
        ),
    ]
|
20,582 | c29bc2b044c50a18b82145e6eb3d73c577cc262c | from django.db import models
from django.contrib.auth.models import AbstractUser
from Lotte_datasetApp.models import lotteData
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
# Create your models here.
class wear_mywear(models.Model):
    """Image of an item the shopper wants to wear/buy (stored under images/shoplist)."""
    shopping_want_wear = models.ImageField(upload_to='images/shoplist',blank=True)
    def name_func(instance, filename):
        # Builds an upload filename of the form "<instance.id>.jpg".
        # NOTE(review): this helper is never wired into the ImageField's
        # upload_to above, and `blocks` is computed but unused -- confirm
        # whether upload_to=name_func was intended.
        blocks = filename.split('.')
        filename = "%s.jpg" % (instance.id)
        return filename
class CustomUser(AbstractUser):
    """Custom user model with Korean-locale address and gender choices."""
    def __str__(self):
        return self.name
    # (code, display) pairs; display labels are Korean region names.
    ADDRESS = (
        ('seoul','서울'),
        ('gyeonggi','경기'),
        ('chungnam','충남'),
        ('chungbuk','충북'),
        ('gyeongbuk','경북'),
        ('gyeongnam','경남'),
        ('jeonbuk','전북'),
        ('jeonnam','전남'),
    )
    # ('female', 'female'), ('male', 'male') in Korean.
    GENDER = (
        ('여성','여성'),
        ('남성','남성'),
    )
    username = models.CharField(max_length=50, unique=True)
    # NOTE(review): redeclaring password as a plain 50-char field shadows
    # AbstractUser's hashed-password column -- verify this is intentional;
    # as written it looks like it would store passwords unhashed.
    password = models.CharField(max_length=50)
    name = models.CharField(max_length=50)
    phone_number = models.CharField(max_length=50)
    address = models.CharField(max_length=50,choices=ADDRESS)
    gender = models.CharField(max_length=50,choices=GENDER)
class itemsaved(models.Model): ## temporary image storage used during image search
    # The uploaded query image (optional).
    image = models.ImageField(upload_to='images/temp/', blank=True)
    # Owning user; cascade-deleted with the user, nullable for anonymous saves.
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
# Shopping cart implementation
class CartItem(models.Model):
    """One product line in a user's shopping cart."""
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    product = models.ForeignKey(lotteData, on_delete=models.CASCADE)
    active = models.BooleanField(default=False)
    # A quantity such as -1 makes no sense, so declare a positive field
    # bounded to the range 1..100 with a minimum of 1.
    quantity = models.PositiveSmallIntegerField(null=True, default=1, validators=[MinValueValidator(1), MaxValueValidator(100)])
    created_at = models.DateTimeField(auto_now_add=True)
    class Meta:
        verbose_name = '장바구니'
        verbose_name_plural = f'{verbose_name} 목록'
        ordering = ['-pk']
    # def sub_total(self):
    #     # Per-line subtotal (price * quantity) for use in templates.
    #     return self.lotte.price * self.quantity
    def __str__(self):
        return self.product.lotteName
|
20,583 | a47013136170f3dd5c0670b5e56acf99097607c0 | import os
from Utils.IO import *
from Utils.Path import *
class Available:
def __init__(self):
modulesLoaded, modulePaths = getEnvModLoadedAndPaths()
centreTextInConsole(" Available Modules ")
i = 0
for mod in modulePaths:
if not os.path.exists(mod):
printErr("Search path " + mod + " does not exist!")
continue
printErr("Path: " + mod)
printDivider()
for root, dirs, files in os.walk(mod):
headPath, tailPath = os.path.split(mod)
rootMinusMod = root.replace(headPath, "").strip(os.sep)
for name in files:
if not name.endswith(("~", "#")):
moduleFileName = os.path.join(root, name)
alreadyLoaded=" "
for modAlreadyLoaded in modulesLoaded:
if modAlreadyLoaded == moduleFileName:
alreadyLoaded = "(Loaded) "
startOfFile = file(moduleFileName).read(8)
if "#%Module" in startOfFile:
printErr(alreadyLoaded + (rootMinusMod + os.sep + name).lstrip(os.sep))
printErr("")
|
20,584 | 8f9508b5a94c795c22443da2809e985b4396b08d | i18n = lambda x: x
profile = {
'profile': 'pardus',
'save_filter': 'PARDUS-IN-USER PARDUS-OUT-USER',
'save_mangle': '',
'save_nat': '',
'save_raw': ''
}
filter = {
# Incoming Connections
# All incoming connections are rejected by default
'inMail': (
['-A PARDUS-IN-USER -p tcp -m multiport --dports 25,110 -j ACCEPT'],
i18n('Mail services'),
'25, 110',
),
'inWeb': (
['-A PARDUS-IN-USER -p tcp -m multiport --dports 80,443 -j ACCEPT'],
i18n('Web services'),
'80, 443',
),
'inRemote': (
['-A PARDUS-IN-USER -p tcp -m multiport --dports 22 -j ACCEPT'],
i18n('Remote login service'),
'22',
),
'inWFS': (
[
'-A PARDUS-IN-USER -p udp -m multiport --dports 137:139,445 -j ACCEPT',
'-A PARDUS-IN-USER -p tcp -m multiport --dports 137:139,445 -j ACCEPT',
'-A PARDUS-IN-USER -p udp -m multiport --sports 137:139,445 -j ACCEPT',
],
i18n('Windows file sharing service'),
'137, 138, 139, 445',
),
'inIRC': (
['-A PARDUS-IN-USER -p tcp -m multiport --dports 6667:6669 -j ACCEPT'],
i18n('Internet relay chat service'),
'6667, 6668, 6669',
),
'inFTP': (
['-A PARDUS-IN-USER -p tcp -m multiport --dports 21 -j ACCEPT'],
i18n('File transfer service'),
'21',
),
}
|
20,585 | e11826b31482b30f54669c7f184931174aeda577 | # Jacob Miller
# 4/19/2021
# Project Code: Python's Built in Multiprogramming Library Messaging Function
import multiprocessing as mp
import cProfile
import os
def processMessage(parentProcess):
    """Print a greeting from this worker process addressed to its parent.

    parentProcess: the parent process's PID (any printable value works).
    """
    child_pid = os.getpid()
    print('Hello {}, this is {}.'.format(parentProcess, child_pid))
def doWork():
    """Spawn five worker processes that each greet this (parent) process.

    Blocks until every worker has finished, then prints 'Done'.
    """
    processes = []
    for _ in range(5):
        # Each worker receives the parent's PID as its only argument.
        p = mp.Process(target=processMessage, args=[os.getpid()])
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
    print('Done')
def main():
    """Profile the multiprocessing demo; cProfile prints stats to stdout."""
    cProfile.run('doWork()')
if __name__ == '__main__':
    main()
20,586 | 7183b16e4a340a20fb64119a23bbaebd28e0d40d | from pyowm import OWM
from pyowm.utils.config import get_default_config
from pyowm.commons.exceptions import NotFoundError
def check_weather():
    """Prompt for a city name and print its current weather (Russian locale).

    Re-prompts until a city known to OpenWeatherMap is entered.  Network
    errors other than NotFoundError propagate to the caller.
    """
    while True:
        try:
            place = input('В каком городе?: ')
            config_dict = get_default_config()
            config_dict['language'] = 'ru'  # localized weather descriptions
            # NOTE(review): API key is hard-coded; consider moving it to an
            # environment variable.
            owm = OWM('267f53ba6ee5620175b02e58315c4a56', config_dict)
            mgr = owm.weather_manager()
            observation = mgr.weather_at_place(place)
            w = observation.weather
            print(f'В городе {place} температура сейчас {w.temperature("celsius")["temp"]}°\n'
                  f'Погода: {w.detailed_status}, влажность {w.humidity}%, скорость ветра {w.wind()["speed"]}м/c')
            break
        except NotFoundError:
            # BUGFIX: the original recursed into check_weather() here, which
            # grew the call stack on every miss and -- once the inner call
            # succeeded -- fell back into this loop and prompted again.
            # Simply looping to re-prompt is sufficient.
            print("Город не найден")
|
20,587 | 02736cbbd0edffd92cf2ebf65559b35f0a0e137c | # Generated by Django 2.2.5 on 2019-11-13 02:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Task.project: cascade-deleting FK with related_name 'project_tasks'."""
    dependencies = [
        ('project_manager', '0003_delete_team'),
    ]
    operations = [
        migrations.AlterField(
            model_name='task',
            name='project',
            # default=1 backfills existing rows with the project whose pk is 1.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='project_tasks', to='project_manager.Project'),
        ),
    ]
|
20,588 | 4022ba2a62f3975073f9537cdedd7ddb3fcb8574 | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 23 17:18:43 2015
@author: DFW
"""
import pandas as pd
import numpy as np
import os
###
Gid2Taxid = {}
Taxid2Gid = {}
for line in open('../Result0923/Gid2Taxid.txt'):
line = line.strip().split('\t')
Gid2Taxid[line[0]] = line[1]
Taxid2Gid[line[1]] = Taxid2Gid.get(line[1], set())
Taxid2Gid[line[1]].add(line[0])
print len(Gid2Taxid), len(Taxid2Gid)
###
Gid2Type = {}
for line in open('../Result0923/Gid2Type.txt'):
line = line.strip().split('\t')
Gid2Type[line[0]] = line[1]
print len(Gid2Type)
###
barcodeTypes = ['ITS', 'rbcL', 'matK', 'trnL-trnF', 'psbA-trnH',
'trnL', 'ITS2', 'ndhF', 'trnH-psbA', 'rpoC1']
###
genuspath = '../Result0923/Sepcies/'
genusfiles = os.listdir(genuspath)
#print genusfiles
##
print genusfiles[0]
genus = pd.read_excel(genuspath+genusfiles[0], 0, header=0)
print genus
SpeciesCount = {} ### {type => set(taxid)}
SeqCount = {} ### {taxid => {type => set(gid)}}
for i in genus.index:
Taxid = str(genus.loc[i, 'ID'])
print i, Taxid, len(Taxid2Gid.get(Taxid, set()))
SeqCount[Taxid] = {}
for gid in Taxid2Gid.get(Taxid, set()):
SeqCount[Taxid][Gid2Type[gid]] = SeqCount[Taxid].get(Gid2Type[gid], set())
SeqCount[Taxid][Gid2Type[gid]].add(gid)
SpeciesCount[Gid2Type[gid]] = SpeciesCount.get(Gid2Type[gid], set())
SpeciesCount[Gid2Type[gid]].add(Taxid)
if Gid2Type[gid] in barcodeTypes:
print '', gid, Gid2Type[gid]
print SeqCount
print SpeciesCount
###
|
20,589 | 614dc5c039f7449a18c9a864a65c35f687fe6713 | import os
from os.path import dirname, abspath
import glob
from donkeybarn import fileio
import json
from PIL import Image, ImageDraw
import numpy as np
#TODO: Fatten the labels datastructure in the dataset so that they are accessable from
# dataset.label[0]
# dataset.img[0]
# dataset[0, ['img']]
# merged = dataset1 + dataset2
class BaseDataset:
    """Base container describing an image dataset on disk.

    Subclasses (see LoadedDataset in this module) populate these per instance.
    """
    # Root directory of the dataset on disk.
    base_dir = None
    # Paths of the dataset's image files.
    img_paths = []
    # Optional per-image annotations (loaded from labels.json when present).
    labels = None
class LoadedDataset(BaseDataset):
url = None
file_format = ".tar.gz"
@classmethod
def load(cls, data_dir=None):
if data_dir is None:
data_dir = cls.get_data_dir()
if not os.path.exists(data_dir):
os.makedirs(data_dir)
filename = cls.__name__ + cls.file_format
filepath = os.path.join(data_dir, filename)
fileio.download_file(cls.url, filepath)
extracted_folder = fileio.extract_file(filepath, data_dir)
obj = cls(extracted_folder)
return obj
@classmethod
def get_data_dir(cls, ):
from donkeybarn import BARN_DATA_DIR
return BARN_DATA_DIR
def __init__(self, base_dir):
self.base_dir = base_dir
self.img_paths = glob.glob(os.path.join(base_dir, '*.jpg'))
self.img_paths = sorted(self.img_paths, key=lambda x: int(os.path.split(x)[-1].split('_')[0]))
try:
labels_path = os.path.join(base_dir, 'labels.json')
self.labels = LabelBoxData(labels_path)
self.labels.gen_external_key_index()
except FileNotFoundError as e:
print('could not filed labels.json in {}, not loading labels.'.format(base_dir))
class LabelBoxData:
def __init__(self, json_path):
with open(json_path, 'r') as f:
self.data = json.load(f)
sorted(self.data, key=lambda x: int(x["External ID"].split('_')[0]))
def gen_external_key_index(self):
self.key_index = {}
for i, rec in enumerate(self.data):
self.key_index[rec['External ID']] = i
def get_mask_from_key(self, key, label_name):
ix = self.key_index[key]
rec = self.data[ix]
mask = self.create_mask_from_label(label_name, rec)
return mask
@staticmethod
def create_mask_from_label(label_name, rec, img_size=(120, 160)):
label_data = rec['Label'][label_name]
mask = Image.fromarray(np.zeros(img_size), mode='L')
for geometry in label_data:
poly = create_polygon_tuple(geometry['geometry'], img_size[0])
ImageDraw.Draw(mask).polygon(poly, fill=255)
return mask
def create_polygon_tuple(pts_list, img_height):
    """Convert LabelBox point dicts to PIL (x, y) tuples, flipping the y axis."""
    return [(pt['x'], img_height - pt['y']) for pt in pts_list]
class Donkey2CalibrationImages(LoadedDataset):
    """Downloadable set of Donkey2 camera-calibration images."""
    url = "https://drive.google.com/uc?export=download&id=1yk758anknZqAwPBcrWa4vGZ_3Xgh1gmU"
    file_format = ".tar.gz"
    # Checkerboard dimensions for calibration; presumably inner-corner counts
    # per row/column -- TODO confirm against the calibration code.
    checkerboard_size = (7, 9)
# BUG FIX: these subclasses assigned to "format", but LoadedDataset.load()
# reads cls.file_format, so the attribute was dead — it only worked because
# the value matched the inherited default.  Renamed to file_format.
class AmericanSteelLabeled(LoadedDataset):
    """Labeled American Steel driving dataset archive."""
    url = 'https://drive.google.com/uc?export=download&id=1GKkB_xMgOoUPf0J3OGzj6wtke1eqPU0Q'
    file_format = ".tar.gz"


class DriveaiLabeled(LoadedDataset):
    """Labeled Drive.ai dataset archive."""
    url = 'https://drive.google.com/uc?export=download&id=10R8VOHyzd0QD0zNLzLel5Mg6EWFOKSMX'
    file_format = ".tar.gz"


class MakerFaireLabeled(LoadedDataset):
    """Labeled Maker Faire dataset archive."""
    url = 'https://drive.google.com/uc?export=download&id=1ohTZYbuQwxLb63uZTajlDNn8cJmoG_az'
    file_format = ".tar.gz"


class AWSTrack(LoadedDataset):
    """AWS track dataset archive."""
    url = 'https://drive.google.com/uc?export=download&id=1h1zu6VN_txhyb86hHx3K6COXs_UqMIRL'
    file_format = ".tar.gz"
if __name__ == "__main__":
    # Smoke test: downloads the calibration archive (requires network access)
    # and prints the discovered image paths.
    obj = Donkey2CalibrationImages.load()
    print('test')
    print(obj.img_paths)
20,590 | ac984204afe658bb22c6ebcd362b80216ae70fe9 | #Instructions:https://adventofcode.com/2020/day/7
import os
import re
def get_data(filename):
    """Read `filename` (relative to this script's directory) and return its lines."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    input_file = os.path.join(script_dir, filename)
    with open(input_file, 'r') as filehandle:
        contents = filehandle.read()
    return contents.split('\n')
def parse_rules(data):
    """Parse puzzle rules into {bag_name: [(count, inner_bag_name), ...]}.

    A bag that "contains no other bags" maps to an empty list; empty input
    lines are skipped.

    BUG FIX: the original parsed the count as the first character only
    (int(s[0])), which breaks for counts of 10 or more.  Use a regex that
    captures the whole number instead.
    """
    rules_bags = {}
    for rule in data:
        if rule == "":
            continue
        main_bag = rule.split('bags contain')[0].strip()
        contents = rule.split('bags contain')[1]
        # Each inner bag looks like "<count> <adjective> <color> bag(s)";
        # "no other bags" contains no digit and so yields no matches.
        bags = [(int(count), name)
                for count, name in re.findall(r'(\d+) ([a-z]+ [a-z]+) bags?', contents)]
        rules_bags[main_bag] = bags
    return rules_bags
#data=get_data("test.txt")
#rules_bags=parse_rules(data)
#print(rules_bags)
def find_parents(rules_bags, child_bag):
    """Fixed-point search for every bag that transitively contains `child_bag`.

    Repeatedly sweeps the rules, adding any bag whose contents include either
    `child_bag` or an already-found parent, until no new parent appears.
    """
    parents = set()
    while True:
        before = len(parents)
        for parent, children in rules_bags.items():
            if any(name == child_bag or name in parents for _, name in children):
                parents.add(parent)
        if len(parents) == before:
            return parents
def bag_contains(target, bag, rules=None):
    """Return True if `bag` (a list of (count, name) pairs) transitively
    contains a bag named `target`.

    Args:
        rules: mapping of bag name -> contents used for recursion; defaults
            to the module-level `data` for backward compatibility with the
            original's implicit global usage.
    """
    if rules is None:
        rules = data  # fall back to the module-level rules dict
    if not bag:
        # No children at all (None or empty list).
        return False
    if any(name == target for _, name in bag):
        return True
    # Otherwise recurse into every child bag.
    return any(bag_contains(target, rules.get(name), rules) for _, name in bag)


def count_parents(data, target):
    """Count the bags in `data` whose contents transitively include `target`."""
    parents_count = 0
    for bag in data.values():
        if bag_contains(target, bag, data):
            parents_count += 1
    return parents_count
# Example shapes:
#   data = {'shiny gold': [(10, 'dark olive'), (2, 'vibrant plum')], ...}
#   bag  = [(1, 'dark olive'), (2, 'vibrant plum')]
def bag_children(target, rules=None):
    """Count the total number of bags required inside a `target` bag.

    Args:
        rules: mapping of bag name -> contents; defaults to the module-level
            `data` for backward compatibility with the original's global use.
    """
    if rules is None:
        rules = data
    children = rules.get(target)
    if children is None:
        return 0
    # Each child contributes itself plus everything nested inside it.
    return sum(count + count * bag_children(name, rules) for count, name in children)
# Script entry: solve both parts of Advent of Code 2020 day 7.
target="shiny gold"
data=parse_rules(get_data("input.txt"))
print(count_parents(data,"shiny gold"),"bag colors can eventually contain at least one shiny gold bag")
print(bag_children(target),"individual bags are required inside single shiny gold bag")
|
20,591 | 76fa32bf76d33a6926bfae23115684b0ce98ec15 | # -*-coding:utf-8-*-
import requests

# Query Baidu for "IP" through an HTTP proxy to check the visible address.
url = 'https://www.baidu.com/s?wd=IP'
headers = {
    'User-Agent': 'Mozilla / 5.0(Windows NT 10.0;WOW64)AppleWebKit/537.36(KHTML,likeGecko)Chrome/89.0.4389.90Safari/537.36'
}
# Free proxy endpoint: 25.122.144.141:9999
res_text = requests.get(url=url,headers=headers,proxies={'http':'25.122.144.141:9999'}).text
print(res_text)
# BUG FIX: the original re-encoded the already-decoded text as GBK and then
# decoded those bytes as UTF-8, which raises UnicodeDecodeError (or mangles
# the page).  Write the text out as UTF-8 directly instead.
with open('ip.html','w',encoding='utf-8') as f:
    f.write(res_text)
|
20,592 | 23d93c2f2974291181710e54aa40e70be6e6a394 | """
This file contains the main plugin system for texstats
"""
REGISTRY = {}


def register(name):
    """Class decorator: instantiate the decorated Plugin class and store the
    instance in REGISTRY under `name`.  The class itself is returned
    unchanged so it can still be referenced normally.

    Usage::

        @register('name')
        class Foo(Plugin):
            ...
    """
    def decorator(cls):
        REGISTRY[name] = cls()
        return cls
    return decorator
class Plugin(object):
    """Base class for texstats plugins.

    Subclasses override any of the three hooks below; every default is a
    no-op, so a plugin only implements what it needs.
    """

    def options(self, parser):
        """Define command line arguments on `parser`."""
        pass

    def configure(self, args):
        """Store parsed command line arguments for execution."""
        pass

    def execute(self):
        """Execute the plugin's code."""
        pass
|
20,593 | 291271505ee97c016b3be12a2b8eca9887124e8d | import matplotlib as mpl
mpl.use("Agg")  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pandas as pd
from flowcat import utils, io_functions

# Load per-dataset quantization errors; each JSON holds (label, tube, value)
# triples for one SOM configuration, keyed here by its directory name.
input_data = {
    p.name: io_functions.load_json(p / "quantization_error.json")
    for p in
    map(utils.URLPath, [
        "output/4-flowsom-cmp/quantization-error/flowsom-10",
        "output/4-flowsom-cmp/quantization-error/flowcat-refit-s10",
        "output/4-flowsom-cmp/quantization-error/flowsom-32",
        "output/4-flowsom-cmp/quantization-error/flowcat-refit-s32",
    ])
}
# Flatten into one record per (dataset, label, tube); algorithm and grid size
# are parsed from the directory name (e.g. "flowcat-refit-s32" -> "flowcat", 32).
input_data = [
    {
        "dataset": k,
        "id": label,
        "tube": tube,
        "qe": value,
        "algo": k.split("-")[0],
        "size": int(k.split("-")[-1].lstrip("s")),
    }
    for k, vv in input_data.items() for label, tube, value in vv
]
data = pd.DataFrame(input_data)

# Boxplot of quantization error by grid size, split by algorithm.
sns.set_style("white")
sns.boxplot(x="size", y="qe", hue="algo", data=data)
plt.ylabel("Mean quantization error")
plt.xlabel("Grid size of SOM")
plt.savefig("output/4-flowsom-cmp/quantization_error_boxplot.png")
plt.close()
|
20,594 | 9f8d8663285610ee2bb178ec6ab1553d6726abd0 | __author__ = 'marafi'
# This command is used to construct an element and add it to the Domain.
# element eleType? arg1? ...
#
# The type of element created and the additional arguments required depends on the eleType? provided in the command.
#
# NOTE:
# The valid queries to any element when creating an ElementRecorder are documented in the NOTES section for each element.
#
# The following contain information about eleType? and the args required for each of the available element types:
#
# Zero-Length Elements
# zeroLength Element
# zeroLengthND Element
# zeroLengthSection Element
# CoupledZeroLength Element
# zeroLengthContact Element
# zeroLengthContactNTS2D
# zeroLengthInterface2D
# zeroLengthImpact3D
# Truss Elements
# Truss Element
# Corotational Truss Element
# Beam-Column Elements
# Elastic Beam Column Element
# Elastic Beam Column Element with Stiffness Modifiers
# Elastic Timoshenko Beam Column Element
# Beam With Hinges Element
# Displacement-Based Beam-Column Element
# Force-Based Beam-Column Element
# Flexure-Shear Interaction Displacement-Based Beam-Column Element
# Joint Elements
# BeamColumnJoint Element
# ElasticTubularJoint Element
# Joint2D Element
# Link Elements
# Two Node Link Element
# Bearing Elements
# Elastomeric Bearing (Plasticity) Element
# Elastomeric Bearing (Bouc-Wen) Element
# Flat Slider Bearing Element
# Single Friction Pendulum Bearing Element
# TFP Bearing
# Triple Friction Pendulum Element
# MultipleShearSpring Element
# KikuchiBearing Element
# YamamotoBiaxialHDR Element
# ElastomericX
# LeadRubberX
# HDR
# RJ-Watson EQS Bearing Element
# Quadrilateral Elements
# Quad Element
# Shell Element
# ShellNL
# Bbar Plane Strain Quadrilateral Element
# Enhanced Strain Quadrilateral Element
# SSPquad Element
# Triangular Elements
# Tri31 Element
# Brick Elements
# Standard Brick Element
# Bbar Brick Element
# Twenty Node Brick Element
# Twenty Seven Node Brick Element
# SSPbrick Element
# u-p Elements
# UC San Diego u-p element (saturated soil)
# Four Node Quad u-p Element
# Brick u-p Element
# bbarQuad u-p Element
# bbarBrick u-p Element
# Nine Four Node Quad u-p Element
# Twenty Eight Node Brick u-p Element
# Twenty Node Brick u-p Element
# Brick Large Displacement u-p Element
# SSPquadUP Element
# SSPbrickUP Element
# Misc.
# ShallowFoundationGen
# SurfaceLoad Element
# Contact Elements
# SimpleContact2D Element
# SimpleContact3D Element
# BeamContact2D Element
# BeamContact3D Element
# BeamEndContact3D Element
# zeroLengthImpact3D
from OpenSeesAPI.OpenSees import OpenSees
class Truss(OpenSees):
    """
    One way is to specify an area and a UniaxialMaterial identifier:
    element truss $eleTag $iNode $jNode $A $matTag <-rho $rho> <-cMass $cFlag> <-doRayleigh $rFlag>
    the other is to specify a Section identifier:
    element trussSection $eleTag $iNode $jNode $secTag <-rho $rho> <-cMass $cFlag> <-doRayleigh $rFlag>
    $eleTag unique element object tag
    $iNode $jNode   end nodes
    $A  cross-sectional area of element
    $matTag tag associated with previously-defined UniaxialMaterial
    $secTag tag associated with previously-defined Section
    $rho    mass per unit length, optional, default = 0.0
    $cFlag  consistent mass flag, optional, default = 0
    $rFlag  Rayleigh damping flag, optional, default = 0
    """
    def __init__(self, id, NodeI, NodeJ, Area, Material, **kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._Area = Area
        self._Material = Material
        # BUG FIX / consistency: every other element class in this module
        # formats self._id into the command string; the original used self.id
        # here, which depends on the (unseen) OpenSees base exposing `id`.
        self._CommandLine = 'element truss %d %s %s %f %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._Area, self._Material.id)
        # NOTE: kwargs are applied after _CommandLine, matching the original
        # order (a kwarg may deliberately overwrite _CommandLine).
        self.__dict__.update(kwargs)
class ZeroLength(OpenSees):
    """
    element zeroLength $eleTag $iNode $jNode -mat $matTag1 $matTag2 ... -dir $dir1 $dir2 ...<-doRayleigh $rFlag> <-orient $x1 $x2 $x3 $yp1 $yp2 $yp3>
    $eleTag unique element object tag
    $iNode $jNode   end nodes
    $matTag1 $matTag2 ... tags associated with previously-defined UniaxialMaterials
    $dir1 $dir2 ... material directions:
    1,2,3 - translation along local x,y,z axes, respectively;
    4,5,6 - rotation about local x,y,z axes, respectively
    $x1 $x2 $x3 vector components in global coordinates defining local x-axis (optional)
    $yp1 $yp2 $yp3  vector components in global coordinates defining vector yp which lies in the local x-y plane for the element. (optional)
    $rFlag  optional, default = 0
    rFlag = 0 NO RAYLEIGH DAMPING (default)
    rFlag = 1 include rayleigh damping
    """
    def __init__(self, id, NodeI, NodeJ, MaterialList, DOFList, OrientDirection=None, **kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._MaterialList = MaterialList
        self._DOFList = DOFList
        self._OrientDirection = OrientDirection
        self.__dict__.update(kwargs)
        # Optional -orient clause: six components (local x-axis, then yp).
        OD = ''
        if OrientDirection != None:
            OD = '-orient %f %f %f %f %f %f'%tuple(OrientDirection)
        # -mat lists each material's tag, -dir each DOF, space-separated via
        # the leading-space join idiom.
        self._CommandLine = 'element zeroLength %d %d %d -mat %s -dir %s %s'%(self._id, self._NodeI.id, self._NodeJ.id, ''.join([' %d'%s.id for s in MaterialList]), ''.join([' %d'%s for s in DOFList]), OD)
class ZeroLengthSection(OpenSees):
    """
    This command is used to construct a zero length element object, which is defined by two nodes at the same location. The nodes are connected by a single section object to represent the force-deformation relationship for the element.
    element zeroLengthSection $eleTag $iNode $jNode $secTag <-orient $x1 $x2 $x3 $yp1 $yp2 $yp3> <-doRayleigh $rFlag>
    $eleTag unique element object tag
    $iNode $jNode   end nodes
    $secTag tag associated with previously-defined Section object
    $x1 $x2 $x3 vector components in global coordinates defining local x-axis (optional)
    $yp1 $yp2 $yp3  vector components in global coordinates defining vector yp which lies in the local x-y plane for the element. (optional)
    $rFlag  optional, default = 1
    rFlag = 0 no Rayleigh damping
    rFlag = 1 include Rayleigh damping (default)
    """
    def __init__(self, id, NodeI, NodeJ, Section, OrientDirection=None, Optional='',**kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._Section = Section
        self._OrientDirection = OrientDirection
        # Extra raw text appended verbatim to the command (e.g. -doRayleigh).
        self._Optional = Optional
        self.__dict__.update(kwargs)
        # Optional -orient clause: six components (local x-axis, then yp).
        OD = ''
        if OrientDirection != None:
            OD = '-orient %f %f %f %f %f %f'%tuple(OrientDirection)
        self._CommandLine = 'element zeroLengthSection %d %d %d %d %s %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._Section.id, OD, self._Optional)
class ElasticBeamColumn(OpenSees):
    """
    For a two-dimensional problem:
    element elasticBeamColumn $eleTag $iNode $jNode $A $E $Iz $transfTag <-mass $massDens> <-cMass>
    For a three-dimensional problem:
    element elasticBeamColumn $eleTag $iNode $jNode $A $E $G $J $Iy $Iz $transfTag <-mass $massDens> <-cMass>
    $eleTag unique element object tag
    $iNode $jNode   end nodes
    $A  cross-sectional area of element
    $E  Young's Modulus
    $G  Shear Modulus
    $J  torsional moment of inertia of cross section
    $Iz second moment of area about the local z-axis
    $Iy second moment of area about the local y-axis
    $transfTag  identifier for previously-defined coordinate-transformation (CrdTransf) object
    $massDens   element mass per unit length (optional, default = 0.0)
    -cMass  to form consistent mass matrix (optional, default = lumped mass matrix)
    """
    # IMPROVEMENT: G, J and Iy now default to None for the 2D form; callers
    # that passed them explicitly (including explicit None) are unaffected.
    def __init__(self, id, NodeI, NodeJ, A, E, Iz, TransTag, G=None, J=None, Iy=None, Mass=None, Option='', **kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._A = A
        self._E = E
        self._Iz = Iz
        self._TransTag = TransTag
        self._G = G
        self._J = J
        self._Iy = Iy
        self._Mass = Mass
        self._Option = Option
        self.__dict__.update(kwargs)
        if self._Mass is not None:
            self._EndCommand = '-mass %f %s'%(self._Mass,self._Option)
        else:
            self._EndCommand = self._Option
        if self._G is None:
            # 2D form: A, E, Iz only.
            self._CommandLine = 'element elasticBeamColumn %d %d %d %f %f %f %d %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._A, self._E, self._Iz, self._TransTag.id, self._EndCommand)
        else:
            # 3D form adds G, J and Iy.
            self._CommandLine = 'element elasticBeamColumn %d %d %d %f %f %f %f %f %f %d %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._A, self._E, self._G, self._J, self._Iy, self._Iz, self._TransTag.id, self._EndCommand)
class DispBeamColumn(OpenSees):
    """
    element dispBeamColumn $eleTag $iNode $jNode $numIntgrPts $secTag $transfTag <-mass $massDens> <-cMass> <-integration $intType>
    To change the sections along the element length, the following form of command may be used:
    element dispBeamColumn $eleTag $iNode $jNode $numIntgrPts -sections $secTag1 $secTag2 ... $transfTag <-mass $massDens> <-cMass> <-integration $intType>
    $eleTag unique element object tag
    $iNode $jNode   end nodes
    $numIntgrPts    number of integration points along the element.
    $secTag identifier for previously-defined section object
    $secTag1 $secTag2 ... $numIntgrPts identifiers of previously-defined section object
    $transfTag  identifier for previously-defined coordinate-transformation (CrdTransf) object
    $massDens   element mass density (per unit length), from which a lumped-mass matrix is formed (optional, default = 0.0)
    -cMass  to form consistent mass matrix (optional, default = lumped mass matrix)
    $intType    numerical integration type, options are Lobotto, Legendre, Radau, NewtonCotes, Trapezoidal (optional, default = Legendre)
    """
    def __init__(self, id, NodeI, NodeJ, numIntgrPts, Section, GeomTrans, Mass=None, Optional='', **kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._numIntgrPts = numIntgrPts
        self._Section = Section
        self._GeoTrans = GeomTrans
        self._Mass = Mass
        self._Optional = Optional
        self.__dict__.update(kwargs)
        if self._Mass is not None:
            self._EndCommand = '-mass %f %s'%(self._Mass,self._Optional)
        else:
            self._EndCommand = self._Optional
        # BUG FIX: the original tested `if list != self._Section:`, comparing
        # the `list` *type* with the instance — always true unless Section was
        # the type object itself, so the multi-section branch was unreachable.
        if not isinstance(self._Section, list):
            self._CommandLine = 'element dispBeamColumn %d %d %d %d %d %d %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._numIntgrPts, self._Section.id, self._GeoTrans.id, self._EndCommand)
        else:
            # NOTE(review): keyword kept as "-section" from the original; the
            # docstring says "-sections" — confirm against the OpenSees parser.
            self._CommandLine = 'element dispBeamColumn %d %d %d %d -section %s %d %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._numIntgrPts, ''.join([' %d'%x.id for x in self._Section]), self._GeoTrans.id, self._EndCommand)
class ForceBeamColumn(OpenSees):
    """
    element forceBeamColumn $eleTag $iNode $jNode $transfTag "IntegrationType arg1 arg2 ..." <-mass $massDens> <-iter $maxIters $tol>
    $eleTag unique element object tag
    $iNode $jNode   end nodes
    $transfTag  identifier for previously-defined coordinate-transformation (CrdTransf) object
    IntegrationType arg1 arg2 ...   specifies locations and weights of integration points and their associated section force-deformation models (see File:IntegrationTypes.pdf)
    $massDens   element mass density (per unit length), from which a lumped-mass matrix is formed (optional, default=0.0)
    $maxIters   maximum number of iterations to undertake to satisfy element compatibility (optional, default=10)
    $tol    tolerance for satisfaction of element compatibility (optional, default=10-12)
    Original command that assumes Gauss-Lobatto integration with a copy of the same section force-deformation model at each integration point:
    element forceBeamColumn $eleTag $iNode $jNode $numIntgrPts $secTag $transfTag <-mass $massDens> <-iter $maxIters $tol> <-integration $intType>
    Alternative command (kept for backward compatability):
    element nonlinearBeamColumn $eleTag $iNode $jNode $numIntgrPts $secTag $transfTag <-mass $massDens> <-iter $maxIters $tol> <-integration $intType>
    $intType    numerical integration type, options are Lobatto, Legendre, Radau, NewtonCotes, Trapezoidal (optional, default= Lobatto)
    """
    def __init__(self, id, NodeI, NodeJ, GeomTrans, IntegrationType, Mass=None, Optional='', **kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._GeomTrans = GeomTrans
        self._IntegrationType = IntegrationType # Assume to be a string of args
        self._Mass = Mass
        # Extra raw text appended verbatim to the command (e.g. -iter ...).
        self._Optional = Optional
        self.__dict__.update(kwargs)
        if self._Mass != None:
            self._EndCommand = '-mass %f %s'%(self._Mass,self._Optional)
        else:
            self._EndCommand = ' %s'%(self._Optional)
        self._CommandLine = 'element forceBeamColumn %d %d %d %d %s %s'%(self._id, self._NodeI.id, self._NodeJ.id, self._GeomTrans.id, self._IntegrationType, self._EndCommand)
class Joint2D(OpenSees):
    """
    element Joint2D $eleTag $Nd1 $Nd2 $Nd3 $Nd4 $NdC <$Mat1 $Mat2 $Mat3 $Mat4> $MatC $LrgDspTag
    $eleTag unique element object tag
    $Nd1 $Nd2 $Nd3 $Nd4 integer tags indicating four external nodes where the joint element is connected to the adjoining beam-column element
    $NdC    integer tags indicating the central node of beam-column joint (the tag is used to generate the internal node, thus, the node should not exist in the domain or be used by any other node)
    $Mat1..$Mat4    uniaxial material tags for the interface rotational springs at nodes 1-4. Use a zero tag to indicate a beam-column element rigidly framed to the joint. (optional)
    $MatC   uniaxial material tag for rotational spring of the central node that describes shear panel behavior
    $LrgDspTag  an integer indicating the flag for considering large deformations:
    0 - for small deformations and constant geometry
    1 - for large deformations and time varying geometry
    """
    def __init__(self, id, NodeI, NodeJ, NodeK, NodeL, NodeCTag, MatC, LargeDispTag, MatIJKL=None, **kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._NodeK = NodeK
        self._NodeL = NodeL
        self._NodeCTag = NodeCTag
        self._MatC = MatC
        self._LargeDispTag = LargeDispTag
        # Optional sequence of four interface spring materials (nodes 1-4).
        self._MatIJKL = MatIJKL
        self.__dict__.update(kwargs)
        # Idiom fix: `is None` instead of `== None`.
        if MatIJKL is None:
            # Short form: no interface springs, only the central material.
            self._CommandLine = 'element Joint2D %d %d %d %d %d %d %d %d'%(self._id, self._NodeI.id, self._NodeJ.id, self._NodeK.id, self._NodeL.id, self._NodeCTag, self._MatC.id, self._LargeDispTag)
        else:
            # Long form: four interface materials, then the central material.
            self._CommandLine = 'element Joint2D %d %d %d %d %d %d %d %d %d %d %d %d'%(self._id, self._NodeI.id, self._NodeJ.id, self._NodeK.id, self._NodeL.id, self._NodeCTag, self._MatIJKL[0].id, self._MatIJKL[1].id, self._MatIJKL[2].id, self._MatIJKL[3].id,self._MatC.id, self._LargeDispTag)
class ShellMITC4(OpenSees):
    """Four-node MITC4 shell element:
    element ShellMITC4 $eleTag $n1 $n2 $n3 $n4 $secTag
    """
    def __init__(self, id, NodeI, NodeJ, NodeK, NodeL, Section, **kwargs):
        self._id = id
        self._NodeI = NodeI
        self._NodeJ = NodeJ
        self._NodeK = NodeK
        self._NodeL = NodeL
        self._Section = Section
        self.__dict__.update(kwargs)
        tags = (self._id, self._NodeI.id, self._NodeJ.id,
                self._NodeK.id, self._NodeL.id, self._Section.id)
        self._CommandLine = 'element ShellMITC4 ' + ' '.join('%d' % t for t in tags)
|
def gen():
    """Yield the pseudo-random stream s -> s^2 mod 50515093, emitting s mod 500."""
    state = 290797
    while True:
        state = state * state % 50515093
        yield state % 500
def lcm(a, b):
    """Return the least common multiple of a and b.

    BUG FIX: the original returned a*b, which equals the LCM only when
    gcd(a, b) == 1.
    """
    from math import gcd  # local import: this script has no import section
    if a == 0 or b == 0:
        return 0
    return abs(a * b) // gcd(a, b)
def main(a):
    """Count distinct intersection points of `a` random lines from gen().

    Each line is built from two sample points (x1,y1)-(x2,y2); vertical
    lines (d == 0) are skipped, matching the original.

    Fixes over the original (Python 2) version:
      * g.next() and the print statement ported to Python 3;
      * BUG FIX: the intersection's y-coordinate was computed from the
        *mutated* coefficients and the stale loop variable `x` instead of
        x3; it now uses line 1's original slope/intercept at x3.
    """
    g = gen()
    lines = []
    points = set()
    for _ in range(a):
        x1 = next(g)
        y1 = next(g)
        x2 = next(g)
        y2 = next(g)
        n = y2 - y1
        d = x2 - x1
        if d == 0:
            continue  # skip vertical lines
        b = y1 - n * 1. / d * x1
        lines.append((n, d, b))
    for i in range(len(lines)):
        n1, d1, b1 = lines[i]
        for j in range(i + 1, len(lines)):
            n2, d2, b2 = lines[j]
            denom = n1 * d2 - n2 * d1
            if denom == 0:
                continue  # parallel lines never intersect
            # Same cross-multiplied solution as the original:
            # x = (b2-b1)*d1*d2 / (n1*d2 - n2*d1)
            x3 = (b2 - b1) * d1 * d2 * 1. / denom
            y3 = n1 * 1. / d1 * x3 + b1
            points.add((x3, y3))
    print(len(points))


if __name__ == "__main__":
    main(100)
|
20,596 | 211a982a8dcdb1e92fd62e3442a0cbee438f1c2e | '''
http://ororo.tv/en
Copyright (C) 2013 Coolwave
'''
from entertainment.plugnplay.interfaces import MovieIndexer
from entertainment.plugnplay.interfaces import TVShowIndexer
from entertainment.plugnplay.interfaces import MovieSource
from entertainment.plugnplay.interfaces import TVShowSource
from entertainment.plugnplay.interfaces import CustomSettings
from entertainment.plugnplay import Plugin
from entertainment import common
import os
from entertainment.xgoogle.search import GoogleSearch
import xbmc
import xbmcgui
import urllib,urllib2,urlparse,re,datetime,base64,xbmcaddon
class ororo(TVShowSource,CustomSettings):
    """Ororo.tv source plugin: signs in with the configured account and
    scrapes direct stream URLs for movies and TV episodes."""
    implements = [TVShowSource,CustomSettings]
    name = "ororo"
    display_name = "Ororo.tv"
    base_url = 'http://ororo.tv/nl'
    login_url = 'http://ororo.tv/users/sign_in'
    img=''
    default_indexer_enabled = 'false'
    source_enabled_by_default = 'true'
    cookie_file = os.path.join(common.cookies_path, 'ORlogin.cookie')
    icon = common.notify_icon
    # BUG FIX: login() referenced MAX_REDIRECT, which was never defined
    # anywhere, so the redirect branch raised NameError.  Cap redirects here.
    MAX_REDIRECT = 5

    def __init__(self):
        # Build the XML settings page (account email/password) for the addon.
        xml = '<settings>\n'
        xml += '<category label="Account">\n'
        xml += '<setting id="tv_user" type="text" label="Email" default="anonxbmc@gmail.com" />\n'
        xml += '<setting id="tv_pwd" type="text" option="hidden" label="Password" default="anonxbmc" />'
        xml += '</category>\n'
        xml += '</settings>\n'
        self.CreateSettings(self.name, self.display_name, xml)

    def GetFileHosts(self, url, list, lock, message_queue):
        """Scrape <source> tags from a playback page and register each
        stream URL (with the User-Agent/Cookie suffix Kodi expects)."""
        import re
        from entertainment.net import Net
        net = Net(cached=False,user_agent='Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25')
        net.set_cookies(self.cookie_file)
        content = net.http_GET(url).content
        r = "<source src='(.+?)' type='(.+?)'>"
        match = re.compile(r).findall(content)
        for url,res in match:
            url = '%s|User-Agent=%s&Cookie=%s' % (url,urllib.quote_plus('Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.85 Safari/537.36'), urllib.quote_plus('video=true'))
            # mp4 sources are treated as HD, anything else as SD.
            if 'video/mp4' in res:
                quality = 'HD'
            else:
                quality = 'SD'
            self.AddFileHost(list, quality, url,'ORORO.TV')

    def login(self):
        """Sign in to ororo.tv and persist the session cookie file."""
        headers={'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25',
                 'Host':'ororo.tv',
                 'Pragma':'no-cache',
                 'Referer':'http://ororo.tv/en',
                 'Upgrade-Insecure-Requests':'1',
                 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                 'Accept-Encoding':'gzip, deflate, sdch',
                 'Accept-Language':'en-US,en;q=0.8',
                 'Cache-Control':'no-cache',
                 'Connection':'keep-alive'}
        tv_user = self.Settings().get_setting('tv_user')
        tv_pwd = self.Settings().get_setting('tv_pwd')
        from entertainment.net import Net
        net = Net(cached=False,user_agent='Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25')
        # Follow plain-text redirect responses, bounded by MAX_REDIRECT.
        tries = 0
        while True:
            html = net.http_GET('http://ororo.tv/nl/',headers=headers).content
            if html.startswith('http://') and tries < self.MAX_REDIRECT:
                tries += 1
                url = html
            else:
                break
        data = {'user[email]': tv_user, 'user[password]': tv_pwd, 'user[remember_me]': 1}
        html = net.http_POST('http://ororo.tv/en/users/sign_in',data,headers=headers).content
        net.save_cookies(self.cookie_file)

    def GetFileHostsForContent(self, title, name, year, season, episode, type, list, lock, message_queue):
        """Resolve a movie or TV episode to playback pages, then collect hosts."""
        if os.path.exists(common.cookies_path) == False:
            os.makedirs(common.cookies_path)
        import re
        from entertainment.net import Net
        net = Net(cached=False,user_agent='Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X) AppleWebKit/536.26 (KHTML, like Gecko) Version/6.0 Mobile/10A5376e Safari/8536.25')
        title = self.CleanTextForSearch(title)
        name = self.CleanTextForSearch(name)
        main_url=self.base_url
        helper = '%s (%s)' %(name,year)
        if type == 'movies':
            self.login()
            url='http://ororo.tv/nl/movies'
            html = net.http_GET(url).content
            net.save_cookies(self.cookie_file)
            name_lower = common.CreateIdFromString(name)
            # Capture (year, page url, display title) for every listed movie.
            r = '<span class=\'value\'>(\d{4}).*?href="([^"]+)[^>]+>([^<]+)'
            match = re.compile(r,re.DOTALL).findall(html)
            for item_year,item_url,item_title in match:
                item_title=item_title.lower()
                if item_title in name_lower:
                    self.GetFileHosts(item_url, list, lock, message_queue)
        elif type == 'tv_episodes':
            self.login()
            # Show pages are addressed by a hyphenated slug of the show name.
            name_lower = common.CreateIdFromString(name)
            name_lower = name_lower.replace('_','-')
            title_url='http://ororo.tv/en/shows/'+name_lower
            net.set_cookies(self.cookie_file)
            html2 = net.http_GET(title_url).content
            net.save_cookies(self.cookie_file)
            # Episodes are anchored as "#<season>-<episode>".
            r = '%s-%s' % (season, episode)
            match = re.compile('data-href="(.+?)".+?class="episode" href="#(.+?)">').findall(html2)
            for item_url , passer in match:
                item_url='http://ororo.tv/'+item_url
                if r in passer:
                    self.GetFileHosts(item_url, list, lock, message_queue)
|
20,597 | 0f20cda14b914aa161c56f07650da8eb47955d75 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-05-16 09:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mezzanine.core.fields
class Migration(migrations.Migration):
    """Auto-generated: create the HomePage, Signup and Testimonial models."""

    dependencies = [
        ('pages', '0003_auto_20150527_1555'),
        ('club', '0001_initial'),
    ]

    operations = [
        # HomePage extends mezzanine's Page with header fields and an
        # optional featured club.
        migrations.CreateModel(
            name='HomePage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pages.Page')),
                ('content', mezzanine.core.fields.RichTextField(verbose_name='Content')),
                ('header_heading', models.CharField(help_text='The heading of the header.', max_length=200)),
                ('header_subheading', models.CharField(help_text='The heading of the sub-header.', max_length=200)),
                ('header_text', mezzanine.core.fields.RichTextField(verbose_name='Header text.')),
                ('header_link', models.CharField(help_text='Header link.', max_length=200)),
                ('featured_club', models.ForeignKey(blank=True, help_text='Items from a club will be shown on the home page', null=True, on_delete=django.db.models.deletion.CASCADE, to='club.Club')),
            ],
            options={
                'ordering': ('_order',),
            },
            bases=('pages.page', models.Model),
        ),
        # Signup is a Page subclass carrying only a heading.
        migrations.CreateModel(
            name='Signup',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pages.Page')),
                ('heading', models.CharField(help_text='Signup heading', max_length=200)),
            ],
            options={
                'ordering': ('_order',),
            },
            bases=('pages.page',),
        ),
        # Testimonial is a plain model (photo + rich-text quote).
        migrations.CreateModel(
            name='Testimonial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(upload_to='')),
                ('quote', mezzanine.core.fields.RichTextField(verbose_name='Testimonial quote')),
            ],
        ),
    ]
|
20,598 | 4850a63b3368d24c347d17801e7716333a434aae | from guillotina import app_settings
from guillotina.utils import get_current_request
import logging
logger = logging.getLogger("guillotina")

# Google's server-side verification endpoint for reCAPTCHA tokens.
RECAPTCHA_VALIDATION_URL = "https://www.google.com/recaptcha/api/siteverify"
# Request header that carries the client's reCAPTCHA response token.
VALIDATION_HEADER = "X-VALIDATION-G"
class RecaptchaValidator:
    """Validates a reCAPTCHA token taken from the current request's headers."""

    # Not valid to generate a user
    for_validators = ()

    async def validate(self):
        """Return True when the token validates against Google.

        Returns True without a remote call when the configured fake token
        matches (test hook) or when reCAPTCHA is not configured (fail open,
        with a warning).  Returns False when Google rejects the token.
        BUG FIX: a non-JSON response previously returned None via a bare
        `return`; it now returns False explicitly (equivalent for callers
        treating the result as a boolean).
        """
        request = get_current_request()
        token = request.headers.get(VALIDATION_HEADER)
        # Test hook: a configured fake token always validates.
        if token is not None and token == app_settings.get("_fake_recaptcha_"):
            return True
        # Fail open when no private key is configured, but warn loudly.
        if app_settings.get("recaptcha") is None or app_settings["recaptcha"].get("private") is None:
            logger.warning("Validating with recaptcha and no configuration found")
            return True
        import aiohttp

        async with aiohttp.ClientSession() as session:
            async with await session.post(
                RECAPTCHA_VALIDATION_URL,
                data=dict(secret=app_settings["recaptcha"]["private"], response=token),
            ) as resp:
                try:
                    data = await resp.json()
                except Exception:  # pragma: no cover
                    logger.warning("Did not get json response", exc_info=True)
                    return False
                # dict.get replaces the original try/except KeyError -> False.
                return bool(data.get("success", False))
|
20,599 | 37c8a2ccbd3224a6923f4065a16c003dacb7f65b | from django.urls import path
from book.views import test,index,JDlogin,Ifelse_checkorders,Check_orders
urlpatterns = [
    path('test/',test), # smoke test: verify the app responds
    path('index/',index), # function-based view branching on GET vs POST
    path('Center/',JDlogin.as_view()), # class-based view branching on GET vs POST
    path('center/',Ifelse_checkorders.as_view()), # single inheritance: redirect to the login page when viewing orders while logged out
    path('check/',Check_orders.as_view()), # multiple inheritance: logged-out redirect URL becomes accounts/login/?next=/check/
]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.