content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import json
import torch
import ast
from argparse import ArgumentParser
from mlflow.deployments import get_deploy_client
if __name__ == "__main__":
    # CLI entry point: parse MLflow deployment arguments and run a
    # prediction against the configured target (TorchServe by default).
    parser = ArgumentParser(description="Iris Classification Model")  # fixed typo "Classifiation"
    parser.add_argument(
        "--target",
        type=str,
        default="torchserve",
        help="MLflow target (default: torchserve)",
    )
    parser.add_argument(
        "--deployment_name",
        type=str,
        default="iris_classification",
        help="Deployment name (default: iris_classification)",
    )
    parser.add_argument(
        "--input_file_path",
        type=str,
        default="sample.json",
        help="Path to input image for prediction (default: sample.json)",
    )
    parser.add_argument(
        "--mlflow-model-uri",
        type=str,
        default="model",
        help="MLflow model URI",  # fixed stray ')' and casing in help text
    )
    args = parser.parse_args()
    # NOTE(review): `predict` is not defined or imported in this view —
    # confirm it is provided elsewhere in the file before running.
    predict(vars(args))
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
28034,
198,
11748,
6468,
198,
6738,
1822,
29572,
1330,
45751,
46677,
198,
198,
6738,
285,
1652,
9319,
13,
2934,
1420,
902,
1330,
651,
62,
2934,
1420,
62,
16366,
628,
628,
198,
361,
11593,
367... | 2.381313 | 396 |
import torch
from predictor import COCODemo
from maskrcnn_benchmark.config import cfg
COCO_PERSON_INDEX = 1
if __name__ == '__main__':
    # Smoke test: run MaskRCNN detection on a single image and display each
    # detected chip with its mask applied.
    import cv2
    # import numpy as np
    # NOTE(review): `MaskRCNN` is not defined or imported in this view —
    # presumably provided elsewhere in the file; confirm before running.
    maskrcnn = MaskRCNN()
    img_path = '/home/dh/Pictures/studio8-30Nov18/DSC03887.JPG'
    img = cv2.imread(img_path)
    # masks, bboxes =
    chipsandmasks = maskrcnn.get_chips_and_masks(img)
    print(len(chipsandmasks))
    for chip, mask in chipsandmasks:
        # Zero out pixels outside the mask before displaying the chip.
        masked = chip * mask
        cv2.imshow( '', masked )
        cv2.waitKey(0)  # wait for a key press between detections
    # input()
# input() | [
11748,
28034,
198,
6738,
41568,
1330,
327,
4503,
3727,
41903,
198,
6738,
9335,
6015,
20471,
62,
26968,
4102,
13,
11250,
1330,
30218,
70,
198,
198,
34,
4503,
46,
62,
47,
29086,
62,
12115,
6369,
796,
352,
198,
198,
361,
11593,
3672,
834... | 2.189516 | 248 |
#*****************************************************************************#
#* Copyright (c) 2004-2008, SRI International. *#
#* All rights reserved. *#
#* *#
#* Redistribution and use in source and binary forms, with or without *#
#* modification, are permitted provided that the following conditions are *#
#* met: *#
#* * Redistributions of source code must retain the above copyright *#
#* notice, this list of conditions and the following disclaimer. *#
#* * Redistributions in binary form must reproduce the above copyright *#
#* notice, this list of conditions and the following disclaimer in the *#
#* documentation and/or other materials provided with the distribution. *#
#* * Neither the name of SRI International nor the names of its *#
#* contributors may be used to endorse or promote products derived from *#
#* this software without specific prior written permission. *#
#* *#
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS *#
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT *#
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR *#
#* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT *#
#* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *#
#* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *#
#* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *#
#* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY *#
#* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT *#
#* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *#
#* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. *#
#*****************************************************************************#
#* "$Revision:: 129 $" *#
#* "$HeadURL:: https://svn.ai.sri.com/projects/spark/trunk/spark/src/spar#$" *#
#*****************************************************************************#
from __future__ import generators
from spark.internal.version import *
from spark.pylang.implementation import ActImpInt, PredImpInt, FunImpInt
from spark.internal.repr.varbindings import optBZ
from spark.internal.parse.usagefuns import termEvalErr, termEvalEnd
from spark.internal.parse.basicvalues import List
"""
Support for explicit representation and use of SPARK internal bindings and zexpr objects from SPARK language.
DO NOT USE THIS UNLESS YOU HAVE A GOOD UNDERSTANDING OF THE SPARK INTERNALS!
"""
################################################################
# Generic routines for dealing with bindings and zexprs
################################################################
# Idiom for making a non-deterministic function into a SPARK predicate
# rather than a SPARK action.
# A SPARK function must return values that equal each other if the KB
# hasn't changed since the last time it was called. SPARK function
# calls and predicate tests aren't allowed to change the
# KB. Therefore for example, a random number generator could not be
# used as a SPARK function, since if you call it twice in succession
# you *want* it to return different values.
# You could represent it by a SPARK action, but this restricts you
# from using it in term expressions and predicate expressions.
# Instead you can represent it by a predicate with multiple solutions,
# where testing the predicate "picks" one of the many possible
# solutions that it could return. This is fine if you only ever ask for
# one solution to the predicate, but technically, it should return
# *every* possible value if you ask for every solution.
# To get around this, we have a generator that returns one solution,
# and if another is requested, generates an error.
# You also need to ensure that if the predicate is used with the
# (normally output) parameter bound, the implementation should raise
# an exception or test that it is a valid output value, rather than
# generate an output value and test for equality.
# TODO: We should probably implement a variant of pyPredicate for this.
def partialBZ(partial, bindings, zexprs):
    """Derive a (bindings, zexprs) pair from a partial sequence.

    Missing values in *partial* denote output arguments.  Only the first
    argument may be bound; *bindings* and *zexprs* must both be None.
    """
    if partial is None:
        raise LowError("First argument must be bound")
    if bindings is None:
        if zexprs is None:
            return generateBZ(partial)
        raise LowError("Neither the second nor third argument may be bound")
    # NOTE(review): when `bindings` is bound the function falls through and
    # implicitly returns None — confirm whether this case should also raise.
# The following is based on code from the CALO task_manager/tirmodes.py.
# Unlike the applyact action, which can create tframes using the same
# bindings and a modified zexpr (just dropping the first element),
# applyToBZ is more complex. It must use a new bindings and zexpr
# constructed from the given partial structure, and after the chosen
# tframe is executed there must be a separate step to construct the
# output structure. Thus applyToBZ is implemented by a procedure
# that uses a function to generate the bindings/zexpr pair, an action
# to apply the functor of the input structure to this bindings/zexpr
# pair, and then a function to extract the output values from
# bindings/zexpr pair.
| [
2,
17174,
17174,
4557,
35625,
2,
201,
198,
2,
9,
15069,
357,
66,
8,
5472,
12,
11528,
11,
311,
7112,
4037,
13,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
2... | 2.958911 | 1,947 |
#!/usr/bin/env python
# I read s [slab thickness], a [<sig^2>/<sig^2>], and lamcsig [log-normal lamc],
# generate the real log-normal covariance function, and curve fit an exponential
# to that data, yielding lamcw [Gaussian lamcw]. This allows use of exponential
# covariance with fitted correlation length with only minimal error to the
# real covariance.
import numpy as np
from scipy.optimize import curve_fit
# read input: slab thickness s, variance ratio a, log-normal lamc
s,a,lamcsig = np.loadtxt('auxiliary/expfit/s.a.lamcsig.txt')
# Sample positions across the slab; the 1e-7 offset avoids evaluating at 0.
x = np.linspace(0.0000001,0.0000001+s,100)
# NOTE(review): `origfunc` (real log-normal covariance) and `fittingfunc`
# (exponential model) are not defined in this view — confirm they are
# provided elsewhere in the file.
yn= origfunc(x,lamcsig,a)
lamcw,pcov = curve_fit(fittingfunc,x,yn) #fit curve
# print output: write the fitted exponential correlation length lamcw
np.savetxt('auxiliary/expfit/lamcw.txt',lamcw)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
314,
1100,
264,
685,
6649,
397,
20735,
4357,
257,
685,
27,
82,
328,
61,
17,
29,
14,
27,
82,
328,
61,
17,
29,
4357,
290,
30592,
6359,
328,
685,
6404,
12,
11265,
30592,
66,
4357... | 2.739837 | 246 |
"""
This module contains the helpers functions for reading genetic code table
information from json file.
"""
import json
# Read the list of genetic codes and associated files in a dictionary.
# Maps genetic-code ids (as strings) to the JSON file describing each table;
# loaded once at import time and shared by every lookup function below.
with open("gc_files/gc_file_associations.json") as gc_directory:
    gc_file_associations = json.load(gc_directory)
def codon_to_aa(codon, gc=1):
    """Translate *codon* into its amino acid's three-letter key.

    :param codon: Codon (string) e.g. AAA
    :param gc: genetic code (Integer) default=1 i.e. standard_genetic_code
    :return: the three-letter key (e.g. 'ala'), or None when the genetic
        code or codon is unknown or the table cannot be read.
    """
    try:
        code_key = str(gc)
        if code_key not in gc_file_associations:
            # Unknown genetic-code table.
            return None
        # Load the table for the requested genetic code.
        with open(gc_file_associations.get(code_key)) as handle:
            table = json.load(handle)
        target = codon.upper()
        for aa_key, entry in table.items():
            if target in entry["codons"]:
                # Codon found: its amino acid is the table key.
                return aa_key
        # Codon not present in any amino acid's entry.
        return None
    except Exception:
        return None
def aa_to_codon(aa, gc=1):
    """Return the list of codons encoding amino acid *aa*.

    :param aa: amino acid full name (e.g. Alanine), 3-letter notation
        (e.g. Ala) or single-letter symbol (e.g. A)
    :param gc: genetic code (Integer) default=1 i.e. standard_genetic_code
    :return: list of codons, or None when nothing matches or the table
        cannot be read.
    """
    try:
        code_key = str(gc)
        if code_key not in gc_file_associations:
            # Unknown genetic-code table.
            return None
        # Load the table for the requested genetic code.
        with open(gc_file_associations.get(code_key)) as handle:
            table = json.load(handle)
        query = aa.lower()
        # Fast path: the 3-letter notation is the table key itself.
        if len(aa) == 3 and query in table:
            return table[query]["codons"]
        # Otherwise match against the full name or single-letter symbol.
        for entry in table.values():
            if query in (entry["name"].lower(), entry["symbol"].lower()):
                return entry["codons"]
        # Nothing matched.
        return None
    except Exception:
        return None
def get_aa_using_name(aa, gc=1):
    """Return the full table entry (dict) for amino acid *aa*.

    :param aa: amino acid full name (e.g. Alanine), 3-letter notation
        (e.g. Ala) or single-letter symbol (e.g. A)
    :param gc: genetic code (Integer) default=1 i.e. standard_genetic_code
    :return: the amino acid's data dict, or None when nothing matches or
        the table cannot be read.
    """
    try:
        code_key = str(gc)
        if code_key not in gc_file_associations:
            # Unknown genetic-code table.
            return None
        # Load the table for the requested genetic code.
        with open(gc_file_associations.get(code_key)) as handle:
            table = json.load(handle)
        query = aa.lower()
        # Fast path: the 3-letter notation is the table key itself.
        if len(aa) == 3 and query in table:
            return table.get(query)
        # Otherwise match against the full name or single-letter symbol.
        for entry in table.values():
            if query in (entry["name"].lower(), entry["symbol"].lower()):
                return entry
        # Nothing matched.
        return None
    except Exception:
        return None
def get_aa_using_codon(codon, gc=1):
    """Return the full table entry (dict) of the amino acid for *codon*.

    :param codon: Codon (string) e.g. AAA
    :param gc: genetic code (Integer) default=1 i.e. standard_genetic_code
    :return: the amino acid's data dict, or None when the genetic code or
        codon is unknown or the table cannot be read.
    """
    try:
        code_key = str(gc)
        if code_key not in gc_file_associations:
            # Unknown genetic-code table.
            return None
        # Load the table for the requested genetic code.
        with open(gc_file_associations.get(code_key)) as handle:
            table = json.load(handle)
        target = codon.upper()
        for entry in table.values():
            if target in entry["codons"]:
                # Codon found: return the whole amino-acid record.
                return entry
        # Codon not present in any amino acid's entry.
        return None
    except Exception:
        return None
def get_synonymous_codons(codon, gc=1):
    """Return the codons synonymous with *codon* (same amino acid).

    :param codon: Codon (string) e.g. AAA
    :param gc: genetic code (Integer) default=1 i.e. standard_genetic_code
    :return: list of codons for the same amino acid (includes *codon*
        itself), or None when nothing matches or the table cannot be read.
    """
    try:
        code_key = str(gc)
        if code_key not in gc_file_associations:
            # Unknown genetic-code table.
            return None
        # Load the table for the requested genetic code.
        with open(gc_file_associations.get(code_key)) as handle:
            table = json.load(handle)
        target = codon.upper()
        for entry in table.values():
            if target in entry["codons"]:
                # Codon found: every codon of this amino acid is a synonym.
                return entry["codons"]
        # Codon not present in any amino acid's entry.
        return None
    except Exception:
        return None
| [
37811,
198,
1212,
8265,
4909,
262,
49385,
5499,
329,
3555,
8513,
2438,
3084,
198,
17018,
422,
33918,
2393,
13,
198,
37811,
198,
198,
11748,
33918,
198,
198,
2,
4149,
262,
1351,
286,
8513,
12416,
290,
3917,
3696,
287,
257,
22155,
13,
1... | 2.22527 | 2,406 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: grantmcgovern
# @Date: 2015-07-12 15:42:46
# @Email: me@grantmcgovern.com
# @Web: http://grantmcgovern.com
#
# @Last Modified by: grantmcgovern
# @Last Modified time: 2015-07-12 21:20:40
from django.contrib import admin
from models import WorkExperience
from django.contrib.admin import site, ModelAdmin
# Register the WorkExperience model with the Django admin site.
# NOTE(review): `WorkExperienceAdmin` is neither defined nor imported in this
# view — as written this raises NameError at import time; confirm the
# ModelAdmin subclass exists elsewhere or import it here.
admin.site.register(WorkExperience, WorkExperienceAdmin)
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
2488,
13838,
25,
7264,
23209,
47866,
198,
2,
2488,
10430,
25,
220,
220,
1853,
12,
2998,
12,
1065,
1... | 2.751592 | 157 |
#!/usr/bin/env python
#-*-coding:utf-8*-
# Purpose: interactively collect each student's scores and report totals.
student_num = int(input('请输入学生数量: '))
student_name, physics_score, maths_score, history_score = '', 0, 0, 0
all_scores = []  # one dict per student
for i in range(student_num):
    # Prompt for the student's name and the three subject scores.
    student_name = input('请输入第 {} 个学生的姓名: '.format(i+1))
    physics_score = float(input('请输入 {} 的物理成绩: '.format(student_name)))
    maths_score = float(input('请输入 {} 的数学成绩: '.format(student_name)))
    history_score = float(input('请输入 {} 的历史成绩: '.format(student_name)))
    this_student = {'student_name': student_name, 'physics_score': physics_score, 'maths_score': maths_score, 'history_score': history_score}
    all_scores.append(this_student)

# Process the collected scores: total each student's marks and decide
# pass/fail (passing requires a total above 120).
for i in range(student_num):
    total_score = float(all_scores[i]['physics_score'] + all_scores[i]['maths_score'] + all_scores[i]['history_score'])
    print('{} 的总成绩: {}'.format(all_scores[i]['student_name'], total_score))
    if total_score > 120:
        is_pass = all_scores[i]['student_name'] + ' 通过:)'
    else:
        is_pass = all_scores[i]['student_name'] + ' 未通过:('
    # NOTE(review): this line's label says "total score" (总成绩) but prints
    # the pass/fail message — confirm the intended wording.
    print('{} 的总成绩: {}'.format(all_scores[i]['student_name'], is_pass))
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
12,
9,
12,
66,
7656,
25,
40477,
12,
23,
9,
12,
198,
2,
13328,
101,
233,
41753,
237,
22522,
252,
163,
236,
108,
21410,
27950,
253,
47797,
121,
171,
120,
248,
164,
106,
94,
163,... | 1.963899 | 554 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Description: run job
# ChangeLog
#
# ChangeLog 2015-02-12
# submit individual sequences to the workflow, so that the result of each
# sequence can be cached and the progress can be shown for a job with many
# sequences
# ChangeLog 2015-03-26
# the tmpdir is removed if RunJob is succeeded
# ChangeLog 2015-04-01
# result from cache just make a soft link,
# zip -rq will replace the symbolic link with the actual data when making the
# zip file
# ChangeLog 2016-06-30
# cache results is saved at static/result/cache using md5 keys
# the folder static/md5 is not used anymore
# ChangeLog 2018-09-04
# when the cached job is retrieved, the folder is directly copied to the
# result folder instead of creating just the symlink. This is because the
# size of cached results are too big (>500GB) and it will be difficult to
# delete outdated cached result if the result is just symbolically linked
# how to create md5
# import hashlib
# md5_key = hashlib.md5(string).hexdigest()
# subfolder = md5_key[:2]
#
import os
import sys
import time
from libpredweb import myfunc
from libpredweb import webserver_common as webcom
import glob
import hashlib
import shutil
import site
import fcntl
import json
# Derive program identity and filesystem layout from this script's location.
progname = os.path.basename(sys.argv[0])
rootname_progname = os.path.splitext(progname)[0]
wspace = ''.join([" "]*len(progname))  # padding to align multi-line usage text
rundir = os.path.dirname(os.path.realpath(__file__))
basedir = os.path.realpath("%s/.."%(rundir)) # path of the application, i.e. pred/
path_result = "%s/static/result"%(basedir)
webserver_root = os.path.realpath("%s/../../../"%(rundir))

# Activate the web server's virtualenv in-process.
# NOTE(review): the file handle from open() is never closed and exec() of an
# on-disk script is inherently trusting — confirm activate_this.py is owned
# by the deployment.
activate_env="%s/env/bin/activate_this.py"%(webserver_root)
exec(compile(open(activate_env, "r").read(), activate_env, 'exec'), dict(__file__=activate_env))

FORMAT_DATETIME = webcom.FORMAT_DATETIME
TZ = webcom.TZ

# External tool locations and environment for BLAST / SCAMPI / topcons2.
blastdir = "%s/%s"%(rundir, "soft/topcons2_webserver/tools/blast-2.2.26")
os.environ['SCAMPI_DIR'] = "/server/scampi"
os.environ['MODHMM_BIN'] = "/server/modhmm/bin"
os.environ['BLASTMAT'] = "%s/data"%(blastdir)
os.environ['BLASTBIN'] = "%s/bin"%(blastdir)
os.environ['BLASTDB'] = "%s/%s"%(rundir, "soft/topcons2_webserver/database/blast/")
blastdb = "%s/%s"%(os.environ['BLASTDB'], "uniref90.fasta" )
runscript = "%s/%s"%(rundir, "soft/topcons2_webserver/workflow/pfam_workflow.py")
script_scampi = "%s/%s/%s"%(rundir, "other", "mySCAMPI_run.pl")
basedir = os.path.realpath("%s/.."%(rundir)) # path of the application, i.e. pred/

# Cache, log and notification locations.
path_md5cache = "%s/static/md5"%(basedir)
path_cache = "%s/static/result/cache"%(basedir)
path_log = "%s/static/log"%(basedir)
finished_date_db = "%s/cached_job_finished_date.sqlite3"%(path_log)
gen_logfile = "%s/%s.log"%(path_log, progname)
gen_errfile = "%s/%s.err"%(path_log, progname)
contact_email = "nanjiang.shu@scilifelab.se"
vip_user_list = [
    "nanjiang.shu@scilifelab.se"
]
# note that here the url should be without http://
usage_short="""
Usage: %s seqfile_in_fasta
%s -jobid JOBID -outpath DIR -tmpdir DIR
%s -email EMAIL -baseurl BASE_WWW_URL
%s -only-get-cache [-force]
"""%(progname, wspace, wspace, wspace)
usage_ext="""\
Description:
run job
OPTIONS:
-only-get-cache Only get the cached results, this will be run on the front-end
-force Do not use cahced result
-h, --help Print this help message and exit
Created 2015-02-05, updated 2015-02-12, Nanjiang Shu
"""
usage_exp="""
Examples:
%s /data3/tmp/tmp_dkgSD/query.fa -outpath /data3/result/rst_mXLDGD -tmpdir /data3/tmp/tmp_dkgSD
"""%(progname)
#}}}
#}}}
#}}}
if __name__ == '__main__' :
    # NOTE(review): `InitGlobalParameter` and `main` are not defined in this
    # view — confirm they exist elsewhere in the file.
    g_params = InitGlobalParameter()

    # Overlay defaults with the per-program section of config/config.json,
    # keyed by this script's basename without extension.
    configfile = "%s/config/config.json"%(basedir)
    config = {}
    if os.path.exists(configfile):
        text = myfunc.ReadFile(configfile)
        config = json.loads(text)
    if rootname_progname in config:
        g_params.update(config[rootname_progname])

    status = main(g_params)
    # Best-effort removal of the lock file; failures are logged, not raised.
    if os.path.exists(g_params['lockfile']):
        try:
            os.remove(g_params['lockfile'])
        except:
            myfunc.WriteFile("Failed to delete lockfile %s\n"%(g_params['lockfile']), gen_errfile, "a", True)
    sys.exit(status)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
12489,
25,
1057,
1693,
198,
198,
2,
9794,
11187,
220,
198,
2,
198,
2,
9794,
11187,
1853,
12,
2999,
12,
1065,
220... | 2.456826 | 1,714 |
# Purpose: math and construction tools
# Created: 27.03.2010, 2018 integrated into ezdxf
# Copyright (c) 2010-2020, Manfred Moitzi
# License: MIT License
from .vector import Vector, Vec2, X_AXIS, Y_AXIS, Z_AXIS, NULLVEC
from .construct2d import (
is_close_points, closest_point, convex_hull_2d, intersection_line_line_2d, distance_point_line_2d,
is_point_on_line_2d, is_point_in_polygon_2d, is_point_left_of_line, point_to_line_relation,
linspace, enclosing_angles, reflect_angle_x_deg,
reflect_angle_y_deg, sign,
)
from .construct3d import (
is_planar_face, subdivide_face, subdivide_ngons, Plane, LocationState, intersection_ray_ray_3d, normal_vector_3p,
)
from .matrix44 import Matrix44
from .linalg import (
Matrix, LUDecomposition, gauss_jordan_inverse, gauss_jordan_solver, gauss_vector_solver, gauss_matrix_solver,
freeze_matrix, tridiagonal_matrix_solver, tridiagonal_vector_solver, detect_banded_matrix, compact_banded_matrix,
BandedMatrixLU, banded_matrix,
)
from .parametrize import estimate_tangents, estimate_end_tangent_magnitude
from .bspline import (
fit_points_to_cad_cv, global_bspline_interpolation,
rational_spline_from_arc, rational_spline_from_ellipse,
uniform_knot_vector, open_uniform_knot_vector, required_knot_values, BSpline, BSplineU, BSplineClosed,
local_cubic_bspline_interpolation,
)
from .bezier import Bezier
from .bezier4p import Bezier4P, cubic_bezier_from_arc, cubic_bezier_from_ellipse, cubic_bezier_interpolation
from .surfaces import BezierSurface
from .eulerspiral import EulerSpiral
from .ucs import OCS, UCS, PassTroughUCS
from .bulge import bulge_to_arc, bulge_3_points, bulge_center, bulge_radius, arc_to_bulge
from .arc import ConstructionArc
from .line import ConstructionRay, ConstructionLine, ParallelRaysError
from .circle import ConstructionCircle
from .ellipse import ConstructionEllipse, angle_to_param, param_to_angle, rytz_axis_construction
from .box import ConstructionBox
from .shape import Shape2d
from .bbox import BoundingBox2d, BoundingBox
from .offset2d import offset_vertices_2d
from .transformtools import NonUniformScalingError, InsertTransformationError
def xround(value: float, rounding: float = 0.) -> float:
    """Round *value* to the nearest multiple of *rounding*.

    The `rounding` argument defines the rounding limit:

    ======= ======================================
    0       remove fraction
    0.1     round next to x.1, x.2, ... x.0
    0.25    round next to x.25, x.50, x.75 or x.00
    0.5     round next to x.5 or x.0
    1.0     round to a multiple of 1: remove fraction
    2.0     round to a multiple of 2: xxx2, xxx4, xxx6 ...
    5.0     round to a multiple of 5: xxx5 or xxx0
    10.0    round to a multiple of 10: xx10, xx20, ...
    ======= ======================================

    Args:
        value: float value to round
        rounding: rounding limit

    """
    if not rounding:
        # Limit 0 simply drops the fractional part.
        return round(value)
    # Scale so plain round() snaps to the requested grid, then scale back.
    inv = 1. / rounding
    return round(value * inv) / inv
| [
2,
32039,
25,
10688,
290,
5103,
4899,
198,
2,
15622,
25,
2681,
13,
3070,
13,
10333,
11,
2864,
11521,
656,
304,
89,
67,
26152,
198,
2,
15069,
357,
66,
8,
3050,
12,
42334,
11,
1869,
39193,
4270,
4224,
72,
198,
2,
13789,
25,
17168,
... | 2.696589 | 1,114 |
# NOTE(review): bare entry-point call; `main` is not defined in this view —
# confirm it is provided elsewhere in the file.
main()
| [
220,
220,
220,
220,
198,
12417,
3419,
628
] | 1.625 | 8 |
"""
1309. Decrypt String from Alphabet to Integer Mapping
Given a string s formed by digits ('0' - '9') and '#' . We want to map s to English lowercase characters as follows:
Characters ('a' to 'i') are represented by ('1' to '9') respectively.
Characters ('j' to 'z') are represented by ('10#' to '26#') respectively.
Return the string formed after mapping.
It's guaranteed that a unique mapping will always exist.
Example 1:
Input: s = "10#11#12"
Output: "jkab"
Explanation: "j" -> "10#" , "k" -> "11#" , "a" -> "1" , "b" -> "2".
Example 2:
Input: s = "1326#"
Output: "acz"
Example 3:
Input: s = "25#"
Output: "y"
Example 4:
Input: s = "12345678910#11#12#13#14#15#16#17#18#19#20#21#22#23#24#25#26#"
Output: "abcdefghijklmnopqrstuvwxyz"
Constraints:
1 <= s.length <= 1000
s[i] only contains digit characters ('0'-'9') and the '#' letter.
s will be valid string such that mapping is always possible.
"""
| [
37811,
198,
12952,
24,
13,
4280,
6012,
10903,
422,
45695,
284,
34142,
337,
5912,
198,
15056,
257,
4731,
264,
7042,
416,
19561,
19203,
15,
6,
532,
705,
24,
11537,
290,
705,
2,
6,
764,
775,
765,
284,
3975,
264,
284,
3594,
2793,
7442,
... | 2.644509 | 346 |
from Section05_Singleton.SingletonTestability.Singleton import Singleton
| [
6738,
7275,
2713,
62,
29974,
10565,
13,
29974,
10565,
14402,
1799,
13,
29974,
10565,
1330,
5573,
10565,
628
] | 4.111111 | 18 |
# to get instagram dms and reply to them
from social_media.instagram.base import InstagramEngine
# Log in, load one profile's inbox and reply to every message in it.
i = InstagramEngine()
i.login()

# NOTE(review): hard=True presumably forces a fresh fetch instead of a cached
# profile — confirm against InstagramEngine.get_profile.
profile = i.get_profile('a_the_rva', hard=True)
profile.mount()

messages = profile.get_messages()
for message in messages:
    message.reply("I love you!")
2,
284,
651,
916,
6713,
288,
907,
290,
10971,
284,
606,
198,
6738,
1919,
62,
11431,
13,
8625,
6713,
13,
8692,
1330,
10767,
13798,
198,
72,
796,
10767,
13798,
3419,
198,
72,
13,
38235,
3419,
198,
198,
13317,
796,
1312,
13,
1136,
62,
... | 3.108696 | 92 |
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from transient_keras.transient_keras_classifier import TransientKerasClassifier
from test_tools import getTrainDict, getTestSequence
def testKerasSequenceClassifier():
    '''
    Exercise ``TransientKerasClassifier`` on randomly generated vector
    sequences. The labels produced by ``classify`` are not checked for
    correctness; the goal is simply to verify that unknown data flows
    through the classifier without error.
    '''
    # Test configuration
    eval_percent = 0.33
    num_init = 2
    num_epoch = 10
    batch_size = 32
    obs_per_event = 100
    events_per_class = 10
    vect_dim = 9

    # Build training data and an unknown test sequence
    train_event_dict = getTrainDict(obs_per_event, events_per_class, vect_dim)
    test_array, _ = getTestSequence(vect_dim)
    num_classes = len(train_event_dict)

    # Assemble a small network and wrap it in the classifier under test
    model = Sequential()
    model.add(Dense(6, activation='relu', input_dim=2 * vect_dim))
    model.add(Dense(num_classes, activation='softmax'))
    classifier = TransientKerasClassifier(train_event_dict, model.get_config(),
                                          eval_percent, num_init, num_epoch,
                                          batch_size, 0)

    # Feed every column of the test sequence through the classifier
    for col in range(test_array.shape[1]):
        classifier.classify(test_array[:, col])

    # One label must have been recorded per classified vector
    assert len(classifier.class_labels) == test_array.shape[1]

    # Resetting must clear the accumulated labels
    classifier.reset()
    assert not classifier.class_labels
assert not classifier.class_labels
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    testKerasSequenceClassifier()
| [
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
198,
198,
6738,
32361,
62,
6122,
292,
13,
7645,
1153,
62,
6122,
292,
62,
4871,
7483,
1330,
3602,... | 2.587859 | 626 |
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib import messages
from django.contrib.auth import (
get_user_model, update_session_auth_hash
)
from django.contrib.auth.forms import (
PasswordResetForm, SetPasswordForm, PasswordChangeForm
)
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.template import Context
from django.template.loader import get_template
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.contrib.auth.models import User
from django.contrib.auth import logout, login
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import resolve_url, render, redirect
from django.utils.encoding import force_text
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views import View
from django.template.response import TemplateResponse
from django.template.loader import render_to_string
from .tokens import account_activation_token
from .forms import SignUpForm, ProfileForm, ContactForm
@login_required
@csrf_protect
@sensitive_post_parameters()  # keep new passwords out of error reports
@never_cache
def password_reset_confirm(request, uidb64=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           current_app=None, extra_context=None):
    """
    View that checks the hash in a password reset link and presents a
    form for entering a new password.
    """
    UserModel = get_user_model()
    assert uidb64 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('session:password_reset_complete')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)
    try:
        # urlsafe_base64_decode() decodes to bytestring on Python 3
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel._default_manager.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        # Malformed uid or unknown user: treat the link as invalid below.
        user = None
    if user is not None and token_generator.check_token(user, token):
        # Token is valid: show (GET) or process (POST) the new-password form.
        validlink = True
        title = _('Enter new password')
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = set_password_form(user)
    else:
        # Expired or tampered link: render the page without a form.
        validlink = False
        form = None
        title = _('Password reset unsuccessful')
    context = {
        'form': form,
        'title': title,
        'validlink': validlink,
    }
    if extra_context is not None:
        context.update(extra_context)
    if current_app is not None:
        request.current_app = current_app
    return TemplateResponse(request, template_name, context)
| [
11748,
14601,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
2,
24390,
9082,
278,
262,
17594,
3419,
290,
2604,
448,
3419,
5009,
2174,
13,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,
42625,
14208,
13,
3642,
... | 2.68932 | 1,339 |
"""
This module contains code that dynamically scans for available apps
at module load time. For all apps found, a loader is generated which,
when called, imports the particular app and behaves like the app
class itself. The loaders are made available as module attributes
under the same name the corresponding app classes would have
(e.g. HeatyApp).
The __all__ list is populated with these loaders, hence a wildcard
import will fetch them all.
"""
import typing as T
import types
import importlib
import os
import sys
def _generate_app_loaders() -> T.Iterable[T.Tuple[str, T.Callable]]:
    """Scan the package directory for apps.

    Yields one ``(class_name, loader)`` tuple for each sub-directory
    that contains an ``app.py``; the loader defers the actual import."""
    here = os.path.realpath(os.path.dirname(__file__))
    for entry in os.listdir(here):
        app_dir = os.path.join(here, entry)
        # Only directories that ship an app.py count as apps.
        if not os.path.isdir(app_dir) or \
                not os.path.isfile(os.path.join(app_dir, "app.py")):
            continue
        # Derive the class name, e.g. "heaty" -> "HeatyApp".
        class_name = "{}App".format(
            "".join(word.capitalize() for word in entry.split("_"))
        )
        yield class_name, _build_app_loader(entry, class_name)
# make app loaders available as module attributes
__all__ = []
for _attr, _loader in _generate_app_loaders():
    # Expose each loader under its app class name and advertise it for
    # wildcard imports via __all__.
    setattr(sys.modules[__name__], _attr, _loader)
    __all__.append(_attr)
| [
37811,
198,
1212,
8265,
4909,
2438,
326,
32366,
23824,
329,
1695,
6725,
198,
265,
8265,
3440,
640,
13,
1114,
477,
6725,
1043,
11,
257,
40213,
318,
7560,
543,
11,
198,
12518,
1444,
11,
17944,
262,
1948,
598,
290,
39341,
588,
262,
598,
... | 2.883621 | 464 |
"""Implement pygmo optimizers."""
import warnings
import numpy as np
from estimagic import batch_evaluators
from estimagic.config import IS_PYGMO_INSTALLED
from estimagic.decorators import mark_minimizer
from estimagic.exceptions import NotInstalledError
from estimagic.optimization.algo_options import CONVERGENCE_RELATIVE_PARAMS_TOLERANCE
from estimagic.optimization.algo_options import (
STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,
)
# Default generation cap shared by the genetic/evolutionary algorithms below.
STOPPING_MAX_ITERATIONS_GENETIC = 250

try:
    import pygmo as pg
except ImportError:
    # pygmo is optional; availability is tracked via IS_PYGMO_INSTALLED.
    pass
@mark_minimizer(
    name="pygmo_gaco",
    primary_criterion_entry="value",
    parallelizes=True,
    needs_scaling=True,
    disable_cache=False,
    is_available=IS_PYGMO_INSTALLED,
)
def pygmo_gaco(
    criterion,
    x,
    lower_bounds,
    upper_bounds,
    *,
    population_size=None,
    batch_evaluator=None,
    n_cores=1,
    seed=None,
    discard_start_params=False,
    #
    stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
    kernel_size=63,
    speed_parameter_q=1.0,
    oracle=0.0,
    accuracy=0.01,
    threshold=1,
    speed_of_std_values_convergence=7,
    stopping_max_n_without_improvements=100000,
    stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,
    focus=0.0,
    cache=False,
):
    """Minimize a scalar function using the generalized ant colony algorithm.

    For details see :ref:`list_of_pygmo_algorithms`.
    """
    # This global algorithm requires finite bounds on every parameter.
    _check_that_every_param_is_bounded(lower_bounds, upper_bounds)

    # Fall back to an automatic population size with a floor of 64.
    population_size = _determine_population_size(
        population_size=population_size, x=x, lower_bound=64
    )

    # n_gen_mark must be integral; reject fractional floats with a clear error.
    if isinstance(speed_of_std_values_convergence, float):
        if not speed_of_std_values_convergence.is_integer():
            raise ValueError(
                "The speed_of_std_values_convergence parameter must be an integer. "
                f"You specified {speed_of_std_values_convergence}."
            )

    # Translate estimagic option names into pygmo's gaco keyword arguments.
    algo_specific_options = {
        "gen": int(stopping_max_iterations),
        "ker": kernel_size,
        "q": speed_parameter_q,
        "oracle": oracle,
        "acc": accuracy,
        "threshold": threshold,
        "n_gen_mark": int(speed_of_std_values_convergence),
        "impstop": stopping_max_n_without_improvements,
        "evalstop": stopping_max_criterion_evaluations,
        "focus": focus,
        "memory": cache,
    }
    algo_options = _create_algo_options(
        population_size=population_size,
        n_cores=n_cores,
        seed=seed,
        discard_start_params=discard_start_params,
        batch_evaluator=batch_evaluator,
        algo_specific_options=algo_specific_options,
    )

    # Delegate the actual optimization to the shared pygmo driver.
    res = _minimize_pygmo(
        criterion=criterion,
        x=x,
        lower_bounds=lower_bounds,
        upper_bounds=upper_bounds,
        method="gaco",
        algo_options=algo_options,
    )
    return res
@mark_minimizer(
name="pygmo_bee_colony",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_bee_colony(
criterion,
x,
lower_bounds,
upper_bounds,
*,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
max_n_trials=1,
population_size=None,
):
"""Minimize a scalar function using the artifical bee colony algorithm.
For details see :ref:`list_of_pygmo_algorithms`.
"""
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=20
)
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options={
"limit": max_n_trials,
"gen": int(stopping_max_iterations),
},
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="bee_colony",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_de",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_de(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
weight_coefficient=0.8,
crossover_probability=0.9,
mutation_variant="rand/1/exp",
convergence_criterion_tolerance=1e-6,
convergence_relative_params_tolerance=CONVERGENCE_RELATIVE_PARAMS_TOLERANCE,
):
"""Minimize a scalar function using the differential evolution algorithm.
For details see :ref:`list_of_pygmo_algorithms`.
"""
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=10
)
# support both integer and string specification of the mutation variant
mutation_variant_str_to_int = {
"best/1/exp": 1,
"rand/1/exp": 2,
"rand-to-best/1/exp": 3,
"best/2/exp": 4,
"rand/2/exp": 5,
"best/1/bin": 6,
"rand/1/bin": 7,
"rand-to-best/1/bin": 8,
"best/2/bin": 9,
"rand/2/bin": 10,
}
mutation_variant = _convert_str_to_int(
str_to_int=mutation_variant_str_to_int, value=mutation_variant
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"F": weight_coefficient,
"CR": crossover_probability,
"variant": mutation_variant,
"ftol": convergence_criterion_tolerance,
"xtol": convergence_relative_params_tolerance,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="de",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_sea",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_sea(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
stopping_max_iterations=10_000, # Each generation will compute the objective once
):
r"""Minimize a scalar function using the (N+1)-ES simple evolutionary algorithm.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=10
)
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options={"gen": int(stopping_max_iterations)},
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="sea",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_sga",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_sga(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
crossover_probability=0.9,
crossover_strategy="exponential",
eta_c=None,
mutation_probability=0.02,
mutation_strategy="polynomial",
mutation_polynomial_distribution_index=None,
mutation_gaussian_width=None,
selection_strategy="tournament",
selection_truncated_n_best=None,
selection_tournament_size=None,
):
"""Minimize a scalar function using a simple genetic algorithm.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
if eta_c is not None and crossover_strategy != "sbx":
warnings.warn(
f"You specified crossover strategy {crossover_strategy} and eta_c. "
"However, eta_c is ignored because it is only used when the "
"crossover_strategy is set to sbx."
)
eta_c = 1.0 if eta_c is None else eta_c
if (
mutation_polynomial_distribution_index is not None
) and mutation_strategy != "polynomial":
warnings.warn(
"You specified a mutation_polynomial_distribution_index but did not choose "
"polynomial as your mutation_strategy. Thus, "
"mutation_polynomial_distribution_index will be ignored."
)
if mutation_gaussian_width is not None and mutation_strategy != "gaussian":
warnings.warn(
"You specified a mutation_gaussian_width but did not choose gaussion as "
"your mutation_strategy. Thus, mutation_gaussian_width will be ignored."
)
if selection_strategy != "truncated" and selection_truncated_n_best is not None:
warnings.warn(
"You specified selection_truncated_n_best but did not specify truncated as "
"your selection strategy. Therefore, selection_truncated_n_best is ignored."
)
if selection_strategy != "tournament" and selection_tournament_size is not None:
warnings.warn(
"You specified selection_tournament_size but did not specify tournament as "
"your selection strategy. Therefore, selection_tournament_size is ignored."
)
if mutation_strategy == "gaussian" and mutation_gaussian_width is not None:
param_m = mutation_gaussian_width
elif (
mutation_strategy == "polynomial"
and mutation_polynomial_distribution_index is not None
):
param_m = mutation_polynomial_distribution_index
else:
param_m = 1.0
if selection_strategy == "truncated" and selection_truncated_n_best is not None:
param_s = selection_truncated_n_best
elif selection_strategy == "tournament" and selection_tournament_size is not None:
param_s = selection_tournament_size
else:
param_s = 2
algo_specific_options = {
"gen": int(stopping_max_iterations),
"cr": crossover_probability,
"eta_c": eta_c,
"m": mutation_probability,
"param_m": param_m,
"crossover": crossover_strategy,
"mutation": mutation_strategy,
"selection": selection_strategy,
"param_s": param_s,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="sga",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_sade",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_sade(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
jde=True,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
mutation_variant="rand/1/exp",
keep_adapted_params=False,
ftol=1e-6,
xtol=1e-6,
):
"""Minimize a scalar function using Self-adaptive Differential Evolution.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
mutation_variant_str_to_int = {
"best/1/exp": 1,
"rand/1/exp": 2,
"rand-to-best/1/exp": 3,
"best/2/exp": 4,
"rand/2/exp": 5,
"best/1/bin": 6,
"rand/1/bin": 7,
"rand-to-best/1/bin": 8,
"best/2/bin": 9,
"rand/2/bin": 10,
"rand/3/exp": 11,
"rand/3/bin": 12,
"best/3/exp": 13,
"best/3/bin": 14,
"rand-to-current/2/exp": 15,
"rand-to-current/2/bin": 16,
"rand-to-best-and-current/2/exp": 17,
"rand-to-best-and-current/2/bin": 18,
}
mutation_variant = _convert_str_to_int(
str_to_int=mutation_variant_str_to_int, value=mutation_variant
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"variant": mutation_variant,
"variant_adptv": 1 if jde else 2,
"ftol": ftol,
"xtol": xtol,
"memory": keep_adapted_params,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="sade",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_cmaes",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_cmaes(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
#
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
backward_horizon=None,
variance_loss_compensation=None,
learning_rate_rank_one_update=None,
learning_rate_rank_mu_update=None,
initial_step_size=0.5,
ftol=1e-6,
xtol=1e-6,
keep_adapted_params=False,
):
r"""Minimize a scalar function using the Covariance Matrix Evolutionary Strategy.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"cc": _replace_none(var=backward_horizon, none_value=-1.0),
"cs": _replace_none(var=variance_loss_compensation, none_value=-1.0),
"c1": _replace_none(var=learning_rate_rank_one_update, none_value=-1.0),
"cmu": _replace_none(var=learning_rate_rank_mu_update, none_value=-1.0),
"sigma0": initial_step_size,
"ftol": ftol,
"xtol": xtol,
"memory": keep_adapted_params,
"force_bounds": True,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="cmaes",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_simulated_annealing",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_simulated_annealing(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
#
start_temperature=10.0,
end_temperature=0.01,
n_temp_adjustments=10,
n_range_adjustments=10,
bin_size=10,
start_range=1.0,
):
"""Minimize a function with the simulated annealing algorithm.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
algo_specific_options = {
"Ts": start_temperature,
"Tf": end_temperature,
"n_T_adj": int(n_temp_adjustments),
"n_range_adj": int(n_range_adjustments),
"bin_size": bin_size,
"start_range": start_range,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="simulated_annealing",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_pso",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_pso(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
omega=0.7298,
force_of_previous_best=2.05,
force_of_best_in_neighborhood=2.05,
max_velocity=0.5,
algo_variant=5,
neighbor_definition="lbest",
neighbor_param=None,
keep_velocities=False,
):
r"""Minimize a scalar function using Particle Swarm Optimization.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
if neighbor_definition in [1, 3] and neighbor_param is not None:
warnings.warn(
"You gave a neighbor parameter but selected a neighbor_definition "
"that ignores this parameter."
)
neighbor_param = _replace_none(neighbor_param, 4)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=10
)
neighbor_definition_str_to_int = {
"gbest": 1,
"lbest": 2,
"Von Neumann": 3,
"Adaptive random": 4,
}
algo_variant_str_to_int = {
"canonical_inertia": 1,
"social_and_cog_rand": 2,
"all_components_rand": 3,
"one_rand": 4,
"canonical_constriction": 5,
"fips": 6,
}
algo_specific_options = {
"gen": int(stopping_max_iterations),
"omega": omega,
"eta1": force_of_previous_best,
"eta2": force_of_best_in_neighborhood,
"max_vel": max_velocity,
"variant": _convert_str_to_int(algo_variant_str_to_int, algo_variant),
"neighb_type": _convert_str_to_int(
neighbor_definition_str_to_int, neighbor_definition
),
"neighb_param": neighbor_param,
"memory": keep_velocities,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="pso",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_pso_gen",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_pso_gen(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
omega=0.7298,
force_of_previous_best=2.05,
force_of_best_in_neighborhood=2.05,
max_velocity=0.5,
algo_variant="canonical_constriction",
neighbor_definition=2,
neighbor_param=None,
keep_velocities=False,
):
r"""Minimize a scalar function with generational Particle Swarm Optimization.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
if neighbor_definition in [1, 3] and neighbor_param is not None:
warnings.warn(
"You gave a neighbor parameter but selected a neighbor_definition "
"that ignores this parameter."
)
neighbor_param = _replace_none(neighbor_param, 4)
neighbor_str_to_int = {
"gbest": 1,
"lbest": 2,
"Von Neumann": 3,
"Adaptive random": 4,
}
neighbor_param = _convert_str_to_int(neighbor_str_to_int, neighbor_param)
algo_variant_str_to_int = {
"canonical_inertia": 1,
"social_and_cog_rand": 2,
"all_components_rand": 3,
"one_rand": 4,
"canonical_constriction": 5,
"fips": 6,
}
algo_variant = _convert_str_to_int(algo_variant_str_to_int, algo_variant)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=10
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"omega": omega,
"eta1": force_of_previous_best,
"eta2": force_of_best_in_neighborhood,
"max_vel": max_velocity,
"variant": algo_variant,
"neighb_type": neighbor_definition,
"neighb_param": neighbor_param,
"memory": keep_velocities,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="pso_gen",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_mbh",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_mbh(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
inner_algorithm=None,
# this is 30 instead of 5 in pygmo for our sum of squares test to pass
stopping_max_inner_runs_without_improvement=30,
perturbation=0.01,
):
"""Minimize a scalar function using generalized Monotonic Basin Hopping.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
# the min default population size is this large to pass our sum of squares tests.
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=250
)
algo_specific_options = {
"algo": inner_algorithm,
"stop": stopping_max_inner_runs_without_improvement,
"perturb": perturbation,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="mbh",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_xnes",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_xnes(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
#
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
learning_rate_mean_update=1.0,
learning_rate_step_size_update=None,
learning_rate_cov_matrix_update=None,
initial_search_share=1.0,
ftol=1e-6,
xtol=1e-6,
keep_adapted_params=False,
):
r"""Minimize a scalar function using Exponential Evolution Strategies.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"eta_mu": _replace_none(learning_rate_mean_update, -1),
"eta_sigma": _replace_none(learning_rate_step_size_update, -1),
"eta_b": _replace_none(learning_rate_cov_matrix_update, -1),
"sigma0": initial_search_share,
"ftol": ftol,
"xtol": xtol,
"memory": keep_adapted_params,
"force_bounds": True,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="xnes",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_gwo",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_gwo(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
):
"""Minimize a scalar function using the Grey Wolf Optimizer.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options={"gen": int(stopping_max_iterations)},
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="gwo",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_compass_search",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_compass_search(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
#
stopping_max_criterion_evaluations=STOPPING_MAX_CRITERION_EVALUATIONS_GLOBAL,
start_range=0.1,
stop_range=0.01,
reduction_coeff=0.5,
):
"""Minimize a scalar function using compass search.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
if population_size is not None:
warnings.warn(
f"You specified population size {population_size}. "
"compass_search does not have a population so this argument is ignored."
)
else:
# if discard_start_params is False population_size - 1 must still be positive
population_size = 100
algo_specific_options = {
"max_fevals": stopping_max_criterion_evaluations,
"start_range": start_range,
"stop_range": stop_range,
"reduction_coeff": reduction_coeff,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="compass_search",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_ihs",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_ihs(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
#
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
choose_from_memory_probability=0.85,
min_pitch_adjustment_rate=0.35,
max_pitch_adjustment_rate=0.99,
min_distance_bandwidth=1e-5,
max_distance_bandwidth=1.0,
):
"""Minimize a scalar function using the improved harmony search algorithm.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
if population_size is not None:
warnings.warn("The population size has no effect on IHS' performance.")
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=1
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"phmcr": choose_from_memory_probability,
"ppar_min": min_pitch_adjustment_rate,
"ppar_max": max_pitch_adjustment_rate,
"bw_min": min_distance_bandwidth,
"bw_max": max_distance_bandwidth,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="ihs",
algo_options=algo_options,
)
return res
@mark_minimizer(
name="pygmo_de1220",
primary_criterion_entry="value",
parallelizes=True,
needs_scaling=True,
disable_cache=False,
is_available=IS_PYGMO_INSTALLED,
)
def pygmo_de1220(
criterion,
x,
lower_bounds,
upper_bounds,
*,
population_size=None,
batch_evaluator=None,
n_cores=1,
seed=None,
discard_start_params=False,
#
jde=True,
stopping_max_iterations=STOPPING_MAX_ITERATIONS_GENETIC,
allowed_variants=None,
keep_adapted_params=False,
ftol=1e-6,
xtol=1e-6,
):
"""Minimize a scalar function using Self-adaptive Differential Evolution, pygmo flavor.
For details see :ref:`list_of_pygmo_algorithms`.
"""
_check_that_every_param_is_bounded(lower_bounds, upper_bounds)
variant_str_to_int = {
"best/1/exp": 1,
"rand/1/exp": 2,
"rand-to-best/1/exp": 3,
"best/2/exp": 4,
"rand/2/exp": 5,
"best/1/bin": 6,
"rand/1/bin": 7,
"rand-to-best/1/bin": 8,
"best/2/bin": 9,
"rand/2/bin": 10,
"rand/3/exp": 11,
"rand/3/bin": 12,
"best/3/exp": 13,
"best/3/bin": 14,
"rand-to-current/2/exp": 15,
"rand-to-current/2/bin": 16,
"rand-to-best-and-current/2/exp": 17,
"rand-to-best-and-current/2/bin": 18,
}
if allowed_variants is None:
allowed_variant_codes = [2, 3, 7, 10, 13, 14, 15, 16]
else:
allowed_variant_codes = []
for variant in allowed_variants:
allowed_variant_codes.append(
_convert_str_to_int(variant_str_to_int, variant)
)
population_size = _determine_population_size(
population_size=population_size, x=x, lower_bound=64
)
algo_specific_options = {
"gen": int(stopping_max_iterations),
"variant_adptv": 1 if jde else 2,
"ftol": ftol,
"xtol": xtol,
"memory": keep_adapted_params,
"allowed_variants": allowed_variant_codes,
}
algo_options = _create_algo_options(
population_size=population_size,
n_cores=n_cores,
seed=seed,
discard_start_params=discard_start_params,
batch_evaluator=batch_evaluator,
algo_specific_options=algo_specific_options,
)
res = _minimize_pygmo(
criterion=criterion,
x=x,
lower_bounds=lower_bounds,
upper_bounds=upper_bounds,
method="de1220",
algo_options=algo_options,
)
return res
# ====================================================================================
def _minimize_pygmo(
criterion,
x,
lower_bounds,
upper_bounds,
method,
algo_options,
derivative=None,
):
"""Minimize a function with pygmo.
Args:
criterion (callable):
x (np.ndarray): Starting values of the parameters.
lower_bounds (np.ndarray):
upper_bounds (np.ndarray):
method (str): One of the optimizers of the pygmo package.
algo_options (dict): Options for the optimizer. In addition to
the algo options that will be passed directly to the pygmo
algorithms we have the following entries:
- population_size (int): Population size for genetic algorithms.
- batch_evaluator (str or callable): An estimagic batch evaluator,
default joblib batch evaluator.
- n_cores (int): Number of cores used for parallel evaluation of
the criterion function. Default 1.
- seed (int or None): Random seed for drawing the initial
population.
- discard_start_params (bool): If True, the start params are not
guaranteed to be part of the initial population. This saves one
criterion function evaluation that cannot be done in parallel
with other evaluations. Default False.
Returns:
results (dict): Dictionary with optimization results.
"""
if not IS_PYGMO_INSTALLED:
raise NotInstalledError(
f"The {method} algorithm requires the pygmo package to be installed. "
"You can install it with 'conda install -c conda-forge pygmo'. Visit "
"https://esa.github.io/pygmo2/install.html for more detailed installation "
"instructions."
)
population_size = algo_options.pop("population_size", 1)
batch_evaluator = algo_options.pop("batch_evaluator", "joblib_batch_evaluator")
if isinstance(batch_evaluator, str):
batch_evaluator = getattr(batch_evaluators, batch_evaluator)
n_cores = algo_options.pop("n_cores", 1)
seed = algo_options.pop("seed", None)
discard_start_params = algo_options.pop("discard_start_params", False)
bounds = (lower_bounds, upper_bounds)
prob = _create_problem(
func=criterion,
bounds=bounds,
dim=len(x),
batch_evaluator=batch_evaluator,
n_cores=n_cores,
)
algo = _create_algorithm(method, algo_options, n_cores)
pop = _create_population(
prob, population_size, x, seed=seed, discard_start_params=discard_start_params
)
evolved = algo.evolve(pop)
result = _process_pygmo_results(evolved)
return result
def _create_algorithm(method, algo_options, n_cores):
"""Create a pygmo algorithm."""
pygmo_uda = getattr(pg, method)
algo = pygmo_uda(**algo_options)
try:
algo.set_bfe(pg.bfe())
except AttributeError:
if n_cores >= 2:
warnings.warn(
f"Your specified algorithm {method} does not support parallelization. "
"Choose another algorithm such as pygmo_gaco to parallelize."
)
out = pg.algorithm(algo)
return out
def _create_population(problem, population_size, x, seed, discard_start_params):
"""Create a pygmo population object.
Args:
problem (pygmo.Problem)
algo_options (dict)
x (np.ndarray)
Todo:
- constrain random initial values to be in some bounds
"""
if not discard_start_params:
population_size = population_size - 1
pop = pg.population(
problem,
size=population_size,
seed=seed,
b=pg.bfe(),
)
if not discard_start_params:
pop.push_back(x)
return pop
| [
37811,
3546,
26908,
12972,
70,
5908,
6436,
11341,
526,
15931,
198,
11748,
14601,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3959,
9083,
1330,
15458,
62,
18206,
84,
2024,
198,
6738,
3959,
9083,
13,
11250,
1330,
3180,
62,
47,
56,... | 2.17869 | 17,494 |
import PySimpleGUI as sg
| [
11748,
9485,
26437,
40156,
355,
264,
70,
628
] | 3.25 | 8 |
#! /usr/bin/env python
# Copyright (c) 2015, Xerox Corporation (Xerox) and Palo Alto Research Center, Inc (PARC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL XEROX OR PARC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ################################################################################
# #
# # PATENT NOTICE
# #
# # This software is distributed under the BSD 2-clause License (see LICENSE
# # file). This BSD License does not make any patent claims and as such, does
# # not act as a patent grant. The purpose of this section is for each contributor
# # to define their intentions with respect to intellectual property.
# #
# # Each contributor to this source code is encouraged to state their patent
# # claims and licensing mechanisms for any contributions made. At the end of
# # this section contributors may each make their own statements. Contributor's
# # claims and grants only apply to the pieces (source code, programs, text,
# # media, etc) that they have contributed directly to this software.
# #
# # There is no guarantee that this section is complete, up to date or accurate. It
# # is up to the contributors to maintain their portion of this section and up to
# # the user of the software to verify any claims herein.
# #
# # Do not remove this header notification. The contents of this section must be
# # present in all distributions of the software. You may only modify your own
# # intellectual property statements. Please provide contact information.
#
# - Palo Alto Research Center, Inc
# This software distribution does not grant any rights to patents owned by Palo
# Alto Research Center, Inc (PARC). Rights to these patents are available via
# various mechanisms. As of January 2016 PARC has committed to FRAND licensing any
# intellectual property used by its contributions to this software. You may
# contact PARC at cipo@parc.com for more information or visit http://www.ccnx.org
#
# @author Glenn Scott, Palo Alto Research Center (PARC)
# @copyright (c) 2015, Xerox Corporation (Xerox) and Palo Alto Research Center, Inc (PARC). All rights reserved.
import sys
import os
import pprint
import subprocess
import difflib
import csv
import LongBow
def concatenateContinuationLines(lines):
    '''
    Parse doxygen log lines.

    Lines that are indented by whitespace are continuations of the previous
    line; each continuation is folded into that line separated by a single
    space.

    Fixes over the previous version:
    - the condition tested ``startswith(" ")`` twice (one- and two-space
      variants); a two-space prefix implies a one-space prefix, so a single
      check suffices,
    - ``== False`` replaced by the idiomatic ``not``/positive test,
    - empty input now yields ``[]`` instead of ``['']`` (blank accumulators
      are never emitted).

    Args:
        lines: iterable of raw log lines.

    Returns:
        list of logical lines with continuations merged.
    '''
    result = []
    accumulator = ""
    for line in lines:
        line = line.rstrip()
        if line.startswith(" "):
            # Continuation: fold into the line currently being accumulated.
            accumulator += " " + line.lstrip()
        else:
            # New logical line: flush whatever was accumulated so far.
            if accumulator:
                result.append(accumulator)
            accumulator = line
    if accumulator:
        result.append(accumulator)
    return result
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
1853,
11,
44264,
1140,
10501,
357,
55,
263,
1140,
8,
290,
44878,
34317,
4992,
3337,
11,
3457,
357,
47,
25793,
8,
198,
2,
1439,
2489,
10395,
13,
198,
2,
19... | 3.539497 | 1,114 |
# Repeatedly prompt for a number and report how it compares to 10.
# Fix: the original crashed with ValueError on non-numeric input; invalid
# entries are now reported and the prompt repeats.
while True:
    try:
        num = float(input("Please enter a number: "))
    except ValueError:
        print("\nThat was not a valid number, please try again!")
        continue
    if num > 10:
        print(f"\nYour number, {num}, is more than 10!")
    elif num < 10:
        print(f"\nYour number, {num}, is less than 10!")
    else:
        print("\nYour number is equal to 10!")
| [
4514,
6407,
25,
201,
198,
220,
220,
220,
997,
796,
12178,
7,
15414,
7203,
5492,
3802,
257,
1271,
25,
366,
4008,
201,
198,
201,
198,
220,
220,
220,
611,
997,
1875,
838,
25,
201,
198,
220,
220,
220,
220,
220,
220,
220,
3601,
7,
69... | 2.143939 | 132 |
# Generated by Django 2.2 on 2019-08-30 01:26
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
319,
13130,
12,
2919,
12,
1270,
5534,
25,
2075,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
628
] | 2.904762 | 42 |
# @Title: 丑数 (丑数 LCOF)
# @Author: 18015528893
# @Date: 2021-01-26 17:11:46
# @Runtime: 140 ms
# @Memory: 14.9 MB
# Script entry point: smoke-test the solution by computing the 10th ugly number.
if __name__ == "__main__":
    solver = Solution()
    solver.nthUglyNumber(10)
| [
198,
2,
2488,
19160,
25,
220,
10310,
239,
46763,
108,
357,
10310,
239,
46763,
108,
406,
8220,
37,
8,
198,
2,
2488,
13838,
25,
1248,
486,
2816,
2078,
49682,
198,
2,
2488,
10430,
25,
33448,
12,
486,
12,
2075,
1596,
25,
1157,
25,
351... | 1.947917 | 96 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
# class Bar:
#   x = 1
#
# print(Bar.x)
# NOTE(review): deliberately obfuscated Python 2 puzzle code — the commented
# block above shows the intended equivalent.  `_` is never assigned in this
# file; presumably it is supplied by the execution environment (e.g. the
# REPL's last-result binding) — confirm before reuse.  As written, running
# this module raises NameError on `_`.
(___,) = (1,)
print _.x
(Bar,) = (_,)
| [
2,
220,
1398,
2409,
25,
198,
2,
220,
220,
220,
220,
220,
2124,
796,
352,
198,
2,
198,
2,
220,
3601,
7,
10374,
13,
87,
8,
198,
198,
7,
17569,
35751,
796,
357,
16,
35751,
198,
198,
4798,
4808,
13,
87,
198,
7,
10374,
35751,
796,
... | 1.734694 | 49 |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import requests
import csv
import sys
from datetime import datetime
from geopy.distance import vincenty
import json
def write_csv(file_name, content):
    """Write ``content`` (an iterable of rows) to ``file_name`` as CSV."""
    with open(file_name, 'w') as out:
        csv.writer(out).writerows(content)
def write_json(file_name, content):
    """Serialise ``content`` to ``file_name`` as JSON."""
    with open(file_name, 'w') as out:
        json.dump(content, out)
def get_circle_data(input_data, prec):
    """Return the rows that have enough close neighbours.

    For every row (whose last two fields are latitude and longitude) the
    number of rows — itself included — lying closer than 500 m (vincenty
    distance) is counted.  Rows whose count exceeds
    ``len(input_data) * prec`` are returned with the count appended.
    """
    threshold = len(input_data) * prec
    dense_rows = []
    for row in input_data:
        origin = (row[-2], row[-1])
        neighbours = 0
        for other in input_data:
            if vincenty(origin, (other[-2], other[-1])).meters < 500:
                neighbours += 1
        if neighbours > threshold:
            enriched = row.copy()
            enriched.append(neighbours)
            dense_rows.append(enriched)
    return dense_rows
if __name__ == '__main__':
    # NOTE(review): `read_csv` is not defined anywhere in this file and no
    # import provides it — running this script as-is raises NameError.
    # Presumably a reader analogous to write_csv was lost; confirm.
    # NOTE(review): the shebang says python2.7, but `list.copy()` below is
    # Python 3 only — the script appears to actually target Python 3.
    data = read_csv('../data/dengue_all.csv')
    header = None
    # Reference "today": the date column (index 1) of the last record.
    now = datetime.strptime(data[-1][1], '%Y/%m/%d').date()
    seven_data = []
    three_days = []
    five_days = []
    one_days = []
    for row in data:
        if not header:
            # First row is the header: rename date/district/lat/lon columns.
            row[1] = '日期'
            row[2] = '區別'
            row[-2] = 'Latitude'
            row[-1] = 'Longitude'
            header = row
            continue
        event_date = datetime.strptime(row[1], '%Y/%m/%d').date()
        delta = (now-event_date)
        row[-1], row[-2] = float(row[-1]), float(row[-2])
        # Bucket events by age (1/3/5/7-day windows).  Only the 7-day export
        # records a marker colour: red for events < 3 days old, else cadetblue.
        if delta.days < 7:
            tmp = row.copy()
            if delta.days < 3:
                three_days.append(row)
                tmp.append('red')
            else:
                tmp.append('cadetblue')
            if delta.days < 1:
                one_days.append(row)
            if delta.days < 5:
                five_days.append(row)
            seven_data.append(tmp)
    header += ['color']
    seven_data = [header] + seven_data
    write_csv('../data/seven_data.csv', seven_data)
    # Drop the colour column again before building the clustering exports.
    header.pop()
    data_header = header.copy() + ['count']
    # Cluster exports: rows with > 2% / 2.5% / 3% of all points within 500 m
    # (see get_circle_data), for the 1-, 3- and 5-day windows.
    one_0025_data = [data_header] + get_circle_data(one_days, 0.025)
    three_0025_data = [data_header] + get_circle_data(three_days, 0.025)
    five_0025_data = [data_header] + get_circle_data(five_days, 0.025)
    one_003_data = [data_header] + get_circle_data(one_days, 0.03)
    three_003_data = [data_header] + get_circle_data(three_days, 0.03)
    five_003_data = [data_header] + get_circle_data(five_days, 0.03)
    one_002_data = [data_header] + get_circle_data(one_days, 0.02)
    three_002_data = [data_header] + get_circle_data(three_days, 0.02)
    five_002_data = [data_header] + get_circle_data(five_days, 0.02)
    write_csv('../data/one_002_data.csv', one_002_data)
    write_csv('../data/three_002_data.csv', three_002_data)
    write_csv('../data/five_002_data.csv', five_002_data)
    write_csv('../data/one_0025_data.csv', one_0025_data)
    write_csv('../data/three_0025_data.csv', three_0025_data)
    write_csv('../data/five_0025_data.csv', five_0025_data)
    write_csv('../data/one_003_data.csv', one_003_data)
    write_csv('../data/three_003_data.csv', three_003_data)
    write_csv('../data/five_003_data.csv', five_003_data)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
13,
22,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
7007,
198,
11748,
269,
21370,
198,
11748,
25064,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,... | 2.047187 | 1,653 |
import re
import os
import shutil
import scrapy
from sohu.items import BBSItem
# Landing pages of the city-level Focus (focus.cn) forums to crawl, one per
# city.  Entries mix pinyin slugs and abbreviations (e.g. "bj" = Beijing).
# Keep in sync with the spider that consumes this list.
bbs_city_url_list = [
    "https://bbs.focus.cn/anshan/",
    "https://bbs.focus.cn/ankang/",
    "https://bbs.focus.cn/anqing/",
    "https://bbs.focus.cn/anshun/",
    "https://bbs.focus.cn/anyang/",
    "https://bbs.focus.cn/aomen/",
    "https://bbs.focus.cn/byne/",
    "https://bbs.focus.cn/bazhong/",
    "https://bbs.focus.cn/baiyin/",
    "https://bbs.focus.cn/baise/",
    "https://bbs.focus.cn/bengbu/",
    "https://bbs.focus.cn/baotou/",
    "https://bbs.focus.cn/bd/",
    "https://bbs.focus.cn/baoshan/",
    "https://bbs.focus.cn/baoji/",
    "https://bbs.focus.cn/beihai/",
    "https://bbs.focus.cn/bj/",
    "https://bbs.focus.cn/benxi/",
    "https://bbs.focus.cn/bijie/",
    "https://bbs.focus.cn/binzhou/",
    "https://bbs.focus.cn/bozhou/",
    "https://bbs.focus.cn/cangzhou/",
    "https://bbs.focus.cn/changdu/",
    "https://bbs.focus.cn/changde/",
    "https://bbs.focus.cn/cz/",
    "https://bbs.focus.cn/cc/",
    "https://bbs.focus.cn/cs/",
    "https://bbs.focus.cn/changzhi/",
    "https://bbs.focus.cn/chaozhou/",
    "https://bbs.focus.cn/chenzhou/",
    "https://bbs.focus.cn/cd/",
    "https://bbs.focus.cn/chengde/",
    "https://bbs.focus.cn/chizhou/",
    "https://bbs.focus.cn/chifeng/",
    "https://bbs.focus.cn/chongzuo/",
    "https://bbs.focus.cn/chuzhou/",
    "https://bbs.focus.cn/cixi/",
    "https://bbs.focus.cn/cq/",
    "https://bbs.focus.cn/dazhou/",
    "https://bbs.focus.cn/dali/",
    "https://bbs.focus.cn/dl/",
    "https://bbs.focus.cn/dq/",
    "https://bbs.focus.cn/datong/",
    "https://bbs.focus.cn/dandong/",
    "https://bbs.focus.cn/deyang/",
    "https://bbs.focus.cn/dz/",
    "https://bbs.focus.cn/dingxi/",
    "https://bbs.focus.cn/dongying/",
    "https://bbs.focus.cn/dg/",
    "https://bbs.focus.cn/danzhou/",
    "https://bbs.focus.cn/erds/",
    "https://bbs.focus.cn/ezhou/",
    "https://bbs.focus.cn/enshi/",
    "https://bbs.focus.cn/fcg/",
    "https://bbs.focus.cn/fs/",
    "https://bbs.focus.cn/fz/",
    "https://bbs.focus.cn/fushun/",
    "https://bbs.focus.cn/fuzhou/",
    "https://bbs.focus.cn/fuxin/",
    "https://bbs.focus.cn/fuyang/",
    "https://bbs.focus.cn/ganzhou/",
    "https://bbs.focus.cn/guyuan/",
    "https://bbs.focus.cn/guangan/",
    "https://bbs.focus.cn/guangyuan/",
    "https://bbs.focus.cn/gz/",
    "https://bbs.focus.cn/gl/",
    "https://bbs.focus.cn/guigang/",
    "https://bbs.focus.cn/gy/",
    "https://bbs.focus.cn/hrb/",
    "https://bbs.focus.cn/haidong/",
    "https://bbs.focus.cn/hn/",
    "https://bbs.focus.cn/handan/",
    "https://bbs.focus.cn/hanzhong/",
    "https://bbs.focus.cn/hz/",
    "https://bbs.focus.cn/heze/",
    "https://bbs.focus.cn/hf/",
    "https://bbs.focus.cn/hechi/",
    "https://bbs.focus.cn/heyuan/",
    "https://bbs.focus.cn/hebi/",
    "https://bbs.focus.cn/hezhou/",
    "https://bbs.focus.cn/heihe/",
    "https://bbs.focus.cn/hs/",
    "https://bbs.focus.cn/hengyang/",
    "https://bbs.focus.cn/hhht/",
    "https://bbs.focus.cn/hlbe/",
    "https://bbs.focus.cn/huludao/",
    "https://bbs.focus.cn/huzhou/",
    "https://bbs.focus.cn/huaihua/",
    "https://bbs.focus.cn/huaian/",
    "https://bbs.focus.cn/huaibei/",
    "https://bbs.focus.cn/huainan/",
    "https://bbs.focus.cn/huanggang/",
    "https://bbs.focus.cn/huangshan/",
    "https://bbs.focus.cn/huangshi/",
    "https://bbs.focus.cn/huizhou/",
    "https://bbs.focus.cn/jixi/",
    "https://bbs.focus.cn/jian/",
    "https://bbs.focus.cn/jilin/",
    "https://bbs.focus.cn/jn/",
    "https://bbs.focus.cn/jining/",
    "https://bbs.focus.cn/jiaxing/",
    "https://bbs.focus.cn/jiayuguan/",
    "https://bbs.focus.cn/jiamusi/",
    "https://bbs.focus.cn/jiangmen/",
    "https://bbs.focus.cn/jiaozuo/",
    "https://bbs.focus.cn/jieyang/",
    "https://bbs.focus.cn/jinchang/",
    "https://bbs.focus.cn/jinhua/",
    "https://bbs.focus.cn/jinzhou/",
    "https://bbs.focus.cn/jinzhong/",
    "https://bbs.focus.cn/jingmen/",
    "https://bbs.focus.cn/jingzhou/",
    "https://bbs.focus.cn/jingdezhen/",
    "https://bbs.focus.cn/jiujiang/",
    "https://bbs.focus.cn/jiuquan/",
    "https://bbs.focus.cn/kf/",
    "https://bbs.focus.cn/km/",
    "https://bbs.focus.cn/kunshan/",
    "https://bbs.focus.cn/lasa/",
    "https://bbs.focus.cn/laiwu/",
    "https://bbs.focus.cn/laibin/",
    "https://bbs.focus.cn/lz/",
    "https://bbs.focus.cn/langfang/",
    "https://bbs.focus.cn/leshan/",
    "https://bbs.focus.cn/lijiang/",
    "https://bbs.focus.cn/ls/",
    "https://bbs.focus.cn/lyg/",
    "https://bbs.focus.cn/liaocheng/",
    "https://bbs.focus.cn/liaoyang/",
    "https://bbs.focus.cn/linzhi/",
    "https://bbs.focus.cn/lincang/",
    "https://bbs.focus.cn/linfen/",
    "https://bbs.focus.cn/linyi/",
    "https://bbs.focus.cn/liuzhou/",
    "https://bbs.focus.cn/luan/",
    "https://bbs.focus.cn/lps/",
    "https://bbs.focus.cn/longyan/",
    "https://bbs.focus.cn/longnan/",
    "https://bbs.focus.cn/loudi/",
    "https://bbs.focus.cn/luoyang/",
    "https://bbs.focus.cn/luzhou/",
    "https://bbs.focus.cn/mas/",
    "https://bbs.focus.cn/maoming/",
    "https://bbs.focus.cn/meizhou/",
    "https://bbs.focus.cn/meishan/",
    "https://bbs.focus.cn/mianyang/",
    "https://bbs.focus.cn/mdj/",
    "https://bbs.focus.cn/nc/",
    "https://bbs.focus.cn/nanchong/",
    "https://bbs.focus.cn/nj/",
    "https://bbs.focus.cn/nn/",
    "https://bbs.focus.cn/nanping/",
    "https://bbs.focus.cn/nt/",
    "https://bbs.focus.cn/nanyang/",
    "https://bbs.focus.cn/neijiang/",
    "https://bbs.focus.cn/nb/",
    "https://bbs.focus.cn/ningde/",
    "https://bbs.focus.cn/panzhihua/",
    "https://bbs.focus.cn/panjin/",
    "https://bbs.focus.cn/pds/",
    "https://bbs.focus.cn/pingliang/",
    "https://bbs.focus.cn/pt/",
    "https://bbs.focus.cn/puer/",
    "https://bbs.focus.cn/puyang/",
    "https://bbs.focus.cn/qqhe/",
    "https://bbs.focus.cn/qinzhou/",
    "https://bbs.focus.cn/qhd/",
    "https://bbs.focus.cn/qd/",
    "https://bbs.focus.cn/qingyuan/",
    "https://bbs.focus.cn/qingyang/",
    "https://bbs.focus.cn/qujing/",
    "https://bbs.focus.cn/quanzhou/",
    "https://bbs.focus.cn/quzhou/",
    "https://bbs.focus.cn/rikaze/",
    "https://bbs.focus.cn/rizhao/",
    "https://bbs.focus.cn/smx/",
    "https://bbs.focus.cn/sanming/",
    "https://bbs.focus.cn/sansha/",
    "https://bbs.focus.cn/sanya/",
    "https://bbs.focus.cn/shannan/",
    "https://bbs.focus.cn/shantou/",
    "https://bbs.focus.cn/shanwei/",
    "https://bbs.focus.cn/shangluo/",
    "https://bbs.focus.cn/shangqiu/",
    "https://bbs.focus.cn/sh/",
    "https://bbs.focus.cn/shangrao/",
    "https://bbs.focus.cn/shaoguan/",
    "https://bbs.focus.cn/shaoyang/",
    "https://bbs.focus.cn/sx/",
    "https://bbs.focus.cn/sz/",
    "https://bbs.focus.cn/sy/",
    "https://bbs.focus.cn/shiyan/",
    "https://bbs.focus.cn/sjz/",
    "https://bbs.focus.cn/shizuishan/",
    "https://bbs.focus.cn/shuangyashan/",
    "https://bbs.focus.cn/suzhou/",
    "https://bbs.focus.cn/suqian/",
    "https://bbs.focus.cn/ahsuzhou/",
    "https://bbs.focus.cn/suizhou/",
    "https://bbs.focus.cn/suihua/",
    "https://bbs.focus.cn/suining/",
    "https://bbs.focus.cn/tz/",
    "https://bbs.focus.cn/taian/",
    "https://bbs.focus.cn/jstaizhou/",
    "https://bbs.focus.cn/ty/",
    "https://bbs.focus.cn/ts/",
    "https://bbs.focus.cn/tj/",
    "https://bbs.focus.cn/tianshui/",
    "https://bbs.focus.cn/tieling/",
    "https://bbs.focus.cn/tongliao/",
    "https://bbs.focus.cn/tongchuan/",
    "https://bbs.focus.cn/tongling/",
    "https://bbs.focus.cn/tongren/",
    "https://bbs.focus.cn/luohe/",
    "https://bbs.focus.cn/weihai/",
    "https://bbs.focus.cn/weifang/",
    "https://bbs.focus.cn/weinan/",
    "https://bbs.focus.cn/wenzhou/",
    "https://bbs.focus.cn/wuhai/",
    "https://bbs.focus.cn/wlcb/",
    "https://bbs.focus.cn/wlmq/",
    "https://bbs.focus.cn/wuxi/",
    "https://bbs.focus.cn/wuhu/",
    "https://bbs.focus.cn/wuzhou/",
    "https://bbs.focus.cn/wuzhong/",
    "https://bbs.focus.cn/wh/",
    "https://bbs.focus.cn/wuwei/",
    "https://bbs.focus.cn/xian/",
    "https://bbs.focus.cn/xichang/",
    "https://bbs.focus.cn/xining/",
    "https://bbs.focus.cn/xishuangbanna/",
    "https://bbs.focus.cn/xm/",
    "https://bbs.focus.cn/xianning/",
    "https://bbs.focus.cn/xianyang/",
    "https://bbs.focus.cn/xiangyang/",
    "https://bbs.focus.cn/xiangtan/",
    "https://bbs.focus.cn/xiangxi/",
    "https://bbs.focus.cn/xiaogan/",
    "https://bbs.focus.cn/xinxiang/",
    "https://bbs.focus.cn/xinyang/",
    "https://bbs.focus.cn/xingtai/",
    "https://bbs.focus.cn/xuzhou/",
    "https://bbs.focus.cn/xuchang/",
    "https://bbs.focus.cn/xuancheng/",
    "https://bbs.focus.cn/yaan/",
    "https://bbs.focus.cn/yt/",
    "https://bbs.focus.cn/yancheng/",
    "https://bbs.focus.cn/yanan/",
    "https://bbs.focus.cn/yangzhou/",
    "https://bbs.focus.cn/yj/",
    "https://bbs.focus.cn/yibin/",
    "https://bbs.focus.cn/yichang/",
    "https://bbs.focus.cn/yichun/",
    "https://bbs.focus.cn/yiyang/",
    "https://bbs.focus.cn/yinchuan/",
    "https://bbs.focus.cn/yingtan/",
    "https://bbs.focus.cn/yingkou/",
    "https://bbs.focus.cn/yongzhou/",
    "https://bbs.focus.cn/sxyulin/",
    "https://bbs.focus.cn/yulin/",
    "https://bbs.focus.cn/yuxi/",
    "https://bbs.focus.cn/yy/",
    "https://bbs.focus.cn/yunfu/",
    "https://bbs.focus.cn/yuncheng/",
    "https://bbs.focus.cn/chaoyang/",
    "https://bbs.focus.cn/zaozhuang/",
    "https://bbs.focus.cn/zhanjiang/",
    "https://bbs.focus.cn/zhangzhou/",
    "https://bbs.focus.cn/zjj/",
    "https://bbs.focus.cn/zjk/",
    "https://bbs.focus.cn/zhangye/",
    "https://bbs.focus.cn/zhaotong/",
    "https://bbs.focus.cn/zhaoqing/",
    "https://bbs.focus.cn/zhenjiang/",
    "https://bbs.focus.cn/zz/",
    "https://bbs.focus.cn/zs/",
    "https://bbs.focus.cn/zhongwei/",
    "https://bbs.focus.cn/zhoushan/",
    "https://bbs.focus.cn/zhoukou/",
    "https://bbs.focus.cn/zh/",
    "https://bbs.focus.cn/zhuzhou/",
    "https://bbs.focus.cn/zmd/",
    "https://bbs.focus.cn/ziyang/",
    "https://bbs.focus.cn/zibo/",
    "https://bbs.focus.cn/zigong/",
    "https://bbs.focus.cn/zunyi/"
]
| [
11748,
302,
198,
11748,
28686,
198,
11748,
4423,
346,
198,
11748,
15881,
88,
198,
6738,
523,
13415,
13,
23814,
1330,
347,
4462,
7449,
198,
198,
65,
1443,
62,
19205,
62,
6371,
62,
4868,
796,
685,
198,
220,
220,
220,
366,
5450,
1378,
... | 1.933858 | 5,337 |
import luigi
from tifffile import imread
from faim_sl.MultiFileTask import MultiFileTask
| [
11748,
300,
84,
25754,
198,
6738,
256,
361,
487,
576,
1330,
545,
961,
198,
198,
6738,
277,
1385,
62,
6649,
13,
29800,
8979,
25714,
1330,
15237,
8979,
25714,
628
] | 3.137931 | 29 |
import sys
# Competitive-programming solution read from stdin.  Appears to maximise the
# total of up to three non-overlapping windows of length m over n values
# (windows near the end may be truncated via min(idx+m, n)) — confirm against
# the original problem statement.
# ifunc reads one whitespace-separated line of ints; g aliases range.
ifunc, g = lambda: [*map(int, sys.stdin.readline().rstrip().split())], range
n = ifunc()[0]
iList = ifunc()
m = ifunc()[0]
# dp[idx][cnt]: best total from position idx onward when cnt further windows
# may still be started after the current one (so dp[0][2] allows 3 windows).
dp = [[-1]*3 for _ in g(n)]
# pSum[i] = sum of the first i values, so a window sum is O(1).
pSum = [0]
aSum = 0
for item in iList:
    aSum += item
    pSum.append(aSum)
# Fill the table right-to-left.
for idx in g(n-1, -1, -1):
    for cnt in g(3):
        aVal = -1
        bVal = -1
        # aVal: best continuation after taking a window starting at idx.
        if idx+m >= n or cnt-1 < 0:
            aVal = 0
        else:
            aVal = dp[idx+m][cnt-1]
        # bVal: best total when position idx is skipped.
        if idx+1 >= n:
            bVal = 0
        else:
            bVal = dp[idx+1][cnt]
        end = min(idx+m, n)
        dp[idx][cnt] = max(aVal + pSum[end] - pSum[idx], bVal)
print(dp[0][2])
| [
11748,
25064,
198,
198,
361,
19524,
11,
308,
796,
37456,
25,
30138,
8899,
7,
600,
11,
25064,
13,
19282,
259,
13,
961,
1370,
22446,
81,
36311,
22446,
35312,
28955,
4357,
2837,
198,
198,
77,
796,
611,
19524,
3419,
58,
15,
60,
198,
72,... | 1.668407 | 383 |
"""Functions for caching per-user context."""
import hashlib
from django.core.cache import cache
from . import IPFabricConfig
def _get_cache_key(user: str) -> str:
    """Derive the cache key for *user*, namespaced by the plugin name."""
    namespaced = f"{IPFabricConfig.name}-{user}"
    return hashlib.md5(namespaced.encode("utf-8")).hexdigest()  # nosec
def get_context(user: str) -> dict:
    """Return the context stored for *user* (empty dict when nothing is cached)."""
    stored = cache.get(_get_cache_key(user))
    return stored if stored else {}
def set_context(user: str, updated_context: dict) -> None:
    """Merge *updated_context* into the context stored for *user*.

    Keys in *updated_context* override existing ones; the merged dict is
    cached with a 24-hour (86400 s) timeout.

    Fix: the return annotation claimed ``dict`` but the function never
    returned anything — it is now correctly annotated ``None``.
    """
    merged = {**get_context(user), **updated_context}
    cache.set(_get_cache_key(user), merged, timeout=86400)
| [
37811,
24629,
2733,
329,
40918,
583,
12,
7220,
4732,
526,
15931,
198,
11748,
12234,
8019,
198,
6738,
42625,
14208,
13,
7295,
13,
23870,
1330,
12940,
198,
6738,
764,
1330,
6101,
43957,
1173,
16934,
628,
198,
4299,
4808,
1136,
62,
23870,
... | 2.936975 | 238 |
#!/usr/bin/env python
from .giant import giant | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
6738,
764,
70,
3014,
1330,
6175
] | 2.9375 | 16 |
import random
from matplotlib import pyplot as plt
# Apply matplotlib's "ggplot" style sheet to all subsequent plots.
plt.style.use("ggplot")
| [
11748,
4738,
198,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
198,
489,
83,
13,
7635,
13,
1904,
7203,
1130,
29487,
4943,
628
] | 2.888889 | 27 |
#!/usr/bin/python3
from distutils.core import setup
#from setuptools import setup
from setuptools.command.install import install
import os
# Package metadata for the Bing Translate helper.
setup(
    name = "Bing Translate",
    version = "1.0",
    author = "poemdistance",
    author_email = "poemdistance@gmail.com",
    url = "",
    packages = ['bing'],
    # Fix: the original referenced the undefined name `command`, which raised
    # NameError at setup time.  The install command class imported above is
    # `install`, so wire that in.  NOTE(review): if a customised subclass of
    # `install` was intended, restore it here.
    cmdclass={'install': install}
)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
6738,
1233,
26791,
13,
7295,
1330,
9058,
198,
2,
6738,
900,
37623,
10141,
1330,
9058,
198,
6738,
900,
37623,
10141,
13,
21812,
13,
17350,
1330,
2721,
198,
11748,
28686,
198,
198,
404... | 2.724409 | 127 |
# Copyright (c) 2015 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import functools
import click
import sys
from ..util import parse_b32_key
# Reusable "-f/--force" flag shared by destructive commands: when given, the
# interactive confirmation prompt is skipped.
click_force_option = click.option('-f', '--force', is_flag=True,
                                  help='Confirm the action without prompting.')
class UpperCaseChoice(click.Choice):
"""
Support lowercase option values for uppercase options.
Does not support token normalization.
"""
@click_callback()
| [
2,
15069,
357,
66,
8,
1853,
575,
549,
3713,
9564,
198,
2,
1439,
2489,
10395,
13,
198,
2,
198,
2,
220,
220,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
198,
2,
220,
220,
1231,
17613,
11,
389,
10431,
2810... | 3.149565 | 575 |
# Now make a simple example using the custom projection.
import pdb
import sys
import os
import pkg_resources
pkg_resources.require('matplotlib==1.4.0')
import datetime
from dateutil.relativedelta import relativedelta
import re
import math
from matplotlib.ticker import ScalarFormatter, MultipleLocator
from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
from StringIO import StringIO
import numpy as np
from numpy import load
import imp
imp.load_source('SoundingRoutines', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Sounding_Routines.py')
imp.load_source('TephigramPlot', '/nfs/see-fs-01_users/eepdw/python_scripts/Tephigram/Tephigram_Functions.py')
from TephigramPlot import *
from SoundingRoutines import *
imp.load_source('GenMetFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeneralMeteoFunctions.py')
from GenMetFuncs import *
imp.load_source('GeogFuncs', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/GeogFunctions.py')
from GeogFuncs import *
# Top of the tephigram pressure axis [hPa].
pmin=200.
# WMO station numbers of the radiosonde stations to plot (42xxx/43xxx/40xxx —
# presumably the Indian-region network; confirm against station metadata).
station_list_cs=[42182, 43003, 43014, 42867, 43371, 43353, 43285, 43192, 43150, 42339, 40990, 40948]
#station_list_cs=[42867]
#station_list_cs=[42971, 42339]
#station_list_cs=[42809]
# plot_dates = [datetime.datetime(2012,5,16,0,0,0),
# datetime.datetime(2012,6,1,0,0,0),
# datetime.datetime(2012,6,15,0,0,0),
# datetime.datetime(2012,7,15,0,0,0)]
#plot_dates = [datetime.datetime(2012,5,15,0,0,0),
# datetime.datetime(2012,5,16,0,0,0),
# datetime.datetime(2012,5,17,0,0,0),
# datetime.datetime(2012,5,18,0,0,0),
# datetime.datetime(2012,5,19,0,0,0)]
# plot_dates = [datetime.datetime(2012,5,31,0,0,0),
# datetime.datetime(2012,6,1,0,0,0),
# datetime.datetime(2012,6,2,0,0,0),
# datetime.datetime(2012,6,3,0,0,0),
# datetime.datetime(2012,6,5,0,0,0),
# datetime.datetime(2012,7,1,0,0,0)]
#plot_dates = [datetime.datetime(2012,6,14,0,0,0),
# datetime.datetime(2012,6,15,0,0,0),
# datetime.datetime(2012,6,16,0,0,0),
# datetime.datetime(2012,6,17,0,0,0),
# datetime.datetime(2012,6,18,0,0,0)]
#plot_dates = [datetime.datetime(2012,7,14,0,0,0),
# datetime.datetime(2012,7,15,0,0,0),
# datetime.datetime(2012,7,16,0,0,0),
# datetime.datetime(2012,7,17,0,0,0),
# datetime.datetime(2012,7,18,0,0,0)]
# plot_dates = [datetime.datetime(2012,6,2,0,0,0),
# datetime.datetime(2012,6,3,0,0,0),
# datetime.datetime(2012,6,5,0,0,0),
# datetime.datetime(2012,7,1,0,0,0)]
# plot_dates = [datetime.datetime(2012,5,15,0,0,0),
# datetime.datetime(2012,5,16,0,0,0),
# datetime.datetime(2012,5,17,0,0,0),
# datetime.datetime(2012,5,18,0,0,0),
# datetime.datetime(2012,5,19,0,0,0),
# datetime.datetime(2012,5,31,0,0,0),
# datetime.datetime(2012,6,1,0,0,0),
# datetime.datetime(2012,6,2,0,0,0),
# datetime.datetime(2012,6,3,0,0,0),
# datetime.datetime(2012,6,5,0,0,0),
# datetime.datetime(2012,6,14,0,0,0),
# datetime.datetime(2012,6,15,0,0,0),
# datetime.datetime(2012,6,16,0,0,0),
# datetime.datetime(2012,6,17,0,0,0),
# datetime.datetime(2012,6,18,0,0,0),
# datetime.datetime(2012,7,14,0,0,0),
# datetime.datetime(2012,7,15,0,0,0),
# datetime.datetime(2012,7,16,0,0,0),
# datetime.datetime(2012,7,17,0,0,0),
# datetime.datetime(2012,7,18,0,0,0),
# datetime.datetime(2012,6,2,0,0,0),
# datetime.datetime(2012,6,3,0,0,0),
# datetime.datetime(2012,6,5,0,0,0),
# datetime.datetime(2012,7,1,0,0,0)]
#plot_dates = [datetime.datetime(2012,5,2,0,0,0),
# datetime.datetime(2012,6,3,0,0,0),
# datetime.datetime(2012,7,5,0,0,0)]
#pdb.set_trace()
#date_min=datetime.datetime(2011,5,1,0,0,0)
#date_max=datetime.datetime(2014,10,1,0,0,0)
# match_header = re.compile(r'(#.....20|#.....19)')
# delta = relativedelta(weeks=+1)
# Index of each derived sounding variable along the first axis of the
# 'pressures_for_plotting' array loaded below.
variable_list={'pressures': 0, 'temps':1, 'dewpoints':2, 'winddirs':3, 'windspeeds':4, 'pot_temp':5,
               'sat_vap_pres':6, 'vap_press':7, 'rel_hum':8, 'wvmr':9, 'sp_hum':10, 'sat_temp':11,
               'theta_e':12, 'theta_e_sat':13, 'theta_e_minus_theta_e_sat':14}
# Index of each single-valued diagnostic in 'single_value_vars'.
variable_list_line={'lcl_temp': 0, 'lcl_vpt':1, 'pbl_pressure':2, 'surface_pressure':3, 'T_eq_0':4}
# For every station, plot one skew-T/tephigram per matching sounding date and
# save it as PNG.  NOTE(review): `plot_dates` is referenced below but every
# definition of it above is commented out — as written this raises NameError;
# uncomment/restore one of the plot_dates blocks before running.
# NOTE(review): the trailing `print PrintException()` is a Python 2 print
# statement, so this script targets Python 2.
for stat in station_list_cs:
    #pd_year= [p.year for p in plot_date]
    #pd_month= [p.month for p in plot_dates]
    #pd_day= [p.day for p in plot_dates]
    station_name,la,lo, st_height = StationInfoSearch(stat)
    load_file=load('/nfs/a90/eepdw/Data/Observations/Radiosonde_Numpy/Radiosonde_Single_Station_PRESSURES__IND_INTERP_SOUNDING_%s.npz' % stat)
    #pressures_for_plotting=pressures_for_plotting, single_value_vars=single_value_vars, dates_for_plotting_single=dates_for_plotting_single, dates_for_plotting_single_single=dates_for_plotting_single_single, variable_list=variable_list, variable_list_line=variable_list_line)
    #pdb.set_trace()
    data=load_file['pressures_for_plotting']
    dates=load_file['dates_for_plotting_single']
    # Split the sounding dates into year/month/day lists for matching below.
    d_year = [d.year for d in dates]
    d_month = [d.month for d in dates]
    d_day = [d.day for d in dates]
    dates_single = load_file['dates_for_plotting_single_single']
    d_year_single = [d.year for d in dates_single]
    d_month_single = [d.month for d in dates_single]
    d_day_single = [d.day for d in dates_single]
    for plot_date in plot_dates:
        #pdb.set_trace()
        #date_match_idx = np.where((np.array(d_year)==plot_date.year) & (np.array(d_month)==plot_date.month) & (np.array(d_day)==plot_date.day))[0]
        # Match on year+month only (day matching is commented out above/below).
        date_match_idx = np.where((np.array(d_year)==plot_date.year) & (np.array(d_month)==plot_date.month))[0]
        #date_match_idx_single = np.where((np.array(d_year_single)==plot_date.year) & (np.array(d_month_single)==plot_date.month) & (np.array(d_day_single)==plot_date.day))[0]
        for ds, d in enumerate (date_match_idx):
            try:
                #plot_date_2 = dates[d]
                #ds_idx = date_match_idx_single[ds]
                #pdb.set_trace()
                plot_data = data[:, d]
                # Indices follow variable_list above: 0=pressure (Pa -> hPa),
                # 1=temperature (K -> degC), 2=dewpoint depression, 15=height
                # (presumably -- index 15 is not in variable_list; confirm).
                p=plot_data[0, :]/100
                T=plot_data[1, :]-273.15
                Td=T-plot_data[2, :]
                h=plot_data[15, :]
                da=dates[d]
                #print T
                #print p
                #print Td
                #pdb.set_trace()
                u_wind,v_wind = UVWinds(plot_data[3, :], plot_data[4, :])
                p_wind = p[~np.isnan(u_wind)]
                u_wind = u_wind[~np.isnan(u_wind)]
                v_wind = v_wind[~np.isnan(v_wind)]
                # Create a new figure. The dimensions here give a good aspect ratio
                fig = plt.figure(figsize=(10, 8), frameon=False)
                #fig.patch.set_visible(False)
                tephigram_plot_height=0.85
                tephigram_plot_bottom=.085
                ax = fig.add_axes([.085,tephigram_plot_bottom,.65,tephigram_plot_height], projection='skewx', frameon=False, axisbg='w')
                ax.set_yscale('log')
                plt.grid(True)
                wbax = fig.add_axes([0.75,tephigram_plot_bottom,0.12,tephigram_plot_height],frameon=False, sharey=ax, label='barbs')
                ax_text_box = fig.add_axes([0.85,0.085,.12,tephigram_plot_height], frameon=False, axisbg='w')
                #pdb.set_trace()
                #tmax=math.ceil(nanmax(T)/10)*10
                #tmin=math.floor(nanmin(Td[p>400])/10)*10
                # Fixed temperature limits; pressure limit rounded up to 50 hPa.
                pmax=math.ceil(nanmax(p)/50)*50
                tmax=40.
                tmin=-40.
                P=linspace(pmax,pmin,37)
                # Background isopleths: mixing ratio, dry and moist adiabats.
                w = array([0.0001,0.0004,0.001, 0.002, 0.004, 0.007, 0.01, 0.016, 0.024, 0.032, 0.064, 0.128])
                ax.add_mixratio_isopleths(w,linspace(pmax, 700., 37),color='m',ls='-',alpha=.5,lw=0.5)
                ax.add_dry_adiabats(linspace(-40,40,9),P,color='k',ls='-',alpha=.5,lw=0.8)
                ax.add_moist_adiabats(linspace(-40,40,18),P,color='k',ls='--',alpha=.5,lw=0.8, do_labels=False)
                ax.other_housekeeping(pmax, pmin, tmax,tmin)
                #pres_s,temp_s,dwpt_s = surface_parcel_data_input(p, T, Td ,mixdepth=50)
                #ax.LiftParcel(pres_s, temp_s, dwpt_s)
                # NOTE(review): wbax and ax_text_box are created a second time
                # here, duplicating the axes added above — one pair is redundant.
                wbax = fig.add_axes([0.75,tephigram_plot_bottom,0.12,tephigram_plot_height],frameon=False, sharey=ax, label='barbs')
                ax_text_box = fig.add_axes([0.85,0.085,.12,tephigram_plot_height], frameon=False, axisbg='w')
                # Calculate surface parcel ascent
                #pdb.set_trace()
                #pdb.set_trace()
                # Plot the data using normal plotting functions, in this case using semilogy
                ax.semilogy(T[~np.isnan(T)], p[~np.isnan(T)], 'r', linewidth=2)
                ax.semilogy(Td[~np.isnan(Td)], p[~np.isnan(Td)], 'r',linewidth=2)
                # Labels for the (currently disabled) diagnostics table.
                row_labels=(
                    'SLAT',
                    'SLON',
                    'SELV',
                    'SHOW',
                    'LIFT',
                    'LFTV',
                    'SWET',
                    'KINX',
                    'CTOT',
                    'VTOT',
                    'TOTL',
                    'CAPE',
                    'CINS',
                    'CAPV',
                    'CINV',
                    'LFCT',
                    'LFCV',
                    'BRCH',
                    'BRCV',
                    'LCLT',
                    'LCLP',
                    'MLTH',
                    'MLMR',
                    'THCK',
                    'PWAT')
                variable='pbl_pressure'
                var_index = variable_name_index_match(variable, variable_list_line)
                print load_file['single_value_vars'].shape
                #pbl_pressure = load_file['single_value_vars'][var_index, ds_idx]
                #print pbl_pressure
                # EQLV, pp, lclp,lfcp, lclt, delta_z, CAPE, CIN=CapeCinPBLInput(p, T, Td, h, st_height, pbl_pressure/100)
                # print lclp
                # table_vals=(
                # #'%s' % station_name,
                # #'Climatology - Week beg. %s' % da,
                # '%s' % la,
                # '%s' % lo,
                # '%s' % st_height,
                # '%.1f' % ShowalterIndex(T, Td, p),  # 'Showalter index'
                # '%.1f' % LiftedIndex(T, Td, p, h, st_height),  # 'Lifted index'
                # '--',  # 'LIFT computed using virtual temperature'
                # '--',  # 'SWEAT index'
                # '%.1f' % KIndex(T, Td, p),  # 'K index'
                # '%.1f' % CrossTotalsIndex(T, Td, p),  # 'Cross totals index'
                # '%.1f' % VerticalTotalsIndex(T, p),  # 'Vertical totals index'
                # '%.1f' % TotalTotalsIndex(T, Td, p),  # 'Total totals index'
                # '%.1f' % CAPE,  # 'CAPE'
                # '%.1f' % CIN,  # 'CIN'
                # '--',  # 'CAPE using virtual temperature'
                # '--',  # 'CINS using virtual temperature'
                # '%.1f' % lfcp,  # 'Level of free convection'
                # '--',  # 'LFCT using virtual temperature'
                # '--',  # 'Bulk Richardson number'
                # '--',  # 'Bulk richardson using CAPV'
                # '%.1f' % lclt,  # 'Temp [K] of the Lifted Condensation Level'
                # '%.1f' % lclp,  # 'Pres [hPa] of the Lifted Condensation Level'
                # '--',  # 'Mean mixed layer potential temperature'
                # '--',  # 'Mean mixed layer mixing ratio'
                # '--',  # '1000 hPa to 500 hPa thickness'
                # '--')  # 'Precipitable water [mm] for entire sounding'
                # Wind barbs
                #pdb.set_trace()
                # NOTE(review): max(len(u_wind)) calls max() on an int, which
                # raises TypeError — swallowed by the broad except below, so no
                # figure is ever saved.  Probably meant e.g. len(u_wind) alone.
                barbs_idx=np.logspace(np.log10(10),np.log10(max(len(u_wind))),num=32).astype(int)
                #barbs_idx=np.logspace(np.log10(10),np.log10(max(len(u_wind))),num=32).astype(int)
                wbax.set_yscale('log')
                wbax.xaxis.set_ticks([],[])
                wbax.yaxis.grid(True,ls='-',color='y',lw=0.5)
                wbax.set_xlim(-1.5,1.5)
                wbax.get_yaxis().set_visible(False)
                wbax.set_ylim(pmax+100,pmin)
                #pdb.set_trace()
                wbax.barbs((zeros(p_wind.shape)),p_wind, u_wind, v_wind)
                # Disables the log-formatting that comes with semilogy
                ax.yaxis.set_major_formatter(ScalarFormatter())
                ax.set_yticks(linspace(100,1000,10))
                ax.set_ylim(pmax,pmin)
                ax.set_xlim(tmin,tmax)
                ax.xaxis.set_ticks([],[])
                ax_text_box.xaxis.set_visible(False)
                ax_text_box.yaxis.set_visible(False)
                for tick in wbax.yaxis.get_major_ticks():
                    # tick.label1On = False
                    pass
                #wbax.get_yaxis().set_tick_params(size=0,color='y')
                y_loc=1.
                max_string_length = max([len(line) for line in row_labels])
                # for t,r in zip(row_labels,table_vals):
                #     label_rightjust=('{:>%i}' % max_string_length).format(t)
                #     ax_text_box.text(0.5, y_loc, ' %s:' % (label_rightjust), size=8, horizontalalignment='right')
                #     ax_text_box.text(0.5, y_loc, ' %s' % (r), size=8, horizontalalignment='left')
                #     y_loc-=0.04
                fig.text(.02,0.965, '%s %s' %(stat, station_name), size=12, horizontalalignment='left')
                fig.text(.02,0.035, '%s ' %(da.strftime('%Y-%m-%d %H:%M')), size=12, horizontalalignment='left')
                #plt.show()
                plt.savefig('/nfs/a90/eepdw/Figures/Radiosonde/Tephigrams/%s_%s_%s_Skew_T_Vars_To_Right_Barbs.png' % (station_name.replace('/','_').replace(' ', '_'), stat, dates[d].strftime('%Y%m%d')))
                plt.close()
            # Broad catch keeps the batch running when one sounding fails;
            # NOTE(review): it also hides the barbs_idx TypeError noted above.
            except Exception:
                print PrintException()
| [
2,
2735,
787,
257,
2829,
1672,
1262,
262,
2183,
20128,
13,
198,
11748,
279,
9945,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
11748,
279,
10025,
62,
37540,
198,
35339,
62,
37540,
13,
46115,
10786,
6759,
29487,
8019,
855,
16,
13,
1... | 1.614691 | 10,088 |
#################################################################################
# Copyright (c) 2018-2021, Texas Instruments Incorporated - http://www.ti.com
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#################################################################################
import torch
| [
29113,
29113,
14468,
2,
198,
2,
15069,
357,
66,
8,
2864,
12,
1238,
2481,
11,
3936,
43953,
3457,
40132,
532,
2638,
1378,
2503,
13,
20259,
13,
785,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,... | 3.889868 | 454 |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Visualising the results
"""## Get Wine Dataset"""
from sklearn.datasets import load_wine
data = load_wine()
data.keys()

X = data.data
y = data.target

from sklearn.model_selection import KFold
kf = KFold(n_splits=4)
kf.get_n_splits(X)

print(kf)

# Evaluate every model with the same 4-fold split.  The original code
# repeated the identical fold loop once per model function; iterating over
# a list of the model functions removes the duplication while keeping the
# exact execution order (all folds of one model, then the next model).
_classifiers = [
    logistic_regression,
    ridge_classification,
    svm_classification,
    mlp_classification,
    linearsvm_classification,
    rf_classification,
    dt_classification,
    gb_classification,
    sgd_classification,
    perceptron_classification,
    knn_classification,
    nb_classification,
]

for _clf in _classifiers:
    for train_index, test_index in kf.split(X):
        # Index into the numpy arrays to build this fold's train/test split.
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        _clf(X_train, X_test, y_train, y_test)
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
15612,
1710,
262,
2482,
198,
198,
37811,
2235,
3497,
20447,
16092,
292,
316,
37811,
1... | 2.359793 | 1,159 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"Main Class to communicate with gs108e and gs105e netgear switches"
import time
import binascii
import pprint
import random
import struct
import socket
import fcntl
import psl_typ
import inspect
import errno
def get_hw_addr(ifname):
    """Return the hardware (MAC) address of an interface (eth0, eth1, ...)
    formatted as "aa:bb:cc:dd:ee:ff"."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    name = ifname.encode('ascii')  # struct.pack requires bytes in Python 3
    # SIOCGIFHWADDR (0x8927) fills an ifreq buffer; the six MAC octets
    # live at bytes 18..23 of the result.
    raw = fcntl.ioctl(sock.fileno(), 0x8927, struct.pack('256s', name[:15]))
    mac_bytes = raw[18:24]
    if isinstance(raw, str):
        # Python 2: ioctl returns str, so each element is a character.
        octets = ['%02x' % ord(char) for char in mac_bytes]
    else:
        # Python 3: ioctl returns bytes, whose elements are already ints.
        octets = ['%02x' % byte for byte in mac_bytes]
    return ':'.join(octets)
def get_ip_address(ifname):
    """Return the first IPv4 address of *ifname* as a dotted-quad string,
    or None when the interface has no address assigned (EADDRNOTAVAIL)."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    name = ifname.encode('ascii')  # struct.pack requires bytes in Python 3
    request = struct.pack('256s', name[:15])
    try:
        # 0x8915 = SIOCGIFADDR; the IPv4 address occupies bytes 20..23
        # of the returned ifreq structure.
        addr = socket.inet_ntoa(
            fcntl.ioctl(sock.fileno(), 0x8915, request)[20:24])
    except IOError as err:
        if err.errno == errno.EADDRNOTAVAIL:
            # Interface exists but has no address configured.
            return None
        raise
    return addr
def pack_mac(value):
    """Pack a hardware (MAC) address string into its 6-byte wire form.

    Accepts either the colon-separated form "aa:bb:cc:dd:ee:ff" (17 chars)
    or the bare hex form "aabbccddeeff" (12 chars).

    Raises:
        ValueError: if *value* matches neither supported format.
    """
    value = value.encode()  # binascii.unhexlify() requires bytes in Python 3
    if (len(value) == 17):
        return binascii.unhexlify(value[0:2] + value[3:5] + value[6:8] +
                                  value[9:11] + value[12:14] + value[15:17])
    if (len(value) == 12):
        return binascii.unhexlify(value)
    # The original raised a plain string, which is a TypeError on Python 3
    # (string exceptions) and also concatenated str with bytes; raise a
    # proper exception instead.
    raise ValueError("unknown mac format=%r" % value)
def unpack_mac(value):
    """Format a 6-byte packed MAC address as "aa:bb:cc:dd:ee:ff".

    On Python 3 binascii.hexlify() returns bytes; the original code
    concatenated those bytes with str ":" separators, which raises
    TypeError there.  Decode to text first so the function works on both
    Python 2 and Python 3 and always returns str.
    """
    mac = binascii.hexlify(value).decode('ascii')
    return ':'.join(mac[i:i + 2] for i in range(0, 12, 2))
class ProSafeLinux:
    """Main class to communicate with a ProSafe gs108e gs105e Switch.

    Speaks Netgear's NSDP protocol: UDP broadcast/unicast datagrams built
    from a fixed header followed by TLV-encoded commands (the CMD_*
    constants below).  Queries use CTYPE_QUERY_REQUEST, configuration
    changes use CTYPE_TRANSMIT_REQUEST.
    """
    # TLV command types understood by the switch.  Entries named "fixme*"
    # carry payloads whose meaning has not been reverse-engineered yet and
    # are therefore only dumped as hex.
    CMD_MODEL = psl_typ.PslTypStringQueryOnly(0x0001, "model")
    CMD_FIMXE2 = psl_typ.PslTypHex(0x0002, "fixme2")
    CMD_NAME = psl_typ.PslTypString(0x0003, "name")
    CMD_MAC = psl_typ.PslTypMac(0x0004, "MAC")
    CMD_FIMXE5 = psl_typ.PslTypHex(0x0005, "fixme5")
    CMD_IP = psl_typ.PslTypIpv4(0x0006, "ip")
    CMD_NETMASK = psl_typ.PslTypIpv4(0x0007, "netmask")
    CMD_GATEWAY = psl_typ.PslTypIpv4(0x0008, "gateway")
    CMD_NEW_PASSWORD = psl_typ.PslTypPassword(0x0009, "new_password", True)
    CMD_PASSWORD = psl_typ.PslTypPassword(0x000a, "password", False)
    CMD_DHCP = psl_typ.PslTypDHCP(0x000b, "dhcp")
    CMD_FIXMEC = psl_typ.PslTypHex(0x000c, "fixmeC")
    CMD_FIRMWAREV = psl_typ.PslTypStringQueryOnly(0x000d, "firmwarever")
    CMD_FIMXEE = psl_typ.PslTypHex(0x000e, "fixmeE")
    CMD_FIXMEF = psl_typ.PslTypHex(0x000f, "fixmeF")
    CMD_REBOOT = psl_typ.PslTypAction(0x0013, "reboot")
    CMD_FACTORY_RESET = psl_typ.PslTypAction(0x0400, "factory_reset")
    CMD_SPEED_STAT = psl_typ.PslTypSpeedStat(0x0c00, "speed_stat")
    CMD_PORT_STAT = psl_typ.PslTypPortStat(0x1000, "port_stat")
    CMD_RESET_PORT_STAT = psl_typ.PslTypAction(0x1400, "reset_port_stat")
    CMD_TEST_CABLE = psl_typ.PslTypHexNoQuery(0x1800, "test_cable")
    CMD_TEST_CABLE_RESP = psl_typ.PslTypHexNoQuery(0x1c00, "test_cable_resp")
    CMD_VLAN_SUPPORT = psl_typ.PslTypVlanSupport(0x2000, "vlan_support")
    CMD_VLAN_ID = psl_typ.PslTypVlanId(0x2400, "vlan_id")
    CMD_VLAN802_ID = psl_typ.PslTypVlan802Id(0x2800, "vlan802_id")
    CMD_VLANPVID = psl_typ.PslTypVlanPVID(0x3000, "vlan_pvid")
    CMD_QUALITY_OF_SERVICE = psl_typ.PslTypQos(0x3400, "qos")
    CMD_PORT_BASED_QOS = psl_typ.PslTypPortBasedQOS(0x3800, "port_based_qos")
    CMD_BANDWIDTH_INCOMING_LIMIT = psl_typ.PslTypBandwidth(
        0x4c00, "bandwidth_in")
    CMD_BANDWIDTH_OUTGOING_LIMIT = psl_typ.PslTypBandwidth(
        0x5000, "bandwidth_out")
    CMD_FIXME5400 = psl_typ.PslTypHex(0x5400, "fxime5400")
    CMD_BROADCAST_BANDWIDTH = psl_typ.PslTypBandwidth(0x5800,
                                                      "broadcast_bandwidth")
    CMD_PORT_MIRROR = psl_typ.PslTypPortMirror(0x5c00, "port_mirror")
    CMD_NUMBER_OF_PORTS = psl_typ.PslTypHex(0x6000, "number_of_ports")
    CMD_IGMP_SNOOPING = psl_typ.PslTypIGMPSnooping(0x6800, "igmp_snooping")
    CMD_BLOCK_UNKNOWN_MULTICAST = psl_typ.PslTypBoolean(
        0x6c00, "block_unknown_multicast")
    CMD_IGMP_HEADER_VALIDATION = psl_typ.PslTypBoolean(0x7000,
                                                       "igmp_header_validation")
    CMD_FIXME7400 = psl_typ.PslTypHex(0x7400, "fixme7400")
    # Terminator TLV that must close every NSDP packet.
    CMD_END = psl_typ.PslTypEnd(0xffff, "END")

    # NSDP message types (requests; the commented values are the matching
    # response types the switch sends back).
    CTYPE_QUERY_REQUEST = 0x0101
    # CTYPE_QUERY_RESPONSE = 0x0102
    CTYPE_TRANSMIT_REQUEST = 0x103
    # CTYPE_TRANSMIT_RESPONSE = 0x104

    # UDP ports used by the protocol: we listen on RECPORT and the switch
    # listens on SENDPORT.
    RECPORT = 63321
    SENDPORT = 63322

    def __init__(self):
        """Initialise state; call bind() before talking to a switch.

        Builds cmd_by_name / cmd_by_id lookup tables by reflecting over
        the CMD_* class attributes.
        """
        self.myhost = None
        self.srcmac = None
        self.ssocket = None
        self.rsocket = None
        # Random starting sequence number so concurrent clients are
        # unlikely to collide; incremented on every send().
        self.seq = random.randint(100, 2000)
        self.outdata = {}
        self.debug = False
        # MAC address -> IP address cache filled by discovery replies.
        self.mac_cache = {}
        self.cmd_by_id = {}
        self.cmd_by_name = {}
        for key, value in inspect.getmembers(ProSafeLinux):
            if key.startswith("CMD_"):
                self.cmd_by_name[value.get_name()] = value
                self.cmd_by_id[value.get_id()] = value

    def bind(self, interface):
        """Bind send/receive sockets to *interface*.

        Returns False when the interface has no IPv4 address, True on
        success.
        """
        self.myhost = get_ip_address(interface)
        if not self.myhost:
            return False
        self.srcmac = pack_mac(get_hw_addr(interface))
        # send socket
        self.ssocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.ssocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.ssocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        # The following requires root permission so we do not do this:
        # self.socket.setsockopt(socket.SOL_SOCKET,
        # IN.SO_BINDTODEVICE,"eth1"+'\0')
        self.ssocket.bind((self.myhost, self.RECPORT))
        # receive socket
        self.rsocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.rsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.rsocket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.rsocket.bind(("255.255.255.255", self.RECPORT))
        return True

    def get_query_cmds(self):
        "return all commands which can be used in a query"
        rtn = []
        for cmd in list(self.cmd_by_name.values()):
            if cmd.is_queryable():
                rtn.append(cmd)
        return rtn

    def get_setable_cmds(self):
        "returns all commands which can be set"
        rtn = []
        for cmd in list(self.cmd_by_name.values()):
            if cmd.is_setable():
                rtn.append(cmd)
        return rtn

    def get_cmd_by_name(self, name):
        "return a command by its name"
        return self.cmd_by_name[name]

    def set_debug_output(self):
        "set debugging"
        self.debug = True

    def recv(self, recvfunc, maxlen=8192, timeout=0.5):
        """Receive one packet from the switch.

        Returns (message, address), or (None, None) on timeout / EAGAIN.
        *recvfunc*, if given, is invoked with the raw message and sender
        address.
        """
        self.rsocket.settimeout(timeout)
        try:
            message, address = self.rsocket.recvfrom(maxlen)
        except socket.timeout:
            return (None, None)
        except socket.error as error:
            # according to the Python documentation this error
            # is system-specifc; this works on Linux
            if error.errno == errno.EAGAIN:
                return (None, None)
            raise
        if self.debug:
            message_hex = binascii.hexlify(message).decode()
            print("recv=" + message_hex)
        if recvfunc is not None:
            recvfunc(message, address)
        return (message, address)

    def recv_all(self, recvfunc, maxlen=8192, timeout=0.5):
        "receive all pending packets"
        while True:
            (message, address) = self.recv(recvfunc, maxlen, timeout)
            if message is None:
                return

    def parse_packet(self, pack, unknown_warn):
        """Unpack a packet sent by the switch into a {cmd: value} dict.

        Sets data["error"] when the status word at bytes 2..3 is non-zero
        and data["theirmac"] from the sender MAC field.  TLVs start at
        byte 32; unknown command ids are decoded as raw hex (and printed
        when *unknown_warn* is true).  Repeated TLVs of the same command
        are collected into a list.
        """
        if self.debug:
            pprint.pprint(len(pack[2:4]))
        data = {}
        if struct.unpack(">H", pack[2:4])[0] != 0x0000:
            data["error"] = struct.unpack(">H", pack[4:6])[0]
        # data["seq"] = struct.unpack(">H", pack[22:24])[0]
        # data["ctype"] = struct.unpack(">H", pack[0:2])[0]
        # data["mymac"] = binascii.hexlify(pack[8:14])
        data["theirmac"] = binascii.hexlify(pack[14:20]).decode()
        pos = 32
        cmd_id = 0
        while (pos<len(pack)):
            if self.debug:
                print("pos:%d len: %d" %(pos,len(pack)))
            cmd_id = struct.unpack(">H", pack[pos:(pos + 2)])[0]
            if cmd_id in self.cmd_by_id:
                cmd = self.cmd_by_id[cmd_id]
            else:
                if unknown_warn:
                    print("Unknown Response %d" % cmd_id)
                cmd = psl_typ.PslTypHex(cmd_id, "UNKNOWN %d" % cmd_id)
            pos = pos + 2
            cmdlen = struct.unpack(">H", pack[pos:(pos + 2)])[0]
            pos = pos + 2
            if cmdlen > 0:
                value = cmd.unpack_py(pack[pos:(pos + cmdlen)])
            else:
                value = None
            # A command seen twice becomes a list of values.
            if cmd in data and value != None:
                if type(data[cmd]) != type(list()):
                    data[cmd] = [data[cmd]]
                data[cmd].append(value)
            elif value != None:
                data[cmd] = value
            if self.debug:
                print("cmd_id %d of length %d :" % (cmd_id, cmdlen))
                data_hex = binascii.hexlify(pack[pos:(pos + cmdlen)]).decode()
                print("data=" + data_hex)
            pos = pos + cmdlen
        return data

    def discoverfunc(self, msg, adr):
        "executed by discover to display any switch in the network"
        data = self.parse_packet(msg, True)
        dhcpstr = ""
        if (data[self.CMD_DHCP]):
            dhcpstr = " DHCP=on"
        print(" * %s\t%s\t%s\t%s\t%s" % (data[self.CMD_MAC],
                                         data[self.CMD_IP],
                                         data[self.CMD_MODEL],
                                         data.get(self.CMD_NAME, ''),
                                         dhcpstr))

    def storediscoverfunc(self, msg, adr):
        "store discover ip"
        # Remember the MAC -> IP mapping so later requests can be unicast.
        data = self.parse_packet(msg, True)
        if self.debug:
            print("Store MAC, IP: " + (data[self.CMD_MAC] + " " +
                                       data[self.CMD_IP]))
        self.mac_cache[data[self.CMD_MAC]] = data[self.CMD_IP]
        #print " * %s\t%s\t%s\t%s\t%s" % (data[self.CMD_MAC],
        # data[self.CMD_IP], data[self.CMD_MODEL], data[self.CMD_NAME], dhcpstr)

    def transfunc(self, msg, adr):
        "analyse response, after transfer"
        #print "==FOUND SWITCH=="
        data = self.parse_packet(msg, True)
        if self.debug:
            pprint.pprint(data)
        # NOTE(review): parse_packet only sets "error" on failure, so this
        # lookup raises KeyError on a clean response; and cmd_by_id is a
        # dict, so *calling* it raises TypeError (which the except below
        # does not catch).  Presumably self.cmd_by_id[data["error"]] was
        # intended -- confirm before relying on the error reporting here.
        if data["error"]:
            try:
                print("Error with " + self.cmd_by_id(self.outdata["error"]))
            except KeyError:
                print("Unknown Error")

    def storefunc(self, msg, adr):
        "store data in outdata"
        self.outdata = self.parse_packet(msg, True)
        if self.debug:
            pprint.pprint(self.outdata)
        if "error" in self.outdata:
            # NOTE(review): cmd_by_id is a dict; calling it raises
            # TypeError, not the KeyError caught below -- looks like
            # self.cmd_by_id[self.outdata["error"]] was intended; confirm.
            try:
                print("Error with " + self.cmd_by_id(self.outdata["error"]))
            except KeyError:
                print("Unknown Error")

    def rec_raw(self, msg, adr):
        "receive raw data"
        # Best-effort parse; always keep the raw hex dump as well.
        try:
            self.outdata = self.parse_packet(msg, False)
        except:
            pass
        self.outdata["raw"] = binascii.hexlify(msg)

    def send(self, host, port, data):
        "send data to host on port"
        if self.debug:
            # binascii.unhexlify() requires bytes in Python 3
            data_hex = binascii.hexlify(data).decode()
            print("send to ip " + host + " data = " + data_hex)
        self.ssocket.sendto(data, (host, port))
        # Bump the sequence number for the next datagram.
        self.seq += 1

    def baseudp(self, ctype, destmac):
        """Build the fixed NSDP header: ctype, source/destination MAC,
        sequence number and the "NSDP" magic."""
        reserved = b"\x00"
        if destmac is None:
            # Broadcast: all-zero destination MAC.
            destmac = 6 * b"\x00"
        if len(destmac) > 6:
            # A human-readable MAC string was passed; pack it first.
            destmac = pack_mac(destmac)
        data = (struct.pack(">h", ctype) + 6 * reserved + self.srcmac +
                destmac + 2 * reserved)
        data += struct.pack(">h", self.seq)
        data += b"NSDP" + 4 * reserved
        return data

    @staticmethod
    def addudp(cmd, datain=None):
        """Encode one TLV: command id, payload length, optional payload
        (length 0 means "query this value")."""
        data = struct.pack(">H", cmd.get_id())
        if (datain is None):
            data += struct.pack(">H", 0)
        else:
            pdata = cmd.pack_py(datain)
            data += struct.pack(">H", len(pdata))
            data += pdata
        return data

    # why? we get the ip address in the reply back?
    def ip_from_mac(self, mac):
        """Resolve a switch MAC to an IP address.

        Returns the broadcast address for mac=None or when the switch
        cannot be found; otherwise answers from the cache, falling back
        to a broadcast discovery query that fills the cache.
        """
        if mac is None:
            return "255.255.255.255"
        if mac in self.mac_cache:
            return self.mac_cache[mac]
        #print "mac="+mac
        # FIXME: Search in /proc/net/arp if mac there use this one
        #with open("/proc/net/arp") as f:
        #    for line in f:
        #        print line
        query_arr = [self.CMD_MAC, self.CMD_IP]
        self.query(query_arr, mac, self.storediscoverfunc, use_ip_func=False)
        if mac in self.mac_cache:
            return self.mac_cache[mac]
        print("can't find mac: " + mac)
        return "255.255.255.255"

    def send_query(self, cmd_arr, mac, use_ip_func=True):
        "request some values from a switch, without changing them"
        if use_ip_func:
            ipadr = self.ip_from_mac(mac)
        else:
            ipadr = "255.255.255.255"
        data = self.baseudp(destmac=mac, ctype=self.CTYPE_QUERY_REQUEST)
        for cmd in cmd_arr:
            data += self.addudp(cmd)
        data += self.addudp(self.CMD_END)
        self.outdata = {}
        self.send(ipadr, self.SENDPORT, data)

    def query(self, cmd_arr, mac, func, use_ip_func=True):
        "get some values from the switch, but do not change them"
        self.send_query(cmd_arr, mac, use_ip_func)
        self.recv_all(func)

    def transmit(self, cmd_arr, mac, func):
        "change something in the switch, like name, mac ..."
        ipadr = self.ip_from_mac(mac)
        data = self.baseudp(destmac=mac, ctype=self.CTYPE_TRANSMIT_REQUEST)
        # The password TLV must come first so the switch authenticates
        # the request before applying the remaining settings.
        if self.CMD_PASSWORD in cmd_arr:
            data += self.addudp(self.CMD_PASSWORD, cmd_arr[self.CMD_PASSWORD])
        for cmd, pdata in list(cmd_arr.items()):
            if cmd != self.CMD_PASSWORD:
                data += self.addudp(cmd, pdata)
        data += self.addudp(self.CMD_END)
        self.send(ipadr, self.SENDPORT, data)
        time.sleep(0.7)
        self.recv_all(func)

    def passwd(self, mac, old, new, func):
        "change password from old to new"
        # The order of the CMD_PASSWORD and CMD_NEW_PASSWORD is important
        ipadr = self.ip_from_mac(mac)
        data = self.baseudp(destmac=mac, ctype=self.CTYPE_TRANSMIT_REQUEST)
        data += self.addudp(self.CMD_PASSWORD, old)
        data += self.addudp(self.CMD_NEW_PASSWORD, new)
        data += self.addudp(self.CMD_END)
        self.send(ipadr, self.SENDPORT, data)
        time.sleep(0.7)
        self.recv_all(func)

    def passwd_exploit(self, mac, new, func):
        "exploit in current (2012) firmware version, set a new password"
        # The order of the CMD_PASSWORD and CMD_NEW_PASSWORD is important
        ipadr = self.ip_from_mac(mac)
        data = self.baseudp(destmac=mac, ctype=self.CTYPE_TRANSMIT_REQUEST)
        data += self.addudp(self.CMD_NEW_PASSWORD, new)
        data += self.addudp(self.CMD_PASSWORD, new)
        data += self.addudp(self.CMD_END)
        self.send(ipadr, self.SENDPORT, data)
        time.sleep(0.7)
        self.recv_all(func)

    def send_discover(self):
        "find any switch in the network"
        query_arr = [self.CMD_MODEL,
                     self.CMD_NAME,
                     self.CMD_MAC,
                     self.CMD_DHCP,
                     self.CMD_IP]
        self.send_query(query_arr, None)

    def discover(self):
        "find any switch in the network"
        query_arr = [self.CMD_MODEL,
                     self.CMD_NAME,
                     self.CMD_MAC,
                     self.CMD_DHCP,
                     self.CMD_IP]
        self.query(query_arr, None, self.discoverfunc)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
1,
13383,
5016,
284,
10996,
351,
308,
82,
15711,
68,
290,
308,
82,
13348,
68,
2010,
31763,
18225,
1,
198,
11748,
640,
... | 1.918409 | 8,898 |
import unittest
from cpptypeinfo import (
TypeParser,
Float,
Double,
Bool,
Void,
Int8,
Int16,
Int32,
Int64,
UInt8,
UInt16,
UInt32,
UInt64,
)
from cpptypeinfo.usertype import (Pointer, Array, Field, Struct, Param,
Function)
if __name__ == '__main__':
    # Discover and run the cpptypeinfo test-suite when executed directly.
    unittest.main()
| [
11748,
555,
715,
395,
201,
198,
6738,
269,
381,
4906,
10951,
1330,
357,
201,
198,
220,
220,
220,
5994,
46677,
11,
201,
198,
220,
220,
220,
48436,
11,
201,
198,
220,
220,
220,
11198,
11,
201,
198,
220,
220,
220,
347,
970,
11,
201,
... | 1.801887 | 212 |
# import sys
# sys.path.append("/home/ruhil/Dropbox/projects/python3-SmartSchema/SmartSchema/SmartSchema")
from SmartSchema import SmartSchema
if __name__ == "__main__":
    # JSON-Schema-style description of an invoice document.  Besides the
    # standard keywords it uses SmartSchema's "accessor" extension: a
    # callable that derives a property's value from the rest of the
    # instance when schema.resolve() runs.
    InvoiceSchema = {
        "type": "object",
        "required": [
            "reciver",
            "issuer",
            "inv_date"
        ],
        "properties": {
            "reciver": {
                "type": "string",
                "description": "uid of reciver"
            },
            "issuer": {
                "type": "string",
                "description": "uid of issuer"
            },
            "inv_date": {
                "type": "string",
                "description": "date of invoice",
                # Constant placeholder accessor -- presumably a stub for a
                # real date source; TODO confirm.
                "accessor": lambda inst: "yolo",
            },
            "_id": {
                "type": "integer",
                "description": "ref_no for the invoice",
                "__comment__": "invoice in single series for now"
            },
            "txbl": {
                "type": "number",
                "description": "taxable amount",
                # Back-computes the taxable base from gross value and tax rate.
                "accessor": lambda inst: inst['val'] * 100 / (100 + inst['tax']),
            },
            "tax": {
                "type": "number",
                "description": "tax rate",
                "accessor": lambda inst: (inst['val'] - inst['txbl']) / inst['txbl']
            },
            "net": {
                "type": "number",
                "description": "taxable + tax%",
                "accessor": lambda inst: inst['tax'] * inst['txbl'] / 100 + inst['txbl']
            },
            "qty": {
                "type": "array",
                "items": {
                "type": "object",
                "properties": {
                    "qty": {
                        "type": "number"
                    },
                    "half": {
                        "type": "number",
                    }
                },
                "required": ["qty"]
            }},
            "sumqty": {
                "type": "number",
                "description": "sum of qty",
                # NOTE(review): reducedqty is not defined or imported
                # anywhere in this file, so evaluating this schema raises
                # NameError at module load.  Presumably a helper that sums
                # the "qty" entries -- confirm where it is meant to come from.
                "accessor": reducedqty
            }
        }
    }
    schema = SmartSchema(InvoiceSchema)
    # Sample invoice instance used to exercise resolve()/validate().
    i = {
        "_id": 678,
        "issuer": "rke",
        "reciver": "@tfcgpl",
        # "tax": 18,
        "qty": [{"qty": 12}, {"qty": 13}],
        "txbl": 30000,
        "tax": 35400
    }
    # resolve() fills accessor-derived fields in place; validate() returns
    # a falsy value when the instance conforms to the schema.
    schema.resolve(i)
    print(i)
    if not schema.validate(i):
        print("success: No issue found")
| [
198,
2,
1330,
25064,
198,
2,
25064,
13,
6978,
13,
33295,
7203,
14,
11195,
14,
622,
71,
346,
14,
26932,
3524,
14,
42068,
14,
29412,
18,
12,
25610,
27054,
2611,
14,
25610,
27054,
2611,
14,
25610,
27054,
2611,
4943,
198,
198,
6738,
108... | 1.648666 | 1,537 |
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class subscribergxinterface(base_resource) :
""" Configuration for Gx interface Parameters resource. """
@property
def vserver(self) :
r"""Name of the load balancing, or content switching vserver to which the Gx connections are established. The service type of the virtual server must be DIAMETER/SSL_DIAMETER. Mutually exclusive with the service parameter. Therefore, you cannot set both service and the Virtual Server in the Gx Interface.<br/>Minimum length = 1.
"""
try :
return self._vserver
except Exception as e:
raise e
@vserver.setter
def vserver(self, vserver) :
r"""Name of the load balancing, or content switching vserver to which the Gx connections are established. The service type of the virtual server must be DIAMETER/SSL_DIAMETER. Mutually exclusive with the service parameter. Therefore, you cannot set both service and the Virtual Server in the Gx Interface.<br/>Minimum length = 1
"""
try :
self._vserver = vserver
except Exception as e:
raise e
@property
def service(self) :
r"""Name of DIAMETER/SSL_DIAMETER service corresponding to PCRF to which the Gx connection is established. The service type of the service must be DIAMETER/SSL_DIAMETER. Mutually exclusive with vserver parameter. Therefore, you cannot set both Service and the Virtual Server in the Gx Interface.<br/>Minimum length = 1.
"""
try :
return self._service
except Exception as e:
raise e
@service.setter
def service(self, service) :
r"""Name of DIAMETER/SSL_DIAMETER service corresponding to PCRF to which the Gx connection is established. The service type of the service must be DIAMETER/SSL_DIAMETER. Mutually exclusive with vserver parameter. Therefore, you cannot set both Service and the Virtual Server in the Gx Interface.<br/>Minimum length = 1
"""
try :
self._service = service
except Exception as e:
raise e
@property
def pcrfrealm(self) :
r"""PCRF realm is of type DiameterIdentity and contains the realm of PCRF to which the message is to be routed. This is the realm used in Destination-Realm AVP by Netscaler Gx client (as a Diameter node).
<br/>Minimum length = 1.
"""
try :
return self._pcrfrealm
except Exception as e:
raise e
@pcrfrealm.setter
def pcrfrealm(self, pcrfrealm) :
r"""PCRF realm is of type DiameterIdentity and contains the realm of PCRF to which the message is to be routed. This is the realm used in Destination-Realm AVP by Netscaler Gx client (as a Diameter node).
<br/>Minimum length = 1
"""
try :
self._pcrfrealm = pcrfrealm
except Exception as e:
raise e
@property
def holdonsubscriberabsence(self) :
r"""Set this setting to yes if Netscaler needs to Hold pakcets till subscriber session is fetched from PCRF. Else set to NO. By default set to yes. If this setting is set to NO, then till NetScaler fetches subscriber from PCRF, default subscriber profile will be applied to this subscriber if configured. If default subscriber profile is also not configured an undef would be raised to expressions which use Subscriber attributes. .<br/>Default value: NO<br/>Possible values = YES, NO.
"""
try :
return self._holdonsubscriberabsence
except Exception as e:
raise e
@holdonsubscriberabsence.setter
def holdonsubscriberabsence(self, holdonsubscriberabsence) :
r"""Set this setting to yes if Netscaler needs to Hold pakcets till subscriber session is fetched from PCRF. Else set to NO. By default set to yes. If this setting is set to NO, then till NetScaler fetches subscriber from PCRF, default subscriber profile will be applied to this subscriber if configured. If default subscriber profile is also not configured an undef would be raised to expressions which use Subscriber attributes. .<br/>Default value: NO<br/>Possible values = YES, NO
"""
try :
self._holdonsubscriberabsence = holdonsubscriberabsence
except Exception as e:
raise e
@property
def requesttimeout(self) :
r"""q!Time, in seconds, within which the Gx CCR request must complete. If the request does not complete within this time, the request is retransmitted for requestRetryAttempts time. If still reuqest is not complete then default subscriber profile will be applied to this subscriber if configured. If default subscriber profile is also not configured an undef would be raised to expressions which use Subscriber attributes.
Zero disables the timeout. !.<br/>Default value: 10<br/>Maximum length = 86400.
"""
try :
return self._requesttimeout
except Exception as e:
raise e
@requesttimeout.setter
def requesttimeout(self, requesttimeout) :
r"""q!Time, in seconds, within which the Gx CCR request must complete. If the request does not complete within this time, the request is retransmitted for requestRetryAttempts time. If still reuqest is not complete then default subscriber profile will be applied to this subscriber if configured. If default subscriber profile is also not configured an undef would be raised to expressions which use Subscriber attributes.
Zero disables the timeout. !.<br/>Default value: 10<br/>Maximum length = 86400
"""
try :
self._requesttimeout = requesttimeout
except Exception as e:
raise e
@property
def requestretryattempts(self) :
r"""If the request does not complete within requestTimeout time, the request is retransmitted for requestRetryAttempts time.<br/>Default value: 3.
"""
try :
return self._requestretryattempts
except Exception as e:
raise e
@requestretryattempts.setter
def requestretryattempts(self, requestretryattempts) :
r"""If the request does not complete within requestTimeout time, the request is retransmitted for requestRetryAttempts time.<br/>Default value: 3
"""
try :
self._requestretryattempts = requestretryattempts
except Exception as e:
raise e
@property
def idlettl(self) :
r"""q!Idle Time, in seconds, after which the Gx CCR-U request will be sent after any PCRF activity on a session. Any RAR or CCA message resets the timer.
Zero value disables the idle timeout. !.<br/>Default value: 900<br/>Maximum length = 86400.
"""
try :
return self._idlettl
except Exception as e:
raise e
@idlettl.setter
def idlettl(self, idlettl) :
r"""q!Idle Time, in seconds, after which the Gx CCR-U request will be sent after any PCRF activity on a session. Any RAR or CCA message resets the timer.
Zero value disables the idle timeout. !.<br/>Default value: 900<br/>Maximum length = 86400
"""
try :
self._idlettl = idlettl
except Exception as e:
raise e
@property
def revalidationtimeout(self) :
r"""q!Revalidation Timeout, in seconds, after which the Gx CCR-U request will be sent after any PCRF activity on a session. Any RAR or CCA message resets the timer.
Zero value disables the idle timeout. !.<br/>Default value: 0<br/>Maximum length = 86400.
"""
try :
return self._revalidationtimeout
except Exception as e:
raise e
@revalidationtimeout.setter
def revalidationtimeout(self, revalidationtimeout) :
r"""q!Revalidation Timeout, in seconds, after which the Gx CCR-U request will be sent after any PCRF activity on a session. Any RAR or CCA message resets the timer.
Zero value disables the idle timeout. !.<br/>Default value: 0<br/>Maximum length = 86400
"""
try :
self._revalidationtimeout = revalidationtimeout
except Exception as e:
raise e
@property
def negativettl(self) :
r"""q!Negative TTL, in seconds, after which the Gx CCR-I request will be resent for sessions that have not been resolved by PCRF due to server being down or no response or failed response. Instead of polling the PCRF server constantly, negative-TTL makes NS stick to un-resolved session. Meanwhile Netscaler installs a negative session to avoid going to PCRF.
For Negative Sessions, Netcaler inherits the attributes from default subscriber profile if default subscriber is configured. A default subscriber could be configured as 'add subscriber profile *'. Or these attributes can be inherited from Radius as well if Radius is configued.
Zero value disables the Negative Sessions. And Netscaler does not install Negative sessions even if subscriber session could not be fetched. !.<br/>Default value: 600<br/>Maximum length = 86400.
"""
try :
return self._negativettl
except Exception as e:
raise e
	@negativettl.setter
	def negativettl(self, negativettl) :
		r"""Negative TTL, in seconds, after which the Gx CCR-I request will be resent for sessions that have not been resolved by PCRF due to server being down or no response or failed response. Instead of polling the PCRF server constantly, negative-TTL makes NS stick to un-resolved session. Meanwhile Netscaler installs a negative session to avoid going to PCRF.
		For Negative Sessions, Netscaler inherits the attributes from default subscriber profile if default subscriber is configured. A default subscriber could be configured as 'add subscriber profile *'. Or these attributes can be inherited from Radius as well if Radius is configured.
		Zero value disables the Negative Sessions. And Netscaler does not install Negative sessions even if subscriber session could not be fetched.<br/>Default value: 600<br/>Maximum length = 86400
		"""
		try :
			self._negativettl = negativettl
		except Exception as e:
			raise e
	@property
	def servicepathavp(self) :
		r"""The AVP code in which PCRF sends service path applicable for subscriber.<br/>Minimum length = 1.
		"""
		try :
			return self._servicepathavp
		except Exception as e:
			raise e
	@servicepathavp.setter
	def servicepathavp(self, servicepathavp) :
		r"""The AVP code in which PCRF sends service path applicable for subscriber.<br/>Minimum length = 1
		"""
		try :
			self._servicepathavp = servicepathavp
		except Exception as e:
			raise e
	@property
	def servicepathvendorid(self) :
		r"""The vendorid of the AVP in which PCRF sends service path for subscriber.
		"""
		try :
			return self._servicepathvendorid
		except Exception as e:
			raise e
	@servicepathvendorid.setter
	def servicepathvendorid(self, servicepathvendorid) :
		r"""The vendorid of the AVP in which PCRF sends service path for subscriber.
		"""
		try :
			self._servicepathvendorid = servicepathvendorid
		except Exception as e:
			raise e
	@property
	def svrstate(self) :
		r"""The state of the gx service (read-only; reported by the appliance).<br/>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED.
		"""
		try :
			return self._svrstate
		except Exception as e:
			raise e
	@property
	def identity(self) :
		r"""DiameterIdentity to be used by NS (read-only). DiameterIdentity is used to identify a Diameter node uniquely. Before setting up diameter configuration, Netscaler (as a Diameter node) MUST be assigned a unique DiameterIdentity.
		example =>
		set ns diameter -identity netscaler.com
		Now whenever the Netscaler system needs to use identity in diameter messages, it will use 'netscaler.com' as the Origin-Host AVP as defined in RFC3588.<br/>Minimum length = 1.
		"""
		try :
			return self._identity
		except Exception as e:
			raise e
	@property
	def realm(self) :
		r"""Diameter Realm to be used by NS (read-only).
		example =>
		set ns diameter -realm com
		Now whenever the Netscaler system needs to use realm in diameter messages, it will use 'com' as the Origin-Realm AVP as defined in RFC3588.<br/>Minimum length = 1.
		"""
		try :
			return self._realm
		except Exception as e:
			raise e
	@property
	def status(self) :
		r"""NetScaler PCRF connection Status (Gx Protocol State); read-only.
		"""
		try :
			return self._status
		except Exception as e:
			raise e
	@property
	def servicepathinfomode(self) :
		r"""The type of info in which service path is passed from PCRF.
		SERVICEFUNCTIONS: the service chain is passed as service-function names, in order, in AVPs with code servicepathavp.
		SERVICEPATH: the service path name is passed in AVPs with code servicepathavp.<br/>Default value: SERVICEPATH<br/>Possible values = SERVICEFUNCTIONS, SERVICEPATH.
		"""
		try :
			return self._servicepathinfomode
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		r"""Converts a raw nitro response into a resource object (or object array
		for a get request). Raises nitro_exception on a non-zero error code.
		"""
		try :
			result = service.payload_formatter.string_to_resource(subscribergxinterface_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 presumably indicates an expired session -- TODO confirm;
				# the cached session is dropped so the next call re-authenticates.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.subscribergxinterface
		except Exception as e :
			raise e
	def _get_object_name(self) :
		r"""Returns the value of the object identifier argument.
		This resource has no identifier attribute, so 0 is always returned.
		"""
		try :
			return 0
		except Exception as e :
			raise e
	@classmethod
	def update(cls, client, resource) :
		r"""Use this API to update subscribergxinterface.
		Copies the writable options from `resource` onto a fresh object and
		issues the update through `client`.
		"""
		try :
			if type(resource) is not list :
				updateresource = subscribergxinterface()
				updateresource.vserver = resource.vserver
				updateresource.service = resource.service
				updateresource.pcrfrealm = resource.pcrfrealm
				updateresource.holdonsubscriberabsence = resource.holdonsubscriberabsence
				updateresource.requesttimeout = resource.requesttimeout
				updateresource.requestretryattempts = resource.requestretryattempts
				updateresource.idlettl = resource.idlettl
				updateresource.revalidationtimeout = resource.revalidationtimeout
				updateresource.negativettl = resource.negativettl
				updateresource.servicepathavp = resource.servicepathavp
				updateresource.servicepathvendorid = resource.servicepathvendorid
				return updateresource.update_resource(client)
		except Exception as e :
			raise e
	@classmethod
	def unset(cls, client, resource, args) :
		r"""Use this API to unset the properties of subscribergxinterface resource.
		Properties that need to be unset are specified in the args array.
		"""
		try :
			if type(resource) is not list :
				unsetresource = subscribergxinterface()
				return unsetresource.unset_resource(client, args)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		r"""Use this API to fetch all the subscribergxinterface resources that are configured on netscaler.
		NOTE(review): when `name` is truthy, `response` is never assigned and the
		`return response` raises UnboundLocalError (re-raised by the except) --
		confirm whether a name-based lookup was intended here.
		"""
		try :
			if not name :
				obj = subscribergxinterface()
				response = obj.get_resources(client, option_)
			return response
		except Exception as e :
			raise e
| [
2,
198,
2,
15069,
357,
66,
8,
3648,
12,
5304,
15792,
8609,
11998,
11,
3457,
13,
198,
2,
198,
2,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
4943,
198,
2,
220,
220,
345,
743,
407,
779,
... | 3.241692 | 4,845 |
from django_test_deploy.settings import *

# Test-only settings overlay: extend the base project settings with the BDD
# test app and switch the test runner to the django-behave suite runner.
INSTALLED_APPS += ('bdd_tests',)
TEST_RUNNER = 'django_behave.runner.DjangoBehaveTestSuiteRunner'
# Alternative runner choices, kept for reference:
#'django_behave.runner.DjangoBehaveTestSuiteRunner'
#'django_behave.runner.DjangoBehaveOnlyTestSuiteRunner'
| [
6738,
42625,
14208,
62,
9288,
62,
2934,
1420,
13,
33692,
1330,
1635,
198,
198,
38604,
7036,
1961,
62,
2969,
3705,
15853,
19203,
65,
1860,
62,
41989,
3256,
8,
198,
51,
6465,
62,
49,
4944,
21479,
796,
705,
28241,
14208,
62,
1350,
14150,... | 2.515152 | 99 |
from datetime import datetime
import worker
import configparser
import os
import sys
import const
# Load config.ini (next to the project root) and run the worker once per
# configured task code.
cfgpath = os.path.join(const.ROOT_PATH, './config.ini')
conf = configparser.ConfigParser()
conf.read(cfgpath, encoding="utf-8")

tasks = conf.get('main', 'task').split(',')
# The target URL is shared by all tasks; read it once instead of re-reading
# this loop-invariant option on every iteration (as the original code did).
url = conf.get('main', 'url')
for code in tasks:
    # Each task has its own [<code>] section carrying its auth token.
    token = conf.get(code, 'token')
    worker.run(code=code, token=token, url=url)

os.system('pause')  # Windows-only: keep the console window open when done.
| [
6738,
4818,
8079,
1330,
4818,
8079,
201,
198,
11748,
8383,
201,
198,
11748,
4566,
48610,
201,
198,
11748,
28686,
201,
198,
11748,
25064,
201,
198,
11748,
1500,
201,
198,
201,
198,
37581,
6978,
796,
28686,
13,
6978,
13,
22179,
7,
9979,
... | 2.645349 | 172 |
#!/usr/bin/env python
# coding: utf-8
"""
create at 2017/11/19 by allen
"""
from datetime import datetime
from app.lib.database import db, Column
# Association table for the many-to-many relation between users and roles.
# Rows record when the role was granted via create_time.
user_role = db.Table('user_role',
                     Column('user_id', db.Integer, db.ForeignKey('user.id')),
                     Column('role_id', db.Integer, db.ForeignKey('role.id')),
                     Column('create_time', db.DateTime, nullable=False, default=datetime.now))

# Association table for the many-to-many relation between roles and resources.
role_resource = db.Table('role_resource',
                         Column(
                             'role_id',
                             db.Integer,
                             db.ForeignKey('role.id')),
                         Column(
                             'resource_id',
                             db.Integer,
                             db.ForeignKey('resource.id')),
                         Column('create_time', db.DateTime, nullable=False, default=datetime.now))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
220,
220,
220,
2251,
379,
2177,
14,
1157,
14,
1129,
416,
477,
268,
198,
37811,
198,
198,
6738,
4818,
8079,
1330,
4818,
8079,
1... | 1.776291 | 523 |
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Author: ryanss <ryanssdev@icloud.com> (c) 2014-2017
# dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2021
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date, datetime
from dateutil.easter import easter
from dateutil.relativedelta import relativedelta as rd
from holidays.constants import FRI, SUN
from holidays.constants import JAN, MAR, APR, MAY, JUN, JUL, AUG, SEP, OCT, DEC
from holidays.holiday_base import HolidayBase
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
220,
21015,
12,
3937,
13842,
198,
2,
220,
220,
24305,
198,
2,
220,
317,
3049,
11,
6942,
11361,
5888,
329,
15453,
1499,
11,
8473,
290,
1181,
198,
2,
220,
2176... | 3.038023 | 263 |
import time

# Playful interactive chatbot script; prompts and replies are user-facing
# Portuguese strings and are kept byte-for-byte.
print('Bem vindo!! eu sou Priscila, a assistente A.I..')
time.sleep(2)

print("Por favor...qual o seu nome?")
seu_nome = input()
time.sleep(3)

print(f"{seu_nome},Você acha que o Júlio é baitola?")
# Normalize the answer once. The original code kept a redundant alias
# (nao_baitola = baitola), used a placeholder-free f-string for the prompt,
# and re-uppercased the same value in every branch.
resposta = input("Responda 'S' ou 'N'...").upper()

print("Calculando......")
time.sleep(3)
print("Hummmmmm.....")
time.sleep(3)

if resposta == "S":
    print("Meus cálculos confirmam que o Júlio é baitola mesmo!!")
    print('''
  "Obrigada por participar! A EGB agradece sua sinceridade!!"
   ''')
elif resposta == "N":
    print("O calculado é que vc e um baitola igual a ele!!")
    print('''
  "Obrigada por participar! A EGB agradece sua sinceridade!!"
   ''')
else:
    print("Se não quer participar, vai dar meia hora sem relógio!!!")
| [
11748,
640,
198,
198,
4798,
10786,
33,
368,
29178,
78,
3228,
304,
84,
24049,
1736,
2304,
10102,
11,
257,
3342,
21872,
317,
13,
40,
492,
11537,
198,
2435,
13,
42832,
7,
17,
8,
198,
4798,
7203,
47,
273,
2661,
986,
13255,
267,
384,
8... | 2.36 | 325 |
import unittest
import pymongo
import logging
logging.basicConfig(level=logging.DEBUG)
from mongomodels import connections, MongoModel, String, Integer, \
Column, or_, ValidationError, Boolean, belongs_to
# Run the module's test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
11748,
555,
715,
395,
198,
11748,
279,
4948,
25162,
198,
11748,
18931,
198,
6404,
2667,
13,
35487,
16934,
7,
5715,
28,
6404,
2667,
13,
30531,
8,
198,
198,
6738,
285,
506,
296,
375,
1424,
1330,
8787,
11,
42591,
17633,
11,
10903,
11,
... | 3.023529 | 85 |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Ontology Engineering Group
http://www.oeg-upm.net/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2016 Ontology Engineering Group.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import logging
import rdflib
from rdflib import BNode
from rdflib import ConjunctiveGraph
from rdflib import Graph
from rdflib import Literal
from rdflib import RDF
from rdflib import RDFS
from rdflib import URIRef
from rdflib import Variable
from rdflib import plugin
from agora.engine.plan.agp import TP, AGP
from agora.engine.plan.graph import AGORA
from agora.graph.evaluate import extract_bgps
__author__ = 'Fernando Serena'
# Module-level logger for the agora.graph package.
log = logging.getLogger('agora.graph')
# Register Agora's query Processor/Result implementations as rdflib plugins
# under the 'agora' name (selected via graph.query(..., processor='agora')).
plugin.register('agora', rdflib.query.Processor, 'agora.graph.processor', 'FragmentProcessor')
plugin.register('agora', rdflib.query.Result, 'agora.graph.processor', 'FragmentResult')
def extract_tps_from_plan(plan):
    # type: (Graph) -> dict
    """
    Collect all triple-pattern nodes declared in a search-plan graph.

    :param plan: Search Plan graph
    :return: dict mapping each triple pattern's rdfs:label (as str) to the
        result of process_tp_node (defined elsewhere in this module) for
        that node.
    """
    # Assumes each AGORA.TriplePattern subject carries exactly one rdfs:label;
    # pop() extracts it (would raise IndexError on a label-less node -- TODO confirm).
    return {str(list(plan.objects(tpn, RDFS.label)).pop()): process_tp_node(tpn) for tpn in
            plan.subjects(RDF.type, AGORA.TriplePattern)}
| [
37811,
198,
2,
12,
46402,
46402,
46402,
46402,
27584,
46249,
198,
220,
9463,
1435,
14044,
4912,
198,
220,
220,
220,
220,
220,
220,
220,
2638,
1378,
2503,
13,
78,
1533,
12,
929,
76,
13,
3262,
14,
198,
2,
12,
46402,
46402,
46402,
4640... | 3.403087 | 583 |
# -*- coding: utf-8 -*-
"""
Operations on Strings
"""
import re
from mathics.version import __version__ # noqa used in loading to check consistency.
from mathics.builtin.base import (
BinaryOperator,
Builtin,
)
from mathics.core.expression import (
Expression,
Integer,
Integer1,
String,
Symbol,
SymbolFalse,
SymbolList,
SymbolTrue,
from_python,
string_list,
)
from mathics.builtin.lists import python_seq, convert_seq
from mathics.builtin.strings import (
_StringFind,
_evaluate_match,
_parallel_match,
mathics_split,
to_regex,
)
class StringDrop(Builtin):
    """
    <dl>
    <dt>'StringDrop["$string$", $n$]'
        <dd>gives $string$ with the first $n$ characters dropped.
    <dt>'StringDrop["$string$", -$n$]'
        <dd>gives $string$ with the last $n$ characters dropped.
    <dt>'StringDrop["$string$", {$n$}]'
        <dd>gives $string$ with the $n$th character dropped.
    <dt>'StringDrop["$string$", {$m$, $n$}]'
        <dd>gives $string$ with the characters $m$ through $n$ dropped.
    </dl>

    >> StringDrop["abcde", 2]
    = cde
    >> StringDrop["abcde", -2]
    = abc
    >> StringDrop["abcde", {2}]
    = acde
    >> StringDrop["abcde", {2,3}]
    = ade
    >> StringDrop["abcd",{3,2}]
    = abcd
    >> StringDrop["abcd",0]
    = abcd
    """

    messages = {
        "strse": "String expected at position 1.",
        "mseqs": "Integer or list of two Integers are expected at position 2.",
        "drop": 'Cannot drop positions `1` through `2` in "`3`".',
    }

    def apply_with_n(self, string, n, evaluation):
        "StringDrop[string_,n_Integer]"
        # Drop the first n (n > 0) or last |n| (n < 0) characters.
        if not isinstance(string, String):
            return evaluation.message("StringDrop", "strse")
        if isinstance(n, Integer):
            pos = n.value
            # A drop count larger than the string length is an error.
            if pos > len(string.get_string_value()):
                return evaluation.message("StringDrop", "drop", 1, pos, string)
            if pos < -len(string.get_string_value()):
                return evaluation.message("StringDrop", "drop", pos, -1, string)
            if pos > 0:
                return String(string.get_string_value()[pos:])
            if pos < 0:
                return String(string.get_string_value()[:(pos)])
            if pos == 0:
                # Dropping zero characters leaves the string unchanged.
                return string
        return evaluation.message("StringDrop", "mseqs")

    def apply_with_ni_nf(self, string, ni, nf, evaluation):
        "StringDrop[string_,{ni_Integer,nf_Integer}]"
        # Drop the inclusive character range ni..nf (1-based; negative
        # indices count from the end of the string).
        if not isinstance(string, String):
            return evaluation.message("StringDrop", "strse", string)
        if ni.value == 0 or nf.value == 0:
            return evaluation.message("StringDrop", "drop", ni, nf)
        fullstring = string.get_string_value()
        lenfullstring = len(fullstring)
        posi = ni.value
        if posi < 0:
            # Translate a negative index into its 1-based positive equivalent.
            posi = lenfullstring + posi + 1
        posf = nf.value
        if posf < 0:
            posf = lenfullstring + posf + 1
        if posf > lenfullstring or posi > lenfullstring or posf <= 0 or posi <= 0:
            # positions out or range
            return evaluation.message("StringDrop", "drop", ni, nf, fullstring)
        if posf < posi:
            return string  # this is what actually mma does
        return String(fullstring[: (posi - 1)] + fullstring[posf:])

    def apply_with_ni(self, string, ni, evaluation):
        "StringDrop[string_,{ni_Integer}]"
        # Drop the single character at (1-based) position ni.
        if not isinstance(string, String):
            return evaluation.message("StringDrop", "strse", string)
        if ni.value == 0:
            return evaluation.message("StringDrop", "drop", ni, ni)
        fullstring = string.get_string_value()
        lenfullstring = len(fullstring)
        posi = ni.value
        if posi < 0:
            posi = lenfullstring + posi + 1
        if posi > lenfullstring or posi <= 0:
            return evaluation.message("StringDrop", "drop", ni, ni, fullstring)
        return String(fullstring[: (posi - 1)] + fullstring[posi:])

    def apply(self, string, something, evaluation):
        "StringDrop[string_,something___]"
        # Fallback rule: any argument shape not matched above is an error.
        if not isinstance(string, String):
            return evaluation.message("StringDrop", "strse")
        return evaluation.message("StringDrop", "mseqs")
class StringInsert(Builtin):
"""
<dl>
<dt>'StringInsert["$string$", "$snew$", $n$]'
<dd>yields a string with $snew$ inserted starting at position $n$ in $string$.
<dt>'StringInsert["$string$", "$snew$", -$n$]'
<dd>inserts a at position $n$ from the end of "$string$".
<dt>'StringInsert["$string$", "$snew$", {$n_1$, $n_2$, ...}]'
<dd>inserts a copy of $snew$ at each position $n_i$ in $string$;
the $n_i$ are taken before any insertion is done.
<dt>'StringInsert[{$s_1$, $s_2$, ...}, "$snew$", $n$]'
<dd>gives the list of resutls for each of the $s_i$.
</dl>
>> StringInsert["noting", "h", 4]
= nothing
#> StringInsert["abcdefghijklm", "X", 15]
: Cannot insert at position 15 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, 15]
#> StringInsert[abcdefghijklm, "X", 4]
: String or list of strings expected at position 1 in StringInsert[abcdefghijklm, X, 4].
= StringInsert[abcdefghijklm, X, 4]
#> StringInsert["abcdefghijklm", X, 4]
: String expected at position 2 in StringInsert[abcdefghijklm, X, 4].
= StringInsert[abcdefghijklm, X, 4]
#> StringInsert["abcdefghijklm", "X", a]
: Position specification a in StringInsert[abcdefghijklm, X, a] is not a machine-sized integer or a list of machine-sized integers.
= StringInsert[abcdefghijklm, X, a]
#> StringInsert["abcdefghijklm", "X", 0]
: Cannot insert at position 0 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, 0]
>> StringInsert["note", "d", -1]
= noted
>> StringInsert["here", "t", -5]
= there
#> StringInsert["abcdefghijklm", "X", -15]
: Cannot insert at position -15 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, -15]
>> StringInsert["adac", "he", {1, 5}]
= headache
#> StringInsert["abcdefghijklm", "X", {1, -1, 14, -14}]
= XXabcdefghijklmXX
#> StringInsert["abcdefghijklm", "X", {1, 0}]
: Cannot insert at position 0 in abcdefghijklm.
= StringInsert[abcdefghijklm, X, {1, 0}]
#> StringInsert["", "X", {1}]
= X
#> StringInsert["", "X", {1, -1}]
= XX
#> StringInsert["", "", {1}]
= #<--#
#> StringInsert["", "X", {1, 2}]
: Cannot insert at position 2 in .
= StringInsert[, X, {1, 2}]
#> StringInsert["abcdefghijklm", "", {1, 2, 3, 4 ,5, -6}]
= abcdefghijklm
#> StringInsert["abcdefghijklm", "X", {}]
= abcdefghijklm
>> StringInsert[{"something", "sometimes"}, " ", 5]
= {some thing, some times}
#> StringInsert[{"abcdefghijklm", "Mathics"}, "X", 13]
: Cannot insert at position 13 in Mathics.
= {abcdefghijklXm, StringInsert[Mathics, X, 13]}
#> StringInsert[{"", ""}, "", {1, 1, 1, 1}]
= {, }
#> StringInsert[{"abcdefghijklm", "Mathics"}, "X", {0, 2}]
: Cannot insert at position 0 in abcdefghijklm.
: Cannot insert at position 0 in Mathics.
= {StringInsert[abcdefghijklm, X, {0, 2}], StringInsert[Mathics, X, {0, 2}]}
#> StringInsert[{"abcdefghijklm", Mathics}, "X", {1, 2}]
: String or list of strings expected at position 1 in StringInsert[{abcdefghijklm, Mathics}, X, {1, 2}].
= StringInsert[{abcdefghijklm, Mathics}, X, {1, 2}]
#> StringInsert[{"", "Mathics"}, "X", {1, 1, -1}]
= {XXX, XXMathicsX}
>> StringInsert["1234567890123456", ".", Range[-16, -4, 3]]
= 1.234.567.890.123.456"""
messages = {
"strse": "String or list of strings expected at position `1` in `2`.",
"string": "String expected at position `1` in `2`.",
"ins": "Cannot insert at position `1` in `2`.",
"psl": "Position specification `1` in `2` is not a machine-sized integer or a list of machine-sized integers.",
}
def apply(self, strsource, strnew, pos, evaluation):
"StringInsert[strsource_, strnew_, pos_]"
exp = Expression("StringInsert", strsource, strnew, pos)
py_strnew = strnew.get_string_value()
if py_strnew is None:
return evaluation.message("StringInsert", "string", Integer(2), exp)
# Check and create list of position
listpos = []
if pos.has_form("List", None):
leaves = pos.get_leaves()
if not leaves:
return strsource
else:
for i, posi in enumerate(leaves):
py_posi = posi.get_int_value()
if py_posi is None:
return evaluation.message("StringInsert", "psl", pos, exp)
listpos.append(py_posi)
else:
py_pos = pos.get_int_value()
if py_pos is None:
return evaluation.message("StringInsert", "psl", pos, exp)
listpos.append(py_pos)
# Check and perform the insertion
if strsource.has_form("List", None):
py_strsource = [sub.get_string_value() for sub in strsource.leaves]
if any(sub is None for sub in py_strsource):
return evaluation.message("StringInsert", "strse", Integer1, exp)
return Expression(
"List",
*[
String(self._insert(s, py_strnew, listpos, evaluation))
for s in py_strsource
]
)
else:
py_strsource = strsource.get_string_value()
if py_strsource is None:
return evaluation.message("StringInsert", "strse", Integer1, exp)
return String(self._insert(py_strsource, py_strnew, listpos, evaluation))
class StringJoin(BinaryOperator):
    """
    <dl>
    <dt>'StringJoin["$s1$", "$s2$", ...]'
        <dd>returns the concatenation of the strings $s1$, $s2$, .
    </dl>

    >> StringJoin["a", "b", "c"]
    = abc
    >> "a" <> "b" <> "c" // InputForm
    = "abc"

    'StringJoin' flattens lists out:
    >> StringJoin[{"a", "b"}] // InputForm
    = "ab"
    >> Print[StringJoin[{"Hello", " ", {"world"}}, "!"]]
    | Hello world!
    """

    operator = "<>"
    precedence = 600
    attributes = ("Flat", "OneIdentity")

    def apply(self, items, evaluation):
        "StringJoin[items___]"
        # Flatten any nested lists, then pick the concrete argument sequence:
        # either the leaves of a single list or the raw argument sequence.
        flat = items.flatten(SymbolList)
        if flat.get_head_name() == "System`List":
            pieces = flat.leaves
        else:
            pieces = flat.get_sequence()

        # Validate every argument and collect its text; stop with a message
        # (leaving the expression unevaluated) at the first non-String.
        chunks = []
        for piece in pieces:
            if not isinstance(piece, String):
                evaluation.message("StringJoin", "string")
                return
            chunks.append(piece.value)
        return String("".join(chunks))
class StringLength(Builtin):
    """
    <dl>
    <dt>'StringLength["$string$"]'
        <dd>gives the length of $string$.
    </dl>

    >> StringLength["abc"]
    = 3
    'StringLength' is listable:
    >> StringLength[{"a", "bc"}]
    = {1, 2}
    >> StringLength[x]
    : String expected.
    = StringLength[x]
    """

    attributes = ("Listable",)

    def apply(self, str, evaluation):
        "StringLength[str_]"
        # NOTE: the parameter is named `str` to match the pattern variable in
        # the rule string above (mathics binds pattern vars by name).
        if isinstance(str, String):
            return Integer(len(str.value))
        # Non-string argument: report it and leave the expression unevaluated.
        evaluation.message("StringLength", "string")
class StringPosition(Builtin):
"""
<dl>
<dt>'StringPosition["$string$", $patt$]'
<dd>gives a list of starting and ending positions where $patt$ matches "$string$".
<dt>'StringPosition["$string$", $patt$, $n$]'
<dd>returns the first $n$ matches only.
<dt>'StringPosition["$string$", {$patt1$, $patt2$, ...}, $n$]'
<dd>matches multiple patterns.
<dt>'StringPosition[{$s1$, $s2$, ...}, $patt$]'
<dd>returns a list of matches for multiple strings.
</dl>
>> StringPosition["123ABCxyABCzzzABCABC", "ABC"]
= {{4, 6}, {9, 11}, {15, 17}, {18, 20}}
>> StringPosition["123ABCxyABCzzzABCABC", "ABC", 2]
= {{4, 6}, {9, 11}}
'StringPosition' can be useful for searching through text.
>> data = Import["ExampleData/EinsteinSzilLetter.txt"];
>> StringPosition[data, "uranium"]
= {{299, 305}, {870, 876}, {1538, 1544}, {1671, 1677}, {2300, 2306}, {2784, 2790}, {3093, 3099}}
#> StringPosition["123ABCxyABCzzzABCABC", "ABC", -1]
: Non-negative integer or Infinity expected at position 3 in StringPosition[123ABCxyABCzzzABCABC, ABC, -1].
= StringPosition[123ABCxyABCzzzABCABC, ABC, -1]
## Overlaps
#> StringPosition["1231221312112332", RegularExpression["[12]+"]]
= {{1, 2}, {2, 2}, {4, 7}, {5, 7}, {6, 7}, {7, 7}, {9, 13}, {10, 13}, {11, 13}, {12, 13}, {13, 13}, {16, 16}}
#> StringPosition["1231221312112332", RegularExpression["[12]+"], Overlaps -> False]
= {{1, 2}, {4, 7}, {9, 13}, {16, 16}}
#> StringPosition["1231221312112332", RegularExpression["[12]+"], Overlaps -> x]
= {{1, 2}, {4, 7}, {9, 13}, {16, 16}}
#> StringPosition["1231221312112332", RegularExpression["[12]+"], Overlaps -> All]
: Overlaps -> All option is not currently implemented in Mathics.
= {{1, 2}, {2, 2}, {4, 7}, {5, 7}, {6, 7}, {7, 7}, {9, 13}, {10, 13}, {11, 13}, {12, 13}, {13, 13}, {16, 16}}
#> StringPosition["21211121122", {"121", "11"}]
= {{2, 4}, {4, 5}, {5, 6}, {6, 8}, {8, 9}}
#> StringPosition["21211121122", {"121", "11"}, Overlaps -> False]
= {{2, 4}, {5, 6}, {8, 9}}
#> StringPosition[{"abc", "abcda"}, "a"]
= {{{1, 1}}, {{1, 1}, {5, 5}}}
#> StringPosition[{"abc"}, "a", Infinity]
= {{{1, 1}}}
#> StringPosition["abc"]["123AabcDEabc"]
= {{5, 7}, {10, 12}}
"""
options = {
"IgnoreCase": "False",
"MetaCharacters": "None",
"Overlaps": "True",
}
messages = {
"strse": "String or list of strings expected at position `1` in `2`.",
"overall": "Overlaps -> All option is not currently implemented in Mathics.",
"innf": "Non-negative integer or Infinity expected at position `2` in `1`.",
}
rules = {
"StringPosition[patt_][s_]": "StringPosition[s, patt]",
}
def apply(self, string, patt, evaluation, options):
"StringPosition[string_, patt_, OptionsPattern[StringPosition]]"
return self.apply_n(
string,
patt,
Expression("DirectedInfinity", Integer1),
evaluation,
options,
)
def apply_n(self, string, patt, n, evaluation, options):
"StringPosition[string_, patt_, n:(_Integer|DirectedInfinity[1]), OptionsPattern[StringPosition]]"
expr = Expression("StringPosition", string, patt, n)
# check n
if n.has_form("DirectedInfinity", 1):
py_n = float("inf")
else:
py_n = n.get_int_value()
if py_n is None or py_n < 0:
return evaluation.message("StringPosition", "innf", expr, Integer(3))
# check options
if options["System`Overlaps"] == SymbolTrue:
overlap = True
elif options["System`Overlaps"] == SymbolFalse:
overlap = False
elif options["System`Overlaps"] == Symbol("All"):
# TODO
evaluation.message("StringPosition", "overall")
overlap = True
else:
overlap = False # unknown options are teated as False
# convert patterns
if patt.has_form("List", None):
patts = patt.get_leaves()
else:
patts = [patt]
re_patts = []
for p in patts:
py_p = to_regex(p, evaluation)
if py_p is None:
return evaluation.message("StringExpression", "invld", p, patt)
re_patts.append(py_p)
compiled_patts = [re.compile(re_patt) for re_patt in re_patts]
# string or list of strings
if string.has_form("List", None):
py_strings = [s.get_string_value() for s in string.leaves]
if None in py_strings:
return
results = [
self.do_apply(py_string, compiled_patts, py_n, overlap)
for py_string in py_strings
]
return Expression(SymbolList, *results)
else:
py_string = string.get_string_value()
if py_string is None:
return
return self.do_apply(py_string, compiled_patts, py_n, overlap)
@staticmethod
class StringReplace(_StringFind):
    """
    <dl>
    <dt>'StringReplace["$string$", "$a$"->"$b$"]'
        <dd>replaces each occurrence of $old$ with $new$ in $string$.
    <dt>'StringReplace["$string$", {"$s1$"->"$sp1$", "$s2$"->"$sp2$"}]'
        <dd>performs multiple replacements of each $si$ by the
        corresponding $spi$ in $string$.
    <dt>'StringReplace["$string$", $srules$, $n$]'
        <dd>only performs the first $n$ replacements.
    <dt>'StringReplace[{"$string1$", "$string2$", ...}, $srules$]'
        <dd>performs the replacements specified by $srules$ on a list
        of strings.
    </dl>

    StringReplace replaces all occurrences of one substring with another:
    >> StringReplace["xyxyxyyyxxxyyxy", "xy" -> "A"]
    = AAAyyxxAyA

    Multiple replacements can be supplied:
    >> StringReplace["xyzwxyzwxxyzxyzw", {"xyz" -> "A", "w" -> "BCD"}]
    = ABCDABCDxAABCD

    Only replace the first 2 occurences:
    >> StringReplace["xyxyxyyyxxxyyxy", "xy" -> "A", 2]
    = AAxyyyxxxyyxy

    Also works for multiple rules:
    >> StringReplace["abba", {"a" -> "A", "b" -> "B"}, 2]
    = ABba

    StringReplace acts on lists of strings too:
    >> StringReplace[{"xyxyxxy", "yxyxyxxxyyxy"}, "xy" -> "A"]
    = {AAxA, yAAxxAyA}

    #> StringReplace["abcabc", "a" -> "b", Infinity]
    = bbcbbc
    #> StringReplace[x, "a" -> "b"]
    : String or list of strings expected at position 1 in StringReplace[x, a -> b].
    = StringReplace[x, a -> b]
    #> StringReplace["xyzwxyzwaxyzxyzw", x]
    : x is not a valid string replacement rule.
    = StringReplace[xyzwxyzwaxyzxyzw, x]
    #> StringReplace["xyzwxyzwaxyzxyzw", x -> y]
    : Element x is not a valid string or pattern element in x.
    = StringReplace[xyzwxyzwaxyzxyzw, x -> y]
    #> StringReplace["abcabc", "a" -> "b", -1]
    : Non-negative integer or Infinity expected at position 3 in StringReplace[abcabc, a -> b, -1].
    = StringReplace[abcabc, a -> b, -1]
    #> StringReplace["abc", "b" -> 4]
    : String expected.
    = a <> 4 <> c

    #> StringReplace["01101100010", "01" .. -> "x"]
    = x1x100x0

    #> StringReplace["abc abcb abdc", "ab" ~~ _ -> "X"]
    = X Xb Xc
    #> StringReplace["abc abcd abcd", WordBoundary ~~ "abc" ~~ WordBoundary -> "XX"]
    = XX abcd abcd
    #> StringReplace["abcd acbd", RegularExpression["[ab]"] -> "XX"]
    = XXXXcd XXcXXd
    #> StringReplace["abcd acbd", RegularExpression["[ab]"] ~~ _ -> "YY"]
    = YYcd YYYY
    #> StringReplace["abcdabcdaabcabcd", {"abc" -> "Y", "d" -> "XXX"}]
    = YXXXYXXXaYYXXX

    #> StringReplace[" Have a nice day. ", (StartOfString ~~ Whitespace) | (Whitespace ~~ EndOfString) -> ""] // FullForm
    = "Have a nice day."

    #> StringReplace["xyXY", "xy" -> "01"]
    = 01XY
    #> StringReplace["xyXY", "xy" -> "01", IgnoreCase -> True]
    = 0101

    StringReplace also can be used as an operator:
    >> StringReplace["y" -> "ies"]["city"]
    = cities
    """

    # TODO Special Characters
    """
    #> StringReplace["product: A \\[CirclePlus] B" , "\\[CirclePlus]" -> "x"]
    = A x B
    """

    rules = {
        "StringReplace[rule_][string_]": "StringReplace[string, rule]",
    }

    def apply(self, string, rule, n, evaluation, options):
        "%(name)s[string_, rule_, OptionsPattern[%(name)s], n_:System`Private`Null]"
        # this pattern is a slight hack to get around missing Shortest/Longest.
        # All real work happens in the shared _StringFind._apply helper; the
        # meaning of the final False flag is defined by that base class --
        # presumably "replace mode" rather than "match mode"; confirm there.
        return self._apply(string, rule, n, evaluation, options, False)
class StringReverse(Builtin):
    """
    <dl>
    <dt>'StringReverse["$string$"]'
        <dd>reverses the order of the characters in "string".
    </dl>

    >> StringReverse["live"]
    = evil
    """

    attributes = ("Listable", "Protected")

    def apply(self, string, evaluation):
        "StringReverse[string_String]"
        # The pattern guarantees `string` is a String; reverse its characters.
        reversed_text = string.get_string_value()[::-1]
        return String(reversed_text)
class StringRiffle(Builtin):
    """
    <dl>
    <dt>'StringRiffle[{s1, s2, s3, ...}]'
        <dd>returns a new string by concatenating all the $si$, with spaces inserted between them.
    <dt>'StringRiffle[list, sep]'
        <dd>inserts the separator $sep$ between all elements in $list$.
    <dt>'StringRiffle[list, {"left", "sep", "right"}]'
        <dd>use $left$ and $right$ as delimiters after concatenation.

    ## These 2 forms are not currently implemented
    ## <dt>'StringRiffle[{{s11, s12, ...}, {s21, s22, ...}, ...}]'
    ##   <dd>returns a new string by concatenating the $sij$, and inserting spaces at the lowest level and newlines at the higher level.
    ## <dt>'StringRiffle[list, sep1, sep2, ...]'
    ##   <dd>inserts separator $sepi$ between elements of list at level i.
    </dl>

    >> StringRiffle[{"a", "b", "c", "d", "e"}]
    = a b c d e

    #> StringRiffle[{a, b, c, "d", e, "f"}]
    = a b c d e f

    ## 1st is not a list
    #> StringRiffle["abcdef"]
    : List expected at position 1 in StringRiffle[abcdef].
    : StringRiffle called with 1 argument; 2 or more arguments are expected.
    = StringRiffle[abcdef]

    #> StringRiffle[{"", "", ""}] // FullForm
    = "  "

    ## This form is not supported
    #> StringRiffle[{{"a", "b"}, {"c", "d"}}]
    : Sublist form in position 1 is is not implemented yet.
    = StringRiffle[{{a, b}, {c, d}}]

    >> StringRiffle[{"a", "b", "c", "d", "e"}, ", "]
    = a, b, c, d, e

    #> StringRiffle[{"a", "b", "c", "d", "e"}, sep]
    : String expected at position 2 in StringRiffle[{a, b, c, d, e}, sep].
    = StringRiffle[{a, b, c, d, e}, sep]

    >> StringRiffle[{"a", "b", "c", "d", "e"}, {"(", " ", ")"}]
    = (a b c d e)

    #> StringRiffle[{"a", "b", "c", "d", "e"}, {" ", ")"}]
    : String expected at position 2 in StringRiffle[{a, b, c, d, e}, { , )}].
    = StringRiffle[{a, b, c, d, e}, { , )}]
    #> StringRiffle[{"a", "b", "c", "d", "e"}, {left, " ", "."}]
    : String expected at position 2 in StringRiffle[{a, b, c, d, e}, {left, , .}].
    = StringRiffle[{a, b, c, d, e}, {left, , .}]

    ## This form is not supported
    #> StringRiffle[{"a", "b", "c"}, "+", "-"]
    ## Mathematica result: a+b+c, but we are not support multiple separators
    : Multiple separators form is not implemented yet.
    = StringRiffle[{a, b, c}, +, -]
    """

    attributes = ("ReadProtected",)

    messages = {
        "list": "List expected at position `1` in `2`.",
        "argmu": "StringRiffle called with 1 argument; 2 or more arguments are expected.",
        "argm": "StringRiffle called with 0 arguments; 2 or more arguments are expected.",
        "string": "String expected at position `1` in `2`.",
        "sublist": "Sublist form in position 1 is is not implemented yet.",
        "mulsep": "Multiple separators form is not implemented yet.",
    }

    def apply(self, liststr, seps, evaluation):
        "StringRiffle[liststr_, seps___]"
        separators = seps.get_sequence()
        # `exp` re-creates the original call for use in error messages.
        exp = (
            Expression("StringRiffle", liststr, seps)
            if separators
            else Expression("StringRiffle", liststr)
        )

        # Validate separators: at most one, either a String or a
        # {left, sep, right} triple of Strings.
        if len(separators) > 1:
            return evaluation.message("StringRiffle", "mulsep")
        elif len(separators) == 1:
            if separators[0].has_form("List", None):
                if len(separators[0].leaves) != 3 or any(
                    not isinstance(s, String) for s in separators[0].leaves
                ):
                    return evaluation.message("StringRiffle", "string", Integer(2), exp)
            elif not isinstance(separators[0], String):
                return evaluation.message("StringRiffle", "string", Integer(2), exp)

        # Validate list of string: first argument must be a flat list.
        if not liststr.has_form("List", None):
            evaluation.message("StringRiffle", "list", Integer1, exp)
            return evaluation.message("StringRiffle", "argmu", exp)
        elif any(leaf.has_form("List", None) for leaf in liststr.leaves):
            return evaluation.message("StringRiffle", "sublist")

        # Determine the separation token (default: single space), plus
        # optional left/right delimiters from the triple form.
        left, right = "", ""
        if len(separators) == 0:
            sep = " "
        else:
            if separators[0].has_form("List", None):
                left = separators[0].leaves[0].value
                sep = separators[0].leaves[1].value
                right = separators[0].leaves[2].value
            else:
                sep = separators[0].get_string_value()

        # Getting all together: elements are rendered via OutputForm, so
        # non-String elements (symbols, numbers) are accepted too.
        result = left
        for i in range(len(liststr.leaves)):
            text = (
                liststr.leaves[i]
                .format(evaluation, "System`OutputForm")
                .boxes_to_text(evaluation=evaluation)
            )
            if i == len(liststr.leaves) - 1:
                result += text + right
            else:
                result += text + sep
        return String(result)
class StringSplit(Builtin):
    """
    <dl>
    <dt>'StringSplit["$s$"]'
        <dd>splits the string $s$ at whitespace, discarding the
        whitespace and returning a list of strings.
    <dt>'StringSplit["$s$", "$d$"]'
        <dd>splits $s$ at the delimiter $d$.
    <dt>'StringSplit[$s$, {"$d1$", "$d2$", ...}]'
        <dd>splits $s$ using multiple delimiters.
    <dt>'StringSplit[{$s_1$, $s_2, ...}, {"$d1$", "$d2$", ...}]'
        <dd>returns a list with the result of applying the function to
        each element.
    </dl>

    >> StringSplit["abc,123", ","]
     = {abc, 123}

    >> StringSplit["abc 123"]
     = {abc, 123}

    #> StringSplit["  abc    123 "]
     = {abc, 123}

    >> StringSplit["abc,123.456", {",", "."}]
     = {abc, 123, 456}

    >> StringSplit["a  b    c", RegularExpression["  +"]]
     = {a, b    c}

    >> StringSplit[{"a  b", "c  d"}, RegularExpression["  +"]]
     = {{a, b}, {c, d}}

    #> StringSplit["x", "x"]
     = {}

    #> StringSplit[x]
     : String or list of strings expected at position 1 in StringSplit[x].
     = StringSplit[x, Whitespace]

    #> StringSplit["x", x]
     : Element x is not a valid string or pattern element in x.
     = StringSplit[x, x]

    #> StringSplit["12312123", "12"..]
     = {3, 3}

    #> StringSplit["abaBa", "b"]
     = {a, aBa}

    #> StringSplit["abaBa", "b", IgnoreCase -> True]
     = {a, a, a}
    """

    rules = {
        "StringSplit[s_]": "StringSplit[s, Whitespace]",
    }

    options = {
        "IgnoreCase": "False",
        "MetaCharacters": "None",
    }

    messages = {
        "strse": "String or list of strings expected at position `1` in `2`.",
        "pysplit": "As of Python 3.5 re.split does not handle empty pattern matches.",
    }

    def apply(self, string, patt, evaluation, options):
        "StringSplit[string_, patt_, OptionsPattern[%(name)s]]"
        # A list of strings maps element-wise: recurse on each element.
        # Use the public `leaves` accessor for consistency with the rest of
        # this module (was: the private `string._leaves`).
        if string.get_head_name() == "System`List":
            leaves = [self.apply(s, patt, evaluation, options) for s in string.leaves]
            return Expression(SymbolList, *leaves)

        py_string = string.get_string_value()
        if py_string is None:
            # First argument is neither a String nor a list of strings.
            return evaluation.message(
                "StringSplit", "strse", Integer1, Expression("StringSplit", string)
            )

        # Accept a single pattern or a list of alternative patterns.
        if patt.has_form("List", None):
            patts = patt.get_leaves()
        else:
            patts = [patt]
        # Translate each Mathics string pattern into a Python regex.
        re_patts = []
        for p in patts:
            py_p = to_regex(p, evaluation)
            if py_p is None:
                return evaluation.message("StringExpression", "invld", p, patt)
            re_patts.append(py_p)

        flags = re.MULTILINE
        if options["System`IgnoreCase"] == SymbolTrue:
            flags = flags | re.IGNORECASE

        # Split successively on every pattern; mathics_split supplies the
        # empty-match semantics that re.split (Python >= 3.5) rejects.
        result = [py_string]
        for re_patt in re_patts:
            result = [t for s in result for t in mathics_split(re_patt, s, flags=flags)]

        # Zero-length fragments between adjacent delimiters are discarded.
        return string_list(
            SymbolList, [String(x) for x in result if x != ""], evaluation
        )
class StringTake(Builtin):
    """
    <dl>
    <dt>'StringTake["$string$", $n$]'
        <dd>gives the first $n$ characters in $string$.
    <dt>'StringTake["$string$", -$n$]'
        <dd>gives the last $n$ characters in $string$.
    <dt>'StringTake["$string$", {$n$}]'
        <dd>gives the $n$th character in $string$.
    <dt>'StringTake["$string$", {$m$, $n$}]'
        <dd>gives characters $m$ through $n$ in $string$.
    <dt>'StringTake["$string$", {$m$, $n$, $s$}]'
        <dd>gives characters $m$ through $n$ in steps of $s$.
    <dt>'StringTake[{$s1$, $s2$, ...} $spec$}]'
        <dd>gives the list of results for each of the $si$.
    </dl>

    >> StringTake["abcde", 2]
     = ab
    >> StringTake["abcde", 0]
     = #<--#
    >> StringTake["abcde", -2]
     = de
    >> StringTake["abcde", {2}]
     = b
    >> StringTake["abcd", {2,3}]
     = bc
    >> StringTake["abcdefgh", {1, 5, 2}]
     = ace

    Take the last 2 characters from several strings:
    >> StringTake[{"abcdef", "stuv", "xyzw"}, -2]
     = {ef, uv, zw}

    StringTake also supports standard sequence specifications
    >> StringTake["abcdef", All]
     = abcdef

    #> StringTake["abcd", 0] // InputForm
     = ""
    #> StringTake["abcd", {3, 2}] // InputForm
     = ""
    #> StringTake["", {1, 0}] // InputForm
     = ""

    #> StringTake["abc", {0, 0}]
     : Cannot take positions 0 through 0 in "abc".
     = StringTake[abc, {0, 0}]

    #> StringTake[{2, 4},2]
     : String or list of strings expected at position 1.
     = StringTake[{2, 4}, 2]

    #> StringTake["kkkl",Graphics[{}]]
     : Integer or a list of sequence specifications expected at position 2.
     = StringTake[kkkl, -Graphics-]
    """

    messages = {
        "strse": "String or list of strings expected at position 1.",
        # FIXME: mseqs should be: Sequence specification (+n, -n, {+n}, {-n}, {m, n}, or {m, n, s}) or a list
        # of sequence specifications expected at position 2 in
        "mseqs": "Integer or a list of sequence specifications expected at position 2.",
        "take": 'Cannot take positions `1` through `2` in "`3`".',
    }

    def apply(self, string, seqspec, evaluation):
        "StringTake[string_String, seqspec_]"
        # Extract the raw Python string; None means the pattern matched a
        # non-String somehow — report and bail.
        result = string.get_string_value()
        if result is None:
            return evaluation.message("StringTake", "strse")

        if isinstance(seqspec, Integer):
            pos = seqspec.get_int_value()
            # +n takes the first n characters; -n takes the last n.
            if pos >= 0:
                seq = (1, pos, 1)
            else:
                seq = (pos, None, 1)
        else:
            # {n}, {m, n}, {m, n, s}, All, ... handled by convert_seq.
            seq = convert_seq(seqspec)

        if seq is None:
            return evaluation.message("StringTake", "mseqs")

        start, stop, step = seq
        # Translate the 1-based WMA sequence spec into a Python slice.
        py_slice = python_seq(start, stop, step, len(result))

        if py_slice is None:
            # Positions out of range for this string.
            return evaluation.message("StringTake", "take", start, stop, string)

        return String(result[py_slice])

    def apply_strings(self, strings, spec, evaluation):
        "StringTake[strings__, spec_]"
        # Map the single-string rule over every element; propagate failure
        # (None) so the whole expression stays unevaluated.
        result_list = []
        for string in strings.leaves:
            result = self.apply(string, spec, evaluation)
            if result is None:
                return None
            result_list.append(result)
        return Expression("List", *result_list)
class StringTrim(Builtin):
    """
    <dl>
    <dt>'StringTrim[$s$]'
        <dd>returns a version of $s$ with whitespace removed from start and end.
    </dl>

    >> StringJoin["a", StringTrim["  \\tb\\n "], "c"]
     = abc

    >> StringTrim["ababaxababyaabab", RegularExpression["(ab)+"]]
     = axababya
    """

    def apply(self, s, evaluation):
        "StringTrim[s_String]"
        # Default form: strip ASCII whitespace (space, tab, newline) from
        # both ends.
        return String(s.get_string_value().strip(" \t\n"))

    def apply_pattern(self, s, patt, expression, evaluation):
        "StringTrim[s_String, patt_]"
        text = s.get_string_value()
        if not text:
            # Nothing to trim from an empty string.
            return s

        # Translate the Mathics pattern into a Python regex.
        py_patt = to_regex(patt, evaluation)
        if py_patt is None:
            return evaluation.message("StringExpression", "invld", patt, expression)

        # Anchor the pattern at the start (\A) and end (\Z) of the string so
        # only a leading or trailing match is trimmed, never an interior one.
        if not py_patt.startswith(r"\A"):
            left_patt = r"\A" + py_patt
        else:
            left_patt = py_patt
        if not py_patt.endswith(r"\Z"):
            right_patt = py_patt + r"\Z"
        else:
            right_patt = py_patt

        # Left boundary: end of the leading match (or 0 if no match).
        m = re.search(left_patt, text)
        left = m.end(0) if m else 0
        # Right boundary: start of the trailing match (or len(text)).
        m = re.search(right_patt, text)
        right = m.start(0) if m else len(text)
        return String(text[left:right])
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
18843,
602,
319,
4285,
654,
198,
37811,
198,
198,
11748,
302,
198,
198,
6738,
10688,
873,
13,
9641,
1330,
11593,
9641,
834,
220,
1303,
645,
20402,
973,
... | 2.207 | 15,000 |
from shapely.geometry import Polygon
import math
import numpy as np
import random
from shapely.geometry import Point
# from shapely.geometry import Polygon
| [
198,
6738,
5485,
306,
13,
469,
15748,
1330,
12280,
14520,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4738,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
6252,
198,
2,
422,
5485,
306,
13,
469,
15748,
1330,
12280,
... | 3.613636 | 44 |
# return True if the given tree is a BST, else return False
from collections import deque
# Tree Node
# Function to Build Tree
if __name__ == "__main__":
    # Input format: first line is the number of test cases, then one
    # serialized tree per line.
    case_count = int(input())
    for _ in range(case_count):
        serialized = input()
        tree_root = buildTree(serialized)
        # Emit 1 for a valid BST, 0 otherwise.
        print(1 if isBST(tree_root) else 0)
| [
198,
220,
220,
220,
220,
198,
198,
2,
1441,
6407,
611,
262,
1813,
5509,
318,
257,
44992,
11,
2073,
1441,
10352,
628,
198,
198,
6738,
17268,
1330,
390,
4188,
198,
2,
12200,
19081,
198,
198,
2,
15553,
284,
10934,
12200,
220,
220,
220,... | 1.99422 | 173 |
import googleapiclient.discovery
from library.gcloud_accessor.rest_library.gcloud_rest_library import GcloudRestLib
| [
11748,
23645,
499,
291,
75,
1153,
13,
67,
40821,
198,
198,
6738,
5888,
13,
70,
17721,
62,
15526,
273,
13,
2118,
62,
32016,
13,
70,
17721,
62,
2118,
62,
32016,
1330,
402,
17721,
19452,
25835,
628
] | 3.277778 | 36 |
from tifa.globals import db
from tifa.db.dal import Dal
from tifa.models.system import Staff
# Open a data-access layer over the application's global SQLAlchemy session.
dal = Dal(db.session)
# Stage one seed Staff row, then persist it.
dal.add(
    Staff,
    name="hey tea",
)
dal.commit()
| [
6738,
256,
19215,
13,
4743,
672,
874,
1330,
20613,
198,
6738,
256,
19215,
13,
9945,
13,
31748,
1330,
12348,
198,
6738,
256,
19215,
13,
27530,
13,
10057,
1330,
9983,
198,
198,
31748,
796,
12348,
7,
9945,
13,
29891,
8,
198,
31748,
13,
... | 2.552239 | 67 |
import os, sys, re, myIO
# Needs to determine functionality for symbols within commands
# Module entry point. NOTE(review): runShell is not defined in this file's
# visible portion — presumably provided earlier in the module or by myIO.
runShell()
| [
11748,
28686,
11,
25064,
11,
302,
11,
616,
9399,
198,
198,
2,
36557,
284,
5004,
11244,
329,
14354,
1626,
9729,
628,
198,
5143,
23248,
3419,
198
] | 3.923077 | 26 |
from base64 import b64decode
from base64 import b64encode
import hashlib
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad, unpad
| [
6738,
2779,
2414,
1330,
275,
2414,
12501,
1098,
198,
6738,
2779,
2414,
1330,
275,
2414,
268,
8189,
198,
11748,
12234,
8019,
198,
198,
6738,
36579,
13,
34,
10803,
1330,
34329,
198,
6738,
36579,
13,
29531,
1330,
651,
62,
25120,
62,
33661,... | 3.392857 | 56 |
# -*- coding: utf-8 -*-
import os, sys
import random
import time
# Point Django at the project settings before touching the ORM.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bazar.settings")
import django
django.setup()

from core.models import Cadastro

# Seed Cadastro rows with synthetic data.
# NOTE(review): range(1, 1000) creates 999 records — confirm whether 1000
# was intended.
for i in range(1, 1000 ):
    cad = Cadastro()
    cad.senha = i
    cad.cpf = generate_cpf()  # generate_cpf: defined elsewhere in this module
    cad.nome = 'Teste {}'.format(i)
    cad.email = 'teste{}@teste.com'.format(i)
    cad.save()
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
11,
25064,
198,
11748,
4738,
198,
11748,
640,
198,
198,
418,
13,
268,
2268,
13,
2617,
12286,
7203,
35028,
1565,
11230,
62,
28480,
51,
20754,
62,
33365,
242... | 2.331325 | 166 |
from .binding import HostGuestBindingAffinity
from .density import Density, ExcessMolarVolume
from .dielectric import DielectricConstant
from .enthalpy import EnthalpyOfMixing, EnthalpyOfVaporization
from .solvation import SolvationFreeEnergy
__all__ = [
HostGuestBindingAffinity,
Density,
ExcessMolarVolume,
DielectricConstant,
EnthalpyOfMixing,
EnthalpyOfVaporization,
SolvationFreeEnergy,
]
| [
6738,
764,
30786,
1330,
14504,
42481,
33,
6020,
35191,
6269,
198,
6738,
764,
43337,
1330,
360,
6377,
11,
1475,
919,
44,
6192,
31715,
198,
6738,
764,
11979,
801,
1173,
1330,
6733,
801,
1173,
3103,
18797,
198,
6738,
764,
34728,
9078,
1330... | 2.917241 | 145 |
import random
# import Dataset as ds
from flaskblog import poolData
from flaskblog import DoctorData as doctor
# Tunable pool parameters. Exact semantics are defined by the consuming pool
# logic (poolData / DoctorData) — the PERC suffix suggests a percentage
# threshold; TODO confirm against the pool implementation.
FILTER_THRESHOLD_PERC = 30
POOL_BUFFER_SIZE = 10
POOL_SIZE = 5
# if __name__ == '__main__':
# # add_to_dataset("Powai", 80)
# main()
| [
11748,
4738,
201,
198,
2,
1330,
16092,
292,
316,
355,
288,
82,
201,
198,
6738,
42903,
14036,
1330,
5933,
6601,
201,
198,
6738,
42903,
14036,
1330,
9356,
6601,
355,
6253,
220,
201,
198,
201,
198,
46700,
5781,
62,
4221,
19535,
39,
15173... | 2.214286 | 126 |
import re
from django import template
from home.models import CommissionerPage
from django.db.models import Q
register = template.Library()
@register.inclusion_tag('partials/current-commissioners.html')
| [
11748,
302,
198,
198,
6738,
42625,
14208,
1330,
11055,
198,
6738,
1363,
13,
27530,
1330,
13270,
9876,
198,
6738,
42625,
14208,
13,
9945,
13,
27530,
1330,
1195,
198,
198,
30238,
796,
11055,
13,
23377,
3419,
198,
198,
31,
30238,
13,
259,
... | 3.614035 | 57 |
import pandas as pd
import attr
from visions.core.model import TypeRelation
@attr.s(frozen=True)
@attr.s(frozen=True)
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
708,
81,
198,
198,
6738,
26096,
13,
7295,
13,
19849,
1330,
5994,
6892,
341,
628,
198,
198,
31,
35226,
13,
82,
7,
69,
42005,
28,
17821,
8,
628,
198,
31,
35226,
13,
82,
7,
69,
42005,
28... | 2.583333 | 48 |
import torch
import os
import numpy as np
import math
from einops import rearrange
import logging
logger = logging.getLogger(__name__)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
_GCONST_ = -0.9189385332046727  # -ln(sqrt(2*pi)): log-normalizing constant of the standard 1-D normal
def positionalencoding2d(D, H, W):
    """
    Build a fixed 2-D sinusoidal positional encoding.

    The first D/2 channels encode the horizontal (W) position and the last
    D/2 channels encode the vertical (H) position, each as interleaved
    (sin, cos) pairs over a geometric frequency schedule.

    :param D: dimension of the model; must be a multiple of 4 so each
        spatial axis gets an even number of (sin, cos) channel pairs
    :param H: H of the positions
    :param W: W of the positions
    :return: DxHxW position matrix
    """
    if D % 4 != 0:
        # Fixed message: the old text said "odd dimension", but e.g. D=6 is
        # even and still invalid — the real requirement is divisibility by 4.
        raise ValueError(
            "Cannot use sin/cos positional encoding with a dimension that is "
            "not a multiple of 4 (got dim={:d})".format(D))
    P = torch.zeros(D, H, W)
    # Each spatial axis uses half of D
    D = D // 2
    # Geometric frequency schedule, as in "Attention Is All You Need".
    div_term = torch.exp(torch.arange(0.0, D, 2) * -(math.log(1e4) / D))
    pos_w = torch.arange(0.0, W).unsqueeze(1)
    pos_h = torch.arange(0.0, H).unsqueeze(1)
    # Channels [0, D): width encoding (even: sin, odd: cos), broadcast over H.
    P[0:D:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, H, 1)
    P[1:D:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, H, 1)
    # Channels [D, 2D): height encoding, broadcast over W.
    P[D::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, W)
    P[D+1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, W)
    return P
@torch.no_grad()
def plot_fwd_flow_density(model, ax, test_grid, n_pts, batch_size, args):
    """
    plots square grid and flow density; where density under the flow is exp(log_flow_base_dist + logdet)
    """
    xx, yy, zz = test_grid

    # compute posterior approx density
    zzk, logdet = [], []
    B = batch_size
    H=1; W=1
    P = args.condition_vec
    # Positional-encoding part of the conditioning vector (shared by all layers).
    pos = positionalencoding2d(P, H, W)
    cond_list = []
    for layer in range(args.L_layers):
        # One-hot layer indicator (over the first L_layers channels) stacked
        # onto the positional encoding.
        res = torch.zeros(args.L_layers, H, W)
        res[layer] = 1
        cond = torch.cat((pos, res), dim=0).to(args.device).unsqueeze(0).repeat(B // args.L_layers, 1, 1, 1)
        cond_list.append(cond)
    #### TODO: must be fixed when L=2!  (translated from the original Korean note)
    # NOTE(review): only `cond` from the *last* loop iteration reaches c_r;
    # cond_list is built but never used below — confirm intent for L_layers > 1.
    c_r = rearrange(cond, 'b c h w -> (b h w) c')
    for zz_i in zz.split(batch_size, dim=0):
        #zzk_i, logdet_i = model.flow(zz_i)
        zzk_i, logdet_i = model(zz_i, [c_r,])
        zzk += [zzk_i]
        logdet += [logdet_i]
    zzk, logdet = torch.cat(zzk, 0), torch.cat(logdet, 0)
    # Density under the flow: base log-density (get_logp_z) plus log|det J|.
    q_log_prob = get_logp_z(zzk) / 2
    log_prob = q_log_prob + logdet
    prob = log_prob.exp().cpu()

    # plot
    ax.pcolormesh(xx, yy, prob.view(n_pts,n_pts).data, cmap=plt.cm.viridis)
    ax.set_facecolor(plt.cm.viridis(0.))
    ax.set_title('Flow Density', fontdict={'fontsize': 20})
def plot_boosted_fwd_flow_density(model, axs, test_grid, n_pts, batch_size, args, batch_id=None):
    """
    plots square grid and flow density; where density under the flow is exp(log_flow_base_dist + logdet)

    One subplot per boosted component (grid cell derived from the component
    index), plus the rho-weighted full model in axs[0, 1]. Returns the
    full-model probability grid.
    """
    xx, yy, zz = test_grid

    num_fixed_plots = 2  # every image will show the true samples and the density for the full model
    #plt_height = max(1, int(np.ceil(np.sqrt(args.num_components + num_fixed_plots))))
    #plt_width = max(1, int(np.ceil((args.num_components + num_fixed_plots) / plt_height)))
    plt_width = max(2, int(np.ceil(np.sqrt(args.num_components))))
    plt_height = max(2, int(np.ceil(np.sqrt(args.num_components))) + 1)

    total_prob = torch.zeros(n_pts, n_pts)
    # Until training has finished, only components up to the current one exist.
    num_components_to_plot = max(1, args.num_components if model.all_trained else model.component + 1)
    for c in range(num_components_to_plot):
        # Skip components with zero mixture weight.
        if model.rho[c] == 0.0:
            continue

        #row = int(np.floor((c + num_fixed_plots) / plt_width))
        #col = int((c + num_fixed_plots) % plt_width)
        row = int(1 + np.floor(c / plt_width))
        col = int(c % plt_width)

        # Push the grid through component c's flow in mini-batches.
        zzk, logdet = [], []
        for zz_i in zz.split(batch_size, dim=0):
            ZZ_i, _, _, logdet_i, _ = model(x=zz_i, components=c)
            zzk += [ZZ_i]
            logdet += [logdet_i]
        zzk, logdet = torch.cat(zzk, 0), torch.cat(logdet, 0)
        # Density under component c: base log-density plus log|det J|.
        q_log_prob = model.base_dist.log_prob(zzk).sum(1)
        log_prob = q_log_prob + logdet
        prob = log_prob.exp().cpu().view(n_pts,n_pts).data

        # plot component c
        axs[row,col].pcolormesh(xx, yy, prob, cmap=plt.cm.viridis)
        axs[row,col].set_facecolor(plt.cm.viridis(0.))
        axs[row,col].set_title(f'c={c}', fontdict={'fontsize': 20})

        # save total model probs (rho-weighted sum of per-component log-probs)
        total_prob += log_prob.cpu().view(n_pts, n_pts).data * model.rho[c]

    # plot full model
    total_prob = torch.exp(total_prob / torch.sum(model.rho[0:num_components_to_plot]))
    axs[0,1].pcolormesh(xx, yy, total_prob, cmap=plt.cm.viridis)
    axs[0,1].set_facecolor(plt.cm.viridis(0.))
    axs[0,1].set_title('GBF - All Components', fontdict={'fontsize': 20})
    return total_prob
def plot_inv_flow_density(model, ax, test_grid, n_pts, batch_size, args):
    """
    Plot the transformed grid and its density, where the density is
    exp(log_flow_base_dist - logdet).
    """
    _, _, zz = test_grid

    # Push the grid through the flow in mini-batches.
    transformed, logdets = [], []
    for chunk in zz.split(batch_size, dim=0):
        chunk_out, chunk_logdet = model.flow(chunk)
        transformed.append(chunk_out)
        logdets.append(chunk_logdet)
    zzk = torch.cat(transformed, 0)
    logdet = torch.cat(logdets, 0)

    # Change of variables: per-dimension base log-probs summed, minus log|det J|.
    log_qk = model.base_dist.log_prob(zz).sum(1) - logdet
    qk = log_qk.exp().cpu()
    zzk = zzk.cpu()

    # Render the warped grid coloured by density.
    ax.pcolormesh(zzk[:, 0].view(n_pts, n_pts).data,
                  zzk[:, 1].view(n_pts, n_pts).data,
                  qk.view(n_pts, n_pts).data, cmap=plt.cm.viridis)
    ax.set_facecolor(plt.cm.viridis(0.0))
    ax.set_title('Flow Density', fontdict={'fontsize': 20})
def plot_boosted_inv_flow_density(model, axs, test_grid, n_pts, batch_size, args, plt_height, plt_width):
    """
    plots transformed grid and density; where density is exp(log_flow_base_dist - logdet)
    """
    xx, yy, zz = test_grid
    num_fixed_plots = 2  # every image will show the true density and samples from the full model

    # Until training has finished, only components up to the current one exist.
    num_components_to_plot = args.num_components if model.all_trained else model.component + 1
    for c in range(num_components_to_plot):
        # Skip components with zero mixture weight.
        if model.rho[c] == 0.0:
            continue

        # Subplot cell for component c (first num_fixed_plots cells are reserved).
        row = int(np.floor((c + num_fixed_plots) / plt_width))
        col = int((c + num_fixed_plots) % plt_width)

        # Push the grid through component c's flow in mini-batches.
        zzk, logdet = [], []
        for zz_i in zz.split(batch_size, dim=0):
            ZZ_i, logdet_i = model.component_forward_flow(zz_i, c)
            zzk += [ZZ_i[-1]]  # grab K-th element
            logdet += [logdet_i]
        zzk, logdet = torch.cat(zzk, 0), torch.cat(logdet, 0)

        # Change of variables: density of the transformed points under component c.
        log_q0 = model.base_dist.log_prob(zz).sum(1)
        log_qk = log_q0 - logdet
        qk = log_qk.exp().cpu()
        zzk = zzk.cpu()

        # plot component c
        axs[row,col].pcolormesh(zzk[:,0].view(n_pts,n_pts).data, zzk[:,1].view(n_pts,n_pts).data, qk.view(n_pts,n_pts).data,
                                cmap=plt.cm.viridis)
        axs[row,col].set_facecolor(plt.cm.viridis(0.0))
        axs[row,col].set_title(f'Boosted Flow Density for c={c}', fontdict={'fontsize': 20})
def plot_boosted_inv_flow(model, batch_id, n_pts, batch_size, args):
    """
    plots transformed grid and density; where density is a gaussian interpolation of the model's samples

    Saves one PNG per component plus a 'final_' PNG for the full model into
    args.snap_dir.
    """
    # Build the output filename stem from the run's hyper-parameters.
    fname = f'{args.dataset}_{args.flow}_K{args.num_flows}_bs{args.batch_size}'
    fname += f'_C{args.num_components}_reg{int(100*args.regularization_rate):d}_{args.component_type}'
    fname += f'_{args.coupling_network}{args.coupling_network_depth}_hsize{args.h_size}' if args.component_type == 'realnvp' or args.flow == 'realnvp' else ''
    fname += '_annealed' if args.min_beta < 1.0 else ''
    fname += '_lr_scheduling' if not args.no_lr_schedule else ''

    Z = []
    # Until training has finished, only components up to the current one exist.
    num_components_to_plot = args.num_components if model.all_trained else model.component + 1
    for c in range(num_components_to_plot):
        # Sample base noise, push it through component c's forward flow, and
        # stack the resulting 2-D samples column-wise.
        zc = np.hstack([model.component_forward_flow(
            torch.randn(n_pts, 2).to(args.device) * model.base_dist_var + model.base_dist_mean, c)[0][-1].t().cpu().data.numpy() for _ in range(n_pts)])

        # Keep a rho-proportional share of the samples for the full-model plot.
        num_sampled = int(np.ceil(( model.rho[c] / model.rho.sum() ) * n_pts * n_pts))
        Z.append(zc[:, 0:num_sampled])

        # plot component c
        Hc, _, _ = np.histogram2d(zc[0], zc[1], bins=(np.arange(-4, 4, 0.05), np.arange(-4, 4, 0.05)))
        plt.figure(figsize=(12, 12))
        plt.imshow(Hc.T, interpolation='gaussian')
        plt.axis('off')
        plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
        plt.savefig(os.path.join(args.snap_dir, f'{c}_{fname}_step{batch_id:07d}.png'))

        if model.component == 0 and not model.all_trained:
            # don't bother plotting components that haven't been trained at all
            break

    # plot full model
    Z = np.hstack(Z)
    H, _, _ = np.histogram2d(Z[0], Z[1], bins=(np.arange(-4, 4, 0.05), np.arange(-4, 4, 0.05)))
    plt.figure(figsize=(12, 12))
    plt.imshow(H.T, interpolation='gaussian')
    plt.axis('off')
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
    plt.savefig(os.path.join(args.snap_dir, f'final_{fname}_step{batch_id:07d}.png'))
def plot_q0_density(model, ax, test_grid, n_pts, batch_size, args):
    """
    Plot the density of the model's base distribution q_0 over the test grid.
    """
    _, _, zz = test_grid

    # Per-dimension base log-probs summed, then exponentiated to a density.
    q0 = model.base_dist.log_prob(zz).sum(1).exp().cpu()

    grid_x = zz[:, 0].view(n_pts, n_pts).data
    grid_y = zz[:, 1].view(n_pts, n_pts).data
    ax.pcolormesh(grid_x, grid_y, q0.view(n_pts, n_pts).data, cmap=plt.cm.viridis)
    ax.set_facecolor(plt.cm.viridis(0.))
    ax.set_title('Base q_0 Density', fontdict={'fontsize': 20})
| [
11748,
28034,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
10688,
198,
6738,
304,
259,
2840,
1330,
37825,
858,
198,
11748,
18931,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
... | 2.042881 | 4,734 |
# # Copyright Clayton Brown 2019. See LICENSE file.
# import numpy
# from types import FunctionType
# from csaps import UnivariateCubicSmoothingSpline
# class ProbabilityDistribution:
# '''
# Function to handle probability distributions.
# Parameters
# ----------
# distributionFunction:
# FunctionType which takes in a single float value and returns a second float value.
# Describes a distribution function on the interval (0-1).
#         Function is not required to have an area of 1 in the interval (0-1); that interval is automatically normalized for you.
# Function must not be negative in the range (0-1). (Negative probability doesn't make much sense anyway)
# '''
# def __init__(self, distributionFunction: FunctionType):
# # Object Variables #
# # Function describing how the distribution should look.
# self.distributionFunction: FunctionType = None
# # The total area of the distribution function between 0 and 1
# self.areaModulation: float = None
# # The approximated integral for the distribution function between 0 and 1
# # This spline is also inverted. This means self.integralSpline(p), where p is between 0 and 1,
# # returns the x value where p percent of the area under the curve of self.distributionFunction in the range (0-1) is to the left.
# self.integralSpline: UnivariateCubicSmoothingSpline = None
#         # Set the appropriate distribution function
# self.setDistributionFunction(distributionFunction)
# def setDistributionFunction(self, distributionFunction: FunctionType, N: int = int(1e5)):
# '''
# Assigns the appropriate distribution function and calculates the integralSpline necessary to match that distribution function.
# Parameters
# ----------
# distributionFunction:
#             Distribution function to create the integral spline for.
# N:
# Number of points to use to create the spline approximation.
# Larger N (N > 1e5) will slow down initial generation, but be more accurate.
#             Smaller N (N < 1e3) will speed up generation significantly, but for complex functions will lose accuracy.
# '''
# self.distributionFunction = distributionFunction
# # Create the xData. This is just a linspace between 0 and 1.
# # xData2 is a version of this which is offset by one index to allow for the builtin map function to work for area generation
# xData1 = numpy.linspace(0.0, 1.0, num = N + 1)
# xData2 = numpy.roll(xData1, -1)
# # Create the yData. This maps the xData values at each point to self.distributionFunction(x).
# # Also create the yData2 variable which is equivalent to the xData2, but for yValues
# yData1 = numpy.array(list(map(self.distributionFunction, xData1)))
# yData2 = numpy.roll(yData1, -1)
# # Simple area function to use with map. Use midpoint approach.
# def areaFunction(x1, x2, y1, y2):
# return (x2 - x1) * (y1 + y2) / 2
# # Get a numpy array of the areas of each trapezoid
# areas = numpy.array(list(map(areaFunction, xData1[:-1], xData2[:-1], yData1[:-1], yData2[:-1])))
# # areaModulation describes the total area underneath the probability distribution in the range (0-1)
# self.areaModulation = numpy.sum(areas)
# # Calculate the total area (the integral) at each xValue
# totalAreas = numpy.zeros((N + 1,))
# for i in range(totalAreas.shape[0] - 1):
# totalAreas[i + 1] = totalAreas[i] + areas[i]
# # Use those areas to create the integral spline
# self.integralSpline = UnivariateCubicSmoothingSpline(totalAreas, xData1)
# def __call__(self, n: int = 1):
# '''
# Generate random numbers according to the loaded distribution.
# Parameters
# ----------
# n: the number of random numbers to generate. If n == 1: will return a float instead of a numpy array
# '''
#         # Create the random numbers according to the integral spline.
# randomNumbers = self.integralSpline(numpy.random.random((n,)) * self.areaModulation) #pylint: disable=no-member
# # Return type float if n is set to 1
# if n == 1:
# return randomNumbers[0]
# # Otherwise just return a numpy array of random values according to the desired probability distribution.
# return randomNumbers
# # Testing call
# if __name__ == "__main__":
# from matplotlib import pyplot as plt
# def lightnesDistribution(x): return (.5 - abs(x - .5))**2
# def chromaDistribution(x): return x
# D = ProbabilityDistribution(chromaDistribution)
# plt.hist(D(10000000), 250)
# plt.show()
| [
2,
1303,
15069,
32108,
4373,
13130,
13,
4091,
38559,
24290,
2393,
13,
201,
198,
201,
198,
2,
1330,
299,
32152,
201,
198,
201,
198,
2,
422,
3858,
1330,
15553,
6030,
201,
198,
2,
422,
50115,
1686,
1330,
791,
42524,
43632,
291,
7556,
1... | 2.521454 | 1,981 |
"""
API core
"""
from functools import wraps
from flask import jsonify
from ..common import core
def create_app():
    """ Returns API application instance """
    application = core.create_app(__name__, __path__)

    # Register custom error handlers here as they are introduced, e.g.:
    # application.errorhandler(ExampleError)(on_example_error)
    application.errorhandler(404)(on_404)

    return application
| [
37811,
198,
220,
220,
220,
7824,
4755,
198,
37811,
198,
6738,
1257,
310,
10141,
1330,
27521,
198,
6738,
42903,
1330,
33918,
1958,
198,
198,
6738,
11485,
11321,
1330,
4755,
198,
198,
4299,
2251,
62,
1324,
33529,
198,
220,
220,
220,
37227... | 3.008621 | 116 |
import torch
import torch.nn as nn
class Coder(nn.Module):
    """
    Abstract base for encoder and decoder modules. All new encoders and
    decoders should derive from this class; subclasses override
    :meth:`forward` with the actual computation.
    """

    def __init__(self, num_in, num_out, in_dim):
        """
        Parameters
        ----------
        num_in: int
            Number of input units for a forward pass of the coder.
        num_out: int
            Number of output units from a forward pass of the coder.
        in_dim: in_dim
            Dimension of flattened inputs to the coder.
            NOTE(review): accepted but not stored by this base class.
        """
        super().__init__()
        self.num_out = num_out
        self.num_in = num_in

    def forward(self, in_data):
        """
        No-op in the base class; subclasses implement the real forward pass.

        Parameters
        ----------
        in_data: ``torch.autograd.Variable``
            Input data for a forward pass of the coder.
        """
        pass
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
628,
198,
4871,
327,
12342,
7,
20471,
13,
26796,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
7308,
1398,
329,
15427,
2207,
375,
364,
290,
875,
375,
364,
13,
1439,
64... | 2.116114 | 422 |
import abc
from abc import abstractmethod
from dataclasses import dataclass
from typing import List, Optional, Type, Union
from nuplan.common.actor_state.state_representation import StateSE2
from nuplan.common.maps.abstract_map import AbstractMap
from nuplan.common.maps.maps_datatypes import TrafficLightStatusData
from nuplan.planning.simulation.history.simulation_history_buffer import SimulationHistoryBuffer
from nuplan.planning.simulation.observation.observation_type import Observation
from nuplan.planning.simulation.simulation_time_controller.simulation_iteration import SimulationIteration
from nuplan.planning.simulation.trajectory.abstract_trajectory import AbstractTrajectory
@dataclass(frozen=True)
class PlannerInitialization:
    """
    This class represents required data to initialize a planner.

    Immutable (frozen=True), so instances can be shared without copying.
    """

    expert_goal_state: StateSE2  # The state which was achieved by expert driver in a scenario
    route_roadblock_ids: List[str]  # Roadblock ids comprising goal route
    mission_goal: StateSE2  # The mission goal which commonly is not achievable in a single scenario
    map_api: AbstractMap  # The API towards maps.
@dataclass(frozen=True)
class PlannerInput:
    """
    Input to a planner for which a trajectory should be computed.

    Immutable (frozen=True), so instances can be shared without copying.
    """

    iteration: SimulationIteration  # Iteration and time in a simulation progress
    history: SimulationHistoryBuffer  # Rolling buffer containing past observations and states.
    traffic_light_data: Optional[List[TrafficLightStatusData]] = None  # The traffic light status data
class AbstractPlanner(abc.ABC):
"""
Interface for a generic ego vehicle planner.
"""
# Whether this planner can consume multiple scenarios at once during inference.
# If this is false, only one simulation scenario will be run at a time.
consume_batched_inputs: bool = False
# Whether the planner requires the scenario object to be passed at construction time.
# This can be set to true only for oracle planners and cannot be used for submissions.
requires_scenario: bool = False
@abstractmethod
def name(self) -> str:
"""
:return string describing name of this planner.
"""
pass
@abc.abstractmethod
def initialize(self, initialization: List[PlannerInitialization]) -> None:
"""
Initialize planner
:param initialization: List of initialization classes.
This is a list only on case consume_batched_inputs is True, otherwise it has a single entry in list
In this case the list represents batched simulations.
"""
pass
@abc.abstractmethod
def observation_type(self) -> Type[Observation]:
"""
:return Type of observation that is expected in compute_trajectory.
"""
pass
@abc.abstractmethod
def compute_trajectory(self, current_input: List[PlannerInput]) -> List[AbstractTrajectory]:
"""
Computes the ego vehicle trajectory.
:param current_input: List of planner inputs for where for each of them trajectory should be computed
In this case the list represents batched simulations. In case consume_batched_inputs is False
the list has only single element
:return: Trajectories representing the predicted ego's position in future for every input iteration
In case consume_batched_inputs is False, return only a single trajectory in a list.
"""
pass
def compute_single_trajectory(self, current_input: PlannerInput) -> AbstractTrajectory:
"""
Compute trajectory only for a single planner input
:param current_input: input to the planner
:return: Trajectories representing the predicted ego's position in future for every input iteration
In case consume_batched_inputs is False, return only a single trajectory in a list.
"""
return self.compute_trajectory([current_input])[0]
def compute_trajectory_with_check(self, current_input: List[PlannerInput]) -> List[AbstractTrajectory]:
"""
Computes the ego vehicle trajectory, where we check that if planner can not consume batched inputs,
we require that the input list has exactly one element
:param current_input: List of planner inputs for where for each of them trajectory should be computed
In this case the list represents batched simulations. In case consume_batched_inputs is False
the list has only single element
:return: Trajectories representing the predicted ego's position in future for every input iteration
In case consume_batched_inputs is False, return only a single trajectory in a list.
"""
self.validate_inputs(current_input)
return self.compute_trajectory(current_input)
def initialize_with_check(self, initialization: List[PlannerInitialization]) -> None:
    """
    Initialize the planner after checking that the batch size is compatible with
    this planner: a planner that cannot consume batched inputs must receive
    exactly one initialization.
    :param initialization: List of initialization classes, one per batched simulation.
        When consume_batched_inputs is False the list must have a single element.
    :raises RuntimeError: if the list size is incompatible (see validate_inputs).
    """
    self.validate_inputs(initialization)
    return self.initialize(initialization)
def validate_inputs(self, data: Union[List[PlannerInput], List[PlannerInitialization]]) -> None:
    """
    Check that the number of provided inputs is compatible with this planner.
    :param data: planner inputs or initializations to validate.
    :raises RuntimeError: if the list is empty, or if it contains more than one
        entry while this planner cannot consume batched inputs.
    """
    batch_size = len(data)
    if batch_size == 0:
        raise RuntimeError("The inputs to the planner can not be an empty list!")
    if batch_size > 1 and not self.consume_batched_inputs:
        raise RuntimeError(
            f"Planner: {self.name()} can not consume batched inputs, but {batch_size} inputs was provided!"
        )
| [
11748,
450,
66,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
6738,
19720,
1330,
7343,
11,
32233,
11,
5994,
11,
4479,
198,
198,
6738,
14364,
11578,
13,
11321,
13,
11218,
62,
5219,
13... | 3.08042 | 2,002 |
from tworaven_apps.solver_interfaces.util_search import (
SearchAutoSklearn,
SearchCaret,
SearchH2O,
SearchTPOT,
SearchLudwig,
SearchMLJarSupervised,
SearchMLBox,
SearchTwoRavens)
| [
6738,
665,
273,
4005,
62,
18211,
13,
82,
14375,
62,
3849,
32186,
13,
22602,
62,
12947,
1330,
357,
198,
220,
220,
220,
11140,
27722,
15739,
35720,
11,
198,
220,
220,
220,
11140,
34,
8984,
11,
198,
220,
220,
220,
11140,
39,
17,
46,
... | 2.377778 | 90 |
# contains utility functions relevant for image and coordinate transformation
from __future__ import division
from itertools import combinations
import json
import numpy as np
import cv2
def get_best_transform(x, y):
    """
    Given a set of N corresponding points in x and y space, finds the best
    (lowest average error) perspective transform estimated from any subset of
    4 points using OpenCV's getPerspectiveTransform.
    x, y - (N, 2) arrays of corresponding points, N >= 4
    returns - 3 x 3 transformation matrix M (0 if no 4-point subset exists)
    """
    x = np.float32(x)
    y = np.float32(y)
    all_idx = [i for i in range(0, len(x))]
    combos = tuple(combinations(all_idx, 4))

    min_err = np.inf
    bestM = 0
    for comb in combos:
        # candidate transform estimated from this 4-point subset
        M = cv2.getPerspectiveTransform(x[comb, :], y[comb, :])
        xtf = transform_pt_array(x, M)
        # NOTE(review): avg_transform_error is defined elsewhere in this file
        err = avg_transform_error(xtf, y)
        if err < min_err:
            min_err = err
            bestM = M
    return bestM
def transform_pt_array(point_array, M):
    """
    Apply the 3 x 3 homogeneous transformation matrix M to every (x, y) pair
    stored in point_array, preserving the array's original shape.
    point_array - array whose entries form consecutive (x, y) pairs
    M - 3 x 3 perspective transformation matrix
    returns - transformed points, same shape as point_array
    """
    original_shape = point_array.shape
    pts = point_array.reshape((-1, 2))

    # lift to homogeneous coordinates: (x, y) -> (x, y, 1)
    homogeneous = np.hstack((pts, np.ones((pts.shape[0], 1))))

    # apply M, then divide out the projective scale component
    mapped = homogeneous @ M.T
    planar = mapped[:, :2] / mapped[:, 2:3]

    return planar.reshape(original_shape)
def transform_obj_list(object_list, M, M2=None):
    """
    Project every tracked object's image-space points into world image space
    (and optionally GPS coordinate space), storing the results on the objects.
    object_list - list of KFObjects; obj.all holds the per-frame point rows
    M  - 3 x 3 camera -> world-image transformation matrix
    M2 - None, or 3 x 3 camera -> GPS-coordinate transformation matrix
    returns - object_list, with .all_world (and .all_gps when M2 is given) set
    """
    for obj in object_list:
        pts = obj.all
        n = len(pts)
        # homogeneous coordinates: keep columns x, y and the appended 1s column
        homo = np.concatenate((pts, np.ones([n, 1])), 1)[:, [0, 1, 4]]

        def _project(matrix):
            # apply the transform and divide out the projective scale
            mapped = np.transpose(np.matmul(matrix, np.transpose(homo)))
            out = np.zeros([n, 2])
            out[:, 0] = mapped[:, 0] / mapped[:, 2]
            out[:, 1] = mapped[:, 1] / mapped[:, 2]
            return out

        obj.all_world = _project(M)
        if M2 is not None:
            obj.all_gps = _project(M2)
    return object_list
def write_json(object_list, metadata, num_frames, out_file=None):
    """
    Serialize per-frame object states into a JSON-ready dict, optionally
    dumping it to a file.
    object_list - tracked objects with .first_frame, .cls, .tags, .all,
                  .all_world and .all_gps populated
    metadata    - dict describing the recording (camera id, start time, ...)
    num_frames  - number of frames to serialize (frames 0 .. num_frames - 1)
    out_file    - optional path; when given, the result is written as JSON
    returns     - {"metadata": ..., "data": {frame_num: [vehicle dicts]}}
    """
    classes = ["person","bicycle","car","motorbike","NA","bus","train","truck"]

    data = {}
    for frame_num in range(0, num_frames):
        frame_data = []
        for obj_id, obj in enumerate(object_list):
            # only objects whose lifetime covers this frame contribute an entry
            idx = frame_num - obj.first_frame
            if idx < 0 or idx >= len(obj.all):
                continue
            frame_data.append({
                "id_num": obj_id,
                "class": classes[int(obj.cls)],
                "detected": obj.tags[idx],
                "image_position": (obj.all[idx]).tolist(),
                "world_position": (obj.all_world[idx]).tolist(),
                "gps_position": (obj.all_gps[idx]).tolist(),
            })
        data[frame_num] = frame_data

    all_data = {
        "metadata": metadata,
        "data": data
    }

    if out_file is not None:
        with open(out_file, 'w') as fp:
            json.dump(all_data, fp)

    return all_data
| [
2,
4909,
10361,
5499,
5981,
329,
2939,
290,
20435,
13389,
198,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
220,
628,
... | 2.002019 | 2,477 |
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
# Copyright © 2019 André Santos
###############################################################################
# Imports
###############################################################################
from builtins import map
from builtins import object
from builtins import range # Python 2 and 3: forward-compatible
###############################################################################
# Field Selector
###############################################################################
###############################################################################
# Predicates
###############################################################################
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
17168,
198,
2,
15069,
10673,
13130,
843,
29350,
28458,
198,
198,
29113,
29113,
7804,
4242,
21017,
198,
2,
1846,
3742,
... | 6.327586 | 116 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 1 11:46:37 2021
@author: colin
"""
import json
import argparse
import os


def is_file(path):
    """argparse type-checker: accept only paths to existing files.

    Previously this name was referenced by the parser but never defined,
    so running the script raised NameError.
    """
    if not os.path.isfile(path):
        raise argparse.ArgumentTypeError(f"{path} is not an existing file")
    return path


parser = argparse.ArgumentParser(description="compare two amino alphabet clusterings")
parser.add_argument('-f', '--files', type=is_file, nargs=2, default=None,
                    help='pass two .json file names separated by a space')
args = parser.parse_args()

if args.files is None:
    # -f was omitted; fail with a clear message instead of crashing on None[0].
    parser.error("two .json files must be supplied with -f/--files")

f1 = args.files[0]
f2 = args.files[1]

# context managers guarantee the files are closed even on a parse error
with open(f1) as file1, open(f2) as file2:
    dict1 = json.load(file1)
    dict2 = json.load(file2)

print('Amino Alphabet Differences for F1=' + f1 + ', F2=' + f2)
print('Amino\tF1\tF2')
for key, value in dict1.items():
    # compare with != (value equality); `is not` tested object identity, which
    # is unreliable for strings and could report spurious differences
    if dict2[key] != value:
        print(key + '\t' + value + '\t' + dict2[key])
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
8621,
220,
352,
1367,
25,
3510,
25,
2718,
33448,
198,
198,
31,
9800,
25,
951,
259,
... | 2.335385 | 325 |
# Module-level mutable globals shared across the application.

# Fingerprint of the currently loaded configuration; None until a config is loaded.
config_fingerprint = None
# The active configuration object; None until initialized elsewhere.
config = None
# Log writer handle; None until logging is set up elsewhere.
log_writer = None
# Global train/eval mode flag; presumably toggled by the training loop -- TODO confirm.
isTrain = True
11250,
62,
35461,
4798,
796,
6045,
198,
11250,
796,
6045,
198,
6404,
62,
16002,
796,
6045,
198,
271,
44077,
796,
6407
] | 3.428571 | 21 |
# Based on:
# https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
| [
2,
13403,
319,
25,
198,
2,
3740,
1378,
12567,
13,
785,
14,
17694,
1134,
316,
726,
14,
4421,
12,
411,
3262,
13,
9078,
13165,
354,
14,
2436,
672,
14,
9866,
14,
3262,
5225,
14,
4421,
62,
411,
3262,
13,
9078,
198,
198,
11748,
28034,
... | 2.698413 | 63 |
import os
from flask import Flask, request, jsonify, make_response
from transformation.generator_handler import GeneratorHandler
from transformation.generators.documents_output_generator import DocumentsOutputGenerator
from transformation.generators.generator_register import GeneratorRegister
from metamodel.model import Model
from transformation.data_manipulation import DataManipulation, VersionUnavailableError
if __name__ == '__main__':
    # Restore previously persisted state (dill pickles) for data and generators.
    data_manipulation = DataManipulation()
    data_manipulation = data_manipulation.load_from_dill()
    handler = GeneratorHandler().load_from_dill()
    # NOTE(review): `create_app` is not among this module's visible imports --
    # presumably defined elsewhere in this file; verify before refactoring.
    app = create_app(data_manipulation, handler)
    # Reloader disabled so the state loaded above is not re-imported twice.
    app.run(use_reloader=False)
| [
11748,
28686,
198,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
33918,
1958,
11,
787,
62,
26209,
198,
6738,
13389,
13,
8612,
1352,
62,
30281,
1330,
35986,
25060,
198,
6738,
13389,
13,
8612,
2024,
13,
15390,
2886,
62,
22915,
62,
8612,
... | 3.546875 | 192 |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def date_process(data):
    """
    Aggregate tick data into 5-minute intraday volume buckets, grouped by day.
    :param data: original tick DataFrame with "date", "time", "price", "size" columns
    :return: groups: dict mapping each date (Period[D]) to the Index of its
             non-empty 5-minute bucket volume sizes
    """
    # NOTE(review): this keeps only rows containing at least one NaN -- original
    # behaviour preserved, but confirm the filter direction is intentional.
    # .copy() avoids SettingWithCopyWarning on the assignments below.
    data_filter = data[data.isna().any(axis=1)].copy()
    data_filter["reshape_time"] = pd.to_datetime(
        data_filter["date"] + data_filter["time"], format='%Y-%m-%d%H:%M:%S.%f')
    data_filter.index = data_filter["reshape_time"].values

    # 5-minute bars ("5min" replaces the deprecated "5T" alias);
    # numeric_only=True restores pandas-1.x behaviour of dropping the
    # string/datetime columns instead of raising under pandas >= 2.
    result = data_filter.resample("5min").sum(numeric_only=True)
    # drop buckets where every aggregated column is zero (no trades)
    result = result.loc[(result != 0).any(axis=1)]

    result["time"] = result.index
    result["date"] = result["time"].dt.to_period("D")
    result = result.drop(columns=["price", "time"])
    result.index = result["size"].values
    groups = result.groupby("date").groups
    return groups
def plot_intraday_volumn(groups, Tnum):
    """
    Plot the relative 5-minute volume curve for the first Tnum days, showing
    the typical U-shaped intraday volume profile, and save it to a PNG.
    :param groups: dict mapping date -> per-bucket volume sizes
    :param Tnum: number of (leading) days from groups to draw
    """
    day_keys = list(groups.keys())
    plt.figure(0, figsize=(12, 8))
    for day_idx in range(Tnum):
        sizes = np.array(groups[day_keys[day_idx]])
        # normalized time-of-day axis and per-bucket volume share
        t = np.arange(0, 1, 1 / len(sizes))
        plt.plot(t, sizes / sum(sizes), label="Date" + str(day_idx + 1))
    plt.xlabel('time', fontsize=17)
    plt.ylabel('Relative Volumn', fontsize=17)
    plt.legend(fontsize=12)
    plt.title("U-shape intraday relative volumn", fontsize=23)
    # save before adding the grid so the file matches the historical output
    plt.savefig("real_volume.png")
    plt.grid()
    plt.show()
def statistic_result(groups, Tnum):
    """
    Compute per-bucket statistics of the cumulative relative volume curve
    across the first Tnum days.
    :param groups: dict mapping date -> per-bucket volume sizes
    :param Tnum: number of (leading) days from groups to include
    :return: DataFrame with "mean", "std", "var" of the relative cumulative
             volume per bucket, plus a normalized "time" column on (0, 1]
    """
    keys = list(groups.keys())
    df_relative_volumn = pd.DataFrame()
    for i in np.arange(0, Tnum):
        key_ = keys[i]
        size = np.array(groups[key_])
        cum_size = np.cumsum(size)
        relative_size = cum_size / cum_size[-1]
        try:
            df_relative_volumn[str(key_)] = relative_size
        except ValueError:
            # a day with a different number of buckets cannot join the frame;
            # skip it (the former bare `except:` swallowed every error)
            print("not match in date:" + str(key_))

    result = pd.DataFrame()
    result["mean"] = df_relative_volumn.mean(axis=1)
    result["std"] = df_relative_volumn.std(axis=1)
    result["var"] = df_relative_volumn.var(axis=1)
    # bucket end times on a normalized (0, 1] intraday axis
    result["time"] = np.arange(0, 1, 1 / len(result)) + 1 / len(result)
    return result
if __name__ == '__main__':
    # Load raw tick data, bucket it into 5-minute volumes, plot the first few
    # days, and export aggregate statistics to CSV.
    trades0700_path = "trades0700.csv"
    data = pd.read_csv(trades0700_path, sep=',')
    groups = date_process(data)
    keys = list(groups.keys())
    keys_num = len(keys)  # NOTE(review): currently unused
    # visualize the first 4 days, then aggregate statistics over 70 days
    plot_intraday_volumn(groups, 4)
    EVresults = statistic_result(groups, 70)
    EVresults.to_csv(r'statistic_result.csv')
| [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
628,
198,
4299,
3128,
62,
14681,
7,
7890,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
1058,
... | 2.35608 | 1,143 |
from threading import Thread
from globalFile import GlobalData
import pyaudio
import base64
| [
6738,
4704,
278,
1330,
14122,
198,
6738,
3298,
8979,
1330,
8060,
6601,
198,
11748,
12972,
24051,
198,
11748,
2779,
2414,
628
] | 4.428571 | 21 |
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import urllib
from rest_framework import serializers
from rest_framework.reverse import reverse
from aether.sdk.auth.utils import parse_username, unparse_username, user_to_string
from aether.sdk.multitenancy.utils import get_path_realm
class FilteredHyperlinkedRelatedField(HyperlinkedRelatedField):
    '''
    This custom field does essentially the same thing as
    ``serializers.HyperlinkedRelatedField``.

    The only difference is that the url of a foreign key relationship will be
    a filtered list endpoint:

    {
        ...
        'children_url': '/children?parent=<parent-id>'
        ...
    }

    Instead of a nested detail route:

    {
        ...
        'children_url': '/parent/<parent-id>/children/'
        ...
    }
    '''
    # NOTE(review): the method overrides implementing this behaviour are not
    # visible in this chunk of the file.
# Pattern from:
# https://www.django-rest-framework.org/api-guide/serializers/#dynamically-modifying-fields
class DynamicFieldsSerializerMixin(object):
    '''
    Add additional functionality to Serializers adding two arguments,
    ``fields`` and ``omit``, that control which fields should be displayed.
    '''
class UsernameField(serializers.Field):
    '''
    Custom serializer for username fields.

    The internal value is the username with the "realm" (parsed).
    The displayed value is the username without the "realm" (unparsed).
    '''
    # NOTE(review): presumably implemented via parse_username/unparse_username
    # (imported above); the method bodies are not visible in this chunk.
class UserNameField(serializers.ReadOnlyField, serializers.RelatedField):
    '''
    Custom read-only serializer to display a user's full name
    (presumably via ``user_to_string`` imported above -- confirm).
    '''
| [
2,
15069,
357,
34,
8,
13130,
416,
304,
18081,
5478,
1058,
2638,
1378,
2503,
13,
68,
18081,
17584,
30997,
13,
2398,
198,
2,
198,
2,
4091,
262,
28536,
2393,
9387,
351,
428,
670,
329,
3224,
1321,
198,
2,
5115,
6634,
9238,
13,
198,
2,... | 3.095989 | 698 |
import numpy as np
import matplotlib
# select the TkAgg backend before pyplot is imported (needed for GUI display)
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
import csv
from scipy.optimize import curve_fit
# Per-frame timing samples read from the profiler CSV (column 0 is unused).
frameDeltas = []
physicsDeltas = []
openGLDeltas = []
drawCallDeltas = []
particleCounts = []

rowCount = 5120   # number of frames analysed
fifth = 1024      # chunk size: the run is averaged in 5 equal chunks

with open('analysisResultSpaceOpenCL.csv', newline='\n') as csvfile:
    analysisResultsReader = csv.reader(csvfile, delimiter=',')
    for row in analysisResultsReader:
        frameDeltas.append(row[1])
        physicsDeltas.append(row[2])
        openGLDeltas.append(row[3])
        drawCallDeltas.append(row[4])

with open('particleAtFrameCount.csv', newline='\n') as csvfile:
    frameCountParticleCountReader = csv.reader(csvfile, delimiter=',')
    for row in frameCountParticleCountReader:
        particleCounts.append(row[1])

# skip the 10-frame warm-up, keep exactly rowCount samples
frameDeltas = frameDeltas[10:rowCount + 10]
physicsDeltas = physicsDeltas[10:rowCount + 10]
openGLDeltas = openGLDeltas[10:rowCount + 10]
drawCallDeltas = drawCallDeltas[10:rowCount + 10]
particleCounts = particleCounts[10:rowCount + 10]


def fifth_averages(values):
    """Convert CSV strings to floats and return (array, mean of each of the
    5 chunks of `fifth` frames).  Note: np.float was removed in NumPy 1.24,
    so the builtin float is used for the dtype."""
    arr = np.array(values[0:rowCount]).astype(float)
    return arr, tuple(np.mean(arr[i * fifth:(i + 1) * fifth]) for i in range(5))


frameDeltas, frameDeltasAverages = fifth_averages(frameDeltas)
physicsDeltas, physicsDeltasAverages = fifth_averages(physicsDeltas)
openGLDeltas, openGLDeltasAverages = fifth_averages(openGLDeltas)
drawCallDeltas, drawCallDeltasAverages = fifth_averages(drawCallDeltas)

ind = np.arange(5)  # the x locations for the groups
width = 0.35        # the width of the bars

# stacked breakdown (physics + OpenGL + draw call) next to the overall frame delta
y_offset = physicsDeltasAverages
pFrameDelta = plt.bar(ind, frameDeltasAverages, width)
pPhysicsDelta = plt.bar(ind + width, physicsDeltasAverages, width)
pOpenGlDelta = plt.bar(ind + width, openGLDeltasAverages, width, bottom=y_offset)
y_offset = np.sum([y_offset, openGLDeltasAverages], axis=0)
pdrawCallDelta = plt.bar(ind + width, drawCallDeltasAverages, width, bottom=y_offset)

plt.title('Average Frame Delta Breakdown')
plt.ylabel('Time (second)')
plt.xlabel('Frame Average Groupings')
plt.xticks(ind + (width / 2), ('0-1024', '1024-2048', '2048-3072', '3072-4096', '4096-5120'))
plt.legend((pFrameDelta[0], pPhysicsDelta[0], pOpenGlDelta[0], pdrawCallDelta[0]),
           ('overall frame delta', 'physics delta', 'OpenGL buffer loading delta', 'draw call delta'))
plt.show()

# frame rate at each frame
FrameRate = [1 / fd for fd in frameDeltas]

# group frame rates by the particle count active at that frame
fRforPaticles = {}
for i in range(rowCount):
    fRforPaticles.setdefault(particleCounts[i], []).append(FrameRate[i])

X = []
Y = []
# sort numerically: the keys are strings read from the CSV, and lexicographic
# ordering would scramble the x axis
for pc in sorted(fRforPaticles, key=float):
    Y.append(np.mean(fRforPaticles[pc]))
    X.append(pc)
X = np.array(list(map(float, X)))

plt.title('Average Frame Rate vs Particle Count')
plt.ylabel('Frame Rate (FPS)')
plt.xlabel('Number of Particles')
plt.plot(X, Y, '.')
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
10786,
51,
74,
46384,
11537,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,
269,
21370,
198,
6738,
629,
541,
88... | 2.376398 | 1,788 |
import os
import unittest
import collections
from wordpress_xmlrpc import Client
from wordpress_xmlrpc.compat import ConfigParser
| [
11748,
28686,
201,
198,
11748,
555,
715,
395,
201,
198,
11748,
17268,
201,
198,
201,
198,
6738,
1573,
8439,
62,
19875,
81,
14751,
1330,
20985,
201,
198,
6738,
1573,
8439,
62,
19875,
81,
14751,
13,
5589,
265,
1330,
17056,
46677,
201,
1... | 3.159091 | 44 |
import pytest
import cptac
import sys
# TODO: figure out what cwd of pytest is
# make the tests package importable regardless of pytest's working directory
sys.path.insert(0, "cptac/tests/")
from tests.cancer import Cancer
import curses
# Setting autouse=True here makes it so that this method always runs before any tests
@pytest.fixture(scope="session", autouse=True)
def get_datasets_lists():
    '''
    Returns: a dict of dataset lists
    keys = ["public", "restricted"]
    (datasets are partitioned by their "Data reuse status")
    '''
    print(f"Getting dataset lists (public and private)...", end='\r')
    data = cptac.list_datasets()["Data reuse status"]
    # a dataset is public iff its reuse status imposes no restrictions
    public_datasets = [name for name in data.index if data[name] == "no restrictions"]
    restricted_datasets = [name for name in data.index if data[name] != "no restrictions"]
    return {"public": public_datasets, "restricted": restricted_datasets}
### Download all datasets
# Must have autouse=True or else this never gets called
# NOTE(review): the @pytest.fixture decorator was duplicated here (two in a
# row), which makes pytest reject the fixture; a single decorator is kept.
@pytest.fixture(scope="session", autouse=True)
def get_cancer_test_units(get_datasets_lists):
    '''
    Returns: a list of Cancer test wrappers, one per public dataset.
    (The docstring previously described a dict; the function returns a list.)
    '''
    cancer_wrappers = list()
    for cancer_name in get_datasets_lists["public"]:
        c = getattr(cptac, cancer_name)
        try:
            print(f"Creating {c} object...", end='\r')
            cancer_wrappers.append(Cancer(cancer_name, c()))
        except Exception:
            # narrowed from a bare `except:`, which would also have swallowed
            # KeyboardInterrupt/SystemExit
            pytest.fail(f"unable to create {c} object")
    return cancer_wrappers
| [
11748,
12972,
9288,
198,
11748,
269,
457,
330,
198,
11748,
25064,
198,
2,
16926,
46,
25,
3785,
503,
644,
269,
16993,
286,
12972,
9288,
318,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
366,
66,
457,
330,
14,
41989,
14,
4943,
198,
6... | 2.486903 | 649 |
import cv2
import numpy as np
# Interactive HSV threshold tuner: sliders adjust the mask bounds while the
# masked result is displayed and saved.
filename = '33-1.jpg'

img = cv2.imread(filename)  # read image
img = cv2.resize(img, (640, 480))  # resize it
img = cv2.blur(img, (3, 3))  # blur to remove noise

hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # BGR to HSV

cv2.namedWindow('Result Image (BGR)')  # window to display image

# HSV threshold initial values (tuned for a green/yellow hue band)
H_min = 23
H_max = 47
S_min = 100
S_max = 255
V_min = 100
V_max = 255

# Creates a trackbar to test values
# NOTE(review): `trackbar_callback` is not defined in this file's visible code
# -- confirm it exists elsewhere before running.
cv2.createTrackbar('H_min', 'Result Image (BGR)', H_min, 255, trackbar_callback)
cv2.createTrackbar('H_max', 'Result Image (BGR)', H_max, 255, trackbar_callback)
cv2.createTrackbar('S_min', 'Result Image (BGR)', S_min, 255, trackbar_callback)
cv2.createTrackbar('S_max', 'Result Image (BGR)', S_max, 255, trackbar_callback)
cv2.createTrackbar('V_min', 'Result Image (BGR)', V_min, 255, trackbar_callback)
cv2.createTrackbar('V_max', 'Result Image (BGR)', V_max, 255, trackbar_callback)

# Should loop over a video frame by frame here
while True:
    # Read updated values from trackbar
    H_min = cv2.getTrackbarPos('H_min', 'Result Image (BGR)')
    H_max = cv2.getTrackbarPos('H_max', 'Result Image (BGR)')
    S_min = cv2.getTrackbarPos('S_min', 'Result Image (BGR)')
    S_max = cv2.getTrackbarPos('S_max', 'Result Image (BGR)')
    V_min = cv2.getTrackbarPos('V_min', 'Result Image (BGR)')
    V_max = cv2.getTrackbarPos('V_max', 'Result Image (BGR)')

    # Set mask threshold
    lower_green = np.array([H_min, S_min, V_min])
    upper_green = np.array([H_max, S_max, V_max])

    # Apply mask, get HSV image, convert to BGR
    mask = cv2.inRange(hsv, lower_green, upper_green)
    res_hsv = cv2.bitwise_and(hsv, hsv, mask=mask)
    res_bgr = cv2.cvtColor(res_hsv, cv2.COLOR_HSV2BGR)

    # Display image
    cv2.imshow('Result Image (BGR)', res_bgr)

    # save image, use counter in case of video file
    save_file = 'extracted.jpg'
    cv2.imwrite(save_file, res_bgr)

    # Press ESC key to exit
    key = cv2.waitKey(33)
    if key == 27:
        break

# Finish
cv2.destroyAllWindows()
import numpy as np
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
628,
198,
198,
34345,
796,
705,
2091,
12,
16,
13,
9479,
6,
198,
198,
9600,
796,
269,
85,
17,
13,
320,
961,
7,
34345,
8,
220,
1303,
1100,
2939,
198,
9600,
796,
269,
85,
17,
... | 2.353075 | 878 |
"""Core of the PDFfitx."""
from typing import Dict, Union
from diffpy.srfit.equation.builder import EquationFactory
from diffpy.srfit.fitbase import FitRecipe, FitContribution
from diffpy.srfit.fitbase import FitResults
from diffpy.srfit.pdf import PDFGenerator, DebyePDFGenerator
class MyContribution(FitContribution):
    """The FitContribution with augmented features."""
    # NOTE(review): the decorated property bodies appear to be missing here
    # (bare @property/@xname.setter markers with no functions); this is not
    # valid Python as-is -- restore the definitions before running.
    @property
    @property
    @property
    @xname.setter
class MyRecipe(FitRecipe):
    """The FitRecipe interface with augmented features."""
    # NOTE(review): a bare @property marker with no function body follows;
    # the decorated definition appears to be missing from this chunk.
    @property
class MyFitResults(FitResults):
    """The augmented fit result interface.

    Currently a pure alias of FitResults; behaviour is inherited unchanged.
    """
    pass
| [
37811,
14055,
286,
262,
12960,
11147,
87,
526,
15931,
198,
6738,
19720,
1330,
360,
713,
11,
4479,
198,
198,
6738,
814,
9078,
13,
27891,
11147,
13,
4853,
341,
13,
38272,
1330,
7889,
341,
22810,
198,
6738,
814,
9078,
13,
27891,
11147,
1... | 3.284974 | 193 |
#!/usr/bin/env python
"""
Class :py:class:`SegGeometryEpixHR2x2V1` describes the EpixHR2x2V1 sensor geometry
===================================================================================
In this class we use natural matrix notations like in data array
\n We assume that
\n * sensor consists of 2x2 ASICs has 288 rows and 384 columns,
\n * Epix10ka has a pixel size 100x100um, wide pixel size 100x225um
\n * X-Y coordinate system origin is in the sensor center,
\n * pixel (r,c)=(0,0) is in the top left corner of the matrix, has coordinates (xmin,ymax), as shown below
\n ::
(Xmin,Ymax) ^ Y (Xmax,Ymax)
(0,0) | (0,383)
------------------------------
| | |
| | |
| | |
| | |
| | |
| | |
| | |
--|-------------+--------------|----> X
| | |
| | |
| | |
| | |
| | |
| | |
| | |
------------------------------
(287,0) | (287,383)
(Xmin,Ymin) (Xmax,Ymin)
Usage::
    from SegGeometryEpixHR2x2V1 import epixhr2x2_one as sg
sg.print_seg_info(0o377)
size_arr = sg.size()
rows = sg.rows()
cols = sg.cols()
shape = sg.shape()
pix_size = pixel_scale_size()
area = sg.pixel_area_array()
mask = sg.pixel_mask_array(mbits=0o377, width=5, wcentral=5)
# where mbits = +1-edges, +2-wide pixels
sizeX = sg.pixel_size_array('X')
sizeX, sizeY, sizeZ = sg.pixel_size_array()
X = sg.pixel_coord_array('X')
X,Y,Z = sg.pixel_coord_array()
logger.info('X.shape =' + str(X.shape))
xmin, ymin, zmin = sg.pixel_coord_min()
xmax, ymax, zmax = sg.pixel_coord_max()
xmin = sg.pixel_coord_min('X')
ymax = sg.pixel_coord_max('Y')
# global method for rotation of numpy arrays:
Xrot, Yrot = rotation(X, Y, C, S)
...
See:
* :py:class:`GeometryObject`
* :py:class:`SegGeometry`
* :py:class:`SegGeometryCspad2x1V1`
* :py:class:`SegGeometryEpixHR2x2V1`
* :py:class:`SegGeometryEpix10kaV1`
* :py:class:`SegGeometryEpix100V1`
* :py:class:`SegGeometryMatrixV1`
* :py:class:`SegGeometryStore`
For more detail see `Detector Geometry <https://confluence.slac.stanford.edu/display/PSDM/Detector+Geometry>`_.
This software was developed for the SIT project.
If you use all or part of it, please give an appropriate acknowledgment.
Created on 2021-09-24 by Mikhail Dubrovin
"""
from psana.pscalib.geometry.SegGeometryEpix10kaV1 import *
logger = logging.getLogger(__name__)
class SegGeometryEpixHR2x2V1(SegGeometryEpix10kaV1):
    """Self-sufficient class for generation of Epix10ka sensor (2x2 ASICs) pixel coordinate array"""
    # NOTE(review): behaviour is inherited from SegGeometryEpix10kaV1; any
    # EpixHR-specific overrides are not visible in this chunk -- confirm.

# module-level singletons: standard and wide-pixel-center variants
epixhr2x2_one = SegGeometryEpixHR2x2V1(use_wide_pix_center=False)
epixhr2x2_wpc = SegGeometryEpixHR2x2V1(use_wide_pix_center=True)
#----------- TEST -------------

if __name__ == "__main__":

    import sys
    from time import time
    import psana.pyalgos.generic.Graphics as gg # For test purpose in main only
    logging.basicConfig(format='[%(levelname).1s] L%(lineno)04d: %(message)s', level=logging.DEBUG)

# NOTE(review): a second __main__ guard follows; the duplication suggests code
# between the two guards was removed -- the test dispatch below is self-contained.
if __name__ == "__main__":
    logging.getLogger('matplotlib').setLevel(logging.WARNING)
    # select a test by the first CLI argument; no argument just prints usage
    tname = sys.argv[1] if len(sys.argv) > 1 else '0'
    if len(sys.argv)==1: logger.info(usage())
    elif tname in ('1',): test_xyz_min_max()
    elif tname in ('2',): test_xyz_maps()
    elif tname in ('3',): test_2x2_img()
    elif tname in ('4',): test_2x2_img_easy()
    elif tname in ('5',): test_pix_sizes()
    elif tname in ('6',): test_2x2_mask(mbits=1+2)
    else: logger.warning('NON-EXPECTED TEST NAME: %s\n\n%s' % (tname, usage()))
    if len(sys.argv)>1: logger.info(usage(tname))
    sys.exit('END OF TEST')

# EOF
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
9487,
1058,
9078,
25,
4871,
25,
63,
41030,
10082,
15748,
13807,
844,
17184,
17,
87,
17,
53,
16,
63,
8477,
262,
4551,
844,
17184,
17,
87,
17,
53,
16,
12694,
22939,
198,
... | 2.061996 | 1,984 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2020 European Commission (JRC);
# Licensed under the EUPL (the 'Licence');
# You may not use this work except in compliance with the Licence.
# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl
"""
wltp: generate WLTC gear-shifts based on vehicle characteristics
"""
# Package metadata consumed by setup tooling and docs.
__copyright__ = "Copyright (C) 2013-2020 European Commission (JRC)"
__license__ = "EUPL 1.1+"
__title__ = "wltp"
# the first line of the module docstring doubles as the short summary
__summary__ = __doc__.splitlines()[0]
__uri__ = "https://github.com/JRCSTU/wltp"

# version info is maintained in a single place (_version.py)
from ._version import __version__, __updated__
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
198,
2,
15069,
2211,
12,
42334,
3427,
4513,
357,
41,
7397,
1776,
198,
2,
49962,
739,
262,
4576,
6489,
357,
1169,
... | 2.884058 | 207 |
""" Contains conversion logic between Mongoengine Fields and Graphene Types """
from singledispatch import singledispatch
from graphene import (
String, Boolean, Int, Float, List,
ID, Dynamic, Field
)
from graphene.types.json import JSONString
from graphene.types.datetime import DateTime
from mongoengine.fields import (
ObjectIdField,
BooleanField, StringField, IntField, LongField, FloatField, DecimalField,
URLField, EmailField,
DateTimeField, ComplexDateTimeField,
SequenceField, UUIDField,
DynamicField, DictField, MapField,
GeoPointField, PolygonField, PointField, LineStringField,
MultiPointField, MultiLineStringField, MultiPolygonField,
EmbeddedDocumentField,
ListField, SortedListField,
EmbeddedDocumentListField,
ReferenceField, CachedReferenceField, LazyReferenceField,
GenericEmbeddedDocumentField, GenericLazyReferenceField, GenericReferenceField,
BinaryField, FileField, ImageField
)
from .fields import create_connection_field
from .utils import (
get_field_description, field_is_document_list, field_is_required
)
# pylint: disable=W0622
def convert_mongoengine_field(field, registry=None):
    """ Public wrapper for :func:`convert_mongoengine_type`.

    :param field: the Mongoengine field instance to convert.
    :param registry: optional registry used to resolve document types lazily.
    :return: the corresponding Graphene type instance.
    """
    return convert_mongoengine_type(field, registry)
def get_data_from_field(field, **kwargs):
    """ Build the common keyword arguments (description, required) shared by
    every Graphene type constructed from a Mongoengine field. """
    description = get_field_description(field, **kwargs)
    required = field_is_required(field)
    return {'description': description, 'required': required}
@singledispatch
def convert_mongoengine_type(field, registry=None):
    """ Generic Mongoengine Field to Graphene Type converter.

    Dispatches on the field's class; this base implementation is only reached
    for unregistered field types and therefore raises.
    """
    raise Exception(
        # report the field's actual class: the previous `{type}` interpolated
        # the builtin `type` itself, which was useless in the message
        f"Don't know how to convert the Mongoengine field {field} ({type(field)})")
@convert_mongoengine_type.register(ObjectIdField)
def convert_field_to_id(field, registry=None):
    """ Map Mongoengine ObjectId fields onto the Graphene ID scalar. """
    kwargs = get_data_from_field(field)
    return ID(**kwargs)
@convert_mongoengine_type.register(StringField)
@convert_mongoengine_type.register(URLField)
@convert_mongoengine_type.register(EmailField)
@convert_mongoengine_type.register(SequenceField)
@convert_mongoengine_type.register(UUIDField)
def convert_field_to_string(field, registry=None):
    """ Map string-like Mongoengine fields (plain, URL, email, sequence, UUID)
    onto the Graphene String scalar. """
    kwargs = get_data_from_field(field)
    return String(**kwargs)
@convert_mongoengine_type.register(DateTimeField)
@convert_mongoengine_type.register(ComplexDateTimeField)
def convert_field_to_datetime(field, registry=None):
    """ Map Mongoengine datetime fields onto the Graphene DateTime scalar. """
    kwargs = get_data_from_field(field)
    return DateTime(**kwargs)
@convert_mongoengine_type.register(IntField)
def convert_field_to_int_or_id(field, registry=None):
    """ Map Mongoengine integer fields onto the Graphene Int scalar. """
    kwargs = get_data_from_field(field)
    return Int(**kwargs)
@convert_mongoengine_type.register(BooleanField)
def convert_field_to_boolean(field, registry=None):
    """ Map Mongoengine boolean fields onto the Graphene Boolean scalar. """
    kwargs = get_data_from_field(field)
    return Boolean(**kwargs)
@convert_mongoengine_type.register(FloatField)
@convert_mongoengine_type.register(DecimalField)
@convert_mongoengine_type.register(LongField)
def convert_field_to_float(field, registry=None):
    """ Map numeric Mongoengine fields (float, decimal, long) onto the
    Graphene Float scalar. """
    kwargs = get_data_from_field(field)
    return Float(**kwargs)
@convert_mongoengine_type.register(DynamicField)
@convert_mongoengine_type.register(DictField)
@convert_mongoengine_type.register(MapField)
@convert_mongoengine_type.register(GeoPointField)
@convert_mongoengine_type.register(PolygonField)
@convert_mongoengine_type.register(PointField)
@convert_mongoengine_type.register(LineStringField)
@convert_mongoengine_type.register(MultiPointField)
@convert_mongoengine_type.register(MultiLineStringField)
@convert_mongoengine_type.register(MultiPolygonField)
def convert_field_to_jsonstring(field, registry=None):
    """ Map schemaless and geo Mongoengine fields onto the Graphene JSONString
    scalar, since their structure cannot be expressed as a fixed type. """
    kwargs = get_data_from_field(field)
    return JSONString(**kwargs)
@convert_mongoengine_type.register(GenericEmbeddedDocumentField)
@convert_mongoengine_type.register(GenericLazyReferenceField)
@convert_mongoengine_type.register(GenericReferenceField)
def convert_generic_field_to_jsonstring(field, registry=None):
    """ Converts generic Mongoengine fields to Graphene JSONString type.

    Generic fields can have any document type, so the best that can be done is
    to convert them to JSONString.

    Renamed from ``convert_field_to_jsonstring`` because that name was already
    used by the converter for dict/geo fields above, so this second definition
    silently shadowed it at module level. The dispatch registrations are
    unaffected: they are performed by the decorators at definition time.
    """
    return JSONString(**get_data_from_field(field))
@convert_mongoengine_type.register(ReferenceField)
@convert_mongoengine_type.register(LazyReferenceField)
@convert_mongoengine_type.register(CachedReferenceField)
@convert_mongoengine_type.register(EmbeddedDocumentField)
def convert_field_to_object(field, registry=None):
    """Convert document-reference Mongoengine fields into a Graphene object field."""
    field_kwargs = get_data_from_field(field)

    def type_factory():
        """Resolve the Graphene type lazily, once the registry is populated."""
        graphene_type = registry.get_type_for_document(field.document_type)
        if not graphene_type:
            return None
        return Field(graphene_type, **field_kwargs)

    return Dynamic(type_factory)
@convert_mongoengine_type.register(ListField)
@convert_mongoengine_type.register(SortedListField)
def convert_field_to_list(list_field, registry=None):
    """Convert Mongoengine list fields into a Graphene ``List``."""
    if field_is_document_list(list_field):
        # Lists of documents need lazy type resolution / connection handling.
        return convert_document_list(list_field, registry)
    if list_field.field is None:
        # No inner field declared: fall back to a list of strings.
        inner_type = String
    else:
        # NOTE(review): ``list_field.field()`` invokes the inner field
        # instance; confirm the inner field is really callable here.
        inner_type = convert_mongoengine_field(list_field.field(), registry).__class__
    return List(inner_type, **get_data_from_field(list_field))
def convert_document_list(list_field, registry=None):
    """Convert a MongoEngine list-of-documents field into a Graphene
    ``List`` or connection field, resolved lazily through the registry.
    """
    inner_document = list_field.field.document_type
    field_kwargs = get_data_from_field(list_field)

    def type_factory():
        """Look up the Graphene type once the registry is populated."""
        graphene_type = registry.get_type_for_document(inner_document)
        if not graphene_type:
            return None
        if graphene_type._meta.connection:
            # Connection-enabled types get a relay-style connection field.
            return create_connection_field(graphene_type, **field_kwargs)
        return Field(List(graphene_type), **field_kwargs)

    return Dynamic(type_factory)
| [
37811,
49850,
11315,
9156,
1022,
42591,
18392,
23948,
290,
402,
2416,
29473,
24897,
37227,
198,
198,
6738,
31958,
8802,
963,
1330,
31958,
8802,
963,
198,
198,
6738,
42463,
1330,
357,
198,
220,
220,
220,
10903,
11,
41146,
11,
2558,
11,
4... | 2.955576 | 2,161 |
# -*- coding: utf-8 -*-
"""Updates the Synology (music) index whenever the beets library is changed.
This assumes beets is being run on Synology DiskStation Manager so synoindex is
available. Besides enabling the plugin no configuration is needed.
"""
from subprocess import run
from beets.plugins import BeetsPlugin
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
4933,
19581,
262,
16065,
1435,
357,
28965,
8,
6376,
8797,
262,
307,
1039,
5888,
318,
3421,
13,
198,
198,
1212,
18533,
307,
1039,
318,
852,
1057,
319,
16065,
... | 3.712644 | 87 |
from django.conf.urls import url
from .views import (
app_view,
skipped_app_view,
view_with_decorator_with_wraps,
view_with_decorator_without_wraps
)
# Route exercising a view wrapped by a decorator that uses @functools.wraps.
url_patterns_with_decorator_with_wraps = [
    url(
        r'^decorator-with-wraps/$',
        view_with_decorator_with_wraps,
        name='decorator_with_wraps',
    ),
]

# views with custom decorators without @functools.wraps are not supported when specifying app_name
url_patterns_with_decorator_without_wraps = [
    url(
        r'^decorator-without-wraps/$',
        view_with_decorator_without_wraps,
        name='decorator_without_wraps',
    ),
]

# Route belonging to an app that is expected to be skipped.
skipped_app_url_patterns = [
    url(r'^skipped-app-endpoint/$', skipped_app_view, name='skipped_app_endpoint'),
]

# Catch-all app view (optional trailing "/<parameter>"), plus the groups above.
urlpatterns = [
    url(r'^(/(?P<parameter>.+))?', app_view, name='app_view'),
]
urlpatterns += url_patterns_with_decorator_with_wraps
urlpatterns += url_patterns_with_decorator_without_wraps
urlpatterns += skipped_app_url_patterns
| [
6738,
42625,
14208,
13,
10414,
13,
6371,
82,
1330,
19016,
198,
198,
6738,
764,
33571,
1330,
357,
198,
220,
220,
220,
598,
62,
1177,
11,
198,
220,
220,
220,
26684,
62,
1324,
62,
1177,
11,
198,
220,
220,
220,
1570,
62,
4480,
62,
125... | 2.323308 | 399 |
from django import forms
from ckeditor.widgets import CKEditorWidget
from .models import Track
class TrackForm(forms.ModelForm):
"""Form for ``TrackAdmin`` class."""
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
269,
9091,
2072,
13,
28029,
11407,
1330,
327,
7336,
67,
2072,
38300,
198,
198,
6738,
764,
27530,
1330,
17762,
628,
198,
4871,
17762,
8479,
7,
23914,
13,
17633,
8479,
2599,
198,
220,
220,
... | 3.283019 | 53 |
import datetime
from intake.catalog.local import YAMLFilesCatalog
from intake.source.base import Schema
from intake_xarray.base import DataSourceMixin
from intake_informaticslab import __version__
from .dataset import MODataset
from .utils import datetime_to_iso_str
DATA_DELAY = 24 + 6 # num hours from current time that data is available
| [
11748,
4818,
8079,
198,
198,
6738,
10337,
13,
9246,
11794,
13,
12001,
1330,
575,
2390,
43,
25876,
49015,
198,
6738,
10337,
13,
10459,
13,
8692,
1330,
10011,
2611,
198,
6738,
10337,
62,
87,
18747,
13,
8692,
1330,
6060,
7416,
35608,
259,
... | 3.378641 | 103 |
#!/usr/bin/python
from PyQt4 import QtCore, QtGui
'''
Created on 06/11/2009
@author: iAlex
'''
class TumblrTextEdit(QtGui.QVBoxLayout):
'''
Especial Text Editor for Tumblr it allow to create bold,italic,strike text as well
insert links and image it includes a preview
'''
pariente = None
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
6738,
9485,
48,
83,
19,
1330,
33734,
14055,
11,
33734,
8205,
72,
198,
7061,
6,
198,
41972,
319,
9130,
14,
1157,
14,
10531,
198,
198,
31,
9800,
25,
1312,
15309,
198,
7061,
6,
198,
198,
487... | 2.306667 | 150 |
#!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""Skip image element when imagedata r:id cannot be found.
:author: Shay Hill
:created: 11/15/2020
User forky2 sent a docx file with an empty imagedata element:
`<v:imagedata croptop="-65520f" cropbottom="65520f"/>`
Docx2python expects to encounter
`<v:imagedata r:id="rId689" o:title=""/>`
Where `r:id="rId689"` is mapped to an image filename in one of the `rels` files.
The missing `r:id` raises a KeyError in docx2python v1.27
```
Traceback (most recent call last):
File "./process.py", line 99, in <module>
process_zip("Specs/2020-06/Rel-16/25_series/25101-g10.zip")
File "./process.py", line 70, in process_zip
doc_data = docx2python(docx_file)
File "/home/forky2/projects/docx2python/docx2python/main.py", line 61, in docx2python
body = file_text(context["officeDocument"])
File "/home/forky2/projects/docx2python/docx2python/main.py", line 56, in file_text
return get_text(unzipped, context)
File "/home/forky2/projects/docx2python/docx2python/docx_text.py", line 264, in get_text
branches(ElementTree.fromstring(xml))
File "/home/forky2/projects/docx2python/docx2python/docx_text.py", line 248, in branches
branches(child)
File "/home/forky2/projects/docx2python/docx2python/docx_text.py", line 248, in branches
branches(child)
File "/home/forky2/projects/docx2python/docx2python/docx_text.py", line 248, in branches
branches(child)
[Previous line repeated 2 more times]
File "/home/forky2/projects/docx2python/docx2python/docx_text.py", line 239, in branches
rId = child.attrib[qn("r:id")]
KeyError: '{http://schemas.openxmlformats.org/officeDocument/2006/relationships}id'
```
Solution: skip silently when an `r:id` cannot be found for an `imagedata` element.
"""
# from docx2python import docx2python
# class TestMissingRIdInImagedata:
# def test_skips_missing_rid(self) -> None:
# """Silently skip over imagedata element if r:id not found"""
# pars = docx2python("resources/imagedata_without_rid.docx")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
4808,
9,
62,
19617,
25,
3384,
69,
12,
23,
4808,
9,
62,
198,
37811,
50232,
2939,
5002,
618,
545,
1886,
1045,
374,
25,
312,
2314,
307,
1043,
13,
198,
198,
25,
9800,
25,
3476... | 2.516667 | 840 |
from django.db import models
from lino.api import dd, _
from lino_book.projects.chooser.food import year_in_school, food_choices, food
| [
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
300,
2879,
13,
15042,
1330,
49427,
11,
4808,
198,
198,
6738,
300,
2879,
62,
2070,
13,
42068,
13,
6679,
13416,
13,
19425,
1330,
614,
62,
259,
62,
14347,
11,
2057,
62,
6679,
10... | 2.901961 | 51 |
if __name__ == "__main__":
    # Prompt (in Spanish) for the number of stairs, then draw the staircase.
    step_count = int(input("Ingresa el numero de escalones: "))
    staircase(step_count)
220,
220,
220,
220,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
220,
198,
220,
220,
220,
256,
10546,
952,
796,
493,
7,
15414,
7203,
27682,
14625,
1288,
997,
3529,
390,
12221,
1952,
25,
366,
4008,
198,
220,
220,
220... | 2.367347 | 49 |
from __future__ import print_function
import fibra
# Script entry point: run the module's test routine when executed directly.
if __name__ == "__main__":
    test()
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
12900,
430,
628,
198,
220,
220,
220,
220,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1332,
3419,
198
] | 2.605263 | 38 |
__author__ = 'anton'
| [
834,
9800,
834,
796,
705,
23026,
6,
198
] | 2.625 | 8 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import math
""" \brief Calcula tantos primos como se indique y los guarda en un
vector
\param primos Un vector de primos
\param hasta El mayor número que queremos comprobar si es primo. Si
hasta es primo, se añadirá al vector
"""
####################################################################################################
"""
\brief Calcula los divisores primos de un número. Hace uso de calcularPrimos
\param n El número base
\param primos Una lista de primos, posiblemente incompleta
"""
####################################################################################################
"""
\brief Calcula los divisores primos de un número. Hace uso de calcularPrimos
\param n El número base
\param primos Una lista de primos, posiblemente incompleta
"""
####################################################################################################
"""
\brief Indica si dos números son iguales (listas)
"""
####################################################################################################
"""
\brief Eleva un número a una potencia
"""
####################################################################################################
def findMaxCommDiv(num, den):
    """Return the greatest common divisor (> 1) of num and den, or 1 if none.

    Scans downward from num, so the first divisor found is the largest.
    Used to reduce the fraction num/den ("reducir la fraccion").
    """
    for candidate in range(num, 1, -1):
        if num % candidate == 0 and den % candidate == 0:
            return candidate
    return 1
####################################################################################################
"""
\brief Comprueba que dos numeros comparten al menos un dígito común
\param a El primer número, entre 10 y 98
\param b El segundo número, entre a+1 y 99
"""
# Multiply together every "curious" two-digit fraction (numerator 11-98,
# denominator strictly greater, up to 99) and reduce the product fraction;
# the reduced denominator is printed as the answer.
# NOTE(review): isCurious() is defined elsewhere in the file; per the
# docstring above it checks that the two numbers share at least one digit
# -- looks like the digit-cancelling-fractions problem, confirm there.
prodNumerador = 1
prodDenominador = 1
numeradores = range (11, 99)
for numerador in numeradores:
    denominadores = range (numerador+1, 100)
    for denominador in denominadores:
        if (isCurious(numerador, denominador)):
            print 'La fraccion ' + repr(numerador) + '/' + repr(denominador) + ' es curiosa'
            prodNumerador *= numerador
            prodDenominador *= denominador
# Reduce the accumulated fraction by its greatest common divisor.
divisor = findMaxCommDiv(prodNumerador, prodDenominador)
prodNumerador /= divisor
prodDenominador /= divisor;
"reducir la fracción:"
print 'La solucion es ' + repr(prodDenominador)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
10688,
198,
37811,
3467,
65,
3796,
27131,
64,
24246,
418,
2684,
418,
401,
78,
384,
773,
2350,
331,
22346,
4860,
... | 3.244253 | 696 |
# -*- coding: utf-8 -*-
from tccli.services.postgres.postgres_client import register_arg
from tccli.services.postgres.postgres_client import get_actions_info
from tccli.services.postgres.postgres_client import AVAILABLE_VERSION_LIST
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
256,
535,
4528,
13,
30416,
13,
7353,
34239,
13,
7353,
34239,
62,
16366,
1330,
7881,
62,
853,
198,
6738,
256,
535,
4528,
13,
30416,
13,
7353,
34239,
13,
7353,
342... | 2.987179 | 78 |
import bpy
#class ListIndex(bpy.types.IntProperty):
#list_index: bpy.props.IntProperty(name = "Index for my_list", default = 0)
# bpy.types.Scene.list_index = IntProperty(name = "Index for my_list", default = 0)
#class MyBoneMapIndex(bpy.types.PropertyGroup):
# use an annotation
#bone_index : bpy.props.IntProperty(name = "Index for my_list", default = 0)
#class MyBoneMapIndex(bpy.types.IntProperty):
# use an annotation
#bpy.props.IntProperty(name = "Index for my_list", default = 0)
| [
11748,
275,
9078,
198,
2,
4871,
7343,
15732,
7,
65,
9078,
13,
19199,
13,
5317,
21746,
2599,
198,
220,
220,
220,
220,
220,
1303,
4868,
62,
9630,
25,
275,
9078,
13,
1676,
862,
13,
5317,
21746,
7,
3672,
796,
366,
15732,
329,
616,
62,... | 2.616915 | 201 |
from django.shortcuts import render,redirect, get_object_or_404
from .forms import User_Form, Profile_Form
from django.contrib import messages
from django.urls import reverse
from django.contrib.auth.models import User
from django.core.mail import send_mass_mail, send_mail
from Shop_platform.settings import EMAIL_HOST_USER
from .models import Profile
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from PIL import Image
@login_required(login_url="/login/")
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
11,
445,
1060,
11,
651,
62,
15252,
62,
273,
62,
26429,
198,
6738,
764,
23914,
1330,
11787,
62,
8479,
11,
13118,
62,
8479,
198,
6738,
42625,
14208,
13,
3642,
822,
1330,
6218,
198,
6738,... | 3.36129 | 155 |
import unittest
from app import create_app
from app.questionnaire.location import Location
| [
11748,
555,
715,
395,
198,
198,
6738,
598,
1330,
2251,
62,
1324,
198,
6738,
598,
13,
25652,
24042,
13,
24886,
1330,
13397,
628
] | 4.043478 | 23 |
from GO import * | [
6738,
10351,
1330,
1635
] | 4 | 4 |
# -*- coding: utf-8 -*-
"""
Subscription implementations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
from zope import component
from zope.event import notify
from zope.interface import Interface
from zope.interface import implementer
from zope.interface import providedBy
from zope.interface.interfaces import IRegistered
from zope.interface.interfaces import IUnregistered
from zope.component.globalregistry import BaseGlobalComponents
from zope.component.persistentregistry import PersistentComponents
from zope.authentication.interfaces import IAuthentication
from zope.authentication.interfaces import IUnauthenticatedPrincipal
from zope.authentication.interfaces import PrincipalLookupError
from zope.annotation import IAttributeAnnotatable
from zope.lifecycleevent import IObjectRemovedEvent
from zope.security.interfaces import IPermission
from zope.security.management import newInteraction
from zope.security.management import queryInteraction
from zope.security.management import endInteraction
from zope.security.management import checkPermission
from zope.security.testing import Participation
from zope.container.interfaces import INameChooser
from zope.container.btree import BTreeContainer
from zope.container.sample import SampleContainer
from zope.container.constraints import checkObject
from zope.cachedescriptors.property import CachedProperty
from nti.zodb.containers import time_to_64bit_int
from nti.zodb.minmax import NumericPropertyDefaultingToZero
from nti.zodb.minmax import NumericMinimum
from nti.schema.fieldproperty import createDirectFieldProperties
from nti.schema.schema import SchemaConfigured
from nti.webhooks import MessageFactory as _
from nti.webhooks.interfaces import IWebhookDialect
from nti.webhooks.interfaces import IWebhookSubscription
from nti.webhooks.interfaces import ILimitedAttemptWebhookSubscription
from nti.webhooks.interfaces import ILimitedApplicabilityPreconditionFailureWebhookSubscription
from nti.webhooks.interfaces import IWebhookSubscriptionManager
from nti.webhooks.interfaces import IWebhookDestinationValidator
from nti.webhooks.interfaces import IWebhookDeliveryAttemptResolvedEvent
from nti.webhooks.interfaces import IWebhookDeliveryAttemptFailedEvent
from nti.webhooks.interfaces import IWebhookSubscriptionApplicabilityPreconditionFailureLimitReached
from nti.webhooks.interfaces import WebhookSubscriptionApplicabilityPreconditionFailureLimitReached
from nti.webhooks.attempts import WebhookDeliveryAttempt
from nti.webhooks.attempts import PersistentWebhookDeliveryAttempt
from nti.webhooks._util import DCTimesMixin
from nti.webhooks._util import PersistentDCTimesMixin
from nti.webhooks._util import describe_class_or_specification
from persistent import Persistent
logger = __import__('logging').getLogger(__name__)
class _CheckObjectOnSetBTreeContainer(BTreeContainer):
    """
    Extending this makes you persistent.

    A ``BTreeContainer`` base used for persistent subscription storage.
    NOTE(review): the name suggests a ``checkObject``-on-``__setitem__``
    override (cf. the ``checkObject`` import), but no such method is
    visible here -- confirm against the upstream nti.containers version.
    """
    # XXX: Taken from nti.containers. Should publish that package.
class _CheckObjectOnSetSampleContainer(SampleContainer):
    """
    Non-persistent.

    The ``SampleContainer``-based counterpart of
    ``_CheckObjectOnSetBTreeContainer`` for in-memory (global) use.
    """
class IApplicableSubscriptionFactory(Interface): # pylint:disable=inherit-non-class
    """
    A private contract between the Subscription and its SubscriptionManager.

    This is only called on subscriptions that are already determined to be *active*;
    if the subscription is also *applicable*, then it should be returned. Otherwise,
    it should return None.

    This is called when we intend to attempt delivery, so it's a good time to take cleanup
    action if the subscription isn't applicable for reasons that aren't directly related
    to the *data* and the *event*, for example, if the principal cannot be found.
    """

    def __call__(data, event): # pylint:disable=no-self-argument,signature-differs
        """
        See class documentation.

        :param data: The object the triggering event is about.
        :param event: The event being dispatched.
        :return: The subscription itself when applicable, otherwise ``None``.
        """
@implementer(ILimitedAttemptWebhookSubscription,
ILimitedApplicabilityPreconditionFailureWebhookSubscription,
IAttributeAnnotatable,
IApplicableSubscriptionFactory)
class AbstractSubscription(SchemaConfigured):
"""
Subclasses need to extend a ``Container`` implementation.
"""
for_ = permission_id = owner_id = dialect_id = when = None
to = u''
active = None
createDirectFieldProperties(IWebhookSubscription)
createDirectFieldProperties(ILimitedAttemptWebhookSubscription)
__parent__ = None
attempt_limit = 50
applicable_precondition_failure_limit = 50
fallback_to_unauthenticated_principal = True
def pop(self):
"""Testing only. Removes and returns a random value."""
k = list(self.keys())[0]
v = self[k]
del self[k]
return v
def clear(self):
"""Testing only. Removes all delivery attempts."""
for k in list(self.keys()):
del self[k]
    def __checkSecurity(self, data):
        """
        Returns a boolean indicating whether *data* passes the security
        checks defined for this subscription.

        If we are not able to make the security check because the principal or
        permission we are supposed to use is not defined, returns the special
        (false) value `None`. This can be used to distinguish the case where
        access is denied by the security policy from the case where requested
        principals are missing.
        """
        if not self.permission_id and not self.owner_id:
            # If no security is requested, we're good.
            return True
        # OK, now we need to find the permission and the principal.
        # Both should be found in the context of the data; if not
        # there, then check the currently installed site.
        principal = self._find_principal(data)
        permission = self._find_permission(data)
        if principal is None or permission is None:
            # A missing permission causes zope.security to grant full access.
            # It's treated the same as zope.Public. So don't let that happen.
            return None
        # Now, we need to set up the interaction and do the security check.
        # Reuse the current interaction if one exists; otherwise start a
        # fresh one, and undo exactly what we did in the ``finally`` below.
        participation = Participation(principal)
        current_interaction = queryInteraction()
        if current_interaction is not None:
            # Cool, we can add our participation to the interaction.
            current_interaction.add(participation)
        else:
            newInteraction(participation)
        try:
            # Yes, this needs the ID of the permission, not the permission object.
            return checkPermission(self.permission_id, data)
        finally:
            if current_interaction is not None:
                current_interaction.remove(participation)
            else:
                endInteraction()
# We only ever use the ``increment()`` method of this, *or* we
# delete it (which works even if there's nothing in our ``__dict__``)
# when we are making other changes, so subclasses do not have to be
# ``PersistentPropertyHolder`` objects. (But they are.)
_delivery_applicable_precondition_failed = NumericPropertyDefaultingToZero(
'_delivery_applicable_precondition_failed',
# We have to use NumericMinimum instead of NumericMaximum or
# MergingCounter because we periodically reset to 0. And MergingCounter
# has a bug when that happens. (https://github.com/NextThought/nti.zodb/issues/6)
NumericMinimum,
)
@CachedProperty('dialect_id')
class PersistentSubscription(_CheckObjectOnSetBTreeContainer,
                             AbstractSubscription,
                             PersistentDCTimesMixin):
    """
    Persistent implementation of `IWebhookSubscription`
    """
    # Override AbstractSubscription's default of True: persistent
    # subscriptions do not fall back to the unauthenticated principal.
    fallback_to_unauthenticated_principal = False
    # Use Persistent's repr for live objects; keep the schema-configured
    # repr reachable as ``_p_repr``.
    __repr__ = Persistent.__repr__
    _p_repr = AbstractSubscription.__repr__
@component.adapter(IWebhookDeliveryAttemptResolvedEvent)
@component.adapter(IWebhookDeliveryAttemptFailedEvent)
@component.adapter(ILimitedApplicabilityPreconditionFailureWebhookSubscription,
IWebhookSubscriptionApplicabilityPreconditionFailureLimitReached)
@component.adapter(IWebhookSubscription, IRegistered)
@component.adapter(IWebhookSubscription, IUnregistered)
@component.adapter(IWebhookSubscription, IObjectRemovedEvent)
class GlobalSubscriptionComponents(BaseGlobalComponents):
    """
    Exists to be pickled by name.

    A distinct ``BaseGlobalComponents`` subclass so that module-level
    instances such as ``global_subscription_registry`` can be pickled by
    reference to their global name rather than by value.
    """
global_subscription_registry = GlobalSubscriptionComponents('global_subscription_registry')
@implementer(IWebhookSubscriptionManager)
@implementer(IWebhookSubscriptionManager)
# Module-level singleton subscription manager.
# The name string must match the variable name to pickle correctly
global_subscription_manager = GlobalWebhookSubscriptionManager('global_subscription_manager')
# Register resetGlobals with zope.testing's cleanup machinery when that
# package is available; deployments without zope.testing skip this silently.
try:
    from zope.testing.cleanup import addCleanUp # pylint:disable=ungrouped-imports
except ImportError: # pragma: no cover
    pass
else:
    addCleanUp(resetGlobals)
    del addCleanUp
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
7004,
33584,
25504,
13,
198,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443... | 3.049832 | 2,970 |
from typing import List | [
6738,
19720,
1330,
7343
] | 5.75 | 4 |