hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
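The columns above describe, for each row, the source file's repository metadata (`hexsha`, repo paths and names, licenses, star/issue/fork counts and their event dates, with `⌀` marking missing values), the raw file `content`, and simple text statistics (`avg_line_length`, `max_line_length`, `alphanum_fraction`). The sketch below is a minimal, illustrative example of filtering such rows in plain Python; the two sample rows reuse metadata values that appear in this preview, and the thresholds are arbitrary assumptions, not part of the dataset.

```python
# Illustrative sketch: filter preview rows by the columns described above.
# The sample rows reuse metadata shown in this preview; the thresholds are arbitrary.
rows = [
    {
        "hexsha": "794f799e5b9c778bf1f402253c3193dc8a3ff8e3",
        "size": 8002,
        "ext": "py",
        "lang": "Python",
        "max_stars_repo_path": "baselines/common/cmd_util.py",
        "max_stars_repo_licenses": ["MIT"],
        "max_stars_count": None,  # ⌀ (missing value)
        "avg_line_length": 38.471154,
        "max_line_length": 204,
        "alphanum_fraction": 0.670582,
    },
    {
        "hexsha": "794f7a1034dc32878ab929d8d9b7c2cc0a1d61b2",
        "size": 6639,
        "ext": "py",
        "lang": "Python",
        "max_stars_repo_path": "utils/s3_multipart_upload.py",
        "max_stars_repo_licenses": ["MIT"],
        "max_stars_count": 122,
        "avg_line_length": 37.721591,
        "max_line_length": 92,
        "alphanum_fraction": 0.675403,
    },
]

# Keep MIT-licensed Python files that have at least one star and a maximum
# line length under an arbitrary cut-off of 120 characters.
selected = [
    row
    for row in rows
    if row["ext"] == "py"
    and "MIT" in row["max_stars_repo_licenses"]
    and (row["max_stars_count"] or 0) >= 1
    and row["max_line_length"] <= 120
]

for row in selected:
    print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
```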
794f799e5b9c778bf1f402253c3193dc8a3ff8e3 | 8,002 | py | Python | baselines/common/cmd_util.py | aprendizaje-de-maquinas/baselines | cb162681fe7cb5851a743031c60416c6d320d260 | [
"MIT"
] | null | null | null | baselines/common/cmd_util.py | aprendizaje-de-maquinas/baselines | cb162681fe7cb5851a743031c60416c6d320d260 | [
"MIT"
] | null | null | null | baselines/common/cmd_util.py | aprendizaje-de-maquinas/baselines | cb162681fe7cb5851a743031c60416c6d320d260 | [
"MIT"
] | null | null | null | """
Helpers for scripts like run_atari.py.
"""
import os
try:
from mpi4py import MPI
except ImportError:
MPI = None
import gym
#from gym.wrappers import FlattenDictWrapper
from baselines import logger
from baselines.bench import Monitor
from baselines.common import set_global_seeds
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
from baselines.common import retro_wrappers
from baselines.common.wrappers import ClipActionsWrapper
def make_vec_env(env_id, env_type, num_env, seed,
wrapper_kwargs=None,
env_kwargs=None,
start_index=0,
reward_scale=1.0,
flatten_dict_observations=True,
gamestate=None,
initializer=None,
force_dummy=False):
"""
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
"""
wrapper_kwargs = wrapper_kwargs or {}
env_kwargs = env_kwargs or {}
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
seed = seed + 10000 * mpi_rank if seed is not None else None
logger_dir = logger.get_dir()
def make_thunk(rank, initializer=None):
return lambda: make_env(
env_id=env_id,
env_type=env_type,
mpi_rank=mpi_rank,
subrank=rank,
seed=seed,
reward_scale=reward_scale,
gamestate=gamestate,
flatten_dict_observations=flatten_dict_observations,
wrapper_kwargs=wrapper_kwargs,
env_kwargs=env_kwargs,
logger_dir=logger_dir,
initializer=initializer
)
set_global_seeds(seed)
if not force_dummy and num_env > 1:
return SubprocVecEnv([make_thunk(i + start_index, initializer=initializer) for i in range(num_env)])
else:
return DummyVecEnv([make_thunk(i + start_index, initializer=None) for i in range(num_env)])
def make_env(env_id, env_type, mpi_rank=0, subrank=0, seed=None, reward_scale=1.0, gamestate=None, flatten_dict_observations=True, wrapper_kwargs=None, env_kwargs=None, logger_dir=None, initializer=None):
if initializer is not None:
initializer(mpi_rank=mpi_rank, subrank=subrank)
wrapper_kwargs = wrapper_kwargs or {}
env_kwargs = env_kwargs or {}
if ':' in env_id:
import re
import importlib
module_name = re.sub(':.*','',env_id)
env_id = re.sub('.*:', '', env_id)
importlib.import_module(module_name)
if env_type == 'atari':
env = make_atari(env_id)
elif env_type == 'retro':
import retro
gamestate = gamestate or retro.State.DEFAULT
env = retro_wrappers.make_retro(game=env_id, max_episode_steps=10000, use_restricted_actions=retro.Actions.DISCRETE, state=gamestate)
else:
env = gym.make(env_id, **env_kwargs)
if flatten_dict_observations and isinstance(env.observation_space, gym.spaces.Dict):
keys = env.observation_space.spaces.keys()
env = gym.wrappers.FlattenDictWrapper(env, dict_keys=list(keys))
env.seed(seed + subrank if seed is not None else None)
env = Monitor(env,
logger_dir and os.path.join(logger_dir, str(mpi_rank) + '.' + str(subrank)),
allow_early_resets=True)
if env_type == 'atari':
env = wrap_deepmind(env, **wrapper_kwargs)
elif env_type == 'retro':
if 'frame_stack' not in wrapper_kwargs:
wrapper_kwargs['frame_stack'] = 1
env = retro_wrappers.wrap_deepmind_retro(env, **wrapper_kwargs)
if isinstance(env.action_space, gym.spaces.Box):
env = ClipActionsWrapper(env)
if reward_scale != 1:
env = retro_wrappers.RewardScaler(env, reward_scale)
return env
def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env
def make_robotics_env(env_id, seed, rank=0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
set_global_seeds(seed)
env = gym.make(env_id)
env = gym.wrappers.FlattenDictWrapper(env, ['observation', 'desired_goal'])  # qualified name; the module-level import above is commented out
env = Monitor(
env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
info_keywords=('is_success',))
env.seed(seed)
return env
def arg_parser():
"""
Create an empty argparse.ArgumentParser.
"""
import argparse
return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
def atari_arg_parser():
"""
Create an argparse.ArgumentParser for run_atari.py.
"""
print('Obsolete - use common_arg_parser instead')
return common_arg_parser()
def mujoco_arg_parser():
print('Obsolete - use common_arg_parser instead')
return common_arg_parser()
def common_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
parser.add_argument('--env_type', help='type of environment, used when the environment type cannot be automatically determined', type=str)
parser.add_argument('--seed', help='RNG seed: The most important part of RL', type=int, default=None)
parser.add_argument('--alg', help='Algorithm', type=str, default='ppo2')
parser.add_argument('--num_timesteps', type=float, default=1e6)
parser.add_argument('--network', help='network type (mlp, cnn, lstm, cnn_lstm, conv_only)', default=None)
parser.add_argument('--gamestate', help='game state to load (so far only used in retro games)', default=None)
parser.add_argument('--num_env', help='Number of environment copies being run in parallel. When not specified, set to number of cpus for Atari, and to 1 for Mujoco', default=None, type=int)
parser.add_argument('--reward_scale', help='Reward scale factor. Default: 1.0', default=1.0, type=float)
parser.add_argument('--save_path', help='Path to save trained model to', default=None, type=str)
parser.add_argument('--save_video_interval', help='Save video every x steps (0 = disabled)', default=0, type=int)
parser.add_argument('--save_video_length', help='Length of recorded video. Default: 200', default=200, type=int)
parser.add_argument('--log_path', help='Directory to save learning curve data.', default=None, type=str)
parser.add_argument('--play', default=False, action='store_true')
return parser
def robotics_arg_parser():
"""
Create an argparse.ArgumentParser for run_mujoco.py.
"""
parser = arg_parser()
parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
parser.add_argument('--seed', help='RNG seed', type=int, default=None)
parser.add_argument('--num-timesteps', type=int, default=int(1e6))
return parser
def parse_unknown_args(args):
"""
Parse arguments not consumed by arg parser into a dictionary
"""
retval = {}
preceded_by_key = False
for arg in args:
if arg.startswith('--'):
if '=' in arg:
key = arg.split('=')[0][2:]
value = arg.split('=')[1]
retval[key] = value
else:
key = arg[2:]
preceded_by_key = True
elif preceded_by_key:
retval[key] = arg
preceded_by_key = False
return retval
| 38.471154 | 204 | 0.670582 |
794f79bb02206b025c4139d6ad3363dfb8e85a97 | 3,303 | py | Python | examples/src/main/python/mllib/sampled_rdds.py | hochoy18/spark-2.2 | 421979234f03e698d61b2d14010deb0c6d34d890 | [
"0BSD",
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 9 | 2018-08-08T05:40:20.000Z | 2020-07-25T12:21:41.000Z | examples/src/main/python/mllib/sampled_rdds.py | rongyousu/spark-2.2 | 421979234f03e698d61b2d14010deb0c6d34d890 | [
"0BSD",
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 2 | 2020-05-15T21:40:43.000Z | 2021-01-21T00:17:29.000Z | examples/src/main/python/mllib/sampled_rdds.py | rongyousu/spark-2.2 | 421979234f03e698d61b2d14010deb0c6d34d890 | [
"0BSD",
"BSD-3-Clause-Open-MPI",
"PSF-2.0",
"BSD-2-Clause",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"BSD-3-Clause-Clear",
"PostgreSQL",
"BSD-3-Clause"
] | 14 | 2019-01-14T07:01:36.000Z | 2021-01-06T01:45:08.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Randomly sampled RDDs.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext
from pyspark.mllib.util import MLUtils
if __name__ == "__main__":
if len(sys.argv) not in [1, 2]:
print("Usage: sampled_rdds <libsvm data file>", file=sys.stderr)
exit(-1)
if len(sys.argv) == 2:
datapath = sys.argv[1]
else:
datapath = 'data/mllib/sample_binary_classification_data.txt'
sc = SparkContext(appName="PythonSampledRDDs")
fraction = 0.1 # fraction of data to sample
examples = MLUtils.loadLibSVMFile(sc, datapath)
numExamples = examples.count()
if numExamples == 0:
print("Error: Data file had no samples to load.", file=sys.stderr)
exit(1)
print('Loaded data with %d examples from file: %s' % (numExamples, datapath))
# Example: RDD.sample() and RDD.takeSample()
expectedSampleSize = int(numExamples * fraction)
print('Sampling RDD using fraction %g. Expected sample size = %d.'
% (fraction, expectedSampleSize))
sampledRDD = examples.sample(withReplacement=True, fraction=fraction)
print(' RDD.sample(): sample has %d examples' % sampledRDD.count())
sampledArray = examples.takeSample(withReplacement=True, num=expectedSampleSize)
print(' RDD.takeSample(): sample has %d examples' % len(sampledArray))
print()
# Example: RDD.sampleByKey()
keyedRDD = examples.map(lambda lp: (int(lp.label), lp.features))
print(' Keyed data using label (Int) as key ==> Orig')
# Count examples per label in original data.
keyCountsA = keyedRDD.countByKey()
# Subsample, and count examples per label in sampled data.
fractions = {}
for k in keyCountsA.keys():
fractions[k] = fraction
sampledByKeyRDD = keyedRDD.sampleByKey(withReplacement=True, fractions=fractions)
keyCountsB = sampledByKeyRDD.countByKey()
sizeB = sum(keyCountsB.values())
print(' Sampled %d examples using approximate stratified sampling (by label). ==> Sample'
% sizeB)
# Compare samples
print(' \tFractions of examples with key')
print('Key\tOrig\tSample')
for k in sorted(keyCountsA.keys()):
fracA = keyCountsA[k] / float(numExamples)
if sizeB != 0:
fracB = keyCountsB.get(k, 0) / float(sizeB)
else:
fracB = 0
print('%d\t%g\t%g' % (k, fracA, fracB))
sc.stop()
| 37.534091 | 95 | 0.667878 |
794f7a1034dc32878ab929d8d9b7c2cc0a1d61b2 | 6,639 | py | Python | utils/s3_multipart_upload.py | kokyriakidis/cloudbiolinux | a318ecbade2b27e23c275601571b1b19c8842d7a | [
"MIT"
] | 122 | 2015-01-04T13:23:27.000Z | 2022-01-18T22:52:12.000Z | utils/s3_multipart_upload.py | kokyriakidis/cloudbiolinux | a318ecbade2b27e23c275601571b1b19c8842d7a | [
"MIT"
] | 170 | 2015-02-09T18:03:49.000Z | 2021-11-14T02:32:09.000Z | utils/s3_multipart_upload.py | kokyriakidis/cloudbiolinux | a318ecbade2b27e23c275601571b1b19c8842d7a | [
"MIT"
] | 107 | 2015-01-06T06:10:04.000Z | 2022-02-10T17:25:34.000Z | #!/usr/bin/env python
"""Split large file into multiple pieces for upload to S3.
S3 only supports 5Gb files for uploading directly, so for larger CloudBioLinux
box images we need to use boto's multipart file support.
This parallelizes the task over available cores using multiprocessing.
It checks for an up to date version of the file remotely, skipping transfer
if found.
Note: by default this will look for your default AWS Access Key ID and AWS Secret Access Key
you setup via 'aws configure'. You can store additional profiles using
'aws configure --profile <some_profile_name>'
Usage:
s3_multipart_upload.py <file_to_transfer> <bucket_name> [<s3_key_name>]
if <s3_key_name> is not specified, the filename will be used.
--norr -- Do not use reduced redundancy storage.
--public -- Make uploaded files public.
--cores=n -- Number of cores to use for upload
--profile -- The alternate AWS profile to use for your keys located in ~/.aws/config
Files are stored at cheaper reduced redundancy storage by default.
"""
import os
import sys
import glob
import subprocess
import contextlib
import functools
import multiprocessing
from multiprocessing.pool import IMapIterator
from optparse import OptionParser
import rfc822
import boto
def main(transfer_file, bucket_name, s3_key_name=None, use_rr=True,
make_public=True, cores=None, profile=None):
if s3_key_name is None:
s3_key_name = os.path.basename(transfer_file)
if profile is None:
conn = boto.connect_s3()
else:
conn = boto.connect_s3(profile_name=profile)
bucket = conn.lookup(bucket_name)
if bucket is None:
bucket = conn.create_bucket(bucket_name)
if s3_has_uptodate_file(bucket, transfer_file, s3_key_name):
print "S3 has up to date version of %s in %s. Not transferring." % \
(s3_key_name, bucket.name)
return
mb_size = os.path.getsize(transfer_file) / 1e6
if mb_size < 50:
_standard_transfer(bucket, s3_key_name, transfer_file, use_rr)
else:
_multipart_upload(bucket, s3_key_name, transfer_file, mb_size, use_rr,
cores, profile)
s3_key = bucket.get_key(s3_key_name)
if make_public:
s3_key.set_acl("public-read")
def s3_has_uptodate_file(bucket, transfer_file, s3_key_name):
"""Check if S3 has an existing, up to date version of this file.
"""
s3_key = bucket.get_key(s3_key_name)
if s3_key:
s3_size = s3_key.size
local_size = os.path.getsize(transfer_file)
s3_time = rfc822.mktime_tz(rfc822.parsedate_tz(s3_key.last_modified))
local_time = os.path.getmtime(transfer_file)
return s3_size == local_size and s3_time >= local_time
return False
def upload_cb(complete, total):
sys.stdout.write(".")
sys.stdout.flush()
def _standard_transfer(bucket, s3_key_name, transfer_file, use_rr):
print " Upload with standard transfer, not multipart",
new_s3_item = bucket.new_key(s3_key_name)
new_s3_item.set_contents_from_filename(transfer_file, reduced_redundancy=use_rr,
cb=upload_cb, num_cb=10)
print
def map_wrap(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return apply(f, *args, **kwargs)
return wrapper
def mp_from_ids(mp_id, mp_keyname, mp_bucketname, profile=None):
"""Get the multipart upload from the bucket and multipart IDs.
This allows us to reconstitute a connection to the upload
from within multiprocessing functions.
"""
if profile is None:
conn = boto.connect_s3()
else:
conn = boto.connect_s3(profile_name=profile)
bucket = conn.lookup(mp_bucketname)
mp = boto.s3.multipart.MultiPartUpload(bucket)
mp.key_name = mp_keyname
mp.id = mp_id
return mp
@map_wrap
def transfer_part(mp_id, mp_keyname, mp_bucketname, i, part, profile):
"""Transfer a part of a multipart upload. Designed to be run in parallel.
"""
mp = mp_from_ids(mp_id, mp_keyname, mp_bucketname, profile)
print " Transferring", i, part
with open(part) as t_handle:
mp.upload_part_from_file(t_handle, i+1)
os.remove(part)
def _multipart_upload(bucket, s3_key_name, tarball, mb_size, use_rr=True,
cores=None, profile=None):
"""Upload large files using Amazon's multipart upload functionality.
"""
def split_file(in_file, mb_size, split_num=5):
prefix = os.path.join(os.path.dirname(in_file),
"%sS3PART" % (os.path.basename(s3_key_name)))
# require a split size between 5Mb (AWS minimum) and 250Mb
split_size = int(max(min(mb_size / (split_num * 2.0), 250), 5))
if not os.path.exists("%saa" % prefix):
cl = ["split", "-b%sm" % split_size, in_file, prefix]
subprocess.check_call(cl)
return sorted(glob.glob("%s*" % prefix))
mp = bucket.initiate_multipart_upload(s3_key_name, reduced_redundancy=use_rr)
with multimap(cores) as pmap:
for _ in pmap(transfer_part, ((mp.id, mp.key_name, mp.bucket_name, i, part, profile)
for (i, part) in
enumerate(split_file(tarball, mb_size, cores)))):
pass
mp.complete_upload()
@contextlib.contextmanager
def multimap(cores=None):
"""Provide multiprocessing imap like function.
The context manager handles setting up the pool, worked around interrupt issues
and terminating the pool on completion.
"""
if cores is None:
cores = max(multiprocessing.cpu_count() - 1, 1)
def wrapper(func):
def wrap(self, timeout=None):
return func(self, timeout=timeout if timeout is not None else 1e100)
return wrap
IMapIterator.next = wrapper(IMapIterator.next)
pool = multiprocessing.Pool(cores)
yield pool.imap
pool.terminate()
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-r", "--norr", dest="use_rr",
action="store_false", default=True)
parser.add_option("-p", "--public", dest="make_public",
action="store_true", default=False)
parser.add_option("-c", "--cores", dest="cores",
default=multiprocessing.cpu_count())
parser.add_option("--profile", dest="profile")
(options, args) = parser.parse_args()
if len(args) < 2:
print __doc__
sys.exit()
kwargs = dict(use_rr=options.use_rr, make_public=options.make_public,
cores=int(options.cores), profile=options.profile)
main(*args, **kwargs)
| 37.721591 | 92 | 0.675403 |
794f7a5b51ab54fa014e703c3b09dd556d4c8e98 | 666 | py | Python | 01.Fundamentals/_01_Introduction-to-Programming/_15_Age.py | VProfirov/Telerik-Academy-Course_Python-version_Homeworks-and-Exams | d8dadce0a3084d466d522292038ef1ff3b876891 | [
"MIT"
] | null | null | null | 01.Fundamentals/_01_Introduction-to-Programming/_15_Age.py | VProfirov/Telerik-Academy-Course_Python-version_Homeworks-and-Exams | d8dadce0a3084d466d522292038ef1ff3b876891 | [
"MIT"
] | null | null | null | 01.Fundamentals/_01_Introduction-to-Programming/_15_Age.py | VProfirov/Telerik-Academy-Course_Python-version_Homeworks-and-Exams | d8dadce0a3084d466d522292038ef1ff3b876891 | [
"MIT"
] | null | null | null | from datetime import datetime
def get_current_age(birthdate_str):
current_date = datetime.now()
birth_date = datetime.strptime(birthdate_str,'%d.%m.%Y')
current_age = current_date.year - birth_date.year
if (birth_date.date() > current_date.replace(year= current_date.year - current_age).date()):
current_age -= 1
return current_age
def get_age_after_10years(age):
return age + 10
if __name__ == '__main__':
# get_current_age('10.05.1990')
birthdate = '10.05.1987'
current_age = get_current_age(birthdate)
age_after_10years = get_age_after_10years(current_age)
print(current_age)
print(age_after_10years)
| 27.75 | 96 | 0.720721 |
794f7aea676706767c9065cd6b84c3f63dc93949 | 21,730 | py | Python | py/vr/latex.py | acorg/ssm-report-n | eecb082c280a991116f15751bfd63da8c5680d1f | [
"MIT"
] | null | null | null | py/vr/latex.py | acorg/ssm-report-n | eecb082c280a991116f15751bfd63da8c5680d1f | [
"MIT"
] | null | null | null | py/vr/latex.py | acorg/ssm-report-n | eecb082c280a991116f15751bfd63da8c5680d1f | [
"MIT"
] | null | null | null | T_Head = r"""% !TEX encoding = UTF-8 Unicode
% generated by %program% on %now%
%documentclass%
\pagestyle{empty}
\usepackage[cm]{fullpage}
\usepackage{verbatim}
\usepackage[table]{xcolor}
\usepackage{tikz} % draw filled circles in \ColorCodedByRegion
\usepackage{graphicx} % multiple pdfs per page, pdf
\usepackage[export]{adjustbox} % frame in \includegraphics
\usepackage{grffile} % to allow .a.pdf in \includegraphics
\usepackage{pdfpages} % phylogenetic tree
\usepackage{fancyhdr} % keep page numbers in embedded phylogenetic tree
\usepackage{calc}
\usepackage{hyperref} % ToC entries as links
\usepackage{tocloft} % \cftsetindents
\usepackage[toc,page]{appendix} % Appendix
\usepackage{titletoc} % ToC entries without numbers
\usepackage[T1]{fontenc} % fonts
\usepackage{times} % font
%usepackage%
"""
# ----------------------------------------------------------------------
T_Tail = r"""
% ---------------------------------------------------------
\end{document}
"""
# ----------------------------------------------------------------------
T_RemoveSectionNumbering = r"""
% ----------------------------------------------------------------------
% remove section numbering
% ----------------------------------------------------------------------
%% http://www.ehow.com/how_8085363_hide-section-numbers-latex.html
\setcounter{secnumdepth}{-1}
"""
# ----------------------------------------------------------------------
T_ColorCodedBy = r"""
% ----------------------------------------------------------------------
% ColorCodedBy
% ----------------------------------------------------------------------
% \newcommand\cbox[1][black]{\textcolor{#1}{\rule{0.7em}{0.7em}}}
\newcommand\cbox[1][black]{\tikz[baseline=-0.5ex]\draw[black,fill=#1,radius=0.3em] (0,0) circle ;}
\newcommand{\ColorCodedByRegion}{%
\begin{tabular}{l l l}
\cbox[NorthAmerica] & North America & Dark Blue \\
\cbox[SouthAmerica] & South America & Light Blue \\
\cbox[Europe] & Europe & Green \\
\cbox[Africa] & Africa & Orange \\
\cbox[MiddleEast] & MiddleEast & Purple \\
\cbox[Russia] & Russia & Maroon \\
\cbox[Asia] & East and South East Asia & Red \\
\cbox[AustraliaOceania] & Australia and Oceania & Pink \\
\cbox[Unknown] & unknown & Grey \\
\end{tabular}
}
\newcommand{\ColorCodedByRegionOld}{%
{\color{NorthAmerica}DarkBlue = NorthAmerica} \\
{\color{SouthAmerica}LightBlue = SouthAmerica} \\
{\color{Europe}Green = Europe} \\
{\color{Africa}Orange = Africa} \\
{\color{MiddleEast}Purple = MiddleEast} \\
{\color{Russia}Maroon = Russia} \\
{\color{Asia}Red = E SE Asia} \\
{\color{AustraliaOceania}Pink = Oceania} \\
{\color{Unknown}Grey = unknown}
}
\definecolor{2a1b.2}{HTML}{1B9E77}
\definecolor{2a1b.1}{HTML}{66A61E}
\definecolor{2a1b.2a1}{HTML}{D95F02}
\definecolor{2a1b.2a2}{HTML}{E6AB02}
\definecolor{2a1b.2a2 156S}{HTML}{674d01}
\definecolor{2a1b.2a2 156Q}{HTML}{fede83}
\definecolor{2a1b1a}{HTML}{4037B3}
\definecolor{2a1b1a+192F}{HTML}{9a4ef2}
\definecolor{2a1b.1b}{HTML}{E7298A}
\newcommand{\ColorCodedHThreeSecondSet}{%
\begin{tabular}{l l}
\cbox[2a1b.2] & 2a1b.2 \\
\cbox[2a1b.1] & 2a1b.1 \\
\cbox[2a1b.2a1] & 2a1b.2a1 \\
\cbox[2a1b.2a2] & 2a1b.2a2 \\
\cbox[2a1b.2a2 156S] & 2a1b.2a2 156S \\
\cbox[2a1b.2a2 156Q] & 2a1b.2a2 156Q \\
\cbox[2a1b1a] & 2a1b1a \\
\cbox[2a1b1a+192F] & 2a1b1a+192F \\
\cbox[2a1b.1b] & 2a1b.1b \\
\end{tabular}
}
% \newcommand{\ColorCodedByLineage}{%
% {\color{Yamagata}Red = Yamagata},
% {\color{Victoria}Blue = Victoria}.
% {\color{Victoria}Green = Victoria deletion mutants}.
% }
\newcommand{\ColorCodedByLineageVicDelMut}{%
\begin{tabular}{l l l}
\cbox[Yamagata] & Yamagata & Red \\
\cbox[Victoria] & Victoria & Blue \\
\cbox[Victoria2del] & Victoria 2-del mutants & Cyan \\
\cbox[Victoria3del] & Victoria 3-del mutants & Purple \\
\end{tabular}
}
\definecolor{V1A.3 126K} {HTML}{8DA0CB}
\definecolor{V1A.3 133G 129N} {HTML}{8C6BB1}
\definecolor{V1A.3a} {HTML}{66C2A5}
\definecolor{V1A.3a1} {HTML}{33A02C}
\definecolor{V1A.3a2} {HTML}{CCFF9E}
\definecolor{V1A.3a2 122Q} {HTML}{A6D854}
\definecolor{V1A.3 133R} {HTML}{FFD92F}
\definecolor{V1A.3 133R 128K} {HTML}{FC8D62}
\definecolor{V1A.3 133R 129N} {HTML}{E78AC3}
\definecolor{V1A.3 133R 136K} {HTML}{E5C494}
\newcommand{\ColorCodedBVicSecondSet}{%
\begin{tabular}{l l}
\cbox[V1A.3 126K] & V1A.3 126K \\
\cbox[V1A.3 133G 129N] & V1A.3 133G 129N \\
\cbox[V1A.3a] & V1A.3a \\
\cbox[V1A.3a1] & V1A.3a1 \\
\cbox[V1A.3a2] & V1A.3a2 \\
\cbox[V1A.3a2 122Q] & V1A.3a2 122Q \\
\cbox[V1A.3 133R] & V1A.3 133R \\
\cbox[V1A.3 133R 128K] & V1A.3 133R 128K \\
\cbox[V1A.3 133R 129N] & V1A.3 133R 129N \\
\cbox[V1A.3 133R 136K] & V1A.3 133R 136K \\
\end{tabular}
}
% \newcommand{\ColorCodedByYear}{%
% {\color{YearGrey}Grey - before 2012},
% {\color{YearOrange}Orange - 2012},
% {\color{YearBlue}Blue - 2013},
% {\color{YearMagenta}Magenta - 2014}.
% }
"""
# ----------------------------------------------------------------------
T_WhoccStatisticsTable = r"""
% ----------------------------------------------------------------------
% Table with statistics
% ----------------------------------------------------------------------
\newcommand{\WhoccStatisticsTableCellOne}[1]{#1 & & & &}
\newcommand{\WhoccStatisticsTableCellOneTotal}[1]{\color{WhoccStatisticsTableTotal}#1 & & & & }
\newcommand{\WhoccStatisticsTableCellTwo}[2]{#1 & ( & #2 & ) &}
% \newcommand{\WhoccStatisticsTableCellTwoTotal}[2]{\textbf{#1} & \textbf{(} & \textbf{#2} & \textbf{)} & }
\newcommand{\WhoccStatisticsTableCellTwoTotal}[2]{\color{WhoccStatisticsTableTotal}#1 & \color{WhoccStatisticsTableTotal}( & \color{WhoccStatisticsTableTotal}#2 & \color{WhoccStatisticsTableTotal}) & }
\newenvironment{WhoccStatisticsTable}{
\setlength{\tabcolsep}{0pt}
\definecolor{AlternativeRow}{HTML}{F0F0F0}
\rowcolors{2}{AlternativeRow}{}
\renewcommand{\arraystretch}{1.5}
\newcommand{\ContinentHeading}[1]{\multicolumn{5}{>{\hspace{0.3em}}c<{\hspace{0.3em}}}{##1}}
\newcommand{\ContinentHeadingTotal}[1]{\multicolumn{5}{>{\hspace{0.3em}}c<{\hspace{0.3em}}}{\color{WhoccStatisticsTableTotal}##1}}
\newcommand{\ContinentHeadingLast}[1]{\multicolumn{5}{>{\hspace{0.3em}}c<{\hspace{0.3em}}|}{##1}}
\newcommand{\PeriodHeading}[1]{\multicolumn{1}{|c}{##1}}
\scriptsize
\begin{center}
\begin{tabular}{|>{\hspace{0.3em}}l<{\hspace{0.3em}} *{12}{>{\hspace{0.6em}}r >{\hspace{0.3em}}lrl r<{\hspace{0.5em}}}|} % >{\hspace{0.3em}\bfseries}r >{\hspace{0.3em}}lrl r<{\hspace{0.3em}} *{2}{>{\hspace{0.3em}}r >{\hspace{0.3em}}lrl r<{\hspace{0.3em}}}}
}{\end{tabular}\end{center}\par}
"""
# ----------------------------------------------------------------------
T_AntigenicMapTable = r"""
% ----------------------------------------------------------------------
% Table with antigenic maps
% ----------------------------------------------------------------------
\def \AntigenicMapTableMapSize {(\textheight-20pt) * 9 / 30} % size of an embedded antigenic map
\def \AntigenicMapTableMapSmallSize {(\textheight-20pt) * 17 / 60} % size of an embedded antigenic map
\newenvironment{AntigenicMapTable}{
\setlength{\tabcolsep}{7pt}
\renewcommand{\arraystretch}{3.5}
\newcommand{\AntigenicMap}[1]{\includegraphics[width=\AntigenicMapTableMapSize,frame]{##1}}
\newcommand{\AntigenicMapSmall}[1]{\includegraphics[width=\AntigenicMapTableMapSmallSize,frame]{##1}}
\begin{center}
\begin{tabular}{c c}
}{\end{tabular}\end{center}\par}
\newenvironment{AntigenicMapTableWithSep}[3]{
\setlength{\tabcolsep}{#1}
\renewcommand{\arraystretch}{#2}
\newcommand{\AntigenicMap}[1]{\includegraphics[width={(\textheight-20pt) * {#3}},frame]{##1}}
\begin{center}
\begin{tabular}{c c}
}{\end{tabular}\end{center}\par}
\newenvironment{AntigenicMapTable3WithSep}[3]{
\setlength{\tabcolsep}{#1}
\renewcommand{\arraystretch}{#2}
\newcommand{\AntigenicMap}[1]{\includegraphics[width={(\textheight-20pt) * {#3}},frame]{##1}}
\begin{center}
\begin{tabular}{c c c}
}{\end{tabular}\end{center}\par}
"""
# ----------------------------------------------------------------------
T_WholePagePdf = r"""
% ----------------------------------------------------------------------
% WholePagePdf
% ----------------------------------------------------------------------
\newenvironment{WholePagePdfEnv}{
\noindent
\begin{center}
}{\end{center}\par}
% \newcommand{\WholePagePdf}[1]{\begin{WholePagePdfEnv}\pagestyle{empty} \includepdf[pages=-,pagecommand={\thispagestyle{fancy}}]{#1}\end{WholePagePdfEnv}}
\newcommand{\WholePagePdf}[1]{\begin{WholePagePdfEnv}\pagestyle{empty} \includepdf[pages={1},noautoscale=false]{#1}\end{WholePagePdfEnv}}
\newcommand{\WholePagePdfMerge}[1]{\begin{WholePagePdfEnv}\pagestyle{empty} \includepdfmerge[]{#1}\end{WholePagePdfEnv}}
\newcommand{\WholePagePdfFit}[1]{\begin{WholePagePdfEnv}\includegraphics[page=1,scale=0.9]{#1}\end{WholePagePdfEnv}}
\newcommand{\WholePagePdfTwoToc}[3]{
\begin{WholePagePdfEnv}
\includepdf[pages=-,pagecommand={\pagestyle{fancy}}]{#1}
\addcontentsline{toc}{subsection}{#2}
\includepdf[pages=-,pagecommand={\pagestyle{fancy}}]{#3}
\end{WholePagePdfEnv}}
"""
# ----------------------------------------------------------------------
T_SignaturePage = r"""
% ----------------------------------------------------------------------
% Signature page
% ----------------------------------------------------------------------
\newenvironment{SignaturePageEnv}{
\noindent
\begin{center}
}{\end{center}\par}
\newcommand{\SignaturePageFit}[1]{\begin{SignaturePageEnv}\resizebox{!}{0.98\textheight}{\includegraphics[page=1]{#1}}\end{SignaturePageEnv}}
\newcommand{\SignaturePage}[1]{\begin{SignaturePageEnv}\includegraphics[page=1]{#1}\end{SignaturePageEnv}}
\newcommand{\SignaturePageNoResacle}[1]{\includepdf[pages=-,frame=true,noautoscale=false,fitpaper=true]{#1}}
"""
# ----------------------------------------------------------------------
T_AntigenicGeneticMapSingle = r"""
% ----------------------------------------------------------------------
% Antigenic-genetic maps
% ----------------------------------------------------------------------
\def \AntigenicGeneticMapSingleMapSize {\textheight*21/30}
\newcommand{\AntigenicGeneticMapSingle}[1]{\begin{center}\includegraphics[width=\AntigenicGeneticMapSingleMapSize,frame]{#1}\end{center}}
"""
# ----------------------------------------------------------------------
T_OverviewMapSingle = r"""
% ----------------------------------------------------------------------
% Overview maps
% ----------------------------------------------------------------------
\def \OverviewMapSingleMapSize {\textheight*21/30}
\newcommand{\OverviewMapSingle}[1]{\begin{center}\includegraphics[width=\OverviewMapSingleMapSize,frame]{#1}\end{center}}
"""
# ----------------------------------------------------------------------
T_GeographicMapsTable = r"""
% ----------------------------------------------------------------------
% Table with geographic maps
% ----------------------------------------------------------------------
\def \GeographicMapsTableMapSize {\textheight * 18 / 30} % size of an embedded geographic map
\newenvironment{GeographicMapsTable}{
\renewcommand{\arraystretch}{2.5}
\newcommand{\GeographicMap}[1]{\includegraphics[width=\GeographicMapsTableMapSize,frame]{##1}}
\begin{center}
\begin{tabular}{c}
}{\end{tabular}\end{center}\par}
"""
# ----------------------------------------------------------------------
T_BlankPage = r"""
% ----------------------------------------------------------------------
% Blank page (http://tex.stackexchange.com/questions/36880/insert-a-blank-page-after-current-page)
% ----------------------------------------------------------------------
\newcommand\blankpage{%
\newpage
\vspace*{100pt}
\thispagestyle{empty}%
\newpage}
"""
# ----------------------------------------------------------------------
T_TableOfContents = r"""
% ----------------------------------------------------------------------
% ToC table of contents
% ----------------------------------------------------------------------
%% ToC http://tex.stackexchange.com/questions/163986/format-table-of-contents-with-latex
\titlecontents{section}[0cm]{\bfseries}{\\}{\\}{}
\titlecontents{subsection}[1em]{}{}{}{\titlerule*[5pc]{}\vspace{0.8ex}\thecontentspage}
\contentsmargin{120pt}
% table of content indentation
% http://tex.stackexchange.com/questions/50471/question-about-indent-lengths-in-toc
\cftsetindents{section}{0.5in}{0.5in}
%% http://tex.stackexchange.com/questions/80113/hide-section-numbers-but-keep-numbering
% \renewcommand{\thesection}{}
% \makeatletter
% \def\@seccntformat#1{\csname #1ignore\expandafter\endcsname\csname the#1\endcsname\quad}
% \let\sectionignore\@gobbletwo
% \let\latex@numberline\numberline
% \def\numberline#1{\if\relax#1\relax\else\latex@numberline{#1}\fi}
% \makeatother
% \renewcommand{\thesubsection}{\arabic{subsection}}
"""
T_TOC = r"""%no-eol%
% ----------------------------------------------------------------------
% ToC
% ----------------------------------------------------------------------
\newpage
\tableofcontents
"""
# ----------------------------------------------------------------------
T_Cover = r"""
% ----------------------------------------------------------------------
% Cover
% ----------------------------------------------------------------------
\thispagestyle{empty}
{%cover_quotation%
\vspace*{%cover_top_space%}
{
\fontsize{22}{26} \selectfont
\noindent
\textbf{Information for the WHO Consultation\\ on the Composition of Influenza Vaccines\\ for the %report_hemisphere% Hemisphere %report_year%}
\par
}
\vspace{90pt}
{
\fontsize{19}{24} \selectfont
\noindent
%teleconference%
\vspace{10pt}
\noindent
%addendum%
\vspace{50pt}
\noindent
%meeting_date%
\par
}
\vspace{%cover_after_meeting_date_space%}
{
\large
\noindent
Center for Pathogen Evolution
% \vspace{10pt}
% \noindent
% WHO Collaborating Center for Modeling, Evolution, and Control of Emerging Infectious Diseases
\vspace{10pt}
\noindent
University of Cambridge, United Kingdom
% do not remove two empty lines below
% do not remove two empty lines above!
}
}
"""
# ----------------------------------------------------------------------
T_ = r"""
"""
# ----------------------------------------------------------------------
T_ColorsBW = r"""
% ----------------------------------------------------------------------
% Continent colors for time series
% ----------------------------------------------------------------------
\definecolor{NorthAmerica}{HTML}{000000}
\definecolor{Europe}{HTML}{000000}
\definecolor{MiddleEast}{HTML}{000000}
\definecolor{NorthAmerica}{HTML}{000000}
\definecolor{SouthAmerica}{HTML}{000000}
\definecolor{CentralAmerica}{HTML}{000000}
\definecolor{Africa}{HTML}{000000}
\definecolor{Asia}{HTML}{000000}
\definecolor{Russia}{HTML}{000000}
\definecolor{AustraliaOceania}{HTML}{000000}
\definecolor{Antarctica}{HTML}{000000}
\definecolor{ChinaSouth}{HTML}{000000}
\definecolor{ChinaNorth}{HTML}{000000}
\definecolor{ChinaUnknown}{HTML}{000000}
\definecolor{Unknown}{HTML}{000000}
% ----------------------------------------------------------------------
% Point colors
% ----------------------------------------------------------------------
\definecolor{Vaccine}{HTML}{000000}
\definecolor{PreviousVaccine}{HTML}{000000}
\definecolor{Serology}{HTML}{000000}
\definecolor{YearGrey}{HTML}{000000}
\definecolor{YearOrange}{HTML}{000000}
\definecolor{YearMagenta}{HTML}{000000}
\definecolor{YearBlue}{HTML}{000000}
\definecolor{Yamagata}{HTML}{000000}
\definecolor{Victoria}{HTML}{000000}
% ----------------------------------------------------------------------
% Other colors
% ----------------------------------------------------------------------
\definecolor{WhoccStatisticsTableTotal}{HTML}{008000}
"""
# ----------------------------------------------------------------------
T_ColorsColors = r"""
% ----------------------------------------------------------------------
% Continent colors for time series
% ----------------------------------------------------------------------
\definecolor{NorthAmerica}{HTML}{000080}
\definecolor{Europe}{HTML}{00FF00}
\definecolor{MiddleEast}{HTML}{8000FF}
\definecolor{NorthAmerica}{HTML}{00008B}
\definecolor{SouthAmerica}{HTML}{40E0D0}
\definecolor{CentralAmerica}{HTML}{AAF9FF}
\definecolor{Africa}{HTML}{FF8000}
\definecolor{Asia}{HTML}{FF0000}
\definecolor{Russia}{HTML}{B03060}
\definecolor{AustraliaOceania}{HTML}{FF69B4}
\definecolor{Antarctica}{HTML}{808080}
\definecolor{ChinaSouth}{HTML}{FF0000}
\definecolor{ChinaNorth}{HTML}{6495ED}
\definecolor{ChinaUnknown}{HTML}{808080}
\definecolor{Unknown}{HTML}{808080}
% ----------------------------------------------------------------------
% Point colors
% ----------------------------------------------------------------------
\definecolor{Vaccine}{HTML}{FF0000}
\definecolor{PreviousVaccine}{HTML}{0000FF}
\definecolor{Serology}{HTML}{FFA500}
\definecolor{YearGrey}{HTML}{B0B0B0}
\definecolor{YearOrange}{HTML}{FFA500}
\definecolor{YearMagenta}{HTML}{FF00FF}
\definecolor{YearBlue}{HTML}{0000FF}
\definecolor{Yamagata} {HTML}{FF0000}
\definecolor{Victoria} {HTML}{0000FF}
\definecolor{Victoria2del}{HTML}{00FFFF}
\definecolor{Victoria3del}{HTML}{800080}
% ----------------------------------------------------------------------
% Other colors
% ----------------------------------------------------------------------
\definecolor{WhoccStatisticsTableTotal}{HTML}{008000}
"""
# ----------------------------------------------------------------------
T_Begin = r"""
% ****************************************************************************************************
% Document
% ----------------------------------------------------------------------
\begin{document}
\rmfamily
"""
# ----------------------------------------------------------------------
T_NoPageNumbering = r" \pagenumbering{gobble} "
T_NewPage = r"\newpage"
T_VSpace = r"\vspace{%em%em}"
T_Text_NoIndent = r"\noindent %text%"
T_Text = r"%text%"
# ----------------------------------------------------------------------
T_Section = r"\newpage \section{%title%}"
T_Subsection = r"\subsection{%title%}"
# ----------------------------------------------------------------------
T_SerumCirclesDescriptionEggCell = r"""%no-eol%
\newpage
\newgeometry{top=5em, bottom=5em, left=2.5em, right=10em}
\subsection{Serum circle description}
\vspace{1em}
% \small
\noindent
Serum circles shown in blue are for sera raised against cell-passaged viruses, in red for sera raised against egg-passaged viruses,
in orange for sera raised against reassortant viruses.
\vspace{1em}
\noindent
Strains outside a serum circle are $>$4-fold low-reactors to the homologous
titer for the serum.
\vspace{1em}
\noindent
Here we describe why serum circles have different radii. One might expect the
serum circle delimiting $>$4-fold low-reactors to have radius 2. However, this
would only be the case when the homologous titer is the same as the maximum
titer for a serum - and this is not always the case.
\vspace{1em}
\noindent
The theoretical (or "target") distance in an antigenic map from serum S to an
antigen A is $\log_2$ (max titer for serum S against any antigen) - $\log_2$
(titer for serum S against antigen A). In other words, the number of 2-folds
between the maximum titer for serum S, and the titer of serum S to antigen A.
\vspace{1em}
\noindent
Thus the theoretical distance between a serum and antigen is dependent on both
the maximum titer observed for the serum and its homologous titer.
\vspace{1em}
\noindent
If low reactors were defined as $>$4-fold from the max titer for a serum then
the theoretical radius for all serum circles would be 2 units, and this text
would not be necessary. But low reactors are defined as $>$4-fold from the
homologous titer, hence the radius is 2 units plus the number of 2-folds
between max titer and the homologous titer for a serum. Saying the same thing
mathematically the theoretical radius for a serum circle is 2 + $\log_2$ (max
titer for serum S against any antigen A) - $\log_2$ (homologous titer for
serum S).
\vspace{1em}
\noindent
In addition to the theoretical serum circle radius, we also calculate an
empirical radius. The difference is that the theoretical radius is calculated
from the target distance between sera and antigens as specified by the HI
titers, whereas the empirical radius is determined from the antigenic map and
thus the actual distances in the antigenic map between the sera and antigens.
There are some extra details about the empirical calculation but they are not
central, and are omitted here.
\vspace{1em}
\noindent
The theoretical and empirical radii are similar to each other, and are
on average only about 0.5 antigenic units different. The empirical
radius is the one shown on the antigenic maps.
\vspace{1em}
\noindent
The center of a serum circle for serum S is at the serum point in the map
for serum S.
\restoregeometry
"""
# ======================================================================
| 37.081911 | 260 | 0.56093 |
794f7b3645f428521e2355d06352e4f49d1b9daf | 3,335 | py | Python | django_hello_world/django_hello_world/settings.py | sferhan/django_docker_walkthrough | 5998860cc8b357647ba9b57fd653f975ff94c6b8 | [
"MIT"
] | null | null | null | django_hello_world/django_hello_world/settings.py | sferhan/django_docker_walkthrough | 5998860cc8b357647ba9b57fd653f975ff94c6b8 | [
"MIT"
] | null | null | null | django_hello_world/django_hello_world/settings.py | sferhan/django_docker_walkthrough | 5998860cc8b357647ba9b57fd653f975ff94c6b8 | [
"MIT"
] | null | null | null | """
Django settings for django_hello_world project.
Generated by 'django-admin startproject' using Django 4.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
from dotenv import load_dotenv
import dj_database_url
import os
load_dotenv()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-&e!@xm_&*r*e7oy#ggveok9g8&+8rt7x0mv)il-d8(#87hael+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_hello_world.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_hello_world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(conn_max_age=500),
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
SERVER_NAME = os.environ.get('SERVER_NAME', 'unknown')
| 26.468254 | 91 | 0.716042 |
794f7bccab9bc428b07c8f159f3ed7a575af8385 | 95,156 | py | Python | hata/discord/guild/guild.py | catzoo/hata | 49a55f4e02f4f8b5f3adcebc51952b612b4acb73 | [
"0BSD"
] | null | null | null | hata/discord/guild/guild.py | catzoo/hata | 49a55f4e02f4f8b5f3adcebc51952b612b4acb73 | [
"0BSD"
] | null | null | null | hata/discord/guild/guild.py | catzoo/hata | 49a55f4e02f4f8b5f3adcebc51952b612b4acb73 | [
"0BSD"
] | null | null | null | __all__ = ('COMMUNITY_FEATURES', 'EMOJI_UPDATE_DELETE', 'EMOJI_UPDATE_EDIT', 'EMOJI_UPDATE_CREATE',
'EMOJI_UPDATE_NONE', 'Guild', 'LARGE_GUILD_LIMIT', 'STICKER_UPDATE_DELETE', 'STICKER_UPDATE_EDIT',
'STICKER_UPDATE_CREATE', 'STICKER_UPDATE_NONE', 'VOICE_STATE_JOIN', 'VOICE_STATE_LEAVE', 'VOICE_STATE_NONE',
'VOICE_STATE_MOVE', 'VOICE_STATE_UPDATE')
from re import compile as re_compile, I as re_ignore_case, escape as re_escape
from ...env import CACHE_PRESENCE, CACHE_USER
from ...backend.utils import WeakValueDictionary
from ...backend.export import export, include
from ..bases import DiscordEntity, IconSlot, ICON_TYPE_NONE
from ..core import GUILDS
from ..utils import EMOJI_NAME_RP, DATETIME_FORMAT_CODE
from ..user import User, create_partial_user_from_id, VoiceState, ZEROUSER, ClientUserBase
from ..role import Role
from ..channel import CHANNEL_TYPE_MAP, ChannelCategory, ChannelText, ChannelGuildUndefined
from ..permission import Permission
from ..permission.permission import PERMISSION_NONE, PERMISSION_ALL, PERMISSION_MASK_ADMINISTRATOR
from ..emoji import Emoji
from ..oauth2.helpers import parse_preferred_locale, DEFAULT_LOCALE
from ..preconverters import preconvert_snowflake, preconvert_str, preconvert_preinstanced_type
from .preinstanced import GuildFeature, VoiceRegion, VerificationLevel, MessageNotificationLevel, MFA, \
ContentFilterLevel, NsfwLevel
from ..sticker import Sticker, StickerFormat
from ..http import urls as module_urls
from .flags import SystemChannelFlag
VoiceClient = include('VoiceClient')
Client = include('Client')
Stage = include('Stage')
trigger_voice_client_ghost_event = include('trigger_voice_client_ghost_event')
LARGE_GUILD_LIMIT = 250 # can be between 50 and 250
EMOJI_UPDATE_NONE = 0
EMOJI_UPDATE_CREATE = 1
EMOJI_UPDATE_DELETE = 2
EMOJI_UPDATE_EDIT = 3
STICKER_UPDATE_NONE = 0
STICKER_UPDATE_CREATE = 1
STICKER_UPDATE_DELETE = 2
STICKER_UPDATE_EDIT = 3
VOICE_STATE_NONE = 0
VOICE_STATE_JOIN = 1
VOICE_STATE_LEAVE = 2
VOICE_STATE_UPDATE = 3
VOICE_STATE_MOVE = 4
STICKER_FORMAT_STATIC = StickerFormat.png
STICKER_FORMAT_ANIMATED = StickerFormat.apng
STICKER_FORMAT_LOTTIE = StickerFormat.lottie
COMMUNITY_FEATURES = frozenset((
GuildFeature.community,
GuildFeature.discoverable,
GuildFeature.public,
))
if CACHE_USER:
GUILD_USERS_TYPE = dict
else:
GUILD_USERS_TYPE = WeakValueDictionary
MAX_PRESENCES_DEFAULT = 0
MAX_USERS_DEFAULT = 250000
MAX_VIDEO_CHANNEL_USERS_DEFAULT = 25
def user_date_sort_key(item):
"""
Sort key used inside ``Guild.get_users_like_ordered`` and in ``Guild.boosters`` to sort users by a specified date.
Parameters
----------
item : `tuple` of (`datetime`, ``ClientUserBase``)
The user and its specific date.
Returns
-------
date : `datetime`
"""
return item[0]
# discord does not send `widget_channel_id`, `widget_enabled`, `max_presences`, `max_users` correctly and that is sad.
@export
class Guild(DiscordEntity, immortal=True):
"""
Represents a Discord guild (or server).
Attributes
----------
id : `int`
The unique identifier number of the guild.
_boosters : `None` or `list` of ``ClientUserBase`` objects
Cached slot for the boosters of the guild.
_permission_cache : `None` or `dict` of (`int`, ``Permission``) items
A `user_id` to ``Permission`` relation mapping for caching permissions. Defaults to `None`.
afk_channel_id : `int`
The guild's afk channel's identifier, if it has one.
Defaults to `0`.
afk_timeout : `int`
The afk timeout at the `afk_channel`. Can be `60`, `300`, `900`, `1800`, `3600` in seconds.
approximate_online_count : `int`
The approximate amount of online users at the respective guild. Set as `0` if not yet requested.
approximate_user_count : `int`
The approximate amount of users at the respective guild. Set as `0` if not yet requested.
available : `bool`
Whether the guild is available.
banner_hash : `int`
The guild's banner's hash in `uint128`.
banner_type : ``IconType``
The guild's banner's type.
booster_count : `int`
The total number of boosts of the guild.
channels : `dict` of (`int`, ``ChannelGuildBase`` instance) items
The channels of the guild stored in `channel_id` - `channel` relation.
clients : `list` of ``Client``
The loaded clients that are members of the guild. If no client is a member of the guild, it is partial.
content_filter : ``ContentFilterLevel``
The explicit content filter level of the guild.
description : `None` or `str`
Description of the guild. The guild must be a Community guild.
discovery_splash_hash : `int`
The guild's discovery splash's hash in `uint128`. The guild must be discoverable.
discovery_splash_type : ``IconType``
The guild's discovery splash's type.
emojis : `dict` of (`int`, ``Emoji``) items
The emojis of the guild stored in `emoji_id` - `emoji` relation.
features : `list` of ``GuildFeature``
The guild's features.
icon_hash : `int`
The guild's icon's hash in `uint128`.
icon_type : ``IconType``
The guild's icon's type.
invite_splash_hash : `int`
The guild's invite splash's hash in `uint128`. The guild must have `INVITE_SPLASH` feature.
invite_splash_type : ``IconType``
The guild's invite splash's type.
is_large : `bool`
Whether the guild is considered as a large one.
max_presences : `int`
The maximal amount of presences for the guild. If not received defaults to `0`. Only applicable for very large
guilds.
max_users : `int`
The maximal amount of users for the guild.
max_video_channel_users : `int`
The maximal amount of users in a video channel(?).
message_notification : ``MessageNotificationLevel``
The message notification level of the guild.
mfa : ``MFA``
The required Multi-factor authentication level for the guild.
name : `str`
The name of the guild.
nsfw_level : `bool`
The guild's nsfw level.
owner_id : `int`
The guild's owner's id. Defaults to `0`.
preferred_locale : `str`
The preferred language of the guild. The guild must be a Community guild, defaults to `'en-US'`.
premium_tier : `int`
The premium tier of the guild. More subs = higher tier.
public_updates_channel_id : `int`
The channel's identifier where the guild's public updates should go. The guild must be a `community` guild.
Defaults to `0`.
region : ``VoiceRegion``
The voice region of the guild.
roles : `dict` of (`int`, ``Role``) items
The roles of the guild stored in `role_id` - `role` relation.
rules_channel_id : `int`
The channel's identifier where the rules of a public guild's should be.
The guild must be a `community` guild.
stages : `None` or `dict` of (`int`, ``Stage``) items
Active stages of the guild. Defaults to `None` if would be empty.
stickers : `dict` of (`int`, ``Sticker``) items
Stickers of the guild.
system_channel_id : `int`
The channel's identifier where the system messages are sent.
Defaults to `0`.
system_channel_flags : ``SystemChannelFlag``
Describe which type of messages are sent automatically to the system channel.
threads : `dict` of (`int`, ``ChannelThread``)
Thread channels of the guild.
user_count : `int`
The amount of users at the guild.
users : `dict` or ``WeakValueDictionary`` of (`int`, ``ClientUserBase``) items
The users at the guild stored within `user_id` - `user` relation.
vanity_code : `None` or `str`
The guild's vanity invite's code if it has.
verification_level : ``VerificationLevel``
The minimal verification level needed to join the guild.
voice_states : `dict` of (`int`, ``VoiceState``) items
Each user at a voice channel is represented by a ``VoiceState`` object. Voice states are stored in
`respective user's id` - `voice state` relation.
widget_channel_id : `int`
The channel's identifier for which the guild's widget is for.
Defaults to `0`.
widget_enabled : `bool`
Whether the guild's widget is enabled. Linked to ``.widget_channel``.
Notes
-----
When a guild is loaded first time, some of its attributes might not reflect their real value. These are the
following:
- ``.max_presences``
- ``.max_users``
- ``.widget_channel_id``
- ``.widget_enabled``
"""
__slots__ = ('_boosters', '_permission_cache', 'afk_channel_id', 'afk_timeout', 'approximate_online_count',
'approximate_user_count', 'available', 'booster_count', 'channels', 'clients', 'content_filter', 'description',
'emojis', 'features', 'is_large', 'max_presences', 'max_users', 'max_video_channel_users',
'message_notification', 'mfa', 'name', 'nsfw_level', 'owner_id', 'preferred_locale', 'premium_tier',
'public_updates_channel_id', 'region', 'roles', 'roles', 'rules_channel_id', 'stages', 'stickers',
'system_channel_id', 'system_channel_flags', 'threads', 'user_count', 'users', 'vanity_code',
'verification_level', 'voice_states', 'widget_channel_id', 'widget_enabled')
banner = IconSlot(
'banner',
'banner',
module_urls.guild_banner_url,
module_urls.guild_banner_url_as,
)
icon = IconSlot(
'icon',
'icon',
module_urls.guild_icon_url,
module_urls.guild_icon_url_as,
)
invite_splash = IconSlot(
'invite_splash',
'splash',
module_urls.guild_invite_splash_url,
module_urls.guild_invite_splash_url_as,
)
discovery_splash = IconSlot(
'discovery_splash',
'discovery_splash',
module_urls.guild_discovery_splash_url,
module_urls.guild_discovery_splash_url_as,
)
def __new__(cls, data, client):
"""
Tries to find the guild from the already existing ones. If it cannot find it, creates a new one. If the found
guild is partial (or freshly created), sets its attributes from the given `data`. If the guild is not
added to the client's guild profiles yet, adds it, and adds the client to the guild's `.clients` as well.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Received guild data.
client : ``Client``
The client who received the guild's data.
Returns
-------
guild : ``Guild``
"""
guild_id = int(data['id'])
try:
self = GUILDS[guild_id]
update = (not self.clients)
except KeyError:
self = object.__new__(cls)
GUILDS[guild_id] = self
self.id = guild_id
self.clients = []
self.users = GUILD_USERS_TYPE()
self.emojis = {}
self.voice_states = {}
self.roles = {}
self.channels = {}
self.features = []
self.threads = {}
self.stickers = {}
self._permission_cache = None
self._boosters = None
self.user_count = 1
self.approximate_online_count = 0
self.approximate_user_count = 0
self.stages = None
update = True
self.available = (not data.get('unavailable', False))
if update:
try:
user_count = data['member_count']
except KeyError:
pass
else:
self.user_count = user_count
self.booster_count = -1
try:
is_large = data['large']
except KeyError:
is_large = (self.user_count >= LARGE_GUILD_LIMIT)
self.is_large = is_large
try:
role_datas = data['roles']
except KeyError:
pass
else:
for role_data in role_datas:
Role(role_data, self)
try:
emoji_datas = data['emojis']
except KeyError:
pass
else:
emojis = self.emojis
for emoji_data in emoji_datas:
emoji = Emoji(emoji_data, self)
emojis[emoji.id] = emoji
try:
sticker_datas = data['stickers']
except KeyError:
pass
else:
stickers = self.stickers
for sticker_data in sticker_datas:
sticker = Sticker(sticker_data)
stickers[sticker.id] = sticker
try:
channel_datas = data['channels']
except KeyError:
pass
else:
later = []
for channel_data in channel_datas:
channel_type = CHANNEL_TYPE_MAP.get(channel_data['type'], ChannelGuildUndefined)
if channel_type is ChannelCategory:
channel_type(channel_data, client, guild_id)
else:
later.append((channel_type, channel_data),)
for channel_type, channel_data in later:
channel_type(channel_data, client, guild_id)
self._update_attributes(data)
if CACHE_PRESENCE:
try:
user_datas = data['members']
except KeyError:
pass
else:
for user_data in user_datas:
User(user_data, self)
# If user caching is disabled, then presence caching is too.
try:
presence_data = data['presences']
except KeyError:
pass
else:
self._apply_presences(presence_data)
try:
voice_state_datas = data['voice_states']
except KeyError:
pass
else:
for voice_state_data in voice_state_datas:
VoiceState(voice_state_data, self.id)
try:
thread_datas = data['threads']
except KeyError:
pass
else:
for thread_data in thread_datas:
CHANNEL_TYPE_MAP.get(thread_data['type'], ChannelGuildUndefined)(thread_data, client, guild_id)
stage_datas = data.get('stage_instances', None)
if (stage_datas is not None) and stage_datas:
for stage_data in stage_datas:
Stage(stage_data)
if (not CACHE_PRESENCE):
# we get information about the client here
try:
user_datas = data['members']
except KeyError:
pass
else:
for user_data in user_datas:
User._bypass_no_cache(user_data, self)
if client not in self.clients:
try:
ghost_state = self.voice_states[client.id]
except KeyError:
pass
else:
trigger_voice_client_ghost_event(client, ghost_state)
self.clients.append(client)
client.guilds.add(self)
return self
@classmethod
def precreate(cls, guild_id, **kwargs):
"""
Precreates the guild with the given parameters. Precreated guilds are picked up when a guild's data is received
with the same id.
First tries to find whether a guild exists with the given id. If it does and it is partial, updates it with the
given parameters, else it creates a new one.
Parameters
----------
guild_id : `snowflake`
The guild's id.
**kwargs : keyword parameters
Additional predefined attributes for the guild.
Other Parameters
----------------
name : `str`, Optional (Keyword only)
The guild's ``.name``.
banner : `None`, ``Icon`` or `str`, Optional (Keyword only)
The guild's banner.
> Mutually exclusive with `banner_type` and `banner_hash` parameters.
banner_type : ``IconType``, Optional (Keyword only)
The guild's banner's type.
> Mutually exclusive with the `banner` parameter.
banner_hash : `int`, Optional (Keyword only)
The guild's banner's hash.
> Mutually exclusive with the `banner` parameter.
invite_splash : `None`, ``Icon`` or `str`, Optional (Keyword only)
The guild's invite splash.
> Mutually exclusive with the `invite_splash_type` and `invite_splash_hash` parameters.
invite_splash_type : ``IconType``, Optional (Keyword only)
The guild's invite splash's type.
> Mutually exclusive with the `invite_splash` parameter.
invite_splash_hash : `int`, Optional (Keyword only)
The guild's invite splash's hash.
> Mutually exclusive with the `invite_splash` parameter.
discovery_splash : `None`, ``Icon`` or `str`, Optional (Keyword only)
The guild's discovery splash.
> Mutually exclusive with the `discovery_splash_type` and `discovery_splash_hash` parameters.
discovery_splash_type : ``IconType``, Optional (Keyword only)
The guild's discovery splash's type.
> Mutually exclusive with the `discovery_splash` parameter.
discovery_splash_hash : `int`, Optional (Keyword only)
The guild's discovery splash's hash.
> Mutually exclusive with the `discovery_splash` parameter.
icon : `None`, ``Icon`` or `str`, Optional (Keyword only)
The guild's icon.
> Mutually exclusive with `icon_type` and `icon_hash`.
icon_type : ``IconType``, Optional (Keyword only)
The guild's icon's type.
> Mutually exclusive with `icon`.
icon_hash : `int`, Optional (Keyword only)
The guild's icon's hash.
> Mutually exclusive with `icon`.
region : ``VoiceRegion`` or `str`, Optional (Keyword only)
The guild's voice region.
nsfw_level : ``NsfwLevel``, Optional (Keyword only)
The nsfw level of the guild.
Returns
-------
guild : ``Guild``
Raises
------
TypeError
If any parameter's type is invalid, or if an unexpected parameter is passed.
ValueError
If a parameter's type is valid, but its value is unacceptable.
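Examples
--------
A minimal usage sketch; the guild name below is a placeholder:
```py
>>> from hata import Guild, now_as_id
>>> guild = Guild.precreate(now_as_id(), name='Grass field')
>>> guild.partial
True
```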
"""
guild_id = preconvert_snowflake(guild_id, 'guild_id')
if kwargs:
processable = []
try:
name = kwargs.pop('name')
except KeyError:
pass
else:
name = preconvert_str(name, 'name', 2, 100)
processable.append(('name', name))
cls.icon.preconvert(kwargs, processable)
cls.banner.preconvert(kwargs, processable)
cls.invite_splash.preconvert(kwargs, processable)
cls.discovery_splash.preconvert(kwargs, processable)
for attribute_name, attribute_type in (
('region', VoiceRegion),
('nsfw_level', NsfwLevel),
):
try:
attribute_value = kwargs.pop(attribute_name)
except KeyError:
pass
else:
attribute_value = preconvert_preinstanced_type(attribute_value, attribute_name, attribute_type)
processable.append((attribute_name, attribute_value))
if kwargs:
raise TypeError(f'Unused or unsettable attributes: {kwargs}')
else:
processable = None
try:
self = GUILDS[guild_id]
except KeyError:
self = cls._create_empty(guild_id)
GUILDS[guild_id] = self
else:
if self.clients:
return self
if (processable is not None):
for item in processable:
setattr(self, *item)
return self
@classmethod
def _create_empty(cls, guild_id):
"""
Creates a guild with default parameters set.
Parameters
----------
guild_id : `int`
The guild's identifier.
Returns
-------
self : ``Guild``
"""
self = object.__new__(cls)
self._boosters = None
self._permission_cache = None
self.afk_channel_id = 0
self.afk_timeout = 0
self.channels = {}
self.roles = {}
self.available = False
self.banner_hash = 0
self.banner_type = ICON_TYPE_NONE
self.booster_count = -1
self.clients = []
self.content_filter = ContentFilterLevel.disabled
self.description = None
self.discovery_splash_hash = 0
self.discovery_splash_type = ICON_TYPE_NONE
self.emojis = {}
self.features = []
self.icon_hash = 0
self.icon_type = ICON_TYPE_NONE
self.id = guild_id
self.is_large = False
self.max_presences = MAX_PRESENCES_DEFAULT
self.max_users = MAX_USERS_DEFAULT
self.max_video_channel_users = MAX_VIDEO_CHANNEL_USERS_DEFAULT
self.message_notification = MessageNotificationLevel.only_mentions
self.mfa = MFA.none
self.name = ''
self.owner_id = 0
self.preferred_locale = DEFAULT_LOCALE
self.premium_tier = 0
self.public_updates_channel_id = 0
self.region = VoiceRegion.eu_central
self.rules_channel_id = 0
self.invite_splash_hash = 0
self.invite_splash_type = ICON_TYPE_NONE
self.system_channel_id = 0
self.system_channel_flags = SystemChannelFlag.NONE
self.approximate_user_count = 1
self.users = GUILD_USERS_TYPE()
self.vanity_code = None
self.verification_level = VerificationLevel.none
self.voice_states = {}
self.widget_channel_id = 0
self.widget_enabled = False
self.user_count = 0
self.approximate_online_count = 0
self.approximate_user_count = 0
self.threads = {}
self.stages = None
self.nsfw_level = NsfwLevel.none
self.stickers = {}
return self
def __repr__(self):
"""Returns the guild's representation."""
repr_parts = [
'<', self.__class__.__name__,
' id=', repr(self.id),
]
if self.partial:
repr_parts.append(' (partial)')
name = self.name
if name:
repr_parts.append(', name=')
repr_parts.append(repr(name))
repr_parts.append('>')
return ''.join(repr_parts)
def __del__(self):
"""Clears up the guild profile references of the guild."""
users = self.users
if users:
guild_id = self.id
for user in users.values():
try:
del user.guild_profiles[guild_id]
except KeyError:
pass
users.clear()
def __format__(self, code):
"""
Formats the guild in a format string.
Parameters
----------
code : `str`
The option on based the result will be formatted.
Returns
-------
guild : `str`
Raises
------
ValueError
Unknown format code.
Examples
--------
```py
>>> from hata import Guild, now_as_id
>>> guild = Guild.precreate(now_as_id(), name='GrassGrass')
>>> guild
<Guild name='GrassGrass', id=713718885970345984 (partial)>
>>> # no code stands for `guild.name`
>>> f'{guild}'
'GrassGrass'
>>> # 'c' stands for created at.
>>> f'{guild:c}'
'2020-05-23 11:44:02'
```
"""
if not code:
return self.name
if code == 'c':
return self.created_at.__format__(DATETIME_FORMAT_CODE)
raise ValueError(f'Unknown format code {code!r} for object of type {self.__class__.__name__!r}')
widget_url = module_urls.guild_widget_url
def _delete(self, client):
"""
When a client leaves (gets kicked or banned from) a guild, this method is called. If the guild loses its last
active client, then its references are cleared.
Parameters
----------
client : ``Client``
The client, who left the guild.
"""
clients = self.clients
try:
clients.remove(client)
except ValueError:
pass
client.guilds.discard(self)
if clients:
return
self.threads.clear()
self.channels.clear()
self.emojis.clear()
self.stickers.clear()
self.voice_states.clear()
users = self.users
guild_id = self.id
for user in users.values():
if isinstance(user, User):
try:
del user.guild_profiles[guild_id]
except KeyError:
pass
users.clear()
self.roles.clear()
self._boosters = None
def _update_voice_state(self, data, user):
"""
Called by dispatch event. Updates the voice state of the given user with the given `data`.
This method is an iterable generator.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Data received from Discord.
user : ``ClientUserBase``
The respective user.
Yields
-------
action : `int`
The respective action.
Can be one of the following:
+-----------------------+-------+
| Respective name | Value |
+=======================+=======+
| VOICE_STATE_NONE | 0 |
+-----------------------+-------+
| VOICE_STATE_JOIN | 1 |
+-----------------------+-------+
| VOICE_STATE_LEAVE | 2 |
+-----------------------+-------+
| VOICE_STATE_UPDATE | 3 |
+-----------------------+-------+
| VOICE_STATE_MOVE | 4 |
+-----------------------+-------+
voice_state : `None` or ``VoiceState``
The user's respective voice state.
Will be returned as `None` if action is `VOICE_STATE_NONE`.
old_attributes / old_channel_id : `None` or (`dict` of (`str`, `Any`) items / `int`)
If `action` is `VOICE_STATE_UPDATE`, then `old_attributes` is returned as a `dict` containing the changed
attributes in `attribute-name` - `old-value` relation. Every item in the returned dictionary is optional.
+---------------+-------------------+
| Keys | Values |
+===============+===================+
| deaf | `bool` |
+---------------+-------------------+
| mute | `bool` |
+---------------+-------------------+
| self_deaf | `bool` |
+---------------+-------------------+
| self_mute | `bool` |
+---------------+-------------------+
| self_stream | `bool` |
+---------------+-------------------+
| self_video | `bool` |
+---------------+-------------------+
If `action` is `VOICE_STATE_LEAVE` or `VOICE_STATE_MOVE`, then the old channel's identifier is returned.
"""
try:
voice_state = self.voice_states[user.id]
except KeyError:
voice_state = VoiceState(data, self.id)
if (voice_state is not None):
voice_state._set_cache_user(user)
yield VOICE_STATE_JOIN, voice_state, None
else:
voice_state._set_cache_user(user)
old_channel_id, new_channel_id = voice_state._update_channel(data)
if new_channel_id == 0:
yield VOICE_STATE_LEAVE, voice_state, old_channel_id
old_attributes = voice_state._difference_update_attributes(data)
if old_attributes:
yield VOICE_STATE_UPDATE, voice_state, old_attributes
if old_channel_id != new_channel_id:
yield VOICE_STATE_MOVE, voice_state, old_channel_id
def _update_voice_state_restricted(self, data, user):
"""
Called by dispatch event. Updates the voice state of the given user with the given `data`.
This method is an iterable generator.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Data received from Discord.
user : ``ClientUserBase``
The respective user.
"""
try:
voice_state = self.voice_states[user.id]
except KeyError:
voice_state = VoiceState(data, self.id)
if (voice_state is not None):
voice_state._set_cache_user(user)
else:
voice_state._set_cache_user(user)
voice_state._update_channel(data)
voice_state._update_attributes(data)
@property
def text_channels(self):
"""
Returns the text channels of the guild. Announcement channels are not included.
Returns
-------
channels : `list` of ``ChannelText``
"""
return [channel for channel in self.channels.values() if channel.type == 0]
@property
def voice_channels(self):
"""
Returns the voice channels of the guild.
Returns
-------
channels : `list` of ``ChannelVoice``
"""
return [channel for channel in self.channels.values() if channel.type == 2]
@property
def category_channels(self):
"""
Returns the category channels of the guild.
Returns
-------
channels : `list` of ``ChannelCategory``
"""
return [channel for channel in self.channels.values() if channel.type == 4]
@property
def announcement_channels(self):
"""
Returns the announcement channels of the guild.
Returns
-------
channels : `list` of ``ChannelText``
"""
return [channel for channel in self.channels.values() if channel.type == 5]
@property
def store_channels(self):
"""
Returns the store channels of the guild.
Returns
-------
channels : `list` of ``ChannelStore``
"""
return [channel for channel in self.channels.values() if channel.type == 6]
@property
def thread_channels(self):
"""
Returns the thread channels of the guild.
Returns
-------
channels : `list` of ``ChannelThread``
"""
return list(self.threads.values())
@property
def stage_channels(self):
"""
Returns the stage channels of the guild.
Returns
-------
channels : `list` of ``ChannelVoiceBase``
"""
return [channel for channel in self.channels.values() if channel.type == 13]
@property
def messageable_channels(self):
"""
Returns the messageable channels of the guild.
Returns
-------
channels : `list` of ``ChannelText``
"""
return [channel for channel in self.channels.values() if channel.type in (0, 5)]
@property
def connectable_channels(self):
"""
Returns the connectable channels of the guild.
Returns
-------
channels : `list` of ``ChannelVoiceBase``
"""
return [channel for channel in self.channels.values() if channel.type in (2, 13)]
@property
def default_role(self):
"""
Returns the default role of the guild (`@everyone`).
Might return `None` in the case of partial guilds.
Returns
-------
default_role : `None` or ``Role``
"""
return self.roles.get(self.id, None)
@property
def partial(self):
"""
Returns whether the guild is partial.
A guild is partial, if it has no active clients.
Returns
-------
partial : `bool`
"""
return (not self.clients)
def _sync(self, data):
"""
Syncs the guild with the requested guild data.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Received guild data.
"""
try:
is_large = data['large']
except KeyError:
is_large = (self.approximate_user_count >= LARGE_GUILD_LIMIT)
self.is_large = is_large
self._update_attributes(data)
try:
role_datas = data['roles']
except KeyError:
pass
else:
self._sync_roles(role_datas)
try:
emoji_datas = data['emojis']
except KeyError:
pass
else:
self._sync_emojis(emoji_datas)
## #sadly we don't get voice states with guild_get
## try:
## voice_state_datas=data['voice_states']
## except KeyError:
## self.voice_states.clear()
## else:
## old_voice_states=self.voice_states
## new_voice_states=self.voice_states={}
##
## for voice_state_data in voice_state_datas:
## user=create_partial_user_from_id(int(voice_state_data['user_id']))
##
## channel_id=voice_state_data.get('channel_id',None)
## if channel_id is None:
## continue
## channel=self.channels[int(channel_id)]
##
## try:
## voice_state=old_voice_states[user.id]
## except KeyError:
## new_voice_states[user.id]=VoiceState(voice_state_data,channel)
## continue
##
## voice_state._update_attributes(voice_state_data,channel)
## new_voice_states[user.id]=voice_state
def _apply_presences(self, data):
"""
Applies the presences to the guild's users. Called when the guild is created, or when a user chunk is received if
presence caching is enabled.
Parameters
----------
data : `list` of (`dict` of (`str`, `Any`) items)
Guild's users' presences' data.
"""
users = self.users
for presence_data in data:
user_id = int(presence_data['user']['id'])
try:
user = users[user_id]
except KeyError:
pass
else:
user._update_presence(presence_data)
def _sync_channels(self, data):
"""
Syncs the guild's channels with the given guild channel datas.
Parameters
----------
data : `list` of (`dict` of (`str`, `Any`) items)
Received guild channel datas.
"""
channels = self.channels
old_ids = set(channels)
later = []
for channel_data in data:
channel_type = CHANNEL_TYPE_MAP.get(channel_data['type'], ChannelGuildUndefined)
if channel_type is ChannelCategory:
#categories
channel = channel_type(channel_data, None, self.id)
channel_id = channel.id
try:
old_ids.remove(channel_id)
except KeyError:
pass
else:
#old channel -> update
channel._update_attributes(channel_data)
else:
later.append((channel_type, channel_data),)
#non category channels
for channel_type, channel_data in later:
channel = channel_type(channel_data, None, self.id)
channel_id = channel.id
try:
old_ids.remove(channel_id)
except KeyError:
pass
else:
#old channel -> update
channel._update_attributes(channel_data)
# deleting
for channel_id in old_ids:
channels[channel_id]._delete()
def _sync_roles(self, data):
"""
Syncs the guild's roles with the given guild role datas.
Parameters
----------
data : `list` of (`dict` of (`str`, `Any`) items)
Received guild role datas.
"""
roles = self.roles
old_ids = set(roles)
# every new role can cause mass switchings at the role orders, can it mess up the order tho?
for role_data in data:
role = Role(role_data, self)
try:
old_ids.remove(role.id)
role._update_attributes(role_data)
except KeyError:
pass
for role_id in old_ids:
roles[role_id]._delete()
def get_user(self, name, default=None):
"""
Tries to find a user with the given name in the guild. Returns the first match.
The search order is the following:
- `full_name`
- `name`
- `nick`
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no user is found. Defaults to `None`.
Returns
-------
user : ``ClientUserBase`` or `default`
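Examples
--------
A usage sketch; `guild` and the searched name are placeholders:
```py
>>> user = guild.get_user('Koishi#3049')
>>> # the full `name#discriminator` form is matched first; `None` (the `default`) is returned when nobody matches
```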
"""
if (not 1 < len(name) < 38):
return default
users = self.users
if len(name) > 6 and name[-5] == '#':
try:
discriminator = int(name[-4:])
except ValueError:
pass
else:
name = name[:-5]
for user in users.values():
if (user.discriminator == discriminator) and (user.name == name):
return user
if len(name) > 32:
return default
for user in users.values():
if user.name == name:
return user
guild_id = self.id
for user in users.values():
nick = user.guild_profiles[guild_id].nick
if nick is None:
continue
if nick == name:
return user
return default
def get_user_like(self, name, default=None):
"""
Searches a user whose name or nick starts with the given string and returns the first find. Also matches full
name.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no user is found. Defaults to `None`.
Returns
-------
user : ``ClientUserBase`` or `default`
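Examples
--------
A usage sketch; `guild` and the searched prefix are placeholders:
```py
>>> user = guild.get_user_like('koi')
>>> # the first user whose name or nick starts with `'koi'` (case insensitive)
```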
"""
if (not 1 < len(name) < 38):
return default
users = self.users
if len(name) > 6 and name[-5] == '#':
try:
discriminator = int(name[-4:])
except ValueError:
pass
else:
name_ = name[:-5]
for user in users.values():
if (user.discriminator == discriminator) and (user.name == name_):
return user
if len(name) > 32:
return default
pattern = re_compile(re_escape(name), re_ignore_case)
guild_id = self.id
for user in self.users.values():
if (pattern.match(user.name) is not None):
return user
nick = user.guild_profiles[guild_id].nick
if nick is None:
continue
if pattern.match(nick) is None:
continue
return user
return default
def get_users_like(self, name):
"""
Searches the users whose name or nick starts with the given string.
Parameters
----------
name : `str`
The name to search for.
Returns
-------
users : `list` of ``ClientUserBase`` objects
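Examples
--------
A usage sketch; `guild` and the searched prefix are placeholders:
```py
>>> users = guild.get_users_like('nyan')
>>> # every user whose name or nick starts with `'nyan'` (case insensitive)
```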
"""
result = []
if (not 1 < len(name) < 38):
return result
users = self.users
if len(name) > 6 and name[-5] == '#':
try:
discriminator = int(name[-4:])
except ValueError:
pass
else:
name_ = name[:-5]
for user in users.values():
if (user.discriminator == discriminator) and (user.name == name_):
result.append(user)
break
if len(name) > 32:
return result
pattern = re_compile(re_escape(name), re_ignore_case)
guild_id = self.id
for user in self.users.values():
if pattern.match(user.name) is None:
nick = user.guild_profiles[guild_id].nick
if nick is None:
continue
if pattern.match(nick) is None:
continue
result.append(user)
return result
def get_users_like_ordered(self, name):
"""
Searches the users whose name or nick starts with the given string. Also orders them the same way as
Discord orders them when requesting a guild member chunk.
Parameters
----------
name : `str`
The name to search for.
Returns
-------
users : `list` of ``ClientUserBase`` objects
"""
to_sort = []
if (not 1 < len(name) < 33):
return to_sort
pattern = re_compile(re_escape(name), re_ignore_case)
guild_id = self.id
for user in self.users.values():
profile = user.guild_profiles[guild_id]
if pattern.match(user.name) is None:
nick = profile.nick
if nick is None:
continue
if pattern.match(nick) is None:
continue
joined_at = profile.joined_at
if joined_at is None:
joined_at = user.created_at
to_sort.append((joined_at, user))
if not to_sort:
return to_sort
to_sort.sort(key=user_date_sort_key)
return [x[1] for x in to_sort]
def get_emoji(self, name, default=None):
"""
Searches an emoji of the guild whose name equals the given name.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no emoji is found. Defaults to `None`.
Returns
-------
emoji : ``Emoji`` or `default`
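Examples
--------
A usage sketch; `guild` and the emoji name are placeholders:
```py
>>> emoji = guild.get_emoji('ayaya')
>>> # `None` (the `default`) is returned when the guild has no emoji with this name
```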
"""
parsed = EMOJI_NAME_RP.fullmatch(name)
if (parsed is not None):
name = parsed.group(1)
for emoji in self.emojis.values():
if emoji.name == name:
return emoji
return default
def get_emoji_like(self, name, default=None):
"""
Searches an emoji of the guild whose name starts with the given string and returns the first find.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no emoji is found. Defaults to `None`.
Returns
-------
emoji : ``Emoji`` or `default`
"""
emoji_name_pattern = re_compile('.*?'.join(re_escape(char) for char in name), re_ignore_case)
accurate_emoji = default
accurate_match_start = 100
accurate_match_length = 100
for emoji in self.emojis.values():
emoji_name = emoji.name
parsed = emoji_name_pattern.search(emoji_name)
if parsed is None:
continue
match_start = parsed.start()
match_length = parsed.end() - match_start
if (match_length > accurate_match_length) or \
((match_length == accurate_match_length) and (match_start > accurate_match_start)):
continue
accurate_emoji = emoji
accurate_match_start = match_start
accurate_match_length = match_length
return accurate_emoji
def get_sticker(self, name, default=None):
"""
Searches a sticker of the guild whose name equals the given name.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no sticker is found. Defaults to `None`.
Returns
-------
sticker : ``Sticker`` or `default`
"""
for sticker in self.stickers.values():
if sticker.name == name:
return sticker
return default
def get_sticker_like(self, name, default=None):
"""
Searches a sticker of the guild whose name or one of its tags starts with the given name.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no sticker is found. Defaults to `None`.
Returns
-------
sticker : ``Sticker`` or `default`
"""
target_name_length = len(name)
pattern = re_compile(re_escape(name), re_ignore_case)
accurate_sticker = default
accurate_name_length = 120
for sticker in self.stickers.values():
sticker_name = sticker.name
name_length = len(sticker_name)
if name_length > accurate_name_length:
continue
if pattern.match(sticker_name) is None:
continue
if name_length < accurate_name_length:
accurate_sticker = sticker
accurate_name_length = name_length
if (name_length == target_name_length) and (name == sticker_name):
return sticker
continue
return accurate_sticker
def get_channel(self, name, default=None):
"""
Searches a channel of the guild whose name equals the given name.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no channel is found. Defaults to `None`.
Returns
-------
channel : ``ChannelGuildBase`` instance or `default`
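Examples
--------
A usage sketch; `guild` and the channel name are placeholders:
```py
>>> channel = guild.get_channel('#general')
>>> # the leading `'#'` is stripped, so `'general'` matches the same channel
```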
"""
if name.startswith('#'):
name = name[1:]
for channel in self.channels.values():
if channel.display_name == name:
return channel
for channel in self.channels.values():
if channel.name == name:
return channel
return default
def get_channel_like(self, name, default=None, type_=None):
"""
Searches a channel of the guild whose name starts with the given string and returns the first find.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no channel is found. Defaults to `None`.
type_ : `None`, `type`, `tuple` of `type`, Optional
Whether only specific channel type instances are accepted.
Returns
-------
channel : ``ChannelGuildBase`` instance or `default`
"""
if name.startswith('#'):
name = name[1:]
target_name_length = len(name)
if (target_name_length < 2) or (target_name_length > 100):
return default
pattern = re_compile(re_escape(name), re_ignore_case)
accurate_channel = default
accurate_name_length = 101
for channel in self.channels.values():
if (type_ is not None) and (not isinstance(channel, type_)):
continue
channel_name = channel.name
name_length = len(channel_name)
if name_length > accurate_name_length:
continue
if pattern.match(channel_name) is None:
continue
if name_length < accurate_name_length:
accurate_channel = channel
accurate_name_length = name_length
# Compare with display name
if (name_length == target_name_length) and (name == channel.display_name):
return channel
continue
return accurate_channel
def get_role(self, name, default=None):
"""
Searches a role of the guild whose name equals the given name.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no role is found. Defaults to `None`.
Returns
-------
role : ``Role`` or `default`
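Examples
--------
A usage sketch; `guild` and the role name are placeholders:
```py
>>> role = guild.get_role('Moderator')
>>> # `None` (the `default`) is returned when the guild has no role with this name
```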
"""
for role in self.roles.values():
if role.name == name:
return role
return default
def get_role_like(self, name, default=None):
"""
Searches a role of the guild whose name starts with the given string and returns the first find.
Parameters
----------
name : `str`
The name to search for.
default : `Any`, Optional
The value that is returned when no role is found. Defaults to `None`.
Returns
-------
role : ``Role`` or `default`
"""
target_name_length = len(name)
if (target_name_length < 2) or (target_name_length > 32):
return default
pattern = re_compile(re_escape(name), re_ignore_case)
accurate_role = default
accurate_name_length = 33
for role in self.roles.values():
role_name = role.name
name_length = len(role_name)
if name_length > accurate_name_length:
continue
if pattern.match(role_name) is None:
continue
if name_length < accurate_name_length:
accurate_role = role
accurate_name_length = name_length
if (name_length == target_name_length) and (name == role_name):
return role
continue
return accurate_role
def permissions_for(self, user):
"""
Returns the permissions for the given user at the guild.
Parameters
----------
user : ``UserBase`` instance
The user to calculate the permissions of.
Returns
-------
permissions : ``Permission``
The calculated permissions.
See Also
--------
``.cached_permissions_for`` : Cached permission calculator.
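Examples
--------
A usage sketch; `guild` and `user` are placeholder instances:
```py
>>> permissions = guild.permissions_for(user)
>>> # the guild's owner and administrators receive `PERMISSION_ALL`
```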
"""
guild_id = self.id
if not isinstance(user, ClientUserBase):
if user.channel_id in self.channels:
role_everyone = self.roles.get(guild_id, None)
if role_everyone is None:
permissions = PERMISSION_NONE
else:
permissions = role_everyone.permissions
return permissions
else:
return PERMISSION_NONE
if user.id == self.owner_id:
return PERMISSION_ALL
role_everyone = self.roles.get(guild_id, None)
if role_everyone is None:
permissions = 0
else:
permissions = role_everyone.permissions
try:
guild_profile = user.guild_profiles[guild_id]
except KeyError:
return PERMISSION_NONE
roles = guild_profile.roles
if (roles is not None):
for role in roles:
permissions |= role.permissions
if permissions&PERMISSION_MASK_ADMINISTRATOR:
return PERMISSION_ALL
return Permission(permissions)
def cached_permissions_for(self, user):
"""
Returns the permissions for the given user at the guild. If the user's permissions are not cached, calculates
and stores them first.
Parameters
----------
user : ``UserBase`` instance
The user to calculate the permissions of.
Returns
-------
permissions : ``Permission``
The calculated permissions.
Notes
-----
Mainly designed for getting clients' permissions and stores only those. Does not cache other users'
permissions.
"""
if not isinstance(user, Client):
return self.permissions_for(user)
permission_cache = self._permission_cache
if permission_cache is None:
self._permission_cache = permission_cache = {}
else:
try:
return permission_cache[user.id]
except KeyError:
pass
permissions = self.permissions_for(user)
permission_cache[user.id] = permissions
return permissions
def permissions_for_roles(self, *roles):
"""
Returns the permissions of an imaginary user who would have the listed roles.
Parameters
----------
*roles : ``Role``
The roles to calculate final permissions from.
Returns
-------
permissions : ``Permission``
The calculated permissions.
Notes
-----
Partial roles and roles from other guilds are ignored as well.
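Examples
--------
A usage sketch; `role_1` and `role_2` are placeholder roles of the guild:
```py
>>> permissions = guild.permissions_for_roles(role_1, role_2)
>>> # roles of other guilds passed here are ignored
```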
"""
default_role = self.roles.get(self.id, None)
if default_role is None:
permissions = 0
else:
permissions = default_role.permissions
roles = sorted(roles)
for role in roles:
if role.guild is self:
permissions |= role.permissions
if permissions&PERMISSION_MASK_ADMINISTRATOR:
return PERMISSION_ALL
return Permission(permissions)
def _difference_update_attributes(self, data):
"""
Updates the guild and returns its overwritten attributes as a `dict` with an `attribute-name` - `old-value`
relation.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Guild data received from Discord.
Returns
-------
old_attributes : `dict` of (`str`, `Any`) items
Every item in the returned dict is optional.
Returned Data Structure
-----------------------
+---------------------------+-------------------------------+
| Keys | Values |
+===========================+===============================+
| afk_channel_id | `int` |
+---------------------------+-------------------------------+
| afk_timeout | `int` |
+---------------------------+-------------------------------+
| available | `bool` |
+---------------------------+-------------------------------+
| banner | ``Icon`` |
+---------------------------+-------------------------------+
| booster_count | `int` |
+---------------------------+-------------------------------+
| content_filter | ``ContentFilterLevel`` |
+---------------------------+-------------------------------+
| description | `None` or `str` |
+---------------------------+-------------------------------+
| discovery_splash | ``Icon`` |
+---------------------------+-------------------------------+
| features | `list` of ``GuildFeature`` |
+---------------------------+-------------------------------+
| icon | ``Icon`` |
+---------------------------+-------------------------------+
| invite_splash | ``Icon`` |
+---------------------------+-------------------------------+
| max_presences | `int` |
+---------------------------+-------------------------------+
| max_users | `int` |
+---------------------------+-------------------------------+
| max_video_channel_users | `int` |
+---------------------------+-------------------------------+
| message_notification | ``MessageNotificationLevel`` |
+---------------------------+-------------------------------+
| mfa | ``MFA`` |
+---------------------------+-------------------------------+
| name | `str` |
+---------------------------+-------------------------------+
| nsfw_level | ``NsfwLevel`` |
+---------------------------+-------------------------------+
| owner_id | `int` |
+---------------------------+-------------------------------+
| preferred_locale | `str` |
+---------------------------+-------------------------------+
| premium_tier | `int` |
+---------------------------+-------------------------------+
| public_updates_channel_id | `int` |
+---------------------------+-------------------------------+
| region | ``VoiceRegion`` |
+---------------------------+-------------------------------+
| rules_channel_id | `int` |
+---------------------------+-------------------------------+
| system_channel_id | `int` |
+---------------------------+-------------------------------+
| system_channel_flags | ``SystemChannelFlag`` |
+---------------------------+-------------------------------+
| vanity_code | `None` or `str` |
+---------------------------+-------------------------------+
| verification_level | ``VerificationLevel`` |
+---------------------------+-------------------------------+
| widget_channel_id | `int` |
+---------------------------+-------------------------------+
| widget_enabled | `bool` |
+---------------------------+-------------------------------+
"""
old_attributes = {}
# ignoring 'roles'
# ignoring 'emojis'
# ignoring 'members'
# ignoring 'presence'
# ignoring 'channels'
# ignoring 'voice_states'
# ignoring 'user_count'
# ignoring 'large'
# ignoring 'stickers'
name = data['name']
if self.name != name:
old_attributes['name'] = self.name
self.name = name
self._update_icon(data, old_attributes)
self._update_invite_splash(data, old_attributes)
self._update_discovery_splash(data, old_attributes)
self._update_banner(data, old_attributes)
region = VoiceRegion.get(data['region'])
if self.region is not region:
old_attributes['region'] = self.region
self.region = region
afk_timeout = data['afk_timeout']
if self.afk_timeout != afk_timeout:
old_attributes['afk_timeout'] = self.afk_timeout
self.afk_timeout = afk_timeout
verification_level = VerificationLevel.get(data['verification_level'])
if self.verification_level is not verification_level:
old_attributes['verification_level'] = self.verification_level
self.verification_level = verification_level
message_notification = MessageNotificationLevel.get(data['default_message_notifications'])
if self.message_notification is not message_notification:
old_attributes['message_notification'] = self.message_notification
self.message_notification = message_notification
mfa = MFA.get(data['mfa_level'])
if self.mfa is not mfa:
old_attributes['mfa'] = self.mfa
self.mfa = mfa
content_filter = ContentFilterLevel.get(data.get('explicit_content_filter', 0))
if self.content_filter is not content_filter:
old_attributes['content_filter'] = self.content_filter
self.content_filter = content_filter
available = (not data.get('unavailable', False))
if self.available != available:
old_attributes['available'] = self.available
self.available = available
try:
features = data['features']
except KeyError:
features = []
else:
features = [GuildFeature.get(feature) for feature in features]
features.sort()
if self.features != features:
old_attributes['features'] = self.features
self.features = features
system_channel_id = data.get('system_channel_id', None)
if system_channel_id is None:
system_channel_id = 0
else:
system_channel_id = int(system_channel_id)
if self.system_channel_id != system_channel_id:
old_attributes['system_channel_id'] = self.system_channel_id
self.system_channel_id = system_channel_id
try:
system_channel_flags = SystemChannelFlag(data['system_channel_flags'])
except KeyError:
system_channel_flags = SystemChannelFlag.ALL
if self.system_channel_flags != system_channel_flags:
old_attributes['system_channel_flags'] = self.system_channel_flags
self.system_channel_flags = system_channel_flags
public_updates_channel_id = data.get('public_updates_channel_id', None)
if public_updates_channel_id is None:
public_updates_channel_id = 0
else:
public_updates_channel_id = int(public_updates_channel_id)
if self.public_updates_channel_id != public_updates_channel_id:
old_attributes['public_updates_channel_id'] = self.public_updates_channel_id
self.public_updates_channel_id = public_updates_channel_id
owner_id = data.get('owner_id', None)
if owner_id is None:
owner_id = 0
else:
owner_id = int(owner_id)
if self.owner_id != owner_id:
old_attributes['owner_id'] = self.owner_id
self.owner_id = owner_id
afk_channel_id = data['afk_channel_id']
if afk_channel_id is None:
afk_channel_id = 0
else:
afk_channel_id = int(afk_channel_id)
if self.afk_channel_id != afk_channel_id:
old_attributes['afk_channel_id'] = self.afk_channel_id
self.afk_channel_id = afk_channel_id
widget_enabled = data.get('widget_enabled', False)
if self.widget_enabled != widget_enabled:
old_attributes['widget_enabled'] = self.widget_enabled
self.widget_enabled = widget_enabled
widget_channel_id = data.get('widget_channel_id', None)
if widget_channel_id is None:
widget_channel_id = 0
else:
widget_channel_id = int(widget_channel_id)
if self.widget_channel_id != widget_channel_id:
old_attributes['widget_channel_id'] = self.widget_channel_id
self.widget_channel_id = widget_channel_id
rules_channel_id = data.get('rules_channel_id', None)
if rules_channel_id is None:
rules_channel_id = 0
else:
rules_channel_id = int(rules_channel_id)
if self.rules_channel_id != rules_channel_id:
old_attributes['rules_channel_id'] = self.rules_channel_id
self.rules_channel_id = rules_channel_id
description = data.get('description', None)
if self.description != description:
old_attributes['description'] = self.description
self.description = description
vanity_code = data.get('vanity_url_code', None)
if self.vanity_code != vanity_code:
old_attributes['vanity_code'] = self.vanity_code
self.vanity_code = vanity_code
max_users = data.get('max_members', None)
if max_users is None:
max_users = MAX_USERS_DEFAULT
if self.max_users != max_users:
old_attributes['max_users'] = self.max_users
self.max_users = max_users
max_presences = data.get('max_presences', None)
if max_presences is None:
max_presences = MAX_PRESENCES_DEFAULT
if self.max_presences != max_presences:
old_attributes['max_presences'] = self.max_presences
self.max_presences = max_presences
max_video_channel_users = data.get('max_video_channel_users', None)
if max_video_channel_users is None:
max_video_channel_users = MAX_VIDEO_CHANNEL_USERS_DEFAULT
if self.max_video_channel_users != max_video_channel_users:
old_attributes['max_video_channel_users'] = self.max_video_channel_users
self.max_video_channel_users = max_video_channel_users
premium_tier = data['premium_tier']
if self.premium_tier != premium_tier:
old_attributes['premium_tier'] = self.premium_tier
self.premium_tier = premium_tier
booster_count = data.get('premium_subscription_count', None)
if booster_count is None:
booster_count = 0
if self.booster_count != booster_count:
old_attributes['booster_count'] = self.booster_count
self.booster_count = booster_count
self._boosters = None
preferred_locale = parse_preferred_locale(data)
if self.preferred_locale != preferred_locale:
old_attributes['preferred_locale'] = self.preferred_locale
self.preferred_locale = preferred_locale
nsfw_level = NsfwLevel.get(data.get('nsfw_level', 0))
if self.nsfw_level is not nsfw_level:
old_attributes['nsfw_level'] = self.nsfw_level
self.nsfw_level = nsfw_level
self._update_counts_only(data)
return old_attributes
def _update_attributes(self, data):
"""
Updates the guild, overwriting its old attributes.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Guild data received from Discord.
"""
# ignoring 'roles'
# ignoring 'emojis'
# ignoring 'members'
# ignoring 'presence'
# ignoring 'channels'
# ignoring 'voice_states'
# ignoring 'stickers'
self.name = data['name']
self._set_icon(data)
self._set_invite_splash(data)
self._set_discovery_splash(data)
self._set_banner(data)
self.region = VoiceRegion.get(data['region'])
self.afk_timeout = data['afk_timeout']
self.verification_level = VerificationLevel.get(data['verification_level'])
self.message_notification = MessageNotificationLevel.get(data['default_message_notifications'])
self.mfa = MFA.get(data['mfa_level'])
self.content_filter = ContentFilterLevel.get(data.get('explicit_content_filter', 0))
self.available = (not data.get('unavailable', False))
try:
features = data['features']
except KeyError:
self.features.clear()
else:
features = [GuildFeature.get(feature) for feature in features]
features.sort()
self.features = features
system_channel_id = data.get('system_channel_id', None)
if system_channel_id is None:
system_channel_id = 0
else:
system_channel_id = int(system_channel_id)
self.system_channel_id = system_channel_id
try:
system_channel_flags = SystemChannelFlag(data['system_channel_flags'])
except KeyError:
system_channel_flags = SystemChannelFlag.ALL
self.system_channel_flags = system_channel_flags
public_updates_channel_id = data.get('public_updates_channel_id', None)
if public_updates_channel_id is None:
public_updates_channel_id = 0
else:
public_updates_channel_id = int(public_updates_channel_id)
self.public_updates_channel_id = public_updates_channel_id
owner_id = data.get('owner_id', None)
if owner_id is None:
owner_id = 0
else:
owner_id = int(owner_id)
self.owner_id = owner_id
afk_channel_id = data.get('afk_channel_id', None)
if afk_channel_id is None:
afk_channel_id = 0
else:
afk_channel_id = int(afk_channel_id)
self.afk_channel_id = afk_channel_id
self.widget_enabled = data.get('widget_enabled', False)
widget_channel_id = data.get('widget_channel_id', None)
if widget_channel_id is None:
widget_channel_id = 0
else:
widget_channel_id = int(widget_channel_id)
self.widget_channel_id = widget_channel_id
rules_channel_id = data.get('rules_channel_id', None)
if rules_channel_id is None:
rules_channel_id = 0
else:
rules_channel_id = int(rules_channel_id)
self.rules_channel_id = rules_channel_id
self.description = data.get('description', None)
self.vanity_code = data.get('vanity_url_code', None)
max_users = data.get('max_members', None)
if max_users is None:
max_users = MAX_USERS_DEFAULT
self.max_users = max_users
max_presences = data.get('max_presences', None)
if max_presences is None:
max_presences = MAX_PRESENCES_DEFAULT
self.max_presences = max_presences
max_video_channel_users = data.get('max_video_channel_users', None)
if max_video_channel_users is None:
max_video_channel_users = MAX_VIDEO_CHANNEL_USERS_DEFAULT
self.max_video_channel_users = max_video_channel_users
self.premium_tier = data['premium_tier']
booster_count = data.get('premium_subscription_count', None)
if booster_count is None:
booster_count = 0
self.booster_count = booster_count
self._boosters = None
self.preferred_locale = parse_preferred_locale(data)
self.nsfw_level = NsfwLevel.get(data.get('nsfw_level', 0))
self._update_counts_only(data)
def _update_counts_only(self, data):
"""
Updates the guild's counts if given.
Parameters
----------
data : `dict` of (`str`, `Any`) items
Received guild data.
"""
try:
approximate_online_count = data['approximate_presence_count']
except KeyError:
pass
else:
self.approximate_online_count = approximate_online_count
try:
approximate_user_count = data['approximate_member_count']
except KeyError:
pass
else:
self.approximate_user_count = approximate_user_count
def _update_emojis(self, data):
"""
Updates the emojis of the guild and returns all the changes broken down for each changed emoji.
Parameters
----------
data : `list` of (`dict` of (`str`, `Any`) items)
Received emoji datas.
Returns
-------
changes : `list` of `tuple` (`int`, ``Emoji``, (`None` or `dict` of (`str`, `Any`) items)))
The changes broken down for each changed emoji. Each element of the list is a tuple of 3 elements:
+-------+-------------------+-----------------------------------------------+
| Index | Respective name | Type |
+=======+===================+===============================================+
| 0 | action | `int` |
+-------+-------------------+-----------------------------------------------+
| 1 | emoji | ``Emoji`` |
+-------+-------------------+-----------------------------------------------+
| 2 | old_attributes | `None` or `dict` of (`str`, `Any`) items |
+-------+-------------------+-----------------------------------------------+
Possible actions:
+-----------------------+-------+
| Respective name | Value |
+=======================+=======+
| EMOJI_UPDATE_NONE | `0` |
+-----------------------+-------+
| EMOJI_UPDATE_CREATE | `1` |
+-----------------------+-------+
| EMOJI_UPDATE_DELETE | `2` |
+-----------------------+-------+
| EMOJI_UPDATE_EDIT | `3` |
+-----------------------+-------+
If action is `EMOJI_UPDATE_EDIT`, then `old_attributes` is passed as a dictionary containing the changed
attributes in an `attribute-name` - `old-value` relation. Every item in `old_attributes` is optional.
+-------------------+-------------------------------+
| Keys | Values |
+===================+===============================+
| animated | `bool` |
+-------------------+-------------------------------+
| available | `bool` |
+-------------------+-------------------------------+
| managed | `bool` |
+-------------------+-------------------------------+
| name | `str` |
+-------------------+-------------------------------+
| require_colons | `bool` |
+-------------------+-------------------------------+
| roles_ids | `None` or `tuple` of ``Role`` |
+-------------------+-------------------------------+
"""
emojis = self.emojis
changes = []
old_ids = set(emojis)
for emoji_data in data:
emoji_id = int(emoji_data['id'])
try:
emoji = emojis[emoji_id]
except KeyError:
emoji = Emoji(emoji_data, self)
emojis[emoji_id] = emoji
changes.append((EMOJI_UPDATE_CREATE, emoji, None),)
else:
old_attributes = emoji._difference_update_attributes(emoji_data)
if old_attributes:
changes.append((EMOJI_UPDATE_EDIT, emoji, old_attributes),)
old_ids.remove(emoji_id)
for emoji_id in old_ids:
try:
emoji = emojis.pop(emoji_id)
except KeyError:
pass
else:
changes.append((EMOJI_UPDATE_DELETE, emoji, None),)
return changes
def _sync_emojis(self, data):
"""
Syncs the emojis of the guild.
Parameters
----------
data : `list` of (`dict` of (`str`, `Any`) items)
Received emoji datas.
"""
emojis = self.emojis
old_ids = set(emojis)
for emoji_data in data:
emoji_id = int(emoji_data['id'])
try:
emoji = emojis[emoji_id]
except KeyError:
emoji = Emoji(emoji_data, self)
emojis[emoji_id] = emoji
else:
emoji._update_attributes(emoji_data)
old_ids.remove(emoji_id)
for emoji_id in old_ids:
try:
del emojis[emoji_id]
except KeyError:
pass
def _update_stickers(self, data):
"""
Updates the stickers of the guild and returns the changes broken down for each changed sticker.
Parameters
----------
data : `list` of (`dict` of (`str`, `Any`) items)
Received sticker datas.
Returns
-------
changes : `list` of `tuple` (`int`, ``Sticker``, (`None` or `dict` of (`str`, `Any`) items)))
The changes broken down for each changed sticker. Each element of the list is a tuple of 3 elements:
+-------+-------------------+-----------------------------------------------+
| Index | Respective name | Type |
+=======+===================+===============================================+
| 0 | action | `int` |
+-------+-------------------+-----------------------------------------------+
| 1 | sticker | ``Sticker`` |
+-------+-------------------+-----------------------------------------------+
| 2 | old_attributes | `None` or `dict` of (`str`, `Any`) items |
+-------+-------------------+-----------------------------------------------+
Possible actions:
+-----------------------+-------+
| Respective name | Value |
+=======================+=======+
| STICKER_UPDATE_NONE | `0` |
+-----------------------+-------+
| STICKER_UPDATE_CREATE | `1` |
+-----------------------+-------+
| STICKER_UPDATE_DELETE | `2` |
+-----------------------+-------+
| STICKER_UPDATE_EDIT | `3` |
+-----------------------+-------+
If action is `STICKER_UPDATE_EDIT`, then `old_attributes` is passed as a dictionary containing the changed
attributes in an `attribute-name` - `old-value` relation. Every item in `old_attributes` is optional.
+-----------------------+-----------------------------------+
| Keys | Values |
+=======================+===================================+
| available | `bool` |
+-----------------------+-----------------------------------+
| description | `None` or `str` |
+-----------------------+-----------------------------------+
| name | `str` |
+-----------------------+-----------------------------------+
| sort_value | `int` |
+-----------------------+-----------------------------------+
| tags | `None` or `frozenset` of `str` |
+-----------------------+-----------------------------------+
"""
stickers = self.stickers
changes = []
old_ids = set(stickers)
for sticker_data in data:
sticker_id = int(sticker_data['id'])
try:
sticker = stickers[sticker_id]
except KeyError:
sticker = Sticker(sticker_data)
stickers[sticker_id] = sticker
changes.append((STICKER_UPDATE_CREATE, sticker, None),)
else:
old_attributes = sticker._difference_update_attributes(sticker_data)
if old_attributes:
changes.append((STICKER_UPDATE_EDIT, sticker, old_attributes),)
old_ids.remove(sticker_id)
for sticker_id in old_ids:
try:
sticker = stickers.pop(sticker_id)
except KeyError:
pass
else:
changes.append((STICKER_UPDATE_DELETE, sticker, None),)
return changes
def _sync_stickers(self, data):
"""
Syncs the stickers of the guild.
Parameters
----------
data : `list` of (`dict` of (`str`, `Any`) items)
Received sticker datas.
"""
stickers = self.stickers
old_ids = set(stickers)
for sticker_data in data:
sticker_id = int(sticker_data['id'])
try:
sticker = stickers[sticker_id]
except KeyError:
sticker = Sticker(sticker_data)
stickers[sticker_id] = sticker
else:
sticker._update_attributes(sticker_data)
old_ids.remove(sticker_id)
for sticker_id in old_ids:
try:
del stickers[sticker_id]
except KeyError:
pass
def _invalidate_permission_cache(self):
"""
Invalidates the cached permissions of the guild.
"""
self._permission_cache = None
for channel in self.channels.values():
channel._permission_cache = None
@property
def owner(self):
"""
Returns the guild's owner.
Returns
-------
owner : ``ClientUserBase``
If the guild has no owner, returns `ZEROUSER`.
"""
owner_id = self.owner_id
if owner_id == 0:
owner = ZEROUSER
else:
owner = create_partial_user_from_id(owner_id)
return owner
@property
def emoji_limit(self):
"""
The maximal amount of emojis that the guild can have.
Returns
-------
limit : `int`
"""
limit = (50, 100, 150, 250)[self.premium_tier]
if limit < 200 and (GuildFeature.more_emoji in self.features):
limit = 200
return limit
@property
def bitrate_limit(self):
"""
The maximal bitrate for the guild's voice channels.
Returns
-------
limit : `int`
"""
limit = (96000, 128000, 256000, 384000)[self.premium_tier]
if limit < 128000 and (GuildFeature.vip in self.features):
limit = 128000
return limit
@property
def upload_limit(self):
"""
The maximal size of files, which can be uploaded to the guild's channels.
Returns
-------
limit : `int`
"""
return (8388608, 8388608, 52428800, 104857600)[self.premium_tier]
@property
def sticker_limit(self):
"""
The maximal amount of stickers that the guild can have.
Returns
-------
limit : `int`
"""
limit = (0, 15, 30, 60)[self.premium_tier]
if limit < 30 and (GuildFeature.more_sticker in self.features):
limit = 30
return limit
widget_json_url = property(module_urls.guild_widget_json_url)
@property
def boosters(self):
"""
The boosters of the guild sorted by their subscription date.
These users are queried from the guild's `.users` dictionary, so make sure that is populated before accessing
the property.
Returns
-------
boosters : `list` of ``ClientUserBase``
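Examples
--------
A usage sketch; `guild` is a placeholder instance:
```py
>>> boosters = guild.boosters
>>> # earliest boosters come first; an empty list is returned when ``.booster_count`` is `0`
```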
"""
boosters = self._boosters
if boosters is None:
if self.booster_count:
boosters_ordered = []
guild_id = self.id
for user in self.users.values():
try:
guild_profile = user.guild_profiles[guild_id]
except KeyError:
continue
boosts_since = guild_profile.boosts_since
if boosts_since is None:
continue
boosters_ordered.append((boosts_since, user),)
boosters_ordered.sort(key=user_date_sort_key)
boosters = [element[1] for element in boosters_ordered]
else:
boosters = []
self._boosters = boosters
return boosters
@property
def emoji_counts(self):
"""
Returns the emoji counts of the guild.
Returns
-------
normal_static : `int`
The static emoji count of the guild (excluding managed static).
normal_animated : `int`
The animated emoji count of the guild (excluding managed animated).
managed_static : `int`
The static managed emoji count of the guild.
managed_animated : `int`
The animated managed emoji count of the guild.
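Examples
--------
A usage sketch; `guild` is a placeholder instance:
```py
>>> normal_static, normal_animated, managed_static, managed_animated = guild.emoji_counts
```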
"""
normal_static = 0
normal_animated = 0
managed_static = 0
managed_animated = 0
for emoji in self.emojis.values():
if emoji.animated:
if emoji.managed:
managed_animated += 1
else:
normal_animated += 1
else:
if emoji.managed:
managed_static += 1
else:
normal_static += 1
return normal_static, normal_animated, managed_static, managed_animated
@property
def sticker_count(self):
"""
Returns the sticker counts of the guild for each type.
Returns
-------
static : `int`
The amount of static (``StickerFormat.png``) stickers of the guild.
animated : `int`
The amount of animated (``StickerFormat.apng``) stickers of the guild.
lottie : `int`
The amount of lottie (``StickerFormat.lottie``) stickers of the guild.
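Examples
--------
A usage sketch; `guild` is a placeholder instance:
```py
>>> static, animated, lottie = guild.sticker_count
```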
"""
static_count = 0
animated_count = 0
lottie_count = 0
for sticker in self.stickers.values():
sticker_format = sticker.format
if sticker_format is STICKER_FORMAT_STATIC:
static_count += 1
continue
if sticker_format is STICKER_FORMAT_ANIMATED:
animated_count += 1
continue
if sticker_format is STICKER_FORMAT_LOTTIE:
lottie_count += 1
continue
return static_count, animated_count, lottie_count
@property
def channel_list(self):
"""
Returns the channels of the guild in a list in their display order. Note that channels inside categories are
excluded.
Returns
-------
channels : `list` of ``ChannelGuildBase`` instances
"""
return sorted(channel for channel in self.channels.values() if channel.parent is None)
@property
def channel_list_flattened(self):
"""
Returns the channels of the guild in a list in their display order. Note that channels inside categories are
included as well.
Returns
-------
channels : `list` of ``ChannelGuildBase`` instances
"""
channels = []
for channel in sorted(channel for channel in self.channels.values() if channel.parent is None):
channels.append(channel)
if type(channel) is ChannelCategory:
channels.extend(channel.list_channels)
return channels
@property
def role_list(self):
"""
Returns the roles of the guild in their display order.
Returns
-------
roles : `list` of ``Role``
"""
return sorted(self.roles.values())
@property
def nsfw(self):
"""Returns whether the guild's ``.nsfw_level`` is `none` or `safe`."""
nsfw_level = self.nsfw_level
if (nsfw_level is NsfwLevel.none) or (nsfw_level is NsfwLevel.safe):
return True
return False
@property
def public_updates_channel(self):
"""
Returns the channel where the guild's public updates should go.
Returns
-------
public_updates_channel : `None` or ``ChannelText``
"""
public_updates_channel_id = self.public_updates_channel_id
if public_updates_channel_id:
return self.channels.get(public_updates_channel_id, None)
@property
def afk_channel(self):
"""
Returns the afk channel of the guild, if it has one.
Returns
-------
afk_channel : `None` or ``ChannelVoice``
"""
afk_channel_id = self.afk_channel_id
if afk_channel_id:
return self.channels.get(afk_channel_id, None)
@property
def rules_channel(self):
"""
Returns the channel where the rules of a public guild should be.
Returns
-------
rules_channel : `None` or ``ChannelText``
"""
rules_channel_id = self.rules_channel_id
if rules_channel_id:
return self.channels.get(rules_channel_id, None)
@property
def system_channel(self):
"""
Returns the channel where the system messages are sent.
Returns
-------
system_channel : `None` or ``ChannelText``
"""
system_channel_id = self.system_channel_id
if system_channel_id:
return self.channels.get(system_channel_id, None)
@property
def widget_channel(self):
"""
Returns the channel for which the guild's widget is for.
Returns
-------
widget_channel : `None` or ``ChannelText``
"""
widget_channel_id = self.widget_channel_id
if widget_channel_id:
return self.channels.get(widget_channel_id, None)
| 34.539383 | 120 | 0.502753 |
794f7bfdbafe04dc67cc05deaab3f5bcd2268370 | 8,658 | py | Python | web_frontend/cloudscheduler/glintwebui/glint_utils.py | hep-gc/cloud-scheduler-2 | 180d9dc4f8751cf8c8254518e46f83f118187e84 | [
"Apache-2.0"
] | null | null | null | web_frontend/cloudscheduler/glintwebui/glint_utils.py | hep-gc/cloud-scheduler-2 | 180d9dc4f8751cf8c8254518e46f83f118187e84 | [
"Apache-2.0"
] | null | null | null | web_frontend/cloudscheduler/glintwebui/glint_utils.py | hep-gc/cloud-scheduler-2 | 180d9dc4f8751cf8c8254518e46f83f118187e84 | [
"Apache-2.0"
] | null | null | null | # glint utils
import logging
import os
import string
import random
#import hashlib
from cloudscheduler.lib.openstack_functions import get_openstack_sess, get_glance_connection
from cloudscheduler.lib.db_config import Config
config = Config('/etc/cloudscheduler/cloudscheduler.yaml', ['general', 'openstackPoller.py', 'web_frontend'], pool_size=2, max_overflow=10)
ALPHABET = string.ascii_letters + string.digits + string.punctuation
ALPHABET = ALPHABET.replace("'", "")
ALPHABET = ALPHABET.replace('"', "")
# new glint api, maybe make this part of glint utils?
"""
def create_placeholder_image(glance, image_name, disk_format, container_format):
image = glance.images.create(
#image = glance.create_image(
name=image_name,
disk_format=disk_format,
container_format=container_format)
return image.id
"""
# Upload an image to repo, returns image id if successful
# if there is no image_id it is a direct upload and no placeholder exists
def upload_image(cloud, image_id, image_name, scratch_dir, image_checksum=None, disk_format=None, container_format="bare"):
try:
sess = get_openstack_sess(cloud, config.categories["openstackPoller.py"]["cacerts"])
if sess is False:
logging.error("Failed to get openstack session")
return False
glance = get_glance_connection(sess, cloud["region"])
if glance is False:
logging.error("Failed to get openstack glance connection")
return False
file_path = scratch_dir
if image_checksum:
file_path = scratch_dir + image_name + "---" + image_checksum
image = glance.create_image(name=image_name, disk_format=disk_format, container_format=container_format, data=open(file_path, 'rb'))
logging.info("Image upload complete")
return glance.get_image(image.id)
except Exception as exc:
logging.error("Image upload failed: %s" % exc)
return False
# Download an image from the repo, returns True if successful or False if not
def download_image(cloud, image_name, image_id, image_checksum, scratch_dir):
#open file then write to it
try:
sess = get_openstack_sess(cloud, config.categories["openstackPoller.py"]["cacerts"])
if sess is False:
return (False, "Failed to get openstack session", "", "")
glance = get_glance_connection(sess, cloud["region"])
if glance is False:
return (False, "Failed to get openstack glance connection", "", "")
if not os.path.exists(scratch_dir):
os.makedirs(scratch_dir)
file_path = scratch_dir + image_name + "---" + image_checksum
#md5 = hashlib.md5()
with open(file_path, "wb") as local_image:
response = glance.download_image(image_id, stream=True)
for chunk in response.iter_content(chunk_size=512000):
# md5.update(chunk)
local_image.write(chunk)
# if response.headers["Content-MD5"] != md5.hexdigest():
# return (False, "Checksum mismatch in downloaded content")
#glance.download_image(image_id, output=file_path)
img = glance.get_image(image_id)
return (True, "Success", img.disk_format, img.container_format)
except Exception as exc:
return (False, exc, "", "")
#def delete_image(glance, image_id):
def delete_image(cloud, image_id):
try:
sess = get_openstack_sess(cloud, config.categories["openstackPoller.py"]["cacerts"])
if sess is False:
return (1, "Failed to get openstack session")
glance = get_glance_connection(sess, cloud["region"])
if glance is False:
return (1, "Failed to get openstack glance connection")
glance.delete_image(image_id)
except Exception as exc:
logging.error("Unknown error, unable to delete image")
logging.error(exc)
return (1, exc)
return (0,)
"""
def update_image_name(glance, image_id, image_name):
#glance.images.update(image_id, name=image_name)
glance.update_image(image_id, name=image_name)
def get_checksum(glance, image_id):
#image = glance.images.get(image_id)
image = glance.get_images(image_id)
return image['checksum']
"""
#
# Check image cache and queue up a pull request if target image is not present
#
def check_cache(config, image_name, image_checksum, group_name, user, target_image=None, return_image=False):
IMAGE_CACHE = "csv2_image_cache"
if isinstance(user, str):
username = user
else:
username = user.username
if image_checksum is not None:
where_clause = "image_name='%s' and checksum='%s'" % (image_name, image_checksum)
rc, qmsg, image = config.db_query(IMAGE_CACHE, where=where_clause)
else:
where_clause = "image_name='%s' and checksum='%s'" % (image_name, image_checksum)
rc, qmsg, image = config.db_query(IMAGE_CACHE, where=where_clause)
if len(image) > 0:
# we found something in the cache we can skip queueing a pull request
if return_image:
return image[0]
return True
else:
from .celery_app import pull_request
logging.info("No image n cache, getting target image for pull request")
# nothing in the cache lets queue up a pull request
        if target_image is None:
            target_image = get_image(config, image_name, image_checksum, group_name)
            if target_image is False:
                # unable to find target image
                logging.info("Unable to find target image")
                if return_image:
                    return None
                return False  # maybe raise an error here
        tx_id = generate_tx_id()
        preq = {
            "tx_id": tx_id,
            "target_group_name": target_image["group_name"],
            "target_cloud_name": target_image["cloud_name"],
            "image_name": image_name,
            "image_id": target_image["id"],
            "checksum": target_image["checksum"],
            "status": "pending",
            "requester": username,
        }
PULL_REQ = "csv2_image_pull_requests"
# check if a pull request already exists for this image? or just let the workers sort it out?
config.db_merge(PULL_REQ, preq)
config.db_commit()
#pull_request.delay(tx_id = tx_id)
pull_request.apply_async((tx_id,), queue='pull_requests')
if return_image:
return None
return True
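def _check_cache_example(config):
    # Hypothetical illustration, not part of the original module: all names and
    # the checksum below are made up.  check_cache() returns True when the image
    # is already cached or a pull request was queued, and False when no source
    # image could be located for the group.
    return check_cache(config, "ubuntu-20.04-server", "d41d8cd98f00b204e9800998ecf8427e",
                       "example-group", "example-user")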
#
def get_image(config, image_name, image_checksum, group_name, cloud_name=None):
IMAGES = "cloud_images"
if cloud_name is None:
#getting a source image
logging.info("Looking for image %s, checksum: %s in group %s" % (image_name, image_checksum, group_name))
if image_checksum is not None:
where_clause = "group_name='%s' and name='%s' and checksum='%s'" % (group_name, image_name, image_checksum)
rc, qmsg, image_candidates = config.db_query(IMAGES, where=where_clause)
else:
where_clause = "group_name='%s' and name='%s'" % (group_name, image_name)
rc, qmsg, image_candidates = config.db_query(IMAGES, where=where_clause)
if len(image_candidates) > 0:
return image_candidates[0]
else:
#No image that fits specs
return False
else:
#getting a specific image
logging.debug("Retrieving image %s" % image_name)
where_clause = "group_name='%s' and cloud_name='%s' and name='%s' and checksum='%s'" % (group_name, cloud_name, image_name, image_checksum)
rc, msg, image_candidates = config.db_query(IMAGES, where=where_clause)
if len(image_candidates) > 0:
return image_candidates[0]
else:
#No image that fits specs
return False
# at a length of 16 with a 92 symbol alphabet there are 92**16 possible ids, so a collision is pretty darn unlikely
def generate_tx_id(length=16):
return ''.join(random.choice(ALPHABET) for i in range(length)).replace('\\', '\\\\')
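def _tx_id_collision_estimate(n_ids=10 ** 6):
    # Hedged aside, not part of the original module: a quick birthday-bound check
    # of the collision comment above.  With the 92 symbol alphabet and length 16
    # there are 92**16 (~2.6e31) possible ids, so even a million generated ids
    # collide with probability on the order of 1e-20.
    id_space = 92 ** 16
    return float(n_ids) * (n_ids - 1) / (2.0 * id_space)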
| 40.083333 | 147 | 0.63964 |
794f7c3e95ce50b47b5a7a8aa9fefd5a26bb85eb | 5,036 | bzl | Python | build_extensions/remove_from_jar.bzl | dayanruben/android-test | 2ed50d534cb7e48433dcfa5f5e9e793e99506d84 | [
"Apache-2.0"
] | 836 | 2018-05-20T23:00:12.000Z | 2022-03-29T09:53:59.000Z | build_extensions/remove_from_jar.bzl | dayanruben/android-test | 2ed50d534cb7e48433dcfa5f5e9e793e99506d84 | [
"Apache-2.0"
] | 602 | 2018-06-29T04:44:44.000Z | 2022-03-30T19:13:09.000Z | build_extensions/remove_from_jar.bzl | dayanruben/android-test | 2ed50d534cb7e48433dcfa5f5e9e793e99506d84 | [
"Apache-2.0"
] | 233 | 2018-05-21T19:51:09.000Z | 2022-03-23T17:01:25.000Z | """Removes files / directories from jar (or any zip) file.
If overlapping_jars is present, then it also removes entries in the
primary jar that exists in any of the overlapping_jars.
"""
def remove_from_jar(
name,
jar,
keep_spec,
remove_spec = None,
overlapping_jars = [],
visibility = None,
constraints = None,
**kwargs):
"""Removes specified entries from a jar file.
    The entries to remove can be specified with any combination of 'remove_spec'
    and 'overlapping_jars'.
    It generates two relevant targets: <name> and lib<name>,
    where the <name> target is pure genrule output that holds the 'concrete' jar, while
    lib<name> is a java_import rule that wraps the <name> jar.
Args:
name: Name of the remove jars target, String.
jar: jar from which to remove files, label.
keep_spec: Regex to match items to be retained in jar file.
remove_spec: Regex of items to be removed from a jar file.
overlapping_jars: jars containing entries to be removed from the main jar.
visibility: (Optional) visibility of the rules generated by this macro.
constraints: (Optional) constraints imposed on this rule as a Java library.
Currently defaults to ["android"] for compatibility reasons, but this will
be removed in the future.
      **kwargs: Args to be passed to genrule and java_import, so valid args are
          the common set between those two:
          deprecation, distribs, licenses, obsolete, tags, testonly, visibility
Usage:
remove_from_jar(
name = "jar_cleaned",
keep_spec = "foo/bar/,*|bar/foo/,*",
overlapping_jars = [":fooapp_deploy.jar"])
Explanation:
Removes all items not matching foo/bar/.* or bar/foo/.* from the jar file.
Uses fooapp_deploy.jar overlapping jar, to remove entries in main
jar that are also present in the overlapping jar.
remove_from_jar(
name = "jar_cleaned",
keep_spec = "foo/bar.*"
removes = [ "foo/bar/RemoveMe" ])
Explanation:
This will retain everything from foo/bar/.* except foo/bar/RemoveMe.
"""
if not jar or jar == "":
fail('"jar" attribute cannot be null or empty')
if constraints == None:
constraints = ["android"]
srcs = [jar]
message = ('Keeping %s from "%s." ' % (keep_spec, jar))
message += ('Removing %s from "%s." ' % (remove_spec, jar)) if remove_spec else ""
# Add overlapping_jars to sources if specified.
for overlapping_jar in overlapping_jars:
srcs += [overlapping_jar]
message += (
'Removing elements in "%s" that are present in overlapping jar "%s."' %
(jar, overlapping_jar)
)
cmd = [
"set +o pipefail;",
"tmpdir=$$(mktemp -d);",
"cp $(location %s) $@;" % jar,
"chmod +w $@;",
"$(location @local_jdk//:jar) tf $@ > $$tmpdir/file_list.txt;",
"cat $$tmpdir/file_list.txt | ",
'egrep -v "%s" | ' % keep_spec,
"xargs --no-run-if-empty zip -d $@ >",
"$$tmpdir/keep_from_jar_result.txt 2>&1 || {",
" RESULT=$$?;",
" cat $$tmpdir/keep_from_jar_result.txt;",
" exit $${RESULT};",
" };",
]
if remove_spec:
cmd += [
"$(location @local_jdk//:jar) tf $@ >",
"$$tmpdir/remove_file_list.txt;",
"cat $$tmpdir/remove_file_list.txt | ",
'egrep "%s" | ' % remove_spec,
"xargs --no-run-if-empty zip -d $@ >",
"$$tmpdir/remove_from_jar_result.txt 2>&1 || {",
" RESULT=$$?;",
" cat $$tmpdir/remove_from_jar_result.txt;",
" exit $${RESULT};",
" };",
]
if overlapping_jars:
for overlapping_jar in overlapping_jars:
cmd += [
("$(location @local_jdk//:jar) tf $(location %s) >> " +
"$$tmpdir/overlapping_jar.txt;") %
overlapping_jar,
]
cmd += [
"$(location @local_jdk//:jar) tf $@ >",
"$$tmpdir/original_jar.txt;",
"grep -F -x -f $$tmpdir/overlapping_jar.txt",
"$$tmpdir/original_jar.txt",
"| xargs --no-run-if-empty zip -d $@ >",
"$$tmpdir/remove_from_overlapping_jar_result.txt 2>&1 || {",
" RESULT=$$?;",
" cat $$tmpdir/remove_from_overlapping_jar_result.txt;",
" exit $${RESULT};",
" };",
]
cmd += ["rm -rf $$tmpdir;"]
native.genrule(
name = name,
srcs = srcs,
outs = ["%s.jar" % name],
tools = [
"@local_jdk//:jar",
],
message = message,
visibility = visibility,
cmd = " ".join(cmd),
**kwargs
)
native.java_import(
name = "lib%s" % name,
jars = [name],
constraints = constraints,
visibility = visibility,
**kwargs
)
| 34.493151 | 86 | 0.555004 |
794f7d25dfe905f29b35c8287b7b0dae5ef56d3e | 544 | py | Python | api/migrations/0002_auto_20190403_0216.py | hyusuf4/FriendZone2 | 342a2f86295341ea98bbb9dd596ef823eb509962 | [
"Apache-2.0"
] | null | null | null | api/migrations/0002_auto_20190403_0216.py | hyusuf4/FriendZone2 | 342a2f86295341ea98bbb9dd596ef823eb509962 | [
"Apache-2.0"
] | null | null | null | api/migrations/0002_auto_20190403_0216.py | hyusuf4/FriendZone2 | 342a2f86295341ea98bbb9dd596ef823eb509962 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.7 on 2019-04-03 02:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='node',
name='password',
field=models.CharField(blank=True, max_length=32),
),
migrations.AddField(
model_name='node',
name='username',
field=models.CharField(blank=True, max_length=32),
),
]
| 22.666667 | 62 | 0.566176 |
794f7e220094b18603981a90cf13293911a3487b | 2,611 | py | Python | tools/scons/scons-local-1.2.0/SCons/Tool/aixf77.py | rohankumardubey/node | d49d53fd499f7cf68fdfcc7d0c9d401e4e4407fb | [
"MIT"
] | 48 | 2015-01-09T20:39:35.000Z | 2021-12-21T21:17:52.000Z | tools/scons/scons-local-1.2.0/SCons/Tool/aixf77.py | dalizard/node | 776754c33f347ef4827cf2b9d3cea7c1d46be7b5 | [
"MIT"
] | 2 | 2016-02-05T10:27:37.000Z | 2019-01-22T16:22:51.000Z | tools/scons/scons-local-1.2.0/SCons/Tool/aixf77.py | dalizard/node | 776754c33f347ef4827cf2b9d3cea7c1d46be7b5 | [
"MIT"
] | 8 | 2015-01-12T17:14:36.000Z | 2018-09-15T14:10:27.000Z | """engine.SCons.Tool.aixf77
Tool-specific initialization for IBM Visual Age f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixf77.py 3842 2008/12/20 22:59:52 scons"
import os.path
#import SCons.Platform.aix
import f77
# It would be good to look for the AIX F77 package the same way we're now
# looking for the C and C++ packages. This should be as easy as supplying
# the correct package names in the following list and uncommenting the
# SCons.Platform.aix_get_xlc() call the in the function below.
packages = []
def get_xlf77(env):
xlf77 = env.get('F77', 'xlf77')
xlf77_r = env.get('SHF77', 'xlf77_r')
#return SCons.Platform.aix.get_xlc(env, xlf77, xlf77_r, packages)
return (None, xlf77, xlf77_r, None)
def generate(env):
"""
Add Builders and construction variables for the Visual Age FORTRAN
compiler to an Environment.
"""
path, _f77, _shf77, version = get_xlf77(env)
if path:
_f77 = os.path.join(path, _f77)
_shf77 = os.path.join(path, _shf77)
f77.generate(env)
env['F77'] = _f77
env['SHF77'] = _shf77
def exists(env):
path, _f77, _shf77, version = get_xlf77(env)
if path and _f77:
xlf77 = os.path.join(path, _f77)
if os.path.exists(xlf77):
return xlf77
return None
| 34.813333 | 83 | 0.726159 |
794f7ea4ec39144347f9e6181798c0811815f60b | 1,136 | py | Python | djangocms_fbcomments/cms_plugins.py | mishbahr/djangocms-fbcomments | 4e6bf2c636196fee85a489b510f13ae67ae05af6 | [
"BSD-3-Clause"
] | 20 | 2015-09-29T10:00:56.000Z | 2018-06-15T03:28:36.000Z | djangocms_fbcomments/cms_plugins.py | mishbahr/djangocms-fbcomments | 4e6bf2c636196fee85a489b510f13ae67ae05af6 | [
"BSD-3-Clause"
] | 1 | 2020-05-14T02:23:00.000Z | 2020-05-27T14:45:59.000Z | djangocms_fbcomments/cms_plugins.py | mishbahr/djangocms-fbcomments | 4e6bf2c636196fee85a489b510f13ae67ae05af6 | [
"BSD-3-Clause"
] | 2 | 2016-08-04T21:35:40.000Z | 2020-05-14T02:19:06.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import get_language, to_locale
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .conf import settings
from .models import FacebookComments
class FacebookCommentsPlugin(CMSPluginBase):
module = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_MODULE
name = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_NAME
model = FacebookComments
render_template = settings.DJANGOCMS_FBCOMMENTS_PLUGIN_TEMPLATE
def render(self, context, instance, placeholder):
context = super(FacebookCommentsPlugin, self).render(context, instance, placeholder)
request = context.get('request')
context['language_code'] = to_locale(get_language())
context['page_url'] = request.build_absolute_uri(location=request.path_info)
return context
class Media:
css = {
'all': ('css/djangocms_fbcomments/admin/djangocms_fbcomments.css',)
}
js = ('js/djangocms_fbcomments/admin/djangocms_fbcomments.js',)
plugin_pool.register_plugin(FacebookCommentsPlugin)
| 33.411765 | 92 | 0.751761 |
794f7ee0708e915467808da24688b373bdb52eeb | 2,664 | py | Python | scripts/copy_couchdb_fields.py | amywieliczka/harvester | ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8 | [
"BSD-3-Clause"
] | 5 | 2015-01-14T20:48:28.000Z | 2015-05-13T15:31:12.000Z | scripts/copy_couchdb_fields.py | amywieliczka/harvester | ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8 | [
"BSD-3-Clause"
] | 87 | 2015-01-09T00:17:44.000Z | 2021-12-13T19:37:44.000Z | scripts/copy_couchdb_fields.py | amywieliczka/harvester | ed21e0167ac5e6e6002fa7c89aa78cc1e93e29d8 | [
"BSD-3-Clause"
] | 4 | 2015-02-26T23:27:44.000Z | 2019-06-11T21:43:17.000Z | import sys
import os
from harvester.post_processing.couchdb_runner import CouchDBWorker
from harvester.image_harvest import harvest_image_for_doc
from harvester.couchdb_init import get_couchdb
import couchdb #couchdb-python
from dplaingestion.selector import getprop, setprop
EMAIL_RETURN_ADDRESS = os.environ.get('EMAIL_RETURN_ADDRESS',
'example@example.com')
# csv delim email addresses
EMAIL_SYS_ADMIN = os.environ.get('EMAIL_SYS_ADMINS', None)
def copy_fields_for_doc(doc, couchdb_src, field_list,
couchdb_dest):
doc_id = doc['_id']
doc_src = couchdb_src[doc_id]
for field in field_list:
value_src = getprop(doc_src, field, keyErrorAsNone=True)
print "SRC ID:{} FIELD {} VALUE:{}".format(doc_id, field, value_src)
if value_src:
setprop(doc, field, value_src)
couchdb_dest.save(doc)
else:
print 'SRC DOC {} missing {}'.format(doc_id, field)
def def_args():
import argparse
parser = argparse.ArgumentParser(
description='Copy fields from one couchdb to another')
parser.add_argument('user_email', type=str, help='user email')
#parser.add_argument('rq_queue', type=str, help='RQ Queue to put job in')
parser.add_argument('cid', type=str,
help='Collection ID')
parser.add_argument('url_couchdb_src', type=str,
help='Source couchdb')
parser.add_argument('field_list', type=str,
help='List of fields to copy over')
parser.add_argument('--url_couchdb_dest', type=str,
help='Destination couchdb (defaults to environment couch)')
return parser
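def _example_invocation():
    # Illustrative only, not part of the original script: parse the CLI arguments
    # this tool expects without contacting any CouchDB server.  Every value below
    # is hypothetical.
    parser = def_args()
    return parser.parse_args([
        'admin@example.edu',                   # user email
        '26094',                               # collection ID
        'https://src-couch.example.edu:5984',  # source couchdb url
        'sourceResource/title,isShownAt',      # comma delimited field list
    ])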
def main(user_email, cid, url_couchdb_src, field_list, url_couchdb_dest=None):
worker = CouchDBWorker()
timeout = 100000
cdb_src = get_couchdb(url=url_couchdb_src, username=False, password=False)
if url_couchdb_dest:
cdb_dest= get_couchdb(url=url_couchdb_dest)
else:
cdb_dest= get_couchdb()
worker.run_by_collection(cid,
copy_fields_for_doc,
cdb_src,
field_list,
cdb_dest
)
if __name__ == '__main__':
parser = def_args()
args = parser.parse_args(sys.argv[1:])
if not args.user_email or not args.cid:
parser.print_help()
sys.exit(27)
kwargs = {}
field_list = [ x for x in args.field_list.split(',')]
if args.url_couchdb_dest:
kwargs['url_couchdb_dest'] = args.url_couchdb_dest
main(args.user_email,
args.cid,
args.url_couchdb_src,
field_list,
**kwargs)
| 36 | 78 | 0.650526 |
794f7f998aa1a640dc5ed0268e53823f4b64dce7 | 292 | py | Python | scripts/logging.py | sporkmonger/brunet | 5e51cd74af03c6649b3f917d1c30251ab4e3bffe | [
"MIT"
] | 1 | 2015-03-06T09:29:20.000Z | 2015-03-06T09:29:20.000Z | scripts/logging.py | kingctan/brunet | d17c9d951a7b9d23fb22c1daef5b09a36fc5bc6e | [
"MIT"
] | null | null | null | scripts/logging.py | kingctan/brunet | d17c9d951a7b9d23fb22c1daef5b09a36fc5bc6e | [
"MIT"
] | 1 | 2022-01-30T10:31:57.000Z | 2022-01-30T10:31:57.000Z | #!/usr/bin/python
import xmlrpclib, sys, socket
ip = "127.0.0.1"
port = "10000"
socket.setdefaulttimeout(10)
server = xmlrpclib.Server("http://" + ip + ":" + port + "/xm.rem")
enable = sys.argv[1]
option = sys.argv[2]
print server.localproxy("LogManager." + enable, "BooleanSwitch", option)
| 26.545455 | 72 | 0.678082 |
794f7ff2bf2b729b8470c6185bc04f36e500b18e | 17,231 | py | Python | openstack_dashboard/usage/quotas.py | kogotko/carburetor | 4afa3f51de877d955e6e1cc71c0bd7e07d606ad6 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/usage/quotas.py | kogotko/carburetor | 4afa3f51de877d955e6e1cc71c0bd7e07d606ad6 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/usage/quotas.py | kogotko/carburetor | 4afa3f51de877d955e6e1cc71c0bd7e07d606ad6 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import itertools
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon.utils.memoized import memoized
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import network
from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.contrib.developer.profiler import api as profiler
LOG = logging.getLogger(__name__)
NOVA_QUOTA_FIELDS = ("metadata_items",
"cores",
"instances",
"injected_files",
"injected_file_content_bytes",
"ram",
"floating_ips",
"fixed_ips",
"security_groups",
"security_group_rules",
"key_pairs",
"injected_file_path_bytes",
)
CINDER_QUOTA_FIELDS = ("volumes",
"snapshots",
"gigabytes",)
NEUTRON_QUOTA_FIELDS = ("network",
"subnet",
"port",
"router",
"floatingip",
"security_group",
"security_group_rule",
)
QUOTA_FIELDS = NOVA_QUOTA_FIELDS + CINDER_QUOTA_FIELDS + NEUTRON_QUOTA_FIELDS
QUOTA_NAMES = {
"metadata_items": _('Metadata Items'),
"cores": _('VCPUs'),
"instances": _('Instances'),
"injected_files": _('Injected Files'),
"injected_file_content_bytes": _('Injected File Content Bytes'),
"ram": _('RAM (MB)'),
"floating_ips": _('Floating IPs'),
"fixed_ips": _('Fixed IPs'),
"security_groups": _('Security Groups'),
"security_group_rules": _('Security Group Rules'),
"key_pairs": _('Key Pairs'),
"injected_file_path_bytes": _('Injected File Path Bytes'),
"volumes": _('Volumes'),
"snapshots": _('Volume Snapshots'),
"gigabytes": _('Total Size of Volumes and Snapshots (GB)'),
"network": _("Networks"),
"subnet": _("Subnets"),
"port": _("Ports"),
"router": _("Routers"),
"floatingip": _('Floating IPs'),
"security_group": _("Security Groups"),
"security_group_rule": _("Security Group Rules")
}
class QuotaUsage(dict):
"""Tracks quota limit, used, and available for a given set of quotas."""
def __init__(self):
self.usages = defaultdict(dict)
def __contains__(self, key):
return key in self.usages
def __getitem__(self, key):
return self.usages[key]
def __setitem__(self, key, value):
raise NotImplementedError("Directly setting QuotaUsage values is not "
"supported. Please use the add_quota and "
"tally methods.")
def __repr__(self):
return repr(dict(self.usages))
def get(self, key, default=None):
return self.usages.get(key, default)
def add_quota(self, quota):
"""Adds an internal tracking reference for the given quota."""
if quota.limit is None or quota.limit == -1:
# Handle "unlimited" quotas.
self.usages[quota.name]['quota'] = float("inf")
self.usages[quota.name]['available'] = float("inf")
else:
self.usages[quota.name]['quota'] = int(quota.limit)
def tally(self, name, value):
"""Adds to the "used" metric for the given quota."""
value = value or 0 # Protection against None.
# Start at 0 if this is the first value.
if 'used' not in self.usages[name]:
self.usages[name]['used'] = 0
# Increment our usage and update the "available" metric.
self.usages[name]['used'] += int(value) # Fail if can't coerce to int.
self.update_available(name)
def update_available(self, name):
"""Updates the "available" metric for the given quota."""
quota = self.usages.get(name, {}).get('quota', float('inf'))
available = quota - self.usages[name]['used']
if available < 0:
available = 0
self.usages[name]['available'] = available
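def _quota_usage_example():
    # Illustrative sketch, not part of the original module: how the QuotaUsage
    # tracker above is driven.  The quota argument only needs ``name`` and
    # ``limit`` attributes, so a tiny stand-in class is used; all numbers are
    # made up.
    class _FakeQuota(object):
        name = 'cores'
        limit = 20
    usage = QuotaUsage()
    usage.add_quota(_FakeQuota())  # record the limit for 'cores'
    usage.tally('cores', 4)        # tally usage from two hypothetical instances
    usage.tally('cores', 2)
    return usage['cores']          # {'quota': 20, 'used': 6, 'available': 14}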
def _get_quota_data(request, tenant_mode=True, disabled_quotas=None,
tenant_id=None):
quotasets = []
if not tenant_id:
tenant_id = request.user.tenant_id
if disabled_quotas is None:
disabled_quotas = get_disabled_quotas(request)
qs = base.QuotaSet()
if 'instances' not in disabled_quotas:
if tenant_mode:
quotasets.append(nova.tenant_quota_get(request, tenant_id))
else:
quotasets.append(nova.default_quota_get(request, tenant_id))
if 'volumes' not in disabled_quotas:
try:
if tenant_mode:
quotasets.append(cinder.tenant_quota_get(request, tenant_id))
else:
quotasets.append(cinder.default_quota_get(request, tenant_id))
except cinder.cinder_exception.ClientException:
disabled_quotas.update(CINDER_QUOTA_FIELDS)
msg = _("Unable to retrieve volume limit information.")
exceptions.handle(request, msg)
for quota in itertools.chain(*quotasets):
if quota.name not in disabled_quotas:
qs[quota.name] = quota.limit
return qs
@profiler.trace
def get_default_quota_data(request, disabled_quotas=None, tenant_id=None):
return _get_quota_data(request,
tenant_mode=False,
disabled_quotas=disabled_quotas,
tenant_id=tenant_id)
@profiler.trace
def get_tenant_quota_data(request, disabled_quotas=None, tenant_id=None):
qs = _get_quota_data(request,
tenant_mode=True,
disabled_quotas=disabled_quotas,
tenant_id=tenant_id)
# TODO(jpichon): There is no API to get the default system quotas
# in Neutron (cf. LP#1204956), so for now handle tenant quotas here.
# This should be handled in _get_quota_data() eventually.
if not disabled_quotas:
return qs
# Check if neutron is enabled by looking for network
if 'network' not in disabled_quotas:
tenant_id = tenant_id or request.user.tenant_id
neutron_quotas = neutron.tenant_quota_get(request, tenant_id)
if 'floating_ips' in disabled_quotas:
# Neutron with quota extension disabled
if 'floatingip' in disabled_quotas:
qs.add(base.QuotaSet({'floating_ips': -1}))
# Neutron with quota extension enabled
else:
# Rename floatingip to floating_ips since that's how it's
# expected in some places (e.g. Security & Access' Floating IPs)
fips_quota = neutron_quotas.get('floatingip').limit
qs.add(base.QuotaSet({'floating_ips': fips_quota}))
if 'security_groups' in disabled_quotas:
if 'security_group' in disabled_quotas:
qs.add(base.QuotaSet({'security_groups': -1}))
# Neutron with quota extension enabled
else:
# Rename security_group to security_groups since that's how it's
# expected in some places (e.g. Security & Access' Security Groups)
sec_quota = neutron_quotas.get('security_group').limit
qs.add(base.QuotaSet({'security_groups': sec_quota}))
if 'network' in disabled_quotas:
for item in qs.items:
if item.name == 'networks':
qs.items.remove(item)
break
else:
net_quota = neutron_quotas.get('network').limit
qs.add(base.QuotaSet({'networks': net_quota}))
if 'subnet' in disabled_quotas:
for item in qs.items:
if item.name == 'subnets':
qs.items.remove(item)
break
else:
net_quota = neutron_quotas.get('subnet').limit
qs.add(base.QuotaSet({'subnets': net_quota}))
if 'router' in disabled_quotas:
for item in qs.items:
if item.name == 'routers':
qs.items.remove(item)
break
else:
router_quota = neutron_quotas.get('router').limit
qs.add(base.QuotaSet({'routers': router_quota}))
return qs
@profiler.trace
def get_disabled_quotas(request):
disabled_quotas = set([])
# Cinder
if not cinder.is_volume_service_enabled(request):
disabled_quotas.update(CINDER_QUOTA_FIELDS)
# Neutron
if not base.is_service_enabled(request, 'network'):
disabled_quotas.update(NEUTRON_QUOTA_FIELDS)
else:
# Remove the nova network quotas
disabled_quotas.update(['floating_ips', 'fixed_ips'])
if neutron.is_extension_supported(request, 'security-group'):
# If Neutron security group is supported, disable Nova quotas
disabled_quotas.update(['security_groups', 'security_group_rules'])
else:
# If Nova security group is used, disable Neutron quotas
disabled_quotas.update(['security_group', 'security_group_rule'])
if not neutron.is_router_enabled(request):
disabled_quotas.update(['router', 'floatingip'])
try:
if not neutron.is_quotas_extension_supported(request):
disabled_quotas.update(NEUTRON_QUOTA_FIELDS)
except Exception:
LOG.exception("There was an error checking if the Neutron "
"quotas extension is enabled.")
# Nova
if not (base.is_service_enabled(request, 'compute') and
nova.can_set_quotas()):
disabled_quotas.update(NOVA_QUOTA_FIELDS)
# There appear to be no glance quota fields currently
return disabled_quotas
@profiler.trace
def _get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id):
# Unlike the other services it can be the case that nova is enabled but
# doesn't support quotas, in which case we still want to get usage info,
# so don't rely on '"instances" in disabled_quotas' as elsewhere
if not base.is_service_enabled(request, 'compute'):
return
if tenant_id:
instances, has_more = nova.server_list(
request, search_opts={'tenant_id': tenant_id})
else:
instances, has_more = nova.server_list(request)
# Fetch deleted flavors if necessary.
flavors = dict([(f.id, f) for f in nova.flavor_list(request)])
missing_flavors = [instance.flavor['id'] for instance in instances
if instance.flavor['id'] not in flavors]
for missing in missing_flavors:
if missing not in flavors:
try:
flavors[missing] = nova.flavor_get(request, missing)
except Exception:
flavors[missing] = {}
exceptions.handle(request, ignore=True)
usages.tally('instances', len(instances))
# Sum our usage based on the flavors of the instances.
for flavor in [flavors[instance.flavor['id']] for instance in instances]:
usages.tally('cores', getattr(flavor, 'vcpus', None))
usages.tally('ram', getattr(flavor, 'ram', None))
# Initialize the tally if no instances have been launched yet
if len(instances) == 0:
usages.tally('cores', 0)
usages.tally('ram', 0)
@profiler.trace
def _get_tenant_network_usages(request, usages, disabled_quotas, tenant_id):
floating_ips = []
try:
if network.floating_ip_supported(request):
floating_ips = network.tenant_floating_ip_list(request)
except Exception:
pass
usages.tally('floating_ips', len(floating_ips))
if 'security_group' not in disabled_quotas:
security_groups = []
security_groups = network.security_group_list(request)
usages.tally('security_groups', len(security_groups))
if 'network' not in disabled_quotas:
networks = []
networks = neutron.network_list(request, shared=False)
if tenant_id:
networks = [net for net in networks if net.tenant_id == tenant_id]
usages.tally('networks', len(networks))
# get shared networks
shared_networks = neutron.network_list(request, shared=True)
if tenant_id:
shared_networks = [net for net in shared_networks
if net.tenant_id == tenant_id]
usages.tally('networks', len(shared_networks))
if 'subnet' not in disabled_quotas:
subnets = neutron.subnet_list(request, shared=False)
if tenant_id:
subnets = [sub for sub in subnets if sub.tenant_id == tenant_id]
# get shared subnets
shared_subnets = neutron.subnet_list(request, shared=True)
if tenant_id:
shared_subnets = [subnet for subnet in shared_subnets
if subnet.tenant_id == tenant_id]
usages.tally('subnets', len(subnets) + len(shared_subnets))
if 'router' not in disabled_quotas:
routers = []
routers = neutron.router_list(request)
if tenant_id:
routers = [rou for rou in routers if rou.tenant_id == tenant_id]
usages.tally('routers', len(routers))
@profiler.trace
def _get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id):
if 'volumes' not in disabled_quotas:
try:
if tenant_id:
opts = {'all_tenants': 1, 'project_id': tenant_id}
volumes = cinder.volume_list(request, opts)
snapshots = cinder.volume_snapshot_list(request, opts)
else:
volumes = cinder.volume_list(request)
snapshots = cinder.volume_snapshot_list(request)
volume_usage = sum([int(v.size) for v in volumes])
snapshot_usage = sum([int(s.size) for s in snapshots])
usages.tally('gigabytes', (snapshot_usage + volume_usage))
usages.tally('volumes', len(volumes))
usages.tally('snapshots', len(snapshots))
except cinder.cinder_exception.ClientException:
msg = _("Unable to retrieve volume limit information.")
exceptions.handle(request, msg)
@profiler.trace
@memoized
def tenant_quota_usages(request, tenant_id=None):
"""Get our quotas and construct our usage object.
    If no tenant_id is provided, request.user.project_id is used.
"""
if not tenant_id:
tenant_id = request.user.project_id
disabled_quotas = get_disabled_quotas(request)
usages = QuotaUsage()
for quota in get_tenant_quota_data(request,
disabled_quotas=disabled_quotas,
tenant_id=tenant_id):
usages.add_quota(quota)
# Get our usages.
_get_tenant_compute_usages(request, usages, disabled_quotas, tenant_id)
_get_tenant_network_usages(request, usages, disabled_quotas, tenant_id)
_get_tenant_volume_usages(request, usages, disabled_quotas, tenant_id)
return usages
@profiler.trace
def tenant_limit_usages(request):
# TODO(licostan): This method shall be removed from Quota module.
    # ProjectUsage/BaseUsage may be used instead on volume/image dashboards.
limits = {}
try:
if base.is_service_enabled(request, 'compute'):
limits.update(nova.tenant_absolute_limits(request, reserved=True))
except Exception:
msg = _("Unable to retrieve compute limit information.")
exceptions.handle(request, msg)
if cinder.is_volume_service_enabled(request):
try:
limits.update(cinder.tenant_absolute_limits(request))
volumes = cinder.volume_list(request)
snapshots = cinder.volume_snapshot_list(request)
# gigabytesUsed should be a total of volumes and snapshots
vol_size = sum([getattr(volume, 'size', 0) for volume
in volumes])
snap_size = sum([getattr(snap, 'size', 0) for snap
in snapshots])
limits['gigabytesUsed'] = vol_size + snap_size
limits['volumesUsed'] = len(volumes)
limits['snapshotsUsed'] = len(snapshots)
except cinder.cinder_exception.ClientException:
msg = _("Unable to retrieve volume limit information.")
exceptions.handle(request, msg)
return limits
def enabled_quotas(request):
"""Returns the list of quotas available minus those that are disabled"""
return set(QUOTA_FIELDS) - get_disabled_quotas(request)
| 37.953744 | 79 | 0.631304 |
794f80507796638ae49761ca281f4f1b8e758a03 | 1,537 | py | Python | clane/similarity.py | helloybz/CLANE | 60e6f0503642ac63d3bcde136885e47954067c17 | [
"MIT"
] | 1 | 2021-12-14T10:52:55.000Z | 2021-12-14T10:52:55.000Z | clane/similarity.py | helloybz/CLANE | 60e6f0503642ac63d3bcde136885e47954067c17 | [
"MIT"
] | 14 | 2021-10-13T08:52:16.000Z | 2021-12-15T04:19:53.000Z | clane/similarity.py | helloybz/CLANE | 60e6f0503642ac63d3bcde136885e47954067c17 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
class CosineSimilarity:
"""
Cosine similarity between the two vector.
Given two vector v1 and v2, the cosine similarity between the two vector
is the cosine of theta, where the theta is the angle between the two vector on therir inner product space.
The cosine of the theta can be derived from Euclidean dot product of the two vectors.
"""
def __init__(
self,
**kwargs
) -> None:
super(CosineSimilarity, self).__init__()
def __call__(
self,
v1: torch.Tensor,
v2: torch.Tensor,
) -> torch.Tensor:
if v1.dim() == 1:
v1 = v1.unsqueeze(0)
if v2.dim() == 1:
v2 = v2.unsqueeze(0)
v1 = v1.unsqueeze(1)
v2 = v2.unsqueeze(-1)
return v1.matmul(v2).squeeze(1).squeeze(1).div(v1.pow(2).sum().sqrt() * v2.pow(2).sum().sqrt())
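def _cosine_similarity_demo():
    # Illustrative only, not part of the original module: parallel vectors score
    # 1.0 and orthogonal vectors score 0.0 under CosineSimilarity above.
    similarity = CosineSimilarity()
    a = torch.tensor([1.0, 0.0])
    b = torch.tensor([0.0, 1.0])
    return similarity(a, a), similarity(a, b)  # (tensor([1.]), tensor([0.]))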
class AsymmertricSimilarity(nn.Module):
    """
    Trainable asymmetric similarity: the source and destination embeddings are
    projected through separate linear maps and their dot product is returned.
    """
def __init__(
self,
n_dim: int,
**kwargs,
) -> None:
super(AsymmertricSimilarity, self).__init__()
self.Phi_src = nn.Linear(n_dim, n_dim, bias=False)
self.Phi_dst = nn.Linear(n_dim, n_dim, bias=False)
nn.init.xavier_normal_(self.Phi_src.weight)
nn.init.xavier_normal_(self.Phi_dst.weight)
def forward(
self,
z_src: torch.Tensor,
z_dst: torch.Tensor,
) -> torch.Tensor:
return self.Phi_src(z_src).unsqueeze(-2).matmul(self.Phi_dst(z_dst).unsqueeze(-1)).squeeze()
| 29 | 110 | 0.60052 |
794f81527deaf78650cbef16e6f600fb42c7be49 | 4,552 | py | Python | bonus_lesson_examples/collateral/os_upgrade/upgrade_device_alt.py | ktbyers/python_course | db288d2978b3d244a0d7d51a79dfbb5afc5dcbe8 | [
"Apache-2.0"
] | 24 | 2018-03-16T19:24:16.000Z | 2022-01-05T09:18:07.000Z | bonus_lesson_examples/collateral/os_upgrade/upgrade_device_alt.py | ktbyers/python_course | db288d2978b3d244a0d7d51a79dfbb5afc5dcbe8 | [
"Apache-2.0"
] | null | null | null | bonus_lesson_examples/collateral/os_upgrade/upgrade_device_alt.py | ktbyers/python_course | db288d2978b3d244a0d7d51a79dfbb5afc5dcbe8 | [
"Apache-2.0"
] | 29 | 2018-03-16T15:49:36.000Z | 2022-03-16T14:44:32.000Z | #!/usr/bin/env python
"""
Alternate solution to the OS Upgrade Example. This was the one I created during my initial
planning and experimentation.
"""
from __future__ import print_function, unicode_literals
from getpass import getpass
from datetime import datetime
import re
import sys
from netmiko import ConnectHandler, file_transfer
def hit_any_key():
try:
raw_input("Hit any key to continue: ")
except NameError:
input("Hit any key to continue: ")
def verify_image(ssh_conn, file_system, dest_file, file_size):
verify_cmd = 'dir {}/{}'.format(file_system, dest_file)
verify_output = ssh_conn.send_command(verify_cmd)
if file_size in verify_output and dest_file in verify_output:
print()
print(">>>>>")
print("The new image is on the remote device:")
print(verify_output)
print(">>>>>")
print()
hit_any_key()
else:
raise ValueError("New image not detected on remote device.")
def check_boot_var(ssh_conn):
"""Currently only handles a single boot system statement."""
current_boot = ssh_conn.send_command("show run | inc boot")
match = re.search(r"^(boot system flash .*)$", current_boot, flags=re.M)
boot_cmd = ''
if match:
boot_cmd = match.group(1)
return boot_cmd
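def _check_boot_var_example():
    # Hypothetical illustration, not part of the original script: the regex in
    # check_boot_var() pulls a single "boot system flash ..." statement out of
    # "show run | inc boot" output (the sample output below is invented).
    sample = ("boot-start-marker\n"
              "boot system flash c880data-universalk9-mz.154-2.T1.bin\n"
              "boot-end-marker")
    match = re.search(r"^(boot system flash .*)$", sample, flags=re.M)
    return match.group(1) if match else ''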
def upgrade_device(net_device):
start_time = datetime.now()
print()
print("Upgrading OS on device: {}".format(net_device['host']))
print("-" * 50)
# Extract file and file system variables
file_system = net_device.pop('file_system')
source_file = net_device.pop('source_file')
dest_file = net_device.pop('dest_file')
# Establish SSH control channel
print(".establishing SSH connection.")
ssh_conn = ConnectHandler(**net_device)
# SCP new image file
print(".transferring image file.")
enable_transfer = True
if enable_transfer:
transfer_dict = file_transfer(ssh_conn, source_file=source_file, dest_file=dest_file,
file_system=file_system, direction='put',
overwrite_file=False)
else:
transfer_dict = {}
# Check the file exists and the MD5 matches the source file
if not transfer_dict.get('file_exists') or not transfer_dict.get('file_verified'):
raise ValueError("File doesn't exist or MD5 doesn't match on the remote system")
print(".verifying new image file.")
file_size = '42628912'
verify_image(ssh_conn, file_system, dest_file, file_size)
print()
print(".checking current boot commands")
boot_cmd = check_boot_var(ssh_conn)
print(".constructing new boot commands")
if boot_cmd:
boot_commands = [
"no {}".format(boot_cmd),
'boot system flash {}'.format(dest_file),
boot_cmd,
]
else:
boot_commands = [
'boot system flash {}'.format(dest_file),
]
print()
print(">>>>>")
print("Boot commands to send to the remote device:")
print(boot_commands)
print(">>>>>")
print()
hit_any_key()
print()
print(".sending new boot commands to remote device.")
output = ssh_conn.send_config_set(boot_commands)
print()
print()
print("Current boot variable: ")
print(">>>>>")
current_boot = ssh_conn.send_command("show run | inc boot")
print(current_boot)
print(">>>>>")
print()
# Reload the device
print()
try:
response = raw_input("Do you want to reload the device(y/n): ")
except NameError:
response = input("Do you want to reload the device(y/n): ")
if response.lower() != 'y':
sys.exit("Boot commands staged, but device not reloaded!\n\n")
else:
print("Saving running-config to startup-config")
ssh_conn.save_config()
print("Reloading device with new image!")
output = ssh_conn.send_command_timing("reload")
print(output)
if 'confirm' in output:
output = ssh_conn.send_command_timing("y")
end_time = datetime.now()
print("File transfer time: {}".format(end_time - start_time))
if __name__ == "__main__":
password = getpass()
cisco1 = {
'device_type': 'cisco_ios',
'host': 'cisco1.twb-tech.com',
'username': 'pyclass',
'password': password,
'file_system': 'flash:',
'source_file': 'test1.bin',
'dest_file': 'test1.bin',
}
for net_device in (cisco1,):
upgrade_device(net_device)
| 28.993631 | 93 | 0.63203 |
794f8305b528bab8091f2662332b930f23717e45 | 885 | py | Python | ws2122-lspm/Lib/site-packages/pm4py/statistics/concurrent_activities/__init__.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | ws2122-lspm/Lib/site-packages/pm4py/statistics/concurrent_activities/__init__.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | ws2122-lspm/Lib/site-packages/pm4py/statistics/concurrent_activities/__init__.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
from pm4py.statistics.concurrent_activities import log
import pkgutil
if pkgutil.find_loader("pandas"):
from pm4py.statistics.concurrent_activities import pandas
| 40.227273 | 76 | 0.749153 |
794f83f2b608614994804228a4c5cf4a46aed2db | 4,802 | py | Python | datasets/EPNDataset.py | CuiRuikai/mfm-net | 8fc67543fe6ec63cd93102c5ca78bd19d1926fdd | [
"MIT"
] | null | null | null | datasets/EPNDataset.py | CuiRuikai/mfm-net | 8fc67543fe6ec63cd93102c5ca78bd19d1926fdd | [
"MIT"
] | null | null | null | datasets/EPNDataset.py | CuiRuikai/mfm-net | 8fc67543fe6ec63cd93102c5ca78bd19d1926fdd | [
"MIT"
] | null | null | null | from unicodedata import category
import torch.utils.data as data
import numpy as np
import os, sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import data_transforms
from .io import IO
import random
import os
import json
from .build import DATASETS
from utils.logger import *
# References:
# - https://github.com/hzxie/GRNet/blob/master/utils/data_loaders.py
@DATASETS.register_module()
class EPN3D(data.Dataset):
# def __init__(self, data_root, subset, class_choice = None):
def __init__(self, config):
self.npoints = config.N_POINTS
self.category_file = config.CATEGORY_FILE_PATH
self.partial_points_path = config.PARTIAL_POINTS_PATH
self.complete_points_path = config.COMPLETE_POINTS_PATH
self.class_choice = config.class_choice
self.subset = config.subset
# Load the dataset indexing file
self.dataset_categories = []
with open(self.category_file) as f:
self.dataset_categories = json.loads(f.read())
self.dataset_categories = [dc for dc in self.dataset_categories if dc['taxonomy_name'] in self.class_choice]
self.file_list = self._get_file_list(self.subset)
self.transforms = self._get_transforms(self.subset)
def _get_transforms(self, subset):
if subset == 'train':
return data_transforms.Compose([{
'callback': 'RandomSamplePoints', # random permutate points
'parameters': {
'n_points': 2048
},
'objects': ['partial', 'complete']
}, {
'callback': 'RandomMirrorPoints',
'objects': ['partial', 'complete']
},{
'callback': 'ToTensor',
'objects': ['partial', 'complete']
}])
else:
return data_transforms.Compose([{
'callback': 'RandomSamplePoints',
'parameters': {
'n_points': 2048
},
'objects': ['partial']
}, {
'callback': 'ToTensor',
'objects': ['partial', 'complete']
}])
# def _get_transforms(self, subset):
# if subset == 'train':
# return data_transforms.Compose([{
# 'callback': 'ToTensor',
# 'objects': ['partial', 'complete']
# }])
# else:
# return data_transforms.Compose([{
# 'callback': 'ToTensor',
# 'objects': ['partial', 'complete']
# }])
def _get_file_list(self, subset):
"""Prepare file list for the dataset"""
file_list = {
'taxonomy_id': [],
'model_id': [],
'partial_path': [],
'gt_path': []
}
for dc in self.dataset_categories:
print_log('Collecting files of Taxonomy [ID=%s, Name=%s]' % (dc['taxonomy_id'], dc['taxonomy_name']), logger='EPN3DNDATASET')
category_name = dc['taxonomy_name']
partial_samples = dc[subset]['partial']
complete_samples = dc[subset]['complete']
for (partial_file, complete_file) in zip(partial_samples, complete_samples):
file_list['taxonomy_id'].append(dc['taxonomy_id'])
file_list['model_id'].append(complete_file)
file_list['partial_path'].append(self.partial_points_path % (category_name, partial_file))
file_list['gt_path'].append(self.complete_points_path % (category_name, complete_file))
shuffled_gt = file_list['gt_path'].copy()
random.shuffle(shuffled_gt)
file_list['shuffled_gt_path'] = shuffled_gt
print_log('Complete collecting files of the dataset. Total files: %d' % len(file_list['partial_path']), logger='EPN3DDATASET')
return file_list
def shuffle_gt(self):
random.shuffle(self.file_list['shuffled_gt_path'])
def __getitem__(self, idx):
sample = {}
data = {}
sample['taxonomy_id'] = self.file_list['taxonomy_id'][idx]
sample['model_id'] = self.file_list['model_id'][idx]
data['partial'] = IO.get(self.file_list['partial_path'][idx]).astype(np.float32)
if self.subset == 'train':
data['complete'] = IO.get(self.file_list['shuffled_gt_path'][idx]).astype(np.float32)
else: # test/val
data['complete'] = IO.get(self.file_list['gt_path'][idx]).astype(np.float32)
if self.transforms is not None:
data = self.transforms(data)
return sample['taxonomy_id'], sample['model_id'], (data['partial'], data['complete'])
def __len__(self):
return len(self.file_list['partial_path']) | 36.378788 | 137 | 0.586839 |
794f84017791695a80027f215c016113d16c5c39 | 382 | py | Python | dice_game/api/migrations/0003_alter_game_game_end.py | mriduldhall/Dice-Game-NEA | c3ff301a1fb7d3fb8b60d6a3b3417e068e6fda83 | [
"MIT"
] | null | null | null | dice_game/api/migrations/0003_alter_game_game_end.py | mriduldhall/Dice-Game-NEA | c3ff301a1fb7d3fb8b60d6a3b3417e068e6fda83 | [
"MIT"
] | null | null | null | dice_game/api/migrations/0003_alter_game_game_end.py | mriduldhall/Dice-Game-NEA | c3ff301a1fb7d3fb8b60d6a3b3417e068e6fda83 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.7 on 2021-10-04 20:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_game'),
]
operations = [
migrations.AlterField(
model_name='game',
name='game_end',
field=models.DateTimeField(default=None, null=True),
),
]
| 20.105263 | 64 | 0.586387 |
794f84e2a43a3945726a8aa8ea1dd0be2d664219 | 4,516 | py | Python | repos.py | asha-bc/BrightcovePY | 52e220e8e95221745b4de52ba94689509d27c072 | [
"MIT"
] | 3 | 2020-12-14T23:08:09.000Z | 2021-08-05T05:44:19.000Z | repos.py | asha-bc/BrightcovePY | 52e220e8e95221745b4de52ba94689509d27c072 | [
"MIT"
] | null | null | null | repos.py | asha-bc/BrightcovePY | 52e220e8e95221745b4de52ba94689509d27c072 | [
"MIT"
] | 2 | 2021-10-19T15:24:28.000Z | 2022-03-08T08:17:30.000Z | #!/usr/bin/env python3
import sys
import argparse
from brightcove.DeliverySystem import DeliverySystem
from brightcove.OAuth import OAuth
from brightcove.utils import load_account_info
# init the argument parsing
parser = argparse.ArgumentParser(prog=sys.argv[0])
parser.add_argument('--list', action='store_true', default=False, help='List all repositories or files in account or repository')
parser.add_argument('--add', action='store_true', default=False, help='Add a repository or file to account or repository')
parser.add_argument('--delete', action='store_true', default=False, help='Delete a repository or file in account or repository')
parser.add_argument('--repo', metavar='<repository name>', type=str, help='Name of repository')
parser.add_argument('--file', metavar='<filename>', type=str, help='File name')
parser.add_argument('--config', metavar='<config filename>', type=str, help='Name and path of account config information file')
parser.add_argument('--account', metavar='<Brightcove Account ID>', type=str, help='Brightcove Account ID to use (if different from ID in config)')
# parse the args
args = parser.parse_args()
# get account info from config file
try:
account_id, client_id, client_secret, _ = load_account_info(args.config)
except Exception as e:
print(e)
sys.exit(2)
# if account ID was provided override the one from config
account_id = args.account or account_id
# create a Delivery System API instance
ds = DeliverySystem(OAuth(account_id=account_id,client_id=client_id, client_secret=client_secret))
# delete a repository, or a file within a repository
if args.delete:
# delete a file in a repo?
if args.repo and args.file:
if args.file=='all':
print('Delete all files not supported yet.')
else:
response = ds.DeleteFileInRepository(repo_name=args.repo, file_name=args.file, account_id=account_id)
code = response.status_code
print(f'Deleting file "{args.file}" in repository "{args.repo}": {code}')
if code not in DeliverySystem.success_responses:
print(f'Error deleting file to repository: {response.text}')
# delete a repo?
elif args.repo:
response = ds.DeleteRepository(repo_name=args.repo, account_id=account_id)
code = response.status_code
print(f'Deleting repository "{args.repo}" in account ID {account_id}: {code}')
if code not in DeliverySystem.success_responses:
print(f'Error deleting repository: {response.text}')
# add a repo to account or a file to a repo
if args.add:
# add a file to a repo?
if args.repo and args.file:
response = ds.AddFileToRepository(repo_name=args.repo, file_name=args.file, account_id=account_id)
code = response.status_code
print(f'Adding file "{args.file}" to repository "{args.repo}": {code}')
if code in DeliverySystem.success_responses:
print(response.text)
else:
print(f'Error adding file to repository: {response.text}')
# add a repo
elif args.repo:
response = ds.CreateRepository(repo_name=args.repo, account_id=account_id)
code = response.status_code
print(f'Adding repository "{args.repo}" to account ID {account_id}: {code}')
if code in DeliverySystem.success_responses:
print(response.text)
else:
print(f'Error adding repository to account: {response.text}')
# list files in repo or list all repos in account
if args.list:
# list files in a repo?
if args.repo:
response = ds.ListFilesInRepository(repo_name=args.repo, account_id=account_id)
if response.status_code in DeliverySystem.success_responses:
response = response.json()
print(f'{response["item_count"]} item(s) found in repository.\n')
for repo_file in response['items']:
print(f'Name: {repo_file["name"]}\nURL.: {repo_file["public_url"]}\n')
else:
print(f'Error listing files in repository: {response.text}')
# list repos in an account
else:
response = ds.ListRepositories(account_id=account_id)
if response.status_code in DeliverySystem.success_responses:
response = response.json()
print(f'{response["item_count"]} item(s) found in account.\n')
for repo in response['items']:
print(f'Name: {repo["name"]}')
else:
print(f'Error listing repositories in account: {response.text}')
| 46.556701 | 147 | 0.686891 |
794f8519b65f0f8d2a3664d4a28fee193bc83e5a | 1,105 | py | Python | examples/08_raytracingtriangle_colorvertex.py | Oleg595/pyRT | 6fc0ccbc6fb24dcc2a8532aa22eb9574f1afdb3a | [
"MIT"
] | 74 | 2016-09-02T08:15:39.000Z | 2021-08-09T08:16:23.000Z | examples/08_raytracingtriangle_colorvertex.py | Oleg595/pyRT | 6fc0ccbc6fb24dcc2a8532aa22eb9574f1afdb3a | [
"MIT"
] | 22 | 2016-09-02T08:15:14.000Z | 2021-02-22T19:52:21.000Z | examples/08_raytracingtriangle_colorvertex.py | Oleg595/pyRT | 6fc0ccbc6fb24dcc2a8532aa22eb9574f1afdb3a | [
"MIT"
] | 27 | 2016-09-04T12:55:27.000Z | 2022-03-19T11:21:24.000Z | # Example 8: Raytracing a single triangle - with vertex color
#
# A triangle is renderereda again - this time every vertex in the triangle has a different color
from pyrt.math import *
from pyrt.scene import *
from pyrt.geometry import Triangle, Vertex
from pyrt.camera import OrthographicCamera, PerspectiveCamera
from pyrt.renderer import SimpleRT
from PIL import Image
# Specify width/height as in example 5
width = 320
height = 240
# now create a camera and a view like in example 5:
camera = PerspectiveCamera(width, height, 60)
camera.setView(Vec3(0,-10,0), Vec3(0,0,0), Vec3(0,0,1))
# Create a scene
scene = Scene()
# Add a triangle (same as example 5) to the scene:
scene.add(Triangle(Vertex(position=(-5, 1, 0), color=(1,0,0)),
Vertex(position=(0, 1, 5), color=(0,1,0)),
Vertex(position=(5, 1, 0), color=(0,0,1))))
# Now tell the scene which camera we use
scene.setCamera(camera)
# Create a raytracer using "SimpleRT"
engine = SimpleRT()
# Render the scene:
image = engine.render(scene)
# Save the resulting image using pillow
image.save("08.png")
| 27.625 | 96 | 0.707692 |
794f852f406fb1132ad0f677a9dbd073d3fe96b3 | 415 | py | Python | backend/misty_thunder_31453/wsgi.py | crowdbotics-apps/misty-thunder-31453 | 69637f0acd518ce8004e0f8c8918c07ac66fa0d1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/misty_thunder_31453/wsgi.py | crowdbotics-apps/misty-thunder-31453 | 69637f0acd518ce8004e0f8c8918c07ac66fa0d1 | [
"FTL",
"AML",
"RSA-MD"
] | 8 | 2021-10-15T20:28:47.000Z | 2021-10-15T20:28:53.000Z | backend/misty_thunder_31453/wsgi.py | crowdbotics-apps/misty-thunder-31453 | 69637f0acd518ce8004e0f8c8918c07ac66fa0d1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for misty_thunder_31453 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'misty_thunder_31453.settings')
application = get_wsgi_application()
| 24.411765 | 79 | 0.79759 |
794f8561c4bd5b1236d10b83d39975bdf0a5ad52 | 4,146 | py | Python | bioparser/bodymap2.py | dhimmel/serg-pycode | 075de0ba470e4fbda5e33dfc23cb9ecd86ec53b7 | [
"BSD-2-Clause-Patent"
] | null | null | null | bioparser/bodymap2.py | dhimmel/serg-pycode | 075de0ba470e4fbda5e33dfc23cb9ecd86ec53b7 | [
"BSD-2-Clause-Patent"
] | null | null | null | bioparser/bodymap2.py | dhimmel/serg-pycode | 075de0ba470e4fbda5e33dfc23cb9ecd86ec53b7 | [
"BSD-2-Clause-Patent"
] | null | null | null | import os
import csv
import collections
import data
import mapping.manual_reader
class BodyMap2(object):
def __init__(self, directory=None):
if not directory:
directory = data.source_data_dir('bodymap2')
self.directory = directory
self.path = os.path.join(directory, 'E-MTAB-513-query-results.tsv')
def read(self, bto_convert=True):
read_file = open(self.path)
line = '#'
while line.startswith('#'):
line = read_file.next().rstrip()
fieldnames = line.split('\t')
tissues = list(fieldnames)
for non_tissue_key in ['Gene name', 'Gene Id']:
tissues.remove(non_tissue_key)
if bto_convert:
path = os.path.join(self.directory, 'bodymap-bto-mappings.tsv')
bodymap_to_bto = mapping.manual_reader.get_mapping_dict(
path, 'bodymap_name', 'bto_id', plural=False)
tissues = [bodymap_to_bto[tissue] for tissue in tissues]
fieldnames = fieldnames[:2] + tissues
reader = csv.DictReader(read_file, delimiter='\t', fieldnames=fieldnames)
rows = list()
for row in reader:
for tissue in tissues:
fpkm = row[tissue]
row[tissue] = float(fpkm) if fpkm != '' else None
rows.append(row)
read_file.close()
return tissues, rows
def process(self, bto_convert=True):
geom_mean = lambda nums: reduce(lambda x, y: x*y, nums) ** (1.0 / len(nums))
tissues, rows = self.read(bto_convert)
#symbol_to_gene = data.Data().hgnc.get_symbol_to_gene()
ensembl_to_gene = data.Data().hgnc.get_ensembl_to_gene()
gene_to_rows = dict()
for row in rows:
#symbol = row['Gene name']
ensembl = row['Gene Id']
#gene = symbol_to_gene.get(symbol)
gene = ensembl_to_gene.get(ensembl)
if not gene:
continue
gene_to_rows.setdefault(gene, list()).append(row)
processed_rows = list()
for gene, rows in gene_to_rows.iteritems():
processed_row = collections.OrderedDict()
processed_row['symbol'] = gene.symbol
for tissue in tissues:
fpkms = [row[tissue] for row in rows if row[tissue] is not None]
#fpkm = sum(fpkms) / len(fpkms) if fpkms else None # mean
fpkm = geom_mean(fpkms) if fpkms else None # geometric mean
processed_row[tissue] = fpkm
processed_rows.append(processed_row)
processed_rows.sort(key=lambda row: row['symbol'])
path = os.path.join(self.directory, 'processed.txt')
with open(path, 'w') as write_file:
writer = csv.writer(write_file, delimiter='\t')
writer.writerow(processed_rows[0].keys())
for row in processed_rows:
writer.writerow(row.values())
return processed_rows
def read_processed(self):
path = os.path.join(self.directory, 'processed.txt')
read_file = open(path)
reader = csv.DictReader(read_file, delimiter='\t')
tissues = list(reader.fieldnames)
tissues.remove('symbol')
rows = list()
for row in reader:
for tissue in tissues:
fpkm = row[tissue]
row[tissue] = float(fpkm) if fpkm != '' else None
rows.append(row)
read_file.close()
return tissues, rows
def get_edges(self, fpkm_cutoff = 0.0):
tissues, rows = self.read_processed()
for row in rows:
symbol = row['symbol']
for tissue in tissues:
fpkm = row[tissue]
if fpkm is None:
continue
if fpkm < fpkm_cutoff:
continue
edge = symbol, tissue, fpkm
yield edge
if __name__ == '__main__':
bodymap = BodyMap2()
bodymap.process()
edges = list(bodymap.get_edges(100))
print edges[:100]
| 34.840336 | 84 | 0.56247 |
794f85625974e2db07abe64383a687d92fd865cb | 85 | py | Python | boxesinboxes/src/boxesinboxes/__main__.py | pmfrank/beeware-tutorials | 96274b0a735bd468e946111baf441a527ff0b0d5 | [
"BSD-2-Clause"
] | 1 | 2021-06-04T05:51:39.000Z | 2021-06-04T05:51:39.000Z | boxesinboxes/src/boxesinboxes/__main__.py | pmfrank/beeware-tutorials | 96274b0a735bd468e946111baf441a527ff0b0d5 | [
"BSD-2-Clause"
] | null | null | null | boxesinboxes/src/boxesinboxes/__main__.py | pmfrank/beeware-tutorials | 96274b0a735bd468e946111baf441a527ff0b0d5 | [
"BSD-2-Clause"
] | null | null | null | from boxesinboxes.app import main
if __name__ == '__main__':
main().main_loop()
| 17 | 33 | 0.705882 |
794f857c3f6eff25ad918f4f49abaec6d9758d3b | 50,037 | py | Python | data/bin/Lib/configparser.py | shakenetwork/collector | 60864537f9b8046d1b42258756e36a54149dddf9 | [
"Apache-2.0"
] | 309 | 2015-05-08T18:22:55.000Z | 2022-01-11T12:27:41.000Z | data/bin/Lib/configparser.py | shakenetwork/collector | 60864537f9b8046d1b42258756e36a54149dddf9 | [
"Apache-2.0"
] | 30 | 2015-05-13T02:15:15.000Z | 2019-12-28T14:01:19.000Z | data/bin/Lib/configparser.py | shakenetwork/collector | 60864537f9b8046d1b42258756e36a54149dddf9 | [
"Apache-2.0"
] | 35 | 2015-06-11T05:35:55.000Z | 2022-01-11T19:32:00.000Z | """Configuration file parser.
A configuration file consists of sections, lead by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
Create the parser. When `defaults' is given, it is initialized into the
dictionary or intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
getint(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to an integer.
getfloat(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a float.
getboolean(section, options, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException.
"""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException.
"""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is repeated in an input source.
Possible repetitions that raise this exception are: multiple creation
using the API or in strict parsers when a section is found more than once
in a single input file, string or dictionary.
"""
def __init__(self, section, source=None, lineno=None):
msg = [repr(section), " already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": section ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Section ")
Error.__init__(self, "".join(msg))
self.section = section
self.source = source
self.lineno = lineno
self.args = (section, source, lineno)
class DuplicateOptionError(Error):
"""Raised by strict parsers when an option is repeated in an input source.
Current implementation raises this exception only when an option is found
more than once in a single file, string or dictionary.
"""
def __init__(self, section, option, source=None, lineno=None):
msg = [repr(option), " in section ", repr(section),
" already exists"]
if source is not None:
message = ["While reading from ", repr(source)]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": option ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Option ")
Error.__init__(self, "".join(msg))
self.section = section
self.option = option
self.source = source
self.lineno = lineno
self.args = (section, option, source, lineno)
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text contains invalid syntax.
Current implementation raises this exception when the source text into
which substitutions are made does not conform to the required syntax.
"""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, source=None, filename=None):
# Exactly one of `source'/`filename' arguments has to be given.
# `filename' kept for compatibility.
if filename and source:
raise ValueError("Cannot specify both `filename' and `source'. "
"Use `source'.")
elif not filename and not source:
raise ValueError("Required argument `source' not given.")
elif filename:
source = filename
Error.__init__(self, 'Source contains parsing errors: %s' % source)
self.source = source
self.errors = []
self.args = (source, )
@property
def filename(self):
"""Deprecated, use `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
return self.source
@filename.setter
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.source = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
# Used in parser getters to indicate the default behaviour when a specific
# option is not found it to raise an exception. Created to enable `None' as
# a valid fallback value.
_UNSET = object()
class Interpolation:
"""Dummy interpolation that passes the value through with no changes."""
def before_get(self, parser, section, option, value, defaults):
return value
def before_set(self, parser, section, option, value):
return value
def before_read(self, parser, section, option, value):
return value
def before_write(self, parser, section, option, value):
return value
class BasicInterpolation(Interpolation):
"""Interpolation as implemented in the classic ConfigParser.
The option values can contain format strings which refer to other values in
the same section, or values in the special default section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand. If a user needs to use a bare % in
a configuration file, she can escape it by writing %%. Other % usage
is considered a user error and raises `InterpolationSyntaxError'."""
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('%%', '') # escaped percent signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = parser.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(parser, option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', "
"found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
"""Advanced variant of interpolation, supports the syntax used by
`zc.buildout'. Enables interpolation between sections."""
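    # Editor's illustration (not in the original source): with this interpolation a
    # value such as  log_dir = ${paths:base}/logs  is resolved by looking up option
    # `base' in section [paths] at get() time, while a bare ${opt} refers to the
    # current section. Section/option names here are invented for the example.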
_KEYCRE = re.compile(r"\$\{([^}]+)\}")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('$$', '') # escaped dollar signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '$' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('$')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if len(path) == 1:
opt = parser.optionxform(path[0])
v = map[opt]
elif len(path) == 2:
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw=True)
else:
raise InterpolationSyntaxError(
option, section,
"More than one ':' found: %r" % (rest,))
except (KeyError, NoSectionError, NoOptionError):
raise InterpolationMissingOptionError(
option, section, rest, ":".join(path))
if "$" in v:
self._interpolate_some(parser, opt, accum, v, sect,
dict(parser.items(sect, raw=True)),
depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
"""Deprecated interpolation used in old versions of ConfigParser.
Use BasicInterpolation or ExtendedInterpolation instead."""
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def before_get(self, parser, section, option, value, vars):
rawval = value
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
replace = functools.partial(self._interpolation_replace,
parser=parser)
value = self._KEYCRE.sub(replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
def before_set(self, parser, section, option, value):
return value
@staticmethod
def _interpolation_replace(match, parser):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, empty_lines_in_values=True,
default_section=DEFAULTSECT,
interpolation=_UNSET):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
self._proxies = self._dict()
self._proxies[default_section] = SectionProxy(self, default_section)
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
self._empty_lines_in_values = empty_lines_in_values
self.default_section=default_section
self._interpolation = interpolation
if self._interpolation is _UNSET:
self._interpolation = self._DEFAULT_INTERPOLATION
if self._interpolation is None:
self._interpolation = Interpolation()
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except OSError:
continue
read_ok.append(filename)
return read_ok
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
If the key is not found and `fallback' is provided, it is used as
a fallback value. `None' can be provided as a `fallback' value.
If interpolation is enabled and the optional argument `raw' is False,
all interpolations are expanded in the return values.
Arguments `raw', `vars', and `fallback' are keyword only.
The section DEFAULT is special.
"""
try:
d = self._unify_values(section, vars)
except NoSectionError:
if fallback is _UNSET:
raise
else:
return fallback
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
if fallback is _UNSET:
raise NoOptionError(option, section)
else:
return fallback
if raw or value is None:
return value
else:
return self._interpolation.before_get(self, section, option, value,
d)
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, int, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, float, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, self._convert_to_boolean, option,
raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
`vars' argument, which must be a dictionary whose contents overrides
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
self._join_multiline_values()
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
the 'section' which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
def get(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.get(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getint(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getint(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getfloat(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getboolean(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
fallback=fallback)
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
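# Editor's addition: a small, hedged usage sketch of the API described in the
# module docstring; it is not part of the original file, and the section and
# option names below are invented for illustration.
if __name__ == "__main__":
    _demo = ConfigParser()
    _demo.read_string(
        "[paths]\n"
        "home = /tmp\n"
        "cache = %(home)s/data\n"
    )
    # BasicInterpolation (the ConfigParser default) expands %(home)s from the
    # same section, so this prints '/tmp/data'.
    print(_demo.get("paths", "cache"))
    # A missing option can fall back to a default instead of raising
    # NoOptionError.
    print(_demo.getboolean("paths", "debug", fallback=False))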
| 39.337264 | 79 | 0.577333 |
794f87a29d79a2eb34d3e55433cc03c89e7c11a4 | 901 | py | Python | hb_quant/huobi/service/market/sub_candlestick.py | wenli135/Binance-volatility-trading-bot | 75a03ad61df0e95492128fb6f1f419d4dc256ab3 | [
"MIT"
] | 611 | 2019-07-10T08:17:50.000Z | 2022-03-21T18:56:39.000Z | hb_quant/huobi/service/market/sub_candlestick.py | wenli135/Binance-volatility-trading-bot | 75a03ad61df0e95492128fb6f1f419d4dc256ab3 | [
"MIT"
] | 105 | 2019-07-12T03:43:41.000Z | 2022-03-30T10:33:06.000Z | hb_quant/huobi/service/market/sub_candlestick.py | wenli135/Binance-volatility-trading-bot | 75a03ad61df0e95492128fb6f1f419d4dc256ab3 | [
"MIT"
] | 325 | 2019-07-12T02:46:54.000Z | 2022-03-21T18:56:41.000Z | import time
from huobi.utils import *
from huobi.connection.subscribe_client import SubscribeClient
from huobi.model.market import *
class SubCandleStickService:
def __init__(self, params):
self.params = params
def subscribe(self, callback, error_handler, **kwargs):
symbol_list = self.params["symbol_list"]
interval = self.params["interval"]
def subscription(connection):
for symbol in symbol_list:
connection.send(kline_channel(symbol, interval))
time.sleep(0.01)
def parse(dict_data):
return default_parse(dict_data, CandlestickEvent, Candlestick)
SubscribeClient(**kwargs).execute_subscribe_v1(subscription,
parse,
callback,
error_handler)
| 26.5 | 74 | 0.584906 |
794f87bedecad232fbf4083f635b29ec07adc499 | 23 | py | Python | APIs/Oauth/venv/lib/python3.8/site-packages/sanic/__version__.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | APIs/Oauth/venv/lib/python3.8/site-packages/sanic/__version__.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | APIs/Oauth/venv/lib/python3.8/site-packages/sanic/__version__.py | clarencejlee/jdp | d3d31db0138ff06f2f5ec592d85317941af4f280 | [
"MIT"
] | null | null | null | __version__ = "21.3.4"
| 11.5 | 22 | 0.652174 |
794f8832d5ed963c60f9a5b3b9ea04cd57781f32 | 52 | py | Python | hseling-api-midrusnlp/main.py | hseling/hseling-repo-midrusnlp | f04a3b737df2586cf5d273cb296d77f140440c72 | [
"MIT"
] | null | null | null | hseling-api-midrusnlp/main.py | hseling/hseling-repo-midrusnlp | f04a3b737df2586cf5d273cb296d77f140440c72 | [
"MIT"
] | null | null | null | hseling-api-midrusnlp/main.py | hseling/hseling-repo-midrusnlp | f04a3b737df2586cf5d273cb296d77f140440c72 | [
"MIT"
] | 1 | 2021-03-31T07:40:07.000Z | 2021-03-31T07:40:07.000Z | from hseling_api_midrusnlp.main import (
app,
)
| 13 | 40 | 0.730769 |
794f890741723a0e262f258f7b8ba66eaab3970b | 2,392 | py | Python | spice_api/tokens.py | Utagai/spice | a2604ad01c12138067eeb2036258437e0f8e4b82 | [
"MIT"
] | 41 | 2016-08-01T04:57:24.000Z | 2022-02-13T01:38:04.000Z | spice_api/tokens.py | Utagai/spice | a2604ad01c12138067eeb2036258437e0f8e4b82 | [
"MIT"
] | 32 | 2016-07-13T18:10:22.000Z | 2018-06-05T22:58:48.000Z | spice_api/tokens.py | Utagai/spice | a2604ad01c12138067eeb2036258437e0f8e4b82 | [
"MIT"
] | 14 | 2016-08-25T23:09:03.000Z | 2018-05-06T19:33:32.000Z | """
Oh, and a license thingy because otherwise it won't look cool and
professional.
MIT License
Copyright (c) [2016] [Mehrab Hoque]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the 'Software'), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
A py module for token values.
[medium]
[keys]
[operations]
[status]
[statusnumber]
"""
"""The class that defines 'mediums'. Commonly seen as [medium] through the
docs. A medium is the form in which the content comes in, and can either be
ANIME or MANGA.
These are to be treated like enum tokens and are used frequently in this API's
function calls to specify the medium for which to do work, since MyAnimeList
is very distinctly cut up into two pieces, one for anime and one for manga.
"""
class Medium:
ANIME, MANGA = list(range(2))
"""The operations available on user Lists. These are to be treated like enums
."""
class Operations:
ADD, UPDATE, DELETE = list(range(3))
"""The numerical translations for anime/manga statuses. These are to be treated
like enums.
"""
class StatusNumber:
READING = 1
WATCHING, COMPLETED, ONHOLD, DROPPED = list(range(1,5))
PLANTOWATCH = 6
PLANTOREAD = 6
"""A namespace for exposing key names in AnimeList and MangaList object
dictionaries.
"""
class Status:
READING = 'reading'
WATCHING = 'watching'
COMPLETED = 'completed'
ONHOLD = 'onhold'
DROPPED = 'dropped'
PLANTOWATCH = 'plantowatch'
PLANTOREAD = 'plantoread'
| 32.324324 | 79 | 0.747492 |
794f89c184a62850fdd55221995845f24cf3a978 | 1,079 | py | Python | main (16).py | lorenaEscobar0014/TECS2 | 92f5f6f820211b445c6388e058d97945b12b2591 | [
"MIT"
] | null | null | null | main (16).py | lorenaEscobar0014/TECS2 | 92f5f6f820211b445c6388e058d97945b12b2591 | [
"MIT"
] | null | null | null | main (16).py | lorenaEscobar0014/TECS2 | 92f5f6f820211b445c6388e058d97945b12b2591 | [
"MIT"
] | null | null | null | d=int(input("Digite el día de nacimiento: "))
m=int(input("Digite el mes de nacimiento: "))
if(m==12 and(d>=22 and d<=31) or (m==1 and (d<=20))):
print("Capricornio")
elif(m==1 and(d>=21 and d<=31) or (m==2 and (d<=19))):
print("Acuario")
elif(m==2 and(d>=20 and d<=29) or (m==3 and (d<=20))):
print("Picsis")
elif(m==3 and(d>=21 and d<=31) or (m==4 and (d<=20))):
print("Aries")
elif(m==4 and(d>=21 and d<=30) or (m==5 and (d<=20))):
print("Tauro")
elif(m==5 and(d>=1 and d<=31) or (m==6 and (d<=21))):
print("Geminis")
elif(m==6 and(d>=22 and d<=30) or (m==7 and (d<=22))):
print("Cancer")
elif(m==7 and(d>=23 and d<=31) or (m==8 and (d<=22))):
print("Leo")
elif(m==8 and(d>=23 and d<=31) or (m==9 and (d<=22))):
print("Virgo")
elif(m==9 and(d>=23 and d<=30) or (m==10 and (d<=22))):
print("Libra")
elif(m==10 and(d>=23 and d<=31) or (m==11 and (d<=22))):
print("Escorpion")
elif(m==11 and(d>=23 and d<=30) or (m==12 and (d<=21))):
print("Sagitario")
print("Es tu signo sodiacal") | 37.206897 | 56 | 0.557924 |
794f8b9e41d63be25db17fb233ab2bd6407bb80e | 2,061 | py | Python | restless/interfaces/aws.py | j-ventura/restless | 25b37a54783234c26ec671a13016162e5a870ba6 | [
"MIT"
] | null | null | null | restless/interfaces/aws.py | j-ventura/restless | 25b37a54783234c26ec671a13016162e5a870ba6 | [
"MIT"
] | null | null | null | restless/interfaces/aws.py | j-ventura/restless | 25b37a54783234c26ec671a13016162e5a870ba6 | [
"MIT"
] | null | null | null | from restless.util import camel_to_snake, snake_to_camel, UniversalEncoder
from restless.interfaces import BaseRequest
import json
from urllib.parse import unquote_plus
from base64 import b64decode, b64encode
from typing import Iterable
class Request(BaseRequest):
@property
def authorizer(self) -> dict:
return self._raw.get("requestContext", {}).get("authorizer")
def __init__(self, raw, use_camel_case=True):
super().__init__(raw)
self.path = unquote_plus(raw.get("path") or raw.get("rawPath"))
if raw.get('isBase64Encoded'):
self.body = b64decode(raw["body"].encode()) if raw.get("body") else None
else:
self.body = json.loads(raw["body"]) if raw.get("body") else None
self.method = raw.get("httpMethod") or raw.get('requestContext', {}).get("http", {}).get("method")
self.headers = raw.get("headers", {})
self.query = raw.get("queryStringParameters") or {}
if use_camel_case:
for member in ['body', 'headers', 'query']:
setattr(self, member, camel_to_snake(getattr(self, member)))
class Response(dict):
def __init__(self, body="", status_code=200, headers=None, use_camel_case=True):
super().__init__(
statusCode=status_code,
headers=headers or {}
)
if isinstance(body, bytes):
self["isBase64Encoded"] = True
self["body"] = b64encode(body).decode()
elif isinstance(body, (dict, Iterable)):
self["isBase64Encoded"] = False
if isinstance(body, str):
self["body"] = body
else:
if "Content-Type" not in self["headers"]:
self["headers"]["Content-Type"] = "application/json"
if use_camel_case:
body_ = snake_to_camel(body)
else:
body_ = body
self["body"] = json.dumps(body_, cls=UniversalEncoder)
else:
raise Exception("Unsupported")
| 34.932203 | 106 | 0.589034 |
794f8be8a7920197768cc08897059ca509f8735d | 5,312 | py | Python | tests/test_intent_classification.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | [
"Apache-2.0"
] | 83 | 2021-08-30T02:50:37.000Z | 2022-02-22T09:37:36.000Z | tests/test_intent_classification.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | [
"Apache-2.0"
] | 2 | 2021-09-10T08:44:13.000Z | 2022-01-23T17:33:35.000Z | tests/test_intent_classification.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | [
"Apache-2.0"
] | 6 | 2021-09-10T07:09:41.000Z | 2021-11-07T14:31:33.000Z | import os
from typing import Text
import torch
import unittest
import torch.nn as nn
import torch.optim as optim
from allennlp.models import Model
from allennlp.data.vocabulary import Vocabulary
from zsl_kg.class_encoders.auto_gnn import AutoGNN
from zsl_kg.example_encoders.text_encoder import TextEncoder
from zsl_kg.data.snips import SnipsDataset
from allennlp.data.iterators import BasicIterator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from zsl_kg.common.graph import NeighSampler
from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG
from allennlp.common.tqdm import Tqdm
class BiLinearModel(Model):
def __init__(
self,
vocab: Vocabulary,
example_encoder: object,
class_encoder: object,
joint_dim: int,
bias: bool = False,
):
super().__init__(vocab)
self.example_encoder = example_encoder
self.class_encoder = class_encoder
self.text_joint = nn.Linear(
self.example_encoder.output_dim, joint_dim, bias=bias
)
self.class_joint = nn.Linear(
self.class_encoder.output_dim, joint_dim, bias=bias
)
def forward(self, batch, node_idx, kg):
encoder_out = self.example_encoder(batch)
text_rep = self.text_joint(encoder_out)
# get label representation
class_out = self.class_encoder(node_idx, kg)
class_rep = self.class_joint(class_out)
logits = torch.matmul(text_rep, class_rep.t())
return logits
class TestIntentClassification(unittest.TestCase):
def setUp(
self,
):
label_maps = {
"train": ["weather", "music", "restaurant"],
"dev": ["search", "movie"],
"test": ["book", "playlist"],
}
data_path = "tests/test_data/datasets/snips/"
datasets = []
for split in ["train", "dev", "test"]:
labels = label_maps[split]
label_to_idx = dict(
[(label, idx) for idx, label in enumerate(labels)]
)
reader = SnipsDataset(label_to_idx)
path = os.path.join(data_path, f"{split}.txt")
_dataset = reader.read(path)
datasets.append(_dataset)
self.train_dataset, self.dev_dataset, self.test_dataset = datasets
vocab = Vocabulary.from_instances(
self.train_dataset + self.dev_dataset + self.test_dataset
)
# create the iterator
self.iterator = BasicIterator(batch_size=32)
self.iterator.index_with(vocab)
print("Loading GloVe...")
# token embed
token_embed_path = os.path.join(data_path, "word_emb.pt")
token_embedding = torch.load(token_embed_path)
print("word embeddings created...")
word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})
# create the text encoder
print("Loading the text encoder...")
self.example_encoder = TextEncoder(word_embeddings, 300, 32, 20)
trgcn = {
"input_dim": 300,
"output_dim": 64,
"type": "trgcn",
"gnn": [
{
"input_dim": 300,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(100, mode="topk"),
"fh": 100,
},
{
"input_dim": 64,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(50, mode="topk"),
},
],
}
self.class_encoder = AutoGNN(trgcn)
self.train_graph = ConceptNetKG.load_from_disk(
"tests/test_data/subgraphs/snips/train_graph"
)
node_to_idx = dict(
[(node, idx) for idx, node in enumerate(self.train_graph.nodes)]
)
#
self.train_nodes = torch.tensor(
[
node_to_idx[node]
for node in [
"/c/en/weather",
"/c/en/music",
"/c/en/restaurant",
]
]
)
self.model = BiLinearModel(
vocab, self.example_encoder, self.class_encoder, joint_dim=20
)
self.optimizer = optim.Adam(
self.model.parameters(), lr=1e-03, weight_decay=5e-04
)
self.loss_function = nn.CrossEntropyLoss()
def test_intent_classification_train(self):
self.model.train()
total_batch_loss = 0.0
generator_tqdm = Tqdm.tqdm(
self.iterator(self.train_dataset, num_epochs=1, shuffle=False),
total=self.iterator.get_num_batches(self.train_dataset),
)
for batch in generator_tqdm:
self.optimizer.zero_grad()
logits = self.model(
batch["sentence"], self.train_nodes, self.train_graph
)
loss = self.loss_function(logits, batch["labels"])
total_batch_loss += loss.item()
loss.backward()
self.optimizer.step()
self.assertLessEqual(total_batch_loss, 100.0)
| 31.247059 | 77 | 0.573419 |
794f8c21f5153f6802a28514f477a79a5d24fc14 | 11,145 | py | Python | problem/serializers.py | binbon095/OnlineJudge | 325035092f61bee4c11045e9f62151bd85c693bf | [
"MIT"
] | null | null | null | problem/serializers.py | binbon095/OnlineJudge | 325035092f61bee4c11045e9f62151bd85c693bf | [
"MIT"
] | null | null | null | problem/serializers.py | binbon095/OnlineJudge | 325035092f61bee4c11045e9f62151bd85c693bf | [
"MIT"
] | null | null | null | import re
from django import forms
from options.options import SysOptions
from utils.api import UsernameSerializer, serializers
from utils.constants import Difficulty
from utils.serializers import LanguageNameMultiChoiceField, SPJLanguageNameChoiceField, LanguageNameChoiceField
from .models import Problem, ProblemRuleType, ProblemTag, ProblemIOMode
from .utils import parse_problem_template
class TestCaseUploadForm(forms.Form):
spj = forms.CharField(max_length=12)
file = forms.FileField()
class CreateSampleSerializer(serializers.Serializer):
input = serializers.CharField(trim_whitespace=False)
output = serializers.CharField(trim_whitespace=False)
class CreateTestCaseScoreSerializer(serializers.Serializer):
input_name = serializers.CharField(max_length=32)
output_name = serializers.CharField(max_length=32)
score = serializers.IntegerField(min_value=0)
class CreateProblemCodeTemplateSerializer(serializers.Serializer):
pass
class ProblemIOModeSerializer(serializers.Serializer):
io_mode = serializers.ChoiceField(choices=ProblemIOMode.choices())
input = serializers.CharField()
output = serializers.CharField()
def validate(self, attrs):
if attrs["input"] == attrs["output"]:
raise serializers.ValidationError("Invalid io mode")
for item in (attrs["input"], attrs["output"]):
if not re.match("^[a-zA-Z0-9.]+$", item):
raise serializers.ValidationError("Invalid io file name format")
return attrs
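# A minimal usage sketch (not part of the original module); the payload values are made up
# and "Standard IO" is assumed to be one of ProblemIOMode.choices().
def _example_io_mode_payload():
    serializer = ProblemIOModeSerializer(
        data={"io_mode": "Standard IO", "input": "data.in", "output": "data.out"})
    # is_valid() returns False when input == output or a file name contains
    # characters outside [a-zA-Z0-9.] (e.g. "data/in").
    return serializer.is_valid()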
class CreateOrEditProblemSerializer(serializers.Serializer):
_id = serializers.CharField(max_length=32, allow_blank=True, allow_null=True)
title = serializers.CharField(max_length=1024)
description = serializers.CharField()
input_description = serializers.CharField()
output_description = serializers.CharField()
samples = serializers.ListField(child=CreateSampleSerializer(), allow_empty=False)
test_case_id = serializers.CharField(max_length=32)
test_case_score = serializers.ListField(child=CreateTestCaseScoreSerializer(), allow_empty=True)
time_limit = serializers.IntegerField(min_value=1, max_value=1000 * 60)
memory_limit = serializers.IntegerField(min_value=1, max_value=1024)
languages = LanguageNameMultiChoiceField()
template = serializers.DictField(child=serializers.CharField(min_length=1))
model_solution = serializers.DictField(child=serializers.CharField(min_length=1))
manual_judge = serializers.BooleanField()
rule_type = serializers.ChoiceField(choices=[ProblemRuleType.ACM, ProblemRuleType.OI])
io_mode = ProblemIOModeSerializer()
spj = serializers.BooleanField()
spj_language = SPJLanguageNameChoiceField(allow_blank=True, allow_null=True)
spj_code = serializers.CharField(allow_blank=True, allow_null=True)
spj_compile_ok = serializers.BooleanField(default=False)
visible = serializers.BooleanField()
difficulty = serializers.ChoiceField(choices=Difficulty.choices())
tags = serializers.ListField(child=serializers.CharField(max_length=32), allow_empty=False)
hint = serializers.CharField(allow_blank=True, allow_null=True)
source = serializers.CharField(max_length=256, allow_blank=True, allow_null=True)
share_submission = serializers.BooleanField()
class CreateProblemSerializer(CreateOrEditProblemSerializer):
pass
class EditProblemSerializer(CreateOrEditProblemSerializer):
id = serializers.IntegerField()
class CreateContestProblemSerializer(CreateOrEditProblemSerializer):
contest_id = serializers.IntegerField()
class EditContestProblemSerializer(CreateOrEditProblemSerializer):
id = serializers.IntegerField()
contest_id = serializers.IntegerField()
class TagSerializer(serializers.ModelSerializer):
class Meta:
model = ProblemTag
fields = "__all__"
class CompileSPJSerializer(serializers.Serializer):
spj_language = SPJLanguageNameChoiceField()
spj_code = serializers.CharField()
class BaseProblemSerializer(serializers.ModelSerializer):
tags = serializers.SlugRelatedField(many=True, slug_field="name", read_only=True)
created_by = UsernameSerializer()
def get_public_template(self, obj):
ret = {}
for lang, code in obj.template.items():
ret[lang] = parse_problem_template(code)["template"]
return ret
class ProblemAdminSerializer(BaseProblemSerializer):
class Meta:
model = Problem
fields = "__all__"
class ProblemSerializer(BaseProblemSerializer):
template = serializers.SerializerMethodField("get_public_template")
class Meta:
model = Problem
exclude = ("test_case_score", "test_case_id", "visible", "is_public",
"spj_code", "spj_version", "spj_compile_ok")
class ProblemSafeSerializer(BaseProblemSerializer):
template = serializers.SerializerMethodField("get_public_template")
class Meta:
model = Problem
exclude = ("test_case_score", "test_case_id", "visible", "is_public",
"spj_code", "spj_version", "spj_compile_ok",
"difficulty", "submission_number", "accepted_number", "statistic_info")
class ContestProblemMakePublicSerializer(serializers.Serializer):
id = serializers.IntegerField()
display_id = serializers.CharField(max_length=32)
class ExportProblemSerializer(serializers.ModelSerializer):
display_id = serializers.SerializerMethodField()
description = serializers.SerializerMethodField()
input_description = serializers.SerializerMethodField()
output_description = serializers.SerializerMethodField()
test_case_score = serializers.SerializerMethodField()
hint = serializers.SerializerMethodField()
spj = serializers.SerializerMethodField()
template = serializers.SerializerMethodField()
model_solution = serializers.SerializerMethodField()
manual_judge = serializers.BooleanField()
source = serializers.SerializerMethodField()
tags = serializers.SlugRelatedField(many=True, slug_field="name", read_only=True)
def get_display_id(self, obj):
return obj._id
def _html_format_value(self, value):
return {"format": "html", "value": value}
def get_description(self, obj):
return self._html_format_value(obj.description)
def get_input_description(self, obj):
return self._html_format_value(obj.input_description)
def get_output_description(self, obj):
return self._html_format_value(obj.output_description)
def get_hint(self, obj):
return self._html_format_value(obj.hint)
def get_test_case_score(self, obj):
return [{"score": item["score"] if obj.rule_type == ProblemRuleType.OI else 100,
"input_name": item["input_name"], "output_name": item["output_name"]}
for item in obj.test_case_score]
def get_spj(self, obj):
return {"code": obj.spj_code,
"language": obj.spj_language} if obj.spj else None
def get_template(self, obj):
ret = {}
for k, v in obj.template.items():
ret[k] = parse_problem_template(v)
return ret
def get_model_solution(self, obj):
return obj.model_solution
def get_source(self, obj):
return obj.source or f"{SysOptions.website_name} {SysOptions.website_base_url}"
class Meta:
model = Problem
fields = ("display_id", "title", "description", "tags",
"input_description", "output_description",
"test_case_score", "hint", "time_limit", "memory_limit", "samples",
"template", "model_solution", "manual_judge", "spj", "rule_type", "source", "template")
class AddContestProblemSerializer(serializers.Serializer):
contest_id = serializers.IntegerField()
problem_id = serializers.IntegerField()
display_id = serializers.CharField()
class ExportProblemRequestSerialzier(serializers.Serializer):
problem_id = serializers.ListField(child=serializers.IntegerField(), allow_empty=False)
class UploadProblemForm(forms.Form):
file = forms.FileField()
class FormatValueSerializer(serializers.Serializer):
format = serializers.ChoiceField(choices=["html", "markdown"])
value = serializers.CharField(allow_blank=True)
class TestCaseScoreSerializer(serializers.Serializer):
score = serializers.IntegerField(min_value=1)
input_name = serializers.CharField(max_length=32)
output_name = serializers.CharField(max_length=32)
class TemplateSerializer(serializers.Serializer):
prepend = serializers.CharField()
template = serializers.CharField()
append = serializers.CharField()
class SPJSerializer(serializers.Serializer):
code = serializers.CharField()
language = SPJLanguageNameChoiceField()
class AnswerSerializer(serializers.Serializer):
code = serializers.CharField()
language = LanguageNameChoiceField()
class ImportProblemSerializer(serializers.Serializer):
display_id = serializers.CharField(max_length=128)
title = serializers.CharField(max_length=128)
description = FormatValueSerializer()
input_description = FormatValueSerializer()
output_description = FormatValueSerializer()
hint = FormatValueSerializer()
test_case_score = serializers.ListField(child=TestCaseScoreSerializer(), allow_null=True)
time_limit = serializers.IntegerField(min_value=1, max_value=60000)
memory_limit = serializers.IntegerField(min_value=1, max_value=10240)
samples = serializers.ListField(child=CreateSampleSerializer())
template = serializers.DictField(child=TemplateSerializer())
model_solution = serializers.DictField()
manual_judge = serializers.BooleanField()
spj = SPJSerializer(allow_null=True)
rule_type = serializers.ChoiceField(choices=ProblemRuleType.choices())
source = serializers.CharField(max_length=200, allow_blank=True, allow_null=True)
answers = serializers.ListField(child=AnswerSerializer())
tags = serializers.ListField(child=serializers.CharField())
class FPSProblemSerializer(serializers.Serializer):
class UnitSerializer(serializers.Serializer):
unit = serializers.ChoiceField(choices=["MB", "s", "ms"])
value = serializers.IntegerField(min_value=1, max_value=60000)
title = serializers.CharField(max_length=128)
description = serializers.CharField()
input = serializers.CharField()
output = serializers.CharField()
hint = serializers.CharField(allow_blank=True, allow_null=True)
time_limit = UnitSerializer()
memory_limit = UnitSerializer()
samples = serializers.ListField(child=CreateSampleSerializer())
source = serializers.CharField(max_length=200, allow_blank=True, allow_null=True)
spj = SPJSerializer(allow_null=True)
template = serializers.ListField(child=serializers.DictField(), allow_empty=True, allow_null=True)
append = serializers.ListField(child=serializers.DictField(), allow_empty=True, allow_null=True)
prepend = serializers.ListField(child=serializers.DictField(), allow_empty=True, allow_null=True)
| 38.968531 | 111 | 0.745805 |
794f8ddb5e3715df12aeffaff5d3106cf75ba5ad | 5,428 | py | Python | docs/conf.py | codingbandit/Adafruit_CircuitPython_AzureIoT | d7c91400b23a3a99528ff03ea7734c58f3bce7d1 | [
"MIT"
] | null | null | null | docs/conf.py | codingbandit/Adafruit_CircuitPython_AzureIoT | d7c91400b23a3a99528ff03ea7734c58f3bce7d1 | [
"MIT"
] | null | null | null | docs/conf.py | codingbandit/Adafruit_CircuitPython_AzureIoT | d7c91400b23a3a99528ff03ea7734c58f3bce7d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
autodoc_mock_imports = [
"adafruit_binascii",
"adafruit_logging",
"adafruit_requests",
"adafruit_hashlib",
"adafruit_ntp",
"adafruit_minimqtt",
]
intersphinx_mapping = {
"python": ("https://docs.python.org/3.4", None),
"CircuitPython": ("https://circuitpython.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Adafruit AzureIoT Library"
copyright = "2019 Brent Rubell, Jim Bennett, Elena Horton"
author = "Brent Rubell, Jim Bennett, Elena Horton"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", ".env", "CODE_OF_CONDUCT.md"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "."]
    except ImportError:
html_theme = "default"
html_theme_path = ["."]
else:
html_theme_path = ["."]
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "_static/favicon.ico"
# Output file base name for HTML help builder.
htmlhelp_basename = "AdafruitAzureiotLibrarydoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "AdafruitAzureIoTLibrary.tex", "AdafruitAzureIoT Library Documentation", author, "manual",),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "AdafruitAzureIoTlibrary", "Adafruit AzureIoT Library Documentation", [author], 1,)]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"AdafruitAzureIoTLibrary",
"Adafruit AzureIoT Library Documentation",
author,
"AdafruitAzureIoTLibrary",
"One line description of project.",
"Miscellaneous",
),
]
| 31.55814 | 110 | 0.680545 |
794f9185a02d2e07cef959d386552395b6a8518b | 2,070 | py | Python | lib/surface/privateca/reusable_configs/describe.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/privateca/reusable_configs/describe.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/privateca/reusable_configs/describe.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # Lint as: python3
# -*- coding: utf-8 -*- #
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe a reusable config."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.privateca import base as privateca_base
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.privateca import resource_args
from googlecloudsdk.command_lib.util.concepts import concept_parsers
class Describe(base.DescribeCommand):
"""Show details about a reusable config."""
detailed_help = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
"""\
To show details about a reusable config, run:
$ {command} leaf-server-tls
""",
}
@staticmethod
def Args(parser):
concept_parsers.ConceptParser.ForResource(
'REUSABLE_CONFIG',
resource_args.CreateReusableConfigResourceSpec(
location_fallthroughs=[
resource_args.LOCATION_PROPERTY_FALLTHROUGH]),
'The reusable config to describe.',
required=True).AddToParser(parser)
def Run(self, args):
"""Runs the command."""
reusable_config = args.CONCEPTS.reusable_config.Parse()
client = privateca_base.GetClientInstance()
messages = privateca_base.GetMessagesModule()
return client.projects_locations_reusableConfigs.Get(
messages.PrivatecaProjectsLocationsReusableConfigsGetRequest(
name=reusable_config.RelativeName()))
| 32.857143 | 74 | 0.729469 |
794f91fdd23d4b1b2abc35f2fc9eb18c9394b417 | 12,925 | py | Python | adb/windows/platform-tools/systrace/catapult/telemetry/telemetry/timeline/memory_dump_event.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 1 | 2019-01-17T19:03:17.000Z | 2019-01-17T19:03:17.000Z | adb/MACOS/platform-tools/systrace/catapult/telemetry/telemetry/timeline/memory_dump_event.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | 2 | 2017-09-08T20:26:05.000Z | 2017-09-08T20:29:07.000Z | adb/windows/platform-tools/systrace/catapult/telemetry/telemetry/timeline/memory_dump_event.py | llaske/sugarizer-deployment-tool-desktop | 34df1a56b68b15b6771671f87ab66586d60c514a | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import re
from telemetry.timeline import event as timeline_event
class MmapCategory(object):
_DEFAULT_CATEGORY = None
def __init__(self, name, file_pattern, children=None):
"""A (sub)category for classifying memory maps.
Args:
name: A string to identify the category.
file_pattern: A regex pattern, the category will aggregate memory usage
for all mapped files matching this pattern.
children: A list of MmapCategory objects, used to sub-categorize memory
usage.
"""
self.name = name
self._file_pattern = re.compile(file_pattern) if file_pattern else None
self._children = list(children) if children else None
@classmethod
def DefaultCategory(cls):
"""An implicit 'Others' match-all category with no children."""
if cls._DEFAULT_CATEGORY is None:
cls._DEFAULT_CATEGORY = cls('Others', None)
return cls._DEFAULT_CATEGORY
def Match(self, mapped_file):
"""Test whether a mapped file matches this category."""
return (self._file_pattern is None
or bool(self._file_pattern.search(mapped_file)))
def GetMatchingChild(self, mapped_file):
"""Get the first matching sub-category for a given mapped file.
Returns None if the category has no children, or the DefaultCategory if
it does have children but none of them match.
"""
if not self._children:
return None
for child in self._children:
if child.Match(mapped_file):
return child
return type(self).DefaultCategory()
ROOT_CATEGORY = MmapCategory('/', None, [
MmapCategory('Android', r'^\/dev\/ashmem(?!\/libc malloc)', [
MmapCategory('Java runtime', r'^\/dev\/ashmem\/dalvik-', [
MmapCategory('Spaces', r'\/dalvik-(alloc|presentation.main|large'
r' object|non moving|zygote) space', [
MmapCategory('Normal', r'\/dalvik-(alloc|presentation.main)'),
MmapCategory('Large', r'\/dalvik-large object'),
MmapCategory('Zygote', r'\/dalvik-zygote'),
MmapCategory('Non-moving', r'\/dalvik-non moving')
]),
MmapCategory('Linear Alloc', r'\/dalvik-LinearAlloc'),
MmapCategory('Indirect Reference Table', r'\/dalvik-indirect.ref'),
MmapCategory('Cache', r'\/dalvik-jit-code-cache'),
MmapCategory('Accounting', None)
]),
MmapCategory('Cursor', r'\/CursorWindow'),
MmapCategory('Ashmem', None)
]),
MmapCategory('Native heap',
r'^((\[heap\])|(\[anon:)|(\/dev\/ashmem\/libc malloc)|$)'),
MmapCategory('Stack', r'^\[stack'),
MmapCategory('Files',
r'\.((((so)|(jar)|(apk)|(ttf)|(odex)|(oat)|(art))$)|(dex))', [
MmapCategory('so', r'\.so$'),
MmapCategory('jar', r'\.jar$'),
MmapCategory('apk', r'\.apk$'),
MmapCategory('ttf', r'\.ttf$'),
MmapCategory('dex', r'\.((dex)|(odex$))'),
MmapCategory('oat', r'\.oat$'),
MmapCategory('art', r'\.art$'),
]),
MmapCategory('Devices', r'(^\/dev\/)|(anon_inode:dmabuf)', [
MmapCategory('GPU', r'\/((nv)|(mali)|(kgsl))'),
MmapCategory('DMA', r'anon_inode:dmabuf'),
]),
MmapCategory('Discounted tracing overhead',
r'\[discounted tracing overhead\]')
])
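# A minimal sketch (not part of the original file) of how one mapped file walks the
# category tree above; it mirrors the loop in ProcessMemoryDumpEvent._AddRegion below.
# The path "/system/lib/libfoo.so" is hypothetical.
def _example_classify_mapped_file(mapped_file="/system/lib/libfoo.so"):
  path = ''
  category = ROOT_CATEGORY
  while category:
    path = posixpath.join(path, category.name)
    category = category.GetMatchingChild(mapped_file)
  return path  # e.g. '/Files/so' for a shared library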
# Map long descriptive attribute names, as understood by MemoryBucket.GetValue,
# to the short keys used by events in raw json traces.
BUCKET_ATTRS = {
'proportional_resident': 'pss',
'private_dirty_resident': 'pd',
'private_clean_resident': 'pc',
'shared_dirty_resident': 'sd',
'shared_clean_resident': 'sc',
'swapped': 'sw'}
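# Illustrative only (values made up): each vm_region in the trace carries a 'bs' dict
# keyed by the short names above with sizes encoded as hex strings, e.g.
# {'pss': '1a00', 'pd': '400'}, which MemoryBucket.AddRegion parses via int(value, 16).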
# Map of {memory_key: (category_path, discount_tracing), ...}.
# When discount_tracing is True, we have to discount the resident_size of the
# tracing allocator to get the correct value for that key.
MMAPS_METRICS = {
'mmaps_overall_pss': ('/.proportional_resident', True),
'mmaps_private_dirty' : ('/.private_dirty_resident', True),
'mmaps_java_heap': ('/Android/Java runtime/Spaces.proportional_resident',
False),
'mmaps_ashmem': ('/Android/Ashmem.proportional_resident', False),
'mmaps_native_heap': ('/Native heap.proportional_resident', True)}
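# For example, GetMemoryUsage turns 'mmaps_overall_pss' into
# GetMemoryValue('/.proportional_resident', True), i.e. the root bucket's
# proportional_resident total minus the tracing allocator's resident size.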
class MemoryBucket(object):
"""Simple object to hold and aggregate memory values."""
def __init__(self):
self._bucket = dict.fromkeys(BUCKET_ATTRS.iterkeys(), 0)
def __repr__(self):
values = ', '.join('%s=%d' % (src_key, self._bucket[dst_key])
for dst_key, src_key
in sorted(BUCKET_ATTRS.iteritems()))
return '%s[%s]' % (type(self).__name__, values)
def AddRegion(self, byte_stats):
for dst_key, src_key in BUCKET_ATTRS.iteritems():
self._bucket[dst_key] += int(byte_stats.get(src_key, '0'), 16)
def GetValue(self, name):
return self._bucket[name]
class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
"""A memory dump event belonging to a single timeline.Process object.
It's a subclass of telemetry's TimelineEvent so it can be included in
the stream of events contained in timeline.domain.model objects, and have its
timing correlated with that of other events in the domain.model.
Args:
process: The Process object associated with the memory dump.
dump_events: A list of dump events of the process with the same dump id.
Properties:
dump_id: A string to identify events belonging to the same global dump.
process: The timeline.Process object that owns this memory dump event.
has_mmaps: True if the memory dump has mmaps information. If False then
GetMemoryUsage will report all zeros.
"""
def __init__(self, process, dump_events):
assert dump_events
start_time = min(event['ts'] for event in dump_events) / 1000.0
duration = max(event['ts'] for event in dump_events) / 1000.0 - start_time
super(ProcessMemoryDumpEvent, self).__init__('memory', 'memory_dump',
start_time, duration)
self.process = process
self.dump_id = dump_events[0]['id']
allocator_dumps = {}
vm_regions = []
for event in dump_events:
assert (event['ph'] == 'v' and self.process.pid == event['pid'] and
self.dump_id == event['id'])
try:
allocator_dumps.update(event['args']['dumps']['allocators'])
except KeyError:
pass # It's ok if any of those keys are not present.
try:
value = event['args']['dumps']['process_mmaps']['vm_regions']
assert not vm_regions
vm_regions = value
except KeyError:
pass # It's ok if any of those keys are not present.
self._allocators = {}
parent_path = ''
parent_has_size = False
for allocator_name, size_values in sorted(allocator_dumps.iteritems()):
if ((allocator_name.startswith(parent_path) and parent_has_size) or
allocator_name.startswith('global/')):
continue
parent_path = allocator_name + '/'
parent_has_size = 'size' in size_values['attrs']
name_parts = allocator_name.split('/')
allocator_name = name_parts[0]
# For 'gpu/android_memtrack/*' we want to keep track of individual
# components. E.g. 'gpu/android_memtrack/gl' will be stored as
# 'android_memtrack_gl' in the allocators dict.
if (len(name_parts) == 3 and allocator_name == 'gpu' and
name_parts[1] == 'android_memtrack'):
allocator_name = '_'.join(name_parts[1:3])
allocator = self._allocators.setdefault(allocator_name, {})
for size_key, size_value in size_values['attrs'].iteritems():
if size_value['units'] == 'bytes':
allocator[size_key] = (allocator.get(size_key, 0)
+ int(size_value['value'], 16))
# we need to discount tracing from malloc size.
try:
self._allocators['malloc']['size'] -= self._allocators['tracing']['size']
except KeyError:
pass # It's ok if any of those keys are not present.
self.has_mmaps = bool(vm_regions)
self._buckets = {}
for vm_region in vm_regions:
self._AddRegion(vm_region)
@property
def process_name(self):
return self.process.name
def _AddRegion(self, vm_region):
path = ''
category = ROOT_CATEGORY
while category:
path = posixpath.join(path, category.name)
self.GetMemoryBucket(path).AddRegion(vm_region['bs'])
mapped_file = vm_region['mf']
category = category.GetMatchingChild(mapped_file)
def __repr__(self):
values = ['pid=%d' % self.process.pid]
for key, value in sorted(self.GetMemoryUsage().iteritems()):
values.append('%s=%d' % (key, value))
values = ', '.join(values)
return '%s[%s]' % (type(self).__name__, values)
def GetMemoryBucket(self, path):
"""Return the MemoryBucket associated with a category path.
An empty bucket will be created if the path does not already exist.
path: A string with path in the classification tree, e.g.
'/Android/Java runtime/Cache'. Note: no trailing slash, except for
the root path '/'.
"""
if not path in self._buckets:
self._buckets[path] = MemoryBucket()
return self._buckets[path]
def GetMemoryValue(self, category_path, discount_tracing=False):
"""Return a specific value from within a MemoryBucket.
category_path: A string composed of a path in the classification tree,
followed by a '.', followed by a specific bucket value, e.g.
'/Android/Java runtime/Cache.private_dirty_resident'.
discount_tracing: A boolean indicating whether the returned value should
be discounted by the resident size of the tracing allocator.
"""
path, name = category_path.rsplit('.', 1)
value = self.GetMemoryBucket(path).GetValue(name)
if discount_tracing and 'tracing' in self._allocators:
value -= self._allocators['tracing'].get('resident_size', 0)
return value
def GetMemoryUsage(self):
"""Get a dictionary with the memory usage of this process."""
usage = {}
for name, values in self._allocators.iteritems():
# If you wish to track more attributes here, make sure they are correctly
# calculated by the ProcessMemoryDumpEvent method. All dumps whose parent
# has "size" attribute are ignored to avoid double counting. So, the
# other attributes are totals of only top level dumps.
if 'size' in values:
usage['allocator_%s' % name] = values['size']
if 'allocated_objects_size' in values:
usage['allocated_objects_%s' % name] = values['allocated_objects_size']
if 'memtrack_pss' in values:
usage[name] = values['memtrack_pss']
if self.has_mmaps:
usage.update((key, self.GetMemoryValue(*value))
for key, value in MMAPS_METRICS.iteritems())
return usage
class GlobalMemoryDump(object):
"""Object to aggregate individual process dumps with the same dump id.
Args:
process_dumps: A sequence of ProcessMemoryDumpEvent objects, all sharing
the same global dump id.
Attributes:
dump_id: A string identifying this dump.
has_mmaps: True if the memory dump has mmaps information. If False then
GetMemoryUsage will report all zeros.
"""
def __init__(self, process_dumps):
assert process_dumps
# Keep dumps sorted in chronological order.
self._process_dumps = sorted(process_dumps, key=lambda dump: dump.start)
# All process dump events should have the same dump id.
dump_ids = set(dump.dump_id for dump in self._process_dumps)
assert len(dump_ids) == 1
self.dump_id = dump_ids.pop()
# Either all processes have mmaps or none of them do.
have_mmaps = set(dump.has_mmaps for dump in self._process_dumps)
assert len(have_mmaps) == 1
self.has_mmaps = have_mmaps.pop()
@property
def start(self):
return self._process_dumps[0].start
@property
def end(self):
return max(dump.end for dump in self._process_dumps)
@property
def duration(self):
return self.end - self.start
@property
def pids(self):
return set(d.process.pid for d in self._process_dumps)
def IterProcessMemoryDumps(self):
return iter(self._process_dumps)
def CountProcessMemoryDumps(self):
return len(self._process_dumps)
def __repr__(self):
values = ['id=%s' % self.dump_id]
for key, value in sorted(self.GetMemoryUsage().iteritems()):
values.append('%s=%d' % (key, value))
values = ', '.join(values)
return '%s[%s]' % (type(self).__name__, values)
def GetMemoryUsage(self):
"""Get the aggregated memory usage over all processes in this dump."""
result = {}
for dump in self._process_dumps:
for key, value in dump.GetMemoryUsage().iteritems():
result[key] = result.get(key, 0) + value
return result
| 37.572674 | 79 | 0.668008 |
794f9395b015c4ee107658d92c359a71f0a25c7b | 7,327 | py | Python | kubernetes/client/models/v1beta1_custom_resource_definition_names.py | scele/kubernetes-client-python | 9e982cbdb5f19dc1a3935a75bdd92288f3b807fb | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta1_custom_resource_definition_names.py | scele/kubernetes-client-python | 9e982cbdb5f19dc1a3935a75bdd92288f3b807fb | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta1_custom_resource_definition_names.py | scele/kubernetes-client-python | 9e982cbdb5f19dc1a3935a75bdd92288f3b807fb | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1CustomResourceDefinitionNames(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'kind': 'str',
'list_kind': 'str',
'plural': 'str',
'short_names': 'list[str]',
'singular': 'str'
}
attribute_map = {
'kind': 'kind',
'list_kind': 'listKind',
'plural': 'plural',
'short_names': 'shortNames',
'singular': 'singular'
}
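    # Illustrative values (made up): for a definition serving "crontabs.example.com",
    # plural would be "crontabs", singular "crontab", kind "CronTab" and
    # list_kind "CronTabList".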
def __init__(self, kind=None, list_kind=None, plural=None, short_names=None, singular=None):
"""
V1beta1CustomResourceDefinitionNames - a model defined in Swagger
"""
self._kind = None
self._list_kind = None
self._plural = None
self._short_names = None
self._singular = None
self.discriminator = None
self.kind = kind
if list_kind is not None:
self.list_kind = list_kind
self.plural = plural
if short_names is not None:
self.short_names = short_names
if singular is not None:
self.singular = singular
@property
def kind(self):
"""
Gets the kind of this V1beta1CustomResourceDefinitionNames.
Kind is the serialized kind of the resource. It is normally CamelCase and singular.
:return: The kind of this V1beta1CustomResourceDefinitionNames.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1CustomResourceDefinitionNames.
Kind is the serialized kind of the resource. It is normally CamelCase and singular.
:param kind: The kind of this V1beta1CustomResourceDefinitionNames.
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`")
self._kind = kind
@property
def list_kind(self):
"""
Gets the list_kind of this V1beta1CustomResourceDefinitionNames.
ListKind is the serialized kind of the list for this resource. Defaults to <kind>List.
:return: The list_kind of this V1beta1CustomResourceDefinitionNames.
:rtype: str
"""
return self._list_kind
@list_kind.setter
def list_kind(self, list_kind):
"""
Sets the list_kind of this V1beta1CustomResourceDefinitionNames.
ListKind is the serialized kind of the list for this resource. Defaults to <kind>List.
:param list_kind: The list_kind of this V1beta1CustomResourceDefinitionNames.
:type: str
"""
self._list_kind = list_kind
@property
def plural(self):
"""
Gets the plural of this V1beta1CustomResourceDefinitionNames.
Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration too: plural.group and it must be all lowercase.
:return: The plural of this V1beta1CustomResourceDefinitionNames.
:rtype: str
"""
return self._plural
@plural.setter
def plural(self, plural):
"""
Sets the plural of this V1beta1CustomResourceDefinitionNames.
Plural is the plural name of the resource to serve. It must match the name of the CustomResourceDefinition-registration too: plural.group and it must be all lowercase.
:param plural: The plural of this V1beta1CustomResourceDefinitionNames.
:type: str
"""
if plural is None:
raise ValueError("Invalid value for `plural`, must not be `None`")
self._plural = plural
@property
def short_names(self):
"""
Gets the short_names of this V1beta1CustomResourceDefinitionNames.
ShortNames are short names for the resource. It must be all lowercase.
:return: The short_names of this V1beta1CustomResourceDefinitionNames.
:rtype: list[str]
"""
return self._short_names
@short_names.setter
def short_names(self, short_names):
"""
Sets the short_names of this V1beta1CustomResourceDefinitionNames.
ShortNames are short names for the resource. It must be all lowercase.
:param short_names: The short_names of this V1beta1CustomResourceDefinitionNames.
:type: list[str]
"""
self._short_names = short_names
@property
def singular(self):
"""
Gets the singular of this V1beta1CustomResourceDefinitionNames.
Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased <kind>
:return: The singular of this V1beta1CustomResourceDefinitionNames.
:rtype: str
"""
return self._singular
@singular.setter
def singular(self, singular):
"""
Sets the singular of this V1beta1CustomResourceDefinitionNames.
Singular is the singular name of the resource. It must be all lowercase Defaults to lowercased <kind>
:param singular: The singular of this V1beta1CustomResourceDefinitionNames.
:type: str
"""
self._singular = singular
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CustomResourceDefinitionNames):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 30.40249 | 176 | 0.610345 |
794f9412d154dbe0614dd2afb514122b469781e4 | 683 | py | Python | setup.py | smwa/single_elimination | 3fef8d00492dd21dd34c62035718d01f888223fd | [
"MIT"
] | 2 | 2021-08-11T15:41:00.000Z | 2022-01-12T20:49:42.000Z | setup.py | smwa/single_elimination | 3fef8d00492dd21dd34c62035718d01f888223fd | [
"MIT"
] | null | null | null | setup.py | smwa/single_elimination | 3fef8d00492dd21dd34c62035718d01f888223fd | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="single_elimination",
version="1.2.0",
author="Michael Smith",
author_email="michael.smith.ok@gmail.com",
description="A single elimination tournament match handler.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/smwa/double_elimination",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 29.695652 | 65 | 0.676428 |
794f945bf6ac79dcf6534fe5782952573e4de5da | 72,632 | py | Python | simpletransformers/question_answering/question_answering_utils.py | shaoormunir/simpletransformers | 7a0889a57143d9cc25f0a82b7b979e0b2c6f3a45 | [
"Apache-2.0"
] | null | null | null | simpletransformers/question_answering/question_answering_utils.py | shaoormunir/simpletransformers | 7a0889a57143d9cc25f0a82b7b979e0b2c6f3a45 | [
"Apache-2.0"
] | null | null | null | simpletransformers/question_answering/question_answering_utils.py | shaoormunir/simpletransformers | 7a0889a57143d9cc25f0a82b7b979e0b2c6f3a45 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function
import collections
import json
import logging
import math
import os
import re
import string
from io import open
from multiprocessing import Pool, cpu_count
from functools import partial
from pprint import pprint
from tqdm import tqdm, trange
import torch
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from transformers import AdamW, get_linear_schedule_with_warmup
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
from transformers import XLMTokenizer, SquadExample
from transformers.data.processors.squad import (
squad_convert_example_to_features_init,
squad_convert_example_to_features,
)
logger = logging.getLogger(__name__)
class InputExample(object):
"""
A single training/test example.
For examples without an answer, the start and end position are -1.
"""
def __init__(
self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None,
):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
def to_list(tensor):
return tensor.detach().cpu().tolist()
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
cls_index,
p_mask,
paragraph_len,
start_position=None,
end_position=None,
is_impossible=None,
):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.paragraph_len = paragraph_len
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def get_examples(examples_to_process, is_training=True, version_2_with_negative=True):
if not isinstance(examples_to_process, list):
raise TypeError("Input should be a list of examples.")
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for paragraph in examples_to_process:
context_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position_character = None
answer_text = None
answers = []
if "is_impossible" in qa:
is_impossible = qa["is_impossible"]
else:
is_impossible = False
if not is_impossible:
if is_training:
answer = qa["answers"][0]
answer_text = answer["text"]
start_position_character = answer["answer_start"]
else:
answers = qa["answers"]
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
context_text=context_text,
answer_text=answer_text,
start_position_character=start_position_character,
title=None,
is_impossible=is_impossible,
answers=answers,
)
examples.append(example)
return examples
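# Illustrative only (not part of the original module): the structure expected by
# get_examples mirrors a SQuAD paragraph; all values below are invented.
_EXAMPLE_TO_PROCESS = [
    {
        "context": "Mistborn is a fantasy series by Brandon Sanderson.",
        "qas": [
            {
                "id": "q0",
                "question": "Who wrote Mistborn?",
                "is_impossible": False,
                "answers": [{"text": "Brandon Sanderson", "answer_start": 32}],
            }
        ],
    }
]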
def convert_example_to_feature(example_row):
(
example,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
cls_token_at_end,
cls_token,
sep_token,
pad_token,
sequence_a_segment_id,
sequence_b_segment_id,
cls_token_segment_id,
pad_token_segment_id,
mask_padding_with_zero,
sequence_a_is_doc,
unique_id,
example_index,
) = example_row
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text,
)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"]) # pylint: disable=invalid-name
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
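    # For instance (illustrative numbers): with 700 sub-tokens, max_tokens_for_doc=317
    # and doc_stride=128, spans start at offsets 0, 128, 256 and 384, each 317 tokens
    # long except the last (316), so every token falls in at least one span.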
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
        # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
        # Original TF implem also keeps the classification token (set to 0) (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# XLNet: P SEP Q SEP CLS
# Others: CLS Q SEP P SEP
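        # Illustratively, for a BERT-style model (sequence_a_is_doc=False) the final
        # sequence is [CLS] <query> [SEP] <doc span> [SEP], with the query in segment A
        # and the document span in segment B.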
if not sequence_a_is_doc:
# Query
tokens += query_tokens
segment_ids += [sequence_a_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
if not sequence_a_is_doc:
segment_ids.append(sequence_b_segment_id)
else:
segment_ids.append(sequence_a_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
if sequence_a_is_doc:
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
tokens += query_tokens
segment_ids += [sequence_b_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
if sequence_a_is_doc:
doc_offset = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
# if example_index < 20:
# logger.info("*** Example ***")
# logger.info("unique_id: %s" % (unique_id))
# logger.info("example_index: %s" % (example_index))
# logger.info("doc_span_index: %s" % (doc_span_index))
# logger.info("tokens: %s" % " ".join(tokens))
# logger.info(
# "token_to_orig_map: %s" % " ".join(["%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])
# )
# logger.info(
# "token_is_max_context: %s"
# % " ".join(["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()])
# )
# logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
# logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
# logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# if is_training and span_is_impossible:
# logger.info("impossible example")
# if is_training and not span_is_impossible:
# answer_text = " ".join(tokens[start_position : (end_position + 1)])
# logger.info("start_position: %d" % (start_position))
# logger.info("end_position: %d" % (end_position))
# logger.info("answer: %s" % (answer_text))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible,
)
return feature
def squad_convert_examples_to_features(
examples,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
return_dataset=False,
threads=1,
tqdm_enabled=True,
args=None,
):
"""
Converts a list of examples into a list of features that can be directly given as input to a model.
    It is model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.
Args:
examples: list of :class:`~transformers.data.processors.squad.SquadExample`
tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`
max_seq_length: The maximum sequence length of the inputs.
doc_stride: The stride used when the context is too large and is split across several features.
max_query_length: The maximum length of the query.
is_training: whether to create features for model evaluation or model training.
return_dataset: Default False. Either 'pt' or 'tf'.
if 'pt': returns a torch.data.TensorDataset,
if 'tf': returns a tf.data.Dataset
        threads: multiple processing threads
Returns:
list of :class:`~transformers.data.processors.squad.SquadFeatures`
Example::
processor = SquadV2Processor()
examples = processor.get_dev_examples(data_dir)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
)
"""
# Defining helper methods
features = []
threads = min(threads, cpu_count())
if args["use_multiprocessing"]:
with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
annotate_ = partial(
squad_convert_example_to_features,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=is_training,
)
features = list(
tqdm(
p.imap(annotate_, examples, chunksize=args["multiprocessing_chunksize"]),
total=len(examples),
desc="convert squad examples to features",
disable=not tqdm_enabled,
)
)
else:
squad_convert_example_to_features_init(tokenizer)
annotate_ = partial(
squad_convert_example_to_features,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=is_training,
)
features = [annotate_(example) for example in tqdm(examples, disable=not tqdm_enabled)]
new_features = []
unique_id = 1000000000
example_index = 0
for example_features in tqdm(
features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled
):
if not example_features:
continue
for example_feature in example_features:
example_feature.example_index = example_index
example_feature.unique_id = unique_id
new_features.append(example_feature)
unique_id += 1
example_index += 1
features = new_features
del new_features
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
if not is_training:
all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask
)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids,
all_attention_masks,
all_token_type_ids,
all_start_positions,
all_end_positions,
all_cls_index,
all_p_mask,
all_is_impossible,
)
return features, dataset
def convert_examples_to_features(
examples,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
cls_token_at_end=False,
cls_token="[CLS]",
sep_token="[SEP]",
pad_token=0,
sequence_a_segment_id=0,
sequence_b_segment_id=1,
cls_token_segment_id=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
sequence_a_is_doc=False,
silent=False,
args=None,
):
"""Converts examples into a list of `InputBatch`s."""
unique_id = 1000000000
# cnt_pos, cnt_neg = 0, 0
# max_N, max_M = 1024, 1024
# f = np.zeros((max_N, max_M), dtype=np.float32)
if args["use_multiprocessing"]:
example_rows = [
(
example,
tokenizer,
max_seq_length,
doc_stride,
max_query_length,
is_training,
cls_token_at_end,
cls_token,
sep_token,
pad_token,
sequence_a_segment_id,
sequence_b_segment_id,
cls_token_segment_id,
pad_token_segment_id,
mask_padding_with_zero,
sequence_a_is_doc,
unique_id + i,
i,
)
for i, example in enumerate(examples)
]
with Pool(args["process_count"]) as p:
features = list(
tqdm(
p.imap(convert_example_to_feature, example_rows, chunksize=args["multiprocessing_chunksize"]),
total=len(example_rows),
disable=args["silent"],
)
)
else:
features = []
for (example_index, example) in enumerate(tqdm(examples, disable=silent)):
# if example_index % 100 == 0:
# logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg)
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.orig_answer_text,
)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"]) # pylint: disable=invalid-name
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
                # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
                # Original TF implem also keeps the classification token (set to 0) (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# XLNet: P SEP Q SEP CLS
# Others: CLS Q SEP P SEP
if not sequence_a_is_doc:
# Query
tokens += query_tokens
segment_ids += [sequence_a_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
if not sequence_a_is_doc:
segment_ids.append(sequence_b_segment_id)
else:
segment_ids.append(sequence_a_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
if sequence_a_is_doc:
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
tokens += query_tokens
segment_ids += [sequence_b_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
if sequence_a_is_doc:
doc_offset = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible,
)
)
unique_id += 1
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
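# Illustrative sketch (hypothetical helper, not part of the original module): how
# _improve_answer_span narrows the "(1895 - 1943)" case from the comment above
# down to the single token "1895". A trivial whitespace tokenizer stands in for
# the real WordPiece tokenizer, and the doc tokens/indices are made-up values.
def _demo_improve_answer_span():
    class _WhitespaceTokenizer:
        def tokenize(self, text):
            return text.split()
    doc_tokens = ["The", "leader", "was", "John", "Smith", "(", "1895", "-", "1943", ")", "."]
    # The original annotation maps to sub-tokens 5..10 ("( 1895 - 1943 ) .");
    # the improved span is (6, 6), i.e. just "1895".
    return _improve_answer_span(doc_tokens, 5, 10, _WhitespaceTokenizer(), "1895")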
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
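# Illustrative sketch (hypothetical helper and span values): scoring the word
# "bought" from the comment above. _DemoSpan mirrors the _DocSpan namedtuple
# built in the feature-conversion loop above; starts and lengths are assumptions.
def _demo_check_is_max_context():
    import collections as _collections
    _DemoSpan = _collections.namedtuple("DemoSpan", ["start", "length"])
    # Doc: the man went to the store and bought a gallon of milk
    spans = [
        _DemoSpan(start=0, length=5),  # Span A: "the man went to the"
        _DemoSpan(start=3, length=5),  # Span B: "to the store and bought"
        _DemoSpan(start=6, length=5),  # Span C: "and bought a gallon of"
    ]
    position = 7  # index of "bought" in the document tokens
    # Span B scores min(4, 0) + 0.05 = 0.05; Span C scores min(1, 3) + 0.05 = 1.05,
    # so Span C alone is the max-context span for "bought".
    return [_check_is_max_context(spans, i, position) for i in (1, 2)]  # [False, True]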
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
def write_predictions(
all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
verbose_logging,
version_2_with_negative,
null_score_diff_threshold,
):
"""Write final predictions to the json file and log-odds of null if needed."""
# logger.info("Writing predictions to: %s" % (output_prediction_file))
# logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"],
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index],
)
)
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit,
)
)
prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True,)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"]
)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit,))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
            # In very rare edge cases we could have only a single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions, all_nbest_json, scores_diff_json
# For XLNet (and XLM which uses the same head)
RawResultExtended = collections.namedtuple(
"RawResultExtended",
["unique_id", "start_top_log_probs", "start_top_index", "end_top_log_probs", "end_top_index", "cls_logits"],
)
def write_predictions_extended(
all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
orig_data_file,
start_n_top,
end_n_top,
version_2_with_negative,
tokenizer,
verbose_logging,
):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"],
)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
)
# logger.info("Writing predictions to: %s", output_prediction_file)
# logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
# if we could have irrelevant answers, get the min score of irrelevant
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= feature.paragraph_len - 1:
continue
if end_index >= feature.paragraph_len - 1:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_log_prob=start_log_prob,
end_log_prob=end_log_prob,
)
)
prelim_predictions = sorted(
prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True,
)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
# XLNet un-tokenizer
# Let's keep it simple for now and see if we need all this later.
#
# tok_start_to_orig_index = feature.tok_start_to_orig_index
# tok_end_to_orig_index = feature.tok_end_to_orig_index
# start_orig_pos = tok_start_to_orig_index[pred.start_index]
# end_orig_pos = tok_end_to_orig_index[pred.end_index]
# paragraph_text = example.paragraph_text
# final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
# Previously used Bert untokenizer
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, False, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob,)
)
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
# note(zhiliny): always predict best_non_null_entry
# and the evaluation script will search for the best threshold
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
if isinstance(orig_data_file, str):
with open(orig_data_file, "r", encoding="utf-8") as reader:
orig_data = json.load(reader)
else:
orig_data = orig_data_file
qid_to_has_ans = make_qid_to_has_ans(orig_data)
exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions)
out_eval = {}
find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
return all_predictions, all_nbest_json, scores_diff_json
def get_best_predictions(
all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
verbose_logging,
version_2_with_negative,
null_score_diff_threshold,
):
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"],
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index],
)
)
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit,
)
)
prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True,)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"]
)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
if pred.start_index > 0: # this is a non-null prediction
feature = features[pred.feature_index]
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit,))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
            # In very rare edge cases we could have only a single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
all_best = [
{
"id": id,
"answer": [answer["text"] for answer in answers],
"probability": [answer["probability"] for answer in answers],
}
for id, answers in all_nbest_json.items()
]
return all_best
def get_best_predictions_extended(
all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
start_n_top,
end_n_top,
version_2_with_negative,
tokenizer,
verbose_logging,
):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"],
)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
)
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
# if we could have irrelevant answers, get the min score of irrelevant
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= feature.paragraph_len - 1:
continue
if end_index >= feature.paragraph_len - 1:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_log_prob=start_log_prob,
end_log_prob=end_log_prob,
)
)
prelim_predictions = sorted(
prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True,
)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
# XLNet un-tokenizer
# Let's keep it simple for now and see if we need all this later.
#
# tok_start_to_orig_index = feature.tok_start_to_orig_index
# tok_end_to_orig_index = feature.tok_end_to_orig_index
# start_orig_pos = tok_start_to_orig_index[pred.start_index]
# end_orig_pos = tok_end_to_orig_index[pred.end_index]
# paragraph_text = example.paragraph_text
# final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
# Previously used Bert untokenizer
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
if isinstance(tokenizer, XLMTokenizer):
final_text = get_final_text(tok_text, orig_text, verbose_logging)
else:
final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob,)
)
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
# note(zhiliny): always predict best_non_null_entry
# and the evaluation script will search for the best threshold
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
all_best = [
{
"id": id,
"answer": [answer["text"] for answer in answers],
"probability": [answer["probability"] for answer in answers],
}
for id, answers in all_nbest_json.items()
]
return all_best
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval["best_exact"] = best_exact
main_eval["best_exact_thresh"] = exact_thresh
main_eval["best_f1"] = best_f1
main_eval["best_f1_thresh"] = f1_thresh
main_eval["has_ans_exact"] = has_ans_exact
main_eval["has_ans_f1"] = has_ans_f1
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
has_ans_score, has_ans_cnt = 0, 0
for qid in qid_list:
if not qid_to_has_ans[qid]:
continue
has_ans_cnt += 1
if qid not in scores:
continue
has_ans_score += scores[qid]
return (
100.0 * best_score / len(scores),
best_thresh,
1.0 * has_ans_score / has_ans_cnt,
)
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for p in dataset:
for qa in p["qas"]:
qid_to_has_ans[qa["id"]] = bool(qa["answers"])
return qid_to_has_ans
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for p in dataset:
for qa in p["qas"]:
qid = qa["id"]
gold_answers = [a["text"] for a in qa["answers"] if normalize_answer(a["text"])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
if qid not in preds:
logger.warning("Missing prediction for %s" % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
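# Illustrative sketch (made-up answer strings): token-level F1 for a partial match,
# clarifying the precision/recall computation above.
def _demo_compute_f1():
    # gold "john smith" vs. prediction "smith": precision 1.0, recall 0.5, F1 = 2/3.
    return compute_f1("John Smith", "Smith")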
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
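# Illustrative sketch (made-up string): lowercase, strip punctuation, drop
# articles, then collapse whitespace.
def _demo_normalize_answer():
    return normalize_answer("The  Quick, Brown Fox!")  # -> "quick brown fox"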
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info(
"Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text,
)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position : (orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
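# Illustrative sketch (arbitrary logits): softmax over three n-best scores.
def _demo_compute_softmax():
    return _compute_softmax([1.0, 2.0, 3.0])  # approximately [0.090, 0.245, 0.665]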
def build_examples(to_predict):
"""
Builds a list of dicts in input data format from a list of contexts and qas.
"""
examples = []
for row in to_predict:
context = row["context"]
for qa in row["qas"]:
qa["answers"] = [{"text": " ", "answer_start": 0}]
            qa["is_impossible"] = False
example = {"context": context, "qas": row["qas"]}
examples.append(example)
return examples
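# Illustrative sketch (made-up context, question and id): the `to_predict` shape
# that build_examples expects at inference time; it fills in placeholder answers
# so the SQuAD-style feature conversion above can run without gold labels.
_EXAMPLE_TO_PREDICT = [
    {
        "context": "Python is a programming language created by Guido van Rossum.",
        "qas": [{"id": "q-0", "question": "Who created Python?"}],
    }
]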
| 38.861423 | 118 | 0.606317 |
794f948ab8ec7933fed9e42ac27a5b2ad90523b8 | 1,017 | py | Python | stubs.min/Autodesk/Revit/DB/Electrical_parts/CircuitLoadCalculationMethod.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/Autodesk/Revit/DB/Electrical_parts/CircuitLoadCalculationMethod.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/Autodesk/Revit/DB/Electrical_parts/CircuitLoadCalculationMethod.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class CircuitLoadCalculationMethod(Enum,IComparable,IFormattable,IConvertible):
"""
Methods to calculate circuit loads
enum CircuitLoadCalculationMethod,values: SumApparentLoad (1),SumTrueLoadAndReactiveLoad (0)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
SumApparentLoad=None
SumTrueLoadAndReactiveLoad=None
value__=None
| 29.911765 | 215 | 0.689282 |
794f958093746918f8184e276f6105a63d2fc14c | 201 | py | Python | src/test/data_pipe_test/native_index_test.py | random-python/data_pipe | e64fbbdb04f9cd43f7f3e58688c4ac1e2c2bbb45 | [
"Apache-2.0"
] | 14 | 2020-02-08T06:27:09.000Z | 2021-06-02T07:35:09.000Z | src/test/data_pipe_test/native_index_test.py | random-python/data_pipe | e64fbbdb04f9cd43f7f3e58688c4ac1e2c2bbb45 | [
"Apache-2.0"
] | null | null | null | src/test/data_pipe_test/native_index_test.py | random-python/data_pipe | e64fbbdb04f9cd43f7f3e58688c4ac1e2c2bbb45 | [
"Apache-2.0"
] | null | null | null | """
"""
from data_pipe.runtime_library import *
from data_pipe_test.verify_index import *
def test_index_store():
index_store = NativeIndex() # @UndefinedVariable
verify_index(index_store)
| 18.272727 | 53 | 0.751244 |
794f96243a84bea7f957118427107a75908a5cfd | 812 | py | Python | yatube/yatube/urls.py | Kors557/yatubeV2.1 | 6cd4f232e823c856b963b312b0ef648bb251e0f3 | [
"MIT"
] | null | null | null | yatube/yatube/urls.py | Kors557/yatubeV2.1 | 6cd4f232e823c856b963b312b0ef648bb251e0f3 | [
"MIT"
] | null | null | null | yatube/yatube/urls.py | Kors557/yatubeV2.1 | 6cd4f232e823c856b963b312b0ef648bb251e0f3 | [
"MIT"
] | null | null | null | """yatube URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('', include('posts.urls', namespace='posts')),
path('admin/', admin.site.urls)
] | 35.304348 | 77 | 0.703202 |
794f97fd1aed2fe1b63ef715d46a2735d45bf3d5 | 6,362 | py | Python | docs/en_US/conf.py | acured/nni | 03ff374189837d28d98c3e0a14ea248d9a231f82 | [
"MIT"
] | 2 | 2020-10-27T06:53:53.000Z | 2021-02-22T22:11:15.000Z | docs/en_US/conf.py | acured/nni | 03ff374189837d28d98c3e0a14ea248d9a231f82 | [
"MIT"
] | null | null | null | docs/en_US/conf.py | acured/nni | 03ff374189837d28d98c3e0a14ea248d9a231f82 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- Project information ---------------------------------------------------
project = 'NNI'
copyright = '2021, Microsoft'
author = 'Microsoft'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = 'v2.2'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinxarg.ext',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'nbsphinx',
'sphinx.ext.extlinks',
'IPython.sphinxext.ipython_console_highlighting',
]
# Add mock modules
autodoc_mock_imports = ['apex', 'nni_node', 'tensorrt', 'pycuda']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
source_suffix = ['.rst']
# The master toctree document.
master_doc = 'contents'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Release_v1.0.md', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
html_additional_pages = {
'index': 'index.html',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo_only': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_logo = '../img/nni_logo_dark.png'
html_title = 'An open source AutoML toolkit for neural architecture search, model compression and hyper-parameter tuning (%s %s)' % \
(project, release)
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'NeuralNetworkIntelligencedoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NeuralNetworkIntelligence.tex', 'Neural Network Intelligence Documentation',
'Microsoft', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'neuralnetworkintelligence', 'Neural Network Intelligence Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NeuralNetworkIntelligence', 'Neural Network Intelligence Documentation',
author, 'NeuralNetworkIntelligence', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# external links (for github code)
# Reference the code via :githublink:`path/to/your/example/code.py`
git_commit_id = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
extlinks = {
'githublink': ('https://github.com/microsoft/nni/blob/' + git_commit_id + '/%s', 'Github link: ')
}
# -- Extension configuration -------------------------------------------------
def setup(app):
app.add_css_file('css/custom.css')
| 31.034146 | 133 | 0.661584 |
794f987af5c2dbd386c9753c22bdaf12c824979f | 2,068 | py | Python | superset/db_engine_specs/db2.py | rodrigoguariento/incubator-superset | b2633a51d43faaca74751349b96fc32784d4b377 | [
"Apache-2.0"
] | 7 | 2017-11-01T06:00:12.000Z | 2019-01-05T13:31:48.000Z | superset/db_engine_specs/db2.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 108 | 2019-06-10T05:48:22.000Z | 2021-07-26T04:20:03.000Z | superset/db_engine_specs/db2.py | Odirlei-Stein/incubator-superset | 52afc33b31475536b287b56d262b9eaa32f479ab | [
"Apache-2.0"
] | 4 | 2018-12-13T06:20:34.000Z | 2020-10-02T16:07:17.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from superset.db_engine_specs.base import BaseEngineSpec, LimitMethod
class Db2EngineSpec(BaseEngineSpec):
engine = "ibm_db_sa"
limit_method = LimitMethod.WRAP_SQL
force_column_alias_quotes = True
max_column_name_length = 30
_time_grain_functions = {
None: "{col}",
"PT1S": "CAST({col} as TIMESTAMP)" " - MICROSECOND({col}) MICROSECONDS",
"PT1M": "CAST({col} as TIMESTAMP)"
" - SECOND({col}) SECONDS"
" - MICROSECOND({col}) MICROSECONDS",
"PT1H": "CAST({col} as TIMESTAMP)"
" - MINUTE({col}) MINUTES"
" - SECOND({col}) SECONDS"
" - MICROSECOND({col}) MICROSECONDS ",
"P1D": "CAST({col} as TIMESTAMP)"
" - HOUR({col}) HOURS"
" - MINUTE({col}) MINUTES"
" - SECOND({col}) SECONDS"
" - MICROSECOND({col}) MICROSECONDS",
"P1W": "{col} - (DAYOFWEEK({col})) DAYS",
"P1M": "{col} - (DAY({col})-1) DAYS",
"P0.25Y": "{col} - (DAY({col})-1) DAYS"
" - (MONTH({col})-1) MONTHS"
" + ((QUARTER({col})-1) * 3) MONTHS",
"P1Y": "{col} - (DAY({col})-1) DAYS" " - (MONTH({col})-1) MONTHS",
}
@classmethod
def epoch_to_dttm(cls) -> str:
return "(TIMESTAMP('1970-01-01', '00:00:00') + {col} SECONDS)"
| 39.769231 | 80 | 0.631528 |
794f9881c8dac4d483aa7479061872e92455a22b | 100 | py | Python | Curso_em_Video_py3/metros em centimetros e milimetros.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
] | 1 | 2021-05-11T12:39:43.000Z | 2021-05-11T12:39:43.000Z | Curso_em_Video_py3/metros em centimetros e milimetros.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
] | null | null | null | Curso_em_Video_py3/metros em centimetros e milimetros.py | Rodrigo98Matos/Projetos_py | 6428e2c09d28fd8a717743f4434bc788e7d7d3cc | [
"MIT"
] | null | null | null | m = float(input('inserir metros:'))
print('centimetros: {} \nmilimetros: {}'.format(m*100, m*1000))
| 33.333333 | 63 | 0.66 |
794f988cfcc6662d00f358906dcf13aed98bcd73 | 470 | py | Python | examples/acoustics/normalize_int_16.py | AssembleSoftware/IoTPy | d4b7b516ef95a45cff69827003d5e2d205f2ba55 | [
"BSD-3-Clause"
] | 28 | 2017-12-19T20:21:44.000Z | 2022-02-04T09:44:03.000Z | examples/acoustics/normalize_int_16.py | AssembleSoftware/IoTPy | d4b7b516ef95a45cff69827003d5e2d205f2ba55 | [
"BSD-3-Clause"
] | 4 | 2020-05-30T20:21:58.000Z | 2020-07-11T20:49:31.000Z | examples/acoustics/normalize_int_16.py | sdeepaknarayanan/IoTPy | ba022c3d6696527b834a865b9cf403d90665145b | [
"BSD-3-Clause"
] | 11 | 2017-05-21T15:37:41.000Z | 2021-10-16T13:45:11.000Z | def normalize_int_16(a, max_value):
"""
Parameters
----------
a: np.darray
A 1-D array, i.e. vector
max_value: int
The expected maximum value of the array.
Returns
-------
result: np.ndarray
A 1-D array
        The input array scaled by 2**12 and normalized by the larger of a.max() and max_value.
"""
    result = (2**12) * a / max(a.max(), max_value)
return result.astype('int16')
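# Illustrative sketch (hypothetical input vector): with max(a.max(), max_value)
# equal to 1.0 the result is simply (2**12) * a cast to int16.
def _demo_normalize_int_16():
    import numpy as np
    v = np.array([0.0, 0.5, 1.0])
    return normalize_int_16(v, 1)  # array([0, 2048, 4096], dtype=int16)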
| 23.5 | 53 | 0.474468 |
794f99ab352865b7dc10b9be500d5564e9e1d0d3 | 2,566 | py | Python | Companion/Python/companion.py | magico13/ESP-cube | 1a8dc4c223661fa6365a186241ab65ee6ebb05e7 | [
"MIT"
] | null | null | null | Companion/Python/companion.py | magico13/ESP-cube | 1a8dc4c223661fa6365a186241ab65ee6ebb05e7 | [
"MIT"
] | null | null | null | Companion/Python/companion.py | magico13/ESP-cube | 1a8dc4c223661fa6365a186241ab65ee6ebb05e7 | [
"MIT"
] | null | null | null | # python module to make interfacing with the cube simpler
import requests
import json
class Animation(object):
def __init__(self):
self.animation_type = "None"
def to_json(self):
        return f'{{"animation": "{self.animation_type}"}}'
class Blink(Animation):
def __init__(self, count=1, wait=100, red1=0, green1=0, blue1=255, red2=0, green2=0, blue2=0):
self.Count = count
self.Wait = wait
self.Red1 = red1
self.Green1 = green1
self.Blue1 = blue1
self.Red2 = red2
self.Green2 = green2
self.Blue2 = blue2
self.animation_type = "blink"
def to_json(self):
data = {
"animation": "blink",
"count": self.Count,
"wait": self.Wait,
"color": [
self.Red1,
self.Green1,
self.Blue1
],
"color2": [
self.Red2,
self.Green2,
self.Blue2
]
}
return json.dumps(data)
class Breathe(Animation):
def __init__(self, count=1, length=1000, red=0, green=0, blue=255):
self.Count = count
self.Length = length
self.Red = red
self.Green = green
self.Blue = blue
self.animation_type = "breathe"
def to_json(self):
data = {
"animation": "breathe",
"count": self.Count,
"length": self.Length,
"color": [
self.Red,
self.Green,
self.Blue
]
}
return json.dumps(data)
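# Illustrative sketch (hypothetical values): the JSON payload a Blink animation
# serializes to; Cube.animate() below posts this string to the cube's /animate path.
def _demo_blink_payload():
    # Three red flashes, 250 ms apart, alternating with the default "off" color2.
    return Blink(count=3, wait=250, red1=255, green1=0, blue1=0).to_json()
    # -> '{"animation": "blink", "count": 3, "wait": 250, "color": [255, 0, 0], "color2": [0, 0, 0]}'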
class Cube():
def __init__(self, url):
self.BASEURL = url
def get_color(self):
code, json = self.get('/color')
if code == 200: return json['red'], json['green'], json['blue']
return 0, 0, 0
def set_color(self, red, green, blue):
data = f'{{"red":{red}, "green":{green}, "blue":{blue}}}'
self.post('/color', data)
def animate(self, animation):
data = animation.to_json()
self.post('/animate', data)
def set_tap(self, animation):
data = animation.to_json()
self.post('/tap', data)
def get(self, path):
r = requests.get(self.BASEURL+path)
if r.text:
return r.status_code, r.json()
return r.status_code, ''
def post(self, path, data):
r = requests.post(self.BASEURL+path, data=data)
if r.text:
return r.status_code, r.json()
return r.status_code, '' | 27.010526 | 98 | 0.51403 |
794f99c1031f98cb217d22dce8fd3733eea2493c | 3,959 | py | Python | Datasets/Vectors/us_census_states.py | jdgomezmo/gee | 7016c47ee902dbf60b1aeb6319424c61c1107345 | [
"MIT"
] | 1 | 2020-11-16T22:07:42.000Z | 2020-11-16T22:07:42.000Z | Datasets/Vectors/us_census_states.py | jdgomezmo/gee | 7016c47ee902dbf60b1aeb6319424c61c1107345 | [
"MIT"
] | null | null | null | Datasets/Vectors/us_census_states.py | jdgomezmo/gee | 7016c47ee902dbf60b1aeb6319424c61c1107345 | [
"MIT"
] | null | null | null | # %%
"""
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Vectors/us_census_states.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/us_census_states.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/us_census_states.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
"""
# %%
"""
## Install Earth Engine API and geemap
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
"""
# %%
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# %%
"""
## Create an interactive map
The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
"""
# %%
Map = geemap.Map(center=[40,-100], zoom=4)
Map
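# %%
# Illustrative sketch (editor's addition, not part of the original notebook): as noted above,
# additional basemaps can be layered on the default Google Maps tiles with Map.add_basemap().
# The basemap key 'HYBRID' below is an assumption based on geemap's built-in basemap names.
Map.add_basemap('HYBRID')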
# %%
"""
## Add Earth Engine Python script
"""
# %%
# Add Earth Engine dataset
fc = ee.FeatureCollection('TIGER/2018/States')
# .filter(ee.Filter.eq('STUSPS', 'MN'))
image = ee.Image().paint(fc, 0, 2)
Map.setCenter(-99.844, 37.649, 5)
Map.addLayer(image, {'palette': 'FF0000'}, 'TIGER/2018/States')
# Map.addLayer(fc, {}, 'US States')
# %%
"""
## Display Earth Engine data layers
"""
# %%
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map | 50.113924 | 1,021 | 0.743117 |
794f9bc2d667d488acb5aa275b3d81f1842e6c93 | 10,430 | py | Python | build/PureCloudPlatformClientV2/models/queue_conversation_callback_event_topic_error_body.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | ["MIT"] | 10 | 2019-02-22T00:27:08.000Z | 2021-09-12T23:23:44.000Z | build/PureCloudPlatformClientV2/models/queue_conversation_callback_event_topic_error_body.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | ["MIT"] | 5 | 2018-06-07T08:32:00.000Z | 2021-07-28T17:37:26.000Z | build/PureCloudPlatformClientV2/models/queue_conversation_callback_event_topic_error_body.py | cjohnson-ctl/platform-client-sdk-python | 38ce53bb8012b66e8a43cc8bd6ff00cf6cc99100 | ["MIT"] | 6 | 2020-04-09T17:43:07.000Z | 2022-02-17T08:48:05.000Z | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class QueueConversationCallbackEventTopicErrorBody(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
QueueConversationCallbackEventTopicErrorBody - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'message': 'str',
'code': 'str',
'status': 'int',
'entity_id': 'str',
'entity_name': 'str',
'message_with_params': 'str',
'message_params': 'dict(str, str)',
'context_id': 'str',
'details': 'list[QueueConversationCallbackEventTopicDetail]',
'errors': 'list[QueueConversationCallbackEventTopicErrorBody]'
}
self.attribute_map = {
'message': 'message',
'code': 'code',
'status': 'status',
'entity_id': 'entityId',
'entity_name': 'entityName',
'message_with_params': 'messageWithParams',
'message_params': 'messageParams',
'context_id': 'contextId',
'details': 'details',
'errors': 'errors'
}
self._message = None
self._code = None
self._status = None
self._entity_id = None
self._entity_name = None
self._message_with_params = None
self._message_params = None
self._context_id = None
self._details = None
self._errors = None
@property
def message(self):
"""
Gets the message of this QueueConversationCallbackEventTopicErrorBody.
:return: The message of this QueueConversationCallbackEventTopicErrorBody.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this QueueConversationCallbackEventTopicErrorBody.
:param message: The message of this QueueConversationCallbackEventTopicErrorBody.
:type: str
"""
self._message = message
@property
def code(self):
"""
Gets the code of this QueueConversationCallbackEventTopicErrorBody.
:return: The code of this QueueConversationCallbackEventTopicErrorBody.
:rtype: str
"""
return self._code
@code.setter
def code(self, code):
"""
Sets the code of this QueueConversationCallbackEventTopicErrorBody.
:param code: The code of this QueueConversationCallbackEventTopicErrorBody.
:type: str
"""
self._code = code
@property
def status(self):
"""
Gets the status of this QueueConversationCallbackEventTopicErrorBody.
:return: The status of this QueueConversationCallbackEventTopicErrorBody.
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this QueueConversationCallbackEventTopicErrorBody.
:param status: The status of this QueueConversationCallbackEventTopicErrorBody.
:type: int
"""
self._status = status
@property
def entity_id(self):
"""
Gets the entity_id of this QueueConversationCallbackEventTopicErrorBody.
:return: The entity_id of this QueueConversationCallbackEventTopicErrorBody.
:rtype: str
"""
return self._entity_id
@entity_id.setter
def entity_id(self, entity_id):
"""
Sets the entity_id of this QueueConversationCallbackEventTopicErrorBody.
:param entity_id: The entity_id of this QueueConversationCallbackEventTopicErrorBody.
:type: str
"""
self._entity_id = entity_id
@property
def entity_name(self):
"""
Gets the entity_name of this QueueConversationCallbackEventTopicErrorBody.
:return: The entity_name of this QueueConversationCallbackEventTopicErrorBody.
:rtype: str
"""
return self._entity_name
@entity_name.setter
def entity_name(self, entity_name):
"""
Sets the entity_name of this QueueConversationCallbackEventTopicErrorBody.
:param entity_name: The entity_name of this QueueConversationCallbackEventTopicErrorBody.
:type: str
"""
self._entity_name = entity_name
@property
def message_with_params(self):
"""
Gets the message_with_params of this QueueConversationCallbackEventTopicErrorBody.
:return: The message_with_params of this QueueConversationCallbackEventTopicErrorBody.
:rtype: str
"""
return self._message_with_params
@message_with_params.setter
def message_with_params(self, message_with_params):
"""
Sets the message_with_params of this QueueConversationCallbackEventTopicErrorBody.
:param message_with_params: The message_with_params of this QueueConversationCallbackEventTopicErrorBody.
:type: str
"""
self._message_with_params = message_with_params
@property
def message_params(self):
"""
Gets the message_params of this QueueConversationCallbackEventTopicErrorBody.
:return: The message_params of this QueueConversationCallbackEventTopicErrorBody.
:rtype: dict(str, str)
"""
return self._message_params
@message_params.setter
def message_params(self, message_params):
"""
Sets the message_params of this QueueConversationCallbackEventTopicErrorBody.
:param message_params: The message_params of this QueueConversationCallbackEventTopicErrorBody.
:type: dict(str, str)
"""
self._message_params = message_params
@property
def context_id(self):
"""
Gets the context_id of this QueueConversationCallbackEventTopicErrorBody.
:return: The context_id of this QueueConversationCallbackEventTopicErrorBody.
:rtype: str
"""
return self._context_id
@context_id.setter
def context_id(self, context_id):
"""
Sets the context_id of this QueueConversationCallbackEventTopicErrorBody.
:param context_id: The context_id of this QueueConversationCallbackEventTopicErrorBody.
:type: str
"""
self._context_id = context_id
@property
def details(self):
"""
Gets the details of this QueueConversationCallbackEventTopicErrorBody.
:return: The details of this QueueConversationCallbackEventTopicErrorBody.
:rtype: list[QueueConversationCallbackEventTopicDetail]
"""
return self._details
@details.setter
def details(self, details):
"""
Sets the details of this QueueConversationCallbackEventTopicErrorBody.
:param details: The details of this QueueConversationCallbackEventTopicErrorBody.
:type: list[QueueConversationCallbackEventTopicDetail]
"""
self._details = details
@property
def errors(self):
"""
Gets the errors of this QueueConversationCallbackEventTopicErrorBody.
:return: The errors of this QueueConversationCallbackEventTopicErrorBody.
:rtype: list[QueueConversationCallbackEventTopicErrorBody]
"""
return self._errors
@errors.setter
def errors(self, errors):
"""
Sets the errors of this QueueConversationCallbackEventTopicErrorBody.
:param errors: The errors of this QueueConversationCallbackEventTopicErrorBody.
:type: list[QueueConversationCallbackEventTopicErrorBody]
"""
self._errors = errors
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
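# --- Illustrative usage sketch (editor's addition, not part of the generated client) ---
# Shows how the attributes declared in swagger_types can be populated and serialized.
# It assumes this runs where the module's relative import of sanitize_for_serialization
# resolves, i.e. inside the PureCloudPlatformClientV2 package.
#
#   body = QueueConversationCallbackEventTopicErrorBody()
#   body.message = 'Queue not found'
#   body.status = 404
#   body.errors = []              # nested models/lists are serialized recursively by to_dict()
#   print(body.to_dict())         # plain dict keyed by attribute name
#   print(body.to_json())         # JSON string via sanitize_for_serialization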
| 28.575342 | 113 | 0.625024 |
794f9ccfbe56769bdfe2c20e355f63355160bf81 | 1,150 | py | Python | Python_Hard_Way_W2/ex14b.py | jhanse9522/toolkitten | 98d6bf3792eae13b1ff14726c99b0afd2a20185d | ["MIT"] | null | null | null | Python_Hard_Way_W2/ex14b.py | jhanse9522/toolkitten | 98d6bf3792eae13b1ff14726c99b0afd2a20185d | ["MIT"] | null | null | null | Python_Hard_Way_W2/ex14b.py | jhanse9522/toolkitten | 98d6bf3792eae13b1ff14726c99b0afd2a20185d | ["MIT"] | null | null | null | from sys import argv
script, user_name, fav_ice = argv
prompt = '> '
print(f"Hi {user_name}, who likes {fav_ice} icecream. I'm the {script} script.")
print("I'd like to ask you a few questions.")
print(f"Do you like me {user_name} who likes {fav_ice} icecream?")
likes = input(prompt)
print(f"Where do you live {user_name} who likes {fav_ice} icecream?")
lives = input(prompt)
print(f"What computer do you have?")
computer = input(prompt)
print("""
Alright, so ignore this part because I'm practicing spacing here. You said blah blah blah about liking me.
You live in blah-no variables used here in this string. Not sure where that is.
And you have a blah blah blah computer. Nice. Let's keep going here.
""")
#Note: I tried the exercise without the f-string to see if the empty string triple quote thing in line 17 would still keep lines 18-20 separate. It worked!
print(f"""
{user_name}, let me get this straight. You said {likes} about liking me.
You really like {fav_ice}.
You live in {lives}, which I don't know too much about.
You have a {computer}.
I think I'm starting to learn just a little bit about you.
""")
| 34.848485 | 155 | 0.716522 |
794f9df4f7435269f711e61c5303e94da3c1deae | 79,214 | py | Python | exportsrv/tests/unittests/stubdata/solrdata.py | golnazads/export_service | 873f2e8d98eea036d2607b57cd51c3cd2ef73747 | ["MIT"] | 4 | 2019-01-13T00:42:35.000Z | 2021-06-03T15:04:35.000Z | exportsrv/tests/unittests/stubdata/solrdata.py | golnazads/export_service | 873f2e8d98eea036d2607b57cd51c3cd2ef73747 | ["MIT"] | 179 | 2015-05-26T21:00:26.000Z | 2022-03-30T00:13:04.000Z | exportsrv/tests/unittests/stubdata/solrdata.py | golnazads/export_service | 873f2e8d98eea036d2607b57cd51c3cd2ef73747 | ["MIT"] | 7 | 2016-04-18T14:25:44.000Z | 2022-02-02T19:48:08.000Z | # -*- coding: utf-8 -*-
# We have 22 different doctypes in Solr; these are the bibcodes used to extract the data below
# bibcodes = ['2017yCat.113380453S', '2018SAAS...38.....D', '1995ans..agar..390M', '1983aiaa.meetY....K', '2007RJPh....1...35.', '2007AAS...210.2104M',
# '2009bcet.book...65L', '2018PhRvL.120b9901P', '2018TDM.....5a0201F', '2018Spin....877001P', '2017nova.pres.2388K', '2016iac..talk..872V',
# '2017PhDT........14C', '2017MsT..........2A', '2018Wthr...73Q..35.', '2017ascl.soft06009C', '2017CBET.4403....2G', '2016emo6.rept.....R',
# '1995anda.book.....N', '2017sptz.prop13168Y', '2017AAVSN.429....1W', '1991hep.th....8028G']
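# Illustrative sketch (editor's addition, not part of the original fixture): the stubs below mirror
# the JSON returned by a Solr-style search endpoint for those bibcodes. The endpoint URL and token
# handling here are assumptions for illustration only; the query parameters echo the responseHeader
# recorded in the first stub.
#
#   import requests
#   params = {'q': '*:*', 'fq': '{!bitset}', 'sort': 'date desc, bibcode desc',
#             'rows': 22, 'start': 0, 'wt': 'json',
#             'fl': 'author,title,year,pubdate,pub,pub_raw,issue,volume,page,aff,doi,abstract,bibcode,doctype'}
#   resp = requests.get('https://api.adsabs.harvard.edu/v1/search/query', params=params,
#                       headers={'Authorization': 'Bearer <API_TOKEN>'})
#   solr_like_response = resp.json()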
data = \
{
u'responseHeader': {
u'status': 0,
u'QTime': 13,
u'params': {
u'sort': u'date desc, bibcode desc',
u'x-amzn-trace-id': u'Root=1-5dd330a9-5dda490348b6637c4f990929;-',
u'rows': u'22',
u'fq': u'{!bitset}',
u'q': u'*:*',
u'start': u'0',
u'wt': u'json',
u'fl': u'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,read_count,bibcode,identifier,copyright,keyword,doctype,[citations],comment,version,property,esources,data,isbn,eid,issn,arxiv_class,editor,series,publisher,bibstem'
}
},
u'response': {
u'start': 0,
u'numFound': 22,
u'docs': [
{
u'read_count': 0,
u'identifier': [u'10.1002/wea.3072', u'2018Wthr...73Q..35.', u'10.1002/wea.3072'],
u'pubdate': u'2018-01-00',
u'abstract': u'Not Available <P />',
u'pub': u'Weather',
u'volume': u'73',
u'page_range': u'35-35',
u'num_citations': 0,
u'doi': [u'10.1002/wea.3072'],
u'year': u'2018',
u'bibcode': u'2018Wthr...73Q..35.',
u'bibstem': [u'Wthr', u'Wthr...73'],
u'issn': [u'0043-1656'],
u'doctype': u'bookreview',
u'pub_raw': u'Weather, vol. 73, issue 1, pp. 35-35',
u'esources': [u'PUB_HTML'],
u'title': [u'Book reviews'],
u'property': [u'ESOURCE', u'ARTICLE', u'REFEREED'],
u'issue': u'1',
u'page': [u'35'],
u'num_references': 0
},
{
u'read_count': 0,
u'issn': [u'2053-1583'],
u'pubdate': u'2018-01-00',
u'abstract': u'Not Available <P />',
u'num_citations': 0,
u'year': u'2018',
u'bibcode': u'2018TDM.....5a0201F',
u'bibstem': [u'TDM', u'TDM.....5'],
u'aff': [u'Editor in Chief, National Graphene Institute, University of Manchester, United Kingdom', u'Publisher, IOP Publishing, Bristol, United Kingdom'],
u'esources': [u'PUB_HTML'],
u'issue': u'1',
u'pub_raw': u'2D Materials, Volume 5, Issue 1, article id. 010201 (2018).',
u'num_references': 0,
u'pub': u'2D Materials',
u'volume': u'5',
u'doi': [u'10.1088/2053-1583/aa9403'],
u'author': [u"Fal'ko, Vladimir", u'Thomas, Ceri-Wyn'],
u'doctype': u'editorial',
u'eid': u'010201',
u'title': [u'2D Materials: maintaining editorial quality'],
u'property': [u'ESOURCE', u'ARTICLE', u'REFEREED'],
u'page': [u'010201']
},
{
u'read_count': 0,
u'pubdate': u'2018-00-00',
u'abstract': u'Not Available <P />',
u'num_citations': 0,
u'year': u'2018',
u'bibcode': u'2018Spin....877001P',
u'copyright': u'(c) 2018: World Scientific Publishing Company',
u'bibstem': [u'Spin', u'Spin....8'],
u'aff': [u'-', u'-', u'-'],
u'esources': [u'PUB_HTML', u'PUB_PDF'],
u'issue': u'4',
u'pub_raw': u'Spin, Volume 8, Issue 4, id. 1877001',
u'num_references': 0,
u'identifier': [u'2018Spin....877001P', u'10.1142/S2010324718770015', u'10.1142/S2010324718770015'],
u'pub': u'Spin',
u'volume': u'8',
u'doi': [u'10.1142/S2010324718770015'],
u'author': [u'Parkin, Stuart', u'Chantrell, Roy', u'Chang, Ching-Ray'],
u'doctype': u'obituary',
u'eid': u'1877001',
u'title': [u'Obituary: In Memoriam Professor Dr. Shoucheng Zhang, Consulting Editor'],
u'property': [u'ESOURCE', u'ARTICLE', u'REFEREED'],
u'page': [u'1877001']
},
{
u'read_count': 3,
u'isbn': [u'9783662575451'],
u'pubdate': u'2018-00-00',
u'series': u'Saas-Fee Advanced Course',
u'abstract': u'Not Available <P />',
u'num_citations': 0,
u'year': u'2018',
u'property': [u'ESOURCE', u'TOC', u'NONARTICLE', u'NOT REFEREED'],
u'bibcode': u'2018SAAS...38.....D',
u'copyright': u'(c) 2018: Springer-Verlag GmbH Germany, part of Springer Nature',
u'author': [u'Dessauges-Zavadsky, Miroslava', u'Pfenniger, Daniel'],
u'aff': [u'-', u'-'],
u'esources': [u'PUB_HTML'],
u'pub_raw': u'Millimeter Astronomy: Saas-Fee Advanced Course 38. Swiss Society for Astrophysics and Astronomy, Saas-Fee Advanced Course, Volume 38. ISBN 978-3-662-57545-1. Springer-Verlag GmbH Germany, part of Springer Nature, 2018',
u'num_references': 0,
u'pub': u'Saas-Fee Advanced Course',
u'volume': u'38',
u'doi': [u'10.1007/978-3-662-57546-8'],
u'keyword': [u'Physics'],
u'bibstem': [u'SAAS', u'SAAS...38'],
u'doctype': u'misc',
u'title': [u'Millimeter Astronomy'],
u'identifier': [u'2018SAAS...38.....D', u'10.1007/978-3-662-57546-8', u'10.1007/978-3-662-57546-8']
},
{
u'read_count': 0,
u'issn': [u'0031-9007'],
u'pubdate': u'2018-01-00',
u'abstract': u'Not Available <P />',
u'num_citations': 0,
u'year': u'2018',
u'bibcode': u'2018PhRvL.120b9901P',
u'bibstem': [u'PhRvL', u'PhRvL.120'],
u'aff': [u'-', u'-', u'-', u'-'],
u'esources': [u'PUB_HTML'],
u'issue': u'2',
u'pub_raw': u'Physical Review Letters, Volume 120, Issue 2, id.029901',
u'num_references': 4,
u'identifier': [u'2018PhRvL.120b9901P', u'10.1103/PhysRevLett.120.029901', u'10.1103/PhysRevLett.120.029901'],
u'pub': u'Physical Review Letters',
u'volume': u'120',
u'doi': [u'10.1103/PhysRevLett.120.029901'],
u'author': [u'Pustilnik, M.', u'van Heck, B.', u'Lutchyn, R. M.', u'Glazman, L. I.'],
u'doctype': u'erratum',
u'eid': u'029901',
u'title': [u'Erratum: Quantum Criticality in Resonant Andreev Conduction [Phys. Rev. Lett. 119, 116802 (2017)]'],
u'property': [u'ESOURCE', u'ARTICLE', u'REFEREED'],
u'page': [u'029901']
},
{
u'read_count': 0,
u'bibcode': u'2017PhDT........14C',
u'keyword': [u'galaxies: evolution', u'galaxies: abundances', u'galaxies: ISM'],
u'pubdate': u'2017-06-00',
u'bibstem': [u'PhDT', u'PhDT.....'],
u'property': [u'ESOURCE', u'NONARTICLE', u'REFEREED', u'PUB_OPENACCESS', u'OPENACCESS'],
u'abstract': u"Chapter 2: As part of the Bluedisk survey we analyse the radial gas-phase metallicity profiles of 50 late-type galaxies. We compare the metallicity profiles of a sample of HI-rich galaxies against a control sample of HI-'normal' galaxies. We find the metallicity gradient of a galaxy to be strongly correlated with its HI mass fraction {M}{HI}) / {M}_{\\ast}). We note that some galaxies exhibit a steeper metallicity profile in the outer disc than in the inner disc. These galaxies are found in both the HI-rich and control samples. This contradicts a previous indication that these outer drops are exclusive to HI-rich galaxies. These effects are not driven by bars, although we do find some indication that barred galaxies have flatter metallicity profiles. By applying a simple analytical model we are able to account for the variety of metallicity profiles that the two samples present. The success of this model implies that the metallicity in these isolated galaxies may be in a local equilibrium, regulated by star formation. This insight could provide an explanation of the observed local mass-metallicity relation. <P />Chapter 3 We present a method to recover the gas-phase metallicity gradients from integral field spectroscopic (IFS) observations of barely resolved galaxies. We take a forward modelling approach and compare our models to the observed spatial distribution of emission line fluxes, accounting for the degrading effects of seeing and spatial binning. The method is flexible and is not limited to particular emission lines or instruments. We test the model through comparison to synthetic observations and use downgraded observations of nearby galaxies to validate this work. As a proof of concept we also apply the model to real IFS observations of high-redshift galaxies. From our testing we show that the inferred metallicity gradients and central metallicities are fairly insensitive to the assumptions made in the model and that they are reliably recovered for galaxies with sizes approximately equal to the half width at half maximum of the point-spread function. However, we also find that the presence of star forming clumps can significantly complicate the interpretation of metallicity gradients in moderately resolved high-redshift galaxies. Therefore we emphasize that care should be taken when comparing nearby well-resolved observations to high-redshift observations of partially resolved galaxies. <P />Chapter 4 We present gas-phase metallicity gradients for 94 star-forming galaxies between (0.08 < z < 0.84). We find a negative median metallicity gradient of (-0.043^{+0.009}_{-0.007}, dex/kpc)/span>, i.e. on average we find the centres of these galaxies to be more metal-rich than their outskirts. However, there is significant scatter underlying this and we find that 10% (9) galaxies have significantly positive metallicity gradients, 39% (37) have significantly negative gradients, 28% (26) have gradients consistent with being flat, the remainder 23% (22) are considered to have unreliable gradient estimates. We find a slight trend for a more negative metallicity gradient with both increasing stellar mass and increasing star formation rate (SFR). However, given the potential redshift and size selection effects, we do not consider these trends to be significant. Indeed when we normalize the SFR of our galaxies relative to the main sequence, we do not observe any trend between the metallicity gradient and the normalized SFR. 
This finding is contrary to other recent studies of galaxies at similar and higher redshifts. We do, however, identify a novel trend between the metallicity gradient of a galaxy and its size. Small galaxies ((r_d < 3 kpc)) present a large spread in observed metallicity gradients (both negative and positive gradients). In contrast, we find no large galaxies (r_d > 3 kpc) with positive metallicity gradients, and overall there is less scatter in the metallicity gradient amongst the large galaxies. We suggest that these large (well-evolved) galaxies may be analogues of galaxies in the present-day Universe, which also present a common negative metallicity gradient. <P />Chapter 5 The relationship between a galaxy's stellar mass and its gas-phase metallicity results from the complex interplay between star formation and the inflow and outflow of gas. Since the gradient of metals in galaxies is also influenced by the same processes, it is therefore natural to contrast the metallicity gradient with the mass-metallicity relation. Here we study the interrelation of the stellar mass, central metallicity and metallicity gradient, using a sample of 72 galaxies spanning (0.13 < z < 0.84) with reliable metallicity gradient estimates. We find that typically the galaxies that fall below the mean mass-metallicity relation have flat or inverted metallicity gradients. We quantify their relationship taking full account of the covariance between the different variables and find that at fixed mass the central metallicity is anti-correlated with the metallicity gradient. We argue that this is consistent with a scenario that suppresses the central metallicity either through the inflow of metal poor gas or outflow of metal enriched gas. <P />",
u'author': [u'Carton, David'],
u'doctype': u'phdthesis',
u'pub': u'Ph.D. Thesis',
u'pub_raw': u'PhD Thesis, Leiden University, 2017',
u'esources': [u'PUB_HTML'],
u'num_citations': 0,
u'doi': [u'10.5281/zenodo.581221'],
u'year': u'2017',
u'title': [u'Resolving Gas-Phase Metallicity In Galaxies'],
u'identifier': [u'2017PhDT........14C', u'10.5281/zenodo.581221', u'10.5281/zenodo.581221'],
u'aff': [u'Leiden University'],
u'num_references': 1
},
{
u'read_count': 3,
u'identifier': [u'2017nova.pres.2388K'],
u'pubdate': u'2017-06-00',
u'abstract': u'The outlined regions mark the 57 knots in Tycho selected by the authors for velocity measurements. Magenta regions have redshifted line-of-sight velocities (moving away from us); cyan regions have blueshifted light-of-sight velocities (moving toward us). [Williams et al. 2017]The Tycho supernova remnant was first observed in the year 1572. Nearly 450 years later, astronomers have now used X-ray observations of Tycho to build the first-ever 3D map of a Type Ia supernova remnant.Signs of ExplosionsSupernova remnants are spectacular structures formed by the ejecta of stellar explosions as they expand outwards into the surrounding interstellar medium.One peculiarity of these remnants is that they often exhibit asymmetries in their appearance and motion. Is this because the ejecta are expanding into a nonuniform interstellar medium? Or was the explosion itself asymmetric? The best way we can explore this question is with detailed observations of the remnants.Histograms of the velocity in distribution of the knots in the X (green), Y (blue) and Z (red) directions (+Z is away from the observer). They show no evidence for asymmetric expansion of the knots. [Williams et al. 2017]Enter TychoTo this end, a team of scientists led by Brian Williams (Space Telescope Science Institute and NASA Goddard SFC) has worked to map out the 3D velocities of the ejecta in the Tycho supernova remnant. Tycho is a Type Ia supernova thought to be caused by the thermonuclear explosion of a white dwarf in a binary system that was destabilized by mass transfer from its companion.After 450 years of expansion, the remnant now has the morphological appearance of a roughly circular cloud of clumpy ejecta. The forward shock wave from the supernova, however, is known to have twice the velocity on one side of the shell as on the other.To better understand this asymmetry, Williams and collaborators selected a total of 57 knots in Tychos ejecta, spread out around the remnant. They then used 12 years of Chandra X-ray observations to measure both the knots proper motion in the plane of the sky and their line-of-sight velocity. These two measurements were then combined to build a full 3D map of the motion of the ejecta.3D hydrodynamical simulations of Tycho, stopped at the current epoch. These show that both initially smooth (top) and initially clumpy (bottom) ejecta models are consistent with the current observations of the morphology and dynamics of Tychos ejecta. [Adapted from Williams et al. 2017]Symmetry and ClumpsWilliams and collaborators found that the knots have total velocities that range from 2400 to 6600 km/s. Unlike the forward shock of the supernova, Tychos ejecta display no asymmetries in their motion which suggests that the explosion itself was symmetric. The more likely explanation is a density gradient in the interstellar medium, which could slow the shock wave on one side of the remnant without yet affecting the motion of the clumps of ejecta.As a final exploration, the authors attempt to address the origin of Tychos clumpiness. The fact that some of Tychos ejecta knots precede its outer edge has raised the question of whether the ejecta started out clumpy, or if they began smooth and only clumped during expansion. Williams and collaborators matched the morphological and dynamical data to simulations, demonstrating that neither scenario can be ruled out at this time.This first 3D map of a Type Ia supernova represents an important step in our ability to understand these stellar explosions. 
The authors suggest that well be able to expand on this map in the future with additional observations from Chandra, as well as with new data from future X-ray observatories that will be able to detect fainter emission.CitationBrian J. Williams et al 2017 ApJ 842 28. doi:10.3847/1538-4357/aa7384 <P />',
u'pub': u'AAS Nova Highlights',
u'num_citations': 0,
u'year': u'2017',
u'data': [u'Chandra:1'],
u'bibcode': u'2017nova.pres.2388K',
u'keyword': [u'Features', u'Highlights', u'interstellar medium', u'stellar evolution', u'supernova remnant', u'supernovae', u'white dwarfs'],
u'author': [u'Kohler, Susanna'],
u'aff': [u'-'],
u'bibstem': [u'nova', u'nova.pres'],
u'doctype': u'pressrelease',
u'page': [u'2388'],
u'esources': [u'PUB_HTML'],
u'eid': u'2388',
u'title': [u'A 3D View of a Supernova Remnant'],
u'property': [u'DATA', u'ESOURCE', u'NONARTICLE', u'NOT REFEREED', u'PUB_OPENACCESS', u'OPENACCESS'],
u'pub_raw': u'AAS Nova Highlight, 14 Jun 2017, id.2388',
u'num_references': 0
},
{
u'read_count': 0,
u'bibcode': u'2017CBET.4403....2G',
u'num_references': 0,
u'pubdate': u'2017-06-00',
u'bibstem': [u'CBET', u'CBET.4403'],
u'property': [u'ESOURCE', u'NONARTICLE', u'NOT REFEREED', u'PRIVATE'],
u'abstract': u'A previous good encounter occurred on 2006 July 29d04h11m UT (r - Delta = +0.0003 AU, solar long. = 125.841 deg). Future encounters are predicted on 2029 July 29d01h53m (+0.0007 AU, 125.816 deg), 2042 July 29d10h48m (+0.0006 AU, 125.886 deg), 2053 July 29d05h35m (+0.0001 AU, 125.848 deg), and on 2068 July 29d02h09m UT (-0.0001 AU, 125.863 deg). <P />',
u'author': [u'Green, D. W. E.'],
u'doctype': u'circular',
u'pub': u'Central Bureau Electronic Telegrams',
u'pub_raw': u'Central Bureau Electronic Telegrams, 4403, 2 (2017). Edited by Green, D. W. E.',
u'volume': u'4403',
u'esources': [u'PUB_HTML'],
u'num_citations': 0,
u'year': u'2017',
u'title': [u'Potential New Meteor Shower from Comet C/2015 D4 (Borisov)'],
u'identifier': [u'2017CBET.4403....2G'],
u'aff': [u'-'],
u'page_range': u'2',
u'page': [u'2']
},
{
u'read_count': 0,
u'bibcode': u'2017ascl.soft06009C',
u'num_references': 0,
u'keyword': [u'Software'],
u'pubdate': u'2017-06-00',
u'bibstem': [u'ascl', u'ascl.soft'],
u'property': [u'ASSOCIATED', u'ESOURCE', u'NONARTICLE', u'NOT REFEREED', u'PUB_OPENACCESS', u'OPENACCESS'],
u'abstract': u'sick infers astrophysical parameters from noisy observed spectra. Phenomena that can alter the data (e.g., redshift, continuum, instrumental broadening, outlier pixels) are modeled and simultaneously inferred with the astrophysical parameters of interest. This package relies on emcee (ascl:1303.002); it is best suited for situations where a grid of model spectra already exists, and one would like to infer model parameters given some data. <P />',
u'author': [u'Casey, Andrew R.'],
u'doctype': u'software',
u'pub': u'Astrophysics Source Code Library',
u'pub_raw': u'Astrophysics Source Code Library, record ascl:1706.009',
u'esources': [u'PUB_HTML'],
u'num_citations': 0,
u'eid': u'ascl:1706.009',
u'year': u'2017',
u'title': [u'sick: Spectroscopic inference crank'],
u'identifier': [u'2017ascl.soft06009C', u'ascl:1706.009'],
u'page': [u'ascl:1706.009'],
u'aff': [u'-']
},
{
u'comment': [u'phot.dat 2930x19 Differential photometry of BM CVn; phot_mlc.dat 2930x19 Differential photometry of BM CVn with MLC removed; res.dat 1319x185 *Numerical results of the CPS analysis; res_mlc.dat 1319x185 *Results of the CPS analysis with MLC removed'],
u'read_count': 0,
u'identifier': [u'2017yCat.113380453S'],
u'pubdate': u'2017-05-00',
u'abstract': u'The included files present the numerical data of our analysis of the BM CVn photometry. The data consists of differential Johnson V-band photometry using the star HD 116010 as the comparison star. <P />The analysis has been performed using the previously published continuous period search (CPS) method, described in detail in Lehtinen et al., 2011A&A...527A.136L, Cat. J/A+A/527/A136. <P />(4 data files). <P />',
u'pub': u'VizieR Online Data Catalog',
u'num_citations': 0,
u'year': u'2017',
u'data': [u'Vizier:1'],
u'bibcode': u'2017yCat.113380453S',
u'keyword': [u'Stars: variable'],
u'bibstem': [u'yCat', u'yCat.1133'],
u'aff': [u'-', u'-', u'-', u'-', u'-', u'-', u'-', u'-', u'-', u'-', u'-', u'-', u'-'],
u'author': [u'Siltala, J.', u'Jetsu, L.', u'Hackman, T.', u'Henry, G. W.', u'Immonen, L.', u'Kajatkari, P.', u'Lankinen, J.', u'Lehtinen, J.', u'Monira, S.', u'Nikbakhsh, S.', u'Viitanen, A.', u'Viuho, J.', u'Willamo, T.'],
u'doctype': u'catalog',
u'page': [u'J/AN/338/453'],
u'eid': u'J/AN/338/453',
u'title': [u'VizieR Online Data Catalog: BM CVn V-band differential light curve (Siltala+, 2017)'],
u'property': [u'ASSOCIATED', u'DATA', u'NONARTICLE', u'NOT REFEREED'],
u'pub_raw': u'VizieR On-line Data Catalog: J/AN/338/453. Originally published in: 2017AN....338..453S',
u'num_references': 0
},
{
u'read_count': 4,
u'identifier': [u'2017AAVSN.429....1W'],
u'pubdate': u'2017-05-00',
u'abstract': u'The observing campaign from 2016 on V694 Mon (MWC 560) (AAVSO Alert Notice 538) has been continued, but with different requirements. Photometry is no longer specifically requested on a regular basis (although ongoing observations that do not interfere with other obligations are welcome). Spectroscopy on a cadence of a week or two is requested to monitor changes in the disk outflow. Investigator Adrian Lucy writes: "Adrian Lucy and Dr. Jeno Sokoloski (Columbia University) have requested spectroscopic monitoring of the broad-absorption-line symbiotic star V694 Mon (MWC 560), as a follow-up to coordinated multi-wavelength observations obtained during its recent outburst (ATel #8653, #8832, #8957; #10281). This system is a perfect place in which to study the relationship between an accretion disk and disk winds/jets, and a high-value target for which even low-resolution spectra can be extraordinarily useful...Optical brightening in MWC 560 tends to predict higher-velocity absorption, but sometimes jumps in absorption velocity also appear during optical quiescence (e.g., Iijima 2001, ASPCS, 242, 187). If such a velocity jump occurs during photometric quiescence, it may prompt radio observations to confirm and test the proposed outflow origin for recently-discovered flat-spectrum radio emission (Lucy et al. ATel #10281)...Furthermore, volunteer spectroscopic monitoring of this system has proved useful in unpredictable ways. For example, \'amateur\' spectra obtained by Somogyi P\xe9ter in 2015 December demonstrated that the velocity of absorption was very low only a month before an optical outburst peak prompted absorption troughs up to 3000 km/s, which constrains very well the timing of the changes to the outflow to a degree that would not have been otherwise possible. Any resolution can be useful. A wavelength range that can accommodate a blueshift of at least 140 angstroms (6000 km/s) from the rest wavelengths of H-alpha at 6562 angstroms and/or H-beta at 4861 angstroms is ideal, though spectra with a smaller range can still be useful. Photometry could potentially still be useful, but will be supplementary to medium-cadence photometry being collected by the ANS collaboration." "Spectroscopy may be uploaded to the ARAS database (http://www.astrosurf.com/aras/Aras_DataBase/DataBase.htm), or sent to Adrian and Jeno directly at <lucy@astro.columbia.edu>. Finder charts with sequence may be created using the AAVSO Variable Star Plotter (https://www.aavso.org/vsp). Photometry should be submitted to the AAVSO International Database. See full Special Notice for more details. <P />',
u'pub': u'AAVSO Special Notice',
u'volume': u'429',
u'page_range': u'1',
u'num_citations': 0,
u'year': u'2017',
u'bibcode': u'2017AAVSN.429....1W',
u'keyword': [u'astronomical databases: miscellaneous', u'binaries: symbiotic', u'stars: individual (V694 Mon', u'MWC 560)'],
u'copyright': u'(C) AAVSO 2017',
u'author': [u'Waagen, Elizabeth O.'],
u'aff': [u'AAVSO'],
u'bibstem': [u'AAVSN', u'AAVSN.429'],
u'doctype': u'newsletter',
u'page': [u'1'],
u'esources': [u'PUB_HTML'],
u'title': [u'V694 Mon (MWC 560) spectroscopy requested'],
u'property': [u'ESOURCE', u'NONARTICLE', u'NOT REFEREED'],
u'pub_raw': u'AAVSO Special Notice #429',
u'num_references': 6
},
{
u'read_count': 0,
u'bibcode': u'2017sptz.prop13168Y',
u'num_references': 0,
u'pubdate': u'2017-04-00',
u'bibstem': [u'sptz', u'sptz.prop'],
u'property': [u'DATA', u'NONARTICLE', u'NOT REFEREED'],
u'abstract': u'ULIRG F01004-2237 had a strong optical flare, peaked in 2010, and the follow-up optical spectra classified this event as a TDE candidate (Tadhunter et al. 2017, Nature Astronomy). In early 2017, using archival WISE data, we discovered that its 3.4 and 4.6um fluxes have been steadily rising since 2013, increased by a factor of 3.5 and 2.6 respectively. The last epoch data from WISE on 2016-12-12 shows that F01004-2237 has reached 7.5 and 14mJy at 3.4 and 4.6um. We interpret the mid-IR LCs as infrared echoes from the earlier optical flare. We infer a convex, dust ring with a radius of 1 pc from the central heating source. Our model predicts that if this event is indeed a TDE, its mid-IR LCs should start to fade in next 5-12 months because it has already reprocessed most of the UV/optical energy from the tidal disruption. However, if this event is due to activities from an AGN, its mid-IR LCs could last over a much longer time scale. We request a total of 3.2 hours of Spitzer time to monitor the mid-IR variations in next 12 months. This will provide the critical data to confirm the nature of this transient event. <P />',
u'author': [u'Yan, Lin'],
u'doctype': u'proposal',
u'pub': u'Spitzer Proposal',
u'pub_raw': u'Spitzer Proposal ID 13168',
u'page_range': u'13168',
u'num_citations': 0,
u'year': u'2017',
u'title': [u'Confirm the Nature of a TDE Candidate in ULIRG F01004-2237 Using Spitzer mid-IR Light Curves'],
u'identifier': [u'2017sptz.prop13168Y'],
u'data': [u'Spitzer:1'],
u'aff': [u'-'],
u'page': [u'13168']
},
{
u'read_count': 0,
u'bibcode': u'2017MsT..........2A',
u'num_references': 4,
u'pubdate': u'2017-03-00',
u'bibstem': [u'MsT', u'MsT......'],
u'property': [u'ESOURCE', u'NONARTICLE', u'NOT REFEREED', u'AUTHOR_OPENACCESS', u'OPENACCESS'],
u'abstract': u'The African Very-long-baseline interferometry Network (AVN) is a joint project between South Africa and eight partner African countries aimed at establishing a VLBI (Very-Long-Baseline Interferometry) capable network of radio telescopes across the African continent. An existing structure that is earmarked for this project, is a 32 m diameter antenna located in Ghana that has become obsolete due to advances in telecommunication. The first phase of the conversion of this Ghana antenna into a radio astronomy telescope is to upgrade the antenna to observe at 5 GHz to 6.7 GHz frequency and then later to 18 GHz within a required performing tolerance. The surface and pointing accuracies for a radio telescope are much more stringent than that of a telecommunication antenna. The mechanical pointing accuracy of such telescopes is influenced by factors such as mechanical alignment, structural deformation, and servo drive train errors. The current research investigates the numerical simulation of the surface and pointing accuracies of the Ghana 32 m diameter radio astronomy telescope due to its structural deformation mainly influenced by gravity, wind and thermal loads. <P />',
u'author': [u'Azankpo, Severin'],
u'doctype': u'mastersthesis',
u'pub': u'Masters Thesis',
u'pub_raw': u'Masters thesis, University of Stellenbosch, March 2017, 120 pages',
u'esources': [u'AUTHOR_PDF', u'PUB_HTML', u'PUB_PDF'],
u'num_citations': 0,
u'year': u'2017',
u'title': [u'Surface Accuracy and Pointing Error Prediction of a 32 m Diameter Class Radio Astronomy Telescope'],
u'identifier': [u'2017MsT..........2A'],
u'aff': [u'University of Stellenbosch'],
u'page_range': u'2',
u'page': [u'2']
},
{
u'read_count': 3,
u'bibcode': u'2016emo6.rept.....R',
u'keyword': [u'THE MOON', u'ECLIPSES', u'PARTIAL', u'PENUMBRAL', u'ASTROPHOTOGRAPHY'],
u'pubdate': u'2016-10-00',
u'author': [u'Rotaru, Adrian', u'Pteancu, Mircea', u'Zaharia, Cristian'],
u'abstract': u"The web page represents circumstances and photographs from the Moon's partial/penumbral eclipse from 16 September 2016 obtained from few various places in Romania (East Europe). A part of photographs give the maximum phase of the Eclipse, while another give the reddened Moon. <P />",
u'bibstem': [u'emo6', u'emo6.rept'],
u'doctype': u'techreport',
u'pub': u'http://www.astronomy.ro/forum/viewtopic.php?p=159287#159287 (Comments in Romanian',
u'pub_raw': u'http://www.astronomy.ro/forum/viewtopic.php?p=159287#159287 (Comments in Romanian)',
u'property': [u'NONARTICLE', u'NOT REFEREED'],
u'num_citations': 0,
u'year': u'2016',
u'title': [u"The penumbral Moon's eclipse form 16 september 2016"],
u'identifier': [u'2016emo6.rept.....R'],
u'aff': [u'Bragadiru, Romania', u'Private Astronomical Observatory, Arad, Romania', u'Private Astronomical Observatory, Ploiesti, Romania'],
u'num_references': 0
},
{
u'read_count': 0,
u'bibcode': u'2016iac..talk..872V',
u'num_references': 1,
u'author': [u'Velasco, Sergio'],
u'pubdate': u'2016-03-00',
u'page_range': u'872',
u'property': [u'ESOURCE', u'NONARTICLE', u'NOT REFEREED', u'AUTHOR_OPENACCESS', u'OPENACCESS'],
u'abstract': u'Not Available <P />',
u'bibstem': [u'iac', u'iac..talk'],
u'doctype': u'talk',
u'pub': u'IAC Talks, Astronomy and Astrophysics Seminars from the Instituto de Astrofísica de Canarias',
u'pub_raw': u'IAC Talks, Astronomy and Astrophysics Seminars from the Instituto de Astrof\xedsica de Canarias, 872',
u'esources': [u'AUTHOR_HTML', u'PUB_HTML'],
u'num_citations': 0,
u'year': u'2016',
u'title': [u'Living on the edge: Adaptive Optics+Lucky Imaging'],
u'identifier': [u'2016iac..talk..872V'],
u'aff': [u'Instituto de Astrof\xedsica de Canarias'],
u'page': [u'872']
},
{
u'read_count': 0,
u'isbn': [u'9789048123674'],
u'pubdate': u'2009-00-00',
u'abstract': u'The discovery of the physical phenomenon of Nuclear Magnetic Resonance (NMR) in 1946 gave rise to the spectroscopic technique that has become a remarkably versatile research tool. One could oversimplify NMR spectros-copy by categorizing it into the two broad applications of structure elucidation of molecules (associated with chemistry and biology) and imaging (associated with medicine). But, this certainly does not do NMR spectroscopy justice in demonstrating its general acceptance and utilization across the sciences. This manuscript is not an effort to present an exhaustive, or even partial review of NMR spectroscopy applications, but rather to provide a glimpse at the wide-ranging uses of NMR spectroscopy found within the confines of a single magnetic resonance research facility, the Stanford Magnetic Resonance Laboratory. Included here are summaries of projects involving protein structure determination, mapping of intermolecular interactions, exploring fundamental biological mechanisms, following compound cycling in the environmental, analysis of synthetic solid compounds, and microimaging of a model organism. <P />',
u'num_citations': 0,
u'year': u'2009',
u'bibcode': u'2009bcet.book...65L',
u'copyright': u'(c) 2009: Springer Netherlands',
u'bibstem': [u'bcet', u'bcet.book'],
u'aff': [u'Stanford Magnetic Resonance Laboratory, Stanford University', u'Department of Chemistry, Stanford University; , Genencor', u'Department of Geological & Environmental Sciences, Stanford University; , ConocoPhillips Company', u'Department of Molecular and Cellular Physiology, Stanford University; Department of Structural Biology, Stanford University', u'Department of Geological & Environmental Sciences, Stanford University; , Agriculture and Agri-Food Canada', u'Stanford Genome Technology Center, Stanford University; Department of Biochemistry, Stanford University', u'Department of Geological & Environmental Sciences, Stanford University; Air Products and Chemicals, Inc. Allentown', u'Department of Molecular and Cellular Physiology, Stanford University; Department of Structural Biology, Stanford University', u'Department of Biochemistry, Stanford University', u'Department of Chemistry, Stanford University; Department of Biochemistry, Stanford University', u'Department of Biochemistry, Stanford University; Department of Biochemistry, Molecular Biology and Cell Biology, Northwestern University', u'Department of Chemistry, Stanford University; , Institute for Research in Biomedicine', u'Stanford Genome Technology Center, Stanford University; Department of Biochemistry, Stanford University', u'Stanford Magnetic Resonance Laboratory, Stanford University; Department of Structural Biology, Stanford University', u'Department of Molecular and Cellular Physiology, Stanford University; Department of Structural Biology, Stanford University', u'Department of Geological & Environmental Sciences, Stanford University', u'Department of Molecular and Cellular Physiology, Stanford University; Department of Structural Biology, Stanford University'],
u'esources': [u'PUB_HTML'],
u'editor': [u'Puglisi, Joseph D.'],
u'pub_raw': u'Biophysics and the Challenges of Emerging Threats, NATO Science for Peace and Security Series B: Physics and Biophysics. ISBN 978-90-481-2367-4. Springer Netherlands, 2009, p. 65',
u'num_references': 0,
u'identifier': [u'2009bcet.book...65L', u'10.1007/978-90-481-2368-1_5', u'10.1007/978-90-481-2368-1_5'],
u'pub': u'Biophysics and the Challenges of Emerging Threats',
u'page_range': u'65',
u'doi': [u'10.1007/978-90-481-2368-1_5'],
u'keyword': [u'Physics'],
                u'author': [u'Liu, Corey W.', u'Alekseyev, Viktor Y.', u'Allwardt, Jeffrey R.', u'Bankovich, Alexander J.', u'Cade-Menun, Barbara J.', u'Davis, Ronald W.', u'Du, Lin-Shu', u'Garcia, K. Christopher', u'Herschlag, Daniel', u'Khosla, Chaitan', u'Kraut, Daniel A.', u'Li, Qing', u'Null, Brian', u'Puglisi, Joseph D.', u'Sigala, Paul A.', u'Stebbins, Jonathan F.', u'Varani, Luca'],
                u'title': [u'The Diversity of Nuclear Magnetic Resonance Spectroscopy'],
u'property': [u'ESOURCE', u'TOC', u'ARTICLE', u'NOT REFEREED'],
u'page': [u'65'],
u'doctype': u'inbook'
},
{
u'read_count': 0,
u'bibcode': u'2007AAS...210.2104M',
u'num_references': 0,
u'pubdate': u'2007-05-00',
u'bibstem': [u'AAS', u'AAS...210'],
u'series': u'American Astronomical Society Meeting Abstracts',
u'abstract': u'Palomar-QUEST (PQ) synoptic sky survey has now been routinely processing data from driftscans in real-time. As four photometric bandpasses are utilized in nearly simultaneously, PQ is well suited to search for transient and highly variable objects. Using a series of software filters i.e. programs to select/deselect objects based on certain criteria we shorten the list of candidates from the initially flagged candidate transients. Such filters include looking for known asteroids, known variables, as well as moving, but previously uncatalogued objects based on their motion within a scan as well as between successive scans. Some software filters also deal with instrumental artifacts, edge effects, and use clustering of spurious detections around bright stars. During a typical night when we cover about 500 sq. degrees, we detect hundreds of asteroids, the primary contaminants in the search for astrophysical transients beyond our solar system. <P />Here we describe some statistics based on the software filters we employ and the nature of the objects that seem to survive the process. We also discuss the usefulness of this to amateur astronomers, projects like VOEventNet, and other synoptic sky surveys. <P />We also present an outline of the work we have started on quantifying the variability of quasars, blazars, as well as various classes of Galactic sources, by combining the large number of PQ scans with other existing data sources federated in the Virtual Observatory environment. <P />The PQ survey is partially supported by the U.S. National Science Foundation (NSF). <P />',
u'author': [u'Mahabal, Ashish A.', u'Drake, A. J.', u'Djorgovski, S. G.', u'Donalek, C.', u'Glikman, E.', u'Graham, M. J.', u'Williams, R.', u'Baltay, C.', u'Rabinowitz, D.', u'PQ Team Caltech', u'Yale', u'NCSA', u'Indiana', u', . . .'],
u'doctype': u'abstract',
u'pub': u'American Astronomical Society Meeting Abstracts #210',
u'pub_raw': u'American Astronomical Society Meeting 210, id.21.04; <ALTJOURNAL>Bulletin of the American Astronomical Society, Vol. 39, p.124</ALTJOURNAL>',
u'volume': u'210',
u'property': [u'TOC', u'NONARTICLE', u'NOT REFEREED'],
u'num_citations': 0,
u'eid': u'21.04',
u'year': u'2007',
u'title': [u'Time Domain Exploration with the Palomar-QUEST Sky Survey'],
u'identifier': [u'2007BAAS...39..124M', u'2007AAS...210.2104M', u'2007BAAS...39..124M'],
u'page': [u'21.04'],
u'aff': [u'Caltech', u'Caltech', u'Caltech', u'Caltech', u'Caltech', u'Caltech', u'Caltech', u'Yale University', u'Yale University', u'-', u'-', u'-', u'-', u'-']
},
{
u'read_count': 0,
u'issn': [u'1819-3463'],
u'pubdate': u'2007-01-00',
u'abstract': u'Not Available <P />',
u'num_citations': 0,
u'year': u'2007',
u'bibcode': u'2007RJPh....1...35.',
u'bibstem': [u'RJPh', u'RJPh....1'],
u'aff': [u'-', u'-'],
u'esources': [u'PUB_HTML'],
u'issue': u'1',
u'pub_raw': u'Research Journal of Physics, vol. 1, issue 1, pp. 35-41',
u'num_references': 0,
u'identifier': [u'10.3923/rjp.2007.35.41', u'2007RJPh....1...35.', u'10.3923/rjp.2007.35.41'],
u'pub': u'Research Journal of Physics',
u'volume': u'1',
u'page_range': u'35-41',
u'doi': [u'10.3923/rjp.2007.35.41'],
u'author': [u'., S. N. Agbo', u'., E. C. Okoroigwe'],
u'doctype': u'article',
u'title': [u'Analysis of Thermal Losses in the Flat-Plate Collector of a Thermosyphon Solar Water Heater'],
u'property': [u'ESOURCE', u'ARTICLE', u'NOT REFEREED', u'PUB_OPENACCESS', u'OPENACCESS'],
u'page': [u'35']
},
{
u'read_count': 0,
u'bibcode': u'1995ans..agar..390M',
u'num_references': 0,
u'keyword': [u'Earth Orbits', u'Navigation Aids', u'Navigators', u'Onboard Equipment', u'Space Navigation', u'Spacecraft Trajectories', u'Support Systems', u'Technology Assessment', u'Technology Utilization', u'Ascent Trajectories', u'Reentry Trajectories', u'Spacecraft', u'Spacecraft Performance', u'Spacecraft Survivability', u'Tradeoffs', u'Weight (Mass)', u'Space Communications, Spacecraft Communications, Command and Tracking'],
u'pubdate': u'1995-06-00',
u'bibstem': [u'ans', u'ans..agar'],
u'property': [u'ARTICLE', u'NOT REFEREED'],
u'abstract': u'Spacecraft operation depends upon knowledge of vehicular position and, consequently, navigational support has been required for all such systems. Technical requirements for different mission trajectories and orbits are addressed with consideration given to the various tradeoffs which may need to be considered. The broad spectrum of spacecraft are considered with emphasis upon those of greater military significance (i.e., near earth orbiting satellites). Technical requirements include, but are not limited to, accuracy; physical characteristics such as weight and volume; support requirements such as electrical power and ground support; and system integrity. Generic navigation suites for spacecraft applications are described. It is shown that operational spacecraft rely primarily upon ground-based tracking and computational centers with little or no navigational function allocated to the vehicle, while technology development efforts have been and continue to be directed primarily toward onboard navigation suites. The military significance of onboard navigators is shown to both improve spacecraft survivability and performance (accuracy). <P />',
u'author': [u'Miller, Judy L.'],
u'doctype': u'inproceedings',
u'pub': u'In AGARD',
u'pub_raw': u'In AGARD, Aerospace Navigation Systems p 390-405 (SEE N96-13404 02-04)',
u'page_range': u'390-405',
u'num_citations': 0,
u'year': u'1995',
u'title': [u'Spacecraft navigation requirements'],
u'identifier': [u'1995ans..agar..390M'],
u'page': [u'390'],
u'aff': [u'Draper (Charles Stark) Lab., Inc., Cambridge, MA.']
},
{
u'read_count': 2,
u'bibcode': u'1995anda.book.....N',
u'pubdate': u'1995-00-00',
u'bibstem': [u'anda', u'anda.book'],
u'abstract': u'Not Available <P />',
u'author': [u'Nayfeh, Ali H.', u'Balachandran, Balakumar'],
u'doctype': u'book',
u'pub': u'Wiley series in nonlinear science',
u'pub_raw': u'Wiley series in nonlinear science, New York; Chichester: Wiley, |c1995',
u'property': [u'NONARTICLE', u'NOT REFEREED'],
u'num_citations': 118,
u'year': u'1995',
u'title': [u'Applied nonlinear dynamics: analytical, computational and experimental methods'],
u'identifier': [u'1995anda.book.....N'],
u'aff': [u'-', u'-'],
u'num_references': 0
},
{
u'arxiv_class': [u'hep-th'],
u'read_count': 1143,
u'pubdate': u'1988-11-00',
u'abstract': u'These lectures consisted of an elementary introduction to conformal field theory, with some applications to statistical mechanical systems, and fewer to string theory. Contents: 1. Conformal theories in d dimensions 2. Conformal theories in 2 dimensions 3. The central charge and the Virasoro algebra 4. Kac determinant and unitarity 5. Identication of m = 3 with the critical Ising model 6. Free bosons and fermions 7. Free fermions on a torus 8. Free bosons on a torus 9. Affine Kac-Moody algebras and coset constructions 10. Advanced applications <P />',
u'pub': u'arXiv e-prints',
u'num_citations': 190,
u'year': u'1988',
u'property': [u'ESOURCE', u'INSPIRE', u'ARTICLE', u'NOT REFEREED', u'EPRINT_OPENACCESS', u'OPENACCESS'],
u'bibcode': u'1991hep.th....8028G',
u'keyword': [u'High Energy Physics - Theory'],
u'author': [u'Ginsparg, Paul'],
u'aff': [u'-'],
u'bibstem': [u'arXiv', u'arXiv....'],
u'doctype': u'eprint',
u'page': [u'hep-th/9108028'],
u'esources': [u'EPRINT_HTML', u'EPRINT_PDF'],
u'eid': u'hep-th/9108028',
u'title': [u'Applied Conformal Field Theory'],
u'identifier': [u'1991hep.th....8028G', u'arXiv:hep-th/9108028'],
u'pub_raw': u'eprint arXiv:hep-th/9108028',
u'num_references': 0
},
{
u'read_count': 0,
u'bibcode': u'1983aiaa.meetY....K',
u'keyword': [u'Artificial Satellites', u'Autonomous Navigation', u'Earth-Moon System', u'Lunar Communication', u'Radio Beacons', u'Radio Navigation', u'Space Navigation', u'Doppler Navigation', u'Least Squares Method', u'Orbit Calculation', u'Space Communications, Spacecraft Communications, Command and Tracking'],
u'pubdate': u'1983-01-00',
u'bibstem': [u'aiaa', u'aiaa.meet'],
u'abstract': u"The concept of using lunar beacon signal transmission for on-board navigation for earth satellites and near-earth spacecraft is described. The system would require powerful transmitters on the earth-side of the moon's surface and black box receivers with antennae and microprocessors placed on board spacecraft for autonomous navigation. Spacecraft navigation requires three position and three velocity elements to establish location coordinates. Two beacons could be soft-landed on the lunar surface at the limits of allowable separation and each would transmit a wide-beam signal with cones reaching GEO heights and be strong enough to be received by small antennae in near-earth orbit. The black box processor would perform on-board computation with one-way Doppler/range data and dynamical models. Alternatively, GEO satellites such as the GPS or TDRSS spacecraft can be used with interferometric techniques to provide decimeter-level accuracy for aircraft navigation. <P />",
u'author': [u'Khatib, A. R.', u'Ellis, J.', u'French, J.', u'Null, G.', u'Yunck, T.', u'Wu, S.'],
u'doctype': u'proceedings',
u'pub': u'AIAA, Aerospace Sciences Meeting',
u'pub_raw': u'American Institute of Aeronautics and Astronautics, Aerospace Sciences Meeting, 21st, Reno, NV, Jan. 10-13, 1983. 7 p.',
u'property': [u'NONARTICLE', u'NOT REFEREED'],
u'num_citations': 0,
u'year': u'1983',
u'title': [u'Autonomous navigation using lunar beacons'],
u'identifier': [u'1983aiaa.meetY....K'],
u'aff': [u'California Institute of Technology, Jet Propulsion Laboratory, Pasadena, CA', u'California Institute of Technology, Jet Propulsion Laboratory, Pasadena, CA', u'California Institute of Technology, Jet Propulsion Laboratory, Pasadena, CA', u'California Institute of Technology, Jet Propulsion Laboratory, Pasadena, CA', u'California Institute of Technology, Jet Propulsion Laboratory, Pasadena, CA', u'California Institute of Technology, Jet Propulsion Laboratory, Pasadena, CA'],
u'num_references': 0
}
]
}
}
data_2 = \
{
u'responseHeader': {
u'status':0,
u'QTime':43,
u'params': {
u'q':'bibcode:2018AAS...23221409A',
u'indent':'on',
u'fl':'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,num_citations,read_count,bibcode,identifier,copyright,keyword,doctype,num_references,comment,property,esources,data,isbn,pubnote,eid',
'wt':'json',
'_':'1529341837285'
}
},
u'response': {
u'numFound':1,
u'start':0,
u'docs':[
{
u'read_count':0,
u'abstract': u'The NASA Astrophysics Data System (ADS) is used daily by researchers and curators as a discovery platform for the Astronomy literature. Over the past several years, the ADS has been adding to the breadth and depth of its contents. Scholarly astronomy articles are now indexed as full-text documents, allowing for complete and accurate literature searches. High-level data products, data links, and software used in refereed astronomy papers are now also being ingested and indexed in our database. All the search functionality exposed in the new ADS interface is also available via its API, which we are continuing to develop and enhance. In this talk I will describe the current system, our current roadmap, and solicit input from the community regarding what additional data, services, and discovery capabilities the ADS should support.',
u'num_citations':0,
u'num_references':0,
u'pubdate': u'2018-06-00',
u'year': u'2018',
u'page': [u'214.09'],
u'bibcode': u'2018AAS...23221409A',
u'bibstem': [u'AAS', u'AAS...232'],
u'identifier': [u'2018AAS...23221409A'],
u'copyright': u'(c) 2018: American Astronomical Society',
u'author': [u'Accomazzi, Alberto', u'ADS Team'],
u'aff': [u'Harvard Smithsonian, CfA', u'-'],
u'volume': u'232',
u'pub': u'American Astronomical Society Meeting Abstracts #232',
u'property': [u'NONARTICLE', u'NOT REFEREED'],
u'doctype': u'abstract',
u'pub_raw': u'American Astronomical Society, AAS Meeting #232, id.#214.09',
u'eid': u'214.09',
u'title': [u'The NASA Astrophysics Data System: Capabilities and Roadmap for the 2020s']
}
]
}
}
data_3 = \
{
u'responseHeader': {
u'status':0,
u'QTime':43,
u'params': {
u'q':'bibcode:2000ApJ...533L..25E',
u'indent':'on',
u'fl':'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,num_citations,read_count,bibcode,identifier,copyright,keyword,doctype,num_references,comment,property,esources,data,isbn,pubnote,eid',
'wt':'json',
'_':'1529341837285'
}
},
u'response': {
u'numFound':1,
u'start':0,
u'docs':[
{
u'read_count': 17,
u'abstract': u"The Dominion Radio Astrophysical Observatory's Synthesis Telescope provides the highest resolution data (1' and 0.82 km s<SUP>-1</SUP>) to date of an H I worm candidate. Observed as part of the Canadian Galactic Plane Survey, mushroom-shaped GW 123.4-1.5 extends only a few hundred parsecs, contains ~10<SUP>5</SUP> M<SUB>solar</SUB> of neutral hydrogen, and appears unrelated to a conventional shell or chimney structure. Our preliminary Zeus two-dimensional models use a single off-plane explosion with a modest (~10<SUP>51</SUP> ergs) energy input. These generic simulations generate, interior to an expanding outer blast wave, a buoyant cloud whose structure resembles the morphology of the observed feature. Unlike typical model superbubbles, the stem can be narrow because its width is not governed by the pressure behind the blast wave or the disk scale height. Using this type of approach, it should be possible to more accurately model the thin stem and other details of GW 123.4-1.5 in the future.",
u'doctype': u'article',
u'year': u'2000',
u'bibcode': u'2000ApJ...533L..25E',
u'bibstem': [u'ApJL', u'ApJL..533'],
u'author': [u'English, Jayanne', u'Taylor, A. R.', u'Mashchenko, S. Y.', u'Irwin, Judith A.', u'Basu, Shantanu', u'Johnstone, Doug'],
u'aff': [u"Department of Physics, Queen's University, Kingston, Ontario, K7L 3N6, Canada; Space Telescope Science Institute, Baltimore, MD", u"Department of Physics and Astronomy, University of Calgary, Calgary, Alberta, T2N 1N4, Canada", u"Department de Physique, Universit\u00e9 de Montr\u00e9al, Montr\u00e9al, Qu\u00e9bec, H3C 3J7, Canada", u"Department of Physics, Queen's University, Kingston, Ontario, K7L 3N6, Canada", u"Department of Physics and Astronomy, University of Western Ontario, London, Ontario, N6A 3K7, Canada", u"University of Toronto, 60 St. George Street, Toronto, Ontario, M5S 3H8, Canada"],
u'esources': [u'EPRINT_HTML', u'EPRINT_PDF', u'PUB_HTML', u'PUB_PDF'],
u'issue': u'1',
u'pub_raw': u'The Astrophysical Journal, Volume 533, Issue 1, pp. L25-L28.',
u'pub': u'The Astrophysical Journal',
u'volume': u'533',
u'page_range': u'L25-L28',
u'pubdate': u'2000-04-00',
u'data': [u'SIMBAD:3'],
u'doi': [u'10.1086/312592'],
u'keyword': [u'GALAXY: HALO', u'GALAXY: STRUCTURE', u'ISM: BUBBLES', u'ISM: INDIVIDUAL: ALPHANUMERIC: GW 123.4-1.5', u'ISM: STRUCTURE', u'Astrophysics'],
u'title': [u'The Galactic Worm GW 123.4-1.5: A Mushroom-shaped H I Cloud'],
u'num_citations': 16,
u'num_references': 12,
u'property': [u'OPENACCESS', u'REFEREED', u'EPRINT_OPENACCESS', u'PUB_OPENACCESS', u'ARTICLE'],
u'page': [u'L25']
}
]
}
}
data_4 = \
{
u'responseHeader': {
u'status':0,
u'QTime':43,
u'params': {
u'q':'bibcode:2017wfc..rept...16R',
u'indent':'on',
u'fl':'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,num_citations,read_count,bibcode,identifier,copyright,keyword,doctype,num_references,comment,property,esources,data,isbn,pubnote,eid',
'wt':'json',
'_':'1529341837285'
}
},
u'response': {
u'numFound':1,
u'start':0,
u'docs':[
{
u'read_count': 0,
u'bibcode': u'2017wfc..rept...16R',
u'bibstem': [u'wfc', u'wfc..rept'],
u'keyword': [u'Hubble Space Telescope', u'HST', u'Space Telescope Science Institute', u'STScI', u'WFC3', u'infrared blobs', u'IR blobs'],
u'page_range': u'16',
u'abstract': u'We present a investigation into possible overlaps between the known IR blobs with the grism aperture reference positions and the IR dither patterns. Each aperture was designed to place the science target (e.g. a specific star) on a cosmetically clean area of the IR detector. Similarly, the dither patterns were designed to mitigate cosmetic defects by rarely (or ideally never) placing such targets on known defects. Because blobs accumulate with time, the originally defined apertures and dither patterns may no longer accomplish their goals, it is important to reverify these combinations. We find two potential overlaps between the blob, aperture, and dither combinations, but do not recommend any changes to the current suite of aperture references positions and/or dither patterns for two reasons. First, one of the overlaps occurs with a dither/aperture combination that is seldom used for high-value science operations, but rather more common for wide-field surveys/mosaics. Second, the other overlap is 8.7 pix from a blob that has a fiducial radius of 10 pix, which already represents a very conservative distance. We conclude that a similar analysis should be repeated as new blobs occur, to continue to ensure ideal operations for high-value science targets. The purpose of this report is to document the analysis in order to facilitate its repetition in the future.',
u'author': [u'Ryan, R. E.', u'McCullough, P. R.'],
u'doctype': u'techreport',
u'pub': u'Space Telescope WFC Instrument Science Report',
u'num_citations': 0,
u'num_references': 0,
u'esources': [u'PUB_PDF'],
u'pub_raw': u'Instrument Science Report WFC3 2017-16, 6 pages',
u'year': u'2017',
u'pubdate': u'2017-06-00',
u'title': [u'Possible Overlaps Between Blobs, Grism Apertures, and Dithers'],
u'property': [u'NONARTICLE', u'NOT REFEREED'],
u'page': [u'16'],
u'aff': [u'Space Telescope Science Institute', u'Space Telescope Science Institute']
}
]
}
}
data_5 = \
{
u'responseHeader': {
u'status': 0,
u'QTime': 13,
u'params': {
u'sort': u'date desc, bibcode desc',
u'rows': u'1',
u'fq': u'{!bitset}',
u'q': u'*:*',
u'start': u'0',
u'wt': u'json',
u'fl': u'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,read_count,bibcode,identifier,copyright,keyword,doctype,[citations],comment,version,property,esources,data,isbn,eid,issn,arxiv_class,editor,series,publisher,bibstem'
}
},
u'response': {
u'start': 0,
u'numFound': 1,
u'docs': [
{
u'read_count': 0,
u'issn': [u'0031-9007'],
u'pubdate': u'2018-01-00',
u'abstract': u'Not Available <P />',
u'num_citations': 0,
u'year': u'2018',
u'bibcode': u'2018PhRvL.120b9901P',
u'bibstem': [u'PhRvL', u'PhRvL.120'],
u'aff': [u'-', u'-', u'-', u'-'],
u'esources': [u'PUB_HTML'],
u'issue': u'2',
u'pub_raw': u'Physical Review Letters, Volume 120, Issue 2, id.029901',
u'num_references': 4,
u'identifier': [u'2018PhRvL.120b9901P', u'10.1103/PhysRevLett.120.029901', u'10.1103/PhysRevLett.120.029901'],
u'pub': u'Physical Review Letters',
u'volume': u'120',
u'doi': [u'10.1103/PhysRevLett.120.029901'],
u'author': [u'Pustilnik, M.', u'van Heck, B.', u'Lutchyn, R. M.', u'Glazman, L. I.'],
u'doctype': u'erratum',
u'eid': u'029901',
u'title': [u'Erratum: Quantum Criticality in Resonant Andreev Conduction [Phys. Rev. Lett. 119, 116802 (2017)]'],
u'property': [u'ESOURCE', u'ARTICLE', u'REFEREED'],
u'page': [u'029901']
}
]
}
}
data_6 = \
{
u'responseHeader':{
u'status': 0,
u'QTime': 4,
u'params':{
u'q': u'first_author:accomazzi',
u'indent': u'on',
u'fl': u'bibcode,author,year,pub,bibstem',
u'sort': u'pub desc',
u'wt': u'json'
}
},
u'response': {
u'numFound': 10,
u'start': 0,
u'docs': [
{
u'year':'2020',
u'bibcode':'2020AAS...23528705A',
u'author':['Accomazzi, A.', 'Kurtz, M.', 'Henneken, E.', 'Grant, C.', 'Thompson, D.', 'Chyla, R.', 'McDonald, S.', 'Blanco-Cuaresma, S.', 'Shapurian, G.', 'Hostetler, T.', 'Templeton, M.', 'Lockhart, K.', 'Bukovi, K.'],
u'pub':'American Astronomical Society Meeting Abstracts',
u'bibstem':['AAS', 'AAS...235'],
u'identifier':['2020AAS...23528705A']
},
{
u'year':'2019',
u'bibcode':'2019EPSC...13.1911A',
u'author':['Accomazzi, Alberto', 'Kurtz, Michael', 'Henneken, Edwin'],
u'pub':'EPSC-DPS Joint Meeting 2019',
u'bibstem':['EPSC', 'EPSC...13'],
u'identifier': ['2019EPSC...13.1911A']
},
{
u'year':'2015',
u'bibcode':'2015scop.confE...3A',
u'author':['Accomazzi, Alberto'],
u'pub':'Science Operations 2015: Science Data Management',
u'bibstem':['scop', 'scop.conf'],
u'identifier':['2015scop.confE...3A', '10.5281/zenodo.34494', '10.5281/zenodo.34494']
},
{
u'year':'2019',
u'bibcode':'2019AAS...23338108A',
u'author':['Accomazzi, Alberto', 'Kurtz, Michael J.', 'Henneken, Edwin', 'Grant, Carolyn S.', 'Thompson, Donna M.', 'Chyla, Roman', 'McDonald, Stephen', 'Blanco-Cuaresma, Sergi', 'Shapurian, Golnaz', 'Hostetler, Timothy', 'Templeton, Matthew', 'Lockhart, Kelly'],
u'pub':'American Astronomical Society Meeting Abstracts #233',
u'bibstem':['AAS', 'AAS...233'],
u'identifier': ['2019AAS...23338108A'],
},
{
u'year':'2019',
u'bibcode':'2019AAS...23320704A',
u'author':['Accomazzi, Alberto'],
u'pub':'American Astronomical Society Meeting Abstracts #233',
u'bibstem':['AAS', 'AAS...233'],
u'identifier': ['2019AAS...23320704A'],
},
{
u'year':'2018',
u'bibcode':'2018EPJWC.18608001A',
u'author':['Accomazzi, Alberto', 'Kurtz, Michael J.', 'Henneken, Edwin A.', 'Grant, Carolyn S.', 'Thompson, Donna M.', 'Chyla, Roman', 'McDonald, Steven', 'Shaulis, Taylor J.', 'Blanco-Cuaresma, Sergi', 'Shapurian, Golnaz', 'Hostetler, Timothy W.', 'Templeton, Matthew R.'],
u'pub':'European Physical Journal Web of Conferences',
u'bibstem':['EPJWC', 'EPJWC.186'],
u'identifier':['2017arXiv171008505A', '2018EPJWC.18608001A', '10.1051/epjconf/201818608001', 'arXiv:1710.08505', '10.1051/epjconf/201818608001', '2017arXiv171008505A']
},
{
u'year':'2018',
u'bibcode':'2018AAS...23221409A',
u'author':['Accomazzi, Alberto', 'ADS Team'],
u'pub':'American Astronomical Society Meeting Abstracts #232',
u'bibstem':['AAS', 'AAS...232'],
u'identifier': ['2018AAS...23221409A'],
},
{
u'year':'2017',
u'bibcode':'2017ASPC..512...45A',
u'author':['Accomazzi, A.', 'Kurtz, M. J.', 'Henneken, E. A.', 'Grant, C. S.', 'Thompson, D. M.', 'Chyla, R.', 'Holachek, A.', 'Elliott, J.'],
u'pub':'Astronomical Data Analysis Software and Systems XXV',
u'bibstem':['ASPC', 'ASPC..512'],
u'identifier': ['2017adass..25...45A', '2018ASPC..512...45A', '2016arXiv160107858A', '2017ASPC..512...45A', 'arXiv:1601.07858', '2017adass..25...45A', '2018ASPC..512...45A', '2016arXiv160107858A'],
},
{
u'year':'2018',
u'bibcode':'2018AAS...23136217A',
u'author':['Accomazzi, Alberto', 'Kurtz, Michael J.', 'Henneken, Edwin', 'Grant, Carolyn S.', 'Thompson, Donna M.', 'Chyla, Roman', 'McDonald, Steven', 'Shaulis, Taylor J.', 'Blanco-Cuaresma, Sergi', 'Shapurian, Golnaz', 'Hostetler, Timothy W.', 'Templeton, Matthew R.', 'Lockhart, Kelly E.'],
u'pub':'American Astronomical Society Meeting Abstracts #231',
u'bibstem':['AAS', 'AAS...231'],
u'identifier': ['2018AAS...23136217A'],
},
{
u'year':'2018',
u'bibcode':'2018AAS...23130709A',
u'author':['Accomazzi, Alberto'],
u'pub':'American Astronomical Society Meeting Abstracts #231',
u'bibstem':['AAS', 'AAS...231'],
u'identifier': ['2018AAS...23130709A'],
},
]
}
}
data_7 = \
{
u'responseHeader':{
u'status': 0,
u'QTime': 8,
u'params':{
u'q': u'bibcode:2005GML...tmp....1A',
u'indent': u'on',
u'fl': u'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,read_count,bibcode,identifier,copyright,keyword,doctype,[citations],comment,version,property,esources,data,isbn,eid,issn,arxiv_class,editor,series,publisher,bibstem',
u'wt': u'json'
}
},
u'response': {
u'numFound': 1,
u'start': 0,
u'docs':[
{
u'read_count': 0,
u'doctype': u'article',
u'bibstem': [u'GML', u'GML...tmp'],
u'bibcode': u'2005GML...tmp....1A',
u'identifier': [u'2005GML...tmp....1A', u'10.1007/s00367-005-0006-y', u'10.1007/s00367-005-0006-y'],
u'pubdate': u'2005-12-00',
u'copyright': u'(c) 2005: Springer-Verlag',
u'aff': [u'Department of Geological Sciences, The University of Alabama'],
u'esources': [u'PUB_HTML'],
u'year': u'2005',
u'pub': u'Geo-Marine Letters',
u'doi': [u'10.1007/s00367-005-0006-y'],
u'author': [u'Aharon, Paul'],
u'pub_raw': u'Geo-Marine Letters, Online First',
u'issn': [u'0276-0460'],
u'title': [u'Catastrophic flood outbursts in mid-continent left imprints in the Gulf of Mexico'],
u'property': [u'ESOURCE', u'ARTICLE', u'REFEREED'],
u'num_references': 0,
u'num_citations': 2
}
]
}
}
data_8 = \
{
u'responseHeader':{
u'status':0,
u'QTime':41,
u'params':{
u'q':'bibcode:(2017EPJD...71..191Y or 2017JDSO...13...25K)',
u'indent':'on',
u'fl':'bibcode,author,year,pub,volume,page,page_count',
u'wt':'json'
}
},
u'response':{
u'numFound':2,
u'start':0,
u'docs':[
{
u'page_count':9,
u'year': '2017',
u'page':['191'],
u'bibcode':'2017EPJD...71..191Y',
u'author':[u'Yang, Huihui', u'Chen, Hongshan'],
u'pub':'European Physical Journal D',
u'volume':'71'
},
{
u'page_count':6,
u'year': '2017',
u'page':['25'],
u'bibcode':'2017JDSO...13...25K',
u'author':[u'Knapp, Wilfried', u'Thuemen, Chris'],
u'pub':'Journal of Double Star Observations',
u'volume':'13'
}
]
}
}
data_9 = \
{
u'responseHeader': {
u'status': 0,
u'QTime': 13,
u'params': {
u'sort': u'date desc, bibcode desc',
u'rows': u'1',
u'fq': u'{!bitset}',
u'q': u'*:*',
u'start': u'0',
u'wt': u'json',
u'fl': u'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,read_count,bibcode,identifier,copyright,keyword,doctype,[citations],comment,version,property,esources,data,isbn,eid,issn,arxiv_class,editor,series,publisher,bibstem'
}
},
u'response': {
u'start': 0,
u'numFound': 1,
u'docs': [
{
u'identifier': [u'2021arXiv210101542A', u'2021A&A...645L..11A', u'10.1051/0004-6361/202039988', u'arXiv:2101.01542', u'2021arXiv210101542A', u'10.1051/0004-6361/202039988'],
u'pubdate': u'2021-01-00',
u'abstract': u"We present a new summary statistic for weak lensing observables, higher than second order, suitable for extracting non-Gaussian cosmological information and inferring cosmological parameters. We name this statistic the `starlet ℓ<SUB>1</SUB>-norm' as it is computed via the sum of the absolute values of the starlet (wavelet) decomposition coefficients of a weak lensing map. In comparison to the state-of-the-art higher-order statistics - weak lensing peak counts and minimum counts, or the combination of the two - the ℓ<SUB>1</SUB>-norm provides a fast multi-scale calculation of the full void and peak distribution, avoiding the problem of defining what a peak is and what a void is: the ℓ<SUB>1</SUB>-norm carries the information encoded in all pixels of the map, not just the ones in local maxima and minima. We show its potential by applying it to the weak lensing convergence maps provided by the MassiveNus simulations to get constraints on the sum of neutrino masses, the matter density parameter, and the amplitude of the primordial power spectrum. We find that, in an ideal setting without further systematics, the starlet ℓ<SUB>1</SUB>-norm remarkably outperforms commonly used summary statistics, such as the power spectrum or the combination of peak and void counts, in terms of constraining power, representing a promising new unified framework to simultaneously account for the information encoded in peak counts and voids. We find that the starlet ℓ<SUB>1</SUB>-norm outperforms the power spectrum by 72% on M<SUB>ν</SUB>, 60% on Ω<SUB>m</SUB>, and 75% on A<SUB>s</SUB> for the Euclid-like setting considered; it also improves upon the state-of-the-art combination of peaks and voids for a single smoothing scale by 24% on M<SUB>ν</SUB>, 50% on Ω<SUB>m</SUB>, and 24% on A<SUB>s</SUB>.",
u'year': u'2021',
u'property': [u'ARTICLE', u'EPRINT_OPENACCESS', u'ESOURCE', u'OPENACCESS', u'PUB_OPENACCESS', u'REFEREED'],
u'page': [u'L11'],
u'bibcode': u'2021A&A...645L..11A',
u'bibstem': [u'A&A', u'A&A...645'],
u'author': [u'Ajani, Virginia', u'Starck, Jean-Luc', u'Pettorino, Valeria'],
u'aff': [u'AIM, CEA, CNRS, Université Paris-Saclay, Université de Paris, Sorbonne Paris Cité, 91191, Gif-sur-Yvette, France',
u'AIM, CEA, CNRS, Université Paris-Saclay, Université de Paris, Sorbonne Paris Cité, 91191, Gif-sur-Yvette, France',
u'AIM, CEA, CNRS, Université Paris-Saclay, Université de Paris, Sorbonne Paris Cité, 91191, Gif-sur-Yvette, France'],
u'esources': [u'EPRINT_HTML', u'EPRINT_PDF', u'PUB_HTML', u'PUB_PDF'],
u'arxiv_class': [u'astro-ph.CO'],
u'pub': u'Astronomy and Astrophysics',
u'volume': u'645',
u'issn': [u'0004-6361'],
u'doi': [u'10.1051/0004-6361/202039988'],
u'keyword': [u'cosmological parameters', u'large-scale structure of Universe', u'methods: statistical',
u'neutrinos', u'surveys', u'Astrophysics - Cosmology and Nongalactic Astrophysics'],
u'doctype': u'article',
u'read_count': 157,
u'pub_raw': u'Astronomy & Astrophysics, Volume 645, id.L11, <NUMPAGES>8</NUMPAGES> pp.',
u'eid': u'L11',
u'title': [u'Starlet ℓ<SUB>1</SUB>-norm for weak lensing cosmology'],
u'num_references': 0,
u'num_citations': 0,
}
]
}
}
data_10 = \
{
u'responseHeader': {
u'status': 0,
u'QTime': 13,
u'params': {
u'sort': u'date desc, bibcode desc',
u'rows': u'1',
u'fq': u'{!bitset}',
u'q': u'*:*',
u'start': u'0',
u'wt': u'json',
u'fl': u'author,title,year,pubdate,pub,pub_raw,issue,volume,page,page_range,aff,doi,abstract,read_count,bibcode,identifier,copyright,keyword,doctype,[citations],comment,version,property,esources,data,isbn,eid,issn,arxiv_class,editor,series,publisher,bibstem'
}
},
u'response': {
u'start': 0,
u'numFound': 1,
u'docs': [
{
u'identifier': [u'2003iha..book..109G', u'2003ASSL..285..109G', u'10.1007/0-306-48080-8_7', u'2003iha..book..109G', u'10.1007/0-306-48080-8_7'],
u'pubdate': u'2003-03-00',
u'abstract': u'At this writing, the AIPS package has been in active development and use for over 23 years. It is still the software of choice for all phases of data reduction for the Very Large Array, the most productive groundbased telescope in the world. It is the primary reduction system for most Very Long Baseline Interferometry including the VLBA and has been used to reduce data from other radio interferometers and single-dish telescopes as well as data taken at other wavelengths. The history and general structure of this software package are reviewed and a number of the scientific achievements for which it has been used are summarized.',
u'year': u'2003',
u'property': [u'ARTICLE', u'ESOURCE', u'REFEREED', u'TOC'],
u'page': [u'109'],
u'bibcode': u'2003ASSL..285..109G',
u'copyright': u'(c) 2003: Kluwer Academic Publishers',
u'author': [u'Greisen, E. W.'],
u'aff': [u'National Radio Astronomy Observatory'],
u'esources': [u'PUB_HTML'],
u'editor': [u'Heck, André'],
u'pub': u'Information Handling in Astronomy - Historical Vistas',
u'volume': u'285',
u'page_range': u'109',
u'doi': [u'10.1007/0-306-48080-8_7'],
u'bibstem': [u'ASSL', u'ASSL..285'],
u'doctype': u'inbook',
u'read_count': 35,
u'pub_raw': u'Information Handling in Astronomy - Historical Vistas. Edited by André Heck, Strasbourg Astronomical Observatory, France. Astrophysics and Space Science Library, Vol. 285. Dordrecht: Kluwer Academic Publishers, 2003., p.109',
u'title': [u'AIPS, the VLA, and the VLBA'],
u'num_references': 0,
u'num_citations': 0,
}
]
}
}
data_11 = \
{
u'responseHeader': {
u'status': 0,
u'QTime': 2,
u'params': {
u'q': u'bibcode:2016ApJ...818L..26F',
u'fl': u'read_count,bibcode,doctype,[citations],bibstem',
u'_':' u1626894650747'}},
'response': {
u'numFound': 1,
u'start': 0,
u'docs': [
{
u'bibcode': u'2016ApJ...818L..26F',
u'bibstem': [u'ApJL', u'ApJL..818'],
u'doctype': u'article',
u'read_count': 2,
u'[citations]': {u'num_references': 40, u'num_citations': 29},
u'identifier': [u'2016ApJ...818L..26F',
u'2016arXiv160201096F',
u'10.3847/2041-8205/818/2/L26',
u'2016arXiv160201096F',
u'10.3847/2041-8205/818/2/L26',
u'arXiv:1602.01096'],
}
]
}
} | 77.966535 | 5,282 | 0.606168 |
794f9ea28bc974d19108a23cf5eee15cc7f13f9f | 3,692 | py | Python | limitedToken.py | wonabru/chainnet | f8ec1e2b580af837cba3322ffe69b95156b1b9a1 | [
"MIT"
] | 5 | 2019-04-20T18:54:55.000Z | 2019-08-23T09:17:20.000Z | limitedToken.py | wonabru/chainnet | f8ec1e2b580af837cba3322ffe69b95156b1b9a1 | [
"MIT"
] | null | null | null | limitedToken.py | wonabru/chainnet | f8ec1e2b580af837cba3322ffe69b95156b1b9a1 | [
"MIT"
] | null | null | null | from account import CAccount
class CLimitedToken(CAccount):
def __init__(self, DB, tokenName, totalSupply, creator, address):
self.creator = 0
super().__init__(DB, tokenName, creator, address)
self.totalSupply = totalSupply
if creator is None:
self.owner = CAccount(DB, '?', None, -1)
else:
self.owner = creator
self.owner.setAmount(self, totalSupply)
self.setAmount(self, 0)
def copyFromBaseLimitToken(self, baseLimitToken):
token = CLimitedToken(self.kade, baseLimitToken.accountName, baseLimitToken.totalSupply,
baseLimitToken, address=baseLimitToken.address)
token.chain = baseLimitToken.chain
return token
def save(self, announce='', who_is_signing=None):
super().save(announce, who_is_signing)
self.kade.save('limitedToken:' + self.address, [self.totalSupply, self.owner.address])
def update(self, with_chain=2):
par = self.kade.get('limitedToken:' + self.address)
self.totalSupply, _address = par
super().update()
_account = CAccount(self.kade, '?', None, _address)
_account.update(with_chain)
self.owner = _account
def showAll(self):
#self.update()
totalSupply = 0
for acc in self.chain.uniqueAccounts:
#self.chain.uniqueAccounts[acc].update(with_chain=False)
self.chain.uniqueAccounts[acc].show()
totalSupply = totalSupply + self.chain.uniqueAccounts[acc].amount[self.address] \
if self.address in self.chain.uniqueAccounts[acc].amount.keys() else totalSupply
ret = self.accountName + ' total Supply: ' + str(self.totalSupply) + ' and on all accounts: ' + str(totalSupply)
return ret
def handshake(self, account_1, account_2, attacher):
if attacher is not None:
account_1.chain.uniqueAccounts[account_2.address] = account_2
account_2.chain.uniqueAccounts[account_1.address] = account_1
return [attacher]
raise Exception("Handshake", 'Handshake fails, no common connections')
def spreadToWorld(self, accounts):
for acc in accounts:
acc.save()
def attach(self, account, attacher):
from actionToken import CActionToken
if account is None:
raise Exception("Attach", "No account exists with given name ")
if isinstance(account, CLimitedToken) or isinstance(account, CActionToken):
raise Exception("Attach", "Attached account cannot be any Token.")
if account.address in self.chain.uniqueAccounts:
raise Exception("Attach", "Account is just attached.")
if self.address == account.address:
raise Exception("Attach", "Account cannot be attached to itself.")
listToSpread = self.handshake(self, account, attacher)
if listToSpread is None:
raise Exception("Attach", "Nothing to attach")
if attacher.address == listToSpread[0].address:
attacher = listToSpread[0]
listToSpread.remove(attacher)
if attacher.address not in self.chain.accountsCreated.keys():
self.chain.accountsCreated[attacher.address] = 1
else:
self.chain.accountsCreated[attacher.address] += 1
account.setAmount(self, 0)
self.chain.uniqueAccounts[account.address] = account
account.chain.uniqueAccounts[self.address] = self
listToSpread.append(self)
listToSpread.append(attacher)
listToSpread.append(account)
self.spreadToWorld(listToSpread)
return True | 37.292929 | 120 | 0.644637 |
794f9ef1cc01d5a45b19ccc700aabdf94b69c72e | 1,003 | py | Python | commands/open_in_browser.py | deathaxe/FileManager | 08becc98f9e71f1d76c096b84708c927eaee9eba | [
"MIT"
] | null | null | null | commands/open_in_browser.py | deathaxe/FileManager | 08becc98f9e71f1d76c096b84708c927eaee9eba | [
"MIT"
] | null | null | null | commands/open_in_browser.py | deathaxe/FileManager | 08becc98f9e71f1d76c096b84708c927eaee9eba | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
import os
import sublime
from .fmcommand import FmWindowCommand
class FmOpenInBrowserCommand(FmWindowCommand):
def run(self, paths=None, *args, **kwargs):
folders = self.window.folders()
view = self.window.active_view()
url = view.settings().get("url")
if url is not None:
url = url.strip("/")
for path in paths or [view.file_name()]:
if url is None:
self.open_url("file:///" + path)
else:
for folder in folders:
if folder in path:
if os.path.splitext(os.path.basename(path))[0] == "index":
path = os.path.dirname(path)
self.open_url(url + path.replace(folder, ""))
break
else:
self.open_url("file:///" + path)
def open_url(self, url):
sublime.run_command("open_url", {"url": url})
| 30.393939 | 82 | 0.499501 |
794f9ef95b4dac9c46afc23a2495c5c7f20b631e | 7,352 | py | Python | pyslim/provenance.py | benjeffery/pyslim | 580f17f950dcb904295884a5725a14563885c142 | [
"MIT"
] | null | null | null | pyslim/provenance.py | benjeffery/pyslim | 580f17f950dcb904295884a5725a14563885c142 | [
"MIT"
] | null | null | null | pyslim/provenance.py | benjeffery/pyslim | 580f17f950dcb904295884a5725a14563885c142 | [
"MIT"
] | null | null | null | from __future__ import print_function
import platform
import warnings
import attr
import json
import msprime
import tskit
from . import _version
__version__ = _version.pyslim_version
@attr.s
class ProvenanceMetadata(object):
model_type = attr.ib()
slim_generation = attr.ib()
file_version = attr.ib()
def slim_provenance_version(provenance):
"""
    Parses a provenance record, returning a (bool, string) tuple: whether the
    record is a SLiM provenance entry, and the file format version, which is
    "unknown" if the record is not a SLiM entry.
:param Provenance provenance: The provenance entry, as for instance obtained
from ts.provenance(0).
:return: A (bool, string) tuple (is_slim, version).
"""
record = json.loads(provenance.record)
software_name = "unknown"
file_version = "unknown"
# >= SLiM 3.1 // file version >= 0.2
try:
software_name = record["software"]["name"]
except:
software_name = "unknown"
if software_name == "SLiM":
try:
file_version = record["slim"]["file_version"]
except:
pass
else:
# SLiM 3.0 // file version 0.1
try:
software_name = record["program"]
except:
pass
try:
file_version = record["file_version"]
except:
pass
is_slim = (software_name == "SLiM") and (file_version in ["0.1", "0.2", "0.3", "0.4"])
return is_slim, file_version
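# Illustrative sketch (not part of the original module): every provenance row of
# a tree sequence can be classified with the helper above. "example.trees" is a
# placeholder file name, and ``tskit.load`` is the standard tskit loader.
#
#   import tskit
#   ts = tskit.load("example.trees")
#   for p in ts.provenances():
#       is_slim, version = slim_provenance_version(p)
#       print(is_slim, version)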
def parse_provenance(provenance):
'''
Parses a SLiM provenance entry, returning a :class:`ProvenanceMetadata`
object, or raising an error if the entry is not a SLiM provenance entry.
:param Provenance provenance: The provenance entry, as for instance obtained
from ts.provenance(0).
:rtype ProvenanceMetadata:
'''
is_slim, file_version = slim_provenance_version(provenance)
if is_slim:
record = json.loads(provenance.record)
if file_version == "0.1":
out = ProvenanceMetadata(record['model_type'],
record['generation'],
file_version)
else: # >= 0.2
out = ProvenanceMetadata(record['parameters']['model_type'],
record['slim']["generation"],
file_version)
else:
raise ValueError("Not a SLiM provenance entry.")
return out
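# Usage sketch (illustration only, mirroring the docstring above): parse the
# first provenance entry of a loaded tree sequence; a ValueError is raised if
# that entry was not written by SLiM. ``ts`` is assumed from the sketch above.
#
#   meta = parse_provenance(ts.provenance(0))
#   print(meta.model_type, meta.slim_generation, meta.file_version)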
def get_provenance(ts, only_last=True):
'''
    Extracts the model type, slim generation, and file version from either
the last entry in the provenance table that is tagged with "program"="SLiM"
(if ``only_last=True``) or a list of all of them (otherwise).
:param SlimTreeSequence ts: The tree sequence.
:param bool only_last: Whether to return only the last SLiM provenance entry,
(otherwise, returns a list of all SLiM entries).
:rtype ProvenanceMetadata:
'''
provenances = []
for j, p in enumerate(ts.tables.provenances):
is_slim, _ = slim_provenance_version(p)
if is_slim:
out = parse_provenance(p)
provenances.append(out)
if len(provenances) == 0:
raise ValueError("Tree sequence contains no SLiM provenance entries"
"(or your pyslim is out of date).")
if only_last:
return provenances[-1]
else:
return provenances
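# Hedged example: the default returns only the newest SLiM entry, while
# only_last=False returns every SLiM entry in table order, which matters for
# files that have passed through SLiM more than once. ``ts`` is assumed above.
#
#   newest = get_provenance(ts)
#   history = get_provenance(ts, only_last=False)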
def upgrade_slim_provenance(tables):
"""
Copies the last provenance entry from a previous SLiM file version to that
required by the current file version.
:param TableCollection tables: the table collection
"""
prov_info = [(slim_provenance_version(p), json.loads(p.record))
for p in tables.provenances]
slim_prov = [x for x in prov_info if x[0][0]]
if len(slim_prov) == 0:
raise ValueError("Tree sequence contains no SLiM provenance entries.")
(is_slim, file_version), record = slim_prov[len(slim_prov)-1]
if not (float(file_version) < 0.4):
warnings.warn("File version is not older than 0.4; not doing anything.")
if not is_slim:
raise ValueError("Not a SLiM provenance entry.")
if file_version == "0.1":
new_record = make_slim_provenance_dict(
record['model_type'],
record['generation'])
new_record['parameters']['command'] = ['pyslim', 'convert']
else:
new_record = make_slim_provenance_dict(
record['parameters']['model_type'],
record['slim']['generation'])
new_record['parameters']['command'] = ['pyslim', 'convert']
tskit.validate_provenance(new_record)
tables.provenances.add_row(json.dumps(new_record))
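# Rough usage sketch (assumes ``ts`` was produced by an older SLiM release):
# the upgrade appends a new, current-format provenance row to the tables.
#
#   tables = ts.dump_tables()
#   upgrade_slim_provenance(tables)
#   ts_upgraded = tables.tree_sequence()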
def get_environment():
"""
    Returns a dictionary describing the environment in which pyslim
    is currently running.
"""
env = {
"libraries": {
},
"parameters" : {
"command" : []
},
"os": {
"system": platform.system(),
"node": platform.node(),
"release": platform.release(),
"version": platform.version(),
"machine": platform.machine(),
},
"python": {
"implementation": platform.python_implementation(),
"version": platform.python_version_tuple(),
}
}
return env
def make_pyslim_provenance_dict():
"""
Returns a dictionary encoding the information about this version of pyslim.
"""
document = {
"schema_version": "1.0.0",
"software": {
"name" : "pyslim",
"version": __version__,
},
"parameters": {
"command": {}
},
"environment": get_environment()
}
return document
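# Example sketch: the dictionary built above is meant to be serialized to JSON
# and appended as a provenance row, in the same way upgrade_slim_provenance()
# does above for SLiM records. ``tables`` here is an assumed TableCollection.
#
#   record = make_pyslim_provenance_dict()
#   tskit.validate_provenance(record)
#   tables.provenances.add_row(json.dumps(record))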
def make_slim_provenance_dict(model_type, slim_generation):
"""
Returns a dictionary encoding necessary provenance information for a SLiM tree sequence.
"""
document = {
"schema_version": "1.0.0",
"software": {
"name" : "SLiM",
"version": "3.3.2"
},
"parameters": {
"command": ['pyslim'],
"model_type": model_type,
},
"environment": {},
"metadata": {
"individuals": {
"flags": {
"16": {
"name" : "SLIM_TSK_INDIVIDUAL_ALIVE",
"description" : "the individual was alive "
+ "at the time the file was written",
},
"17": {
"name" : "SLIM_TSK_INDIVIDUAL_REMEMBERED",
"description" : "the individual was requested "
+ "by the user to be remembered",
},
"18": {
"name" : "SLIM_TSK_INDIVIDUAL_FIRST_GEN",
"description" : "the individual was in the first "
+ "generation of a new population"
}
}
}
},
"slim": {
"file_version": "0.4",
"generation": slim_generation,
"model": ""
}
}
return document
| 32.104803 | 92 | 0.556447 |
794f9f16649b73e6ccd0f6148072dc9c04c421c0 | 158 | py | Python | dps-api/dps_api/blueprints/base.py | mrendi29/dental-patient-schedule | d5e2777d135d7379d1941cd911b71fe2be2fc759 | [
"BSD-3-Clause"
] | null | null | null | dps-api/dps_api/blueprints/base.py | mrendi29/dental-patient-schedule | d5e2777d135d7379d1941cd911b71fe2be2fc759 | [
"BSD-3-Clause"
] | null | null | null | dps-api/dps_api/blueprints/base.py | mrendi29/dental-patient-schedule | d5e2777d135d7379d1941cd911b71fe2be2fc759 | [
"BSD-3-Clause"
] | null | null | null | from flask import Blueprint
bp = Blueprint("base", __name__, url_prefix="/base")
@bp.route("/")
def helloWorld():
return "Hello, cross-origin-world!"
| 15.8 | 52 | 0.683544 |
794f9f67d3fded0935b5e1c527e46e5fad47c079 | 10,609 | py | Python | google/ads/googleads/v5/services/services/ad_group_criterion_simulation_service/transports/grpc.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v5/services/services/ad_group_criterion_simulation_service/transports/grpc.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v5/services/services/ad_group_criterion_simulation_service/transports/grpc.py | batardo/google-ads-python | a39748521847e85138fca593f3be2681352ad024 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v5.resources.types import (
ad_group_criterion_simulation,
)
from google.ads.googleads.v5.services.types import (
ad_group_criterion_simulation_service,
)
from .base import (
AdGroupCriterionSimulationServiceTransport,
DEFAULT_CLIENT_INFO,
)
class AdGroupCriterionSimulationServiceGrpcTransport(
AdGroupCriterionSimulationServiceTransport
):
"""gRPC backend transport for AdGroupCriterionSimulationService.
Service to fetch ad group criterion simulations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group_criterion_simulation(
self,
) -> Callable[
[
ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest
],
ad_group_criterion_simulation.AdGroupCriterionSimulation,
]:
r"""Return a callable for the get ad group criterion
simulation method over gRPC.
Returns the requested ad group criterion simulation
in full detail.
Returns:
Callable[[~.GetAdGroupCriterionSimulationRequest],
~.AdGroupCriterionSimulation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_ad_group_criterion_simulation" not in self._stubs:
self._stubs[
"get_ad_group_criterion_simulation"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v5.services.AdGroupCriterionSimulationService/GetAdGroupCriterionSimulation",
request_serializer=ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest.serialize,
response_deserializer=ad_group_criterion_simulation.AdGroupCriterionSimulation.deserialize,
)
return self._stubs["get_ad_group_criterion_simulation"]
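# Usage sketch (illustrative only): the property above yields a callable that
# sends a GetAdGroupCriterionSimulationRequest over the wrapped channel. The
# resource name below is a fabricated placeholder, not a real simulation.
#
#   request = ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest(
#       resource_name="customers/1234567890/adGroupCriterionSimulations/111~222~CPC_BID~UNIFORM~2020-01-01~2020-01-07")
#   simulation = transport.get_ad_group_criterion_simulation(request)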
__all__ = ("AdGroupCriterionSimulationServiceGrpcTransport",)
| 40.803846 | 120 | 0.629937 |
794f9f8d2af8a5005d2a0b746a7d3f1e89400c71 | 9,316 | py | Python | doc/source/conf.py | pnavaro/ElasticFDA.jl | 0ed8c3f483c3a62f4215a6b65894da34e77eefc1 | [
"MIT"
] | 7 | 2017-02-05T23:39:14.000Z | 2021-06-03T06:41:47.000Z | doc/source/conf.py | pnavaro/ElasticFDA.jl | 0ed8c3f483c3a62f4215a6b65894da34e77eefc1 | [
"MIT"
] | 3 | 2016-08-31T20:48:32.000Z | 2021-12-15T02:18:36.000Z | doc/source/conf.py | pnavaro/ElasticFDA.jl | 0ed8c3f483c3a62f4215a6b65894da34e77eefc1 | [
"MIT"
] | 6 | 2016-07-12T02:13:16.000Z | 2020-09-22T17:42:16.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ElasticFDA.jl documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 13 21:02:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ElasticFDA.jl'
copyright = '2018, J. Derek Tucker'
author = 'J. Derek Tucker'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.1'
# The full version, including alpha/beta/rc tags.
release = '0.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ElasticFDAjldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ElasticFDAjl.tex', 'ElasticFDA.jl Documentation',
'J. Derek Tucker', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'elasticfdajl', 'ElasticFDA.jl Documentation',
[u'J. Derek Tucker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ElasticFDAjl', 'ElasticFDA.jl Documentation',
u'J. Derek Tucker', 'ElasticFDAjl', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 32.573427 | 79 | 0.716724 |
794fa11720caf34527dd61fbff5efb203f5765db | 1,783 | py | Python | utils.py | stratosGithub/COCO-GAN | 44805d71166dbe39d57acc2ece8d1e659161b2a6 | [
"MIT"
] | 282 | 2019-04-01T11:14:10.000Z | 2022-02-15T11:07:57.000Z | utils.py | CheapLife/dataset | 727674a6ff20114cd432971f309a2d4115ef4b0d | [
"MIT"
] | 15 | 2019-05-02T12:24:25.000Z | 2021-09-06T09:37:03.000Z | utils.py | CheapLife/dataset | 727674a6ff20114cd432971f309a2d4115ef4b0d | [
"MIT"
] | 36 | 2019-04-26T05:12:58.000Z | 2022-01-20T13:24:48.000Z | import os
import tensorflow as tf
import numpy as np
from scipy.misc import imsave
def aug_cylindrical_data_tensor(batch_images_t):
width = batch_images_t.shape[2].value
rotate_dist = tf.round(tf.random.uniform([], 0, 1) * width)
rotate_dist = tf.cast(rotate_dist, tf.int32)
batch_aug_results = tf.concat([
batch_images_t[:, :, rotate_dist:], batch_images_t[:, :, :rotate_dist]
], axis=2)
return batch_aug_results
def aug_cylindrical_data_numpy(batch_images):
width = batch_images_t.shape[2]
rotate_dist = int(round(np.random.uniform(0, 1) * width))
batch_aug_results = np.concatenate([
batch_images[:, :, rotate_dist:], batch_images[:, :, :rotate_dist]
], axis=2)
return batch_aug_results
def save_manifold_images(images, size, image_path):
images = (images+1) / 2
manifold_image = np.squeeze(compose_manifold_images(images, size))
return imsave(image_path, manifold_image)
def compose_manifold_images(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter ' +
'must have dimensions: HxW or HxWx3 or HxWx4, got {}'.format(images.shape))
| 34.288462 | 87 | 0.610208 |
794fa1be31ca7f690cc9d73c33fe02c028432bcf | 13,575 | py | Python | groupdocs_editor_cloud/apis/edit_api.py | groupdocs-editor-cloud/groupdocs-editor-cloud-python | e766afc58f244245835178a81f1832b1780aa500 | [
"MIT"
] | null | null | null | groupdocs_editor_cloud/apis/edit_api.py | groupdocs-editor-cloud/groupdocs-editor-cloud-python | e766afc58f244245835178a81f1832b1780aa500 | [
"MIT"
] | null | null | null | groupdocs_editor_cloud/apis/edit_api.py | groupdocs-editor-cloud/groupdocs-editor-cloud-python | e766afc58f244245835178a81f1832b1780aa500 | [
"MIT"
] | null | null | null | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="editor_api.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from groupdocs_editor_cloud.auth import Auth
from groupdocs_editor_cloud.api_client import ApiClient
from groupdocs_editor_cloud.api_exception import ApiException
from groupdocs_editor_cloud.configuration import Configuration
class EditApi(object):
"""
GroupDocs.Editor Cloud API
:param configuration: API configuration
"""
def __init__(self, configuration):
api_client = ApiClient(configuration)
self.auth = Auth(configuration, api_client)
self.api_client = api_client
self.configuration = configuration
def close(self): # noqa: E501
"""
Closes thread pool. This method should be called when
methods are executed asynchronously (is_async=True is passed as parameter)
and this instance of EditApi is not going to be used any more.
"""
if self.api_client is not None:
if(self.api_client.pool is not None):
self.api_client.pool.close()
self.api_client.pool.join()
self.api_client.pool = None
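    # Hedged example: after issuing calls with is_async=True, consume the
    # returned results and then release the pool. ``request`` is assumed here.
    #
    #   async_result = api.load(request, is_async=True)
    #   load_result = async_result.get()
    #   api.close()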
@classmethod
def from_keys(cls, app_sid, app_key):
"""
Initializes new instance of EditApi with API keys
:param app_sid Application identifier (App SID)
:param app_key Application private key (App Key)
"""
configuration = Configuration(app_sid, app_key)
return EditApi(configuration)
@classmethod
def from_config(cls, configuration):
"""
Initializes new instance of EditApi with configuration options
:param configuration API configuration
"""
return EditApi(configuration)
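    # Construction sketch (the credentials shown are placeholders, not real
    # keys; both factory methods above return a ready-to-use EditApi):
    #
    #   api = EditApi.from_keys("your-app-sid", "your-app-key")
    #   # or, with an explicit Configuration object:
    #   api = EditApi.from_config(Configuration("your-app-sid", "your-app-key"))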
def load(self, request,**kwargs): # noqa: E501
"""Load document for editing # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass is_async=True
:param is_async bool
:param LoadOptions load_options: The document load options (required)
:return: LoadResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('is_async'):
return self._load_with_http_info(request, **kwargs) # noqa: E501
(data) = self._load_with_http_info(request, **kwargs) # noqa: E501
return data
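    # Illustrative call (hedged: the model and field names below are assumptions
    # based on this SDK's request/options classes, and the storage path is a
    # placeholder for a document already uploaded to cloud storage):
    #
    #   options = LoadOptions()
    #   options.file_info = FileInfo()
    #   options.file_info.file_path = "WordProcessing/sample.docx"
    #   load_result = api.load(LoadRequest(options))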
def _load_with_http_info(self, request, **kwargs): # noqa: E501
"""Load document for editing # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass is_async=True
:param is_async bool
:param LoadRequest request object with parameters
:return: LoadResult
If the method is called asynchronously,
returns the request thread.
"""
params = locals()
params['is_async'] = ''
params['_return_http_data_only'] = False
params['_preload_content'] = True
params['_request_timeout'] = ''
for key, val in six.iteritems(params['kwargs']):
if key not in params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method load" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'load_options' is set
if request.load_options is None:
raise ValueError("Missing the required parameter `load_options` when calling `load`") # noqa: E501
collection_formats = {}
path = '/editor/load'
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = []
body_params = None
if request.load_options is not None:
body_params = request.load_options
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
call_kwargs = {
'resource_path':path,
'method':'POST',
'path_params':path_params,
'query_params':query_params,
'header_params':header_params,
'body':body_params,
'post_params':form_params,
'files':local_var_files,
'response_type':'LoadResult', # noqa: E501
'auth_settings':self.auth.get_auth_settings(),
'is_async':params.get('is_async'),
'_return_http_data_only':params.get('_return_http_data_only'),
'_preload_content':params.get('_preload_content', True),
'_request_timeout':params.get('_request_timeout'),
'collection_formats':collection_formats
}
return self.api_client.call_api(**call_kwargs) # noqa: E501
def save(self, request,**kwargs): # noqa: E501
"""Save document after editing # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass is_async=True
:param is_async bool
:param SaveOptions save_options: Edited document save options (required)
:return: DocumentResult
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('is_async'):
return self._save_with_http_info(request, **kwargs) # noqa: E501
(data) = self._save_with_http_info(request, **kwargs) # noqa: E501
return data
def _save_with_http_info(self, request, **kwargs): # noqa: E501
"""Save document after editing # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass is_async=True
:param is_async bool
:param SaveRequest request object with parameters
:return: DocumentResult
If the method is called asynchronously,
returns the request thread.
"""
params = locals()
params['is_async'] = ''
params['_return_http_data_only'] = False
params['_preload_content'] = True
params['_request_timeout'] = ''
for key, val in six.iteritems(params['kwargs']):
if key not in params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'save_options' is set
if request.save_options is None:
raise ValueError("Missing the required parameter `save_options` when calling `save`") # noqa: E501
collection_formats = {}
path = '/editor/save'
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = []
body_params = None
if request.save_options is not None:
body_params = request.save_options
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
call_kwargs = {
'resource_path':path,
'method':'POST',
'path_params':path_params,
'query_params':query_params,
'header_params':header_params,
'body':body_params,
'post_params':form_params,
'files':local_var_files,
'response_type':'DocumentResult', # noqa: E501
'auth_settings':self.auth.get_auth_settings(),
'is_async':params.get('is_async'),
'_return_http_data_only':params.get('_return_http_data_only'),
'_preload_content':params.get('_preload_content', True),
'_request_timeout':params.get('_request_timeout'),
'collection_formats':collection_formats
}
return self.api_client.call_api(**call_kwargs) # noqa: E501
def __downcase_first_letter(self, s):
if len(s) == 0:
            return s
else:
return s[0].lower() + s[1:]
# coding: utf-8
# --------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="load_request.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# --------------------------------------------------------------------------------
class LoadRequest(object):
"""
Request model for load operation.
    :param load_options: The document load options
"""
def __init__(self, load_options):
"""Initializes new instance of LoadRequest.""" # noqa: E501
self.load_options = load_options
# coding: utf-8
# --------------------------------------------------------------------------------
# <copyright company="Aspose Pty Ltd" file="save_request.py">
# Copyright (c) 2003-2021 Aspose Pty Ltd
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# --------------------------------------------------------------------------------
class SaveRequest(object):
"""
Request model for save operation.
    :param save_options: Edited document save options
"""
def __init__(self, save_options):
"""Initializes new instance of SaveRequest.""" # noqa: E501
self.save_options = save_options
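# A minimal usage sketch tying the classes above together. The import line and
# the LoadOptions/SaveOptions option classes are assumptions taken from the
# docstrings, not a verified description of this package's modules:
#
#   from groupdocs_editor_cloud import LoadOptions, SaveOptions
#
#   edit_api = EditApi.from_keys('your-app-sid', 'your-app-key')
#   load_result = edit_api.load(LoadRequest(LoadOptions()))   # -> LoadResult
#   saved = edit_api.save(SaveRequest(SaveOptions()))         # -> DocumentResult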
| 39.234104 | 111 | 0.628729 |
794fa20f2c316a6d48ddab0017cb13f4c654fad1 | 92 | py | Python | casemgmt_example/__init__.py | saolsen/oso-casemgmt-django | 05e7e1d54c0ca274341df3fa53c82b9735c377c6 | [
"MIT"
] | 3 | 2020-12-18T13:52:16.000Z | 2021-02-17T17:05:28.000Z | casemgmt_example/__init__.py | saolsen/oso-casemgmt-django | 05e7e1d54c0ca274341df3fa53c82b9735c377c6 | [
"MIT"
] | 1 | 2021-04-13T18:58:17.000Z | 2021-04-13T18:58:17.000Z | casemgmt_example/__init__.py | saolsen/oso-casemgmt-django | 05e7e1d54c0ca274341df3fa53c82b9735c377c6 | [
"MIT"
] | 2 | 2020-12-21T15:10:29.000Z | 2021-02-17T19:22:05.000Z | # Register Oso extensions, etc
from casemgmt_example import auth
auth.register_extensions()
| 23 | 33 | 0.836957 |
794fa282ae38f525bb5d567414f2d75f6ae365dd | 777 | py | Python | main.py | UCY-LINC-LAB/fogify | 80dee9e2079ef45c49a6cd6629a3bf0b31461afb | [
"Apache-2.0"
] | 21 | 2020-11-09T07:47:07.000Z | 2022-02-13T10:58:10.000Z | main.py | UCY-LINC-LAB/fogify | 80dee9e2079ef45c49a6cd6629a3bf0b31461afb | [
"Apache-2.0"
] | 7 | 2020-12-16T13:47:23.000Z | 2021-06-14T12:21:43.000Z | main.py | UCY-LINC-LAB/fogify | 80dee9e2079ef45c49a6cd6629a3bf0b31461afb | [
"Apache-2.0"
] | 3 | 2020-11-18T07:27:53.000Z | 2021-05-27T14:42:57.000Z | import argparse
from agent.agent import Agent
from controller.controller import Controller
from flask_api import FlaskAPI
def initialize():
app = FlaskAPI(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('--agent', help='Run agent', default=False, action="store_true")
parser.add_argument('--agent-ip', help='The IP of the agent', default="localhost")
parser.add_argument('--controller', help='Run Controller', action="store_true")
args = parser.parse_args()
    cmd = None
    if args.agent:
        cmd = Agent(args, app)
    if args.controller:
        cmd = Controller(args, app)
    if cmd is None:
        parser.error('one of --agent or --controller must be specified')
    return cmd
cmd = initialize()
app = cmd.app
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0', port=5500 if type(cmd) == Agent else 5000)
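# Typical invocations (the IP address is a placeholder):
#   python main.py --controller                  # controller API on port 5000
#   python main.py --agent --agent-ip 10.0.0.5   # agent API on port 5500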
| 25.9 | 88 | 0.688546 |
794fa3bddbf811a69aaf93ed0fcebc589e188140 | 278 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/largest-unique-number.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/largest-unique-number.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/largest-unique-number.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n)
# Space: O(n)
import collections
class Solution(object):
def largestUniqueNumber(self, A):
"""
:type A: List[int]
:rtype: int
"""
A.append(-1)
return max(k for k,v in collections.Counter(A).items() if v == 1)
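# Example (hypothetical input): the answer is the largest value occurring exactly once.
#   Solution().largestUniqueNumber([5, 7, 3, 9, 4, 9, 8, 3, 1])  # -> 8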
| 18.533333 | 73 | 0.535971 |
794fa3fb1d80dbd8c925e73f24debd5b12550073 | 5,746 | py | Python | oil_lang/builtin_funcs.py | adisbladis/oil | 8ae78500da543dfa899404bdca830b90277d17ad | [
"Apache-2.0"
] | null | null | null | oil_lang/builtin_funcs.py | adisbladis/oil | 8ae78500da543dfa899404bdca830b90277d17ad | [
"Apache-2.0"
] | null | null | null | oil_lang/builtin_funcs.py | adisbladis/oil | 8ae78500da543dfa899404bdca830b90277d17ad | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python2
"""
builtin_funcs.py
"""
from __future__ import print_function
from _devbuild.gen.runtime_asdl import value, scope_e
from _devbuild.gen.syntax_asdl import sh_lhs_expr
from oil_lang import objects
def SetGlobalFunc(mem, name, func):
"""Used by bin/oil.py to set split(), etc."""
assert callable(func), func
mem.SetVar(sh_lhs_expr.Name(name), value.Obj(func), (), scope_e.GlobalOnly)
def _Join(array, delim=''):
"""
func join(items Array[Str]) Str ...
"""
# default is not ' '?
return delim.join(array)
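# For example, _Join(['a', 'b', 'c'], ',') returns 'a,b,c'; with the default
# empty delimiter, _Join(['a', 'b', 'c']) returns 'abc'.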
def Init(mem):
"""Populate the top level namespace with some builtin functions."""
#
# Oil
#
SetGlobalFunc(mem, 'join', _Join)
# NOTE: split() is set in main(), since it depends on the Splitter() object /
# $IFS.
# TODO: How to ask for Python's split algorithm? Or Awk's?
#
# Borrowed from Python
#
SetGlobalFunc(mem, 'Table', objects.Table)
SetGlobalFunc(mem, 'Array', objects.ParameterizedArray())
# Types:
# TODO: Should these be Bool Int Float Str List Dict?
SetGlobalFunc(mem, 'Bool', bool)
SetGlobalFunc(mem, 'Int', int)
# TODO: Enable float
# OVM: PyOS_string_to_double()
# osh: Python/ovm_stub_pystrtod.c:10: PyOS_string_to_double: Assertion `0' failed.
SetGlobalFunc(mem, 'Float', float)
SetGlobalFunc(mem, 'Tuple', tuple)
SetGlobalFunc(mem, 'Str', str)
SetGlobalFunc(mem, 'List', list)
SetGlobalFunc(mem, 'Dict', dict)
# Singleton tuple!
SetGlobalFunc(mem, 'tup', lambda x: (x,))
SetGlobalFunc(mem, 'len', len)
SetGlobalFunc(mem, 'max', max)
SetGlobalFunc(mem, 'min', min)
# NOTE: cmp() deprecated in Python 3
# Utilities
SetGlobalFunc(mem, 'abs', abs)
# round()
# divmod() - probably useful? Look at the implementation
# Return an iterable like Python 3. Used for 'step' param.
SetGlobalFunc(mem, 'range', xrange)
# For the 'step' param.
SetGlobalFunc(mem, 'slice', slice)
# Not the best API, but requires no new syntax, and is familiar to Python
# users.
SetGlobalFunc(mem, 'enumerate', enumerate)
# I never use this, but it's familiar
SetGlobalFunc(mem, 'zip', zip)
SetGlobalFunc(mem, 'any', any)
SetGlobalFunc(mem, 'all', all)
SetGlobalFunc(mem, 'sum', sum)
# We maintain the L.sort() and sorted(L) distinction.
# TODO: How do these interact with rows of a data frame?
SetGlobalFunc(mem, 'sorted', sorted)
SetGlobalFunc(mem, 'reversed', reversed)
# TODO: ord() should UTF-8 decode its argument
# ord('\u100') -> 256
#
# This can be accomplished by the str.runes() iterator though?
#
#SetGlobalFunc(mem, 'ord', ord)
#
# unichr should ENCODE its argument
# >>> unichr(0x10000)
# u'\U00010000'
# >>> unichr(0x1000000)
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# ValueError: unichr() arg not in range(0x110000) (wide Python build)
# bin(5) -> 0b101 TODO: Take over %b in printf
# oct() -> '%o' % 9
# hex(17) -> 0x11
# NOTE: '%x' % 17 gives '11'. Somehow there's no equivalent for binary?
# Other builtins:
# Exceptions:
# IndexError
# KeyError
# IOError (should be same as OSError)
# StopIteration
# RuntimeError
# There's also float.hex() and float.fromhex()
# Types:
# type()
# callable() -- test if it's callable
# isinstance()
# issubclass()
#
# All Objects: (Ruby has Kernel?)
# id() - unique ID
# hash()
# object() -- what is this for? For subtyping?
# repr() -- are we maintaining repr and str? We also have a repr builtin.
#
# Introspection:
# intern()
# dir() -- list attributes names. Might want this.
# globals(), locals()
#
# Iterators:
# iter([]) -> listiterator
# next() -- do we need it?
#
# Attributes:
# delattr, hasattr, getattr, setattr
# Not including:
# - map, filter (use list comp), reduce
# - open: use redirect
# - pow() -- do 3^5, and there's no add()
# - input(), raw_input() -- read builtin instead?
# - super() -- object system is different
# - python marks these as deprecated: apply, coerce, buffer, intern
#
# Other Types:
# - set() -- I think the dict type will subsume this
# - slice() -- never needed it
# - these seem confusing
# - memoryview()
# - bytearray()
# - buffer() (deprecated by Python)
# Modules that could be builtin:
# - math -- sin(), a lot of floating point stuff like frexp()
# - new in Python 3.4: statistics
# - hashlib, e.g. useful for writing a package manager or build system
# - heapq, bisect: algorithms, somewhat rarely used
# - datetime -- hm we need some sort of better replacement
# - strftime() because awk has it
# - itertools, functools -- I don't really use these
#
# libc wrappers:
# - normpath()
# - replacement for $RANDOM. rand() and srand()?
# left to external utils:
# - mkdir, readlink()
# web formats:
# - URL serialization: cleaned up urllib.quote_plus, cgi.parse_qs
# - encodeURIComponent()
# - generate a form that generates a URL
# - cookie serialization
# - HTML escaping
#
# - maybe: base64, although the external utility might be OK
# Other serialization:
# - POSIX shell code gen
# - C code gen
# - Python code gen, etc.
# - JavaScript can use # JSON.
# NOTE:
# json and tsv2 are styled as BUILTINS
# python: json.load, dump
# js: JSON.parse, stringify
# better: read, write
#
# json read :x < foo.json
# tsv2 read :x < foo.tsv2
#
# json write -indent 2 :mydict > out.txt
# tsv2 write -indent 2 :mytable > out.txt
#
#
# Awk
#
# https://www.gnu.org/software/gawk/manual/gawk.html#Library-Functions
# Already covered: strtonum(), round()
# need strftime
| 26.601852 | 84 | 0.643752 |
794fa6c64df7a848b230937d74c9404e2c10adab | 664 | py | Python | plan/utils.py | timgates42/plan | 1e976dcfd7cbe4ae7f95a4ff48f7e40a369ca8ee | [
"BSD-3-Clause"
] | 553 | 2015-01-04T08:41:44.000Z | 2022-02-26T03:11:26.000Z | plan/utils.py | jcao1022/plan | 1e976dcfd7cbe4ae7f95a4ff48f7e40a369ca8ee | [
"BSD-3-Clause"
] | 5 | 2015-01-03T14:55:41.000Z | 2020-05-14T03:02:38.000Z | plan/utils.py | jcao1022/plan | 1e976dcfd7cbe4ae7f95a4ff48f7e40a369ca8ee | [
"BSD-3-Clause"
] | 71 | 2015-01-17T19:52:23.000Z | 2020-04-30T14:11:38.000Z | # -*- coding: utf-8 -*-
"""
plan.utils
~~~~~~~~~~
Various utilities for Plan.
:copyright: (c) 2014 by Shipeng Feng.
:license: BSD, see LICENSE for more details.
"""
from subprocess import Popen, PIPE
def communicate_process(command, stdin=None, *args, **kwargs):
"""Run the command described by command, then interact with process.
:param stdin: the data you want to send to stdin.
:return: a tuple of stdout, stderr and returncode
"""
p = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE, *args, **kwargs)
output, error = p.communicate(stdin)
returncode = p.returncode
return output, error, returncode
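# A minimal usage sketch (the command is a placeholder):
#
#   output, error, returncode = communicate_process(['echo', 'hello'])
#   # On Python 3: output == b'hello\n', error == b'', returncode == 0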
| 26.56 | 77 | 0.659639 |
794fa8a982689c038439abc824687d894d16dc12 | 489 | py | Python | app/schemas.py | rikkih/softwaerie | 716e1e06d3131364beac61ed09ef9fe75502d6c9 | [
"MIT"
] | null | null | null | app/schemas.py | rikkih/softwaerie | 716e1e06d3131364beac61ed09ef9fe75502d6c9 | [
"MIT"
] | null | null | null | app/schemas.py | rikkih/softwaerie | 716e1e06d3131364beac61ed09ef9fe75502d6c9 | [
"MIT"
] | null | null | null | from typing import Optional
from pydantic import BaseModel, UUID4
class PostBase(BaseModel):
title: Optional[str] = None
body: Optional[str] = None
# Properties to receive
class PostCreate(PostBase):
title: str
body: str
class PostUpdate(PostBase):
pass
class PostInDBBase(PostBase):
id: Optional[UUID4] = None
class Config:
orm_mode = True
# Properties to return
class Post(PostInDBBase):
pass
class PostInDB(PostInDBBase):
pass
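# For example, an incoming create payload is validated against PostCreate:
#
#   PostCreate(title="Hello", body="First post")   # ok
#   PostCreate(title="Hello")                      # raises a ValidationError (body is required)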
| 13.583333 | 37 | 0.695297 |
794fa9261639c08a9ae7ce27e0d43216b4ed77de | 4,559 | py | Python | formulas/powers.py | pascalmolin/fungrim | f498ad76a385fe7a3b932a314747b7aa2ff475da | [
"MIT"
] | null | null | null | formulas/powers.py | pascalmolin/fungrim | f498ad76a385fe7a3b932a314747b7aa2ff475da | [
"MIT"
] | null | null | null | formulas/powers.py | pascalmolin/fungrim | f498ad76a385fe7a3b932a314747b7aa2ff475da | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .expr import *
def_Topic(
Title("Powers"),
Entries(
"ef9f8a",
),
Section("Integer exponents"),
Entries(
"d316bc",
"310f36",
"a249f6",
"6c2b31",
"c53d94",
),
Section("Elementary functions"),
Entries(
"4d6416",
"634687",
"2e0d99",
),
Section("Complex parts"),
Entries(
"0aac97",
"bc4d0a",
"caf8cf",
"18873d",
),
Section("Expansion"),
Entries(
"2090c3",
),
)
make_entry(ID("ef9f8a"),
SymbolDefinition(Pow, Pow(a,b), "Power"),
Description(""),
Description("The following table lists conditions such that", SourceForm(Pow(a, b)), "is defined in Fungrim."),
Table(TableRelation(Tuple(P, Q), Implies(P, Q)),
TableHeadings(Description("Domain"), Description("Codomain")),
List(
TableSection("Numbers"),
Tuple(And(Element(a, SetMinus(CC, 0)), Element(b, CC)), Element(Pow(a, b), CC)),
Tuple(And(Element(a, CC), Element(b, Set(0))), Element(Pow(a, b), Set(1))),
TableSection("Infinities"),
Tuple(And(Element(a, Set(Infinity,-Infinity,UnsignedInfinity)), Element(b, ZZLessEqual(-1))), Element(Pow(a, b), Set(0))),
TableSection("General domains"),
Tuple(And(Element(a, R), Element(R, Rings), Element(b, ZZGreaterEqual(0))), Element(Pow(a, b), R)),
Tuple(And(Element(a, SetMinus(K, Set(0))), Element(K, Fields), SubsetEqual(QQ, K), Element(b, ZZ)), Element(Pow(a, b), R)),
)))
make_entry(ID("d316bc"),
Formula(Equal(Pow(0, 0), 1)))
make_entry(ID("310f36"),
Formula(Equal(Pow(z, 0), 1)),
Variables(z),
Assumptions(Element(z, CC)),
And(Element(z, R), Element(R, Rings), SubsetEqual(ZZ, R)))
make_entry(ID("a249f6"),
Formula(Equal(Pow(z, 1), z)),
Variables(z),
Assumptions(Element(z, CC)),
And(Element(z, R), Element(R, Rings)))
make_entry(ID("6c2b31"),
    Formula(Equal(Pow(z, n + 1), Pow(z, n) * z)),
Variables(z, n),
Assumptions(And(Element(z, CC), Element(n, ZZGreaterEqual(0))),
And(Element(z, R), Element(R, Rings), Element(n, ZZGreaterEqual(0)))))
make_entry(ID("c53d94"),
Formula(Equal(Pow(z, -1), 1/z)),
Variables(z),
Assumptions(Element(z, CC),
And(Element(z, SetMinus(K, Set(0))), Element(K, Fields), SubsetEqual(QQ, K))))
make_entry(ID("4d6416"),
Formula(Equal(Pow(a, b), Exp(b*Log(a)))),
Variables(a, b),
Assumptions(And(Element(a, SetMinus(CC, Set(0))), Element(b, CC))))
make_entry(ID("634687"),
Formula(Equal(Pow(z, Div(1,2)), Sqrt(z))),
Variables(z),
Assumptions(Element(z, CC)))
make_entry(ID("2e0d99"),
Formula(Equal(Pow(z, -Div(1,2)), 1/Sqrt(z))),
Variables(z),
Assumptions(Element(z, SetMinus(CC, Set(0)))))
make_entry(ID("0aac97"),
Formula(Equal(Pow(a+b*ConstI, c+d*ConstI),
Where(M**c * Exp(-(d*theta)) * (Cos(c*theta + d*Log(M)) + ConstI * Sin(c*theta + d*Log(M))), Equal(M, Abs(a+b*ConstI)), Equal(theta, Arg(a+b*ConstI))))),
Variables(a, b, c, d),
Assumptions(And(Element(a, RR), Element(b, RR), Element(c, RR), Element(d, RR), Unequal(a+b*ConstI, 0))))
make_entry(ID("bc4d0a"),
Formula(Equal(Abs(Pow(a+b*ConstI, c+d*ConstI)),
Where(M**c * Exp(-(d*theta)), Equal(M, Abs(a+b*ConstI)), Equal(theta, Arg(a+b*ConstI))))),
Variables(a, b, c, d),
Assumptions(And(Element(a, RR), Element(b, RR), Element(c, RR), Element(d, RR), Unequal(a+b*ConstI, 0))))
make_entry(ID("caf8cf"),
Formula(Equal(Re(Pow(a+b*ConstI, c+d*ConstI)),
Where(M**c * Exp(-(d*theta)) * Cos(c*theta + d*Log(M)), Equal(M, Abs(a+b*ConstI)), Equal(theta, Arg(a+b*ConstI))))),
Variables(a, b, c, d),
Assumptions(And(Element(a, RR), Element(b, RR), Element(c, RR), Element(d, RR), Unequal(a+b*ConstI, 0))))
make_entry(ID("18873d"),
Formula(Equal(Im(Pow(a+b*ConstI, c+d*ConstI)),
Where(M**c * Exp(-(d*theta)) * Sin(c*theta + d*Log(M)), Equal(M, Abs(a+b*ConstI)), Equal(theta, Arg(a+b*ConstI))))),
Variables(a, b, c, d),
Assumptions(And(Element(a, RR), Element(b, RR), Element(c, RR), Element(d, RR), Unequal(a+b*ConstI, 0))))
make_entry(ID("2090c3"),
Formula(Equal(
(x*y)**a,
x**a * y**a * Exp(2*ConstPi*ConstI*a \
* Floor((ConstPi - Arg(x) - Arg(y)) / (2*ConstPi))
)
)),
Variables(x, y, a),
Assumptions(And(
Element(x, SetMinus(CC, Set(0))),
Element(y, SetMinus(CC, Set(0))),
Element(a, CC)
))
)
| 33.277372 | 161 | 0.573371 |
794fa93ef577021c763a2437930b076948d75952 | 796 | py | Python | learn-django/learning_log/learning_logs/models.py | ornichola/learn-proramming | 67f2e4d8846300db766e716b7ddf66bd54209fca | [
"Unlicense"
] | 1 | 2021-06-28T10:55:00.000Z | 2021-06-28T10:55:00.000Z | learn-django/learning_log/learning_logs/models.py | ornichola/learn-proramming | 67f2e4d8846300db766e716b7ddf66bd54209fca | [
"Unlicense"
] | null | null | null | learn-django/learning_log/learning_logs/models.py | ornichola/learn-proramming | 67f2e4d8846300db766e716b7ddf66bd54209fca | [
"Unlicense"
] | null | null | null | from django.db import models
# Create your models here.
class Topic(models.Model):
    """A topic the user is learning about."""
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """Return a string representation of the model."""
return self.text
class Entry(models.Model):
    """Information the user has learned about a topic."""
topic = models.ForeignKey(Topic)
text = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = 'entries'
    def __str__(self):
        """Return a string representation of the model."""
if len(self.text) > 50:
return self.text[:50] + "..."
else:
return self.text
| 29.481481 | 56 | 0.657035 |
794faa74f372a5d80796e2fdce7d61f2c18d7ca2 | 26 | py | Python | viberio/__init__.py | bostud/Viber_bot | 076113433837aab942f86a0f73275c50037ed8f9 | [
"MIT"
] | null | null | null | viberio/__init__.py | bostud/Viber_bot | 076113433837aab942f86a0f73275c50037ed8f9 | [
"MIT"
] | null | null | null | viberio/__init__.py | bostud/Viber_bot | 076113433837aab942f86a0f73275c50037ed8f9 | [
"MIT"
] | null | null | null | __version__ = '0.0.1dev1'
| 13 | 25 | 0.692308 |
794fab004a3d19febcd658a0d9b358d43828ecce | 5,071 | py | Python | tests/memory_checks.py | agarny/libopencor | a0c9612935bcfb74da50138f636ce051607ce1b9 | [
"Apache-2.0"
] | null | null | null | tests/memory_checks.py | agarny/libopencor | a0c9612935bcfb74da50138f636ce051607ce1b9 | [
"Apache-2.0"
] | 52 | 2021-05-12T23:26:46.000Z | 2022-03-31T22:46:31.000Z | tests/memory_checks.py | agarny/libopencor | a0c9612935bcfb74da50138f636ce051607ce1b9 | [
"Apache-2.0"
] | 1 | 2021-05-13T04:56:45.000Z | 2021-05-13T04:56:45.000Z | #!/usr/bin/env python
# Copyright libOpenCOR contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do memory checks using Memcheck from Valgrind.
# Note: this script is based on the runMemcheck.py script that used to be part
# of KDevelop (see
# https://invent.kde.org/kdevelop/kdevelop/-/blob/3973/veritas/tests/runMemcheck.py).
import math
import multiprocessing
import os
import shutil
import sys
import xml.dom.minidom
def child_node_data(dom, tag):
res = None
element = dom.getElementsByTagName(tag)
if len(element) != 0:
res = element[0].firstChild.data
return res
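# For example, on a node whose first "kind" child element contains the text
# "Leak_DefinitelyLost", child_node_data(node, "kind") returns that string;
# if no matching tag exists, it returns None.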
class Frame:
def __init__(self, frame_node):
self.function = child_node_data(frame_node, "fn")
self.file = child_node_data(frame_node, "file")
self.line = child_node_data(frame_node, "line")
def __str__(self):
res = ""
if self.function:
res += " " + self.function
if self.file and self.line:
res += " (" + self.file + ":" + self.line + ")"
res += "\n"
return res
class BackTrace:
def __init__(self, error_node):
self.kind = error_node.getElementsByTagName("kind")[0].firstChild.data
self.stack = []
for frame in error_node.getElementsByTagName("frame"):
if child_node_data(frame, "fn"):
self.stack.append(Frame(frame))
def is_leak(self):
if self.kind != "Leak_DefinitelyLost":
return False
for frame in self.stack:
if "::TestBody" in frame.function or "libOpenCOR::" in frame.function:
return True
return False
def __str__(self):
out = " Traceback (most recent call first):\n"
for frame in self.stack:
out += str(frame)
return out
def parse_errors(output):
res = []
dom = xml.dom.minidom.parseString(output)
error = dom.getElementsByTagName("error")
for stack in error:
back_trace = BackTrace(stack)
if back_trace.is_leak():
res.append(back_trace)
return res
def garbage(line):
return not line.startswith("<unknown program name>") and not line.startswith(
"profiling:"
)
def memcheck(valgrind, test, test_path):
os.system(
valgrind
+ f" --tool=memcheck --child-silent-after-fork=yes --leak-check=full --xml=yes --xml-fd=3 --num-callers=50 {test_path} 1>{test}.txt 2>{test}.err 3>{test}.xml"
)
return "".join(list(filter(garbage, open(f"{test}.xml").readlines())))
def run_test(valgrind, test, test_path):
sys.stdout.write(f"-- Checking memory in {test} - ")
if not os.access(test_path, os.X_OK):
sys.stdout.write("not found\n")
return False
errors = parse_errors(memcheck(valgrind, test, test_path))
if len(errors) == 0:
sys.stdout.write("Success\n")
return True
sys.stdout.write("Failed\n")
for error in errors:
sys.stderr.write(str(error))
return False
if __name__ == "__main__":
if len(sys.argv) > 2:
valgrind = shutil.which("valgrind")
        if valgrind is None:
sys.stderr.write("-- Valgrind could not be found.\n")
sys.exit(3)
exit_code = 0
tests_dir = sys.argv[1]
tests = sys.argv[2:]
with multiprocessing.Pool(multiprocessing.cpu_count()) as process:
results = process.starmap(
run_test,
[(valgrind, test, os.path.join(tests_dir, test)) for test in tests],
)
successes = []
failures = []
for index, result in enumerate(results):
if result:
successes.append(tests[index])
else:
failures.append(tests[index])
exit_code = 2
total = len(successes) + len(failures)
sys.stdout.write("-- Summary:\n")
sys.stdout.write(
f" {math.ceil(100.0 * len(successes) / total)}% tests passed, {len(failures)} tests failed out of {total}.\n"
)
if len(failures):
sys.stdout.write("\n")
sys.stdout.write(" The failed tests are:\n")
for failure in failures:
sys.stdout.write(f" - {failure}\n")
sys.stdout.write("\n")
sys.exit(exit_code)
else:
        sys.stderr.write(
            f"Usage: python3 {os.path.basename(sys.argv[0])} test_executable_dir test_executable_1 [test_executable_2 ...]"
        )
sys.exit(1)
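# Typical invocation (the directory and test names are placeholders):
#   python3 memory_checks.py build/tests test_foo test_bar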
| 26.139175 | 166 | 0.60284 |
794fab18d5038f414b3fbd87a0fc68c1b18f4b39 | 14,764 | py | Python | randomapi.py | Tantusar/randomapi | c3accc8d5570479821a5d26d6789ca91a4a71338 | [
"MIT"
] | 1 | 2020-06-16T19:04:19.000Z | 2020-06-16T19:04:19.000Z | randomapi.py | Tantusar/randomapi | c3accc8d5570479821a5d26d6789ca91a4a71338 | [
"MIT"
] | null | null | null | randomapi.py | Tantusar/randomapi | c3accc8d5570479821a5d26d6789ca91a4a71338 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
randomapi.py: a Python implementation of the RANDOM.org JSON-RPC API
Author: Mitchell Cohen (mitch.cohen@me.com)
https://github.com/mitchchn/randomapi
Maintainer: Thomas Chick (twitter.com/Tantusar)
https://github.com/tantusar/randomapi
Date: January 18, 2020
Version: 0.3.2
RANDOM.org API reference:
- https://api.random.org/json-rpc/2/
randomapi.py supports all basic and signed methods in Release 2
of the RANDOM.ORG API. It respects delay requests from the server
and has the ability to verify digitally-signed data.
RPC code based on python-jsonrpc:
- https://pypi.python.org/pypi/python-jsonrpc
Example usage:
# Returns a list of 5 random numbers between 0 and 10
random_client = RandomJSONRPC(api_key) # Requires a valid API key
nums = random_client.generate_integers(n=5, min=0, max=10).parse()
"""
import time
import json
import logging
# Python 2/3 Compatibility
try:
from urllib.request import urlopen, Request
from urllib.error import HTTPError
from urllib.parse import urlparse, urlencode
except ImportError:
from urlparse import urlparse
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
import uuid
from collections import OrderedDict
###################### Constants #############################
JSON_URL = "https://api.random.org/json-rpc/2/invoke"
# RANDOM.org API method names
INTEGER_METHOD = "generateIntegers"
INTEGER_SEQUENCE_METHOD = "generateIntegerSequences"
DECIMAL_METHOD = "generateDecimalFractions"
GAUSSIAN_METHOD = "generateGaussians"
STRING_METHOD = "generateStrings"
UUID_METHOD = "generateUUIDs"
BLOB_METHOD = "generateBlobs"
USAGE_METHOD = "getUsage"
SIGNED_INTEGER_METHOD = "generateSignedIntegers"
SIGNED_INTEGER_SEQUENCE_METHOD = "generateSignedIntegerSequences"
SIGNED_DECIMAL_METHOD = "generateDecimalFractions"
SIGNED_GAUSSIAN_METHOD = "generateSignedGaussians"
SIGNED_STRING_METHOD = "generateSignedStrings"
SIGNED_UUID_METHOD = "generateSignedUUIDs"
SIGNED_BLOB_METHOD = "generateSignedBlobs"
RESULT_METHOD = "getResult"
VERIFY_SIGNATURE_METHOD = "verifySignature"
# RANDOM.org API parameters
ADVISORY_DELAY = "advisoryDelay"
API_KEY = "apiKey"
# JSON keys
RESULT = "result"
RANDOM = "random"
AUTHENTICITY = "authenticity"
SIGNATURE = "signature"
SERIAL_NUMBER = "serialNumber"
# RANDOM.org blob formats
FORMAT_BASE64 = "base64"
FORMT_HEX = "hex"
def valid_json_methods():
'''Returns a list of valid JSON-RPC method names from the RANDOM.org API'''
return [INTEGER_METHOD, INTEGER_SEQUENCE_METHOD, DECIMAL_METHOD, GAUSSIAN_METHOD,
STRING_METHOD, UUID_METHOD, BLOB_METHOD, USAGE_METHOD, SIGNED_INTEGER_METHOD,
SIGNED_INTEGER_SEQUENCE_METHOD, SIGNED_BLOB_METHOD, SIGNED_DECIMAL_METHOD,
SIGNED_GAUSSIAN_METHOD, SIGNED_STRING_METHOD, SIGNED_UUID_METHOD,
RESULT_METHOD, VERIFY_SIGNATURE_METHOD]
def parse_random(json_string):
"""
Returns the randomly-generated data from a RANDOM.org JSON request
    :param json_string: a fully-formed JSON-RPC response string
"""
data = json_to_ordered_dict(json_string)
random = []
if RANDOM in data[RESULT]:
random = data[RESULT][RANDOM]
return random
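# For example, a response string shaped like
#   '{"jsonrpc": "2.0", "result": {"random": {"data": [1, 2, 3]}}, "id": "..."}'
# yields the mapping stored under "random" (here containing "data": [1, 2, 3]).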
def json_to_ordered_dict(json_string):
return json.loads(json_string, object_pairs_hook=OrderedDict)
def compose_api_call(json_method_name, *args, **kwargs):
"""
Returns a fully-formed JSON-RPC string for a RANDOM.org API method
:param json_method_name: Name of the method. Can be one of:
INTEGER_METHOD, INTEGER_SEQUENCE_METHOD, DECIMAL_METHOD, GAUSSIAN_METHOD,
STRING_METHOD, UUID_METHOD, BLOB_METHOD, USAGE_METHOD, SIGNED_INTEGER_METHOD,
SIGNED_INTEGER_SEQUENCE_METHOD, SIGNED_BLOB_METHOD, SIGNED_DECIMAL_METHOD,
SIGNED_GAUSSIAN_METHOD, SIGNED_STRING_METHOD, SIGNED_UUID_METHOD,
RESULT_METHOD, VERIFY_SIGNATURE_METHOD
:param args: Positional parameters
:param kwargs: Named parameters. See: https://api.random.org/json-rpc/2/basic
for descriptions of methods and their parameters.
"""
if json_method_name not in valid_json_methods():
raise Exception(
"'{}' is not a valid RANDOM.org JSON-RPC method".format(
json_method_name))
if kwargs:
params = kwargs
if args:
params["__args"] = args
else:
params = args
request_data = {
"method": str(json_method_name),
"id": str(uuid.uuid4()),
"jsonrpc": "2.0",
"params": params
}
return json.dumps(request_data).encode('utf-8')
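# For example, compose_api_call(USAGE_METHOD, apiKey='YOUR-KEY') returns a UTF-8
# encoded JSON-RPC 2.0 payload along the lines of (the id is a fresh UUID):
#   b'{"method": "getUsage", "id": "...", "jsonrpc": "2.0", "params": {"apiKey": "YOUR-KEY"}}'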
def http_request(url, json_string):
"""
Request data from server (POST)
:param json_string: JSON-String
"""
request = Request(url, data=json_string)
request.add_header("Content-Type", "application/json")
response = urlopen(request)
response_string = response.read()
response.close()
return response_string
class RandomJSONRPC:
def __init__(self, api_key):
"""
Creates a client which can call RANDOM.org API functions to generate
various kinds of random data.
The class is simple to use: simply instantiate a RandomJSONRPC object
with a valid API key, and call the appropriate method on the server.
        For a list of available methods and parameters, see https://api.random.org/json-rpc/2/
:param api_key: String representing a RANDOM.org JSON-RPC API key
"""
self.api_key = api_key
self._time_of_last_request = 0
self._advisory_delay = 0
def delay_request(self, requested_delay):
elapsed = time.time() - self._time_of_last_request
remaining_time = requested_delay - elapsed
logging.info("Sleeping {} more seconds...".format(remaining_time))
        if remaining_time > 0:
time.sleep(remaining_time)
def send_request(self, request_string, method=""):
'''Wraps outgoing JSON requests'''
# Create a new response class, using an ordered dict to
# preserve the integrity of signed data
# Respect delay requests from the server
if self._time_of_last_request == 0:
self._time_of_last_request = time.time()
if self._advisory_delay > 0:
self.delay_request(self._advisory_delay)
# Make the connection now
json_string = http_request(JSON_URL, request_string)
self._time_of_last_request = time.time()
# Use an ordered dict to preserve the integrity of signed data
response = RandomJSONResponse(json_to_ordered_dict(json_string), method)
if ADVISORY_DELAY in response._result:
self._advisory_delay = float(response._result[ADVISORY_DELAY]) / 1000.0
return response
####################### RANDOM.org API methods ##########################
def generate_integers(self, n, min, max, replacement=True, base=10):
'''Returns a list of true random integers with a user-defined range'''
request_string = compose_api_call(
INTEGER_METHOD, apiKey=self.api_key,
n=n, min=min, max=max, replacement=replacement, base=base)
return self.send_request(request_string, INTEGER_METHOD)
def generate_integer_sequences(self, n, length, min, max, replacement=True, base=10):
'''Returns a list of lists of true random integers with a user-defined range'''
request_string = compose_api_call(
INTEGER_SEQUENCE_METHOD, apiKey=self.api_key,
n=n, length=length, min=min, max=max, replacement=replacement, base=base)
return self.send_request(request_string, INTEGER_SEQUENCE_METHOD)
def generate_decimal_fractions(self, n, decimal_places, replacement=True):
'''Returns a list of true random decimal fractions between [0,1]
with a user-defined number of decimal places'''
request_string = compose_api_call(
DECIMAL_METHOD, apiKey=self.api_key,
n=n, decimalPlaces=decimal_places, replacement=replacement)
return self.send_request(request_string, DECIMAL_METHOD)
def generate_gaussians(self, n, mean, standard_deviation,
significant_digits):
'''Returns a list of true random numbers from a Gaussian distribution'''
request_string = compose_api_call(
GAUSSIAN_METHOD, apiKey=self.api_key,
n=n, mean=mean,
standardDeviation=standard_deviation,
significantDigits=significant_digits)
return self.send_request(request_string, GAUSSIAN_METHOD)
def generate_strings(self, n, length, characters, replacement=True):
'''Returns a list of true random strings composed from a user-defined
set of characters'''
request_string = compose_api_call(
STRING_METHOD, apiKey=self.api_key,
n=n, length=length, characters=characters, replacement=replacement)
return self.send_request(request_string, STRING_METHOD)
def generate_uuids(self, n):
'''Returns a list of true random UUIDs (version 4)'''
request_string = compose_api_call(
UUID_METHOD, apiKey=self.api_key, n=n)
return self.send_request(request_string, UUID_METHOD)
def generate_blobs(self, n, size, format=FORMAT_BASE64):
'''Returns a list of Binary Large OBjects (BLOBs) containing
true random data'''
request_string = compose_api_call(
BLOB_METHOD, apiKey=self.api_key, n=n, size=size, format=format)
return self.send_request(request_string, BLOB_METHOD)
def get_usage(self):
'''Returns a dictionary of usage information for the client's
API key.'''
request_string = compose_api_call(
USAGE_METHOD, apiKey=self.api_key)
return self.send_request(request_string, USAGE_METHOD)
####################### Digitally-signed API methods ##########################
def generate_signed_integers(self, n, min, max, replacement=True, base=10):
request_string = compose_api_call(
SIGNED_INTEGER_METHOD, apiKey=self.api_key, n=n, min=min, max=max,
replacement=replacement, base=base)
return self.send_request(request_string, SIGNED_INTEGER_METHOD)
def generate_signed_integer_sequences(self, n, length, min, max, replacement=True, base=10):
request_string = compose_api_call(
SIGNED_INTEGER_SEQUENCE_METHOD, apiKey=self.api_key, n=n, length=length, min=min,
max=max, replacement=replacement, base=base)
return self.send_request(request_string, SIGNED_INTEGER_SEQUENCE_METHOD)
def generate_signed_decimal_fractions(self, n, decimal_places,
replacement=True):
request_string = compose_api_call(
SIGNED_DECIMAL_METHOD, apiKey=self.api_key,
n=n, decimalPlaces=decimal_places, replacement=replacement)
return self.send_request(request_string, SIGNED_DECIMAL_METHOD)
def generate_signed_gaussians(self, n, mean, standard_deviation,
significant_digits):
request_string = compose_api_call(
SIGNED_GAUSSIAN_METHOD, apiKey=self.api_key,
n=n, mean=mean,
standardDeviation=standard_deviation,
significantDigits=significant_digits)
return self.send_request(request_string, SIGNED_GAUSSIAN_METHOD)
def generate_signed_strings(self, n, length, characters, replacement=True):
request_string = compose_api_call(
SIGNED_STRING_METHOD, apiKey=self.api_key,
n=n, length=length, characters=characters, replacement=replacement)
return self.send_request(request_string, SIGNED_STRING_METHOD)
def generate_signed_uuids(self, n):
request_string = compose_api_call(
SIGNED_UUID_METHOD, apiKey=self.api_key, n=n)
return self.send_request(request_string, SIGNED_UUID_METHOD)
def generate_signed_blobs(self, n, size, format=FORMAT_BASE64):
request_string = compose_api_call(
SIGNED_BLOB_METHOD,
apiKey=self.api_key, n=n, size=size, format=format)
return self.send_request(request_string, SIGNED_BLOB_METHOD)
def get_result(self, serial_number):
'''Returns the result of a previous request given a supplied serial
number.'''
request_string = compose_api_call(
RESULT_METHOD, apiKey=self.api_key, serialNumber=serial_number)
response = self.send_request(request_string, RESULT_METHOD)
response._method = response._random['method']
return response
def verify_signature(self):
"""
Verifies signed data with RANDOM.org.
"""
if not self._signature:
return None
json_string = compose_api_call(
VERIFY_SIGNATURE_METHOD, random=self._random,
signature=self._signature)
response = self.send_request(json_string, VERIFY_SIGNATURE_METHOD)
if AUTHENTICITY in response._result:
return response._result[AUTHENTICITY]
else:
raise Exception("Unable to verify authenticity of signed data")
class RandomJSONResponse:
def __init__(self, json_data, method=""):
self._json_data = json_data
self._result = {}
self._random = []
self._signature = ""
self._serial_number = 0
self._method = method
self.check_errors()
self._populate()
def check_errors(self):
'''Checks to see if the received JSON object has errors'''
if 'error' in self._json_data:
error = self._json_data['error']
code = error['code']
message = error['message']
raise Exception(
"""Error code: {}. Message: {}
See: https://api.random.org/json-rpc/2/error-codes""".format(code, message))
def _populate(self):
if RESULT in self._json_data:
self._result = self._json_data[RESULT]
if RANDOM in self._result:
self._random = self._result[RANDOM]
if SIGNATURE in self._result:
self._signature = self._result[SIGNATURE]
if SERIAL_NUMBER in self._random:
self._serial_number = self._random[SERIAL_NUMBER]
def parse(self):
'''Parses the received JSON data object and returns the random data'''
return self._random['data']
def __repr__(self):
try:
return "<RandomJSONResponse " + self._method + " " + str(self._random["data"]) + ">"
except:
return "<RandomJSONResponse None>"
def __str__(self):
try:
return str(self._random['data'])
except:
return ""
| 36.544554 | 96 | 0.68308 |
794fab8fae3d8f6ab2ee386af2854ab8d6d93b41 | 2,834 | py | Python | packages/mbed-host-tests/test/host_registry.py | noralsydmp/mbed-os-tools | 5a14958aa49eb5764afba8e1dc3208cae2955cd7 | [
"Apache-2.0"
] | 29 | 2018-11-30T19:45:22.000Z | 2022-03-29T17:02:16.000Z | packages/mbed-host-tests/test/host_registry.py | noralsydmp/mbed-os-tools | 5a14958aa49eb5764afba8e1dc3208cae2955cd7 | [
"Apache-2.0"
] | 160 | 2018-11-30T21:55:52.000Z | 2022-01-18T10:58:09.000Z | packages/mbed-host-tests/test/host_registry.py | noralsydmp/mbed-os-tools | 5a14958aa49eb5764afba8e1dc3208cae2955cd7 | [
"Apache-2.0"
] | 73 | 2018-11-30T21:34:41.000Z | 2021-10-02T05:51:40.000Z | """
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from mbed_host_tests.host_tests_registry import HostRegistry
from mbed_host_tests import BaseHostTest
class HostRegistryTestCase(unittest.TestCase):
class HostTestClassMock(BaseHostTest):
def setup(self):
pass
def result(self):
pass
def teardown(self):
pass
def setUp(self):
self.HOSTREGISTRY = HostRegistry()
def tearDown(self):
pass
def test_register_host_test(self):
self.HOSTREGISTRY.register_host_test('host_test_mock_auto', self.HostTestClassMock())
self.assertEqual(True, self.HOSTREGISTRY.is_host_test('host_test_mock_auto'))
def test_unregister_host_test(self):
self.HOSTREGISTRY.register_host_test('host_test_mock_2_auto', self.HostTestClassMock())
self.assertEqual(True, self.HOSTREGISTRY.is_host_test('host_test_mock_2_auto'))
self.assertNotEqual(None, self.HOSTREGISTRY.get_host_test('host_test_mock_2_auto'))
self.HOSTREGISTRY.unregister_host_test('host_test_mock_2_auto')
self.assertEqual(False, self.HOSTREGISTRY.is_host_test('host_test_mock_2_auto'))
def test_get_host_test(self):
self.HOSTREGISTRY.register_host_test('host_test_mock_3_auto', self.HostTestClassMock())
self.assertEqual(True, self.HOSTREGISTRY.is_host_test('host_test_mock_3_auto'))
self.assertNotEqual(None, self.HOSTREGISTRY.get_host_test('host_test_mock_3_auto'))
def test_is_host_test(self):
self.assertEqual(False, self.HOSTREGISTRY.is_host_test(''))
self.assertEqual(False, self.HOSTREGISTRY.is_host_test(None))
self.assertEqual(False, self.HOSTREGISTRY.is_host_test('xyz'))
def test_host_test_str_not_empty(self):
for ht_name in self.HOSTREGISTRY.HOST_TESTS:
ht = self.HOSTREGISTRY.HOST_TESTS[ht_name]
self.assertNotEqual(None, ht)
def test_host_test_has_name_attribute(self):
for ht_name in self.HOSTREGISTRY.HOST_TESTS:
ht = self.HOSTREGISTRY.HOST_TESTS[ht_name]
self.assertTrue(hasattr(ht, 'setup'))
self.assertTrue(hasattr(ht, 'result'))
self.assertTrue(hasattr(ht, 'teardown'))
if __name__ == '__main__':
unittest.main()
| 36.805195 | 95 | 0.729358 |
794fac4ba47f099ec1449b97421fc09eb156b2b0 | 121 | py | Python | shop/products/admin.py | ahmadreza-smdi/ms-shop | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | [
"MIT"
] | 6 | 2019-11-23T17:16:17.000Z | 2021-09-20T13:12:55.000Z | shop/products/admin.py | abdulkarimFallatah/ms-shop | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | [
"MIT"
] | 5 | 2021-04-08T22:00:07.000Z | 2022-02-10T12:38:25.000Z | shop/products/admin.py | abdulkarimFallatah/ms-shop | 65ba3f3061e2ac5c63115b08dadfe7d67f645fb6 | [
"MIT"
] | 2 | 2019-11-24T20:33:16.000Z | 2021-09-09T00:29:52.000Z | from django.contrib import admin
from .models import Product
# Register your models here.
admin.site.register(Product)
| 17.285714 | 32 | 0.801653 |
794facd9798de4988880d8fad0eaa62d26c55c64 | 1,680 | py | Python | examples/ElasticsearchDomain.py | DrLuke/troposphere | 05672a2b0cf87215dbd6a2a656669e0d3c92d0e5 | [
"BSD-2-Clause"
] | 1 | 2019-05-27T21:22:51.000Z | 2019-05-27T21:22:51.000Z | examples/ElasticsearchDomain.py | DrLuke/troposphere | 05672a2b0cf87215dbd6a2a656669e0d3c92d0e5 | [
"BSD-2-Clause"
] | 1 | 2021-06-25T15:20:46.000Z | 2021-06-25T15:20:46.000Z | examples/ElasticsearchDomain.py | DrLuke/troposphere | 05672a2b0cf87215dbd6a2a656669e0d3c92d0e5 | [
"BSD-2-Clause"
] | 2 | 2018-05-05T18:40:43.000Z | 2018-09-19T04:17:05.000Z | # Converted from Elasticsearch Domain example located at:
# http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#d0e51519
from troposphere import Template, constants
from troposphere.elasticsearch import Domain, EBSOptions, VPCOptions
from troposphere.elasticsearch import ElasticsearchClusterConfig
from troposphere.elasticsearch import SnapshotOptions
templ = Template()
templ.add_description('Elasticsearch Domain example')
es_domain = templ.add_resource(Domain(
'ElasticsearchDomain',
DomainName="ExampleElasticsearchDomain",
ElasticsearchClusterConfig=ElasticsearchClusterConfig(
DedicatedMasterEnabled=True,
InstanceCount=2,
ZoneAwarenessEnabled=True,
InstanceType=constants.ELASTICSEARCH_M3_MEDIUM,
DedicatedMasterType=constants.ELASTICSEARCH_M3_MEDIUM,
DedicatedMasterCount=3
),
EBSOptions=EBSOptions(EBSEnabled=True,
Iops=0,
VolumeSize=20,
VolumeType="gp2"),
SnapshotOptions=SnapshotOptions(AutomatedSnapshotStartHour=0),
AccessPolicies={'Version': '2012-10-17',
'Statement': [{
'Effect': 'Allow',
'Principal': {
'AWS': '*'
},
'Action': 'es:*',
'Resource': '*'
}]},
AdvancedOptions={"rest.action.multi.allow_explicit_index": "true"},
VPCOptions=VPCOptions(
SubnetIds=["subnet-4f2bb123"],
SecurityGroupIds=["sg-04cf048c"]
)
))
print(templ.to_json())
| 35.744681 | 111 | 0.633929 |
794fae319c24c59c23eeb135b2597fd2b24d1ce1 | 448 | py | Python | project/src/main/python/backTest/factorAnalysis/main_simple.py | daifengqi/big-data-hft | 013747ca3c2ca984eeac723fd5d8f8e3458b840c | [
"MIT"
] | 1 | 2022-03-07T09:32:40.000Z | 2022-03-07T09:32:40.000Z | project/src/main/python/backTest/factorAnalysis/main_simple.py | daifengqi/big-data-hft | 013747ca3c2ca984eeac723fd5d8f8e3458b840c | [
"MIT"
] | null | null | null | project/src/main/python/backTest/factorAnalysis/main_simple.py | daifengqi/big-data-hft | 013747ca3c2ca984eeac723fd5d8f8e3458b840c | [
"MIT"
] | 1 | 2022-03-03T16:22:37.000Z | 2022-03-03T16:22:37.000Z | # from updateStockPool import UpdateStockPool,update_benchmark_return
from factorAnalysisTimeHorizonSimple import updateFactorAnalysisResSimple
# from factorAnalysisResPlot import plotFactorAnalysisRes
# import sys
import time
import warnings
warnings.filterwarnings("ignore")
factor_analysis_config_path = r"D:\HX_proj\factorAnalysis\config_factorAnalysisSimple.yaml"
# # factor analysis
updateFactorAnalysisResSimple(factor_analysis_config_path) | 40.727273 | 91 | 0.886161 |
794fae3b16c3935b4600103819cdefdc0b061ec8 | 19,863 | py | Python | manila/share/drivers/tegile/tegile.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 159 | 2015-01-02T09:35:15.000Z | 2022-01-04T11:51:34.000Z | manila/share/drivers/tegile/tegile.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 5 | 2015-07-24T09:28:21.000Z | 2020-11-20T04:33:51.000Z | manila/share/drivers/tegile/tegile.py | openstack/manila | 1ebae738c235c6f1874ac7b11307e0d5fb567dba | [
"Apache-2.0"
] | 128 | 2015-01-05T22:52:28.000Z | 2021-12-29T14:00:58.000Z | # Copyright (c) 2016 by Tegile Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Share driver for Tegile storage.
"""
import json
import requests
import six
from oslo_config import cfg
from oslo_log import log
from manila import exception
from manila.i18n import _
from manila.share import driver
from manila.share import utils as share_utils
from manila import utils
tegile_opts = [
cfg.HostAddressOpt('tegile_nas_server',
help='Tegile NAS server hostname or IP address.'),
cfg.StrOpt('tegile_nas_login',
help='User name for the Tegile NAS server.'),
cfg.StrOpt('tegile_nas_password',
help='Password for the Tegile NAS server.'),
cfg.StrOpt('tegile_default_project',
help='Create shares in this project')]
CONF = cfg.CONF
CONF.register_opts(tegile_opts)
LOG = log.getLogger(__name__)
DEFAULT_API_SERVICE = 'openstack'
TEGILE_API_PATH = 'zebi/api'
TEGILE_LOCAL_CONTAINER_NAME = 'Local'
TEGILE_SNAPSHOT_PREFIX = 'Manual-S-'
VENDOR = 'Tegile Systems Inc.'
DEFAULT_BACKEND_NAME = 'Tegile'
VERSION = '1.0.0'
DEBUG_LOGGING = False # For debugging purposes
def debugger(func):
"""Returns a wrapper that wraps func.
The wrapper will log the entry and exit points of the function.
"""
def wrapper(*args, **kwds):
if DEBUG_LOGGING:
LOG.debug('Entering %(classname)s.%(funcname)s',
{
'classname': args[0].__class__.__name__,
'funcname': func.__name__,
})
LOG.debug('Arguments: %(args)s, %(kwds)s',
{
'args': args[1:],
'kwds': kwds,
})
f_result = func(*args, **kwds)
if DEBUG_LOGGING:
LOG.debug('Exiting %(classname)s.%(funcname)s',
{
'classname': args[0].__class__.__name__,
'funcname': func.__name__,
})
LOG.debug('Results: %(result)s',
{'result': f_result})
return f_result
return wrapper
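# With DEBUG_LOGGING set to True, a decorated call such as create_share(...) logs
# 'Entering TegileShareDriver.create_share', its arguments, its exit point and its result.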
class TegileAPIExecutor(object):
def __init__(self, classname, hostname, username, password):
self._classname = classname
self._hostname = hostname
self._username = username
self._password = password
def __call__(self, *args, **kwargs):
return self._send_api_request(*args, **kwargs)
@debugger
@utils.retry(retry_param=(requests.ConnectionError, requests.Timeout),
interval=30,
retries=3,
backoff_rate=1)
def _send_api_request(self, method, params=None,
request_type='post',
api_service=DEFAULT_API_SERVICE,
fine_logging=DEBUG_LOGGING):
if params is not None:
params = json.dumps(params)
url = 'https://%s/%s/%s/%s' % (self._hostname,
TEGILE_API_PATH,
api_service,
method)
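        # e.g. for method='createShare' this builds
        # 'https://{tegile_nas_server}/zebi/api/openstack/createShare'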
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, '
'url: %(url)s', {
'classname': self._classname,
'method': method,
'url': url,
})
if request_type == 'post':
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) '
'method: %(method)s, payload: %(payload)s',
{
'classname': self._classname,
'method': method,
'payload': params,
})
req = requests.post(url,
data=params,
auth=(self._username, self._password),
verify=False)
else:
req = requests.get(url,
auth=(self._username, self._password),
verify=False)
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) method: %(method)s, '
'return code: %(retcode)s',
{
'classname': self._classname,
'method': method,
'retcode': req,
})
try:
response = req.json()
if fine_logging:
LOG.debug('TegileAPIExecutor(%(classname)s) '
'method: %(method)s, response: %(response)s',
{
'classname': self._classname,
'method': method,
'response': response,
})
except ValueError:
# Some APIs don't return output and that's fine
response = ''
req.close()
if req.status_code != 200:
raise exception.TegileAPIException(response=req.text)
return response
class TegileShareDriver(driver.ShareDriver):
"""Tegile NAS driver. Allows for NFS and CIFS NAS storage usage."""
def __init__(self, *args, **kwargs):
super(TegileShareDriver, self).__init__(False, *args, **kwargs)
self.configuration.append_config_values(tegile_opts)
self._default_project = (self.configuration.safe_get(
"tegile_default_project") or 'openstack')
self._backend_name = (self.configuration.safe_get('share_backend_name')
or CONF.share_backend_name
or DEFAULT_BACKEND_NAME)
self._hostname = self.configuration.safe_get('tegile_nas_server')
username = self.configuration.safe_get('tegile_nas_login')
password = self.configuration.safe_get('tegile_nas_password')
self._api = TegileAPIExecutor(self.__class__.__name__,
self._hostname,
username,
password)
@debugger
def create_share(self, context, share, share_server=None):
"""Is called to create share."""
share_name = share['name']
share_proto = share['share_proto']
pool_name = share_utils.extract_host(share['host'], level='pool')
params = (pool_name, self._default_project, share_name, share_proto)
# Share name coming from the backend is the most reliable. Sometimes
# a few options in Tegile array could cause sharename to be different
# from the one passed to it. Eg. 'projectname-sharename' instead
# of 'sharename' if inherited share properties are selected.
ip, real_share_name = self._api('createShare', params).split()
LOG.info("Created share %(sharename)s, share id %(shid)s.",
{'sharename': share_name, 'shid': share['id']})
return self._get_location_path(real_share_name, share_proto, ip)
@debugger
def extend_share(self, share, new_size, share_server=None):
"""Is called to extend share.
There is no resize for Tegile shares.
We just adjust the quotas. The API is still called 'resizeShare'.
"""
self._adjust_size(share, new_size, share_server)
@debugger
def shrink_share(self, shrink_share, shrink_size, share_server=None):
"""Uses resize_share to shrink a share.
There is no shrink for Tegile shares.
We just adjust the quotas. The API is still called 'resizeShare'.
"""
self._adjust_size(shrink_share, shrink_size, share_server)
@debugger
def _adjust_size(self, share, new_size, share_server=None):
pool, project, share_name = self._get_pool_project_share_name(share)
params = ('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
share_name),
six.text_type(new_size),
'GB')
self._api('resizeShare', params)
@debugger
def delete_share(self, context, share, share_server=None):
"""Is called to remove share."""
pool, project, share_name = self._get_pool_project_share_name(share)
params = ('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
share_name),
True,
False)
self._api('deleteShare', params)
@debugger
def create_snapshot(self, context, snapshot, share_server=None):
"""Is called to create snapshot."""
snap_name = snapshot['name']
pool, project, share_name = self._get_pool_project_share_name(
snapshot['share'])
share = {
'poolName': '%s' % pool,
'projectName': '%s' % project,
'name': share_name,
'availableSize': 0,
'totalSize': 0,
'datasetPath': '%s/%s/%s' %
(pool,
TEGILE_LOCAL_CONTAINER_NAME,
project),
'mountpoint': share_name,
'local': 'true',
}
params = (share, snap_name, False)
LOG.info('Creating snapshot for share_name=%(shr)s'
' snap_name=%(name)s',
{'shr': share_name, 'name': snap_name})
self._api('createShareSnapshot', params)
@debugger
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None, parent_share=None):
"""Create a share from a snapshot - clone a snapshot."""
pool, project, share_name = self._get_pool_project_share_name(share)
params = ('%s/%s/%s/%s@%s%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
snapshot['share_name'],
TEGILE_SNAPSHOT_PREFIX,
snapshot['name'],
),
share_name,
True,
)
ip, real_share_name = self._api('cloneShareSnapshot',
params).split()
share_proto = share['share_proto']
return self._get_location_path(real_share_name, share_proto, ip)
@debugger
def delete_snapshot(self, context, snapshot, share_server=None):
"""Is called to remove snapshot."""
pool, project, share_name = self._get_pool_project_share_name(
snapshot['share'])
params = ('%s/%s/%s/%s@%s%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
share_name,
TEGILE_SNAPSHOT_PREFIX,
snapshot['name']),
False)
self._api('deleteShareSnapshot', params)
@debugger
def ensure_share(self, context, share, share_server=None):
"""Invoked to sure that share is exported."""
# Fetching share name from server, because some configuration
# options can cause sharename different from the OpenStack share name
pool, project, share_name = self._get_pool_project_share_name(share)
params = [
'%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
share_name),
]
ip, real_share_name = self._api('getShareIPAndMountPoint',
params).split()
share_proto = share['share_proto']
location = self._get_location_path(real_share_name, share_proto, ip)
return [location]
@debugger
def _allow_access(self, context, share, access, share_server=None):
"""Allow access to the share."""
share_proto = share['share_proto']
access_type = access['access_type']
access_level = access['access_level']
access_to = access['access_to']
self._check_share_access(share_proto, access_type)
pool, project, share_name = self._get_pool_project_share_name(share)
params = ('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
share_name),
share_proto,
access_type,
access_to,
access_level)
self._api('shareAllowAccess', params)
@debugger
def _deny_access(self, context, share, access, share_server=None):
"""Deny access to the share."""
share_proto = share['share_proto']
access_type = access['access_type']
access_level = access['access_level']
access_to = access['access_to']
self._check_share_access(share_proto, access_type)
pool, project, share_name = self._get_pool_project_share_name(share)
params = ('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
share_name),
share_proto,
access_type,
access_to,
access_level)
self._api('shareDenyAccess', params)
def _check_share_access(self, share_proto, access_type):
if share_proto == 'CIFS' and access_type != 'user':
reason = ('Only USER access type is allowed for '
'CIFS shares.')
LOG.warning(reason)
raise exception.InvalidShareAccess(reason=reason)
elif share_proto == 'NFS' and access_type not in ('ip', 'user'):
reason = ('Only IP or USER access types are allowed for '
'NFS shares.')
LOG.warning(reason)
raise exception.InvalidShareAccess(reason=reason)
elif share_proto not in ('NFS', 'CIFS'):
reason = ('Unsupported protocol \"%s\" specified for '
'access rule.') % share_proto
raise exception.InvalidShareAccess(reason=reason)
@debugger
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
if not (add_rules or delete_rules):
# Recovery mode
pool, project, share_name = (
self._get_pool_project_share_name(share))
share_proto = share['share_proto']
params = ('%s/%s/%s/%s' % (pool,
TEGILE_LOCAL_CONTAINER_NAME,
project,
share_name),
share_proto)
# Clears all current ACLs
# Remove ip and user ACLs if share_proto is NFS
# Remove user ACLs if share_proto is CIFS
self._api('clearAccessRules', params)
# Looping through all rules.
# Will have one API call per rule.
for access in access_rules:
self._allow_access(context, share, access, share_server)
else:
# Adding/Deleting specific rules
for access in delete_rules:
self._deny_access(context, share, access, share_server)
for access in add_rules:
self._allow_access(context, share, access, share_server)
@debugger
def _update_share_stats(self, **kwargs):
"""Retrieve stats info."""
try:
data = self._api(method='getArrayStats',
request_type='get',
fine_logging=False)
            # values come back as strings here; convert them to float
for pool in data.get('pools', []):
pool['total_capacity_gb'] = float(
pool.get('total_capacity_gb', 0))
pool['free_capacity_gb'] = float(
pool.get('free_capacity_gb', 0))
pool['allocated_capacity_gb'] = float(
pool.get('allocated_capacity_gb', 0))
pool['qos'] = pool.pop('QoS_support', False)
pool['reserved_percentage'] = (
self.configuration.reserved_share_percentage)
pool['reserved_snapshot_percentage'] = (
self.configuration.reserved_share_from_snapshot_percentage
or self.configuration.reserved_share_percentage)
pool['dedupe'] = True
pool['compression'] = True
pool['thin_provisioning'] = True
pool['max_over_subscription_ratio'] = (
self.configuration.max_over_subscription_ratio)
data['share_backend_name'] = self._backend_name
data['vendor_name'] = VENDOR
data['driver_version'] = VERSION
data['storage_protocol'] = 'NFS_CIFS'
data['snapshot_support'] = True
data['create_share_from_snapshot_support'] = True
data['qos'] = False
super(TegileShareDriver, self)._update_share_stats(data)
except Exception:
msg = _('Unexpected error while trying to get the '
'usage stats from array.')
LOG.exception(msg)
raise
@debugger
def get_pool(self, share):
"""Returns pool name where share resides.
:param share: The share hosted by the driver.
:return: Name of the pool where given share is hosted.
"""
pool = share_utils.extract_host(share['host'], level='pool')
return pool
@debugger
def get_network_allocations_number(self):
"""Get number of network interfaces to be created."""
return 0
@debugger
def _get_location_path(self, share_name, share_proto, ip=None):
if ip is None:
ip = self._hostname
if share_proto == 'NFS':
location = '%s:%s' % (ip, share_name)
elif share_proto == 'CIFS':
location = r'\\%s\%s' % (ip, share_name)
else:
message = _('Invalid NAS protocol supplied: %s.') % share_proto
raise exception.InvalidInput(message)
export_location = {
'path': location,
'is_admin_only': False,
'metadata': {
'preferred': True,
},
}
return export_location
@debugger
def _get_pool_project_share_name(self, share):
pool = share_utils.extract_host(share['host'], level='pool')
project = self._default_project
share_name = share['name']
return pool, project, share_name
| 38.34556 | 79 | 0.533907 |
794faeb1bd5c8954656b2bbe6484546c425c57e3 | 4,341 | py | Python | rtutil/SettingAPI/classes.py | tuna2134/rt-backend-1 | 71c48f56f21720b05a76dc0cdbdcb18a80d16afb | [
"BSD-4-Clause"
] | null | null | null | rtutil/SettingAPI/classes.py | tuna2134/rt-backend-1 | 71c48f56f21720b05a76dc0cdbdcb18a80d16afb | [
"BSD-4-Clause"
] | null | null | null | rtutil/SettingAPI/classes.py | tuna2134/rt-backend-1 | 71c48f56f21720b05a76dc0cdbdcb18a80d16afb | [
"BSD-4-Clause"
] | null | null | null | # rtutil SettingAPI - Classes
from typing import (Type, Literal, Optional, Union, Iterator,
Any, List, Tuple, Dict, Callable, Coroutine)
from sanic import exceptions
from copy import copy
from discord.ext import commands
import discord
SettingType = Literal["guild", "user"]
ModeType = Literal["write", "read"]
IGNORE_VALUES = ("ITEM_TYPE", "display_name", "on_init", "name")
def get_bylang(data: Union[str, Dict[str, str]], lang: str) -> str:
if isinstance(data, dict):
return data.get(lang, data["ja"])
else:
return data
class Context:
def __init__(self, mode: ModeType, author: Type[discord.abc.User]):
self.mode: ModeType = mode
self.author: Type[discord.abc.User] = author
self.guild: Union[discord.Guild, None] = getattr(author, "guild", None)
class SettingItem:
def __init__(self, name: str, display_name: Union[str, Dict[str, str]],
*args, **kwargs):
self.name = name
self.display_name = display_name
if (on_init := getattr(self, "on_init", None)):
on_init(*args, **kwargs)
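# Maps each item type to the name of the attribute that holds its primary value.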
ITEM_MAINS = {
"text": "text",
"check": "checked",
"radios": "checked",
"list": "index"
}
class SettingData:
def __init__(self, setting_type: SettingType,
description: Union[str, Dict[str, str]],
callback: Callable[[Any], Coroutine],
*args, permissions: List[str] = [], **kwargs):
self.description: str = description
self.permissions: List[str] = permissions
self.callback: Callable[[Context, SettingItem]] = callback
self.setting_type: Literal["guild", "user"] = setting_type
self.data: List[Type[SettingItem]] = args
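    # The callback is invoked with an optional cog (when supplied), followed by a Context
    # and a copy of the SettingItem being read or written.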
async def run_callback(
self, *args, cog: Union[None, commands.Cog] = None
) -> Optional[Type[SettingItem]]:
return await self.callback(*(([cog] if cog else []) + list(args)))
async def get_dictionary(
self, cog: Union[None, commands.Cog], lang: str,
mode: ModeType, member: discord.Member
) -> List[Tuple[str, Dict[str, Union[str, dict]]]]:
return [(item, {
"item_type": item.ITEM_TYPE,
"display_name": get_bylang(item.display_name, lang),
item.ITEM_TYPE: {
name: getattr(item, name)
for name in dir(
await self.run_callback(
Context(mode, member), copy(item),
cog=cog
)
)
if name not in IGNORE_VALUES
and not name.startswith("_")
}
}) for item in self.data]
async def update_setting(
self, cog: Union[None, commands.Cog], item_name: str,
data: str, member: discord.Member) -> None:
for item in self.data:
if item.name == item_name:
item = copy(item)
if item.ITEM_TYPE == "radios":
setattr(item, data, True)
else:
setattr(
item, ITEM_MAINS[item.ITEM_TYPE], data
)
await self.run_callback(
Context("write", member), item,
cog=cog
)
break
else:
raise exceptions.SanicException(
message="(更新する設定が)ないです。",
status_code=404
)
class TextBox(SettingItem):
ITEM_TYPE = "text"
def on_init(self, text: str, multiple_line: bool = False):
self.text: str = text
self.multiple_line: bool = multiple_line
class CheckBox(SettingItem):
ITEM_TYPE = "check"
def on_init(self, checked: bool):
self.checked: bool = checked
class RadioButton(SettingItem):
ITEM_TYPE = "radios"
def on_init(self, data: dict):
for key in data:
setattr(self, key, data[key])
class ListBox(SettingItem):
ITEM_TYPE = "list"
def on_init(self, index: int, texts: List[str]):
self.index: int = index
self.texts: List[str] = texts
| 30.787234 | 80 | 0.535361 |
794fafad4f6fffcac832e7c520b5b960222c736f | 16,155 | py | Python | designate_tempest_plugin/tests/api/v2/test_transfer_request.py | openstack/designate-tempest-plugin | 0ebf5006bb416435af88b03272247e1f8a3c7921 | [
"Apache-2.0"
] | 15 | 2016-04-22T10:00:44.000Z | 2020-01-08T02:36:34.000Z | designate_tempest_plugin/tests/api/v2/test_transfer_request.py | openstack/designate-tempest-plugin | 0ebf5006bb416435af88b03272247e1f8a3c7921 | [
"Apache-2.0"
] | null | null | null | designate_tempest_plugin/tests/api/v2/test_transfer_request.py | openstack/designate-tempest-plugin | 0ebf5006bb416435af88b03272247e1f8a3c7921 | [
"Apache-2.0"
] | 10 | 2016-04-14T16:35:07.000Z | 2019-03-18T14:47:36.000Z | # Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from designate_tempest_plugin.tests import base
from designate_tempest_plugin import data_utils as dns_data_utils
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseTransferRequestTest(base.BaseDnsV2Test):
excluded_keys = ['created_at', 'updated_at', 'key', 'links']
class TransferRequestTest(BaseTransferRequestTest):
credentials = ["primary", "alt", "admin", "system_admin"]
@classmethod
def setup_credentials(cls):
# Do not create network resources for these test.
cls.set_network_resources()
super(TransferRequestTest, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(TransferRequestTest, cls).setup_clients()
if CONF.enforce_scope.designate:
cls.admin_client = (cls.os_system_admin.dns_v2.
TransferRequestClient())
else:
cls.admin_client = cls.os_admin.dns_v2.TransferRequestClient()
cls.zone_client = cls.os_primary.dns_v2.ZonesClient()
cls.alt_zone_client = cls.os_alt.dns_v2.ZonesClient()
cls.client = cls.os_primary.dns_v2.TransferRequestClient()
cls.alt_client = cls.os_alt.dns_v2.TransferRequestClient()
@decorators.idempotent_id('2381d489-ad84-403d-b0a2-8b77e4e966bf')
def test_create_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Ensure we respond with ACTIVE status')
self.assertEqual('ACTIVE', transfer_request['status'])
@decorators.idempotent_id('5deae1ac-7c14-42dc-b14e-4e4b2725beb7')
def test_create_transfer_request_scoped(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
transfer_request_data = dns_data_utils.rand_transfer_request_data(
target_project_id=self.os_alt.credentials.project_id)
LOG.info('Create a scoped zone transfer_request')
_, transfer_request = self.client.create_transfer_request(
zone['id'], transfer_request_data)
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Ensure we respond with ACTIVE status')
self.assertEqual('ACTIVE', transfer_request['status'])
@decorators.idempotent_id('4505152f-0a9c-4f02-b385-2216c914a0be')
def test_create_transfer_request_empty_body(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request_empty_body(
zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Ensure we respond with ACTIVE status')
self.assertEqual('ACTIVE', transfer_request['status'])
@decorators.idempotent_id('64a7be9f-8371-4ce1-a242-c1190de7c985')
def test_show_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Fetch the transfer_request')
_, body = self.client.show_transfer_request(transfer_request['id'])
LOG.info('Ensure the fetched response matches the '
'created transfer_request')
self.assertExpected(transfer_request, body, self.excluded_keys)
@decorators.idempotent_id('5bed4582-9cfb-11eb-a160-74e5f9e2a801')
@decorators.skip_because(bug="1926572")
def test_show_transfer_request_impersonate_another_project(self):
LOG.info('Create a zone')
zone = self.zone_client.create_zone()[1]
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
transfer_request = self.client.create_transfer_request(zone['id'])[1]
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('As Admin tenant fetch the transfer_request without using '
'"x-auth-sudo-project-id" HTTP header. Expected: 404')
self.assertRaises(lib_exc.NotFound,
lambda: self.admin_client.show_transfer_request(
transfer_request['id']))
LOG.info('As Admin tenant fetch the transfer_request using '
'"x-auth-sudo-project-id" HTTP header.')
body = self.admin_client.show_transfer_request(
transfer_request['id'],
headers={'x-auth-sudo-project-id': zone['project_id']})[1]
LOG.info('Ensure the fetched response matches the '
'created transfer_request')
self.assertExpected(transfer_request, body, self.excluded_keys)
@decorators.idempotent_id('235ded87-0c47-430b-8cad-4f3194b927a6')
def test_show_transfer_request_as_target(self):
# Checks the target of a scoped transfer request can see
# the request.
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
transfer_request_data = dns_data_utils.rand_transfer_request_data(
target_project_id=self.os_alt.credentials.project_id)
LOG.info('Create a scoped zone transfer_request')
_, transfer_request = self.client.create_transfer_request(
zone['id'], transfer_request_data)
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Fetch the transfer_request as the target')
_, body = self.alt_client.show_transfer_request(transfer_request['id'])
LOG.info('Ensure the fetched response matches the '
'created transfer_request')
excluded_keys = self.excluded_keys + ["target_project_id",
"project_id"]
self.assertExpected(transfer_request, body, excluded_keys)
@decorators.idempotent_id('7d81c487-aa15-44c4-b3e5-424ab9e6a3e5')
def test_delete_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'],
ignore_errors=lib_exc.NotFound)
LOG.info('Delete the transfer_request')
_, body = self.client.delete_transfer_request(transfer_request['id'])
self.assertRaises(lib_exc.NotFound,
lambda: self.client.show_transfer_request(transfer_request['id']))
@decorators.idempotent_id('ddd42a19-1768-428c-846e-32f9d6493011')
def test_list_transfer_requests(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('List transfer_requests')
_, body = self.client.list_transfer_requests()
self.assertGreater(len(body['transfer_requests']), 0)
@decorators.idempotent_id('db985892-9d02-11eb-a160-74e5f9e2a801')
def test_list_transfer_requests_all_projects(self):
LOG.info('Create a Primary zone')
primary_zone = self.zone_client.create_zone()[1]
self.addCleanup(self.wait_zone_delete,
self.zone_client, primary_zone['id'])
LOG.info('Create an Alt zone')
alt_zone = self.alt_zone_client.create_zone()[1]
self.addCleanup(self.wait_zone_delete,
self.alt_zone_client, alt_zone['id'])
LOG.info('Create a zone transfer_request using Primary client')
primary_transfer_request = self.client.create_transfer_request(
primary_zone['id'])[1]
self.addCleanup(self.client.delete_transfer_request,
primary_transfer_request['id'])
LOG.info('Create a zone transfer_request using Alt client')
alt_transfer_request = self.alt_client.create_transfer_request(
alt_zone['id'])[1]
self.addCleanup(self.alt_client.delete_transfer_request,
alt_transfer_request['id'])
LOG.info('List transfer_requests for all projects using Admin tenant '
'without "x-auth-all-projects" HTTP header. '
'Expected: empty list')
self.assertEqual([], self.admin_client.list_transfer_requests()[1][
'transfer_requests'], 'Failed, requests list is not empty')
LOG.info('List transfer_requests for all projects using Admin tenant '
'and "x-auth-all-projects" HTTP header.')
# Note: This is an all-projects list call, so other tests running
# in parallel will impact the list result set. Since the default
# pagination limit is only 20, we set a param limit of 1000 here.
request_ids = [
item['id'] for item in self.admin_client.list_transfer_requests(
headers=self.all_projects_header,
params={'limit': 1000})[1]['transfer_requests']]
for request_id in [primary_transfer_request['id'],
alt_transfer_request['id']]:
self.assertIn(request_id, request_ids,
"Failed, transfer request ID:{} wasn't found in "
"listed IDs{}".format(request_id, request_ids))
@decorators.idempotent_id('de5e9d32-c723-4518-84e5-58da9722cc13')
def test_update_transfer_request(self):
LOG.info('Create a zone')
_, zone = self.zone_client.create_zone()
self.addCleanup(self.wait_zone_delete, self.zone_client, zone['id'])
LOG.info('Create a zone transfer_request')
_, transfer_request = self.client.create_transfer_request(zone['id'])
self.addCleanup(self.client.delete_transfer_request,
transfer_request['id'])
LOG.info('Update the transfer_request')
data = {
"description": "demo descripion"
}
_, transfer_request_patch = self.client.update_transfer_request(
transfer_request['id'], transfer_request_data=data)
self.assertEqual(data['description'],
transfer_request_patch['description'])
@decorators.idempotent_id('73b754a9-e856-4fd6-80ba-e8d1b80f5dfa')
def test_list_transfer_requests_dot_json_fails(self):
uri = self.client.get_uri('transfer_requests.json')
self.assertRaises(lib_exc.NotFound,
lambda: self.client.get(uri))
class TestTransferRequestNotFound(BaseTransferRequestTest):
@classmethod
def setup_credentials(cls):
# Do not create network resources for these test.
cls.set_network_resources()
super(TestTransferRequestNotFound, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(TestTransferRequestNotFound, cls).setup_clients()
cls.client = cls.os_primary.dns_v2.TransferRequestClient()
@decorators.idempotent_id('d255f72f-ba24-43df-9dba-011ed7f4625d')
def test_show_transfer_request_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.client.show_transfer_request,
data_utils.rand_uuid())
self.assertTransferRequest404(e.resp, e.resp_body)
@decorators.idempotent_id('9ff383fb-c31d-4c6f-8085-7b261e401223')
def test_update_transfer_request_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.client.update_transfer_request,
data_utils.rand_uuid())
self.assertTransferRequest404(e.resp, e.resp_body)
@decorators.idempotent_id('5a4a0755-c01d-448f-b856-b081b96ae77e')
def test_delete_transfer_request_404(self):
e = self.assertRaises(lib_exc.NotFound,
self.client.delete_transfer_request,
data_utils.rand_uuid())
self.assertTransferRequest404(e.resp, e.resp_body)
def assertTransferRequest404(self, resp, resp_body):
self.assertEqual(404, resp.status)
self.assertEqual(404, resp_body['code'])
self.assertEqual("zone_transfer_request_not_found", resp_body['type'])
self.assertEqual("Could not find ZoneTransferRequest",
resp_body['message'])
class TestTransferRequestInvalidId(BaseTransferRequestTest):
@classmethod
def setup_credentials(cls):
# Do not create network resources for these test.
cls.set_network_resources()
super(TestTransferRequestInvalidId, cls).setup_credentials()
@classmethod
def setup_clients(cls):
super(TestTransferRequestInvalidId, cls).setup_clients()
cls.client = cls.os_primary.dns_v2.TransferRequestClient()
@decorators.idempotent_id('2205dd19-ecc7-4c68-9e89-63c47d642b07')
def test_show_transfer_request_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.client.show_transfer_request,
'foo')
self.assertTransferRequestInvalidId(e.resp, e.resp_body)
@decorators.idempotent_id('af0ce46f-10be-4cce-a1d5-1b5c2a39fb97')
def test_update_transfer_request_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.client.update_transfer_request,
'foo')
self.assertTransferRequestInvalidId(e.resp, e.resp_body)
@decorators.idempotent_id('1728dca5-01f1-45f4-b59d-7a981d479394')
def test_delete_transfer_request_invalid_uuid(self):
e = self.assertRaises(lib_exc.BadRequest,
self.client.delete_transfer_request,
'foo')
self.assertTransferRequestInvalidId(e.resp, e.resp_body)
def assertTransferRequestInvalidId(self, resp, resp_body):
self.assertEqual(400, resp.status)
self.assertEqual(400, resp_body['code'])
self.assertEqual("invalid_uuid", resp_body['type'])
| 44.875 | 79 | 0.668709 |
794faff3ce512565452020f3a4c2123d8cb79186 | 425 | py | Python | robots/test/strategies/run_tests/tests/test_pick_best/init.py | memristor/mep2 | bc5cddacba3d740f791f3454b8cb51bda83ce202 | [
"MIT"
] | 5 | 2018-11-27T15:15:00.000Z | 2022-02-10T21:44:13.000Z | robots/test/strategies/run_tests/tests/test_pick_best/init.py | memristor/mep2 | bc5cddacba3d740f791f3454b8cb51bda83ce202 | [
"MIT"
] | 2 | 2018-10-20T15:48:40.000Z | 2018-11-20T05:11:33.000Z | robots/test/strategies/run_tests/tests/test_pick_best/init.py | memristor/mep2 | bc5cddacba3d740f791f3454b8cb51bda83ce202 | [
"MIT"
] | 1 | 2020-02-07T12:44:47.000Z | 2020-02-07T12:44:47.000Z | weight=1
a=_State(2)
from core.Util import col
def run():
@_do
def _():
sleep(0.2)
_print('hehe1')
@_do
def _():
sleep(0.2)
_print('hehe1')
_print(col.yellow,'pick best',col.white)
with _pick_best():
sleep(4)
@_do
def _():
sleep(3)
_print('hehe')
@_do
def _():
_print('1 sec hehe')
sleep(1)
@_do
def _():
_print('2 sec hehe')
sleep(2)
sleep(1)
_print('ow lol')
| 12.142857 | 41 | 0.557647 |
794fb036da944b52799e44ad31da45c01ab100de | 14,813 | py | Python | polyglotdb/query/annotations/attributes/base.py | jeffmielke/PolyglotDB | 619fb487a15db1c4b9e844e843c81e0994973421 | [
"MIT"
] | null | null | null | polyglotdb/query/annotations/attributes/base.py | jeffmielke/PolyglotDB | 619fb487a15db1c4b9e844e843c81e0994973421 | [
"MIT"
] | null | null | null | polyglotdb/query/annotations/attributes/base.py | jeffmielke/PolyglotDB | 619fb487a15db1c4b9e844e843c81e0994973421 | [
"MIT"
] | null | null | null | from ...base.helper import key_for_cypher
from ....exceptions import AnnotationAttributeError, SubsetError
from ..elements import (EqualClauseElement, GtClauseElement, GteClauseElement,
LtClauseElement, LteClauseElement, NotEqualClauseElement,
InClauseElement, NotInClauseElement, ContainsClauseElement, RegexClauseElement,
RightAlignedClauseElement, LeftAlignedClauseElement,
NotRightAlignedClauseElement, NotLeftAlignedClauseElement,
SubsetClauseElement, NotSubsetClauseElement,
NullClauseElement, NotNullClauseElement,
FollowsClauseElement, PrecedesClauseElement)
from ...base import NodeAttribute, Node, CollectionNode, CollectionAttribute
special_attributes = ['duration', 'count', 'rate', 'position', 'subset']
class AnnotationAttribute(NodeAttribute):
"""
Class for information about the attributes of annotations in a graph
query
Parameters
----------
annotation : AnnotationAttribute
Annotation that this attribute refers to
label : str
Label of the attribute
Attributes
----------
annotation : AnnotationAttribute
Annotation that this attribute refers to
label : str
Label of the attribute
output_label : str or None
User-specified label to use in query results
"""
collapsing = False
def __init__(self, annotation, label):
super(AnnotationAttribute, self).__init__(annotation, label)
self.acoustic = False
def __hash__(self):
return hash((self.node, self.label))
def __repr__(self):
return '<AnnotationAttribute \'{}\'>'.format(str(self))
def requires_type(self):
if self.node.hierarchy is None or self.label in special_attributes:
return False
return not self.node.hierarchy.has_token_property(self.node.node_type, self.label)
def for_cypher(self, type=False):
"""Returns annotation duration or annotation type if applicable, otherwise annotation name and label """
if self.label == 'duration':
return '{a}.end - {a}.begin'.format(a=self.node.alias)
if type or self.requires_type():
return '{}.{}'.format(self.node.type_alias, key_for_cypher(self.label))
return '{}.{}'.format(self.node.alias, key_for_cypher(self.label))
@property
def with_alias(self):
"""
        Return the type alias when this attribute is a type property,
        otherwise the token alias.
"""
if self.requires_type():
return self.node.type_alias
else:
return self.node.alias
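    # Comparing the begin/end attributes of two annotations is treated as an alignment
    # test (left-/right-aligned clauses) rather than a plain value comparison.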
def __eq__(self, other):
try:
if self.label == 'begin' and other.label == 'begin':
return LeftAlignedClauseElement(self.node, other.node)
elif self.label == 'end' and other.label == 'end':
return RightAlignedClauseElement(self.node, other.node)
except AttributeError:
pass
if self.label == 'subset':
return SubsetClauseElement(self, other)
if other is None:
return NullClauseElement(self, other)
return EqualClauseElement(self, other)
def __ne__(self, other):
try:
if self.label == 'begin' and other.label == 'begin':
return NotLeftAlignedClauseElement(self.node, other.node)
elif self.label == 'end' and other.label == 'end':
return NotRightAlignedClauseElement(self.node, other.node)
except AttributeError:
pass
if self.label == 'subset':
return NotSubsetClauseElement(self, other)
if other is None:
return NotNullClauseElement(self, other)
return NotEqualClauseElement(self, other)
def __gt__(self, other):
return GtClauseElement(self, other)
def __ge__(self, other):
return GteClauseElement(self, other)
def __lt__(self, other):
return LtClauseElement(self, other)
def __le__(self, other):
return LteClauseElement(self, other)
def in_(self, other):
"""
        Check whether ``other`` is itself a query (has a ``cypher`` attribute);
        if so, execute it and collect the attribute values from its results,
        otherwise use ``other`` directly as the list of values.
Parameters
----------
other : list
attribute will be checked against elements in this list
Returns
-------
string
clause for asserting membership in a filter
"""
if hasattr(other, 'cypher'):
results = other.all()
t = []
for x in results:
t.append(getattr(x, self.label))
else:
t = other
return InClauseElement(self, t)
def not_in_(self, other):
"""
        Check whether ``other`` is itself a query (has a ``cypher`` attribute);
        if so, execute it and collect the attribute values from its results,
        otherwise use ``other`` directly as the list of values.
Parameters
----------
other : list
attribute will be checked against elements in this list
Returns
-------
string
clause for asserting non-membership in a filter
"""
if hasattr(other, 'cypher'):
results = other.all()
t = []
for x in results:
t.append(getattr(x, self.label))
else:
t = other
return NotInClauseElement(self, t)
def regex(self, pattern):
""" Returns a clause for filtering based on regular expressions."""
return RegexClauseElement(self, pattern)
def aliased_for_output(self, type=False):
"""
creates cypher string for output
Returns
-------
string
string for output
"""
return '{} AS {}'.format(self.for_cypher(type), self.output_alias_for_cypher)
def for_type_filter(self):
return self.for_cypher(type=True)
class AnnotationNode(Node):
"""
Class for annotations referenced in graph queries
Parameters
----------
type : str
Annotation type
pos : int
Position in the query, defaults to 0
Attributes
----------
type : str
Annotation type
pos : int
Position in the query
previous : :class:`~polyglotdb.graph.attributes.AnnotationAttribute`
Returns the Annotation of the same type with the previous position
following : :class:`~polyglotdb.graph.attributes.AnnotationAttribute`
Returns the Annotation of the same type with the following position
"""
match_template = '''({token_alias})-[:is_a]->({type_alias})'''
# template = '''({token_alias})'''
begin_template = '{}_{}_begin'
end_template = '{}_{}_end'
alias_template = 'node_{t}'
def __init__(self, node_type, corpus=None, hierarchy=None):
super(AnnotationNode, self).__init__(node_type, corpus=corpus, hierarchy=hierarchy)
def __hash__(self):
return hash(self.key)
def __eq__(self, other):
if not isinstance(other, AnnotationNode):
return False
if self.key != other.key:
return False
return True
def __str__(self):
return '{}_0'.format(self.key)
def __repr__(self):
return '<AnnotationNode object with \'{}\' type>'.format(self.node_type)
def for_match(self):
""" sets 'token_alias' and 'type_alias' keyword arguments for an annotation """
kwargs = {'token_alias': self.define_alias,
'type_alias': self.define_type_alias}
return self.match_template.format(**kwargs)
def filter_by_subset(self, *args):
""" adds each item in args to the hierarchy type_labels"""
if self.hierarchy is not None:
for a in args:
if not self.hierarchy.has_type_subset(self.node_type, a) and not self.hierarchy.has_token_subset(
self.node_type, a):
raise (SubsetError('{} is not a subset of {} types or tokens.'.format(a, self.node_type)))
self.subset_labels = sorted(set(self.subset_labels + list(args)))
return self
@property
def define_type_alias(self):
""" Returns a cypher string for getting all type_labels"""
label_string = ':{}_type'.format(self.node_type)
if self.corpus is not None:
label_string += ':{}'.format(key_for_cypher(self.corpus))
if self.subset_labels:
subset_type_labels = [x for x in self.subset_labels if self.hierarchy.has_type_subset(self.node_type, x)]
if subset_type_labels:
label_string += ':' + ':'.join(map(key_for_cypher, subset_type_labels))
return '{}{}'.format(self.type_alias, label_string)
@property
def define_alias(self):
""" Returns a cypher string for getting all token_labels"""
label_string = ':{}:speech'.format(self.node_type)
if self.corpus is not None:
label_string += ':{}'.format(key_for_cypher(self.corpus))
if self.subset_labels:
subset_token_labels = [x for x in self.subset_labels if self.hierarchy.has_token_subset(self.node_type, x)]
if subset_token_labels:
label_string += ':' + ':'.join(map(key_for_cypher, subset_token_labels))
return '{}{}'.format(self.alias, label_string)
@property
def type_alias(self):
""" Returns a cypher formatted string of type alias"""
return key_for_cypher('type_' + self.alias.replace('`', ''))
@property
def alias(self):
"""Returns a cypher formatted string of keys and prefixes"""
return key_for_cypher(self.alias_template.format(t=self.key))
@property
def with_alias(self):
""" Returns alias """
return self.alias
@property
def withs(self):
""" Returns a list of alias and type_alias """
return [self.alias, self.type_alias]
def precedes(self, other_annotation):
return PrecedesClauseElement(self, other_annotation)
def follows(self, other_annotation):
return FollowsClauseElement(self, other_annotation)
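    # Attribute access is resolved dynamically: precedence (previous/following), pauses,
    # speaker/discourse, acoustic measures, hierarchy traversal and subannotations are all
    # handled here before falling back to a plain AnnotationAttribute.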
def __getattr__(self, key):
if key == 'current':
return self
elif key in ['previous', 'following']:
from .precedence import PreviousAnnotation, FollowingAnnotation
if key == 'previous':
return PreviousAnnotation(self, -1)
else:
return FollowingAnnotation(self, 1)
elif key in ['previous_pause', 'following_pause']:
from .pause import FollowingPauseAnnotation, PreviousPauseAnnotation
node = self
if self.node_type != self.hierarchy.word_name:
node = getattr(self, self.hierarchy.word_name)
if key == 'previous_pause':
return PreviousPauseAnnotation(node)
else:
return FollowingPauseAnnotation(node)
elif key.startswith('previous'):
p, key = key.split('_', 1)
p = self.previous
return getattr(p, key)
elif key.startswith('following'):
p, key = key.split('_', 1)
f = self.following
return getattr(f, key)
elif key == 'follows_pause':
from .pause import FollowsPauseAttribute
return FollowsPauseAttribute(self)
elif key == 'precedes_pause':
from .pause import PrecedesPauseAttribute
return PrecedesPauseAttribute(self)
elif key == 'speaker':
from .speaker import SpeakerAnnotation
return SpeakerAnnotation(self)
elif key == 'discourse':
from .discourse import DiscourseAnnotation
return DiscourseAnnotation(self)
elif key == 'pitch':
from .acoustic import PitchAttribute
return PitchAttribute(self, relative=('relative' in key))
elif key == 'intensity':
from .acoustic import IntensityAttribute
return IntensityAttribute(self, relative=('relative' in key))
elif key == 'formants':
from .acoustic import FormantAttribute
return FormantAttribute(self, relative=('relative' in key))
elif self.hierarchy is not None and key in self.hierarchy.contained_by(self.node_type):
from .hierarchical import HierarchicalAnnotation
types = self.hierarchy.get_higher_types(self.node_type)
prev_node = self
cur_node = None
for t in types:
higher_node = AnnotationNode(t, corpus=self.corpus, hierarchy=self.hierarchy)
cur_node = HierarchicalAnnotation(prev_node, higher_node)
prev_node = cur_node
if t == key:
break
return cur_node
elif self.hierarchy is not None and key in self.hierarchy.contains(self.node_type):
from .path import SubPathAnnotation
return SubPathAnnotation(self, AnnotationNode(key, corpus=self.corpus))
elif self.hierarchy is not None \
and self.node_type in self.hierarchy.subannotations \
and key in self.hierarchy.subannotations[self.node_type]:
from .subannotation import SubAnnotation
return SubAnnotation(self, AnnotationNode(key, corpus=self.corpus))
else:
if key not in special_attributes and self.hierarchy is not None and not self.hierarchy.has_token_property(
self.node_type, key) and not self.hierarchy.has_type_property(self.node_type, key):
properties = [x[0] for x in
self.hierarchy.type_properties[self.node_type] | self.hierarchy.token_properties[
self.node_type]]
raise AnnotationAttributeError(
'The \'{}\' annotation types do not have a \'{}\' property (available: {}).'.format(self.node_type,
key, ', '.join(
properties)))
return AnnotationAttribute(self, key)
class AnnotationCollectionNode(CollectionNode):
def with_statement(self):
""" """
return ', '.join(['collect(n) as {a}'.format(a=self.collection_alias),
'collect(t) as {a}'.format(a=self.collection_type_alias)])
@property
def withs(self):
withs = [self.collection_alias, self.collection_type_alias]
return withs
class AnnotationCollectionAttribute(CollectionAttribute):
pass
| 37.982051 | 119 | 0.607507 |
794fb18cb77dbfe40223c33ac50a017e42898c0e | 1,564 | py | Python | gbe/templatetags/gbe_tags.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 1 | 2021-03-14T11:56:47.000Z | 2021-03-14T11:56:47.000Z | gbe/templatetags/gbe_tags.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 180 | 2019-09-15T19:52:46.000Z | 2021-11-06T23:48:01.000Z | gbe/templatetags/gbe_tags.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | null | null | null | from django import template
from django.conf import settings
from django.utils.formats import date_format
register = template.Library()
@register.inclusion_tag('gbe/tag_templates/mailchimp.tmpl')
def mailchimp():
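    # An MC_API_KEY of 'TEST' is taken to mean no real MailChimp account is configured,
    # so the signup embed is disabled.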
if settings.MC_API_KEY == 'TEST':
return {'have_mc': False}
return {'mc_api_url': settings.MC_API_URL,
'mc_api_user': settings.MC_API_USER,
'mc_api_id': settings.MC_API_ID,
'have_mc': True,
}
@register.filter
def display_track_title(title, truncated_length):
title = title.split('/')[-1]
if len(title) <= truncated_length:
return title
else:
return title[:truncated_length] + "..."
def build_schedule_context(profile):
events = profile.volunteer_schedule()
schedule = [
{'event': str(event),
'time': "%s - %s" % (date_format(event.starttime, "DATETIME_FORMAT"),
date_format(event.starttime + event.duration,
"TIME_FORMAT")),
'location': str(event.location)}
for event in events]
return {'schedule': schedule}
@register.inclusion_tag('gbe/tag_templates/schedule.tmpl')
def volunteer_schedule(profile):
return build_schedule_context(profile)
@register.inclusion_tag('gbe/tag_templates/schedule_plaintext.tmpl')
def volunteer_schedule_plaintext(profile):
return build_schedule_context(profile)
@register.filter
def keyvalue(dict, key):
return dict[key]
@register.filter
def testkey(dict, key):
return key in dict
| 26.965517 | 78 | 0.662404 |
794fb23d7ea2b0eb8cd884bcaa97539ca13b4d78 | 361 | py | Python | 2020/day_02/__main__.py | d02d33pak/Advent-Of-Code | 765b0302c256ad61864095a537a3f6379901b1c2 | [
"MIT"
] | null | null | null | 2020/day_02/__main__.py | d02d33pak/Advent-Of-Code | 765b0302c256ad61864095a537a3f6379901b1c2 | [
"MIT"
] | null | null | null | 2020/day_02/__main__.py | d02d33pak/Advent-Of-Code | 765b0302c256ad61864095a537a3f6379901b1c2 | [
"MIT"
] | null | null | null | """
Day 2 Main Module
"""
from day02 import parse_input, part1, part2
if __name__ == "__main__":
# trying out the new walrus[:=] oprtr in python
if (part := int(input("Enter Part: "))) == 1:
print(part1(parse_input("input.txt")))
elif part == 2:
print(part2(parse_input("input.txt")))
else:
print("Wrong choice [1|2]")
| 24.066667 | 51 | 0.595568 |
794fb24cfe67240a1fc8392b4b2d703bc32fdf6f | 14,460 | py | Python | external/workload-automation/wa/instruments/perf.py | qais-yousef/lisa | 8343e26bf0565589928a69ccbe67b1be03403db7 | [
"Apache-2.0"
] | null | null | null | external/workload-automation/wa/instruments/perf.py | qais-yousef/lisa | 8343e26bf0565589928a69ccbe67b1be03403db7 | [
"Apache-2.0"
] | null | null | null | external/workload-automation/wa/instruments/perf.py | qais-yousef/lisa | 8343e26bf0565589928a69ccbe67b1be03403db7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=unused-argument
import csv
import os
import re
from devlib.collector.perf import PerfCollector
from wa import Instrument, Parameter
from wa.utils.types import list_or_string, list_of_strs, numeric
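# Matches `perf stat` output lines such as 'CPU0   12345   cache-misses   [ 10.50% ]';
# the CPU prefix and the trailing percentage are both optional.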
PERF_COUNT_REGEX = re.compile(r'^(CPU\d+)?\s*(\d+)\s*(.*?)\s*(\[\s*\d+\.\d+%\s*\])?\s*$')
class PerfInstrument(Instrument):
name = 'perf'
description = """
    Perf is a Linux profiling tool based on performance counters.
    Simpleperf is an Android profiling tool with performance counters.
    It is highly recommended to use perf_type = simpleperf when using this instrument
    on Android devices since it recognises Android symbols in record mode and is much more stable
when reporting record .data files. For more information see simpleperf documentation at:
https://android.googlesource.com/platform/system/extras/+/master/simpleperf/doc/README.md
Performance counters are CPU hardware registers that count hardware events
such as instructions executed, cache-misses suffered, or branches
mispredicted. They form a basis for profiling applications to trace dynamic
control flow and identify hotspots.
perf accepts options and events. If no option is given the default '-a' is
used. For events, the default events for perf are migrations and cs. The default
events for simpleperf are raw-cpu-cycles, raw-l1-dcache, raw-l1-dcache-refill, raw-instructions-retired.
They both can be specified in the config file.
Events must be provided as a list that contains them and they will look like
this ::
(for perf_type = perf ) perf_events = ['migrations', 'cs']
(for perf_type = simpleperf) perf_events = ['raw-cpu-cycles', 'raw-l1-dcache']
Events can be obtained by typing the following in the command line on the
device ::
perf list
simpleperf list
Whereas options, they can be provided as a single string as following ::
perf_options = '-a -i'
perf_options = '--app com.adobe.reader'
Options can be obtained by running the following in the command line ::
man perf-stat
"""
parameters = [
Parameter('perf_type', kind=str, allowed_values=['perf', 'simpleperf'], default='perf',
global_alias='perf_type', description="""Specifies which type of perf binaries
to install. Use simpleperf for collecting perf data on android systems."""),
Parameter('command', kind=str, default='stat', allowed_values=['stat', 'record'],
                  global_alias='perf_command', description="""Specifies which perf command to use. If in record mode,
                  the report command will also be executed and its results pulled from the target along with the raw
                  data file."""),
Parameter('events', kind=list_of_strs, global_alias='perf_events',
description="""Specifies the events to be counted."""),
Parameter('optionstring', kind=list_or_string, default='-a',
global_alias='perf_options',
description="""Specifies options to be used for the perf command. This
may be a list of option strings, in which case, multiple instances of perf
will be kicked off -- one for each option string. This may be used to e.g.
                  collect different events from different big.LITTLE clusters. In order to
                  profile a particular application process for Android with simpleperf use
the --app option e.g. --app com.adobe.reader
"""),
Parameter('report_option_string', kind=str, global_alias='perf_report_options', default=None,
description="""Specifies options to be used to gather report when record command
is used. It's highly recommended to use perf_type simpleperf when running on
android devices as reporting options are unstable with perf"""),
Parameter('labels', kind=list_of_strs, default=None,
global_alias='perf_labels',
description="""Provides labels for perf/simpleperf output for each optionstring.
If specified, the number of labels must match the number of ``optionstring``\ s.
"""),
Parameter('force_install', kind=bool, default=False,
description="""
                  Always install the perf binary even if perf is already present on the device.
"""),
]
def __init__(self, target, **kwargs):
super(PerfInstrument, self).__init__(target, **kwargs)
self.collector = None
self.outdir = None
def initialize(self, context):
self.collector = PerfCollector(self.target,
self.perf_type,
self.command,
self.events,
self.optionstring,
self.report_option_string,
self.labels,
self.force_install)
def setup(self, context):
self.outdir = os.path.join(context.output_directory, self.perf_type)
self.collector.set_output(self.outdir)
self.collector.reset()
def start(self, context):
self.collector.start()
def stop(self, context):
self.collector.stop()
def update_output(self, context):
self.logger.info('Extracting reports from target...')
self.collector.get_data()
if self.perf_type == 'perf':
self._process_perf_output(context)
else:
self._process_simpleperf_output(context)
def teardown(self, context):
self.collector.reset()
def _process_perf_output(self, context):
if self.command == 'stat':
self._process_perf_stat_output(context)
elif self.command == 'record':
self._process_perf_record_output(context)
def _process_simpleperf_output(self, context):
if self.command == 'stat':
self._process_simpleperf_stat_output(context)
elif self.command == 'record':
self._process_simpleperf_record_output(context)
def _process_perf_stat_output(self, context):
for host_file in os.listdir(self.outdir):
label = host_file.split('.out')[0]
host_file_path = os.path.join(self.outdir, host_file)
context.add_artifact(label, host_file_path, 'raw')
with open(host_file_path) as fh:
in_results_section = False
for line in fh:
if 'Performance counter stats' in line:
in_results_section = True
next(fh) # skip the following blank line
if not in_results_section:
continue
if not line.strip(): # blank line
in_results_section = False
break
else:
self._add_perf_stat_metric(line, label, context)
@staticmethod
def _add_perf_stat_metric(line, label, context):
line = line.split('#')[0] # comment
match = PERF_COUNT_REGEX.search(line)
if not match:
return
classifiers = {}
cpu = match.group(1)
if cpu is not None:
classifiers['cpu'] = int(cpu.replace('CPU', ''))
count = int(match.group(2))
metric = '{}_{}'.format(label, match.group(3))
context.add_metric(metric, count, classifiers=classifiers)
def _process_perf_record_output(self, context):
for host_file in os.listdir(self.outdir):
label, ext = os.path.splitext(host_file)
context.add_artifact(label, os.path.join(self.outdir, host_file), 'raw')
column_headers = []
column_header_indeces = []
event_type = ''
if ext == '.rpt':
with open(os.path.join(self.outdir, host_file)) as fh:
for line in fh:
words = line.split()
if not words:
continue
event_type = self._get_report_event_type(words, event_type)
column_headers = self._get_report_column_headers(column_headers, words, 'perf')
for column_header in column_headers:
column_header_indeces.append(line.find(column_header))
self._add_report_metric(column_headers,
column_header_indeces,
line,
words,
context,
event_type,
label)
@staticmethod
def _get_report_event_type(words, event_type):
if words[0] != '#':
return event_type
if len(words) == 6 and words[4] == 'event':
event_type = words[5]
event_type = event_type.strip("'")
return event_type
def _process_simpleperf_stat_output(self, context):
labels = []
for host_file in os.listdir(self.outdir):
labels.append(host_file.split('.out')[0])
for opts, label in zip(self.optionstring, labels):
stat_file = os.path.join(self.outdir, '{}{}'.format(label, '.out'))
if '--csv' in opts:
self._process_simpleperf_stat_from_csv(stat_file, context, label)
else:
self._process_simpleperf_stat_from_raw(stat_file, context, label)
@staticmethod
def _process_simpleperf_stat_from_csv(stat_file, context, label):
with open(stat_file) as csv_file:
readCSV = csv.reader(csv_file, delimiter=',')
line_num = 0
for row in readCSV:
if line_num > 0 and 'Total test time' not in row:
classifiers = {'scaled from(%)': row[len(row) - 2].replace('(', '').replace(')', '').replace('%', '')}
context.add_metric('{}_{}'.format(label, row[1]), row[0], 'count', classifiers=classifiers)
line_num += 1
@staticmethod
def _process_simpleperf_stat_from_raw(stat_file, context, label):
with open(stat_file) as fh:
for line in fh:
                if '#' in line:
                    # Only counter lines contain a '#' comment; headers, blank lines and
                    # the total-time footer are skipped.
                    tmp_line = line.split('#')[0]
                    tmp_line = tmp_line.strip()
                    count, metric = tmp_line.split(' ')[0], tmp_line.split(' ')[2]
                    count = int(count.replace(',', ''))
                    scaled_percentage = line.split('(')[1].strip().replace(')', '').replace('%', '')
                    scaled_percentage = int(scaled_percentage)
                    metric = '{}_{}'.format(label, metric)
                    context.add_metric(metric, count, 'count', classifiers={'scaled from(%)': scaled_percentage})
def _process_simpleperf_record_output(self, context):
for host_file in os.listdir(self.outdir):
label, ext = os.path.splitext(host_file)
context.add_artifact(label, os.path.join(self.outdir, host_file), 'raw')
if ext != '.rpt':
continue
column_headers = []
column_header_indeces = []
event_type = ''
with open(os.path.join(self.outdir, host_file)) as fh:
for line in fh:
words = line.split()
if not words:
continue
if words[0] == 'Event:':
event_type = words[1]
column_headers = self._get_report_column_headers(column_headers,
words,
'simpleperf')
for column_header in column_headers:
column_header_indeces.append(line.find(column_header))
self._add_report_metric(column_headers,
column_header_indeces,
line,
words,
context,
event_type,
label)
@staticmethod
def _get_report_column_headers(column_headers, words, perf_type):
if 'Overhead' not in words:
return column_headers
if perf_type == 'perf':
words.remove('#')
column_headers = words
# Concatonate Shared Objects header
if 'Shared' in column_headers:
shared_index = column_headers.index('Shared')
column_headers[shared_index:shared_index + 2] = ['{} {}'.format(column_headers[shared_index],
column_headers[shared_index + 1])]
return column_headers
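    # Report values are recovered by slicing each line between successive column-header offsets.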
@staticmethod
def _add_report_metric(column_headers, column_header_indeces, line, words, context, event_type, label):
if '%' not in words[0]:
return
classifiers = {}
for i in range(1, len(column_headers)):
classifiers[column_headers[i]] = line[column_header_indeces[i]:column_header_indeces[i + 1]].strip()
context.add_metric('{}_{}_Overhead'.format(label, event_type),
numeric(words[0].strip('%')),
'percent',
classifiers=classifiers)
| 45.615142 | 122 | 0.567842 |
794fb2e3c2f739eb867f619027ee0ac937881e96 | 6,626 | py | Python | tools/misc/bsn_proposal_generation.py | kiyoon/Video-Swin-Transformer | 7a0d40ced8fb52c064d1cd11ffa8b0c3bbb77607 | [
"Apache-2.0"
] | 648 | 2021-06-24T19:33:09.000Z | 2022-03-31T06:27:24.000Z | tools/bsn_proposal_generation.py | xumingze0308/mmaction2 | 777546f27f8f5a3c83e10d966e2149be2fc9fa31 | [
"Apache-2.0"
] | 53 | 2021-07-01T03:07:52.000Z | 2022-03-27T16:15:29.000Z | tools/bsn_proposal_generation.py | xumingze0308/mmaction2 | 777546f27f8f5a3c83e10d966e2149be2fc9fa31 | [
"Apache-2.0"
] | 117 | 2021-06-25T01:22:32.000Z | 2022-03-31T08:33:55.000Z | import argparse
import os
import os.path as osp
import mmcv
import numpy as np
import torch.multiprocessing as mp
from mmaction.localization import (generate_bsp_feature,
generate_candidate_proposals)
def load_video_infos(ann_file):
"""Load the video annotations.
Args:
ann_file (str): A json file path of the annotation file.
Returns:
list[dict]: A list containing annotations for videos.
"""
video_infos = []
anno_database = mmcv.load(ann_file)
for video_name in anno_database:
video_info = anno_database[video_name]
video_info['video_name'] = video_name
video_infos.append(video_info)
return video_infos
def generate_proposals(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_proposals_thread, **kwargs):
"""Generate proposals using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results
pgm_proposals_dir (str): Directory to save generated proposals.
pgm_proposals_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_candidate_proposals".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_proposals_thread
processes = []
manager = mp.Manager()
result_dict = manager.dict()
kwargs['result_dict'] = result_dict
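    # One worker process per thread slot: each handles an equal slice of the videos,
    # with the last worker also taking any remainder.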
for tid in range(pgm_proposals_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_proposals_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_candidate_proposals,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_proposals_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
header = 'tmin,tmax,tmin_score,tmax_score,score,match_iou,match_ioa'
for video_name in result_dict:
proposals = result_dict[video_name]
proposal_path = osp.join(pgm_proposals_dir, video_name + '.csv')
np.savetxt(
proposal_path,
proposals,
header=header,
delimiter=',',
comments='')
prog_bar.update()
def generate_features(ann_file, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, pgm_features_thread, **kwargs):
"""Generate proposals features using multi-process.
Args:
ann_file (str): A json file path of the annotation file for
all videos to be processed.
tem_results_dir (str): Directory to read tem results.
pgm_proposals_dir (str): Directory to read generated proposals.
pgm_features_dir (str): Directory to save generated features.
pgm_features_thread (int): Total number of threads.
kwargs (dict): Keyword arguments for "generate_bsp_feature".
"""
video_infos = load_video_infos(ann_file)
num_videos = len(video_infos)
num_videos_per_thread = num_videos // pgm_features_thread
processes = []
manager = mp.Manager()
feature_return_dict = manager.dict()
kwargs['result_dict'] = feature_return_dict
for tid in range(pgm_features_thread - 1):
tmp_video_list = range(tid * num_videos_per_thread,
(tid + 1) * num_videos_per_thread)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
tmp_video_list = range((pgm_features_thread - 1) * num_videos_per_thread,
num_videos)
p = mp.Process(
target=generate_bsp_feature,
args=(
tmp_video_list,
video_infos,
tem_results_dir,
pgm_proposals_dir,
),
kwargs=kwargs)
p.start()
processes.append(p)
for p in processes:
p.join()
# save results
os.makedirs(pgm_features_dir, exist_ok=True)
prog_bar = mmcv.ProgressBar(num_videos)
for video_name in feature_return_dict.keys():
bsp_feature = feature_return_dict[video_name]
feature_path = osp.join(pgm_features_dir, video_name + '.npy')
np.save(feature_path, bsp_feature)
prog_bar.update()
def parse_args():
parser = argparse.ArgumentParser(description='Proposal generation module')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'--mode',
choices=['train', 'test'],
default='test',
help='train or test')
args = parser.parse_args()
return args
def main():
print('Begin Proposal Generation Module')
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
tem_results_dir = cfg.tem_results_dir
pgm_proposals_dir = cfg.pgm_proposals_dir
pgm_features_dir = cfg.pgm_features_dir
if args.mode == 'test':
generate_proposals(cfg.ann_file_val, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_val, tem_results_dir, pgm_proposals_dir,
pgm_features_dir, **cfg.pgm_features_test_cfg)
print('\nFinish feature generation')
elif args.mode == 'train':
generate_proposals(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, **cfg.pgm_proposals_cfg)
print('\nFinish proposal generation')
generate_features(cfg.ann_file_train, tem_results_dir,
pgm_proposals_dir, pgm_features_dir,
**cfg.pgm_features_train_cfg)
print('\nFinish feature generation')
print('Finish Proposal Generation Module')
if __name__ == '__main__':
main()
| 33.464646 | 79 | 0.630546 |
794fb2e4556709410bc96c8724226455c0392c39 | 879 | py | Python | lkmltools/linter/rules/fieldrules/all_caps_rule.py | iserko/lookml-tools | caa46e489e789f59987965be98038cab10c0fdf0 | ["Apache-2.0"] | null | null | null | lkmltools/linter/rules/fieldrules/all_caps_rule.py | iserko/lookml-tools | caa46e489e789f59987965be98038cab10c0fdf0 | ["Apache-2.0"] | null | null | null | lkmltools/linter/rules/fieldrules/all_caps_rule.py | iserko/lookml-tools | caa46e489e789f59987965be98038cab10c0fdf0 | ["Apache-2.0"] | null | null | null | """
an all caps rule
Authors:
Carl Anderson (carl.anderson@weightwatchers.com)
"""
from lkmltools.linter.field_rule import FieldRule
from lkmltools.lookml_field import LookMLField
class AllCapsRule(FieldRule):
"""is the name non all caps?
"""
def run(self, lookml_field):
"""is the name non all caps?
Args:
lookml_field (LookMLField): instance of LookMLField
Returns:
(tuple): tuple containing:
relevant (bool): is this rule relevant for this JSON chunk?
passed (bool): did the rule pass?
"""
if not (
lookml_field.is_dimension()
or lookml_field.is_dimension_group()
or lookml_field.is_measure()
):
return False, None
return True, lookml_field.name != lookml_field.name.upper()
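# Minimal usage sketch (assumes an already-parsed LookMLField instance and that
# the rule can be constructed without arguments; the FieldRule base class may
# require a config in practice):
#
#     rule = AllCapsRule()
#     relevant, passed = rule.run(lookml_field)
#     # passed is False when a dimension/measure name is written in ALL CAPS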
| 24.416667 | 75 | 0.596132 |
794fb34d3960238e99605119f2f9dee1993937ba | 1,144 | py | Python | vccpython/components/evaporator.py | thermalogic/SimVCCE | 23b6f2f5adc1a49ca023d40a8de0ca61f27d4564 | ["MIT"] | null | null | null | vccpython/components/evaporator.py | thermalogic/SimVCCE | 23b6f2f5adc1a49ca023d40a8de0ca61f27d4564 | ["MIT"] | null | null | null | vccpython/components/evaporator.py | thermalogic/SimVCCE | 23b6f2f5adc1a49ca023d40a8de0ca61f27d4564 | ["MIT"] | null | null | null |
from .port import *
class Evaporator:
energy = "QIN"
devtype = "EVAPORATOR"
def __init__(self, dictDev):
""" Initializes the Evaporator """
self.name = dictDev['name']
self.iPort = [Port(dictDev['iPort'])]
self.oPort = [Port(dictDev['oPort'])]
# map the name of port to the port obj
self.portdict = {
"iPort": self.iPort,
"oPort": self.oPort
}
def state(self):
self.iPort[0].p = self.oPort[0].p
def balance(self):
""" mass and energy balance """
if self.iPort[0].mdot is not None:
self.oPort[0].mdot = self.iPort[0].mdot
elif self.oPort[0].mdot is not None:
self.iPort[0].mdot = self.oPort[0].mdot
self.Qin = self.iPort[0].mdot * (self.oPort[0].h - self.iPort[0].h)
def __str__(self):
result = '\n' + self.name
result += '\n' + " PORTS "+Port.title
result += '\n' + " iPort " + self.iPort[0].__str__()
result += '\n' + " oPort " + self.oPort[0].__str__()
result += '\nQin(kW): \t{:>.2f}'.format(self.Qin)
return result
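# Minimal usage sketch: the port dictionaries below are hypothetical and must
# contain whatever keys Port() expects in this package.
#
#     evap = Evaporator({'name': 'Evaporator', 'iPort': {...}, 'oPort': {...}})
#     evap.state()     # propagate the outlet pressure back to the inlet
#     evap.balance()   # sets Qin = mdot * (h_out - h_in), in kW
#     print(evap)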
| 29.333333 | 75 | 0.528846 |
794fb501b60793a42de59c8f2f065ca2568e28c1 | 2,491 | py | Python | test/functional/mempool_resurrect.py | karbonbaron/monicoin | b87519b1e1d84778afddff3b5a530da1ab545157 | ["MIT"] | null | null | null | test/functional/mempool_resurrect.py | karbonbaron/monicoin | b87519b1e1d84778afddff3b5a530da1ab545157 | ["MIT"] | null | null | null | test/functional/mempool_resurrect.py | karbonbaron/monicoin | b87519b1e1d84778afddff3b5a530da1ab545157 | ["MIT"] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test resurrection of mined transactions when the blockchain is re-organized."""
from test_framework.test_framework import MonicoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet import MiniWallet
class MempoolCoinbaseTest(MonicoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
node = self.nodes[0]
wallet = MiniWallet(node)
# Add enough mature utxos to the wallet so that all txs spend confirmed coins
wallet.generate(3)
node.generate(100)
# Spend block 1/2/3's coinbase transactions
# Mine a block
# Create three more transactions, spending the spends
# Mine another block
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again
blocks = []
spends1_ids = [wallet.send_self_transfer(from_node=node)['txid'] for _ in range(3)]
blocks.extend(node.generate(1))
spends2_ids = [wallet.send_self_transfer(from_node=node)['txid'] for _ in range(3)]
blocks.extend(node.generate(1))
spends_ids = set(spends1_ids + spends2_ids)
# mempool should be empty, all txns confirmed
assert_equal(set(node.getrawmempool()), set())
confirmed_txns = set(node.getblock(blocks[0])['tx'] + node.getblock(blocks[1])['tx'])
# Checks that all spend txns are contained in the mined blocks
assert spends_ids < confirmed_txns
# Use invalidateblock to re-org back
node.invalidateblock(blocks[0])
# All txns should be back in mempool with 0 confirmations
assert_equal(set(node.getrawmempool()), spends_ids)
# Generate another block, they should all get mined
blocks = node.generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(node.getrawmempool()), set())
confirmed_txns = set(node.getblock(blocks[0])['tx'])
assert spends_ids < confirmed_txns
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| 38.921875 | 93 | 0.680851 |
794fb7414b331c197d49a8a69b78efe486131ddc | 679 | py | Python | interpretImage.py | BlanchonMarc/RandomImageGenerator | fd684c8f27d0c7eeec66cd2521d482a8405dd097 | ["MIT"] | null | null | null | interpretImage.py | BlanchonMarc/RandomImageGenerator | fd684c8f27d0c7eeec66cd2521d482a8405dd097 | ["MIT"] | null | null | null | interpretImage.py | BlanchonMarc/RandomImageGenerator | fd684c8f27d0c7eeec66cd2521d482a8405dd097 | ["MIT"] | null | null | null | import numpy as np
import glob
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.colors as color
import math
def ParamToInten(AoP, DoP, Inten, angle):
    # Reconstruct an intensity sample from the polarization parameters:
    # I(angle) = (Inten / 2) * (1 + DoP * cos(2*AoP - 2*angle)), angles in degrees.
    # np.radians is used instead of math.radians so the function accepts both
    # scalars and the full AoP/DoP/Inten arrays extracted below.
    return ((Inten/2.0) * (1 + DoP*np.cos(np.radians(2*AoP) - 2*np.radians(angle))))
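# Worked example with scalar inputs: for AoP=30 deg, DoP=0.5, Inten=1.0 and an
# analyser angle of 30 deg the cosine term is cos(0) = 1, so
# ParamToInten(30.0, 0.5, 1.0, 30.0) returns (1.0 / 2) * (1 + 0.5) = 0.75.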
if __name__ == "__main__":
imagedir = "output/image/"
listimage = glob.glob(f"{imagedir}*.tiff")
for pth in listimage:
img = color.rgb_to_hsv(mpimg.imread(pth))
#array = np.zeros_like(img)
AoP = img[:, :, 0] * 360.0
DoP = img[:, :, 1] * 100.0
Inten = img[:, :, 2] / 255.0
print(np.amax(AoP))
# plt.imshow(img)
# plt.show()
| 24.25 | 88 | 0.600884 |
794fb7855cb5f92af770939a9089f8bd580cd525 | 104 | py | Python | src/lib/application/client/api.py | kolesa-team/python-ms-skeleton | b0ac658a0539ce18bb4384f96dc7f7473e701111 | ["MIT"] | null | null | null | src/lib/application/client/api.py | kolesa-team/python-ms-skeleton | b0ac658a0539ce18bb4384f96dc7f7473e701111 | ["MIT"] | 1 | 2021-06-01T22:48:10.000Z | 2021-06-01T22:48:10.000Z | src/lib/application/client/api.py | kolesa-team/python-ms-skeleton | b0ac658a0539ce18bb4384f96dc7f7473e701111 | ["MIT"] | 1 | 2018-10-12T11:40:55.000Z | 2018-10-12T11:40:55.000Z | import requests
from src.lib.application.httpClient import HttpClient
class Api(HttpClient):
pass | 14.857143 | 53 | 0.798077 |
794fb883a0aa7518ae60e193f3ce0ad76426c4bb | 932 | py | Python | vc_rcnn/data/datasets/list_dataset.py | alfred100p/VC-R-CNN | c887f5b6db6932fb5c828c8037e299ce5baadb9e | ["MIT"] | 344 | 2020-02-27T07:48:49.000Z | 2022-02-02T10:37:49.000Z | vc_rcnn/data/datasets/list_dataset.py | aLefred0/VC-R-CNN | 5b01e44618c406592184275b734d3fbd3f11234c | ["MIT"] | 18 | 2020-03-01T05:22:21.000Z | 2021-08-12T15:06:34.000Z | vc_rcnn/data/datasets/list_dataset.py | aLefred0/VC-R-CNN | 5b01e44618c406592184275b734d3fbd3f11234c | ["MIT"] | 59 | 2020-02-29T12:53:41.000Z | 2022-03-07T02:17:35.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""
from PIL import Image
from vc_rcnn.structures.bounding_box import BoxList
class ListDataset(object):
def __init__(self, image_lists, transforms=None):
self.image_lists = image_lists
self.transforms = transforms
def __getitem__(self, item):
img = Image.open(self.image_lists[item]).convert("RGB")
# dummy target
w, h = img.size
target = BoxList([[0, 0, w, h]], img.size, mode="xyxy")
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.image_lists)
def get_img_info(self, item):
"""
Return the image dimensions for the image, without
loading and pre-processing it
"""
pass
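# Minimal usage sketch (the image paths are hypothetical):
#
#     dataset = ListDataset(["/data/images/000001.jpg", "/data/images/000002.jpg"])
#     img, target = dataset[0]   # PIL image plus a full-image dummy BoxList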
| 25.189189 | 71 | 0.635193 |
794fb98b738d31fb0a1ff53883f39400075bf4d6 | 513 | py | Python | blog/migrations/0003_auto_20191008_1328.py | joaopaiva/BlogAC318 | 12a0723043259864cf16a138e1d5224282f6a709 | ["MIT"] | null | null | null | blog/migrations/0003_auto_20191008_1328.py | joaopaiva/BlogAC318 | 12a0723043259864cf16a138e1d5224282f6a709 | ["MIT"] | 1 | 2021-06-10T22:03:59.000Z | 2021-06-10T22:03:59.000Z | blog/migrations/0003_auto_20191008_1328.py | joaopaiva/BlogAC318 | 12a0723043259864cf16a138e1d5224282f6a709 | ["MIT"] | null | null | null | # Generated by Django 2.0.13 on 2019-10-08 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_post_post_type'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='post_type',
),
migrations.AddField(
model_name='post',
name='id_video_youtube',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
| 22.304348 | 73 | 0.576998 |
794fba1005de2980db1212e20654d6493364c46d | 778 | py | Python | packages/postgres-database/src/simcore_postgres_database/models/scicrunch_resources.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | ["MIT"] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | packages/postgres-database/src/simcore_postgres_database/models/scicrunch_resources.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | ["MIT"] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | packages/postgres-database/src/simcore_postgres_database/models/scicrunch_resources.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | ["MIT"] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | """ Stores SOME of the information associated to Research Resource Identifiers (RRIDs) as defined in https://scicrunch.org/resources
"""
import sqlalchemy as sa
from sqlalchemy.sql import func
from .base import metadata
scicrunch_resources = sa.Table(
"scicrunch_resources",
metadata,
sa.Column("rrid", sa.String, nullable=False, primary_key=True),
sa.Column("name", sa.String, nullable=False),
sa.Column("description", sa.String, nullable=True),
sa.Column(
"creation_date", sa.DateTime(), nullable=False, server_default=func.now()
),
sa.Column(
"last_change_date",
sa.DateTime(),
nullable=False,
server_default=func.now(),
onupdate=func.now(), # this will auto-update on modification
),
)
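# Sketch of a SQLAlchemy core insert against this table (values are illustrative
# placeholders, not real RRIDs):
#
#     stmt = scicrunch_resources.insert().values(
#         rrid="RRID:SCR_000000",
#         name="Example resource",
#         description="Added for illustration only",
#     )
#     # connection.execute(stmt)  # with a connection bound to the simcore database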
| 29.923077 | 132 | 0.679949 |
794fbb023063d4b88c35838db3913d2f58b25750 | 60 | py | Python | cifar10/models/__init__.py | huangleiBuaa/IterNorm-pytorch | 574b1247036106c40d199c73ab29785b16d05407 | ["BSD-2-Clause"] | 28 | 2019-04-23T14:40:47.000Z | 2022-03-28T13:55:21.000Z | cifar10/models/__init__.py | huangleiBuaa/IterNorm-pytorch | 574b1247036106c40d199c73ab29785b16d05407 | ["BSD-2-Clause"] | 2 | 2019-06-27T08:27:26.000Z | 2021-07-03T14:40:44.000Z | cifar10/models/__init__.py | huangleiBuaa/IterNorm-pytorch | 574b1247036106c40d199c73ab29785b16d05407 | ["BSD-2-Clause"] | 8 | 2019-04-10T13:20:25.000Z | 2021-07-29T11:10:49.000Z | from .resnet import *
from .vgg import *
from .WRN import *
| 15 | 21 | 0.7 |
794fbb1257669e0cc1524949f05e56ce6c145241 | 2,389 | py | Python | tools/_gen_firrtl_modules.py | edwardcwang/coreir | f71c5864c21cf82112dc641ab0400aac778685c9 | ["BSD-3-Clause"] | 104 | 2017-04-05T23:23:32.000Z | 2022-03-18T03:47:05.000Z | tools/_gen_firrtl_modules.py | edwardcwang/coreir | f71c5864c21cf82112dc641ab0400aac778685c9 | ["BSD-3-Clause"] | 588 | 2017-04-02T17:11:29.000Z | 2021-12-22T01:37:47.000Z | tools/_gen_firrtl_modules.py | edwardcwang/coreir | f71c5864c21cf82112dc641ab0400aac778685c9 | ["BSD-3-Clause"] | 27 | 2017-03-15T17:05:13.000Z | 2022-03-30T20:23:08.000Z | class Source:
def __init__(self):
self._source = ""
def add_line(self, text=""):
self._source += text + "\n"
def __str__(self):
return self._source
source = Source()
ops = {
"unary": {
"not" : "not(in)",
"neg" : "asUInt(neg(in))", # `neg` works on UInts, so we don't need to interpret the input
"andr" : "andr(in)",
"orr" : "orr(in)",
"xorr" : "xorr(in)"
},
"binary": {
"and" : "and(in0, in1)",
"or" : "or(in0, in1)",
"xor" : "xor(in0, in1)",
"dshl" : "dshl(in0, in1)",
"dlshr" : "dshr(in0, in1)",
"dashr" : "asUInt(dshr(asSInt(in0), in1))",
"add" : "add(in0, in1)",
"sub" : "sub(in0, in1)",
"mul" : "mul(in0, in1)",
"udiv" : "div(in0, in1)",
"urem" : "mod(in0, in1)",
"sdiv" : "asUInt(div(asSInt(in0), asSInt(in1)))",
"srem" : "asUInt(mod(asSInt(in0), asSInt(in1)))",
# "smod" : "$signed(in0) % $signed(in1)", # TODO not sure if this should be mod? Verilog version doesn't implement it
"eq" : "eq(in0, in1)",
"slt" : "asUInt(lt(asSInt(in0), asSInt(in1)))",
"sgt" : "asUInt(gt(asSInt(in0), asSInt(in1)))",
"sle" : "asUInt(leq(asSInt(in0), asSInt(in1)))",
"sge" : "asUInt(gte(asSInt(in0), asSInt(in1)))",
"ult" : "lt(in0, in1)",
"ugt" : "gt(in0, in1)",
"ule" : "leq(in0, in1)",
"uge" : "geq(in0, in1)"
}
# "static_shift": {
# "lshr" : "in >> SHIFTBITS",
# "shl" : "in << SHIFTBITS",
# "ashr" : "$signed(in) >>> SHIFTBITS"
# },
}
source = Source()
for _type, ops_set in ops.items():
for op, body in ops_set.items():
source.add_line( " module coreir_{} :".format(op))
if _type == "unary":
source.add_line(" input in : UInt")
source.add_line(" output out : UInt")
source.add_line()
if _type == "binary":
source.add_line(" input in0 : UInt")
source.add_line(" input in1 : UInt")
source.add_line(" output out : UInt")
source.add_line()
source.add_line( " assign out <= {}".format(body))
source.add_line()
with open("coreir_primitive_wrappers.fir", "w") as output:
output.write(str(source))
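# For reference, a binary op such as "add" comes out of this generator roughly as
# the following text (a sketch of the emitted wrapper, not checked against a
# FIRRTL parser):
#
#   module coreir_add :
#     input in0 : UInt
#     input in1 : UInt
#     output out : UInt
#
#     assign out <= add(in0, in1)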
| 31.853333 | 126 | 0.480117 |
794fbbb990a003bc87a24cb7fe158b3ba7652c51 | 5,609 | py | Python | tb_rest_client/models/models_pe/client_attributes_querying_snmp_communication_config.py | samson0v/python_tb_rest_client | 08ff7898740f7cec2170e85d5c3c89e222e967f7 | ["Apache-2.0"] | 30 | 2020-06-19T06:42:50.000Z | 2021-08-23T21:16:36.000Z | tb_rest_client/models/models_pe/client_attributes_querying_snmp_communication_config.py | samson0v/python_tb_rest_client | 08ff7898740f7cec2170e85d5c3c89e222e967f7 | ["Apache-2.0"] | 25 | 2021-08-30T01:17:27.000Z | 2022-03-16T14:10:14.000Z | tb_rest_client/models/models_pe/client_attributes_querying_snmp_communication_config.py | samson0v/python_tb_rest_client | 08ff7898740f7cec2170e85d5c3c89e222e967f7 | ["Apache-2.0"] | 23 | 2020-07-06T13:41:54.000Z | 2021-08-23T21:04:50.000Z | # coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ClientAttributesQueryingSnmpCommunicationConfig(object):
"""NOTE: This class is auto generated by the swagger code generator program.
from tb_rest_client.api_client import ApiClient
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'mappings': 'list[SnmpMapping]',
'querying_frequency_ms': 'int',
'spec': 'str'
}
attribute_map = {
'mappings': 'mappings',
'querying_frequency_ms': 'queryingFrequencyMs',
'spec': 'spec'
}
def __init__(self, mappings=None, querying_frequency_ms=None, spec=None): # noqa: E501
"""ClientAttributesQueryingSnmpCommunicationConfig - a model defined in Swagger""" # noqa: E501
self._mappings = None
self._querying_frequency_ms = None
self._spec = None
self.discriminator = None
if mappings is not None:
self.mappings = mappings
if querying_frequency_ms is not None:
self.querying_frequency_ms = querying_frequency_ms
if spec is not None:
self.spec = spec
@property
def mappings(self):
"""Gets the mappings of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:return: The mappings of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:rtype: list[SnmpMapping]
"""
return self._mappings
@mappings.setter
def mappings(self, mappings):
"""Sets the mappings of this ClientAttributesQueryingSnmpCommunicationConfig.
:param mappings: The mappings of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:type: list[SnmpMapping]
"""
self._mappings = mappings
@property
def querying_frequency_ms(self):
"""Gets the querying_frequency_ms of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:return: The querying_frequency_ms of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:rtype: int
"""
return self._querying_frequency_ms
@querying_frequency_ms.setter
def querying_frequency_ms(self, querying_frequency_ms):
"""Sets the querying_frequency_ms of this ClientAttributesQueryingSnmpCommunicationConfig.
:param querying_frequency_ms: The querying_frequency_ms of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:type: int
"""
self._querying_frequency_ms = querying_frequency_ms
@property
def spec(self):
"""Gets the spec of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:return: The spec of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:rtype: str
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this ClientAttributesQueryingSnmpCommunicationConfig.
:param spec: The spec of this ClientAttributesQueryingSnmpCommunicationConfig. # noqa: E501
:type: str
"""
allowed_values = ["CLIENT_ATTRIBUTES_QUERYING", "SHARED_ATTRIBUTES_SETTING", "TELEMETRY_QUERYING", "TO_DEVICE_RPC_REQUEST"] # noqa: E501
if spec not in allowed_values:
raise ValueError(
"Invalid value for `spec` ({0}), must be one of {1}" # noqa: E501
.format(spec, allowed_values)
)
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ClientAttributesQueryingSnmpCommunicationConfig, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClientAttributesQueryingSnmpCommunicationConfig):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.189349 | 145 | 0.629167 |
794fbbe58fbe9b74ba63157e11523b1078e9b8c3 | 1,200 | py | Python | modules/boost_math/doc/gamma_p_inv.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | ["BSL-1.0"] | 1 | 2022-03-24T03:35:10.000Z | 2022-03-24T03:35:10.000Z | modules/boost_math/doc/gamma_p_inv.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | ["BSL-1.0"] | null | null | null | modules/boost_math/doc/gamma_p_inv.py | brycelelbach/nt2 | 73d7e8dd390fa4c8d251c6451acdae65def70e0b | ["BSL-1.0"] | null | null | null | [ ## this file was manually modified by jt
{
'functor' : {
'arity' : '2',
'call_types' : [],
'ret_arity' : '0',
'rturn' : {
'default' : 'T',
},
'simd_types' : [],
'special' : ['boost_math'],
'type_defs' : [],
'types' : ['real_'],
},
'info' : 'manually modified',
'unit' : {
'global_header' : {
'first_stamp' : 'created by jt the 07/03/2011',
'included' : [],
'notes' : [],
'stamp' : 'modified by jt the 07/03/2011',
},
'ranges' : {
'default' : [['T(-10)', 'T(10)'], ['T(0)', 'T(1)']],
},
'specific_values' : {
},
'verif_test' : {
'property_call' : {
'default' : ['nt2::boost_math::gamma_p_inv(a0,a1)'],
},
'property_value' : {
'default' : ['nt2::boost_math::gamma_p_inv(a0,a1)'],
},
'simd' : {
},
'ulp_thresh' : {
'default' : ['1'],
},
},
},
},
]
| 27.272727 | 69 | 0.334167 |
794fbc5a1b53297dc32c0a49adada8c020e5e2e0 | 1,342 | py | Python | site_info/migrations/0009_auto_20180220_2058.py | WarwickAnimeSoc/aniMango | f927c2bc6eb484561ab38172ebebee6f03c8b13b | ["MIT"] | null | null | null | site_info/migrations/0009_auto_20180220_2058.py | WarwickAnimeSoc/aniMango | f927c2bc6eb484561ab38172ebebee6f03c8b13b | ["MIT"] | 6 | 2016-10-18T14:52:05.000Z | 2020-06-18T15:14:41.000Z | site_info/migrations/0009_auto_20180220_2058.py | WarwickAnimeSoc/aniMango | f927c2bc6eb484561ab38172ebebee6f03c8b13b | ["MIT"] | 6 | 2020-02-07T17:37:37.000Z | 2021-01-15T00:01:43.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-20 20:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('site_info', '0008_auto_20170923_0911'),
]
operations = [
migrations.AlterField(
model_name='exec',
name='academic_year',
field=models.IntegerField(choices=[(1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018)], default=2018, verbose_name='Academic year starting'),
),
migrations.AlterField(
model_name='historyentry',
name='academic_year',
field=models.IntegerField(choices=[(1997, 1997), (1998, 1998), (1999, 1999), (2000, 2000), (2001, 2001), (2002, 2002), (2003, 2003), (2004, 2004), (2005, 2005), (2006, 2006), (2007, 2007), (2008, 2008), (2009, 2009), (2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018)], default=2018, verbose_name='Academic year starting'),
),
]
| 51.615385 | 409 | 0.603577 |
794fbc7bef1a26bd51327d4e19d2ef5b72fc6754 | 4,957 | py | Python | spark/src/main/resources/python/zeppelin_pyspark.py | fogbeam/fogbeam_zeppelin | 6a096c90d9ef46c337b02995ae7caab8659768a9 | ["Apache-2.0"] | 1 | 2018-01-29T06:23:01.000Z | 2018-01-29T06:23:01.000Z | spark/src/main/resources/python/zeppelin_pyspark.py | BabbleGrabble/incubator-zeppelin-oauth2 | 784bba97678282cdd65ad5093d5bf883d101bd9e | ["Apache-2.0"] | null | null | null | spark/src/main/resources/python/zeppelin_pyspark.py | BabbleGrabble/incubator-zeppelin-oauth2 | 784bba97678282cdd65ad5093d5bf883d101bd9e | ["Apache-2.0"] | null | null | null | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys, getopt, traceback
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.storagelevel import StorageLevel
from pyspark.accumulators import Accumulator, AccumulatorParam
from pyspark.broadcast import Broadcast
from pyspark.serializers import MarshalSerializer, PickleSerializer
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, SchemaRDD, Row
class Logger(object):
def __init__(self):
self.out = ""
def write(self, message):
self.out = self.out + message
def get(self):
return self.out
def reset(self):
self.out = ""
class PyZeppelinContext(dict):
def __init__(self, zc):
self.z = zc
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print gateway.jvm.org.apache.zeppelin.spark.ZeppelinContext.showDF(self.z, obj._jdf)
else:
print str(obj)
  # Implementing these special methods makes the ZeppelinContext behave like a dict and more Pythonic to operate on
def __setitem__(self, key, item):
self.z.put(key, item)
def __getitem__(self, key):
return self.z.get(key)
def __delitem__(self, key):
self.z.remove(key)
def __contains__(self, item):
return self.z.containsKey(item)
def add(self, key, value):
self.__setitem__(key, value)
def put(self, key, value):
self.__setitem__(key, value)
def get(self, key):
return self.__getitem__(key)
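# Typical calls from a %pyspark paragraph (sketch; the `z` instance is bound
# further down in this script):
#
#     z.put("threshold", 0.7)   # share a value through the ZeppelinContext
#     z.get("threshold")        # read it back (also visible to other interpreters)
#     z.show(df)                # render a Spark DataFrame as a Zeppelin table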
output = Logger()
sys.stdout = output
sys.stderr = output
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = sys.argv[2]
if sparkVersion.startswith("1.4"):
gateway = JavaGateway(client, auto_convert = True)
else:
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
intp.onPythonScriptInitialized()
jsc = intp.getJavaSparkContext()
if sparkVersion.startswith("1.2"):
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
elif sparkVersion.startswith("1.3"):
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
elif sparkVersion.startswith("1.4"):
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
java_import(gateway.jvm, "scala.Tuple2")
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
sqlc = SQLContext(sc, intp.getSQLContext())
sqlContext = sqlc
z = PyZeppelinContext(intp.getZeppelinContext())
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
jobGroup = req.jobGroup()
final_code = None
for s in stmts:
if s == None:
continue
# skip comment
s_stripped = s.strip()
if len(s_stripped) == 0 or s_stripped.startswith("#"):
continue
if final_code:
final_code += "\n" + s
else:
final_code = s
if final_code:
compiledCode = compile(final_code, "<string>", "exec")
sc.setJobGroup(jobGroup, "Zeppelin")
eval(compiledCode)
intp.setStatementsFinished(output.get(), False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| 30.042424 | 92 | 0.730684 |
794fbcc001e1123a1b5576ea4ab34a6a6f7f9ec1 | 4,145 | py | Python | docs/source/examples/kkr_bandstruc_example.py | broeder-j/aiida-kkr | fe7f39aa8f1396e02c0eb51c1cd2a7dc050620d6 | ["MIT"] | 2 | 2017-11-09T10:21:43.000Z | 2017-11-09T18:42:05.000Z | docs/source/examples/kkr_bandstruc_example.py | broeder-j/aiida-kkr | fe7f39aa8f1396e02c0eb51c1cd2a7dc050620d6 | ["MIT"] | 8 | 2018-07-19T12:33:28.000Z | 2018-10-18T10:02:32.000Z | docs/source/examples/kkr_bandstruc_example.py | broeder-j/aiida-kkr | fe7f39aa8f1396e02c0eb51c1cd2a7dc050620d6 | ["MIT"] | null | null | null | #!/usr/bin/env python
# connect to aiida db
from aiida import load_dbenv, is_dbenv_loaded
if not is_dbenv_loaded():
load_dbenv()
# load essential aiida classes
from aiida.orm import Code, DataFactory, load_node
StructureData = DataFactory('structure')
ParameterData = DataFactory('parameter')
# helper function:
def wait_for_it(calc, maxwait=300):
from time import sleep
N = 0
print 'start waiting for calculation to finish'
while not calc.has_finished() and N<(maxwait/2.):
N += 1
if N%5==0:
print('.')
sleep(2.)
print('waiting done after {} seconds: {} {}'.format(N*2, calc.has_finished(), calc.has_finished_ok()))
# some settings (parent calculations):
# converged KKR calculation (taken form bulk Cu KKR example)
kkr_calc_converged = load_node(24951)
# previous DOS calculation started from converged KKR calc (taken from KKRimp DOS example, i.e. GF host calculation with DOS contour)
host_dos_calc = load_node(25030)
# generate kpoints for bandstructure calculation
from aiida_kkr.calculations.voro import VoronoiCalculation
struc, voro_parent = VoronoiCalculation.find_parent_structure(kkr_calc_converged.out.remote_folder)
from aiida.tools.data.array.kpoints import get_explicit_kpoints_path
kpts = get_explicit_kpoints_path(struc).get('explicit_kpoints')
# run bandstructure calculation
# create bandstructure calculation reusing old settings (including same computer and resources in this example)
kkrcode = kkr_calc_converged.get_code()
kkrcalc = kkrcode.new_calc()
kkrcalc.use_kpoints(kpts) # pass kpoints as input
kkrcalc.use_parent_folder(kkr_calc_converged.out.remote_folder)
kkrcalc.set_resources(kkr_calc_converged.get_resources())
# change parameters to qdos settings (E range and number of points)
from aiida_kkr.tools.kkr_params import kkrparams
qdos_params = kkrparams(**kkr_calc_converged.inp.parameters.get_dict()) # reuse old settings
# reuse the same emin/emax settings as in DOS run (extracted from input parameter node)
qdos_params.set_multiple_values(EMIN=host_dos_calc.inp.parameters.get_dict().get('EMIN'),
EMAX=host_dos_calc.inp.parameters.get_dict().get('EMAX'),
NPT2=100)
kkrcalc.use_parameters(ParameterData(dict=qdos_params.get_dict()))
# store and submit calculation
kkrcalc.store_all()
kkrcalc.submit()
wait_for_it(kkrcalc, maxwait=600)
# plot results
# extract kpoint labels
klbl = kpts.labels
# fix overlapping labels (nicer plotting)
tmp = klbl[2]
tmp = (tmp[0], '\n'+tmp[1]+' ')
klbl[2] = tmp
tmp = klbl[3]
tmp = (tmp[0], ' '+tmp[1])
klbl[3] = tmp
#plotting of bandstructure and previously calculated DOS data
# load DOS data
from aiida_kkr.tools.common_functions import interpolate_dos
dospath_host = host_dos_calc.out.retrieved.get_abs_path('')
ef, dos, dos_interpol = interpolate_dos(dospath_host, return_original=True)
dos, dos_interpol = dos[0], dos_interpol[0]
# load qdos file and reshape
from numpy import loadtxt, sum, log
qdos_file = kkrcalc.out.retrieved.get_abs_path('qdos.01.1.dat')
q = loadtxt(qdos_file)
nepts = len(set(q[:,0]))
data = q[:,5:].reshape(nepts, len(q)/nepts, -1)
e = (q[::len(q)/nepts, 0]-ef)*13.6
# plot bandstructure
from matplotlib.pyplot import figure, pcolormesh, show, xticks, ylabel, axhline, axvline, gca, title, plot, ylim, xlabel, suptitle
figure(figsize=((8, 4.8)))
pcolormesh(range(len(q)/nepts), e, log(sum(abs(data), axis=2)), lw=0)
xticks([i[0] for i in klbl], [i[1] for i in klbl])
ylabel('E-E_F (eV)')
axhline(0, color='lightgrey', lw=1)
title('band structure')
# plot DOS on right hand side of bandstructure plot
axBand = gca()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(axBand)
axDOS = divider.append_axes("right", 1.2, pad=0.1, sharey=axBand)
plot(dos_interpol[:,1]/13.6, (dos_interpol[:,0]-ef)*13.6)
ylim(e.min(), e.max())
axhline(0, color='grey', lw=1)
axvline(0, color='grey', lw=1)
axDOS.yaxis.set_tick_params(labelleft=False, labelright=True, right=True, left=False)
xlabel('states/eV')
title('DOS')
suptitle(struc.get_formula(), fontsize=16)
show() | 33.97541 | 133 | 0.740893 |
794fbcc63d34d8d21101f56cd7aa5bab090bad76 | 753 | py | Python | elk_project/elk_project/urls.py | joyliao07/elk | c697d6847c57c0e7f3b4dc71a373c5fe0407e237 | ["MIT"] | null | null | null | elk_project/elk_project/urls.py | joyliao07/elk | c697d6847c57c0e7f3b4dc71a373c5fe0407e237 | ["MIT"] | 7 | 2019-12-04T23:17:25.000Z | 2021-06-09T17:54:51.000Z | elk_project/elk_project/urls.py | joyliao07/elk | c697d6847c57c0e7f3b4dc71a373c5fe0407e237 | ["MIT"] | null | null | null | """elk_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 34.227273 | 77 | 0.710491 |