hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73087de99d28d62c47ee47f567d8a406a1bc2b8 | 1,502 | py | Python | var/spack/repos/builtin/packages/eztrace/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-08-13T15:24:33.000Z | 2021-10-18T18:38:19.000Z | var/spack/repos/builtin/packages/eztrace/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2020-07-24T17:09:12.000Z | 2022-01-17T22:39:06.000Z | var/spack/repos/builtin/packages/eztrace/package.py | robertodr/spack | 9b809e01b47d48f01b3d257912fe1b752943cd3d | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-01-22T14:01:28.000Z | 2020-07-23T21:35:12.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Eztrace(AutotoolsPackage):
    """EZTrace is a tool to automatically generate execution traces
    of HPC applications."""

    homepage = "http://eztrace.gforge.inria.fr"
    url = "https://gitlab.com/eztrace/eztrace/-/archive/eztrace-1.1-10/eztrace-eztrace-1.1-10.tar.gz"
    maintainers = ['trahay']

    version('1.1-10', sha256='97aba8f3b3b71e8e2f7ef47e00c262234e27b9cb4a870c85c525317a83a3f0d4')

    depends_on('mpi')

    # Does not work on Darwin due to MAP_POPULATE
    conflicts('platform=darwin')

    def patch(self):
        # Turn the quoted literal into string concatenation around the
        # DEFAULT_OUTFILE macro — presumably so the preprocessor expands
        # the macro value instead of emitting the literal name (TODO confirm
        # against the otfshrink sources).
        filter_file(
            '"DEFAULT_OUTFILE"',
            '" DEFAULT_OUTFILE "',
            'extlib/gtg/extlib/otf/tools/otfshrink/otfshrink.cpp',
            string=True
        )

    def setup_build_environment(self, env):
        # The Fujitsu compiler (%fj) needs an explicit flag to pull in the
        # Fortran runtime at link time.
        if self.spec.satisfies('%fj'):
            env.set('LDFLAGS', '--linkfortran')

    def configure_args(self):
        # Point configure at the MPI installation provided by the spec.
        args = ["--with-mpi={0}".format(self.spec["mpi"].prefix)]
        return args

    @run_before('build')
    def fix_libtool(self):
        # The libtool scripts generated under %fj leave the wrapper-flag
        # prefix empty; set it to "-Wl," so linker flags are forwarded.
        if self.spec.satisfies('%fj'):
            libtools = ['extlib/gtg/libtool',
                        'extlib/opari2/build-frontend/libtool']
            for f in libtools:
                filter_file('wl=""', 'wl="-Wl,"', f, string=True)
| 31.957447 | 106 | 0.62783 |
from spack import *
class Eztrace(AutotoolsPackage):
homepage = "http://eztrace.gforge.inria.fr"
url = "https://gitlab.com/eztrace/eztrace/-/archive/eztrace-1.1-10/eztrace-eztrace-1.1-10.tar.gz"
maintainers = ['trahay']
version('1.1-10', sha256='97aba8f3b3b71e8e2f7ef47e00c262234e27b9cb4a870c85c525317a83a3f0d4')
depends_on('mpi')
conflicts('platform=darwin')
def patch(self):
filter_file(
'"DEFAULT_OUTFILE"',
'" DEFAULT_OUTFILE "',
'extlib/gtg/extlib/otf/tools/otfshrink/otfshrink.cpp',
string=True
)
def setup_build_environment(self, env):
if self.spec.satisfies('%fj'):
env.set('LDFLAGS', '--linkfortran')
def configure_args(self):
args = ["--with-mpi={0}".format(self.spec["mpi"].prefix)]
return args
@run_before('build')
def fix_libtool(self):
if self.spec.satisfies('%fj'):
libtools = ['extlib/gtg/libtool',
'extlib/opari2/build-frontend/libtool']
for f in libtools:
filter_file('wl=""', 'wl="-Wl,"', f, string=True)
| true | true |
f73088043a279f1eb399bbe99ed06db1fe268266 | 6,952 | py | Python | Wessim1.py | tinyheero/wessim | 30c91560e0d6d8ce96a41ce3bcd40b3467c3a1c6 | [
"MIT"
] | null | null | null | Wessim1.py | tinyheero/wessim | 30c91560e0d6d8ce96a41ce3bcd40b3467c3a1c6 | [
"MIT"
] | null | null | null | Wessim1.py | tinyheero/wessim | 30c91560e0d6d8ce96a41ce3bcd40b3467c3a1c6 | [
"MIT"
] | 1 | 2020-08-18T03:17:58.000Z | 2020-08-18T03:17:58.000Z | #!/usr/bin/env python2
import sys
import random
import bisect
import pysam
import gzip
import cPickle
import numpy
from time import time, localtime, strftime
import argparse
from multiprocessing import Process
import os
import math
import pysam
inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0,'t':1,'g':2,'c':3,'n':4}
def subprogram(command, name):
    """Run *command* in a shell; used as the target of a worker Process.

    :param command: full shell command line to execute.
    :param name: worker identifier, only used for the progress message.
    """
    os.system(command)
    print "exiting subprocess " + str(name)
def main(argv):
    """Entry point: parse CLI options, fan read generation out to
    ``__sub_wessim1.py`` worker processes, then merge their partial
    FASTQ outputs into the final file(s).

    :param argv: command-line arguments (``sys.argv[1:]``); the raw string
        is forwarded verbatim to each worker subprocess.
    """
    t0 = time()
    # The original argument string is passed through to every worker, which
    # re-parses it with the same option definitions.
    arguline = " ".join(argv)
    parser = argparse.ArgumentParser(description='Wessim1: Whole Exome Sequencing SIMulator 1 (Ideal target region-based version)', prog='Wessim1', formatter_class=argparse.RawTextHelpFormatter)
    group1 = parser.add_argument_group('Mandatory input files')
    group1.add_argument(
        '--target-fasta-file',
        help='The target FASTA file generated from get_region_vector.py',
        required=True
    )
    group1.add_argument(
        '--target-abd-file',
        help='The target abd file generated from get_region_vector.py',
        required=True
    )
    group1.add_argument(
        '-n', '--num-reads',
        metavar='INT', type=int, dest='readnumber', required=True,
        help='total (n)umber of reads'
    )
    group1.add_argument(
        '-l', metavar = 'INT', type=int, dest='readlength', required=True,
        help='read (l)ength (bp)'
    )
    group1.add_argument(
        '-M', metavar = 'FILE', dest='model', required=True,
        help='GemSim (M)odel file (.gzip)'
    )
    group2 = parser.add_argument_group('Parameters for exome capture')
    group2.add_argument(
        '-f', '--fragment-size',
        metavar = 'INT', type=int, dest='fragsize', required=False,
        help='mean (f)ragment size. this corresponds to insert size when sequencing in paired-end mode. [200]',
        default=200
    )
    group2.add_argument('-d', metavar = 'INT', type=int, dest='fragsd', required=False, help='standard (d)eviation of fragment size [50]', default=50)
    group2.add_argument('-m', metavar = 'INT', type=int, dest='fragmin', required=False, help='(m)inimum fragment length [read_length + 20]')
    group2.add_argument('-x', metavar = 'INT',type=int, dest='slack', required=False, help='slack margin of the given boundaries [0]', default=0)
    group3 = parser.add_argument_group('Parameters for sequencing')
    group3.add_argument(
        '-p', '--paired-reads',
        action='store_true',
        help='Generate paired-end reads'
    )
    group3.add_argument('-t', metavar = 'INT', type=int, dest='threadnumber', required=False, help='number of (t)hreaded subprocesses [1]', default=1)
    group4 = parser.add_argument_group('Output options')
    group4.add_argument('-o', metavar = 'FILE', dest='outfile', help='(o)utput file header. ".fastq.gz" or ".fastq" will be attached automatically. Output will be splitted into two files in paired-end mode', required=True)
    group4.add_argument('-z', action='store_true', help='compress output with g(z)ip [false]')
    group4.add_argument('-q', metavar = 'INT', type=int, dest='qualbase', required=False, help='(q)uality score offset [33]', default=33)
    group4.add_argument('-v', action='store_true', help='(v)erbose; print out intermediate messages.')
    group4.add_argument('--read-name-prefix', dest='read_name_prefix', default = '_from_', required=False, help='Prefix to add to simulated read names (default: "%(default)s")')
    group4.add_argument(
        '--use-rce', action='store_true',
        help='Use the target RCE values for generating reads'
    )
    args = parser.parse_args()
    isize = args.fragsize
    isd = args.fragsd
    imin = args.fragmin
    slack = args.slack
    paired = args.paired_reads
    readlength = args.readlength
    readnumber = args.readnumber
    threadnumber = args.threadnumber
    read_name_prefix = args.read_name_prefix
    # Default minimum fragment length when not given on the command line.
    # NOTE(review): both branches are identical; paired mode presumably was
    # meant to use a larger minimum — confirm intended behavior.
    if imin==None:
        if paired:
            imin = readlength + 20
        else:
            imin = readlength + 20
    if isize < imin:
        print "too small mean fragment size (" + str(isize) + ") compared to minimum length (" + str(imin) + "). Increase it and try again."
        sys.exit(0)
    model = args.model
    outfile = args.outfile
    compress = args.z
    qualbase = args.qualbase
    verbose = args.v
    # Echo the effective configuration before starting the workers.
    print
    print "-------------------------------------------"
    print "Target FASTA file:", args.target_fasta_file
    print "Target ABD file:", args.target_abd_file
    print "Fragment:",isize, "+-", isd, ">", imin
    print "Paired-end mode?", paired
    print "Sequencing model:", model
    print "Read length:", readlength, "Read number:", readnumber
    print "Output File:", outfile
    print "Gzip compress?", compress
    print "Quality base:", qualbase
    print "Thread number:", threadnumber
    print "Read name prefix:", read_name_prefix
    print "Job started at:", strftime("%Y-%m-%d %H:%M:%S", localtime())
    print "-------------------------------------------"
    print
    cur_script_path = os.path.dirname(os.path.abspath(__file__))
    processes = []
    # Split the total read count evenly across the workers; worker t handles
    # reads [readstart, readend] and writes to "<outfile>-<t+1>" files.
    for t in range(0, threadnumber):
        readstart = int(float(readnumber) / float(threadnumber) * t) + 1
        readend = int(float(readnumber) / float(threadnumber) * (t+1))
        # Sub-command for __sub_wessim1.py
        command = "python2 " + cur_script_path + "/" "__sub_wessim1.py " + arguline + " -1 " + str(readstart) + " -2 " + str(readend) + " -i " + str(t+1)
        print command
        p = Process(target=subprogram, args=(command, t+1))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
    t1 = time()
    print "Done generating " + str(readnumber) + " reads in %f secs" % (t1 - t0)
    print "Merging subresults..."
    # Open the final output stream(s); paired mode produces two mate files.
    wread = None
    wread2 = None
    if paired and compress:
        wread = gzip.open(outfile + "_1.fastq.gz", 'wb')
        wread2 = gzip.open(outfile + "_2.fastq.gz", 'wb')
    elif paired and not compress:
        wread = open(outfile + "_1.fastq", 'w')
        wread2 = open(outfile + "_2.fastq", 'w')
    elif not paired and compress:
        wread = gzip.open(outfile + ".fastq.gz", 'wb')
    else:
        wread = open(outfile + ".fastq", 'w')
    if not paired:
        # Concatenate each worker's partial FASTQ into the final file,
        # deleting the partial file afterwards.
        for t in range(0, threadnumber):
            suboutfile = outfile + "-" + str(t+1)
            fread = None
            if compress:
                suboutfile += ".fastq.gz"
                fread = gzip.open(suboutfile, 'rb')
            else:
                suboutfile += ".fastq"
                fread = open(suboutfile, 'r')
            line = fread.readline()
            while line:
                wread.write(line)
                line = fread.readline()
            fread.close()
            os.remove(suboutfile)
        wread.close()
    else:
        # Paired mode: merge mate-1 and mate-2 partials in lockstep so the
        # two output files stay synchronized line for line.
        for t in range(0, threadnumber):
            suboutfile1 = outfile + "-" + str(t+1) + "_1"
            suboutfile2 = outfile + "-" + str(t+1) + "_2"
            fread1 = None
            fread2 = None
            if compress:
                suboutfile1 += ".fastq.gz"
                suboutfile2 += ".fastq.gz"
                fread1 = gzip.open(suboutfile1, "rb")
                fread2 = gzip.open(suboutfile2, "rb")
            else:
                suboutfile1 += ".fastq"
                suboutfile2 += ".fastq"
                fread1 = open(suboutfile1, "r")
                fread2 = open(suboutfile2, "r")
            line1 = fread1.readline()
            line2 = fread2.readline()
            while line1 and line2:
                wread.write(line1)
                wread2.write(line2)
                line1 = fread1.readline()
                line2 = fread2.readline()
            fread1.close()
            fread2.close()
            os.remove(suboutfile1)
            os.remove(suboutfile2)
        wread.close()
        wread2.close()
    sys.exit(0)
# Script entry point: forward command-line arguments (minus the program name).
if __name__=="__main__":
    main(sys.argv[1:])
| 33.263158 | 219 | 0.675489 |
import sys
import random
import bisect
import pysam
import gzip
import cPickle
import numpy
from time import time, localtime, strftime
import argparse
from multiprocessing import Process
import os
import math
import pysam
inds={'A':0,'T':1,'G':2,'C':3,'N':4,'a':0,'t':1,'g':2,'c':3,'n':4}
def subprogram(command, name):
os.system(command)
print "exiting subprocess " + str(name)
def main(argv):
t0 = time()
arguline = " ".join(argv)
parser = argparse.ArgumentParser(description='Wessim1: Whole Exome Sequencing SIMulator 1 (Ideal target region-based version)', prog='Wessim1', formatter_class=argparse.RawTextHelpFormatter)
group1 = parser.add_argument_group('Mandatory input files')
group1.add_argument(
'--target-fasta-file',
help='The target FASTA file generated from get_region_vector.py',
required=True
)
group1.add_argument(
'--target-abd-file',
help='The target abd file generated from get_region_vector.py',
required=True
)
group1.add_argument(
'-n', '--num-reads',
metavar='INT', type=int, dest='readnumber', required=True,
help='total (n)umber of reads'
)
group1.add_argument(
'-l', metavar = 'INT', type=int, dest='readlength', required=True,
help='read (l)ength (bp)'
)
group1.add_argument(
'-M', metavar = 'FILE', dest='model', required=True,
help='GemSim (M)odel file (.gzip)'
)
group2 = parser.add_argument_group('Parameters for exome capture')
group2.add_argument(
'-f', '--fragment-size',
metavar = 'INT', type=int, dest='fragsize', required=False,
help='mean (f)ragment size. this corresponds to insert size when sequencing in paired-end mode. [200]',
default=200
)
group2.add_argument('-d', metavar = 'INT', type=int, dest='fragsd', required=False, help='standard (d)eviation of fragment size [50]', default=50)
group2.add_argument('-m', metavar = 'INT', type=int, dest='fragmin', required=False, help='(m)inimum fragment length [read_length + 20]')
group2.add_argument('-x', metavar = 'INT',type=int, dest='slack', required=False, help='slack margin of the given boundaries [0]', default=0)
group3 = parser.add_argument_group('Parameters for sequencing')
group3.add_argument(
'-p', '--paired-reads',
action='store_true',
help='Generate paired-end reads'
)
group3.add_argument('-t', metavar = 'INT', type=int, dest='threadnumber', required=False, help='number of (t)hreaded subprocesses [1]', default=1)
group4 = parser.add_argument_group('Output options')
group4.add_argument('-o', metavar = 'FILE', dest='outfile', help='(o)utput file header. ".fastq.gz" or ".fastq" will be attached automatically. Output will be splitted into two files in paired-end mode', required=True)
group4.add_argument('-z', action='store_true', help='compress output with g(z)ip [false]')
group4.add_argument('-q', metavar = 'INT', type=int, dest='qualbase', required=False, help='(q)uality score offset [33]', default=33)
group4.add_argument('-v', action='store_true', help='(v)erbose; print out intermediate messages.')
group4.add_argument('--read-name-prefix', dest='read_name_prefix', default = '_from_', required=False, help='Prefix to add to simulated read names (default: "%(default)s")')
group4.add_argument(
'--use-rce', action='store_true',
help='Use the target RCE values for generating reads'
)
args = parser.parse_args()
isize = args.fragsize
isd = args.fragsd
imin = args.fragmin
slack = args.slack
paired = args.paired_reads
readlength = args.readlength
readnumber = args.readnumber
threadnumber = args.threadnumber
read_name_prefix = args.read_name_prefix
if imin==None:
if paired:
imin = readlength + 20
else:
imin = readlength + 20
if isize < imin:
print "too small mean fragment size (" + str(isize) + ") compared to minimum length (" + str(imin) + "). Increase it and try again."
sys.exit(0)
model = args.model
outfile = args.outfile
compress = args.z
qualbase = args.qualbase
verbose = args.v
print
print "-------------------------------------------"
print "Target FASTA file:", args.target_fasta_file
print "Target ABD file:", args.target_abd_file
print "Fragment:",isize, "+-", isd, ">", imin
print "Paired-end mode?", paired
print "Sequencing model:", model
print "Read length:", readlength, "Read number:", readnumber
print "Output File:", outfile
print "Gzip compress?", compress
print "Quality base:", qualbase
print "Thread number:", threadnumber
print "Read name prefix:", read_name_prefix
print "Job started at:", strftime("%Y-%m-%d %H:%M:%S", localtime())
print "-------------------------------------------"
print
cur_script_path = os.path.dirname(os.path.abspath(__file__))
processes = []
for t in range(0, threadnumber):
readstart = int(float(readnumber) / float(threadnumber) * t) + 1
readend = int(float(readnumber) / float(threadnumber) * (t+1))
command = "python2 " + cur_script_path + "/" "__sub_wessim1.py " + arguline + " -1 " + str(readstart) + " -2 " + str(readend) + " -i " + str(t+1)
print command
p = Process(target=subprogram, args=(command, t+1))
p.start()
processes.append(p)
for p in processes:
p.join()
t1 = time()
print "Done generating " + str(readnumber) + " reads in %f secs" % (t1 - t0)
print "Merging subresults..."
wread = None
wread2 = None
if paired and compress:
wread = gzip.open(outfile + "_1.fastq.gz", 'wb')
wread2 = gzip.open(outfile + "_2.fastq.gz", 'wb')
elif paired and not compress:
wread = open(outfile + "_1.fastq", 'w')
wread2 = open(outfile + "_2.fastq", 'w')
elif not paired and compress:
wread = gzip.open(outfile + ".fastq.gz", 'wb')
else:
wread = open(outfile + ".fastq", 'w')
if not paired:
for t in range(0, threadnumber):
suboutfile = outfile + "-" + str(t+1)
fread = None
if compress:
suboutfile += ".fastq.gz"
fread = gzip.open(suboutfile, 'rb')
else:
suboutfile += ".fastq"
fread = open(suboutfile, 'r')
line = fread.readline()
while line:
wread.write(line)
line = fread.readline()
fread.close()
os.remove(suboutfile)
wread.close()
else:
for t in range(0, threadnumber):
suboutfile1 = outfile + "-" + str(t+1) + "_1"
suboutfile2 = outfile + "-" + str(t+1) + "_2"
fread1 = None
fread2 = None
if compress:
suboutfile1 += ".fastq.gz"
suboutfile2 += ".fastq.gz"
fread1 = gzip.open(suboutfile1, "rb")
fread2 = gzip.open(suboutfile2, "rb")
else:
suboutfile1 += ".fastq"
suboutfile2 += ".fastq"
fread1 = open(suboutfile1, "r")
fread2 = open(suboutfile2, "r")
line1 = fread1.readline()
line2 = fread2.readline()
while line1 and line2:
wread.write(line1)
wread2.write(line2)
line1 = fread1.readline()
line2 = fread2.readline()
fread1.close()
fread2.close()
os.remove(suboutfile1)
os.remove(suboutfile2)
wread.close()
wread2.close()
sys.exit(0)
if __name__=="__main__":
main(sys.argv[1:])
| false | true |
f7308819cec616f6758ef2e8960686762613ad90 | 18,855 | py | Python | cbcintacctsdk/apis/api_base.py | Cold-Bore-Capital/sageintacct-sdk-py | a65c89aba9987a6d74238ad7d28b39ddd704d68c | [
"MIT"
] | null | null | null | cbcintacctsdk/apis/api_base.py | Cold-Bore-Capital/sageintacct-sdk-py | a65c89aba9987a6d74238ad7d28b39ddd704d68c | [
"MIT"
] | null | null | null | cbcintacctsdk/apis/api_base.py | Cold-Bore-Capital/sageintacct-sdk-py | a65c89aba9987a6d74238ad7d28b39ddd704d68c | [
"MIT"
] | null | null | null | """
API Base class with util functions
"""
import json
import datetime
import uuid
from warnings import warn
from typing import Dict, List, Tuple
from urllib.parse import unquote
import re
import xmltodict
import requests
from ..exceptions import SageIntacctSDKError, ExpiredTokenError, InvalidTokenError, NoPrivilegeError, \
WrongParamsError, NotFoundItemError, InternalServerError, DataIntegrityWarning
from .constants import dimensions_fields_mapping
class ApiBase:
    """Base class shared by all Sage Intacct API wrappers.

    Holds the sender/session credentials, builds the XML request envelope
    for every call, and implements the generic create / read / query
    helpers reused by the dimension-specific subclasses.
    """

    def __init__(self, dimension: str = None, pagesize: int = 2000, post_legacy_method: str = None):
        """
        :param dimension: Intacct object name this instance operates on (e.g. 'VENDOR').
        :param pagesize: number of records fetched per request when paginating.
        :param post_legacy_method: legacy function name used for some create/update calls.
        """
        self._sender_id = None
        self._sender_password = None
        self._session_id = None
        self._api_url = 'https://api.intacct.com/ia/xml/xmlgw.phtml'
        self._dimension = dimension
        self._pagesize = pagesize
        self._post_legacy_method = post_legacy_method

    @property
    def dimension(self):
        """Intacct object name this instance operates on."""
        # Bug fix: this getter previously returned self._post_legacy_method.
        return self._dimension

    @dimension.setter
    def dimension(self, dimension: str):
        """Set the Intacct object name used by the query/post helpers."""
        self._dimension = dimension

    @property
    def post_legacy_method(self):
        """Legacy function name used by the legacy post payloads."""
        return self._post_legacy_method

    @post_legacy_method.setter
    def post_legacy_method(self, post_legacy_method: str):
        """Set the legacy function name used by the legacy post payloads."""
        self._post_legacy_method = post_legacy_method

    @property
    def sender_id(self):
        """Sender id used to authenticate the XML control block."""
        return self._sender_id

    @sender_id.setter
    def sender_id(self, sender_id: str):
        """Set the sender id used to authenticate the XML control block."""
        self._sender_id = sender_id

    def set_sender_id(self, sender_id: str):
        """
        Set the sender id for APIs.

        Kept for backward compatibility with the property setter above.

        :param sender_id: sender id
        :return: None
        """
        self._sender_id = sender_id

    def set_sender_password(self, sender_password: str):
        """
        Set the sender password for APIs.

        :param sender_password: sender password
        :return: None
        """
        self._sender_password = sender_password

    def get_session_id(self, user_id: str, company_id: str, user_password: str, entity_id: str = None):
        """
        Authenticate with user credentials and store the resulting session id.

        Also switches subsequent requests to the endpoint URL returned by
        Intacct for this session.

        :param user_id: Intacct user id
        :param company_id: Intacct company id
        :param user_password: Intacct user password
        :param entity_id: optional entity (location) id to log into
        :return: the session id (str)
        :raises SageIntacctSDKError: when authentication fails
        """
        timestamp = datetime.datetime.now()
        dict_body = {
            'request': {
                'control': {
                    'senderid': self._sender_id,
                    'password': self._sender_password,
                    'controlid': timestamp,
                    'uniqueid': False,
                    'dtdversion': 3.0,
                    'includewhitespace': False
                },
                'operation': {
                    'authentication': {
                        'login': {
                            'userid': user_id,
                            'companyid': company_id,
                            'password': user_password,
                            'locationid': entity_id
                        }
                    },
                    'content': {
                        'function': {
                            '@controlid': str(uuid.uuid4()),
                            'getAPISession': None
                        }
                    }
                }
            }
        }

        response = self._post_request(dict_body, self._api_url)

        if response['authentication']['status'] == 'success':
            session_details = response['result']['data']['api']
            # Use the session-specific endpoint for all subsequent calls.
            self._api_url = session_details['endpoint']
            self._session_id = session_details['sessionid']
            return self._session_id
        else:
            raise SageIntacctSDKError('Error: {0}'.format(response['errormessage']))

    # TODO: Change to property decorators
    def set_session_id(self, session_id: str):
        """
        Set the session id for APIs.

        :param session_id: session id
        :return: None
        """
        self._session_id = session_id

    def _support_id_msg(self, errormessages):
        """Normalize the error payload and record whether it was a list or dict.

        :param errormessages: error message received from Sage Intacct
            (``{'error': dict}`` or ``{'error': [dict, ...]}``).
        :return: ``{'error': <first error dict>, 'type': 'list'|'dict'}``
        """
        error = {}
        if isinstance(errormessages['error'], list):
            # Only the first error carries the Support ID we decode later.
            error['error'] = errormessages['error'][0]
            error['type'] = 'list'
        elif isinstance(errormessages['error'], dict):
            error['error'] = errormessages['error']
            error['type'] = 'dict'
        return error

    def _decode_support_id(self, errormessages):
        """Decode the URL-encoded Support ID embedded in an error message.

        :param errormessages: error message received from Sage Intacct.
        :return: the same structure with ``error`` normalized to a list and
            the first error's ``description2`` percent-decoded (or None when
            no description is present).
        """
        support_id_msg = self._support_id_msg(errormessages)
        data_type = support_id_msg['type']
        error = support_id_msg['error']
        # Robustness fix: the original indexed 'description2' directly
        # (KeyError when absent), dereferenced a possibly-None regex match
        # (AttributeError), and left `message` unbound when the guard failed.
        message = None
        if error and error.get('description2'):
            message = error['description2']
            support_id = re.search('Support ID: (.*)]', message)
            if support_id and support_id.group(1):
                decoded_support_id = unquote(support_id.group(1))
                message = message.replace(support_id.group(1), decoded_support_id)

        # Converting dict to list even for single error response
        if data_type == 'dict':
            errormessages['error'] = [errormessages['error']]

        errormessages['error'][0]['description2'] = message if message else None

        return errormessages

    def _post_request(self, dict_body: dict, api_url: str):
        """Send one XML-encoded HTTP POST request and parse the response.

        :param dict_body: request body as a dict (converted to XML).
        :param api_url: URL of the Intacct gateway.
        :return: the parsed ``operation`` element of a successful response (dict).
        :raises WrongParamsError, InvalidTokenError, NoPrivilegeError,
            NotFoundItemError, ExpiredTokenError, InternalServerError,
            SageIntacctSDKError: depending on status code / response status.
        """
        api_headers = {
            'content-type': 'application/xml'
        }
        body = xmltodict.unparse(dict_body)
        response = requests.post(api_url, headers=api_headers, data=body)

        # force_list keeps single-record results shaped like multi-record ones.
        parsed_xml = xmltodict.parse(response.text, force_list={self._dimension})
        parsed_response = json.loads(json.dumps(parsed_xml))

        if response.status_code == 200:
            if parsed_response['response']['control']['status'] == 'success':
                api_response = parsed_response['response']['operation']

            if parsed_response['response']['control']['status'] == 'failure':
                exception_msg = self._decode_support_id(parsed_response['response']['errormessage'])
                raise WrongParamsError('Some of the parameters are wrong', exception_msg)

            if api_response['authentication']['status'] == 'failure':
                raise InvalidTokenError('Invalid token / Incorrect credentials', api_response['errormessage'])

            if api_response['result']['status'] == 'success':
                return api_response

            if api_response['result']['status'] == 'failure':
                exception_msg = self._decode_support_id(api_response['result']['errormessage'])
                for error in exception_msg['error']:
                    if error['description2'] and 'You do not have permission for API' in error['description2']:
                        raise InvalidTokenError('The user has insufficient privilege', exception_msg)
                raise WrongParamsError('Error during {0}'.format(api_response['result']['function']), exception_msg)

        if response.status_code == 400:
            raise WrongParamsError('Some of the parameters are wrong', parsed_response)

        if response.status_code == 401:
            raise InvalidTokenError('Invalid token / Incorrect credentials', parsed_response)

        if response.status_code == 403:
            raise NoPrivilegeError('Forbidden, the user has insufficient privilege', parsed_response)

        if response.status_code == 404:
            raise NotFoundItemError('Not found item with ID', parsed_response)

        if response.status_code == 498:
            raise ExpiredTokenError('Expired token, try to refresh it', parsed_response)

        if response.status_code == 500:
            raise InternalServerError('Internal server error', parsed_response)

        raise SageIntacctSDKError('Error: {0}'.format(parsed_response))

    def format_and_send_request(self, data: Dict):
        """Wrap *data* in the standard request envelope and send it.

        :param data: single-key dict ``{function_name: payload}``.
        :return: the ``result`` element of the response (dict).
        """
        key = next(iter(data))
        timestamp = datetime.datetime.now()

        dict_body = {
            'request': {
                'control': {
                    'senderid': self._sender_id,
                    'password': self._sender_password,
                    'controlid': timestamp,
                    'uniqueid': False,
                    'dtdversion': 3.0,
                    'includewhitespace': False
                },
                'operation': {
                    'authentication': {
                        'sessionid': self._session_id
                    },
                    'content': {
                        'function': {
                            '@controlid': str(uuid.uuid4()),
                            key: data[key]
                        }
                    }
                }
            }
        }

        response = self._post_request(dict_body, self._api_url)
        return response['result']

    def post(self, data: Dict):
        """Create/post *data*, dispatching on the configured dimension.

        Legacy dimensions, reports, deletes and custom reports each use a
        dedicated payload builder; everything else uses the standard
        ``create`` payload.
        """
        if self._dimension in ('CCTRANSACTION', 'EPPAYMENT', 'create_invoice', 'create_aradjustment', 'update_invoice', 'update_customer'):
            return self._construct_post_legacy_payload(data)
        elif self._dimension == 'readReport':
            return self._construct_run_report(data)
        elif (self._dimension == 'ARINVOICE' and self._post_legacy_method == 'delete'):
            return self._construct_delete(data)
        elif (self._dimension == 'custom_report'):
            return self._construct_read_custom_report(data)
        else:
            return self._construct_post_payload(data)

    def _construct_post_payload(self, data: Dict):
        """Build and send a standard ``create`` payload for this dimension."""
        payload = {
            'create': {
                self._dimension: data
            }
        }
        return self.format_and_send_request(payload)

    def _construct_run_report(self, data: str):
        """Build and send a ``readReport`` payload for a stored report."""
        payload = {
            "readReport": {
                'report': data
            }}
        return self.format_and_send_request(payload)

    def _construct_read_custom_report(self, data: str):
        """Build and send an interactive ``readReport`` that also returns the report definition."""
        payload = {
            "readReport": {
                '@type': "interactive",
                '@returnDef': "true",
                'report': data
            }}
        return self.format_and_send_request(payload)

    def _construct_delete(self, data: str) -> str:
        """Build and send a ``delete`` payload."""
        payload = {"delete": data}
        return self.format_and_send_request(payload)

    def _construct_post_legacy_payload(self, data: Dict):
        """Build and send a payload keyed by the configured legacy method name."""
        payload = {
            self._post_legacy_method: data
        }
        return self.format_and_send_request(payload)

    def _construct_post_legacy_aradjustment_payload(self, data: Dict):
        """Build and send a legacy ``create_aradjustment`` payload."""
        payload = {
            'create_aradjustment': data
        }
        return self.format_and_send_request(payload)

    def count(self):
        """Return the total number of records for this dimension (int)."""
        get_count = {
            'query': {
                'object': self._dimension,
                'select': {
                    'field': 'RECORDNO'
                },
                # Only the @totalcount attribute is needed, so fetch 1 record.
                'pagesize': '1'
            }
        }
        response = self.format_and_send_request(get_count)
        return int(response['data']['@totalcount'])

    def read_by_query(self, fields: list = None):
        """Read by Query from Sage Intacct.

        :param fields: selective fields to be returned (optional; defaults to all).
        :return: Dict.
        """
        payload = {
            'readByQuery': {
                'object': self._dimension,
                'fields': ','.join(fields) if fields else '*',
                'query': None,
                'pagesize': '1000'
            }
        }
        return self.format_and_send_request(payload)

    def get(self, field: str, value: str, fields: list = None):
        """Get data from Sage Intacct based on an equality filter.

        :param field: field to filter by (required).
        :param value: value the field must equal (required).
        :param fields: selective fields to be returned (optional).
        :return: Dict.
        """
        data = {
            'readByQuery': {
                'object': self._dimension,
                'fields': ','.join(fields) if fields else '*',
                'query': "{0} = '{1}'".format(field, value),
                'pagesize': '1000'
            }
        }
        return self.format_and_send_request(data)['data']

    def get_all(self, field: str = None, value: str = None, fields: list = None):
        """Get all data from Sage Intacct, paginating through every record.

        :param field: optional field to filter by (equality).
        :param value: optional value the field must equal.
        :param fields: selective fields to be returned (optional).
        :return: List of Dict.
        """
        complete_data = []
        count = self.count()
        pagesize = self._pagesize
        for offset in range(0, count, pagesize):
            data = {
                'query': {
                    'object': self._dimension,
                    'select': {
                        'field': fields if fields else dimensions_fields_mapping[self._dimension]
                    },
                    'pagesize': pagesize,
                    'offset': offset
                }
            }

            if field and value:
                data['query']['filter'] = {
                    'equalto': {
                        'field': field,
                        'value': value
                    }
                }

            paginated_data = self.format_and_send_request(data)['data'][self._dimension]
            complete_data.extend(paginated_data)

        return complete_data

    # Type alias for the (operator, field, value) filter tuples below.
    __query_filter = List[Tuple[str, str, str]]

    def get_by_query(self, fields: List[str] = None,
                     and_filter: __query_filter = None,
                     or_filter: __query_filter = None,
                     filter_payload: dict = None):
        """Get data from Sage Intacct using the query method based on filter.

        See the Sage Intacct documentation for query structures:
        https://developer.intacct.com/web-services/queries/

        :param fields: fields to return for each record.
        :param and_filter: list of (operator, field, value) tuples AND-ed together.
        :param or_filter: list of (operator, field, value) tuples OR-ed together.
        :param filter_payload: pre-formatted query payload (overridden when
            and_filter/or_filter are given).

        If the 'between' operator is used in a filter, the value must be
        submitted as [str, str]; for the 'in' operator it may be
        [str, str, str, ...].

        :return: List of Dict.
        """
        complete_data = []
        # NOTE(review): count() is the unfiltered total; the loop relies on
        # '@numremaining' to stop early once the filtered set is exhausted.
        count = self.count()
        pagesize = self._pagesize
        offset = 0
        formatted_filter = filter_payload
        data = {
            'query': {
                'object': self._dimension,
                'select': {
                    'field': fields if fields else dimensions_fields_mapping[self._dimension]
                },
                'pagesize': pagesize,
                'offset': offset
            }
        }

        if and_filter and or_filter:
            formatted_filter = {'and': {}}
            for operator, field, value in and_filter:
                formatted_filter['and'].setdefault(operator, {}).update({'field': field, 'value': value})
            formatted_filter['and']['or'] = {}
            for operator, field, value in or_filter:
                formatted_filter['and']['or'].setdefault(operator, {}).update({'field': field, 'value': value})

        elif and_filter:
            if len(and_filter) > 1:
                formatted_filter = {'and': {}}
                for operator, field, value in and_filter:
                    formatted_filter['and'].setdefault(operator, {}).update({'field': field, 'value': value})
            else:
                formatted_filter = {}
                for operator, field, value in and_filter:
                    formatted_filter.setdefault(operator, {}).update({'field': field, 'value': value})

        elif or_filter:
            if len(or_filter) > 1:
                formatted_filter = {'or': {}}
                for operator, field, value in or_filter:
                    formatted_filter['or'].setdefault(operator, {}).update({'field': field, 'value': value})
            else:
                formatted_filter = {}
                for operator, field, value in or_filter:
                    formatted_filter.setdefault(operator, {}).update({'field': field, 'value': value})

        if formatted_filter:
            data['query']['filter'] = formatted_filter

        for offset in range(0, count, pagesize):
            data['query']['offset'] = offset
            paginated_data = self.format_and_send_request(data)['data']
            try:
                complete_data.extend(paginated_data[self._dimension])
            except (KeyError, TypeError):
                # A page may legitimately contain no records for this
                # dimension; previously a bare `except:` hid all errors here.
                pass
            filtered_total = int(paginated_data['@totalcount'])
            if paginated_data['@numremaining'] == '0':
                break

        if filtered_total != len(complete_data):
            warn(message='Your data may not be complete. Records returned do not equal total query record count',
                 category=DataIntegrityWarning)

        return complete_data

    def get_lookup(self):
        """Return all fields with attributes for the object this instance wraps.

        :return: Dict.
        """
        data = {'lookup': {'object': self._dimension}}
        return self.format_and_send_request(data)['data']
| 34.723757 | 137 | 0.547759 | import json
import datetime
import uuid
from warnings import warn
from typing import Dict, List, Tuple
from urllib.parse import unquote
import re
import xmltodict
import requests
from ..exceptions import SageIntacctSDKError, ExpiredTokenError, InvalidTokenError, NoPrivilegeError, \
WrongParamsError, NotFoundItemError, InternalServerError, DataIntegrityWarning
from .constants import dimensions_fields_mapping
class ApiBase:
    """Base class for Sage Intacct web-services access.

    Holds the sender credentials and API session state, and implements the
    generic request/response handling, query, pagination and legacy-post
    helpers that dimension-specific wrappers build on.
    """

    def __init__(self, dimension: str = None, pagesize: int = 2000, post_legacy_method: str = None):
        """Create a client bound to one Intacct object type.

        Parameters:
            dimension: Intacct object name this instance operates on (e.g. 'VENDOR').
            pagesize: page size used when paginating query results.
            post_legacy_method: legacy function name used by post() for some objects.
        """
        self._sender_id = None
        self._sender_password = None
        self._session_id = None
        # Default gateway; replaced by the session-specific endpoint after login.
        self._api_url = 'https://api.intacct.com/ia/xml/xmlgw.phtml'
        self._dimension = dimension
        self._pagesize = pagesize
        self._post_legacy_method = post_legacy_method

    @property
    def dimension(self):
        # Bug fix: this getter previously returned self._post_legacy_method.
        return self._dimension

    @dimension.setter
    def dimension(self, dimension: str):
        self._dimension = dimension

    @property
    def post_legacy_method(self):
        return self._post_legacy_method

    @post_legacy_method.setter
    def post_legacy_method(self, post_legacy_method: str):
        self._post_legacy_method = post_legacy_method

    @property
    def sender_id(self):
        return self._sender_id

    @sender_id.setter
    def sender_id(self, sender_id: str):
        self._sender_id = sender_id

    def set_sender_id(self, sender_id: str):
        """Set the web-services sender id."""
        self._sender_id = sender_id

    def set_sender_password(self, sender_password: str):
        """Set the web-services sender password."""
        self._sender_password = sender_password

    def get_session_id(self, user_id: str, company_id: str, user_password: str, entity_id: str = None):
        """Log in and obtain an API session id.

        On success the endpoint URL returned by Intacct replaces the default
        gateway URL, and the session id is stored and returned.

        Raises:
            SageIntacctSDKError: when authentication fails.
        """
        timestamp = datetime.datetime.now()
        dict_body = {
            'request': {
                'control': {
                    'senderid': self._sender_id,
                    'password': self._sender_password,
                    'controlid': timestamp,
                    'uniqueid': False,
                    'dtdversion': 3.0,
                    'includewhitespace': False
                },
                'operation': {
                    'authentication': {
                        'login': {
                            'userid': user_id,
                            'companyid': company_id,
                            'password': user_password,
                            'locationid': entity_id
                        }
                    },
                    'content': {
                        'function': {
                            '@controlid': str(uuid.uuid4()),
                            'getAPISession': None
                        }
                    }
                }
            }
        }
        response = self._post_request(dict_body, self._api_url)

        if response['authentication']['status'] == 'success':
            session_details = response['result']['data']['api']
            # Subsequent calls must go to the entity-specific endpoint.
            self._api_url = session_details['endpoint']
            self._session_id = session_details['sessionid']
            return self._session_id
        else:
            raise SageIntacctSDKError('Error: {0}'.format(response['errormessage']))

    def set_session_id(self, session_id: str):
        """Use an externally obtained session id instead of logging in."""
        self._session_id = session_id

    def _support_id_msg(self, errormessages):
        """Normalize an Intacct errormessage payload.

        Returns a dict holding the first error and whether the payload was a
        list or a single dict ('type': 'list' / 'dict').
        """
        error = {}
        if isinstance(errormessages['error'], list):
            error['error'] = errormessages['error'][0]
            error['type'] = 'list'
        elif isinstance(errormessages['error'], dict):
            error['error'] = errormessages['error']
            error['type'] = 'dict'
        return error

    def _decode_support_id(self, errormessages):
        """URL-decode the 'Support ID' token embedded in an error message.

        Robustness fixes: tolerates a missing 'description2' key and a
        non-matching Support-ID pattern (the original raised
        KeyError / UnboundLocalError / AttributeError in those cases).
        """
        support_id_msg = self._support_id_msg(errormessages)
        data_type = support_id_msg['type']
        error = support_id_msg['error']
        message = error.get('description2') if error else None
        if message:
            support_id = re.search('Support ID: (.*)]', message)
            if support_id and support_id.group(1):
                decoded_support_id = unquote(support_id.group(1))
                message = message.replace(support_id.group(1), decoded_support_id)
        # Convert a single-error dict payload to the list shape.
        if data_type == 'dict':
            errormessages['error'] = [errormessages['error']]
        errormessages['error'][0]['description2'] = message if message else None
        return errormessages

    def _post_request(self, dict_body: dict, api_url: str):
        """POST an XML request body and translate the response / errors.

        Returns:
            The 'operation' element of a successful response.

        Raises:
            One of the SDK exception types mapped from the HTTP / API status.
        """
        api_headers = {
            'content-type': 'application/xml'
        }
        body = xmltodict.unparse(dict_body)

        response = requests.post(api_url, headers=api_headers, data=body)
        # force_list keeps a single record of this dimension as a 1-element list.
        parsed_xml = xmltodict.parse(response.text, force_list={self._dimension})
        parsed_response = json.loads(json.dumps(parsed_xml))

        if response.status_code == 200:
            if parsed_response['response']['control']['status'] == 'success':
                api_response = parsed_response['response']['operation']

            if parsed_response['response']['control']['status'] == 'failure':
                exception_msg = self._decode_support_id(parsed_response['response']['errormessage'])
                raise WrongParamsError('Some of the parameters are wrong', exception_msg)

            if api_response['authentication']['status'] == 'failure':
                raise InvalidTokenError('Invalid token / Incorrect credentials', api_response['errormessage'])

            if api_response['result']['status'] == 'success':
                return api_response

            if api_response['result']['status'] == 'failure':
                exception_msg = self._decode_support_id(api_response['result']['errormessage'])

                for error in exception_msg['error']:
                    if error['description2'] and 'You do not have permission for API' in error['description2']:
                        raise InvalidTokenError('The user has insufficient privilege', exception_msg)

                raise WrongParamsError('Error during {0}'.format(api_response['result']['function']), exception_msg)

        if response.status_code == 400:
            raise WrongParamsError('Some of the parameters are wrong', parsed_response)

        if response.status_code == 401:
            raise InvalidTokenError('Invalid token / Incorrect credentials', parsed_response)

        if response.status_code == 403:
            raise NoPrivilegeError('Forbidden, the user has insufficient privilege', parsed_response)

        if response.status_code == 404:
            raise NotFoundItemError('Not found item with ID', parsed_response)

        if response.status_code == 498:
            raise ExpiredTokenError('Expired token, try to refresh it', parsed_response)

        if response.status_code == 500:
            raise InternalServerError('Internal server error', parsed_response)

        raise SageIntacctSDKError('Error: {0}'.format(parsed_response))

    def format_and_send_request(self, data: Dict):
        """Wrap *data* (one function, keyed by its name) in a full request
        envelope, send it under the current session and return the 'result'
        element of the response.
        """
        key = next(iter(data))
        timestamp = datetime.datetime.now()

        dict_body = {
            'request': {
                'control': {
                    'senderid': self._sender_id,
                    'password': self._sender_password,
                    'controlid': timestamp,
                    'uniqueid': False,
                    'dtdversion': 3.0,
                    'includewhitespace': False
                },
                'operation': {
                    'authentication': {
                        'sessionid': self._session_id
                    },
                    'content': {
                        'function': {
                            '@controlid': str(uuid.uuid4()),
                            key: data[key]
                        }
                    }
                }
            }
        }

        response = self._post_request(dict_body, self._api_url)
        return response['result']

    def post(self, data: Dict):
        """Create a record, dispatching to the payload builder appropriate
        for this dimension (legacy create, run-report, delete, etc.)."""
        if self._dimension in ('CCTRANSACTION', 'EPPAYMENT', 'create_invoice', 'create_aradjustment','update_invoice','update_customer'):
            return self._construct_post_legacy_payload(data)
        elif self._dimension == 'readReport':
            return self._construct_run_report(data)
        elif (self._dimension == 'ARINVOICE' and self._post_legacy_method=='delete'):
            return self._construct_delete(data)
        elif (self._dimension =='custom_report'):
            return self._construct_read_custom_report(data)
        else:
            return self._construct_post_payload(data)

    def _construct_post_payload(self, data: Dict):
        """Modern 'create' payload for this dimension."""
        payload = {
            'create': {
                self._dimension: data
            }
        }
        return self.format_and_send_request(payload)

    def _construct_run_report(self, data: str):
        """readReport payload for a stored report id."""
        payload = {
            "readReport": {
                'report': data
            }}
        return self.format_and_send_request(payload)

    def _construct_read_custom_report(self, data: str):
        """Interactive readReport payload that also returns the report definition."""
        payload = {
            "readReport": {
                '@type': "interactive",
                '@returnDef': "true",
                'report': data
            }}
        return self.format_and_send_request(payload)

    def _construct_delete(self, data: str) -> str:
        """Delete payload (used e.g. for ARINVOICE with post_legacy_method='delete')."""
        payload = {"delete": data}
        return self.format_and_send_request(payload)

    def _construct_post_legacy_payload(self, data: Dict):
        """Legacy-function payload keyed by self._post_legacy_method."""
        payload = {
            self._post_legacy_method: data
        }
        return self.format_and_send_request(payload)

    def _construct_post_legacy_aradjustment_payload(self, data: Dict):
        """Legacy create_aradjustment payload."""
        payload = {
            'create_aradjustment': data
        }
        return self.format_and_send_request(payload)

    def count(self):
        """Return the total record count for this dimension."""
        get_count = {
            'query': {
                'object': self._dimension,
                'select': {
                    'field': 'RECORDNO'
                },
                'pagesize': '1'
            }
        }
        response = self.format_and_send_request(get_count)
        return int(response['data']['@totalcount'])

    def read_by_query(self, fields: list = None):
        """Run an unfiltered readByQuery for this dimension.

        Parameters:
            fields: fields to select; all fields ('*') when omitted.
        """
        payload = {
            'readByQuery': {
                'object': self._dimension,
                'fields': ','.join(fields) if fields else '*',
                'query': None,
                'pagesize': '1000'
            }
        }
        return self.format_and_send_request(payload)

    def get(self, field: str, value: str, fields: list = None):
        """Return records where *field* equals *value*."""
        data = {
            'readByQuery': {
                'object': self._dimension,
                'fields': ','.join(fields) if fields else '*',
                'query': "{0} = '{1}'".format(field, value),
                'pagesize': '1000'
            }
        }
        return self.format_and_send_request(data)['data']

    def get_all(self, field: str = None, value: str = None, fields: list = None):
        """Return all records of this dimension, paginating through every page.

        Parameters:
            field / value: optional equals-filter applied to the query.
            fields: fields to select; defaults to the dimension's mapping.
        """
        complete_data = []
        count = self.count()
        pagesize = self._pagesize
        for offset in range(0, count, pagesize):
            data = {
                'query': {
                    'object': self._dimension,
                    'select': {
                        'field': fields if fields else dimensions_fields_mapping[self._dimension]
                    },
                    'pagesize': pagesize,
                    'offset': offset
                }
            }
            if field and value:
                data['query']['filter'] = {
                    'equalto': {
                        'field': field,
                        'value': value
                    }
                }
            paginated_data = self.format_and_send_request(data)['data'][self._dimension]
            complete_data.extend(paginated_data)
        return complete_data

    # A filter is a list of (operator, field, value) triples.
    __query_filter = List[Tuple[str, str, str]]

    def get_by_query(self, fields: List[str] = None,
                     and_filter: __query_filter = None,
                     or_filter: __query_filter = None,
                     filter_payload: dict = None):
        """Run a filtered, paginated query for this dimension.

        Parameters:
            fields: fields to select; defaults to the dimension's mapping.
            and_filter: (operator, field, value) triples ANDed together.
            or_filter: (operator, field, value) triples ORed together.
            filter_payload: raw filter dict used verbatim when no and/or
                filters are given ('in' operator may take [str, str, ...]).

        Returns:
            List of record dicts. Emits DataIntegrityWarning when fewer
            records were collected than the query's total count.
        """
        complete_data = []
        count = self.count()
        pagesize = self._pagesize
        offset = 0
        formatted_filter = filter_payload
        data = {
            'query': {
                'object': self._dimension,
                'select': {
                    'field': fields if fields else dimensions_fields_mapping[self._dimension]
                },
                'pagesize': pagesize,
                'offset': offset
            }
        }
        if and_filter and or_filter:
            formatted_filter = {'and': {}}
            for operator, field, value in and_filter:
                formatted_filter['and'].setdefault(operator, {}).update({'field': field, 'value': value})
            formatted_filter['and']['or'] = {}
            for operator, field, value in or_filter:
                formatted_filter['and']['or'].setdefault(operator, {}).update({'field': field, 'value': value})
        elif and_filter:
            if len(and_filter) > 1:
                formatted_filter = {'and': {}}
                for operator, field, value in and_filter:
                    formatted_filter['and'].setdefault(operator, {}).update({'field': field, 'value': value})
            else:
                # A single condition must not be wrapped in 'and'.
                formatted_filter = {}
                for operator, field, value in and_filter:
                    formatted_filter.setdefault(operator, {}).update({'field': field, 'value': value})
        elif or_filter:
            if len(or_filter) > 1:
                formatted_filter = {'or': {}}
                for operator, field, value in or_filter:
                    formatted_filter['or'].setdefault(operator, {}).update({'field': field, 'value': value})
            else:
                formatted_filter = {}
                for operator, field, value in or_filter:
                    formatted_filter.setdefault(operator, {}).update({'field': field, 'value': value})
        if formatted_filter:
            data['query']['filter'] = formatted_filter

        for offset in range(0, count, pagesize):
            data['query']['offset'] = offset
            paginated_data = self.format_and_send_request(data)['data']
            try:
                complete_data.extend(paginated_data[self._dimension])
            except KeyError:
                # Narrowed from a bare except: the page simply had no
                # records for this dimension.
                pass
            filtered_total = int(paginated_data['@totalcount'])
            if paginated_data['@numremaining'] == '0':
                break
        if filtered_total != len(complete_data):
            warn(message='Your data may not be complete. Records returned do not equal total query record count',
                 category=DataIntegrityWarning)
        return complete_data

    def get_lookup(self):
        """Return all fields with attributes for this object type."""
        data = {'lookup': {'object': self._dimension}}
        return self.format_and_send_request(data)['data']
| true | true |
f730894e33a039bd60f085adc8ba9927f4c02448 | 17,661 | py | Python | binance.py | mrhuytran/bnb-api-wrapper | 569e6eddc9c44f50a918b046cdb248bee60ac0e1 | [
"MIT"
] | 1 | 2021-02-19T17:23:04.000Z | 2021-02-19T17:23:04.000Z | binance.py | mrhuytran/bnb-api-wrapper | 569e6eddc9c44f50a918b046cdb248bee60ac0e1 | [
"MIT"
] | null | null | null | binance.py | mrhuytran/bnb-api-wrapper | 569e6eddc9c44f50a918b046cdb248bee60ac0e1 | [
"MIT"
] | 1 | 2020-11-16T05:59:49.000Z | 2020-11-16T05:59:49.000Z | import requests
import json
from datetime import datetime
import time
import pandas as pd
from pandas import DataFrame as df
import hmac
import hashlib
from interval_enum import Interval
from order_enum import Order
class BinanceClient:
    """Minimal REST client for the Binance spot API.

    Market-data calls are unauthenticated; order/account endpoints send the
    API key header and an HMAC-SHA256 signature (see sign_request).
    """

    def __init__(self, api_key, api_secret):
        """Store credentials and the REST endpoint table."""
        self.key = api_key
        self.secret = api_secret
        self.base = 'https://api.binance.com'
        self.endpoint = {
            'klines': '/api/v1/klines',
            'price_ticker': '/api/v3/ticker/price',
            '24hr_ticker': '/api/v3/ticker/24hr',
            'historical_trade': '/api/v3/historicalTrades',  # recent trades on the market
            'order': '/api/v3/order',
            'test_order': '/api/v3/order/test',
            'open_order': '/api/v3/openOrders',  # all open orders
            'all_order': '/api/v3/allOrders',  # all orders: active, cancelled, filled
            'my_trade': '/api/v3/myTrades'  # all trades for a specific symbol on the account
        }

    @staticmethod
    def _to_datetime(timestamp_col):
        """Convert an iterable of millisecond epoch timestamps to a
        single-column DataFrame of datetimes (shared by several getters)."""
        return df([datetime.fromtimestamp(int(ts / 1000)) for ts in timestamp_col])

    '''
    ***********************************************************
                          GET METHODS
    ***********************************************************
    '''

    def get_klines(self, symbol, interval):
        """Return candlestick data for a symbol as a DataFrame.

        @param
            required - symbol: str, interval: Interval (its .value is sent)
        """
        params = {
            'symbol': symbol,
            'interval': interval.value
        }
        url = self.base + self.endpoint['klines']
        response = requests.get(url, params=params)
        klines_df = df(json.loads(response.text))
        # Columns 0 and 6 hold open/close timestamps in ms since the epoch.
        klines_df[0] = self._to_datetime(klines_df[0])
        klines_df[6] = self._to_datetime(klines_df[6])
        # Column 11 is an unused placeholder field in the kline payload.
        klines_df.pop(11)
        klines_df.columns = ['openTime', 'open', 'high', 'low', 'close',
                             'volume', 'closeTime', 'quoteAssetVol',
                             'no. of trades', 'taker_buy_baseAssetVol',
                             'taker_buy_quoteAssetVol']
        return klines_df

    def get_price(self, symbol=None):
        """Return the current price for one symbol, or for all symbols when
        symbol is omitted.

        @param
            optional - symbol: str
        """
        params = {
            'symbol': symbol
        }
        url = self.base + self.endpoint['price_ticker']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        # All-symbols responses are a list; single-symbol is one dict.
        return df(data) if isinstance(data, list) else df([data])

    def get_24hr_ticker(self, symbol=None):
        """Return the 24-hour ticker for one symbol, or for all symbols when
        symbol is omitted.

        @param
            optional - symbol: str
        """
        params = {
            'symbol': symbol
        }
        url = self.base + self.endpoint['24hr_ticker']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        ticker_df = df(data) if isinstance(data, list) else df([data])
        ticker_df['openTime'] = self._to_datetime(ticker_df['openTime'])
        ticker_df['closeTime'] = self._to_datetime(ticker_df['closeTime'])
        return ticker_df

    def get_historical_trade(self, symbol, limit=None, tradeId=None):
        """Return historical market trades (requires the API key header).

        1. from tradeId onward when tradeId is given
        2. otherwise the most recent trades (500, or `limit` if given)

        @param
            required - symbol: str
            optional - limit: int, tradeId: long
        """
        params = {
            'symbol': symbol,
            'limit': limit,
            'fromId': tradeId
        }
        url = self.base + self.endpoint['historical_trade']
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # Error payloads come back as a dict; return them unchanged.
        if not isinstance(data, list):
            return data
        trade_df = df(data)
        if not trade_df.empty:
            trade_df['time'] = self._to_datetime(trade_df['time'])
        return trade_df

    def get_query_order(self, symbol, orderId):
        """Return the status of a single order.

        @param
            required - symbol: str, orderId: long
        """
        params = {
            'symbol': symbol,
            'orderId': orderId,
            'timestamp': int(round(time.time()*1000))
        }
        url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        return json.loads(response.text)

    def get_open_order(self, symbol=None):
        """Return open orders for one symbol, or for all symbols when symbol
        is omitted.

        @param
            optional - symbol: str
        """
        params = {
            'timestamp': int(round(time.time()*1000))
        }
        if symbol != None:
            params['symbol'] = symbol
        url = self.base + self.endpoint['open_order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # Error payloads come back as a dict; return them unchanged.
        if not isinstance(data, list):
            return data
        open_order_df = df(data)
        if not open_order_df.empty:
            open_order_df['time'] = self._to_datetime(open_order_df['time'])
            open_order_df['updateTime'] = self._to_datetime(open_order_df['updateTime'])
        return open_order_df

    def get_all_order(self, symbol, orderId=None, limit=None):
        """Return all orders (active, canceled, filled) for a symbol.

        1. orders with id >= orderId when orderId is given
        2. otherwise the most recent orders (up to `limit` when given)

        @param
            required - symbol: str
            optional - orderId: long, limit: int
        """
        params = {
            'symbol': symbol,
            'timestamp': int(round(time.time()*1000))
        }
        # Equivalent to the previous nested branching, but flat.
        if orderId is not None:
            params['orderId'] = orderId
        if limit is not None:
            params['limit'] = limit
        url = self.base + self.endpoint['all_order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # Error payloads come back as a dict; return them unchanged.
        if not isinstance(data, list):
            return data
        all_order_df = df(data)
        all_order_df['time'] = self._to_datetime(all_order_df['time'])
        all_order_df['updateTime'] = self._to_datetime(all_order_df['updateTime'])
        return all_order_df

    '''
    ***********************************************************
                          POST METHODS
    ***********************************************************
    '''

    def __new_order(self, symbol, side, orderType, test=True, timeInForce=None, quantity=None,
                    quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place (or, when test=True, only validate) a new order.

        @private
        @params
            required - symbol: str, side: enum, orderType: enum

        Raises:
            Exception: when orderType is not a supported Order member.
        """
        params = {
            'symbol': symbol,
            'side': side.value,
            'type': orderType.value,
            'newOrderRespType': 'RESULT',
            'timestamp': int(round(time.time()*1000))
        }
        if orderType == Order.LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.MARKET:
            # Bug fix: quoteOrderQty was accepted but silently ignored.
            # MARKET orders may be sized by base quantity OR quote spend.
            if quantity is not None:
                params['quantity'] = quantity
            else:
                params['quoteOrderQty'] = quoteOrderQty
        elif orderType == Order.STOP_LOSS:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.STOP_LOSS_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.TAKE_PROFIT:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.TAKE_PROFIT_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.LIMIT_MAKER:
            params['quantity'] = quantity
            params['price'] = price
        else:
            raise Exception('Invalid order type.')
        # test_order validates without affecting the account.
        if test == True:
            url = self.base + self.endpoint['test_order']
        else:
            url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.post(url, params=params, headers={'X-MBX-APIKEY': self.key})
        return json.loads(response.text)

    def buy(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
            quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place a buy order; test=True only validates it.

        @params
            required - symbol: str, orderType: enum
        """
        return self.__new_order(symbol, Order.BUY, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
                                quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)

    def sell(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
             quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place a sell order; test=True only validates it.

        @params
            required - symbol: str, orderType: enum
        """
        return self.__new_order(symbol, Order.SELL, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
                                quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)

    '''
    ***********************************************************
                         DELETE METHODS
    ***********************************************************
    '''

    def cancel_order(self, symbol, orderId):
        """Cancel an open order.

        @param
            @require symbol: str, orderId: long
        """
        params = {
            'symbol': symbol,
            'orderId': orderId,
            'timestamp': int(round(time.time()*1000))
        }
        url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.delete(url, params=params, headers={'X-MBX-APIKEY': self.key})
        return json.loads(response.text)

    def sign_request(self, params: dict):
        """Add the Binance HMAC-SHA256 'signature' field to *params* in place.

        The signature covers the query string built from params in insertion
        order, keyed with the account's API secret.
        """
        query_string = '&'.join(["{}={}".format(d, params[d]) for d in params])
        signature = hmac.new(self.secret.encode('utf-8'),
                             query_string.encode('utf-8'),
                             hashlib.sha256)
        params['signature'] = signature.hexdigest()
| 33.135084 | 120 | 0.554895 | import requests
import json
from datetime import datetime
import time
import pandas as pd
from pandas import DataFrame as df
import hmac
import hashlib
from interval_enum import Interval
from order_enum import Order
class BinanceClient:
    """REST client for the Binance spot API: market data plus signed
    order/account endpoints (HMAC-SHA256, see sign_request)."""

    def __init__(self, api_key, api_secret):
        # Credentials used for signed endpoints.
        self.key = api_key
        self.secret = api_secret
        self.base = 'https://api.binance.com'
        # REST paths, keyed by a short internal name.
        self.endpoint = {
            'klines': '/api/v1/klines',
            'price_ticker': '/api/v3/ticker/price',
            '24hr_ticker': '/api/v3/ticker/24hr',
            'historical_trade': '/api/v3/historicalTrades',  # recent trades on the market
            'order': '/api/v3/order',
            'test_order': '/api/v3/order/test',
            'open_order': '/api/v3/openOrders',  # all open orders
            'all_order': '/api/v3/allOrders',  # active, cancelled, filled
            'my_trade': '/api/v3/myTrades'  # account trades for one symbol
        }

    def get_klines(self, symbol, interval):
        """Return candlestick data for *symbol* as a DataFrame.

        interval is an Interval enum member; its .value is sent to the API.
        """
        params = {
            'symbol': symbol,
            'interval': interval.value
        }
        url = self.base + self.endpoint['klines']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        klines_df = df(data)
        # Columns 0 and 6 hold open/close timestamps in ms since the epoch.
        o_timestamp_df = klines_df[0]
        c_timestamp_df = klines_df[6]
        o_time = []
        c_time = []
        for (o_timestamp, c_timestamp) in zip(o_timestamp_df, c_timestamp_df):
            o_time.append(datetime.fromtimestamp(int(o_timestamp/1000)))
            c_time.append(datetime.fromtimestamp(int(c_timestamp/1000)))
        o_timestamp_df = df(o_time)
        c_timestamp_df = df(c_time)
        klines_df[0] = o_timestamp_df
        klines_df[6] = c_timestamp_df
        # Column 11 is an unused placeholder field in the kline payload.
        klines_df.pop(11)
        klines_df.columns = ['openTime', 'open', 'high', 'low', 'close',
                             'volume', 'closeTime', 'quoteAssetVol',
                             'no. of trades', 'taker_buy_baseAssetVol',
                             'taker_buy_quoteAssetVol']
        return klines_df

    def get_price(self, symbol=None):
        """Return the current price for *symbol*, or for all symbols when
        symbol is None, as a DataFrame."""
        params = {
            'symbol': symbol
        }
        url = self.base + self.endpoint['price_ticker']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        # All-symbols responses are a list; a single symbol is one dict.
        if isinstance(data, list):
            price_df = df(data)
        else:
            price_df = df([data])
        return price_df

    def get_24hr_ticker(self, symbol=None):
        """Return the 24-hour ticker for *symbol*, or for all symbols when
        symbol is None, as a DataFrame with formatted open/close times."""
        params = {
            'symbol': symbol
        }
        url = self.base + self.endpoint['24hr_ticker']
        response = requests.get(url, params=params)
        data = json.loads(response.text)
        if isinstance(data, list):
            ticker_df = df(data)
        else:
            ticker_df = df([data])
        # Convert ms-epoch openTime/closeTime columns to datetimes.
        open_time_df = ticker_df['openTime']
        close_time_df = ticker_df['closeTime']
        open_time = []
        close_time = []
        for (o, c) in zip(open_time_df, close_time_df):
            open_time.append(datetime.fromtimestamp(int(o/1000)))
            close_time.append(datetime.fromtimestamp(int(c/1000)))
        open_time_df = df(open_time)
        close_time_df = df(close_time)
        ticker_df['openTime'] = open_time_df
        ticker_df['closeTime'] = close_time_df
        return ticker_df

    def get_historical_trade(self, symbol, limit=None, tradeId=None):
        """Return historical market trades (requires the API key header):
        from tradeId onward when given, otherwise the most recent trades
        (up to *limit* when given)."""
        params = {
            'symbol': symbol,
            'limit': limit,
            'fromId': tradeId
        }
        url = self.base + self.endpoint['historical_trade']
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # Error payloads come back as a dict; return them unchanged.
        if not isinstance(data, list):
            return data
        trade_df = df(data)
        if not trade_df.empty:
            # Convert the ms-epoch 'time' column to datetimes.
            time_df = trade_df['time']
            _time = []
            for t in time_df:
                _time.append(datetime.fromtimestamp(int(t/1000)))
            time_df = df(_time)
            trade_df['time'] = time_df
        return trade_df

    def get_query_order(self, symbol, orderId):
        """Return the status of a single order (signed request)."""
        params = {
            'symbol': symbol,
            'orderId': orderId,
            'timestamp': int(round(time.time()*1000))
        }
        url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        return data

    def get_open_order(self, symbol=None):
        """Return open orders for *symbol*, or for all symbols when symbol
        is None (signed request)."""
        params = {
            'timestamp': int(round(time.time()*1000))
        }
        if symbol != None:
            params['symbol'] = symbol
        url = self.base + self.endpoint['open_order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # Error payloads come back as a dict; return them unchanged.
        if not isinstance(data, list):
            return data
        open_order_df = df(data)
        if not open_order_df.empty:
            # Convert ms-epoch time/updateTime columns to datetimes.
            time_df = open_order_df['time']
            updateTime_df = open_order_df['updateTime']
            _time = []
            _updateTime = []
            for (t, u) in zip(time_df, updateTime_df):
                _time.append(datetime.fromtimestamp(int(t/1000)))
                _updateTime.append(datetime.fromtimestamp(int(u/1000)))
            time_df = df(_time)
            updateTime_df = df(_updateTime)
            open_order_df['time'] = time_df
            open_order_df['updateTime'] = updateTime_df
        return open_order_df

    def get_all_order(self, symbol, orderId=None, limit=None):
        """Return all orders (active, canceled, filled) for *symbol*:
        orders with id >= orderId when given, otherwise the most recent
        (up to *limit* when given). Signed request."""
        params = {
            'symbol': symbol,
            'timestamp': int(round(time.time()*1000))
        }
        # Attach optional orderId/limit parameters when supplied.
        if limit != None:
            if orderId != None:
                params['orderId'] = orderId
                params['limit'] = limit
            else:
                params['limit'] = limit
        else:
            if orderId != None:
                params['orderId'] = orderId
        url = self.base + self.endpoint['all_order']
        self.sign_request(params)
        response = requests.get(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        # Error payloads come back as a dict; return them unchanged.
        if not isinstance(data, list):
            return data
        all_order_df = df(data)
        # Convert ms-epoch time/updateTime columns to datetimes.
        time_df = all_order_df['time']
        updateTime_df = all_order_df['updateTime']
        _time = []
        _updateTime = []
        for (t, u) in zip(time_df, updateTime_df):
            _time.append(datetime.fromtimestamp(int(t/1000)))
            _updateTime.append(datetime.fromtimestamp(int(u/1000)))
        time_df = df(_time)
        updateTime_df = df(_updateTime)
        all_order_df['time'] = time_df
        all_order_df['updateTime'] = updateTime_df
        return all_order_df

    def __new_order(self, symbol, side, orderType, test=True, timeInForce=None, quantity=None,
                    quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place (or, when test=True, only validate) a new order.

        Builds the parameter set required by each Order type, signs it and
        POSTs it. Raises Exception for an unsupported orderType.
        NOTE(review): quoteOrderQty is accepted but never sent — looks like
        an oversight for MARKET orders; confirm against the API docs.
        """
        params = {
            'symbol': symbol,
            'side': side.value,
            'type': orderType.value,
            'newOrderRespType': 'RESULT',
            'timestamp': int(round(time.time()*1000))
        }
        # Each order type requires a different parameter subset.
        if orderType == Order.LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.MARKET:
            params['quantity'] = quantity
        elif orderType == Order.STOP_LOSS:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.STOP_LOSS_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.TAKE_PROFIT:
            params['quantity'] = quantity
            params['stopPrice'] = stopPrice
        elif orderType == Order.TAKE_PROFIT_LIMIT:
            params['timeInForce'] = timeInForce
            params['quantity'] = quantity
            params['price'] = price
            params['stopPrice'] = stopPrice
            if icebergQty != None:
                params['icebergQty'] = icebergQty
        elif orderType == Order.LIMIT_MAKER:
            params['quantity'] = quantity
            params['price'] = price
        else:
            raise Exception('Invalid order type.')
        # test_order validates without affecting the account.
        if test == True:
            url = self.base + self.endpoint['test_order']
        else:
            url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.post(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        return data

    def buy(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
            quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place a buy order; test=True only validates it."""
        return self.__new_order(symbol, Order.BUY, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
                                quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)

    def sell(self, symbol, orderType, test=True, timeInForce=None, quantity=None,
             quoteOrderQty=None, price=None, stopPrice=None, icebergQty=None):
        """Place a sell order; test=True only validates it."""
        return self.__new_order(symbol, Order.SELL, orderType, test=test, timeInForce=timeInForce, quantity=quantity,
                                quoteOrderQty=quoteOrderQty, price=price, stopPrice=stopPrice, icebergQty=icebergQty)

    def cancel_order(self, symbol, orderId):
        """Cancel an open order (signed request)."""
        params = {
            'symbol': symbol,
            'orderId': orderId,
            'timestamp': int(round(time.time()*1000))
        }
        url = self.base + self.endpoint['order']
        self.sign_request(params)
        response = requests.delete(url, params=params, headers={'X-MBX-APIKEY': self.key})
        data = json.loads(response.text)
        return data

    def sign_request(self, params: dict):
        """Add the Binance HMAC-SHA256 'signature' field to *params* in
        place; the signature covers the query string built from params in
        insertion order, keyed with the API secret."""
        query_string = '&'.join(["{}={}".format(d,params[d]) for d in params])
        signature = hmac.new(self.secret.encode('utf-8'),
                             query_string.encode('utf-8'),
                             hashlib.sha256)
        params['signature'] = signature.hexdigest()
| true | true |
f7308a1085cb46493a7cbe0bd9834d3db9018f4f | 946 | py | Python | Initial files/dht_publish.py | vinayrnair/ESP32-DHT22-Sensors | cc7a34eb866b39aa62ce8cdf676371aa9d6a3d28 | [
"Unlicense"
] | 1 | 2021-12-07T09:40:38.000Z | 2021-12-07T09:40:38.000Z | Initial files/dht_publish.py | vinayrnair/ESP32-DHT22-Sensors | cc7a34eb866b39aa62ce8cdf676371aa9d6a3d28 | [
"Unlicense"
] | null | null | null | Initial files/dht_publish.py | vinayrnair/ESP32-DHT22-Sensors | cc7a34eb866b39aa62ce8cdf676371aa9d6a3d28 | [
"Unlicense"
] | null | null | null | from time import sleep
# MicroPython script: read a DHT22 sensor on an ESP32 and publish
# "temperature,humidity" readings to an MQTT broker in an endless loop.
from umqtt.simple import MQTTClient
from machine import Pin
from dht import DHT22
SERVER = 'ip address'  # MQTT Server Address (Change to the IP address of your Pi)
CLIENT_ID = 'ESP32_DHT22_Sensor'  # identifies this device to the broker
TOPIC = b'temp_humidity'  # topic the readings are published under
client = MQTTClient(CLIENT_ID, SERVER)
client.connect()  # Connect to MQTT broker
sensor = DHT22(Pin(23, Pin.IN, Pin.PULL_UP))  # DHT-22 on GPIO 23 (input with internal pull-up resistor)
while True:
    try:
        sensor.measure()  # Poll sensor
        t = sensor.temperature()
        h = sensor.humidity()
        if isinstance(t, float) and isinstance(h, float):  # Confirm sensor results are numeric
            # NOTE(review): bytes.format() is MicroPython-specific (CPython
            # bytes have no .format()) -- confirm the target firmware.
            msg = (b'{0:3.1f},{1:3.1f}'.format(t, h))  # "temp,humidity", one decimal each
            client.publish(TOPIC, msg)  # Publish sensor data to MQTT topic
            print(msg)
        else:
            print('Invalid sensor readings.')
    except OSError:
        # DHT reads occasionally time out; skip this cycle and retry.
        print('Failed to read sensor.')
    sleep(4)  # pause between polls
| 32.62069 | 105 | 0.651163 | from time import sleep
from umqtt.simple import MQTTClient
from machine import Pin
from dht import DHT22
SERVER = 'ip address'
CLIENT_ID = 'ESP32_DHT22_Sensor'
TOPIC = b'temp_humidity'
client = MQTTClient(CLIENT_ID, SERVER)
client.connect()
sensor = DHT22(Pin(23, Pin.IN, Pin.PULL_UP))
while True:
try:
sensor.measure()
t = sensor.temperature()
h = sensor.humidity()
if isinstance(t, float) and isinstance(h, float):
msg = (b'{0:3.1f},{1:3.1f}'.format(t, h))
client.publish(TOPIC, msg)
print(msg)
else:
print('Invalid sensor readings.')
except OSError:
print('Failed to read sensor.')
sleep(4)
| true | true |
f7308a5b65edfbcf8a90beaa071fcbcffe8b5c75 | 1,912 | py | Python | echopype/utils/prov.py | mbdunn/echopype | a53290801d1ca062d45c00ca2c541d54682dd40a | [
"Apache-2.0"
] | null | null | null | echopype/utils/prov.py | mbdunn/echopype | a53290801d1ca062d45c00ca2c541d54682dd40a | [
"Apache-2.0"
] | null | null | null | echopype/utils/prov.py | mbdunn/echopype | a53290801d1ca062d45c00ca2c541d54682dd40a | [
"Apache-2.0"
] | null | null | null | from datetime import datetime as dt
from pathlib import Path, PosixPath
from typing import Any, Dict, List, Tuple, Union

# TODO: uncomment after release (causes flake8 to fail)
# from _echopype_version import version as ECHOPYPE_VERSION
from typing_extensions import Literal
ProcessType = Literal["conversion", "processing"]
def echopype_prov_attrs(process_type: ProcessType) -> Dict[str, str]:
    """
    Standard echopype software attributes for provenance.

    Parameters
    ----------
    process_type : ProcessType
        Echopype process function type ("conversion" or "processing")

    Returns
    -------
    dict
        Software name, software version and a UTC timestamp, with keys
        prefixed by *process_type*.
    """
    # TODO: replace the hard-coded "0.6.0" with ECHOPYPE_VERSION after release
    timestamp = dt.utcnow().isoformat(timespec="seconds") + "Z"  # UTC time
    return {
        f"{process_type}_software_name": "echopype",
        f"{process_type}_software_version": "0.6.0",
        f"{process_type}_time": timestamp,
    }
def source_files_vars(source_paths: Union[str, List[Any]]) -> Dict[str, Tuple]:
    """
    Create source_filenames provenance variable dict to be used for creating
    xarray dataarray.

    Parameters
    ----------
    source_paths: Union[str, List[Any]]
        Source file paths as either a single path (str or pathlib path)
        or a list of such paths

    Returns
    -------
    source_files_var: Dict[str, Tuple]
        Single-element dict containing a tuple for creating the
        source_filenames xarray dataarray with filenames dimension
    """
    # Accept a plain string or any pathlib path.  isinstance(..., (str, Path))
    # also covers WindowsPath and str/Path subclasses, which the previous
    # ``type(source_paths) in (str, PosixPath)`` check missed.
    # Anything else is assumed to be an iterable of such paths.
    if isinstance(source_paths, (str, Path)):
        source_files = [str(source_paths)]
    else:
        source_files = [str(p) for p in source_paths]
    source_files_var = {
        "source_filenames": (
            "filenames",
            source_files,
            {"long_name": "Source filenames"},
        ),
    }
    return source_files_var
| 29.875 | 96 | 0.668933 | from datetime import datetime as dt
from pathlib import PosixPath
from typing import Any, Dict, List, Tuple, Union
from typing_extensions import Literal
ProcessType = Literal["conversion", "processing"]
def echopype_prov_attrs(process_type: ProcessType) -> Dict[str, str]:
prov_dict = {
f"{process_type}_software_name": "echopype",
f"{process_type}_software_version": "0.6.0",
f"{process_type}_time": dt.utcnow().isoformat(timespec="seconds") + "Z",
}
return prov_dict
def source_files_vars(source_paths: Union[str, List[Any]]) -> Dict[str, Tuple]:
if type(source_paths) in (str, PosixPath):
source_files = [str(source_paths)]
else:
source_files = [str(p) for p in source_paths]
source_files_var = {
"source_filenames": (
"filenames",
source_files,
{"long_name": "Source filenames"},
),
}
return source_files_var
| true | true |
f7308b26ee37870995f03c79abb88818796df454 | 18,976 | py | Python | pypy/interpreter/error.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2016-07-06T23:30:20.000Z | 2017-05-30T15:59:31.000Z | pypy/interpreter/error.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | null | null | null | pypy/interpreter/error.py | kantai/passe-pypy-taint-tracking | b60a3663f8fe89892dc182c8497aab97e2e75d69 | [
"MIT"
] | 2 | 2020-07-09T08:14:22.000Z | 2021-01-15T18:01:25.000Z | import os, sys
from rpython.rlib import jit
from rpython.rlib.objectmodel import we_are_translated
from errno import EINTR
AUTO_DEBUG = os.getenv('PYPY_DEBUG')
RECORD_INTERPLEVEL_TRACEBACK = True
class OperationError(Exception):
    """Interpreter-level exception that signals an exception that should be
    sent to the application level.
    OperationError instances have three attributes (and no .args),
    w_type, _w_value and _application_traceback, which contain the wrapped
    type and value describing the exception, and a chained list of
    PyTraceback objects making the application-level traceback.
    """
    # class-level defaults; _w_value stays None until the message is
    # actually needed (see get_w_value/_compute_value for the lazy case)
    _w_value = None
    _application_traceback = None
    def __init__(self, w_type, w_value, tb=None):
        """Build an OperationError from wrapped type/value and an
        optional application-level traceback."""
        assert w_type is not None
        self.setup(w_type)
        self._w_value = w_value
        self._application_traceback = tb
    def setup(self, w_type):
        """Shared initialization; also used by the OpErrFmt subclasses
        created in get_operrcls2()."""
        self.w_type = w_type
        if not we_are_translated():
            # untranslated only: keep interp-level tracebacks for debugging
            self.debug_excs = []
    def clear(self, space):
        # for sys.exc_clear()
        self.w_type = space.w_None
        self._w_value = space.w_None
        self._application_traceback = None
        if not we_are_translated():
            del self.debug_excs[:]
    def match(self, space, w_check_class):
        "Check if this application-level exception matches 'w_check_class'."
        return space.exception_match(self.w_type, w_check_class)
    def async(self, space):
        "Check if this is an exception that should better not be caught."
        return (self.match(space, space.w_SystemExit) or
                self.match(space, space.w_KeyboardInterrupt))
    def __str__(self):
        "NOT_RPYTHON: Convenience for tracebacks."
        s = self._w_value
        if self.__class__ is not OperationError and s is None:
            # lazily-formatted subclass (OpErrFmt): compute the message now
            s = self._compute_value()
        return '[%s: %s]' % (self.w_type, s)
    def errorstr(self, space, use_repr=False):
        "The exception class and value, as a string."
        w_value = self.get_w_value(space)
        if space is None:
            # this part NOT_RPYTHON
            exc_typename = str(self.w_type)
            exc_value = str(w_value)
        else:
            w = space.wrap
            if space.is_w(space.type(self.w_type), space.w_str):
                # string exception (old-style): the type is its own name
                exc_typename = space.str_w(self.w_type)
            else:
                exc_typename = space.str_w(
                    space.getattr(self.w_type, w('__name__')))
            if space.is_w(w_value, space.w_None):
                exc_value = ""
            else:
                try:
                    if use_repr:
                        exc_value = space.str_w(space.repr(w_value))
                    else:
                        exc_value = space.str_w(space.str(w_value))
                except OperationError:
                    # oups, cannot __str__ the exception object
                    exc_value = "<oups, exception object itself cannot be str'd>"
        if not exc_value:
            return exc_typename
        else:
            return '%s: %s' % (exc_typename, exc_value)
    def record_interpreter_traceback(self):
        """Records the current traceback inside the interpreter.
        This traceback is only useful to debug the interpreter, not the
        application."""
        if not we_are_translated():
            if RECORD_INTERPLEVEL_TRACEBACK:
                self.debug_excs.append(sys.exc_info())
    def print_application_traceback(self, space, file=None):
        "NOT_RPYTHON: Dump a standard application-level traceback."
        if file is None: file = sys.stderr
        self.print_app_tb_only(file)
        print >> file, self.errorstr(space)
    def print_app_tb_only(self, file):
        "NOT_RPYTHON"
        tb = self._application_traceback
        if tb:
            import linecache
            print >> file, "Traceback (application-level):"
            while tb is not None:
                co = tb.frame.pycode
                lineno = tb.get_lineno()
                fname = co.co_filename
                if fname.startswith('<inline>\n'):
                    # source inlined into the "filename": first line is the
                    # real name, the rest are the source lines themselves
                    lines = fname.split('\n')
                    fname = lines[0].strip()
                    try:
                        l = lines[lineno]
                    except IndexError:
                        l = ''
                else:
                    l = linecache.getline(fname, lineno)
                print >> file, " File \"%s\"," % fname,
                print >> file, "line", lineno, "in", co.co_name
                if l:
                    if l.endswith('\n'):
                        l = l[:-1]
                    l = "  " + l.lstrip()
                    print >> file, l
                tb = tb.next
    def print_detailed_traceback(self, space=None, file=None):
        """NOT_RPYTHON: Dump a nice detailed interpreter- and
        application-level traceback, useful to debug the interpreter."""
        import traceback, cStringIO
        if file is None: file = sys.stderr
        f = cStringIO.StringIO()
        # most recent interp-level traceback first
        for i in range(len(self.debug_excs)-1, -1, -1):
            print >> f, "Traceback (interpreter-level):"
            traceback.print_tb(self.debug_excs[i][2], file=f)
        f.seek(0)
        debug_print(''.join(['|| ' + line for line in f.readlines()]), file)
        if self.debug_excs:
            from pypy.tool import tb_server
            tb_server.publish_exc(self.debug_excs[-1])
        self.print_app_tb_only(file)
        print >> file, '(application-level)', self.errorstr(space)
        if AUTO_DEBUG:
            import debug
            debug.fire(self)
    @jit.unroll_safe
    def normalize_exception(self, space):
        """Normalize the OperationError.  In other words, fix w_type and/or
        w_value to make sure that the __class__ of w_value is exactly w_type.
        """
        #
        # This method covers all ways in which the Python statement
        # "raise X, Y" can produce a valid exception type and instance.
        #
        # In the following table, 'Class' means a subclass of BaseException
        # and 'inst' is an instance of either 'Class' or a subclass of it.
        # Or 'Class' can also be an old-style class and 'inst' an old-style
        # instance of it.
        #
        # The flow object space only deals with non-advanced case. Old-style
        # classes and instances *are* advanced.
        #
        # input (w_type, w_value)... becomes...  advanced case?
        # ---------------------------------------------------------------------
        # (tuple, w_value)           (tuple[0], w_value)             yes
        # (Class, None)              (Class, Class())                no
        # (Class, inst)              (inst.__class__, inst)          no
        # (Class, tuple)             (Class, Class(*tuple))          yes
        # (Class, x)                 (Class, Class(x))               no
        # ("string", ...)            ("string", ...)                 deprecated
        # (inst, None)               (inst.__class__, inst)          no
        #
        w_type = self.w_type
        w_value = self.get_w_value(space)
        while space.is_true(space.isinstance(w_type, space.w_tuple)):
            # (tuple, ...): recursively take the first element as the type
            w_type = space.getitem(w_type, space.wrap(0))
        if space.exception_is_valid_obj_as_class_w(w_type):
            # this is for all cases of the form (Class, something)
            if space.is_w(w_value, space.w_None):
                # raise Type: we assume we have to instantiate Type
                w_value = space.call_function(w_type)
                w_type = self._exception_getclass(space, w_value)
            else:
                w_valuetype = space.exception_getclass(w_value)
                if space.exception_issubclass_w(w_valuetype, w_type):
                    # raise Type, Instance: let etype be the exact type of value
                    w_type = w_valuetype
                else:
                    if space.is_true(space.isinstance(w_value, space.w_tuple)):
                        # raise Type, tuple: assume the tuple contains the
                        #                    constructor args
                        w_value = space.call(w_type, w_value)
                    else:
                        # raise Type, X: assume X is the constructor argument
                        w_value = space.call_function(w_type, w_value)
                    w_type = self._exception_getclass(space, w_value)
        else:
            # the only case left here is (inst, None), from a 'raise inst'.
            w_inst = w_type
            w_instclass = self._exception_getclass(space, w_inst)
            if not space.is_w(w_value, space.w_None):
                raise OperationError(space.w_TypeError,
                                     space.wrap("instance exception may not "
                                                "have a separate value"))
            w_value = w_inst
            w_type = w_instclass
        self.w_type = w_type
        self._w_value = w_value
    def _exception_getclass(self, space, w_inst):
        """Return the app-level class of w_inst, checking that it is a
        valid exception class; raise an app-level TypeError otherwise."""
        w_type = space.exception_getclass(w_inst)
        if not space.exception_is_valid_class_w(w_type):
            typename = w_type.getname(space)
            msg = ("exceptions must be old-style classes or derived "
                   "from BaseException, not %s")
            raise operationerrfmt(space.w_TypeError, msg, typename)
        return w_type
    def write_unraisable(self, space, where, w_object=None,
                         with_traceback=False, extra_line=''):
        """Report this exception on app-level sys.stderr instead of
        propagating it (used for exceptions in __del__ and the like).
        Any error during the reporting itself is swallowed."""
        if w_object is None:
            objrepr = ''
        else:
            try:
                objrepr = space.str_w(space.repr(w_object))
            except OperationError:
                objrepr = '?'
        #
        try:
            if with_traceback:
                w_t = self.w_type
                w_v = self.get_w_value(space)
                w_tb = space.wrap(self.get_traceback())
                space.appexec([space.wrap(where),
                               space.wrap(objrepr),
                               space.wrap(extra_line),
                               w_t, w_v, w_tb],
                    """(where, objrepr, extra_line, t, v, tb):
                        import sys, traceback
                        sys.stderr.write('From %s%s:\\n' % (where, objrepr))
                        if extra_line:
                            sys.stderr.write(extra_line)
                        traceback.print_exception(t, v, tb)
                    """)
            else:
                msg = 'Exception %s in %s%s ignored\n' % (
                    self.errorstr(space, use_repr=True), where, objrepr)
                space.call_method(space.sys.get('stderr'), 'write',
                                  space.wrap(msg))
        except OperationError:
            pass   # ignored
    def get_w_value(self, space):
        """Return the wrapped exception value, computing and caching it
        lazily for OpErrFmt subclasses (where _w_value starts as None)."""
        w_value = self._w_value
        if w_value is None:
            value = self._compute_value()
            self._w_value = w_value = space.wrap(value)
        return w_value
    def _compute_value(self):
        # overridden by the OpErrFmt subclasses built in get_operrcls2()
        raise NotImplementedError
    def get_traceback(self):
        """Calling this marks the PyTraceback as escaped, i.e. it becomes
        accessible and inspectable by app-level Python code. For the JIT.
        Note that this has no effect if there are already several traceback
        frames recorded, because in this case they are already marked as
        escaping by executioncontext.leave() being called with
        got_exception=True.
        """
        from pypy.interpreter.pytraceback import PyTraceback
        tb = self._application_traceback
        if tb is not None and isinstance(tb, PyTraceback):
            tb.frame.mark_as_escaped()
        return tb
    def set_traceback(self, traceback):
        """Set the current traceback. It should either be a traceback
        pointing to some already-escaped frame, or a traceback for the
        current frame. To support the latter case we do not mark the
        frame as escaped. The idea is that it will be marked as escaping
        only if the exception really propagates out of this frame, by
        executioncontext.leave() being called with got_exception=True.
        """
        self._application_traceback = traceback
# ____________________________________________________________
# optimization only: avoid the slowest operation -- the string
# formatting with '%' -- in the common case were we don't
# actually need the message. Only supports %s and %d.
_fmtcache = {}     # valuefmt -> (OpErrFmt class, strings) pair
_fmtcache2 = {}    # formats tuple -> OpErrFmt class
def decompose_valuefmt(valuefmt):
    """Split *valuefmt* around its %-codes.

    Returns (strings, formats): 'strings' holds the literal pieces (one
    more than there are formats) and 'formats' the format characters,
    each 's' or 'd', in order.  A '%%' collapses into a literal '%'.
    """
    pieces = valuefmt.split('%')
    strings = [pieces[0]]
    formats = []
    idx = 1
    while idx < len(pieces):
        piece = pieces[idx]
        if piece[:1] == 's' or piece[:1] == 'd':
            formats.append(piece[0])
            strings.append(piece[1:])
            idx += 1
        elif piece == '':
            # '%%' produced an empty piece: glue a literal '%' plus the
            # following text onto the previous literal part.
            strings[-1] += '%' + pieces[idx + 1]
            idx += 2
        else:
            raise ValueError("invalid format string (only %s or %d supported)")
    assert len(formats) > 0, "unsupported: no % command found"
    return tuple(strings), tuple(formats)
def get_operrcls2(valuefmt):
    """Return (OpErrFmt subclass, strings) for *valuefmt*.
    The subclass stores the %-arguments as attributes x0, x1, ... and
    formats the message lazily in _compute_value(); one subclass is
    cached per distinct tuple of format characters."""
    strings, formats = decompose_valuefmt(valuefmt)
    assert len(strings) == len(formats) + 1
    try:
        OpErrFmt = _fmtcache2[formats]
    except KeyError:
        from rpython.rlib.unroll import unrolling_iterable
        # one attribute name per format argument: x0, x1, ...
        attrs = ['x%d' % i for i in range(len(formats))]
        entries = unrolling_iterable(enumerate(attrs))
        #
        class OpErrFmt(OperationError):
            def __init__(self, w_type, strings, *args):
                self.setup(w_type)
                assert len(args) == len(strings) - 1
                self.xstrings = strings
                # stash each argument under its xN attribute (loop is
                # presumably unrolled by the RPython translator)
                for i, attr in entries:
                    setattr(self, attr, args[i])
                assert w_type is not None
            def _compute_value(self):
                # interleave literal pieces and str()-ed arguments:
                # [s0, a0, s1, a1, ..., s_last]
                lst = [None] * (len(formats) + len(formats) + 1)
                for i, attr in entries:
                    string = self.xstrings[i]
                    value = getattr(self, attr)
                    lst[i+i] = string
                    lst[i+i+1] = str(value)
                lst[-1] = self.xstrings[-1]
                return ''.join(lst)
        #
        _fmtcache2[formats] = OpErrFmt
    return OpErrFmt, strings
def get_operationerr_class(valuefmt):
    """Memoized wrapper around get_operrcls2(), keyed on the raw
    format string (the 'specialize:memo' hint folds the lookup away
    for constant format strings)."""
    if valuefmt not in _fmtcache:
        _fmtcache[valuefmt] = get_operrcls2(valuefmt)
    return _fmtcache[valuefmt]
get_operationerr_class._annspecialcase_ = 'specialize:memo'
def operationerrfmt(w_type, valuefmt, *args):
    """Equivalent to OperationError(w_type, space.wrap(valuefmt % args)),
    except that the message is formatted lazily, only if somebody
    actually asks for it (the common case never does)."""
    cls, strings = get_operationerr_class(valuefmt)
    return cls(w_type, strings, *args)
operationerrfmt._annspecialcase_ = 'specialize:arg(1)'
# ____________________________________________________________
# Utilities
from rpython.tool.ansi_print import ansi_print
def debug_print(text, file=None, newline=True):
    """Print *text* through ansi_print using ANSI escape 31 (red)."""
    # 31: ANSI color code "red"
    ansi_print(text, esc="31", file=file, newline=newline)
# Feature probe: the WindowsError builtin only exists on Windows, so
# whether the name resolves tells us if Windows-specific error wrapping
# (wrap_windowserror) is applicable.
try:
    WindowsError
except NameError:
    _WINDOWS = False
else:
    _WINDOWS = True
def wrap_windowserror(space, e, w_filename=None):
    """Turn an interp-level WindowsError *e* into an app-level
    OperationError of type space.w_WindowsError, optionally carrying
    *w_filename* as the third constructor argument."""
    from rpython.rlib import rwin32
    code = e.winerror
    try:
        text = rwin32.FormatError(code)
    except ValueError:
        # no system message available for this code
        text = 'Windows Error %d' % code
    w_exc_class = space.w_WindowsError
    w_code = space.wrap(code)
    w_text = space.wrap(text)
    if w_filename is None:
        w_error = space.call_function(w_exc_class, w_code, w_text)
    else:
        w_error = space.call_function(w_exc_class, w_code, w_text, w_filename)
    return OperationError(w_exc_class, w_error)
def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError',
                  w_exception_class=None):
    """Turn an interp-level OSError *e* into an app-level OperationError.
    The app-level class is w_exception_class if given, else the space
    attribute named by exception_name; w_filename, if given, is passed
    as the third constructor argument."""
    assert isinstance(e, OSError)
    if _WINDOWS and isinstance(e, WindowsError):
        # WindowsError carries a winerror code; delegate to the
        # Windows-specific wrapper
        return wrap_windowserror(space, e, w_filename)
    errno = e.errno
    if errno == EINTR:
        # interrupted syscall: give pending app-level signal handlers a
        # chance to run (may itself raise, e.g. KeyboardInterrupt)
        space.getexecutioncontext().checksignals()
    try:
        msg = os.strerror(errno)
    except ValueError:
        msg = 'error %d' % errno
    if w_exception_class is None:
        exc = getattr(space, exception_name)
    else:
        exc = w_exception_class
    if w_filename is not None:
        w_error = space.call_function(exc, space.wrap(errno),
                                      space.wrap(msg), w_filename)
    else:
        w_error = space.call_function(exc, space.wrap(errno),
                                      space.wrap(msg))
    return OperationError(exc, w_error)
# RPython specialization hint keyed on the 3rd argument (exception_name)
wrap_oserror2._annspecialcase_ = 'specialize:arg(3)'
def wrap_oserror(space, e, filename=None, exception_name='w_OSError',
                 w_exception_class=None):
    """Variant of wrap_oserror2() taking an unwrapped *filename*:
    wraps it (when given) and delegates."""
    if filename is None:
        w_filename = None
    else:
        w_filename = space.wrap(filename)
    return wrap_oserror2(space, e, w_filename,
                         exception_name=exception_name,
                         w_exception_class=w_exception_class)
wrap_oserror._annspecialcase_ = 'specialize:arg(3)'
def exception_from_errno(space, w_type):
    """Build an OperationError of app-level type *w_type* from the
    C-level errno currently set, as (errno, strerror)."""
    from rpython.rlib.rposix import get_errno
    err = get_errno()
    w_error = space.call_function(w_type,
                                  space.wrap(err),
                                  space.wrap(os.strerror(err)))
    return OperationError(w_type, w_error)
def new_exception_class(space, name, w_bases=None, w_dict=None):
    """Create a new exception type.
    @param name: the name of the type; a dotted 'module.Name' also sets
                 the __module__ attribute.
    @param w_bases: Either an exception type, or a wrapped tuple of
                    exception types.  default is space.w_Exception.
    @param w_dict: an optional dictionary to populate the class __dict__.
    """
    module = None
    if '.' in name:
        module, name = name.rsplit('.', 1)
    # normalize the bases to a wrapped tuple
    if w_bases is None:
        w_bases = space.newtuple([space.w_Exception])
    elif not space.isinstance_w(w_bases, space.w_tuple):
        w_bases = space.newtuple([w_bases])
    if w_dict is None:
        w_dict = space.newdict()
    w_exc = space.call_function(space.w_type, space.wrap(name),
                                w_bases, w_dict)
    if module:
        space.setattr(w_exc, space.wrap("__module__"), space.wrap(module))
    return w_exc
def typed_unwrap_error_msg(space, expected, w_obj):
    """Wrapped "expected X, got Y object" message for unwrap failures."""
    actual = space.type(w_obj).getname(space)
    return space.wrap("expected %s, got %s object" % (expected, actual))
| 39.865546 | 81 | 0.57657 | import os, sys
from rpython.rlib import jit
from rpython.rlib.objectmodel import we_are_translated
from errno import EINTR
AUTO_DEBUG = os.getenv('PYPY_DEBUG')
RECORD_INTERPLEVEL_TRACEBACK = True
class OperationError(Exception):
"""Interpreter-level exception that signals an exception that should be
sent to the application level.
OperationError instances have three attributes (and no .args),
w_type, _w_value and _application_traceback, which contain the wrapped
type and value describing the exception, and a chained list of
PyTraceback objects making the application-level traceback.
"""
_w_value = None
_application_traceback = None
def __init__(self, w_type, w_value, tb=None):
assert w_type is not None
self.setup(w_type)
self._w_value = w_value
self._application_traceback = tb
def setup(self, w_type):
self.w_type = w_type
if not we_are_translated():
self.debug_excs = []
def clear(self, space):
self.w_type = space.w_None
self._w_value = space.w_None
self._application_traceback = None
if not we_are_translated():
del self.debug_excs[:]
def match(self, space, w_check_class):
"Check if this application-level exception matches 'w_check_class'."
return space.exception_match(self.w_type, w_check_class)
def async(self, space):
"Check if this is an exception that should better not be caught."
return (self.match(space, space.w_SystemExit) or
self.match(space, space.w_KeyboardInterrupt))
def __str__(self):
"NOT_RPYTHON: Convenience for tracebacks."
s = self._w_value
if self.__class__ is not OperationError and s is None:
s = self._compute_value()
return '[%s: %s]' % (self.w_type, s)
def errorstr(self, space, use_repr=False):
"The exception class and value, as a string."
w_value = self.get_w_value(space)
if space is None:
exc_typename = str(self.w_type)
exc_value = str(w_value)
else:
w = space.wrap
if space.is_w(space.type(self.w_type), space.w_str):
exc_typename = space.str_w(self.w_type)
else:
exc_typename = space.str_w(
space.getattr(self.w_type, w('__name__')))
if space.is_w(w_value, space.w_None):
exc_value = ""
else:
try:
if use_repr:
exc_value = space.str_w(space.repr(w_value))
else:
exc_value = space.str_w(space.str(w_value))
except OperationError:
exc_value = "<oups, exception object itself cannot be str'd>"
if not exc_value:
return exc_typename
else:
return '%s: %s' % (exc_typename, exc_value)
def record_interpreter_traceback(self):
"""Records the current traceback inside the interpreter.
This traceback is only useful to debug the interpreter, not the
application."""
if not we_are_translated():
if RECORD_INTERPLEVEL_TRACEBACK:
self.debug_excs.append(sys.exc_info())
def print_application_traceback(self, space, file=None):
"NOT_RPYTHON: Dump a standard application-level traceback."
if file is None: file = sys.stderr
self.print_app_tb_only(file)
print >> file, self.errorstr(space)
def print_app_tb_only(self, file):
"NOT_RPYTHON"
tb = self._application_traceback
if tb:
import linecache
print >> file, "Traceback (application-level):"
while tb is not None:
co = tb.frame.pycode
lineno = tb.get_lineno()
fname = co.co_filename
if fname.startswith('<inline>\n'):
lines = fname.split('\n')
fname = lines[0].strip()
try:
l = lines[lineno]
except IndexError:
l = ''
else:
l = linecache.getline(fname, lineno)
print >> file, " File \"%s\"," % fname,
print >> file, "line", lineno, "in", co.co_name
if l:
if l.endswith('\n'):
l = l[:-1]
l = " " + l.lstrip()
print >> file, l
tb = tb.next
def print_detailed_traceback(self, space=None, file=None):
"""NOT_RPYTHON: Dump a nice detailed interpreter- and
application-level traceback, useful to debug the interpreter."""
import traceback, cStringIO
if file is None: file = sys.stderr
f = cStringIO.StringIO()
for i in range(len(self.debug_excs)-1, -1, -1):
print >> f, "Traceback (interpreter-level):"
traceback.print_tb(self.debug_excs[i][2], file=f)
f.seek(0)
debug_print(''.join(['|| ' + line for line in f.readlines()]), file)
if self.debug_excs:
from pypy.tool import tb_server
tb_server.publish_exc(self.debug_excs[-1])
self.print_app_tb_only(file)
print >> file, '(application-level)', self.errorstr(space)
if AUTO_DEBUG:
import debug
debug.fire(self)
@jit.unroll_safe
def normalize_exception(self, space):
"""Normalize the OperationError. In other words, fix w_type and/or
w_value to make sure that the __class__ of w_value is exactly w_type.
"""
#
# This method covers all ways in which the Python statement
# "raise X, Y" can produce a valid exception type and instance.
#
# In the following table, 'Class' means a subclass of BaseException
# and 'inst' is an instance of either 'Class' or a subclass of it.
# Or 'Class' can also be an old-style class and 'inst' an old-style
# instance of it.
#
# The flow object space only deals with non-advanced case. Old-style
# classes and instances *are* advanced.
#
# input (w_type, w_value)... becomes... advanced case?
# ---------------------------------------------------------------------
# (tuple, w_value) (tuple[0], w_value) yes
# (Class, None) (Class, Class()) no
# (Class, inst) (inst.__class__, inst) no
# (Class, tuple) (Class, Class(*tuple)) yes
# (Class, x) (Class, Class(x)) no
# ("string", ...) ("string", ...) deprecated
# (inst, None) (inst.__class__, inst) no
#
w_type = self.w_type
w_value = self.get_w_value(space)
while space.is_true(space.isinstance(w_type, space.w_tuple)):
w_type = space.getitem(w_type, space.wrap(0))
if space.exception_is_valid_obj_as_class_w(w_type):
# this is for all cases of the form (Class, something)
if space.is_w(w_value, space.w_None):
# raise Type: we assume we have to instantiate Type
w_value = space.call_function(w_type)
w_type = self._exception_getclass(space, w_value)
else:
w_valuetype = space.exception_getclass(w_value)
if space.exception_issubclass_w(w_valuetype, w_type):
# raise Type, Instance: let etype be the exact type of value
w_type = w_valuetype
else:
if space.is_true(space.isinstance(w_value, space.w_tuple)):
# raise Type, tuple: assume the tuple contains the
# constructor args
w_value = space.call(w_type, w_value)
else:
# raise Type, X: assume X is the constructor argument
w_value = space.call_function(w_type, w_value)
w_type = self._exception_getclass(space, w_value)
else:
# the only case left here is (inst, None), from a 'raise inst'.
w_inst = w_type
w_instclass = self._exception_getclass(space, w_inst)
if not space.is_w(w_value, space.w_None):
raise OperationError(space.w_TypeError,
space.wrap("instance exception may not "
"have a separate value"))
w_value = w_inst
w_type = w_instclass
self.w_type = w_type
self._w_value = w_value
def _exception_getclass(self, space, w_inst):
w_type = space.exception_getclass(w_inst)
if not space.exception_is_valid_class_w(w_type):
typename = w_type.getname(space)
msg = ("exceptions must be old-style classes or derived "
"from BaseException, not %s")
raise operationerrfmt(space.w_TypeError, msg, typename)
return w_type
def write_unraisable(self, space, where, w_object=None,
with_traceback=False, extra_line=''):
if w_object is None:
objrepr = ''
else:
try:
objrepr = space.str_w(space.repr(w_object))
except OperationError:
objrepr = '?'
#
try:
if with_traceback:
w_t = self.w_type
w_v = self.get_w_value(space)
w_tb = space.wrap(self.get_traceback())
space.appexec([space.wrap(where),
space.wrap(objrepr),
space.wrap(extra_line),
w_t, w_v, w_tb],
"""(where, objrepr, extra_line, t, v, tb):
import sys, traceback
sys.stderr.write('From %s%s:\\n' % (where, objrepr))
if extra_line:
sys.stderr.write(extra_line)
traceback.print_exception(t, v, tb)
""")
else:
msg = 'Exception %s in %s%s ignored\n' % (
self.errorstr(space, use_repr=True), where, objrepr)
space.call_method(space.sys.get('stderr'), 'write',
space.wrap(msg))
except OperationError:
pass # ignored
def get_w_value(self, space):
w_value = self._w_value
if w_value is None:
value = self._compute_value()
self._w_value = w_value = space.wrap(value)
return w_value
def _compute_value(self):
raise NotImplementedError
def get_traceback(self):
"""Calling this marks the PyTraceback as escaped, i.e. it becomes
accessible and inspectable by app-level Python code. For the JIT.
Note that this has no effect if there are already several traceback
frames recorded, because in this case they are already marked as
escaping by executioncontext.leave() being called with
got_exception=True.
"""
from pypy.interpreter.pytraceback import PyTraceback
tb = self._application_traceback
if tb is not None and isinstance(tb, PyTraceback):
tb.frame.mark_as_escaped()
return tb
def set_traceback(self, traceback):
"""Set the current traceback. It should either be a traceback
pointing to some already-escaped frame, or a traceback for the
current frame. To support the latter case we do not mark the
frame as escaped. The idea is that it will be marked as escaping
only if the exception really propagates out of this frame, by
executioncontext.leave() being called with got_exception=True.
"""
self._application_traceback = traceback
# ____________________________________________________________
# optimization only: avoid the slowest operation -- the string
# formatting with '%' -- in the common case were we don't
_fmtcache = {}
_fmtcache2 = {}
def decompose_valuefmt(valuefmt):
"""Returns a tuple of string parts extracted from valuefmt,
and a tuple of format characters."""
formats = []
parts = valuefmt.split('%')
i = 1
while i < len(parts):
if parts[i].startswith('s') or parts[i].startswith('d'):
formats.append(parts[i][0])
parts[i] = parts[i][1:]
i += 1
elif parts[i] == '':
parts[i-1] += '%' + parts[i+1]
del parts[i:i+2]
else:
raise ValueError("invalid format string (only %s or %d supported)")
assert len(formats) > 0, "unsupported: no % command found"
return tuple(parts), tuple(formats)
def get_operrcls2(valuefmt):
strings, formats = decompose_valuefmt(valuefmt)
assert len(strings) == len(formats) + 1
try:
OpErrFmt = _fmtcache2[formats]
except KeyError:
from rpython.rlib.unroll import unrolling_iterable
attrs = ['x%d' % i for i in range(len(formats))]
entries = unrolling_iterable(enumerate(attrs))
class OpErrFmt(OperationError):
def __init__(self, w_type, strings, *args):
self.setup(w_type)
assert len(args) == len(strings) - 1
self.xstrings = strings
for i, attr in entries:
setattr(self, attr, args[i])
assert w_type is not None
def _compute_value(self):
lst = [None] * (len(formats) + len(formats) + 1)
for i, attr in entries:
string = self.xstrings[i]
value = getattr(self, attr)
lst[i+i] = string
lst[i+i+1] = str(value)
lst[-1] = self.xstrings[-1]
return ''.join(lst)
_fmtcache2[formats] = OpErrFmt
return OpErrFmt, strings
def get_operationerr_class(valuefmt):
try:
result = _fmtcache[valuefmt]
except KeyError:
result = _fmtcache[valuefmt] = get_operrcls2(valuefmt)
return result
get_operationerr_class._annspecialcase_ = 'specialize:memo'
def operationerrfmt(w_type, valuefmt, *args):
"""Equivalent to OperationError(w_type, space.wrap(valuefmt % args)).
More efficient in the (common) case where the value is not actually
needed."""
OpErrFmt, strings = get_operationerr_class(valuefmt)
return OpErrFmt(w_type, strings, *args)
operationerrfmt._annspecialcase_ = 'specialize:arg(1)'
from rpython.tool.ansi_print import ansi_print
def debug_print(text, file=None, newline=True):
    # Print *text* in red (ANSI escape code 31) for interp-level debugging.
    ansi_print(text, esc="31", file=file, newline=newline)
# Feature-detect Windows: the WindowsError builtin only exists on
# Windows builds of Python.  _WINDOWS gates the WindowsError-specific
# handling in wrap_oserror2 below.
try:
    WindowsError
except NameError:
    _WINDOWS = False
else:
    _WINDOWS = True
def wrap_windowserror(space, e, w_filename=None):
    """Translate an OS-level WindowsError *e* into an app-level
    OperationError raising space.w_WindowsError.

    If *w_filename* is given it is passed as the third constructor
    argument so the exception carries the offending filename.
    """
    from rpython.rlib import rwin32

    winerror = e.winerror
    try:
        msg = rwin32.FormatError(winerror)
    except ValueError:
        # no system message text for this error code
        msg = 'Windows Error %d' % winerror
    exc = space.w_WindowsError
    if w_filename is not None:
        w_error = space.call_function(exc, space.wrap(winerror),
                                      space.wrap(msg), w_filename)
    else:
        w_error = space.call_function(exc, space.wrap(winerror),
                                      space.wrap(msg))
    return OperationError(exc, w_error)
def wrap_oserror2(space, e, w_filename=None, exception_name='w_OSError',
                  w_exception_class=None):
    """Translate an OSError *e* into an app-level OperationError.

    The exception class is looked up on *space* by *exception_name*
    unless *w_exception_class* is given directly.  On Windows a
    WindowsError instance is delegated to wrap_windowserror().
    """
    assert isinstance(e, OSError)

    if _WINDOWS and isinstance(e, WindowsError):
        return wrap_windowserror(space, e, w_filename)
    errno = e.errno

    if errno == EINTR:
        # an interrupted syscall usually means a signal arrived: give
        # the app-level signal handlers a chance to run first
        space.getexecutioncontext().checksignals()

    try:
        msg = os.strerror(errno)
    except ValueError:
        msg = 'error %d' % errno
    if w_exception_class is None:
        exc = getattr(space, exception_name)
    else:
        exc = w_exception_class
    if w_filename is not None:
        w_error = space.call_function(exc, space.wrap(errno),
                                      space.wrap(msg), w_filename)
    else:
        w_error = space.call_function(exc, space.wrap(errno),
                                      space.wrap(msg))
    return OperationError(exc, w_error)
wrap_oserror2._annspecialcase_ = 'specialize:arg(3)'
def wrap_oserror(space, e, filename=None, exception_name='w_OSError',
                 w_exception_class=None):
    """Like wrap_oserror2() but takes an unwrapped *filename* string,
    wrapping it into an app-level object first (or passing None)."""
    w_filename = space.wrap(filename) if filename is not None else None
    return wrap_oserror2(space, e, w_filename,
                         exception_name=exception_name,
                         w_exception_class=w_exception_class)
wrap_oserror._annspecialcase_ = 'specialize:arg(3)'
def exception_from_errno(space, w_type):
    """Build an OperationError of *w_type* from the current C-level
    errno, constructed as w_type(errno, strerror(errno))."""
    from rpython.rlib.rposix import get_errno

    errno = get_errno()
    msg = os.strerror(errno)
    w_error = space.call_function(w_type, space.wrap(errno), space.wrap(msg))
    return OperationError(w_type, w_error)
def new_exception_class(space, name, w_bases=None, w_dict=None):
    """Create a new exception type.
    @param name: the name of the type.
    @param w_bases: Either an exception type, or a wrapped tuple of
                    exception types.  default is space.w_Exception.
    @param w_dict: an optional dictionary to populate the class __dict__.
    """
    if '.' in name:
        # dotted name means "module.ClassName"; remember the module part
        # so it can be set as __module__ on the new type below
        module, name = name.rsplit('.', 1)
    else:
        module = None
    if w_bases is None:
        w_bases = space.newtuple([space.w_Exception])
    elif not space.isinstance_w(w_bases, space.w_tuple):
        # normalize a single base class into a 1-tuple
        w_bases = space.newtuple([w_bases])
    if w_dict is None:
        w_dict = space.newdict()
    # equivalent of app-level: type(name, bases, dict)
    w_exc = space.call_function(
        space.w_type, space.wrap(name), w_bases, w_dict)
    if module:
        space.setattr(w_exc, space.wrap("__module__"), space.wrap(module))
    return w_exc
def typed_unwrap_error_msg(space, expected, w_obj):
    """Return a wrapped TypeError-style message comparing the *expected*
    type description with the actual type name of *w_obj*."""
    actual = space.type(w_obj).getname(space)
    return space.wrap("expected %s, got %s object" % (expected, actual))
| false | true |
f7308d265ec40e3e5b63ae6f5eca1836d9790623 | 1,861 | py | Python | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeNotificationConfigurationsRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeNotificationConfigurationsRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | 1 | 2020-05-31T14:51:47.000Z | 2020-05-31T14:51:47.000Z | aliyun-python-sdk-ess/aliyunsdkess/request/v20140828/DescribeNotificationConfigurationsRequest.py | LittleJober/aliyun-openapi-python-sdk | f45cfa2248a5c8c47b2cebc1d4d1c2516b94df76 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class DescribeNotificationConfigurationsRequest(RpcRequest):
    """RPC request for ESS DescribeNotificationConfigurations (API
    version 2014-08-28): lists the notification configurations of a
    scaling group."""

    def __init__(self):
        RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeNotificationConfigurations','ess')
        # propagate endpoint data only when the installed core SDK
        # exposes these attributes on the request object
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerAccount(self):
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_ScalingGroupId(self):
        return self.get_query_params().get('ScalingGroupId')

    def set_ScalingGroupId(self, ScalingGroupId):
        self.add_query_param('ScalingGroupId', ScalingGroupId)

    def get_OwnerId(self):
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):
        self.add_query_param('OwnerId', OwnerId)
from aliyunsdkcore.request import RpcRequest
from aliyunsdkess.endpoint import endpoint_data
class DescribeNotificationConfigurationsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ess', '2014-08-28', 'DescribeNotificationConfigurations','ess')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ScalingGroupId(self):
return self.get_query_params().get('ScalingGroupId')
def set_ScalingGroupId(self,ScalingGroupId):
self.add_query_param('ScalingGroupId',ScalingGroupId)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | true | true |
f7308d359d4660aec3e1f8d361278eb035139385 | 2,776 | py | Python | jcp/lexer.py | RupasaiR/CC-Project | b50164ddadc45ef0f09edd791215b3e127feee58 | [
"MIT"
] | null | null | null | jcp/lexer.py | RupasaiR/CC-Project | b50164ddadc45ef0f09edd791215b3e127feee58 | [
"MIT"
] | null | null | null | jcp/lexer.py | RupasaiR/CC-Project | b50164ddadc45ef0f09edd791215b3e127feee58 | [
"MIT"
] | null | null | null | #-----------------------------------------------------------------------------------
# lexer.py
# Lexer for Java programming language in Python
# Reference: https://docs.oracle.com/javase/specs/jls/se7/html/jls-18.html
#-----------------------------------------------------------------------------------
import ply.lex as lex
# Reference: https://docs.oracle.com/javase/tutorial/java/nutsandbolts/_keywords.html
# Java reserved words; each also becomes a token type (upper-cased below).
keywords = ('this', 'class', 'void', 'super', 'extends', 'implements', 'enum', 'interface',
            'byte', 'short', 'int', 'long', 'char', 'float', 'double', 'boolean', 'null',
            'true', 'false',
            'final', 'public', 'protected', 'private', 'abstract', 'static', 'strictfp', 'transient', 'volatile',
            'synchronized', 'native',
            'throws', 'default',
            'instanceof',
            'if', 'else', 'while', 'for', 'switch', 'case', 'assert', 'do',
            'break', 'continue', 'return', 'throw', 'try', 'catch', 'finally', 'new',
            'package', 'import'
            )
# PLY token list: multi-character operators/literals plus one token type
# per keyword (PLY requires every token name to appear here).
tokens = [
    'NAME',
    'NUM',
    'CHAR_LITERAL',
    'STRING_LITERAL',
    'LINE_COMMENT', 'BLOCK_COMMENT',
    'OR', 'AND',
    'EQ', 'NEQ', 'GTEQ', 'LTEQ',
    'LSHIFT', 'RSHIFT', 'RRSHIFT',
    'TIMES_ASSIGN', 'DIVIDE_ASSIGN', 'REMAINDER_ASSIGN',
    'PLUS_ASSIGN', 'MINUS_ASSIGN', 'LSHIFT_ASSIGN', 'RSHIFT_ASSIGN', 'RRSHIFT_ASSIGN',
    'AND_ASSIGN', 'OR_ASSIGN', 'XOR_ASSIGN',
    'PLUSPLUS', 'MINUSMINUS',
    'ELLIPSIS'
] + [k.upper() for k in keywords]
# Single-character tokens returned literally by PLY.
literals = '()+-*/=?:,.^|&~!=[]{};<>@%'
# Numeric literal: deliberately permissive, covers int/float/hex/binary
# suffixes (l, L, d, D, e/E exponents, 0x..., p/P hex exponents, _).
t_NUM = r'\.?[0-9][0-9eE_lLdDa-fA-F.xXpP]*'
t_CHAR_LITERAL = r'\'([^\\\n]|(\\.))*?\''
t_STRING_LITERAL = r'\"([^\\\n]|(\\.))*?\"'
# t_ignore_ prefix tells PLY to match and discard // line comments.
t_ignore_LINE_COMMENT = '//.*'
def t_BLOCK_COMMENT(t):
    r'/\*(.|\n)*?\*/'
    # NOTE: the docstring above is the PLY token regex -- do not edit it.
    # Count newlines inside /* ... */ so line numbers stay accurate;
    # no token is returned, so the comment is discarded.
    t.lexer.lineno += t.value.count('\n')
# Multi-character operator tokens.  Regexes that need a backslash escape
# use raw strings so Python 3 does not emit invalid-escape warnings;
# the runtime values are unchanged.
t_OR = r'\|\|'
t_AND = '&&'
t_EQ = '=='
t_NEQ = '!='
t_GTEQ = '>='
t_LTEQ = '<='
t_LSHIFT = '<<'
t_RSHIFT = '>>'
t_RRSHIFT = '>>>'
t_TIMES_ASSIGN = r'\*='
t_DIVIDE_ASSIGN = '/='
t_REMAINDER_ASSIGN = '%='
t_PLUS_ASSIGN = r'\+='
t_MINUS_ASSIGN = '-='
t_LSHIFT_ASSIGN = '<<='
t_RSHIFT_ASSIGN = '>>='
t_RRSHIFT_ASSIGN = '>>>='
t_AND_ASSIGN = '&='
t_OR_ASSIGN = r'\|='
t_XOR_ASSIGN = r'\^='  # was '\^=': invalid escape sequence in a non-raw string
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'\-\-'
t_ELLIPSIS = r'\.\.\.'
# Whitespace ignored between tokens (newlines handled by t_newline*).
t_ignore = ' \t\f'
def t_NAME(t):
    '[A-Za-z_$][A-Za-z0-9_$]*'
    # NOTE: the docstring above is the PLY token regex -- do not edit it.
    # Identifiers that are reserved words are promoted to their keyword
    # token type (e.g. "class" -> CLASS).
    if t.value in keywords:
        t.type = t.value.upper()
    return t
def t_newline(t):
    r'\n+'
    # Track line numbers (Unix line endings); no token is produced.
    t.lexer.lineno += len(t.value)
def t_newline2(t):
    r'(\r\n)+'
    # Windows line endings: each newline is the two-character "\r\n"
    # sequence.  Use integer division -- plain "/" would turn lineno
    # into a float under Python 3.
    t.lexer.lineno += len(t.value) // 2
def t_error(t):
    # Report a character that matches no token rule, then skip past it
    # so lexing can continue.
    print("Illegal character '{}' ({}) in line {}".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno))
    t.lexer.skip(1)
# Lexer build
lexer = lex.lex()
| 26.692308 | 113 | 0.507565 |
import ply.lex as lex
keywords = ('this', 'class', 'void', 'super', 'extends', 'implements', 'enum', 'interface',
'byte', 'short', 'int', 'long', 'char', 'float', 'double', 'boolean', 'null',
'true', 'false',
'final', 'public', 'protected', 'private', 'abstract', 'static', 'strictfp', 'transient', 'volatile',
'synchronized', 'native',
'throws', 'default',
'instanceof',
'if', 'else', 'while', 'for', 'switch', 'case', 'assert', 'do',
'break', 'continue', 'return', 'throw', 'try', 'catch', 'finally', 'new',
'package', 'import'
)
tokens = [
'NAME',
'NUM',
'CHAR_LITERAL',
'STRING_LITERAL',
'LINE_COMMENT', 'BLOCK_COMMENT',
'OR', 'AND',
'EQ', 'NEQ', 'GTEQ', 'LTEQ',
'LSHIFT', 'RSHIFT', 'RRSHIFT',
'TIMES_ASSIGN', 'DIVIDE_ASSIGN', 'REMAINDER_ASSIGN',
'PLUS_ASSIGN', 'MINUS_ASSIGN', 'LSHIFT_ASSIGN', 'RSHIFT_ASSIGN', 'RRSHIFT_ASSIGN',
'AND_ASSIGN', 'OR_ASSIGN', 'XOR_ASSIGN',
'PLUSPLUS', 'MINUSMINUS',
'ELLIPSIS'
] + [k.upper() for k in keywords]
literals = '()+-*/=?:,.^|&~!=[]{};<>@%'
t_NUM = r'\.?[0-9][0-9eE_lLdDa-fA-F.xXpP]*'
t_CHAR_LITERAL = r'\'([^\\\n]|(\\.))*?\''
t_STRING_LITERAL = r'\"([^\\\n]|(\\.))*?\"'
t_ignore_LINE_COMMENT = '//.*'
def t_BLOCK_COMMENT(t):
t.lexer.lineno += t.value.count('\n')
t_OR = r'\|\|'
t_AND = '&&'
t_EQ = '=='
t_NEQ = '!='
t_GTEQ = '>='
t_LTEQ = '<='
t_LSHIFT = '<<'
t_RSHIFT = '>>'
t_RRSHIFT = '>>>'
t_TIMES_ASSIGN = r'\*='
t_DIVIDE_ASSIGN = '/='
t_REMAINDER_ASSIGN = '%='
t_PLUS_ASSIGN = r'\+='
t_MINUS_ASSIGN = '-='
t_LSHIFT_ASSIGN = '<<='
t_RSHIFT_ASSIGN = '>>='
t_RRSHIFT_ASSIGN = '>>>='
t_AND_ASSIGN = '&='
t_OR_ASSIGN = r'\|='
t_XOR_ASSIGN = '\^='
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'\-\-'
t_ELLIPSIS = r'\.\.\.'
t_ignore = ' \t\f'
def t_NAME(t):
if t.value in keywords:
t.type = t.value.upper()
return t
def t_newline(t):
t.lexer.lineno += len(t.value)
def t_newline2(t):
t.lexer.lineno += len(t.value) / 2
def t_error(t):
print("Illegal character '{}' ({}) in line {}".format(t.value[0], hex(ord(t.value[0])), t.lexer.lineno))
t.lexer.skip(1)
lexer = lex.lex()
| true | true |
f7308dfb1be9d3dd6bfea4a978d12b10c84b88cb | 1,785 | py | Python | kessk_web/device/wexinSignature.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 9 | 2019-09-30T04:24:39.000Z | 2021-07-15T06:08:20.000Z | kessk_web/device/wexinSignature.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 6 | 2020-05-14T03:13:32.000Z | 2022-02-10T10:23:46.000Z | kessk_web/device/wexinSignature.py | yungs2017/kessk-switch | a56c73c756bb88e8ee38b7aa196fd58a4a802341 | [
"BSD-3-Clause"
] | 2 | 2020-12-19T07:12:01.000Z | 2021-05-24T02:21:15.000Z | import hashlib
import random
import string
import time
from django.core.cache import cache
import requests
from common.config import WECHAT_GET_JSSDK_TICKET_URL, WECHAT_GET_ACCESS_TOKEN_URL
class Signature:
    """Compute the WeChat JS-SDK signature for a page URL.

    Per the WeChat JS-SDK spec, the signature is the SHA-1 hex digest of
    the "key=value" fields (jsapi_ticket, noncestr, timestamp, url)
    sorted by key and joined with '&'.
    """

    def __init__(self, url):
        # Fields that participate in the signature; the jsapi_ticket is
        # fetched (and cached) via Base_authorization.
        self.ret = {
            'nonceStr': self.__create_nonce_str(),
            'jsapi_ticket': Base_authorization.get_ticket(),
            'timestamp': self.__create_timestamp(),
            'url': url
        }

    def __create_nonce_str(self):
        # 15-character random alphanumeric nonce
        return ''.join(random.choice(string.ascii_letters + string.digits)
                       for _ in range(15))

    def __create_timestamp(self):
        return int(time.time())

    def sign(self):
        """Add the 'signature' key to self.ret and return the dict."""
        # Build the raw string before 'signature' is inserted.  The local
        # is named `raw` (not `string`) so it does not shadow the
        # imported `string` module used by __create_nonce_str.
        raw = '&'.join('%s=%s' % (key.lower(), self.ret[key])
                       for key in sorted(self.ret)).encode('utf-8')
        self.ret['signature'] = hashlib.sha1(raw).hexdigest()
        return self.ret
class Base_authorization():
    """
    Fetch and cache the WeChat JS-SDK ticket and access token.

    Both values are cached in the Django cache for 110 minutes, just
    under WeChat's 2-hour (7200 s) expiry window.
    NOTE(review): cache.has_key() is deprecated in modern Django --
    consider cache.get(key) checks instead; confirm Django version.
    """
    @classmethod
    def get_ticket(cls):
        # Return the cached jsapi_ticket, fetching a fresh one from the
        # WeChat API (using a valid access token) on cache miss.
        key = 'ticket'
        if cache.has_key(key):
            ticket = cache.get(key)
        else:
            # a valid access token is required to request a ticket
            if cache.has_key('access_token'):
                access_token = cache.get('access_token')
            else:
                access_token = cls.get_access_token()
            ticket = requests.get(WECHAT_GET_JSSDK_TICKET_URL+access_token).json()['ticket']
            cache.set(key,ticket,110*60)
        return ticket
    @staticmethod
    def get_access_token():
        # Fetch a fresh access token from the WeChat API and cache it.
        key = 'access_token'
        access_token = requests.get(WECHAT_GET_ACCESS_TOKEN_URL).json()['access_token']
        cache.set(key,access_token,110*60)
        return access_token
import random
import string
import time
from django.core.cache import cache
import requests
from common.config import WECHAT_GET_JSSDK_TICKET_URL, WECHAT_GET_ACCESS_TOKEN_URL
class Signature:
def __init__(self,url):
self.ret = {
'nonceStr': self.__create_nonce_str(),
'jsapi_ticket': Base_authorization.get_ticket(),
'timestamp': self.__create_timestamp(),
'url': url
}
def __create_nonce_str(self):
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(15))
def __create_timestamp(self):
return int(time.time())
def sign(self):
string = '&'.join(['%s=%s' % (key.lower(), self.ret[key]) for key in sorted(self.ret)]).encode('utf-8')
self.ret['signature'] = hashlib.sha1(string).hexdigest()
return self.ret
class Base_authorization():
@classmethod
def get_ticket(cls):
key = 'ticket'
if cache.has_key(key):
ticket = cache.get(key)
else:
if cache.has_key('access_token'):
access_token = cache.get('access_token')
else:
access_token = cls.get_access_token()
ticket = requests.get(WECHAT_GET_JSSDK_TICKET_URL+access_token).json()['ticket']
cache.set(key,ticket,110*60)
return ticket
@staticmethod
def get_access_token():
key = 'access_token'
access_token = requests.get(WECHAT_GET_ACCESS_TOKEN_URL).json()['access_token']
cache.set(key,access_token,110*60)
return access_token | true | true |
f7308e9c4d895763eaa137bfcd7402eab1c334b6 | 388 | py | Python | src/euler_python_package/euler_python/medium/p124.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p124.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p124.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | def problem124():
LIMIT = 100000
# Modification of the sieve of Eratosthenes
rads = [0] + [1] * LIMIT
for i in range(2, len(rads)):
if rads[i] == 1:
for j in range(i, len(rads), i):
rads[j] *= i
data = sorted((rad, i) for (i, rad) in enumerate(rads))
return data[10000][1]
if __name__ == "__main__":
print(problem124())
| 22.823529 | 59 | 0.53866 | def problem124():
LIMIT = 100000
rads = [0] + [1] * LIMIT
for i in range(2, len(rads)):
if rads[i] == 1:
for j in range(i, len(rads), i):
rads[j] *= i
data = sorted((rad, i) for (i, rad) in enumerate(rads))
return data[10000][1]
if __name__ == "__main__":
print(problem124())
| true | true |
f7308f2ba96d7185ba98d55e78390dec75d685f8 | 1,923 | py | Python | Chapter05/utils.py | Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras | fccada4810ba1fe8b79c5a74420a590c95623b52 | [
"MIT"
] | 76 | 2019-05-27T23:38:53.000Z | 2021-12-19T00:31:13.000Z | Chapter05/utils.py | Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras | fccada4810ba1fe8b79c5a74420a590c95623b52 | [
"MIT"
] | 9 | 2019-05-29T21:01:32.000Z | 2020-07-30T12:00:02.000Z | Chapter05/utils.py | Kushalshingote/Hands-On-Generative-Adversarial-Networks-with-Keras | fccada4810ba1fe8b79c5a74420a590c95623b52 | [
"MIT"
] | 35 | 2019-05-12T04:20:54.000Z | 2022-03-03T19:46:06.000Z | import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
from math import ceil
import numpy as np
import argparse
from functools import partial
import os
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers.merge import _Merge
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
from keras import backend as K
from keras.datasets import cifar10
def get_data():
    """Load CIFAR-10, cast to float32 and scale pixels to [-1, 1].

    Returns (X_train, X_test); labels are discarded.
    """
    # load cifar10 data
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # convert train and test data to float32
    X_train = X_train.astype(np.float32)
    X_test = X_test.astype(np.float32)
    # scale train and test data to [-1, 1]
    X_train = (X_train / 255) * 2 - 1
    # BUG FIX: the original scaled X_train again here, overwriting
    # X_test with training data
    X_test = (X_test / 255) * 2 - 1
    return X_train, X_test
def plot_images(images, filename):
    """Tile a batch of images (N, H, W, C) into a square grid and save
    it to *filename*.  Input pixel range is assumed to be [-1, 1]
    (scaled to [0, 1] for imsave).
    """
    # scale images to [0.0, 1.0]
    images = (images + 1) / 2
    h, w, c = images.shape[1:]
    grid_size = ceil(np.sqrt(images.shape[0]))
    # BUG FIX: pad with black images when the batch size is not a
    # perfect square; the reshape below requires exactly grid_size**2
    n_pad = grid_size * grid_size - images.shape[0]
    if n_pad:
        pad = np.zeros((n_pad, h, w, c), dtype=images.dtype)
        images = np.concatenate([images, pad], axis=0)
    images = (images.reshape(grid_size, grid_size, h, w, c)
              .transpose(0, 2, 1, 3, 4)
              .reshape(grid_size*h, grid_size*w, c))
    plt.figure(figsize=(16, 16))
    plt.imsave(filename, images)
    plt.close('all')
def plot_losses(losses_d, losses_g, filename):
    """Save a 2x2 grid of GAN loss curves to *filename*.

    losses_d: sequence of per-step discriminator loss triples; columns
              are plotted as (total, real, fake) per the subplot titles.
    losses_g: sequence of per-step generator losses.
    """
    losses_d = np.array(losses_d)
    fig, axes = plt.subplots(2, 2, figsize=(8, 8))
    axes = axes.flatten()
    axes[0].plot(losses_d[:, 0])
    axes[1].plot(losses_d[:, 1])
    axes[2].plot(losses_d[:, 2])
    axes[3].plot(losses_g)
    axes[0].set_title("losses_d")
    axes[1].set_title("losses_d_real")
    axes[2].set_title("losses_d_fake")
    axes[3].set_title("losses_g")
    plt.tight_layout()
    plt.savefig(filename)
    plt.close()
matplotlib.use("Agg")
import matplotlib.pylab as plt
from math import ceil
import numpy as np
import argparse
from functools import partial
import os
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Reshape, Flatten
from keras.layers.merge import _Merge
from keras.layers.convolutional import Convolution2D, Conv2DTranspose
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import Adam, RMSprop
from keras.datasets import mnist
from keras import backend as K
from keras.datasets import cifar10
def get_data():
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype(np.float32)
X_test = X_test.astype(np.float32)
X_train = (X_train / 255) * 2 - 1
X_test = (X_train / 255) * 2 - 1
return X_train, X_test
def plot_images(images, filename):
images = (images + 1) / 2
h, w, c = images.shape[1:]
grid_size = ceil(np.sqrt(images.shape[0]))
images = (images.reshape(grid_size, grid_size, h, w, c)
.transpose(0, 2, 1, 3, 4)
.reshape(grid_size*h, grid_size*w, c))
plt.figure(figsize=(16, 16))
plt.imsave(filename, images)
plt.close('all')
def plot_losses(losses_d, losses_g, filename):
losses_d = np.array(losses_d)
fig, axes = plt.subplots(2, 2, figsize=(8, 8))
axes = axes.flatten()
axes[0].plot(losses_d[:, 0])
axes[1].plot(losses_d[:, 1])
axes[2].plot(losses_d[:, 2])
axes[3].plot(losses_g)
axes[0].set_title("losses_d")
axes[1].set_title("losses_d_real")
axes[2].set_title("losses_d_fake")
axes[3].set_title("losses_g")
plt.tight_layout()
plt.savefig(filename)
plt.close() | true | true |
f7308f9c00ebc9bbce9d89a7668d0310b9d0223d | 793 | py | Python | PythonVirtEnv/Lib/site-packages/plotly/validators/layout/grid/_subplots.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 7 | 2021-09-29T09:46:36.000Z | 2022-03-24T08:30:41.000Z | PythonVirtEnv/Lib/site-packages/plotly/validators/layout/grid/_subplots.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 1 | 2021-09-30T16:56:21.000Z | 2021-10-15T09:14:12.000Z | PythonVirtEnv/Lib/site-packages/plotly/validators/layout/grid/_subplots.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 1 | 2021-09-29T22:34:05.000Z | 2021-09-29T22:34:05.000Z | import _plotly_utils.basevalidators
class SubplotsValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for layout.grid.subplots: a free-length 2D array whose
    cells are subplot ids like "xy", "x2y3", or "" for an empty cell."""

    def __init__(self, plotly_name="subplots", parent_name="layout.grid", **kwargs):
        super(SubplotsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # rows x columns of subplot ids
            dimensions=kwargs.pop("dimensions", 2),
            edit_type=kwargs.pop("edit_type", "plot"),
            free_length=kwargs.pop("free_length", True),
            items=kwargs.pop(
                "items",
                {
                    "valType": "enumerated",
                    # matches axis-pair ids such as "xy", "x2y", "x3y12";
                    # "" marks an empty grid cell
                    "values": ["/^x([2-9]|[1-9][0-9]+)?y([2-9]|[1-9][0-9]+)?$/", ""],
                    "editType": "plot",
                },
            ),
            **kwargs
        )
class SubplotsValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(self, plotly_name="subplots", parent_name="layout.grid", **kwargs):
super(SubplotsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dimensions=kwargs.pop("dimensions", 2),
edit_type=kwargs.pop("edit_type", "plot"),
free_length=kwargs.pop("free_length", True),
items=kwargs.pop(
"items",
{
"valType": "enumerated",
"values": ["/^x([2-9]|[1-9][0-9]+)?y([2-9]|[1-9][0-9]+)?$/", ""],
"editType": "plot",
},
),
**kwargs
)
| true | true |
f730903cf9f64cb688db3b16064ab4dafccd0ee0 | 297 | py | Python | 5.3-Thursday/writing.py | lraynes/activities | 5438ff9869df9d67757817fd4994be545eb38604 | [
"MIT"
] | null | null | null | 5.3-Thursday/writing.py | lraynes/activities | 5438ff9869df9d67757817fd4994be545eb38604 | [
"MIT"
] | null | null | null | 5.3-Thursday/writing.py | lraynes/activities | 5438ff9869df9d67757817fd4994be545eb38604 | [
"MIT"
] | null | null | null | import os
import csv
# Path of the CSV file to create; the "output" directory must already exist.
output_path = os.path.join(".", "output", "new.csv")
# newline="" prevents the csv module from writing blank rows on Windows.
with open(output_path, "w", newline="") as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=",")
    # header row followed by a single data row
    csvwriter.writerow(["First Name", "Last Name", "SSN"])
    csvwriter.writerow(["Laura", "Raynes", "555-55-5555"])
| 29.7 | 58 | 0.653199 | import os
import csv
output_path = os.path.join(".", "output", "new.csv")
with open(output_path, "w", newline="") as csvfile:
csvwriter = csv.writer(csvfile, delimiter=",")
csvwriter.writerow(["First Name", "Last Name", "SSN"])
csvwriter.writerow(["Laura", "Raynes", "555-55-5555"])
| true | true |
f73090ca8723e36f89646c3925d6c5c1658f9fa9 | 24,872 | py | Python | harvester_e2e_tests/scenarios/test_vm_actions.py | tjjh89017/tests | ab7a7dbc380f2585cf6de709d203912cf34fa84a | [
"Apache-2.0"
] | 5 | 2021-06-21T08:17:44.000Z | 2022-03-25T03:12:13.000Z | harvester_e2e_tests/scenarios/test_vm_actions.py | tjjh89017/tests | ab7a7dbc380f2585cf6de709d203912cf34fa84a | [
"Apache-2.0"
] | 107 | 2021-06-07T07:31:14.000Z | 2022-03-30T07:24:33.000Z | harvester_e2e_tests/scenarios/test_vm_actions.py | tjjh89017/tests | ab7a7dbc380f2585cf6de709d203912cf34fa84a | [
"Apache-2.0"
] | 17 | 2021-05-26T21:05:54.000Z | 2022-03-29T00:49:50.000Z | # Copyright (c) 2021 SUSE LLC
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, contact SUSE LLC.
#
# To contact SUSE about this file by physical or electronic mail,
# you may find current contact information at www.suse.com
from harvester_e2e_tests import utils
import polling2
import time
import json
import pytest
pytest_plugins = [
'harvester_e2e_tests.fixtures.keypair',
'harvester_e2e_tests.fixtures.vm',
'harvester_e2e_tests.fixtures.volume',
'harvester_e2e_tests.fixtures.backuptarget'
]
def backup_restore_migrated_vm(request, admin_session,
                               harvester_api_endpoints,
                               vm_with_volume,
                               backuptarget):
    """
    Backup Restore Testing
    Covers:
        backup-and-restore-13-Restore Backup for VM that was live migrated
        backup-and-restore-14-Backup Single VM that has been live migrated
        before
    """
    # must be None (not a random string as before) so the finally block
    # only cleans up a backup that was actually created
    backup_json = None
    try:
        vm_name = vm_with_volume['metadata']['name']
        vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        vm_node_before_migrate = vm_instance_json['status']['nodeName']
        resp = admin_session.get(harvester_api_endpoints.list_nodes)
        assert resp.status_code == 200, 'Failed to list nodes: %s' % (
            resp.content)
        nodes_json = resp.json()['data']
        # pick a node other than the one currently hosting the VM
        # (last match, preserving the original selection behavior)
        node_to_migrate = None
        for node in nodes_json:
            if node['metadata']['name'] != vm_node_before_migrate:
                node_to_migrate = node['metadata']['name']
        assert node_to_migrate is not None, (
            'No node other than %s available to migrate VM %s to' % (
                vm_node_before_migrate, vm_name))
        resp = admin_session.put(harvester_api_endpoints.migrate_vm % (
            vm_name),
            json={"nodeName": node_to_migrate})
        assert resp.status_code == 202, 'Failed to migrate VM to host %s' % (
            node_to_migrate)
        # give it some time for the VM to migrate
        time.sleep(120)

        def _check_vm_instance_migrated():
            # migration is done once migrationState reports completed
            resp = admin_session.get(
                harvester_api_endpoints.get_vm_instance % (
                    vm_name))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        'migrationState' in resp_json['status'] and
                        resp_json['status']['migrationState']['completed']):
                    return True
            return False

        success = polling2.poll(
            _check_vm_instance_migrated,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out as waiting for VM to migrate : %s' % (
            vm_name)
        vmi_json_after_migrate = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        vm_node_after_migrate = vmi_json_after_migrate['status']['nodeName']
        assert vm_node_after_migrate != vm_node_before_migrate, (
            'Failed to Migrate as Host remains same. '
            'Node Before Migrate: %s; Node after Migrate: %s' % (
                vm_node_before_migrate, vm_node_after_migrate))
        # Create backup of live migrated VM
        backup_name = utils.random_name()
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget,
                                             name=backup_name,
                                             vm_name=vm_name)
        # Stop VM prior to restoring into it
        utils.stop_vm(request, admin_session,
                      harvester_api_endpoints, vm_name)
        # Restore existing VM from backup
        restore_name = utils.random_name()
        utils.restore_vm_backup(request, admin_session,
                                harvester_api_endpoints,
                                name=restore_name,
                                vm_name=vm_name,
                                backup_name=backup_name)
        utils.assert_vm_ready(request, admin_session,
                              harvester_api_endpoints,
                              vm_name, running=True)
        resp = admin_session.get(harvester_api_endpoints.get_vm % (
            vm_name))
        assert resp.status_code == 200, 'Failed to get restored VM %s: %s' % (
            vm_name, resp.content)
        restored_vm_json = resp.json()
        # the restored VM should come back on the post-migration node
        restored_vm_instance_json = utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, restored_vm_json)
        restored_vm_node = restored_vm_instance_json['status']['nodeName']
        assert restored_vm_node == vm_node_after_migrate, (
            'Node of restored VM not same as Node after VM migration '
            'Node Of Restored VM: %s; VM Node after Migrate: %s' % (
                restored_vm_node, vm_node_after_migrate))
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            if vm_with_volume:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, vm_with_volume)
            if backup_json:
                utils.delete_vm_backup(request, admin_session,
                                       harvester_api_endpoints,
                                       backuptarget, backup_json)
def update_backup_yaml(request, admin_session,
                       harvester_api_endpoints,
                       basic_vm,
                       backuptarget):
    """Create a VM backup, then edit its YAML (add an annotation) and
    verify the update is persisted via the API."""
    vm_name = basic_vm['metadata']['name']
    # sanity check: the VM instance must exist before backing it up
    utils.lookup_vm_instance(
        admin_session, harvester_api_endpoints, basic_vm)
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(request, admin_session,
                                             harvester_api_endpoints,
                                             backuptarget,
                                             name=backup_name,
                                             vm_name=vm_name)
        # add a test annotation and push the update as YAML
        backup_json['metadata']['annotations'] = {
            'test.harvesterhci.io': 'for-test-update'
        }
        resp = utils.poll_for_update_resource(
            request, admin_session,
            harvester_api_endpoints.update_vm_backup % (
                backup_json['metadata']['name']),
            backup_json,
            harvester_api_endpoints.get_vm_backup % (
                backup_json['metadata']['name']),
            use_yaml=True)
        updated_backup_data = resp.json()
        assert updated_backup_data['metadata']['annotations'].get(
            'test.harvesterhci.io') == 'for-test-update'
    finally:
        if not request.config.getoption('--do-not-cleanup'):
            # NOTE(review): the VM is only deleted when the backup was
            # created; a failure inside create_vm_backup leaves the VM
            # behind -- confirm whether that is intended
            if backup_json:
                utils.delete_vm(request, admin_session,
                                harvester_api_endpoints, basic_vm)
                utils.delete_vm_backup(
                    request, admin_session, harvester_api_endpoints,
                    backuptarget, backup_json)
@pytest.mark.virtual_machines_p1
@pytest.mark.p1
class TestVMActions:
"""
Test Virtual Machines opertions like restart,stop,start,pause,
unpause
Covers:
virtual-machines-55-VM operations stop,start,restart,pause,unpause
virtual-machines-50-VM Edit VM via YAML with CPU
"""
def test_create_vm(self, admin_session, harvester_api_endpoints, basic_vm):
# make sure the VM instance is successfully created
utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, basic_vm)
# make sure it has a cdrom device
devices = basic_vm['spec']['template']['spec']['domain']['devices']
disks = devices['disks']
found_cdrom = False
for disk in disks:
if 'cdrom' in disk:
found_cdrom = True
break
assert found_cdrom, 'Expecting "cdrom" in the disks list.'
    def test_restart_vm(self, request, admin_session, harvester_api_endpoints,
                        basic_vm):
        """Restart the VM and wait for the new instance to be ready."""
        vm_name = basic_vm['metadata']['name']
        # the uid changes when the VM is recreated; utils.restart_vm uses
        # it to detect that the restart actually happened
        previous_uid = basic_vm['metadata']['uid']
        utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
                         vm_name, request.config.getoption('--wait-timeout'))
    def test_stop_vm(self, request, admin_session, harvester_api_endpoints,
                     basic_vm):
        """Stop the VM and wait for its instance to terminate
        (polling handled inside utils.stop_vm)."""
        vm_name = basic_vm['metadata']['name']
        utils.stop_vm(request, admin_session, harvester_api_endpoints,
                      vm_name)
    def test_start_vm(self, request, admin_session, harvester_api_endpoints,
                      basic_vm):
        """Start the stopped VM and wait until the instance is Running."""
        # NOTE: this step must be done after VM has stopped
        # (it relies on test_stop_vm having run first)
        resp = admin_session.put(harvester_api_endpoints.start_vm % (
            basic_vm['metadata']['name']))
        assert resp.status_code == 202, (
            'Failed to start VM instance %s: %s' % (
                basic_vm['metadata']['name'], resp.content))
        # give it some time for the VM to start
        time.sleep(120)

        def _check_vm_instance_started():
            # started once the VMI phase reports Running
            resp = admin_session.get(
                harvester_api_endpoints.get_vm_instance % (
                    basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if ('status' in resp_json and
                        resp_json['status']['phase'] == 'Running'):
                    return True
            return False

        success = polling2.poll(
            _check_vm_instance_started,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Failed to get VM instance for: %s' % (
            basic_vm['metadata']['name'])
    def test_pause_vm(self, request, admin_session, harvester_api_endpoints,
                      basic_vm):
        """Pause the running VM and wait for the Paused condition."""
        resp = admin_session.put(harvester_api_endpoints.pause_vm % (
            basic_vm['metadata']['name']))
        assert resp.status_code == 200, 'Failed to pause VM instance %s' % (
            basic_vm['metadata']['name'])
        # give it some time for the VM to pause
        time.sleep(60)

        def _check_vm_instance_paused():
            # paused once the VM reports a Paused=True condition
            resp = admin_session.get(harvester_api_endpoints.get_vm % (
                basic_vm['metadata']['name']))
            if resp.status_code == 200:
                resp_json = resp.json()
                if 'status' in resp_json:
                    for condition in resp_json['status']['conditions']:
                        if (condition['type'] == 'Paused' and
                                condition['status'] == 'True'):
                            return True
            return False

        success = polling2.poll(
            _check_vm_instance_paused,
            step=5,
            timeout=request.config.getoption('--wait-timeout'))
        assert success, 'Timed out while waiting for VM to be paused.'
def test_unpause_vm(self, request, admin_session, harvester_api_endpoints,
basic_vm):
# NOTE: make sure to execute this step after _paused_vm()
resp = admin_session.put(harvester_api_endpoints.unpause_vm % (
basic_vm['metadata']['name']))
assert resp.status_code == 200, 'Failed to unpause VM instance %s' % (
basic_vm['metadata']['name'])
# give it some time to unpause
time.sleep(10)
def _check_vm_instance_unpaused():
resp = admin_session.get(harvester_api_endpoints.get_vm % (
basic_vm['metadata']['name']))
if resp.status_code == 200:
resp_json = resp.json()
if ('status' in resp_json and
'ready' in resp_json['status'] and
resp_json['status']['ready']):
return True
return False
success = polling2.poll(
_check_vm_instance_unpaused,
step=5,
timeout=request.config.getoption('--wait-timeout'))
assert success, 'Timed out while waiting for VM to be unpaused.'
def test_update_vm_cpu(self, request, admin_session,
harvester_api_endpoints, basic_vm):
vm_name = basic_vm['metadata']['name']
vm_instance_json = utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, basic_vm)
previous_uid = vm_instance_json['metadata']['uid']
domain_data = basic_vm['spec']['template']['spec']['domain']
updated_cores = domain_data['cpu']['cores'] + 1
domain_data['cpu']['cores'] = updated_cores
resp = utils.poll_for_update_resource(
request, admin_session,
harvester_api_endpoints.update_vm % (vm_name),
basic_vm,
harvester_api_endpoints.get_vm % (vm_name))
updated_vm_data = resp.json()
updated_domain_data = (
updated_vm_data['spec']['template']['spec']['domain'])
assert updated_domain_data['cpu']['cores'] == updated_cores
# restart the VM instance for the changes to take effect
utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
vm_name, request.config.getoption('--wait-timeout'))
@pytest.mark.volumes_p2
@pytest.mark.volumes_p1
@pytest.mark.p2
@pytest.mark.p1
class TestVMVolumes:

    def test_create_vm_with_external_volume(self, admin_session,
                                            harvester_api_endpoints,
                                            vm_with_volume):
        """
        Test virtual machines

        Covers:
            virtual-machines-11-Create VM with two disk volumes
        """
        # the VM instance must exist before its volumes can be inspected
        utils.lookup_vm_instance(
            admin_session, harvester_api_endpoints, vm_with_volume)
        expected_owner = '%s/%s' % (
            vm_with_volume['metadata']['namespace'],
            vm_with_volume['metadata']['name'])
        # every attached data volume must be in-use and owned by the VM
        for vol in vm_with_volume['spec']['template']['spec']['volumes']:
            claim = vol['persistentVolumeClaim']['claimName']
            resp = admin_session.get(harvester_api_endpoints.get_volume % (
                claim))
            assert resp.status_code == 200, (
                'Failed to lookup volume %s: %s' % (claim, resp.content))
            owners = json.loads(
                resp.json()['metadata']['annotations'].get(
                    'harvesterhci.io/owned-by'))
            # make sure VM is one of the owners
            vm_is_owner = any(
                owner['schema'] == 'kubevirt.io.virtualmachine' and
                expected_owner in owner['refs']
                for owner in owners)
            assert vm_is_owner, (
                'Expecting %s to be in volume %s owners list' % (
                    expected_owner, claim))

    def test_delete_volume_in_use(self, request, admin_session,
                                  harvester_api_endpoints, vm_with_volume):
        """
        Volume testing

        Covers:
            Negative vol-01-Delete Volume that is in use
            vol-13-Validate volume shows as in use when attached
        """
        for vol in vm_with_volume['spec']['template']['spec']['volumes']:
            claim = vol['persistentVolumeClaim']['claimName']
            # deleting a volume in the 'in-use' state must be rejected
            resp = admin_session.delete(
                harvester_api_endpoints.delete_volume % (claim))
            assert resp.status_code not in [200, 201], (
                'Deleting "in-use" volumes should not be permitted: %s' % (
                    resp.content))

    def test_delete_vm_then_volumes(self, request, admin_session,
                                    harvester_api_endpoints,
                                    vm_with_volume, volume):
        """
        Volume testing

        Covers:
            vol-15-Delete volume that was attached to VM but now is not
        """
        # delete the VM but keep the volumes
        utils.delete_vm(request, admin_session, harvester_api_endpoints,
                        vm_with_volume, remove_all_disks=False)
        for data_vol in vm_with_volume['spec']['template']['spec']['volumes']:
            claim = data_vol['persistentVolumeClaim']['claimName']
            # the detached volume must still exist after the VM is gone
            resp = admin_session.get(harvester_api_endpoints.get_volume % (
                claim))
            assert resp.status_code == 200, (
                'Failed to lookup data volume %s: %s' % (claim, resp.content))
            # now cleanup the volume
            utils.delete_volume_by_name(request, admin_session,
                                        harvester_api_endpoints, claim)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_s3(request, admin_session,
                             harvester_api_endpoints, basic_vm,
                             backuptarget_s3):
    """
    Backup and Restore

    Covers:
        backup-and-restore-02-Backup Single VM s3
        backup-and-restore-07-Delete single Backup
        backup-and-restore-01-create backup target
    """
    vm_name = basic_vm['metadata']['name']
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(
            request, admin_session, harvester_api_endpoints, backuptarget_s3,
            name=backup_name, vm_name=vm_name)
    finally:
        # tear down the VM and its backup unless cleanup is disabled
        if not request.config.getoption('--do-not-cleanup') and backup_json:
            utils.delete_vm(request, admin_session,
                            harvester_api_endpoints, basic_vm)
            utils.delete_vm_backup(
                request, admin_session, harvester_api_endpoints,
                backuptarget_s3, backup_json)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_nfs(request, admin_session,
                              harvester_api_endpoints, basic_vm,
                              backuptarget_nfs):
    """
    Backup and Restore

    Covers:
        vol-02-Backup Single VM nfs
    """
    vm_name = basic_vm['metadata']['name']
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(
            request, admin_session, harvester_api_endpoints, backuptarget_nfs,
            name=backup_name, vm_name=vm_name)
    finally:
        # tear down the VM and its backup unless cleanup is disabled
        if not request.config.getoption('--do-not-cleanup') and backup_json:
            utils.delete_vm(request, admin_session,
                            harvester_api_endpoints, basic_vm)
            utils.delete_vm_backup(
                request, admin_session, harvester_api_endpoints,
                backuptarget_nfs, backup_json)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_s3(request, admin_session,
                                       harvester_api_endpoints,
                                       vm_with_volume,
                                       backuptarget_s3):
    """
    Backup and Restore

    Covers:
        backup-and-restore-13-Restore Backup S3 for VM that was
        live migrated
        backup-and-restore-14-Backup single vm S3 for VM that was
        live migrated before
    """
    # delegate to the shared S3/NFS backup-after-migration scenario
    backup_restore_migrated_vm(
        request, admin_session, harvester_api_endpoints, vm_with_volume,
        backuptarget_s3)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_nfs(request, admin_session,
                                        harvester_api_endpoints,
                                        vm_with_volume,
                                        backuptarget_nfs):
    """
    Backup and Restore

    Covers:
        backup-and-restore-13-Restore Backup nfs for VM that was
        live migrated
        backup-and-restore-14-Backup single vm nfs for VM that was
        live migrated before
    """
    # delegate to the shared S3/NFS backup-after-migration scenario
    backup_restore_migrated_vm(
        request, admin_session, harvester_api_endpoints, vm_with_volume,
        backuptarget_nfs)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_nfs(request, admin_session,
                                harvester_api_endpoints, basic_vm,
                                backuptarget_nfs):
    """
    Backup Restore Testing

    Covers:
        backup-and-restore-11-Edit Backup nfs
    """
    # delegate to the shared S3/NFS backup-YAML-edit scenario
    update_backup_yaml(
        request, admin_session, harvester_api_endpoints, basic_vm,
        backuptarget_nfs)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_s3(request, admin_session,
                               harvester_api_endpoints, basic_vm,
                               backuptarget_s3):
    """
    Backup Restore Testing

    Covers:
        backup-and-restore-11-Edit Backup s3
    """
    # delegate to the shared S3/NFS backup-YAML-edit scenario
    update_backup_yaml(
        request, admin_session, harvester_api_endpoints, basic_vm,
        backuptarget_s3)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_restore_backup_vm_on(request, admin_session,
                              harvester_api_endpoints,
                              basic_vm, backuptarget_nfs):
    """
    Backup Restore Testing

    Covers:
        Negative backup-and-restore-08-Restore Backup Negative
    """
    # make sure the VM instance is successfully created
    instance = utils.lookup_vm_instance(
        admin_session, harvester_api_endpoints, basic_vm)
    vm_name = instance['metadata']['name']
    backup_name = utils.random_name()
    backup_json = None
    try:
        backup_json = utils.create_vm_backup(
            request, admin_session, harvester_api_endpoints, backuptarget_nfs,
            name=backup_name, vm_name=vm_name)
        # restoring while the VM is still running must be rejected
        restore_request = utils.get_json_object_from_template(
            'basic_vm_restore',
            name=utils.random_name(),
            vm_name=vm_name,
            backup_name=backup_name
        )
        resp = admin_session.post(
            harvester_api_endpoints.create_vm_restore,
            json=restore_request)
        assert 'please stop the VM' in resp.json()['message']
    finally:
        # tear down the VM and its backup unless cleanup is disabled
        if not request.config.getoption('--do-not-cleanup') and backup_json:
            utils.delete_vm(request, admin_session,
                            harvester_api_endpoints, basic_vm)
            utils.delete_vm_backup(
                request, admin_session, harvester_api_endpoints,
                backuptarget_nfs, backup_json)
| 41.801681 | 79 | 0.575064 |
from harvester_e2e_tests import utils
import polling2
import time
import json
import pytest
pytest_plugins = [
'harvester_e2e_tests.fixtures.keypair',
'harvester_e2e_tests.fixtures.vm',
'harvester_e2e_tests.fixtures.volume',
'harvester_e2e_tests.fixtures.backuptarget'
]
def backup_restore_migrated_vm(request, admin_session,
harvester_api_endpoints,
vm_with_volume,
backuptarget):
backup_json = utils.random_name()
try:
vm_name = vm_with_volume['metadata']['name']
vm_instance_json = utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, vm_with_volume)
vm_node_before_migrate = vm_instance_json['status']['nodeName']
resp = admin_session.get(harvester_api_endpoints.get_node % (
vm_node_before_migrate))
resp = admin_session.get(harvester_api_endpoints.list_nodes)
assert resp.status_code == 200, 'Failed to list nodes: %s' % (
resp.content)
nodes_json = resp.json()['data']
for node in nodes_json:
if node['metadata']['name'] != vm_node_before_migrate:
node_to_migrate = node['metadata']['name']
resp = admin_session.put(harvester_api_endpoints.migrate_vm % (
vm_name),
json={"nodeName": node_to_migrate})
assert resp.status_code == 202, 'Failed to migrat VM to host %s' % (
node_to_migrate)
time.sleep(120)
def _check_vm_instance_migrated():
resp = admin_session.get(
harvester_api_endpoints.get_vm_instance % (
vm_name))
if resp.status_code == 200:
resp_json = resp.json()
if ('status' in resp_json and
'migrationState' in resp_json['status'] and
resp_json['status']['migrationState']['completed']):
return True
return False
success = polling2.poll(
_check_vm_instance_migrated,
step=5,
timeout=request.config.getoption('--wait-timeout'))
assert success, 'Timed out as waiting for VM to migrate : %s' % (
vm_name)
vmi_json_after_migrate = utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, vm_with_volume)
vm_node_after_migrate = vmi_json_after_migrate['status']['nodeName']
assert vm_node_after_migrate != vm_node_before_migrate, (
'Failed to Migrate as Host remains same. '
'Node Before Migrate: %s; Node after Migrate: %s' % (
vm_node_before_migrate, vm_node_after_migrate))
backup_name = utils.random_name()
backup_json = utils.create_vm_backup(request, admin_session,
harvester_api_endpoints,
backuptarget,
name=backup_name,
vm_name=vm_name)
utils.stop_vm(request, admin_session,
harvester_api_endpoints, vm_name)
restore_name = utils.random_name()
utils.restore_vm_backup(request, admin_session,
harvester_api_endpoints,
name=restore_name,
vm_name=vm_name,
backup_name=backup_name)
utils.assert_vm_ready(request, admin_session,
harvester_api_endpoints,
vm_name, running=True)
resp = admin_session.get(harvester_api_endpoints.get_vm % (
vm_name))
assert resp.status_code == 200, 'Failed to get restor VM %s: %s' % (
vm_name, resp.content)
restored_vm_json = resp.json()
restored_vm_instance_json = utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, restored_vm_json)
restored_vm_node = restored_vm_instance_json['status']['nodeName']
assert restored_vm_node == vm_node_after_migrate, (
'Node of restored VM not same as Node after VM migration '
'Node Of Restored VM: %s; VM Node after Migrate: %s' % (
restored_vm_node, vm_node_after_migrate))
finally:
if not request.config.getoption('--do-not-cleanup'):
if vm_with_volume:
utils.delete_vm(request, admin_session,
harvester_api_endpoints, vm_with_volume)
if backup_json:
utils.delete_vm_backup(request, admin_session,
harvester_api_endpoints,
backuptarget, backup_json)
def update_backup_yaml(request, admin_session,
harvester_api_endpoints,
basic_vm,
backuptarget):
vm_name = basic_vm['metadata']['name']
utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, basic_vm)
backup_name = utils.random_name()
backup_json = None
try:
backup_json = utils.create_vm_backup(request, admin_session,
harvester_api_endpoints,
backuptarget,
name=backup_name,
vm_name=vm_name)
backup_json['metadata']['annotations'] = {
'test.harvesterhci.io': 'for-test-update'
}
resp = utils.poll_for_update_resource(
request, admin_session,
harvester_api_endpoints.update_vm_backup % (
backup_json['metadata']['name']),
backup_json,
harvester_api_endpoints.get_vm_backup % (
backup_json['metadata']['name']),
use_yaml=True)
updated_backup_data = resp.json()
assert updated_backup_data['metadata']['annotations'].get(
'test.harvesterhci.io') == 'for-test-update'
finally:
if not request.config.getoption('--do-not-cleanup'):
if backup_json:
utils.delete_vm(request, admin_session,
harvester_api_endpoints, basic_vm)
utils.delete_vm_backup(
request, admin_session, harvester_api_endpoints,
backuptarget, backup_json)
@pytest.mark.virtual_machines_p1
@pytest.mark.p1
class TestVMActions:
def test_create_vm(self, admin_session, harvester_api_endpoints, basic_vm):
utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, basic_vm)
devices = basic_vm['spec']['template']['spec']['domain']['devices']
disks = devices['disks']
found_cdrom = False
for disk in disks:
if 'cdrom' in disk:
found_cdrom = True
break
assert found_cdrom, 'Expecting "cdrom" in the disks list.'
def test_restart_vm(self, request, admin_session, harvester_api_endpoints,
basic_vm):
vm_name = basic_vm['metadata']['name']
previous_uid = basic_vm['metadata']['uid']
utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
vm_name, request.config.getoption('--wait-timeout'))
def test_stop_vm(self, request, admin_session, harvester_api_endpoints,
basic_vm):
vm_name = basic_vm['metadata']['name']
utils.stop_vm(request, admin_session, harvester_api_endpoints,
vm_name)
def test_start_vm(self, request, admin_session, harvester_api_endpoints,
basic_vm):
resp = admin_session.put(harvester_api_endpoints.start_vm % (
basic_vm['metadata']['name']))
assert resp.status_code == 202, (
'Failed to start VM instance %s: %s' % (
basic_vm['metadata']['name'], resp.content))
time.sleep(120)
def _check_vm_instance_started():
resp = admin_session.get(
harvester_api_endpoints.get_vm_instance % (
basic_vm['metadata']['name']))
if resp.status_code == 200:
resp_json = resp.json()
if ('status' in resp_json and
resp_json['status']['phase'] == 'Running'):
return True
return False
success = polling2.poll(
_check_vm_instance_started,
step=5,
timeout=request.config.getoption('--wait-timeout'))
assert success, 'Failed to get VM instance for: %s' % (
basic_vm['metadata']['name'])
def test_pause_vm(self, request, admin_session, harvester_api_endpoints,
basic_vm):
resp = admin_session.put(harvester_api_endpoints.pause_vm % (
basic_vm['metadata']['name']))
assert resp.status_code == 200, 'Failed to pause VM instance %s' % (
basic_vm['metadata']['name'])
time.sleep(60)
def _check_vm_instance_paused():
resp = admin_session.get(harvester_api_endpoints.get_vm % (
basic_vm['metadata']['name']))
if resp.status_code == 200:
resp_json = resp.json()
if 'status' in resp_json:
for condition in resp_json['status']['conditions']:
if (condition['type'] == 'Paused' and
condition['status'] == 'True'):
return True
return False
success = polling2.poll(
_check_vm_instance_paused,
step=5,
timeout=request.config.getoption('--wait-timeout'))
assert success, 'Timed out while waiting for VM to be paused.'
def test_unpause_vm(self, request, admin_session, harvester_api_endpoints,
basic_vm):
resp = admin_session.put(harvester_api_endpoints.unpause_vm % (
basic_vm['metadata']['name']))
assert resp.status_code == 200, 'Failed to unpause VM instance %s' % (
basic_vm['metadata']['name'])
time.sleep(10)
def _check_vm_instance_unpaused():
resp = admin_session.get(harvester_api_endpoints.get_vm % (
basic_vm['metadata']['name']))
if resp.status_code == 200:
resp_json = resp.json()
if ('status' in resp_json and
'ready' in resp_json['status'] and
resp_json['status']['ready']):
return True
return False
success = polling2.poll(
_check_vm_instance_unpaused,
step=5,
timeout=request.config.getoption('--wait-timeout'))
assert success, 'Timed out while waiting for VM to be unpaused.'
def test_update_vm_cpu(self, request, admin_session,
harvester_api_endpoints, basic_vm):
vm_name = basic_vm['metadata']['name']
vm_instance_json = utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, basic_vm)
previous_uid = vm_instance_json['metadata']['uid']
domain_data = basic_vm['spec']['template']['spec']['domain']
updated_cores = domain_data['cpu']['cores'] + 1
domain_data['cpu']['cores'] = updated_cores
resp = utils.poll_for_update_resource(
request, admin_session,
harvester_api_endpoints.update_vm % (vm_name),
basic_vm,
harvester_api_endpoints.get_vm % (vm_name))
updated_vm_data = resp.json()
updated_domain_data = (
updated_vm_data['spec']['template']['spec']['domain'])
assert updated_domain_data['cpu']['cores'] == updated_cores
utils.restart_vm(admin_session, harvester_api_endpoints, previous_uid,
vm_name, request.config.getoption('--wait-timeout'))
@pytest.mark.volumes_p2
@pytest.mark.volumes_p1
@pytest.mark.p2
@pytest.mark.p1
class TestVMVolumes:
def test_create_vm_with_external_volume(self, admin_session,
harvester_api_endpoints,
vm_with_volume):
utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, vm_with_volume)
volumes = vm_with_volume['spec']['template']['spec']['volumes']
for volume in volumes:
resp = admin_session.get(harvester_api_endpoints.get_volume % (
volume['persistentVolumeClaim']['claimName']))
assert resp.status_code == 200, (
'Failed to lookup volume %s: %s' % (
volume['persistentVolumeClaim']['claimName'],
resp.content))
volume_json = resp.json()
owned_by = json.loads(
volume_json['metadata']['annotations'].get(
'harvesterhci.io/owned-by'))
expected_owner = '%s/%s' % (
vm_with_volume['metadata']['namespace'],
vm_with_volume['metadata']['name'])
# make sure VM is one of the owners
found = False
for owner in owned_by:
if (owner['schema'] == 'kubevirt.io.virtualmachine' and
expected_owner in owner['refs']):
found = True
break
assert found, ('Expecting %s to be in volume %s owners list' % (
expected_owner, volume['persistentVolumeClaim']['claimName']))
def test_delete_volume_in_use(self, request, admin_session,
harvester_api_endpoints, vm_with_volume):
volumes = vm_with_volume['spec']['template']['spec']['volumes']
for volume in volumes:
# try to delete a volume in 'in-use' state and it should
# fail
resp = admin_session.delete(
harvester_api_endpoints.delete_volume % (
volume['persistentVolumeClaim']['claimName']))
assert resp.status_code not in [200, 201], (
'Deleting "in-use" volumes should not be permitted: %s' % (
resp.content))
def test_delete_vm_then_volumes(self, request, admin_session,
harvester_api_endpoints,
vm_with_volume, volume):
# delete the VM but keep the volumes
utils.delete_vm(request, admin_session, harvester_api_endpoints,
vm_with_volume, remove_all_disks=False)
volumes = vm_with_volume['spec']['template']['spec']['volumes']
for data_vol in volumes:
volume_name = data_vol['persistentVolumeClaim']['claimName']
resp = admin_session.get(harvester_api_endpoints.get_volume % (
volume_name))
assert resp.status_code == 200, (
'Failed to lookup data volume %s: %s' % (
volume_name, resp.content))
# now cleanup the volume
utils.delete_volume_by_name(request, admin_session,
harvester_api_endpoints, volume_name)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_s3(request, admin_session,
harvester_api_endpoints, basic_vm,
backuptarget_s3):
vm_name = basic_vm['metadata']['name']
backup_name = utils.random_name()
backup_json = None
try:
backup_json = utils.create_vm_backup(request, admin_session,
harvester_api_endpoints,
backuptarget_s3,
name=backup_name,
vm_name=vm_name)
finally:
if not request.config.getoption('--do-not-cleanup'):
if backup_json:
utils.delete_vm(request, admin_session,
harvester_api_endpoints, basic_vm)
utils.delete_vm_backup(
request, admin_session, harvester_api_endpoints,
backuptarget_s3, backup_json)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_backup_single_vm_nfs(request, admin_session,
harvester_api_endpoints, basic_vm,
backuptarget_nfs):
vm_name = basic_vm['metadata']['name']
backup_name = utils.random_name()
backup_json = None
try:
backup_json = utils.create_vm_backup(request, admin_session,
harvester_api_endpoints,
backuptarget_nfs,
name=backup_name,
vm_name=vm_name)
finally:
if not request.config.getoption('--do-not-cleanup'):
if backup_json:
utils.delete_vm(request, admin_session,
harvester_api_endpoints, basic_vm)
utils.delete_vm_backup(
request, admin_session, harvester_api_endpoints,
backuptarget_nfs, backup_json)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_s3(request, admin_session,
harvester_api_endpoints,
vm_with_volume,
backuptarget_s3):
backup_restore_migrated_vm(request, admin_session,
harvester_api_endpoints,
vm_with_volume,
backuptarget_s3)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_backup_restore_migrated_vm_nfs(request, admin_session,
harvester_api_endpoints,
vm_with_volume,
backuptarget_nfs):
backup_restore_migrated_vm(request, admin_session,
harvester_api_endpoints,
vm_with_volume,
backuptarget_nfs)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_nfs(request, admin_session,
harvester_api_endpoints, basic_vm,
backuptarget_nfs):
update_backup_yaml(request, admin_session,
harvester_api_endpoints,
basic_vm,
backuptarget_nfs)
@pytest.mark.skip("https://github.com/harvester/harvester/issues/1339")
@pytest.mark.backups3
@pytest.mark.backup_and_restore_p2
@pytest.mark.p2
def test_update_backup_yaml_s3(request, admin_session,
harvester_api_endpoints, basic_vm,
backuptarget_s3):
update_backup_yaml(request, admin_session,
harvester_api_endpoints,
basic_vm,
backuptarget_s3)
@pytest.mark.backupnfs
@pytest.mark.backup_and_restore_p1
@pytest.mark.p1
def test_restore_backup_vm_on(request, admin_session,
harvester_api_endpoints,
basic_vm, backuptarget_nfs):
# make sure the VM instance is successfully created
vm_instance_json = utils.lookup_vm_instance(
admin_session, harvester_api_endpoints, basic_vm)
vm_name = vm_instance_json['metadata']['name']
backup_name = utils.random_name()
backup_json = None
try:
backup_json = utils.create_vm_backup(request, admin_session,
harvester_api_endpoints,
backuptarget_nfs,
name=backup_name,
vm_name=vm_name)
restore_name = utils.random_name()
request_json = utils.get_json_object_from_template(
'basic_vm_restore',
name=restore_name,
vm_name=vm_name,
backup_name=backup_name
)
resp = admin_session.post(
harvester_api_endpoints.create_vm_restore,
json=request_json)
content = resp.json()
assert 'please stop the VM' in content['message']
finally:
if not request.config.getoption('--do-not-cleanup'):
if backup_json:
utils.delete_vm(request, admin_session,
harvester_api_endpoints, basic_vm)
utils.delete_vm_backup(
request, admin_session, harvester_api_endpoints,
backuptarget_nfs, backup_json)
| true | true |
f7309124ed3ffe0189fdb1c51cf417808d3ed9f7 | 1,726 | py | Python | data/mnist.py | aPere3/MVAProject-RecVis16 | 83b581c37cb486ec855e4a40652860df4e56b363 | [
"MIT"
] | null | null | null | data/mnist.py | aPere3/MVAProject-RecVis16 | 83b581c37cb486ec855e4a40652860df4e56b363 | [
"MIT"
] | null | null | null | data/mnist.py | aPere3/MVAProject-RecVis16 | 83b581c37cb486ec855e4a40652860df4e56b363 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains a method to load mnist data. Based on David Larson work at:
http://g.sweyla.com/blog/2012/mnist-numpy/a
"""
import numpy
import os, struct
from array import array as pyarray
def load_mnist(dataset="training", digits=numpy.arange(10), path="mnist"):
"""
The Mnist loading methods from David Larson. Can be checked out at: http://g.sweyla.com/blog/2012/mnist-numpy/a
:param dataset: 'training' or 'testing' depending on the files to load.
:param digits: digits to load
:param path: path to the mnist directory
:return: X, y: data and labels
"""
if dataset == "training":
fname_img = os.path.join(path, 'train-images.idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
elif dataset == "testing":
fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
else:
raise ValueError("dataset must be 'testing' or 'training'")
flbl = open(fname_lbl, 'rb')
magic_nr, size = struct.unpack(">II", flbl.read(8))
lbl = pyarray("b", flbl.read())
flbl.close()
fimg = open(fname_img, 'rb')
magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = pyarray("B", fimg.read())
fimg.close()
ind = [ k for k in range(size) if lbl[k] in digits ]
N = len(ind)
images = numpy.zeros((N, rows, cols), dtype=numpy.uint8)
labels = numpy.zeros((N, 1), dtype=numpy.int8)
for i in range(len(ind)):
images[i] = numpy.array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))
labels[i] = lbl[ind[i]]
return images, labels
| 32.566038 | 115 | 0.634994 |
import numpy
import os, struct
from array import array as pyarray
def load_mnist(dataset="training", digits=numpy.arange(10), path="mnist"):
if dataset == "training":
fname_img = os.path.join(path, 'train-images.idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
elif dataset == "testing":
fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
else:
raise ValueError("dataset must be 'testing' or 'training'")
flbl = open(fname_lbl, 'rb')
magic_nr, size = struct.unpack(">II", flbl.read(8))
lbl = pyarray("b", flbl.read())
flbl.close()
fimg = open(fname_img, 'rb')
magic_nr, size, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = pyarray("B", fimg.read())
fimg.close()
ind = [ k for k in range(size) if lbl[k] in digits ]
N = len(ind)
images = numpy.zeros((N, rows, cols), dtype=numpy.uint8)
labels = numpy.zeros((N, 1), dtype=numpy.int8)
for i in range(len(ind)):
images[i] = numpy.array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))
labels[i] = lbl[ind[i]]
return images, labels
| true | true |
f73092536f6b16f929d0669d98b5a821729b2f1a | 4,328 | py | Python | tests/modules/notifications/resources/utils.py | WildMeOrg/houston | 8102229421388e44234c07ee6cb73bf705b6fba0 | [
"Apache-2.0"
] | 6 | 2021-04-06T19:50:52.000Z | 2022-01-19T17:42:33.000Z | tests/modules/notifications/resources/utils.py | WildMeOrg/houston | 8102229421388e44234c07ee6cb73bf705b6fba0 | [
"Apache-2.0"
] | 491 | 2021-01-20T01:10:00.000Z | 2022-03-31T19:30:48.000Z | tests/modules/notifications/resources/utils.py | WildMeOrg/houston | 8102229421388e44234c07ee6cb73bf705b6fba0 | [
"Apache-2.0"
] | 2 | 2021-03-12T02:33:55.000Z | 2021-03-16T20:18:43.000Z | # -*- coding: utf-8 -*-
"""
notification resources utils
-------------
"""
import json
from tests import utils as test_utils
PATH = '/api/v1/notifications/'
EXPECTED_NOTIFICATION_KEYS = {
'guid',
'is_read',
'message_type',
'sender_name',
'sender_guid',
'message_values',
}
EXPECTED_LIST_KEYS = {
'guid',
'is_read',
'message_type',
'sender_name',
'sender_guid',
}
def create_notification(
flask_app_client, user, data, expected_status_code=200, expected_error=''
):
if user:
with flask_app_client.login(user, auth_scopes=('notifications:write',)):
response = flask_app_client.post(
'%s' % PATH,
content_type='application/json',
data=json.dumps(data),
)
else:
response = flask_app_client.post(
'%s' % PATH,
content_type='application/json',
data=json.dumps(data),
)
if expected_status_code == 200:
test_utils.validate_dict_response(response, 200, {'guid'})
elif 400 <= expected_status_code < 500:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
assert response.json['message'] == expected_error, response.json['message']
else:
test_utils.validate_dict_response(
response, expected_status_code, {'status', 'message'}
)
return response
def patch_notification(
flask_app_client,
notification_guid,
user,
data,
expected_status_code=200,
expected_error=None,
):
return test_utils.patch_via_flask(
flask_app_client,
user,
scopes='notifications:write',
path=f'{PATH}{notification_guid}',
data=data,
expected_status_code=expected_status_code,
response_200={'guid'},
expected_error=expected_error,
)
def read_notification(
flask_app_client, user, notification_guid, expected_status_code=200
):
return test_utils.get_dict_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=f'{PATH}{notification_guid}',
expected_status_code=expected_status_code,
response_200=EXPECTED_NOTIFICATION_KEYS,
)
def read_all_notifications(flask_app_client, user, expected_status_code=200):
return test_utils.get_list_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=PATH,
expected_status_code=expected_status_code,
expected_fields=EXPECTED_LIST_KEYS,
)
def read_all_unread_notifications(flask_app_client, user, expected_status_code=200):
return test_utils.get_list_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=f'{PATH}unread',
expected_status_code=expected_status_code,
expected_fields=EXPECTED_LIST_KEYS,
)
def get_unread_notifications(json_data, from_user_guid, notification_type):
return list(
filter(
lambda notif: notif['message_type'] == notification_type
and notif['sender_guid'] == from_user_guid
and notif['is_read'] is False,
json_data,
)
)
def mark_notification_as_read(
flask_app_client, user, notif_guid, expected_status_code=200
):
data = [test_utils.patch_replace_op('is_read', True)]
patch_notification(flask_app_client, notif_guid, user, data, expected_status_code)
def mark_all_notifications_as_read(flask_app_client, user):
unread_notifs = read_all_unread_notifications(flask_app_client, user)
for notif in unread_notifs.json:
mark_notification_as_read(flask_app_client, user, notif['guid'])
# Not a traditional util, this deletes all notifications in the system, the reason being that when many
# notifications are used, they are marked as read and cannot be recreated. This is intentional by design
# But it means that the tests can be non deterministic in that they can work or fail depending on what has
# happened before
def delete_all_notifications(db):
    """Delete every Notification row from the database (test-isolation helper)."""
    from app.modules.notifications.models import Notification
    notifs = Notification.query.all()
    for notif in notifs:
        # Each delete runs in its own sub-transaction.
        with db.session.begin(subtransactions=True):
            db.session.delete(notif)
| 29.243243 | 106 | 0.676063 |
import json
from tests import utils as test_utils
PATH = '/api/v1/notifications/'
EXPECTED_NOTIFICATION_KEYS = {
'guid',
'is_read',
'message_type',
'sender_name',
'sender_guid',
'message_values',
}
EXPECTED_LIST_KEYS = {
'guid',
'is_read',
'message_type',
'sender_name',
'sender_guid',
}
def create_notification(
    flask_app_client, user, data, expected_status_code=200, expected_error=''
):
    """POST a new notification, optionally authenticated as *user*.

    Validates the response against *expected_status_code*; for 4xx codes the
    response message must equal *expected_error*. Returns the response object.
    """
    if user:
        with flask_app_client.login(user, auth_scopes=('notifications:write',)):
            response = flask_app_client.post(
                '%s' % PATH,
                content_type='application/json',
                data=json.dumps(data),
            )
    else:
        # Unauthenticated request: no login context.
        response = flask_app_client.post(
            '%s' % PATH,
            content_type='application/json',
            data=json.dumps(data),
        )
    if expected_status_code == 200:
        test_utils.validate_dict_response(response, 200, {'guid'})
    elif 400 <= expected_status_code < 500:
        test_utils.validate_dict_response(
            response, expected_status_code, {'status', 'message'}
        )
        assert response.json['message'] == expected_error, response.json['message']
    else:
        # 3xx / 5xx: only the envelope shape is checked, not the message text.
        test_utils.validate_dict_response(
            response, expected_status_code, {'status', 'message'}
        )
    return response
def patch_notification(
    flask_app_client,
    notification_guid,
    user,
    data,
    expected_status_code=200,
    expected_error=None,
):
    """PATCH the notification *notification_guid* with JSON-patch ops *data*.

    Returns the response object; on 200 the body must contain a 'guid' key.
    """
    return test_utils.patch_via_flask(
        flask_app_client,
        user,
        scopes='notifications:write',
        path=f'{PATH}{notification_guid}',
        data=data,
        expected_status_code=expected_status_code,
        response_200={'guid'},
        expected_error=expected_error,
    )
def read_notification(
flask_app_client, user, notification_guid, expected_status_code=200
):
return test_utils.get_dict_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=f'{PATH}{notification_guid}',
expected_status_code=expected_status_code,
response_200=EXPECTED_NOTIFICATION_KEYS,
)
def read_all_notifications(flask_app_client, user, expected_status_code=200):
return test_utils.get_list_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=PATH,
expected_status_code=expected_status_code,
expected_fields=EXPECTED_LIST_KEYS,
)
def read_all_unread_notifications(flask_app_client, user, expected_status_code=200):
return test_utils.get_list_via_flask(
flask_app_client,
user,
scopes='notifications:read',
path=f'{PATH}unread',
expected_status_code=expected_status_code,
expected_fields=EXPECTED_LIST_KEYS,
)
def get_unread_notifications(json_data, from_user_guid, notification_type):
return list(
filter(
lambda notif: notif['message_type'] == notification_type
and notif['sender_guid'] == from_user_guid
and notif['is_read'] is False,
json_data,
)
)
def mark_notification_as_read(
flask_app_client, user, notif_guid, expected_status_code=200
):
data = [test_utils.patch_replace_op('is_read', True)]
patch_notification(flask_app_client, notif_guid, user, data, expected_status_code)
def mark_all_notifications_as_read(flask_app_client, user):
unread_notifs = read_all_unread_notifications(flask_app_client, user)
for notif in unread_notifs.json:
mark_notification_as_read(flask_app_client, user, notif['guid'])
def delete_all_notifications(db):
from app.modules.notifications.models import Notification
notifs = Notification.query.all()
for notif in notifs:
with db.session.begin(subtransactions=True):
db.session.delete(notif)
| true | true |
f730926324462a884c4e33f7f7f63d6260891d16 | 3,685 | py | Python | tutorials/plot_notebook.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | tutorials/plot_notebook.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | tutorials/plot_notebook.py | jnothman/sphinx-gallery | b930662613a32fe05f16b39f86fafdb4c8d6f424 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
========================
Notebook styled examples
========================
The gallery is capable of transforming python files into reStructuredText files
with a notebook structure. For this to be used you need to respect some syntax
rules.
It makes a lot of sense to contrast this output rst file with the
:download:`original python script <plot_notebook.py>` to get better feeling of
the necessary file structure.
Anything before the python script docstring is ignored by sphinx-gallery and
will not appear in the rst file, nor will it be executed.
This python docstring requires an reStructuredText title to name the file and
correctly build the reference links.
Once you close the docstring you would be writing python code. This code gets
executed by sphinx gallery shows the plots and attaches the generating code.
Nevertheless you can break your code into blocks and give the rendered file
a notebook style. In this case you have to include a code comment breaker
a line of at least 20 hashes and then every comment start with the a new hash.
As in this example we start by first writing this module
style docstring, then for the first code block we write the example file author
and script license continued by the import modules instructions.
"""
# Code source: Óscar Nájera
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
##############################################################################
# This code block is executed, although it produces no output. Lines starting
# with a simple hash are code comment and get treated as part of the code
# block. To include this new comment string we started the new block with a
# long line of hashes.
#
# The sphinx-gallery parser will assume everything after this splitter and that
# continues to start with a **comment hash and space** (respecting code style)
# is text that has to be rendered in
# html format. Keep in mind to always keep your comments always together by
# comment hashes. That means to break a paragraph you still need to commend
# that line break.
#
# In this example the next block of code produces some plotable data. Code is
# executed, figure is saved and then code is presented next, followed by the
# inlined figure.
x = np.linspace(-np.pi, np.pi, 300)
xx, yy = np.meshgrid(x, x)
z = np.cos(xx) + np.cos(yy)  # 2-D cosine surface reused by all example plots below
plt.figure()
plt.imshow(z)
plt.colorbar()
plt.xlabel('$x$')
plt.ylabel('$y$')
###########################################################################
# Again it is possble to continue the discussion with a new python string. This
# time to introduce the next code block generates 2 separate figures.
# Same data rendered with two colormaps; sphinx-gallery captures each figure separately.
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('hot'))
plt.figure()
plt.imshow(z, cmap=plt.cm.get_cmap('Spectral'), interpolation='none')
##########################################################################
# There's some subtle differences between rendered html rendered comment
# strings and code comment strings which I'll demonstrate below. (Some of this
# only makes sense if you look at the
# :download:`raw python script <plot_notebook.py>`)
#
# Comments in comment blocks remain nested in the text.
def dummy():
    """Dummy function to make sure docstrings don't get rendered as text"""
    return None
# Code comments not preceded by the hash splitter are left in code blocks.
string = """
Triple-quoted string which tries to break parser but doesn't.
"""
############################################################################
# Finally, I'll call ``show`` at the end just so someone running the python
# code directly will see the plots; this is not necessary for creating the docs
plt.show()
| 37.989691 | 79 | 0.690638 |
import numpy as np
import matplotlib.pyplot as plt
| true | true |
f73092a62dc94f15d8005c38a7f97315b3879895 | 2,582 | py | Python | src/openstack_cli/commands/conf/keys/export.py | hapylestat/openstack_cli | be627f0b3c7ab9bf1032c36faca2ad101e53fb0e | [
"Apache-2.0"
] | null | null | null | src/openstack_cli/commands/conf/keys/export.py | hapylestat/openstack_cli | be627f0b3c7ab9bf1032c36faca2ad101e53fb0e | [
"Apache-2.0"
] | 1 | 2021-03-02T07:15:43.000Z | 2021-03-02T07:15:43.000Z | src/openstack_cli/commands/conf/keys/export.py | hapylestat/openstack_cli | be627f0b3c7ab9bf1032c36faca2ad101e53fb0e | [
"Apache-2.0"
] | 1 | 2021-03-23T10:00:56.000Z | 2021-03-23T10:00:56.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from openstack_cli.commands.conf.keys.list import _keys_list
from openstack_cli.modules.apputils.terminal import Console
from openstack_cli.core.config import Configuration
from openstack_cli.modules.apputils.discovery import CommandMetaInfo
from openstack_cli.modules.openstack import VMKeypairItemValue, OpenStack
__module__ = CommandMetaInfo("export", "Export ssh keys to disk")
__args__ = __module__.arg_builder\
.add_default_argument("name", str, "Name of the key to be exported", default="")
def _keys_export(conf: Configuration, ostack: OpenStack, name: str):
  """Export the named SSH keypair to <cwd>/<name>.public.key / <name>.private.key.

  When *name* is empty, the stored keys are listed and the user is prompted
  to select one interactively. Missing key names are reported, not raised.
  """
  if not name:
    _keys = _keys_list(conf, ostack, True)
    item = Console.ask("Select key to export", _type=int)
    # NOTE(review): ask() appears to return None on invalid input — confirm;
    # out-of-range indexes are rejected explicitly here.
    if item is None or item > len(_keys) - 1:
      Console.print_warning("Invalid selection, aborting")
      return
    name = _keys[item].name
  _key: VMKeypairItemValue
  try:
    _key = conf.get_key(name)
  except KeyError as e:
    Console.print_error(str(e))
    return
  d = os.getcwd()
  _public_file_path = os.path.join(d, f"{_key.name}.public.key")
  _private_file_path = os.path.join(d, f"{_key.name}.private.key")
  if _key.public_key:
    try:
      with open(_public_file_path, "w+", encoding="UTF-8") as f:
        f.write(_key.public_key)
      Console.print(f"Public key: {_public_file_path}")
    except IOError as e:
      Console.print_error(f"{_key.name}(public): {str(e)}")
  if _key.private_key:
    try:
      with open(_private_file_path, "w+", encoding="UTF-8") as f:
        f.write(_key.private_key)
      Console.print(f"Private key: {_private_file_path}")
    except IOError as e:
      Console.print_error(f"{_key.name}(private): {str(e)}")
def __init__(conf: Configuration, name: str):
  # Module-level entry point invoked by the apputils command-discovery
  # framework (see __module__/__args__ above); *name* comes from the CLI.
  ostack = OpenStack(conf)
  _keys_export(conf, ostack, name)
| 36.366197 | 82 | 0.72773 |
import os
from openstack_cli.commands.conf.keys.list import _keys_list
from openstack_cli.modules.apputils.terminal import Console
from openstack_cli.core.config import Configuration
from openstack_cli.modules.apputils.discovery import CommandMetaInfo
from openstack_cli.modules.openstack import VMKeypairItemValue, OpenStack
__module__ = CommandMetaInfo("export", "Export ssh keys to disk")
__args__ = __module__.arg_builder\
.add_default_argument("name", str, "Name of the key to be exported", default="")
def _keys_export(conf: Configuration, ostack: OpenStack, name: str):
if not name:
_keys = _keys_list(conf, ostack, True)
item = Console.ask("Select key to export", _type=int)
if item is None or item > len(_keys) - 1:
Console.print_warning("Invalid selection, aborting")
return
name = _keys[item].name
_key: VMKeypairItemValue
try:
_key = conf.get_key(name)
except KeyError as e:
Console.print_error(str(e))
return
d = os.getcwd()
_public_file_path = os.path.join(d, f"{_key.name}.public.key")
_private_file_path = os.path.join(d, f"{_key.name}.private.key")
if _key.public_key:
try:
with open(_public_file_path, "w+", encoding="UTF-8") as f:
f.write(_key.public_key)
Console.print(f"Public key: {_public_file_path}")
except IOError as e:
Console.print_error(f"{_key.name}(public): {str(e)}")
if _key.private_key:
try:
with open(_private_file_path, "w+", encoding="UTF-8") as f:
f.write(_key.private_key)
Console.print(f"Private key: {_private_file_path}")
except IOError as e:
Console.print_error(f"{_key.name}(private): {str(e)}")
def __init__(conf: Configuration, name: str):
ostack = OpenStack(conf)
_keys_export(conf, ostack, name)
| true | true |
f73093a92dabd28a9639dd0153472843f0f05b2b | 4,305 | py | Python | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/accessory_limit_vo.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/accessory_limit_vo.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-osm/huaweicloudsdkosm/v2/model/accessory_limit_vo.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class AccessoryLimitVo:
    """Attachment upload limits (auto-generated huaweicloud SDK model).

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    openapi_types = {
        'limit_count': 'str',
        'limit_size': 'str',
        'limit_file_type': 'str'
    }
    attribute_map = {
        'limit_count': 'limit_count',
        'limit_size': 'limit_size',
        'limit_file_type': 'limit_file_type'
    }
    def __init__(self, limit_count=None, limit_size=None, limit_file_type=None):
        """AccessoryLimitVo - a model defined in huaweicloud sdk"""
        self._limit_count = None
        self._limit_size = None
        self._limit_file_type = None
        self.discriminator = None
        if limit_count is not None:
            self.limit_count = limit_count
        if limit_size is not None:
            self.limit_size = limit_size
        if limit_file_type is not None:
            self.limit_file_type = limit_file_type
    @property
    def limit_count(self):
        """Gets the limit_count of this AccessoryLimitVo.

        Maximum number of attachment files allowed.

        :return: The limit_count of this AccessoryLimitVo.
        :rtype: str
        """
        return self._limit_count
    @limit_count.setter
    def limit_count(self, limit_count):
        """Sets the limit_count of this AccessoryLimitVo.

        Maximum number of attachment files allowed.

        :param limit_count: The limit_count of this AccessoryLimitVo.
        :type: str
        """
        self._limit_count = limit_count
    @property
    def limit_size(self):
        """Gets the limit_size of this AccessoryLimitVo.

        Maximum attachment file size, in MB.

        :return: The limit_size of this AccessoryLimitVo.
        :rtype: str
        """
        return self._limit_size
    @limit_size.setter
    def limit_size(self, limit_size):
        """Sets the limit_size of this AccessoryLimitVo.

        Maximum attachment file size, in MB.

        :param limit_size: The limit_size of this AccessoryLimitVo.
        :type: str
        """
        self._limit_size = limit_size
    @property
    def limit_file_type(self):
        """Gets the limit_file_type of this AccessoryLimitVo.

        Allowed attachment file types.

        :return: The limit_file_type of this AccessoryLimitVo.
        :rtype: str
        """
        return self._limit_file_type
    @limit_file_type.setter
    def limit_file_type(self, limit_file_type):
        """Sets the limit_file_type of this AccessoryLimitVo.

        Allowed attachment file types.

        :param limit_file_type: The limit_file_type of this AccessoryLimitVo.
        :type: str
        """
        self._limit_file_type = limit_file_type
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes listed in sensitive_list.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AccessoryLimitVo):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.090909 | 80 | 0.571196 |
import pprint
import re
import six
class AccessoryLimitVo:
sensitive_list = []
openapi_types = {
'limit_count': 'str',
'limit_size': 'str',
'limit_file_type': 'str'
}
attribute_map = {
'limit_count': 'limit_count',
'limit_size': 'limit_size',
'limit_file_type': 'limit_file_type'
}
def __init__(self, limit_count=None, limit_size=None, limit_file_type=None):
self._limit_count = None
self._limit_size = None
self._limit_file_type = None
self.discriminator = None
if limit_count is not None:
self.limit_count = limit_count
if limit_size is not None:
self.limit_size = limit_size
if limit_file_type is not None:
self.limit_file_type = limit_file_type
@property
def limit_count(self):
return self._limit_count
@limit_count.setter
def limit_count(self, limit_count):
self._limit_count = limit_count
@property
def limit_size(self):
return self._limit_size
@limit_size.setter
def limit_size(self, limit_size):
self._limit_size = limit_size
@property
def limit_file_type(self):
return self._limit_file_type
@limit_file_type.setter
def limit_file_type(self, limit_file_type):
self._limit_file_type = limit_file_type
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AccessoryLimitVo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f730942a921d93e237889ddabf84129fe2aacd58 | 26 | py | Python | e.py | Flonky/e | 0954acd82ebd74e6a433aa778a3844dc7b8acc60 | [
"MIT"
] | 1 | 2022-03-24T18:18:29.000Z | 2022-03-24T18:18:29.000Z | e.py | Flonky/e | 0954acd82ebd74e6a433aa778a3844dc7b8acc60 | [
"MIT"
] | null | null | null | e.py | Flonky/e | 0954acd82ebd74e6a433aa778a3844dc7b8acc60 | [
"MIT"
] | null | null | null | while True:
print("e") | 13 | 14 | 0.576923 | while True:
print("e") | true | true |
f7309493f11a75d980777cffc4ea62f00325b0e2 | 23,031 | py | Python | test/functional/test_framework/util.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 1 | 2020-04-07T10:09:00.000Z | 2020-04-07T10:09:00.000Z | test/functional/test_framework/util.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 13 | 2020-05-08T11:14:37.000Z | 2020-05-12T10:03:53.000Z | test/functional/test_framework/util.py | DancingAxolotl/encocoinplus | b3dcc750c48a4f4e2ffebd104e5426544fe2f6b8 | [
"MIT"
] | 22 | 2020-02-10T09:17:20.000Z | 2020-07-10T10:33:26.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert the fee was in range"""
    expected = round(tx_size * fee_per_kB / 1000, 8)
    if fee < expected:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(expected)))
    # allow the wallet's estimation to be at most 2 bytes off
    upper_bound = (tx_size + 20) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(expected)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal to the first."""
    values = (thing1, thing2) + args
    if any(thing1 != other for other in values[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in values))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 <= thing2:
        raise AssertionError("{} <= {}".format(str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 >= thing2."""
    if thing1 < thing2:
        raise AssertionError("{} < {}".format(str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc* (no message check)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc*; if *message* is not None it
    must occur in the exception's error['message'] text.

    JSONRPCException is rejected up front: RPC failures must be tested with
    assert_raises_rpc_error() instead. The except-clause order is significant.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        # NOTE(review): the message check assumes the exception carries a
        # JSON-RPC style .error dict — confirm before passing plain exceptions
        # together with a non-None *message*.
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Execute a process and asserts the process return code and output.

    Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
    and verifies that the return code and output are as expected. Throws AssertionError if
    no CalledProcessError was raised or if the return code and output are not as expected.

    Args:
        returncode (int): the process return code.
        output (string): [a substring of] the process output.
        fun (function): the function to call. This should execute a process.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as err:
        if err.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % err.returncode)
        if output not in err.output:
            raise AssertionError("Expected substring not found:" + err.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Run an RPC and verify that a specific JSONRPC exception code and message is raised.

    Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
    and verifies that the error code and message are as expected. Throws AssertionError if
    no JSONRPCException was raised or if the error code/message are not as expected.

    Args:
        code (int), optional: the error code returned by the RPC call (defined
            in src/rpc/protocol.h). Set to None if checking the error code is not required.
        message (string), optional: [a substring of] the error string returned by the
            RPC call. Set to None if checking the error string is not required.
        fun (function): the function to call. This should be the name of an RPC.
        args*: positional arguments for the function.
        kwds**: named arguments for the function.
    """
    # Explicit raise instead of a bare `assert`: asserts are stripped when
    # Python runs with -O, which would silently disable this check.
    if not try_rpc(code, message, fun, *args, **kwds):
        raise AssertionError("No exception raised")
def try_rpc(code, message, fun, *args, **kwds):
    """Tries to run an rpc command.

    Test against error code and message if the rpc fails.
    Returns whether a JSONRPCException was raised.

    Any non-JSONRPC exception is converted into an AssertionError; the
    except-clause order is significant.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring (%s) not found in: %s" % (message, e.error['message']))
        return True
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Raise AssertionError if *string* cannot be parsed as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as err:
        msg = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err)
        raise AssertionError(msg)
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless *string* is a lowercase hex hash of *length*
    characters (pass length=None to skip the length check)."""
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """
    Pass in array of JSON objects, a dictionary with key/value pairs
    to match against, and another dictionary with expected key/value
    pairs.
    If the should_not_find flag is true, to_match should not be found
    in object_array
    """
    if should_not_find:
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        # Evaluate every to_match key (no short-circuit: a missing key must KeyError).
        matches = True
        for key, value in to_match.items():
            if item[key] != value:
                matches = False
        if not matches:
            continue
        if should_not_find:
            num_matched += 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
        num_matched += 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    value = float(Decimal("20000000.00000003"))
    roundtripped = json.loads(json.dumps(value))
    if int(roundtripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by *hex_string*."""
    return len(bytes.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
    """Return the lowercase hex encoding of the bytes-like *byte_str*."""
    return byte_str.hex()
def hash256(byte_str):
    """Return double-SHA256 of *byte_str*, byte-reversed (bitcoin hash convention)."""
    first_round = hashlib.sha256(byte_str).digest()
    return hashlib.sha256(first_round).digest()[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string to bytes (raises on non-hex characters)."""
    return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
    """Return the base64 encoding of *string* (UTF-8) as an ASCII str."""
    raw = string.encode('utf-8')
    return b64encode(raw).decode('ascii')
def satoshi_round(amount):
    """Round *amount* down to eight decimal places (1 satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll *predicate* every 0.5s until it returns true, giving up after
    *attempts* tries or *timeout* seconds (default 60s when neither is given).
    If *lock* is supplied it is held while the predicate is evaluated.
    Raises on timeout via the assert_greater_than helpers."""
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    deadline = time.time() + timeout
    attempt = 0
    while attempt < attempts and time.time() < deadline:
        if lock:
            with lock:
                if predicate():
                    return
        else:
            if predicate():
                return
        attempt += 1
        time.sleep(0.5)
    # Report which limit was exceeded (one of these asserts must fire).
    assert_greater_than(attempts, attempt)
    assert_greater_than(deadline, time.time())
    raise RuntimeError('Unreachable')
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
    # Must be initialized with a unique integer for each process
    # Used by p2p_port()/rpc_port() to offset port numbers so concurrent
    # test processes pick disjoint port ranges.
    n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
    """
    Args:
        url (str): URL of the RPC server to call
        node_number (int): the node number (or id) that this calls to
    Kwargs:
        timeout (int): HTTP timeout in seconds
        coveragedir (str): directory for RPC coverage logs; when set, calls
            made through the returned proxy are recorded there
    Returns:
        AuthServiceProxy. convenience object for making RPC calls.
    """
    proxy_kwargs = {}
    if timeout is not None:
        proxy_kwargs['timeout'] = timeout
    proxy = AuthServiceProxy(url, **proxy_kwargs)
    proxy.url = url  # store URL on proxy for info
    coverage_logfile = coverage.get_filename(
        coveragedir, node_number) if coveragedir else None
    # The wrapper transparently forwards RPC calls while logging them.
    return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
    """Return the P2P listen port for node *n*, offset per process via PortSeed.n."""
    assert(n <= MAX_NODES)
    return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
    """Return the RPC port for node *n* (one PORT_RANGE above the P2P ports)."""
    return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_url(datadir, i, rpchost=None):
    """Build the http://user:pass@host:port URL for node *i*'s RPC interface.

    *rpchost* may override the default host, given either as "host" or
    "host:port"; credentials come from the node's config/cookie file.
    """
    rpc_u, rpc_p = get_auth_cookie(datadir)
    host = '127.0.0.1'
    port = rpc_port(i)
    if rpchost:
        parts = rpchost.split(':')
        if len(parts) == 2:
            host, port = parts
        else:
            host = rpchost
    return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create node *n*'s datadir under *dirname* and write a regtest epgc.conf
    with per-node RPC credentials and ports. Returns the datadir path."""
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    with open(os.path.join(datadir, "epgc.conf"), 'w', encoding='utf8') as f:
        f.write("regtest=1\n")
        f.write("rpcuser=" + rpc_u + "\n")
        f.write("rpcpassword=" + rpc_p + "\n")
        f.write("port=" + str(p2p_port(n)) + "\n")
        f.write("rpcport=" + str(rpc_port(n)) + "\n")
        f.write("listenonion=0\n")
        f.write("enablezeromint=0\n")
        f.write("staking=0\n")
        f.write("spendzeroconfchange=1\n")
    return datadir
def rpc_auth_pair(n):
    """Return (rpcuser, rpcpassword) credentials for node *n*.

    The credentials deliberately contain non-ASCII (emoji) characters to
    exercise unicode handling in config read/write; the previous U+FFFD
    replacement characters here were mojibake from a bad encoding round-trip.
    """
    return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def get_datadir_path(dirname, n):
    """Return the path of node *n*'s data directory under *dirname*."""
    return os.path.join(dirname, "node{}".format(n))
def get_auth_cookie(datadir):
    """Return the (user, password) RPC credentials for *datadir*.

    Reads rpcuser/rpcpassword from epgc.conf when present; a regtest .cookie
    file, if it exists, overrides both. Raises ValueError when neither source
    yields a complete pair.
    """
    user = None
    password = None
    conf_path = os.path.join(datadir, "epgc.conf")
    if os.path.isfile(conf_path):
        with open(conf_path, 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # Ensure that there is only one rpcuser line
                    # Split only on the first '=' so values containing '=' survive.
                    user = line.split("=", 1)[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # Ensure that there is only one rpcpassword line
                    password = line.split("=", 1)[1].strip("\n")
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        with open(cookie_path, 'r') as f:
            userpass = f.read()
        # Cookie format is "user:password"; split once so ':' in the password survives.
        user, password = userpass.split(':', 1)
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
def delete_cookie_file(datadir):
    """Remove a stale RPC .cookie file from *datadir*'s regtest dir, if present."""
    cookie = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie):
        logger.debug("Deleting leftover cookie file")
        os.remove(cookie)
def get_bip9_status(node, key):
    """Return the BIP9 softfork status entry for *key* from *node*."""
    return node.getblockchaininfo()['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the mock time of every node in *nodes* to timestamp *t*."""
    for n in nodes:
        n.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of *from_connection* whose subver identifies it as
    test node *node_num*, then wait (up to 5s) until none remain."""
    for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
        try:
            from_connection.disconnectnode(addr)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            # This avoids a race condition if we're mass-disconnecting peers.
            if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
                raise
    # wait to disconnect
    wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """Open an outbound connection from *from_connection* to node *node_num*
    and wait for the version handshake to complete on all of its peers."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    """Connect nodes *a* and *b* to each other (both directions)."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def connect_nodes_clique(nodes):
    """Connect every pair of nodes in both directions, forming a complete graph."""
    l = len(nodes)
    for a in range(l):
        # NOTE(review): range(a, l) includes b == a, so each node is also asked
        # to connect to itself; presumably the self-connection is rejected by
        # the node — confirm, or start the inner range at a + 1.
        for b in range(a, l):
            connect_nodes_bi(nodes, a, b)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """
    Wait until everybody has the same tip.
    sync_blocks needs to be called with an rpc_connections set that has at least
    one node already synced to the latest, stable tip, otherwise there's a
    chance it might return before all nodes are stably synced.
    """
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    # NOTE(review): fixed settle delay before sampling heights — presumably to
    # let the nodes catch up after block generation; confirm it is needed.
    time.sleep(5)
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        # waitforblockheight blocks for up to wait*1000 ms per connection.
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            # Every node is at maxheight but on different hashes: a fork, not a lag.
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n  {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n  {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """Wait until every connection reports the same best block hash."""
    remaining = timeout
    while remaining > 0:
        tips = [conn.getbestblockhash() for conn in rpc_connections]
        if tips.count(tips[0]) == len(tips):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """Wait until every connection reports identical mempool contents.

    *flush_scheduler* is accepted for interface compatibility but is
    currently unused (the syncwithvalidationinterfacequeue call is disabled).
    """
    remaining = timeout
    while remaining > 0:
        reference = set(rpc_connections[0].getrawmempool())
        matching = sum(1 for conn in rpc_connections[1:]
                       if set(conn.getrawmempool()) == reference)
        # +1 accounts for the reference connection itself.
        if matching + 1 == len(rpc_connections):
            return
        time.sleep(wait)
        remaining -= wait
    raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
    """Return the index of the output of *txid* whose value equals *amount*.

    Raises RuntimeError if no such output exists.
    """
    tx = node.getrawtransaction(txid, 1)
    for idx, out in enumerate(tx["vout"]):
        if out["value"] == amount:
            return idx
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """Return (total_in, inputs): a random set of unspent txouts covering *amount_needed*.

    Raises RuntimeError when the wallet cannot cover the requested amount.
    """
    assert confirmations_required >= 0
    candidates = from_node.listunspent(confirmations_required)
    random.shuffle(candidates)
    picked = []
    total = Decimal("0.00000000")
    while total < amount_needed and candidates:
        utxo = candidates.pop()
        total += utxo["amount"]
        picked.append({"txid": utxo["txid"], "vout": utxo["vout"], "address": utxo["address"]})
    if total < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total))
    return (total, picked)
def make_change(from_node, amount_in, amount_out, fee):
    """Build the change output(s) for a transaction and return them as {address: amount}."""
    spent = amount_out + fee
    change = amount_in - spent
    outputs = {}
    if change > spent * 2:
        # Break a big input up: send half the change to a fresh address,
        # rounded down to satoshi precision to avoid rounding surprises.
        half = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        outputs[from_node.getnewaddress()] = half
        change = amount_in - spent - half
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """Send *amount* from a random node to a random node's new address.

    The fee is randomized as min_fee + fee_increment * randint(0, fee_variants).
    Returns (txid, hex-encoded transaction, fee).
    """
    sender = random.choice(nodes)
    receiver = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    total_in, inputs = gather_inputs(sender, amount + fee)
    outputs = make_change(sender, total_in, amount, fee)
    outputs[receiver.getnewaddress()] = float(amount)
    raw = sender.createrawtransaction(inputs, outputs)
    signed = sender.signrawtransaction(raw)
    txid = sender.sendrawtransaction(signed["hex"], True)
    return (txid, signed["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
    """Ensure *node*'s wallet holds at least *count* confirmed utxos and return them.

    Mines blocks for funds, then repeatedly splits existing utxos in two
    until the target count is reached, and mines the splits to confirm them.
    """
    # Mine enough blocks to fund the splits; the +101 extra blocks are
    # presumably there to mature the coinbase outputs — TODO confirm maturity.
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    # Each iteration consumes one utxo and produces two, netting +1 per pass.
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = float(satoshi_round(send_value / 2))
        outputs[addr2] = float(satoshi_round(send_value / 2))
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm all pending split transactions before recounting.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
    """Return a hex string of 128 large OP_RETURN txouts for padding transactions."""
    # One script_pubkey: OP_RETURN OP_PUSHDATA2 512 bytes of 0x01.
    script_pubkey = "6a4d0200" + "01" * 512
    # 0x81: compact-size txout count, then 128 txouts, each a zero value
    # (8 bytes), the script length prefix "fd0402", and the script itself.
    parts = ["81"]
    for _ in range(128):
        parts.append("0000000000000000" + "fd0402" + script_pubkey)
    return "".join(parts)
def create_tx(node, coinbase, to_address, amount):
    """Create and sign a tx spending output 0 of *coinbase* to *to_address*; return the hex."""
    raw = node.createrawtransaction([{"txid": coinbase, "vout": 0}],
                                    {to_address: amount})
    signed = node.signrawtransaction(raw)
    assert_equal(signed["complete"], True)
    return signed["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Spend *num* utxos, bloating each raw tx with *txouts*; return the txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        utxo = utxos.pop()
        raw = node.createrawtransaction(
            [{"txid": utxo["txid"], "vout": utxo["vout"]}],
            {addr: float(satoshi_round(utxo['amount'] - fee))})
        # Splice the OP_RETURN outputs into the serialized tx, replacing the
        # two hex chars at offset 92-93 (presumably the txout count — confirm).
        bloated = raw[0:92] + txouts + raw[94:]
        # Sign with SIGHASH "NONE" so the spliced outputs don't invalidate it.
        signed = node.signrawtransaction(bloated, None, None, "NONE")
        txids.append(node.sendrawtransaction(signed["hex"], True))
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with 14 large (~66k) transactions and mine it.

    14 such transactions come close to the 1MB block limit.  *utxos*, when
    given, supplies (and is mutated to consume) the outputs to spend.
    """
    num = 14
    txouts = gen_return_txouts()
    if utxos is None:
        utxos = []
    if len(utxos) < num:
        # Not enough outputs supplied; refill the (shared) list from the wallet.
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
### EPGC specific utils ###
# Denominations used by the vZC (zerocoin) tests — presumably the legal
# mint denominations; confirm against the coin's zerocoin parameters.
vZC_DENOMS = [1, 5, 10, 50, 100, 500, 1000, 5000]
# Default fee (in coin units) used by the test helpers.
DEFAULT_FEE = 0.01
# Unix timestamps used when toggling sporks in tests; the deactivation time
# is far in the future, i.e. effectively "never deactivate".
SPORK_ACTIVATION_TIME = 1563253447
SPORK_DEACTIVATION_TIME = 4070908800
def DecimalAmt(x):
    """Return Decimal from float for equality checks against rpc outputs."""
    return Decimal("%.8f" % x)
| 38.00495 | 142 | 0.649516 |
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
nError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every supplied value compares equal to the first."""
    values = (thing1, thing2) + args
    if any(v != thing1 for v in values[1:]):
        raise AssertionError("not(%s)" % " == ".join(str(v) for v in values))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 <= thing2:
        raise AssertionError("%s <= %s" % (thing1, thing2))
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 >= thing2."""
    if thing1 < thing2:
        raise AssertionError("%s < %s" % (thing1, thing2))
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc* (no message check)."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises *exc*; if *message* is given, it must
    appear in the raised exception's .error['message'].

    Note: the except-clause order is load-bearing — JSONRPCException is
    intercepted first so that RPC failures go through assert_raises_rpc_error.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        # The expected exception was raised; optionally check its message.
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        # fun() completed without raising at all.
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises CalledProcessError with the given
    *returncode* and *output* substring."""
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if returncode != e.returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        # fun() succeeded: the expected process failure never happened.
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Assert fun raises a JSONRPCException with error *code* and *message* substring."""
    assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Call fun(*args, **kwds); return True if it raised a JSONRPCException
    matching *code*/*message*, False if it raised nothing.

    Any mismatch or non-RPC exception becomes an AssertionError.  Either
    check may be skipped by passing None for it.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        # JSONRPCException was thrown as expected. Check the code and message values are correct.
        if (code is not None) and (code != e.error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
        if (message is not None) and (message not in e.error['message']):
            raise AssertionError("Expected substring (%s) not found in: %s" % (message, e.error['message']))
        return True
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Raise AssertionError unless *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as err:
        raise AssertionError("Couldn't interpret %r as hexadecimal; raised: %s" % (string, err))
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless *string* is lowercase-hex of *length* chars.

    Pass length=None (or 0) to skip the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError("String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError("String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """Scan *object_array* for dicts matching all *to_match* key/values; every
    match must also carry all *expected* key/values.

    With should_not_find=True, *expected* must be {} and the assertion is
    that nothing matches *to_match* at all.
    """
    if should_not_find:
        assert_equal(expected, {})
    num_matched = 0
    for item in object_array:
        all_match = True
        for key, value in to_match.items():
            if item[key] != value:
                all_match = False
        if not all_match:
            continue
        elif should_not_find:
            # NOTE(review): a should_not_find match increments num_matched here
            # AND again below (the elif does not skip the rest of the loop body);
            # harmless since any value > 0 triggers the failure, but quirky.
            num_matched = num_matched + 1
        for key, value in expected.items():
            if item[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
            num_matched = num_matched + 1
    if num_matched == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if num_matched > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of raw bytes encoded by hexadecimal *hex_string*."""
    raw = bytearray.fromhex(hex_string)
    return len(raw)
def bytes_to_hex_str(byte_str):
    """Return the ASCII hex encoding of *byte_str*."""
    return str(hexlify(byte_str), 'ascii')
def hash256(byte_str):
    """Return double-SHA256 of *byte_str* with the byte order reversed."""
    first = hashlib.sha256(byte_str).digest()
    second = hashlib.sha256(first).digest()
    return second[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into raw bytes."""
    encoded = hex_str.encode('ascii')
    return unhexlify(encoded)
def str_to_b64str(string):
    """Return the base64 encoding of *string* (UTF-8) as an ASCII str."""
    encoded = b64encode(string.encode('utf-8'))
    return encoded.decode('ascii')
def satoshi_round(amount):
    """Round *amount* down to 8 decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
    """Poll *predicate* every 0.5s until it returns True.

    Stops after *attempts* tries or *timeout* seconds; when neither limit is
    supplied, a 60 second timeout applies.  *lock*, if given, is held while
    the predicate runs.  On exhaustion, fails via assert_greater_than so the
    exceeded limit is reported.
    """
    if attempts == float('inf') and timeout == float('inf'):
        timeout = 60
    deadline = timeout + time.time()
    tries = 0
    while tries < attempts and time.time() < deadline:
        if lock:
            with lock:
                ok = predicate()
        else:
            ok = predicate()
        if ok:
            return
        tries += 1
        time.sleep(0.5)
    # Report whichever limit was exhausted.
    assert_greater_than(attempts, tries)
    assert_greater_than(deadline, time.time())
    raise RuntimeError('Unreachable')
t_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
    """Create the datadir for node *n* and write its epgc.conf; return the path."""
    datadir = get_datadir_path(dirname, n)
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    rpc_u, rpc_p = rpc_auth_pair(n)
    settings = [
        "regtest=1",
        "rpcuser=" + rpc_u,
        "rpcpassword=" + rpc_p,
        "port=" + str(p2p_port(n)),
        "rpcport=" + str(rpc_port(n)),
        "listenonion=0",
        "enablezeromint=0",
        "staking=0",
        "spendzeroconfchange=1",
    ]
    with open(os.path.join(datadir, "epgc.conf"), 'w', encoding='utf8') as f:
        f.write("\n".join(settings) + "\n")
    return datadir
def rpc_auth_pair(n):
    """Return the deterministic (rpcuser, rpcpassword) credentials for node *n*.

    BUG FIX: the literals contained U+FFFD replacement characters (encoding
    corruption).  Restored the upstream Bitcoin-Core test-framework literals,
    which deliberately embed non-ASCII emoji to exercise credential encoding.
    """
    return 'rpcuser\U0001F4BB' + str(n), 'rpcpass\U0001F511' + str(n)
def get_datadir_path(dirname, n):
    """Return the datadir path for node *n* under *dirname*."""
    return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
    """Return the (user, password) RPC credentials for *datadir*.

    Reads rpcuser/rpcpassword from epgc.conf when present; a regtest
    .cookie file, if it exists, overrides both.  Raises ValueError when
    neither source yields credentials.
    """
    user = None
    password = None
    conf_path = os.path.join(datadir, "epgc.conf")
    if os.path.isfile(conf_path):
        with open(conf_path, 'r', encoding='utf8') as f:
            for line in f:
                if line.startswith("rpcuser="):
                    assert user is None  # at most one rpcuser line allowed
                    user = line.split("=")[1].strip("\n")
                if line.startswith("rpcpassword="):
                    assert password is None  # at most one rpcpassword line allowed
                    password = line.split("=")[1].strip("\n")
    cookie_path = os.path.join(datadir, "regtest", ".cookie")
    if os.path.isfile(cookie_path):
        with open(cookie_path, 'r') as f:
            parts = f.read().split(':')
        user = parts[0]
        password = parts[1]
    if user is None or password is None:
        raise ValueError("No RPC credentials")
    return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
    """Remove a leftover regtest .cookie file from *datadir*, if present."""
    if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
        logger.debug("Deleting leftover cookie file")
        os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
    """Return the BIP9 softfork status entry named *key* from *node*'s blockchain info."""
    info = node.getblockchaininfo()
    return info['bip9_softforks'][key]
def set_node_times(nodes, t):
    """Set the same mock time *t* on every node in *nodes*."""
    for node in nodes:
        node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
    """Disconnect every peer of *from_connection* identified as test node *node_num*."""
    for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
        try:
            from_connection.disconnectnode(addr)
        except JSONRPCException as e:
            # If this node is disconnected between calculating the peer id
            # and issuing the disconnect, don't worry about it.
            if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
                raise
    # wait to disconnect
    wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
    """Ask *from_connection* to connect to local test node *node_num* and wait for handshake."""
    ip_port = "127.0.0.1:" + str(p2p_port(node_num))
    from_connection.addnode(ip_port, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
    """Connect nodes[a] and nodes[b] to each other (both directions)."""
    connect_nodes(nodes[a], b)
    connect_nodes(nodes[b], a)
def connect_nodes_clique(nodes):
    """Bidirectionally connect every pair of nodes into a full mesh."""
    l = len(nodes)
    for a in range(l):
        for b in range(a, l):
            connect_nodes_bi(nodes, a, b)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
    """Wait until every connection reports the same tip (height and hash)."""
    # Use getblockcount() instead of waitforblockheight() to determine the
    # initial max height because the two RPCs look at different internal global
    # variables (chainActive vs latestBlock) and the former gets updated
    # earlier.
    time.sleep(5)
    maxheight = max(x.getblockcount() for x in rpc_connections)
    start_time = cur_time = time.time()
    while cur_time <= start_time + timeout:
        # waitforblockheight blocks for up to wait*1000 ms per connection.
        tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
        if all(t["height"] == maxheight for t in tips):
            if all(t["hash"] == tips[0]["hash"] for t in tips):
                return
            # All nodes at the target height but with different hashes: a fork.
            raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
                "".join("\n  {!r}".format(tip) for tip in tips)))
        cur_time = time.time()
    raise AssertionError("Block sync to height {} timed out:{}".format(
        maxheight, "".join("\n  {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
    """Wait until every connection reports the same best block hash."""
    while timeout > 0:
        best_hash = [x.getbestblockhash() for x in rpc_connections]
        if best_hash == [best_hash[0]] * len(best_hash):
            return
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
    """Wait until every connection reports identical mempool contents.

    *flush_scheduler* is accepted for interface compatibility but unused here.
    """
    while timeout > 0:
        pool = set(rpc_connections[0].getrawmempool())
        num_match = 1  # connection 0 trivially matches itself
        for i in range(1, len(rpc_connections)):
            if set(rpc_connections[i].getrawmempool()) == pool:
                num_match = num_match + 1
        if num_match == len(rpc_connections):
            return
        time.sleep(wait)
        timeout -= wait
    raise AssertionError("Mempool sync failed")
node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """Build the change output(s) for a transaction; returns {address: amount}."""
    outputs = {}
    amount = amount_out + fee
    change = amount_in - amount
    if change > amount * 2:
        # Big input: split the change across an extra fresh address, rounding
        # the first half down to satoshi precision.
        change_address = from_node.getnewaddress()
        outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - amount - outputs[change_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """Send *amount* between two random nodes with a randomized fee.

    Returns (txid, hex-encoded transaction, fee).
    """
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (total_in, inputs) = gather_inputs(from_node, amount + fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)
    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)
    return (txid, signresult["hex"], fee)
def create_confirmed_utxos(fee, node, count):
    """Ensure *node*'s wallet holds at least *count* confirmed utxos and return them.

    Mines blocks for funds, then splits utxos in two (netting +1 utxo per
    split) until the target is reached, mining the splits to confirm them.
    """
    # Extra +101 blocks — presumably to mature coinbase outputs; confirm.
    to_generate = int(0.5 * count) + 101
    while to_generate > 0:
        node.generate(min(25, to_generate))
        to_generate -= 25
    utxos = node.listunspent()
    iterations = count - len(utxos)
    addr1 = node.getnewaddress()
    addr2 = node.getnewaddress()
    if iterations <= 0:
        return utxos
    for i in range(iterations):
        t = utxos.pop()
        inputs = []
        inputs.append({"txid": t["txid"], "vout": t["vout"]})
        outputs = {}
        send_value = t['amount'] - fee
        outputs[addr1] = float(satoshi_round(send_value / 2))
        outputs[addr2] = float(satoshi_round(send_value / 2))
        raw_tx = node.createrawtransaction(inputs, outputs)
        signed_tx = node.signrawtransaction(raw_tx)["hex"]
        node.sendrawtransaction(signed_tx)
    # Confirm all pending splits before recounting.
    while (node.getmempoolinfo()['size'] > 0):
        node.generate(1)
    utxos = node.listunspent()
    assert(len(utxos) >= count)
    return utxos
def gen_return_txouts():
    """Return a hex string of 128 large OP_RETURN txouts for padding transactions."""
    # create one script_pubkey
    script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
    for i in range(512):
        script_pubkey = script_pubkey + "01"
    # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
    txouts = "81"
    for k in range(128):
        # zero txout value (8 bytes), script length prefix, then the script
        txouts = txouts + "0000000000000000"
        txouts = txouts + "fd0402"
        txouts = txouts + script_pubkey
    return txouts
def create_tx(node, coinbase, to_address, amount):
    """Create and sign a tx spending output 0 of *coinbase* to *to_address*; return the hex."""
    inputs = [{"txid": coinbase, "vout": 0}]
    outputs = {to_address: amount}
    rawtx = node.createrawtransaction(inputs, outputs)
    signresult = node.signrawtransaction(rawtx)
    assert_equal(signresult["complete"], True)
    return signresult["hex"]
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
    """Spend *num* utxos, splicing *txouts* into each raw tx to bloat it; return txids."""
    addr = node.getnewaddress()
    txids = []
    for _ in range(num):
        t = utxos.pop()
        inputs = [{"txid": t["txid"], "vout": t["vout"]}]
        outputs = {}
        change = t['amount'] - fee
        outputs[addr] = float(satoshi_round(change))
        rawtx = node.createrawtransaction(inputs, outputs)
        # Splice the OP_RETURN outputs in, replacing the two hex chars at
        # offset 92-93 — presumably the txout count field; confirm.
        newtx = rawtx[0:92]
        newtx = newtx + txouts
        newtx = newtx + rawtx[94:]
        # SIGHASH "NONE" so the spliced outputs don't invalidate the signature.
        signresult = node.signrawtransaction(newtx, None, None, "NONE")
        txid = node.sendrawtransaction(signresult["hex"], True)
        txids.append(txid)
    return txids
def mine_large_block(node, utxos=None):
    """Fill a block with 14 large transactions and mine it.

    *utxos*, when given, supplies (and is mutated to consume) the spends.
    """
    num = 14
    txouts = gen_return_txouts()
    utxos = utxos if utxos is not None else []
    if len(utxos) < num:
        # Not enough outputs supplied; refill the (shared) list from the wallet.
        utxos.clear()
        utxos.extend(node.listunspent())
    fee = 100 * node.getnetworkinfo()["relayfee"]
    create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
    node.generate(1)
T_FEE = 0.01
SPORK_ACTIVATION_TIME = 1563253447
SPORK_DEACTIVATION_TIME = 4070908800
def DecimalAmt(x):
    """Return Decimal from float for equality checks against rpc outputs."""
    return Decimal("{:0.8f}".format(x))
| true | true |
f7309534df4a2fc8176f000b46935824e18a7d45 | 6,764 | py | Python | bindings/python/ensmallen_graph/datasets/string/kandleriavitulina.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/kandleriavitulina.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/kandleriavitulina.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Kandleria vitulina.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:43:58.503677
The undirected graph Kandleria vitulina has 2015 nodes and 162591 weighted
edges, of which none are self-loops. The graph is dense as it has a density
of 0.08013 and has 6 connected components, where the component with most
nodes has 2004 nodes and the component with the least nodes has 2 nodes.
The graph median node degree is 137, the mean node degree is 161.38, and
the node degree mode is 3. The top 5 most central nodes are 1410658.JHWI01000027_gene1818
(degree 942), 1410658.JHWI01000005_gene908 (degree 696), 1410658.JHWI01000008_gene614
(degree 672), 1410658.JHWI01000040_gene2018 (degree 626) and 1410658.JHWI01000014_gene961
(degree 598).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import KandleriaVitulina
# Then load the graph
graph = KandleriaVitulina()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def KandleriaVitulina(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Kandleria vitulina graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of the Kandleria vitulina graph.

    Usage example
    ----------------------
    .. code:: python

        from ensmallen_graph.datasets.string import KandleriaVitulina
        graph = KandleriaVitulina()
        print(graph)
    """
    retriever = AutomaticallyRetrievedGraph(
        graph_name="KandleriaVitulina",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )
    return retriever()
| 35.413613 | 223 | 0.706978 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph
def KandleriaVitulina(
    directed: bool = False,
    verbose: int = 2,
    cache_path: str = "graphs/string",
    **additional_graph_kwargs: Dict
) -> EnsmallenGraph:
    """Return a new instance of the Kandleria vitulina graph, retrieved from STRING.

    Parameters: directed — load as directed; verbose — loading-bar level;
    cache_path — download cache directory; additional_graph_kwargs — passed
    through to the retriever.
    """
    return AutomaticallyRetrievedGraph(
        graph_name="KandleriaVitulina",
        dataset="string",
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
| true | true |
f73095a96bda44dd33e649a8f9b2f81b0f8a2d5e | 108 | py | Python | modules/2.79/bpy/types/NodeSocketInterfaceIntUnsigned.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/NodeSocketInterfaceIntUnsigned.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | modules/2.79/bpy/types/NodeSocketInterfaceIntUnsigned.py | cmbasnett/fake-bpy-module | acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55 | [
"MIT"
] | null | null | null | class NodeSocketInterfaceIntUnsigned:
default_value = None
max_value = None
min_value = None
| 13.5 | 37 | 0.722222 | class NodeSocketInterfaceIntUnsigned:
default_value = None
max_value = None
min_value = None
| true | true |
f7309823f58463b82e823f3fd4ecc77467f835fd | 11,759 | py | Python | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 183 | 2015-01-11T13:01:01.000Z | 2022-02-08T04:45:33.000Z | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 13 | 2015-05-12T17:39:42.000Z | 2018-07-29T18:01:38.000Z | pml/engineer_tests.py | gatapia/py_ml_utils | 844d8b62a7c5cc0a80f4f62c0bfda092aac57ade | [
"MIT"
] | 166 | 2015-01-28T18:05:55.000Z | 2022-02-08T04:45:34.000Z | from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
  """Tests for the DataFrame 'engineer' feature-engineering extension.

  Column-name prefixes encode types: 'c_' categorical, 'n_' numerical.
  df.engineer('<expr>') appends a new column named after the expression,
  prefixed with the type of the engineered feature.  Fixes applied here:
  the Python 2-only builtin `long` (a NameError on Python 3) is replaced
  by np.int64, and the vacuous `columns.values.sort()` comparison (both
  sides were None) is replaced by a real sorted-columns comparison.
  """

  # --- concat: string concatenation of two or more columns ---
  def test_concat(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
    df.engineer('concat(c_1, c_2)')
    self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
      np.array(['ad', 'be', 'cf'], 'object')))

  def test_concat_3_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
    df.engineer('concat(c_3, c_1, c_2)')
    self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
      np.array(['had', 'ibe', 'jcf'], 'object')))

  def test_concat_with_numerical_col(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
    df.engineer('concat(c_1,n_2)')
    self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
      np.array(['a1', 'b2', 'c3'], 'object')))

  def test_concat_with_numerical_col_3_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
    df.engineer('concat(n_3,c_1,n_2)')
    self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
      np.array(['4a1', '5b2', '6c3'], 'object')))

  # --- mult: element-wise product of numerical columns ---
  def test_multiplication(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('mult(n_2, n_3)')
    # np.int64 replaces the py2-only builtin `long` (same dtype on py2/py3)
    self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
      np.array([4, 10, 18], np.int64)))

  def test_multiplication_3_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('mult(n_2, n_3, n_4)')
    self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
      np.array([4*7, 80, 18*9], np.int64)))

  # --- pow / lg / sqrt: unary transforms, on all numerical cols or a subset ---
  def test_square_on_whole_data_frame(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('pow(2)')
    np.testing.assert_array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, 1*1, 4*4, 7*7],
        ['b', 2, 5, 8, 2*2, 5*5, 8*8],
        ['c', 3, 6, 9, 3*3, 6*6, 9*9],
        ], 'object'))

  def test_square_on_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('pow(n_3, 2)')
    np.testing.assert_array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, 4*4],
        ['b', 2, 5, 8, 5*5],
        ['c', 3, 6, 9, 6*6],
        ], 'object'))

  def test_log_on_whole_data_frame(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('lg()')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
        ['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
        ['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
        ], 'object')))

  def test_log_on_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('lg(n_3)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.log(4)],
        ['b', 2, 5, 8, math.log(5)],
        ['c', 3, 6, 9, math.log(6)],
        ], 'object')))

  def test_sqrt_on_whole_data_frame(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('sqrt()')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
        ['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
        ['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
        ], 'object')))

  def test_sqrt_on_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('sqrt(n_3)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.sqrt(4)],
        ['b', 2, 5, 8, math.sqrt(5)],
        ['c', 3, 6, 9, math.sqrt(6)],
        ], 'object')))

  # --- rolling window aggregations on a single named column ---
  # Window size 3, so the first two entries are NaN.
  def test_rolling_sum_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_sum(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])

  def test_rolling_mean_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_mean(n_1,3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)

  def test_rolling_median_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_median(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])

  def test_rolling_min_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_min(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])

  def test_rolling_max_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_max(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])

  def test_rolling_std_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_std(n_1,3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)

  def test_rolling_var_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_var(n_1,3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)

  # --- rolling window aggregations applied to every numerical column ---
  def test_rolling_sum_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_sum(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])

  def test_rolling_mean_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_mean(3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)

  def test_rolling_median_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_median(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])

  def test_rolling_min_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_min(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])

  def test_rolling_max_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_max(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])

  def test_rolling_std_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_std(3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)

  def test_rolling_var_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_var(3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)

  # --- chaining of engineering expressions ---
  def test_method_chaining(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
      'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.\
      engineer('concat(c_1, c_2)').\
      engineer('concat(c_1, n_2)').\
      engineer('mult(n_2, n_3)').\
      engineer('lg(n_2)').\
      engineer('pow(n_3, 2)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
        ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
        ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))

  def test_chaining_single_call_semi_col_sep(self):
    # ';'-separated expressions in one call are equivalent to chained calls
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
      'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
        ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
        ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))

  def test_chaining_single_with_arr_arg(self):
    # a list of expressions is also accepted
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
      'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
        ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
        ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))

  def test_long_method_chains(self):
    # a nested expression must create all of its intermediate features,
    # identical to building them up step by step
    df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
    df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
    df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
    df2.engineer('mult(n_1,n_2);pow(n_1,3)')
    df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
    df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
    # fixed: `columns.values.sort()` returns None, which made the original
    # assertion compare None with None; compare sorted column lists instead
    self.assertEqual(sorted(df1.columns), sorted(df2.columns))
    np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values)
    np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)'])
    np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))'])
    np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))'])
    np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'])
| 46.478261 | 133 | 0.541628 | from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
  """Tests for the DataFrame 'engineer' feature-engineering extension.

  Column-name prefixes encode types: 'c_' categorical, 'n_' numerical.
  df.engineer('<expr>') appends a new column named after the expression,
  prefixed with the type of the engineered feature.  Fixes applied here:
  the Python 2-only builtin `long` (a NameError on Python 3) is replaced
  by np.int64, and the vacuous `columns.values.sort()` comparison (both
  sides were None) is replaced by a real sorted-columns comparison.
  """

  # --- concat: string concatenation of two or more columns ---
  def test_concat(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
    df.engineer('concat(c_1, c_2)')
    self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
      np.array(['ad', 'be', 'cf'], 'object')))

  def test_concat_3_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
    df.engineer('concat(c_3, c_1, c_2)')
    self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
      np.array(['had', 'ibe', 'jcf'], 'object')))

  def test_concat_with_numerical_col(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3]})
    df.engineer('concat(c_1,n_2)')
    self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
      np.array(['a1', 'b2', 'c3'], 'object')))

  def test_concat_with_numerical_col_3_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
    df.engineer('concat(n_3,c_1,n_2)')
    self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
      np.array(['4a1', '5b2', '6c3'], 'object')))

  # --- mult: element-wise product of numerical columns ---
  def test_multiplication(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('mult(n_2, n_3)')
    # np.int64 replaces the py2-only builtin `long` (same dtype on py2/py3)
    self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
      np.array([4, 10, 18], np.int64)))

  def test_multiplication_3_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('mult(n_2, n_3, n_4)')
    self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
      np.array([4*7, 80, 18*9], np.int64)))

  # --- pow / lg / sqrt: unary transforms, on all numerical cols or a subset ---
  def test_square_on_whole_data_frame(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('pow(2)')
    np.testing.assert_array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, 1*1, 4*4, 7*7],
        ['b', 2, 5, 8, 2*2, 5*5, 8*8],
        ['c', 3, 6, 9, 3*3, 6*6, 9*9],
        ], 'object'))

  def test_square_on_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('pow(n_3, 2)')
    np.testing.assert_array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, 4*4],
        ['b', 2, 5, 8, 5*5],
        ['c', 3, 6, 9, 6*6],
        ], 'object'))

  def test_log_on_whole_data_frame(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('lg()')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
        ['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
        ['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
        ], 'object')))

  def test_log_on_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('lg(n_3)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.log(4)],
        ['b', 2, 5, 8, math.log(5)],
        ['c', 3, 6, 9, math.log(6)],
        ], 'object')))

  def test_sqrt_on_whole_data_frame(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('sqrt()')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
        ['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
        ['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
        ], 'object')))

  def test_sqrt_on_cols(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('sqrt(n_3)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 1, 4, 7, math.sqrt(4)],
        ['b', 2, 5, 8, math.sqrt(5)],
        ['c', 3, 6, 9, math.sqrt(6)],
        ], 'object')))

  # --- rolling window aggregations on a single named column ---
  # Window size 3, so the first two entries are NaN.
  def test_rolling_sum_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_sum(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])

  def test_rolling_mean_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_mean(n_1,3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)

  def test_rolling_median_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_median(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])

  def test_rolling_min_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_min(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])

  def test_rolling_max_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_max(n_1,3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])

  def test_rolling_std_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_std(n_1,3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)

  def test_rolling_var_on_single_col(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
    col = 'rolling_var(n_1,3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)

  # --- rolling window aggregations applied to every numerical column ---
  def test_rolling_sum_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_sum(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])

  def test_rolling_mean_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_mean(3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)

  def test_rolling_median_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_median(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])

  def test_rolling_min_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_min(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])

  def test_rolling_max_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_max(3)'
    df.engineer(col)
    np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
    np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])

  def test_rolling_std_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_std(3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)

  def test_rolling_var_on_multi_cols(self):
    df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
    col = 'rolling_var(3)'
    df.engineer(col)
    np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
    np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)

  # --- chaining of engineering expressions ---
  def test_method_chaining(self):
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
      'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.\
      engineer('concat(c_1, c_2)').\
      engineer('concat(c_1, n_2)').\
      engineer('mult(n_2, n_3)').\
      engineer('lg(n_2)').\
      engineer('pow(n_3, 2)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
        ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
        ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))

  def test_chaining_single_call_semi_col_sep(self):
    # ';'-separated expressions in one call are equivalent to chained calls
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
      'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
        ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
        ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))

  def test_chaining_single_with_arr_arg(self):
    # a list of expressions is also accepted
    df = pd.DataFrame({'c_1':['a', 'b', 'c'], 'c_2':['d', 'e', 'f'],
      'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
    df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
    self.assertTrue(np.array_equal(df.values,
      np.array([
        ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
        ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
        ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
        ], 'object')))

  def test_long_method_chains(self):
    # a nested expression must create all of its intermediate features,
    # identical to building them up step by step
    df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
    df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
    df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
    df2.engineer('mult(n_1,n_2);pow(n_1,3)')
    df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
    df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
    # fixed: `columns.values.sort()` returns None, which made the original
    # assertion compare None with None; compare sorted column lists instead
    self.assertEqual(sorted(df1.columns), sorted(df2.columns))
    np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values)
    np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)'])
    np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))'])
    np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))'])
    np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'])
| true | true |
f730982d2495ed4b89b4a58fb60e72212470ea17 | 11,358 | py | Python | nanopores/tools/errorest.py | mitschabaude/nanopores | b1a7effed8e99ef862dd24cd9aada577d6ce28e1 | [
"MIT"
] | 8 | 2016-09-07T01:59:31.000Z | 2021-03-06T12:14:31.000Z | nanopores/tools/errorest.py | mitschabaude/nanopores | b1a7effed8e99ef862dd24cd9aada577d6ce28e1 | [
"MIT"
] | null | null | null | nanopores/tools/errorest.py | mitschabaude/nanopores | b1a7effed8e99ef862dd24cd9aada577d6ce28e1 | [
"MIT"
] | 4 | 2017-12-06T17:43:01.000Z | 2020-05-01T05:41:14.000Z | from dolfin import *
import math, numpy
__all__ = ["edge_residual_indicator", "poisson_indicator", "zz_indicator",
"pb_indicator", "Estimator", "pb_indicator_GO", "pb_indicator_GO_cheap"]
class Estimator(object):
    """Collects (N, f(N)) pairs describing the convergence of an error
    (or a similar quantity) as a function of problem size N."""

    def __init__(self, name):
        self.name = name
        self.pairs = []

    def __iadd__(self, pair):
        # "est += (N, err)" records one data point
        self.pairs.append(pair)
        return self

    # list-like behavior, delegated to the underlying pair list
    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, i):
        return self.pairs[i]

    def __setitem__(self, i, val):
        self.pairs[i] = val

    def __delitem__(self, i):
        del self.pairs[i]

    def __iter__(self):
        return iter(self.pairs)

    def split(self):
        """Return the N values and the f(N) values as two separate lists."""
        Ns = [N for N, _ in self.pairs]
        fs = [fN for _, fN in self.pairs]
        return (Ns, fs)

    def rates(self):
        """Assuming f(N) = c*N^a, return the observed exponents a between
        consecutive data points."""
        successive = zip(self.pairs[1:], self.pairs[:-1])
        return [math.log(fN/fK)/math.log(float(N)/K)
                for (N, fN), (K, fK) in successive]

    def save_to_matlab(self):
        """Dump the pairs to est_<name>.mat for plotting with matlab."""
        from scipy.io import savemat
        from numpy import array
        data = array(self.pairs)
        savemat("est_%s.mat" % self.name, {"N": data[:, 0], "err": data[:, 1]})

    def plot(self, rate=None, fig=True, style="s-"):
        """Log-log error plot via matplotlib, with an optional O(N^rate)
        reference line."""
        from matplotlib.pyplot import figure, loglog, xlabel, ylabel, legend, show
        if fig is True:
            figure()
        N, err = self.split()
        loglog(N, err, style, label=self.name)
        if rate and N[0] != 0:
            ref = [err[0]/(N[0]**rate)*n**rate for n in N]
            loglog(N, ref, 'k--', label=r"$O(N^{%.2g})$" % rate)
        xlabel("degrees of freedom")
        ylabel("rel. error")
        legend(loc='upper right')

    def newtonplot(self, fig=True, style="s-"):
        """Semi-log plot of error versus iteration count (Newton convergence)."""
        from matplotlib.pyplot import semilogy, xlabel, ylabel, legend, show, figure
        if fig is True:
            figure()
        i, err = self.split()
        semilogy(i, err, style, label=str(self.name))
        xlabel("# iterations")
        ylabel("rel. error")
        legend(loc='upper right')
def zz_indicator(v,flux=None,dx=None):
    """ v is assumed to be scalar and of piece-wise polynomial degree >= 1 """
    # Zienkiewicz-Zhu-type a posteriori indicator: compare the raw
    # (discontinuous) flux with a smoothed, recovered flux; the cellwise
    # L2 distance between the two serves as local error indicator.
    V = v.function_space()
    mesh = V.mesh()
    # DV holds the raw flux (one polynomial degree lower, discontinuous);
    # DV_e is the continuous space used for the recovered flux
    DV = VectorFunctionSpace(mesh, 'DG', V.ufl_element().degree()-1)
    #DV_e = VectorFunctionSpace(mesh, 'DG', V.ufl_element().degree())
    DV_e = VectorFunctionSpace(mesh, 'CG', V.ufl_element().degree())
    # DG0: one indicator value per cell
    DG = FunctionSpace(mesh, 'DG', 0)
    if not flux:
        flux = grad(v)
    # flux recovery
    # TODO: is there a better way to do this??
    # (project is slow and in theory unnessecary)
    g = project(flux,DV)
    g_e = project(g,DV_e)
    #g_e = Function(DV_e)
    #g_e.extrapolate(g)
    if not dx:
        dx = Measure("dx")
    # assemble the cellwise squared L2 distance into a DG0 function
    w = TestFunction(DG)
    r = w*inner(g-g_e,g-g_e)*dx
    ind = Function(DG)
    assemble(r,tensor=ind.vector())
    # global relative error estimate (recovered vs. raw flux)
    err = errornorm(g_e,g,'L2')/norm(g,'L2')
    return ind,err
def edge_residual_indicator(mesh,flux,force=Constant(0.0)):
    """Classical residual-based indicator: h-weighted cell residual plus
    flux jumps across interior facets, assembled cellwise into a DG0
    function.  NOTE(review): the default `force=Constant(0.0)` is created
    once at import time and shared between calls -- harmless as long as
    callers never modify it.
    """
    residual = div(flux) + force
    W = FunctionSpace(mesh,"DG",0)
    w = TestFunction(W)
    n = FacetNormal(mesh)
    h = CellSize(mesh)
    # (h*residual)^2 on cells + h*[flux.n]^2 on interior facets
    r = w*(h*residual)**2*dx + avg(w)*avg(h)*jump(flux,n)**2*dS
    indicators = Function(W)
    b = indicators.vector()
    assemble(r,tensor=b)
    return indicators
def poisson_indicator(geo, u, f=None, cyl=False):
    """Residual-based error indicator and global relative error estimate
    for a Poisson-type problem on a nanopores geometry.

    geo .. geometry object (provides mesh, measures, pw-const coefficients)
    u   .. computed potential
    f   .. optional extra volume source added to the residual
    cyl .. if True, weigh all integrals with 2*pi*r (cylindrical coords)
    Returns (indicators, error): a DG0 function of cellwise indicators and
    a scalar error estimate relative to the energy norm of u.
    """
    mesh = geo.mesh
    W = FunctionSpace(mesh,"DG",0)
    w = TestFunction(W)
    n = FacetNormal(mesh)
    # mesh size rescaled to physical units via the length scale
    h = CellSize(mesh)/geo.physics.lscale
    dS = geo.dS()
    dx = geo.dx()
    Aperm = geo.pwconst("permittivity")
    volcharge = geo.pwconst("volcharge")
    flux = Aperm*geo.physics.grad(u)
    # radial weight for cylindrical symmetry, else 1
    r = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
    residual = geo.physics.div(flux) + volcharge
    if f:
        residual = residual + f
    lscale = geo.physics.lscale
    # dimensional correction factor lscale^i for facet terms
    def Clscale(i):
        return Constant( lscale**i )
    # res: cellwise-localized residual (tested with DG0 w); restot: its sum
    res = Clscale(1)*avg(w)*avg(h)*jump(flux,n)**2*r('+')*dS + w*h**2*residual**2*r*dx
    restot = Clscale(1)*avg(h)*jump(flux,n)**2*r('+')*dS + h**2*residual**2*r*dx
    energynorm = inner(flux, geo.physics.grad(u))*r*dx
    indicators = Function(W)
    # relative error in the energy norm
    error = sqrt(assemble(restot)/assemble(energynorm))
    assemble(res,tensor=indicators.vector())
    return indicators, error
def pb_indicator(geo, phys, u, cyl=False):
    """Error indicator for the linearized Poisson-Boltzmann equation:
    the Poisson indicator with the ionic screening term added as source."""
    c0 = phys.bulkcon
    # chi = 1 in the ion-accessible region, 0 in the solid
    chi = geo.pwconst("ions", value={"ions":1.,"solid":0.})
    f = Constant(-phys.cFarad*2*c0/phys.UT)*u*chi
    return poisson_indicator(geo, u, f=f, cyl=cyl)
def pb_indicator_GO(geo, phys, u, z, cyl=False):
    """Goal-oriented (dual-weighted residual) error indicator for the
    linearized Poisson-Boltzmann equation.

    The dual solution z is extrapolated to quadratic order (Ez); the weight
    w = Ez - z multiplies the local residuals.  Returns
    (indicators, error_sum, error_rep, cheap_sum, goal, goal_ex).
    NOTE: Python 2 module (print statements, legacy dolfin API).
    """
    # u .. primal solution
    # z .. dual solution
    mesh = geo.mesh
    V = FunctionSpace(mesh, "DG", 0)
    W = FunctionSpace(mesh, "CG", 1)
    EW = FunctionSpace(mesh, "CG", 2)
    Ez = Function(EW)
    Ez.extrapolate(z)
    w = Ez - z #interpolate(Ez, W)
    v = TestFunction(V)
    n = FacetNormal(mesh)
    h = CellSize(mesh)
    # radial weight for cylindrical symmetry, else 1
    r = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.)
    dS = geo.dS() # interior facets
    ds = geo.ds() # exterior facets
    dx0 = geo.dx("ions")
    dx = geo.dx()
    c0 = phys.bulkcon
    cFarad = phys.cFarad
    UT = phys.UT
    eps = geo.pwconst('permittivity')
    # dimensional correction factor lscale^i for facet/boundary terms
    def Clscale(i):
        return Constant( (phys.lscale)**i )
    flux = eps*phys.grad(u)
    # local residuals
    def rform(w):
        return -v*phys.div(flux)*w*r*dx \
        +v*Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
        -geo.linearRHS(v*w*r, "volcharge") \
        +Clscale(1)*(
        +avg(v)*jump(n, flux*w)*r('+')*dS \
        +v*inner(n, flux)*w*r*ds \
        -geo.NeumannRHS(v*w*r, "surfcharge"))
    # global residual
    def R(w):
        return assemble(
            inner(flux, phys.grad(w))*r*dx \
            +Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
            -geo.linearRHS(w*r, "volcharge")
            -Clscale(1)*geo.NeumannRHS(w*r, "surfcharge"))
    # global functional value
    def J(w):
        return assemble(
            inner(flux, phys.grad(w))*r*dx \
            +Constant(cFarad*2*c0/UT)*u*w*r*dx0)
    # assemble |local residuals| weighted with w = Ez - z
    indicators = Function(V)
    vec = indicators.vector()
    assemble(rform(w), tensor=vec)
    vec[:] = numpy.abs(vec[:])
    goal = J(z)
    goal_ex = J(Ez)
    # precise relevant scale for error (abs value of functional)
    scale = abs(1./goal) if not goal == 0. else 1e12*(1./phys.lscale**3)
    #scale = 1e12*(1./phys.lscale**3) # rough scale (cheaper)
    error_res = abs(R(z))*scale   # should vanish by Galerkin orthogonality
    error_rep = abs(R(Ez))*scale  # error representation via extrapolated dual
    error_sum = sum(vec)*scale
    # cheaper estimator without extrapolation
    indicators2 = Function(V)
    vec2 = indicators2.vector()
    assemble(rform(z), tensor=vec2)
    vec2[:] = numpy.abs(vec2[:])
    cheap_sum = sum(vec2)*scale
    #plotind = plot(indicators2, title="indicator pb GO", elevate=0.0, interactive=True)
    # FIXME ?
    print "Goal (dual):", goal
    print "Goal (extrapolated dual):", goal_ex
    print "This should be zero (dual global residual):", error_res
    print "Extrapolated dual residual:", error_rep
    print "indicator sum:", error_sum
    print "ind sum w/o extrap:", cheap_sum
    # return indicators, error_rep, error_sum
    return indicators, error_sum, error_rep, cheap_sum, goal, goal_ex
def pb_indicator_GO_cheap(geo, phys, u, z, cyl=False):
    """Cheap variant of pb_indicator_GO: weighs the local residuals with
    the dual solution z itself instead of an extrapolated dual, avoiding
    the costly quadratic extrapolation.  Returns (indicators, error_sum,
    goal).  NOTE: Python 2 module (print statements, legacy dolfin API).
    """
    # u .. primal solution
    # z .. dual solution
    mesh = geo.mesh
    V = FunctionSpace(mesh, "DG", 0)
    v = TestFunction(V)
    n = FacetNormal(mesh)
    h = CellSize(mesh)
    # radial weight for cylindrical symmetry, else 1
    r = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.)
    dS = geo.dS() # interior facets
    ds = geo.ds() # exterior facets
    dx0 = geo.dx("ions")
    dx = geo.dx()
    c0 = phys.bulkcon
    cFarad = phys.cFarad
    UT = phys.UT
    eps = geo.pwconst('permittivity')
    # dimensional correction factor lscale^i for facet/boundary terms
    def Clscale(i):
        return Constant( (phys.lscale)**i )
    flux = eps*phys.grad(u)
    # local residuals
    def rform(w):
        return -v*phys.div(flux)*w*r*dx \
        +v*Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
        -geo.linearRHS(v*w*r, "volcharge") \
        +Clscale(1)*(
        +avg(v)*jump(n, flux*w)*r('+')*dS \
        +v*inner(n, flux)*w*r*ds \
        -geo.NeumannRHS(v*w*r, "surfcharge"))
    # global residual
    def R(w):
        return assemble(
            inner(flux, phys.grad(w))*r*dx \
            +Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
            -geo.linearRHS(w*r, "volcharge")
            -Clscale(1)*geo.NeumannRHS(w*r, "surfcharge"))
    # global functional value
    def J(w):
        return assemble(
            inner(flux, phys.grad(w))*r*dx \
            +Constant(cFarad*2*c0/UT)*u*w*r*dx0)
    goal = J(z)
    scale = abs(1./goal) # precise relevant scale for error (abs value of functional)
    #scale = 1e12*(1./phys.lscale**3) # rough scale (cheaper)
    error_res = abs(R(z))*scale   # should vanish by Galerkin orthogonality
    # cheap estimator without extrapolation
    indicators = Function(V)
    vec = indicators.vector()
    assemble(rform(z), tensor=vec)
    vec[:] = numpy.abs(vec[:])
    error_sum = sum(vec)*scale
    #plotind = plot(indicators2, title="indicator pb GO", elevate=0.0, interactive=True)
    print "Goal (dual):", goal
    print "This should be zero (dual global residual):", error_res
    print "indicator sum (does not make sense as error estimate):", error_sum
    # return indicators, error_rep, error_sum
    return indicators, error_sum, goal
def simple_pb_indicator_GO(geo, phys, u, z):
    """Streamlined goal-oriented indicator for the linearized
    Poisson-Boltzmann equation (no diagnostics printed, no cheap variant).

    Uses the radial weight phys.r2pi directly (no cyl flag) and returns
    (indicators, error_rep) with error_rep = |R(Ez)| scaled by the goal.
    """
    # u .. primal solution
    # z .. dual solution
    mesh = geo.mesh
    V = FunctionSpace(mesh, "DG", 0)
    EW = FunctionSpace(mesh, "CG", 2)
    Ez = Function(EW)
    Ez.extrapolate(z)
    w = Ez - z #interpolate(Ez, W)
    v = TestFunction(V)
    n = FacetNormal(mesh)
    r = phys.r2pi
    dS = geo.dS() # interior facets
    ds = geo.ds() # exterior facets
    dx0 = geo.dx("ions")
    dx = geo.dx()
    c0 = phys.bulkcon
    cFarad = phys.cFarad
    UT = phys.UT
    eps = geo.pwconst('permittivity')
    # screening coefficient of the linearized PB term
    k = Constant(cFarad*2.*c0/UT)
    # dimensional correction factor lscale^i for facet/boundary terms
    def Clscale(i):
        return Constant( (phys.lscale)**i )
    flux = eps*phys.grad(u)
    # local residuals
    def rform(w):
        return -v*phys.div(flux)*w*r*dx + v*k*u*w*r*dx0 \
        -geo.linearRHS(v*w*r, "volcharge") \
        +Clscale(1)*(
        +avg(v)*jump(n, flux*w)*r('+')*dS \
        +v*inner(n, flux)*w*r*ds \
        -geo.NeumannRHS(v*w*r, "surfcharge"))
    # global residual
    def R(w):
        return assemble(
            inner(flux, phys.grad(w))*r*dx + k*u*w*r*dx0 \
            -geo.linearRHS(w*r, "volcharge")
            -Clscale(1)*geo.NeumannRHS(w*r, "surfcharge"))
    # global functional value
    def J(w):
        return assemble(inner(flux, phys.grad(w))*r*dx + k*u*w*r*dx0)
    # assemble |local residuals| weighted with w = Ez - z
    indicators = Function(V)
    vec = indicators.vector()
    assemble(rform(w), tensor=vec)
    vec[:] = numpy.abs(vec[:])
    # precise relevant scale for error (abs value of functional)
    goal = J(z)
    scale = abs(1./goal) if not goal == 0. else 1e12*(1./phys.lscale**3)
    error_rep = abs(R(Ez))*scale
    return indicators, error_rep
| 30.127321 | 94 | 0.588924 | from dolfin import *
import math, numpy
__all__ = ["edge_residual_indicator", "poisson_indicator", "zz_indicator",
"pb_indicator", "Estimator", "pb_indicator_GO", "pb_indicator_GO_cheap"]
class Estimator(object):
    """Collects (N, f(N)) pairs describing the convergence of an error
    (or a similar quantity) as a function of problem size N."""

    def __init__(self, name):
        self.name = name
        self.pairs = []

    def __iadd__(self, pair):
        # "est += (N, err)" records one data point
        self.pairs.append(pair)
        return self

    # list-like behavior, delegated to the underlying pair list
    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, i):
        return self.pairs[i]

    def __setitem__(self, i, val):
        self.pairs[i] = val

    def __delitem__(self, i):
        del self.pairs[i]

    def __iter__(self):
        return iter(self.pairs)

    def split(self):
        """Return the N values and the f(N) values as two separate lists."""
        Ns = [N for N, _ in self.pairs]
        fs = [fN for _, fN in self.pairs]
        return (Ns, fs)

    def rates(self):
        """Assuming f(N) = c*N^a, return the observed exponents a between
        consecutive data points."""
        successive = zip(self.pairs[1:], self.pairs[:-1])
        return [math.log(fN/fK)/math.log(float(N)/K)
                for (N, fN), (K, fK) in successive]

    def save_to_matlab(self):
        """Dump the pairs to est_<name>.mat for plotting with matlab."""
        from scipy.io import savemat
        from numpy import array
        data = array(self.pairs)
        savemat("est_%s.mat" % self.name, {"N": data[:, 0], "err": data[:, 1]})

    def plot(self, rate=None, fig=True, style="s-"):
        """Log-log error plot via matplotlib, with an optional O(N^rate)
        reference line."""
        from matplotlib.pyplot import figure, loglog, xlabel, ylabel, legend, show
        if fig is True:
            figure()
        N, err = self.split()
        loglog(N, err, style, label=self.name)
        if rate and N[0] != 0:
            ref = [err[0]/(N[0]**rate)*n**rate for n in N]
            loglog(N, ref, 'k--', label=r"$O(N^{%.2g})$" % rate)
        xlabel("degrees of freedom")
        ylabel("rel. error")
        legend(loc='upper right')

    def newtonplot(self, fig=True, style="s-"):
        """Semi-log plot of error versus iteration count (Newton convergence)."""
        from matplotlib.pyplot import semilogy, xlabel, ylabel, legend, show, figure
        if fig is True:
            figure()
        i, err = self.split()
        semilogy(i, err, style, label=str(self.name))
        xlabel("# iterations")
        ylabel("rel. error")
        legend(loc='upper right')
def zz_indicator(v,flux=None,dx=None):
    """ v is assumed to be scalar and of piece-wise polynomial degree >= 1 """
    # Zienkiewicz-Zhu-type a posteriori indicator: compare the raw
    # (discontinuous) flux with a smoothed, recovered flux; the cellwise
    # L2 distance between the two serves as local error indicator.
    V = v.function_space()
    mesh = V.mesh()
    # DV holds the raw flux (one polynomial degree lower, discontinuous);
    # DV_e is the continuous space used for the recovered flux
    DV = VectorFunctionSpace(mesh, 'DG', V.ufl_element().degree()-1)
    DV_e = VectorFunctionSpace(mesh, 'CG', V.ufl_element().degree())
    # DG0: one indicator value per cell
    DG = FunctionSpace(mesh, 'DG', 0)
    if not flux:
        flux = grad(v)
    # flux recovery by projection (slow, but simple)
    g = project(flux,DV)
    g_e = project(g,DV_e)
    if not dx:
        dx = Measure("dx")
    # assemble the cellwise squared L2 distance into a DG0 function
    w = TestFunction(DG)
    r = w*inner(g-g_e,g-g_e)*dx
    ind = Function(DG)
    assemble(r,tensor=ind.vector())
    # global relative error estimate (recovered vs. raw flux)
    err = errornorm(g_e,g,'L2')/norm(g,'L2')
    return ind,err
def edge_residual_indicator(mesh,flux,force=Constant(0.0)):
    """Classical residual-based indicator: h-weighted cell residual plus
    flux jumps across interior facets, assembled cellwise into a DG0
    function.  NOTE(review): the default `force=Constant(0.0)` is created
    once at import time and shared between calls -- harmless as long as
    callers never modify it.
    """
    residual = div(flux) + force
    W = FunctionSpace(mesh,"DG",0)
    w = TestFunction(W)
    n = FacetNormal(mesh)
    h = CellSize(mesh)
    # (h*residual)^2 on cells + h*[flux.n]^2 on interior facets
    r = w*(h*residual)**2*dx + avg(w)*avg(h)*jump(flux,n)**2*dS
    indicators = Function(W)
    b = indicators.vector()
    assemble(r,tensor=b)
    return indicators
def poisson_indicator(geo, u, f=None, cyl=False):
mesh = geo.mesh
W = FunctionSpace(mesh,"DG",0)
w = TestFunction(W)
n = FacetNormal(mesh)
h = CellSize(mesh)/geo.physics.lscale
dS = geo.dS()
dx = geo.dx()
Aperm = geo.pwconst("permittivity")
volcharge = geo.pwconst("volcharge")
flux = Aperm*geo.physics.grad(u)
r = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.0)
residual = geo.physics.div(flux) + volcharge
if f:
residual = residual + f
lscale = geo.physics.lscale
def Clscale(i):
return Constant( lscale**i )
res = Clscale(1)*avg(w)*avg(h)*jump(flux,n)**2*r('+')*dS + w*h**2*residual**2*r*dx
restot = Clscale(1)*avg(h)*jump(flux,n)**2*r('+')*dS + h**2*residual**2*r*dx
energynorm = inner(flux, geo.physics.grad(u))*r*dx
indicators = Function(W)
error = sqrt(assemble(restot)/assemble(energynorm))
assemble(res,tensor=indicators.vector())
return indicators, error
def pb_indicator(geo, phys, u, cyl=False):
c0 = phys.bulkcon
chi = geo.pwconst("ions", value={"ions":1.,"solid":0.})
f = Constant(-phys.cFarad*2*c0/phys.UT)*u*chi
return poisson_indicator(geo, u, f=f, cyl=cyl)
def pb_indicator_GO(geo, phys, u, z, cyl=False):
mesh = geo.mesh
V = FunctionSpace(mesh, "DG", 0)
W = FunctionSpace(mesh, "CG", 1)
EW = FunctionSpace(mesh, "CG", 2)
Ez = Function(EW)
Ez.extrapolate(z)
w = Ez - z
v = TestFunction(V)
n = FacetNormal(mesh)
h = CellSize(mesh)
r = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.)
dS = geo.dS()
ds = geo.ds()
dx0 = geo.dx("ions")
dx = geo.dx()
c0 = phys.bulkcon
cFarad = phys.cFarad
UT = phys.UT
eps = geo.pwconst('permittivity')
def Clscale(i):
return Constant( (phys.lscale)**i )
flux = eps*phys.grad(u)
def rform(w):
return -v*phys.div(flux)*w*r*dx \
+v*Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
-geo.linearRHS(v*w*r, "volcharge") \
+Clscale(1)*(
+avg(v)*jump(n, flux*w)*r('+')*dS \
+v*inner(n, flux)*w*r*ds \
-geo.NeumannRHS(v*w*r, "surfcharge"))
def R(w):
return assemble(
inner(flux, phys.grad(w))*r*dx \
+Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
-geo.linearRHS(w*r, "volcharge")
-Clscale(1)*geo.NeumannRHS(w*r, "surfcharge"))
def J(w):
return assemble(
inner(flux, phys.grad(w))*r*dx \
+Constant(cFarad*2*c0/UT)*u*w*r*dx0)
indicators = Function(V)
vec = indicators.vector()
assemble(rform(w), tensor=vec)
vec[:] = numpy.abs(vec[:])
goal = J(z)
goal_ex = J(Ez)
scale = abs(1./goal) if not goal == 0. else 1e12*(1./phys.lscale**3)
(z))*scale
error_rep = abs(R(Ez))*scale
error_sum = sum(vec)*scale
indicators2 = Function(V)
vec2 = indicators2.vector()
assemble(rform(z), tensor=vec2)
vec2[:] = numpy.abs(vec2[:])
cheap_sum = sum(vec2)*scale
print "Goal (dual):", goal
print "Goal (extrapolated dual):", goal_ex
print "This should be zero (dual global residual):", error_res
print "Extrapolated dual residual:", error_rep
print "indicator sum:", error_sum
print "ind sum w/o extrap:", cheap_sum
return indicators, error_sum, error_rep, cheap_sum, goal, goal_ex
def pb_indicator_GO_cheap(geo, phys, u, z, cyl=False):
mesh = geo.mesh
V = FunctionSpace(mesh, "DG", 0)
v = TestFunction(V)
n = FacetNormal(mesh)
h = CellSize(mesh)
r = Expression("2*pi*x[0]", degree=1) if cyl else Constant(1.)
dS = geo.dS()
ds = geo.ds()
dx0 = geo.dx("ions")
dx = geo.dx()
c0 = phys.bulkcon
cFarad = phys.cFarad
UT = phys.UT
eps = geo.pwconst('permittivity')
def Clscale(i):
return Constant( (phys.lscale)**i )
flux = eps*phys.grad(u)
def rform(w):
return -v*phys.div(flux)*w*r*dx \
+v*Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
-geo.linearRHS(v*w*r, "volcharge") \
+Clscale(1)*(
+avg(v)*jump(n, flux*w)*r('+')*dS \
+v*inner(n, flux)*w*r*ds \
-geo.NeumannRHS(v*w*r, "surfcharge"))
def R(w):
return assemble(
inner(flux, phys.grad(w))*r*dx \
+Constant(cFarad*2*c0/UT)*u*w*r*dx0 \
-geo.linearRHS(w*r, "volcharge")
-Clscale(1)*geo.NeumannRHS(w*r, "surfcharge"))
def J(w):
return assemble(
inner(flux, phys.grad(w))*r*dx \
+Constant(cFarad*2*c0/UT)*u*w*r*dx0)
goal = J(z)
scale = abs(1./goal)
(z))*scale
indicators = Function(V)
vec = indicators.vector()
assemble(rform(z), tensor=vec)
vec[:] = numpy.abs(vec[:])
error_sum = sum(vec)*scale
print "Goal (dual):", goal
print "This should be zero (dual global residual):", error_res
print "indicator sum (does not make sense as error estimate):", error_sum
return indicators, error_sum, goal
def simple_pb_indicator_GO(geo, phys, u, z):
mesh = geo.mesh
V = FunctionSpace(mesh, "DG", 0)
EW = FunctionSpace(mesh, "CG", 2)
Ez = Function(EW)
Ez.extrapolate(z)
w = Ez - z
v = TestFunction(V)
n = FacetNormal(mesh)
r = phys.r2pi
dS = geo.dS()
ds = geo.ds()
dx0 = geo.dx("ions")
dx = geo.dx()
c0 = phys.bulkcon
cFarad = phys.cFarad
UT = phys.UT
eps = geo.pwconst('permittivity')
k = Constant(cFarad*2.*c0/UT)
def Clscale(i):
return Constant( (phys.lscale)**i )
flux = eps*phys.grad(u)
def rform(w):
return -v*phys.div(flux)*w*r*dx + v*k*u*w*r*dx0 \
-geo.linearRHS(v*w*r, "volcharge") \
+Clscale(1)*(
+avg(v)*jump(n, flux*w)*r('+')*dS \
+v*inner(n, flux)*w*r*ds \
-geo.NeumannRHS(v*w*r, "surfcharge"))
def R(w):
return assemble(
inner(flux, phys.grad(w))*r*dx + k*u*w*r*dx0 \
-geo.linearRHS(w*r, "volcharge")
-Clscale(1)*geo.NeumannRHS(w*r, "surfcharge"))
def J(w):
return assemble(inner(flux, phys.grad(w))*r*dx + k*u*w*r*dx0)
indicators = Function(V)
vec = indicators.vector()
assemble(rform(w), tensor=vec)
vec[:] = numpy.abs(vec[:])
goal = J(z)
scale = abs(1./goal) if not goal == 0. else 1e12*(1./phys.lscale**3)
error_rep = abs(R(Ez))*scale
return indicators, error_rep
| false | true |
f73099b5d58d809058597f4eeb4632505a3407c8 | 404 | py | Python | package/cloudshell/cp/azure/models/azure_blob_url.py | tim-spiglanin/Azure-Shell | 58c52994f0d6cfd798c5dca33737419ec18363d4 | [
"Apache-2.0"
] | 5 | 2016-09-08T08:33:47.000Z | 2020-02-10T12:31:15.000Z | package/cloudshell/cp/azure/models/azure_blob_url.py | tim-spiglanin/Azure-Shell | 58c52994f0d6cfd798c5dca33737419ec18363d4 | [
"Apache-2.0"
] | 505 | 2016-08-09T07:41:03.000Z | 2021-02-08T20:26:46.000Z | package/cloudshell/cp/azure/models/azure_blob_url.py | tim-spiglanin/Azure-Shell | 58c52994f0d6cfd798c5dca33737419ec18363d4 | [
"Apache-2.0"
] | 5 | 2016-12-21T12:52:55.000Z | 2021-07-08T09:50:42.000Z | class AzureBlobUrlModel(object):
def __init__(self, storage_name, container_name, blob_name):
"""
:param storage_name: (str) Azure storage name
:param container_name: (str) Azure container name
:param blob_name: (str) Azure Blob name
"""
self.storage_name = storage_name
self.container_name = container_name
self.blob_name = blob_name
| 33.666667 | 64 | 0.658416 | class AzureBlobUrlModel(object):
def __init__(self, storage_name, container_name, blob_name):
self.storage_name = storage_name
self.container_name = container_name
self.blob_name = blob_name
| true | true |
f7309b8ab0ec6cc5672805a9ee0b86213e917ba4 | 964 | py | Python | webdriver_test_tools/config/__init__.py | connordelacruz/webdriver-test-tools | fe6906839e4423562c6d4d0aa6b10b2ea90bff6b | [
"MIT"
] | 5 | 2018-07-02T13:18:59.000Z | 2019-10-14T04:55:31.000Z | webdriver_test_tools/config/__init__.py | connordelacruz/webdriver-test-tools | fe6906839e4423562c6d4d0aa6b10b2ea90bff6b | [
"MIT"
] | 1 | 2019-10-16T20:54:25.000Z | 2019-10-16T20:54:25.000Z | webdriver_test_tools/config/__init__.py | connordelacruz/webdriver-test-tools | fe6906839e4423562c6d4d0aa6b10b2ea90bff6b | [
"MIT"
] | 1 | 2019-09-03T05:29:41.000Z | 2019-09-03T05:29:41.000Z | """Default configurations for various items in the test framework.
This module imports the following classes:
:class:`webdriver_test_tools.config.browser.BrowserConfig`
:class:`webdriver_test_tools.config.browser.BrowserStackConfig`
:class:`webdriver_test_tools.config.projectfiles.ProjectFilesConfig`
:class:`webdriver_test_tools.config.site.SiteConfig`
:class:`webdriver_test_tools.config.test.TestSuiteConfig`
:class:`webdriver_test_tools.config.webdriver.WebDriverConfig`
.. toctree::
webdriver_test_tools.config.browser
webdriver_test_tools.config.browserstack
webdriver_test_tools.config.projectfiles
webdriver_test_tools.config.site
webdriver_test_tools.config.test
webdriver_test_tools.config.webdriver
"""
from .projectfiles import ProjectFilesConfig
from .site import SiteConfig
from .test import TestSuiteConfig
from .webdriver import WebDriverConfig
from .browser import BrowserConfig, BrowserStackConfig
| 35.703704 | 72 | 0.823651 | from .projectfiles import ProjectFilesConfig
from .site import SiteConfig
from .test import TestSuiteConfig
from .webdriver import WebDriverConfig
from .browser import BrowserConfig, BrowserStackConfig
| true | true |
f7309bc8a28f4f8cdb7fb8535f464ad2bbe04bfe | 392 | py | Python | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-04-27T14:47:30.000Z | 2020-04-27T14:47:30.000Z | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-06-09T15:23:04.000Z | 2020-06-09T15:23:04.000Z | tests/test_invenio_circulation.py | NRodriguezcuellar/invenio-circulation | 8e0c977849eb76ba9a342542dae3b6a6ef5bae16 | [
"MIT"
] | 1 | 2020-01-13T17:10:13.000Z | 2020-01-13T17:10:13.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 CERN.
# Copyright (C) 2018-2019 RERO.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Module tests."""
def test_version():
"""Test version import."""
from invenio_circulation import __version__
assert __version__
| 23.058824 | 77 | 0.701531 |
def test_version():
from invenio_circulation import __version__
assert __version__
| true | true |
f7309c92dce2b8723801c5e1bdb0c965492b3e58 | 12,782 | py | Python | lib/coinchooser.py | parkbyte/electrum-parkbyte | 32fec1a172e1b39e5b57df93a972f4d9b4e595c4 | [
"MIT"
] | null | null | null | lib/coinchooser.py | parkbyte/electrum-parkbyte | 32fec1a172e1b39e5b57df93a972f4d9b4e595c4 | [
"MIT"
] | null | null | null | lib/coinchooser.py | parkbyte/electrum-parkbyte | 32fec1a172e1b39e5b57df93a972f4d9b4e595c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
# Electrum - lightweight ParkByte client
# Copyright (C) 2015 kyuupichan@gmail
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from collections import defaultdict, namedtuple
from math import floor, log10
import struct
from parkbyte import sha256, COIN, TYPE_ADDRESS
from transaction import Transaction
from util import NotEnoughFunds, PrintError, profiler
# A simple deterministic PRNG. Used to deterministically shuffle a
# set of coins - the same set of coins should produce the same output.
# Although choosing UTXOs "randomly" we want it to be deterministic,
# so if sending twice from the same UTXO set we choose the same UTXOs
# to spend. This prevents attacks on users by malicious or stale
# servers.
class PRNG:
def __init__(self, seed):
self.sha = sha256(seed)
self.pool = bytearray()
def get_bytes(self, n):
while len(self.pool) < n:
self.pool.extend(self.sha)
self.sha = sha256(self.sha)
result, self.pool = self.pool[:n], self.pool[n:]
return result
def random(self):
# Returns random double in [0, 1)
four = self.get_bytes(4)
return struct.unpack("I", four)[0] / 4294967296.0
def randint(self, start, end):
# Returns random integer in [start, end)
return start + int(self.random() * (end - start))
def choice(self, seq):
return seq[int(self.random() * len(seq))]
def shuffle(self, x):
for i in reversed(xrange(1, len(x))):
# pick an element in x[:i+1] with which to exchange x[i]
j = int(self.random() * (i+1))
x[i], x[j] = x[j], x[i]
Bucket = namedtuple('Bucket', ['desc', 'size', 'value', 'coins'])
def strip_unneeded(bkts, sufficient_funds):
'''Remove buckets that are unnecessary in achieving the spend amount'''
bkts = sorted(bkts, key = lambda bkt: bkt.value)
for i in range(len(bkts)):
if not sufficient_funds(bkts[i + 1:]):
return bkts[i:]
# Shouldn't get here
return bkts
class CoinChooserBase(PrintError):
def keys(self, coins):
raise NotImplementedError
def bucketize_coins(self, coins):
keys = self.keys(coins)
buckets = defaultdict(list)
for key, coin in zip(keys, coins):
buckets[key].append(coin)
def make_Bucket(desc, coins):
size = sum(Transaction.estimated_input_size(coin)
for coin in coins)
value = sum(coin['value'] for coin in coins)
return Bucket(desc, size, value, coins)
return map(make_Bucket, buckets.keys(), buckets.values())
def penalty_func(self, tx):
def penalty(candidate):
return 0
return penalty
def change_amounts(self, tx, count, fee_estimator, dust_threshold):
# Break change up if bigger than max_change
output_amounts = [o[2] for o in tx.outputs()]
# Don't split change of less than 0.02 PKB
max_change = max(max(output_amounts) * 1.25, 0.02 * COIN)
# Use N change outputs
for n in range(1, count + 1):
# How much is left if we add this many change outputs?
change_amount = max(0, tx.get_fee() - fee_estimator(n))
if change_amount // n <= max_change:
break
# Get a handle on the precision of the output amounts; round our
# change to look similar
def trailing_zeroes(val):
s = str(val)
return len(s) - len(s.rstrip('0'))
zeroes = map(trailing_zeroes, output_amounts)
min_zeroes = min(zeroes)
max_zeroes = max(zeroes)
zeroes = range(max(0, min_zeroes - 1), (max_zeroes + 1) + 1)
# Calculate change; randomize it a bit if using more than 1 output
remaining = change_amount
amounts = []
while n > 1:
average = remaining // n
amount = self.p.randint(int(average * 0.7), int(average * 1.3))
precision = min(self.p.choice(zeroes), int(floor(log10(amount))))
amount = int(round(amount, -precision))
amounts.append(amount)
remaining -= amount
n -= 1
# Last change output. Round down to maximum precision but lose
# no more than 100 satoshis to fees (2dp)
N = pow(10, min(2, zeroes[0]))
amount = (remaining // N) * N
amounts.append(amount)
assert sum(amounts) <= change_amount
return amounts
def change_outputs(self, tx, change_addrs, fee_estimator, dust_threshold):
amounts = self.change_amounts(tx, len(change_addrs), fee_estimator,
dust_threshold)
assert min(amounts) >= 0
assert len(change_addrs) >= len(amounts)
# If change is above dust threshold after accounting for the
# size of the change output, add it to the transaction.
dust = sum(amount for amount in amounts if amount < dust_threshold)
amounts = [amount for amount in amounts if amount >= dust_threshold]
change = [(TYPE_ADDRESS, addr, amount)
for addr, amount in zip(change_addrs, amounts)]
self.print_error('change:', change)
if dust:
self.print_error('not keeping dust', dust)
return change
def make_tx(self, coins, outputs, change_addrs, fee_estimator,
dust_threshold):
'''Select unspent coins to spend to pay outputs. If the change is
greater than dust_threshold (after adding the change output to
the transaction) it is kept, otherwise none is sent and it is
added to the transaction fee.'''
# Deterministic randomness from coins
utxos = [c['prevout_hash'] + str(c['prevout_n']) for c in coins]
self.p = PRNG(''.join(sorted(utxos)))
# Copy the ouputs so when adding change we don't modify "outputs"
tx = Transaction.from_io([], outputs[:])
# Size of the transaction with no inputs and no change
base_size = tx.estimated_size()
spent_amount = tx.output_value()
def sufficient_funds(buckets):
'''Given a list of buckets, return True if it has enough
value to pay for the transaction'''
total_input = sum(bucket.value for bucket in buckets)
total_size = sum(bucket.size for bucket in buckets) + base_size
return total_input >= spent_amount + fee_estimator(total_size)
# Collect the coins into buckets, choose a subset of the buckets
buckets = self.bucketize_coins(coins)
buckets = self.choose_buckets(buckets, sufficient_funds,
self.penalty_func(tx))
tx.add_inputs([coin for b in buckets for coin in b.coins])
tx_size = base_size + sum(bucket.size for bucket in buckets)
# This takes a count of change outputs and returns a tx fee;
# each pay-to-parkbyte-address output serializes as 34 bytes
fee = lambda count: fee_estimator(tx_size + count * 34)
change = self.change_outputs(tx, change_addrs, fee, dust_threshold)
tx.add_outputs(change)
self.print_error("using %d inputs" % len(tx.inputs()))
self.print_error("using buckets:", [bucket.desc for bucket in buckets])
return tx
class CoinChooserOldestFirst(CoinChooserBase):
'''Maximize transaction priority. Select the oldest unspent
transaction outputs in your wallet, that are sufficient to cover
the spent amount. Then, remove any unneeded inputs, starting with
the smallest in value.
'''
def keys(self, coins):
return [coin['prevout_hash'] + ':' + str(coin['prevout_n'])
for coin in coins]
def choose_buckets(self, buckets, sufficient_funds, penalty_func):
'''Spend the oldest buckets first.'''
# Unconfirmed coins are young, not old
adj_height = lambda height: 99999999 if height == 0 else height
buckets.sort(key = lambda b: max(adj_height(coin['height'])
for coin in b.coins))
selected = []
for bucket in buckets:
selected.append(bucket)
if sufficient_funds(selected):
return strip_unneeded(selected, sufficient_funds)
else:
raise NotEnoughFunds()
class CoinChooserRandom(CoinChooserBase):
def bucket_candidates(self, buckets, sufficient_funds):
'''Returns a list of bucket sets.'''
candidates = set()
# Add all singletons
for n, bucket in enumerate(buckets):
if sufficient_funds([bucket]):
candidates.add((n, ))
# And now some random ones
attempts = min(100, (len(buckets) - 1) * 10 + 1)
permutation = range(len(buckets))
for i in range(attempts):
# Get a random permutation of the buckets, and
# incrementally combine buckets until sufficient
self.p.shuffle(permutation)
bkts = []
for count, index in enumerate(permutation):
bkts.append(buckets[index])
if sufficient_funds(bkts):
candidates.add(tuple(sorted(permutation[:count + 1])))
break
else:
raise NotEnoughFunds()
candidates = [[buckets[n] for n in c] for c in candidates]
return [strip_unneeded(c, sufficient_funds) for c in candidates]
def choose_buckets(self, buckets, sufficient_funds, penalty_func):
candidates = self.bucket_candidates(buckets, sufficient_funds)
penalties = [penalty_func(cand) for cand in candidates]
winner = candidates[penalties.index(min(penalties))]
self.print_error("Bucket sets:", len(buckets))
self.print_error("Winning penalty:", min(penalties))
return winner
class CoinChooserPrivacy(CoinChooserRandom):
'''Attempts to better preserve user privacy. First, if any coin is
spent from a user address, all coins are. Compared to spending
from other addresses to make up an amount, this reduces
information leakage about sender holdings. It also helps to
reduce blockchain UTXO bloat, and reduce future privacy loss that
would come from reusing that address' remaining UTXOs. Second, it
penalizes change that is quite different to the sent amount.
Third, it penalizes change that is too big.'''
def keys(self, coins):
return [coin['address'] for coin in coins]
def penalty_func(self, tx):
min_change = min(o[2] for o in tx.outputs()) * 0.75
max_change = max(o[2] for o in tx.outputs()) * 1.33
spent_amount = sum(o[2] for o in tx.outputs())
def penalty(buckets):
badness = len(buckets) - 1
total_input = sum(bucket.value for bucket in buckets)
change = float(total_input - spent_amount)
# Penalize change not roughly in output range
if change < min_change:
badness += (min_change - change) / (min_change + 10000)
elif change > max_change:
badness += (change - max_change) / (max_change + 10000)
# Penalize large change; 5 PKB excess ~= using 1 more input
badness += change / (COIN * 5)
return badness
return penalty
COIN_CHOOSERS = {'Priority': CoinChooserOldestFirst,
'Privacy': CoinChooserPrivacy}
def get_name(config):
kind = config.get('coin_chooser')
if not kind in COIN_CHOOSERS:
kind = 'Priority'
return kind
def get_coin_chooser(config):
klass = COIN_CHOOSERS[get_name(config)]
return klass()
| 39.819315 | 79 | 0.634408 |
from collections import defaultdict, namedtuple
from math import floor, log10
import struct
from parkbyte import sha256, COIN, TYPE_ADDRESS
from transaction import Transaction
from util import NotEnoughFunds, PrintError, profiler
class PRNG:
def __init__(self, seed):
self.sha = sha256(seed)
self.pool = bytearray()
def get_bytes(self, n):
while len(self.pool) < n:
self.pool.extend(self.sha)
self.sha = sha256(self.sha)
result, self.pool = self.pool[:n], self.pool[n:]
return result
def random(self):
four = self.get_bytes(4)
return struct.unpack("I", four)[0] / 4294967296.0
def randint(self, start, end):
return start + int(self.random() * (end - start))
def choice(self, seq):
return seq[int(self.random() * len(seq))]
def shuffle(self, x):
for i in reversed(xrange(1, len(x))):
j = int(self.random() * (i+1))
x[i], x[j] = x[j], x[i]
Bucket = namedtuple('Bucket', ['desc', 'size', 'value', 'coins'])
def strip_unneeded(bkts, sufficient_funds):
bkts = sorted(bkts, key = lambda bkt: bkt.value)
for i in range(len(bkts)):
if not sufficient_funds(bkts[i + 1:]):
return bkts[i:]
return bkts
class CoinChooserBase(PrintError):
def keys(self, coins):
raise NotImplementedError
def bucketize_coins(self, coins):
keys = self.keys(coins)
buckets = defaultdict(list)
for key, coin in zip(keys, coins):
buckets[key].append(coin)
def make_Bucket(desc, coins):
size = sum(Transaction.estimated_input_size(coin)
for coin in coins)
value = sum(coin['value'] for coin in coins)
return Bucket(desc, size, value, coins)
return map(make_Bucket, buckets.keys(), buckets.values())
def penalty_func(self, tx):
def penalty(candidate):
return 0
return penalty
def change_amounts(self, tx, count, fee_estimator, dust_threshold):
# Break change up if bigger than max_change
output_amounts = [o[2] for o in tx.outputs()]
# Don't split change of less than 0.02 PKB
max_change = max(max(output_amounts) * 1.25, 0.02 * COIN)
for n in range(1, count + 1):
change_amount = max(0, tx.get_fee() - fee_estimator(n))
if change_amount // n <= max_change:
break
def trailing_zeroes(val):
s = str(val)
return len(s) - len(s.rstrip('0'))
zeroes = map(trailing_zeroes, output_amounts)
min_zeroes = min(zeroes)
max_zeroes = max(zeroes)
zeroes = range(max(0, min_zeroes - 1), (max_zeroes + 1) + 1)
remaining = change_amount
amounts = []
while n > 1:
average = remaining // n
amount = self.p.randint(int(average * 0.7), int(average * 1.3))
precision = min(self.p.choice(zeroes), int(floor(log10(amount))))
amount = int(round(amount, -precision))
amounts.append(amount)
remaining -= amount
n -= 1
N = pow(10, min(2, zeroes[0]))
amount = (remaining // N) * N
amounts.append(amount)
assert sum(amounts) <= change_amount
return amounts
def change_outputs(self, tx, change_addrs, fee_estimator, dust_threshold):
amounts = self.change_amounts(tx, len(change_addrs), fee_estimator,
dust_threshold)
assert min(amounts) >= 0
assert len(change_addrs) >= len(amounts)
dust = sum(amount for amount in amounts if amount < dust_threshold)
amounts = [amount for amount in amounts if amount >= dust_threshold]
change = [(TYPE_ADDRESS, addr, amount)
for addr, amount in zip(change_addrs, amounts)]
self.print_error('change:', change)
if dust:
self.print_error('not keeping dust', dust)
return change
def make_tx(self, coins, outputs, change_addrs, fee_estimator,
dust_threshold):
utxos = [c['prevout_hash'] + str(c['prevout_n']) for c in coins]
self.p = PRNG(''.join(sorted(utxos)))
tx = Transaction.from_io([], outputs[:])
# Size of the transaction with no inputs and no change
base_size = tx.estimated_size()
spent_amount = tx.output_value()
def sufficient_funds(buckets):
total_input = sum(bucket.value for bucket in buckets)
total_size = sum(bucket.size for bucket in buckets) + base_size
return total_input >= spent_amount + fee_estimator(total_size)
# Collect the coins into buckets, choose a subset of the buckets
buckets = self.bucketize_coins(coins)
buckets = self.choose_buckets(buckets, sufficient_funds,
self.penalty_func(tx))
tx.add_inputs([coin for b in buckets for coin in b.coins])
tx_size = base_size + sum(bucket.size for bucket in buckets)
# This takes a count of change outputs and returns a tx fee;
# each pay-to-parkbyte-address output serializes as 34 bytes
fee = lambda count: fee_estimator(tx_size + count * 34)
change = self.change_outputs(tx, change_addrs, fee, dust_threshold)
tx.add_outputs(change)
self.print_error("using %d inputs" % len(tx.inputs()))
self.print_error("using buckets:", [bucket.desc for bucket in buckets])
return tx
class CoinChooserOldestFirst(CoinChooserBase):
def keys(self, coins):
return [coin['prevout_hash'] + ':' + str(coin['prevout_n'])
for coin in coins]
def choose_buckets(self, buckets, sufficient_funds, penalty_func):
# Unconfirmed coins are young, not old
adj_height = lambda height: 99999999 if height == 0 else height
buckets.sort(key = lambda b: max(adj_height(coin['height'])
for coin in b.coins))
selected = []
for bucket in buckets:
selected.append(bucket)
if sufficient_funds(selected):
return strip_unneeded(selected, sufficient_funds)
else:
raise NotEnoughFunds()
class CoinChooserRandom(CoinChooserBase):
def bucket_candidates(self, buckets, sufficient_funds):
candidates = set()
# Add all singletons
for n, bucket in enumerate(buckets):
if sufficient_funds([bucket]):
candidates.add((n, ))
# And now some random ones
attempts = min(100, (len(buckets) - 1) * 10 + 1)
permutation = range(len(buckets))
for i in range(attempts):
# Get a random permutation of the buckets, and
# incrementally combine buckets until sufficient
self.p.shuffle(permutation)
bkts = []
for count, index in enumerate(permutation):
bkts.append(buckets[index])
if sufficient_funds(bkts):
candidates.add(tuple(sorted(permutation[:count + 1])))
break
else:
raise NotEnoughFunds()
candidates = [[buckets[n] for n in c] for c in candidates]
return [strip_unneeded(c, sufficient_funds) for c in candidates]
def choose_buckets(self, buckets, sufficient_funds, penalty_func):
candidates = self.bucket_candidates(buckets, sufficient_funds)
penalties = [penalty_func(cand) for cand in candidates]
winner = candidates[penalties.index(min(penalties))]
self.print_error("Bucket sets:", len(buckets))
self.print_error("Winning penalty:", min(penalties))
return winner
class CoinChooserPrivacy(CoinChooserRandom):
def keys(self, coins):
return [coin['address'] for coin in coins]
def penalty_func(self, tx):
min_change = min(o[2] for o in tx.outputs()) * 0.75
max_change = max(o[2] for o in tx.outputs()) * 1.33
spent_amount = sum(o[2] for o in tx.outputs())
def penalty(buckets):
badness = len(buckets) - 1
total_input = sum(bucket.value for bucket in buckets)
change = float(total_input - spent_amount)
# Penalize change not roughly in output range
if change < min_change:
badness += (min_change - change) / (min_change + 10000)
elif change > max_change:
badness += (change - max_change) / (max_change + 10000)
# Penalize large change; 5 PKB excess ~= using 1 more input
badness += change / (COIN * 5)
return badness
return penalty
COIN_CHOOSERS = {'Priority': CoinChooserOldestFirst,
'Privacy': CoinChooserPrivacy}
def get_name(config):
kind = config.get('coin_chooser')
if not kind in COIN_CHOOSERS:
kind = 'Priority'
return kind
def get_coin_chooser(config):
klass = COIN_CHOOSERS[get_name(config)]
return klass()
| true | true |
f7309d29d60d2cbdb6763d9b77b9ea53b7fcf2ae | 631 | py | Python | scripts/AddTag/R21/addsbtag.py | AsheAnn/C4D_Python | 3fa5e1d8b4f94efb03f820a61789d276c8a5b045 | [
"MIT"
] | null | null | null | scripts/AddTag/R21/addsbtag.py | AsheAnn/C4D_Python | 3fa5e1d8b4f94efb03f820a61789d276c8a5b045 | [
"MIT"
] | null | null | null | scripts/AddTag/R21/addsbtag.py | AsheAnn/C4D_Python | 3fa5e1d8b4f94efb03f820a61789d276c8a5b045 | [
"MIT"
] | null | null | null | # ----------------------------------------------------------------------
# Note:
# - This is the Python code used in Script Manager. ##
# Compatible:
# - Win / Mac
# - R21
# ----------------------------------------------------------------------
import c4d
# Main function
def main():
c4d.CallCommand(100004708, 100004708) # Large Icons
c4d.CallCommand(180000042, 180000042) # Soft Body Tag
# Execute main()
if __name__=='__main__':
main() | 31.55 | 204 | 0.328051 |
import c4d
def main():
c4d.CallCommand(100004708, 100004708)
c4d.CallCommand(180000042, 180000042)
if __name__=='__main__':
main() | true | true |
f7309d67644cc71130ee7f30f1834d0eaeb38d8f | 11,434 | py | Python | pyogp/apps/examples/smoke_test.py | grobertson/PyOGP.Apps | 03583baa8d3a2438b0d0a5452ee8c9e56aace9fd | [
"Apache-2.0"
] | null | null | null | pyogp/apps/examples/smoke_test.py | grobertson/PyOGP.Apps | 03583baa8d3a2438b0d0a5452ee8c9e56aace9fd | [
"Apache-2.0"
] | null | null | null | pyogp/apps/examples/smoke_test.py | grobertson/PyOGP.Apps | 03583baa8d3a2438b0d0a5452ee8c9e56aace9fd | [
"Apache-2.0"
] | null | null | null |
"""
Contributors can be viewed at:
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/CONTRIBUTORS.txt
$LicenseInfo:firstyear=2008&license=apachev2$
Copyright 2009, Linden Research, Inc.
Licensed under the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
or in
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/LICENSE.txt
$/LicenseInfo$
"""
# standard
import unittest
import getpass, logging
from optparse import OptionParser
import time
# pyogp
from pyogp.lib.client.agent import Agent
from pyogp.lib.client.enums import DeRezDestination, AssetType, \
InventoryType, WearablesIndex
from pyogp.lib.base.datatypes import UUID, Vector3
# related
from eventlet import api
# Module-level Agent instance shared by login() and all TestServer tests;
# it is logged in once by login() before the test suite runs.
client = Agent()
class Semaphore(object):
    """
    Basic busy-wait semaphore used to serialize the smoke tests.

    wait() spins, yielding to the eventlet hub, until signal() is called
    from an event callback or until ``time_out`` seconds elapse; in the
    latter case ``timed_out`` is set to True.
    """

    # True while no signal() has arrived; wait() re-arms it on exit.
    waiting = True
    # Set by wait() when it gave up before signal() arrived.
    timed_out = False

    def wait(self, time_out=0):
        """Block cooperatively until signal() arrives or time_out expires."""
        # Bug fix: reset the flag so a reused instance does not report a
        # stale timeout left over from a previous wait().
        self.timed_out = False
        start = now = time.time()
        while self.waiting and now - start <= time_out:
            api.sleep(0)  # yield so eventlet can run network callbacks
            now = time.time()
        if now - start > time_out:
            self.timed_out = True
        # Re-arm for the next wait() on this instance.
        self.waiting = True

    def signal(self):
        """Release a pending (or upcoming) wait()."""
        self.waiting = False
def login():
    """
    Command-line entry point: log the module-level ``client`` agent in.

    Parses ``firstname lastname`` plus optional loginuri/region/password
    options, blocks until the agent is connected and has a position,
    requests the contents of the top-level inventory folders, and
    returns the connected client.
    """
    parser = OptionParser(usage="usage: %prog [options] firstname lastname")
    logger = logging.getLogger("client.example")
    parser.add_option("-l", "--loginuri", dest="loginuri",
                      default="https://login.aditi.lindenlab.com/cgi-bin/login.cgi",
                      help="specified the target loginuri")
    parser.add_option("-r", "--region", dest="region", default=None,
                      help="specifies the region (regionname/x/y/z) to connect to")
    parser.add_option("-q", "--quiet", dest="verbose", default=True,
                      action="store_false", help="enable verbose mode")
    parser.add_option("-p", "--password", dest="password", default=None,
                      help="specifies password instead of being prompted for one")
    (options, args) = parser.parse_args()
    # exactly two positional args are required: avatar first and last name
    if len(args) != 2:
        parser.error("Expected arguments: firstname lastname")
    if options.verbose:
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG) # seems to be a no op, set it for the logger
        formatter = logging.Formatter('%(asctime)-30s%(name)-30s: %(levelname)-8s %(message)s')
        console.setFormatter(formatter)
        logging.getLogger('').addHandler(console)
        # setting the level for the handler above seems to be a no-op
        # it needs to be set for the logger, here the root logger
        # otherwise it is NOTSET(=0) which means to log nothing.
        logging.getLogger('').setLevel(logging.DEBUG)
    else:
        print "Attention: This script will print nothing if you use -q. So it might be boring to use it like that ;-)"
    # example from a pure agent perspective
    # grab a password: prefer -p on the command line, otherwise prompt
    if options.password:
        password = options.password
    else:
        password = getpass.getpass()
    # Now let's log it in
    client.login(options.loginuri, args[0], args[1], password,
                 start_location=options.region)
    # busy-wait (yielding to the eventlet hub) until the agent is connected
    while client.connected == False:
        api.sleep(0)
    # let things settle down: keep yielding until Position is populated
    while client.Position == None:
        api.sleep(0)
    # for folders whose parent = root folder aka My Inventory, request their contents
    [client.inventory._request_folder_contents(folder.FolderID) \
        for folder in client.inventory.folders if folder.ParentID == \
        client.inventory.inventory_root.FolderID]
    # give the inventory responses time to arrive before proceeding
    api.sleep(10)
    # dump the agent's state for debugging purposes
    for attr in client.__dict__:
        print attr, ':\t\t\t', client.__dict__[attr]
    return client
class TestServer(unittest.TestCase):
    """Smoke tests driven against a live grid via the module-level `client`."""
    def setUp(self):
        pass
    def tearDown(self):
        # give the agent/grid a moment to settle between tests
        api.sleep(3)
    def test_im(self):
        """
        Tests im by sending an im to self and verify it is received
        """
        s = Semaphore()
        im_handler = client.events_handler.register('InstantMessageReceived')
        def im_received(message_info):
            # only react to the IM we sent to ourselves
            if str(message_info.payload['FromAgentID']) == \
               str(client.agent_id):
                s.signal()
        im_handler.subscribe(im_received)
        client.instant_message(client.agent_id, "Smoke Test message")
        s.wait(30)
        self.assertFalse(s.timed_out)
    def test_chat(self):
        """
        Tests chat sending a global chat and verify that it is received.
        """
        s = Semaphore()
        chat_handler = client.events_handler.register('ChatReceived')
        msg = "Smoke Test chat"
        def chat_received(message_info):
            if message_info.payload['Message'] == msg:
                s.signal()
        chat_handler.subscribe(chat_received)
        client.say(msg)
        s.wait(30)
        self.assertFalse(s.timed_out)
    def test_create_object(self):
        """
        Tests object creation by rezzing a new prim, selecting it, and
        then derezzing it.
        """
        s = Semaphore()
        object_handler = client.events_handler.register('ObjectSelected')
        def object_created(object_info):
            # take the freshly rezzed prim back into the Objects folder
            prim = object_info.payload['object']
            matches = client.inventory.search_inventory(client.inventory.folders,
                                                        name="Objects")
            folder = matches.pop()
            transaction_id = UUID()
            transaction_id.random()
            prim.derez(client,
                       DeRezDestination.TakeIntoAgentInventory,
                       folder.FolderID,
                       transaction_id,
                       client.active_group_id)
            s.signal()
        object_handler.subscribe(object_created)
        client.region.objects.create_default_box()
        s.wait(30)
        self.assertFalse(s.timed_out)
        #verify object copied to inventory
    def test_create_and_remove_item_in_inventory(self):
        """
        Tests item can be created and removed from the inventory
        """
        s = Semaphore()
        matches = client.inventory.search_inventory(client.inventory.folders,
                                                    name='Notecards')
        folder = matches.pop()
        def inventory_item_created(item):
            s.signal()
        # time.time() suffix keeps the item name unique across runs
        item_name = "Smoke Test notecard" + str(time.time())
        item_desc = "Smoke Test desc"
        client.inventory.create_new_item(folder,
                                         item_name,
                                         item_desc,
                                         AssetType.Notecard,
                                         InventoryType.Notecard,
                                         WearablesIndex.WT_SHAPE,
                                         0,
                                         inventory_item_created)
        s.wait(30)
        self.assertFalse(s.timed_out)
        #verify item created in inventory
        matches = client.inventory.search_inventory(client.inventory.folders,
                                                    name=item_name)
        self.assertTrue(len(matches) > 0)
        item = matches.pop()
        api.sleep(5)
        client.inventory.remove_inventory_item(item,
                                               folder,
                                               client.inventory.folders)
        #verify item removed
        client.inventory.sendFetchInventoryDescendentsRequest(folder.FolderID)
        api.sleep(5)
        matches = client.inventory.search_inventory(client.inventory.folders,
                                                    name=item_name)
        self.assertTrue(len(matches) == 0)
    # NOTE(review): the triple-quoted string below deliberately disables
    # test_wear_something; it is a bare string expression, not a comment.
    '''
    def test_wear_something(self):
        """
        Tests wearing something by finding a clothing item in the inventory,
        wearing it and verifying it is worn.
        """
        #check current pants
        matches = client.inventory.search_inventory(client.inventory.folders,
                                                    name="Clubgoer Male Pants")
        item = matches.pop()
        client.appearance.wear_item(item, WearablesIndex.WT_PANTS)
        api.sleep(10)
        #verify avatar is wearing item
        #switch pants back
        #verify old pants
    '''
    def test_can_walk(self):
        """
        Tests walking by walking for 5 seconds and verifying position change
        """
        old_pos = Vector3(X=client.Position.X,
                          Y=client.Position.Y,
                          Z=client.Position.Z)
        client.walk()
        api.sleep(5)
        client.walk(False)
        api.sleep(5)
        self.assertFalse(client.Position.X == old_pos.X and \
                         client.Position.Y == old_pos.Y and \
                         client.Position.Z == old_pos.Z)
    def test_can_teleport(self):
        """
        Tests teleport by teleporting to a new location and verifying position
        has changed
        """
        old_pos = Vector3(X=client.Position.X,
                          Y=client.Position.Y,
                          Z=client.Position.Z)
        new_pos = Vector3(X=client.Position.X + 5,
                          Y=client.Position.Y + 5,
                          Z=client.Position.Z)
        client.teleport(region_handle=client.region.RegionHandle,
                        position=new_pos)
        api.sleep(5) # wait for object update
        self.assertFalse(client.Position.X == old_pos.X and \
                         client.Position.Y == old_pos.Y and \
                         client.Position.Z == old_pos.Z)
        # teleport back so later tests start from the original spot
        client.teleport(region_handle=client.region.RegionHandle,
                        position=old_pos)
    def test_physics(self):
        """
        Physics by flying up, stopping, and verify avatar's position changes
        every second over 5 seconds.
        """
        client.fly()
        client.up()
        api.sleep(3)
        old_pos = Vector3(X=client.Position.X,
                          Y=client.Position.Y,
                          Z=client.Position.Z)
        client.up(False)
        client.fly(False)
        # after stopping flight the avatar should fall under physics
        api.sleep(5)
        new_pos = Vector3(X=client.Position.X,
                          Y=client.Position.Y,
                          Z=client.Position.Z)
        self.assertFalse(new_pos.X == old_pos.X and \
                         new_pos.Y == old_pos.Y and \
                         new_pos.Z == old_pos.Z)
    def test_fly(self):
        """
        Tests flying by flying for 5 seconds and verifying position change
        """
        old_pos = Vector3(X=client.Position.X,
                          Y=client.Position.Y,
                          Z=client.Position.Z)
        client.fly()
        api.sleep(5)
        self.assertFalse(client.Position.X == old_pos.X and \
                         client.Position.Y == old_pos.Y and \
                         client.Position.Z == old_pos.Z)
        client.fly(False)
def main():
    """Log the agent in, run the smoke-test suite, then log it out."""
    agent = login()
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(TestServer))
    unittest.TextTestRunner().run(suite)
    agent.logout()
if __name__ == "__main__":
    main()
| 35.399381 | 118 | 0.568305 |
"""
Contributors can be viewed at:
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/CONTRIBUTORS.txt
$LicenseInfo:firstyear=2008&license=apachev2$
Copyright 2009, Linden Research, Inc.
Licensed under the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
or in
http://svn.secondlife.com/svn/linden/projects/2008/pyogp/LICENSE.txt
$/LicenseInfo$
"""
import unittest
import getpass, logging
from optparse import OptionParser
import time
from pyogp.lib.client.agent import Agent
from pyogp.lib.client.enums import DeRezDestination, AssetType, \
InventoryType, WearablesIndex
from pyogp.lib.base.datatypes import UUID, Vector3
from eventlet import api
client = Agent()
class Semaphore(object):
"""
Basic semaphore to allow the serialization of the tests
"""
waiting = True
timed_out = False
def wait(self, time_out=0):
start = now = time.time()
while self.waiting and now - start <= time_out:
api.sleep(0)
now = time.time()
if now - start > time_out:
self.timed_out = True
self.waiting = True
def signal(self):
self.waiting = False
def login():
""" login an to a login endpoint """
parser = OptionParser(usage="usage: %prog [options] firstname lastname")
logger = logging.getLogger("client.example")
parser.add_option("-l", "--loginuri", dest="loginuri",
default="https://login.aditi.lindenlab.com/cgi-bin/login.cgi",
help="specified the target loginuri")
parser.add_option("-r", "--region", dest="region", default=None,
help="specifies the region (regionname/x/y/z) to connect to")
parser.add_option("-q", "--quiet", dest="verbose", default=True,
action="store_false", help="enable verbose mode")
parser.add_option("-p", "--password", dest="password", default=None,
help="specifies password instead of being prompted for one")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("Expected arguments: firstname lastname")
if options.verbose:
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)-30s%(name)-30s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.getLogger('').setLevel(logging.DEBUG)
else:
print "Attention: This script will print nothing if you use -q. So it might be boring to use it like that ;-)"
if options.password:
password = options.password
else:
password = getpass.getpass()
client.login(options.loginuri, args[0], args[1], password,
start_location=options.region)
# wait for the agent to connect
while client.connected == False:
api.sleep(0)
# let things settle down
while client.Position == None:
api.sleep(0)
# for folders whose parent = root folder aka My Inventory, request their contents
[client.inventory._request_folder_contents(folder.FolderID) \
for folder in client.inventory.folders if folder.ParentID == \
client.inventory.inventory_root.FolderID]
api.sleep(10)
for attr in client.__dict__:
print attr, ':\t\t\t', client.__dict__[attr]
return client
class TestServer(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
api.sleep(3)
def test_im(self):
"""
Tests im by sending an im to self and verify it is received
"""
s = Semaphore()
im_handler = client.events_handler.register('InstantMessageReceived')
def im_received(message_info):
if str(message_info.payload['FromAgentID']) == \
str(client.agent_id):
s.signal()
im_handler.subscribe(im_received)
client.instant_message(client.agent_id, "Smoke Test message")
s.wait(30)
self.assertFalse(s.timed_out)
def test_chat(self):
"""
Tests chat sending a global chat and verify that it is received.
"""
s = Semaphore()
chat_handler = client.events_handler.register('ChatReceived')
msg = "Smoke Test chat"
def chat_received(message_info):
if message_info.payload['Message'] == msg:
s.signal()
chat_handler.subscribe(chat_received)
client.say(msg)
s.wait(30)
self.assertFalse(s.timed_out)
def test_create_object(self):
"""
Tests object creation by rezzing a new prim, selecting it, and
then derezzing it.
"""
s = Semaphore()
object_handler = client.events_handler.register('ObjectSelected')
def object_created(object_info):
prim = object_info.payload['object']
matches = client.inventory.search_inventory(client.inventory.folders,
name="Objects")
folder = matches.pop()
transaction_id = UUID()
transaction_id.random()
prim.derez(client,
DeRezDestination.TakeIntoAgentInventory,
folder.FolderID,
transaction_id,
client.active_group_id)
s.signal()
object_handler.subscribe(object_created)
client.region.objects.create_default_box()
s.wait(30)
self.assertFalse(s.timed_out)
#verify object copied to inventory
def test_create_and_remove_item_in_inventory(self):
"""
Tests item can be created and removed from the inventory
"""
s = Semaphore()
matches = client.inventory.search_inventory(client.inventory.folders,
name='Notecards')
folder = matches.pop()
def inventory_item_created(item):
s.signal()
item_name = "Smoke Test notecard" + str(time.time())
item_desc = "Smoke Test desc"
client.inventory.create_new_item(folder,
item_name,
item_desc,
AssetType.Notecard,
InventoryType.Notecard,
WearablesIndex.WT_SHAPE,
0,
inventory_item_created)
s.wait(30)
self.assertFalse(s.timed_out)
#verify item created in inventory
matches = client.inventory.search_inventory(client.inventory.folders,
name=item_name)
self.assertTrue(len(matches) > 0)
item = matches.pop()
api.sleep(5)
client.inventory.remove_inventory_item(item,
folder,
client.inventory.folders)
#verify item removed
client.inventory.sendFetchInventoryDescendentsRequest(folder.FolderID)
api.sleep(5)
matches = client.inventory.search_inventory(client.inventory.folders,
name=item_name)
self.assertTrue(len(matches) == 0)
'''
def test_wear_something(self):
"""
Tests wearing something by finding a clothing item in the inventory,
wearing it and verifying it is worn.
"""
#check current pants
matches = client.inventory.search_inventory(client.inventory.folders,
name="Clubgoer Male Pants")
item = matches.pop()
client.appearance.wear_item(item, WearablesIndex.WT_PANTS)
api.sleep(10)
#verify avatar is wearing item
#switch pants back
#verify old pants
'''
def test_can_walk(self):
"""
Tests walking by walking for 5 seconds and verifying position change
"""
old_pos = Vector3(X=client.Position.X,
Y=client.Position.Y,
Z=client.Position.Z)
client.walk()
api.sleep(5)
client.walk(False)
api.sleep(5)
self.assertFalse(client.Position.X == old_pos.X and \
client.Position.Y == old_pos.Y and \
client.Position.Z == old_pos.Z)
def test_can_teleport(self):
"""
Tests teleport by teleporting to a new location and verifying position
has changed
"""
old_pos = Vector3(X=client.Position.X,
Y=client.Position.Y,
Z=client.Position.Z)
new_pos = Vector3(X=client.Position.X + 5,
Y=client.Position.Y + 5,
Z=client.Position.Z)
client.teleport(region_handle=client.region.RegionHandle,
position=new_pos)
api.sleep(5) # wait for object update
self.assertFalse(client.Position.X == old_pos.X and \
client.Position.Y == old_pos.Y and \
client.Position.Z == old_pos.Z)
client.teleport(region_handle=client.region.RegionHandle,
position=old_pos)
def test_physics(self):
"""
Physics by flying up, stopping, and verify avatar's position changes
every second over 5 seconds.
"""
client.fly()
client.up()
api.sleep(3)
old_pos = Vector3(X=client.Position.X,
Y=client.Position.Y,
Z=client.Position.Z)
client.up(False)
client.fly(False)
api.sleep(5)
new_pos = Vector3(X=client.Position.X,
Y=client.Position.Y,
Z=client.Position.Z)
self.assertFalse(new_pos.X == old_pos.X and \
new_pos.Y == old_pos.Y and \
new_pos.Z == old_pos.Z)
def test_fly(self):
"""
Tests flying by flying for 5 seconds and verifying position change
"""
old_pos = Vector3(X=client.Position.X,
Y=client.Position.Y,
Z=client.Position.Z)
client.fly()
api.sleep(5)
self.assertFalse(client.Position.X == old_pos.X and \
client.Position.Y == old_pos.Y and \
client.Position.Z == old_pos.Z)
client.fly(False)
def main():
client = login()
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestServer))
unittest.TextTestRunner().run(suite)
client.logout()
if __name__ == "__main__":
main()
| false | true |
f7309e2c4b077ea8962cfa2d8cd82d87e8d4bf65 | 2,158 | py | Python | Analysis Scripts/co2_offset.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | 1 | 2019-07-07T19:54:17.000Z | 2019-07-07T19:54:17.000Z | Analysis Scripts/co2_offset.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | null | null | null | Analysis Scripts/co2_offset.py | bonesbb/HASPR | 856af4a480f4ff135591bbbcc1267898d88cbf0d | [
"MIT"
] | null | null | null | # HASPR - High-Altitude Solar Power Research
# Script to calculate CO2-equivalent offset given generation profiles
# Version 0.1
# Author: neyring
from os import walk
import haspr
from haspr import Result
from haspr import Dataset
import numpy as np
from numpy import genfromtxt
# PARAMETERS #
# path to .csv file of grid CI data (Wh, UTC, 1h res, no leap days):
ciPath = "D:\\00_Results\\04_CO2 Offset\\1_Swiss Grid CI - 1h - UTC.csv"
# directory containing generation profiles (1h res, Wh) to run our analyses on (without leap days):
inputDirectory = "D:\\00_Results\\Out"
# directory to write output to:
haspr.outputDirectory = "D:\\00_Results\\04_CO2 Offset\\Case 5 - 30 to 65 deg winter opt"
# OS path delimiter ("\\" for windows, "/" for unix)"
haspr.osPathDelimiter = "\\"
# extract carbon intensity data (timestamp, CI value pairs):
ci = Dataset("ci")
haspr.get_csv_data(ciPath, ci)
timestamps = []
ci_values = []
for p in ci.payload:
    timestamps.append(str(p[0]))
    ci_values.append(float(p[1]))
ci_values = np.array(ci_values)  # use numpy for efficient element-wise calculations
# get all file names in inputDirectory (recurses into subdirectories):
file_names = []
for (dirpath, dirnames, filenames) in walk(inputDirectory):
    file_names.extend(filenames)
# cycle through files and build result objects:
results = []
for f in file_names:
    file_path = inputDirectory + haspr.osPathDelimiter + f
    # get generation profile:
    extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1)
    gen_values = extracted_array[:, 1]  # we only want generation values
    # element-wise CO2-eq offset = carbon intensity * generation:
    carbon_offset = np.multiply(ci_values, gen_values)
    # build current result object:
    result_title = f[0:len(f) - 4] + " - CO2-eq offset"
    current_result = Result(result_title)
    current_result.payload.append("Time [UTC], CO2-eq offset [g]")
    # Iterate over the actual number of samples instead of the previous
    # hard-coded range(8760): profiles shorter than a full year no longer
    # raise IndexError, and a full-year profile produces identical output.
    for timestamp, offset in zip(timestamps, carbon_offset):
        current_result.payload.append(str(timestamp) + ", " + str(offset))
    results.append(current_result)
# dump all results:
for r in results:
    r.dump()
| 33.71875 | 100 | 0.696942 |
# NOTE(review): duplicate copy of the CO2-offset script above (the dataset's
# "content_no_comment" column, header comments removed); kept byte-for-byte.
from os import walk
import haspr
from haspr import Result
from haspr import Dataset
import numpy as np
from numpy import genfromtxt
ciPath = "D:\\00_Results\\04_CO2 Offset\\1_Swiss Grid CI - 1h - UTC.csv"
inputDirectory = "D:\\00_Results\\Out"
haspr.outputDirectory = "D:\\00_Results\\04_CO2 Offset\\Case 5 - 30 to 65 deg winter opt"
haspr.osPathDelimiter = "\\"
# extract carbon intensity data:
ci = Dataset("ci")
haspr.get_csv_data(ciPath, ci)
timestamps = []
ci_values = []
for p in ci.payload:
    timestamps.append(str(p[0]))
    ci_values.append(float(p[1]))
ci_values = np.array(ci_values) # use numpy for efficient element-wise calculations
# get all file names in inputDirectory:
file_names = []
for (dirpath, dirnames, filenames) in walk(inputDirectory):
    file_names.extend(filenames)
# cycle through files and build result objects:
results = []
for f in file_names:
    file_path = inputDirectory + haspr.osPathDelimiter + f
    # get generation profile:
    extracted_array = genfromtxt(file_path, delimiter=',', skip_header=1)
    gen_values = extracted_array[:, 1] # we only want generation values
    # get carbon offset for current generation profile:
    carbon_offset = np.multiply(ci_values, gen_values)
    # build current result object:
    result_title = f[0:len(f) - 4] + " - CO2-eq offset"
    current_result = Result(result_title)
    current_result.payload.append("Time [UTC], CO2-eq offset [g]")
    # NOTE(review): assumes exactly 8760 hourly samples (one non-leap year)
    for i in range(8760):
        str_to_append = str(timestamps[i]) + ", " + str(carbon_offset[i])
        current_result.payload.append(str_to_append)
    results.append(current_result)
# dump all results:
for r in results:
    r.dump()
| true | true |
f7309e42f5d8076d77031b024afee339b3886980 | 1,817 | py | Python | setup.py | JockeJarre/robotframework-CSVLibrary | e9e567ca3c7d3199ddde167c42c310fcd5657c96 | [
"Apache-2.0"
] | null | null | null | setup.py | JockeJarre/robotframework-CSVLibrary | e9e567ca3c7d3199ddde167c42c310fcd5657c96 | [
"Apache-2.0"
] | null | null | null | setup.py | JockeJarre/robotframework-CSVLibrary | e9e567ca3c7d3199ddde167c42c310fcd5657c96 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from os.path import join, dirname, abspath
from setuptools import setup
def read(rel_path):
    """Return the full text of *rel_path*, resolved relative to this file."""
    base_dir = abspath(dirname(__file__))
    file_path = join(base_dir, rel_path)
    with open(file_path) as fp:
        return fp.read()
def get_version(rel_path):
    """Extract the __version__ string from a source file without importing it.

    Raises RuntimeError when no __version__ assignment is found.
    """
    for candidate in read(rel_path).splitlines():
        if not candidate.startswith("__version__"):
            continue
        quote = '"' if '"' in candidate else "'"
        return candidate.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Everything below executes at build/install time.
REQUIREMENTS = read('requirements.txt').splitlines()
DESCRIPTION = read('README.md')
setup(name='robotframework-csvlibrary',
      # single-source the version from the package itself
      version=get_version("CSVLibrary/__init__.py"),
      description='CSV library for Robot Framework',
      long_description=DESCRIPTION,
      long_description_content_type='text/markdown',
      author='Marcin Mierzejewski',
      author_email='<mmierz@gmail.com>',
      url='https://github.com/s4int/robotframework-CSVLibrary',
      license='Apache License 2.0',
      keywords='robotframework testing csv',
      platforms='any',
      classifiers=[
          "Development Status :: 4 - Beta",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Software Development :: Testing",
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Programming Language :: Python :: 3.10',
      ],
      install_requires=REQUIREMENTS,
      packages=['CSVLibrary'],
      )
| 34.283019 | 63 | 0.624656 |
# NOTE(review): duplicate copy of the setup.py above (the dataset's
# "content_no_comment" column); kept byte-for-byte.
from os.path import join, dirname, abspath
from setuptools import setup
def read(rel_path):
    here = abspath(dirname(__file__))
    with open(join(here, rel_path)) as fp:
        return fp.read()
def get_version(rel_path):
    for line in read(rel_path).splitlines():
        if line.startswith("__version__"):
            delim = '"' if '"' in line else "'"
            return line.split(delim)[1]
    raise RuntimeError("Unable to find version string.")
REQUIREMENTS = read('requirements.txt').splitlines()
DESCRIPTION = read('README.md')
setup(name='robotframework-csvlibrary',
      version=get_version("CSVLibrary/__init__.py"),
      description='CSV library for Robot Framework',
      long_description=DESCRIPTION,
      long_description_content_type='text/markdown',
      author='Marcin Mierzejewski',
      author_email='<mmierz@gmail.com>',
      url='https://github.com/s4int/robotframework-CSVLibrary',
      license='Apache License 2.0',
      keywords='robotframework testing csv',
      platforms='any',
      classifiers=[
          "Development Status :: 4 - Beta",
          "License :: OSI Approved :: Apache Software License",
          "Operating System :: OS Independent",
          "Programming Language :: Python",
          "Topic :: Software Development :: Testing",
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
          'Programming Language :: Python :: 3.10',
      ],
      install_requires=REQUIREMENTS,
      packages=['CSVLibrary'],
      )
| true | true |
f7309efcb09c67631eaa89d0877022da259f67fd | 566 | py | Python | 083-remove-duplicates-from-sorted-list/remove_duplicates_from_sorted_list.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | 083-remove-duplicates-from-sorted-list/remove_duplicates_from_sorted_list.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | 083-remove-duplicates-from-sorted-list/remove_duplicates_from_sorted_list.py | cnluocj/leetcode | 5b870a63ba1aab3db1e05421c91f404a9aabc489 | [
"MIT"
] | null | null | null | """
60.36%
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def deleteDuplicates(self, head):
        """
        Remove nodes with duplicate values from a sorted singly linked list,
        in place, keeping the first occurrence of each value.

        :type head: ListNode
        :rtype: ListNode
        """
        current = head
        while current and current.next:
            next_node = current.next
            if next_node.val == current.val:
                # Duplicate value: splice the next node out and stay on
                # `current` so a run of equal values collapses to one node.
                current.next = next_node.next
            else:
                current = current.next
        # Fix: the original final line had dataset metadata columns fused
        # onto it ("return head | 23.58..."), making it invalid Python.
        return head
class Solution(object):
    """Comment-stripped duplicate copy of the sorted-list deduplication
    solution (dataset "content_no_comment" column)."""
    def deleteDuplicates(self, head):
        # Single pass: drop any node whose value repeats its predecessor's.
        current = head
        while current and current.next:
            next_node = current.next
            if next_node.val == current.val:
                current.next = next_node.next
            else:
                current = current.next
        # Fix: dataset columns ("| true | true |") had been fused onto this
        # line by the extraction, making it invalid Python.
        return head
f7309f8fa8176e6126bbfea93a4688ef973e5475 | 2,172 | py | Python | tests/util/test_rwlock.py | abichinger/pycasbin | 6166f298406d029a8540a12a4157d7b0072f8c8e | [
"Apache-2.0"
] | 915 | 2018-11-25T01:00:39.000Z | 2022-03-30T11:21:34.000Z | tests/util/test_rwlock.py | ffyuanda/pycasbin | 230132e459420aaa519d1eb9479f8996bdbbbd2a | [
"Apache-2.0"
] | 231 | 2019-02-13T09:29:51.000Z | 2022-03-28T16:32:51.000Z | tests/util/test_rwlock.py | ffyuanda/pycasbin | 230132e459420aaa519d1eb9479f8996bdbbbd2a | [
"Apache-2.0"
] | 173 | 2019-02-08T02:22:33.000Z | 2022-03-10T15:16:11.000Z | from unittest import TestCase
from casbin.util.rwlock import RWLockWrite
from concurrent.futures import ThreadPoolExecutor
import time
import queue
class TestRWLock(TestCase):
    """Timing-based behavioural tests for RWLockWrite (a writer-preferring
    reader/writer lock)."""

    def gen_locks(self):
        """Return a (reader-lock, writer-lock) pair backed by one fresh RW lock."""
        rw_lock = RWLockWrite()
        rl = rw_lock.gen_rlock()
        wl = rw_lock.gen_wlock()
        return (rl, wl)

    def test_multiple_readers(self):
        """Readers run concurrently, so wall time is far below the serial time."""
        [rl, _] = self.gen_locks()
        delay = 5 / 1000  # 5ms hold time per reader
        num_readers = 1000
        start = time.time()

        def read():
            with rl:
                time.sleep(delay)

        # Fix: use the executor as a context manager so worker threads are
        # joined (the original leaked them across tests).
        with ThreadPoolExecutor(num_readers) as executor:
            futures = [executor.submit(read) for i in range(num_readers)]
            for future in futures:
                future.result()  # re-raise any exception from a worker
            exec_time = time.time() - start
        self.assertLess(exec_time, delay * num_readers)

    def test_single_writer(self):
        """Writers serialize on the lock, so wall time is at least the serial time."""
        [_, wl] = self.gen_locks()
        delay = 5 / 1000  # 5ms hold time per writer
        num_writers = 10
        start = time.time()

        def write():
            with wl:
                time.sleep(delay)

        with ThreadPoolExecutor(num_writers) as executor:
            futures = [executor.submit(write) for i in range(num_writers)]
            for future in futures:
                future.result()
            exec_time = time.time() - start
        self.assertGreaterEqual(exec_time, delay * num_writers)

    def test_writer_preference(self):
        """Queued writers must run before readers that arrived after them."""
        [rl, wl] = self.gen_locks()
        q = queue.Queue()
        delay = 5 / 1000  # 5ms hold time per worker

        def read():
            with rl:
                time.sleep(delay)
                q.put("r")

        def write():
            with wl:
                time.sleep(delay)
                q.put("w")

        # Submit readers, then writers, then more readers; the short sleeps
        # fix the arrival order. (Removed an unused `start` timestamp.)
        with ThreadPoolExecutor(10) as executor:
            futures = [executor.submit(read) for i in range(3)]
            time.sleep(1 / 1000)
            futures += [executor.submit(write) for i in range(3)]
            time.sleep(1 / 1000)
            futures += [executor.submit(read) for i in range(3)]
            for future in futures:
                future.result()
        sequence = ""
        while not q.empty():
            sequence += q.get()
        self.assertEqual(sequence, "rrrwwwrrr")
| 26.814815 | 70 | 0.563076 | from unittest import TestCase
from casbin.util.rwlock import RWLockWrite
from concurrent.futures import ThreadPoolExecutor
import time
import queue
# NOTE(review): comment-stripped duplicate of the RW-lock test class above
# (the dataset's "content_no_comment" column); kept byte-for-byte.
class TestRWLock(TestCase):
    def gen_locks(self):
        rw_lock = RWLockWrite()
        rl = rw_lock.gen_rlock()
        wl = rw_lock.gen_wlock()
        return (rl, wl)
    def test_multiple_readers(self):
        [rl, _] = self.gen_locks()
        delay = 5 / 1000
        num_readers = 1000
        start = time.time()
        def read():
            with rl:
                time.sleep(delay)
        executor = ThreadPoolExecutor(num_readers)
        futures = [executor.submit(read) for i in range(num_readers)]
        [future.result() for future in futures]
        exec_time = time.time() - start
        self.assertLess(exec_time, delay * num_readers)
    def test_single_writer(self):
        [_, wl] = self.gen_locks()
        delay = 5 / 1000
        num_writers = 10
        start = time.time()
        def write():
            with wl:
                time.sleep(delay)
        executor = ThreadPoolExecutor(num_writers)
        futures = [executor.submit(write) for i in range(num_writers)]
        [future.result() for future in futures]
        exec_time = time.time() - start
        self.assertGreaterEqual(exec_time, delay * num_writers)
    def test_writer_preference(self):
        [rl, wl] = self.gen_locks()
        q = queue.Queue()
        delay = 5 / 1000
        start = time.time()
        def read():
            with rl:
                time.sleep(delay)
                q.put("r")
        def write():
            with wl:
                time.sleep(delay)
                q.put("w")
        executor = ThreadPoolExecutor(10)
        futures = [executor.submit(read) for i in range(3)]
        time.sleep(1 / 1000)
        futures += [executor.submit(write) for i in range(3)]
        time.sleep(1 / 1000)
        futures += [executor.submit(read) for i in range(3)]
        [future.result() for future in futures]
        sequence = ""
        while not q.empty():
            sequence += q.get()
        self.assertEqual(sequence, "rrrwwwrrr")
| true | true |
f7309fc308191a299ba7877254d0da7512e8a712 | 1,237 | py | Python | tests/python/unittest/test_lang_target.py | titikid/tvm | 0cf3765b28d457d2503ec20b551e9a8eadb1491d | [
"Apache-2.0"
] | 6 | 2019-08-29T19:00:57.000Z | 2020-06-15T14:55:16.000Z | tests/python/unittest/test_lang_target.py | titikid/tvm | 0cf3765b28d457d2503ec20b551e9a8eadb1491d | [
"Apache-2.0"
] | 1 | 2020-10-23T18:56:21.000Z | 2020-10-23T18:56:33.000Z | tests/python/unittest/test_lang_target.py | titikid/tvm | 0cf3765b28d457d2503ec20b551e9a8eadb1491d | [
"Apache-2.0"
] | 3 | 2018-06-29T17:19:21.000Z | 2020-12-11T07:50:33.000Z | import tvm
@tvm.target.generic_func
def mygeneric(data):
    """Fallback used when no target-specific override matches the current target."""
    # default generic function
    return data + 1

@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
    """Override dispatched when the current target has the cuda or gpu key."""
    return data + 2

@mygeneric.register("rocm")
def rocm_func(data):
    """Override dispatched for rocm targets."""
    return data + 3
@mygeneric.register("cpu")
def rocm_func(data):
return data + 10
def test_target_dispatch():
    # each with-block sets the current target; mygeneric should resolve to
    # the override registered for that target's keys
    with tvm.target.cuda():
        assert mygeneric(1) == 3
    with tvm.target.rocm():
        assert mygeneric(1) == 4
    with tvm.target.create("cuda"):
        assert mygeneric(1) == 3
    with tvm.target.arm_cpu():
        assert mygeneric(1) == 11
    with tvm.target.create("metal"):
        assert mygeneric(1) == 3
    # outside any with-block there is no current target
    assert tvm.target.current_target() == None
def test_target_string_parse():
    # parsing a target string should expose its name, options, keys and libs
    target = tvm.target.create("cuda -libs=cublas,cudnn")
    assert target.target_name == "cuda"
    assert target.options == ['-libs=cublas,cudnn']
    assert target.keys == ['cuda', 'gpu']
    assert target.libs == ['cublas', 'cudnn']
    # round-trip: str(parsed) matches the equivalent helper-built target
    assert str(target) == str(tvm.target.cuda("-libs=cublas,cudnn"))
    assert tvm.target.intel_graphics().device_name == "intel_graphics"
if __name__ == "__main__":
test_target_dispatch()
test_target_string_parse()
| 22.490909 | 70 | 0.654002 | import tvm
# NOTE(review): comment-stripped duplicate of the tvm target test above (the
# dataset's "content_no_comment" column); its leading "import tvm" line was
# fused into the preceding metadata row by the extraction. Kept byte-for-byte.
@tvm.target.generic_func
def mygeneric(data):
    return data + 1
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
    return data + 2
@mygeneric.register("rocm")
def rocm_func(data):
    return data + 3
@mygeneric.register("cpu")
def rocm_func(data):
    # NOTE(review): this redefinition shadows rocm_func above (flake8 F811)
    return data + 10
def test_target_dispatch():
    with tvm.target.cuda():
        assert mygeneric(1) == 3
    with tvm.target.rocm():
        assert mygeneric(1) == 4
    with tvm.target.create("cuda"):
        assert mygeneric(1) == 3
    with tvm.target.arm_cpu():
        assert mygeneric(1) == 11
    with tvm.target.create("metal"):
        assert mygeneric(1) == 3
    assert tvm.target.current_target() == None
def test_target_string_parse():
    target = tvm.target.create("cuda -libs=cublas,cudnn")
    assert target.target_name == "cuda"
    assert target.options == ['-libs=cublas,cudnn']
    assert target.keys == ['cuda', 'gpu']
    assert target.libs == ['cublas', 'cudnn']
    assert str(target) == str(tvm.target.cuda("-libs=cublas,cudnn"))
    assert tvm.target.intel_graphics().device_name == "intel_graphics"
if __name__ == "__main__":
    test_target_dispatch()
    test_target_string_parse()
| true | true |
f730a004824585ea2e54e929e3732507886f6a0a | 516 | py | Python | testcase/log_tokenNotpresent_streamActive_fail.py | SubhasisDutta/Connector-Api | 76d074064500c21004e826b70883bee61efe8bf4 | [
"Apache-2.0"
] | null | null | null | testcase/log_tokenNotpresent_streamActive_fail.py | SubhasisDutta/Connector-Api | 76d074064500c21004e826b70883bee61efe8bf4 | [
"Apache-2.0"
] | null | null | null | testcase/log_tokenNotpresent_streamActive_fail.py | SubhasisDutta/Connector-Api | 76d074064500c21004e826b70883bee61efe8bf4 | [
"Apache-2.0"
] | null | null | null | import httplib,json
# NOTE(review): Python 2 script (print statements, httplib module); expects a
# service on localhost:8888. Its leading "import httplib,json" line was fused
# into the preceding metadata row by the dataset extraction.
conn = httplib.HTTPConnection("localhost", 8888)
print "Testing with incorrect token - Expected FAIL"
# deliberately malformed token: the service is expected to reject it
token="qwertyujm-32ddfd-dfdfm-fgfvv"
streamId="1321-1321"
conn.request("GET", "/log?accessToken="+token+"&streamId="+streamId )
r2 = conn.getresponse()
print 'Response Status: '+str(r2.status)
data = r2.read()
j=json.loads(data)
# the service signals rejection with HTTP 200 plus an "Invalid Token." message
if r2.status == 200 and j['message']== 'Invalid Token.':
    print 'Json Message: '+j['message']
    print 'Test Pass'
else:
    print 'Test Fail'
print '====End===' | 32.25 | 69 | 0.699612 | import httplib,json
# NOTE(review): duplicate copy of the Python 2 token-rejection script above
# (the dataset's "content_no_comment" column); kept byte-for-byte.
conn = httplib.HTTPConnection("localhost", 8888)
print "Testing with incorrect token - Expected FAIL"
token="qwertyujm-32ddfd-dfdfm-fgfvv"
streamId="1321-1321"
conn.request("GET", "/log?accessToken="+token+"&streamId="+streamId )
r2 = conn.getresponse()
print 'Response Status: '+str(r2.status)
data = r2.read()
j=json.loads(data)
if r2.status == 200 and j['message']== 'Invalid Token.':
    print 'Json Message: '+j['message']
    print 'Test Pass'
else:
    print 'Test Fail'
print '====End===' | false | true |
f730a01780aed4d018a947bb7fd6e5d025daca5d | 9,499 | py | Python | modules/tests/photons_canvas_tests/points/test_helpers.py | Djelibeybi/photons | bc0aa91771d8e88fd3c691fb58f18cb876f292ec | [
"MIT"
] | 51 | 2020-07-03T08:34:48.000Z | 2022-03-16T10:56:08.000Z | modules/tests/photons_canvas_tests/points/test_helpers.py | delfick/photons | bc0aa91771d8e88fd3c691fb58f18cb876f292ec | [
"MIT"
] | 81 | 2020-07-03T08:13:59.000Z | 2022-03-31T23:02:54.000Z | modules/tests/photons_canvas_tests/points/test_helpers.py | Djelibeybi/photons | bc0aa91771d8e88fd3c691fb58f18cb876f292ec | [
"MIT"
] | 8 | 2020-07-24T23:48:20.000Z | 2021-05-24T17:20:16.000Z | # coding: spec
from photons_canvas.points import helpers as php
import pytest
describe "Color":
it "has ZERO":
assert php.Color.ZERO == (0, 0, 0, 0)
it "has WHITE":
assert php.Color.WHITE == (0, 0, 1, 3500)
it "has EMPTIES":
assert php.Color.EMPTIES == (php.Color.ZERO, None)
it "can tell if a color is 'dead'":
assert php.Color.dead(None)
assert php.Color.dead((0, 0, 0, 0))
assert php.Color.dead((40, 1, 0, 3500))
assert not php.Color.dead((1, 0, 0.2, 0))
assert not php.Color.dead((40, 1, 0.1, 3500))
describe "override a color":
it "does nothing if no overrides":
color = (0, 1, 2, 3)
assert php.Color.override(color) is color
it "can override properties":
color = (0, 1, 2, 3)
assert php.Color.override(color, hue=20) == (20, 1, 2, 3)
assert php.Color.override(color, saturation=0.5) == (0, 0.5, 2, 3)
assert php.Color.override(color, brightness=0.5) == (0, 1, 0.5, 3)
assert php.Color.override(color, kelvin=20) == (0, 1, 2, 20)
assert php.Color.override(
color, hue=30, saturation=0.9, brightness=0.1, kelvin=9000
) == (30, 0.9, 0.1, 9000)
it "doesn't allow out of limits":
color = (40, 1, 2, 3)
assert php.Color.override(color, hue=-1) == (0, 1, 2, 3)
assert php.Color.override(color, saturation=-1) == (40, 0, 2, 3)
assert php.Color.override(color, brightness=-1) == (40, 1, 0, 3)
assert php.Color.override(color, kelvin=-1) == (40, 1, 2, 0)
want = (0, 0, 0, 0)
assert (
php.Color.override(color, hue=-1, saturation=-1, brightness=-1, kelvin=-1) == want
)
assert php.Color.override(color, hue=361) == (360, 1, 2, 3)
assert php.Color.override(color, saturation=1.1) == (40, 1, 2, 3)
assert php.Color.override(color, brightness=1.1) == (40, 1, 1, 3)
assert php.Color.override(color, kelvin=666661) == (40, 1, 2, 65535)
assert php.Color.override(
color, hue=361, saturation=1.1, brightness=1.1, kelvin=66666
) == (360, 1, 1, 65535)
describe "adjust":
it "can adjust hue":
color = (100, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=-50) == (50, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=50) == (150, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=(60,)) == (60, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=-150) == (0, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=400) == (360, 0.1, 0.3, 9000)
it "can adjust saturation":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=-0.1) == (100, 0.4, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=0.2) == (100, 0.7, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=(0.3,)) == (100, 0.3, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=-0.7) == (100, 0, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=0.9) == (100, 1, 0.3, 9000)
it "can adjust brightness":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, brightness_change=-0.1) == (100, 0.5, 0.3 - 0.1, 9000)
assert php.Color.adjust(color, brightness_change=0.2) == (100, 0.5, 0.5, 9000)
assert php.Color.adjust(color, brightness_change=(0.4,)) == (100, 0.5, 0.4, 9000)
assert php.Color.adjust(color, brightness_change=-0.7) == (100, 0.5, 0, 9000)
assert php.Color.adjust(color, brightness_change=0.9) == (100, 0.5, 1, 9000)
it "can adjust kelvin":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, kelvin_change=-1000) == (100, 0.5, 0.3, 8000)
assert php.Color.adjust(color, kelvin_change=1000) == (100, 0.5, 0.3, 10000)
assert php.Color.adjust(color, kelvin_change=(3500,)) == (100, 0.5, 0.3, 3500)
assert php.Color.adjust(color, kelvin_change=-45000) == (100, 0.5, 0.3, 0)
assert php.Color.adjust(color, kelvin_change=66666) == (100, 0.5, 0.3, 65535)
it "can adjust combination":
got = php.Color.adjust(
(100, 0.5, 0.3, 9000),
hue_change=20,
saturation_change=-0.2,
brightness_change=(0.8,),
kelvin_change=-3000,
)
assert got == (120, 0.3, 0.8, 6000)
describe "average_color":
def assertColorAlmostEqual(self, got, want):
assert want[0] == pytest.approx(got[0], rel=1e-3)
assert want[1] == pytest.approx(got[1], rel=1e-3)
assert want[2] == pytest.approx(got[2], rel=1e-3)
assert want[3] == pytest.approx(got[3], rel=1e-3)
it "returns None if no colors":
color = php.average_color([])
assert color is None
color = php.average_color([None])
assert color is None
it "averages saturation, brightness and kelvin":
colors = [
(0, 0.1, 0.2, 3500),
(0, 0.2, 0.3, 4500),
(0, 0.3, 0.4, 5500),
]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (0, 0.2, 0.3, 4500))
it "it sets kelvin to 3500 if 0":
colors = [
(0, 0.1, 0.2, 3500),
(0, 0.2, 0.3, 0),
(0, 0.3, 0.4, 3500),
]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (0, 0.2, 0.3, 3500))
it "does special math to the hue":
#
# NOTE: I'm not sure how to test this maths so I've just put these values into the algorithm
# and asserting the results I got back.
#
colors = [(hue, 1, 1, 3500) for hue in (10, 20, 30)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (19.9999, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 20, 30)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (48.2227, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 20, 30, 300)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (24.2583, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 300)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (20, 1, 1, 3500))
colors = [(100, 1, 1, 3500), None, (300, 1, 1, 3500)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (20, 1, 1, 3500))
describe "Points":
it "can get cols":
bounds = ((3, 8), (5, 1), (5, 4))
cols = php.Points.cols(bounds)
assert cols == [
[(3, 5), (3, 4), (3, 3), (3, 2)],
[(4, 5), (4, 4), (4, 3), (4, 2)],
[(5, 5), (5, 4), (5, 3), (5, 2)],
[(6, 5), (6, 4), (6, 3), (6, 2)],
[(7, 5), (7, 4), (7, 3), (7, 2)],
]
it "can get rows":
bounds = ((3, 8), (5, 1), (5, 4))
rows = php.Points.rows(bounds)
assert rows == [
[(3, 5), (4, 5), (5, 5), (6, 5), (7, 5)],
[(3, 4), (4, 4), (5, 4), (6, 4), (7, 4)],
[(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)],
[(3, 2), (4, 2), (5, 2), (6, 2), (7, 2)],
]
it "can get all":
bounds = ((3, 8), (5, 1), (5, 4))
all_points = php.Points.all_points(bounds)
r1 = [(3, 5), (4, 5), (5, 5), (6, 5), (7, 5)]
r2 = [(3, 4), (4, 4), (5, 4), (6, 4), (7, 4)]
r3 = [(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)]
r4 = [(3, 2), (4, 2), (5, 2), (6, 2), (7, 2)]
assert all_points == [*r1, *r2, *r3, *r4]
it "can count points":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.count_points(bounds) == 20
bounds = ((1, 8), (6, 0), (7, 6))
assert php.Points.count_points(bounds) == 42
bounds = ((1, 1), (6, 6), (1, 1))
assert php.Points.count_points(bounds) == 0
it "can get points for a row":
bounds = ((3, 8), (5, 1), (5, 4))
row = php.Points.row(3, bounds)
assert row == [(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)]
it "can get points for a column":
bounds = ((3, 8), (5, 1), (5, 4))
col = php.Points.col(2, bounds)
assert col == [(2, 5), (2, 4), (2, 3), (2, 2)]
it "can expand a bounds":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.expand(bounds, 5) == ((-2, 13), (10, -4), (15, 14))
assert php.Points.expand(bounds, 3) == ((0, 11), (8, -2), (11, 10))
it "can get a point relative to bounds":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.relative((4, 4), bounds) == (1, 1)
assert php.Points.relative((5, 2), bounds) == (2, 3)
it "can get the bottom row":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.bottom_row(bounds) == 1
bounds = ((3, 8), (11, 9), (5, 2))
assert php.Points.bottom_row(bounds) == 9
it "can get the top row":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.top_row(bounds) == 5
bounds = ((3, 8), (11, 9), (5, 2))
assert php.Points.top_row(bounds) == 11
| 38.45749 | 100 | 0.505106 |
from photons_canvas.points import helpers as php
import pytest
describe "Color":
it "has ZERO":
assert php.Color.ZERO == (0, 0, 0, 0)
it "has WHITE":
assert php.Color.WHITE == (0, 0, 1, 3500)
it "has EMPTIES":
assert php.Color.EMPTIES == (php.Color.ZERO, None)
it "can tell if a color is 'dead'":
assert php.Color.dead(None)
assert php.Color.dead((0, 0, 0, 0))
assert php.Color.dead((40, 1, 0, 3500))
assert not php.Color.dead((1, 0, 0.2, 0))
assert not php.Color.dead((40, 1, 0.1, 3500))
describe "override a color":
it "does nothing if no overrides":
color = (0, 1, 2, 3)
assert php.Color.override(color) is color
it "can override properties":
color = (0, 1, 2, 3)
assert php.Color.override(color, hue=20) == (20, 1, 2, 3)
assert php.Color.override(color, saturation=0.5) == (0, 0.5, 2, 3)
assert php.Color.override(color, brightness=0.5) == (0, 1, 0.5, 3)
assert php.Color.override(color, kelvin=20) == (0, 1, 2, 20)
assert php.Color.override(
color, hue=30, saturation=0.9, brightness=0.1, kelvin=9000
) == (30, 0.9, 0.1, 9000)
it "doesn't allow out of limits":
color = (40, 1, 2, 3)
assert php.Color.override(color, hue=-1) == (0, 1, 2, 3)
assert php.Color.override(color, saturation=-1) == (40, 0, 2, 3)
assert php.Color.override(color, brightness=-1) == (40, 1, 0, 3)
assert php.Color.override(color, kelvin=-1) == (40, 1, 2, 0)
want = (0, 0, 0, 0)
assert (
php.Color.override(color, hue=-1, saturation=-1, brightness=-1, kelvin=-1) == want
)
assert php.Color.override(color, hue=361) == (360, 1, 2, 3)
assert php.Color.override(color, saturation=1.1) == (40, 1, 2, 3)
assert php.Color.override(color, brightness=1.1) == (40, 1, 1, 3)
assert php.Color.override(color, kelvin=666661) == (40, 1, 2, 65535)
assert php.Color.override(
color, hue=361, saturation=1.1, brightness=1.1, kelvin=66666
) == (360, 1, 1, 65535)
describe "adjust":
it "can adjust hue":
color = (100, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=-50) == (50, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=50) == (150, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=(60,)) == (60, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=-150) == (0, 0.1, 0.3, 9000)
assert php.Color.adjust(color, hue_change=400) == (360, 0.1, 0.3, 9000)
it "can adjust saturation":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=-0.1) == (100, 0.4, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=0.2) == (100, 0.7, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=(0.3,)) == (100, 0.3, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=-0.7) == (100, 0, 0.3, 9000)
assert php.Color.adjust(color, saturation_change=0.9) == (100, 1, 0.3, 9000)
it "can adjust brightness":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, brightness_change=-0.1) == (100, 0.5, 0.3 - 0.1, 9000)
assert php.Color.adjust(color, brightness_change=0.2) == (100, 0.5, 0.5, 9000)
assert php.Color.adjust(color, brightness_change=(0.4,)) == (100, 0.5, 0.4, 9000)
assert php.Color.adjust(color, brightness_change=-0.7) == (100, 0.5, 0, 9000)
assert php.Color.adjust(color, brightness_change=0.9) == (100, 0.5, 1, 9000)
it "can adjust kelvin":
color = (100, 0.5, 0.3, 9000)
assert php.Color.adjust(color, kelvin_change=-1000) == (100, 0.5, 0.3, 8000)
assert php.Color.adjust(color, kelvin_change=1000) == (100, 0.5, 0.3, 10000)
assert php.Color.adjust(color, kelvin_change=(3500,)) == (100, 0.5, 0.3, 3500)
assert php.Color.adjust(color, kelvin_change=-45000) == (100, 0.5, 0.3, 0)
assert php.Color.adjust(color, kelvin_change=66666) == (100, 0.5, 0.3, 65535)
it "can adjust combination":
got = php.Color.adjust(
(100, 0.5, 0.3, 9000),
hue_change=20,
saturation_change=-0.2,
brightness_change=(0.8,),
kelvin_change=-3000,
)
assert got == (120, 0.3, 0.8, 6000)
describe "average_color":
def assertColorAlmostEqual(self, got, want):
assert want[0] == pytest.approx(got[0], rel=1e-3)
assert want[1] == pytest.approx(got[1], rel=1e-3)
assert want[2] == pytest.approx(got[2], rel=1e-3)
assert want[3] == pytest.approx(got[3], rel=1e-3)
it "returns None if no colors":
color = php.average_color([])
assert color is None
color = php.average_color([None])
assert color is None
it "averages saturation, brightness and kelvin":
colors = [
(0, 0.1, 0.2, 3500),
(0, 0.2, 0.3, 4500),
(0, 0.3, 0.4, 5500),
]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (0, 0.2, 0.3, 4500))
it "it sets kelvin to 3500 if 0":
colors = [
(0, 0.1, 0.2, 3500),
(0, 0.2, 0.3, 0),
(0, 0.3, 0.4, 3500),
]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (0, 0.2, 0.3, 3500))
it "does special math to the hue":
#
# NOTE: I'm not sure how to test this maths so I've just put these values into the algorithm
# and asserting the results I got back.
#
colors = [(hue, 1, 1, 3500) for hue in (10, 20, 30)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (19.9999, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 20, 30)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (48.2227, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 20, 30, 300)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (24.2583, 1, 1, 3500))
colors = [(hue, 1, 1, 3500) for hue in (100, 300)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (20, 1, 1, 3500))
colors = [(100, 1, 1, 3500), None, (300, 1, 1, 3500)]
color = php.average_color(colors)
self.assertColorAlmostEqual(color, (20, 1, 1, 3500))
describe "Points":
it "can get cols":
bounds = ((3, 8), (5, 1), (5, 4))
cols = php.Points.cols(bounds)
assert cols == [
[(3, 5), (3, 4), (3, 3), (3, 2)],
[(4, 5), (4, 4), (4, 3), (4, 2)],
[(5, 5), (5, 4), (5, 3), (5, 2)],
[(6, 5), (6, 4), (6, 3), (6, 2)],
[(7, 5), (7, 4), (7, 3), (7, 2)],
]
it "can get rows":
bounds = ((3, 8), (5, 1), (5, 4))
rows = php.Points.rows(bounds)
assert rows == [
[(3, 5), (4, 5), (5, 5), (6, 5), (7, 5)],
[(3, 4), (4, 4), (5, 4), (6, 4), (7, 4)],
[(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)],
[(3, 2), (4, 2), (5, 2), (6, 2), (7, 2)],
]
it "can get all":
bounds = ((3, 8), (5, 1), (5, 4))
all_points = php.Points.all_points(bounds)
r1 = [(3, 5), (4, 5), (5, 5), (6, 5), (7, 5)]
r2 = [(3, 4), (4, 4), (5, 4), (6, 4), (7, 4)]
r3 = [(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)]
r4 = [(3, 2), (4, 2), (5, 2), (6, 2), (7, 2)]
assert all_points == [*r1, *r2, *r3, *r4]
it "can count points":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.count_points(bounds) == 20
bounds = ((1, 8), (6, 0), (7, 6))
assert php.Points.count_points(bounds) == 42
bounds = ((1, 1), (6, 6), (1, 1))
assert php.Points.count_points(bounds) == 0
it "can get points for a row":
bounds = ((3, 8), (5, 1), (5, 4))
row = php.Points.row(3, bounds)
assert row == [(3, 3), (4, 3), (5, 3), (6, 3), (7, 3)]
it "can get points for a column":
bounds = ((3, 8), (5, 1), (5, 4))
col = php.Points.col(2, bounds)
assert col == [(2, 5), (2, 4), (2, 3), (2, 2)]
it "can expand a bounds":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.expand(bounds, 5) == ((-2, 13), (10, -4), (15, 14))
assert php.Points.expand(bounds, 3) == ((0, 11), (8, -2), (11, 10))
it "can get a point relative to bounds":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.relative((4, 4), bounds) == (1, 1)
assert php.Points.relative((5, 2), bounds) == (2, 3)
it "can get the bottom row":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.bottom_row(bounds) == 1
bounds = ((3, 8), (11, 9), (5, 2))
assert php.Points.bottom_row(bounds) == 9
it "can get the top row":
bounds = ((3, 8), (5, 1), (5, 4))
assert php.Points.top_row(bounds) == 5
bounds = ((3, 8), (11, 9), (5, 2))
assert php.Points.top_row(bounds) == 11
| false | true |
f730a126b5abb572ff7afb60caf111aa9e871c02 | 116 | py | Python | LEVEL2/행렬테두리회전하기/tempCodeRunnerFile.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | LEVEL2/행렬테두리회전하기/tempCodeRunnerFile.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | LEVEL2/행렬테두리회전하기/tempCodeRunnerFile.py | seunghwanly/CODING-TEST | a820da950c163d399594770199aa2e782d1fbbde | [
"MIT"
] | null | null | null | for u in range(columns):
# for k in range(rows):
# print(map[u][k], end= "\t")
# print() | 29 | 41 | 0.448276 | for u in range(columns):
| false | true |
f730a1a680a5e31f14718b5fcf0fe938a393cf81 | 4,041 | py | Python | examples/decoupledibpm/flatplate3dRe100_GPU/scripts/plotForceCoefficients.py | jedbrown/PetIBM | 8584d824e0ffbbe2ea413dcf081e79a72b39bf5f | [
"BSD-3-Clause"
] | null | null | null | examples/decoupledibpm/flatplate3dRe100_GPU/scripts/plotForceCoefficients.py | jedbrown/PetIBM | 8584d824e0ffbbe2ea413dcf081e79a72b39bf5f | [
"BSD-3-Clause"
] | null | null | null | examples/decoupledibpm/flatplate3dRe100_GPU/scripts/plotForceCoefficients.py | jedbrown/PetIBM | 8584d824e0ffbbe2ea413dcf081e79a72b39bf5f | [
"BSD-3-Clause"
] | null | null | null | """
Plots the steady-state force coefficients of an inclined flat-plate with
aspect-ratio 2 at Reynolds number 100 for angles of attack between 0 and 90
degrees.
Compares with experimental results reported in Taira et al. (2007).
_References:_
* Taira, K., Dickson, W. B., Colonius,
T., Dickinson, M. H., & Rowley, C. W. (2007).
Unsteadiness in flow over a flat plate at angle-of-attack at low Reynolds
numbers.
AIAA Paper, 710, 2007.
"""
import os
import numpy
from matplotlib import pyplot
if not os.environ.get('PETIBM_EXAMPLES'):
raise KeyError('Environment variable PETIBM_EXAMPLES is not set; '
'Set PETIBM_EXAMPLES as the root directory of the examples.')
script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.sep.join(script_dir.split(os.sep)[:-1])
# Read forces and computes mean values for each angle of inclination.
time_limits = (15.0, 20.0)
angles = numpy.arange(0, 90 + 1, 10, dtype=numpy.int32)
cd = numpy.zeros_like(angles, dtype=numpy.float64)
cl = numpy.zeros_like(angles, dtype=numpy.float64)
for i, angle in enumerate(angles):
filepath = os.path.join(root_dir, 'AoA{}'.format(angle), 'forces.txt')
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
mask = numpy.where(numpy.logical_and(data[0] >= time_limits[0],
data[0] <= time_limits[1]))[0]
cd[i], cl[i] = data[1][mask].mean(), data[2][mask].mean()
# Read experimental data from Taira et al. (2007).
directory = os.path.join(os.environ['PETIBM_EXAMPLES'], 'data')
taira = {'cd': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_CdvsAoA.dat'},
'cl': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_ClvsAoA.dat'}}
for key in taira.keys():
filepath = os.path.join(directory, taira[key]['filename'])
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
taira[key]['aoa'], taira[key]['values'] = data[0], data[1]
# Plots the force coefficients versus the angle-of-attack and compares with
# experimental results reported in Taira et al. (2007).
pyplot.style.use('seaborn-dark')
fig, ax = pyplot.subplots(2, figsize=(6.0, 6.0), sharex=True)
ax[0].grid(zorder=0)
ax[0].set_ylabel('$C_D$', fontname='DejaVu Serif', fontsize=16)
ax[0].scatter(angles, cd,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[0].scatter(taira['cd']['aoa'], taira['cd']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[0].set_ylim(0.0, 2.0)
ax[1].grid(zorder=0)
ax[1].set_xlabel('angle of attack (deg)',
fontname='DejaVu Serif', fontsize=16)
ax[1].set_ylabel('$C_L$', fontname='DejaVu Serif', fontsize=16)
ax[1].scatter(angles, cl,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[1].scatter(taira['cl']['aoa'], taira['cl']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[1].set_xlim(0.0, 90.0)
ax[1].set_ylim(0.0, 2.0)
for a in ax:
for method in ['get_xticklabels', 'get_yticklabels']:
for label in getattr(a, method)():
label.set_fontname('DejaVu Serif')
label.set_fontsize(14)
handles, labels = ax[0].get_legend_handles_labels()
fig.legend(handles, labels,
ncol=2, loc='center', prop={'family': 'serif', 'size': 14},
frameon=False, bbox_to_anchor=(0.54, 0.53))
fig.tight_layout()
# Save the figure.
figures_dir = os.path.join(root_dir, 'figures')
if not os.path.isdir(figures_dir):
os.makedirs(figures_dir)
filepath = os.path.join(figures_dir, 'forceCoefficients.png')
fig.savefig(filepath)
pyplot.show()
| 39.23301 | 78 | 0.644642 |
import os
import numpy
from matplotlib import pyplot
if not os.environ.get('PETIBM_EXAMPLES'):
raise KeyError('Environment variable PETIBM_EXAMPLES is not set; '
'Set PETIBM_EXAMPLES as the root directory of the examples.')
script_dir = os.path.dirname(os.path.realpath(__file__))
root_dir = os.sep.join(script_dir.split(os.sep)[:-1])
time_limits = (15.0, 20.0)
angles = numpy.arange(0, 90 + 1, 10, dtype=numpy.int32)
cd = numpy.zeros_like(angles, dtype=numpy.float64)
cl = numpy.zeros_like(angles, dtype=numpy.float64)
for i, angle in enumerate(angles):
filepath = os.path.join(root_dir, 'AoA{}'.format(angle), 'forces.txt')
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
mask = numpy.where(numpy.logical_and(data[0] >= time_limits[0],
data[0] <= time_limits[1]))[0]
cd[i], cl[i] = data[1][mask].mean(), data[2][mask].mean()
directory = os.path.join(os.environ['PETIBM_EXAMPLES'], 'data')
taira = {'cd': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_CdvsAoA.dat'},
'cl': {'aoa': None, 'values': None,
'filename': 'taira_et_al_2007_flatPlateRe100AR2_ClvsAoA.dat'}}
for key in taira.keys():
filepath = os.path.join(directory, taira[key]['filename'])
with open(filepath, 'r') as infile:
data = numpy.loadtxt(infile, dtype=numpy.float64, unpack=True)
taira[key]['aoa'], taira[key]['values'] = data[0], data[1]
pyplot.style.use('seaborn-dark')
fig, ax = pyplot.subplots(2, figsize=(6.0, 6.0), sharex=True)
ax[0].grid(zorder=0)
ax[0].set_ylabel('$C_D$', fontname='DejaVu Serif', fontsize=16)
ax[0].scatter(angles, cd,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[0].scatter(taira['cd']['aoa'], taira['cd']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[0].set_ylim(0.0, 2.0)
ax[1].grid(zorder=0)
ax[1].set_xlabel('angle of attack (deg)',
fontname='DejaVu Serif', fontsize=16)
ax[1].set_ylabel('$C_L$', fontname='DejaVu Serif', fontsize=16)
ax[1].scatter(angles, cl,
label='PetIBM',
marker='x', s=40,
facecolors='black', edgecolors='none',
zorder=4)
ax[1].scatter(taira['cl']['aoa'], taira['cl']['values'],
label='Taira et al. (2007)',
marker='o', s=40,
facecolors='none', edgecolors='#1B9E77',
zorder=3)
ax[1].set_xlim(0.0, 90.0)
ax[1].set_ylim(0.0, 2.0)
for a in ax:
for method in ['get_xticklabels', 'get_yticklabels']:
for label in getattr(a, method)():
label.set_fontname('DejaVu Serif')
label.set_fontsize(14)
handles, labels = ax[0].get_legend_handles_labels()
fig.legend(handles, labels,
ncol=2, loc='center', prop={'family': 'serif', 'size': 14},
frameon=False, bbox_to_anchor=(0.54, 0.53))
fig.tight_layout()
figures_dir = os.path.join(root_dir, 'figures')
if not os.path.isdir(figures_dir):
os.makedirs(figures_dir)
filepath = os.path.join(figures_dir, 'forceCoefficients.png')
fig.savefig(filepath)
pyplot.show()
| true | true |
f730a1ae102d08afb1c37051c52914eb79ec8212 | 1,395 | py | Python | utils/parser/parser.py | reeegry/youtube-parser-bot | 475e232f80445ae6ba3e988d844b61bada6c0aed | [
"MIT"
] | null | null | null | utils/parser/parser.py | reeegry/youtube-parser-bot | 475e232f80445ae6ba3e988d844b61bada6c0aed | [
"MIT"
] | null | null | null | utils/parser/parser.py | reeegry/youtube-parser-bot | 475e232f80445ae6ba3e988d844b61bada6c0aed | [
"MIT"
] | null | null | null | import urllib.request
import json
import requests
from data.config import *
def youtube_get_information(channel_id):
api_key = YOUTUBE_API_KEY
base_search_url = "https://www.googleapis.com/youtube/v3/search?"
base_video_link = "https://www.youtube.com/watch?v="
first_url = base_search_url + f"key={api_key}&channelId={channel_id}&part=snippet,id&order=date&maxResults=1"
inp = urllib.request.urlopen(first_url)
resp = json.load(inp)
for i in resp["items"]:
print(i)
if i["id"]["kind"] == "youtube#video":
video_link = base_video_link + i["id"]["videoId"]
video_title = i["snippet"]["title"]
return video_title, video_link
def twitch_get_information(channel_name):
URL = f'https://api.twitch.tv/helix/streams?user_login={channel_name}'
auth_url = 'https://id.twitch.tv/oauth2/token'
aut_params = {'client_id': CLIENT_ID,
'client_secret': SECRET,
'grant_type': 'client_credentials'
}
aut_call = requests.post(url=auth_url, params=aut_params)
access_token = aut_call.json()['access_token']
head = {
'Client-ID' : CLIENT_ID,
'Authorization' : "Bearer " + access_token
}
r = requests.get(URL, headers = head).json()['data']
if r:
if r[0]['type'] == 'live':
return True
return False
| 29.680851 | 113 | 0.629391 | import urllib.request
import json
import requests
from data.config import *
def youtube_get_information(channel_id):
api_key = YOUTUBE_API_KEY
base_search_url = "https://www.googleapis.com/youtube/v3/search?"
base_video_link = "https://www.youtube.com/watch?v="
first_url = base_search_url + f"key={api_key}&channelId={channel_id}&part=snippet,id&order=date&maxResults=1"
inp = urllib.request.urlopen(first_url)
resp = json.load(inp)
for i in resp["items"]:
print(i)
if i["id"]["kind"] == "youtube#video":
video_link = base_video_link + i["id"]["videoId"]
video_title = i["snippet"]["title"]
return video_title, video_link
def twitch_get_information(channel_name):
URL = f'https://api.twitch.tv/helix/streams?user_login={channel_name}'
auth_url = 'https://id.twitch.tv/oauth2/token'
aut_params = {'client_id': CLIENT_ID,
'client_secret': SECRET,
'grant_type': 'client_credentials'
}
aut_call = requests.post(url=auth_url, params=aut_params)
access_token = aut_call.json()['access_token']
head = {
'Client-ID' : CLIENT_ID,
'Authorization' : "Bearer " + access_token
}
r = requests.get(URL, headers = head).json()['data']
if r:
if r[0]['type'] == 'live':
return True
return False
| true | true |
f730a1efa3b379ce51a02f804023a5cabbc94433 | 12,104 | py | Python | models/yolo.py | QamarQuqa/Real-time-Traffic-and-Pedestrian-Counting | c53384f496e816bb852afea328d94b9e963fe753 | [
"MIT"
] | 4 | 2021-07-21T07:48:02.000Z | 2022-03-16T00:42:33.000Z | models/yolo.py | QamarQuqa/Real-time-Traffic-and-Pedestrian-Counting | c53384f496e816bb852afea328d94b9e963fe753 | [
"MIT"
] | 3 | 2021-11-06T09:18:21.000Z | 2022-01-11T14:19:40.000Z | models/yolo.py | QamarQuqa/Real-time-Traffic-and-Pedestrian-Counting | c53384f496e816bb852afea328d94b9e963fe753 | [
"MIT"
] | 3 | 2021-12-26T03:05:06.000Z | 2022-01-14T07:54:56.000Z | # YOLOv5 YOLO-specific modules
import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./') # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop # for FLOPS computation
except ImportError:
thop = None
class Detect(nn.Module):
stride = None # strides computed during build
export = False # onnx export
def __init__(self, nc=80, anchors=(), ch=()): # detection layer
super(Detect, self).__init__()
self.nc = nc # number of classes
self.no = nc + 5 # number of outputs per anchor
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.grid = [torch.zeros(1)] * self.nl # init grid
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a) # shape(nl,na,2)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2)
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv
def forward(self, x):
# x = x.copy() # for profiling
z = [] # inference output
self.training |= self.export
for i in range(self.nl):
x[i] = self.m[i](x[i]) # conv
bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training: # inference
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg # model dict
else: # is *.yaml
import yaml # for torch hub
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict
# Define model
ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels
if nc and nc != self.yaml['nc']:
logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc # override yaml value
if anchors:
logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors) # override yaml value
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
self.names = [str(i) for i in range(self.yaml['nc'])] # default names
# print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])
# Build strides, anchors
m = self.model[-1] # Detect()
if isinstance(m, Detect):
s = 256 # 2x min stride
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases() # only run once
# print('Strides: %s' % m.stride.tolist())
# Init weights, biases
initialize_weights(self)
self.info()
logger.info('')
def forward(self, x, augment=False, profile=False):
if augment:
img_size = x.shape[-2:] # height, width
s = [1, 0.83, 0.67] # scales
f = [None, 3, None] # flips (2-ud, 3-lr)
y = [] # outputs
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0] # forward
# cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save
yi[..., :4] /= si # de-scale
if fi == 2:
yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud
elif fi == 3:
yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr
y.append(yi)
return torch.cat(y, 1), None # augmented inference, train
else:
return self.forward_once(x, profile) # single-scale inference, train
def forward_once(self, x, profile=False):
y, dt = [], [] # outputs
for m in self.model:
if m.f != -1: # if not from previous layer
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers
if profile:
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS
t = time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
x = m(x) # run
y.append(x if m.i in self.save else None) # save output
if profile:
print('%.1fms total' % sum(dt))
return x
def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency
# https://arxiv.org/abs/1708.02002 section 3.3
# cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
m = self.model[-1] # Detect() module
for mi, s in zip(m.m, m.stride): # from
b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)
b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
    def _print_biases(self):
        """Debug helper: print mean objectness/class biases of each Detect() conv head."""
        m = self.model[-1]  # Detect() module
        for mi in m.m:  # one conv per detection scale
            b = mi.bias.detach().view(m.na, -1).T  # conv.bias(255) to (3,85)
            # Columns: input channels, then means of x, y, w, h, obj biases, then mean class bias.
            print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
# def _print_weights(self):
# for m in self.model.modules():
# if type(m) is Bottleneck:
# print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights
def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers
print('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv
delattr(m, 'bn') # remove batchnorm
m.forward = m.fuseforward # update forward
self.info()
return self
def nms(self, mode=True): # add or remove NMS module
present = type(self.model[-1]) is NMS # last layer is NMS
if mode and not present:
print('Adding NMS... ')
m = NMS() # module
m.f = -1 # from
m.i = self.model[-1].i + 1 # index
self.model.add_module(name='%s' % m.i, module=m) # add
self.eval()
elif not mode and present:
print('Removing NMS... ')
self.model = self.model[:-1] # remove
return self
def autoshape(self): # add autoShape module
print('Adding autoShape... ')
m = autoShape(self) # wrap model
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes
return m
    def info(self, verbose=False, img_size=640):  # print model information
        """Log a parameter/layer summary of the model (delegates to model_info)."""
        model_info(self, verbose, img_size)
def parse_model(d, ch):  # model_dict, input_channels(3)
    """Assemble an nn.Sequential model from a parsed model.yaml dict.

    Args:
        d: model dict with 'anchors', 'nc', 'depth_multiple', 'width_multiple',
            'backbone' and 'head' entries.
        ch: list of input channel counts; ch[-1] feeds the first layer. After
            the first layer, ch is rebuilt to track each layer's output channels.

    Returns:
        Tuple (model, save): the assembled nn.Sequential and the sorted list of
        layer indices whose outputs must be cached for later skip connections.
    """
    logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)
    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        m = eval(m) if isinstance(m, str) else m  # module name string -> class
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except (NameError, SyntaxError):
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit). Non-expression strings such as
                # activation names are intentionally kept as plain strings.
                pass
        n = max(round(n * gd), 1) if n > 1 else n  # apply depth gain
        if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
                 C3, C3TR]:
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not the final output layer
                c2 = make_divisible(c2 * gw, 8)  # apply width gain, keep channels divisible by 8
            args = [c1, c2, *args[1:]]
            if m in [BottleneckCSP, C3, C3TR]:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum(ch[x] for x in f)  # output channels = sum of concatenated inputs
        elif m is Detect:
            args.append([ch[x] for x in f])
            if isinstance(args[1], int):  # anchor count given instead of anchor list
                args[1] = [list(range(args[1] * 2))] * len(f)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]
        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        num_params = sum(x.numel() for x in m_.parameters())  # renamed from `np` to avoid shadowing the numpy alias
        m_.i, m_.f, m_.type, m_.np = i, f, t, num_params  # attach index, 'from' index, type, number params
        logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, num_params, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []  # from here on, ch tracks per-layer output channels
        ch.append(c2)
    return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
    # CLI entry point: build a Model from a yaml config and leave it in train mode.
    parser = argparse.ArgumentParser()
    parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    opt.cfg = check_file(opt.cfg)  # resolve/validate the config path
    set_logging()
    device = select_device(opt.device)
    # Create model
    model = Model(opt.cfg).to(device)
    model.train()
    # Profile
    # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device)
    # y = model(img, profile=True)
    # Tensorboard
    # from torch.utils.tensorboard import SummaryWriter
    # tb_writer = SummaryWriter()
    # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/")
    # tb_writer.add_graph(model.model, img)  # add model to tensorboard
    # tb_writer.add_image('test', img[0], dataformats='CWH')  # add model to tensorboard
| 43.539568 | 119 | 0.542052 |
import argparse
import logging
import sys
from copy import deepcopy
sys.path.append('./')
logger = logging.getLogger(__name__)
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
select_device, copy_attr
try:
import thop
except ImportError:
thop = None
class Detect(nn.Module):
stride = None
export = False
def __init__(self, nc=80, anchors=(), ch=()):
super(Detect, self).__init__()
self.nc = nc
self.no = nc + 5
self.nl = len(anchors)
self.na = len(anchors[0]) // 2
self.grid = [torch.zeros(1)] * self.nl
a = torch.tensor(anchors).float().view(self.nl, -1, 2)
self.register_buffer('anchors', a)
self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2))
self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch)
def forward(self, x):
self.training |= self.export
for i in range(self.nl):
x[i] = self.m[i](x[i])
bs, _, ny, nx = x[i].shape
x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
if not self.training:
if self.grid[i].shape[2:4] != x[i].shape[2:4]:
self.grid[i] = self._make_grid(nx, ny).to(x[i].device)
y = x[i].sigmoid()
y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i]
y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]
z.append(y.view(bs, -1, self.no))
return x if self.training else (torch.cat(z, 1), x)
@staticmethod
def _make_grid(nx=20, ny=20):
yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)])
return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float()
class Model(nn.Module):
def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):
super(Model, self).__init__()
if isinstance(cfg, dict):
self.yaml = cfg
else:
import yaml
self.yaml_file = Path(cfg).name
with open(cfg) as f:
self.yaml = yaml.load(f, Loader=yaml.SafeLoader)
ch = self.yaml['ch'] = self.yaml.get('ch', ch)
if nc and nc != self.yaml['nc']:
logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
self.yaml['nc'] = nc
if anchors:
logger.info(f'Overriding model.yaml anchors with anchors={anchors}')
self.yaml['anchors'] = round(anchors)
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])
self.names = [str(i) for i in range(self.yaml['nc'])]
m = self.model[-1]
if isinstance(m, Detect):
s = 256
m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])
m.anchors /= m.stride.view(-1, 1, 1)
check_anchor_order(m)
self.stride = m.stride
self._initialize_biases()
initialize_weights(self)
self.info()
logger.info('')
def forward(self, x, augment=False, profile=False):
if augment:
img_size = x.shape[-2:]
s = [1, 0.83, 0.67]
f = [None, 3, None]
y = []
for si, fi in zip(s, f):
xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))
yi = self.forward_once(xi)[0]
yi[..., :4] /= si
if fi == 2:
yi[..., 1] = img_size[0] - yi[..., 1]
elif fi == 3:
yi[..., 0] = img_size[1] - yi[..., 0]
y.append(yi)
return torch.cat(y, 1), None
else:
return self.forward_once(x, profile)
def forward_once(self, x, profile=False):
y, dt = [], []
for m in self.model:
if m.f != -1:
x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
if profile:
o = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 if thop else 0
t = time_synchronized()
for _ in range(10):
_ = m(x)
dt.append((time_synchronized() - t) * 100)
print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))
x = m(x)
y.append(x if m.i in self.save else None)
if profile:
print('%.1fms total' % sum(dt))
return x
def _initialize_biases(self, cf=None):
m = self.model[-1]
for mi, s in zip(m.m, m.stride):
b = mi.bias.view(m.na, -1)
b.data[:, 4] += math.log(8 / (640 / s) ** 2)
b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())
mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def _print_biases(self):
m = self.model[-1]
for mi in m.m:
b = mi.bias.detach().view(m.na, -1).T
print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))
f):
print('Fusing layers... ')
for m in self.model.modules():
if type(m) is Conv and hasattr(m, 'bn'):
m.conv = fuse_conv_and_bn(m.conv, m.bn)
delattr(m, 'bn')
m.forward = m.fuseforward
self.info()
return self
def nms(self, mode=True):
present = type(self.model[-1]) is NMS
if mode and not present:
print('Adding NMS... ')
m = NMS()
m.f = -1
m.i = self.model[-1].i + 1
self.model.add_module(name='%s' % m.i, module=m)
self.eval()
elif not mode and present:
print('Removing NMS... ')
self.model = self.model[:-1]
return self
def autoshape(self):
print('Adding autoShape... ')
m = autoShape(self)
copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=())
return m
def info(self, verbose=False, img_size=640):
model_info(self, verbose, img_size)
def parse_model(d, ch):
logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors
no = na * (nc + 5)
layers, save, c2 = [], [], ch[-1]
for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):
m = eval(m) if isinstance(m, str) else m
for j, a in enumerate(args):
try:
args[j] = eval(a) if isinstance(a, str) else a
except:
pass
n = max(round(n * gd), 1) if n > 1 else n
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, DWConv, MixConv2d, Focus, CrossConv, BottleneckCSP,
C3, C3TR]:
c1, c2 = ch[f], args[0]
if c2 != no:
c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
if m in [BottleneckCSP, C3, C3TR]:
args.insert(2, n)
n = 1
elif m is nn.BatchNorm2d:
args = [ch[f]]
elif m is Concat:
c2 = sum([ch[x] for x in f])
elif m is Detect:
args.append([ch[x] for x in f])
if isinstance(args[1], int):
args[1] = [list(range(args[1] * 2))] * len(f)
elif m is Contract:
c2 = ch[f] * args[0] ** 2
elif m is Expand:
c2 = ch[f] // args[0] ** 2
else:
c2 = ch[f]
m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)
t = str(m)[8:-2].replace('__main__.', '')
np = sum([x.numel() for x in m_.parameters()])
m_.i, m_.f, m_.type, m_.np = i, f, t, np
logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args))
save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)
layers.append(m_)
if i == 0:
ch = []
ch.append(c2)
return nn.Sequential(*layers), sorted(save)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
opt = parser.parse_args()
opt.cfg = check_file(opt.cfg)
set_logging()
device = select_device(opt.device)
model = Model(opt.cfg).to(device)
model.train()
| true | true |
f730a306db38384a6b876c13045c0824325f5a80 | 95 | py | Python | matversion.py | lukasijus/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-gradient-checking | ce33f2a830abaf974aa1b04b61a7f25d2c4b2bd7 | [
"MIT"
] | 1 | 2020-08-06T18:51:26.000Z | 2020-08-06T18:51:26.000Z | matversion.py | lukasijus/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-gradient-checking | ce33f2a830abaf974aa1b04b61a7f25d2c4b2bd7 | [
"MIT"
] | null | null | null | matversion.py | lukasijus/Improving-Deep-Neural-Networks-Hyperparameter-tuning-Regularization-and-gradient-checking | ce33f2a830abaf974aa1b04b61a7f25d2c4b2bd7 | [
"MIT"
] | null | null | null | #import matplotlib
#print(matplotlib.__version__)
import tensorflow as tf
print(tf.__version__) | 23.75 | 30 | 0.842105 |
import tensorflow as tf
print(tf.__version__) | true | true |
f730a4182acb8d42e7b585c4b972507dc5f1fbb5 | 5,276 | py | Python | egs/voxceleb/v1/nnet/lib/extract.py | deciding/tf-kaldi-speaker | ceaed721e502a71434d910fd73b202940ea2ce60 | [
"Apache-2.0"
] | null | null | null | egs/voxceleb/v1/nnet/lib/extract.py | deciding/tf-kaldi-speaker | ceaed721e502a71434d910fd73b202940ea2ce60 | [
"Apache-2.0"
] | 1 | 2022-02-10T06:48:05.000Z | 2022-02-10T06:48:05.000Z | egs/voxceleb/v1/nnet/lib/extract.py | deciding/tf-kaldi-speaker | ceaed721e502a71434d910fd73b202940ea2ce60 | [
"Apache-2.0"
] | null | null | null | import argparse
import numpy as np
import os
import sys
import numpy, scipy, sklearn
from model.trainer import Trainer
from misc.utils import Params
from dataset.kaldi_io import FeatureReader, open_or_fd, read_mat_ark, write_vec_flt
from six.moves import range
# CLI: model directory plus Kaldi-style feature rspecifier / embedding wspecifier.
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpu", type=int, default=-1, help="The GPU id. GPU disabled if -1.")
parser.add_argument("-m", "--min-chunk-size", type=int, default=25, help="The minimum length of the segments. Any segment shorted than this value will be ignored.")
parser.add_argument("-s", "--chunk-size", type=int, default=10000, help="The length of the segments used to extract the embeddings. "
                                                                        "Segments longer than this value will be splited before extraction. "
                                                                        "Then the splited embeddings will be averaged to get the final embedding. "
                                                                        "L2 normalizaion will be applied before the averaging if specified.")
parser.add_argument("-n", "--normalize", action="store_true", help="Normalize the embedding before averaging and output.")
parser.add_argument("--node", type=str, default="", help="The node to output the embeddings.")
parser.add_argument("model_dir", type=str, help="The model directory.")
parser.add_argument("rspecifier", type=str, help="Kaldi feature rspecifier (or ark file).")
parser.add_argument("wspecifier", type=str, help="Kaldi output wspecifier (or ark file).")
args = parser.parse_args()
if args.gpu == -1:
    # Disable GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    # In the GPU situation, it is difficult to know how to specify the GPU id.
    # If the program is launched locally, you can set CUDA_VISIBLE_DEVICES to the id.
    # However, if SGE is used, we cannot simply set CUDA_VISIBLE_DEVICES.
    # So it is better to specify the GPU id outside the program.
    # Give an arbitrary number (except for -1) to --gpu can enable it. Leave it blank if you want to disable gpu.
# NOTE: TensorFlow is imported only after CUDA_VISIBLE_DEVICES may have been
# set, so the visibility restriction takes effect at TF initialization.
import tensorflow as tf
if __name__ == '__main__':
    # Extract one speaker embedding per utterance from Kaldi features.
    tf.reset_default_graph()
    tf.logging.set_verbosity(tf.logging.INFO)
    nnet_dir = os.path.join(args.model_dir, "nnet")
    config_json = os.path.join(args.model_dir, "nnet/config.json")
    if not os.path.isfile(config_json):
        sys.exit("Cannot find params.json in %s" % config_json)
    params = Params(config_json)
    # Change the output node if necessary
    if len(args.node) != 0:
        params.embedding_node = args.node
    tf.logging.info("Extract embedding from %s" % params.embedding_node)
    with open(os.path.join(nnet_dir, "feature_dim"), "r") as f:
        dim = int(f.readline().strip())
    #trainer = Trainer(params, args.model_dir, dim, single_cpu=True)
    trainer = Trainer(params, args.model_dir, dim)
    trainer.build("predict")
    if args.rspecifier.rsplit(".", 1)[1] == "scp":
        # The rspecifier cannot be scp
        sys.exit("The rspecifier must be ark or input pipe")
    fp_out = open_or_fd(args.wspecifier, "wb")
    # import pdb;pdb.set_trace()
    # args.rspecifier=args.rspecifier.replace('JOB', '1')
    for index, (key, feature) in enumerate(read_mat_ark(args.rspecifier)):
        if feature.shape[0] < args.min_chunk_size:
            tf.logging.info("[INFO] Key %s length too short, %d < %d, skip." % (key, feature.shape[0], args.min_chunk_size))
            continue
        if feature.shape[0] > args.chunk_size:
            # Long utterance: split into 50%-overlapping chunks, embed each,
            # then take a length-weighted average of the chunk embeddings.
            feature_array = []
            feature_length = []
            num_chunks = int(np.ceil(float(feature.shape[0] - args.chunk_size) / (args.chunk_size / 2))) + 1
            tf.logging.info("[INFO] Key %s length %d > %d, split to %d segments." % (key, feature.shape[0], args.chunk_size, num_chunks))
            for i in range(num_chunks):
                start = int(i * (args.chunk_size / 2))  # hop = half the chunk size
                this_chunk_size = args.chunk_size if feature.shape[0] - start > args.chunk_size else feature.shape[0] - start
                feature_length.append(this_chunk_size)
                feature_array.append(feature[start:start+this_chunk_size])
            feature_length = np.expand_dims(np.array(feature_length), axis=1)
            # Except for the last feature, the length of other features should be the same (=chunk_size)
            embeddings = trainer.predict(np.array(feature_array[:-1], dtype=np.float32))
            embedding_last = trainer.predict(feature_array[-1])  # last chunk may be shorter, predicted alone
            embeddings = np.concatenate([embeddings, np.expand_dims(embedding_last, axis=0)], axis=0)
            if args.normalize:
                # L2-normalize each chunk embedding before averaging.
                embeddings /= np.sqrt(np.sum(np.square(embeddings), axis=1, keepdims=True))
            embedding = np.sum(embeddings * feature_length, axis=0) / np.sum(feature_length)
        else:
            tf.logging.info("[INFO] Key %s length %d." % (key, feature.shape[0]))
            embedding = trainer.predict(feature)
        tf.logging.info("[INFO] Key %s finished predicting" % (key))
        if args.normalize:
            # Final L2 normalization of the utterance-level embedding.
            embedding /= np.sqrt(np.sum(np.square(embedding)))
        write_vec_flt(fp_out, embedding, key=key)
        tf.logging.info("[INFO] Key %s finished writing" % (key))
    fp_out.close()
    trainer.close()
| 52.237624 | 164 | 0.657127 | import argparse
import numpy as np
import os
import sys
import numpy, scipy, sklearn
from model.trainer import Trainer
from misc.utils import Params
from dataset.kaldi_io import FeatureReader, open_or_fd, read_mat_ark, write_vec_flt
from six.moves import range
parser = argparse.ArgumentParser()
parser.add_argument("-g", "--gpu", type=int, default=-1, help="The GPU id. GPU disabled if -1.")
parser.add_argument("-m", "--min-chunk-size", type=int, default=25, help="The minimum length of the segments. Any segment shorted than this value will be ignored.")
parser.add_argument("-s", "--chunk-size", type=int, default=10000, help="The length of the segments used to extract the embeddings. "
"Segments longer than this value will be splited before extraction. "
"Then the splited embeddings will be averaged to get the final embedding. "
"L2 normalizaion will be applied before the averaging if specified.")
parser.add_argument("-n", "--normalize", action="store_true", help="Normalize the embedding before averaging and output.")
parser.add_argument("--node", type=str, default="", help="The node to output the embeddings.")
parser.add_argument("model_dir", type=str, help="The model directory.")
parser.add_argument("rspecifier", type=str, help="Kaldi feature rspecifier (or ark file).")
parser.add_argument("wspecifier", type=str, help="Kaldi output wspecifier (or ark file).")
args = parser.parse_args()
if args.gpu == -1:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
import tensorflow as tf
if __name__ == '__main__':
tf.reset_default_graph()
tf.logging.set_verbosity(tf.logging.INFO)
nnet_dir = os.path.join(args.model_dir, "nnet")
config_json = os.path.join(args.model_dir, "nnet/config.json")
if not os.path.isfile(config_json):
sys.exit("Cannot find params.json in %s" % config_json)
params = Params(config_json)
if len(args.node) != 0:
params.embedding_node = args.node
tf.logging.info("Extract embedding from %s" % params.embedding_node)
with open(os.path.join(nnet_dir, "feature_dim"), "r") as f:
dim = int(f.readline().strip())
trainer = Trainer(params, args.model_dir, dim)
trainer.build("predict")
if args.rspecifier.rsplit(".", 1)[1] == "scp":
sys.exit("The rspecifier must be ark or input pipe")
fp_out = open_or_fd(args.wspecifier, "wb")
for index, (key, feature) in enumerate(read_mat_ark(args.rspecifier)):
if feature.shape[0] < args.min_chunk_size:
tf.logging.info("[INFO] Key %s length too short, %d < %d, skip." % (key, feature.shape[0], args.min_chunk_size))
continue
if feature.shape[0] > args.chunk_size:
feature_array = []
feature_length = []
num_chunks = int(np.ceil(float(feature.shape[0] - args.chunk_size) / (args.chunk_size / 2))) + 1
tf.logging.info("[INFO] Key %s length %d > %d, split to %d segments." % (key, feature.shape[0], args.chunk_size, num_chunks))
for i in range(num_chunks):
start = int(i * (args.chunk_size / 2))
this_chunk_size = args.chunk_size if feature.shape[0] - start > args.chunk_size else feature.shape[0] - start
feature_length.append(this_chunk_size)
feature_array.append(feature[start:start+this_chunk_size])
feature_length = np.expand_dims(np.array(feature_length), axis=1)
embeddings = trainer.predict(np.array(feature_array[:-1], dtype=np.float32))
embedding_last = trainer.predict(feature_array[-1])
embeddings = np.concatenate([embeddings, np.expand_dims(embedding_last, axis=0)], axis=0)
if args.normalize:
embeddings /= np.sqrt(np.sum(np.square(embeddings), axis=1, keepdims=True))
embedding = np.sum(embeddings * feature_length, axis=0) / np.sum(feature_length)
else:
tf.logging.info("[INFO] Key %s length %d." % (key, feature.shape[0]))
embedding = trainer.predict(feature)
tf.logging.info("[INFO] Key %s finished predicting" % (key))
if args.normalize:
embedding /= np.sqrt(np.sum(np.square(embedding)))
write_vec_flt(fp_out, embedding, key=key)
tf.logging.info("[INFO] Key %s finished writing" % (key))
fp_out.close()
trainer.close()
| true | true |
f730a42cacff927c57b8618033a20ff148ab5b71 | 1,637 | py | Python | config.py | rafael-carvalho/investigo-meraki | 3c75ebc54e38b822e5ae452f4faf12d527d95f4d | [
"Apache-2.0"
] | 2 | 2018-04-05T08:52:54.000Z | 2020-05-16T15:43:44.000Z | config.py | meraki/investigo-spark-bot | 34665143724522d463873d704dd8d77861748358 | [
"Apache-2.0"
] | null | null | null | config.py | meraki/investigo-spark-bot | 34665143724522d463873d704dd8d77861748358 | [
"Apache-2.0"
] | 2 | 2018-04-18T08:35:59.000Z | 2020-03-12T22:13:19.000Z | import os
class Config(object):
    """Base application configuration, populated from environment variables at import time.

    Note: SECRET_KEY and DATABASE_URL are required -- a missing variable raises
    KeyError as soon as this module is imported (fail fast). All other settings
    fall back to placeholder strings or False.
    """
    BASE_DIR = os.path.abspath(os.path.dirname(__file__))
    DEBUG = False
    SECRET_KEY = os.environ['SECRET_KEY']
    DATABASE_URL = os.environ['DATABASE_URL']
    SQLALCHEMY_DATABASE_URI = DATABASE_URL
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    # Optional integration tokens; placeholders keep the app bootable without them.
    TROPO_API_KEY_TEXT = os.environ.get('TROPO_API_KEY_TEXT', "TEXT TOKEN NOT PROVIDED")
    TROPO_API_KEY_VOICE = os.environ.get('TROPO_API_KEY_VOICE', "VOICE TOKEN NOT PROVIDED")
    SPARK_TOKEN = os.environ.get('SPARK_TOKEN', "TOKEN-NOT-PROVIDED")
    ON_CISCO_NETWORK = os.environ.get('ON_CISCO_NETWORK', False)
    NOTIFICATION_SMS_PHONE_NUMBER = os.environ.get('NOTIFICATION_SMS_PHONE_NUMBER', False)
    SPARK_DEFAULT_ROOM_ID = os.environ.get('SPARK_DEFAULT_ROOM_ID', False)
    # Env vars arrive as strings: only the exact spellings 'True'/'TRUE' enable these flags.
    SMS_ENABLED = os.environ.get('SMS_ENABLED', False)
    SMS_ENABLED = (SMS_ENABLED == 'True' or SMS_ENABLED == 'TRUE')
    SHOW_WEB_LINK = os.environ.get('SHOW_WEB_LINK', False)
    SHOW_WEB_LINK = (SHOW_WEB_LINK == 'True' or SHOW_WEB_LINK == 'TRUE')
    ADMIN_NAME = os.environ.get('ADMIN_NAME', '')
    MERAKI_VALIDATOR_TOKEN = os.environ.get('MERAKI_VALIDATOR', "TOKEN-NOT-PROVIDED")
    # Application threads. A common general assumption is
    # using 2 per available processor cores - to handle
    # incoming requests using one and performing background
    # operations using the other.
    THREADS_PER_PAGE = 2
class ProductionConfig(Config):
    """Production settings: debugging explicitly disabled."""
    DEBUG = False
class StagingConfig(Config):
    """Staging settings: development mode with debugging enabled."""
    DEVELOPMENT = True
    DEBUG = True
class DevelopmentConfig(Config):
    """Local development settings: debugging enabled."""
    DEVELOPMENT = True
    DEBUG = True
class TestingConfig(Config):
    """Test-suite settings: enables framework testing mode."""
    TESTING = True
| 34.829787 | 91 | 0.729994 | import os
class Config(object):
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
DEBUG = False
SECRET_KEY = os.environ['SECRET_KEY']
DATABASE_URL = os.environ['DATABASE_URL']
SQLALCHEMY_DATABASE_URI = DATABASE_URL
SQLALCHEMY_TRACK_MODIFICATIONS = True
TROPO_API_KEY_TEXT = os.environ.get('TROPO_API_KEY_TEXT', "TEXT TOKEN NOT PROVIDED")
TROPO_API_KEY_VOICE = os.environ.get('TROPO_API_KEY_VOICE', "VOICE TOKEN NOT PROVIDED")
SPARK_TOKEN = os.environ.get('SPARK_TOKEN', "TOKEN-NOT-PROVIDED")
ON_CISCO_NETWORK = os.environ.get('ON_CISCO_NETWORK', False)
NOTIFICATION_SMS_PHONE_NUMBER = os.environ.get('NOTIFICATION_SMS_PHONE_NUMBER', False)
SPARK_DEFAULT_ROOM_ID = os.environ.get('SPARK_DEFAULT_ROOM_ID', False)
SMS_ENABLED = os.environ.get('SMS_ENABLED', False)
SMS_ENABLED = (SMS_ENABLED == 'True' or SMS_ENABLED == 'TRUE')
SHOW_WEB_LINK = os.environ.get('SHOW_WEB_LINK', False)
SHOW_WEB_LINK = (SHOW_WEB_LINK == 'True' or SHOW_WEB_LINK == 'TRUE')
ADMIN_NAME = os.environ.get('ADMIN_NAME', '')
MERAKI_VALIDATOR_TOKEN = os.environ.get('MERAKI_VALIDATOR', "TOKEN-NOT-PROVIDED")
THREADS_PER_PAGE = 2
class ProductionConfig(Config):
DEBUG = False
class StagingConfig(Config):
DEVELOPMENT = True
DEBUG = True
class DevelopmentConfig(Config):
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
TESTING = True
| true | true |
f730a48aa8629fddb68ef5a3a25e8da73942fea2 | 5,876 | py | Python | tensorflow/python/compat/compat.py | lightyang/tensorflow | 1a455a77d80fa788fd7963530dd130ad7d902226 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/compat/compat.py | lightyang/tensorflow | 1a455a77d80fa788fd7963530dd130ad7d902226 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:13:06.000Z | 2022-02-10T02:19:43.000Z | tensorflow/python/compat/compat.py | Hyperclaw79/tensorflow | 14c58e1d380b2001ffdf7ef782d44ad1a21f763c | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 2, 5)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
  """Update the base date to compare in forward_compatible function.

  With `date_to_override` set, that exact date is used (the env-var delta is
  ignored); otherwise the compiled-in horizon plus the optional
  TF_FORWARD_COMPATIBILITY_DELTA_DAYS offset is used.
  """
  global _FORWARD_COMPATIBILITY_DATE_NUMBER

  if date_to_override:
    date = date_to_override
  else:
    date = _FORWARD_COMPATIBILITY_HORIZON
    delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
    if delta_days:
      # Shift the horizon forward by the requested number of days.
      date += datetime.timedelta(days=int(delta_days))

  _FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
      date.year, date.month, date.day)
# Initialize the cached date number at import time so forward_compatible()
# works without any explicit setup.
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
# To maintain forward compatibiltiy, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
if compat.forward_compatible(year=2018, month=08, date=01):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
with compat.forward_compatibility_horizon(2018, 08, 02):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number()
| 35.185629 | 82 | 0.747617 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2020, 2, 5)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def _date_to_date_number(year, month, day):
return (year << 9) | (month << 5) | day
def _update_forward_compatibility_date_number(date_to_override=None):
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
_FORWARD_COMPATIBILITY_DATE_NUMBER = _date_to_date_number(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
return _FORWARD_COMPATIBILITY_DATE_NUMBER > _date_to_date_number(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
    """Temporarily pin the forward-compatibility horizon to a fixed date.

    The override is installed on entry and the default horizon is always
    restored on exit (the restore runs in ``finally`` so an exception
    inside the ``with`` body cannot leak the override).
    """
    try:
        _update_forward_compatibility_date_number(datetime.date(year, month, day))
        yield
    finally:
        _update_forward_compatibility_date_number()
| true | true |
f730a4db01573a009901b98513880dc1ffb68715 | 807 | py | Python | env/lib/python3.6/site-packages/nibabel/tests/test_imageglobals.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 8 | 2019-05-29T09:38:30.000Z | 2021-01-20T03:36:59.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_imageglobals.py | Raniac/neurolearn_dev | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 12 | 2021-03-09T03:01:16.000Z | 2022-03-11T23:59:36.000Z | env/lib/python3.6/site-packages/nibabel/tests/test_imageglobals.py | Raniac/NEURO-LEARN | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | [
"Apache-2.0"
] | 1 | 2020-07-17T12:49:49.000Z | 2020-07-17T12:49:49.000Z | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Tests for imageglobals module
"""
from nose.tools import (assert_true, assert_false, assert_raises,
assert_equal, assert_not_equal)
from .. import imageglobals as igs
def test_errorlevel():
    """ErrorLevel sets the module error level inside the context and restores it after."""
    baseline = igs.error_level
    for lvl in (10, 20, 30):
        with igs.ErrorLevel(lvl):
            assert_equal(igs.error_level, lvl)
    # Leaving every context manager must put the original level back.
    assert_equal(igs.error_level, baseline)
| 33.625 | 78 | 0.545229 | true | true | |
f730a5be097b0dd193d78fd67d840787ea818e19 | 886 | py | Python | graph_peak_caller/analysis/diploratio_v_motifrate.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 10 | 2018-04-19T21:54:31.000Z | 2021-07-22T12:46:33.000Z | graph_peak_caller/analysis/diploratio_v_motifrate.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 9 | 2018-01-30T20:41:36.000Z | 2021-01-28T23:00:18.000Z | graph_peak_caller/analysis/diploratio_v_motifrate.py | uio-bmi/graph_peak_caller | 89deeabf3cd0b23fba49b1304f1c81222fb534d7 | [
"BSD-3-Clause"
] | 3 | 2019-08-20T21:43:53.000Z | 2022-01-20T14:39:34.000Z | import numpy as np
import matplotlib.pyplot as plt
def plot(base_name):
    """Plot motif-match ratio versus the diplotype-coverage threshold.

    Loads the motif and non-motif summary ``.npz`` files under
    ``base_name`` and draws one cumulative-ratio curve (labelled with
    ``base_name``) on the current matplotlib figure.  Does not call
    ``plt.show()``; the caller composes/post-processes the figure.
    """
    def get_hist(s):
        # Scale the diplotype histogram by the count stored in the summary.
        return s["summary"][0] * s["diplo_hist"]

    motif = np.load(base_name + "/limited_summits_alignments_motif_summary.npz")
    nonmotif = np.load(base_name + "/limited_summits_alignments_nonmotif_summary.npz")
    motif_hist = get_hist(motif)
    nonmotif_hist = get_hist(nonmotif)
    cum_motif = np.cumsum(motif_hist)
    cum_nonmotif = np.cumsum(nonmotif_hist)
    cum_total = cum_motif + cum_nonmotif
    # Original code computed cum_motif/cum_total eagerly inside np.where,
    # which raises RuntimeWarnings wherever cum_total == 0.  Divide only
    # where the denominator is non-zero; the ratio stays 0 elsewhere,
    # matching the previous np.where result.
    ratio = np.divide(cum_motif, cum_total,
                      out=np.zeros_like(cum_motif, dtype=float),
                      where=cum_total != 0)
    plt.plot(np.linspace(0, 1, 100), ratio, label=base_name)
if __name__ == "__main__":
import sys
paths = sys.argv[1].split(",")
for path in paths:
plot(path)
plt.xlabel("Ratio of reads covered by diplotypes threshold")
plt.ylabel("Motif match percentage")
plt.legend()
plt.show()
| 31.642857 | 86 | 0.691874 | import numpy as np
import matplotlib.pyplot as plt
def plot(base_name):
def get_hist(s):
return s["summary"][0]*s["diplo_hist"]
motif = np.load(base_name + "/limited_summits_alignments_motif_summary.npz")
nonmotif = np.load(base_name + "/limited_summits_alignments_nonmotif_summary.npz")
motif_hist = get_hist(motif)
nonmotif_hist = get_hist(nonmotif)
cum_motif = np.cumsum(motif_hist)
cum_nonmotif = np.cumsum(nonmotif_hist)
cum_total = cum_motif + cum_nonmotif
ratio = np.where(cum_total == 0, 0, cum_motif/cum_total)
plt.plot(np.linspace(0, 1, 100), ratio, label=base_name)
if __name__ == "__main__":
import sys
paths = sys.argv[1].split(",")
for path in paths:
plot(path)
plt.xlabel("Ratio of reads covered by diplotypes threshold")
plt.ylabel("Motif match percentage")
plt.legend()
plt.show()
| true | true |
f730a5caa27df176131385076f8a70b5752aa206 | 1,224 | py | Python | os_windows/_i18n.py | cloudbase/oslo.windows | 8dd9a41499653ea7c58e238877942ab1d4c46636 | [
"Apache-2.0"
] | 2 | 2015-09-02T21:39:10.000Z | 2016-06-16T01:54:20.000Z | os_windows/_i18n.py | cloudbase/oslo.windows | 8dd9a41499653ea7c58e238877942ab1d4c46636 | [
"Apache-2.0"
] | null | null | null | os_windows/_i18n.py | cloudbase/oslo.windows | 8dd9a41499653ea7c58e238877942ab1d4c46636 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html .
"""
import oslo_i18n
# One TranslatorFactory per translation domain; the domain name must match
# the gettext catalog shipped with this package.
_translators = oslo_i18n.TranslatorFactory(domain='oslo.windows')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info      # info-level log messages
_LW = _translators.log_warning   # warning-level log messages
_LE = _translators.log_error     # error-level log messages
_LC = _translators.log_critical  # critical-level log messages
| 31.384615 | 78 | 0.747549 |
import oslo_i18n
_translators = oslo_i18n.TranslatorFactory(domain='oslo.windows')
_ = _translators.primary
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
| true | true |
f730a64dcfd7ad054c235267a643fa3816394fc1 | 261 | py | Python | month01/all_code/day02/exercise06.py | chaofan-zheng/tedu-python-demo | abe983ddc52690f4726cf42cc6390cba815026d8 | [
"Apache-2.0"
] | 4 | 2021-01-07T14:25:15.000Z | 2021-02-01T10:36:10.000Z | month01/all_code/day02/exercise06.py | chaofan-zheng/tedu-python-demo | abe983ddc52690f4726cf42cc6390cba815026d8 | [
"Apache-2.0"
] | null | null | null | month01/all_code/day02/exercise06.py | chaofan-zheng/tedu-python-demo | abe983ddc52690f4726cf42cc6390cba815026d8 | [
"Apache-2.0"
] | null | null | null | """
练习1:在终端中输入一个疫情确诊人数再录入一个治愈人数,
打印治愈比例
格式:治愈比例为xx%
效果:
请输入确诊人数:500
请 输入治愈人数:495
治愈比例为99.0%
"""
# Read the confirmed and cured counts from the terminal, then report the
# cure ratio as a percentage (e.g. "治愈比例为99.0%").
confirmed_count = int(input("请输入确诊人数:"))
cured_count = int(input("请输入治愈人数:"))
cure_ratio = cured_count / confirmed_count * 100
print(f"治愈比例为{cure_ratio}%")
| 18.642857 | 34 | 0.59387 | confirmed = int(input("请输入确诊人数:"))
cure = int(input("请输入治愈人数:"))
result = cure / confirmed * 100
print("治愈比例为" + str(result) + "%")
| true | true |
f730a690b6b79400b52d582014d1ddf3b05f2bf6 | 369 | py | Python | h1st_contrib/iot_mgmt/maint_ops/migrations/0073_auto_20181121_0024.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | [
"Apache-2.0"
] | 1 | 2022-02-19T18:55:43.000Z | 2022-02-19T18:55:43.000Z | h1st_contrib/iot_mgmt/maint_ops/migrations/0073_auto_20181121_0024.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | [
"Apache-2.0"
] | null | null | null | h1st_contrib/iot_mgmt/maint_ops/migrations/0073_auto_20181121_0024.py | h1st-ai/h1st-contrib | 38fbb1fff4513bb3433bc12f2b436836e5e51c80 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.1 on 2018-11-21 00:24
from django.db import migrations
class Migration(migrations.Migration):
    # Must follow the previous IoT_MaintOps migration so the schema
    # history applies in order.
    dependencies = [
        ('IoT_MaintOps', '0072_auto_20181115_0423'),
    ]
    operations = [
        # Clear the model's default ordering (empty tuple = no ordering);
        # metadata-only change, no database schema alteration.
        migrations.AlterModelOptions(
            name='equipmentinstancedailyriskscore',
            options={'ordering': ()},
        ),
    ]
| 20.5 | 52 | 0.620596 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('IoT_MaintOps', '0072_auto_20181115_0423'),
]
operations = [
migrations.AlterModelOptions(
name='equipmentinstancedailyriskscore',
options={'ordering': ()},
),
]
| true | true |
f730a88b0aa1fb26bbd6dfb345fe135d202fd7b6 | 8,692 | py | Python | sdk/python/pulumi_mongodbatlas/get_cloud_provider_snapshot.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2020-04-28T19:12:30.000Z | 2022-03-22T23:04:46.000Z | sdk/python/pulumi_mongodbatlas/get_cloud_provider_snapshot.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 59 | 2020-06-12T12:12:52.000Z | 2022-03-28T18:14:50.000Z | sdk/python/pulumi_mongodbatlas/get_cloud_provider_snapshot.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-25T21:22:08.000Z | 2021-08-30T20:06:18.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetCloudProviderSnapshotResult',
'AwaitableGetCloudProviderSnapshotResult',
'get_cloud_provider_snapshot',
]
@pulumi.output_type
class GetCloudProviderSnapshotResult:
    """
    A collection of values returned by getCloudProviderSnapshot.
    """
    def __init__(__self__, cluster_name=None, created_at=None, description=None, expires_at=None, id=None, master_key_uuid=None, mongod_version=None, project_id=None, snapshot_id=None, snapshot_type=None, status=None, storage_size_bytes=None, type=None):
        # Generated code: validate each field's type, then store it via
        # pulumi.set so the @pulumi.getter properties can retrieve it.
        if cluster_name and not isinstance(cluster_name, str):
            raise TypeError("Expected argument 'cluster_name' to be a str")
        pulumi.set(__self__, "cluster_name", cluster_name)
        if created_at and not isinstance(created_at, str):
            raise TypeError("Expected argument 'created_at' to be a str")
        pulumi.set(__self__, "created_at", created_at)
        if description and not isinstance(description, str):
            raise TypeError("Expected argument 'description' to be a str")
        pulumi.set(__self__, "description", description)
        if expires_at and not isinstance(expires_at, str):
            raise TypeError("Expected argument 'expires_at' to be a str")
        pulumi.set(__self__, "expires_at", expires_at)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if master_key_uuid and not isinstance(master_key_uuid, str):
            raise TypeError("Expected argument 'master_key_uuid' to be a str")
        pulumi.set(__self__, "master_key_uuid", master_key_uuid)
        if mongod_version and not isinstance(mongod_version, str):
            raise TypeError("Expected argument 'mongod_version' to be a str")
        pulumi.set(__self__, "mongod_version", mongod_version)
        if project_id and not isinstance(project_id, str):
            raise TypeError("Expected argument 'project_id' to be a str")
        pulumi.set(__self__, "project_id", project_id)
        if snapshot_id and not isinstance(snapshot_id, str):
            raise TypeError("Expected argument 'snapshot_id' to be a str")
        pulumi.set(__self__, "snapshot_id", snapshot_id)
        if snapshot_type and not isinstance(snapshot_type, str):
            raise TypeError("Expected argument 'snapshot_type' to be a str")
        pulumi.set(__self__, "snapshot_type", snapshot_type)
        if status and not isinstance(status, str):
            raise TypeError("Expected argument 'status' to be a str")
        pulumi.set(__self__, "status", status)
        if storage_size_bytes and not isinstance(storage_size_bytes, int):
            raise TypeError("Expected argument 'storage_size_bytes' to be a int")
        pulumi.set(__self__, "storage_size_bytes", storage_size_bytes)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> str:
        """
        The name of the Atlas cluster the snapshot belongs to.
        """
        return pulumi.get(self, "cluster_name")
    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        UTC ISO 8601 formatted point in time when Atlas took the snapshot.
        """
        return pulumi.get(self, "created_at")
    @property
    @pulumi.getter
    def description(self) -> str:
        """
        UDescription of the snapshot. Only present for on-demand snapshots.
        """
        return pulumi.get(self, "description")
    @property
    @pulumi.getter(name="expiresAt")
    def expires_at(self) -> str:
        """
        UTC ISO 8601 formatted point in time when Atlas will delete the snapshot.
        """
        return pulumi.get(self, "expires_at")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The provider-assigned unique ID for this managed resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="masterKeyUuid")
    def master_key_uuid(self) -> str:
        """
        Unique ID of the AWS KMS Customer Master Key used to encrypt the snapshot. Only visible for clusters using Encryption at Rest via Customer KMS.
        """
        return pulumi.get(self, "master_key_uuid")
    @property
    @pulumi.getter(name="mongodVersion")
    def mongod_version(self) -> str:
        """
        Version of the MongoDB server.
        """
        return pulumi.get(self, "mongod_version")
    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> str:
        """
        Unique identifier of the project containing the cluster.
        """
        return pulumi.get(self, "project_id")
    @property
    @pulumi.getter(name="snapshotId")
    def snapshot_id(self) -> str:
        """
        Unique identifier of this snapshot.
        """
        return pulumi.get(self, "snapshot_id")
    @property
    @pulumi.getter(name="snapshotType")
    def snapshot_type(self) -> str:
        """
        Specified the type of snapshot. Valid values are onDemand and scheduled.
        """
        return pulumi.get(self, "snapshot_type")
    @property
    @pulumi.getter
    def status(self) -> str:
        """
        Current status of the snapshot. One of the following values: queued, inProgress, completed, failed.
        """
        return pulumi.get(self, "status")
    @property
    @pulumi.getter(name="storageSizeBytes")
    def storage_size_bytes(self) -> int:
        """
        Specifies the size of the snapshot in bytes.
        """
        return pulumi.get(self, "storage_size_bytes")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of cluster: replicaSet or shardedCluster.
        """
        return pulumi.get(self, "type")
class AwaitableGetCloudProviderSnapshotResult(GetCloudProviderSnapshotResult):
    # Awaitable wrapper so the result can be used with `await` in Pulumi's
    # async programs; __await__ yields nothing and resolves immediately to
    # a plain GetCloudProviderSnapshotResult copy of this object.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetCloudProviderSnapshotResult(
            cluster_name=self.cluster_name,
            created_at=self.created_at,
            description=self.description,
            expires_at=self.expires_at,
            id=self.id,
            master_key_uuid=self.master_key_uuid,
            mongod_version=self.mongod_version,
            project_id=self.project_id,
            snapshot_id=self.snapshot_id,
            snapshot_type=self.snapshot_type,
            status=self.status,
            storage_size_bytes=self.storage_size_bytes,
            type=self.type)
def get_cloud_provider_snapshot(cluster_name: Optional[str] = None,
                                project_id: Optional[str] = None,
                                snapshot_id: Optional[str] = None,
                                opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCloudProviderSnapshotResult:
    """
    `CloudProviderSnapshot` provides an Cloud Backup Snapshot datasource. Atlas Cloud Backup Snapshots provide localized backup storage using the native snapshot functionality of the cluster’s cloud service.
    > **NOTE:** Groups and projects are synonymous terms. You may find `groupId` in the official documentation.
    :param str cluster_name: The name of the Atlas cluster that contains the snapshot you want to retrieve.
    :param str project_id: The unique identifier of the project that contains the Atlas cluster (sent as `projectId` to the provider).
    :param str snapshot_id: The unique identifier of the snapshot you want to retrieve.
    """
    # Marshal the arguments into the camelCase keys the provider expects.
    __args__ = dict()
    __args__['clusterName'] = cluster_name
    __args__['projectId'] = project_id
    __args__['snapshotId'] = snapshot_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's pinned provider version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('mongodbatlas:index/getCloudProviderSnapshot:getCloudProviderSnapshot', __args__, opts=opts, typ=GetCloudProviderSnapshotResult).value
    return AwaitableGetCloudProviderSnapshotResult(
        cluster_name=__ret__.cluster_name,
        created_at=__ret__.created_at,
        description=__ret__.description,
        expires_at=__ret__.expires_at,
        id=__ret__.id,
        master_key_uuid=__ret__.master_key_uuid,
        mongod_version=__ret__.mongod_version,
        project_id=__ret__.project_id,
        snapshot_id=__ret__.snapshot_id,
        snapshot_type=__ret__.snapshot_type,
        status=__ret__.status,
        storage_size_bytes=__ret__.storage_size_bytes,
        type=__ret__.type)
| 40.0553 | 254 | 0.666015 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetCloudProviderSnapshotResult',
'AwaitableGetCloudProviderSnapshotResult',
'get_cloud_provider_snapshot',
]
@pulumi.output_type
class GetCloudProviderSnapshotResult:
def __init__(__self__, cluster_name=None, created_at=None, description=None, expires_at=None, id=None, master_key_uuid=None, mongod_version=None, project_id=None, snapshot_id=None, snapshot_type=None, status=None, storage_size_bytes=None, type=None):
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if created_at and not isinstance(created_at, str):
raise TypeError("Expected argument 'created_at' to be a str")
pulumi.set(__self__, "created_at", created_at)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if expires_at and not isinstance(expires_at, str):
raise TypeError("Expected argument 'expires_at' to be a str")
pulumi.set(__self__, "expires_at", expires_at)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if master_key_uuid and not isinstance(master_key_uuid, str):
raise TypeError("Expected argument 'master_key_uuid' to be a str")
pulumi.set(__self__, "master_key_uuid", master_key_uuid)
if mongod_version and not isinstance(mongod_version, str):
raise TypeError("Expected argument 'mongod_version' to be a str")
pulumi.set(__self__, "mongod_version", mongod_version)
if project_id and not isinstance(project_id, str):
raise TypeError("Expected argument 'project_id' to be a str")
pulumi.set(__self__, "project_id", project_id)
if snapshot_id and not isinstance(snapshot_id, str):
raise TypeError("Expected argument 'snapshot_id' to be a str")
pulumi.set(__self__, "snapshot_id", snapshot_id)
if snapshot_type and not isinstance(snapshot_type, str):
raise TypeError("Expected argument 'snapshot_type' to be a str")
pulumi.set(__self__, "snapshot_type", snapshot_type)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if storage_size_bytes and not isinstance(storage_size_bytes, int):
raise TypeError("Expected argument 'storage_size_bytes' to be a int")
pulumi.set(__self__, "storage_size_bytes", storage_size_bytes)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> str:
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> str:
return pulumi.get(self, "created_at")
@property
@pulumi.getter
def description(self) -> str:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expiresAt")
def expires_at(self) -> str:
return pulumi.get(self, "expires_at")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="masterKeyUuid")
def master_key_uuid(self) -> str:
return pulumi.get(self, "master_key_uuid")
@property
@pulumi.getter(name="mongodVersion")
def mongod_version(self) -> str:
return pulumi.get(self, "mongod_version")
@property
@pulumi.getter(name="projectId")
def project_id(self) -> str:
return pulumi.get(self, "project_id")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> str:
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter(name="snapshotType")
def snapshot_type(self) -> str:
return pulumi.get(self, "snapshot_type")
@property
@pulumi.getter
def status(self) -> str:
return pulumi.get(self, "status")
@property
@pulumi.getter(name="storageSizeBytes")
def storage_size_bytes(self) -> int:
return pulumi.get(self, "storage_size_bytes")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetCloudProviderSnapshotResult(GetCloudProviderSnapshotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCloudProviderSnapshotResult(
cluster_name=self.cluster_name,
created_at=self.created_at,
description=self.description,
expires_at=self.expires_at,
id=self.id,
master_key_uuid=self.master_key_uuid,
mongod_version=self.mongod_version,
project_id=self.project_id,
snapshot_id=self.snapshot_id,
snapshot_type=self.snapshot_type,
status=self.status,
storage_size_bytes=self.storage_size_bytes,
type=self.type)
def get_cloud_provider_snapshot(cluster_name: Optional[str] = None,
project_id: Optional[str] = None,
snapshot_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCloudProviderSnapshotResult:
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['projectId'] = project_id
__args__['snapshotId'] = snapshot_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('mongodbatlas:index/getCloudProviderSnapshot:getCloudProviderSnapshot', __args__, opts=opts, typ=GetCloudProviderSnapshotResult).value
return AwaitableGetCloudProviderSnapshotResult(
cluster_name=__ret__.cluster_name,
created_at=__ret__.created_at,
description=__ret__.description,
expires_at=__ret__.expires_at,
id=__ret__.id,
master_key_uuid=__ret__.master_key_uuid,
mongod_version=__ret__.mongod_version,
project_id=__ret__.project_id,
snapshot_id=__ret__.snapshot_id,
snapshot_type=__ret__.snapshot_type,
status=__ret__.status,
storage_size_bytes=__ret__.storage_size_bytes,
type=__ret__.type)
| true | true |
f730a8d6bd9bfbf555172824425acb3083358392 | 686 | py | Python | coq/tools/make-both-time-files.py | reichel3/TacTok | c344e76263de04311af8a0030c07aec95d87f71c | [
"MIT"
] | 7 | 2020-11-23T02:45:36.000Z | 2022-03-18T03:03:33.000Z | coq/tools/make-both-time-files.py | reichel3/TacTok | c344e76263de04311af8a0030c07aec95d87f71c | [
"MIT"
] | 4 | 2021-02-23T03:03:51.000Z | 2021-11-13T00:07:38.000Z | coq/tools/make-both-time-files.py | reichel3/TacTok | c344e76263de04311af8a0030c07aec95d87f71c | [
"MIT"
] | 2 | 2021-01-19T17:56:28.000Z | 2022-03-28T04:39:41.000Z | #!/usr/bin/env python
import sys
from TimeFileMaker import *
if __name__ == '__main__':
    # Compare two `make TIMED=1` logs (after vs. before) and print or
    # write a sorted diff table of per-file build times.
    USAGE = 'Usage: %s [--sort-by=auto|absolute|diff] AFTER_FILE_NAME BEFORE_FILE_NAME [OUTPUT_FILE_NAME ..]' % sys.argv[0]
    HELP_STRING = r'''Formats timing information from the output of two invocations of `make TIMED=1` into a sorted table.
The input is expected to contain lines in the format:
FILE_NAME (...user: NUMBER_IN_SECONDS...)
'''
    sort_by, args = parse_args(sys.argv, USAGE, HELP_STRING)
    after_times = get_times(args[1])
    before_times = get_times(args[2])
    diff_table = make_diff_table_string(after_times, before_times, sort_by=sort_by)
    print_or_write_table(diff_table, args[3:])
| 40.352941 | 123 | 0.728863 |
import sys
from TimeFileMaker import *
if __name__ == '__main__':
USAGE = 'Usage: %s [--sort-by=auto|absolute|diff] AFTER_FILE_NAME BEFORE_FILE_NAME [OUTPUT_FILE_NAME ..]' % sys.argv[0]
HELP_STRING = r'''Formats timing information from the output of two invocations of `make TIMED=1` into a sorted table.
The input is expected to contain lines in the format:
FILE_NAME (...user: NUMBER_IN_SECONDS...)
'''
sort_by, args = parse_args(sys.argv, USAGE, HELP_STRING)
left_dict = get_times(args[1])
right_dict = get_times(args[2])
table = make_diff_table_string(left_dict, right_dict, sort_by=sort_by)
print_or_write_table(table, args[3:])
| true | true |
f730a93f581867bc217a894c85120b1515d73e81 | 2,516 | py | Python | z2/part3/updated_part2_batch/jm/parser_errors_2/125359744.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/125359744.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part3/updated_part2_batch/jm/parser_errors_2/125359744.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 125359744
"""
"""
random actions, total chaos
"""
board = gamma_new(3, 4, 4, 4)
assert board is not None
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_free_fields(board, 2) == 8
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_busy_fields(board, 3) == 3
assert gamma_move(board, 4, 2, 2) == 1
assert gamma_move(board, 4, 2, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
board860826090 = gamma_board(board)
assert board860826090 is not None
assert board860826090 == ("..1\n" "114\n" "33.\n" "324\n")
del board860826090
board860826090 = None
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_golden_possible(board, 4) == 1
board371322770 = gamma_board(board)
assert board371322770 is not None
assert board371322770 == ("3.1\n" "114\n" "33.\n" "324\n")
del board371322770
board371322770 = None
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 1, 2) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_free_fields(board, 1) == 2
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_busy_fields(board, 3) == 4
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 3) == 1
assert gamma_move(board, 4, 1, 3) == 0
gamma_delete(board)
| 29.255814 | 58 | 0.678855 | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
board = gamma_new(3, 4, 4, 4)
assert board is not None
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_free_fields(board, 2) == 8
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 3, 0, 1) == 1
assert gamma_busy_fields(board, 3) == 3
assert gamma_move(board, 4, 2, 2) == 1
assert gamma_move(board, 4, 2, 0) == 1
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 1, 2, 3) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 2, 0, 1) == 0
board860826090 = gamma_board(board)
assert board860826090 is not None
assert board860826090 == ("..1\n" "114\n" "33.\n" "324\n")
del board860826090
board860826090 = None
assert gamma_move(board, 3, 1, 2) == 0
assert gamma_move(board, 3, 0, 3) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_golden_possible(board, 4) == 1
board371322770 = gamma_board(board)
assert board371322770 is not None
assert board371322770 == ("3.1\n" "114\n" "33.\n" "324\n")
del board371322770
board371322770 = None
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 1, 2) == 0
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 1, 2, 3) == 0
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 0, 3) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_free_fields(board, 1) == 2
assert gamma_move(board, 2, 1, 2) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 3, 3, 1) == 0
assert gamma_busy_fields(board, 3) == 4
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 1, 3) == 1
assert gamma_move(board, 4, 1, 3) == 0
gamma_delete(board)
| true | true |
f730a9b7a1db7a1618efa71421a99e18b273e3d6 | 2,861 | py | Python | flask_app.py | mnksingh94/Flipkart-Review-Scraper | 63ab0d4e9c640d3f88e81dc520b66dcb9931b5dc | [
"MIT"
] | null | null | null | flask_app.py | mnksingh94/Flipkart-Review-Scraper | 63ab0d4e9c640d3f88e81dc520b66dcb9931b5dc | [
"MIT"
] | null | null | null | flask_app.py | mnksingh94/Flipkart-Review-Scraper | 63ab0d4e9c640d3f88e81dc520b66dcb9931b5dc | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
import requests
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen as uReq
import pymongo
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
    """Search Flipkart for a product and show its first result's reviews.

    GET renders the search form; POST scrapes (or serves from the local
    MongoDB cache) the reviews for the submitted product name and renders
    the results page.  On any scraping/DB failure the original plain
    'something is wrong' response is returned.
    """
    if request.method != 'POST':
        return render_template('index.html')

    searchString = request.form['content'].replace(" ", "")
    try:
        dbConn = pymongo.MongoClient("mongodb://localhost:27017")
        db = dbConn['crawlerDB']
        table = db[searchString]
        # Serve cached reviews if this product was scraped before.
        # (Cursor.count() was removed in PyMongo 4; count_documents on the
        # collection is the supported replacement.)
        if table.count_documents({}) > 0:
            return render_template('results.html', reviews=table.find({}))

        flipkart_url = "https://www.flipkart.com/search?q=" + searchString
        uClient = uReq(flipkart_url)
        try:
            flipkartPage = uClient.read()
        finally:
            # Always release the URL handle, even if read() fails.
            uClient.close()
        flipkart_html = bs(flipkartPage, "html.parser")
        bigboxes = flipkart_html.find_all("div", {"class": "_1AtVbE col-12-12"})
        del bigboxes[0:3]  # leading boxes are filters/ads, not products
        box = bigboxes[0]
        productLink = "https://www.flipkart.com" + box.div.div.div.a['href']
        prodRes = requests.get(productLink)
        prod_html = bs(prodRes.text, "html.parser")
        commentboxs = prod_html.find_all('div', {'class': "_16PBlm"})
        reviews = []
        for commentbox in commentboxs:
            # Each field can be missing in the markup; fall back to the
            # same placeholder strings as before, but only swallow the
            # navigation errors BeautifulSoup actually raises.
            try:
                name = commentbox.div.div.find_all('p', {'class': '_2sc7ZR _2V5EHH'})[0].text
            except (AttributeError, IndexError):
                name = 'No name'
            try:
                rating = commentbox.div.div.div.div.text
            except AttributeError:
                rating = 'No rating'
            try:
                commentHead = commentbox.div.find_all('p', {'class': '_2-N8zT'})[0].text
            except (AttributeError, IndexError):
                commentHead = 'No comment heading'
            try:
                comtag = commentbox.div.div.find_all('div', {'class': ''})
                custComment = comtag[0].div.text
            except (AttributeError, IndexError):
                custComment = 'no customer comment'
            mydict = {'Product': searchString, 'Name': name, 'Rating': rating,
                      'CommentHead': commentHead, 'Comment': custComment}
            table.insert_one(mydict)
            reviews.append(mydict)
        return render_template('results.html', reviews=reviews)
    except Exception:
        # Preserve the original user-facing failure response; a bare
        # except would also have hidden KeyboardInterrupt/SystemExit.
        return 'something is wrong'
if __name__ == "__main__":
app.run(port=8000, debug=True)
| 40.871429 | 112 | 0.51136 | from flask import Flask, render_template, request, jsonify
from flask_cors import CORS, cross_origin
import requests
from bs4 import BeautifulSoup as bs
from urllib.request import urlopen as uReq
import pymongo
app = Flask(__name__)
@app.route('/', methods=['POST', 'GET'])
def index():
    """Search route: serve cached product reviews, scraping Flipkart on a miss.

    GET renders the search form; POST looks the product up in MongoDB and,
    when absent, scrapes the first Flipkart search result's review section,
    stores each review, and renders the results page.
    """
    if request.method == 'POST':
        # Normalise the query: the Flipkart search URL is built with spaces removed.
        searchString = request.form['content'].replace(" ", "")
        try:
            # NOTE(review): a new MongoClient is created on every request;
            # a single module-level client would be preferable.
            dbConn = pymongo.MongoClient("mongodb://localhost:27017")
            db =dbConn['crawlerDB']
            # One collection per search term acts as the cache.
            reviews = db[searchString].find({})
            print(reviews.count())
            # NOTE(review): Cursor.count() is deprecated (removed in PyMongo 4);
            # collection.count_documents({}) is the replacement.
            if reviews.count() > 0:
                # Cache hit: serve previously scraped reviews.
                return render_template('results.html', reviews=reviews)
            else:
                # Cache miss: scrape the Flipkart search results page.
                flipkart_url = "https://www.flipkart.com/search?q=" + searchString
                uClient = uReq(flipkart_url)
                flipkartPage = uClient.read()
                uClient.close()
                flipkart_html = bs(flipkartPage, "html.parser")
                # Result cards; these class names are Flipkart-internal and brittle.
                bigboxes = flipkart_html.find_all("div", {"class": "_1AtVbE col-12-12"})
                # The first three boxes are header/filter rows, not products.
                del bigboxes[0:3]
                # Follow the first product's link to its detail page.
                box = bigboxes[0]
                productLink = "https://www.flipkart.com" + box.div.div.div.a['href']
                prodRes = requests.get(productLink)
                prod_html = bs(prodRes.text, "html.parser")
                # Each review block on the product page.
                commentboxs = prod_html.find_all('div', {'class': "_16PBlm"})
                table = db[searchString]
                reviews = []
                for commentbox in commentboxs:
                    # Each field is scraped best-effort; missing markup
                    # falls back to a placeholder string.
                    try:
                        name = commentbox.div.div.find_all('p', {'class':'_2sc7ZR _2V5EHH'})[0].text
                    except:
                        name = 'No name'
                    try:
                        rating = commentbox.div.div.div.div.text
                    except:
                        rating = 'No rating'
                    try:
                        commentHead = commentbox.div.find_all('p', {'class':'_2-N8zT'})[0].text
                    except:
                        commentHead = 'No comment heading'
                    try:
                        comtag = commentbox.div.div.find_all('div', {'class':''})
                        custComment = comtag[0].div.text
                    except:
                        custComment = 'no customer comment'
                    mydict = {'Product':searchString, 'Name':name, 'Rating':rating, 'CommentHead': commentHead,
                              'Comment':custComment}
                    # Persist the review, then include it in the response.
                    x = table.insert_one(mydict)
                    reviews.append(mydict)
                return render_template('results.html', reviews=reviews)
        except:
            # NOTE(review): bare except hides every failure mode (network,
            # parsing, DB); this should at minimum be logged.
            return 'something is wrong'
    else:
        return render_template('index.html')
# Start the Flask development server when run as a script.
# NOTE(review): debug=True enables the interactive debugger; do not ship
# this setting to production.
if __name__ == "__main__":
    app.run(port=8000, debug=True)
| true | true |
f730a9e2fce406cfb29f69a52f9dfdd07eb5199b | 96 | py | Python | Python Pattern Programs/Alphabetic Patterns/Pattern 5.py | trial1user/Printing-Pattern-Programs | dde29e056b8e067fb3a824edb7ecb7dd9c9a776a | [
"MIT"
] | 61 | 2021-01-07T03:56:25.000Z | 2022-02-26T14:39:52.000Z | PythonPatternPrograms/AlphabeticPatterns/Pattern 5.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 51 | 2020-12-25T17:06:26.000Z | 2021-05-07T12:52:56.000Z | PythonPatternPrograms/AlphabeticPatterns/Pattern 5.py | Ankur-586/Printing-Pattern-Programs | 33e534ed66a02705e6cd6bc1992d4818a44d1b6b | [
"MIT"
] | 13 | 2021-01-07T09:50:21.000Z | 2021-12-17T11:03:57.000Z | for x in range(65, 70):
for y in range(65, x + 1):
print(chr(x), end="")
print() | 24 | 30 | 0.489583 | for x in range(65, 70):
for y in range(65, x + 1):
print(chr(x), end="")
print() | true | true |
f730a9e647e5d1432afdb7a94d9a14a04264fc9b | 5,738 | py | Python | platform.py | arhi/platform-nordicnrf51 | 267cd3fe97a6dd694c7c8b5fbae0919caeef5304 | [
"Apache-2.0"
] | null | null | null | platform.py | arhi/platform-nordicnrf51 | 267cd3fe97a6dd694c7c8b5fbae0919caeef5304 | [
"Apache-2.0"
] | null | null | null | platform.py | arhi/platform-nordicnrf51 | 267cd3fe97a6dd694c7c8b5fbae0919caeef5304 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import platform
from platformio.managers.platform import PlatformBase
from platformio.util import get_systype
class Nordicnrf51Platform(PlatformBase):
    """PlatformIO development platform for Nordic nRF51 chips.

    Adjusts the default package set per build target/framework and fills in
    debug-tool (J-Link / ST-Link / BlackMagic / CMSIS-DAP) server
    configuration for each board.
    """

    def is_embedded(self):
        """Report that this is an embedded (cross-compiled) platform."""
        return True

    def configure_default_packages(self, variables, targets):
        """Toggle optional packages based on targets/framework/protocols,
        then defer to the base-class implementation."""
        # Flash erase requires Nordic's nrfjprog tool.
        if "erase" in targets:
            self.packages["tool-nrfjprog"]["optional"] = False
        # Zephyr builds need the Zephyr framework packages and build tools,
        # plus a pinned GCC toolchain version (and gperf off Windows).
        if "zephyr" in variables.get("pioframework", []):
            for p in self.packages:
                if p.startswith("framework-zephyr-") or p in (
                    "tool-cmake", "tool-dtc", "tool-ninja"):
                    self.packages[p]["optional"] = False
            self.packages["toolchain-gccarmnoneeabi"]["version"] = "~1.80201.0"
            if "windows" not in get_systype():
                self.packages["tool-gperf"]["optional"] = False
        # configure J-LINK tool
        # Keep tool-jlink only when the project or board actually refers to
        # the "jlink" protocol anywhere.
        jlink_conds = [
            "jlink" in variables.get(option, "")
            for option in ("upload_protocol", "debug_tool")
        ]
        if variables.get("board"):
            board_config = self.board_config(variables.get("board"))
            jlink_conds.extend([
                "jlink" in board_config.get(key, "")
                for key in ("debug.default_tools", "upload.protocol")
            ])
        jlink_pkgname = "tool-jlink"
        if not any(jlink_conds) and jlink_pkgname in self.packages:
            del self.packages[jlink_pkgname]
        return PlatformBase.configure_default_packages(self, variables,
                                                       targets)

    def get_boards(self, id_=None):
        """Return board definition(s) with default debug tools filled in.

        With `id_` given, a single board is returned; otherwise every board
        in the result mapping is augmented in place.
        """
        result = PlatformBase.get_boards(self, id_)
        if not result:
            return result
        if id_:
            return self._add_default_debug_tools(result)
        else:
            for key, value in result.items():
                result[key] = self._add_default_debug_tools(result[key])
        return result

    def _add_default_debug_tools(self, board):
        """Populate ``board.manifest["debug"]["tools"]`` with a server config
        for every supported debug probe the board's upload protocols list."""
        debug = board.manifest.get("debug", {})
        upload_protocols = board.manifest.get("upload", {}).get(
            "protocols", [])
        if "tools" not in debug:
            debug["tools"] = {}
        # J-Link / ST-Link / BlackMagic Probe / CMSIS-DAP
        for link in ("blackmagic", "jlink", "stlink", "cmsis-dap"):
            # Skip probes the board does not support or that are already set.
            if link not in upload_protocols or link in debug["tools"]:
                continue
            if link == "blackmagic":
                debug["tools"]["blackmagic"] = {
                    "hwids": [["0x1d50", "0x6018"]],
                    "require_debug_port": True
                }
            elif link == "jlink":
                # NOTE(review): assert is stripped under `python -O`; an
                # explicit check would be more robust.
                assert debug.get("jlink_device"), (
                    "Missed J-Link Device ID for %s" % board.id)
                debug["tools"][link] = {
                    "server": {
                        "package": "tool-jlink",
                        "arguments": [
                            "-singlerun",
                            "-if", "SWD",
                            "-select", "USB",
                            "-device", debug.get("jlink_device"),
                            "-port", "2331"
                        ],
                        "executable": ("JLinkGDBServerCL.exe"
                                       if platform.system() == "Windows" else
                                       "JLinkGDBServer")
                    }
                }
            else:
                # OpenOCD-based probes (ST-Link, CMSIS-DAP).
                server_args = [
                    "-s", "$PACKAGE_DIR/scripts",
                    "-f", "interface/%s.cfg" % link
                ]
                if link == "stlink":
                    server_args.extend([
                        "-c",
                        "transport select hla_swd; set WORKAREASIZE 0x4000"
                    ])
                server_args.extend(["-f", "target/nrf51.cfg"])
                debug["tools"][link] = {
                    "server": {
                        "package": "tool-openocd",
                        "executable": "bin/openocd",
                        "arguments": server_args
                    }
                }
            debug["tools"][link]["onboard"] = link in debug.get("onboard_tools", [])
            debug["tools"][link]["default"] = link in debug.get("default_tools", [])
        board.manifest['debug'] = debug
        return board

    def configure_debug_options(self, initial_debug_options, ide_data):
        """Return a deep copy of the debug options with the requested adapter
        speed translated into OpenOCD / J-Link server arguments."""
        debug_options = copy.deepcopy(initial_debug_options)
        adapter_speed = initial_debug_options.get("speed")
        if adapter_speed:
            server_options = debug_options.get("server") or {}
            server_executable = server_options.get("executable", "").lower()
            if "openocd" in server_executable:
                debug_options["server"]["arguments"].extend(
                    ["-c", "adapter speed %s" % adapter_speed]
                )
            elif "jlink" in server_executable:
                debug_options["server"]["arguments"].extend(
                    ["-speed", adapter_speed]
                )
        return debug_options
| 39.30137 | 84 | 0.518822 |
import copy
import platform
from platformio.managers.platform import PlatformBase
from platformio.util import get_systype
class Nordicnrf51Platform(PlatformBase):
    """PlatformIO development platform for Nordic nRF51 chips.

    Adjusts the default package set per build target/framework and fills in
    debug-tool (J-Link / ST-Link / BlackMagic / CMSIS-DAP) server
    configuration for each board.
    """

    def is_embedded(self):
        """Report that this is an embedded (cross-compiled) platform."""
        return True

    def configure_default_packages(self, variables, targets):
        """Toggle optional packages based on targets/framework/protocols,
        then defer to the base-class implementation."""
        # Flash erase requires Nordic's nrfjprog tool.
        if "erase" in targets:
            self.packages["tool-nrfjprog"]["optional"] = False
        # Zephyr builds need the Zephyr framework packages and build tools,
        # plus a pinned GCC toolchain version (and gperf off Windows).
        if "zephyr" in variables.get("pioframework", []):
            for p in self.packages:
                if p.startswith("framework-zephyr-") or p in (
                    "tool-cmake", "tool-dtc", "tool-ninja"):
                    self.packages[p]["optional"] = False
            self.packages["toolchain-gccarmnoneeabi"]["version"] = "~1.80201.0"
            if "windows" not in get_systype():
                self.packages["tool-gperf"]["optional"] = False
        # Keep tool-jlink only when the project or board actually refers to
        # the "jlink" protocol anywhere.
        jlink_conds = [
            "jlink" in variables.get(option, "")
            for option in ("upload_protocol", "debug_tool")
        ]
        if variables.get("board"):
            board_config = self.board_config(variables.get("board"))
            jlink_conds.extend([
                "jlink" in board_config.get(key, "")
                for key in ("debug.default_tools", "upload.protocol")
            ])
        jlink_pkgname = "tool-jlink"
        if not any(jlink_conds) and jlink_pkgname in self.packages:
            del self.packages[jlink_pkgname]
        return PlatformBase.configure_default_packages(self, variables,
                                                       targets)

    def get_boards(self, id_=None):
        """Return board definition(s) with default debug tools filled in.

        With `id_` given, a single board is returned; otherwise every board
        in the result mapping is augmented in place.
        """
        result = PlatformBase.get_boards(self, id_)
        if not result:
            return result
        if id_:
            return self._add_default_debug_tools(result)
        else:
            for key, value in result.items():
                result[key] = self._add_default_debug_tools(result[key])
        return result

    def _add_default_debug_tools(self, board):
        """Populate ``board.manifest["debug"]["tools"]`` with a server config
        for every supported debug probe the board's upload protocols list."""
        debug = board.manifest.get("debug", {})
        upload_protocols = board.manifest.get("upload", {}).get(
            "protocols", [])
        if "tools" not in debug:
            debug["tools"] = {}
        # J-Link / ST-Link / BlackMagic Probe / CMSIS-DAP
        for link in ("blackmagic", "jlink", "stlink", "cmsis-dap"):
            # Skip probes the board does not support or that are already set.
            if link not in upload_protocols or link in debug["tools"]:
                continue
            if link == "blackmagic":
                debug["tools"]["blackmagic"] = {
                    "hwids": [["0x1d50", "0x6018"]],
                    "require_debug_port": True
                }
            elif link == "jlink":
                # NOTE(review): assert is stripped under `python -O`; an
                # explicit check would be more robust.
                assert debug.get("jlink_device"), (
                    "Missed J-Link Device ID for %s" % board.id)
                debug["tools"][link] = {
                    "server": {
                        "package": "tool-jlink",
                        "arguments": [
                            "-singlerun",
                            "-if", "SWD",
                            "-select", "USB",
                            "-device", debug.get("jlink_device"),
                            "-port", "2331"
                        ],
                        "executable": ("JLinkGDBServerCL.exe"
                                       if platform.system() == "Windows" else
                                       "JLinkGDBServer")
                    }
                }
            else:
                # OpenOCD-based probes (ST-Link, CMSIS-DAP).
                server_args = [
                    "-s", "$PACKAGE_DIR/scripts",
                    "-f", "interface/%s.cfg" % link
                ]
                if link == "stlink":
                    server_args.extend([
                        "-c",
                        "transport select hla_swd; set WORKAREASIZE 0x4000"
                    ])
                server_args.extend(["-f", "target/nrf51.cfg"])
                debug["tools"][link] = {
                    "server": {
                        "package": "tool-openocd",
                        "executable": "bin/openocd",
                        "arguments": server_args
                    }
                }
            debug["tools"][link]["onboard"] = link in debug.get("onboard_tools", [])
            debug["tools"][link]["default"] = link in debug.get("default_tools", [])
        board.manifest['debug'] = debug
        return board

    def configure_debug_options(self, initial_debug_options, ide_data):
        """Return a deep copy of the debug options with the requested adapter
        speed translated into OpenOCD / J-Link server arguments."""
        debug_options = copy.deepcopy(initial_debug_options)
        adapter_speed = initial_debug_options.get("speed")
        if adapter_speed:
            server_options = debug_options.get("server") or {}
            server_executable = server_options.get("executable", "").lower()
            if "openocd" in server_executable:
                debug_options["server"]["arguments"].extend(
                    ["-c", "adapter speed %s" % adapter_speed]
                )
            elif "jlink" in server_executable:
                debug_options["server"]["arguments"].extend(
                    ["-speed", adapter_speed]
                )
        return debug_options
| true | true |
f730a9f146914b909d2007a03b578d74b8950165 | 248 | py | Python | setup.py | decisionscients/Cancer | cdf86ed654881a1d113b9623d5e21f76dc9d36d2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | decisionscients/Cancer | cdf86ed654881a1d113b9623d5e21f76dc9d36d2 | [
"BSD-3-Clause"
] | null | null | null | setup.py | decisionscients/Cancer | cdf86ed654881a1d113b9623d5e21f76dc9d36d2 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import find_packages, setup
# Distribution metadata consumed by pip/setuptools when installing the project.
setup(
    name='src',  # distribution name
    packages=find_packages(),  # auto-discover every package in the project
    version='0.1.0',
    description='Learning Eligibility in Clinical Trials Using Neural Networks',
    author='John James',
    license='BSD-3',
)
| 22.545455 | 80 | 0.693548 | from setuptools import find_packages, setup
# Distribution metadata consumed by pip/setuptools when installing the project.
setup(
    name='src',  # distribution name
    packages=find_packages(),  # auto-discover every package in the project
    version='0.1.0',
    description='Learning Eligibility in Clinical Trials Using Neural Networks',
    author='John James',
    license='BSD-3',
)
| true | true |
f730aa6163e999ba3e3dedeced39133e1bbdaa3b | 4,662 | py | Python | src/aoikprojectstarter/mediator.py | AoiKuiyuyou/AoikProjectStarter-Python | 448789ad012be8774d4a3792639b155394b16048 | [
"MIT"
] | 3 | 2016-09-15T12:49:08.000Z | 2016-10-22T11:47:11.000Z | src/aoikprojectstarter/mediator.py | AoiKuiyuyou/AoikProjectStarter-Python | 448789ad012be8774d4a3792639b155394b16048 | [
"MIT"
] | null | null | null | src/aoikprojectstarter/mediator.py | AoiKuiyuyou/AoikProjectStarter-Python | 448789ad012be8774d4a3792639b155394b16048 | [
"MIT"
] | null | null | null | # coding: utf-8
#
"""
Mediator module.
This module puts together other modules to implement the program logic.
"""
from __future__ import absolute_import
# Standard-library imports
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import sys
import traceback
# Local imports
from .func import factorial
def int_ge0(text):
    """
    Convert given text to an integer greater than or equal to 0.

    Used by `ArgumentParser` as a `type=` callback.

    :param text: Text to convert to integer.
    :return: An integer greater than or equal to 0.
    :raises ArgumentTypeError: If `text` is not a non-negative integer.
    """
    try:
        int_value = int(text)
    except (TypeError, ValueError):
        int_value = None
    # Reject non-integers and negative values. An explicit check replaces the
    # original `assert`, which would be stripped under `python -O`; the narrow
    # except also no longer swallows unrelated exceptions.
    if int_value is None or int_value < 0:
        # Raise an exception to notify ArgumentParser
        raise ArgumentTypeError(
            '`%s` is not an integer greater than or equal to 0.' % text)
    # Return the valid value
    return int_value
def get_cmdargs_parser():
    """
    Build the program's command line argument parser.

    :return: An `ArgumentParser` instance with the `number` argument wired
        to the `int_ge0` validator.
    """
    arg_parser = ArgumentParser(prog='aoikprojectstarter')
    arg_parser.add_argument(
        'number',
        metavar='NUMBER',
        type=int_ge0,
        default=None,
        help='the number for which to compute factorial.',
    )
    return arg_parser
def main_core(args=None, step_func=None):
    """
    Implement program core logic: parse arguments and print a factorial.

    :param args: Command line arguments list; defaults to ``sys.argv[1:]``.
    :param step_func: Step info setter function (required).
    :return: Exit code, 0 on success.
    :raises ValueError: If `step_func` is not given.
    """
    if step_func is None:
        raise ValueError('Error (3P92V): Argument `step_func` is not given.')

    # Record the current step so the wrapper can report where a crash happened.
    step_func(title='Parse command line arguments')

    parser = get_cmdargs_parser()

    cli_args = sys.argv[1:] if args is None else args

    # With no arguments at all, show usage and exit cleanly.
    if not cli_args:
        parser.print_help()
        return 0

    parsed = parser.parse_args(cli_args)

    step_func(title='Compute factorial')

    number = parsed.number
    print('Factorial of {} is {}'.format(number, factorial(number)))
    return 0
def main_wrap(args=None):
    """
    Wrap `main_core` to provide uncaught exception handling.

    :param args: Command line arguments list.
    :return: Exit code.
    """
    # Mutable state shared with the closure below; `main_core` updates it as
    # it progresses so failures can be attributed to a step.
    state = {'title': '', 'exit_code': 0}

    def _record_step(title=None, exit_code=None):
        """Record the current step title and/or the exit code to use on failure."""
        if title is not None:
            state['title'] = title
        if exit_code is not None:
            state['exit_code'] = exit_code

    try:
        return main_core(args=args, step_func=_record_step)
    except SystemExit:
        # Propagate explicit exits untouched.
        raise
    except KeyboardInterrupt:
        # Ctrl-C is not an error.
        return 0
    except BaseException:
        tb_text = traceback.format_exc()
        title = state.get('title', '')
        # Include the failing step title in the report when one was recorded.
        if title:
            msg = '# Error (5QDEX): {0}\n---\n{1}---\n'.format(title, tb_text)
        else:
            msg = '# Error (5QDEX)\n---\n{0}---\n'.format(tb_text)
        sys.stderr.write(msg)
        return state.get('exit_code', 8)
| 22.631068 | 77 | 0.611111 |
from __future__ import absolute_import
from argparse import ArgumentParser
from argparse import ArgumentTypeError
import sys
import traceback
from .func import factorial
def int_ge0(text):
    """
    Convert given text to an integer greater than or equal to 0.

    Used by `ArgumentParser` as a `type=` callback.

    :param text: Text to convert to integer.
    :return: An integer greater than or equal to 0.
    :raises ArgumentTypeError: If `text` is not a non-negative integer.
    """
    try:
        int_value = int(text)
    except (TypeError, ValueError):
        int_value = None
    # Reject non-integers and negative values. An explicit check replaces the
    # original `assert`, which would be stripped under `python -O`; the narrow
    # except also no longer swallows unrelated exceptions.
    if int_value is None or int_value < 0:
        raise ArgumentTypeError(
            '`%s` is not an integer greater than or equal to 0.' % text)
    return int_value
def get_cmdargs_parser():
    """
    Create the command line arguments parser.

    :return: An `ArgumentParser` instance with the `number` argument added.
    """
    parser = ArgumentParser(prog='aoikprojectstarter')
    # `number` must be a non-negative integer; validated by `int_ge0`.
    parser.add_argument(
        'number',
        type=int_ge0,
        default=None,
        metavar='NUMBER',
        help='the number for which to compute factorial.',
    )
    return parser
def main_core(args=None, step_func=None):
    """
    Implement program core logic: parse arguments and print a factorial.

    :param args: Command line arguments list; defaults to ``sys.argv[1:]``.
    :param step_func: Step info setter function (required).
    :return: Exit code, 0 on success.
    :raises ValueError: If `step_func` is not given.
    """
    if step_func is None:
        raise ValueError('Error (3P92V): Argument `step_func` is not given.')
    # Record the current step so the wrapper can report where a crash happened.
    step_func(title='Parse command line arguments')
    cmdargs_parser = get_cmdargs_parser()
    if args is None:
        args = sys.argv[1:]
    # With no arguments at all, show usage and exit cleanly.
    if not args:
        cmdargs_parser.print_help()
        return 0
    cmdargs = cmdargs_parser.parse_args(args)
    step_func(title='Compute factorial')
    number = cmdargs.number
    result = factorial(number)
    # Get message
    msg = 'Factorial of {} is {}'.format(number, result)
    # Print the message
    print(msg)
    # Return without error
    return 0
def main_wrap(args=None):
    """
    Wrap `main_core` to provide uncaught exception handling.

    :param args: Command line arguments list.
    :return: Exit code.
    """
    # Dict that contains step info; updated by the closure below as
    # `main_core` progresses, so failures can be attributed to a step.
    step_info = {
        'title': '',
        'exit_code': 0
    }

    def _step_func(title=None, exit_code=None):
        """
        Step info setter function.

        :param title: Step title.
        :param exit_code: Exit code.
        :return: None.
        """
        if title is not None:
            step_info['title'] = title
        if exit_code is not None:
            step_info['exit_code'] = exit_code

    try:
        # Call `main_core` to implement program core logic
        return main_core(args=args, step_func=_step_func)
    except SystemExit:
        # Let explicit exits propagate untouched.
        raise
    except KeyboardInterrupt:
        # Ctrl-C is not an error.
        return 0
    except BaseException:
        step_title = step_info.get('title', '')
        tb_msg = traceback.format_exc()
        # Fixed: the two message format strings below were truncated to bare
        # `msg = '` (broken string literals / syntax error) in this copy;
        # restored so the report includes the failing step and traceback.
        if step_title:
            msg = '# Error (5QDEX): {0}\n---\n{1}---\n'.format(
                step_title, tb_msg
            )
        else:
            msg = '# Error (5QDEX)\n---\n{0}---\n'.format(tb_msg)
        sys.stderr.write(msg)
        exit_code = step_info.get('exit_code', 8)
        return exit_code
| true | true |
f730abdde75909808210df1f3886c99110f87f72 | 11,682 | py | Python | poetry/repositories/legacy_repository.py | jancespivo/poetry | 4aee3fb9c2e99c189d2723a81fce2356c3589047 | [
"MIT"
] | null | null | null | poetry/repositories/legacy_repository.py | jancespivo/poetry | 4aee3fb9c2e99c189d2723a81fce2356c3589047 | [
"MIT"
] | null | null | null | poetry/repositories/legacy_repository.py | jancespivo/poetry | 4aee3fb9c2e99c189d2723a81fce2356c3589047 | [
"MIT"
] | null | null | null | import cgi
import re
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from html import unescape
except ImportError:
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
unescape = HTMLParser().unescape
from typing import Generator
from typing import Union
import html5lib
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachy import CacheManager
import poetry.packages
from poetry.config import Config
from poetry.locations import CACHE_DIR
from poetry.masonry.publishing.uploader import wheel_file_re
from poetry.packages import Package
from poetry.packages import dependency_from_pep_508
from poetry.packages.utils.link import Link
from poetry.semver import parse_constraint
from poetry.semver import Version
from poetry.semver import VersionConstraint
from poetry.utils._compat import Path
from poetry.utils.helpers import canonicalize_name, get_http_basic_auth
from poetry.version.markers import InvalidMarker
from .pypi_repository import PyPiRepository
class Page:
    """A parsed HTML page of a PEP 503 ("simple") package index.

    Exposes the distribution links found on the page and the versions
    encoded in their filenames.
    """

    # Matches "<name>-<version>" in a distribution filename.
    VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
    # Distribution file extensions we know how to handle.
    SUPPORTED_FORMATS = [
        ".tar.gz",
        ".whl",
        ".zip",
        ".tar.bz2",
        ".tar.xz",
        ".tar.Z",
        ".tar",
    ]

    def __init__(self, url, content, headers):
        """Parse raw HTML `content` fetched from `url`.

        The charset from the Content-Type header, when present, is passed
        to html5lib as the transport encoding.
        """
        if not url.endswith("/"):
            url += "/"
        self._url = url
        encoding = None
        if headers and "Content-Type" in headers:
            content_type, params = cgi.parse_header(headers["Content-Type"])
            if "charset" in params:
                encoding = params["charset"]
        self._content = content
        if encoding is None:
            self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
        else:
            self._parsed = html5lib.parse(
                content, transport_encoding=encoding, namespaceHTMLElements=False
            )

    @property
    def versions(self):  # type: () -> Generator[Version]
        """Yield each distinct, parsable version found on the page."""
        seen = set()
        for link in self.links:
            version = self.link_version(link)
            if not version:
                continue
            if version in seen:
                continue
            seen.add(version)
            yield version

    @property
    def links(self):  # type: () -> Generator[Link]
        """Yield a `Link` for every anchor pointing at a supported archive."""
        for anchor in self._parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(urlparse.urljoin(self._url, href))
                # PEP 503: the anchor may carry a requires-python constraint.
                pyrequire = anchor.get("data-requires-python")
                pyrequire = unescape(pyrequire) if pyrequire else None
                link = Link(url, self, requires_python=pyrequire)
                if link.ext not in self.SUPPORTED_FORMATS:
                    continue
                yield link

    def links_for_version(self, version):  # type: (Version) -> Generator[Link]
        """Yield only the links whose filename encodes `version`."""
        for link in self.links:
            if self.link_version(link) == version:
                yield link

    def link_version(self, link):  # type: (Link) -> Union[Version, None]
        """Extract the version from a link's filename, or None if unparsable."""
        m = wheel_file_re.match(link.filename)
        if m:
            version = m.group("ver")
        else:
            # Not a wheel: strip the extension and regex out the version part.
            info, ext = link.splitext()
            match = self.VERSION_REGEX.match(info)
            if not match:
                return
            version = match.group(2)
        try:
            version = Version.parse(version)
        except ValueError:
            return
        return version

    _clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)

    def clean_link(self, url):
        """Makes sure a link is fully encoded. That is, if a ' ' shows up in
        the link, it will be rewritten to %20 (while not over-quoting
        % or other characters)."""
        return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
class LegacyRepository(PyPiRepository):
    """Repository backed by a PEP 503 "simple" (legacy HTML) package index."""

    def __init__(self, name, url, disable_cache=False):
        """
        :param name: Repository name; "pypi" is reserved for the default index.
        :param url: Base URL of the index (trailing slash stripped).
        :param disable_cache: When True, release info is always re-fetched.
        :raises ValueError: If `name` is "pypi".
        """
        if name == "pypi":
            raise ValueError("The name [pypi] is reserved for repositories")

        self._packages = []
        self._name = name
        self._url = url.rstrip("/")
        self._cache_dir = Path(CACHE_DIR) / "cache" / "repositories" / name
        # Release metadata persists on disk; constraint matches and packages
        # only live for the process lifetime.
        self._cache = CacheManager(
            {
                "default": "releases",
                "serializer": "json",
                "stores": {
                    "releases": {"driver": "file", "path": str(self._cache_dir)},
                    "packages": {"driver": "dict"},
                    "matches": {"driver": "dict"},
                },
            }
        )

        # HTTP layer with an on-disk response cache.
        self._session = CacheControl(
            requests.session(), cache=FileCache(str(self._cache_dir / "_http"))
        )

        # Fall back to credentials from auth.toml only when the URL itself
        # does not embed a username.
        url_parts = urlparse.urlparse(self._url)
        if not url_parts.username:
            self._session.auth = get_http_basic_auth(
                Config.create("auth.toml"), self.name
            )

        self._disable_cache = disable_cache

    @property
    def name(self):
        """The configured repository name."""
        return self._name

    def find_packages(
        self, name, constraint=None, extras=None, allow_prereleases=False
    ):
        """
        Return a lightweight `Package` for every release of `name` that
        satisfies `constraint`.

        NOTE(review): `allow_prereleases` is part of the repository interface
        but is not consulted by this implementation.
        """
        packages = []

        if constraint is not None and not isinstance(constraint, VersionConstraint):
            constraint = parse_constraint(constraint)

        # Matching versions are memoized per (name, constraint) for 5 minutes.
        key = name
        if constraint:
            key = "{}:{}".format(key, str(constraint))

        if self._cache.store("matches").has(key):
            versions = self._cache.store("matches").get(key)
        else:
            page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
            if page is None:
                return []

            # Simplified from `not constraint or (constraint and ...)`.
            versions = [
                version
                for version in page.versions
                if not constraint or constraint.allows(version)
            ]

            self._cache.store("matches").put(key, versions, 5)

        for version in versions:
            package = Package(name, version)
            package.source_type = "legacy"
            package.source_url = self._url

            if extras is not None:
                package.requires_extras = extras

            packages.append(package)

        self._log(
            "{} packages found for {} {}".format(len(packages), name, str(constraint)),
            level="debug",
        )

        return packages

    def package(
        self, name, version, extras=None
    ):  # type: (...) -> poetry.packages.Package
        """
        Retrieve the release information.

        This is a heavy task which takes time.
        We have to download a package to get the dependencies.
        We also need to download every file matching this release
        to get the various hashes.

        Note that, this will be cached so the subsequent operations
        should be much faster.
        """
        try:
            # Serve a previously built package when available.
            index = self._packages.index(
                poetry.packages.Package(name, version, version)
            )

            return self._packages[index]
        except ValueError:
            if extras is None:
                extras = []

            release_info = self.get_release_info(name, version)
            package = poetry.packages.Package(name, version, version)
            package.source_type = "legacy"
            package.source_url = self._url
            package.source_reference = self.name
            requires_dist = release_info["requires_dist"] or []
            for req in requires_dist:
                try:
                    dependency = dependency_from_pep_508(req)
                except InvalidMarker:
                    # Invalid marker: strip it and hope for the best.
                    req = req.split(";")[0]
                    dependency = dependency_from_pep_508(req)

                if dependency.extras:
                    for extra in dependency.extras:
                        if extra not in package.extras:
                            package.extras[extra] = []

                        package.extras[extra].append(dependency)

                if not dependency.is_optional():
                    package.requires.append(dependency)

            # Adding description
            package.description = release_info.get("summary", "")

            # Adding hashes information
            package.hashes = release_info["digests"]

            # Activate extra dependencies
            for extra in extras:
                if extra in package.extras:
                    for dep in package.extras[extra]:
                        dep.activate()

                    package.requires += package.extras[extra]

            self._packages.append(package)

            return package

    def _get_release_info(self, name, version):  # type: (str, str) -> dict
        """Scrape the index page for `name` and assemble release metadata
        (summary, requirements, sha256 digests) for `version`."""
        page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
        if page is None:
            raise ValueError('No package named "{}"'.format(name))

        data = {
            "name": name,
            "version": version,
            "summary": "",
            "requires_dist": [],
            "requires_python": [],
            "digests": [],
        }

        links = list(page.links_for_version(Version.parse(version)))
        if not links:
            raise ValueError(
                'No valid distribution links found for package: "{}" version: "{}"'.format(
                    name, version
                )
            )
        urls = {}
        hashes = []
        default_link = links[0]
        for link in links:
            # Classify each file: any wheel, a .tar.gz sdist, or another
            # archive type as a fallback sdist.
            if link.is_wheel:
                urls["bdist_wheel"] = link.url
            elif link.filename.endswith(".tar.gz"):
                urls["sdist"] = link.url
            elif (
                link.filename.endswith((".zip", ".bz2", ".xz", ".Z", ".tar"))
                and "sdist" not in urls
            ):
                urls["sdist"] = link.url

            # Only sha256 digests are recorded. (Fixed: the original bound
            # `link.hash` to a local named `hash`, shadowing the builtin.)
            if link.hash_name == "sha256":
                hashes.append(link.hash)

        # Fixed: assigned once after the loop instead of on every iteration.
        data["digests"] = hashes

        if not urls:
            # Nothing classified above; fall back to the first link when it
            # is a universal wheel or a recognizable sdist.
            if default_link.is_wheel:
                m = wheel_file_re.match(default_link.filename)
                python = m.group("pyver")
                platform = m.group("plat")
                if python == "py2.py3" and platform == "any":
                    urls["bdist_wheel"] = default_link.url
            elif default_link.filename.endswith(".tar.gz"):
                urls["sdist"] = default_link.url
            elif (
                default_link.filename.endswith((".zip", ".bz2")) and "sdist" not in urls
            ):
                urls["sdist"] = default_link.url
            else:
                return data

        info = self._get_info_from_urls(urls)

        data["summary"] = info["summary"]
        data["requires_dist"] = info["requires_dist"]
        data["requires_python"] = info["requires_python"]

        return data

    def _download(self, url, dest):  # type: (str, str) -> None
        """Stream `url` to the file path `dest` in 1 KiB chunks."""
        r = self._session.get(url, stream=True)
        with open(dest, "wb") as f:
            for chunk in r.raw.stream(1024):
                if chunk:
                    f.write(chunk)

    def _get(self, endpoint):  # type: (str) -> Union[Page, None]
        """Fetch an index page for `endpoint`; return None on HTTP 404."""
        url = self._url + endpoint
        response = self._session.get(url)
        if response.status_code == 404:
            return

        return Page(url, response.content, response.headers)
| 31.069149 | 91 | 0.54982 | import cgi
import re
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
try:
from html import unescape
except ImportError:
try:
from html.parser import HTMLParser
except ImportError:
from HTMLParser import HTMLParser
unescape = HTMLParser().unescape
from typing import Generator
from typing import Union
import html5lib
import requests
from cachecontrol import CacheControl
from cachecontrol.caches.file_cache import FileCache
from cachy import CacheManager
import poetry.packages
from poetry.config import Config
from poetry.locations import CACHE_DIR
from poetry.masonry.publishing.uploader import wheel_file_re
from poetry.packages import Package
from poetry.packages import dependency_from_pep_508
from poetry.packages.utils.link import Link
from poetry.semver import parse_constraint
from poetry.semver import Version
from poetry.semver import VersionConstraint
from poetry.utils._compat import Path
from poetry.utils.helpers import canonicalize_name, get_http_basic_auth
from poetry.version.markers import InvalidMarker
from .pypi_repository import PyPiRepository
class Page:
    """A parsed HTML page of a PEP 503 ("simple") package index.

    Exposes the distribution links found on the page and the versions
    encoded in their filenames.
    """

    # Matches "<name>-<version>" in a distribution filename.
    VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
    # Distribution file extensions we know how to handle.
    SUPPORTED_FORMATS = [
        ".tar.gz",
        ".whl",
        ".zip",
        ".tar.bz2",
        ".tar.xz",
        ".tar.Z",
        ".tar",
    ]

    def __init__(self, url, content, headers):
        """Parse raw HTML `content` fetched from `url`.

        The charset from the Content-Type header, when present, is passed
        to html5lib as the transport encoding.
        """
        if not url.endswith("/"):
            url += "/"
        self._url = url
        encoding = None
        if headers and "Content-Type" in headers:
            content_type, params = cgi.parse_header(headers["Content-Type"])
            if "charset" in params:
                encoding = params["charset"]
        self._content = content
        if encoding is None:
            self._parsed = html5lib.parse(content, namespaceHTMLElements=False)
        else:
            self._parsed = html5lib.parse(
                content, transport_encoding=encoding, namespaceHTMLElements=False
            )

    @property
    def versions(self):
        """Yield each distinct, parsable version found on the page."""
        seen = set()
        for link in self.links:
            version = self.link_version(link)
            if not version:
                continue
            if version in seen:
                continue
            seen.add(version)
            yield version

    @property
    def links(self):
        """Yield a `Link` for every anchor pointing at a supported archive."""
        for anchor in self._parsed.findall(".//a"):
            if anchor.get("href"):
                href = anchor.get("href")
                url = self.clean_link(urlparse.urljoin(self._url, href))
                # PEP 503: the anchor may carry a requires-python constraint.
                pyrequire = anchor.get("data-requires-python")
                pyrequire = unescape(pyrequire) if pyrequire else None
                link = Link(url, self, requires_python=pyrequire)
                if link.ext not in self.SUPPORTED_FORMATS:
                    continue
                yield link

    def links_for_version(self, version):
        """Yield only the links whose filename encodes `version`."""
        for link in self.links:
            if self.link_version(link) == version:
                yield link

    def link_version(self, link):
        """Extract the version from a link's filename, or None if unparsable."""
        m = wheel_file_re.match(link.filename)
        if m:
            version = m.group("ver")
        else:
            # Not a wheel: strip the extension and regex out the version part.
            info, ext = link.splitext()
            match = self.VERSION_REGEX.match(info)
            if not match:
                return
            version = match.group(2)
        try:
            version = Version.parse(version)
        except ValueError:
            return
        return version

    _clean_re = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)

    def clean_link(self, url):
        """Percent-encode any character in `url` that is not URL-safe
        (e.g. ' ' becomes %20) without over-quoting '%' itself."""
        return self._clean_re.sub(lambda match: "%%%2x" % ord(match.group(0)), url)
class LegacyRepository(PyPiRepository):
    """Package repository backed by a PEP 503 "simple" (HTML) index.

    Unlike the default PyPI repository there is no JSON API: the list of
    available versions is scraped from the index pages, and release
    metadata is extracted from the distribution files themselves.
    """

    def __init__(self, name, url, disable_cache=False):
        # "pypi" is the implicit name of the default repository.
        if name == "pypi":
            raise ValueError("The name [pypi] is reserved for repositories")

        self._packages = []
        self._name = name
        self._url = url.rstrip("/")
        self._cache_dir = Path(CACHE_DIR) / "cache" / "repositories" / name
        # "releases" is persisted on disk; "packages" and "matches" are
        # in-memory only and live for the duration of the process.
        self._cache = CacheManager(
            {
                "default": "releases",
                "serializer": "json",
                "stores": {
                    "releases": {"driver": "file", "path": str(self._cache_dir)},
                    "packages": {"driver": "dict"},
                    "matches": {"driver": "dict"},
                },
            }
        )

        # HTTP session with an on-disk HTTP cache next to the release cache.
        self._session = CacheControl(
            requests.session(), cache=FileCache(str(self._cache_dir / "_http"))
        )

        # Fall back to credentials from auth.toml unless the URL itself
        # already embeds a username.
        url_parts = urlparse.urlparse(self._url)
        if not url_parts.username:
            self._session.auth = get_http_basic_auth(
                Config.create("auth.toml"), self.name
            )

        self._disable_cache = disable_cache

    @property
    def name(self):
        return self._name

    def find_packages(
        self, name, constraint=None, extras=None, allow_prereleases=False
    ):
        """Return a Package for every release of *name* matching *constraint*.

        Matching version lists are cached per (name, constraint) key for a
        short time to avoid re-fetching the index page.
        """
        packages = []

        if constraint is not None and not isinstance(constraint, VersionConstraint):
            constraint = parse_constraint(constraint)

        key = name
        if constraint:
            key = "{}:{}".format(key, str(constraint))

        if self._cache.store("matches").has(key):
            versions = self._cache.store("matches").get(key)
        else:
            # PEP 503: project pages live under the canonical (normalized)
            # project name.
            page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
            if page is None:
                return []

            versions = []
            for version in page.versions:
                if not constraint or (constraint and constraint.allows(version)):
                    versions.append(version)

            # Cache the matching versions (TTL of 5 minutes).
            self._cache.store("matches").put(key, versions, 5)

        for version in versions:
            package = Package(name, version)
            package.source_type = "legacy"
            package.source_url = self._url

            if extras is not None:
                package.requires_extras = extras

            packages.append(package)

        self._log(
            "{} packages found for {} {}".format(len(packages), name, str(constraint)),
            level="debug",
        )

        return packages

    def package(
        self, name, version, extras=None
    ):
        """Return a fully populated Package (dependencies, extras, hashes).

        Results are memoized in ``self._packages``; the ValueError from
        ``list.index`` signals a cache miss.
        """
        try:
            index = self._packages.index(
                poetry.packages.Package(name, version, version)
            )

            return self._packages[index]
        except ValueError:
            if extras is None:
                extras = []

            release_info = self.get_release_info(name, version)
            package = poetry.packages.Package(name, version, version)
            package.source_type = "legacy"
            package.source_url = self._url
            package.source_reference = self.name
            requires_dist = release_info["requires_dist"] or []
            for req in requires_dist:
                try:
                    dependency = dependency_from_pep_508(req)
                except InvalidMarker:
                    # Invalid marker: drop the marker part and retry with
                    # the bare requirement.
                    req = req.split(";")[0]

                    dependency = dependency_from_pep_508(req)

                if dependency.extras:
                    for extra in dependency.extras:
                        if extra not in package.extras:
                            package.extras[extra] = []

                        package.extras[extra].append(dependency)

                if not dependency.is_optional():
                    package.requires.append(dependency)

            package.description = release_info.get("summary", "")
            package.hashes = release_info["digests"]

            # Activate the dependencies of any requested extras so they are
            # included in the package's requirement list.
            for extra in extras:
                if extra in package.extras:
                    for dep in package.extras[extra]:
                        dep.activate()

                    package.requires += package.extras[extra]

            self._packages.append(package)

            return package

    def _get_release_info(self, name, version):
        """Scrape summary/requirements/hashes for one release.

        Raises ValueError when the project page or a usable distribution
        link for *version* cannot be found.
        """
        page = self._get("/{}/".format(canonicalize_name(name).replace(".", "-")))
        if page is None:
            raise ValueError('No package named "{}"'.format(name))

        data = {
            "name": name,
            "version": version,
            "summary": "",
            "requires_dist": [],
            "requires_python": [],
            "digests": [],
        }

        links = list(page.links_for_version(Version.parse(version)))
        if not links:
            raise ValueError(
                'No valid distribution links found for package: "{}" version: "{}"'.format(
                    name, version
                )
            )
        urls = {}
        hashes = []
        default_link = links[0]
        for link in links:
            # Prefer a wheel; otherwise remember the first sdist-like archive.
            if link.is_wheel:
                urls["bdist_wheel"] = link.url
            elif link.filename.endswith(".tar.gz"):
                urls["sdist"] = link.url
            elif (
                link.filename.endswith((".zip", ".bz2", ".xz", ".Z", ".tar"))
                and "sdist" not in urls
            ):
                urls["sdist"] = link.url

            # NOTE(review): `hash` shadows the builtin of the same name.
            hash = link.hash
            if link.hash_name == "sha256":
                hashes.append(hash)

        data["digests"] = hashes

        if not urls:
            # No recognized archive type; fall back to the first link when
            # it is a universal wheel or a known archive.
            if default_link.is_wheel:
                m = wheel_file_re.match(default_link.filename)
                python = m.group("pyver")
                platform = m.group("plat")
                if python == "py2.py3" and platform == "any":
                    urls["bdist_wheel"] = default_link.url
            elif default_link.filename.endswith(".tar.gz"):
                urls["sdist"] = default_link.url
            elif (
                default_link.filename.endswith((".zip", ".bz2")) and "sdist" not in urls
            ):
                urls["sdist"] = default_link.url
            else:
                return data

        # Download/inspect the chosen files to extract the metadata.
        info = self._get_info_from_urls(urls)

        data["summary"] = info["summary"]
        data["requires_dist"] = info["requires_dist"]
        data["requires_python"] = info["requires_python"]

        return data

    def _download(self, url, dest):
        """Stream *url* to the local path *dest* in 1 KiB chunks."""
        r = self._session.get(url, stream=True)
        with open(dest, "wb") as f:
            for chunk in r.raw.stream(1024):
                if chunk:
                    f.write(chunk)

    def _get(self, endpoint):
        """Fetch an index page; returns ``None`` on HTTP 404."""
        url = self._url + endpoint
        response = self._session.get(url)
        if response.status_code == 404:
            return

        return Page(url, response.content, response.headers)
| true | true |
f730abe304c37698a7a929d72c33c9de240a15f7 | 9,543 | py | Python | config/settings/base.py | szymanskirafal/tryread | f7e395c318e33de84992c79eaa5844028c378a2d | [
"MIT"
] | null | null | null | config/settings/base.py | szymanskirafal/tryread | f7e395c318e33de84992c79eaa5844028c378a2d | [
"MIT"
] | null | null | null | config/settings/base.py | szymanskirafal/tryread | f7e395c318e33de84992c79eaa5844028c378a2d | [
"MIT"
] | null | null | null | """
Base settings to build other settings files upon.
"""
import environ

# Project layout helpers: ROOT_DIR is the repository root, APPS_DIR the
# Django project package inside it.
ROOT_DIR = environ.Path(__file__) - 3  # (tryread/config/settings/base.py - 3 = tryread/)
APPS_DIR = ROOT_DIR.path('tryread')

env = environ.Env()

READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
    # OS environment variables take precedence over variables from .env
    env.read_env(str(ROOT_DIR.path('.env')))

# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True

# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
    'default': env.db('DATABASE_URL', default='postgres:///tryread'),
}
# Wrap every HTTP request in a database transaction.
DATABASES['default']['ATOMIC_REQUESTS'] = True

# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = 'config.urls'
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'

# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # 'django.contrib.humanize', # Handy template tags
    'django.contrib.admin',
]
THIRD_PARTY_APPS = [
    'crispy_forms',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'rest_framework',
    'django_pdb',
]
LOCAL_APPS = [
    'tryread.users.apps.UsersConfig',
    # Your stuff: custom apps go here
    'books',
    'home',
    'read',
    'writer',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS

# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {
    'sites': 'tryread.contrib.sites.migrations'
}

# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = 'users.User'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'users:redirect'
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = 'account_login'

# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
    # https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
    'django.contrib.auth.hashers.Argon2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2PasswordHasher',
    'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
    'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
    'django.contrib.auth.hashers.BCryptPasswordHasher',
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # NOTE(review): django-pdb drops into a debugger on errors — development
    # aid; confirm it is disabled/overridden in production settings.
    'django_pdb.middleware.PdbMiddleware',
]

# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [
    str(APPS_DIR.path('static')),
]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]

# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'

# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
    {
        # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
        'DIRS': [
            str(APPS_DIR.path('templates')),
        ],
        'OPTIONS': {
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
            'debug': DEBUG,
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap4'

# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (
    str(APPS_DIR.path('fixtures')),
)

# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')

# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = 'admin/'
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [
    ("""Rafal Szymanski""", 'r.szymansky@gmail.com'),
]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS

# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = 'username'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = 'tryread.users.adapters.AccountAdapter'
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = 'tryread.users.adapters.SocialAccountAdapter'

# Your stuff...
# ------------------------------------------------------------------------------
| 38.635628 | 98 | 0.625799 |
import environ
ROOT_DIR = environ.Path(__file__) - 3
APPS_DIR = ROOT_DIR.path('tryread')
env = environ.Env()
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
env.read_env(str(ROOT_DIR.path('.env')))
= env.bool('DJANGO_DEBUG', False)
TIME_ZONE = 'UTC'
= 'en-us'
= 1
= True
= True
= True
S = {
'default': env.db('DATABASE_URL', default='postgres:///tryread'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
= 'config.urls'
= 'config.wsgi.application'
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
admin',
]
THIRD_PARTY_APPS = [
'crispy_forms',
'allauth',
'allauth.account',
'allauth.socialaccount',
'rest_framework',
'django_pdb',
]
LOCAL_APPS = [
'tryread.users.apps.UsersConfig',
'books',
'home',
'read',
'writer',
]
= DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
= {
'sites': 'tryread.contrib.sites.migrations'
}
= [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
]
= 'users.User'
= 'users:redirect'
= 'account_login'
= [
.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
= [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
= [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_pdb.middleware.PdbMiddleware',
]
= str(ROOT_DIR('staticfiles'))
= '/static/'
(APPS_DIR.path('static')),
]
= [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
= str(APPS_DIR('media'))
= '/media/'
= [
{
mplate.backends.django.DjangoTemplates',
': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
ebug': DEBUG,
ders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
sors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
},
},
]
E_PACK = 'bootstrap4'
= (
str(APPS_DIR.path('fixtures')),
)
= env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
ADMIN_URL = 'admin/'
= [
("""Rafal Szymanski""", 'r.szymansky@gmail.com'),
]
= ADMINS
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ADAPTER = 'tryread.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'tryread.users.adapters.SocialAccountAdapter'
| true | true |
f730acfe327038eafb73c8469ea9eb060bf5ea02 | 4,297 | py | Python | inference.py | trainOwn/yolov3-Inference | 5695469292b82ca54209b99b75115c1aabad0b9e | [
"MIT"
] | null | null | null | inference.py | trainOwn/yolov3-Inference | 5695469292b82ca54209b99b75115c1aabad0b9e | [
"MIT"
] | null | null | null | inference.py | trainOwn/yolov3-Inference | 5695469292b82ca54209b99b75115c1aabad0b9e | [
"MIT"
] | null | null | null | import numpy as np
import argparse
import cv2 as cv
import subprocess
import time
import os
from support import infer_image, show_image
FLAGS = []


def _str2bool(value):
    """argparse-friendly boolean converter.

    The original script used ``type=bool``, which treats ANY non-empty
    string ("False", "0", ...) as True.  This converter parses the usual
    spellings explicitly and rejects everything else.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ('true', 't', 'yes', 'y', '1'):
        return True
    if value.lower() in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('-m', '--model-path',
        type=str,
        default='./model/',
        help='The directory where the model weights and \
            configuration files are.')

    parser.add_argument('-w', '--weights',
        type=str,
        default='./model/yolov3.weights',
        help='Path to the file which contains the weights \
                for YOLOv3.')

    parser.add_argument('-cfg', '--config',
        type=str,
        default='./model/yolov3.cfg',
        help='Path to the configuration file for the YOLOv3 model.')

    parser.add_argument('-i', '--image-path',
        type=str,
        help='The path to the image file')

    parser.add_argument('-v', '--video-path',
        type=str,
        help='The path to the video file')

    parser.add_argument('-vo', '--video-output-path',
        type=str,
        default='./output.avi',
        help='The path of the output video file')

    parser.add_argument('-l', '--labels',
        type=str,
        default='./model/coco-labels',
        help='Path to the file having the \
                    labels in a new-line seperated way.')

    parser.add_argument('-c', '--confidence',
        type=float,
        default=0.5,
        help='The model will reject boundaries which has a \
                probabiity less than the confidence value. \
                default: 0.5')

    parser.add_argument('-th', '--threshold',
        type=float,
        default=0.3,
        help='The threshold to use when applying the \
                Non-Max Suppresion')

    # _str2bool instead of bool: with type=bool, "--download-model False"
    # silently evaluated to True.
    parser.add_argument('--download-model',
        type=_str2bool,
        default=False,
        help='Set to True, if the model weights and configurations \
                are not present on your local machine.')

    parser.add_argument('-t', '--show-time',
        type=_str2bool,
        default=False,
        help='Show the time taken to infer each image.')

    FLAGS, unparsed = parser.parse_known_args()

    # Optionally fetch the weights/config with the helper script.
    if FLAGS.download_model:
        subprocess.call(['./model/get_model.sh'])

    # Class names, one per line.
    with open(FLAGS.labels) as f:
        labels = f.read().strip().split('\n')

    # One random color per class for drawing the detection boxes.
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')

    # Load the Darknet model.
    net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)

    # Names of the YOLO output layers.  getUnconnectedOutLayers() returns
    # an Nx1 array on OpenCV < 4.5.4 and a flat array afterwards;
    # flatten() handles both layouts (the old `i[0] - 1` indexing crashed
    # on newer OpenCV builds).
    layer_names = net.getLayerNames()
    layer_names = [layer_names[i - 1]
                   for i in np.asarray(net.getUnconnectedOutLayers()).flatten()]

    if FLAGS.image_path is None and FLAGS.video_path is None:
        print('Neither path to an image or path to video provided')
        print('Starting Inference on Webcam')

    if FLAGS.image_path:
        # Single-image inference.
        img = cv.imread(FLAGS.image_path)
        # cv.imread() signals failure by returning None, not by raising.
        # (The original `raise '<string>'` is a TypeError in Python 3 and
        # its try/except/finally ran inference even after a failed load.)
        if img is None:
            raise ValueError('Image cannot be loaded! '
                             'Please check the path provided!')
        height, width = img.shape[:2]
        img, _, _, _, _ = infer_image(net, layer_names, height, width, img,
                                      colors, labels, FLAGS)
        show_image(img)

    elif FLAGS.video_path:
        # Video-file inference: read, infer, and re-encode frame by frame.
        vid = cv.VideoCapture(FLAGS.video_path)
        if not vid.isOpened():
            raise ValueError('Video cannot be loaded! '
                             'Please check the path provided!')

        print("Inferencing on video:", FLAGS.video_path)
        height, width = None, None
        writer = None
        try:
            while True:
                grabbed, frame = vid.read()
                if not grabbed:
                    break

                if width is None or height is None:
                    height, width = frame.shape[:2]

                frame, _, _, _, _ = infer_image(net, layer_names, height,
                                                width, frame, colors, labels,
                                                FLAGS)

                if writer is None:
                    # Lazily initialize the writer once the frame size is known.
                    fourcc = cv.VideoWriter_fourcc(*"MJPG")
                    writer = cv.VideoWriter(FLAGS.video_output_path, fourcc, 30,
                                            (frame.shape[1], frame.shape[0]),
                                            True)
                writer.write(frame)
        finally:
            # Release resources even on error; guard against a zero-frame
            # video where the writer was never created.
            print("[INFO] Cleaning up...")
            if writer is not None:
                writer.release()
            vid.release()

    else:
        # Webcam inference: run full detection on every 6th frame and redraw
        # the cached boxes in between to keep the preview responsive.
        count = 0
        boxes, confidences, classids, idxs = None, None, None, None
        vid = cv.VideoCapture(0)
        try:
            while True:
                grabbed, frame = vid.read()
                if not grabbed or frame is None:
                    # Webcam unavailable or stream ended.
                    break
                height, width = frame.shape[:2]

                if count == 0:
                    frame, boxes, confidences, classids, idxs = infer_image(
                        net, layer_names, height, width, frame, colors,
                        labels, FLAGS)
                    count += 1
                else:
                    frame, boxes, confidences, classids, idxs = infer_image(
                        net, layer_names, height, width, frame, colors,
                        labels, FLAGS, boxes, confidences, classids, idxs,
                        infer=False)
                    count = (count + 1) % 6

                cv.imshow('webcam', frame)

                if cv.waitKey(1) & 0xFF == ord('q'):
                    break
        finally:
            vid.release()
            cv.destroyAllWindows()
| 26.20122 | 105 | 0.658366 | import numpy as np
import argparse
import cv2 as cv
import subprocess
import time
import os
from support import infer_image, show_image
FLAGS = []
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model-path',
type=str,
default='./model/',
help='The directory where the model weights and \
configuration files are.')
parser.add_argument('-w', '--weights',
type=str,
default='./model/yolov3.weights',
help='Path to the file which contains the weights \
for YOLOv3.')
parser.add_argument('-cfg', '--config',
type=str,
default='./model/yolov3.cfg',
help='Path to the configuration file for the YOLOv3 model.')
parser.add_argument('-i', '--image-path',
type=str,
help='The path to the image file')
parser.add_argument('-v', '--video-path',
type=str,
help='The path to the video file')
parser.add_argument('-vo', '--video-output-path',
type=str,
default='./output.avi',
help='The path of the output video file')
parser.add_argument('-l', '--labels',
type=str,
default='./model/coco-labels',
help='Path to the file having the \
labels in a new-line seperated way.')
parser.add_argument('-c', '--confidence',
type=float,
default=0.5,
help='The model will reject boundaries which has a \
probabiity less than the confidence value. \
default: 0.5')
parser.add_argument('-th', '--threshold',
type=float,
default=0.3,
help='The threshold to use when applying the \
Non-Max Suppresion')
parser.add_argument('--download-model',
type=bool,
default=False,
help='Set to True, if the model weights and configurations \
are not present on your local machine.')
parser.add_argument('-t', '--show-time',
type=bool,
default=False,
help='Show the time taken to infer each image.')
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.download_model:
subprocess.call(['./model/get_model.sh'])
labels = open(FLAGS.labels).read().strip().split('\n')
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')
net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)
layer_names = net.getLayerNames()
layer_names = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
if FLAGS.image_path is None and FLAGS.video_path is None:
print ('Neither path to an image or path to video provided')
print ('Starting Inference on Webcam')
if FLAGS.image_path:
try:
img = cv.imread(FLAGS.image_path)
height, width = img.shape[:2]
except:
raise 'Image cannot be loaded!\n\
Please check the path provided!'
finally:
img, _, _, _, _ = infer_image(net, layer_names, height, width, img, colors, labels, FLAGS)
show_image(img)
elif FLAGS.video_path:
try:
vid = cv.VideoCapture(FLAGS.video_path)
height, width = None, None
writer = None
except:
raise 'Video cannot be loaded!\n\
Please check the path provided!'
finally:
print("Inferencing on video:", FLAGS.video_path)
while True:
grabbed, frame = vid.read()
if not grabbed:
break
if width is None or height is None:
height, width = frame.shape[:2]
frame, _, _, _, _ = infer_image(net, layer_names, height, width, frame, colors, labels, FLAGS)
if writer is None:
fourcc = cv.VideoWriter_fourcc(*"MJPG")
writer = cv.VideoWriter(FLAGS.video_output_path, fourcc, 30,
(frame.shape[1], frame.shape[0]), True)
writer.write(frame)
print ("[INFO] Cleaning up...")
writer.release()
vid.release()
else:
count = 0
vid = cv.VideoCapture(0)
while True:
_, frame = vid.read()
height, width = frame.shape[:2]
if count == 0:
frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, \
height, width, frame, colors, labels, FLAGS)
count += 1
else:
frame, boxes, confidences, classids, idxs = infer_image(net, layer_names, \
height, width, frame, colors, labels, FLAGS, boxes, confidences, classids, idxs, infer=False)
count = (count + 1) % 6
cv.imshow('webcam', frame)
if cv.waitKey(1) & 0xFF == ord('q'):
break
vid.release()
cv.destroyAllWindows()
| true | true |
f730ad586dc64abb0dc959278121e0408d62bbe2 | 1,041 | py | Python | src/Python/12_Napisy_anagramy_palindromy/Zad6.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | 3 | 2020-09-19T21:38:30.000Z | 2022-03-30T11:02:26.000Z | src/Python/12_Napisy_anagramy_palindromy/Zad6.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | null | null | null | src/Python/12_Napisy_anagramy_palindromy/Zad6.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | 1 | 2022-02-04T09:13:20.000Z | 2022-02-04T09:13:20.000Z | """
Check whether there exists a permutation of the given word that is a palindrome.
"""
# Version 1: brute force — recursively generate all distinct permutations.


def znajdz_permutacje(napis, start, koniec, wynik=None):
    """Collect every distinct permutation of ``napis[start:koniec]``.

    Uses the classic in-place swap recursion: *napis* is a list of
    characters that is permuted in place and restored after each branch;
    each complete arrangement is joined into a string and appended to
    *wynik* if not already present.

    Bug fix: the original signature used a mutable default argument
    (``wynik=[]``), so results leaked between successive top-level calls.
    ``None`` as the default restores a fresh accumulator per call while
    remaining backward compatible for callers that pass *wynik*
    explicitly.
    """
    if wynik is None:
        wynik = []

    if start >= koniec:
        if "".join(napis) not in wynik:
            wynik.append("".join(napis))
    else:
        for i in range(start, koniec):
            napis[start], napis[i] = napis[i], napis[start]
            znajdz_permutacje(napis, start + 1, koniec, wynik)
            napis[start], napis[i] = napis[i], napis[start]
    return wynik
def czy_palindrom(slowo):
    """Return True when *slowo* reads the same forwards and backwards."""
    return slowo == slowo[::-1]
def czy_istnieje_permutacja_bedaca_palindromem_v1(slowo):
    """Brute force: return every distinct permutation of *slowo* that is a palindrome."""
    wszystkie = znajdz_permutacje(list(slowo), 0, len(slowo))
    return [permutacja for permutacja in wszystkie if czy_palindrom(permutacja)]
# Correctness smoke test: "adamm" has exactly these two palindromic permutations.
slowo = "adamm"
wynik = ["madam", "amdma"]
assert sorted(czy_istnieje_permutacja_bedaca_palindromem_v1(slowo)) == sorted(wynik)
| 20.82 | 84 | 0.621518 |
def znajdz_permutacje(napis, start, koniec, wynik=[]):
if start >= koniec:
if "".join(napis) not in wynik:
wynik.append("".join(napis))
else:
for i in range(start, koniec):
napis[start], napis[i] = napis[i], napis[start]
znajdz_permutacje(napis, start + 1, koniec, wynik)
napis[start], napis[i] = napis[i], napis[start]
return wynik
def czy_palindrom(slowo):
for i in range(len(slowo) // 2):
if slowo[i] != slowo[-i - 1]:
return False
return True
def czy_istnieje_permutacja_bedaca_palindromem_v1(slowo):
permutacje = znajdz_permutacje(list(slowo), 0, len(slowo))
wynik = []
for p in permutacje:
if czy_palindrom(p):
wynik.append(p)
return wynik
slowo = "adamm"
wynik = ["madam", "amdma"]
assert sorted(czy_istnieje_permutacja_bedaca_palindromem_v1(slowo)) == sorted(wynik)
| true | true |
f730adbf9c1f3a579711a5941736814a6d65e98c | 7,139 | py | Python | Tiny-ImageNet/SSKD/cifar.py | UBCDingXin/cGAN-KD | c32a4b014fe024222101ff11d63de518448f7f8d | [
"MIT"
] | 1 | 2021-08-21T09:19:17.000Z | 2021-08-21T09:19:17.000Z | CIFAR/CIFAR_50K/SSKD/cifar.py | UBCDingXin/cGAN-KD | c32a4b014fe024222101ff11d63de518448f7f8d | [
"MIT"
] | null | null | null | CIFAR/CIFAR_50K/SSKD/cifar.py | UBCDingXin/cGAN-KD | c32a4b014fe024222101ff11d63de518448f7f8d | [
"MIT"
] | 2 | 2021-04-15T08:23:49.000Z | 2021-09-15T06:52:25.000Z | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
import pickle
import torch
import torch.utils.data as data
from itertools import permutations
class VisionDataset(data.Dataset):
    """Local re-implementation of torchvision's dataset base class.

    Handles the ``root`` path, the mutually exclusive ``transforms`` vs
    ``transform``/``target_transform`` arguments, and a structured repr.
    Subclasses must implement ``__getitem__`` and ``__len__``.
    """

    # Indentation (in spaces) used by __repr__ for the body lines.
    _repr_indent = 4

    def __init__(self, root, transforms=None, transform=None, target_transform=None):
        # Fix: the original tested ``torch._six.string_classes`` — a private
        # API removed in modern PyTorch.  On Python 3 it was simply
        # ``(str,)``, so test against ``str`` directly (same behavior,
        # no AttributeError on torch >= 2.0).
        if isinstance(root, str):
            root = os.path.expanduser(root)
        self.root = root

        has_transforms = transforms is not None
        has_separate_transform = transform is not None or target_transform is not None
        if has_transforms and has_separate_transform:
            raise ValueError("Only transforms or transform/target_transform can "
                             "be passed as argument")

        # for backwards-compatibility
        self.transform = transform
        self.target_transform = target_transform

        if has_separate_transform:
            # NOTE(review): StandardTransform is not defined in this file
            # (presumably copied from torchvision); this branch would raise
            # NameError if exercised.  Kept as-is to preserve behavior.
            transforms = StandardTransform(transform, target_transform)
        self.transforms = transforms

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError

    def __repr__(self):
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return '\n'.join(lines)

    def _format_transform_repr(self, transform, head):
        # First line is prefixed with *head*; continuation lines are padded
        # with spaces so they align under it.
        lines = transform.__repr__().splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])

    def extra_repr(self):
        # Hook for subclasses to contribute extra lines to __repr__.
        return ""
class CIFAR10(VisionDataset):
    """CIFAR-10 loader with 4-rotation self-supervised augmentation.

    ``__getitem__`` returns a stack of the 0/90/180/270-degree rotations
    of one image (the self-supervision views used by SSKD) together with
    the class label.
    """

    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # [archive member name, expected md5] pairs.
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]

    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }

    def __init__(self, root, train=True,
                 transform=None, download=False):

        super(CIFAR10, self).__init__(root)
        self.transform = transform
        self.train = train  # training set or test set

        if download:
            # Downloading is deliberately disabled in this fork.
            raise ValueError('cannot download.')
            exit()  # unreachable — kept from the original
            #self.download()

        #if not self._check_integrity():
        #    raise RuntimeError('Dataset not found or corrupted.' +
        #                       ' You can use download=True to download it')

        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list

        self.data = []
        self.targets = []

        # now load the picked numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    # Py3 needs latin1 to decode the Py2-pickled batches.
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    # CIFAR-100 batches store their targets as 'fine_labels'.
                    self.targets.extend(entry['fine_labels'])

        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC

        self._load_meta()

    def _load_meta(self):
        """Load human-readable class names and build the name -> index map."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        #if not check_integrity(path, self.meta['md5']):
        #    raise RuntimeError('Dataset metadata file not found or corrupted.' +
        #                       ' You can use download=True to download it')
        with open(path, 'rb') as infile:
            if sys.version_info[0] == 2:
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index):
        img, target = self.data[index], self.targets[index]

        if self.train:
            # Random horizontal flip (training only).
            if np.random.rand() < 0.5:
                img = img[:,::-1,:]

        # Build the four rotated views; each one goes through the same
        # transform pipeline independently.
        img0 = np.rot90(img, 0).copy()
        img0 = Image.fromarray(img0)
        img0 = self.transform(img0)

        img1 = np.rot90(img, 1).copy()
        img1 = Image.fromarray(img1)
        img1 = self.transform(img1)

        img2 = np.rot90(img, 2).copy()
        img2 = Image.fromarray(img2)
        img2 = self.transform(img2)

        img3 = np.rot90(img, 3).copy()
        img3 = Image.fromarray(img3)
        img3 = self.transform(img3)

        img = torch.stack([img0,img1,img2,img3])

        return img, target

    def __len__(self):
        return len(self.data)

    def _check_integrity(self):
        # NOTE(review): check_integrity is not defined/imported in this file
        # (it comes from torchvision.datasets.utils) — calling this would
        # raise NameError.  Kept as-is; it is only reachable via download().
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self):
        # NOTE(review): download_url is likewise undefined in this file, and
        # __init__ raises before ever calling download(); dead code retained.
        import tarfile

        if self._check_integrity():
            print('Files already downloaded and verified')
            return

        download_url(self.url, self.root, self.filename, self.tgz_md5)

        # extract file
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)

    def extra_repr(self):
        return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
    This is a subclass of the `CIFAR10` Dataset.
    """
    # Directory created when the archive is extracted under ``root``.
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # (file name, expected md5) pairs for each split.
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    # CIFAR-100 labels come from the 100 fine-grained class names.
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| 32.747706 | 86 | 0.595321 | from __future__ import print_function
from PIL import Image
import os
import os.path
import numpy as np
import sys
import pickle
import torch
import torch.utils.data as data
from itertools import permutations
class VisionDataset(data.Dataset):
    """Base class for vision datasets: stores ``root`` and transform plumbing.

    NOTE(review): ``StandardTransform`` is referenced below but never defined
    or imported in this file -- passing ``transform``/``target_transform``
    would raise NameError here; confirm against the original torchvision code.
    """
    # Indent (in spaces) used when pretty-printing in __repr__.
    _repr_indent = 4
    def __init__(self, root, transforms=None, transform=None, target_transform=None):
        if isinstance(root, torch._six.string_classes):
            root = os.path.expanduser(root)
        self.root = root
        has_transforms = transforms is not None
        has_separate_transform = transform is not None or target_transform is not None
        # A joint ``transforms`` is mutually exclusive with the separate
        # ``transform``/``target_transform`` pair.
        if has_transforms and has_separate_transform:
            raise ValueError("Only transforms or transform/target_transform can "
                             "be passed as argument")
        self.transform = transform
        self.target_transform = target_transform
        if has_separate_transform:
            transforms = StandardTransform(transform, target_transform)
        self.transforms = transforms
    def __getitem__(self, index):
        # Subclasses must implement item access.
        raise NotImplementedError
    def __len__(self):
        # Subclasses must implement dataset size.
        raise NotImplementedError
    def __repr__(self):
        """Multi-line summary: class name, size, root, extra info, transforms."""
        head = "Dataset " + self.__class__.__name__
        body = ["Number of datapoints: {}".format(self.__len__())]
        if self.root is not None:
            body.append("Root location: {}".format(self.root))
        body += self.extra_repr().splitlines()
        if self.transforms is not None:
            body += [repr(self.transforms)]
        lines = [head] + [" " * self._repr_indent + line for line in body]
        return '\n'.join(lines)
    def _format_transform_repr(self, transform, head):
        # Indent a transform's multi-line repr so it aligns under ``head``.
        lines = transform.__repr__().splitlines()
        return (["{}{}".format(head, lines[0])] +
                ["{}{}".format(" " * len(head), line) for line in lines[1:]])
    def extra_repr(self):
        # Hook for subclasses to append extra lines to __repr__.
        return ""
class CIFAR10(VisionDataset):
    """CIFAR-10 dataset variant whose __getitem__ returns a stack of four
    rotation views (0/90/180/270 degrees) per image.

    NOTE(review): ``check_integrity`` and ``download_url`` used below are not
    imported in this file -- presumably from ``torchvision.datasets.utils``.
    """
    # Directory created when the archive is extracted under ``root``.
    base_folder = 'cifar-10-batches-py'
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # (file name, expected md5) pairs for each split.
    train_list = [
        ['data_batch_1', 'c99cafc152244af753f735de768cd75f'],
        ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'],
        ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'],
        ['data_batch_4', '634d18415352ddfa80567beed471001a'],
        ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'],
    ]
    test_list = [
        ['test_batch', '40351d587109b95175f43aff81a1287e'],
    ]
    meta = {
        'filename': 'batches.meta',
        'key': 'label_names',
        'md5': '5ff9c542aee3614f3951f8cda6e48888',
    }
    def __init__(self, root, train=True,
                 transform=None, download=False):
        super(CIFAR10, self).__init__(root)
        self.transform = transform
        self.train = train
        # Downloading is disabled in this variant; data must exist on disk.
        if download:
            raise ValueError('cannot download.')
            exit()  # NOTE(review): unreachable after the raise above.
        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list
        self.data = []
        self.targets = []
        # Load each pickled batch of the selected split.
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, 'rb') as f:
                if sys.version_info[0] == 2:
                    entry = pickle.load(f)
                else:
                    # 'latin1' decodes Python-2-written pickles under Python 3.
                    entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                # CIFAR-10 batches use 'labels'; CIFAR-100 uses 'fine_labels'.
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])
        # Reassemble flat rows into (N, 3, 32, 32) then transpose to HWC.
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))
        self._load_meta()
    def _load_meta(self):
        """Load label names from the metadata pickle and build class_to_idx."""
        path = os.path.join(self.root, self.base_folder, self.meta['filename'])
        with open(path, 'rb') as infile:
            if sys.version_info[0] == 2:
                data = pickle.load(infile)
            else:
                data = pickle.load(infile, encoding='latin1')
        self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
    def __getitem__(self, index):
        """Return (views, target): 4 transformed rotations stacked along dim 0."""
        img, target = self.data[index], self.targets[index]
        # Random horizontal flip only during training.
        if self.train:
            if np.random.rand() < 0.5:
                img = img[:,::-1,:]
        # Four rotation views; .copy() makes rot90's strided view PIL-safe.
        img0 = np.rot90(img, 0).copy()
        img0 = Image.fromarray(img0)
        img0 = self.transform(img0)
        img1 = np.rot90(img, 1).copy()
        img1 = Image.fromarray(img1)
        img1 = self.transform(img1)
        img2 = np.rot90(img, 2).copy()
        img2 = Image.fromarray(img2)
        img2 = self.transform(img2)
        img3 = np.rot90(img, 3).copy()
        img3 = Image.fromarray(img3)
        img3 = self.transform(img3)
        img = torch.stack([img0,img1,img2,img3])
        return img, target
    def __len__(self):
        # Number of images loaded for the selected split.
        return len(self.data)
    def _check_integrity(self):
        """Return True when every split file exists with its expected md5."""
        root = self.root
        for fentry in (self.train_list + self.test_list):
            filename, md5 = fentry[0], fentry[1]
            fpath = os.path.join(root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
    def download(self):
        """Fetch and extract the archive; no-op when files already verify."""
        import tarfile
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_url(self.url, self.root, self.filename, self.tgz_md5)
        with tarfile.open(os.path.join(self.root, self.filename), "r:gz") as tar:
            tar.extractall(path=self.root)
    def extra_repr(self):
        # Names the split this instance serves in __repr__.
        return "Split: {}".format("Train" if self.train is True else "Test")
class CIFAR100(CIFAR10):
    """CIFAR-100 variant: same loader as CIFAR10 with the 100-class files
    and fine-grained label names."""
    base_folder = 'cifar-100-python'
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85'
    # (file name, expected md5) pairs for each split.
    train_list = [
        ['train', '16019d7e3df5f24257cddd939b257f8d'],
    ]
    test_list = [
        ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'],
    ]
    meta = {
        'filename': 'meta',
        'key': 'fine_label_names',
        'md5': '7973b15100ade9c7d40fb424638fde48',
    }
| true | true |
f730ae6a31dcc419b37475dbdeeafb8f61dff61c | 3,593 | py | Python | src/diffwave/inference.py | egaebel/diffwave | c5d7d8d90b662f208ecdfba616782559146dc116 | [
"Apache-2.0"
] | null | null | null | src/diffwave/inference.py | egaebel/diffwave | c5d7d8d90b662f208ecdfba616782559146dc116 | [
"Apache-2.0"
] | null | null | null | src/diffwave/inference.py | egaebel/diffwave | c5d7d8d90b662f208ecdfba616782559146dc116 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 LMNT, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import os
import torch
import torchaudio
from argparse import ArgumentParser
from diffwave.params import AttrDict, params as base_params
from diffwave.model import DiffWave
models = {}
def load_model(model_dir, device):
    """Instantiate DiffWave, load weights from ``model_dir`` (either a
    checkpoint directory containing ``weights.pt`` or a direct path to a
    weights file), and cache the eval-mode model in the global registry."""
    global models
    weights_file = f"{model_dir}/weights.pt"
    checkpoint_path = weights_file if os.path.exists(weights_file) else model_dir
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model = DiffWave(AttrDict(base_params)).to(device)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    models[model_dir] = model
def predict(spectrogram, model_dir=None, params=None, device=torch.device("cuda")):
    """Run the DiffWave reverse-diffusion sampler on a mel spectrogram.

    Args:
        spectrogram: conditioning spectrogram; rank 2 inputs get a batch dim.
        model_dir: key identifying the (lazily loaded) cached model.
        params: optional overrides applied to the model's parameter set.
        device: torch device used for sampling.

    Returns:
        (audio, sample_rate): generated waveform tensor, clamped to [-1, 1],
        and the model's sample rate.
    """
    global models
    # Lazy load model.
    if model_dir not in models:
        load_model(model_dir, device)
    model = models[model_dir]
    model.params.override(params)
    with torch.no_grad():
        # Noise-schedule terms used by the reverse process.
        beta = np.array(model.params.noise_schedule)
        alpha = 1 - beta
        alpha_cum = np.cumprod(alpha)
        # Expand rank 2 tensors by adding a batch dimension.
        if len(spectrogram.shape) == 2:
            spectrogram = spectrogram.unsqueeze(0)
        spectrogram = spectrogram.to(device)
        # Start from Gaussian noise of the target audio length
        # (hop_samples audio samples per spectrogram frame).
        audio = torch.randn(
            spectrogram.shape[0],
            model.params.hop_samples * spectrogram.shape[-1],
            device=device,
        )
        # (The original computed an unused ``noise_scale`` tensor here;
        # that dead code has been removed.)
        for n in range(len(alpha) - 1, -1, -1):
            # One reverse-diffusion step: subtract the predicted noise ...
            c1 = 1 / alpha[n] ** 0.5
            c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
            audio = c1 * (
                audio
                - c2
                * model(
                    audio, spectrogram, torch.tensor([n], device=audio.device)
                ).squeeze(1)
            )
            if n > 0:
                # ... and add the posterior noise term for all but the last step.
                noise = torch.randn_like(audio)
                sigma = (
                    (1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]
                ) ** 0.5
                audio += sigma * noise
        audio = torch.clamp(audio, -1.0, 1.0)
    return audio, model.params.sample_rate
def main(args):
    """CLI entry point: load a saved spectrogram, vocode it, write a WAV."""
    spectrogram = torch.from_numpy(np.load(args.spectrogram_path))
    audio, sr = predict(spectrogram, model_dir=args.model_dir)
    torchaudio.save(args.output, audio.cpu(), sample_rate=sr)
if __name__ == "__main__":
    # Two positional arguments (model, spectrogram) plus an optional output.
    parser = ArgumentParser(
        description="runs inference on a spectrogram file generated by diffwave.preprocess"
    )
    parser.add_argument(
        "model_dir",
        help="directory containing a trained model (or full path to weights.pt file)",
    )
    parser.add_argument(
        "spectrogram_path",
        help="path to a spectrogram file generated by diffwave.preprocess",
    )
    parser.add_argument("--output", "-o", default="output.wav", help="output file name")
    main(parser.parse_args())
| 33.579439 | 91 | 0.618981 |
import numpy as np
import os
import torch
import torchaudio
from argparse import ArgumentParser
from diffwave.params import AttrDict, params as base_params
from diffwave.model import DiffWave
models = {}
def load_model(model_dir, device):
    """Load DiffWave weights from ``model_dir`` (a checkpoint directory
    containing ``weights.pt`` or a direct weights-file path), move the model
    to ``device``, and cache it in the global ``models`` map."""
    global models
    if os.path.exists(f"{model_dir}/weights.pt"):
        checkpoint = torch.load(f"{model_dir}/weights.pt", map_location=device)
    else:
        checkpoint = torch.load(model_dir, map_location=device)
    model = DiffWave(AttrDict(base_params)).to(device)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    models[model_dir] = model
def predict(spectrogram, model_dir=None, params=None, device=torch.device("cuda")):
    """Run the DiffWave reverse-diffusion sampler on a mel spectrogram and
    return ``(audio, sample_rate)`` with audio clamped to [-1, 1]."""
    global models
    # Lazily load and cache the model keyed by model_dir.
    if not model_dir in models:
        load_model(model_dir, device)
    model = models[model_dir]
    model.params.override(params)
    with torch.no_grad():
        # Noise-schedule terms used by the reverse process.
        beta = np.array(model.params.noise_schedule)
        alpha = 1 - beta
        alpha_cum = np.cumprod(alpha)
        # Rank 2 inputs get a batch dimension prepended.
        if len(spectrogram.shape) == 2:
            spectrogram = spectrogram.unsqueeze(0)
        spectrogram = spectrogram.to(device)
        # Start from Gaussian noise of the target audio length.
        audio = torch.randn(
            spectrogram.shape[0],
            model.params.hop_samples * spectrogram.shape[-1],
            device=device,
        )
        # NOTE(review): noise_scale is computed but never used below.
        noise_scale = torch.from_numpy(alpha_cum ** 0.5).float().unsqueeze(1).to(device)
        # Reverse diffusion: denoise from step T-1 down to 0.
        for n in range(len(alpha) - 1, -1, -1):
            c1 = 1 / alpha[n] ** 0.5
            c2 = beta[n] / (1 - alpha_cum[n]) ** 0.5
            audio = c1 * (
                audio
                - c2
                * model(
                    audio, spectrogram, torch.tensor([n], device=audio.device)
                ).squeeze(1)
            )
            if n > 0:
                # Posterior noise term for all but the final step.
                noise = torch.randn_like(audio)
                sigma = (
                    (1.0 - alpha_cum[n - 1]) / (1.0 - alpha_cum[n]) * beta[n]
                ) ** 0.5
                audio += sigma * noise
        audio = torch.clamp(audio, -1.0, 1.0)
    return audio, model.params.sample_rate
def main(args):
    """CLI entry point: load a saved spectrogram, vocode it, write a WAV."""
    spectrogram = torch.from_numpy(np.load(args.spectrogram_path))
    audio, sr = predict(spectrogram, model_dir=args.model_dir)
    torchaudio.save(args.output, audio.cpu(), sample_rate=sr)
if __name__ == "__main__":
    # Two positional arguments (model, spectrogram) plus an optional output.
    parser = ArgumentParser(
        description="runs inference on a spectrogram file generated by diffwave.preprocess"
    )
    parser.add_argument(
        "model_dir",
        help="directory containing a trained model (or full path to weights.pt file)",
    )
    parser.add_argument(
        "spectrogram_path",
        help="path to a spectrogram file generated by diffwave.preprocess",
    )
    parser.add_argument("--output", "-o", default="output.wav", help="output file name")
    main(parser.parse_args())
| true | true |
f730ae92f94c6a1c98bcbfd1b9c656dbf9f0d677 | 576 | py | Python | emr_mine_python_scipts/pq_tree/Queue.py | debprakash/emr-view | 6b5690c2335482e97b8dabbdec616c8a1d7df898 | [
"MIT"
] | null | null | null | emr_mine_python_scipts/pq_tree/Queue.py | debprakash/emr-view | 6b5690c2335482e97b8dabbdec616c8a1d7df898 | [
"MIT"
] | null | null | null | emr_mine_python_scipts/pq_tree/Queue.py | debprakash/emr-view | 6b5690c2335482e97b8dabbdec616c8a1d7df898 | [
"MIT"
] | 1 | 2018-10-24T02:54:40.000Z | 2018-10-24T02:54:40.000Z | '''
Created on Dec 30, 2010
@author: patnaik
'''
from collections import deque
class Queue(object):
    """A simple FIFO queue built on :class:`collections.deque`."""
    def __init__(self, data = None):
        # Seed from an optional iterable; None (or any falsy value)
        # yields an empty queue.
        self.internal_queue = deque(data) if data else deque()
    def enqueue(self, value):
        """Add *value* at the back of the queue."""
        self.internal_queue.append(value)
    def dequeue(self):
        """Remove and return the front value; raises IndexError when empty."""
        return self.internal_queue.popleft()
    def __len__(self):
        return len(self.internal_queue)
    def __str__(self):
        # Renders like a plain list, e.g. "[1, 2]".
        return str(list(self.internal_queue))
from collections import deque
class Queue(object):
    """FIFO queue wrapper around :class:`collections.deque`."""
    def __init__(self, data = None):
        # Optionally seed the queue from an iterable.
        if data:
            self.internal_queue = deque(data)
        else:
            self.internal_queue = deque()
    def enqueue(self, value):
        """Append *value* to the back of the queue."""
        self.internal_queue.append(value)
    def dequeue(self):
        """Pop and return the front value (IndexError when the queue is empty)."""
        return self.internal_queue.popleft()
    def __len__(self):
        return len(self.internal_queue)
    def __str__(self):
        # Renders like a plain list, e.g. "[1, 2]".
        return "%s" % (list(self.internal_queue))
f730af67a6528163daebb523295431d1e6c54c82 | 16,885 | py | Python | intersight/model/storage_base_host.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 5 | 2021-12-16T15:13:32.000Z | 2022-03-29T16:09:54.000Z | intersight/model/storage_base_host.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 4 | 2022-01-25T19:05:51.000Z | 2022-03-29T20:18:37.000Z | intersight/model/storage_base_host.py | CiscoDevNet/intersight-python | 04b721f37c3044646a91c185c7259edfb991557a | [
"Apache-2.0"
] | 2 | 2020-07-07T15:01:08.000Z | 2022-01-31T04:27:35.000Z | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the referenced model classes on first use and publish them via
    ``globals()``; the generated Intersight model modules reference each
    other, so deferring these imports avoids circular-import failures."""
    from intersight.model.display_names import DisplayNames
    from intersight.model.mo_base_mo import MoBaseMo
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    from intersight.model.storage_base_capacity import StorageBaseCapacity
    from intersight.model.storage_base_host_all_of import StorageBaseHostAllOf
    from intersight.model.storage_base_initiator import StorageBaseInitiator
    from intersight.model.storage_hitachi_host import StorageHitachiHost
    from intersight.model.storage_net_app_initiator_group import StorageNetAppInitiatorGroup
    from intersight.model.storage_pure_host import StoragePureHost
    globals()['DisplayNames'] = DisplayNames
    globals()['MoBaseMo'] = MoBaseMo
    globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
    globals()['MoTag'] = MoTag
    globals()['MoVersionContext'] = MoVersionContext
    globals()['StorageBaseCapacity'] = StorageBaseCapacity
    globals()['StorageBaseHostAllOf'] = StorageBaseHostAllOf
    globals()['StorageBaseInitiator'] = StorageBaseInitiator
    globals()['StorageHitachiHost'] = StorageHitachiHost
    globals()['StorageNetAppInitiatorGroup'] = StorageNetAppInitiatorGroup
    globals()['StoragePureHost'] = StoragePureHost
class StorageBaseHost(ModelComposed):
    """Abstract base model for storage "host" resources.

    NOTE: auto generated by OpenAPI Generator (https://openapi-generator.tech);
    do not edit manually.

    Attributes:
        allowed_values (dict): maps (attribute-path,) tuples to the enum
            values permitted for that attribute.
        attribute_map (dict): python attribute name -> JSON key.
        discriminator: maps discriminator values to concrete classes.
        validations (dict): per-attribute validation rules (none here).
        additional_properties_type (tuple): types accepted for properties
            not declared in the schema.
    """
    # The payload's ClassId/ObjectType select one of these concrete subtypes.
    allowed_values = {
        ('class_id',): {
            'HITACHIHOST': "storage.HitachiHost",
            'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
            'PUREHOST': "storage.PureHost",
        },
        ('object_type',): {
            'HITACHIHOST': "storage.HitachiHost",
            'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
            'PUREHOST': "storage.PureHost",
        },
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """Types accepted as values of undeclared (additional) properties.

        A method (not a class attribute) because the composed model classes
        only become importable after this module finishes loading.
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """Map each python attribute name to its tuple of accepted types.

        A method (not a class attribute) because the referenced model classes
        are only importable after this module finishes loading.
        """
        lazy_import()
        return {
            'class_id': (str,),  # noqa: E501
            'object_type': (str,),  # noqa: E501
            'description': (str,),  # noqa: E501
            'initiators': ([StorageBaseInitiator], none_type,),  # noqa: E501
            'name': (str,),  # noqa: E501
            'os_type': (str,),  # noqa: E501
            'storage_utilization': (StorageBaseCapacity,),  # noqa: E501
            'account_moid': (str,),  # noqa: E501
            'create_time': (datetime,),  # noqa: E501
            'domain_group_moid': (str,),  # noqa: E501
            'mod_time': (datetime,),  # noqa: E501
            'moid': (str,),  # noqa: E501
            'owners': ([str], none_type,),  # noqa: E501
            'shared_scope': (str,),  # noqa: E501
            'tags': ([MoTag], none_type,),  # noqa: E501
            'version_context': (MoVersionContext,),  # noqa: E501
            'ancestors': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'parent': (MoBaseMoRelationship,),  # noqa: E501
            'permission_resources': ([MoBaseMoRelationship], none_type,),  # noqa: E501
            'display_names': (DisplayNames,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # Maps each 'class_id' discriminator value to its concrete class.
        lazy_import()
        val = {
            'storage.HitachiHost': StorageHitachiHost,
            'storage.NetAppInitiatorGroup': StorageNetAppInitiatorGroup,
            'storage.PureHost': StoragePureHost,
        }
        if not val:
            return None
        return {'class_id': val}
    # python attribute name -> JSON key used on the wire.
    attribute_map = {
        'class_id': 'ClassId',  # noqa: E501
        'object_type': 'ObjectType',  # noqa: E501
        'description': 'Description',  # noqa: E501
        'initiators': 'Initiators',  # noqa: E501
        'name': 'Name',  # noqa: E501
        'os_type': 'OsType',  # noqa: E501
        'storage_utilization': 'StorageUtilization',  # noqa: E501
        'account_moid': 'AccountMoid',  # noqa: E501
        'create_time': 'CreateTime',  # noqa: E501
        'domain_group_moid': 'DomainGroupMoid',  # noqa: E501
        'mod_time': 'ModTime',  # noqa: E501
        'moid': 'Moid',  # noqa: E501
        'owners': 'Owners',  # noqa: E501
        'shared_scope': 'SharedScope',  # noqa: E501
        'tags': 'Tags',  # noqa: E501
        'version_context': 'VersionContext',  # noqa: E501
        'ancestors': 'Ancestors',  # noqa: E501
        'parent': 'Parent',  # noqa: E501
        'permission_resources': 'PermissionResources',  # noqa: E501
        'display_names': 'DisplayNames',  # noqa: E501
    }
    # Internal attributes that must never be treated as model properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):  # noqa: E501
        """StorageBaseHost - a model defined in OpenAPI.

        Args:
            class_id (str): discriminator naming the concrete subtype.
            object_type (str): same value as 'ClassId'.

        Keyword Args:
            _check_type, _spec_property_naming, _path_to_item,
            _configuration, _visited_composed_classes: deserialization
            controls consumed here; every remaining keyword is treated as a
            model property (description, initiators, name, os_type,
            storage_utilization, account_moid, tags, moid, ...).
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Only keyword arguments are accepted beyond the two required ones.
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the arguments across the composed (allOf) schemas.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        # A method (not a class attribute) so the composed classes are only
        # resolved on first access: at module-import time they do not exist
        # yet because their modules have not finished loading.
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              MoBaseMo,
              StorageBaseHostAllOf,
          ],
          'oneOf': [
          ],
        }
| 52.601246 | 1,678 | 0.640687 |
import re
import sys
from intersight.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import the referenced model classes on first use and publish them via
    ``globals()``; the generated Intersight model modules reference each
    other, so deferring these imports avoids circular-import failures."""
    from intersight.model.display_names import DisplayNames
    from intersight.model.mo_base_mo import MoBaseMo
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    from intersight.model.storage_base_capacity import StorageBaseCapacity
    from intersight.model.storage_base_host_all_of import StorageBaseHostAllOf
    from intersight.model.storage_base_initiator import StorageBaseInitiator
    from intersight.model.storage_hitachi_host import StorageHitachiHost
    from intersight.model.storage_net_app_initiator_group import StorageNetAppInitiatorGroup
    from intersight.model.storage_pure_host import StoragePureHost
    globals()['DisplayNames'] = DisplayNames
    globals()['MoBaseMo'] = MoBaseMo
    globals()['MoBaseMoRelationship'] = MoBaseMoRelationship
    globals()['MoTag'] = MoTag
    globals()['MoVersionContext'] = MoVersionContext
    globals()['StorageBaseCapacity'] = StorageBaseCapacity
    globals()['StorageBaseHostAllOf'] = StorageBaseHostAllOf
    globals()['StorageBaseInitiator'] = StorageBaseInitiator
    globals()['StorageHitachiHost'] = StorageHitachiHost
    globals()['StorageNetAppInitiatorGroup'] = StorageNetAppInitiatorGroup
    globals()['StoragePureHost'] = StoragePureHost
class StorageBaseHost(ModelComposed):
allowed_values = {
('class_id',): {
'HITACHIHOST': "storage.HitachiHost",
'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
'PUREHOST': "storage.PureHost",
},
('object_type',): {
'HITACHIHOST': "storage.HitachiHost",
'NETAPPINITIATORGROUP': "storage.NetAppInitiatorGroup",
'PUREHOST': "storage.PureHost",
},
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'class_id': (str,),
'object_type': (str,),
'description': (str,),
'initiators': ([StorageBaseInitiator], none_type,),
'name': (str,),
'os_type': (str,),
'storage_utilization': (StorageBaseCapacity,),
'account_moid': (str,),
'create_time': (datetime,),
'domain_group_moid': (str,),
'mod_time': (datetime,),
'moid': (str,),
'owners': ([str], none_type,),
'shared_scope': (str,),
'tags': ([MoTag], none_type,),
'version_context': (MoVersionContext,),
'ancestors': ([MoBaseMoRelationship], none_type,),
'parent': (MoBaseMoRelationship,),
'permission_resources': ([MoBaseMoRelationship], none_type,),
'display_names': (DisplayNames,),
}
@cached_property
def discriminator():
lazy_import()
val = {
'storage.HitachiHost': StorageHitachiHost,
'storage.NetAppInitiatorGroup': StorageNetAppInitiatorGroup,
'storage.PureHost': StoragePureHost,
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId',
'object_type': 'ObjectType',
'description': 'Description',
'initiators': 'Initiators',
'name': 'Name',
'os_type': 'OsType',
'storage_utilization': 'StorageUtilization',
'account_moid': 'AccountMoid',
'create_time': 'CreateTime',
'domain_group_moid': 'DomainGroupMoid',
'mod_time': 'ModTime',
'moid': 'Moid',
'owners': 'Owners',
'shared_scope': 'SharedScope',
'tags': 'Tags',
'version_context': 'VersionContext',
'ancestors': 'Ancestors',
'parent': 'Parent',
'permission_resources': 'PermissionResources',
'display_names': 'DisplayNames',
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):
        """Auto-generated constructor for a composed (allOf) OpenAPI model.

        Args:
            class_id (str): discriminator value naming the concrete type.
            object_type (str): fully-qualified concrete type of this instance.
            **kwargs: declared model properties plus framework-internal
                options (the underscore-prefixed keys popped below).

        Raises:
            ApiTypeError: if any positional arguments beyond the required
                ones are supplied.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Track visited classes to break recursion through self-referencing schemas.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        # Distribute the arguments over the composed (allOf) schema instances.
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]
        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        not self._additional_properties_model_instances:
                # Configuration says to silently drop unknown keys.
                continue
            setattr(self, var_name, var_value)
    @cached_property
    def _composed_schemas():
        """allOf/anyOf/oneOf composition for this model.

        Loaded lazily so the component classes are only imported on first use.
        """
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              MoBaseMo,
              StorageBaseHostAllOf,
          ],
          'oneOf': [
          ],
        }
| true | true |
f730af851f20a268b6a0f44fd441c1856d5e489a | 8,362 | py | Python | transformers/modeling_tf_transfo_xl_utilities.py | wietsedv/transformers | 8efc6dd544bf1a30d99d4b5abfc5e214699eab2b | [
"Apache-2.0"
] | 4 | 2020-02-18T22:33:35.000Z | 2021-06-24T20:34:50.000Z | transformers/modeling_tf_transfo_xl_utilities.py | eangelica2014/transformers | 5e289f69bc564c94132f77c89a34e5f1dd69a592 | [
"Apache-2.0"
] | null | null | null | transformers/modeling_tf_transfo_xl_utilities.py | eangelica2014/transformers | 5e289f69bc564c94132f77c89a34e5f1dd69a592 | [
"Apache-2.0"
] | 1 | 2020-07-01T01:16:11.000Z | 2020-07-01T01:16:11.000Z | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A TF 2.0 Adaptive Softmax for Transformer XL model.
"""
from collections import defaultdict
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax (Grave et al., 2016) for the TF 2.0 Transformer-XL model.

    The vocabulary is split at `cutoffs` into a frequent-token head (plus one
    "cluster token" per tail) and tail clusters whose embedding width shrinks
    by a factor of `div_val` per cluster, so the expensive full-vocabulary
    softmax is never materialized.
    """

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1,
                 keep_order=False, **kwargs):
        super(TFAdaptiveSoftmaxMask, self).__init__(**kwargs)

        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        # Append vocab_size so (cutoff_ends[i], cutoff_ends[i+1]) delimits cluster i.
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        # Head predicts the shortlist tokens plus one pseudo-token per tail cluster.
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        """Create cluster embeddings plus per-cluster (weight, bias) and projections."""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(shape=(self.n_clusters, self.d_embed),
                                                  initializer='zeros',
                                                  trainable=True,
                                                  name='cluster_weight')
            self.cluster_bias = self.add_weight(shape=(self.n_clusters,),
                                                initializer='zeros',
                                                trainable=True,
                                                name='cluster_bias')

        if self.div_val == 1:
            # Uniform embedding width: one full-vocab output layer per cutoff,
            # optionally preceded by a shared d_embed -> d_proj projection.
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(shape=(self.d_embed, self.d_proj),
                                             initializer='zeros',
                                             trainable=True,
                                             name='out_projs_._{}'.format(i))
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(shape=(self.vocab_size, self.d_embed,),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_layers_._{}_._weight'.format(i))
                bias = self.add_weight(shape=(self.vocab_size,),
                                       initializer='zeros',
                                       trainable=True,
                                       name='out_layers_._{}_._bias'.format(i))
                self.out_layers.append((weight, bias))
        else:
            # Shrinking width: cluster i uses d_embed // div_val**i and therefore
            # needs its own projection back to d_proj.
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
                d_emb_i = self.d_embed // (self.div_val ** i)

                weight = self.add_weight(shape=(d_emb_i, self.d_proj),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_projs_._{}'.format(i))
                self.out_projs.append(weight)
                weight = self.add_weight(shape=(r_idx-l_idx, d_emb_i,),
                                         initializer='zeros',
                                         trainable=True,
                                         name='out_layers_._{}_._weight'.format(i))
                bias = self.add_weight(shape=(r_idx-l_idx,),
                                       initializer='zeros',
                                       trainable=True,
                                       name='out_layers_._{}_._bias'.format(i))
                self.out_layers.append((weight, bias))
        super(TFAdaptiveSoftmaxMask, self).build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        """Affine logits for `x` (3-D, einsum 'ibd'), optionally projected by `proj`."""
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe', y, proj)
        return tf.einsum('ibd,nd->ibn', y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        """Pick logprob[j, target[j]] for every row j."""
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0])
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, inputs, return_mean=True, training=False):
        """Compute full-vocabulary log-probabilities (and loss when targets given).

        Args:
            inputs: tuple `(hidden, target)`; `target` may be None at inference.
            return_mean: if True, reduce the per-position loss to its mean.

        Returns:
            Log-probabilities over the vocabulary. When `target` is provided the
            loss is attached to the layer via `add_loss`/`add_metric`.
        """
        hidden, target = inputs
        head_logprob = 0
        if self.n_clusters == 0:
            # Single-cluster fallback: plain softmax over the whole vocabulary.
            # BUGFIX: removed the dead `softmax_b = tf.get_variable('bias',
            # [self.config.vocab_size], ...)` line — `tf.get_variable` does not
            # exist in TF 2.x eager mode and this layer has no `config`
            # attribute, so the (unused) assignment crashed this branch.
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2], dtype=tf.float32)
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Positions whose target token falls in cluster i.
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    # Head: shortlist tokens + one logit per tail cluster.
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., :self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    # Tail: P(token) = P(cluster | head) * P(token | cluster).
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    # Scatter the masked per-cluster NLL back to full (seq, batch) shape.
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(shape_list(loss), dtype=tf.int64))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
            self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')
        return out
| 47.511364 | 110 | 0.527864 |
from collections import defaultdict
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1,
keep_order=False, **kwargs):
super(TFAdaptiveSoftmaxMask, self).__init__(**kwargs)
self.vocab_size = vocab_size
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = cutoffs + [vocab_size]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
self.keep_order = keep_order
self.out_layers = []
self.out_projs = []
def build(self, input_shape):
if self.n_clusters > 0:
self.cluster_weight = self.add_weight(shape=(self.n_clusters, self.d_embed),
initializer='zeros',
trainable=True,
name='cluster_weight')
self.cluster_bias = self.add_weight(shape=(self.n_clusters,),
initializer='zeros',
trainable=True,
name='cluster_bias')
if self.div_val == 1:
for i in range(len(self.cutoffs)):
if self.d_proj != self.d_embed:
weight = self.add_weight(shape=(self.d_embed, self.d_proj),
initializer='zeros',
trainable=True,
name='out_projs_._{}'.format(i))
self.out_projs.append(weight)
else:
self.out_projs.append(None)
weight = self.add_weight(shape=(self.vocab_size, self.d_embed,),
initializer='zeros',
trainable=True,
name='out_layers_._{}_._weight'.format(i))
bias = self.add_weight(shape=(self.vocab_size,),
initializer='zeros',
trainable=True,
name='out_layers_._{}_._bias'.format(i))
self.out_layers.append((weight, bias))
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = self.d_embed // (self.div_val ** i)
weight = self.add_weight(shape=(d_emb_i, self.d_proj),
initializer='zeros',
trainable=True,
name='out_projs_._{}'.format(i))
self.out_projs.append(weight)
weight = self.add_weight(shape=(r_idx-l_idx, d_emb_i,),
initializer='zeros',
trainable=True,
name='out_layers_._{}_._weight'.format(i))
bias = self.add_weight(shape=(r_idx-l_idx,),
initializer='zeros',
trainable=True,
name='out_layers_._{}_._bias'.format(i))
self.out_layers.append((weight, bias))
super(TFAdaptiveSoftmaxMask, self).build(input_shape)
@staticmethod
def _logit(x, W, b, proj=None):
y = x
if proj is not None:
y = tf.einsum('ibd,ed->ibe', y, proj)
return tf.einsum('ibd,nd->ibn', y, W) + b
@staticmethod
def _gather_logprob(logprob, target):
lp_size = shape_list(logprob)
r = tf.range(lp_size[0])
idx = tf.stack([r, target], 1)
return tf.gather_nd(logprob, idx)
def call(self, inputs, return_mean=True, training=False):
hidden, target = inputs
head_logprob = 0
if self.n_clusters == 0:
softmax_b = tf.get_variable('bias', [self.config.vocab_size], initializer=tf.zeros_initializer())
output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
if target is not None:
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
out = tf.nn.log_softmax(output, axis=-1)
else:
hidden_sizes = shape_list(hidden)
out = []
loss = tf.zeros(hidden_sizes[:2], dtype=tf.float32)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
mask = (target >= l_idx) & (target < r_idx)
mask_idx = tf.where(mask)
cur_target = tf.boolean_mask(target, mask) - l_idx
if self.div_val == 1:
cur_W = self.out_layers[0][0][l_idx:r_idx]
cur_b = self.out_layers[0][1][l_idx:r_idx]
else:
cur_W = self.out_layers[i][0]
cur_b = self.out_layers[i][1]
if i == 0:
cur_W = tf.concat([cur_W, self.cluster_weight], 0)
cur_b = tf.concat([cur_b, self.cluster_bias], 0)
head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
head_logprob = tf.nn.log_softmax(head_logit)
out.append(head_logprob[..., :self.cutoffs[0]])
if target is not None:
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
else:
tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
tail_logprob = tf.nn.log_softmax(tail_logit)
cluster_prob_idx = self.cutoffs[0] + i - 1
logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(logprob_i)
if target is not None:
cur_head_logprob = tf.boolean_mask(head_logprob, mask)
cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(mask_idx, -cur_logprob, tf.cast(shape_list(loss), dtype=tf.int64))
out = tf.concat(out, axis=-1)
if target is not None:
if return_mean:
loss = tf.reduce_mean(loss)
self.add_loss(loss)
self.add_metric(loss, name=self.name, aggregation='mean' if return_mean else '')
return out
| true | true |
f730b057549fb7fa3a7ab3af3435630749ad857a | 131 | py | Python | program1.py | saenzzzup/criptografia-UNAM-2020 | 05a19e076ea19218753fdc6aa8525e297cbb0b12 | [
"MIT"
] | null | null | null | program1.py | saenzzzup/criptografia-UNAM-2020 | 05a19e076ea19218753fdc6aa8525e297cbb0b12 | [
"MIT"
] | null | null | null | program1.py | saenzzzup/criptografia-UNAM-2020 | 05a19e076ea19218753fdc6aa8525e297cbb0b12 | [
"MIT"
] | null | null | null | import fileinput
def format_sum(total):
    """Return `total` as an int when it is a whole number, else the float itself."""
    return int(total) if total.is_integer() else total


def main():
    """Sum every line-separated number read via fileinput (stdin or argv files)."""
    total = sum(float(num) for num in fileinput.input())
    print(format_sum(total))


# Guard so importing this module no longer triggers the stdin read.
if __name__ == "__main__":
    main()
suma = sum(float(num) for num in fileinput.input())
if (suma).is_integer():
print(int(suma))
else:
print(suma) | true | true |
f730b0aa8d6d0ccc56c5c91946429f4bb15d48a4 | 218 | py | Python | src/redistool/basic.py | vinthony/racpider | 56cea984b69ed4faf0f1c5d264ce38690d4b449e | [
"MIT"
] | 4 | 2015-05-27T04:07:59.000Z | 2016-03-08T15:13:35.000Z | src/redistool/basic.py | vinthony/racpider | 56cea984b69ed4faf0f1c5d264ce38690d4b449e | [
"MIT"
] | null | null | null | src/redistool/basic.py | vinthony/racpider | 56cea984b69ed4faf0f1c5d264ce38690d4b449e | [
"MIT"
] | null | null | null | from redis import Redis
from config.getconfig import getconfig
def conn():
    """Create a Redis connection from the "redis" section of the app config.

    Reads host/port/db via getconfig(); raises KeyError if the section or any
    of those keys is missing.

    Fixes two NameErrors in the original: `config = config()` called an
    undefined name (and shadowed it), and `self.conn` was referenced inside a
    plain module-level function.
    """
    rec = getconfig()["redis"]
    # port/db arrive as strings from the config source; Redis expects ints.
    return Redis(host=rec["host"], port=int(rec["port"]), db=int(rec["db"]))
from config.getconfig import getconfig
def conn():
config = config()
rec = getconfig()["redis"]
self.conn = Redis(host=rec["host"],port=int(rec["port"]),db=int(rec['db']))
return self.conn | true | true |
f730b168fcf7dec7355c788c5f9285fee330b4ac | 325 | py | Python | 1876.py | romanvelichkin/acm.timus.ru | ff2e946b04fec857be142dca18e1ac3c49b14cf5 | [
"Apache-2.0"
] | null | null | null | 1876.py | romanvelichkin/acm.timus.ru | ff2e946b04fec857be142dca18e1ac3c49b14cf5 | [
"Apache-2.0"
] | null | null | null | 1876.py | romanvelichkin/acm.timus.ru | ff2e946b04fec857be142dca18e1ac3c49b14cf5 | [
"Apache-2.0"
] | null | null | null | # 1876. Утро сороконожки
# solved
boots = input().split(' ')
left_boots = int(boots[0])
right_boots = int(boots[1])
left_legs = 40
right_legs = 40
result = 0
if right_boots >= left_boots:
result = right_boots*2 + left_legs
else:
result = (right_legs - 1)*2 + left_legs + (left_boots - left_legs)*2 + 1
print(result) | 21.666667 | 76 | 0.683077 |
boots = input().split(' ')
left_boots = int(boots[0])
right_boots = int(boots[1])
left_legs = 40
right_legs = 40
result = 0
if right_boots >= left_boots:
result = right_boots*2 + left_legs
else:
result = (right_legs - 1)*2 + left_legs + (left_boots - left_legs)*2 + 1
print(result) | true | true |
f730b1d1edd280ab8da6c0db314c7a6d740157a1 | 12,189 | py | Python | fastai/callback.py | fish5421/fastai_update | c3dbdfba59512b5004093119f7676f224eb1d15c | [
"Apache-2.0"
] | null | null | null | fastai/callback.py | fish5421/fastai_update | c3dbdfba59512b5004093119f7676f224eb1d15c | [
"Apache-2.0"
] | null | null | null | fastai/callback.py | fish5421/fastai_update | c3dbdfba59512b5004093119f7676f224eb1d15c | [
"Apache-2.0"
] | 1 | 2018-08-26T02:48:51.000Z | 2018-08-26T02:48:51.000Z | "Callbacks provides extensibility to the `basic_train` loop. See `train` for examples of custom callbacks."
from .data import *
from .torch_core import *
__all__ = ['Callback', 'CallbackHandler', 'OptimWrapper', 'SmoothenValue', 'Stepper', 'annealing_cos', 'CallbackList',
'annealing_exp', 'annealing_linear', 'annealing_no', 'annealing_poly', 'do_annealing_poly']
class OptimWrapper():
    "Basic wrapper around an optimizer to simplify HP changes."
    # `create` builds the wrapped optimizer with param groups in pairs via
    # `split_bn_bias`: even-indexed groups hold regular params, odd-indexed
    # groups hold batchnorm/bias params — hence the [::2]/[1::2] walks below.
    def __init__(self, opt:optim.Optimizer, wd:Floats=0., true_wd:bool=False, bn_wd:bool=True):
        self.opt,self.true_wd,self.bn_wd = opt,true_wd,bn_wd
        self.opt_keys = list(self.opt.param_groups[0].keys())
        self.opt_keys.remove('params')
        self.read_defaults()
        self.wd = wd
    @classmethod
    def create(cls, opt_fn:Union[type,Callable], lr:Union[float,Tuple,List],
               layer_groups:ModuleList, **kwargs:Any)->optim.Optimizer:
        "Create an optim.Optimizer from `opt_fn` with `lr`. Set lr on `layer_groups`."
        split_groups = split_bn_bias(layer_groups)
        opt = opt_fn([{'params': trainable_params(l), 'lr':0} for l in split_groups])
        opt = cls(opt, **kwargs)
        opt.lr = listify(lr, layer_groups)
        return opt
    def __repr__(self)->str:
        return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
    #Pytorch optimizer methods
    def step(self)->None:
        "Set weight decay and step optimizer."
        # weight decay outside of optimizer step (AdamW-style decoupled decay)
        if self.true_wd:
            for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
                for p in pg1['params']: p.data.mul_(1 - wd*lr)
                if self.bn_wd:
                    for p in pg2['params']: p.data.mul_(1 - wd*lr)
            # Zero the optimizer's own weight_decay so it is not applied twice.
            self.set_val('weight_decay', listify(0, self._wd))
        self.opt.step()
    def zero_grad(self)->None:
        "Clear optimizer gradients."
        self.opt.zero_grad()
    #Hyperparameters as properties
    @property
    def lr(self)->float:
        "Get learning rate."
        return self._lr[-1]
    @lr.setter
    def lr(self, val:float)->None:
        "Set learning rate."
        self._lr = self.set_val('lr', listify(val, self._lr))
    @property
    def mom(self)->float:
        "Get momentum."
        return self._mom[-1]
    @mom.setter
    def mom(self, val:float)->None:
        "Set momentum."
        # SGD-style optimizers call it `momentum`; Adam-style store it as betas[0].
        if 'momentum' in self.opt_keys: self.set_val('momentum', listify(val, self._mom))
        elif 'betas' in self.opt_keys: self.set_val('betas', (listify(val, self._mom), self._beta))
        self._mom = listify(val, self._mom)
    @property
    def beta(self)->float:
        "Get beta (or alpha as makes sense for given optimizer)."
        return None if self._beta is None else self._beta[-1]
    @beta.setter
    def beta(self, val:float)->None:
        "Set beta (or alpha as makes sense for given optimizer)."
        if val is None: return
        if 'betas' in self.opt_keys: self.set_val('betas', (self._mom, listify(val, self._beta)))
        elif 'alpha' in self.opt_keys: self.set_val('alpha', listify(val, self._beta))
        self._beta = listify(val, self._beta)
    @property
    def wd(self)->float:
        "Get weight decay."
        return self._wd[-1]
    @wd.setter
    def wd(self, val:float)->None:
        "Set weight decay."
        # With true (decoupled) weight decay, `step` applies it manually instead.
        if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
        self._wd = listify(val, self._wd)
    #Helper functions
    def read_defaults(self)->None:
        "Read the values inside the optimizer for the hyper-parameters."
        self._beta = None
        if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
        if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
        if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
        if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
        if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
    def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
        "Set the values inside the optimizer dictionary at the key."
        # A pair of lists (e.g. betas) is zipped back into per-group tuples.
        if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
        for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
            pg1[key] = v
            if bn_groups: pg2[key] = v
        return val
    def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
        "Read a hyperparameter key in the optimizer dictionary."
        val = [pg[key] for pg in self.opt.param_groups[::2]]
        # Tuple-valued entries (betas) are transposed into a pair of lists.
        if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
        return val
class Callback():
    "Base class for callbacks: no-op hooks into every stage of the training loop."
    _order=0
    def on_train_begin(self, **kwargs:Any)->None:
        "Hook run once before training starts; initialize constants here."
    def on_epoch_begin(self, **kwargs:Any)->None:
        "Hook run at the start of every epoch."
    def on_batch_begin(self, **kwargs:Any)->None:
        "Hook run before each step; may return a replacement (xb, yb) pair."
    def on_loss_begin(self, **kwargs:Any)->None:
        "Hook run after the forward pass, before the loss; may return a new output."
    def on_backward_begin(self, **kwargs:Any)->None:
        "Hook run once the loss is computed, before backprop; may return a new loss."
    def on_backward_end(self, **kwargs:Any)->None:
        "Hook run after backprop, before the optimizer step (e.g. true weight decay)."
    def on_step_end(self, **kwargs:Any)->None:
        "Hook run after the optimizer step, before gradients are zeroed."
    def on_batch_end(self, **kwargs:Any)->None:
        "Hook run once the batch is fully processed."
    def on_epoch_end(self, **kwargs:Any)->bool:
        "Hook run at the end of an epoch; return True to stop training."
        return False
    def on_train_end(self, **kwargs:Any)->None:
        "Hook run after training; clean up and save files/models here."
class SmoothenValue():
    "Exponentially-weighted moving average of a value (loss, etc.) with bias correction."
    def __init__(self, beta:float):
        "Create the smoother; `beta` is the EWMA decay, 0 < beta < 1."
        self.beta = beta
        self.n = 0
        self.mov_avg = 0
    def add_value(self, val:float)->None:
        "Fold `val` into the running average and refresh `self.smooth`."
        self.n += 1
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
        # Debias the zero-initialised average (same correction as Adam).
        self.smooth = self.mov_avg / (1 - self.beta ** self.n)
CallbackList = Collection[Callback]

def _get_init_state():
    "Fresh counter dict tracked by the handler across a training run."
    return dict(epoch=0, iteration=0, num_batch=0)
@dataclass
class CallbackHandler():
    "Manage all of the registered callback objects, smoothing loss by momentum `beta`."
    callbacks:CallbackList
    beta:float=0.98
    def __post_init__(self)->None:
        "Initialize smoother and learning stats."
        # Callbacks run in ascending `_order`, so dependent callbacks can sequence themselves.
        self.callbacks = sorted(self.callbacks, key=lambda o: getattr(o, '_order', 0))
        self.smoothener = SmoothenValue(self.beta)
        self.state_dict:Dict[str,Union[int,float,Tensor]]=_get_init_state()
    def __call__(self, cb_name, **kwargs)->None:
        "Call `on_<cb_name>` on every registered callback, collecting the results."
        return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
    def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
        "About to start learning."
        self.state_dict = _get_init_state()
        self.state_dict['n_epochs'],self.state_dict['pbar'],self.state_dict['metrics'] = epochs,pbar,metrics
        self('train_begin')
    def on_epoch_begin(self)->None:
        "Handle new epoch."
        self.state_dict['num_batch'] = 0
        self('epoch_begin')
    def on_batch_begin(self, xb:Tensor, yb:Tensor)->None:
        "Handle new batch `xb`,`yb`."
        self.state_dict['last_input'], self.state_dict['last_target'] = xb, yb
        for cb in self.callbacks:
            # A callback may return a replacement (input, target) pair.
            a = cb.on_batch_begin(**self.state_dict)
            if a is not None: self.state_dict['last_input'], self.state_dict['last_target'] = a
        return self.state_dict['last_input'], self.state_dict['last_target']
    def on_loss_begin(self, out:Tensor)->None:
        "Handle start of loss calculation with model output `out`."
        self.state_dict['last_output'] = out
        for cb in self.callbacks:
            # A callback may return a replacement model output.
            a = cb.on_loss_begin(**self.state_dict)
            if a is not None: self.state_dict['last_output'] = a
        return self.state_dict['last_output']
    def on_backward_begin(self, loss:Tensor)->None:
        "Handle gradient calculation on `loss`."
        self.smoothener.add_value(loss.detach())
        self.state_dict['last_loss'], self.state_dict['smooth_loss'] = loss, self.smoothener.smooth
        for cb in self.callbacks:
            # A callback may return a replacement loss (e.g. to add regularization).
            a = cb.on_backward_begin(**self.state_dict)
            if a is not None: self.state_dict['last_loss'] = a
        return self.state_dict['last_loss']
    def on_backward_end(self)->None:
        "Handle end of gradient calculation."
        self('backward_end')
    def on_step_end(self)->None:
        "Handle end of optimization step."
        self('step_end')
    def on_batch_end(self, loss:Tensor)->None:
        "Handle end of processing one batch with `loss`."
        self.state_dict['last_loss'] = loss
        # Any callback returning True stops training.
        stop = np.any(self('batch_end'))
        self.state_dict['iteration'] += 1
        self.state_dict['num_batch'] += 1
        return stop
    def on_epoch_end(self, val_metrics:MetricsList)->bool:
        "Epoch is done, process `val_metrics`."
        self.state_dict['last_metrics'] = val_metrics
        stop = np.any(self('epoch_end'))
        self.state_dict['epoch'] += 1
        return stop
    def on_train_end(self, exception:Union[bool,Exception])->None:
        "Handle end of training, `exception` is an `Exception` or False if no exceptions during training."
        self('train_end', exception=exception)
def annealing_no(start:Number, end:Number, pct:float)->Number:
    "Constant schedule: ignore `pct` and `end`, always yield `start`."
    return start

def annealing_linear(start:Number, end:Number, pct:float)->Number:
    "Straight-line interpolation from `start` to `end` as `pct` goes 0.0 -> 1.0."
    delta = end - start
    return start + pct * delta

def annealing_exp(start:Number, end:Number, pct:float)->Number:
    "Geometric interpolation from `start` to `end` as `pct` goes 0.0 -> 1.0."
    ratio = end / start
    return start * ratio ** pct

def annealing_cos(start:Number, end:Number, pct:float)->Number:
    "Half-cosine interpolation from `start` to `end` as `pct` goes 0.0 -> 1.0."
    # cos term sweeps 2 -> 0, so the value sweeps start -> end.
    amplitude = (start - end) / 2
    return end + amplitude * (np.cos(np.pi * pct) + 1)

def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
    "Helper for `annealing_poly`: polynomial decay of the given `degree`."
    span = start - end
    return end + span * (1 - pct) ** degree

def annealing_poly(degree:Number)->Number:
    "Build a polynomial annealer of the given `degree` (start -> end over pct 0->1)."
    return functools.partial(do_annealing_poly, degree=degree)
class Stepper():
    "Step a hyper-parameter from start to end (`vals`) over `n_iter` iterations along `func`."
    def __init__(self, vals:StartOptEnd, n_iter:int, func:Optional[AnnealFunc]=None):
        if is_tuple(vals): self.start,self.end = vals[0],vals[1]
        else:              self.start,self.end = vals,0
        self.n_iter = n_iter
        # Default schedule: linear for a (start, end) pair, constant for a scalar.
        if func is None: self.func = annealing_linear if is_tuple(vals) else annealing_no
        else:            self.func = func
        self.n = 0
    def step(self)->Number:
        "Advance one iteration and return the annealed value."
        self.n += 1
        return self.func(self.start, self.end, self.n/self.n_iter)
    @property
    def is_done(self)->bool:
        "True once all `n_iter` steps have been taken."
        return self.n >= self.n_iter
| 42.618881 | 124 | 0.644844 | from .data import *
from .torch_core import *
__all__ = ['Callback', 'CallbackHandler', 'OptimWrapper', 'SmoothenValue', 'Stepper', 'annealing_cos', 'CallbackList',
'annealing_exp', 'annealing_linear', 'annealing_no', 'annealing_poly', 'do_annealing_poly']
class OptimWrapper():
def __init__(self, opt:optim.Optimizer, wd:Floats=0., true_wd:bool=False, bn_wd:bool=True):
self.opt,self.true_wd,self.bn_wd = opt,true_wd,bn_wd
self.opt_keys = list(self.opt.param_groups[0].keys())
self.opt_keys.remove('params')
self.read_defaults()
self.wd = wd
@classmethod
def create(cls, opt_fn:Union[type,Callable], lr:Union[float,Tuple,List],
layer_groups:ModuleList, **kwargs:Any)->optim.Optimizer:
split_groups = split_bn_bias(layer_groups)
opt = opt_fn([{'params': trainable_params(l), 'lr':0} for l in split_groups])
opt = cls(opt, **kwargs)
opt.lr = listify(lr, layer_groups)
return opt
def __repr__(self)->str:
return f'OptimWrapper over {repr(self.opt)}.\nTrue weight decay: {self.true_wd}'
def step(self)->None:
if self.true_wd:
for lr,wd,pg1,pg2 in zip(self._lr,self._wd,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
for p in pg1['params']: p.data.mul_(1 - wd*lr)
if self.bn_wd:
for p in pg2['params']: p.data.mul_(1 - wd*lr)
self.set_val('weight_decay', listify(0, self._wd))
self.opt.step()
def zero_grad(self)->None:
self.opt.zero_grad()
@property
def lr(self)->float:
return self._lr[-1]
@lr.setter
def lr(self, val:float)->None:
self._lr = self.set_val('lr', listify(val, self._lr))
@property
def mom(self)->float:
return self._mom[-1]
@mom.setter
def mom(self, val:float)->None:
if 'momentum' in self.opt_keys: self.set_val('momentum', listify(val, self._mom))
elif 'betas' in self.opt_keys: self.set_val('betas', (listify(val, self._mom), self._beta))
self._mom = listify(val, self._mom)
@property
def beta(self)->float:
return None if self._beta is None else self._beta[-1]
@beta.setter
def beta(self, val:float)->None:
if val is None: return
if 'betas' in self.opt_keys: self.set_val('betas', (self._mom, listify(val, self._beta)))
elif 'alpha' in self.opt_keys: self.set_val('alpha', listify(val, self._beta))
self._beta = listify(val, self._beta)
@property
def wd(self)->float:
return self._wd[-1]
@wd.setter
def wd(self, val:float)->None:
if not self.true_wd: self.set_val('weight_decay', listify(val, self._wd), bn_groups=self.bn_wd)
self._wd = listify(val, self._wd)
def read_defaults(self)->None:
self._beta = None
if 'lr' in self.opt_keys: self._lr = self.read_val('lr')
if 'momentum' in self.opt_keys: self._mom = self.read_val('momentum')
if 'alpha' in self.opt_keys: self._beta = self.read_val('alpha')
if 'betas' in self.opt_keys: self._mom,self._beta = self.read_val('betas')
if 'weight_decay' in self.opt_keys: self._wd = self.read_val('weight_decay')
def set_val(self, key:str, val:Any, bn_groups:bool=True)->Any:
if is_tuple(val): val = [(v1,v2) for v1,v2 in zip(*val)]
for v,pg1,pg2 in zip(val,self.opt.param_groups[::2],self.opt.param_groups[1::2]):
pg1[key] = v
if bn_groups: pg2[key] = v
return val
def read_val(self, key:str) -> Union[List[float],Tuple[List[float],List[float]]]:
val = [pg[key] for pg in self.opt.param_groups[::2]]
if is_tuple(val[0]): val = [o[0] for o in val], [o[1] for o in val]
return val
class Callback():
_order=0
def on_train_begin(self, **kwargs:Any)->None:
pass
def on_epoch_begin(self, **kwargs:Any)->None:
pass
def on_batch_begin(self, **kwargs:Any)->None:
pass
def on_loss_begin(self, **kwargs:Any)->None:
pass
def on_backward_begin(self, **kwargs:Any)->None:
pass
def on_backward_end(self, **kwargs:Any)->None:
pass
def on_step_end(self, **kwargs:Any)->None:
pass
def on_batch_end(self, **kwargs:Any)->None:
pass
def on_epoch_end(self, **kwargs:Any)->bool:
return False
def on_train_end(self, **kwargs:Any)->None:
pass
class SmoothenValue():
def __init__(self, beta:float):
self.beta,self.n,self.mov_avg = beta,0,0
def add_value(self, val:float)->None:
self.n += 1
self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
self.smooth = self.mov_avg / (1 - self.beta ** self.n)
CallbackList = Collection[Callback]
def _get_init_state(): return {'epoch':0, 'iteration':0, 'num_batch':0}
@dataclass
class CallbackHandler():
    "Dispatch training events to a sorted list of `Callback`s and maintain the shared state dict."
    callbacks:CallbackList
    beta:float=0.98  # smoothing coefficient for the exponentially weighted loss
    def __post_init__(self)->None:
        "Sort callbacks by `_order` and initialize the loss smoother and state."
        self.callbacks = sorted(self.callbacks, key=lambda o: getattr(o, '_order', 0))
        self.smoothener = SmoothenValue(self.beta)
        self.state_dict:Dict[str,Union[int,float,Tensor]]=_get_init_state()
    def __call__(self, cb_name, **kwargs)->None:
        "Invoke hook `on_<cb_name>` on every callback, passing the shared state."
        return [getattr(cb, f'on_{cb_name}')(**self.state_dict, **kwargs) for cb in self.callbacks]
    def on_train_begin(self, epochs:int, pbar:PBar, metrics:MetricFuncList)->None:
        "Reset state and record run-wide info before training starts."
        self.state_dict = _get_init_state()
        self.state_dict['n_epochs'],self.state_dict['pbar'],self.state_dict['metrics'] = epochs,pbar,metrics
        self('train_begin')
    def on_epoch_begin(self)->None:
        "Reset the per-epoch batch counter and notify callbacks."
        self.state_dict['num_batch'] = 0
        self('epoch_begin')
    def on_batch_begin(self, xb:Tensor, yb:Tensor)->None:
        "Store the batch; callbacks may return a replacement (input, target) pair."
        self.state_dict['last_input'], self.state_dict['last_target'] = xb, yb
        for cb in self.callbacks:
            a = cb.on_batch_begin(**self.state_dict)
            # a non-None return replaces the (input, target) seen by later callbacks
            if a is not None: self.state_dict['last_input'], self.state_dict['last_target'] = a
        return self.state_dict['last_input'], self.state_dict['last_target']
    def on_loss_begin(self, out:Tensor)->None:
        "Store the model output; callbacks may return a replacement."
        self.state_dict['last_output'] = out
        for cb in self.callbacks:
            a = cb.on_loss_begin(**self.state_dict)
            if a is not None: self.state_dict['last_output'] = a
        return self.state_dict['last_output']
    def on_backward_begin(self, loss:Tensor)->None:
        "Record raw and smoothed loss; callbacks may replace the loss used for backward."
        self.smoothener.add_value(loss.detach())
        self.state_dict['last_loss'], self.state_dict['smooth_loss'] = loss, self.smoothener.smooth
        for cb in self.callbacks:
            a = cb.on_backward_begin(**self.state_dict)
            if a is not None: self.state_dict['last_loss'] = a
        return self.state_dict['last_loss']
    def on_backward_end(self)->None:
        "Signal that backpropagation finished."
        self('backward_end')
    def on_step_end(self)->None:
        "Signal that the optimizer step finished."
        self('step_end')
    def on_batch_end(self, loss:Tensor)->None:
        "Close the batch; returns truthy when any callback requests stopping."
        self.state_dict['last_loss'] = loss
        stop = np.any(self('batch_end'))
        self.state_dict['iteration'] += 1
        self.state_dict['num_batch'] += 1
        return stop
    def on_epoch_end(self, val_metrics:MetricsList)->bool:
        "Close the epoch; returns truthy when any callback requests stopping."
        self.state_dict['last_metrics'] = val_metrics
        stop = np.any(self('epoch_end'))
        self.state_dict['epoch'] += 1
        return stop
    def on_train_end(self, exception:Union[bool,Exception])->None:
        "Signal the end of training, forwarding the exception (or False) that ended it."
        self('train_end', exception=exception)
def annealing_no(start:Number, end:Number, pct:float)->Number:
    "Constant schedule: always return `start`, ignoring `end` and `pct`."
    return start
def annealing_linear(start:Number, end:Number, pct:float)->Number:
    "Linearly interpolate from `start` (pct=0) to `end` (pct=1)."
    return start + (end - start) * pct
def annealing_exp(start:Number, end:Number, pct:float)->Number:
    "Exponentially (geometrically) interpolate from `start` (pct=0) to `end` (pct=1)."
    growth = end / start
    return start * growth ** pct
def annealing_cos(start:Number, end:Number, pct:float)->Number:
    "Cosine-shaped interpolation from `start` (pct=0) to `end` (pct=1)."
    # cos(pi*pct)+1 goes smoothly from 2 down to 0 as pct goes 0 -> 1
    shifted = np.cos(np.pi * pct) + 1
    return end + (start - end) / 2 * shifted
def do_annealing_poly(start:Number, end:Number, pct:float, degree:Number)->Number:
    "Polynomial interpolation of the given `degree` from `start` to `end`."
    remaining = (1 - pct) ** degree
    return end + (start - end) * remaining
def annealing_poly(degree:Number)->Number:
    # NOTE(review): this actually returns a partial annealing *function*
    # (start, end, pct) -> Number, not a Number; the return annotation looks
    # wrong -- confirm before changing the signature.
    "Build a polynomial annealing function of the given `degree`."
    return functools.partial(do_annealing_poly, degree=degree)
class Stepper():
    "Step a value from `start` to `end` over `n_iter` iterations following an annealing function."
    def __init__(self, vals:StartOptEnd, n_iter:int, func:Optional[AnnealFunc]=None):
        # `vals` may be a (start, end) tuple or a single constant value
        self.start,self.end = (vals[0],vals[1]) if is_tuple(vals) else (vals,0)
        self.n_iter = n_iter
        # default: linear anneal for a (start, end) pair, constant otherwise
        if func is None: self.func = annealing_linear if is_tuple(vals) else annealing_no
        else: self.func = func
        self.n = 0  # number of steps taken so far
    def step(self)->Number:
        "Advance one iteration and return the annealed value."
        self.n += 1
        return self.func(self.start, self.end, self.n/self.n_iter)
    @property
    def is_done(self)->bool:
        "True once `n_iter` steps have been taken."
        return self.n >= self.n_iter
| true | true |
f730b21126e67ebc36e6bc1ec1c6b28c3f9a4d71 | 174 | py | Python | .history/py/main_20210613213845.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | .history/py/main_20210613213845.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | .history/py/main_20210613213845.py | minefarmer/Coding101-OOP | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | [
"Unlicense"
] | null | null | null | class IceCream:
def __init__(self):
self.scoops = 3
def eat(self, scoops):
self.scoops = self.scoops = scoops
def add(self, scoops):
| 14.5 | 42 | 0.557471 | class IceCream:
def __init__(self):
self.scoops = 3
def eat(self, scoops):
self.scoops = self.scoops = scoops
def add(self, scoops):
| false | true |
f730b2ef46405f107ae301a6a6737279f08c4c9d | 2,465 | py | Python | main.py | wiseleywu/Conference-Central-API | dedb765097c3f0378352c54def575d5ce598cd84 | [
"Apache-2.0"
] | null | null | null | main.py | wiseleywu/Conference-Central-API | dedb765097c3f0378352c54def575d5ce598cd84 | [
"Apache-2.0"
] | null | null | null | main.py | wiseleywu/Conference-Central-API | dedb765097c3f0378352c54def575d5ce598cd84 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
"""
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import ndb
from conference import ConferenceApi
from settings import MEMCACHE_SPEAKER_KEY
from models import Conference, Session, Speaker
__author__ = 'wiseleywu@gmail.com (Wiseley Wu)'
class SetAnnouncementHandler(webapp2.RequestHandler):
    "Cron endpoint that refreshes the cached conference announcement."
    def get(self):
        """Set Announcement in Memcache."""
        # delegate to the API helper that recomputes and stores the value
        ConferenceApi._cacheAnnouncement()
        self.response.set_status(204)  # no content: nothing to return to cron
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    "Task-queue endpoint that emails the creator of a newly created conference."
    def post(self):
        """Send email confirming Conference creation."""
        # the sender must be an authorized address of this App Engine app
        mail.send_mail(
            'noreply@%s.appspotmail.com' % (
                app_identity.get_application_id()),     # from
            self.request.get('email'),                  # to
            'You created a new Conference!',            # subj
            'Hi, you have created a following '         # body
            'conference:\r\n\r\n%s' % self.request.get(
                'conferenceInfo')
        )
class checkedFeaturedSpeaker(webapp2.RequestHandler):
    "Task-queue endpoint that updates the featured-speaker memcache entry for a conference."
    def post(self):
        """Check Featured Speaker within a Conference"""
        # resolve the conference (by websafe key) and the speaker (by id)
        conf = ndb.Key(urlsafe=self.request.get('wsck')).get()
        speaker = ndb.Key(Speaker, int(self.request.get('speakerId'))).get()
        # all sessions of this conference given by this speaker
        sessions = Session.query(ancestor=conf.key)
        sessions = sessions.filter(
            Session.speakerId == int(self.request.get('speakerId')))
        # only feature a speaker who appears in more than one session;
        # otherwise clear the announcement
        if sessions.count() <= 1:
            announcement = ""
        else:
            announcement = '%s %s %s %s' % (
                'Featured Speaker - ',
                speaker.displayName,
                '. You can find the speaker in the following sessions: ',
                ', '.join(
                    session.name for session in sessions)
            )
        memcache.set(MEMCACHE_SPEAKER_KEY, announcement)
        self.response.set_status(204)
# URL routing for the cron- and task-queue-driven handlers above
app = webapp2.WSGIApplication([
    ('/crons/set_announcement', SetAnnouncementHandler),
    ('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
    ('/tasks/check_featured_speaker', checkedFeaturedSpeaker),
], debug=True)
| 33.767123 | 76 | 0.636917 |
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import memcache
from google.appengine.ext import ndb
from conference import ConferenceApi
from settings import MEMCACHE_SPEAKER_KEY
from models import Conference, Session, Speaker
__author__ = 'wiseleywu@gmail.com (Wiseley Wu)'
class SetAnnouncementHandler(webapp2.RequestHandler):
def get(self):
ConferenceApi._cacheAnnouncement()
self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
def post(self):
mail.send_mail(
'noreply@%s.appspotmail.com' % (
app_identity.get_application_id()),
self.request.get('email'),
'You created a new Conference!',
'Hi, you have created a following '
'conference:\r\n\r\n%s' % self.request.get(
'conferenceInfo')
)
class checkedFeaturedSpeaker(webapp2.RequestHandler):
def post(self):
conf = ndb.Key(urlsafe=self.request.get('wsck')).get()
speaker = ndb.Key(Speaker, int(self.request.get('speakerId'))).get()
sessions = Session.query(ancestor=conf.key)
sessions = sessions.filter(
Session.speakerId == int(self.request.get('speakerId')))
if sessions.count() <= 1:
announcement = ""
else:
announcement = '%s %s %s %s' % (
'Featured Speaker - ',
speaker.displayName,
'. You can find the speaker in the following sessions: ',
', '.join(
session.name for session in sessions)
)
memcache.set(MEMCACHE_SPEAKER_KEY, announcement)
self.response.set_status(204)
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/check_featured_speaker', checkedFeaturedSpeaker),
], debug=True)
| true | true |
f730b4147de5dae0eab5ed3e717e1a146221dfe5 | 972 | py | Python | src/sage/combinat/posets/forest.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/combinat/posets/forest.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/combinat/posets/forest.py | hsm207/sage | 020bd59ec28717bfab9af44d2231c53da1ff99f1 | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | r"""
Forest Posets
AUTHORS:
- Stefan Grosser (06-2020): initial implementation
"""
# ****************************************************************************
# Copyright (C) 2020 Stefan Grosser <stefan.grosser1@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.combinat.posets.posets import FinitePoset
from sage.combinat.posets.linear_extensions import LinearExtensionsOfForest
class ForestPoset(FinitePoset):
    r"""
    A forest poset is a poset whose underlying Hasse diagram is a forest,
    i.e. a directed acyclic graph with no undirected cycles.
    """
    _lin_ext_type = LinearExtensionsOfForest  # linear extensions specialised for forests
    _desc = 'Finite forest poset'
| 32.4 | 78 | 0.616255 |
from sage.combinat.posets.posets import FinitePoset
from sage.combinat.posets.linear_extensions import LinearExtensionsOfForest
class ForestPoset(FinitePoset):
_lin_ext_type = LinearExtensionsOfForest
_desc = 'Finite forest poset'
| true | true |
f730b4360fcf30609dec2be0e7dc23ff7f04436d | 190 | py | Python | torch_inception_resnet_v2/utils/convolution_config.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 9 | 2019-11-28T01:33:43.000Z | 2021-09-06T06:51:47.000Z | torch_inception_resnet_v2/utils/convolution_config.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 1 | 2022-02-06T12:03:00.000Z | 2022-02-07T02:30:47.000Z | torch_inception_resnet_v2/utils/convolution_config.py | mhconradt/inception-resnet-v2 | 0816e9885eb7034d99a67519efa0642578526a3e | [
"MIT"
] | 4 | 2019-12-12T05:35:28.000Z | 2021-04-30T18:41:41.000Z | from collections import namedtuple
# (n_filters, kernel_size, stride, padding) describing one convolution layer
ConvolutionConfig = namedtuple('ConvolutionConfig', ['n_filters', 'kernel_size', 'stride', 'padding'])
# padding-only configuration for a standalone pad layer
PadConfig = namedtuple('PadConfig', ['padding'])
| 27.142857 | 102 | 0.752632 | from collections import namedtuple
ConvolutionConfig = namedtuple('ConvolutionConfig', ['n_filters', 'kernel_size', 'stride', 'padding'])
PadConfig = namedtuple('PadConfig', ['padding'])
| true | true |
f730b62321ee0ae029cfec6153d97c708713be29 | 37,777 | py | Python | web2py/gluon/packages/dal/pydal/base.py | aduckworth1969/smc | b1771d9ed68f0e35f46271aab5b1e1fab363e3d9 | [
"MIT"
] | 1 | 2018-04-19T05:09:06.000Z | 2018-04-19T05:09:06.000Z | web2py/gluon/packages/dal/pydal/base.py | aduckworth1969/smc | b1771d9ed68f0e35f46271aab5b1e1fab363e3d9 | [
"MIT"
] | 14 | 2018-03-04T22:56:41.000Z | 2020-12-10T19:49:43.000Z | web2py/gluon/packages/dal/pydal/base.py | aduckworth1969/smc | b1771d9ed68f0e35f46271aab5b1e1fab363e3d9 | [
"MIT"
] | 2 | 2020-09-18T15:12:26.000Z | 2020-11-10T22:09:59.000Z | # -*- coding: utf-8 -*-
# pylint: disable=no-member
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: BSD
|
This file contains the DAL support for many relational databases, including:
- SQLite & SpatiaLite
- MySQL
- Postgres
- Firebird
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- Informix (9+ and SE)
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
- Teradata
- IMAP (experimental)
Example of usage::
>>> # from dal import DAL, Field
### create DAL connection (and create DB if it doesn't exist)
>>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
... folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
<Row {'id': 1, 'name': 'Jim'}>
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,
... groupby=person.name, limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported DAL URI strings::
'sqlite://test.db'
'spatialite://test.db'
'sqlite:memory'
'spatialite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:password@localhost/test'
'postgres:psycopg2://mdipierro:password@localhost/test'
'postgres:pg8000://mdipierro:password@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'mssql3://web2py:none@A64X2/web2py_test' # better pagination (requires >= 2005)
'mssql4://web2py:none@A64X2/web2py_test' # best pagination (requires >= 2012)
'pytds://user:password@server:port/database' # python-tds
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2:ibm_db_dbi://DSN=dsn;UID=user;PWD=pass'
'db2:pyodbc://driver=DB2;hostname=host;database=database;uid=user;pwd=password;port=port'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
'google:datastore' # for google app engine datastore (uses ndb by default)
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
'imap://user:password@server:port' # experimental
'mongodb://user:password@server:port/database' # experimental
For more info::
help(DAL)
help(Field)
"""
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from ._compat import (
PY2,
pickle,
hashlib_md5,
pjoin,
copyreg,
integer_types,
with_metaclass,
long,
unquote,
iteritems,
)
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import (
Serializable,
SQLCallableList,
BasicStorage,
RecordUpdater,
RecordDeleter,
TimingHandler,
)
from .helpers.methods import hide_password, smart_query, auto_validators, auto_represent, uuidstr
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME
from .helpers.rest import RestParser
from .helpers.serializers import serializers
from .objects import Table, Field, Rows, Row, Set
from .adapters.base import BaseAdapter, NullAdapter
from .default_validators import default_validators
TABLE_ARGS = set(
(
"migrate",
"primarykey",
"fake_migrate",
"format",
"redefine",
"singular",
"plural",
"trigger_name",
"sequence_name",
"fields",
"common_filter",
"polymodel",
"table_class",
"on_define",
"rname",
)
)
class MetaDAL(type):
    """Metaclass that lets DAL(...) calls customise class-level collaborators.

    Selected keyword arguments (serializers, validators, Table/Row classes,
    etc.) are stripped from the constructor kwargs and installed as class
    attributes before the instance is created.
    """
    def __call__(cls, *args, **kwargs):
        #: intercept arguments for DAL customisation on call
        intercepts = [
            "logger",
            "representers",
            "serializers",
            "uuid",
            "validators",
            "validators_method",
            "Table",
            "Row",
        ]
        intercepted = []
        for name in intercepts:
            val = kwargs.get(name)
            if val:
                intercepted.append((name, val))
                del kwargs[name]
        # install the intercepted values on the class itself, so they apply
        # to this and any subsequently created instance
        for tup in intercepted:
            setattr(cls, tup[0], tup[1])
        obj = super(MetaDAL, cls).__call__(*args, **kwargs)
        return obj
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
"""
An instance of this class represents a database connection
Args:
uri(str): contains information for connecting to a database.
Defaults to `'sqlite://dummy.db'`
Note:
experimental: you can specify a dictionary as uri
parameter i.e. with::
db = DAL({"uri": "sqlite://storage.sqlite",
"tables": {...}, ...})
for an example of dict input you can check the output
of the scaffolding db model with
db.as_dict()
Note that for compatibility with Python older than
version 2.6.5 you should cast your dict input keys
to str due to a syntax limitation on kwarg names.
for proper DAL dictionary input you can use one of::
obj = serializers.cast_keys(dict, [encoding="utf-8"])
#or else (for parsing json input)
obj = serializers.loads_json(data, unicode_keys=False)
pool_size: How many open connections to make to the database object.
folder: where .table files will be created. Automatically set within
web2py. Use an explicit path when using DAL outside web2py
db_codec: string encoding of the database (default: 'UTF-8')
table_hash: database identifier with .tables. If your connection hash
change you can still using old .tables if they have db_hash
as prefix
check_reserved: list of adapters to check tablenames and column names
against sql/nosql reserved keywords. Defaults to `None`
- 'common' List of sql keywords that are common to all database
types such as "SELECT, INSERT". (recommended)
- 'all' Checks against all known SQL keywords
- '<adaptername>'' Checks against the specific adapters list of
keywords
- '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
migrate: sets default migrate behavior for all tables
fake_migrate: sets default fake_migrate behavior for all tables
migrate_enabled: If set to False disables ALL migrations
fake_migrate_all: If set to True fake migrates ALL tables
attempts: Number of times to attempt connecting
auto_import: If set to True, tries import automatically table
definitions from the databases folder (works only for simple models)
bigint_id: If set, turn on bigint instead of int for id and reference
fields
lazy_tables: delays table definition until table access
after_connection: can a callable that will be executed after the
connection
Example:
Use as::
db = DAL('sqlite://test.db')
or::
db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
serializers = None
validators = None
representers = {}
validators_method = default_validators
uuid = uuidstr
logger = logging.getLogger("pyDAL")
Field = Field
Table = Table
Rows = Rows
Row = Row
record_operators = {"update_record": RecordUpdater, "delete_record": RecordDeleter}
execution_handlers = [TimingHandler]
    def __new__(cls, uri="sqlite://dummy.db", *args, **kwargs):
        """Reuse or create a DAL instance keyed by a per-thread db_uid.

        Instances are registered in thread-local registries so the same
        connection hash maps to the same object within a thread; the special
        '<zombie>' uri resurrects a previously registered instance.
        """
        if not hasattr(THREAD_LOCAL, "_pydal_db_instances_"):
            THREAD_LOCAL._pydal_db_instances_ = {}
        if not hasattr(THREAD_LOCAL, "_pydal_db_instances_zombie_"):
            THREAD_LOCAL._pydal_db_instances_zombie_ = {}
        if uri == "<zombie>":
            db_uid = kwargs["db_uid"]  # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL._pydal_db_instances_:
                db_group = THREAD_LOCAL._pydal_db_instances_[db_uid]
                # reuse the most recently registered instance for this uid
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
                db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL._pydal_db_instances_zombie_[db_uid] = db
        else:
            # the uid defaults to a hash of the uri so identical connection
            # strings share a registry slot
            db_uid = kwargs.get("db_uid", hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
                db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
                del THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL._pydal_db_instances_.get(db_uid, [])
            db_group.append(db)
            THREAD_LOCAL._pydal_db_instances_[db_uid] = db_group
        db._db_uid = db_uid
        return db
    @staticmethod
    def set_folder(folder):
        """Set the folder where the migration .table files are stored."""
        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        BaseAdapter.set_folder(folder)
    @staticmethod
    def get_instances():
        """
        Returns a dictionary with uri as key with timings and defined tables::

            {'sqlite://storage.sqlite': {
                'dbstats': [(select auth_user.email from auth_user, 0.02009)],
                'dbtables': {
                    'defined': ['auth_cas', 'auth_event', 'auth_group',
                        'auth_membership', 'auth_permission', 'auth_user'],
                    'lazy': '[]'
                    }
                }
            }

        """
        dbs = getattr(THREAD_LOCAL, "_pydal_db_instances_", {}).items()
        infos = {}
        for db_uid, db_group in dbs:
            for db in db_group:
                if not db._uri:
                    continue
                # never expose credentials in the reported uri
                k = hide_password(db._adapter.uri)
                infos[k] = dict(
                    dbstats=[(row[0], row[1]) for row in db._timings],
                    dbtables={
                        "defined": sorted(
                            list(set(db.tables) - set(db._LAZY_TABLES.keys()))
                        ),
                        "lazy": sorted(db._LAZY_TABLES.keys()),
                    },
                )
        return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
instances = enumerate(instances)
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbname
)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbanme
)
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError("failure to commit distributed transaction")
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
    def __init__(
        self,
        uri="sqlite://dummy.db",
        pool_size=0,
        folder=None,
        db_codec="UTF-8",
        check_reserved=None,
        migrate=True,
        fake_migrate=False,
        migrate_enabled=True,
        fake_migrate_all=False,
        decode_credentials=False,
        driver_args=None,
        adapter_args=None,
        attempts=5,
        auto_import=False,
        bigint_id=False,
        debug=False,
        lazy_tables=False,
        db_uid=None,
        after_connection=None,
        tables=None,
        ignore_field_case=True,
        entity_quoting=True,
        table_hash=None,
    ):
        """Open (with retries) the database connection described by `uri`.

        See the class docstring for the meaning of the parameters.
        """
        if uri == "<zombie>" and db_uid is not None:
            # resurrected instance: __new__ already returned the live object
            return
        super(DAL, self).__init__()
        if not issubclass(self.Rows, Rows):
            raise RuntimeError("`Rows` class must be a subclass of pydal.objects.Rows")
        if not issubclass(self.Row, Row):
            raise RuntimeError("`Row` class must be a subclass of pydal.objects.Row")
        from .drivers import DRIVERS, is_jdbc
        self._drivers_available = DRIVERS
        if not decode_credentials:
            credential_decoder = lambda cred: cred
        else:
            credential_decoder = lambda cred: unquote(cred)
        self._folder = folder
        if folder:
            self.set_folder(folder)
        self._uri = uri
        self._pool_size = pool_size
        self._db_codec = db_codec
        self._pending_references = {}
        self._request_tenant = "request_tenant"
        self._common_fields = []
        self._referee_name = "%(table)s"
        self._bigint_id = bigint_id
        self._debug = debug
        self._migrated = []
        self._LAZY_TABLES = {}
        self._lazy_tables = lazy_tables
        self._tables = SQLCallableList()
        self._aliased_tables = threading.local()
        self._driver_args = driver_args
        self._adapter_args = adapter_args
        self._check_reserved = check_reserved
        self._decode_credentials = decode_credentials
        self._attempts = attempts
        self._ignore_field_case = ignore_field_case
        # sanity-fix bogus attempt counts (note: 0 passes this check)
        if not str(attempts).isdigit() or attempts < 0:
            attempts = 5
        if uri:
            # try every uri in turn, for up to `attempts` rounds with a 1s
            # pause between rounds, until one adapter connects
            uris = isinstance(uri, (list, tuple)) and uri or [uri]
            connected = False
            for k in range(attempts):
                for uri in uris:
                    try:
                        from .adapters import adapters
                        if is_jdbc and not uri.startswith("jdbc:"):
                            uri = "jdbc:" + uri
                        self._dbname = REGEX_DBNAME.match(uri).group()
                        # notice that driver args or {} else driver_args
                        # defaults to {} global, not correct
                        kwargs = dict(
                            db=self,
                            uri=uri,
                            pool_size=pool_size,
                            folder=folder,
                            db_codec=db_codec,
                            credential_decoder=credential_decoder,
                            driver_args=driver_args or {},
                            adapter_args=adapter_args or {},
                            after_connection=after_connection,
                            entity_quoting=entity_quoting,
                        )
                        adapter = adapters.get_for(self._dbname)
                        self._adapter = adapter(**kwargs)
                        # self._adapter.ignore_field_case = ignore_field_case
                        if bigint_id:
                            self._adapter.dialect._force_bigints()
                        connected = True
                        break
                    except SyntaxError:
                        # malformed uri / unknown adapter: fail immediately
                        raise
                    except Exception:
                        tb = traceback.format_exc()
                        self.logger.debug(
                            "DEBUG: connect attempt %i, connection error:\n%s" % (k, tb)
                        )
                if connected:
                    break
                else:
                    time.sleep(1)
            if not connected:
                # NOTE(review): `tb` is only bound in the except branch above;
                # if attempts == 0 the loop never runs and this raises
                # NameError instead of RuntimeError -- confirm and fix upstream
                raise RuntimeError(
                    "Failure to connect, tried %d times:\n%s" % (attempts, tb)
                )
        else:
            # no uri: dummy adapter with no real connection; migrations are
            # meaningless in this mode and are disabled
            self._adapter = NullAdapter(
                db=self,
                pool_size=0,
                uri="None",
                folder=folder,
                db_codec=db_codec,
                after_connection=after_connection,
                entity_quoting=entity_quoting,
            )
            migrate = fake_migrate = False
            self.validators_method = None
            self.validators = None
        adapter = self._adapter
        self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
        if check_reserved:
            from .contrib.reserved_sql_keywords import ADAPTERS as RSK
            self.RSK = RSK
        self._migrate = migrate
        self._fake_migrate = fake_migrate
        self._migrate_enabled = migrate_enabled
        self._fake_migrate_all = fake_migrate_all
        if self.serializers is not None:
            for k, v in self.serializers.items():
                serializers._custom_[k] = v
        if auto_import or tables:
            self.import_table_definitions(adapter.folder, tables=tables)
    @property
    def tables(self):
        """List of the names of all defined (or lazily pending) tables."""
        return self._tables

    @property
    def _timings(self):
        """Per-thread list of (sql, seconds) tuples (populated by the execution handlers)."""
        return getattr(THREAD_LOCAL, "_pydal_timings_", [])

    @property
    def _lastsql(self):
        """The most recent (sql, seconds) entry, or None before any query."""
        return self._timings[-1] if self._timings else None
    def import_table_definitions(
        self, path, migrate=False, fake_migrate=False, tables=None
    ):
        """Recreate table definitions from dicts or from saved .table files.

        Args:
            path: folder containing the ``<uri_hash>_<name>.table`` files
            migrate / fake_migrate: passed through to `define_table`
            tables: optional list of table-definition dicts; when given, the
                files in `path` are ignored
        """
        if tables:
            for table in tables:
                self.define_table(**table)
        else:
            pattern = pjoin(path, self._uri_hash + "_*.table")
            for filename in glob.glob(pattern):
                tfile = self._adapter.migrator.file_open(filename, "r" if PY2 else "rb")
                try:
                    sql_fields = pickle.load(tfile)
                    # recover the table name from the filename by slicing off
                    # the "<uri_hash>_" prefix and the ".table" suffix
                    name = filename[len(pattern) - 7 : -6]
                    mf = [
                        (
                            value["sortable"],
                            Field(
                                key,
                                type=value["type"],
                                length=value.get("length", None),
                                notnull=value.get("notnull", False),
                                unique=value.get("unique", False),
                            ),
                        )
                        for key, value in iteritems(sql_fields)
                    ]
                    # restore the original field order recorded in "sortable"
                    mf.sort(key=lambda a: a[0])
                    self.define_table(
                        name,
                        *[item[1] for item in mf],
                        **dict(migrate=migrate, fake_migrate=fake_migrate)
                    )
                finally:
                    self._adapter.migrator.file_close(tfile)
    def check_reserved_keyword(self, name):
        """
        Validates `name` against SQL keywords

        Uses self._check_reserved which is a list of operators to use.

        Raises:
            SyntaxError: when `name` is a reserved keyword for any of the
                configured backends.
        """
        for backend in self._check_reserved:
            if name.upper() in self.RSK[backend]:
                raise SyntaxError(
                    'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword'
                    % (name, backend.upper())
                )
    def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
        """Parse RESTful URL args/vars against `patterns`, delegating to RestParser."""
        return RestParser(self).parse(patterns, args, vars, queries, nested_select)
def define_table(self, tablename, *fields, **kwargs):
invalid_kwargs = set(kwargs) - TABLE_ARGS
if invalid_kwargs:
raise SyntaxError(
'invalid table "%s" attributes: %s' % (tablename, invalid_kwargs)
)
if not fields and "fields" in kwargs:
fields = kwargs.get("fields", ())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
redefine = kwargs.get("redefine", False)
if tablename in self.tables:
if redefine:
try:
delattr(self, tablename)
except:
pass
else:
raise SyntaxError("table already defined: %s" % tablename)
elif (
tablename.startswith("_")
or tablename in dir(self)
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
raise SyntaxError("invalid table name: %s" % tablename)
elif self._check_reserved:
self.check_reserved_keyword(tablename)
if self._lazy_tables:
if tablename not in self._LAZY_TABLES or redefine:
self._LAZY_TABLES[tablename] = (tablename, fields, kwargs)
table = None
else:
table = self.lazy_define_table(tablename, *fields, **kwargs)
if tablename not in self.tables:
self.tables.append(tablename)
return table
    def lazy_define_table(self, tablename, *fields, **kwargs):
        """Actually build the `Table` object (the deferred step of define_table)."""
        kwargs_get = kwargs.get
        common_fields = self._common_fields
        if common_fields:
            # append the db-wide common fields, cloning each Field so every
            # table owns its own copy
            fields = list(fields) + [
                f if isinstance(f, Table) else f.clone() for f in common_fields
            ]
        table_class = kwargs_get("table_class", Table)
        table = table_class(self, tablename, *fields, **kwargs)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        # fill in default validators/representers for fields that did not
        # specify their own
        for field in table:
            if field.requires is DEFAULT:
                field.requires = auto_validators(field)
            if field.represent is None:
                field.represent = auto_represent(field)
        migrate = self._migrate_enabled and kwargs_get("migrate", self._migrate)
        if (
            migrate
            and self._uri not in (None, "None")
            or self._adapter.dbengine == "google:datastore"
        ):
            fake_migrate = self._fake_migrate_all or kwargs_get(
                "fake_migrate", self._fake_migrate
            )
            polymodel = kwargs_get("polymodel", None)
            try:
                # serialize create/migrate across threads
                GLOBAL_LOCKER.acquire()
                self._adapter.create_table(
                    table,
                    migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel,
                )
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = kwargs_get("on_define", None)
        if on_define:
            on_define(table)
        return table
    def as_dict(self, flat=False, sanitize=True):
        """Serialize this DAL (settings plus table definitions) to a dict.

        Args:
            flat: forwarded to each table's as_dict
            sanitize: when True (default) the uri and db_uid are omitted so
                connection credentials are not leaked
        """
        db_uid = uri = None
        if not sanitize:
            uri, db_uid = (self._uri, self._db_uid)
        db_as_dict = dict(
            tables=[],
            uri=uri,
            db_uid=db_uid,
            **dict(
                [
                    (k, getattr(self, "_" + k, None))
                    for k in [
                        "pool_size",
                        "folder",
                        "db_codec",
                        "check_reserved",
                        "migrate",
                        "fake_migrate",
                        "migrate_enabled",
                        "fake_migrate_all",
                        "decode_credentials",
                        "driver_args",
                        "adapter_args",
                        "attempts",
                        "bigint_id",
                        "debug",
                        "lazy_tables",
                    ]
                ]
            )
        )
        for table in self:
            db_as_dict["tables"].append(table.as_dict(flat=flat, sanitize=sanitize))
        return db_as_dict
    def __contains__(self, tablename):
        """True when a table named `tablename` is defined on this DAL."""
        try:
            return tablename in self.tables
        except AttributeError:
            # The instance has no .tables attribute yet
            return False

    def __iter__(self):
        """Iterate over the defined `Table` objects (not just their names)."""
        for tablename in self.tables:
            yield self[tablename]

    def __getitem__(self, key):
        """Dictionary-style table access: db['tablename']."""
        return self.__getattr__(str(key))

    def __getattr__(self, key):
        """Attribute-style table access; resolves lazy tables and per-thread aliases."""
        # a lazily declared table is built on first access
        if object.__getattribute__(
            self, "_lazy_tables"
        ) and key in object.__getattribute__(self, "_LAZY_TABLES"):
            tablename, fields, kwargs = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename, *fields, **kwargs)
        # per-thread table aliases take precedence over plain attributes
        aliased_tables = object.__getattribute__(self, "_aliased_tables")
        aliased = getattr(aliased_tables, key, None)
        if aliased:
            return aliased
        return BasicStorage.__getattribute__(self, key)
    def __setattr__(self, key, value):
        """Protect already-defined public attributes (tables) from being overwritten."""
        if key[:1] != "_" and key in self:
            raise SyntaxError("Object %s exists and cannot be redefined" % key)
        return super(DAL, self).__setattr__(key, value)

    def __repr__(self):
        """Show the uri (password hidden) or, for zombie instances, the db_uid."""
        if hasattr(self, "_uri"):
            return '<DAL uri="%s">' % hide_password(self._adapter.uri)
        else:
            return '<DAL db_uid="%s">' % self._db_uid

    def smart_query(self, fields, text):
        """Build a `Set` from a textual query parsed over `fields`."""
        return Set(self, smart_query(fields, text))
    def __call__(self, query=None, ignore_common_filters=None):
        """Shortcut for `where`: db(query) returns a `Set`."""
        return self.where(query, ignore_common_filters)

    def where(self, query=None, ignore_common_filters=None):
        """Return a `Set` restricted by `query`.

        `query` may be a Query, a Table (meaning all rows with an id),
        a Field (meaning the field is not NULL) or a serialized dict.
        """
        if isinstance(query, Table):
            query = self._adapter.id_query(query)
        elif isinstance(query, Field):
            # Field's overloaded __ne__ builds a "field IS NOT NULL" Query;
            # this is deliberately not a boolean comparison
            query = query != None
        elif isinstance(query, dict):
            icf = query.get("ignore_common_filters")
            if icf:
                ignore_common_filters = icf
        return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def rollback(self):
self._adapter.rollback()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
    def close(self):
        # Close the adapter connection and deregister this instance from the
        # per-thread registry so it can no longer be found by its db_uid.
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL._pydal_db_instances_:
            db_group = THREAD_LOCAL._pydal_db_instances_[self._db_uid]
            db_group.remove(self)
            # Drop the whole group once the last instance is gone.
            if not db_group:
                del THREAD_LOCAL._pydal_db_instances_[self._db_uid]
        self._adapter._clean_tlocals()
    def executesql(
        self,
        query,
        placeholders=None,
        as_dict=False,
        fields=None,
        colnames=None,
        as_ordered_dict=False,
    ):
        """
        Executes an arbitrary query
        Args:
            query (str): the query to submit to the backend
            placeholders: is optional and will always be None.
                If using raw SQL with placeholders, placeholders may be
                a sequence of values to be substituted in
                or, (if supported by the DB driver), a dictionary with keys
                matching named placeholders in your SQL.
            as_dict: will always be None when using DAL.
                If using raw SQL can be set to True and the results cursor
                returned by the DB driver will be converted to a sequence of
                dictionaries keyed with the db field names. Results returned
                with as_dict=True are the same as those returned when applying
                .to_list() to a DAL query. If "as_ordered_dict"=True the
                behaviour is the same as when "as_dict"=True with the keys
                (field names) guaranteed to be in the same order as returned
                by the select name executed on the database.
            fields: list of DAL Fields that match the fields returned from the
                DB. The Field objects should be part of one or more Table
                objects defined on the DAL object. The "fields" list can include
                one or more DAL Table objects in addition to or instead of
                including Field objects, or it can be just a single table
                (not in a list). In that case, the Field objects will be
                extracted from the table(s).
                Note:
                    if either `fields` or `colnames` is provided, the results
                    will be converted to a DAL `Rows` object using the
                    `db._adapter.parse()` method
            colnames: list of field names in tablename.fieldname format
        Note:
            It is also possible to specify both "fields" and the associated
            "colnames". In that case, "fields" can also include DAL Expression
            objects in addition to Field objects. For Field objects in "fields",
            the associated "colnames" must still be in tablename.fieldname
            format. For Expression objects in "fields", the associated
            "colnames" can be any arbitrary labels.
            DAL Table objects referred to by "fields" or "colnames" can be dummy
            tables and do not have to represent any real tables in the database.
            Also, note that the "fields" and "colnames" must be in the
            same order as the fields in the results cursor returned from the DB.
        """
        adapter = self._adapter
        if placeholders:
            adapter.execute(query, placeholders)
        else:
            adapter.execute(query)
        if as_dict or as_ordered_dict:
            if not hasattr(adapter.cursor, "description"):
                raise RuntimeError(
                    "database does not support executesql(...,as_dict=True)"
                )
            # Non-DAL legacy db query, converts cursor results to dict.
            # sequence of 7-item sequences. each sequence tells about a column.
            # first item is always the field name according to Python Database API specs
            columns = adapter.cursor.description
            # reduce the column info down to just the field names
            fields = colnames or [f[0] for f in columns]
            if len(fields) != len(set(fields)):
                raise RuntimeError(
                    "Result set includes duplicate column names. Specify unique column names using the 'colnames' argument"
                )
            #: avoid bytes strings in columns names (py3)
            if columns and not PY2:
                for i in range(0, len(fields)):
                    if isinstance(fields[i], bytes):
                        fields[i] = fields[i].decode("utf8")
            # will hold our finished resultset in a list
            data = adapter.fetchall()
            # convert the list for each row into a dictionary so it's
            # easier to work with. row['field_name'] rather than row[0]
            if as_ordered_dict:
                _dict = OrderedDict
            else:
                _dict = dict
            return [_dict(zip(fields, row)) for row in data]
        # Statements with no result set (DDL/DML on some drivers) make
        # fetchall() raise; in that case the caller gets None by design.
        try:
            data = adapter.fetchall()
        except:
            return None
        if fields or colnames:
            # Parse the raw rows into a DAL Rows object using the supplied
            # field/column metadata.
            fields = [] if fields is None else fields
            if not isinstance(fields, list):
                fields = [fields]
            extracted_fields = []
            for field in fields:
                # A Table stands for all of its fields.
                if isinstance(field, Table):
                    extracted_fields.extend([f for f in field])
                else:
                    extracted_fields.append(field)
            if not colnames:
                colnames = [f.sqlsafe for f in extracted_fields]
            else:
                #: extracted_fields is empty we should make it from colnames
                # what 'col_fields' is for
                col_fields = []  # [[tablename, fieldname], ....]
                newcolnames = []
                for tf in colnames:
                    if "." in tf:
                        t_f = tf.split(".")
                        tf = ".".join(adapter.dialect.quote(f) for f in t_f)
                    else:
                        # NOTE(review): colnames without a dot yield t_f=None,
                        # which then produces a None entry in the fields list
                        # passed to parse() below — confirm parse() tolerates it.
                        t_f = None
                    if not extracted_fields:
                        col_fields.append(t_f)
                    newcolnames.append(tf)
                colnames = newcolnames
            data = adapter.parse(
                data,
                fields = extracted_fields or [tf and self[tf[0]][tf[1]] for tf in col_fields],
                colnames=colnames
            )
        return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [
field for field in table._referenced_by if not field.table == thistable
]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get("max_fetch_rows,", 500))
write_colnames = kwargs["write_colnames"] = kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write("TABLE %s\r\n" % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs["write_colnames"] = write_colnames
for k in range(0, nrows, step):
self(query).select(limitby=(k, k + step)).export_to_csv_file(
ofile, *args, **kwargs
)
kwargs["write_colnames"] = False
ofile.write("\r\n\r\n")
ofile.write("END")
    def import_from_csv_file(
        self,
        ifile,
        id_map=None,
        null="<NULL>",
        unique="uuid",
        map_tablenames=None,
        ignore_missing_tables=False,
        *args,
        **kwargs
    ):
        """
        Import a multi-table CSV dump (as produced by export_to_csv_file)
        into this database. Each "TABLE <name>" section is delegated to the
        matching Table.import_from_csv_file; an "END" line stops parsing.
        """
        # if id_map is None: id_map={}
        id_offset = {}  # only used if id_map is None
        map_tablenames = map_tablenames or {}
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == "END":
                return
            elif not line.startswith("TABLE "):
                raise SyntaxError("Invalid file format")
            elif not line[6:] in self.tables:
                # NOTE(review): this check raises before map_tablenames is
                # consulted below, so mapping an unknown source table name to
                # a known one appears unreachable — confirm intended behaviour.
                raise SyntaxError("Unknown table : %s" % line[6:])
            else:
                tablename = line[6:]
                tablename = map_tablenames.get(tablename, tablename)
                if tablename is not None and tablename in self.tables:
                    self[tablename].import_from_csv_file(
                        ifile, id_map, null, unique, id_offset, *args, **kwargs
                    )
                elif tablename is None or ignore_missing_tables:
                    # skip all non-empty lines
                    for line in ifile:
                        if not line.strip():
                            break
                else:
                    raise RuntimeError(
                        "Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)"
                    )
    def can_join(self):
        # Delegates to the adapter: some NoSQL backends do not support joins.
        return self._adapter.can_join()
def DAL_unpickler(db_uid):
    # Rebuild a DAL from its uid; the "<zombie>" uri makes __new__/__init__
    # reuse the already-registered instance instead of reconnecting.
    return DAL("<zombie>", db_uid=db_uid)
def DAL_pickler(db):
    # A DAL pickles down to just its db_uid; DAL_unpickler restores it.
    return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
| 36.819688 | 169 | 0.556953 |
import glob
import logging
import socket
import threading
import time
import traceback
import urllib
from ._compat import (
PY2,
pickle,
hashlib_md5,
pjoin,
copyreg,
integer_types,
with_metaclass,
long,
unquote,
iteritems,
)
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL, DEFAULT
from ._load import OrderedDict
from .helpers.classes import (
Serializable,
SQLCallableList,
BasicStorage,
RecordUpdater,
RecordDeleter,
TimingHandler,
)
from .helpers.methods import hide_password, smart_query, auto_validators, auto_represent, uuidstr
from .helpers.regex import REGEX_PYTHON_KEYWORDS, REGEX_DBNAME
from .helpers.rest import RestParser
from .helpers.serializers import serializers
from .objects import Table, Field, Rows, Row, Set
from .adapters.base import BaseAdapter, NullAdapter
from .default_validators import default_validators
TABLE_ARGS = set(
(
"migrate",
"primarykey",
"fake_migrate",
"format",
"redefine",
"singular",
"plural",
"trigger_name",
"sequence_name",
"fields",
"common_filter",
"polymodel",
"table_class",
"on_define",
"rname",
)
)
class MetaDAL(type):
def __call__(cls, *args, **kwargs):
intercepts = [
"logger",
"representers",
"serializers",
"uuid",
"validators",
"validators_method",
"Table",
"Row",
]
intercepted = []
for name in intercepts:
val = kwargs.get(name)
if val:
intercepted.append((name, val))
del kwargs[name]
for tup in intercepted:
setattr(cls, tup[0], tup[1])
obj = super(MetaDAL, cls).__call__(*args, **kwargs)
return obj
class DAL(with_metaclass(MetaDAL, Serializable, BasicStorage)):
serializers = None
validators = None
representers = {}
validators_method = default_validators
uuid = uuidstr
logger = logging.getLogger("pyDAL")
Field = Field
Table = Table
Rows = Rows
Row = Row
record_operators = {"update_record": RecordUpdater, "delete_record": RecordDeleter}
execution_handlers = [TimingHandler]
def __new__(cls, uri="sqlite://dummy.db", *args, **kwargs):
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_"):
THREAD_LOCAL._pydal_db_instances_ = {}
if not hasattr(THREAD_LOCAL, "_pydal_db_instances_zombie_"):
THREAD_LOCAL._pydal_db_instances_zombie_ = {}
if uri == "<zombie>":
db_uid = kwargs["db_uid"]
if db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[db_uid]
db = db_group[-1]
elif db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
THREAD_LOCAL._pydal_db_instances_zombie_[db_uid] = db
else:
db_uid = kwargs.get("db_uid", hashlib_md5(repr(uri)).hexdigest())
if db_uid in THREAD_LOCAL._pydal_db_instances_zombie_:
db = THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
del THREAD_LOCAL._pydal_db_instances_zombie_[db_uid]
else:
db = super(DAL, cls).__new__(cls)
db_group = THREAD_LOCAL._pydal_db_instances_.get(db_uid, [])
db_group.append(db)
THREAD_LOCAL._pydal_db_instances_[db_uid] = db_group
db._db_uid = db_uid
return db
@staticmethod
def set_folder(folder):
s:
for db in db_group:
if not db._uri:
continue
k = hide_password(db._adapter.uri)
infos[k] = dict(
dbstats=[(row[0], row[1]) for row in db._timings],
dbtables={
"defined": sorted(
list(set(db.tables) - set(db._LAZY_TABLES.keys()))
),
"lazy": sorted(db._LAZY_TABLES.keys()),
},
)
return infos
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
instances = enumerate(instances)
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbname
)
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = "%s.%s" % (socket.gethostname(), threading.currentThread())
keys = ["%s.%i" % (thread_key, i) for (i, db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError(
"distributed transaction not suported by %s" % db._dbanme
)
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError("failure to commit distributed transaction")
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(
self,
uri="sqlite://dummy.db",
pool_size=0,
folder=None,
db_codec="UTF-8",
check_reserved=None,
migrate=True,
fake_migrate=False,
migrate_enabled=True,
fake_migrate_all=False,
decode_credentials=False,
driver_args=None,
adapter_args=None,
attempts=5,
auto_import=False,
bigint_id=False,
debug=False,
lazy_tables=False,
db_uid=None,
after_connection=None,
tables=None,
ignore_field_case=True,
entity_quoting=True,
table_hash=None,
):
if uri == "<zombie>" and db_uid is not None:
return
super(DAL, self).__init__()
if not issubclass(self.Rows, Rows):
raise RuntimeError("`Rows` class must be a subclass of pydal.objects.Rows")
if not issubclass(self.Row, Row):
raise RuntimeError("`Row` class must be a subclass of pydal.objects.Row")
from .drivers import DRIVERS, is_jdbc
self._drivers_available = DRIVERS
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: unquote(cred)
self._folder = folder
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._pending_references = {}
self._request_tenant = "request_tenant"
self._common_fields = []
self._referee_name = "%(table)s"
self._bigint_id = bigint_id
self._debug = debug
self._migrated = []
self._LAZY_TABLES = {}
self._lazy_tables = lazy_tables
self._tables = SQLCallableList()
self._aliased_tables = threading.local()
self._driver_args = driver_args
self._adapter_args = adapter_args
self._check_reserved = check_reserved
self._decode_credentials = decode_credentials
self._attempts = attempts
self._ignore_field_case = ignore_field_case
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri, (list, tuple)) and uri or [uri]
connected = False
for k in range(attempts):
for uri in uris:
try:
from .adapters import adapters
if is_jdbc and not uri.startswith("jdbc:"):
uri = "jdbc:" + uri
self._dbname = REGEX_DBNAME.match(uri).group()
kwargs = dict(
db=self,
uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args or {},
adapter_args=adapter_args or {},
after_connection=after_connection,
entity_quoting=entity_quoting,
)
adapter = adapters.get_for(self._dbname)
self._adapter = adapter(**kwargs)
if bigint_id:
self._adapter.dialect._force_bigints()
connected = True
break
except SyntaxError:
raise
except Exception:
tb = traceback.format_exc()
self.logger.debug(
"DEBUG: connect attempt %i, connection error:\n%s" % (k, tb)
)
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError(
"Failure to connect, tried %d times:\n%s" % (attempts, tb)
)
else:
self._adapter = NullAdapter(
db=self,
pool_size=0,
uri="None",
folder=folder,
db_codec=db_codec,
after_connection=after_connection,
entity_quoting=entity_quoting,
)
migrate = fake_migrate = False
self.validators_method = None
self.validators = None
adapter = self._adapter
self._uri_hash = table_hash or hashlib_md5(adapter.uri).hexdigest()
if check_reserved:
from .contrib.reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if self.serializers is not None:
for k, v in self.serializers.items():
serializers._custom_[k] = v
if auto_import or tables:
self.import_table_definitions(adapter.folder, tables=tables)
@property
def tables(self):
return self._tables
@property
def _timings(self):
return getattr(THREAD_LOCAL, "_pydal_timings_", [])
@property
def _lastsql(self):
return self._timings[-1] if self._timings else None
def import_table_definitions(
self, path, migrate=False, fake_migrate=False, tables=None
):
if tables:
for table in tables:
self.define_table(**table)
else:
pattern = pjoin(path, self._uri_hash + "_*.table")
for filename in glob.glob(pattern):
tfile = self._adapter.migrator.file_open(filename, "r" if PY2 else "rb")
try:
sql_fields = pickle.load(tfile)
name = filename[len(pattern) - 7 : -6]
mf = [
(
value["sortable"],
Field(
key,
type=value["type"],
length=value.get("length", None),
notnull=value.get("notnull", False),
unique=value.get("unique", False),
),
)
for key, value in iteritems(sql_fields)
]
mf.sort(key=lambda a: a[0])
self.define_table(
name,
*[item[1] for item in mf],
**dict(migrate=migrate, fake_migrate=fake_migrate)
)
finally:
self._adapter.migrator.file_close(tfile)
def check_reserved_keyword(self, name):
for backend in self._check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError(
'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword'
% (name, backend.upper())
)
def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
return RestParser(self).parse(patterns, args, vars, queries, nested_select)
def define_table(self, tablename, *fields, **kwargs):
invalid_kwargs = set(kwargs) - TABLE_ARGS
if invalid_kwargs:
raise SyntaxError(
'invalid table "%s" attributes: %s' % (tablename, invalid_kwargs)
)
if not fields and "fields" in kwargs:
fields = kwargs.get("fields", ())
if not isinstance(tablename, str):
if isinstance(tablename, unicode):
try:
tablename = str(tablename)
except UnicodeEncodeError:
raise SyntaxError("invalid unicode table name")
else:
raise SyntaxError("missing table name")
redefine = kwargs.get("redefine", False)
if tablename in self.tables:
if redefine:
try:
delattr(self, tablename)
except:
pass
else:
raise SyntaxError("table already defined: %s" % tablename)
elif (
tablename.startswith("_")
or tablename in dir(self)
or REGEX_PYTHON_KEYWORDS.match(tablename)
):
raise SyntaxError("invalid table name: %s" % tablename)
elif self._check_reserved:
self.check_reserved_keyword(tablename)
if self._lazy_tables:
if tablename not in self._LAZY_TABLES or redefine:
self._LAZY_TABLES[tablename] = (tablename, fields, kwargs)
table = None
else:
table = self.lazy_define_table(tablename, *fields, **kwargs)
if tablename not in self.tables:
self.tables.append(tablename)
return table
def lazy_define_table(self, tablename, *fields, **kwargs):
kwargs_get = kwargs.get
common_fields = self._common_fields
if common_fields:
fields = list(fields) + [
f if isinstance(f, Table) else f.clone() for f in common_fields
]
table_class = kwargs_get("table_class", Table)
table = table_class(self, tablename, *fields, **kwargs)
table._actual = True
self[tablename] = table
table._create_references()
for field in table:
if field.requires is DEFAULT:
field.requires = auto_validators(field)
if field.represent is None:
field.represent = auto_represent(field)
migrate = self._migrate_enabled and kwargs_get("migrate", self._migrate)
if (
migrate
and self._uri not in (None, "None")
or self._adapter.dbengine == "google:datastore"
):
fake_migrate = self._fake_migrate_all or kwargs_get(
"fake_migrate", self._fake_migrate
)
polymodel = kwargs_get("polymodel", None)
try:
GLOBAL_LOCKER.acquire()
self._adapter.create_table(
table,
migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel,
)
finally:
GLOBAL_LOCKER.release()
else:
table._dbt = None
on_define = kwargs_get("on_define", None)
if on_define:
on_define(table)
return table
def as_dict(self, flat=False, sanitize=True):
db_uid = uri = None
if not sanitize:
uri, db_uid = (self._uri, self._db_uid)
db_as_dict = dict(
tables=[],
uri=uri,
db_uid=db_uid,
**dict(
[
(k, getattr(self, "_" + k, None))
for k in [
"pool_size",
"folder",
"db_codec",
"check_reserved",
"migrate",
"fake_migrate",
"migrate_enabled",
"fake_migrate_all",
"decode_credentials",
"driver_args",
"adapter_args",
"attempts",
"bigint_id",
"debug",
"lazy_tables",
]
]
)
)
for table in self:
db_as_dict["tables"].append(table.as_dict(flat=flat, sanitize=sanitize))
return db_as_dict
def __contains__(self, tablename):
try:
return tablename in self.tables
except AttributeError:
return False
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return self.__getattr__(str(key))
def __getattr__(self, key):
if object.__getattribute__(
self, "_lazy_tables"
) and key in object.__getattribute__(self, "_LAZY_TABLES"):
tablename, fields, kwargs = self._LAZY_TABLES.pop(key)
return self.lazy_define_table(tablename, *fields, **kwargs)
aliased_tables = object.__getattribute__(self, "_aliased_tables")
aliased = getattr(aliased_tables, key, None)
if aliased:
return aliased
return BasicStorage.__getattribute__(self, key)
def __setattr__(self, key, value):
if key[:1] != "_" and key in self:
raise SyntaxError("Object %s exists and cannot be redefined" % key)
return super(DAL, self).__setattr__(key, value)
def __repr__(self):
if hasattr(self, "_uri"):
return '<DAL uri="%s">' % hide_password(self._adapter.uri)
else:
return '<DAL db_uid="%s">' % self._db_uid
def smart_query(self, fields, text):
return Set(self, smart_query(fields, text))
def __call__(self, query=None, ignore_common_filters=None):
return self.where(query, ignore_common_filters)
def where(self, query=None, ignore_common_filters=None):
if isinstance(query, Table):
query = self._adapter.id_query(query)
elif isinstance(query, Field):
query = query != None
elif isinstance(query, dict):
icf = query.get("ignore_common_filters")
if icf:
ignore_common_filters = icf
return Set(self, query, ignore_common_filters=ignore_common_filters)
def commit(self):
self._adapter.commit()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def rollback(self):
self._adapter.rollback()
object.__getattribute__(self, "_aliased_tables").__dict__.clear()
def close(self):
self._adapter.close()
if self._db_uid in THREAD_LOCAL._pydal_db_instances_:
db_group = THREAD_LOCAL._pydal_db_instances_[self._db_uid]
db_group.remove(self)
if not db_group:
del THREAD_LOCAL._pydal_db_instances_[self._db_uid]
self._adapter._clean_tlocals()
def executesql(
self,
query,
placeholders=None,
as_dict=False,
fields=None,
colnames=None,
as_ordered_dict=False,
):
adapter = self._adapter
if placeholders:
adapter.execute(query, placeholders)
else:
adapter.execute(query)
if as_dict or as_ordered_dict:
if not hasattr(adapter.cursor, "description"):
raise RuntimeError(
"database does not support executesql(...,as_dict=True)"
)
columns = adapter.cursor.description
fields = colnames or [f[0] for f in columns]
if len(fields) != len(set(fields)):
raise RuntimeError(
"Result set includes duplicate column names. Specify unique column names using the 'colnames' argument"
)
if columns and not PY2:
for i in range(0, len(fields)):
if isinstance(fields[i], bytes):
fields[i] = fields[i].decode("utf8")
data = adapter.fetchall()
# easier to work with. row['field_name'] rather than row[0]
if as_ordered_dict:
_dict = OrderedDict
else:
_dict = dict
return [_dict(zip(fields, row)) for row in data]
try:
data = adapter.fetchall()
except:
return None
if fields or colnames:
fields = [] if fields is None else fields
if not isinstance(fields, list):
fields = [fields]
extracted_fields = []
for field in fields:
if isinstance(field, Table):
extracted_fields.extend([f for f in field])
else:
extracted_fields.append(field)
if not colnames:
colnames = [f.sqlsafe for f in extracted_fields]
else:
#: extracted_fields is empty we should make it from colnames
# what 'col_fields' is for
col_fields = [] # [[tablename, fieldname], ....]
newcolnames = []
for tf in colnames:
if "." in tf:
t_f = tf.split(".")
tf = ".".join(adapter.dialect.quote(f) for f in t_f)
else:
t_f = None
if not extracted_fields:
col_fields.append(t_f)
newcolnames.append(tf)
colnames = newcolnames
data = adapter.parse(
data,
fields = extracted_fields or [tf and self[tf[0]][tf[1]] for tf in col_fields],
colnames=colnames
)
return data
def _remove_references_to(self, thistable):
for table in self:
table._referenced_by = [
field for field in table._referenced_by if not field.table == thistable
]
def has_representer(self, name):
return callable(self.representers.get(name))
def represent(self, name, *args, **kwargs):
return self.representers[name](*args, **kwargs)
def export_to_csv_file(self, ofile, *args, **kwargs):
step = long(kwargs.get("max_fetch_rows,", 500))
write_colnames = kwargs["write_colnames"] = kwargs.get("write_colnames", True)
for table in self.tables:
ofile.write("TABLE %s\r\n" % table)
query = self._adapter.id_query(self[table])
nrows = self(query).count()
kwargs["write_colnames"] = write_colnames
for k in range(0, nrows, step):
self(query).select(limitby=(k, k + step)).export_to_csv_file(
ofile, *args, **kwargs
)
kwargs["write_colnames"] = False
ofile.write("\r\n\r\n")
ofile.write("END")
def import_from_csv_file(
self,
ifile,
id_map=None,
null="<NULL>",
unique="uuid",
map_tablenames=None,
ignore_missing_tables=False,
*args,
**kwargs
):
# if id_map is None: id_map={}
id_offset = {} # only used if id_map is None
map_tablenames = map_tablenames or {}
for line in ifile:
line = line.strip()
if not line:
continue
elif line == "END":
return
elif not line.startswith("TABLE "):
raise SyntaxError("Invalid file format")
elif not line[6:] in self.tables:
raise SyntaxError("Unknown table : %s" % line[6:])
else:
tablename = line[6:]
tablename = map_tablenames.get(tablename, tablename)
if tablename is not None and tablename in self.tables:
self[tablename].import_from_csv_file(
ifile, id_map, null, unique, id_offset, *args, **kwargs
)
elif tablename is None or ignore_missing_tables:
# skip all non-empty lines
for line in ifile:
if not line.strip():
break
else:
raise RuntimeError(
"Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)"
)
def can_join(self):
return self._adapter.can_join()
def DAL_unpickler(db_uid):
    # Rebuild a DAL from its uid; the "<zombie>" uri makes __new__/__init__
    # reuse the already-registered instance instead of reconnecting.
    return DAL("<zombie>", db_uid=db_uid)
def DAL_pickler(db):
    # A DAL pickles down to just its db_uid; DAL_unpickler restores it.
    return DAL_unpickler, (db._db_uid,)
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
| true | true |
f730b66e7a44d8029420d31e049ca3f917f8e10f | 10,128 | py | Python | billforward/models/data_synchronization_job_paged_metadata.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 2 | 2016-11-23T17:32:37.000Z | 2022-02-24T05:13:20.000Z | billforward/models/data_synchronization_job_paged_metadata.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | null | null | null | billforward/models/data_synchronization_job_paged_metadata.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 1 | 2016-12-30T20:02:48.000Z | 2016-12-30T20:02:48.000Z | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class DataSynchronizationJobPagedMetadata(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, next_page=None, current_page=None, current_offset=None, records_requested=None, records_returned=None, execution_time=None, results=None):
"""
DataSynchronizationJobPagedMetadata - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'next_page': 'str',
'current_page': 'int',
'current_offset': 'int',
'records_requested': 'int',
'records_returned': 'int',
'execution_time': 'int',
'results': 'list[MutableBillingEntity]'
}
self.attribute_map = {
'next_page': 'nextPage',
'current_page': 'currentPage',
'current_offset': 'currentOffset',
'records_requested': 'recordsRequested',
'records_returned': 'recordsReturned',
'execution_time': 'executionTime',
'results': 'results'
}
self._next_page = next_page
self._current_page = current_page
self._current_offset = current_offset
self._records_requested = records_requested
self._records_returned = records_returned
self._execution_time = execution_time
self._results = results
@property
def next_page(self):
"""
Gets the next_page of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Paging parameter. URL fragment that can be used to fetch next page of results.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The next_page of this DataSynchronizationJobPagedMetadata.
:rtype: str
"""
return self._next_page
@next_page.setter
def next_page(self, next_page):
"""
Sets the next_page of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Paging parameter. URL fragment that can be used to fetch next page of results.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param next_page: The next_page of this DataSynchronizationJobPagedMetadata.
:type: str
"""
self._next_page = next_page
@property
def current_page(self):
"""
Gets the current_page of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Paging parameter. 0-indexed. Describes which page (given a page size of `recordsRequested`) of the result set you are viewing.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The current_page of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._current_page
@current_page.setter
def current_page(self, current_page):
"""
Sets the current_page of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Paging parameter. 0-indexed. Describes which page (given a page size of `recordsRequested`) of the result set you are viewing.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param current_page: The current_page of this DataSynchronizationJobPagedMetadata.
:type: int
"""
self._current_page = current_page
@property
def current_offset(self):
"""
Gets the current_offset of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Paging parameter. 0-indexed. Describes your current location within a pageable list of query results.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The current_offset of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._current_offset
@current_offset.setter
def current_offset(self, current_offset):
"""
Sets the current_offset of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Paging parameter. 0-indexed. Describes your current location within a pageable list of query results.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param current_offset: The current_offset of this DataSynchronizationJobPagedMetadata.
:type: int
"""
self._current_offset = current_offset
@property
def records_requested(self):
"""
Gets the records_requested of this DataSynchronizationJobPagedMetadata.
{\"default\":10,\"description\":\"Paging parameter. Describes how many records you requested.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The records_requested of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._records_requested
@records_requested.setter
def records_requested(self, records_requested):
"""
Sets the records_requested of this DataSynchronizationJobPagedMetadata.
{\"default\":10,\"description\":\"Paging parameter. Describes how many records you requested.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param records_requested: The records_requested of this DataSynchronizationJobPagedMetadata.
:type: int
"""
self._records_requested = records_requested
@property
def records_returned(self):
"""
Gets the records_returned of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Describes how many records were returned by your query.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The records_returned of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._records_returned
@records_returned.setter
def records_returned(self, records_returned):
"""
Sets the records_returned of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Describes how many records were returned by your query.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:param records_returned: The records_returned of this DataSynchronizationJobPagedMetadata.
:type: int
"""
self._records_returned = records_returned
@property
def execution_time(self):
"""
Gets the execution_time of this DataSynchronizationJobPagedMetadata.
{\"description\":\"Number of milliseconds taken by API to calculate response.\",\"verbs\":[\"GET\",\"PUT\",\"POST\"]}
:return: The execution_time of this DataSynchronizationJobPagedMetadata.
:rtype: int
"""
return self._execution_time
    @execution_time.setter
    def execution_time(self, execution_time):
        """
        Sets the execution_time of this DataSynchronizationJobPagedMetadata.

        Number of milliseconds the API took to calculate the response.
        Applies to GET/PUT/POST.

        :param execution_time: The execution_time of this DataSynchronizationJobPagedMetadata.
        :type: int
        """
        self._execution_time = execution_time
    @property
    def results(self):
        """
        Gets the results of this DataSynchronizationJobPagedMetadata.

        The results returned by the query (the page payload).
        Applies to GET/PUT/POST.

        :return: The results of this DataSynchronizationJobPagedMetadata.
        :rtype: list[MutableBillingEntity]
        """
        return self._results
    @results.setter
    def results(self, results):
        """
        Sets the results of this DataSynchronizationJobPagedMetadata.

        The results returned by the query (the page payload).
        Applies to GET/PUT/POST.

        :param results: The results of this DataSynchronizationJobPagedMetadata.
        :type: list[MutableBillingEntity]
        """
        self._results = results
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
    def to_str(self):
        """
        Returns the string representation of the model
        (pprint.pformat applied to to_dict()).
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`: delegates to to_str().
        """
        return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        (defined as the negation of ==).
        """
        return not self == other
| 36.042705 | 193 | 0.626086 |
from pprint import pformat
from six import iteritems
import re
class DataSynchronizationJobPagedMetadata(object):
    """
    Swagger-generated model for one page of DataSynchronizationJob results,
    together with the paging bookkeeping returned by the API (current page,
    offset, requested/returned record counts and server execution time).
    """

    def __init__(self, next_page=None, current_page=None, current_offset=None, records_requested=None, records_returned=None, execution_time=None, results=None):
        # Attribute name -> swagger type; to_dict() iterates these keys to
        # know which attributes belong to the model.
        self.swagger_types = {
            'next_page': 'str',
            'current_page': 'int',
            'current_offset': 'int',
            'records_requested': 'int',
            'records_returned': 'int',
            'execution_time': 'int',
            'results': 'list[MutableBillingEntity]'
        }
        # Python attribute name -> JSON field name used on the wire.
        self.attribute_map = {
            'next_page': 'nextPage',
            'current_page': 'currentPage',
            'current_offset': 'currentOffset',
            'records_requested': 'recordsRequested',
            'records_returned': 'recordsReturned',
            'execution_time': 'executionTime',
            'results': 'results'
        }
        self._next_page = next_page
        self._current_page = current_page
        self._current_offset = current_offset
        self._records_requested = records_requested
        self._records_returned = records_returned
        self._execution_time = execution_time
        self._results = results

    @property
    def next_page(self):
        """The next_page of this model (str)."""
        return self._next_page

    @next_page.setter
    def next_page(self, next_page):
        self._next_page = next_page

    @property
    def current_page(self):
        """The current_page of this model (int)."""
        return self._current_page

    @current_page.setter
    def current_page(self, current_page):
        self._current_page = current_page

    @property
    def current_offset(self):
        """The current_offset of this model (int)."""
        return self._current_offset

    @current_offset.setter
    def current_offset(self, current_offset):
        self._current_offset = current_offset

    @property
    def records_requested(self):
        """How many records the caller asked for (int)."""
        return self._records_requested

    @records_requested.setter
    def records_requested(self, records_requested):
        self._records_requested = records_requested

    @property
    def records_returned(self):
        """How many records the query actually returned (int)."""
        return self._records_returned

    @records_returned.setter
    def records_returned(self, records_returned):
        self._records_returned = records_returned

    @property
    def execution_time(self):
        """Milliseconds the API spent computing the response (int)."""
        return self._execution_time

    @execution_time.setter
    def execution_time(self, execution_time):
        self._execution_time = execution_time

    @property
    def results(self):
        """The page payload (list[MutableBillingEntity])."""
        return self._results

    @results.setter
    def results(self, results):
        self._results = results

    def to_dict(self):
        """
        Return the model's properties as a plain dict, recursing into any
        nested model that exposes a to_dict() method.
        """
        result = {}
        # Iterate the declared attribute names directly instead of the
        # py2-only six.iteritems helper the generator emitted.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    (key, val.to_dict() if hasattr(val, "to_dict") else val)
                    for key, val in value.items())
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """
        True when *other* is the same model type with equal state.

        BUGFIX: the generated code compared __dict__ attributes
        unconditionally, which raises AttributeError for plain values
        (ints, strings, ...) that have no __dict__.
        """
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Negation of __eq__."""
        return not self == other
| true | true |
f730b975efb7291bf0ef3c3242905078b950a0fa | 45 | py | Python | packages/pyolite-kernel/py/nbformat/nbformat/__init__.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | 1,392 | 2021-03-28T01:11:50.000Z | 2022-03-23T21:46:27.000Z | packages/pyolite-kernel/py/nbformat/nbformat/__init__.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | 195 | 2021-03-27T20:38:54.000Z | 2021-10-01T12:43:17.000Z | packages/pyolite-kernel/py/nbformat/nbformat/__init__.py | marimeireles/jupyterlite | 65c9304cf89d311b8a48f227a0cbb2b7f50cf4bd | [
"BSD-3-Clause"
] | 50 | 2021-04-16T07:08:03.000Z | 2022-02-21T05:06:47.000Z | """A nbformat mock"""
__version__ = "4.2.0"
| 11.25 | 21 | 0.6 |
__version__ = "4.2.0"
| true | true |
f730ba393656175bda415e196e26904e2b289597 | 1,606 | py | Python | release/xgboost_tests/workloads/train_gpu.py | manishnish138/ray | 790dca28cf6972edebc44826c8aa3d62cbee1a5e | [
"Apache-2.0"
] | 3 | 2021-08-29T20:41:21.000Z | 2022-01-31T18:47:51.000Z | release/xgboost_tests/workloads/train_gpu.py | QPC-database/amazon-ray | 55aa4cac02a412b96252aea4e8c3f177a28324a1 | [
"Apache-2.0"
] | 59 | 2021-01-14T14:59:36.000Z | 2022-03-25T23:07:05.000Z | release/xgboost_tests/workloads/train_gpu.py | majacQ/ray | bc08c6cdcc7ddf4da751ca2a972defd3db509061 | [
"Apache-2.0"
] | null | null | null | """Training on a GPU cluster.
This will train a small dataset on a distributed GPU cluster.
Test owner: krfricke
Acceptance criteria: Should run through and report final results.
Notes: The test will report output such as this:
```
[05:14:49] WARNING: ../src/gbm/gbtree.cc:350: Loading from a raw memory buffer
on CPU only machine. Changing tree_method to hist.
[05:14:49] WARNING: ../src/learner.cc:222: No visible GPU is found, setting
`gpu_id` to -1
```
This is _not_ an error. This is due to the checkpoints being loaded on the
XGBoost driver, and since the driver lives on the head node (which has no
GPU), XGBoost warns that it can't use the GPU. Training still happened using
the GPUs.
"""
import json
import os
import time
import ray
from xgboost_ray import RayParams
from _train import train_ray
if __name__ == "__main__":
    # Attach to the Ray cluster the test infrastructure already started
    # ("auto" connects to an existing cluster rather than starting one).
    ray.init(address="auto")
    # Fixed (non-elastic) pool of 4 training actors, one GPU each.
    ray_params = RayParams(
        elastic_training=False,
        max_actor_restarts=2,
        num_actors=4,
        cpus_per_actor=4,
        gpus_per_actor=1)
    start = time.time()
    train_ray(
        path="/data/classification.parquet",
        num_workers=4,
        num_boost_rounds=100,
        num_files=25,
        regression=False,
        use_gpu=True,
        ray_params=ray_params,
        xgboost_params=None,
    )
    taken = time.time() - start
    result = {
        "time_taken": taken,
    }
    # Write the wall-clock duration where the test harness expects it.
    test_output_json = os.environ.get("TEST_OUTPUT_JSON",
                                      "/tmp/train_gpu.json")
    with open(test_output_json, "wt") as f:
        json.dump(result, f)
    print("PASSED.")
| 25.492063 | 78 | 0.663138 | import json
import os
import time
import ray
from xgboost_ray import RayParams
from _train import train_ray
if __name__ == "__main__":
    # Attach to the Ray cluster the test infrastructure already started
    # ("auto" connects to an existing cluster rather than starting one).
    ray.init(address="auto")
    # Fixed (non-elastic) pool of 4 training actors, one GPU each.
    ray_params = RayParams(
        elastic_training=False,
        max_actor_restarts=2,
        num_actors=4,
        cpus_per_actor=4,
        gpus_per_actor=1)
    start = time.time()
    train_ray(
        path="/data/classification.parquet",
        num_workers=4,
        num_boost_rounds=100,
        num_files=25,
        regression=False,
        use_gpu=True,
        ray_params=ray_params,
        xgboost_params=None,
    )
    taken = time.time() - start
    result = {
        "time_taken": taken,
    }
    # Write the wall-clock duration where the test harness expects it.
    test_output_json = os.environ.get("TEST_OUTPUT_JSON",
                                      "/tmp/train_gpu.json")
    with open(test_output_json, "wt") as f:
        json.dump(result, f)
    print("PASSED.")
| true | true |
f730bb742a33abcb801bbbb522892e6ea4f4e546 | 2,152 | py | Python | DeepJanus-BNG/self_driving/road_storage.py | zohdit/DeepJanus | c32022bdff2994e91df7af8af64a022d3e7e6a75 | [
"MIT"
] | 7 | 2020-10-12T10:46:30.000Z | 2021-06-23T10:42:30.000Z | DeepJanus-BNG/self_driving/road_storage.py | zohdit/DeepJanus | c32022bdff2994e91df7af8af64a022d3e7e6a75 | [
"MIT"
] | null | null | null | DeepJanus-BNG/self_driving/road_storage.py | zohdit/DeepJanus | c32022bdff2994e91df7af8af64a022d3e7e6a75 | [
"MIT"
] | 2 | 2021-04-26T12:46:44.000Z | 2021-09-16T08:27:53.000Z | import json
import os
from typing import Tuple, List, Callable
from core.folders import folders
class RoadStorage:
    """Filesystem-backed store of road definitions, kept as JSON files
    ('<name>.json') inside a folder under ``folders.member_seeds``."""

    def __init__(self, path: str = None):
        """Create (if necessary) the backing folder; defaults to 'test_driving'."""
        if path is None:
            path = 'test_driving'
        self.folder = str(folders.member_seeds.joinpath(path))
        os.makedirs(self.folder, exist_ok=True)

    def all_files(self) -> List[str]:
        """Return the paths of all regular files in the storage folder."""
        entries = [os.path.join(self.folder, name) for name in os.listdir(self.folder)]
        return [entry for entry in entries if os.path.isfile(entry)]

    def get_road_path_by_index(self, index) -> str:
        """Return the canonical 'roadNNN_nodes.json' path for 1-based ``index``."""
        assert index > 0
        return os.path.join(self.folder, 'road{:03}_nodes.json'.format(index))

    def get_road_nodes_by_index(self, index) -> List[Tuple[float, float, float, float]]:
        """Load the node list of road ``index``."""
        return self.get_road_nodes(self.get_road_path_by_index(index))

    def get_road_nodes(self, path) -> List[Tuple[float, float, float, float]]:
        """Load a node list from an existing JSON file at ``path``."""
        assert os.path.exists(path), path
        with open(path, 'r') as f:
            # json.load(f) replaces json.loads(f.read()) — same result,
            # standard idiom.
            return json.load(f)

    def cache(self, road_name: str, get_points: Callable) -> List[Tuple[float, float, float, float]]:
        """Return the nodes stored under ``road_name``; on a cache miss,
        call ``get_points`` and persist its result."""
        path = os.path.join(self.folder, road_name + '.json')
        if os.path.exists(path):
            with open(path, 'r') as f:
                return json.load(f)
        nodes = get_points()
        with open(path, 'w') as f:
            f.write(json.dumps(nodes))
        return nodes

    def save(self, road_name: str, contents: str) -> None:
        """Write raw JSON ``contents`` under ``road_name``.

        BUGFIX: the return annotation claimed a node list, but the method
        writes a file and returns nothing.
        """
        path = os.path.join(self.folder, road_name + '.json')
        with open(path, 'w') as f:
            f.write(contents)

    def read(self, path):
        """Load and return the JSON content at ``path`` (a serialized
        member; the old List[Tuple[...]] annotation was incorrect)."""
        assert os.path.exists(path), path
        with open(path, 'r') as f:
            return json.load(f)
if __name__ == '__main__':
    # Smoke check: print the node count of the first 30 stored roads.
    for i in range(1, 31):
        nodes = RoadStorage().get_road_nodes_by_index(i)
        print(i, len(nodes))
| 34.15873 | 101 | 0.608271 | import json
import os
from typing import Tuple, List, Callable
from core.folders import folders
class RoadStorage:
    """Filesystem-backed store of road definitions, kept as JSON files
    ('<name>.json') inside a folder under ``folders.member_seeds``."""
    def __init__(self, path: str = None):
        """Create (if necessary) the backing folder; defaults to 'test_driving'."""
        if path is None:
            path='test_driving'
        self.folder = str(folders.member_seeds.joinpath(path))
        os.makedirs(self.folder, exist_ok=True)
    def all_files(self) -> List[str]:
        """Return the paths of all regular files in the storage folder."""
        expanded = [os.path.join(self.folder, filename) for filename in os.listdir(self.folder)]
        return [path for path in expanded if os.path.isfile(path)]
    def get_road_path_by_index(self, index) -> str:
        """Return the canonical 'roadNNN_nodes.json' path for 1-based ``index``."""
        assert index > 0
        path = os.path.join(self.folder, 'road{:03}_nodes.json'.format(index))
        return path
    def get_road_nodes_by_index(self, index) -> List[Tuple[float, float, float, float]]:
        """Load the node list of road ``index``."""
        path = self.get_road_path_by_index(index)
        nodes = self.get_road_nodes(path)
        return nodes
    def get_road_nodes(self, path) -> List[Tuple[float, float, float, float]]:
        """Load a node list from an existing JSON file at ``path``."""
        assert os.path.exists(path), path
        with open(path, 'r') as f:
            nodes = json.loads(f.read())
        return nodes
    def cache(self, road_name: str, get_points: Callable) -> List[Tuple[float, float, float, float]]:
        """Return the nodes stored under ``road_name``; on a cache miss,
        call ``get_points`` and persist its result."""
        path = os.path.join(self.folder, road_name + '.json')
        if os.path.exists(path):
            with open(path, 'r') as f:
                nodes = json.loads(f.read())
        else:
            nodes = get_points()
            with open(path, 'w') as f:
                f.write(json.dumps(nodes))
        return nodes
    def save(self, road_name: str, contents: str) -> None:
        """Write raw JSON ``contents`` under ``road_name``.
        (Annotation fixed: this method writes a file and returns nothing.)"""
        path = os.path.join(self.folder, road_name + '.json')
        with open(path, 'w') as f:
            f.write(contents)
    def read(self, path) -> List[Tuple[float, float, float, float]]:
        """Load and return the JSON content at ``path``."""
        # NOTE(review): the annotation claims a node list, but this loads an
        # arbitrary serialized member — confirm against callers.
        assert os.path.exists(path), path
        with open(path, 'r') as f:
            beamng_member = json.loads(f.read())
        return beamng_member
if __name__ == '__main__':
    # Smoke check: print the node count of the first 30 stored roads.
    for i in range(1, 31):
        nodes = RoadStorage().get_road_nodes_by_index(i)
        print(i, len(nodes))
| true | true |
f730bbfbb24db521361e11a81c81fd782b3e533d | 10,665 | py | Python | inbreast.py | wentaozhu/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 106 | 2017-03-12T17:26:49.000Z | 2022-02-12T01:37:17.000Z | inbreast.py | huhansan666666/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 17 | 2017-04-11T14:49:34.000Z | 2022-03-19T07:57:37.000Z | inbreast.py | huhansan666666/deep-mil-for-whole-mammogram-classification | 8c046bbd77d268499849319cf57254015778549c | [
"MIT"
] | 41 | 2017-03-21T09:48:39.000Z | 2021-11-29T06:51:16.000Z | #import dicom # some machines not install pydicom
import scipy.misc
import numpy as np
from sklearn.model_selection import StratifiedKFold
import cPickle
#import matplotlib
#import matplotlib.pyplot as plt
from skimage.filters import threshold_otsu
import os
from os.path import join as join
import csv
import scipy.ndimage
import dicom
#import cv2
path = '../AllDICOMs/'  # folder holding the raw InBreast DICOM files (see readdicom)
preprocesspath = '../preprocesspath/'  # output folder for the pickled, resized images
labelfile = './label.txt'  # "<id> <label>" pairs, parsed by readlabel()
def readlabel():
  '''Read labelfile and return {case_id: label}, where raw label 0 stays 0
  and raw labels 1/2 are shifted down to 0/1.'''
  labels = {}
  with open(labelfile, 'r') as f:
    for line in f.readlines():
      data = line.split()
      value = int(data[1])
      if value == 0:
        labels[data[0]] = 0
      else:
        assert(value == 2 or value == 1)
        labels[data[0]] = value - 1
  return labels
def readdicom(mydict):
  '''read the dicom image, rename it consistently with the name in labels, crop and resize, and save as pickle.
  mydict is the returned value of readlabel'''
  img_ext = '.dcm'
  img_fnames = [x for x in os.listdir(path) if x.endswith(img_ext)]
  for f in img_fnames:
    # file names are underscore-separated; names[0] is the case id used in
    # the label file
    names = f.split('_')
    if names[0] not in mydict:
      print(names[0]+'occur error')
    dicom_content = dicom.read_file(join(path,f))
    img = dicom_content.pixel_array
    '''fig = plt.figure()
    ax1 = plt.subplot(3,3,1)
    ax2 = plt.subplot(3,3,2)
    ax3 = plt.subplot(3,3,3)
    ax4 = plt.subplot(3,3,4)
    ax5 = plt.subplot(3,3,5)
    ax6 = plt.subplot(3,3,6)
    ax7 = plt.subplot(3,3,7)
    ax8 = plt.subplot(3,3,8)
    ax9 = plt.subplot(3,3,9)
    ax1.imshow(img, cmap='Greys_r')
    ax1.set_title('Original')
    ax1.axis('off')'''
    # Otsu threshold: binary mask of pixels above the foreground threshold.
    thresh = threshold_otsu(img)
    binary = img > thresh
    #ax2.imshow(binary, cmap='Greys_r')
    #ax2.set_title('mask')
    #ax2.axis('off')
    minx, miny = 0, 0
    maxx, maxy = img.shape[0], img.shape[1]
    # Scan rows from the top for the first row with enough foreground.
    # NOTE(review): this loop ranges over img.shape[1] but indexes rows
    # (axis 0) — looks like a latent bug for non-square images; confirm.
    for xx in xrange(img.shape[1]):
      if sum(binary[xx, :]==0) < binary.shape[1]-60:
        minx = xx
        break
    # Scan rows from the bottom for the last row with enough foreground.
    for xx in xrange(img.shape[0]-1,0,-1):
      if sum(binary[xx, :]==0) < binary.shape[1]-60:
        maxx = xx
        break
    # Column bounds depend on names[3] == 'R' (presumably the laterality
    # flag in the file name — TODO confirm against the dataset naming).
    if names[3] == 'R':
      maxy = img.shape[1]
      for yy in xrange(int(img.shape[1]*3.0/4), -1, -1):
        if sum(binary[:,yy]==0) > binary.shape[0]-10:
          miny = yy
          break
    else:
      miny = 0
      for yy in xrange(int(img.shape[1]/4.0), img.shape[1], 1):
        if sum(binary[:,yy]==0) > binary.shape[0]-10:
          maxy = yy
          break
    print(minx, maxx, miny, maxy)
    #ax3.set_title('Foreground')
    #ax3.imshow(img[minx:maxx+1, miny:maxy+1], cmap='Greys_r')
    #ax3.axis('off')
    # Crop to the detected foreground box and save two fixed resolutions
    # (227x227 and 299x299) as pickles named '<id>227.pickle'/'<id>299.pickle'.
    img = img.astype(np.float32)
    img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (227, 227), interp='cubic')
    with open(join(preprocesspath, names[0])+'227.pickle', 'wb') as outfile:
      cPickle.dump(img1, outfile)
    img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (299, 299), interp='cubic')
    with open(join(preprocesspath, names[0])+'299.pickle', 'wb') as outfile:
      cPickle.dump(img1, outfile)
    '''ax4.set_title('Resize')
    ax4.imshow(img, cmap='Greys_r')
    ax4.axis('off')
    img = img.astype(np.float32)
    img -= np.mean(img)
    img /= np.std(img)
    ax5.set_title('Norm')
    ax5.imshow(img, cmap='Greys_r')
    ax5.axis('off')
    with open(join(preprocesspath, names[0])+'norm.pickle', 'wb') as outfile:
      cPickle.dump(img, outfile)
    #imgshape = img.shape
    img = np.fliplr(img)
    ax6.set_title('Flip')
    ax6.imshow(img, cmap='Greys_r')
    ax6.axis('off')
    num_rot = np.random.choice(4) #rotate 90 randomly
    img = np.rot90(img, num_rot)
    ax7.set_title('Rotation')
    ax7.imshow(img, cmap='Greys_r')
    ax7.axis('off')
    fig.savefig(join(preprocesspath, names[0])+'.jpg')
    plt.close(fig)'''
def cvsplit(fold, totalfold, mydict):
  """Return (train_indices, test_indices) for the fold-th of totalfold
  stratified cross-validation splits over mydict's keys.
  mydict is the dict produced by readlabel()."""
  splitter = StratifiedKFold(n_splits=totalfold)  # no shuffle: deterministic
  sample_names = mydict.keys()
  sample_labels = mydict.values()
  for split_idx, (train, test) in enumerate(splitter.split(sample_names, sample_labels)):
    print(len(train), len(test))
    if split_idx == fold:
      return train, test
def cvsplitenhance(fold, totalfold, mydict, valfold=-1):
  """Return (train, val, test) index lists from totalfold stratified
  splits: the fold-th split is the test set, the valfold-th split is the
  validation set, and every other split goes into the training set.
  mydict is the dict produced by readlabel()."""
  splitter = StratifiedKFold(n_splits=totalfold)
  sample_labels = mydict.values()
  sample_names = mydict.keys()
  if valfold == -1:
    # default: validation fold is the one right after the test fold
    valfold = (fold+1) % totalfold
  print('valfold'+str(valfold))
  trainls, valls, testls = [], [], []
  for split_idx, (train, test) in enumerate(splitter.split(sample_names, sample_labels)):
    print(len(train), len(test))
    if split_idx == fold:
      testls = test[:]
    elif split_idx == valfold:
      valls = test[:]
    else:
      trainls.extend(test)
  return trainls, valls, testls
def loadim(fname, preprocesspath=preprocesspath):
  '''Load one preprocessed image pickle from preprocesspath.

  fname: file name inside preprocesspath.
  Returns a one-element list holding the unpickled image. The flip/rotate
  augmentation the old docstring mentioned is disabled (it survives only
  as commented-out code), so exactly one image is returned.
  '''
  with open(join(preprocesspath, fname), 'rb') as inputfile:
    im = cPickle.load(inputfile)
  # cleanup: removed the redundant inputfile.close() that followed the
  # with-block — the context manager has already closed the file.
  return [im]
def loaddata(fold, totalfold, usedream=True, aug=True):
  '''Return (traindata, trainlabel, testdata, testlabel) for the fold-th of
  totalfold InBreast cross-validation splits.

  fold: which CV fold to return (0 .. totalfold-1)
  totalfold: number of cross-validation folds
  usedream: also append the DREAM data (requires extractdreamdata(), which
            is not defined in this module and must be provided elsewhere)
  aug: kept for backward compatibility; loadim() currently performs no
       augmentation, so the arrays are sized by the images actually loaded
  '''
  mydict = readlabel()
  mydictkey = mydict.keys()
  mydictvalue = mydict.values()
  trainindex, testindex = cvsplit(fold, totalfold, mydict)
  # BUGFIX: the old code passed aug= to loadim() (which has no such
  # parameter, raising TypeError) and pre-sized the arrays for 6 augmented
  # copies that were never produced, tripping the size assertion below.
  # Collect the images first, then size the arrays from what was loaded.
  trainims, trainlabs = [], []
  for idx in trainindex:
    for im in loadim(mydictkey[idx]+'.pickle'):
      trainims.append(im)
      trainlabs.append(mydictvalue[idx])
  traindata = np.zeros((len(trainims), 227, 227))
  trainlabel = np.zeros((len(trainims),))
  for pos, im in enumerate(trainims):
    traindata[pos, :, :] = im
    trainlabel[pos] = trainlabs[pos]
  testdata = np.zeros((len(testindex), 227, 227))
  testlabel = np.zeros((len(testindex),))
  for pos, idx in enumerate(testindex):
    ims = loadim(mydictkey[idx]+'.pickle')
    testdata[pos, :, :] = ims[0]
    testlabel[pos] = mydictvalue[idx]
  if usedream:
    outx, outy = extractdreamdata()
    traindata = np.concatenate((traindata, outx), axis=0)
    trainlabel = np.concatenate((trainlabel, outy), axis=0)
  return traindata, trainlabel, testdata, testlabel
def loaddataenhance(fold, totalfold, valfold=-1, valnum=60):
  '''get the fold th train and test data from inbreast
  fold is the returned fold th data, from 0 to totalfold-1
  total fold is for the cross validation
  valfold: which split to use for validation (-1 = fold after the test one)
  valnum: how many shuffled validation samples to keep; the rest are moved
  into the training set'''
  mydict = readlabel()
  mydictkey = mydict.keys()
  mydictvalue = mydict.values()
  trainindex, valindex, testindex = cvsplitenhance(fold, totalfold, mydict, valfold=valfold)
  # pre-size the arrays; loadim() yields exactly one 227x227 image per case
  traindata, trainlabel = np.zeros((len(trainindex),227,227)), np.zeros((len(trainindex),))
  valdata, vallabel = np.zeros((len(valindex),227,227)), np.zeros((len(valindex),))
  testdata, testlabel = np.zeros((len(testindex),227,227)), np.zeros((len(testindex),))
  traincount = 0
  for i in xrange(len(trainindex)):
    ims = loadim(mydictkey[trainindex[i]]+'227.pickle')
    for im in ims:
      traindata[traincount, :, :] = im
      trainlabel[traincount] = int(mydictvalue[trainindex[i]])
      traincount += 1
  assert(traincount==traindata.shape[0])
  valcount = 0
  for i in xrange(len(valindex)):
    ims = loadim(mydictkey[valindex[i]]+'227.pickle')
    valdata[valcount,:,:] = ims[0]
    vallabel[valcount] = int(mydictvalue[valindex[i]])
    valcount += 1
  assert(valcount==valdata.shape[0])
  testcount = 0
  for i in xrange(len(testindex)):
    #print mydictkey[testindex[i]]
    ims = loadim(mydictkey[testindex[i]]+'227.pickle')
    testdata[testcount,:,:] = ims[0]
    testlabel[testcount] = int(mydictvalue[testindex[i]])
    testcount += 1
  assert(testcount==testdata.shape[0])
  # shuffle the validation pool, keep valnum samples for validation and
  # fold the remainder back into the training set
  #print(valdata.shape)
  randindex = np.random.permutation(valdata.shape[0])
  valdata = valdata[randindex,:,:]
  vallabel = vallabel[randindex]
  #print(valdata.shape)
  traindata = np.concatenate((traindata, valdata[valnum:,:,:]), axis=0)
  trainlabel = np.concatenate((trainlabel, vallabel[valnum:]), axis=0)
  valdata = valdata[:valnum,:,:]
  vallabel = vallabel[:valnum]
  # scale all three splits by the training-set maximum
  maxvalue = (traindata.max()*1.0)
  # NOTE(review): this print passes maxvalue as a second argument instead
  # of %-formatting it into the string
  print('inbreast max %f', maxvalue)
  traindata = traindata / maxvalue
  valdata = valdata / maxvalue
  testdata = testdata / maxvalue
  print('train data feature')
  #meanx = traindata.mean()
  #stdx = traindata.std()
  #traindata -= meanx
  #traindata /= stdx
  #valdata -= meanx
  #valdata /= stdx
  #testdata -= meanx
  #testdata /= stdx
  print(traindata.mean(), traindata.std(), traindata.max(), traindata.min())
  print('val data feature')
  print(valdata.mean(), valdata.std(), valdata.max(), valdata.min())
  print('test data feature')
  print(testdata.mean(), testdata.std(), testdata.max(), testdata.min())
  #meandata = traindata.mean()
  #stddata = traindata.std()
  #traindata = traindata - meandata
  #traindata = traindata / stddata
  #valdata = valdata - meandata
  #valdata = valdata / stddata
  #testdata = testdata - meandata
  #testdata = testdata / stddata
  return traindata, trainlabel, valdata, vallabel, testdata, testlabel
if __name__ == '__main__':
  # quick sanity run: load fold 0 of 5 with both loaders and print label sums
  traindata, trainlabel, testdata, testlabel = loaddata(0, 5)
  print(sum(trainlabel), sum(testlabel))
  traindata, trainlabel, valdata, vallabel, testdata, testlabel = loaddataenhance(0, 5)
  print(sum(trainlabel), sum(vallabel), sum(testlabel))
| 35.909091 | 112 | 0.634974 | np
from sklearn.model_selection import StratifiedKFold
import cPickle
from skimage.filters import threshold_otsu
import os
from os.path import join as join
import csv
import scipy.ndimage
import dicom
path = '../AllDICOMs/'
preprocesspath = '../preprocesspath/'
labelfile = './label.txt'
def readlabel():
mydict = {}
with open(labelfile, 'r') as f:
flines = f.readlines()
for line in flines:
data = line.split()
if int(data[1]) == 0:
mydict[data[0]] = int(data[1])
else:
assert(int(data[1])==2 or int(data[1])==1)
mydict[data[0]] = int(data[1])-1
return mydict
def readdicom(mydict):
img_ext = '.dcm'
img_fnames = [x for x in os.listdir(path) if x.endswith(img_ext)]
for f in img_fnames:
names = f.split('_')
if names[0] not in mydict:
print(names[0]+'occur error')
dicom_content = dicom.read_file(join(path,f))
img = dicom_content.pixel_array
thresh = threshold_otsu(img)
binary = img > thresh
minx, miny = 0, 0
maxx, maxy = img.shape[0], img.shape[1]
for xx in xrange(img.shape[1]):
if sum(binary[xx, :]==0) < binary.shape[1]-60:
minx = xx
break
for xx in xrange(img.shape[0]-1,0,-1):
if sum(binary[xx, :]==0) < binary.shape[1]-60:
maxx = xx
break
if names[3] == 'R':
maxy = img.shape[1]
for yy in xrange(int(img.shape[1]*3.0/4), -1, -1):
if sum(binary[:,yy]==0) > binary.shape[0]-10:
miny = yy
break
else:
miny = 0
for yy in xrange(int(img.shape[1]/4.0), img.shape[1], 1):
if sum(binary[:,yy]==0) > binary.shape[0]-10:
maxy = yy
break
print(minx, maxx, miny, maxy)
img = img.astype(np.float32)
img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (227, 227), interp='cubic')
with open(join(preprocesspath, names[0])+'227.pickle', 'wb') as outfile:
cPickle.dump(img1, outfile)
img1 = scipy.misc.imresize(img[minx:maxx+1, miny:maxy+1], (299, 299), interp='cubic')
with open(join(preprocesspath, names[0])+'299.pickle', 'wb') as outfile:
cPickle.dump(img1, outfile)
def cvsplit(fold, totalfold, mydict):
skf = StratifiedKFold(n_splits=totalfold)
y = mydict.values()
x = mydict.keys()
count = 0
for train, test in skf.split(x,y):
print(len(train), len(test))
if count == fold:
return train, test
count += 1
def cvsplitenhance(fold, totalfold, mydict, valfold=-1):
skf = StratifiedKFold(n_splits=totalfold)
y = mydict.values()
x = mydict.keys()
count = 0
if valfold == -1:
valfold = (fold+1) % totalfold
print('valfold'+str(valfold))
trainls, valls, testls = [], [], []
for train, test in skf.split(x,y):
print(len(train), len(test))
if count == fold:
testls = test[:]
elif count == valfold:
valls = test[:]
else:
for i in test:
trainls.append(i)
count += 1
return trainls, valls, testls
def loadim(fname, preprocesspath=preprocesspath):
ims = []
with open(join(preprocesspath, fname), 'rb') as inputfile:
im = cPickle.load(inputfile)
m
ims.append(img)
inputfile.close()
return ims
def loaddata(fold, totalfold, usedream=True, aug=True):
mydict = readlabel()
mydictkey = mydict.keys()
mydictvalue = mydict.values()
trainindex, testindex = cvsplit(fold, totalfold, mydict)
if aug == True:
traindata, trainlabel = np.zeros((6*len(trainindex),227,227)), np.zeros((6*len(trainindex),))
else:
traindata, trainlabel = np.zeros((len(trainindex),227,227)), np.zeros((len(trainindex),))
testdata, testlabel = np.zeros((len(testindex),227,227)), np.zeros((len(testindex),))
traincount = 0
for i in xrange(len(trainindex)):
ims = loadim(mydictkey[trainindex[i]]+'.pickle', aug=aug)
for im in ims:
traindata[traincount, :, :] = im
trainlabel[traincount] = mydictvalue[trainindex[i]]
traincount += 1
assert(traincount==traindata.shape[0])
testcount = 0
for i in xrange(len(testindex)):
ims = loadim(mydictkey[testindex[i]]+'.pickle', aug=aug)
testdata[testcount,:,:] = ims[0]
testlabel[testcount] = mydictvalue[testindex[i]]
testcount += 1
assert(testcount==testdata.shape[0])
if usedream:
outx, outy = extractdreamdata()
traindata = np.concatenate((traindata,outx), axis=0)
trainlabel = np.concatenate((trainlabel,outy), axis=0)
return traindata, trainlabel, testdata, testlabel
def loaddataenhance(fold, totalfold, valfold=-1, valnum=60):
mydict = readlabel()
mydictkey = mydict.keys()
mydictvalue = mydict.values()
trainindex, valindex, testindex = cvsplitenhance(fold, totalfold, mydict, valfold=valfold)
traindata, trainlabel = np.zeros((len(trainindex),227,227)), np.zeros((len(trainindex),))
valdata, vallabel = np.zeros((len(valindex),227,227)), np.zeros((len(valindex),))
testdata, testlabel = np.zeros((len(testindex),227,227)), np.zeros((len(testindex),))
traincount = 0
for i in xrange(len(trainindex)):
ims = loadim(mydictkey[trainindex[i]]+'227.pickle')
for im in ims:
traindata[traincount, :, :] = im
trainlabel[traincount] = int(mydictvalue[trainindex[i]])
traincount += 1
assert(traincount==traindata.shape[0])
valcount = 0
for i in xrange(len(valindex)):
ims = loadim(mydictkey[valindex[i]]+'227.pickle')
valdata[valcount,:,:] = ims[0]
vallabel[valcount] = int(mydictvalue[valindex[i]])
valcount += 1
assert(valcount==valdata.shape[0])
testcount = 0
for i in xrange(len(testindex)):
ims = loadim(mydictkey[testindex[i]]+'227.pickle')
testdata[testcount,:,:] = ims[0]
testlabel[testcount] = int(mydictvalue[testindex[i]])
testcount += 1
assert(testcount==testdata.shape[0])
randindex = np.random.permutation(valdata.shape[0])
valdata = valdata[randindex,:,:]
vallabel = vallabel[randindex]
traindata = np.concatenate((traindata, valdata[valnum:,:,:]), axis=0)
trainlabel = np.concatenate((trainlabel, vallabel[valnum:]), axis=0)
valdata = valdata[:valnum,:,:]
vallabel = vallabel[:valnum]
maxvalue = (traindata.max()*1.0)
print('inbreast max %f', maxvalue)
traindata = traindata / maxvalue
valdata = valdata / maxvalue
testdata = testdata / maxvalue
print('train data feature')
print(traindata.mean(), traindata.std(), traindata.max(), traindata.min())
print('val data feature')
print(valdata.mean(), valdata.std(), valdata.max(), valdata.min())
print('test data feature')
print(testdata.mean(), testdata.std(), testdata.max(), testdata.min())
return traindata, trainlabel, valdata, vallabel, testdata, testlabel
if __name__ == '__main__':
traindata, trainlabel, testdata, testlabel = loaddata(0, 5)
print(sum(trainlabel), sum(testlabel))
traindata, trainlabel, valdata, vallabel, testdata, testlabel = loaddataenhance(0, 5)
print(sum(trainlabel), sum(vallabel), sum(testlabel))
| true | true |
f730bcb2483c07c6b27c50e0f64f975de88ddef7 | 1,707 | py | Python | sdks/python/apache_beam/io/__init__.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 2 | 2019-12-14T04:24:33.000Z | 2020-02-21T07:17:40.000Z | sdks/python/apache_beam/io/__init__.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 14 | 2020-02-12T22:20:41.000Z | 2021-11-09T19:41:23.000Z | sdks/python/apache_beam/io/__init__.py | charithe/beam | f085cb500730cf0c67c467ac55f92b3c59f52b39 | [
"Apache-2.0"
] | 2 | 2020-06-22T11:17:44.000Z | 2020-11-04T04:11:59.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A package defining several input sources and output sinks."""
# pylint: disable=wildcard-import
from __future__ import absolute_import
from apache_beam.io.avroio import *
from apache_beam.io.filebasedsink import *
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Sink
from apache_beam.io.iobase import Write
from apache_beam.io.iobase import Writer
from apache_beam.io.mongodbio import *
from apache_beam.io.parquetio import *
from apache_beam.io.textio import *
from apache_beam.io.tfrecordio import *
from apache_beam.io.range_trackers import *
# Protect against environments where the client library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
  # GCP-specific IO (BigQuery, Pub/Sub, GCS) is optional; the import only
  # succeeds when the corresponding client libraries are installed.
  from apache_beam.io.gcp.bigquery import *
  from apache_beam.io.gcp.pubsub import *
  from apache_beam.io.gcp import gcsio
except ImportError:
  # Missing GCP extras simply leave these connectors undefined.
  pass
# pylint: enable=wrong-import-order, wrong-import-position
| 39.697674 | 74 | 0.794376 |
from __future__ import absolute_import
from apache_beam.io.avroio import *
from apache_beam.io.filebasedsink import *
from apache_beam.io.iobase import Read
from apache_beam.io.iobase import Sink
from apache_beam.io.iobase import Write
from apache_beam.io.iobase import Writer
from apache_beam.io.mongodbio import *
from apache_beam.io.parquetio import *
from apache_beam.io.textio import *
from apache_beam.io.tfrecordio import *
from apache_beam.io.range_trackers import *
try:
  # GCP-specific IO (BigQuery, Pub/Sub, GCS) is optional; the import only
  # succeeds when the corresponding client libraries are installed.
  from apache_beam.io.gcp.bigquery import *
  from apache_beam.io.gcp.pubsub import *
  from apache_beam.io.gcp import gcsio
except ImportError:
  # Missing GCP extras simply leave these connectors undefined.
  pass
| true | true |
f730bd8c30f682808cde1355d3c15659e66ef93c | 582 | py | Python | src/tests/spider_test.py | volvet/spider | c374de5ad299423eb47b662e8c2f1d16ead58a9f | [
"MIT"
] | null | null | null | src/tests/spider_test.py | volvet/spider | c374de5ad299423eb47b662e8c2f1d16ead58a9f | [
"MIT"
] | null | null | null | src/tests/spider_test.py | volvet/spider | c374de5ad299423eb47b662e8c2f1d16ead58a9f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 10 21:23:13 2021
@author: Administrator
"""
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from utils import SpiderFormat # noqa: E402
from spider_factory import SpiderFactory # noqa: E402
def test_onnxspider():
spider = SpiderFactory.create(SpiderFormat.ONNX)
assert spider is not None
def test_torchspider():
spider = SpiderFactory.create(SpiderFormat.TORCH)
assert spider is None
if __name__ == '__main__':
print('Hello, test')
test_onnxspider()
test_torchspider()
| 20.068966 | 68 | 0.725086 |
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')
from utils import SpiderFormat
from spider_factory import SpiderFactory
def test_onnxspider():
spider = SpiderFactory.create(SpiderFormat.ONNX)
assert spider is not None
def test_torchspider():
spider = SpiderFactory.create(SpiderFormat.TORCH)
assert spider is None
if __name__ == '__main__':
print('Hello, test')
test_onnxspider()
test_torchspider()
| true | true |
f730bdfa184e875f2155e79716a3aed6b404a24c | 1,861 | py | Python | tests/config.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 562 | 2015-02-20T08:25:24.000Z | 2021-11-12T19:58:44.000Z | tests/config.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 129 | 2015-02-20T07:41:14.000Z | 2022-02-17T21:14:40.000Z | tests/config.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 54 | 2015-02-28T01:12:23.000Z | 2021-03-02T11:14:52.000Z | SQLALCHEMY_DATABASE_URI = "postgresql:///test_freight"
LOG_LEVEL = "INFO"
WORKSPACE_ROOT = "/tmp/freight-tests"
SSH_PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEArvyc+vZVxUjC5ZcFg1VN3jQOCOjO94gwQKFxlz0zOCrCz+Sq\nnWk28YdUpOU016Zinlh4ZZk2136nCKKTMnNMjd6cTTCn5fWomjR+F2CSdaYYpYfO\nNtVnq0SIDUgGmjyPncOGrxVT6EzjjSvgE8W8YIc5rVJqNMAH5OywUH0nqISYN2yP\nwbUPVf8zqu3kpnTt7YcWZ+Ye4b3jX6Fo2Xw5P1TTwQ92K9JdVAltBRpwSLtBQUYC\nMkwtNf6QIbRYKoVZuEhi/8XCxT0zG78Lsqpbld8IEnLWUGifCtx9mKqVi8Y3QTsT\nknMWFaf+Su8htgw/W7tufmrtTKNJYDtPTGiBeQIDAQABAoIBABYsC/gAnn2Q6qEM\nsbYiaOtuzRhz50WWDAckbbAsIQFM6cJNxxCK9FtGOoNqR3fLrVNDAn5dG4XSlneR\nofUShvCy9DsTnzKUHfjsDc4IfoZJtXXD720jPS+GT3bfWXbRlaD31Wj52tfkZjDN\nDmdy9puEhtpfRvXIHzfyhaStNwkzDh0jp8e8yok1mLA+3FPqkJPF6ptxPs6HEQS8\npY75jxvypbux2+W9249J/HqMmd5/+r7tt62vciqnXb2LG2AmUxLhTAQU9mGM2OSL\nrh2j+7/2apEQLdJ0DbS19IkQZRpO/DLPyhg6C29ZuNQffQWoLiZlfgIEaBT939aM\nkFdzy8ECgYEA4BdisLRCyCdm2M7fMDsV7j71z48Q1Kdl5A6/ngiK1dCwnjRMvkLx\nKOHtmvpJxHTH+JAewrrGUg0GF1YpM3gi0FQ7f9qTlAeFIrU3udV8F/m6+rIOpx92\nB2FSrYTaonLX8g4OzXKNtQcwzx91mFWTIEmfQl9let0WMrCRzReXp0sCgYEAx+dC\ncbERCVcJvs9+SUwVXXOreCF4PedLrg7bjkfYSpmAJk9c36EOi1jIGO5rat5/k7Nb\n0plWghADjtcb4r8oO6pzhMR81cESgFOk1UasP4rPYX4mEYPBwVGgN7ECUXj9XFPZ\n/tk7lgneBc1/6eV978MTprXiHU5Rv7yZBMuf68sCgYAd6YE27Rjs9rV3w0VvfrOS\ntbzCE+q/OAkVxBI32hQOLmkk9P45d14RgvbgdQBbxOrcdwBkJeJLGYnym4GsaSDc\nhiHbEyYX4FkZJO9nUuPZn3Ah/pqOHFj46zjKCK3WeVXx7YZ0ThI0U91kCGL+Do4x\nBSLJDUrSd6h6467SnY+UuQKBgGV0/AYT5h+lay7KxL+Su+04Pbi01AAnGgP3SnuF\n/0KtcZsAAJUHewhCQRxWNXKCBqICEAJtDLjqQ8QFbQPCHTtbIVIrH2ilmyxCR5Bv\nVBDT9Lj4e328L2Rcd0KMti5/h6eKb0OnIVTfIS40xE0Dys0bZyfffCl/jIIRyF/k\nsP/NAoGBAIfxtr881cDFrxahrTJ3AtGXxjJjMUW/S6+gKd7Lj9i+Uadb9vjD8Wt8\ngWrUDwXVAhD5Sxv+OCBizPF1CxXTgC3+/ophkUcy5VTcBchgQI7JrItujxUc0EvR\nCwA7/JPyO8DaUtvpodUKO27vr11G/NmXYrOohCP6VxH/Y6p5L9o4\n-----END RSA PRIVATE KEY-----"
GITHUB_TOKEN = "a" * 40
| 186.1 | 1,720 | 0.922085 | SQLALCHEMY_DATABASE_URI = "postgresql:///test_freight"
LOG_LEVEL = "INFO"
WORKSPACE_ROOT = "/tmp/freight-tests"
SSH_PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\nMIIEowIBAAKCAQEArvyc+vZVxUjC5ZcFg1VN3jQOCOjO94gwQKFxlz0zOCrCz+Sq\nnWk28YdUpOU016Zinlh4ZZk2136nCKKTMnNMjd6cTTCn5fWomjR+F2CSdaYYpYfO\nNtVnq0SIDUgGmjyPncOGrxVT6EzjjSvgE8W8YIc5rVJqNMAH5OywUH0nqISYN2yP\nwbUPVf8zqu3kpnTt7YcWZ+Ye4b3jX6Fo2Xw5P1TTwQ92K9JdVAltBRpwSLtBQUYC\nMkwtNf6QIbRYKoVZuEhi/8XCxT0zG78Lsqpbld8IEnLWUGifCtx9mKqVi8Y3QTsT\nknMWFaf+Su8htgw/W7tufmrtTKNJYDtPTGiBeQIDAQABAoIBABYsC/gAnn2Q6qEM\nsbYiaOtuzRhz50WWDAckbbAsIQFM6cJNxxCK9FtGOoNqR3fLrVNDAn5dG4XSlneR\nofUShvCy9DsTnzKUHfjsDc4IfoZJtXXD720jPS+GT3bfWXbRlaD31Wj52tfkZjDN\nDmdy9puEhtpfRvXIHzfyhaStNwkzDh0jp8e8yok1mLA+3FPqkJPF6ptxPs6HEQS8\npY75jxvypbux2+W9249J/HqMmd5/+r7tt62vciqnXb2LG2AmUxLhTAQU9mGM2OSL\nrh2j+7/2apEQLdJ0DbS19IkQZRpO/DLPyhg6C29ZuNQffQWoLiZlfgIEaBT939aM\nkFdzy8ECgYEA4BdisLRCyCdm2M7fMDsV7j71z48Q1Kdl5A6/ngiK1dCwnjRMvkLx\nKOHtmvpJxHTH+JAewrrGUg0GF1YpM3gi0FQ7f9qTlAeFIrU3udV8F/m6+rIOpx92\nB2FSrYTaonLX8g4OzXKNtQcwzx91mFWTIEmfQl9let0WMrCRzReXp0sCgYEAx+dC\ncbERCVcJvs9+SUwVXXOreCF4PedLrg7bjkfYSpmAJk9c36EOi1jIGO5rat5/k7Nb\n0plWghADjtcb4r8oO6pzhMR81cESgFOk1UasP4rPYX4mEYPBwVGgN7ECUXj9XFPZ\n/tk7lgneBc1/6eV978MTprXiHU5Rv7yZBMuf68sCgYAd6YE27Rjs9rV3w0VvfrOS\ntbzCE+q/OAkVxBI32hQOLmkk9P45d14RgvbgdQBbxOrcdwBkJeJLGYnym4GsaSDc\nhiHbEyYX4FkZJO9nUuPZn3Ah/pqOHFj46zjKCK3WeVXx7YZ0ThI0U91kCGL+Do4x\nBSLJDUrSd6h6467SnY+UuQKBgGV0/AYT5h+lay7KxL+Su+04Pbi01AAnGgP3SnuF\n/0KtcZsAAJUHewhCQRxWNXKCBqICEAJtDLjqQ8QFbQPCHTtbIVIrH2ilmyxCR5Bv\nVBDT9Lj4e328L2Rcd0KMti5/h6eKb0OnIVTfIS40xE0Dys0bZyfffCl/jIIRyF/k\nsP/NAoGBAIfxtr881cDFrxahrTJ3AtGXxjJjMUW/S6+gKd7Lj9i+Uadb9vjD8Wt8\ngWrUDwXVAhD5Sxv+OCBizPF1CxXTgC3+/ophkUcy5VTcBchgQI7JrItujxUc0EvR\nCwA7/JPyO8DaUtvpodUKO27vr11G/NmXYrOohCP6VxH/Y6p5L9o4\n-----END RSA PRIVATE KEY-----"
GITHUB_TOKEN = "a" * 40
| true | true |
f730be982c887d5e48842dce53d62d19a740a19a | 8,899 | py | Python | tests_gpu/test_multi_gpu/test_core_pytorch_compare/test_ddp/test_mnist_cnn.py | mv1388/AIToolbox | c64ac4810a02d230ce471d86b758e82ea232a7e7 | [
"MIT"
] | 3 | 2019-10-12T12:24:09.000Z | 2020-08-02T02:42:43.000Z | tests_gpu/test_multi_gpu/test_core_pytorch_compare/test_ddp/test_mnist_cnn.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | 3 | 2020-04-10T14:07:07.000Z | 2020-04-22T19:04:38.000Z | tests_gpu/test_multi_gpu/test_core_pytorch_compare/test_ddp/test_mnist_cnn.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | null | null | null | import unittest
import os
import shutil
import random
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from aitoolbox import TrainLoop, TTModel
from tests_gpu.test_multi_gpu.ddp_prediction_saver import DDPPredictionSave
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class CNNNet(TTModel):
def __init__(self):
super(CNNNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_loss(self, batch_data, criterion, device):
data, target = batch_data
data, target = data.to(device), target.to(device)
output = self(data)
loss = criterion(output, target)
return loss
def get_predictions(self, batch_data, device):
data, y_test = batch_data
data = data.to(device)
output = self(data)
y_pred = output.argmax(dim=1, keepdim=False)
return y_pred.cpu(), y_test, {}
class TestMNISTCNN(unittest.TestCase):
def test_trainloop_core_pytorch_compare(self):
os.mkdir(f'{THIS_DIR}/ddp_cnn_save')
val_loss_tl, y_pred_tl, y_true_tl = self.train_eval_trainloop(num_epochs=5, use_real_train_data=True)
val_loss_pt, y_pred_pt, y_true_pt = self.train_eval_core_pytorch(num_epochs=5, use_real_train_data=True)
self.assertAlmostEqual(val_loss_tl, val_loss_pt, places=8)
self.assertEqual(y_pred_tl, y_pred_pt)
self.assertEqual(y_true_tl, y_true_pt)
project_path = os.path.join(THIS_DIR, 'ddp_cnn_save')
if os.path.exists(project_path):
shutil.rmtree(project_path)
project_path = os.path.join(THIS_DIR, 'data')
if os.path.exists(project_path):
shutil.rmtree(project_path)
def train_eval_trainloop(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model = CNNNet()
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion = nn.NLLLoss()
print('Starting train loop')
tl = TrainLoop(
model,
train_loader, val_loader, None,
optimizer, criterion,
gpu_mode='ddp'
)
self.assertEqual(tl.device.type, "cuda")
tl.fit(num_epochs=num_epochs,
callbacks=[DDPPredictionSave(dir_path=f'{THIS_DIR}/ddp_cnn_save',
file_name='tl_ddp_predictions.p')])
with open(f'{THIS_DIR}/ddp_cnn_save/tl_ddp_predictions.p', 'rb') as f:
val_loss, y_pred, y_true = pickle.load(f)
return val_loss, y_pred, y_true
def train_eval_core_pytorch(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model_pt = CNNNet()
optimizer_pt = optim.Adam(model_pt.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion_pt = nn.NLLLoss()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '8888'
print('Starting the manual DDP training')
mp.spawn(
self.manual_ddp_training,
args=(num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader),
nprocs=torch.cuda.device_count()
)
val_loss, y_pred, y_true = [], [], []
for idx in range(torch.cuda.device_count()):
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{idx}.p', 'rb') as f:
val_loss_f, y_pred_f, y_true_f = pickle.load(f)
val_loss += val_loss_f
y_pred += y_pred_f
y_true += y_true_f
val_loss = np.mean(val_loss)
return val_loss, y_pred, y_true
@staticmethod
def manual_ddp_training(gpu, num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader):
rank = gpu
dist.init_process_group(backend='nccl', init_method='env://', world_size=torch.cuda.device_count(), rank=rank)
torch.manual_seed(0)
torch.cuda.set_device(gpu)
device = torch.device(f"cuda:{gpu}")
train_sampler = DistributedSampler(dataset=train_loader.dataset, shuffle=True,
num_replicas=torch.cuda.device_count(), rank=rank)
val_sampler = DistributedSampler(dataset=val_loader.dataset, shuffle=False,
num_replicas=torch.cuda.device_count(), rank=rank)
train_loader_ddp = DataLoader(train_loader.dataset, batch_size=100, sampler=train_sampler)
val_loader_ddp = DataLoader(val_loader.dataset, batch_size=100, sampler=val_sampler)
model_pt = model_pt.to(device)
criterion_pt = criterion_pt.to(device)
model_pt = DistributedDataParallel(model_pt, device_ids=[gpu])
model_pt.train()
for epoch in range(num_epochs):
print(f'Epoch: {epoch}')
train_sampler.set_epoch(epoch)
for i, (input_data, target) in enumerate(train_loader_ddp):
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss = criterion_pt(predicted, target)
loss.backward()
optimizer_pt.step()
optimizer_pt.zero_grad()
# Imitate what happens in auto_execute_end_of_epoch() in TrainLoop
for _ in train_loader:
pass
for _ in val_loader:
pass
print('Evaluating')
val_loss, val_pred, val_true = [], [], []
model_pt.eval()
with torch.no_grad():
for input_data, target in val_loader_ddp:
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss_batch = criterion_pt(predicted, target).cpu().item()
val_pred += predicted.argmax(dim=1, keepdim=False).cpu().tolist()
val_true += target.cpu().tolist()
val_loss.append(loss_batch)
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{gpu}.p', 'wb') as f:
pickle.dump([val_loss, val_pred, val_true], f)
@staticmethod
def set_seeds():
manual_seed = 0
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
np.random.seed(manual_seed)
random.seed(manual_seed)
torch.manual_seed(manual_seed)
# if you are suing GPU
torch.cuda.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
| 37.23431 | 118 | 0.60681 | import unittest
import os
import shutil
import random
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from aitoolbox import TrainLoop, TTModel
from tests_gpu.test_multi_gpu.ddp_prediction_saver import DDPPredictionSave
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
class CNNNet(TTModel):
def __init__(self):
super(CNNNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_loss(self, batch_data, criterion, device):
data, target = batch_data
data, target = data.to(device), target.to(device)
output = self(data)
loss = criterion(output, target)
return loss
def get_predictions(self, batch_data, device):
data, y_test = batch_data
data = data.to(device)
output = self(data)
y_pred = output.argmax(dim=1, keepdim=False)
return y_pred.cpu(), y_test, {}
class TestMNISTCNN(unittest.TestCase):
def test_trainloop_core_pytorch_compare(self):
os.mkdir(f'{THIS_DIR}/ddp_cnn_save')
val_loss_tl, y_pred_tl, y_true_tl = self.train_eval_trainloop(num_epochs=5, use_real_train_data=True)
val_loss_pt, y_pred_pt, y_true_pt = self.train_eval_core_pytorch(num_epochs=5, use_real_train_data=True)
self.assertAlmostEqual(val_loss_tl, val_loss_pt, places=8)
self.assertEqual(y_pred_tl, y_pred_pt)
self.assertEqual(y_true_tl, y_true_pt)
project_path = os.path.join(THIS_DIR, 'ddp_cnn_save')
if os.path.exists(project_path):
shutil.rmtree(project_path)
project_path = os.path.join(THIS_DIR, 'data')
if os.path.exists(project_path):
shutil.rmtree(project_path)
def train_eval_trainloop(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100, shuffle=True)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model = CNNNet()
optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion = nn.NLLLoss()
print('Starting train loop')
tl = TrainLoop(
model,
train_loader, val_loader, None,
optimizer, criterion,
gpu_mode='ddp'
)
self.assertEqual(tl.device.type, "cuda")
tl.fit(num_epochs=num_epochs,
callbacks=[DDPPredictionSave(dir_path=f'{THIS_DIR}/ddp_cnn_save',
file_name='tl_ddp_predictions.p')])
with open(f'{THIS_DIR}/ddp_cnn_save/tl_ddp_predictions.p', 'rb') as f:
val_loss, y_pred, y_true = pickle.load(f)
return val_loss, y_pred, y_true
def train_eval_core_pytorch(self, num_epochs, use_real_train_data=False):
self.set_seeds()
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=use_real_train_data, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
val_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join(THIS_DIR, 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=100)
model_pt = CNNNet()
optimizer_pt = optim.Adam(model_pt.parameters(), lr=0.001, betas=(0.9, 0.999))
criterion_pt = nn.NLLLoss()
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '8888'
print('Starting the manual DDP training')
mp.spawn(
self.manual_ddp_training,
args=(num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader),
nprocs=torch.cuda.device_count()
)
val_loss, y_pred, y_true = [], [], []
for idx in range(torch.cuda.device_count()):
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{idx}.p', 'rb') as f:
val_loss_f, y_pred_f, y_true_f = pickle.load(f)
val_loss += val_loss_f
y_pred += y_pred_f
y_true += y_true_f
val_loss = np.mean(val_loss)
return val_loss, y_pred, y_true
@staticmethod
def manual_ddp_training(gpu, num_epochs, model_pt, optimizer_pt, criterion_pt, train_loader, val_loader):
rank = gpu
dist.init_process_group(backend='nccl', init_method='env://', world_size=torch.cuda.device_count(), rank=rank)
torch.manual_seed(0)
torch.cuda.set_device(gpu)
device = torch.device(f"cuda:{gpu}")
train_sampler = DistributedSampler(dataset=train_loader.dataset, shuffle=True,
num_replicas=torch.cuda.device_count(), rank=rank)
val_sampler = DistributedSampler(dataset=val_loader.dataset, shuffle=False,
num_replicas=torch.cuda.device_count(), rank=rank)
train_loader_ddp = DataLoader(train_loader.dataset, batch_size=100, sampler=train_sampler)
val_loader_ddp = DataLoader(val_loader.dataset, batch_size=100, sampler=val_sampler)
model_pt = model_pt.to(device)
criterion_pt = criterion_pt.to(device)
model_pt = DistributedDataParallel(model_pt, device_ids=[gpu])
model_pt.train()
for epoch in range(num_epochs):
print(f'Epoch: {epoch}')
train_sampler.set_epoch(epoch)
for i, (input_data, target) in enumerate(train_loader_ddp):
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss = criterion_pt(predicted, target)
loss.backward()
optimizer_pt.step()
optimizer_pt.zero_grad()
for _ in train_loader:
pass
for _ in val_loader:
pass
print('Evaluating')
val_loss, val_pred, val_true = [], [], []
model_pt.eval()
with torch.no_grad():
for input_data, target in val_loader_ddp:
input_data = input_data.to(device)
target = target.to(device)
predicted = model_pt(input_data)
loss_batch = criterion_pt(predicted, target).cpu().item()
val_pred += predicted.argmax(dim=1, keepdim=False).cpu().tolist()
val_true += target.cpu().tolist()
val_loss.append(loss_batch)
with open(f'{THIS_DIR}/ddp_cnn_save/pt_ddp_predictions_{gpu}.p', 'wb') as f:
pickle.dump([val_loss, val_pred, val_true], f)
@staticmethod
def set_seeds():
manual_seed = 0
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
np.random.seed(manual_seed)
random.seed(manual_seed)
torch.manual_seed(manual_seed)
torch.cuda.manual_seed(manual_seed)
torch.cuda.manual_seed_all(manual_seed)
| true | true |
f730bee814b7a46573d0b55a5eaf76c8bd13efe5 | 1,913 | py | Python | 2021/day03.py | sree-cfa/adventOfCode | 0dffcf2b6668a37505afeedd0869f7ae2b5b93cf | [
"BSD-3-Clause"
] | null | null | null | 2021/day03.py | sree-cfa/adventOfCode | 0dffcf2b6668a37505afeedd0869f7ae2b5b93cf | [
"BSD-3-Clause"
] | null | null | null | 2021/day03.py | sree-cfa/adventOfCode | 0dffcf2b6668a37505afeedd0869f7ae2b5b93cf | [
"BSD-3-Clause"
] | null | null | null | from util.inputReader import read_as_strings
LENGTH = 12
def part1(list_of_strings):
one_count = [0] * LENGTH
zero_count = [0] * LENGTH
for string in list_of_strings:
for i, val in enumerate(string):
if val == '0':
zero_count[i] += 1
else:
one_count[i] += 1
epsilon = ""
gamma = ""
for i in range(LENGTH):
if one_count[i] >= zero_count[i]:
epsilon += '1'
gamma += '0'
else:
epsilon += '0'
gamma += '1'
return int(epsilon, 2) * int(gamma, 2)
def part2(numbers):
ogr_str = ""
co2_str = ""
ogr_bin, co2_bin = 0, 0
for i in range(LENGTH):
ogr_count_0, ogr_count_1 = 0, 0
co2_count_0, co2_count_1 = 0, 0
for number in numbers:
if number.startswith(ogr_str):
ogr_count_0 += 1 if number[i] == '0' else 0
ogr_count_1 += 1 if number[i] == '1' else 0
if number.startswith(co2_str):
co2_count_0 += 1 if number[i] == '0' else 0
co2_count_1 += 1 if number[i] == '1' else 0
if ogr_count_1 + ogr_count_0 == 1: # one number left
ogr_str = next(filter(lambda x: x.startswith(ogr_str), numbers))
ogr_bin = int(ogr_str, 2)
if co2_count_1 + co2_count_0 == 1: # one number left
co2_str = next(filter(lambda x: x.startswith(co2_str), numbers))
co2_bin = int(co2_str, 2)
ogr_str += '1' if ogr_count_1 >= ogr_count_0 else '0'
co2_str += '0' if co2_count_1 >= co2_count_0 else '1'
if ogr_bin == 0:
ogr_bin = int(ogr_str, 2)
if co2_bin == 0:
co2_bin = int(co2_str, 2)
return ogr_bin * co2_bin
lines = read_as_strings("../inputs/2021_03.txt")
print("part1:", part1(lines))
print("part2:", part2(lines))
# part2 12723489 too high
| 27.328571 | 76 | 0.543126 | from util.inputReader import read_as_strings
LENGTH = 12
def part1(list_of_strings):
one_count = [0] * LENGTH
zero_count = [0] * LENGTH
for string in list_of_strings:
for i, val in enumerate(string):
if val == '0':
zero_count[i] += 1
else:
one_count[i] += 1
epsilon = ""
gamma = ""
for i in range(LENGTH):
if one_count[i] >= zero_count[i]:
epsilon += '1'
gamma += '0'
else:
epsilon += '0'
gamma += '1'
return int(epsilon, 2) * int(gamma, 2)
def part2(numbers):
ogr_str = ""
co2_str = ""
ogr_bin, co2_bin = 0, 0
for i in range(LENGTH):
ogr_count_0, ogr_count_1 = 0, 0
co2_count_0, co2_count_1 = 0, 0
for number in numbers:
if number.startswith(ogr_str):
ogr_count_0 += 1 if number[i] == '0' else 0
ogr_count_1 += 1 if number[i] == '1' else 0
if number.startswith(co2_str):
co2_count_0 += 1 if number[i] == '0' else 0
co2_count_1 += 1 if number[i] == '1' else 0
if ogr_count_1 + ogr_count_0 == 1:
ogr_str = next(filter(lambda x: x.startswith(ogr_str), numbers))
ogr_bin = int(ogr_str, 2)
if co2_count_1 + co2_count_0 == 1:
co2_str = next(filter(lambda x: x.startswith(co2_str), numbers))
co2_bin = int(co2_str, 2)
ogr_str += '1' if ogr_count_1 >= ogr_count_0 else '0'
co2_str += '0' if co2_count_1 >= co2_count_0 else '1'
if ogr_bin == 0:
ogr_bin = int(ogr_str, 2)
if co2_bin == 0:
co2_bin = int(co2_str, 2)
return ogr_bin * co2_bin
lines = read_as_strings("../inputs/2021_03.txt")
print("part1:", part1(lines))
print("part2:", part2(lines))
| true | true |
f730bf6ba9f601b648b12146b752965863bf095b | 7,902 | py | Python | benchmarks/benchmarks/bench_function_base.py | sankalpdayal5/numpy | 9713e86cc65ebed96464f4d81bb2637857b84f44 | [
"BSD-3-Clause"
] | 1 | 2019-11-15T16:44:36.000Z | 2019-11-15T16:44:36.000Z | benchmarks/benchmarks/bench_function_base.py | sankalpdayal5/numpy | 9713e86cc65ebed96464f4d81bb2637857b84f44 | [
"BSD-3-Clause"
] | null | null | null | benchmarks/benchmarks/bench_function_base.py | sankalpdayal5/numpy | 9713e86cc65ebed96464f4d81bb2637857b84f44 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Histogram1D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 100000)
def time_full_coverage(self):
np.histogram(self.d, 200, (0, 100))
def time_small_coverage(self):
np.histogram(self.d, 200, (50, 51))
def time_fine_binning(self):
np.histogram(self.d, 10000, (0, 100))
class Histogram2D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 200000).reshape((-1,2))
def time_full_coverage(self):
np.histogramdd(self.d, (200, 200), ((0, 100), (0, 100)))
def time_small_coverage(self):
np.histogramdd(self.d, (200, 200), ((50, 51), (50, 51)))
def time_fine_binning(self):
np.histogramdd(self.d, (10000, 10000), ((0, 100), (0, 100)))
class Bincount(Benchmark):
def setup(self):
self.d = np.arange(80000, dtype=np.intp)
self.e = self.d.astype(np.float64)
def time_bincount(self):
np.bincount(self.d)
def time_weights(self):
np.bincount(self.d, weights=self.e)
class Median(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_even(self):
np.median(self.e)
def time_odd(self):
np.median(self.o)
def time_even_inplace(self):
np.median(self.e, overwrite_input=True)
def time_odd_inplace(self):
np.median(self.o, overwrite_input=True)
def time_even_small(self):
np.median(self.e[:500], overwrite_input=True)
def time_odd_small(self):
np.median(self.o[:500], overwrite_input=True)
class Percentile(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_quartile(self):
np.percentile(self.e, [25, 75])
def time_percentile(self):
np.percentile(self.e, [25, 35, 55, 65, 75])
class Select(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = [(self.d > 4), (self.d < 2)]
self.cond_large = [(self.d > 4), (self.d < 2)] * 10
def time_select(self):
np.select(self.cond, [self.d, self.e])
def time_select_larger(self):
np.select(self.cond_large, ([self.d, self.e] * 10))
def memoize(f):
_memoized = {}
def wrapped(*args):
if args not in _memoized:
_memoized[args] = f(*args)
return _memoized[args].copy()
return f
class SortGenerator(object):
# The size of the unsorted area in the "random unsorted area"
# benchmarks
AREA_SIZE = 100
# The size of the "partially ordered" sub-arrays
BUBBLE_SIZE = 100
@staticmethod
@memoize
def random(size, dtype):
"""
Returns a randomly-shuffled array.
"""
arr = np.arange(size, dtype=dtype)
np.random.shuffle(arr)
return arr
@staticmethod
@memoize
def ordered(size, dtype):
"""
Returns an ordered array.
"""
return np.arange(size, dtype=dtype)
@staticmethod
@memoize
def reversed(size, dtype):
"""
Returns an array that's in descending order.
"""
return np.arange(size-1, -1, -1, dtype=dtype)
@staticmethod
@memoize
def uniform(size, dtype):
"""
Returns an array that has the same value everywhere.
"""
return np.ones(size, dtype=dtype)
@staticmethod
@memoize
def swapped_pair(size, dtype, swap_frac):
"""
Returns an ordered array, but one that has ``swap_frac * size``
pairs swapped.
"""
a = np.arange(size, dtype=dtype)
for _ in range(int(size * swap_frac)):
x, y = np.random.randint(0, size, 2)
a[x], a[y] = a[y], a[x]
return a
@staticmethod
@memoize
def sorted_block(size, dtype, block_size):
"""
Returns an array with blocks that are all sorted.
"""
a = np.arange(size, dtype=dtype)
b = []
if size < block_size:
return a
block_num = size // block_size
for i in range(block_num):
b.extend(a[i::block_num])
return np.array(b)
@classmethod
@memoize
def random_unsorted_area(cls, size, dtype, frac, area_size=None):
"""
This type of array has random unsorted areas such that they
compose the fraction ``frac`` of the original array.
"""
if area_size is None:
area_size = cls.AREA_SIZE
area_num = int(size * frac / area_size)
a = np.arange(size, dtype=dtype)
for _ in range(area_num):
start = np.random.randint(size-area_size)
end = start + area_size
np.random.shuffle(a[start:end])
return a
@classmethod
@memoize
def random_bubble(cls, size, dtype, bubble_num, bubble_size=None):
"""
This type of array has ``bubble_num`` random unsorted areas.
"""
if bubble_size is None:
bubble_size = cls.BUBBLE_SIZE
frac = bubble_size * bubble_num / size
return cls.random_unsorted_area(size, dtype, frac, bubble_size)
class Sort(Benchmark):
"""
This benchmark tests sorting performance with several
different types of arrays that are likely to appear in
real-world applications.
"""
params = [
# In NumPy 1.17 and newer, 'merge' can be one of several
# stable sorts, it isn't necessarily merge sort.
['quick', 'merge', 'heap'],
['float64', 'int64', 'int16'],
[
('random',),
('ordered',),
('reversed',),
('uniform',),
('sorted_block', 10),
('sorted_block', 100),
('sorted_block', 1000),
# ('swapped_pair', 0.01),
# ('swapped_pair', 0.1),
# ('swapped_pair', 0.5),
# ('random_unsorted_area', 0.5),
# ('random_unsorted_area', 0.1),
# ('random_unsorted_area', 0.01),
# ('random_bubble', 1),
# ('random_bubble', 5),
# ('random_bubble', 10),
],
]
param_names = ['kind', 'dtype', 'array_type']
# The size of the benchmarked arrays.
ARRAY_SIZE = 10000
def setup(self, kind, dtype, array_type):
np.random.seed(1234)
array_class = array_type[0]
self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:])
def time_sort(self, kind, dtype, array_type):
# Using np.sort(...) instead of arr.sort(...) because it makes a copy.
# This is important because the data is prepared once per benchmark, but
# used across multiple runs.
np.sort(self.arr, kind=kind)
def time_argsort(self, kind, dtype, array_type):
np.argsort(self.arr, kind=kind)
class SortWorst(Benchmark):
def setup(self):
# quicksort median of 3 worst case
self.worst = np.arange(1000000)
x = self.worst
while x.size > 3:
mid = x.size // 2
x[mid], x[-2] = x[-2], x[mid]
x = x[:-2]
def time_sort_worst(self):
np.sort(self.worst)
# Retain old benchmark name for backward compatability
time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
class Where(Benchmark):
def setup(self):
self.d = np.arange(20000)
self.e = self.d.copy()
self.cond = (self.d > 5000)
def time_1(self):
np.where(self.cond)
def time_2(self):
np.where(self.cond, self.d, self.e)
def time_2_broadcast(self):
np.where(self.cond, self.d, 0)
| 27.248276 | 95 | 0.578208 | from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class Histogram1D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 100000)
def time_full_coverage(self):
np.histogram(self.d, 200, (0, 100))
def time_small_coverage(self):
np.histogram(self.d, 200, (50, 51))
def time_fine_binning(self):
np.histogram(self.d, 10000, (0, 100))
class Histogram2D(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 200000).reshape((-1,2))
def time_full_coverage(self):
np.histogramdd(self.d, (200, 200), ((0, 100), (0, 100)))
def time_small_coverage(self):
np.histogramdd(self.d, (200, 200), ((50, 51), (50, 51)))
def time_fine_binning(self):
np.histogramdd(self.d, (10000, 10000), ((0, 100), (0, 100)))
class Bincount(Benchmark):
def setup(self):
self.d = np.arange(80000, dtype=np.intp)
self.e = self.d.astype(np.float64)
def time_bincount(self):
np.bincount(self.d)
def time_weights(self):
np.bincount(self.d, weights=self.e)
class Median(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_even(self):
np.median(self.e)
def time_odd(self):
np.median(self.o)
def time_even_inplace(self):
np.median(self.e, overwrite_input=True)
def time_odd_inplace(self):
np.median(self.o, overwrite_input=True)
def time_even_small(self):
np.median(self.e[:500], overwrite_input=True)
def time_odd_small(self):
np.median(self.o[:500], overwrite_input=True)
class Percentile(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
self.o = np.arange(10001, dtype=np.float32)
def time_quartile(self):
np.percentile(self.e, [25, 75])
def time_percentile(self):
np.percentile(self.e, [25, 35, 55, 65, 75])
class Select(Benchmark):
    """Time np.select with a short and a long list of conditions."""
    def setup(self):
        self.d = np.arange(20000)
        self.e = self.d.copy()
        self.cond = [(self.d > 4), (self.d < 2)]
        # same two conditions repeated ten times
        self.cond_large = [(self.d > 4), (self.d < 2)] * 10
    def time_select(self):
        np.select(self.cond, [self.d, self.e])
    def time_select_larger(self):
        np.select(self.cond_large, ([self.d, self.e] * 10))
def memoize(f):
    """Cache the results of ``f`` keyed on its positional arguments.

    The wrapper returns a defensive ``.copy()`` of the cached value so
    callers (the Sort benchmarks) cannot mutate the cached array between
    runs.

    Bug fix: this decorator previously ended with ``return f``, which made
    it a no-op — the cache and the defensive copy were dead code. It now
    returns the wrapping function as intended.
    """
    _memoized = {}

    def wrapped(*args):
        if args not in _memoized:
            _memoized[args] = f(*args)
        return _memoized[args].copy()

    return wrapped
class SortGenerator(object):
    """Factories producing arrays with specific orderings for the Sort benchmark.

    Each factory is decorated with ``@memoize`` (defined above in this file).
    """
    # default size of each shuffled window in random_unsorted_area
    AREA_SIZE = 100
    # default size of each shuffled "bubble" in random_bubble
    BUBBLE_SIZE = 100
    @staticmethod
    @memoize
    def random(size, dtype):
        """A randomly shuffled permutation of arange(size)."""
        arr = np.arange(size, dtype=dtype)
        np.random.shuffle(arr)
        return arr
    @staticmethod
    @memoize
    def ordered(size, dtype):
        """An already-sorted (ascending) array."""
        return np.arange(size, dtype=dtype)
    @staticmethod
    @memoize
    def reversed(size, dtype):
        """A descending (reverse-sorted) array."""
        return np.arange(size-1, -1, -1, dtype=dtype)
    @staticmethod
    @memoize
    def uniform(size, dtype):
        """An array with a single repeated value (all ones)."""
        return np.ones(size, dtype=dtype)
    @staticmethod
    @memoize
    def swapped_pair(size, dtype, swap_frac):
        """A sorted array with ``size * swap_frac`` random pairs swapped."""
        a = np.arange(size, dtype=dtype)
        for _ in range(int(size * swap_frac)):
            x, y = np.random.randint(0, size, 2)
            a[x], a[y] = a[y], a[x]
        return a
    @staticmethod
    @memoize
    def sorted_block(size, dtype, block_size):
        """Concatenation of ``size // block_size`` sorted strided slices,
        i.e. sorted runs of roughly ``block_size`` elements each."""
        a = np.arange(size, dtype=dtype)
        b = []
        if size < block_size:
            return a
        block_num = size // block_size
        for i in range(block_num):
            b.extend(a[i::block_num])
        return np.array(b)
    @classmethod
    @memoize
    def random_unsorted_area(cls, size, dtype, frac, area_size=None):
        """A sorted array where ``size * frac / area_size`` windows of
        ``area_size`` elements have been shuffled in place."""
        if area_size is None:
            area_size = cls.AREA_SIZE
        area_num = int(size * frac / area_size)
        a = np.arange(size, dtype=dtype)
        for _ in range(area_num):
            start = np.random.randint(size-area_size)
            end = start + area_size
            np.random.shuffle(a[start:end])
        return a
    @classmethod
    @memoize
    def random_bubble(cls, size, dtype, bubble_num, bubble_size=None):
        """Delegates to random_unsorted_area with ``bubble_num`` shuffled
        windows of ``bubble_size`` elements."""
        if bubble_size is None:
            bubble_size = cls.BUBBLE_SIZE
        frac = bubble_size * bubble_num / size
        return cls.random_unsorted_area(size, dtype, frac, bubble_size)
class Sort(Benchmark):
    """Benchmark np.sort / np.argsort across sort kinds, dtypes and array layouts.

    The third parameter is a tuple: a SortGenerator factory name followed by
    its extra arguments, resolved in setup() via getattr.
    """
    params = [
        ['quick', 'merge', 'heap'],
        ['float64', 'int64', 'int16'],
        [
            ('random',),
            ('ordered',),
            ('reversed',),
            ('uniform',),
            ('sorted_block', 10),
            ('sorted_block', 100),
            ('sorted_block', 1000),
            # ('swapped_pair', 0.01),
            # ('swapped_pair', 0.1),
            # ('swapped_pair', 0.5),
            # ('random_unsorted_area', 0.5),
            # ('random_unsorted_area', 0.1),
            # ('random_unsorted_area', 0.01),
            # ('random_bubble', 1),
            # ('random_bubble', 5),
            # ('random_bubble', 10),
        ],
    ]
    param_names = ['kind', 'dtype', 'array_type']
    # The size of the benchmarked arrays.
    ARRAY_SIZE = 10000
    def setup(self, kind, dtype, array_type):
        # fixed seed keeps the generated arrays reproducible across runs
        np.random.seed(1234)
        array_class = array_type[0]
        self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:])
    def time_sort(self, kind, dtype, array_type):
        # Using np.sort(...) instead of arr.sort(...) because it makes a copy.
        # This is important because the data is prepared once per benchmark, but
        # used across multiple runs.
        np.sort(self.arr, kind=kind)
    def time_argsort(self, kind, dtype, array_type):
        np.argsort(self.arr, kind=kind)
class SortWorst(Benchmark):
    """Benchmark np.sort on a quicksort median-of-3 worst-case input."""
    def setup(self):
        # quicksort median of 3 worst case: repeatedly swap the middle element
        # with the second-to-last one, then shrink the working view by two.
        self.worst = np.arange(1000000)
        x = self.worst
        while x.size > 3:
            mid = x.size // 2
            x[mid], x[-2] = x[-2], x[mid]
            x = x[:-2]
    def time_sort_worst(self):
        np.sort(self.worst)
    # Retain old benchmark name for backward compatibility
    time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
class Where(Benchmark):
    """Benchmark np.where in its one- and three-argument forms."""
    def setup(self):
        self.d = np.arange(20000)
        self.e = self.d.copy()
        self.cond = (self.d > 5000)
    def time_1(self):
        # condition only: returns the indices where cond is True
        np.where(self.cond)
    def time_2(self):
        # element-wise selection between two same-shaped arrays
        np.where(self.cond, self.d, self.e)
    def time_2_broadcast(self):
        # scalar third argument exercises broadcasting of `y`
        np.where(self.cond, self.d, 0)
| true | true |
f730c073a0c4b6559a2cfcc7ed69427ce1242339 | 836 | py | Python | tests/test_exception.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | null | null | null | tests/test_exception.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | 3 | 2020-04-17T05:24:06.000Z | 2022-02-10T09:00:22.000Z | tests/test_exception.py | Clayful/clayful-python | ddd5f1f986fb0079d5128e17f4b0fdce83b4cec1 | [
"MIT"
] | null | null | null | import unittest
from clayful.exception import ClayfulException
class ClayfulExceptionTest(unittest.TestCase):
def test_clayful_error_constructor(self):
error = ClayfulException(
'Brand',
'get',
400,
{},
'g-no-model',
'my message',
{}
)
self.assertEqual(error.is_clayful, True)
self.assertEqual(error.model, 'Brand')
self.assertEqual(error.method, 'get')
self.assertEqual(error.status, 400)
self.assertEqual(error.headers, {})
self.assertEqual(error.code, 'g-no-model')
self.assertEqual(error.message, 'my message')
self.assertEqual(error.validation, {})
def test_throw_clayful_error(self):
try:
raise ClayfulException(
'Brand',
'get',
400,
{},
'g-no-model',
'my message',
{}
)
except ClayfulException as e:
self.assertEqual(e.is_clayful, True) | 19.44186 | 47 | 0.679426 | import unittest
from clayful.exception import ClayfulException
class ClayfulExceptionTest(unittest.TestCase):
	def test_clayful_error_constructor(self):
		"""Each positional argument should be copied onto its attribute."""
		# positional order: model, method, status, headers, code, message, validation
		error = ClayfulException(
			'Brand',
			'get',
			400,
			{},
			'g-no-model',
			'my message',
			{}
		)
		self.assertEqual(error.is_clayful, True)
		self.assertEqual(error.model, 'Brand')
		self.assertEqual(error.method, 'get')
		self.assertEqual(error.status, 400)
		self.assertEqual(error.headers, {})
		self.assertEqual(error.code, 'g-no-model')
		self.assertEqual(error.message, 'my message')
		self.assertEqual(error.validation, {})
def test_throw_clayful_error(self):
try:
raise ClayfulException(
'Brand',
'get',
400,
{},
'g-no-model',
'my message',
{}
)
except ClayfulException as e:
self.assertEqual(e.is_clayful, True) | true | true |
f730c0c1c337facbe3bca9654deb29a5694579fe | 618 | py | Python | models/recommendation/tensorflow/wide_deep_large_ds/inference/__init__.py | yangw1234/models-1 | 7e7f484f4f22c760f9a5af836f57a3602b4fa7a6 | [
"Apache-2.0"
] | 357 | 2019-01-23T23:54:30.000Z | 2022-03-31T05:32:25.000Z | models/recommendation/tensorflow/wide_deep_large_ds/inference/__init__.py | yangw1234/models-1 | 7e7f484f4f22c760f9a5af836f57a3602b4fa7a6 | [
"Apache-2.0"
] | 65 | 2019-02-06T15:35:35.000Z | 2022-03-25T09:56:48.000Z | models/recommendation/tensorflow/wide_deep_large_ds/inference/__init__.py | yangw1234/models-1 | 7e7f484f4f22c760f9a5af836f57a3602b4fa7a6 | [
"Apache-2.0"
] | 164 | 2019-02-06T15:05:57.000Z | 2022-03-31T11:48:14.000Z | #
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
| 29.428571 | 74 | 0.737864 | true | true | |
f730c12f66e371e778dfd8bfc1d9044399d1afe0 | 74,951 | py | Python | src/azure-cli/azure/cli/command_modules/acs/decorator.py | charliedmcb/azure-cli | 6bc9519c91e3c241d476d1351b6e9b7543190f47 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/decorator.py | charliedmcb/azure-cli | 6bc9519c91e3c241d476d1351b6e9b7543190f47 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/decorator.py | charliedmcb/azure-cli | 6bc9519c91e3c241d476d1351b6e9b7543190f47 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.prompting import NoTTYException, prompt, prompt_pass
from knack.log import get_logger
from typing import Any, List, Dict, Tuple, Union
from azure.cli.core import AzCommandsLoader
from azure.cli.core.azclierror import (
CLIInternalError,
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
InvalidArgumentValueError,
NoTTYError,
)
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.profiles import ResourceType
from .custom import (
_get_rg_location,
_validate_ssh_key,
_get_default_dns_prefix,
_set_vm_set_type,
set_load_balancer_sku,
get_subscription_id,
_ensure_aks_service_principal,
)
logger = get_logger(__name__)
def safe_list_get(li: List, idx: int, default: Any = None):
    """Return ``li[idx]`` when ``li`` is a list and ``idx`` is a valid index.

    An out-of-range index yields ``default``; any non-list input yields
    ``None`` (not ``default``), matching the original contract.
    """
    if not isinstance(li, list):
        return None
    try:
        return li[idx]
    except IndexError:
        return default
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class AKSCreateModels:
    """Holder for the model classes used during cluster creation.

    For each model name, the class matching the api version selected by
    `resource_type` is fetched through the command loader and exposed as an
    attribute of the same name on this object.
    """
    def __init__(
        self,
        cmd: AzCommandsLoader,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        self.__cmd = cmd
        self.resource_type = resource_type
        # All model classes required by the create path; each is resolved
        # with the same resource_type/operation_group, so load them in a loop.
        model_names = (
            "ManagedClusterWindowsProfile",
            "ManagedClusterSKU",
            "ContainerServiceNetworkProfile",
            "ContainerServiceLinuxProfile",
            "ManagedClusterServicePrincipalProfile",
            "ContainerServiceSshConfiguration",
            "ContainerServiceSshPublicKey",
            "ManagedClusterAADProfile",
            "ManagedClusterAutoUpgradeProfile",
            "ManagedClusterAgentPoolProfile",
            "ManagedClusterIdentity",
            "UserAssignedIdentity",
            "ManagedCluster",
            "ManagedServiceIdentityUserAssignedIdentitiesValue",
            "ExtendedLocation",
            "ExtendedLocationTypes",
            # not directly used
            "ManagedClusterAPIServerAccessProfile",
        )
        for model_name in model_names:
            setattr(
                self,
                model_name,
                self.__cmd.get_models(
                    model_name,
                    resource_type=self.resource_type,
                    operation_group="managed_clusters",
                ),
            )
# pylint: disable=too-many-public-methods
class AKSCreateContext:
# Used to store intermediate variables (usually this stores the dynamically completed value of the parameter,
# which has not been decorated into the `mc` object, and some pure intermediate variables (such as the
# subscription ID)) and a copy of the original function parameters, and provide "getter" methods for all
# parameters.
# To dynamically complete a parameter or check the validity of a parameter, please provide a "getter" function
# named `get_xxx`, where `xxx` is the parameter name. In this function, the process of obtaining parameter
# values, dynamic completion (optional), and validation (optional) should be followed. The obtaining of
# parameter values should further follow the order of obtaining from the `mc` object, from the intermediates,
# or from the original value.
# Note: Dynamic completion will also perform some operations that regulate parameter values, such as
# converting int 0 to None.
# Attention: In case of checking the validity of parameters, be sure not to set the `enable_validation` to
# `True` to avoid loop calls, when using the getter function to obtain the value of other parameters.
# Attention: After the parameter is dynamically completed, it must be added to the intermediates; and after
# the parameter is decorated into the `mc` object, the corresponding intermediate should be deleted.
# Attention: One of the most basic principles is that when the parameter/profile is decorated into the `mc`
# object, it should never be modified, only read-only operations (e.g. validation) can be performed.
def __init__(self, cmd: AzCliCommand, raw_parameters: Dict):
self.cmd = cmd
if not isinstance(raw_parameters, dict):
raise CLIInternalError(
"Unexpected raw_parameters object with type '{}'.".format(
type(raw_parameters)
)
)
self.raw_param = raw_parameters
self.intermediates = dict()
self.mc = None
def attach_mc(self, mc):
if self.mc is None:
self.mc = mc
else:
msg = "the same" if self.mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the `mc` object again, the two objects are {}.".format(
msg
)
)
    def get_intermediate(self, variable_name: str, default_value: Any = None):
        """Look up an intermediate variable by name.

        When the name is unknown, a debug message is logged and
        `default_value` is returned instead.

        :param variable_name: key of the intermediate to fetch
        :param default_value: value returned when the key is missing
        """
        if variable_name not in self.intermediates:
            msg = "The intermediate '{}' does not exist, return default value '{}'.".format(
                variable_name, default_value
            )
            logger.debug(msg)
        return self.intermediates.get(variable_name, default_value)
def set_intermediate(
self, variable_name: str, value: Any, overwrite_exists: bool = False
):
if variable_name in self.intermediates:
if overwrite_exists:
msg = "The intermediate '{}' is overwritten. Original value: '{}', new value: '{}'.".format(
variable_name, self.intermediates.get(variable_name), value
)
logger.debug(msg)
self.intermediates[variable_name] = value
elif self.intermediates.get(variable_name) != value:
msg = "The intermediate '{}' already exists, but overwrite is not enabled." \
"Original value: '{}', candidate value: '{}'.".format(
variable_name,
self.intermediates.get(variable_name),
value,
)
# warning level log will be output to the console, which may cause confusion to users
logger.warning(msg)
else:
self.intermediates[variable_name] = value
def remove_intermediate(self, variable_name: str):
self.intermediates.pop(variable_name, None)
# pylint: disable=unused-argument
def get_resource_group_name(self, **kwargs) -> str:
"""Obtain the value of resource_group_name.
Note: resource_group_name will not be decorated into the `mc` object.
The value of this parameter should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
resource_group_name = self.raw_param.get("resource_group_name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return resource_group_name
# pylint: disable=unused-argument
def get_name(self, **kwargs) -> str:
"""Obtain the value of name.
Note: name will not be decorated into the `mc` object.
The value of this parameter should be provided by user explicitly.
:return: string
"""
# read the original value passed by the command
name = self.raw_param.get("name")
# this parameter does not need dynamic completion
# this parameter does not need validation
return name
    # pylint: disable=unused-argument
    def get_ssh_key_value(
        self, enable_validation: bool = False, **kwargs
    ) -> str:
        """Obtain the value of ssh_key_value.

        If the user does not specify this parameter, the validator function "validate_ssh_key" checks the default file
        location "~/.ssh/id_rsa.pub", if the file exists, read its content and return; otherwise, create a key pair at
        "~/.ssh/id_rsa.pub" and return the public key.
        If the user provides a string-like input, the validator function "validate_ssh_key" checks whether it is a file
        path, if so, read its content and return; if it is a valid public key, return it; otherwise, create a key pair
        there and return the public key.
        This function supports the option of enable_validation. When enabled, it will call "_validate_ssh_key" to
        verify the validity of ssh_key_value. If parameter no_ssh_key is set to True, verification will be skipped;
        otherwise, a CLIError will be raised when the value of ssh_key_value is invalid.

        :return: string
        """
        # read the original value passed by the command
        raw_value = self.raw_param.get("ssh_key_value")
        # try to read the property value corresponding to the parameter from the `mc` object
        # (the key data of the first public key of the linux profile)
        value_obtained_from_mc = None
        if (
            self.mc and
            self.mc.linux_profile and
            self.mc.linux_profile.ssh and
            self.mc.linux_profile.ssh.public_keys
        ):
            public_key_obj = safe_list_get(
                self.mc.linux_profile.ssh.public_keys, 0, None
            )
            if public_key_obj:
                value_obtained_from_mc = public_key_obj.key_data
        # set default value: the value decorated into `mc` wins over the raw value
        if value_obtained_from_mc is not None:
            ssh_key_value = value_obtained_from_mc
        else:
            ssh_key_value = raw_value
        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            _validate_ssh_key(
                no_ssh_key=self.get_no_ssh_key(), ssh_key_value=ssh_key_value
            )
        return ssh_key_value
    # pylint: disable=unused-argument
    def get_dns_name_prefix(
        self, enable_validation: bool = False, **kwargs
    ) -> Union[str, None]:
        """Dynamically obtain the value of dns_name_prefix according to the context.

        (Docstring fix: this previously described "ssh_key_value" by mistake.)
        When both dns_name_prefix and fqdn_subdomain are not assigned, dynamic completion will be triggered. Function
        "_get_default_dns_prefix" will be called to create a default dns_name_prefix composed of name(cluster),
        resource_group_name, and subscription_id.
        This function supports the option of enable_validation. When enabled, it will check if both dns_name_prefix and
        fqdn_subdomain are assigned, if so, raise the MutuallyExclusiveArgumentError.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: string or None
        """
        parameter_name = "dns_name_prefix"
        # read the original value passed by the command
        raw_value = self.raw_param.get(parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc:
            value_obtained_from_mc = self.mc.dns_prefix
        # set default value
        read_from_mc = False
        if value_obtained_from_mc is not None:
            dns_name_prefix = value_obtained_from_mc
            read_from_mc = True
        else:
            dns_name_prefix = raw_value
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return dns_name_prefix
        dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion
        if not dns_name_prefix and not self.get_fqdn_subdomain():
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        # In case the user does not specify the parameter and it meets the conditions of automatic completion,
        # necessary information is dynamically completed.
        if dynamic_completion:
            dns_name_prefix = _get_default_dns_prefix(
                name=self.get_name(),
                resource_group_name=self.get_resource_group_name(),
                subscription_id=self.get_intermediate("subscription_id"),
            )
        # validation
        if enable_validation:
            if dns_name_prefix and self.get_fqdn_subdomain():
                raise MutuallyExclusiveArgumentError(
                    "--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
                )
        return dns_name_prefix
# pylint: disable=unused-argument
def get_location(self, **kwargs) -> str:
"""Dynamically obtain the value of location according to the context.
When location is not assigned, dynamic completion will be triggerd. Function "_get_rg_location" will be called
to get the location of the provided resource group, which internally used ResourceManagementClient to send
the request.
:return: string
"""
parameter_name = "location"
# read the original value passed by the command
raw_value = self.raw_param.get(parameter_name)
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.location
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
location = value_obtained_from_mc
read_from_mc = True
else:
location = raw_value
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if location is None:
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
location = _get_rg_location(
self.cmd.cli_ctx, self.get_resource_group_name()
)
# this parameter does not need validation
return location
# pylint: disable=unused-argument
def get_kubernetes_version(self, **kwargs) -> str:
"""Obtain the value of kubernetes_version.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("kubernetes_version")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.kubernetes_version
# set default value
if value_obtained_from_mc is not None:
kubernetes_version = value_obtained_from_mc
else:
kubernetes_version = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return kubernetes_version
    # pylint: disable=unused-argument
    def get_no_ssh_key(self, enable_validation: bool = False, **kwargs) -> bool:
        """Obtain the value of no_ssh_key.

        (Docstring fix: this previously said "Obtain the value of name." and
        carried an unrelated paragraph about dns_name_prefix/fqdn_subdomain.)
        Note: no_ssh_key will not be decorated into the `mc` object.
        This function supports the option of enable_validation. When enabled, it will call "_validate_ssh_key" to
        verify the validity of ssh_key_value. If parameter no_ssh_key is set to True, verification will be skipped;
        otherwise, a CLIError will be raised when the value of ssh_key_value is invalid.

        :return: bool
        """
        # read the original value passed by the command
        no_ssh_key = self.raw_param.get("no_ssh_key")
        # this parameter does not need dynamic completion
        # validation
        if enable_validation:
            _validate_ssh_key(
                no_ssh_key=no_ssh_key, ssh_key_value=self.get_ssh_key_value()
            )
        return no_ssh_key
    # pylint: disable=unused-argument
    def get_vm_set_type(self, **kwargs) -> str:
        """Dynamically obtain the value of vm_set_type according to the context.

        Dynamic completion will be triggered by default. Function "_set_vm_set_type" will be called and the
        corresponding vm set type will be returned according to the value of kubernetes_version. It will also
        normalize the value as server validation is case-sensitive.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: string
        """
        parameter_name = "vm_set_type"
        # read the original value passed by the command
        raw_value = self.raw_param.get(parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        # (the type of the first agent pool profile)
        value_obtained_from_mc = None
        if self.mc and self.mc.agent_pool_profiles:
            agent_pool_profile = safe_list_get(
                self.mc.agent_pool_profiles, 0, None
            )
            if agent_pool_profile:
                value_obtained_from_mc = agent_pool_profile.type
        # set default value
        read_from_mc = False
        if value_obtained_from_mc is not None:
            vm_set_type = value_obtained_from_mc
            read_from_mc = True
        else:
            vm_set_type = raw_value
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return vm_set_type
        # the value verified by the validator may have case problems, and the
        # "_set_vm_set_type" function will adjust it
        dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            vm_set_type = _set_vm_set_type(
                vm_set_type=vm_set_type,
                kubernetes_version=self.get_kubernetes_version(),
            )
        # this parameter does not need validation
        return vm_set_type
    # pylint: disable=unused-argument
    def get_load_balancer_sku(
        self, enable_validation: bool = False, **kwargs
    ) -> str:
        """Dynamically obtain the value of load_balancer_sku according to the context.

        When load_balancer_sku is not assigned, dynamic completion will be triggered. Function "set_load_balancer_sku"
        will be called and the corresponding load balancer sku will be returned according to the value of
        kubernetes_version.
        This function supports the option of enable_validation. When enabled, it will check if load_balancer_sku equals
        to "basic" when api_server_authorized_ip_ranges is assigned, if so, raise the MutuallyExclusiveArgumentError.
        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: string
        """
        parameter_name = "load_balancer_sku"
        # read the original value passed by the command
        raw_value = self.raw_param.get(parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        value_obtained_from_mc = None
        if self.mc and self.mc.network_profile:
            value_obtained_from_mc = self.mc.network_profile.load_balancer_sku
        # set default value
        read_from_mc = False
        if value_obtained_from_mc is not None:
            load_balancer_sku = value_obtained_from_mc
            read_from_mc = True
        else:
            load_balancer_sku = raw_value
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return load_balancer_sku
        dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion
        if not load_balancer_sku:
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            load_balancer_sku = set_load_balancer_sku(
                sku=load_balancer_sku,
                kubernetes_version=self.get_kubernetes_version(),
            )
        # validation
        if enable_validation:
            if (
                load_balancer_sku == "basic" and
                self.get_api_server_authorized_ip_ranges()
            ):
                raise MutuallyExclusiveArgumentError(
                    "--api-server-authorized-ip-ranges can only be used with standard load balancer"
                )
        return load_balancer_sku
# pylint: disable=unused-argument
def get_api_server_authorized_ip_ranges(
self, enable_validation: bool = False, **kwargs
) -> Union[str, List[str], None]:
"""Obtain the value of api_server_authorized_ip_ranges.
This function supports the option of enable_validation. When enabled, it will check if load_balancer_sku equals
to "basic" when api_server_authorized_ip_ranges is assigned, if so, raise the MutuallyExclusiveArgumentError.
:return: string, empty list or list of strings, or None
"""
parameter_name = "api_server_authorized_ip_ranges"
# read the original value passed by the command
raw_value = self.raw_param.get(parameter_name)
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.api_server_access_profile:
value_obtained_from_mc = (
self.mc.api_server_access_profile.authorized_ip_ranges
)
# set default value
if value_obtained_from_mc is not None:
api_server_authorized_ip_ranges = value_obtained_from_mc
else:
api_server_authorized_ip_ranges = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
if (
api_server_authorized_ip_ranges and
self.get_load_balancer_sku() == "basic"
):
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
return api_server_authorized_ip_ranges
# pylint: disable=unused-argument
def get_fqdn_subdomain(
self, enable_validation: bool = False, **kwargs
) -> Union[str, None]:
"""Obtain the value of fqdn_subdomain.
This function supports the option of enable_validation. When enabled, it will check if both dns_name_prefix and
fqdn_subdomain are assigend, if so, raise the MutuallyExclusiveArgumentError.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("fqdn_subdomain")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.fqdn_subdomain
# set default value
if value_obtained_from_mc is not None:
fqdn_subdomain = value_obtained_from_mc
else:
fqdn_subdomain = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
if fqdn_subdomain and self.get_dns_name_prefix(read_only=True):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return fqdn_subdomain
# pylint: disable=unused-argument
def get_nodepool_name(self, **kwargs) -> str:
"""Dynamically obtain the value of nodepool_name according to the context.
When additional option enable_trim is enabled, dynamic completion will be triggerd.
This function supports the option of enable_trim. When enabled, it will normalize the value of nodepool_name.
If no value is assigned, the default value "nodepool1" is set, and if the string length is greater than 12,
it is truncated.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_name")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.name
# set default value
read_from_mc = False
if value_obtained_from_mc is not None:
nodepool_name = value_obtained_from_mc
read_from_mc = True
else:
nodepool_name = raw_value
dynamic_completion = False
# check whether the parameter meet the conditions of dynamic completion
if kwargs.get("enable_trim", False):
dynamic_completion = True
# disable dynamic completion if the value is read from `mc`
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
if not nodepool_name:
nodepool_name = "nodepool1"
else:
nodepool_name = nodepool_name[:12]
# this parameter does not need validation
return nodepool_name
# pylint: disable=unused-argument
def get_nodepool_tags(self, **kwargs) -> Union[Dict[str, str], None]:
"""Obtain the value of nodepool_tags.
:return: Dictionary or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_tags")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.tags
# set default value
if value_obtained_from_mc is not None:
nodepool_tags = value_obtained_from_mc
else:
nodepool_tags = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return nodepool_tags
# pylint: disable=unused-argument
def get_nodepool_labels(self, **kwargs) -> Union[Dict[str, str], None]:
"""Obtain the value of nodepool_labels.
:return: Dictionary or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("nodepool_labels")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.node_labels
# set default value
if value_obtained_from_mc is not None:
nodepool_labels = value_obtained_from_mc
else:
nodepool_labels = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return nodepool_labels
# pylint: disable=unused-argument
def get_node_count(self, enable_validation: bool = False, **kwargs) -> int:
"""Obtain the value of node_count.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether node_count is between min_count and
max_count, if not, raise the InvalidArgumentValueError.
:return: int
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_count")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.count
# set default value
if value_obtained_from_mc is not None:
node_count = value_obtained_from_mc
else:
node_count = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
min_count = self.get_min_count()
max_count = self.get_max_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
return int(node_count)
# pylint: disable=unused-argument
def get_node_vm_size(self, **kwargs) -> str:
"""Obtain the value of node_vm_size.
:return: string
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_vm_size")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vm_size
# set default value
if value_obtained_from_mc is not None:
node_vm_size = value_obtained_from_mc
else:
node_vm_size = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_vm_size
# pylint: disable=unused-argument
def get_vnet_subnet_id(self, **kwargs) -> Union[str, None]:
"""Obtain the value of vnet_subnet_id.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("vnet_subnet_id")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vnet_subnet_id
# set default value
if value_obtained_from_mc is not None:
vnet_subnet_id = value_obtained_from_mc
else:
vnet_subnet_id = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return vnet_subnet_id
# pylint: disable=unused-argument
def get_ppg(self, **kwargs) -> Union[str, None]:
"""Obtain the value of ppg(proximity_placement_group_id).
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("ppg")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.proximity_placement_group_id
)
# set default value
if value_obtained_from_mc is not None:
ppg = value_obtained_from_mc
else:
ppg = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return ppg
# pylint: disable=unused-argument
def get_zones(self, **kwargs) -> Union[List[str], None]:
"""Obtain the value of zones.
:return: list of strings or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("zones")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.availability_zones
# set default value
if value_obtained_from_mc is not None:
zones = value_obtained_from_mc
else:
zones = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return zones
# pylint: disable=unused-argument
def get_enable_node_public_ip(self, **kwargs) -> bool:
"""Obtain the value of enable_node_public_ip.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_node_public_ip")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_node_public_ip
)
# set default value
if value_obtained_from_mc is not None:
enable_node_public_ip = value_obtained_from_mc
else:
enable_node_public_ip = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_node_public_ip
# pylint: disable=unused-argument
def get_node_public_ip_prefix_id(self, **kwargs) -> Union[str, None]:
"""Obtain the value of node_public_ip_prefix_id.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_public_ip_prefix_id")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.node_public_ip_prefix_id
)
# set default value
if value_obtained_from_mc is not None:
node_public_ip_prefix_id = value_obtained_from_mc
else:
node_public_ip_prefix_id = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_public_ip_prefix_id
# pylint: disable=unused-argument
def get_enable_encryption_at_host(self, **kwargs) -> bool:
"""Obtain the value of enable_encryption_at_host.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_encryption_at_host")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_encryption_at_host
)
# set default value
if value_obtained_from_mc is not None:
enable_encryption_at_host = value_obtained_from_mc
else:
enable_encryption_at_host = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_encryption_at_host
# pylint: disable=unused-argument
def get_enable_ultra_ssd(self, **kwargs) -> bool:
"""Obtain the value of enable_ultra_ssd.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_ultra_ssd")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_ultra_ssd
# set default value
if value_obtained_from_mc is not None:
enable_ultra_ssd = value_obtained_from_mc
else:
enable_ultra_ssd = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_ultra_ssd
# pylint: disable=unused-argument
def get_max_pods(self, **kwargs) -> Union[int, None]:
"""Obtain the value of max_pods.
Note: int 0 is converted to None.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("max_pods")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_pods
# set default value
if value_obtained_from_mc is not None:
max_pods = value_obtained_from_mc
else:
max_pods = raw_value
# Note: int 0 is converted to None
if max_pods:
max_pods = int(max_pods)
else:
max_pods = None
# this parameter does not need validation
return max_pods
# pylint: disable=unused-argument
def get_node_osdisk_size(self, **kwargs) -> Union[int, None]:
"""Obtain the value of node_osdisk_size.
Note: int 0 is converted to None.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_osdisk_size")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_size_gb
# set default value
if value_obtained_from_mc is not None:
node_osdisk_size = value_obtained_from_mc
else:
node_osdisk_size = raw_value
# Note: 0 is converted to None
if node_osdisk_size:
node_osdisk_size = int(node_osdisk_size)
else:
node_osdisk_size = None
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_size
# pylint: disable=unused-argument
def get_node_osdisk_type(self, **kwargs) -> Union[str, None]:
"""Obtain the value of node_osdisk_size.
:return: string or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("node_osdisk_type")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_type
# set default value
if value_obtained_from_mc is not None:
node_osdisk_type = value_obtained_from_mc
else:
node_osdisk_type = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return node_osdisk_type
# pylint: disable=unused-argument
def get_enable_cluster_autoscaler(
self, enable_validation: bool = False, **kwargs
) -> bool:
"""Obtain the value of enable_cluster_autoscaler.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether min_count is less than max_count and
node_count is between min_count and max_count, if not, raise the InvalidArgumentValueError. If
enable_cluster_autoscaler is not enabled, it will check whether any of min_count or max_count is assigned,
if so, raise the RequiredArgumentMissingError.
:return: bool
"""
# read the original value passed by the command
raw_value = self.raw_param.get("enable_cluster_autoscaler")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_auto_scaling
# set default value
if value_obtained_from_mc is not None:
enable_cluster_autoscaler = value_obtained_from_mc
else:
enable_cluster_autoscaler = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
min_count = self.get_min_count()
max_count = self.get_max_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return enable_cluster_autoscaler
# pylint: disable=unused-argument
def get_min_count(
self, enable_validation: bool = False, **kwargs
) -> Union[int, None]:
"""Obtain the value of min_count.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether min_count is less than max_count and
node_count is between min_count and max_count, if not, raise the InvalidArgumentValueError. If
enable_cluster_autoscaler is not enabled, it will check whether any of min_count or max_count is assigned,
if so, raise the RequiredArgumentMissingError.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("min_count")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.min_count
# set default value
if value_obtained_from_mc is not None:
min_count = value_obtained_from_mc
else:
min_count = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
max_count = self.get_max_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return min_count
# pylint: disable=unused-argument
def get_max_count(
self, enable_validation: bool = False, **kwargs
) -> Union[int, None]:
"""Obtain the value of max_count.
This function supports the option of enable_validation. When enabled, on the premise that
enable_cluster_autoscaler is enabled, it will check whether both min_count and max_count are assigned, if not,
raise the RequiredArgumentMissingError; if will also check whether min_count is less than max_count and
node_count is between min_count and max_count, if not, raise the InvalidArgumentValueError. If
enable_cluster_autoscaler is not enabled, it will check whether any of min_count or max_count is assigned,
if so, raise the RequiredArgumentMissingError.
:return: int or None
"""
# read the original value passed by the command
raw_value = self.raw_param.get("max_count")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_count
# set default value
if value_obtained_from_mc is not None:
max_count = value_obtained_from_mc
else:
max_count = raw_value
# this parameter does not need dynamic completion
# validation
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
min_count = self.get_min_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return max_count
# pylint: disable=unused-argument
def get_admin_username(self, **kwargs) -> str:
"""Obtain the value of admin_username.
:return: str
"""
# read the original value passed by the command
raw_value = self.raw_param.get("admin_username")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.linux_profile:
value_obtained_from_mc = self.mc.linux_profile.admin_username
# set default value
if value_obtained_from_mc is not None:
admin_username = value_obtained_from_mc
else:
admin_username = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return admin_username
    # pylint: disable=unused-argument
    def get_windows_admin_username_and_password(
        self, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Dynamically obtain the value of windows_admin_username and windows_admin_password according to the context.

        When one of windows_admin_username and windows_admin_password is not assigned, dynamic completion will be
        triggered. The user will be prompted to enter the missing windows_admin_username or windows_admin_password in
        tty (pseudo terminal). If the program is running in a non-interactive environment, a NoTTYError error will be
        raised.

        This function supports the option of read_only. When enabled, it will skip dynamic completion and validation.

        :return: a tuple containing two elements of string or None
        """
        # windows_admin_username
        # read the original value passed by the command
        username_raw_value = self.raw_param.get("windows_admin_username")
        # try to read the property value corresponding to the parameter from the `mc` object
        username_value_obtained_from_mc = None
        if self.mc and self.mc.windows_profile:
            username_value_obtained_from_mc = (
                self.mc.windows_profile.admin_username
            )
        # set default value; a value read from `mc` takes precedence
        username_read_from_mc = False
        if username_value_obtained_from_mc is not None:
            windows_admin_username = username_value_obtained_from_mc
            username_read_from_mc = True
        else:
            windows_admin_username = username_raw_value
        # windows_admin_password
        # read the original value passed by the command
        password_raw_value = self.raw_param.get("windows_admin_password")
        # try to read the property value corresponding to the parameter from the `mc` object
        password_value_obtained_from_mc = None
        if self.mc and self.mc.windows_profile:
            password_value_obtained_from_mc = (
                self.mc.windows_profile.admin_password
            )
        # set default value; a value read from `mc` takes precedence
        password_read_from_mc = False
        if password_value_obtained_from_mc is not None:
            windows_admin_password = password_value_obtained_from_mc
            password_read_from_mc = True
        else:
            windows_admin_password = password_raw_value
        # consistency check: username and password must both come from `mc`,
        # or both come from the raw parameters
        if username_read_from_mc != password_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of windows admin name and password is read from the `mc` object."
            )
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return windows_admin_username, windows_admin_password
        username_dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion
        # to avoid that windows_admin_password is set but windows_admin_username is not
        if windows_admin_username is None and windows_admin_password:
            username_dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        username_dynamic_completion = (
            username_dynamic_completion and not username_read_from_mc
        )
        if username_dynamic_completion:
            try:
                windows_admin_username = prompt("windows_admin_username: ")
                # The validation for admin_username in ManagedClusterWindowsProfile will fail even if
                # users still set windows_admin_username to empty here
            except NoTTYException:
                raise NoTTYError(
                    "Please specify username for Windows in non-interactive mode."
                )
        password_dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion
        # to avoid that windows_admin_username is set but windows_admin_password is not
        if windows_admin_password is None and windows_admin_username:
            password_dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        password_dynamic_completion = (
            password_dynamic_completion and not password_read_from_mc
        )
        if password_dynamic_completion:
            try:
                windows_admin_password = prompt_pass(
                    msg="windows-admin-password: ", confirm=True
                )
            except NoTTYException:
                raise NoTTYError(
                    "Please specify both username and password in non-interactive mode."
                )
        # these parameters do not need validation
        return windows_admin_username, windows_admin_password
# pylint: disable=unused-argument
def get_enable_ahub(self, **kwargs) -> bool:
"""Obtain the value of enable_ahub.
Note: This parameter will not be directly decorated into the `mc` object.
:return: bool
"""
# read the original value passed by the command
enable_ahub = self.raw_param.get("enable_ahub")
# read the original value passed by the command
raw_value = self.raw_param.get("enable_ahub")
# try to read the property value corresponding to the parameter from the `mc` object
value_obtained_from_mc = None
if self.mc and self.mc.windows_profile:
value_obtained_from_mc = self.mc.windows_profile.license_type == "Windows_Server"
# set default value
if value_obtained_from_mc is not None:
enable_ahub = value_obtained_from_mc
else:
enable_ahub = raw_value
# this parameter does not need dynamic completion
# this parameter does not need validation
return enable_ahub
    # pylint: disable=unused-argument,too-many-statements
    def get_service_principal_and_client_secret(
        self, **kwargs
    ) -> Tuple[Union[str, None], Union[str, None]]:
        """Dynamically obtain the values of service_principal and client_secret according to the context.

        When service_principal and client_secret are not assigned and enable_managed_identity is True, dynamic
        completion will not be triggered. For other cases, dynamic completion will be triggered.

        When client_secret is given but service_principal is not, dns_name_prefix or fqdn_subdomain will be used to
        create a service principal. The parameters subscription_id, location and name(cluster) are also required when
        calling function "_ensure_aks_service_principal".

        When service_principal is given but client_secret is not, function "_ensure_aks_service_principal" would raise
        CLIError.

        :return: a tuple containing two elements of string or None
        """
        # service_principal
        sp_parameter_name = "service_principal"
        sp_property_name_in_mc = "client_id"
        # read the original value passed by the command
        sp_raw_value = self.raw_param.get(sp_parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        sp_value_obtained_from_mc = None
        if self.mc and self.mc.service_principal_profile:
            sp_value_obtained_from_mc = getattr(
                self.mc.service_principal_profile, sp_property_name_in_mc
            )
        # set default value; a value read from `mc` takes precedence
        sp_read_from_mc = False
        if sp_value_obtained_from_mc is not None:
            service_principal = sp_value_obtained_from_mc
            sp_read_from_mc = True
        else:
            service_principal = sp_raw_value
        # client_secret
        secret_parameter_name = "client_secret"
        secret_property_name_in_mc = "secret"
        # read the original value passed by the command
        secret_raw_value = self.raw_param.get(secret_parameter_name)
        # try to read the property value corresponding to the parameter from the `mc` object
        secret_value_obtained_from_mc = None
        if self.mc and self.mc.service_principal_profile:
            secret_value_obtained_from_mc = getattr(
                self.mc.service_principal_profile, secret_property_name_in_mc
            )
        # set default value; a value read from `mc` takes precedence
        secret_read_from_mc = False
        if secret_value_obtained_from_mc is not None:
            client_secret = secret_value_obtained_from_mc
            secret_read_from_mc = True
        else:
            client_secret = secret_raw_value
        # consistency check: sp and secret must both come from `mc`, or both
        # come from the raw parameters
        if sp_read_from_mc != secret_read_from_mc:
            raise CLIInternalError(
                "Inconsistent state detected, one of sp and secret is read from the `mc` object."
            )
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return service_principal, client_secret
        # dynamic completion for service_principal and client_secret
        dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion:
        # completion is skipped only when managed identity is enabled and
        # neither sp nor secret was provided
        enable_managed_identity = self.get_enable_managed_identity(read_only=True)
        if not (
            enable_managed_identity and
            not service_principal and
            not client_secret
        ):
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = (
            dynamic_completion and
            not sp_read_from_mc and
            not secret_read_from_mc
        )
        if dynamic_completion:
            # create (or validate) the service principal; may raise CLIError
            # when sp is given without a secret
            principal_obj = _ensure_aks_service_principal(
                cli_ctx=self.cmd.cli_ctx,
                service_principal=service_principal,
                client_secret=client_secret,
                subscription_id=self.get_intermediate(
                    "subscription_id", None
                ),
                dns_name_prefix=self.get_dns_name_prefix(),
                fqdn_subdomain=self.get_fqdn_subdomain(),
                location=self.get_location(),
                name=self.get_name(),
            )
            service_principal = principal_obj.get("service_principal")
            client_secret = principal_obj.get("client_secret")
        # these parameters do not need validation
        return service_principal, client_secret
    def get_enable_managed_identity(
        self, enable_validation=False, **kwargs
    ) -> bool:
        """Dynamically obtain the value of enable_managed_identity according to the context.

        Note: This parameter will not be directly decorated into the `mc` object.

        When both service_principal and client_secret are assigned and enable_managed_identity is True, dynamic
        completion will be triggered. The value of enable_managed_identity will be set to False.

        :return: bool
        """
        # Note: This parameter will not be decorated into the `mc` object.
        # read the original value passed by the command
        raw_value = self.raw_param.get("enable_managed_identity")
        # try to read the property value corresponding to the parameter from the `mc` object:
        # a managed identity is considered enabled when an identity type is set
        value_obtained_from_mc = None
        if self.mc and self.mc.identity:
            value_obtained_from_mc = self.mc.identity.type is not None
        # set default value; a value derived from `mc` takes precedence
        read_from_mc = False
        if value_obtained_from_mc is not None:
            enable_managed_identity = value_obtained_from_mc
            read_from_mc = True
        else:
            enable_managed_identity = raw_value
        # skip dynamic completion & validation if option read_only is specified
        if kwargs.get("read_only"):
            return enable_managed_identity
        dynamic_completion = False
        # check whether the parameter meets the conditions of dynamic completion:
        # an explicitly provided sp/secret pair disables managed identity
        (
            service_principal,
            client_secret,
        ) = self.get_service_principal_and_client_secret(read_only=True)
        if service_principal and client_secret:
            dynamic_completion = True
        # disable dynamic completion if the value is read from `mc`
        dynamic_completion = dynamic_completion and not read_from_mc
        if dynamic_completion:
            enable_managed_identity = False
        # validation
        if enable_validation:
            # TODO: add validation
            pass
        return enable_managed_identity
class AKSCreateDecorator:
    def __init__(
        self,
        cmd: AzCliCommand,
        client,
        models: AKSCreateModels,
        raw_parameters: Dict,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Internal controller for the `aks create` command.

        :param cmd: the AzCliCommand object of the current command.
        :param client: the container service client used to send the request.
        :param models: a bag of SDK model classes used to assemble the ManagedCluster object.
        :param raw_parameters: dictionary of the raw parameters passed by the command.
        :param resource_type: the ResourceType used to resolve api-version-specific models.
        """
        self.cmd = cmd
        self.client = client
        self.models = models
        # store the context in the process of assemble the ManagedCluster object
        self.context = AKSCreateContext(cmd, raw_parameters)
        # `resource_type` is used to dynamically find the model (of a specific api version) provided by the
        # containerservice SDK, most models have been passed through the `modles` parameter (instantiatied
        # from `AKSCreateModels` (or `PreviewAKSCreateModels` in aks-preview), where resource_type (i.e.,
        # api version) has been specified), a very small number of models are instantiated through internal
        # functions, one use case is that `api_server_access_profile` is initialized by function
        # `_populate_api_server_access_profile` defined in `_helpers.py`
        self.resource_type = resource_type
    def init_mc(self):
        """Initialize a ManagedCluster object with the mandatory `location` parameter.

        Also records the current subscription id as an intermediate on the context,
        since it is needed by later steps (e.g. service principal creation).

        :return: the ManagedCluster object
        """
        # get subscription id and store as intermediate
        subscription_id = get_subscription_id(self.cmd.cli_ctx)
        self.context.set_intermediate(
            "subscription_id", subscription_id, overwrite_exists=True
        )
        # initialize the `ManagedCluster` object with mandatory parameters (i.e. location)
        mc = self.models.ManagedCluster(location=self.context.get_location())
        return mc
    def set_up_agent_pool_profiles(self, mc):
        """Set up the agent pool profiles for the ManagedCluster object.

        Builds a single system-mode Linux agent pool from the context parameters
        (with validation enabled for count/autoscaler related ones) and assigns
        it as the cluster's only pool.

        :param mc: the ManagedCluster object, raises CLIInternalError on a type mismatch.
        :return: the ManagedCluster object
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            # Must be 12 chars or less before ACS RP adds to it
            name=self.context.get_nodepool_name(enable_trim=True),
            tags=self.context.get_nodepool_tags(),
            node_labels=self.context.get_nodepool_labels(),
            count=self.context.get_node_count(enable_validation=True),
            vm_size=self.context.get_node_vm_size(),
            os_type="Linux",
            vnet_subnet_id=self.context.get_vnet_subnet_id(),
            proximity_placement_group_id=self.context.get_ppg(),
            availability_zones=self.context.get_zones(),
            enable_node_public_ip=self.context.get_enable_node_public_ip(),
            node_public_ip_prefix_id=self.context.get_node_public_ip_prefix_id(),
            enable_encryption_at_host=self.context.get_enable_encryption_at_host(),
            enable_ultra_ssd=self.context.get_enable_ultra_ssd(),
            max_pods=self.context.get_max_pods(),
            type=self.context.get_vm_set_type(),
            mode="System",
            os_disk_size_gb=self.context.get_node_osdisk_size(),
            os_disk_type=self.context.get_node_osdisk_type(),
            min_count=self.context.get_min_count(enable_validation=True),
            max_count=self.context.get_max_count(enable_validation=True),
            enable_auto_scaling=self.context.get_enable_cluster_autoscaler(
                enable_validation=True
            ),
        )
        mc.agent_pool_profiles = [agent_pool_profile]
        return mc
def set_up_linux_profile(self, mc):
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not self.context.get_no_ssh_key(enable_validation=True):
ssh_config = self.models.ContainerServiceSshConfiguration(
public_keys=[
self.models.ContainerServiceSshPublicKey(
key_data=self.context.get_ssh_key_value(
enable_validation=True
)
)
]
)
linux_profile = self.models.ContainerServiceLinuxProfile(
admin_username=self.context.get_admin_username(), ssh=ssh_config
)
mc.linux_profile = linux_profile
return mc
    def set_up_windows_profile(self, mc):
        """Attach a windows profile to *mc* when windows credentials are given.

        The username/password pair may have been dynamically prompted; the
        corresponding intermediates are cleared once consumed.

        :param mc: the `ManagedCluster` object being built.
        :return: *mc*, possibly with ``windows_profile`` populated.
        :raises CLIInternalError: if *mc* is not a `ManagedCluster`.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        (
            windows_admin_username,
            windows_admin_password,
        ) = self.context.get_windows_admin_username_and_password()
        if windows_admin_username or windows_admin_password:
            # --enable-ahub maps to the "Windows_Server" license type.
            windows_license_type = None
            if self.context.get_enable_ahub():
                windows_license_type = "Windows_Server"
            # this would throw an error if windows_admin_username is empty (the user enters an empty
            # string after being prompted), since admin_username is a required parameter
            windows_profile = self.models.ManagedClusterWindowsProfile(
                admin_username=windows_admin_username,
                admin_password=windows_admin_password,
                license_type=windows_license_type,
            )
            mc.windows_profile = windows_profile
            # clean up intermediate after `mc` is decorated
            self.context.remove_intermediate("windows_admin_username")
            self.context.remove_intermediate("windows_admin_password")
        return mc
    def set_up_service_principal_profile(self, mc):
        """Attach a service principal profile to *mc* when one is required.

        The profile is skipped only when managed identity is enabled AND no
        explicit service principal/secret was supplied; an explicitly
        provided service principal always wins over managed identity.

        :param mc: the `ManagedCluster` object being built.
        :return: *mc*, possibly with ``service_principal_profile`` populated.
        :raises CLIInternalError: if *mc* is not a `ManagedCluster`.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        # If customer explicitly provide a service principal, disable managed identity.
        (
            service_principal,
            client_secret,
        ) = self.context.get_service_principal_and_client_secret()
        enable_managed_identity = self.context.get_enable_managed_identity()
        # Skip create service principal profile for the cluster if the cluster enables managed identity
        # and customer doesn't explicitly provide a service principal.
        if not (
            enable_managed_identity and
            not service_principal and
            not client_secret
        ):
            service_principal_profile = (
                self.models.ManagedClusterServicePrincipalProfile(
                    client_id=service_principal, secret=client_secret
                )
            )
            mc.service_principal_profile = service_principal_profile
        # clean up intermediates after `mc` is decorated
        self.context.remove_intermediate("service_principal")
        self.context.remove_intermediate("client_secret")
        return mc
def construct_default_mc(self):
# An all-in-one function used to create the complete `ManagedCluster` object, which will later be
# passed as a parameter to the underlying SDK (mgmt-containerservice) to send the actual request.
# Note: to reduce the risk of regression introduced by refactoring, this function is not complete
# and is being implemented gradually.
# initialize the `ManagedCluster` object, also set up the intermediate named "subscription_id"
mc = self.init_mc()
# set up agent pool profile(s)
mc = self.set_up_agent_pool_profiles(mc)
# set up linux profile (for ssh access)
mc = self.set_up_linux_profile(mc)
# set up windows profile
mc = self.set_up_windows_profile(mc)
# set up service principal profile
mc = self.set_up_service_principal_profile(mc)
return mc
| 42.878146 | 119 | 0.649611 |
from knack.prompting import NoTTYException, prompt, prompt_pass
from knack.log import get_logger
from typing import Any, List, Dict, Tuple, Union
from azure.cli.core import AzCommandsLoader
from azure.cli.core.azclierror import (
CLIInternalError,
MutuallyExclusiveArgumentError,
RequiredArgumentMissingError,
InvalidArgumentValueError,
NoTTYError,
)
from azure.cli.core.commands import AzCliCommand
from azure.cli.core.profiles import ResourceType
from .custom import (
_get_rg_location,
_validate_ssh_key,
_get_default_dns_prefix,
_set_vm_set_type,
set_load_balancer_sku,
get_subscription_id,
_ensure_aks_service_principal,
)
# Module-level logger, named after this module per knack/azure-cli convention.
logger = get_logger(__name__)
def safe_list_get(li: List, idx: int, default: Any = None):
    """Return ``li[idx]`` if it exists, otherwise *default*.

    Fixed: previously a non-list *li* returned a hard-coded ``None``
    instead of the caller-supplied *default*; both the out-of-range and
    the non-list path now honor *default* (backward compatible, since
    *default* itself defaults to ``None``).

    :param li: the list to index into.
    :param idx: the index to fetch.
    :param default: value returned when *idx* is out of range or *li*
        is not a list.
    """
    if isinstance(li, list):
        try:
            return li[idx]
        except IndexError:
            return default
    return default
class AKSCreateModels:
    """Holder for the SDK model classes used by aks_create.

    Each model class is fetched once from ``cmd.get_models`` (resolved
    against *resource_type*, operation group ``managed_clusters``) and
    exposed as an attribute named after the model class, so the rest of
    the module never calls ``get_models`` directly.
    """

    # SDK model class names exposed as attributes of this holder. The
    # original implementation repeated an identical get_models stanza for
    # each of these 17 names; they are now fetched in a single loop.
    MODEL_NAMES = (
        "ManagedClusterWindowsProfile",
        "ManagedClusterSKU",
        "ContainerServiceNetworkProfile",
        "ContainerServiceLinuxProfile",
        "ManagedClusterServicePrincipalProfile",
        "ContainerServiceSshConfiguration",
        "ContainerServiceSshPublicKey",
        "ManagedClusterAADProfile",
        "ManagedClusterAutoUpgradeProfile",
        "ManagedClusterAgentPoolProfile",
        "ManagedClusterIdentity",
        "UserAssignedIdentity",
        "ManagedCluster",
        "ManagedServiceIdentityUserAssignedIdentitiesValue",
        "ExtendedLocation",
        "ExtendedLocationTypes",
        "ManagedClusterAPIServerAccessProfile",
    )

    def __init__(
        self,
        cmd: AzCommandsLoader,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Fetch every model class once and bind it as an attribute.

        :param cmd: command loader providing ``get_models``.
        :param resource_type: SDK profile to resolve the models against.
        """
        self.__cmd = cmd
        self.resource_type = resource_type
        for model_name in self.MODEL_NAMES:
            setattr(
                self,
                model_name,
                self.__cmd.get_models(
                    model_name,
                    resource_type=self.resource_type,
                    operation_group="managed_clusters",
                ),
            )
class AKSCreateContext:
    def __init__(self, cmd: AzCliCommand, raw_parameters: Dict):
        """Cache the command object and the raw command-line parameters.

        :param cmd: the AzCliCommand currently being executed.
        :param raw_parameters: dict of raw parameter values from the
            command layer; accessors read from it lazily.
        :raises CLIInternalError: if *raw_parameters* is not a dict.
        """
        self.cmd = cmd
        if not isinstance(raw_parameters, dict):
            raise CLIInternalError(
                "Unexpected raw_parameters object with type '{}'.".format(
                    type(raw_parameters)
                )
            )
        self.raw_param = raw_parameters
        # Scratch values shared between set-up steps (e.g. "subscription_id").
        self.intermediates = dict()
        # The ManagedCluster being built; bound later via attach_mc.
        self.mc = None
def attach_mc(self, mc):
if self.mc is None:
self.mc = mc
else:
msg = "the same" if self.mc == mc else "different"
raise CLIInternalError(
"Attempting to attach the `mc` object again, the two objects are {}.".format(
msg
)
)
def get_intermediate(self, variable_name: str, default_value: Any = None):
if variable_name not in self.intermediates:
msg = "The intermediate '{}' does not exist, return default value '{}'.".format(
variable_name, default_value
)
logger.debug(msg)
return self.intermediates.get(variable_name, default_value)
def set_intermediate(
self, variable_name: str, value: Any, overwrite_exists: bool = False
):
if variable_name in self.intermediates:
if overwrite_exists:
msg = "The intermediate '{}' is overwritten. Original value: '{}', new value: '{}'.".format(
variable_name, self.intermediates.get(variable_name), value
)
logger.debug(msg)
self.intermediates[variable_name] = value
elif self.intermediates.get(variable_name) != value:
msg = "The intermediate '{}' already exists, but overwrite is not enabled." \
"Original value: '{}', candidate value: '{}'.".format(
variable_name,
self.intermediates.get(variable_name),
value,
)
logger.warning(msg)
else:
self.intermediates[variable_name] = value
    def remove_intermediate(self, variable_name: str):
        """Drop an intermediate value; a no-op when it does not exist."""
        self.intermediates.pop(variable_name, None)
def get_resource_group_name(self, **kwargs) -> str:
resource_group_name = self.raw_param.get("resource_group_name")
return resource_group_name
def get_name(self, **kwargs) -> str:
name = self.raw_param.get("name")
return name
def get_ssh_key_value(
self, enable_validation: bool = False, **kwargs
) -> str:
raw_value = self.raw_param.get("ssh_key_value")
value_obtained_from_mc = None
if (
self.mc and
self.mc.linux_profile and
self.mc.linux_profile.ssh and
self.mc.linux_profile.ssh.public_keys
):
public_key_obj = safe_list_get(
self.mc.linux_profile.ssh.public_keys, 0, None
)
if public_key_obj:
value_obtained_from_mc = public_key_obj.key_data
if value_obtained_from_mc is not None:
ssh_key_value = value_obtained_from_mc
else:
ssh_key_value = raw_value
if enable_validation:
_validate_ssh_key(
no_ssh_key=self.get_no_ssh_key(), ssh_key_value=ssh_key_value
)
return ssh_key_value
def get_dns_name_prefix(
self, enable_validation: bool = False, **kwargs
) -> Union[str, None]:
parameter_name = "dns_name_prefix"
raw_value = self.raw_param.get(parameter_name)
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.dns_prefix
read_from_mc = False
if value_obtained_from_mc is not None:
dns_name_prefix = value_obtained_from_mc
read_from_mc = True
else:
dns_name_prefix = raw_value
if kwargs.get("read_only"):
return dns_name_prefix
dynamic_completion = False
if not dns_name_prefix and not self.get_fqdn_subdomain():
dynamic_completion = True
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
dns_name_prefix = _get_default_dns_prefix(
name=self.get_name(),
resource_group_name=self.get_resource_group_name(),
subscription_id=self.get_intermediate("subscription_id"),
)
if enable_validation:
if dns_name_prefix and self.get_fqdn_subdomain():
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return dns_name_prefix
def get_location(self, **kwargs) -> str:
parameter_name = "location"
raw_value = self.raw_param.get(parameter_name)
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.location
read_from_mc = False
if value_obtained_from_mc is not None:
location = value_obtained_from_mc
read_from_mc = True
else:
location = raw_value
dynamic_completion = False
if location is None:
dynamic_completion = True
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
location = _get_rg_location(
self.cmd.cli_ctx, self.get_resource_group_name()
)
return location
def get_kubernetes_version(self, **kwargs) -> str:
raw_value = self.raw_param.get("kubernetes_version")
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.kubernetes_version
if value_obtained_from_mc is not None:
kubernetes_version = value_obtained_from_mc
else:
kubernetes_version = raw_value
return kubernetes_version
def get_no_ssh_key(self, enable_validation: bool = False, **kwargs) -> bool:
no_ssh_key = self.raw_param.get("no_ssh_key")
if enable_validation:
_validate_ssh_key(
no_ssh_key=no_ssh_key, ssh_key_value=self.get_ssh_key_value()
)
return no_ssh_key
def get_vm_set_type(self, **kwargs) -> str:
parameter_name = "vm_set_type"
raw_value = self.raw_param.get(parameter_name)
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.type
read_from_mc = False
if value_obtained_from_mc is not None:
vm_set_type = value_obtained_from_mc
read_from_mc = True
else:
vm_set_type = raw_value
if kwargs.get("read_only"):
return vm_set_type
dynamic_completion = True
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
vm_set_type = _set_vm_set_type(
vm_set_type=vm_set_type,
kubernetes_version=self.get_kubernetes_version(),
)
return vm_set_type
def get_load_balancer_sku(
self, enable_validation: bool = False, **kwargs
) -> str:
parameter_name = "load_balancer_sku"
raw_value = self.raw_param.get(parameter_name)
value_obtained_from_mc = None
if self.mc and self.mc.network_profile:
value_obtained_from_mc = self.mc.network_profile.load_balancer_sku
read_from_mc = False
if value_obtained_from_mc is not None:
load_balancer_sku = value_obtained_from_mc
read_from_mc = True
else:
load_balancer_sku = raw_value
if kwargs.get("read_only"):
return load_balancer_sku
dynamic_completion = False
if not load_balancer_sku:
dynamic_completion = True
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
load_balancer_sku = set_load_balancer_sku(
sku=load_balancer_sku,
kubernetes_version=self.get_kubernetes_version(),
)
if enable_validation:
if (
load_balancer_sku == "basic" and
self.get_api_server_authorized_ip_ranges()
):
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
return load_balancer_sku
def get_api_server_authorized_ip_ranges(
self, enable_validation: bool = False, **kwargs
) -> Union[str, List[str], None]:
parameter_name = "api_server_authorized_ip_ranges"
raw_value = self.raw_param.get(parameter_name)
value_obtained_from_mc = None
if self.mc and self.mc.api_server_access_profile:
value_obtained_from_mc = (
self.mc.api_server_access_profile.authorized_ip_ranges
)
if value_obtained_from_mc is not None:
api_server_authorized_ip_ranges = value_obtained_from_mc
else:
api_server_authorized_ip_ranges = raw_value
if enable_validation:
if (
api_server_authorized_ip_ranges and
self.get_load_balancer_sku() == "basic"
):
raise MutuallyExclusiveArgumentError(
"--api-server-authorized-ip-ranges can only be used with standard load balancer"
)
return api_server_authorized_ip_ranges
def get_fqdn_subdomain(
self, enable_validation: bool = False, **kwargs
) -> Union[str, None]:
raw_value = self.raw_param.get("fqdn_subdomain")
value_obtained_from_mc = None
if self.mc:
value_obtained_from_mc = self.mc.fqdn_subdomain
if value_obtained_from_mc is not None:
fqdn_subdomain = value_obtained_from_mc
else:
fqdn_subdomain = raw_value
if enable_validation:
if fqdn_subdomain and self.get_dns_name_prefix(read_only=True):
raise MutuallyExclusiveArgumentError(
"--dns-name-prefix and --fqdn-subdomain cannot be used at same time"
)
return fqdn_subdomain
def get_nodepool_name(self, **kwargs) -> str:
raw_value = self.raw_param.get("nodepool_name")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.name
read_from_mc = False
if value_obtained_from_mc is not None:
nodepool_name = value_obtained_from_mc
read_from_mc = True
else:
nodepool_name = raw_value
dynamic_completion = False
if kwargs.get("enable_trim", False):
dynamic_completion = True
dynamic_completion = dynamic_completion and not read_from_mc
if dynamic_completion:
if not nodepool_name:
nodepool_name = "nodepool1"
else:
nodepool_name = nodepool_name[:12]
return nodepool_name
def get_nodepool_tags(self, **kwargs) -> Union[Dict[str, str], None]:
raw_value = self.raw_param.get("nodepool_tags")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.tags
if value_obtained_from_mc is not None:
nodepool_tags = value_obtained_from_mc
else:
nodepool_tags = raw_value
return nodepool_tags
def get_nodepool_labels(self, **kwargs) -> Union[Dict[str, str], None]:
raw_value = self.raw_param.get("nodepool_labels")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.node_labels
if value_obtained_from_mc is not None:
nodepool_labels = value_obtained_from_mc
else:
nodepool_labels = raw_value
return nodepool_labels
def get_node_count(self, enable_validation: bool = False, **kwargs) -> int:
raw_value = self.raw_param.get("node_count")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.count
if value_obtained_from_mc is not None:
node_count = value_obtained_from_mc
else:
node_count = raw_value
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
min_count = self.get_min_count()
max_count = self.get_max_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
return int(node_count)
def get_node_vm_size(self, **kwargs) -> str:
raw_value = self.raw_param.get("node_vm_size")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vm_size
if value_obtained_from_mc is not None:
node_vm_size = value_obtained_from_mc
else:
node_vm_size = raw_value
return node_vm_size
def get_vnet_subnet_id(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("vnet_subnet_id")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.vnet_subnet_id
if value_obtained_from_mc is not None:
vnet_subnet_id = value_obtained_from_mc
else:
vnet_subnet_id = raw_value
return vnet_subnet_id
def get_ppg(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("ppg")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.proximity_placement_group_id
)
if value_obtained_from_mc is not None:
ppg = value_obtained_from_mc
else:
ppg = raw_value
return ppg
def get_zones(self, **kwargs) -> Union[List[str], None]:
raw_value = self.raw_param.get("zones")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.availability_zones
if value_obtained_from_mc is not None:
zones = value_obtained_from_mc
else:
zones = raw_value
return zones
def get_enable_node_public_ip(self, **kwargs) -> bool:
raw_value = self.raw_param.get("enable_node_public_ip")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_node_public_ip
)
if value_obtained_from_mc is not None:
enable_node_public_ip = value_obtained_from_mc
else:
enable_node_public_ip = raw_value
return enable_node_public_ip
def get_node_public_ip_prefix_id(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("node_public_ip_prefix_id")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.node_public_ip_prefix_id
)
if value_obtained_from_mc is not None:
node_public_ip_prefix_id = value_obtained_from_mc
else:
node_public_ip_prefix_id = raw_value
return node_public_ip_prefix_id
def get_enable_encryption_at_host(self, **kwargs) -> bool:
raw_value = self.raw_param.get("enable_encryption_at_host")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = (
agent_pool_profile.enable_encryption_at_host
)
if value_obtained_from_mc is not None:
enable_encryption_at_host = value_obtained_from_mc
else:
enable_encryption_at_host = raw_value
return enable_encryption_at_host
def get_enable_ultra_ssd(self, **kwargs) -> bool:
raw_value = self.raw_param.get("enable_ultra_ssd")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_ultra_ssd
if value_obtained_from_mc is not None:
enable_ultra_ssd = value_obtained_from_mc
else:
enable_ultra_ssd = raw_value
return enable_ultra_ssd
def get_max_pods(self, **kwargs) -> Union[int, None]:
raw_value = self.raw_param.get("max_pods")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_pods
if value_obtained_from_mc is not None:
max_pods = value_obtained_from_mc
else:
max_pods = raw_value
if max_pods:
max_pods = int(max_pods)
else:
max_pods = None
return max_pods
def get_node_osdisk_size(self, **kwargs) -> Union[int, None]:
raw_value = self.raw_param.get("node_osdisk_size")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_size_gb
if value_obtained_from_mc is not None:
node_osdisk_size = value_obtained_from_mc
else:
node_osdisk_size = raw_value
if node_osdisk_size:
node_osdisk_size = int(node_osdisk_size)
else:
node_osdisk_size = None
return node_osdisk_size
def get_node_osdisk_type(self, **kwargs) -> Union[str, None]:
raw_value = self.raw_param.get("node_osdisk_type")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.os_disk_type
if value_obtained_from_mc is not None:
node_osdisk_type = value_obtained_from_mc
else:
node_osdisk_type = raw_value
return node_osdisk_type
def get_enable_cluster_autoscaler(
self, enable_validation: bool = False, **kwargs
) -> bool:
raw_value = self.raw_param.get("enable_cluster_autoscaler")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.enable_auto_scaling
if value_obtained_from_mc is not None:
enable_cluster_autoscaler = value_obtained_from_mc
else:
enable_cluster_autoscaler = raw_value
if enable_validation:
min_count = self.get_min_count()
max_count = self.get_max_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return enable_cluster_autoscaler
def get_min_count(
self, enable_validation: bool = False, **kwargs
) -> Union[int, None]:
raw_value = self.raw_param.get("min_count")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.min_count
if value_obtained_from_mc is not None:
min_count = value_obtained_from_mc
else:
min_count = raw_value
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
max_count = self.get_max_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return min_count
def get_max_count(
self, enable_validation: bool = False, **kwargs
) -> Union[int, None]:
raw_value = self.raw_param.get("max_count")
value_obtained_from_mc = None
if self.mc and self.mc.agent_pool_profiles:
agent_pool_profile = safe_list_get(
self.mc.agent_pool_profiles, 0, None
)
if agent_pool_profile:
value_obtained_from_mc = agent_pool_profile.max_count
if value_obtained_from_mc is not None:
max_count = value_obtained_from_mc
else:
max_count = raw_value
if enable_validation:
enable_cluster_autoscaler = self.get_enable_cluster_autoscaler()
min_count = self.get_min_count()
node_count = self.get_node_count()
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise RequiredArgumentMissingError(
"Please specify both min-count and max-count when --enable-cluster-autoscaler enabled"
)
if min_count > max_count:
raise InvalidArgumentValueError(
"Value of min-count should be less than or equal to value of max-count"
)
if node_count < min_count or node_count > max_count:
raise InvalidArgumentValueError(
"node-count is not in the range of min-count and max-count"
)
else:
if min_count is not None or max_count is not None:
raise RequiredArgumentMissingError(
"min-count and max-count are required for --enable-cluster-autoscaler, please use the flag"
)
return max_count
def get_admin_username(self, **kwargs) -> str:
raw_value = self.raw_param.get("admin_username")
value_obtained_from_mc = None
if self.mc and self.mc.linux_profile:
value_obtained_from_mc = self.mc.linux_profile.admin_username
if value_obtained_from_mc is not None:
admin_username = value_obtained_from_mc
else:
admin_username = raw_value
return admin_username
def get_windows_admin_username_and_password(
self, **kwargs
) -> Tuple[Union[str, None], Union[str, None]]:
username_raw_value = self.raw_param.get("windows_admin_username")
username_value_obtained_from_mc = None
if self.mc and self.mc.windows_profile:
username_value_obtained_from_mc = (
self.mc.windows_profile.admin_username
)
username_read_from_mc = False
if username_value_obtained_from_mc is not None:
windows_admin_username = username_value_obtained_from_mc
username_read_from_mc = True
else:
windows_admin_username = username_raw_value
password_raw_value = self.raw_param.get("windows_admin_password")
password_value_obtained_from_mc = None
if self.mc and self.mc.windows_profile:
password_value_obtained_from_mc = (
self.mc.windows_profile.admin_password
)
password_read_from_mc = False
if password_value_obtained_from_mc is not None:
windows_admin_password = password_value_obtained_from_mc
password_read_from_mc = True
else:
windows_admin_password = password_raw_value
if username_read_from_mc != password_read_from_mc:
raise CLIInternalError(
"Inconsistent state detected, one of windows admin name and password is read from the `mc` object."
)
if kwargs.get("read_only"):
return windows_admin_username, windows_admin_password
username_dynamic_completion = False
if windows_admin_username is None and windows_admin_password:
username_dynamic_completion = True
username_dynamic_completion = (
username_dynamic_completion and not username_read_from_mc
)
if username_dynamic_completion:
try:
windows_admin_username = prompt("windows_admin_username: ")
except NoTTYException:
raise NoTTYError(
"Please specify username for Windows in non-interactive mode."
)
password_dynamic_completion = False
if windows_admin_password is None and windows_admin_username:
password_dynamic_completion = True
password_dynamic_completion = (
password_dynamic_completion and not password_read_from_mc
)
if password_dynamic_completion:
try:
windows_admin_password = prompt_pass(
msg="windows-admin-password: ", confirm=True
)
except NoTTYException:
raise NoTTYError(
"Please specify both username and password in non-interactive mode."
)
return windows_admin_username, windows_admin_password
def get_enable_ahub(self, **kwargs) -> bool:
enable_ahub = self.raw_param.get("enable_ahub")
raw_value = self.raw_param.get("enable_ahub")
value_obtained_from_mc = None
if self.mc and self.mc.windows_profile:
value_obtained_from_mc = self.mc.windows_profile.license_type == "Windows_Server"
if value_obtained_from_mc is not None:
enable_ahub = value_obtained_from_mc
else:
enable_ahub = raw_value
return enable_ahub
def get_service_principal_and_client_secret(
self, **kwargs
) -> Tuple[Union[str, None], Union[str, None]]:
sp_parameter_name = "service_principal"
sp_property_name_in_mc = "client_id"
sp_raw_value = self.raw_param.get(sp_parameter_name)
sp_value_obtained_from_mc = None
if self.mc and self.mc.service_principal_profile:
sp_value_obtained_from_mc = getattr(
self.mc.service_principal_profile, sp_property_name_in_mc
)
sp_read_from_mc = False
if sp_value_obtained_from_mc is not None:
service_principal = sp_value_obtained_from_mc
sp_read_from_mc = True
else:
service_principal = sp_raw_value
secret_parameter_name = "client_secret"
secret_property_name_in_mc = "secret"
secret_raw_value = self.raw_param.get(secret_parameter_name)
secret_value_obtained_from_mc = None
if self.mc and self.mc.service_principal_profile:
secret_value_obtained_from_mc = getattr(
self.mc.service_principal_profile, secret_property_name_in_mc
)
secret_read_from_mc = False
if secret_value_obtained_from_mc is not None:
client_secret = secret_value_obtained_from_mc
secret_read_from_mc = True
else:
client_secret = secret_raw_value
if sp_read_from_mc != secret_read_from_mc:
raise CLIInternalError(
"Inconsistent state detected, one of sp and secret is read from the `mc` object."
)
if kwargs.get("read_only"):
return service_principal, client_secret
dynamic_completion = False
enable_managed_identity = self.get_enable_managed_identity(read_only=True)
if not (
enable_managed_identity and
not service_principal and
not client_secret
):
dynamic_completion = True
dynamic_completion = (
dynamic_completion and
not sp_read_from_mc and
not secret_read_from_mc
)
if dynamic_completion:
principal_obj = _ensure_aks_service_principal(
cli_ctx=self.cmd.cli_ctx,
service_principal=service_principal,
client_secret=client_secret,
subscription_id=self.get_intermediate(
"subscription_id", None
),
dns_name_prefix=self.get_dns_name_prefix(),
fqdn_subdomain=self.get_fqdn_subdomain(),
location=self.get_location(),
name=self.get_name(),
)
service_principal = principal_obj.get("service_principal")
client_secret = principal_obj.get("client_secret")
return service_principal, client_secret
    def get_enable_managed_identity(
        self, enable_validation=False, **kwargs
    ) -> bool:
        """Obtain the value of enable_managed_identity.

        Precedence: a value already present on the `mc` object wins over
        the raw CLI parameter.  Unless called with read_only=True, the
        value may additionally be dynamically completed: when an explicit
        service principal and client secret are supplied (and the value
        was not read from `mc`), managed identity is forced off.

        :param enable_validation: reserved; no validation is performed yet.
        :param kwargs: supports read_only=True to skip dynamic completion.
        :return: the effective enable_managed_identity value.
        """
        # Read the value originally passed on the command line.
        raw_value = self.raw_param.get("enable_managed_identity")
        # Try to read the corresponding property from the `mc` object.
        value_obtained_from_mc = None
        if self.mc and self.mc.identity:
            # NOTE(review): when an identity object exists but its type is
            # None, this evaluates to False (not None), which still counts
            # as "read from mc" below — confirm this is intentional.
            value_obtained_from_mc = self.mc.identity.type is not None
        # Values from `mc` take precedence over the raw parameter.
        read_from_mc = False
        if value_obtained_from_mc is not None:
            enable_managed_identity = value_obtained_from_mc
            read_from_mc = True
        else:
            enable_managed_identity = raw_value
        # Skip dynamic completion and validation in read-only mode.
        if kwargs.get("read_only"):
            return enable_managed_identity
        # Dynamic completion: an explicit service principal + secret pair
        # implies managed identity should be disabled.
        dynamic_completion = False
        (
            service_principal,
            client_secret,
        ) = self.get_service_principal_and_client_secret(read_only=True)
        if service_principal and client_secret:
            dynamic_completion = True
        # Never override a value that came from the `mc` object itself.
        dynamic_completion = (
            dynamic_completion and
            not read_from_mc
        )
        if dynamic_completion:
            enable_managed_identity = False
        # Validation placeholder — intentionally empty for now.
        if enable_validation:
            pass
        return enable_managed_identity
class AKSCreateDecorator:
    def __init__(
        self,
        cmd: AzCliCommand,
        client,
        models: AKSCreateModels,
        raw_parameters: Dict,
        resource_type: ResourceType = ResourceType.MGMT_CONTAINERSERVICE,
    ):
        """Decorator that assembles a ManagedCluster for `az aks create`.

        :param cmd: the AzCliCommand being executed.
        :param client: the container-service management client used to send
            the final request.
        :param models: factory object exposing the SDK model classes.
        :param raw_parameters: raw command parameters, interpreted through
            an AKSCreateContext.
        :param resource_type: the resource type/profile used to resolve
            models; defaults to MGMT_CONTAINERSERVICE.
        """
        self.cmd = cmd
        self.client = client
        self.models = models
        # The context interprets raw parameters and tracks intermediates
        # shared between the individual set_up_* steps.
        self.context = AKSCreateContext(cmd, raw_parameters)
        self.resource_type = resource_type
    def init_mc(self):
        """Create a bare ManagedCluster with only its location set.

        Also caches the current subscription id on the context as the
        "subscription_id" intermediate, for use by later set-up steps.

        :return: a freshly constructed ManagedCluster object.
        """
        subscription_id = get_subscription_id(self.cmd.cli_ctx)
        self.context.set_intermediate(
            "subscription_id", subscription_id, overwrite_exists=True
        )
        mc = self.models.ManagedCluster(location=self.context.get_location())
        return mc
    def set_up_agent_pool_profiles(self, mc):
        """Attach the single system-mode agent pool profile to *mc*.

        Every value is resolved through the context; getters called with
        enable_validation=True may raise on inconsistent user input.

        :param mc: the ManagedCluster being decorated.
        :raises CLIInternalError: if *mc* is not a ManagedCluster.
        :return: the decorated ManagedCluster.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        # Build one Linux, System-mode pool from the validated parameters.
        agent_pool_profile = self.models.ManagedClusterAgentPoolProfile(
            name=self.context.get_nodepool_name(enable_trim=True),
            tags=self.context.get_nodepool_tags(),
            node_labels=self.context.get_nodepool_labels(),
            count=self.context.get_node_count(enable_validation=True),
            vm_size=self.context.get_node_vm_size(),
            os_type="Linux",
            vnet_subnet_id=self.context.get_vnet_subnet_id(),
            proximity_placement_group_id=self.context.get_ppg(),
            availability_zones=self.context.get_zones(),
            enable_node_public_ip=self.context.get_enable_node_public_ip(),
            node_public_ip_prefix_id=self.context.get_node_public_ip_prefix_id(),
            enable_encryption_at_host=self.context.get_enable_encryption_at_host(),
            enable_ultra_ssd=self.context.get_enable_ultra_ssd(),
            max_pods=self.context.get_max_pods(),
            type=self.context.get_vm_set_type(),
            mode="System",
            os_disk_size_gb=self.context.get_node_osdisk_size(),
            os_disk_type=self.context.get_node_osdisk_type(),
            min_count=self.context.get_min_count(enable_validation=True),
            max_count=self.context.get_max_count(enable_validation=True),
            enable_auto_scaling=self.context.get_enable_cluster_autoscaler(
                enable_validation=True
            ),
        )
        mc.agent_pool_profiles = [agent_pool_profile]
        return mc
def set_up_linux_profile(self, mc):
if not isinstance(mc, self.models.ManagedCluster):
raise CLIInternalError(
"Unexpected mc object with type '{}'.".format(type(mc))
)
if not self.context.get_no_ssh_key(enable_validation=True):
ssh_config = self.models.ContainerServiceSshConfiguration(
public_keys=[
self.models.ContainerServiceSshPublicKey(
key_data=self.context.get_ssh_key_value(
enable_validation=True
)
)
]
)
linux_profile = self.models.ContainerServiceLinuxProfile(
admin_username=self.context.get_admin_username(), ssh=ssh_config
)
mc.linux_profile = linux_profile
return mc
    def set_up_windows_profile(self, mc):
        """Attach a Windows profile to *mc* when Windows credentials exist.

        The cached credential intermediates are always removed afterwards,
        whether or not a profile was attached.

        :param mc: the ManagedCluster being decorated.
        :raises CLIInternalError: if *mc* is not a ManagedCluster.
        :return: the decorated ManagedCluster.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        (
            windows_admin_username,
            windows_admin_password,
        ) = self.context.get_windows_admin_username_and_password()
        # Either credential being set is enough to build the profile.
        if windows_admin_username or windows_admin_password:
            # "ahub" (Azure Hybrid Benefit) selects the Windows_Server
            # license type — presumably maps the CLI flag 1:1; confirm.
            windows_license_type = None
            if self.context.get_enable_ahub():
                windows_license_type = "Windows_Server"
            windows_profile = self.models.ManagedClusterWindowsProfile(
                admin_username=windows_admin_username,
                admin_password=windows_admin_password,
                license_type=windows_license_type,
            )
            mc.windows_profile = windows_profile
        # Drop the cached credentials once they are on the profile.
        self.context.remove_intermediate("windows_admin_username")
        self.context.remove_intermediate("windows_admin_password")
        return mc
    def set_up_service_principal_profile(self, mc):
        """Attach a service principal profile to *mc* when appropriate.

        The profile is skipped only when managed identity is enabled AND no
        explicit service principal/secret was provided; otherwise the
        (possibly dynamically completed) credentials are attached.

        :param mc: the ManagedCluster being decorated.
        :raises CLIInternalError: if *mc* is not a ManagedCluster.
        :return: the decorated ManagedCluster.
        """
        if not isinstance(mc, self.models.ManagedCluster):
            raise CLIInternalError(
                "Unexpected mc object with type '{}'.".format(type(mc))
            )
        (
            service_principal,
            client_secret,
        ) = self.context.get_service_principal_and_client_secret()
        enable_managed_identity = self.context.get_enable_managed_identity()
        # Attach unless managed identity alone is in use.
        if not (
            enable_managed_identity and
            not service_principal and
            not client_secret
        ):
            service_principal_profile = (
                self.models.ManagedClusterServicePrincipalProfile(
                    client_id=service_principal, secret=client_secret
                )
            )
            mc.service_principal_profile = service_principal_profile
        # clean up intermediates after `mc` is decorated
        self.context.remove_intermediate("service_principal")
        self.context.remove_intermediate("client_secret")
        return mc
def construct_default_mc(self):
# An all-in-one function used to create the complete `ManagedCluster` object, which will later be
# passed as a parameter to the underlying SDK (mgmt-containerservice) to send the actual request.
# Note: to reduce the risk of regression introduced by refactoring, this function is not complete
# and is being implemented gradually.
# initialize the `ManagedCluster` object, also set up the intermediate named "subscription_id"
mc = self.init_mc()
# set up agent pool profile(s)
mc = self.set_up_agent_pool_profiles(mc)
# set up linux profile (for ssh access)
mc = self.set_up_linux_profile(mc)
# set up windows profile
mc = self.set_up_windows_profile(mc)
# set up service principal profile
mc = self.set_up_service_principal_profile(mc)
return mc
| true | true |
f730c134a28f77e3c847ee825ab9c72a1458d0e4 | 3,102 | py | Python | src/stk/molecular/topology_graphs/topology_graph/optimizers/collapser.py | andrewtarzia/stk | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | src/stk/molecular/topology_graphs/topology_graph/optimizers/collapser.py | JelfsMaterialsGroup/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | src/stk/molecular/topology_graphs/topology_graph/optimizers/collapser.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | """
Collapser
=========
"""
from .optimizer import Optimizer
from .utilities import get_mch_bonds, get_long_bond_ids, get_subunits
import mchammer as mch
class Collapser(Optimizer):
"""
Performs rigid-body collapse of molecules [1]_.
Examples
--------
*Structure Optimization*
Using :class:`.Collapser` will lead to
:class:`.ConstructedMolecule` structures without long bonds.
.. testcode:: structure-optimization
import stk
bb1 = stk.BuildingBlock('NCCN', [stk.PrimaryAminoFactory()])
bb2 = stk.BuildingBlock('O=CCC=O', [stk.AldehydeFactory()])
polymer = stk.ConstructedMolecule(
topology_graph=stk.polymer.Linear(
building_blocks=(bb1, bb2),
repeating_unit='AB',
num_repeating_units=2,
optimizer=stk.Collapser(),
),
)
Optimisation with :mod:`stk` simply collects the final position
matrix. The optimisation's trajectory can be output using the
:mod:`MCHammer` implementation if required by the user [1]_.
The open-source optimization code :mod:`MCHammer` specializes in
the `collapsing` of molecules with long bonds like those
constructed by :mod:`stk`. This code is entirely nonphysical and
is, therefore, completely general to any chemistry.
References
----------
.. [1] https://github.com/andrewtarzia/MCHammer
"""
    def __init__(
        self,
        step_size=0.1,
        distance_threshold=1.5,
        scale_steps=True,
    ):
        """
        Initialize an instance of :class:`.Collapser`.
        Parameters
        ----------
        step_size : :class:`float`, optional
            The relative size of the step to take during collapse in
            Angstrom.
        distance_threshold : :class:`float`, optional
            Distance between distinct building blocks to use as
            threshold for halting collapse in Angstrom.
        scale_steps : :class:`bool`, optional
            Whether to scale the step of each distinct building block
            by its relative distance from the molecules centroid.
        """
        # The actual collapse algorithm is delegated to the MCHammer
        # implementation; this class only adapts stk state to/from it.
        self._optimizer = mch.Collapser(
            step_size=step_size,
            distance_threshold=distance_threshold,
            scale_steps=scale_steps,
        )
    def optimize(self, state):
        """
        Collapse the molecule held by *state* and return the new state.
        Parameters
        ----------
        state
            The construction state being optimized; provides atoms,
            bonds and the position matrix of the molecule.
        Returns
        -------
        A state carrying the optimized position matrix, produced via
        ``state.with_position_matrix``.
        """
        # Define MCHammer molecule to optimize.
        mch_mol = mch.Molecule(
            atoms=(
                mch.Atom(
                    id=atom.get_id(),
                    element_string=atom.__class__.__name__,
                ) for atom in state.get_atoms()
            ),
            bonds=get_mch_bonds(state),
            position_matrix=state.get_position_matrix(),
        )
        # Run optimization. `result` holds additional run information;
        # only the final positions on `mch_mol` are used here.
        mch_mol, result = self._optimizer.get_result(
            mol=mch_mol,
            bond_pair_ids=tuple(get_long_bond_ids(state)),
            subunits=get_subunits(state),
        )
        # Return a new state carrying the optimized position matrix.
        return state.with_position_matrix(
            position_matrix=mch_mol.get_position_matrix()
        )
| 28.722222 | 69 | 0.604449 |
from .optimizer import Optimizer
from .utilities import get_mch_bonds, get_long_bond_ids, get_subunits
import mchammer as mch
class Collapser(Optimizer):
def __init__(
self,
step_size=0.1,
distance_threshold=1.5,
scale_steps=True,
):
self._optimizer = mch.Collapser(
step_size=step_size,
distance_threshold=distance_threshold,
scale_steps=scale_steps,
)
def optimize(self, state):
mch_mol = mch.Molecule(
atoms=(
mch.Atom(
id=atom.get_id(),
element_string=atom.__class__.__name__,
) for atom in state.get_atoms()
),
bonds=get_mch_bonds(state),
position_matrix=state.get_position_matrix(),
)
mch_mol, result = self._optimizer.get_result(
mol=mch_mol,
bond_pair_ids=tuple(get_long_bond_ids(state)),
subunits=get_subunits(state),
)
return state.with_position_matrix(
position_matrix=mch_mol.get_position_matrix()
)
| true | true |
f730c1eb989c9c84f7652a96a8ab7e841c0ef149 | 339 | py | Python | BERKE/OPENCV/OpenCv4cam.py | vektorelpython24proje/temelbilgiler | bced2723d247dbb8b10cf86e25ee209635f82921 | [
"MIT"
] | null | null | null | BERKE/OPENCV/OpenCv4cam.py | vektorelpython24proje/temelbilgiler | bced2723d247dbb8b10cf86e25ee209635f82921 | [
"MIT"
] | null | null | null | BERKE/OPENCV/OpenCv4cam.py | vektorelpython24proje/temelbilgiler | bced2723d247dbb8b10cf86e25ee209635f82921 | [
"MIT"
] | 3 | 2020-10-24T14:36:14.000Z | 2020-10-24T14:41:13.000Z | import cv2
# Live webcam demo: copy a patch of the frame and white out the source.
# Open the default camera (index 0) with the DirectShow backend (Windows).
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
while True:
    # ret is the success flag; frame is the BGR image (rows, cols, 3).
    ret,frame = cap.read()
    # Copy the 50x50 patch at rows/cols 100:150 onto rows/cols 200:250,
    # then paint the source patch white.
    frame[200:250,200:250] = frame[100:150,100:150]
    frame[100:150,100:150] = [255,255,255]
    cv2.imshow("ilkresim",frame)
    # Quit when the 'q' key is pressed (waitKey also pumps the GUI loop).
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows() | 18.833333 | 51 | 0.637168 | import cv2
cap = cv2.VideoCapture(0,cv2.CAP_DSHOW)
while True:
ret,frame = cap.read()
frame[200:250,200:250] = frame[100:150,100:150]
frame[100:150,100:150] = [255,255,255]
cv2.imshow("ilkresim",frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
cap.release()
cv2.destroyAllWindows() | true | true |
f730c2199ac56dcd1e8d9c7c1237e1e4ccc3fe0f | 6,446 | py | Python | faker/providers/lorem/ru_RU/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | 1 | 2021-01-21T03:44:59.000Z | 2021-01-21T03:44:59.000Z | faker/providers/lorem/ru_RU/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | null | null | null | faker/providers/lorem/ru_RU/__init__.py | bdclauser/Faker | b676668214f5f4cf2849eea16d50c835ffba5be9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .. import Provider as LoremProvider
class Provider(LoremProvider):
    """Lorem text provider with a Russian (ru_RU) vocabulary."""

    # Russian words sampled by the base LoremProvider to build words,
    # sentences and paragraphs.
    word_list = (
'войти', 'монета', 'вскинуть', 'желание', 'экзамен', 'налоговый',
'вытаскивать', 'приятель', 'вздрагивать', 'куча', 'порт', 'точно',
'заплакать', 'изба', 'правление', 'художественный', 'мучительно',
'изображать', 'фонарик', 'миф', 'грустный', 'опасность', 'мера',
'пастух', 'факультет', 'мелькнуть', 'полевой', 'другой', 'выраженный',
'забирать', 'рот', 'народ', 'соответствие', 'тута', 'коммунизм',
'решение', 'плод', 'собеседник', 'возмутиться', 'достоинство',
'господь', 'болото', 'инфекция', 'голубчик', 'сынок', 'пространство',
'прощение', 'прежде', 'хотеть', 'ленинград', 'даль', 'развитый',
'близко', 'более', 'спорт', 'эпоха', 'ответить', 'освободить', 'совет',
'проход', 'палец', 'вчера', 'приличный', 'ярко', 'белье', 'кузнец',
'неожиданно', 'вперед', 'зато', 'кольцо', 'передо', 'мгновение',
'плавно', 'табак', 'число', 'изучить', 'тяжелый', 'рассуждение',
'салон', 'идея', 'что', 'светило', 'порода', 'сомнительный', 'бок',
'очко', 'неудобно', 'советовать', 'отдел', 'помолчать', 'поздравлять',
'пробовать', 'дошлый', 'смеяться', 'упорно', 'вздрогнуть', 'затянуться',
'танцевать', 'песенка', 'выбирать', 'правильный', 'намерение', 'издали',
'запустить', 'наслаждение', 'крыса', 'лететь', 'космос', 'радость',
'поезд', 'находить', 'гулять', 'горький', 'бочок', 'ночь', 'счастье',
'уничтожение', 'дьявол', 'коробка', 'спасть', 'кожа', 'провинция',
'прелесть', 'тюрьма', 'низкий', 'сверкать', 'темнеть', 'солнце',
'дружно', 'настать', 'блин', 'степь', 'самостоятельно', 'крутой',
'картинка', 'зачем', 'рабочий', 'необычный', 'армейский', 'труп',
'ягода', 'около', 'монета', 'естественный', 'юный', 'район', 'скрытый',
'поймать', 'строительство', 'палата', 'миг', 'триста', 'штаб', 'ломать',
'возможно', 'полюбить', 'человечек', 'легко', 'чувство', 'ручей',
'карман', 'деньги', 'неправда', 'сравнение', 'грудь', 'отъезд',
'возникновение', 'степь', 'возбуждение', 'деловой', 'следовательно',
'жидкий', 'сынок', 'художественный', 'поколение', 'расстегнуть', 'пища',
'ученый', 'секунда', 'успокоиться', 'вряд', 'аж', 'вскакивать', 'мимо',
'падать', 'потянуться', 'угроза', 'растеряться', 'бегать', 'стакан',
'о', 'кпсс', 'ныне', 'пол', 'реклама', 'при', 'школьный', 'премьера',
'дальний', 'потрясти', 'освобождение', 'покидать', 'наступать', 'жить',
'какой', 'обида', 'командование', 'девка', 'выражаться', 'головной',
'второй', 'князь', 'социалистический', 'головка', 'привлекать', 'через',
'господь', 'результат', 'отметить', 'ведь', 'падаль', 'покидать',
'художественный', 'правый', 'висеть', 'лапа', 'каюта', 'слишком',
'нервно', 'серьезный', 'зима', 'заработать', 'эффект', 'пропасть',
'плод', 'что', 'висеть', 'холодно', 'единый', 'выкинуть', 'мрачно',
'выгнать', 'умирать', 'иной', 'космос', 'природа', 'функция',
'поставить', 'оборот', 'услать', 'очередной', 'медицина', 'функция',
'зарплата', 'выдержать', 'расстройство', 'адвокат', 'задержать',
'появление', 'инвалид', 'интеллектуальный', 'исследование', 'господь',
'смертельный', 'спичка', 'вариант', 'рай', 'одиннадцать', 'чем',
'манера', 'магазин', 'поговорить', 'полоска', 'помимо', 'построить',
'домашний', 'механический', 'сохранять', 'отражение', 'научить',
'тесно', 'аллея', 'прежний', 'посидеть', 'славный', 'очутиться',
'лететь', 'невозможно', 'порядок', 'выразить', 'спешить', 'сынок',
'ребятишки', 'угроза', 'оставить', 'цвет', 'налево', 'парень',
'миллиард', 'горький', 'трубка', 'подробность', 'пасть', 'непривычный',
'угодный', 'засунуть', 'цель', 'запретить', 'дремать', 'разуметься',
'приходить', 'совещание', 'постоянный', 'анализ', 'терапия', 'приятель',
'процесс', 'академик', 'металл', 'развернуться', 'жестокий', 'интернет',
'банда', 'изменение', 'коллектив', 'похороны', 'устройство',
'торопливый', 'разводить', 'промолчать', 'подземный', 'пламя',
'редактор', 'теория', 'карандаш', 'упор', 'означать', 'бабочка',
'четыре', 'столетие', 'разнообразный', 'витрина', 'нож', 'команда',
'шлем', 'недостаток', 'протягивать', 'за', 'металл', 'добиться',
'сутки', 'четко', 'предоставить', 'тысяча', 'запеть', 'бригада',
'мелочь', 'выраженный', 'пересечь', 'сходить', 'вообще', 'рис', 'банк',
'бак', 'передо', 'назначить', 'важный', 'правление', 'палка', 'трясти',
'уронить', 'витрина', 'основание', 'да', 'перебивать', 'дыхание',
'применяться', 'июнь', 'бетонный', 'избегать', 'умолять', 'мягкий',
'заявление', 'конференция', 'встать', 'свежий', 'сопровождаться',
'цепочка', 'выражение', 'угол', 'ботинок', 'ложиться', 'инструкция',
'присесть', 'решетка', 'еврейский', 'порог', 'зеленый', 'граница',
'ставить', 'смелый', 'сустав', 'роса', 'демократия', 'вывести',
'конструкция', 'задрать', 'багровый', 'военный', 'направо', 'житель',
'товар', 'неправда', 'материя', 'командующий', 'кидать', 'заложить',
'лиловый', 'слать', 'горький', 'пространство', 'провал', 'мусор',
'наткнуться', 'торговля', 'монета', 'место', 'спалить', 'бровь',
'левый', 'хлеб', 'коричневый', 'потом', 'страсть', 'виднеться',
'роскошный', 'способ', 'костер', 'заведение', 'пропадать', 'слишком',
'пятеро', 'мальчишка', 'тусклый', 'неожиданный', 'плясать', 'дурацкий',
'дрогнуть', 'сбросить', 'прошептать', 'беспомощный', 'рота', 'песня',
'тревога', 'некоторый', 'термин', 'нажать', 'видимо', 'валюта', 'набор',
'боец', 'райком', 'новый', 'скользить', 'руководитель', 'волк',
'изредка', 'понятный', 'пропаганда', 'остановить', 'исполнять', 'ход',
'госпожа', 'печатать', 'командир', 'снимать', 'казнь', 'невыносимый',
'спорт', 'тревога', 'уточнить', 'актриса', 'полностью', 'покинуть',
'сверкающий', 'мотоцикл', 'дорогой', 'указанный', 'ремень', 'посвятить',
'один', 'а', 'доставать', 'хозяйка', 'носок', 'написать', 'еврейский',
'призыв', 'увеличиваться', 'равнодушный',
) | 70.835165 | 80 | 0.579429 |
from __future__ import unicode_literals
from .. import Provider as LoremProvider
class Provider(LoremProvider):
word_list = (
'войти', 'монета', 'вскинуть', 'желание', 'экзамен', 'налоговый',
'вытаскивать', 'приятель', 'вздрагивать', 'куча', 'порт', 'точно',
'заплакать', 'изба', 'правление', 'художественный', 'мучительно',
'изображать', 'фонарик', 'миф', 'грустный', 'опасность', 'мера',
'пастух', 'факультет', 'мелькнуть', 'полевой', 'другой', 'выраженный',
'забирать', 'рот', 'народ', 'соответствие', 'тута', 'коммунизм',
'решение', 'плод', 'собеседник', 'возмутиться', 'достоинство',
'господь', 'болото', 'инфекция', 'голубчик', 'сынок', 'пространство',
'прощение', 'прежде', 'хотеть', 'ленинград', 'даль', 'развитый',
'близко', 'более', 'спорт', 'эпоха', 'ответить', 'освободить', 'совет',
'проход', 'палец', 'вчера', 'приличный', 'ярко', 'белье', 'кузнец',
'неожиданно', 'вперед', 'зато', 'кольцо', 'передо', 'мгновение',
'плавно', 'табак', 'число', 'изучить', 'тяжелый', 'рассуждение',
'салон', 'идея', 'что', 'светило', 'порода', 'сомнительный', 'бок',
'очко', 'неудобно', 'советовать', 'отдел', 'помолчать', 'поздравлять',
'пробовать', 'дошлый', 'смеяться', 'упорно', 'вздрогнуть', 'затянуться',
'танцевать', 'песенка', 'выбирать', 'правильный', 'намерение', 'издали',
'запустить', 'наслаждение', 'крыса', 'лететь', 'космос', 'радость',
'поезд', 'находить', 'гулять', 'горький', 'бочок', 'ночь', 'счастье',
'уничтожение', 'дьявол', 'коробка', 'спасть', 'кожа', 'провинция',
'прелесть', 'тюрьма', 'низкий', 'сверкать', 'темнеть', 'солнце',
'дружно', 'настать', 'блин', 'степь', 'самостоятельно', 'крутой',
'картинка', 'зачем', 'рабочий', 'необычный', 'армейский', 'труп',
'ягода', 'около', 'монета', 'естественный', 'юный', 'район', 'скрытый',
'поймать', 'строительство', 'палата', 'миг', 'триста', 'штаб', 'ломать',
'возможно', 'полюбить', 'человечек', 'легко', 'чувство', 'ручей',
'карман', 'деньги', 'неправда', 'сравнение', 'грудь', 'отъезд',
'возникновение', 'степь', 'возбуждение', 'деловой', 'следовательно',
'жидкий', 'сынок', 'художественный', 'поколение', 'расстегнуть', 'пища',
'ученый', 'секунда', 'успокоиться', 'вряд', 'аж', 'вскакивать', 'мимо',
'падать', 'потянуться', 'угроза', 'растеряться', 'бегать', 'стакан',
'о', 'кпсс', 'ныне', 'пол', 'реклама', 'при', 'школьный', 'премьера',
'дальний', 'потрясти', 'освобождение', 'покидать', 'наступать', 'жить',
'какой', 'обида', 'командование', 'девка', 'выражаться', 'головной',
'второй', 'князь', 'социалистический', 'головка', 'привлекать', 'через',
'господь', 'результат', 'отметить', 'ведь', 'падаль', 'покидать',
'художественный', 'правый', 'висеть', 'лапа', 'каюта', 'слишком',
'нервно', 'серьезный', 'зима', 'заработать', 'эффект', 'пропасть',
'плод', 'что', 'висеть', 'холодно', 'единый', 'выкинуть', 'мрачно',
'выгнать', 'умирать', 'иной', 'космос', 'природа', 'функция',
'поставить', 'оборот', 'услать', 'очередной', 'медицина', 'функция',
'зарплата', 'выдержать', 'расстройство', 'адвокат', 'задержать',
'появление', 'инвалид', 'интеллектуальный', 'исследование', 'господь',
'смертельный', 'спичка', 'вариант', 'рай', 'одиннадцать', 'чем',
'манера', 'магазин', 'поговорить', 'полоска', 'помимо', 'построить',
'домашний', 'механический', 'сохранять', 'отражение', 'научить',
'тесно', 'аллея', 'прежний', 'посидеть', 'славный', 'очутиться',
'лететь', 'невозможно', 'порядок', 'выразить', 'спешить', 'сынок',
'ребятишки', 'угроза', 'оставить', 'цвет', 'налево', 'парень',
'миллиард', 'горький', 'трубка', 'подробность', 'пасть', 'непривычный',
'угодный', 'засунуть', 'цель', 'запретить', 'дремать', 'разуметься',
'приходить', 'совещание', 'постоянный', 'анализ', 'терапия', 'приятель',
'процесс', 'академик', 'металл', 'развернуться', 'жестокий', 'интернет',
'банда', 'изменение', 'коллектив', 'похороны', 'устройство',
'торопливый', 'разводить', 'промолчать', 'подземный', 'пламя',
'редактор', 'теория', 'карандаш', 'упор', 'означать', 'бабочка',
'четыре', 'столетие', 'разнообразный', 'витрина', 'нож', 'команда',
'шлем', 'недостаток', 'протягивать', 'за', 'металл', 'добиться',
'сутки', 'четко', 'предоставить', 'тысяча', 'запеть', 'бригада',
'мелочь', 'выраженный', 'пересечь', 'сходить', 'вообще', 'рис', 'банк',
'бак', 'передо', 'назначить', 'важный', 'правление', 'палка', 'трясти',
'уронить', 'витрина', 'основание', 'да', 'перебивать', 'дыхание',
'применяться', 'июнь', 'бетонный', 'избегать', 'умолять', 'мягкий',
'заявление', 'конференция', 'встать', 'свежий', 'сопровождаться',
'цепочка', 'выражение', 'угол', 'ботинок', 'ложиться', 'инструкция',
'присесть', 'решетка', 'еврейский', 'порог', 'зеленый', 'граница',
'ставить', 'смелый', 'сустав', 'роса', 'демократия', 'вывести',
'конструкция', 'задрать', 'багровый', 'военный', 'направо', 'житель',
'товар', 'неправда', 'материя', 'командующий', 'кидать', 'заложить',
'лиловый', 'слать', 'горький', 'пространство', 'провал', 'мусор',
'наткнуться', 'торговля', 'монета', 'место', 'спалить', 'бровь',
'левый', 'хлеб', 'коричневый', 'потом', 'страсть', 'виднеться',
'роскошный', 'способ', 'костер', 'заведение', 'пропадать', 'слишком',
'пятеро', 'мальчишка', 'тусклый', 'неожиданный', 'плясать', 'дурацкий',
'дрогнуть', 'сбросить', 'прошептать', 'беспомощный', 'рота', 'песня',
'тревога', 'некоторый', 'термин', 'нажать', 'видимо', 'валюта', 'набор',
'боец', 'райком', 'новый', 'скользить', 'руководитель', 'волк',
'изредка', 'понятный', 'пропаганда', 'остановить', 'исполнять', 'ход',
'госпожа', 'печатать', 'командир', 'снимать', 'казнь', 'невыносимый',
'спорт', 'тревога', 'уточнить', 'актриса', 'полностью', 'покинуть',
'сверкающий', 'мотоцикл', 'дорогой', 'указанный', 'ремень', 'посвятить',
'один', 'а', 'доставать', 'хозяйка', 'носок', 'написать', 'еврейский',
'призыв', 'увеличиваться', 'равнодушный',
) | true | true |
f730c23f0da51300b98e1b1ee705c0aa5cefff70 | 1,034 | py | Python | tiki/tiki/spiders/tiki.py | Necrophote/telecrawl | 8512e0ae9f6b44bb64cba29a13c382024f265ca5 | [
"MIT"
] | null | null | null | tiki/tiki/spiders/tiki.py | Necrophote/telecrawl | 8512e0ae9f6b44bb64cba29a13c382024f265ca5 | [
"MIT"
] | null | null | null | tiki/tiki/spiders/tiki.py | Necrophote/telecrawl | 8512e0ae9f6b44bb64cba29a13c382024f265ca5 | [
"MIT"
] | null | null | null | import scrapy
from scrapy.loader import ItemLoader
from tiki.items import TiviItem
class TikiSpider(scrapy.Spider):
    """Crawl TV product listings from tiki.vn.

    Starts at the TV category page and follows pagination, yielding one
    :class:`TiviItem` per product card.
    """

    name = "tiki"
    allowed_domains = ["tiki.vn"]
    # Use a list (not a set) so the start order is deterministic.
    start_urls = ["https://tiki.vn/tivi/c5015"]

    def parse(self, response):
        """Yield a TiviItem per product, then follow the next-page link."""
        for product in response.css('div.product-item'):
            loader = ItemLoader(item=TiviItem(), selector=product)
            # Both product name and code come from the anchor's title
            # attribute; they are post-processed by the item's loaders.
            loader.add_css('product_name', 'a::attr(title)')
            loader.add_css('product_code', 'a::attr(title)')
            # Official final price only.
            loader.add_css('price', '.final-price::text')
            yield loader.load_item()
        # Follow pagination, if a "next" link is present.
        for next_link in response.css('li a.next'):
            yield response.follow(next_link, callback=self.parse)
# yield response.follow(a, callback=self.parse) | 27.210526 | 53 | 0.679884 | import scrapy
from scrapy.loader import ItemLoader
from tiki.items import TiviItem
class TikiSpider(scrapy.Spider):
name = "tiki"
allowed_domains = ["tiki.vn"]
start_urls = {"https://tiki.vn/tivi/c5015"}
def parse(self, response):
tks = response.css('div.product-item')
for tk in tks:
loader = ItemLoader(item=TiviItem(), selector=tk)
loader.add_css('product_name', 'a::attr(title)')
loader.add_css('product_code', 'a::attr(title)')
loader.add_css('price', '.final-price::text')
yield loader.load_item()
for a in response.css('li a.next'):
yield response.follow(a, callback=self.parse)
| true | true |
f730c43933f5c965f1163cbe92ea2e00c357ef48 | 115 | py | Python | glance/contrib/plugins/artifacts_sample/__init__.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | 3 | 2015-12-22T09:04:44.000Z | 2017-10-18T15:26:03.000Z | glance/contrib/plugins/artifacts_sample/__init__.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | null | null | null | glance/contrib/plugins/artifacts_sample/__init__.py | wkoathp/glance | eb0c47047ddc28371f546437118986ed904f41d3 | [
"Apache-2.0"
] | null | null | null | from v1 import artifact as art1
from v2 import artifact as art2
MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact]
| 19.166667 | 48 | 0.791304 | from v1 import artifact as art1
from v2 import artifact as art2
MY_ARTIFACT = [art1.MyArtifact, art2.MyArtifact]
| true | true |
f730c5d7592773f3e022c3b161473f2b1d4a7b40 | 9,591 | py | Python | v6.0.6/system/fortios_system_arp_table.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 14 | 2018-09-25T20:35:25.000Z | 2021-07-14T04:30:54.000Z | v6.0.6/system/fortios_system_arp_table.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 32 | 2018-10-09T04:13:42.000Z | 2020-05-11T07:20:28.000Z | v6.0.6/system/fortios_system_arp_table.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 11 | 2018-10-09T00:14:53.000Z | 2021-11-03T10:54:09.000Z | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Standard Ansible module metadata: community-supported preview module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_arp_table
short_description: Configure ARP table in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and arp_table category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.6
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_arp_table:
description:
- Configure ARP table.
default: null
type: dict
suboptions:
id:
description:
- Unique integer ID of the entry.
required: true
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
ip:
description:
- IP address.
type: str
mac:
description:
- MAC address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure ARP table.
fortios_system_arp_table:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_arp_table:
id: "3"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
mac: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Authenticate *fos* against the FortiGate described by *data*.

    Turns on API debugging, selects the transport (HTTPS unless the task
    explicitly disabled it) and performs the actual login, verifying the
    device certificate according to data['ssl_verify'].
    """
    fgt_host = data['host']
    fgt_user = data['username']
    fgt_password = data['password']
    verify_cert = data['ssl_verify']
    fos.debug('on')
    # HTTPS is the default; only an explicit falsy 'https' turns it off.
    fos.https('off' if not data.get('https', True) else 'on')
    fos.login(fgt_host, fgt_user, fgt_password, verify=verify_cert)
def filter_system_arp_table_data(json):
    """Keep only the attributes the FortiOS arp-table endpoint accepts.

    Unknown keys and keys whose value is None (unset by the user) are
    dropped so they are not sent to the device.

    :param json: the raw 'system_arp_table' task parameters.
    :return: a new dict containing only the known, non-None attributes.
    """
    option_list = ['id', 'interface', 'ip',
                   'mac']
    # Dict comprehension replaces the manual loop; option_list order is
    # preserved, missing and None-valued attributes are skipped.
    return {
        attribute: json[attribute]
        for attribute in option_list
        if json.get(attribute) is not None
    }
def underscore_to_hyphen(data):
    """Recursively convert underscores to hyphens in dict keys.

    Ansible argument names use underscores while FortiOS REST attribute
    names use hyphens; this translates nested dicts, including dicts
    inside lists.

    BUG FIX: the original list branch rebound the loop variable
    (``elem = underscore_to_hyphen(elem)``) without storing the result
    back, so dicts nested inside lists were never converted.

    :param data: any value; only dicts (and containers of dicts) change.
    :return: the converted value (new list/dict, or *data* unchanged).
    """
    if isinstance(data, list):
        # Build a new list so converted elements are actually kept.
        return [underscore_to_hyphen(elem) for elem in data]
    if isinstance(data, dict):
        return {
            k.replace('_', '-'): underscore_to_hyphen(v)
            for k, v in data.items()
        }
    return data
def system_arp_table(data, fos):
    """Create or delete an ARP-table entry according to ``data['state']``.

    Builds the hyphenated payload from the module parameters and issues
    a set (present) or delete-by-id (absent) against system/arp-table.
    """
    vdom = data['vdom']
    payload = underscore_to_hyphen(
        filter_system_arp_table_data(data['system_arp_table']))
    if data['state'] == "present":
        return fos.set('system', 'arp-table', data=payload, vdom=vdom)
    if data['state'] == "absent":
        return fos.delete('system', 'arp-table',
                          mkey=payload['id'], vdom=vdom)
def is_successful_status(status):
    """Decide whether a FortiOS response represents success.

    A DELETE that returned HTTP 404 also counts as success: the object
    is already gone, which is the desired end state.
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Run the arp-table operation and normalise its outcome.

    Returns ``(is_error, changed, response)``.
    NOTE(review): when ``data['system_arp_table']`` is empty, ``resp`` is
    never bound and the return raises UnboundLocalError — behaviour kept
    identical to the original.
    """
    if data['system_arp_table']:
        resp = system_arp_table(data, fos)
    changed = resp['status'] == "success"
    return (not is_successful_status(resp), changed, resp)
def main():
    """Module entry point: parse arguments, connect, apply the change."""
    argument_spec = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_arp_table": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "id": {"required": True, "type": "int"},
                "interface": {"required": False, "type": "str"},
                "ip": {"required": False, "type": "str"},
                "mac": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    params = module.params

    # Legacy mode (direct fortiosapi) is selected only when host, username
    # and password are all supplied; otherwise the httpapi plugin is used.
    legacy_mode = all(params.get(key) is not None
                      for key in ('host', 'username', 'password'))

    if legacy_mode:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(params, fos)
        is_error, has_changed, result = fortios_system(params, fos)
        fos.logout()
    else:
        if not module._socket_path:
            module.fail_json(**FAIL_SOCKET_MSG)
        connection = Connection(module._socket_path)
        fos = FortiOSHandler(connection)
        is_error, has_changed, result = fortios_system(params, fos)

    if is_error:
        module.fail_json(msg="Error in repo", meta=result)
    else:
        module.exit_json(changed=has_changed, meta=result)


if __name__ == '__main__':
    main()
| 29.151976 | 97 | 0.607966 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_arp_table
short_description: Configure ARP table in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and arp_table category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.6
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_arp_table:
description:
- Configure ARP table.
default: null
type: dict
suboptions:
id:
description:
- Unique integer ID of the entry.
required: true
type: int
interface:
description:
- Interface name. Source system.interface.name.
type: str
ip:
description:
- IP address.
type: str
mac:
description:
- MAC address.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure ARP table.
fortios_system_arp_table:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_arp_table:
id: "3"
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
mac: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Log the ``fos`` handle into the FortiGate (legacy fortiosapi mode).

    Credentials and the TLS-verification flag come from *data*; HTTPS is
    enabled unless the caller explicitly turned it off.
    """
    fos.debug('on')
    # Only an explicit falsy 'https' entry disables HTTPS.
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_system_arp_table_data(json):
    """Keep only the whitelisted, non-None ARP-table options from *json*."""
    option_list = ['id', 'interface', 'ip', 'mac']
    return {name: json[name]
            for name in option_list
            if name in json and json[name] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dictionary keys, replacing '_' with '-'.

    Converts argument-spec style (underscores) to FortiOS API style
    (hyphens) for dicts, lists of dicts, and any nesting thereof.
    """
    if isinstance(data, list):
        # Bug fix: write the converted element back by index — rebinding
        # the loop variable alone left list contents unconverted.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def system_arp_table(data, fos):
    """Apply the requested ARP-table change (set on present, delete on absent)."""
    vdom = data['vdom']
    payload = underscore_to_hyphen(
        filter_system_arp_table_data(data['system_arp_table']))
    if data['state'] == "present":
        return fos.set('system', 'arp-table', data=payload, vdom=vdom)
    if data['state'] == "absent":
        return fos.delete('system', 'arp-table',
                          mkey=payload['id'], vdom=vdom)
def is_successful_status(status):
    """True on a successful response, or on a DELETE that 404'd
    (the object is already absent, which is the desired state)."""
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
    """Dispatch the arp-table operation; return (is_error, changed, resp).

    NOTE(review): an empty ``data['system_arp_table']`` leaves ``resp``
    unbound and the return raises UnboundLocalError — original behaviour
    intentionally preserved.
    """
    if data['system_arp_table']:
        resp = system_arp_table(data, fos)
    changed = resp['status'] == "success"
    return (not is_successful_status(resp), changed, resp)
def main():
    """Entry point: build the argument spec, pick a transport, run the change."""
    argument_spec = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "system_arp_table": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "id": {"required": True, "type": "int"},
                "interface": {"required": False, "type": "str"},
                "ip": {"required": False, "type": "str"},
                "mac": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=False)
    params = module.params

    # Direct fortiosapi access only when host, username and password are
    # all provided; otherwise go through the httpapi connection plugin.
    legacy_mode = all(params.get(key) is not None
                      for key in ('host', 'username', 'password'))

    if legacy_mode:
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(params, fos)
        is_error, has_changed, result = fortios_system(params, fos)
        fos.logout()
    else:
        if not module._socket_path:
            module.fail_json(**FAIL_SOCKET_MSG)
        connection = Connection(module._socket_path)
        fos = FortiOSHandler(connection)
        is_error, has_changed, result = fortios_system(params, fos)

    if is_error:
        module.fail_json(msg="Error in repo", meta=result)
    else:
        module.exit_json(changed=has_changed, meta=result)


if __name__ == '__main__':
    main()
| true | true |
f730c69d6e5ae3c5d5a6129b26bdaf4232dd5224 | 161 | py | Python | coffee_small.py | meik99/CoffeeMakerApp | aa6a3785812f41138f66e58c195ed021ef2d4cc3 | [
"CC0-1.0"
] | null | null | null | coffee_small.py | meik99/CoffeeMakerApp | aa6a3785812f41138f66e58c195ed021ef2d4cc3 | [
"CC0-1.0"
] | null | null | null | coffee_small.py | meik99/CoffeeMakerApp | aa6a3785812f41138f66e58c195ed021ef2d4cc3 | [
"CC0-1.0"
] | null | null | null | import time
# Pulse a GPIO pin high for 15 seconds, then low — presumably switching a
# relay that powers a coffee maker (TODO confirm wiring).
import RPi.GPIO as IO
# BCM pin number driving the output device.
PIN = 4
IO.setmode(IO.BCM)  # use Broadcom (BCM) pin numbering
IO.setup(PIN, IO.OUT)
IO.output(PIN, IO.HIGH)  # switch the load on
time.sleep(15)  # hold for 15 seconds
IO.output(PIN, IO.LOW)  # switch the load off
IO.cleanup() | 12.384615 | 23 | 0.701863 | import time
import RPi.GPIO as IO
PIN = 4
IO.setmode(IO.BCM)
IO.setup(PIN, IO.OUT)
IO.output(PIN, IO.HIGH)
time.sleep(15)
IO.output(PIN, IO.LOW)
IO.cleanup() | true | true |
f730c70d8ffa1539142454cd5cf5157d8a5a5d00 | 226 | py | Python | historia/utils/__init__.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [
"MIT"
] | 6 | 2016-04-26T18:39:36.000Z | 2021-09-01T09:13:38.000Z | historia/utils/__init__.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [
"MIT"
] | null | null | null | historia/utils/__init__.py | eranimo/historia | 5e0b047d4bcdd534f48f8b9bf19d425b0b31a3fd | [
"MIT"
] | 4 | 2016-04-10T23:47:23.000Z | 2021-08-15T11:40:28.000Z | from historia.utils.id import unique_id
from historia.utils.color import random_country_colors
from historia.utils.store import Store
from historia.utils.timer import Timer
from historia.utils.trading import position_in_range
| 37.666667 | 54 | 0.867257 | from historia.utils.id import unique_id
from historia.utils.color import random_country_colors
from historia.utils.store import Store
from historia.utils.timer import Timer
from historia.utils.trading import position_in_range
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.