Columns and dtypes:

hexsha: string | size: int64 | ext: string | lang: string
max_stars_repo_path: string | max_stars_repo_name: string | max_stars_repo_head_hexsha: string | max_stars_repo_licenses: list | max_stars_count: int64 | max_stars_repo_stars_event_min_datetime: string | max_stars_repo_stars_event_max_datetime: string
max_issues_repo_path: string | max_issues_repo_name: string | max_issues_repo_head_hexsha: string | max_issues_repo_licenses: list | max_issues_count: int64 | max_issues_repo_issues_event_min_datetime: string | max_issues_repo_issues_event_max_datetime: string
max_forks_repo_path: string | max_forks_repo_name: string | max_forks_repo_head_hexsha: string | max_forks_repo_licenses: list | max_forks_count: int64 | max_forks_repo_forks_event_min_datetime: string | max_forks_repo_forks_event_max_datetime: string
content: string | avg_line_length: float64 | max_line_length: int64 | alphanum_fraction: float64

Quality-signal columns (float64 unless noted otherwise): qsc_code_num_words_quality_signal (int64), qsc_code_num_chars_quality_signal, qsc_code_mean_word_length_quality_signal, qsc_code_frac_words_unique_quality_signal, qsc_code_frac_chars_top_2grams_quality_signal, qsc_code_frac_chars_top_3grams_quality_signal, qsc_code_frac_chars_top_4grams_quality_signal, qsc_code_frac_chars_dupe_5grams_quality_signal, qsc_code_frac_chars_dupe_6grams_quality_signal, qsc_code_frac_chars_dupe_7grams_quality_signal, qsc_code_frac_chars_dupe_8grams_quality_signal, qsc_code_frac_chars_dupe_9grams_quality_signal, qsc_code_frac_chars_dupe_10grams_quality_signal, qsc_code_frac_chars_replacement_symbols_quality_signal, qsc_code_frac_chars_digital_quality_signal, qsc_code_frac_chars_whitespace_quality_signal, qsc_code_size_file_byte_quality_signal, qsc_code_num_lines_quality_signal, qsc_code_num_chars_line_max_quality_signal, qsc_code_num_chars_line_mean_quality_signal, qsc_code_frac_chars_alphabet_quality_signal, qsc_code_frac_chars_comments_quality_signal, qsc_code_cate_xml_start_quality_signal, qsc_code_frac_lines_dupe_lines_quality_signal, qsc_code_cate_autogen_quality_signal, qsc_code_frac_lines_long_string_quality_signal, qsc_code_frac_chars_string_length_quality_signal, qsc_code_frac_chars_long_word_length_quality_signal, qsc_code_frac_lines_string_concat_quality_signal, qsc_code_cate_encoded_data_quality_signal, qsc_code_frac_chars_hex_words_quality_signal, qsc_code_frac_lines_prompt_comments_quality_signal, qsc_code_frac_lines_assert_quality_signal, qsc_codepython_cate_ast_quality_signal, qsc_codepython_frac_lines_func_ratio_quality_signal, qsc_codepython_cate_var_zero_quality_signal (bool), qsc_codepython_frac_lines_pass_quality_signal, qsc_codepython_frac_lines_import_quality_signal, qsc_codepython_frac_lines_simplefunc_quality_signal, qsc_codepython_score_lines_no_logic_quality_signal, qsc_codepython_frac_lines_print_quality_signal

The same 41 names also appear without the _quality_signal suffix (int64, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null).

effective: string | hits: int64
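Each record that follows pairs this metadata with the raw source file in the content column and the qsc_* quality signals. As a minimal sketch of how such a record could be inspected (assuming, hypothetically, that the dump is also available as a Parquet file named data.parquet; neither the file name nor the storage format is stated here):

import pandas as pd

# Hypothetical file name; the dump's actual storage format is not given.
df = pd.read_parquet("data.parquet")

row = df.iloc[0]
# Repository metadata and a couple of the quality signals.
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["qsc_code_num_lines_quality_signal"], row["qsc_code_frac_chars_comments_quality_signal"])
# The source file itself sits in the 'content' column.
print(row["content"][:200])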

hexsha: 9c7db6d021abe53926601b1834856be78ee60324 | size: 8,949 | ext: py | lang: Python
max_stars_repo_path: RequestHandler.py | max_stars_repo_name: robot0nfire/behem0th | max_stars_repo_head_hexsha: 3931f2a9a2f00b95d82ccb3c5e7c13b3fbb5f4d7 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2016-09-08T18:38:35.000Z | max_stars_repo_stars_event_max_datetime: 2016-09-14T11:05:34.000Z
max_issues_repo_path: RequestHandler.py | max_issues_repo_name: robot0nfire/behem0th | max_issues_repo_head_hexsha: 3931f2a9a2f00b95d82ccb3c5e7c13b3fbb5f4d7 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2016-09-29T17:36:49.000Z | max_issues_repo_issues_event_max_datetime: 2016-09-29T17:36:49.000Z
max_forks_repo_path: RequestHandler.py | max_forks_repo_name: robot0nfire/behem0th | max_forks_repo_head_hexsha: 3931f2a9a2f00b95d82ccb3c5e7c13b3fbb5f4d7 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#
# Copyright (c) 2016 Christoph Heiss <me@christoph-heiss.me>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import json
import struct
import threading
import socket
import queue
import tempfile
import base64
import select
from behem0th import utils, log
BLOCK_SIZE = 4096
class Route:
def handle(self, data, request):
raise NotImplementedError
def send(self, data):
self.handler.send(self.route_name, data)
class FilelistRoute(Route):
def handle(self, data, request):
if request.is_client:
request.client._filelist = data
request.client._rlock.release()
else:
files, events = request.client._merge_filelist(data)
with request.client._rlock:
self.send(request.client._filelist)
for e in events:
request.queue_event(e)
for f in files:
request.queue_file(f[0], f[1])
"""
{
"action": "<action>",
"path": "<relpath-to-file>"
}
<action> can be either 'receive' or 'send'
Payload are base64 encoded chunks (BLOCK_SIZE bytes)
"""
class FileRoute(Route):
def handle(self, data, request):
action = data['action']
path = data['path']
if action == 'receive':
tmpf = tempfile.NamedTemporaryFile(delete=False)
buffer = b''
for chunk in request.recv():
buffer += chunk
if len(buffer) >= BLOCK_SIZE:
tmpf.write(base64.b64decode(buffer[:BLOCK_SIZE]))
buffer = buffer[BLOCK_SIZE:]
tmpf.write(base64.b64decode(buffer))
tmpf.close()
# watchdog reports a file-deleted and a file-created event, so ignore both.
request.client._ignore_next_fsevent(path)
request.client._ignore_next_fsevent(path)
os.rename(tmpf.name, request.client._abspath(path))
request.client._update_metadata(path)
request.client._event_handler._dispatch(
'received', request.client, path, 'file'
)
elif action == 'send':
request.queue_file('send', path)
else:
log.warn('FileRoute: Unknown action \'{0}\', ignoring.', action)
# If we are the 'server', we also need to distribute all file requests
# to all other clients.
if not request.is_client:
action = 'send' if action == 'receive' else 'request'
request.client._run_on_peers('queue_file', request, action, path)
"""
{
"type": "<type>",
"path": "<relpath-to-file>"
}
<type> can be one of 'file-created', 'file-deleted', 'file-moved'
"""
class EventRoute(Route):
def handle(self, data, request):
f_type, event = data['type'].split('-')
path = data['path']
abspath = request.client._abspath(path)
request.client._ignore_next_fsevent(path)
# TODO: factor out common code with Client._handle_fsevent() and Client._merge_filelist()
if event == 'created':
# create the file/directory
if f_type == 'file':
open(abspath, 'a').close()
else:
os.mkdir(abspath, 0o755)
request.client._add_to_filelist(path, f_type)
elif event == 'deleted':
request.client._remove_from_filelist(path)
os.remove(abspath)
elif event == 'moved':
request.client._remove_from_filelist(path)
os.rename(abspath, data['dest'])
request.client._add_to_filelist(data['dest'], f_type)
else:
log.warn('EventRoute: Unknown event {0}', data)
# For rationale, see FileRoute.handle()
if not request.is_client:
request.client._run_on_peers('queue_event', request, data)
ROUTES = {
'filelist': FilelistRoute(),
'file': FileRoute(),
'event': EventRoute()
}
"""
behem0th's protocol is completely text-based, using utf-8 encoding and
encoded in JSON for easy parsing.
A request usually looks like this:
{ "route": "<route-name>", "data": "<data>" }
'data' holds additional data which is then passed to the route.
There is no special format designed for 'data' and is specific to each route.
After each request there is a newline to separate them. (think of HTTP)
If a route needs to transfer additional data (a 'payload'), it has to send them
in a text-based format, e.g. base-64 encoding for binary data.
After the payload, if any, there has to be another newline to separate it from
the next request.
"""
class RequestHandler(threading.Thread):
req_handler_num = 0
def __init__(self, **kwargs):
super().__init__()
self.daemon = True
self.sync_queue = queue.Queue()
self.routes = {}
self.recvbuf = b''
RequestHandler.req_handler_num += 1
self.name = "request-handler-{0}".format(RequestHandler.req_handler_num)
for key, value in kwargs.items():
setattr(self, key, value)
with self.client._rlock:
self.client._peers.append(self)
self.sock.setblocking(0)
self.is_client = bool(self.client._sock)
for name, route in ROUTES.items():
route.route_name = name
route.handler = self
self.routes[name] = route
def setup(self):
log.info('Connected to {0}:{1}', self.address[0], self.address[1])
# If self.client has an (active) socket, it is a client and
# thus needs to start syncing up with the server.
if self.is_client:
# Lock the client until the filelist has been sent back by the server.
self.client._rlock.acquire()
self.send('filelist', self.client._filelist)
def close(self):
self.sync_queue.put({'action': 'exit'})
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
def handle(self, data):
try:
data = json.loads(data)
except ValueError:
log.error('Received invalid data: {0}', data)
return
route = data['route']
data = data['data']
log.info_v('Handling {0}, data:\n{1}', route, data)
if route in self.routes:
self.routes[route].handle(data, self)
else:
log.error("Data received on unknown route '{0}'!", route)
def send(self, route, data):
request = json.dumps({'route': route, 'data': data}) + '\n'
self.sock.sendall(request.encode())
def recv(self):
if self.recvbuf:
# This needs special handling because there could be multiple
# requests in recvbuf. If that is the case, we can only yield the first
# one and have to leave the others in recvbuf.
index = self.recvbuf.find(b'\n')
if index == -1:
yield self.recvbuf
self.recvbuf = None
else:
yield self.recvbuf[:index]
self.recvbuf = self.recvbuf[index+1:]
return
while 1:
select.select([self.sock], [], [])
chunk = self.sock.recv(1024)
if not len(chunk):
# If select has signaled the socket is readable, yet .recv()
# returns zero bytes, the other end probably performed
# a close() or shutdown() on the socket.
break
index = chunk.find(b'\n')
if index == -1:
yield chunk
else:
yield chunk[:index]
self.recvbuf = chunk[index+1:]
break
def queue_file(self, action, path):
self.sync_queue.put({
'action': action + '-file',
'path': path
})
def queue_event(self, event):
self.sync_queue.put({
'action': 'send-event',
'event': event
})
def sync_worker(self):
while 1:
entry = self.sync_queue.get()
log.info_v('Processing {0}', entry)
if entry['action'] == 'exit':
break
elif entry['action'] == 'send-file':
path = entry['path']
abspath = self.client._abspath(path)
self.send('file', {
'path': path,
'action': 'receive'
})
for buf in utils.read_file_seq(abspath, BLOCK_SIZE):
self.sock.sendall(base64.b64encode(buf))
self.sock.sendall(b'\n')
self.client._event_handler._dispatch(
'sent', self.client, path, 'file'
)
elif entry['action'] == 'request-file':
self.send('file', {
'path': entry['path'],
'action': 'send'
})
elif entry['action'] == 'send-event':
self.send('event', entry['event'])
self.sync_queue.task_done()
def run(self):
self.setup()
utils.create_thread(self.sync_worker,
name=self.name.replace('request-handler', 'sync-worker'))
while 1:
buffer = b''
for chunk in self.recv():
buffer += chunk
if not len(buffer):
break
self.handle(buffer.decode())
log.info('Disconnected from {0}:{1}', self.address[0], self.address[1])
self.close()

avg_line_length: 24.927577 | max_line_length: 91 | alphanum_fraction: 0.684769
qsc_code_*_quality_signal: num_words 1,275 | num_chars 8,949 | mean_word_length 4.723137 | frac_words_unique 0.265098 | frac_chars_top_2grams 0.041016 | frac_chars_top_3grams 0.012953 | frac_chars_top_4grams 0.014115 | frac_chars_dupe_5grams 0.141315 | frac_chars_dupe_6grams 0.097808 | frac_chars_dupe_7grams 0.054799 | frac_chars_dupe_8grams 0.02358 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.008927 | frac_chars_whitespace 0.18639 | size_file_byte 8,949 | num_lines 358 | num_chars_line_max 92 | num_chars_line_mean 24.997207 | frac_chars_alphabet 0.818157 | frac_chars_comments 0.211644 | cate_xml_start 0 | frac_lines_dupe_lines 0.220588 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.098266 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0.002793 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.073529 | cate_var_zero false | frac_lines_pass 0.004902 | frac_lines_import 0.04902 | frac_lines_simplefunc 0 | score_lines_no_logic 0.161765 | frac_lines_print 0
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0

hexsha: 9c7e8f9016c9cbf4f8f05d18b1e14e707c0c6a3e | size: 27,504 | ext: py | lang: Python
max_stars_repo_path: scripts/blenderseed.package.py | max_stars_repo_name: rgirish28/blenderseed | max_stars_repo_head_hexsha: fee897620d0348f4ea1f5722e1a82c3682ca0178 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: scripts/blenderseed.package.py | max_issues_repo_name: rgirish28/blenderseed | max_issues_repo_head_hexsha: fee897620d0348f4ea1f5722e1a82c3682ca0178 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: scripts/blenderseed.package.py | max_forks_repo_name: rgirish28/blenderseed | max_forks_repo_head_hexsha: fee897620d0348f4ea1f5722e1a82c3682ca0178 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit https://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2017-2018 Esteban Tovagliari, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from distutils import archive_util, dir_util
from xml.etree.ElementTree import ElementTree
import argparse
import colorama
import datetime
import glob
import os
import platform
import re
import shutil
import stat
import subprocess
import sys
import time
import traceback
import urllib
#--------------------------------------------------------------------------------------------------
# Constants.
#--------------------------------------------------------------------------------------------------
VERSION = "1.1.0"
SETTINGS_FILENAME = "blenderseed.package.configuration.xml"
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
GREEN_CHECKMARK = u"{0}\u2713{1}".format(colorama.Style.BRIGHT + colorama.Fore.GREEN, colorama.Style.RESET_ALL)
RED_CROSSMARK = u"{0}\u2717{1}".format(colorama.Style.BRIGHT + colorama.Fore.RED, colorama.Style.RESET_ALL)
def trace(message):
# encode('utf-8') is required to support output redirection to files or pipes.
print(u" {0}{1}{2}".format(colorama.Style.DIM + colorama.Fore.WHITE, message, colorama.Style.RESET_ALL).encode('utf-8'))
def info(message):
print(u" {0}".format(message).encode('utf-8'))
def progress(message):
print(u" {0}...".format(message).encode('utf-8'))
def warning(message):
print(u" {0}Warning: {1}.{2}".format(colorama.Style.BRIGHT + colorama.Fore.MAGENTA, message, colorama.Style.RESET_ALL).encode('utf-8'))
def fatal(message):
print(u"{0}Fatal: {1}. Aborting.{2}".format(colorama.Style.BRIGHT + colorama.Fore.RED, message, colorama.Style.RESET_ALL).encode('utf-8'))
if sys.exc_info()[0]:
print(traceback.format_exc())
sys.exit(1)
def exe(filepath):
return filepath + ".exe" if os.name == "nt" else filepath
def safe_delete_file(path):
try:
if os.path.exists(path):
os.remove(path)
except OSError:
fatal("Failed to delete file '" + path + "'")
def on_rmtree_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed.
# Let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
os.unlink(path)
def safe_delete_directory(path):
Attempts = 10
for attempt in range(Attempts):
try:
if os.path.exists(path):
shutil.rmtree(path, onerror=on_rmtree_error)
return
except OSError:
if attempt < Attempts - 1:
time.sleep(0.5)
else:
fatal("Failed to delete directory '" + path + "'")
def safe_delete_directory_recursively(root_path, directory_name):
safe_delete_directory(os.path.join(root_path, directory_name))
for entry in os.listdir(root_path):
subdirectory = os.path.join(root_path, entry)
if os.path.isdir(subdirectory):
safe_delete_directory_recursively(subdirectory, directory_name)
def safe_make_directory(path):
if not os.path.isdir(path):
os.makedirs(path)
def pushd(path):
old_path = os.getcwd()
os.chdir(path)
return old_path
def copy_glob(input_pattern, output_path):
for input_file in glob.glob(input_pattern):
shutil.copy(input_file, output_path)
#--------------------------------------------------------------------------------------------------
# Settings.
#--------------------------------------------------------------------------------------------------
class Settings:
def load(self):
self.this_dir = os.path.dirname(os.path.realpath(__file__))
self.root_dir = os.path.join(self.this_dir, "..")
print("Loading settings from " + SETTINGS_FILENAME + "...")
tree = ElementTree()
try:
tree.parse(SETTINGS_FILENAME)
except IOError:
fatal("Failed to load configuration file '" + SETTINGS_FILENAME + "'")
self.__load_values(tree)
def print_summary(self):
print("")
print(" Platform: " + self.platform)
print(" Path to appleseed release: " + self.appleseed_release_path)
print(" Path to appleseed binaries: " + self.appleseed_bin_path)
print(" Path to appleseed libraries: " + self.appleseed_lib_path)
print(" Path to appleseed shaders: " + self.appleseed_shaders_path)
print(" Path to appleseed schemas: " + self.appleseed_schemas_path)
print(" Path to appleseed settings: " + self.appleseed_settings_path)
print(" Path to appleseed.python: " + self.appleseed_python_path)
print(" Path to maketx: " + self.maketx_path)
print(" Output directory: " + self.output_dir)
print("")
def __load_values(self, tree):
self.platform = self.__get_required(tree, "platform")
self.appleseed_release_path = self.__get_required(tree, "appleseed_release_path")
os.environ['APPLESEED'] = self.appleseed_release_path
self.appleseed_bin_path = os.path.expandvars(self.__get_required(tree, "appleseed_bin_path"))
self.appleseed_lib_path = os.path.expandvars(self.__get_required(tree, "appleseed_lib_path"))
self.appleseed_shaders_path = os.path.expandvars(self.__get_required(tree, "appleseed_shaders_path"))
self.appleseed_schemas_path = os.path.expandvars(self.__get_required(tree, "appleseed_schemas_path"))
self.appleseed_settings_path = os.path.expandvars(self.__get_required(tree, "appleseed_settings_path"))
self.appleseed_python_path = os.path.expandvars(self.__get_required(tree, "appleseed_python_path"))
self.maketx_path = os.path.expandvars(self.__get_required(tree, "maketx_path"))
self.output_dir = os.path.expandvars(self.__get_required(tree, "output_dir"))
def __get_required(self, tree, key):
value = tree.findtext(key)
if value is None:
fatal("Missing value \"{0}\" in configuration file".format(key))
return value
#--------------------------------------------------------------------------------------------------
# Base package builder.
#--------------------------------------------------------------------------------------------------
class PackageBuilder(object):
def __init__(self, settings, package_version, build_date, no_release=False):
self.settings = settings
self.package_version = package_version
self.build_date = build_date
self.no_release = no_release
def build_package(self):
print("Building package:")
print("")
self.orchestrate()
print("")
print("The package was successfully built.")
def orchestrate(self):
self.remove_leftovers()
self.copy_appleseed_python()
self.copy_binaries()
self.copy_dependencies()
self.copy_schemas()
self.copy_shaders()
self.download_settings_files()
self.remove_pyc_files()
self.post_process_package()
if not self.no_release:
self.deploy_blenderseed_to_stage()
self.clean_stage()
self.build_final_zip_file()
self.remove_stage()
def remove_leftovers(self):
progress("Removing leftovers from previous invocations")
safe_delete_directory(os.path.join(self.settings.root_dir, "appleseed"))
safe_delete_directory("blenderseed")
def copy_appleseed_python(self):
progress("Copying appleseed.python to root directory")
# Create destination directory.
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
safe_make_directory(lib_dir)
# Copy appleseed.python.
dir_util.copy_tree(self.settings.appleseed_python_path, lib_dir)
# Remove _appleseedpython.so (Python 2) since blenderseed only needs _appleseedpython3.so (Python 3).
# TODO: implement properly.
safe_delete_file(os.path.join(lib_dir, "appleseed", "_appleseedpython.so"))
safe_delete_file(os.path.join(lib_dir, "appleseed", "_appleseedpython.pyd"))
def copy_binaries(self):
progress("Copying binaries to root directory")
# Create destination directory.
bin_dir = os.path.join(self.settings.root_dir, "appleseed", "bin")
safe_make_directory(bin_dir)
# Copy appleseed binaries.
for bin in [exe("appleseed.cli")]:
shutil.copy(os.path.join(self.settings.appleseed_bin_path, bin), bin_dir)
# Copy maketx.
shutil.copy(exe(self.settings.maketx_path), bin_dir)
def copy_schemas(self):
progress("Copying schemas to root directory")
dir_util.copy_tree(self.settings.appleseed_schemas_path, os.path.join(self.settings.root_dir, "appleseed", "schemas"))
safe_delete_file(os.path.join(self.settings.root_dir, "appleseed", "schemas", ".gitignore"))
def copy_shaders(self):
progress("Copying shaders to root directory")
# Create destination directory.
shaders_dir = os.path.join(self.settings.root_dir, "appleseed", "shaders")
safe_make_directory(shaders_dir)
self.__do_copy_shaders(os.path.join(self.settings.appleseed_shaders_path, "appleseed"), shaders_dir)
self.__do_copy_shaders(os.path.join(self.settings.appleseed_shaders_path, "blenderseed"), shaders_dir)
def __do_copy_shaders(self, source_dir, target_dir):
for root, dirs, files in os.walk(source_dir):
for f in files:
if f.endswith(".oso"):
shutil.copy(os.path.join(root, f), target_dir)
def download_settings_files(self):
progress("Downloading settings files to root directory")
# Create destination directory.
settings_dir = os.path.join(self.settings.root_dir, "appleseed", "settings")
safe_make_directory(settings_dir)
for file in ["appleseed.cli.xml"]:
urllib.urlretrieve(
"https://raw.githubusercontent.com/appleseedhq/appleseed/master/sandbox/settings/{0}".format(file),
os.path.join(settings_dir, file))
def remove_pyc_files(self):
progress("Removing pyc files from root directory")
for root, dirs, files in os.walk(os.path.join(self.settings.root_dir, "appleseed", "lib")):
for f in files:
if f.endswith(".pyc"):
safe_delete_file(os.path.join(root, f))
def deploy_blenderseed_to_stage(self):
progress("Deploying blenderseed to staging directory")
shutil.copytree(self.settings.root_dir, "blenderseed", ignore=shutil.ignore_patterns("scripts"))
def clean_stage(self):
progress("Cleaning staging directory")
safe_delete_directory_recursively("blenderseed", "__pycache__")
for subdirectory in [".git", ".idea", "archives", "docs", "scripts", "tests"]:
safe_delete_directory(os.path.join("blenderseed", subdirectory))
for file in [".gitignore", "README.md"]:
safe_delete_file(os.path.join("blenderseed", file))
def build_final_zip_file(self):
progress("Building final zip file from staging directory")
package_name = "blenderseed-{0}-{1}-{2}".format(self.package_version, self.settings.platform, self.build_date)
package_path = os.path.join(self.settings.output_dir, package_name)
archive_util.make_zipfile(package_path, "blenderseed")
info("Package path: {0}".format(package_path + ".zip"))
def remove_stage(self):
progress("Deleting staging directory")
safe_delete_directory("blenderseed")
def run(self, cmdline):
trace("Running command line: {0}".format(cmdline))
os.system(cmdline)
def run_subprocess(self, cmdline):
p = subprocess.Popen(cmdline, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
return p.returncode, out, err
#--------------------------------------------------------------------------------------------------
# Windows package builder.
#--------------------------------------------------------------------------------------------------
class WindowsPackageBuilder(PackageBuilder):
def copy_dependencies(self):
progress("Windows-specific: Copying dependencies")
bin_dir = self.settings.appleseed_bin_path
for dll in ["appleseed.dll", "appleseed.shared.dll"]:
shutil.copy(os.path.join(bin_dir, dll), os.path.join(self.settings.root_dir, "appleseed", "bin"))
def post_process_package(self):
pass
#--------------------------------------------------------------------------------------------------
# Mac package builder.
#--------------------------------------------------------------------------------------------------
class MacPackageBuilder(PackageBuilder):
SYSTEM_LIBS_PREFIXES = [
"/System/Library/",
"/usr/lib/libcurl",
"/usr/lib/libc++",
"/usr/lib/libbz2",
"/usr/lib/libSystem",
#"/usr/lib/libz",
"/usr/lib/libncurses",
"/usr/lib/libobjc.A.dylib"
]
def copy_dependencies(self):
progress("Mac-specific: Copying dependencies")
# Create destination directory.
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
safe_make_directory(lib_dir)
# Copy appleseed libraries.
for lib in ["libappleseed.dylib", "libappleseed.shared.dylib"]:
shutil.copy(os.path.join(self.settings.appleseed_lib_path, lib), lib_dir)
# Get shared libs needed by binaries.
all_libs = set()
for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")):
libs = self.__get_dependencies_for_file(bin)
all_libs = all_libs.union(libs)
# Get shared libs needed by appleseed.python.
appleseedpython_libs = self.__get_dependencies_for_file(
os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed", "_appleseedpython3.so"))
all_libs = all_libs.union(appleseedpython_libs)
# Get shared libs needed by libraries.
# TODO: we're not computing the full transitive closure here!
lib_libs = set()
for lib in all_libs:
libs = self.__get_dependencies_for_file(lib)
lib_libs = lib_libs.union(libs)
all_libs = all_libs.union(lib_libs)
if True:
# Print dependencies.
trace(" Dependencies:")
for lib in all_libs:
trace(" {0}".format(lib))
# Copy needed libs to lib directory.
for lib in all_libs:
if True:
trace(" Copying {0} to {1}...".format(lib, lib_dir))
shutil.copy(lib, lib_dir)
def post_process_package(self):
progress("Mac-specific: Post-processing package")
self.__fixup_binaries()
def __fixup_binaries(self):
progress("Mac-specific: Fixing up binaries")
self.set_libraries_ids()
self.__change_library_paths_in_libraries()
self.__change_library_paths_in_executables()
def set_libraries_ids(self):
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
for dirpath, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext == ".dylib" or ext == ".so":
lib_path = os.path.join(dirpath, filename)
self.__set_library_id(lib_path, filename)
def __change_library_paths_in_libraries(self):
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
for dirpath, dirnames, filenames in os.walk(lib_dir):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext == ".dylib" or ext == ".so":
lib_path = os.path.join(dirpath, filename)
self.__change_library_paths_in_binary(lib_path)
def __change_library_paths_in_executables(self):
bin_dir = os.path.join(self.settings.root_dir, "appleseed", "bin")
for dirpath, dirnames, filenames in os.walk(bin_dir):
for filename in filenames:
ext = os.path.splitext(filename)[1]
if ext != ".py" and ext != ".conf":
exe_path = os.path.join(dirpath, filename)
self.__change_library_paths_in_binary(exe_path)
# Can be used on executables and dynamic libraries.
def __change_library_paths_in_binary(self, bin_path):
progress("Patching {0}".format(bin_path))
bin_dir = os.path.dirname(bin_path)
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
path_to_appleseed_lib = os.path.relpath(lib_dir, bin_dir)
# fix_paths set to False because we must retrieve the unmodified dependency in order to replace it by the correct one.
for lib_path in self.__get_dependencies_for_file(bin_path, fix_paths=False):
lib_name = os.path.basename(lib_path)
if path_to_appleseed_lib == ".":
self.__change_library_path(bin_path, lib_path, "@loader_path/{0}".format(lib_name))
else:
self.__change_library_path(bin_path, lib_path, "@loader_path/{0}/{1}".format(path_to_appleseed_lib, lib_name))
def __set_library_id(self, target, name):
self.run('install_name_tool -id "{0}" {1}'.format(name, target))
def __change_library_path(self, target, old, new):
self.run('install_name_tool -change "{0}" "{1}" {2}'.format(old, new, target))
def __get_dependencies_for_file(self, filepath, fix_paths=True):
filename = os.path.basename(filepath)
loader_path = os.path.dirname(filepath)
rpath = "/usr/local/lib/" # TODO: a great simplification
if True:
trace("Gathering dependencies for file")
trace(" {0}".format(filepath))
trace("with @loader_path set to")
trace(" {0}".format(loader_path))
trace("and @rpath hardcoded to")
trace(" {0}".format(rpath))
returncode, out, err = self.run_subprocess(["otool", "-L", filepath])
if returncode != 0:
fatal("Failed to invoke otool(1) to get dependencies for {0}: {1}".format(filepath, err))
libs = set()
for line in out.split("\n")[1:]: # skip the first line
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Parse the line.
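# For reference, an otool -L output line typically looks like
#   @rpath/libappleseed.dylib (compatibility version 0.0.0, current version 0.0.0)
# (illustrative values); group(1) of the regex below captures the library path.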
m = re.match(r"(.*) \(compatibility version .*, current version .*\)", line)
if not m:
fatal("Failed to parse line from otool(1) output: " + line)
lib = m.group(1)
# Ignore self-references (why do these happen?).
if lib == filename:
continue
# Ignore system libs.
if self.__is_system_lib(lib):
continue
# Ignore Qt frameworks.
if re.search(r"Qt.*\.framework", lib):
continue
if fix_paths:
# Handle libs relative to @loader_path.
lib = lib.replace("@loader_path", loader_path)
# Handle libs relative to @rpath.
lib = lib.replace("@rpath", rpath)
# Try to handle other relative libs.
if not os.path.isabs(lib):
# TODO: generalize to a collection of user-specified search paths.
candidate = os.path.join(loader_path, lib)
if not os.path.exists(candidate):
candidate = os.path.join("/usr/local/lib/", lib)
if os.path.exists(candidate):
info("Resolved relative dependency {0} as {1}".format(lib, candidate))
lib = candidate
libs.add(lib)
if True:
trace("Dependencies for file {0}:".format(filepath))
for lib in libs:
if os.path.isfile(lib):
trace(u" {0} {1}".format(GREEN_CHECKMARK, lib))
else:
trace(u" {0} {1}".format(RED_CROSSMARK, lib))
# Don't check for missing dependencies if we didn't attempt to fix them.
if fix_paths:
for lib in libs:
if not os.path.isfile(lib):
fatal("Dependency {0} could not be found on disk".format(lib))
return libs
def __is_system_lib(self, lib):
for prefix in self.SYSTEM_LIBS_PREFIXES:
if lib.startswith(prefix):
return True
return False
#--------------------------------------------------------------------------------------------------
# Linux package builder.
#--------------------------------------------------------------------------------------------------
class LinuxPackageBuilder(PackageBuilder):
SYSTEM_LIBS_PREFIXES = [
"linux",
"librt",
"libpthread",
"libGL",
"libX",
"libselinux",
"libICE",
"libSM",
"libdl",
"libm.so",
"libgcc",
"libc.so",
"/lib64/ld-linux-",
"libstdc++",
"libxcb",
"libdrm",
"libnsl",
"libuuid",
"libgthread",
"libglib",
"libgobject",
"libglapi",
"libffi",
"libfontconfig",
"libutil",
"libpython",
"libxshmfence.so"
]
def plugin_extension(self):
return ".so"
def copy_dependencies(self):
progress("Linux-specific: Copying dependencies")
# Create destination directory.
lib_dir = os.path.join(self.settings.root_dir, "appleseed", "lib")
safe_make_directory(lib_dir)
# Copy appleseed libraries.
for lib in ["libappleseed.so", "libappleseed.shared.so"]:
shutil.copy(os.path.join(self.settings.appleseed_lib_path, lib), lib_dir)
# Get shared libs needed by binaries.
all_libs = set()
for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")):
libs = self.__get_dependencies_for_file(bin)
all_libs = all_libs.union(libs)
# Get shared libs needed by appleseed.python.
appleseedpython_libs = self.__get_dependencies_for_file(
os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed", "_appleseedpython3.so"))
all_libs = all_libs.union(appleseedpython_libs)
# Get shared libs needed by libraries.
lib_libs = set()
for lib in all_libs:
libs = self.__get_dependencies_for_file(lib)
lib_libs = lib_libs.union(libs)
all_libs = all_libs.union(lib_libs)
# Copy all shared libraries.
for lib in all_libs:
shutil.copy(lib, lib_dir)
def post_process_package(self):
progress("Linux-specific: Post-processing package")
for bin in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "bin", "*")):
self.run("chrpath -r \$ORIGIN/../lib " + bin)
for lib in glob.glob(os.path.join(self.settings.root_dir, "appleseed", "lib", "*.so")):
self.run("chrpath -d " + lib)
appleseed_python_dir = os.path.join(self.settings.root_dir, "appleseed", "lib", "appleseed")
for py_cpp_module in glob.glob(os.path.join(appleseed_python_dir, "*.so")):
self.run("chrpath -r \$ORIGIN/../ " + py_cpp_module)
def __is_system_lib(self, lib):
for prefix in self.SYSTEM_LIBS_PREFIXES:
if lib.startswith(prefix):
return True
return False
def __get_dependencies_for_file(self, filepath):
returncode, out, err = self.run_subprocess(["ldd", filepath])
if returncode != 0:
fatal("Failed to invoke ldd(1) to get dependencies for {0}: {1}".format(filepath, err))
libs = set()
for line in out.split("\n"):
line = line.strip()
# Ignore empty lines.
if len(line) == 0:
continue
# Ignore system libs.
if self.__is_system_lib(line):
continue
# Ignore appleseed libs.
if "libappleseed" in line:
continue
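# For reference, an ldd output line typically looks like
#   libexample.so.1 => /usr/local/lib/libexample.so.1 (0x00007f1234567000)
# (illustrative values), so split()[2] picks out the resolved path on disk.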
libs.add(line.split()[2])
return libs
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
colorama.init()
parser = argparse.ArgumentParser(description="build a blenderseed package from sources")
parser.add_argument("--nozip", action="store_true", help="copies appleseed binaries to blenderseed folder but does not build a release package")
args = parser.parse_args()
no_release = args.nozip
package_version = subprocess.Popen("git describe --long", stdout=subprocess.PIPE, shell=True).stdout.read().strip()
build_date = datetime.date.today().isoformat()
print("blenderseed.package version " + VERSION)
print("")
settings = Settings()
settings.load()
settings.print_summary()
if os.name == "nt":
package_builder = WindowsPackageBuilder(settings, package_version, build_date, no_release)
elif os.name == "posix" and platform.mac_ver()[0] != "":
package_builder = MacPackageBuilder(settings, package_version, build_date, no_release)
elif os.name == "posix" and platform.mac_ver()[0] == "":
package_builder = LinuxPackageBuilder(settings, package_version, build_date, no_release)
else:
fatal("Unsupported platform: " + os.name)
package_builder.build_package()
if __name__ == "__main__":
main()

avg_line_length: 37.98895 | max_line_length: 148 | alphanum_fraction: 0.597186
qsc_code_*_quality_signal: num_words 3,191 | num_chars 27,504 | mean_word_length 4.949859 | frac_words_unique 0.166719 | frac_chars_top_2grams 0.02735 | frac_chars_top_3grams 0.02849 | frac_chars_top_4grams 0.025704 | frac_chars_dupe_5grams 0.414435 | frac_chars_dupe_6grams 0.345173 | frac_chars_dupe_7grams 0.312124 | frac_chars_dupe_8grams 0.277746 | frac_chars_dupe_9grams 0.262805 | frac_chars_dupe_10grams 0.220766 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.004969 | frac_chars_whitespace 0.239056 | size_file_byte 27,504 | num_lines 723 | num_chars_line_max 149 | num_chars_line_mean 38.041494 | frac_chars_alphabet 0.749725 | frac_chars_comments 0.168339 | cate_xml_start 0 | frac_lines_dupe_lines 0.252677 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.165723 | frac_chars_long_word_length 0.01058 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0.001383 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.11349 | cate_var_zero false | frac_lines_pass 0.002141 | frac_lines_import 0.036403 | frac_lines_simplefunc 0.004283 | score_lines_no_logic 0.190578 | frac_lines_print 0.059957
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0

hexsha: 9c7f69a036f4358b44b78abe3f34ed429e5fbfef | size: 1,420 | ext: py | lang: Python
max_stars_repo_path: wagtailkatex/wagtail_hooks.py | max_stars_repo_name: ongchi/wagtail-katex | max_stars_repo_head_hexsha: c64b491e765e6b87a90d7cd8602153826ee9fe07 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: wagtailkatex/wagtail_hooks.py | max_issues_repo_name: ongchi/wagtail-katex | max_issues_repo_head_hexsha: c64b491e765e6b87a90d7cd8602153826ee9fe07 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: wagtailkatex/wagtail_hooks.py | max_forks_repo_name: ongchi/wagtail-katex | max_forks_repo_head_hexsha: c64b491e765e6b87a90d7cd8602153826ee9fe07 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from django.utils.translation import gettext
from wagtail.admin.rich_text.editors.draftail import features as draftail_features
from wagtail.core import hooks
from .richtext import KaTeXEntityElementHandler, katex_entity_decorator
@hooks.register('register_rich_text_features')
def register_katex_features(features):
features.default_features.append('katex')
"""
Registering the `katex` feature, which uses the `KATEX` Draft.js entity type,
and is stored as HTML with a `<div data-katex-embed="c = \\pm\\sqrt{a^2 + b^2}">` tag.
"""
feature_name = 'katex-embed'
type_ = 'KATEX-EMBED'
features.register_editor_plugin(
'draftail',
feature_name,
draftail_features.EntityFeature(
{
'type': type_,
'icon': 'square-root-alt',
'description': gettext('Equation'),
},
js=[
'wagtailkatex/katex/katex.min.js',
'wagtailkatex/wagtailkatex.js',
],
css={
'all': [
'wagtailkatex/katex/katex.min.css',
]
}
)
)
features.register_converter_rule('contentstate', feature_name, {
'from_database_format': {'div[data-katex-embed]': KaTeXEntityElementHandler()},
'to_database_format': {'entity_decorators': {type_: katex_entity_decorator}},
})

avg_line_length: 32.272727 | max_line_length: 90 | alphanum_fraction: 0.607042
qsc_code_*_quality_signal: num_words 143 | num_chars 1,420 | mean_word_length 5.832168 | frac_words_unique 0.482517 | frac_chars_top_2grams 0.047962 | frac_chars_top_3grams 0.047962 | frac_chars_top_4grams 0.040767 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.001947 | frac_chars_whitespace 0.276761 | size_file_byte 1,420 | num_lines 43 | num_chars_line_max 91 | num_chars_line_mean 33.023256 | frac_chars_alphabet 0.810127 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.231392 | frac_chars_long_word_length 0.11246 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.030303 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.121212 | frac_lines_simplefunc 0 | score_lines_no_logic 0.151515 | frac_lines_print 0
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0

hexsha: 9c7f9627f318b3e1570c92823a8ee10c19ec9aa5 | size: 8,991 | ext: py | lang: Python
max_stars_repo_path: test/tests/bootstrap/test_api20_windows_bootstrap.py | max_stars_repo_name: arunrordell/RackHD | max_stars_repo_head_hexsha: 079c21f45cb38f538c502363aa1ff86dbcac3169 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 451 | max_stars_repo_stars_event_min_datetime: 2015-11-09T13:19:25.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-16T08:00:16.000Z
max_issues_repo_path: test/tests/bootstrap/test_api20_windows_bootstrap.py | max_issues_repo_name: arunrordell/RackHD | max_issues_repo_head_hexsha: 079c21f45cb38f538c502363aa1ff86dbcac3169 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 824 | max_issues_repo_issues_event_min_datetime: 2015-11-10T15:25:50.000Z | max_issues_repo_issues_event_max_datetime: 2018-04-09T09:59:49.000Z
max_forks_repo_path: test/tests/bootstrap/test_api20_windows_bootstrap.py | max_forks_repo_name: arunrordell/RackHD | max_forks_repo_head_hexsha: 079c21f45cb38f538c502363aa1ff86dbcac3169 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 221 | max_forks_repo_forks_event_min_datetime: 2015-11-10T23:00:46.000Z | max_forks_repo_forks_event_max_datetime: 2022-03-16T08:00:22.000Z
content:
'''
Copyright 2017 Dell Inc. or its subsidiaries. All Rights Reserved.
This script tests arbitrary payload of the RackHD API 2.0 OS bootstrap workflows.
The default case is running a minimum payload Windows OS install.
Other Windows-type OS install cases can be specified by creating a payload file and specifying it using the '-extra' argument.
This test takes 30-45 minutes to run.
Example payload file (installed in configuration dir):
{"bootstrap-payload":
{"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX",
"username": "rackhduser",
"password": "RackHDRocks",
"smbUser": "vagrant",
"smbPassword": "vagrant"}}}
}
Example command line using external payload file:
python run_tests.py -stack 4 -test tests/bootstrap/test_api20_windows_bootstrap.py -extra base_windows_2012_install.json
RackHD Windows installation workflow requires special configuration of the RackHD server:
- A customized WinPE environment installed on RackHD server as documented here:
https://github.com/RackHD/on-tools/tree/master/winpe
- Samba installed on the RackHD server and configured as documented here:
http://rackhd.readthedocs.io/en/latest/rackhd/install_os.html?highlight=os%20install
- Windows 2012 installation distro installed on RackHD server or equivalent NFS mount.
- Windows 2012 activation key in the installation payload file.
'''
import fit_path # NOQA: unused import
from nose.plugins.attrib import attr
import fit_common
import flogging
import random
import json
import time
from nosedep import depends
from datetime import datetime
log = flogging.get_loggers()
# sample default base payload
PAYLOAD = {"name": "Graph.InstallWindowsServer",
"options": {"defaults": {"version": "2012",
"repo": "http://172.31.128.1:8080/repo/winpe",
"smbRepo": "\\\\172.31.128.1\\windowsServer2012",
"productkey": "XXXXX-XXXXX-XXXXX-XXXXX-XXXXX",
"username": "rackhduser",
"password": "RackHDRocks",
"smbUser": "vagrant",
"smbPassword": "vagrant"}}}
# if an external payload file is specified, use that
config = fit_common.fitcfg().get('bootstrap-payload', None)
if config:
PAYLOAD = config
# function to return the value of a field from the workflow response
def findall(obj, key):
if isinstance(obj, dict):
for k, v in obj.items():
if k == key:
log.error(" workflow error: %s", v)
findall(v, key)
elif isinstance(obj, list):
for item in obj:
findall(item, key)
else:
pass
# this routine polls a workflow task ID for completion
def wait_for_workflow_complete(instanceid, start_time, waittime=3200, cycle=30):
log.info_1(" Workflow started at time: " + str(datetime.fromtimestamp(start_time)))
while time.time() - start_time < waittime: # limit test to waittime seconds
result = fit_common.rackhdapi("/api/2.0/workflows/" + instanceid)
if result['status'] != 200:
log.error(" HTTP error: " + result['text'])
return False
if result['json']['status'] in ['running', 'pending']:
log.info_5("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
fit_common.time.sleep(cycle)
elif result['json']['status'] == 'succeeded':
log.info_1("{} workflow status: {}".format(result['json']['injectableName'], result['json']['status']))
end_time = time.time()
log.info_1(" Workflow completed at time: " + str(datetime.fromtimestamp(end_time)))
log.info_1(" Workflow duration: " + str(end_time - start_time))
return True
else:
end_time = time.time()
log.info_1(" Workflow failed at time: " + str(datetime.fromtimestamp(end_time)))
log.info_1(" Workflow duration: " + str(end_time - start_time))
try:
res = json.loads(result['text'])
findall(res, "error")
except:
res = result['text']
log.error(" Workflow failed: status: %s", result['json']['status'])
log.error(" Data: %s", json.dumps(res, indent=4, separators=(',', ':')))
return False
try:
res = json.loads(result['text'])
except:
res = result['text']
log.error(" Workflow Timeout: " + json.dumps(res, indent=4, separators=(',', ':')))
return False
# ------------------------ Tests -------------------------------------
@attr(all=False)
class api20_bootstrap_windows(fit_common.unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get the list of nodes
NODECATALOG = fit_common.node_select()
assert (len(NODECATALOG) != 0), "There are no nodes currently discovered"
# Select one node at random
cls.__NODE = NODECATALOG[random.randint(0, len(NODECATALOG) - 1)]
# Print node ID, node BMC MAC, node type
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + cls.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
monurl = "/api/2.0/nodes/" + cls.__NODE + "/catalogs/bmc"
mondata = fit_common.rackhdapi(monurl, action="get")
catalog = mondata['json']
bmcresult = mondata['status']
if bmcresult != 200:
log.info_1(" Node ID: " + cls.__NODE)
log.info_1(" Error on catalog/bmc command")
else:
log.info_1(" Node ID: " + cls.__NODE)
log.info_1(" Node SKU: " + nodesku)
log.info_1(" Node BMC Mac: %s", catalog.get('data')['MAC Address'])
log.info_1(" Node BMC IP Addr: %s", catalog.get('data')['IP Address'])
log.info_1(" Node BMC IP Addr Src: %s", catalog.get('data')['IP Address Source'])
# delete active workflows for specified node
result = fit_common.cancel_active_workflows(cls.__NODE)
assert (result is True), "There are still some active workflows running against the node"
def test01_node_check(self):
# Log node data
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
log.info_1(" Node ID: %s ", self.__class__.__NODE)
log.info_1(" Node SKU: %s ", nodesku)
log.info_1(" Graph Name: Graph.PowerOn.Node")
# Ensure the compute node is powered on and reachable
result = fit_common.rackhdapi('/api/2.0/nodes/' +
self.__class__.__NODE +
'/workflows',
action='post', payload={"name": "Graph.PowerOn.Node"})
self.assertEqual(result['status'], 201, "Node Power on workflow API failed, see logs.")
self.assertTrue(wait_for_workflow_complete(result['json']['instanceId'], time.time(), 50, 5),
"Node Power on workflow failed, see logs.")
@depends(after=test01_node_check)
def test02_os_install(self):
# Log node data
nodeinfo = fit_common.rackhdapi('/api/2.0/nodes/' + self.__class__.__NODE)['json']
nodesku = fit_common.rackhdapi(nodeinfo.get('sku'))['json']['name']
log.info_1(" Node ID: " + self.__class__.__NODE)
log.info_1(" Node SKU: " + nodesku)
log.info_1(" Graph Name: Graph.InstallWindowsServer")
log.info_1(" Payload: " + fit_common.json.dumps(PAYLOAD))
# launch workflow
workflowid = None
result = fit_common.rackhdapi('/api/2.0/nodes/' +
self.__class__.__NODE +
'/workflows',
action='post', payload=PAYLOAD)
if result['status'] == 201:
# workflow running
log.info_1(" InstanceID: " + result['json']['instanceId'])
workflowid = result['json']['instanceId']
else:
# workflow failed with response code
log.error(" InstanceID: " + result['text'])
self.fail("Workflow failed with response code: " + result['status'])
self.assertTrue(wait_for_workflow_complete(workflowid, time.time()), "OS Install workflow failed, see logs.")
if __name__ == '__main__':
fit_common.unittest.main()

avg_line_length: 45.872449 | max_line_length: 127 | alphanum_fraction: 0.587031
qsc_code_*_quality_signal: num_words 1,034 | num_chars 8,991 | mean_word_length 4.971954 | frac_words_unique 0.27853 | frac_chars_top_2grams 0.029955 | frac_chars_top_3grams 0.032678 | frac_chars_top_4grams 0.023342 | frac_chars_dupe_5grams 0.390196 | frac_chars_dupe_6grams 0.371329 | frac_chars_dupe_7grams 0.335343 | frac_chars_dupe_8grams 0.307722 | frac_chars_dupe_9grams 0.255009 | frac_chars_dupe_10grams 0.231667 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.024258 | frac_chars_whitespace 0.280169 | size_file_byte 8,991 | num_lines 195 | num_chars_line_max 128 | num_chars_line_mean 46.107692 | frac_chars_alphabet 0.770087 | frac_chars_comments 0.264375 | cate_xml_start 0 | frac_lines_dupe_lines 0.269841 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.234892 | frac_chars_long_word_length 0.017613 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0.039683
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.039683 | cate_var_zero false | frac_lines_pass 0.02381 | frac_lines_import 0.071429 | frac_lines_simplefunc 0 | score_lines_no_logic 0.150794 | frac_lines_print 0
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0

hexsha: 9c806e8f0ae3b3c96a9df2eadcd9d67e2ad3e5fe | size: 602 | ext: py | lang: Python
max_stars_repo_path: random_number.py | max_stars_repo_name: till-h/alexa | max_stars_repo_head_hexsha: 47891eb97fff375500a032b23fef7a2681b50735 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: random_number.py | max_issues_repo_name: till-h/alexa | max_issues_repo_head_hexsha: 47891eb97fff375500a032b23fef7a2681b50735 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: random_number.py | max_forks_repo_name: till-h/alexa | max_forks_repo_head_hexsha: 47891eb97fff375500a032b23fef7a2681b50735 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from flask import Flask, render_template
from flask_ask import Ask, statement
import random
app = Flask(__name__)
ask = Ask(app, '/')
@ask.intent('RandomNumber', convert={'lowerLimit': int, 'upperLimit': int})
def hello(lowerLimit, upperLimit):
if lowerLimit == None:
lowerLimit = 0
if upperLimit == None:
upperLimit = 100
number = random.randint(lowerLimit, upperLimit)
text = render_template('random_number', lowerLimit=lowerLimit, upperLimit=upperLimit, number=number)
return statement(text).simple_card('Flask-Ask Random Number', text)
if __name__ == '__main__':
app.run(debug=True)

avg_line_length: 31.684211 | max_line_length: 101 | alphanum_fraction: 0.749169
qsc_code_*_quality_signal: num_words 75 | num_chars 602 | mean_word_length 5.786667 | frac_words_unique 0.426667 | frac_chars_top_2grams 0.138249 | frac_chars_top_3grams 0 | frac_chars_top_4grams 0 | frac_chars_dupe_5grams 0 | frac_chars_dupe_6grams 0 | frac_chars_dupe_7grams 0 | frac_chars_dupe_8grams 0 | frac_chars_dupe_9grams 0 | frac_chars_dupe_10grams 0 | frac_chars_replacement_symbols 0 | frac_chars_digital 0.007605 | frac_chars_whitespace 0.126246 | size_file_byte 602 | num_lines 19 | num_chars_line_max 102 | num_chars_line_mean 31.684211 | frac_chars_alphabet 0.81749 | frac_chars_comments 0 | cate_xml_start 0 | frac_lines_dupe_lines 0 | cate_autogen 0 | frac_lines_long_string 0 | frac_chars_string_length 0.127695 | frac_chars_long_word_length 0 | frac_lines_string_concat 0 | cate_encoded_data 0 | frac_chars_hex_words 0 | frac_lines_prompt_comments 0 | frac_lines_assert 0
qsc_codepython_*_quality_signal: cate_ast 1 | frac_lines_func_ratio 0.0625 | cate_var_zero false | frac_lines_pass 0 | frac_lines_import 0.1875 | frac_lines_simplefunc 0 | score_lines_no_logic 0.3125 | frac_lines_print 0
qsc_* columns without the _quality_signal suffix: all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat (null)
effective: 1 | hits: 0

hexsha: 9c81af124f83929d36674b85f7157b8a2ef4f4b9 | size: 9,686 | ext: py | lang: Python
max_stars_repo_path: model/losses.py | max_stars_repo_name: askerlee/rift | max_stars_repo_head_hexsha: d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 11 | max_stars_repo_stars_event_min_datetime: 2022-02-14T08:31:04.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-29T08:20:17.000Z
max_issues_repo_path: model/losses.py | max_issues_repo_name: askerlee/rift | max_issues_repo_head_hexsha: d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2022-02-14T11:19:15.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-19T05:11:25.000Z
max_forks_repo_path: model/losses.py | max_forks_repo_name: askerlee/rift | max_forks_repo_head_hexsha: d4dbf42b82f1f83dfab18f8da8fe3a1d0a716fa2 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
from model.laplacian import LapLoss
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EPE(nn.Module):
def __init__(self):
super(EPE, self).__init__()
def forward(self, flow, gt, loss_mask):
loss_map = (flow - gt.detach()) ** 2
loss_map = (loss_map.sum(1, True) + 1e-6) ** 0.5
return (loss_map * loss_mask)
class Ternary(nn.Module):
def __init__(self):
super(Ternary, self).__init__()
patch_size = 7
out_channels = patch_size * patch_size
self.w = np.eye(out_channels).reshape(
(patch_size, patch_size, 1, out_channels))
self.w = np.transpose(self.w, (3, 2, 0, 1))
self.w = torch.tensor(self.w).float().to(device)
def transform(self, img):
patches = F.conv2d(img, self.w, padding=3, bias=None)
transf = patches - img
transf_norm = transf / torch.sqrt(0.81 + transf**2)
return transf_norm
def rgb2gray(self, rgb):
r, g, b = rgb[:, 0:1, :, :], rgb[:, 1:2, :, :], rgb[:, 2:3, :, :]
gray = 0.2989 * r + 0.5870 * g + 0.1140 * b
return gray
def hamming(self, t1, t2):
dist = (t1 - t2) ** 2
dist_norm = torch.mean(dist / (0.1 + dist), 1, True)
return dist_norm
def valid_mask(self, t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
def forward(self, img0, img1):
img0 = self.transform(self.rgb2gray(img0))
img1 = self.transform(self.rgb2gray(img1))
return self.hamming(img0, img1) * self.valid_mask(img0, 1)
class SOBEL(nn.Module):
def __init__(self):
super(SOBEL, self).__init__()
self.kernelX = torch.tensor([
[1, 0, -1],
[2, 0, -2],
[1, 0, -1],
]).float()
self.kernelY = self.kernelX.clone().T
self.kernelX = self.kernelX.unsqueeze(0).unsqueeze(0).to(device)
self.kernelY = self.kernelY.unsqueeze(0).unsqueeze(0).to(device)
def forward(self, pred, gt):
N, C, H, W = pred.shape[0], pred.shape[1], pred.shape[2], pred.shape[3]
img_stack = torch.cat(
[pred.reshape(N*C, 1, H, W), gt.reshape(N*C, 1, H, W)], 0)
sobel_stack_x = F.conv2d(img_stack, self.kernelX, padding=1)
sobel_stack_y = F.conv2d(img_stack, self.kernelY, padding=1)
pred_X, gt_X = sobel_stack_x[:N*C], sobel_stack_x[N*C:]
pred_Y, gt_Y = sobel_stack_y[:N*C], sobel_stack_y[N*C:]
L1X, L1Y = torch.abs(pred_X-gt_X), torch.abs(pred_Y-gt_Y)
loss = (L1X+L1Y)
return loss
class MeanShift(nn.Conv2d):
def __init__(self, data_mean, data_std, data_range=1, norm=True):
c = len(data_mean)
super(MeanShift, self).__init__(c, c, kernel_size=1)
std = torch.Tensor(data_std)
self.weight.data = torch.eye(c).view(c, c, 1, 1)
if norm:
self.weight.data.div_(std.view(c, 1, 1, 1))
self.bias.data = -1 * data_range * torch.Tensor(data_mean)
self.bias.data.div_(std)
else:
self.weight.data.mul_(std.view(c, 1, 1, 1))
self.bias.data = data_range * torch.Tensor(data_mean)
self.requires_grad = False
class VGGPerceptualLoss(torch.nn.Module):
def __init__(self, rank=0):
super(VGGPerceptualLoss, self).__init__()
blocks = []
pretrained = True
self.vgg_pretrained_features = models.vgg19(pretrained=pretrained).features
self.normalize = MeanShift([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], norm=True).cuda()
for param in self.parameters():
param.requires_grad = False
def forward(self, X, Y, indices=None):
X = self.normalize(X)
Y = self.normalize(Y)
indices = [2, 7, 12, 21, 30]
weights = [1.0/2.6, 1.0/4.8, 1.0/3.7, 1.0/5.6, 10/1.5]
k = 0
loss = 0
for i in range(indices[-1]):
X = self.vgg_pretrained_features[i](X)
Y = self.vgg_pretrained_features[i](Y)
if (i+1) in indices:
loss += weights[k] * (X - Y.detach()).abs().mean() * 0.1
k += 1
return loss
# flow could have any channels.
# https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py
def flow_smooth_delta(flow, if_second_order=False):
def gradient(x):
D_dx = x[:, :, :, 1:] - x[:, :, :, :-1]
D_dy = x[:, :, 1:] - x[:, :, :-1]
return D_dx, D_dy
dx, dy = gradient(flow)
# dx2, dxdy = gradient(dx)
# dydx, dy2 = gradient(dy)
if if_second_order:
dx2, dxdy = gradient(dx)
dydx, dy2 = gradient(dy)
smooth_loss = dx.abs().mean() + dy.abs().mean() + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean()
else:
smooth_loss = dx.abs().mean() + dy.abs().mean()
# smooth_loss = dx.abs().mean() + dy.abs().mean() # + dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean()
# For now we do not add the second-order smoothness loss; it seems too aggressive once enabled and keeps the photo loss from decreasing. TODO
return smooth_loss
# flow should have 4 channels.
# https://github.com/coolbeam/OIFlow/blob/main/utils/tools.py
# weight_type='exp' seems to perform better than 'gauss'.
def edge_aware_smoothness_order1(img0, img1, flow, constant=1.0, weight_type='exp', error_type='L1'):
def weight_fn(x):
if weight_type == 'gauss':
y = x ** 2
elif weight_type == 'exp':
y = torch.abs(x)
else:
raise ValueError('')
return y
def gradient_xy(img):
gx = img[:, :, :, :-1] - img[:, :, :, 1:]
gy = img[:, :, :-1, :] - img[:, :, 1:, :]
return gx, gy
def gradweight_xy(img0, img1):
img0_gx, img0_gy = gradient_xy(img0)
img1_gx, img1_gy = gradient_xy(img1)
img0_wx = torch.exp(-torch.mean(weight_fn(constant * img0_gx), 1, keepdim=True))
img0_wy = torch.exp(-torch.mean(weight_fn(constant * img0_gy), 1, keepdim=True))
img1_wx = torch.exp(-torch.mean(weight_fn(constant * img1_gx), 1, keepdim=True))
img1_wy = torch.exp(-torch.mean(weight_fn(constant * img1_gy), 1, keepdim=True))
# First two flow channels: 1->0 flow. So use img1 weights.
# Second two flow channels: 0->1 flow. So use img0 weights.
# weights_x and weights_y are for x and y's spatial gradients, respectively.
weights_x = torch.cat([img1_wx, img1_wx, img0_wx, img0_wx], dim=1)
weights_y = torch.cat([img1_wy, img0_wy, img0_wy, img1_wy], dim=1)
return weights_x, weights_y
def error_fn(x):
if error_type == 'L1':
y = torch.abs(x)
elif error_type == 'abs_robust':
y = (torch.abs(x) + 0.01).pow(0.4)
else:
raise ValueError('')
return y
# The flow gradients along x, y axes, respectively.
# flow_gx, flow_gy have the same number of channels as flow.
# Whether a channel holds x-flow or y-flow, it should be smooth along both the x and y axes.
# I.e., a y-flow should also be smooth along the x-axis, and an x-flow should also be smooth along the y-axis.
flow_gx, flow_gy = gradient_xy(flow)
# weights_x, weights_y both have 4 channels, same as flow_gx and flow_gy (if the input flow has 4 channels).
weights_x, weights_y = gradweight_xy(img0, img1)
smoothness_x = error_fn(flow_gx) * weights_x
smoothness_y = error_fn(flow_gy) * weights_y
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
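# A small, hedged example of calling edge_aware_smoothness_order1 (not part of the original file).
# The flow tensor is assumed to carry 4 channels (1->0 flow in the first two channels, 0->1 flow in
# the last two, as described above); shapes and values are arbitrary placeholders.
def _edge_aware_smoothness_example():
    import torch
    img0 = torch.rand(2, 3, 64, 64)
    img1 = torch.rand(2, 3, 64, 64)
    flow = torch.randn(2, 4, 64, 64)
    return edge_aware_smoothness_order1(img0, img1, flow, weight_type='exp', error_type='L1')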
# Dual teaching helps slightly.
def dual_teaching_loss(mid_gt, img_stu, flow_stu, img_tea, flow_tea):
loss_distill = 0
# Ws[0]: weight of teacher -> student.
# Ws[1]: weight of student -> teacher.
# Two directions could take different weights.
# Set Ws[1] to 0 to disable student -> teacher.
Ws = [1, 0.5]
use_lap_loss = False
# Laplacian loss performs better in earlier epochs, but worse in later epochs.
# Moreover, Laplacian loss is significantly slower.
if use_lap_loss:
loss_fun = LapLoss(max_levels=3, reduction='none')
else:
loss_fun = nn.L1Loss(reduction='none')
for i in range(2):
student_error = loss_fun(img_stu, mid_gt).mean(1, True)
teacher_error = loss_fun(img_tea, mid_gt).mean(1, True)
# distill_mask indicates where the image warped according to the student's prediction
# is worse than the one warped according to the teacher's prediction.
# If at some points the teacher's warped image is better than the student's,
# then the teacher's flow at those points is regarded as more accurate and is used to teach the student.
distill_mask = (student_error > teacher_error + 0.01).float().detach()
# loss_distill is the sum of the distillation losses in the two directions.
loss_distill += Ws[i] * ((flow_tea.detach() - flow_stu).abs() * distill_mask).mean()
# Swap student and teacher, and calculate the distillation loss again.
img_stu, flow_stu, img_tea, flow_tea = \
img_tea, flow_tea, img_stu, flow_stu
# The distillation loss from the student to the teacher is given a smaller weight.
# loss_distill = loss_distill / 2
return loss_distill
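# Illustrative sketch of calling dual_teaching_loss (not part of the original file). mid_gt is the
# ground-truth middle frame, img_stu/img_tea are the student's and teacher's warped predictions,
# and flow_stu/flow_tea the corresponding flows. The tensors below are random placeholders; the
# 4-channel flow shape is an assumption matching the bidirectional-flow convention used above.
def _dual_teaching_loss_example():
    import torch
    mid_gt = torch.rand(2, 3, 64, 64)
    img_stu, img_tea = torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)
    flow_stu, flow_tea = torch.randn(2, 4, 64, 64), torch.randn(2, 4, 64, 64)
    return dual_teaching_loss(mid_gt, img_stu, flow_stu, img_tea, flow_tea)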
if __name__ == '__main__':
img0 = torch.zeros(3, 3, 256, 256).float().to(device)
img1 = torch.tensor(np.random.normal(
0, 1, (3, 3, 256, 256))).float().to(device)
ternary_loss = Ternary()
print(ternary_loss(img0, img1).shape)
| 39.696721
| 134
| 0.601693
| 1,459
| 9,686
| 3.827964
| 0.203564
| 0.0188
| 0.009848
| 0.010743
| 0.209669
| 0.163653
| 0.126768
| 0.107789
| 0.053357
| 0.045121
| 0
| 0.038462
| 0.259137
| 9,686
| 243
| 135
| 39.860082
| 0.739827
| 0.196263
| 0
| 0.102857
| 0
| 0
| 0.006196
| 0
| 0
| 0
| 0
| 0.004115
| 0
| 1
| 0.12
| false
| 0
| 0.034286
| 0
| 0.274286
| 0.005714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c820bdf9b7f916cd742cf712e94425ee24e76e1
| 5,847
|
py
|
Python
|
project/python/swarm_simulation.py
|
righetti/swarmrobotics
|
f8f6bf72c3aae1f432f3306aebb48fd32a6dd2a7
|
[
"BSD-3-Clause"
] | 8
|
2019-09-14T11:55:49.000Z
|
2022-02-05T23:06:33.000Z
|
project/python/swarm_simulation.py
|
righetti/swarmrobotics
|
f8f6bf72c3aae1f432f3306aebb48fd32a6dd2a7
|
[
"BSD-3-Clause"
] | null | null | null |
project/python/swarm_simulation.py
|
righetti/swarmrobotics
|
f8f6bf72c3aae1f432f3306aebb48fd32a6dd2a7
|
[
"BSD-3-Clause"
] | 7
|
2019-09-16T02:42:41.000Z
|
2021-09-07T03:26:22.000Z
|
import numpy as np
import pybullet as p
import itertools
from robot import Robot
class World():
def __init__(self):
# create the physics simulator
self.physicsClient = p.connect(p.GUI)
p.setGravity(0,0,-9.81)
self.max_communication_distance = 2.0
# We will integrate every 4ms (250Hz update)
self.dt = 1./250.
p.setPhysicsEngineParameter(self.dt, numSubSteps=1)
# Create the plane.
self.planeId = p.loadURDF("../models/plane.urdf")
p.changeDynamics(self.planeId, -1, lateralFriction=5., rollingFriction=0)
self.goalId = p.loadURDF("../models/goal.urdf")
self.goalId = p.loadURDF("../models/goal2.urdf")
# the balls
self.ball1 = p.loadURDF("../models/ball1.urdf")
p.resetBasePositionAndOrientation(self.ball1, [2., 4., 0.5], (0., 0., 0.5, 0.5))
self.ball2 = p.loadURDF("../models/ball2.urdf")
p.resetBasePositionAndOrientation(self.ball2, [4., 2., 0.5], (0., 0., 0.5, 0.5))
p.resetDebugVisualizerCamera(7.0,90.0, -43.0, (1., 1., 0.0))
# Add objects
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [0., -1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [0., 1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [3., -1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [3., 1., 0], (0., 0., 0.5, 0.5))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [1., 2., 0], (0., 0., 0., 1.))
wallId = p.loadSDF("../models/walls.sdf")[0]
p.resetBasePositionAndOrientation(wallId, [2., -2., 0], (0., 0., 0., 1.))
# tube
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-1., 5., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-1., 6., 0], (0., 0., 0., 1.))
# #arena
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2, 4., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 7., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 9., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 11., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-2., 13., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-3., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-5., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-7., 3., 0], (0., 0., 0., 1.))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8, 4., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 6., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 8., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 10., 0], (0., 0., 0.5, 0.5))
# wallId = p.loadSDF("../models/walls.sdf")[0]
# p.resetBasePositionAndOrientation(wallId, [-8., 12., 0], (0., 0., 0.5, 0.5))
# create 6 robots
self.robots = []
for (i,j) in itertools.product(range(3), range(2)):
self.robots.append(Robot([1. * i + 0.5, 1. * j - 0.5, 0.3], 2*i+j, self.dt))
p.stepSimulation()
self.time = 0.0
self.stepSimulation()
self.stepSimulation()
def reset(self):
"""
Resets the position of all the robots
"""
for r in self.robots:
r.reset()
p.stepSimulation()
def stepSimulation(self):
"""
Performs one simulation step: rebuild neighbor lists, deliver messages, update the controllers, and advance the physics.
"""
# for each robot construct list of neighbors
for r in self.robots:
r.neighbors = [] #reset neighbors
r.messages_received = [] #reset message received
pos1, or1 = r.get_pos_and_orientation()
for j,r2 in enumerate(self.robots):
if(r.id != r2.id):
pos2, or2 = r2.get_pos_and_orientation()
if(np.linalg.norm(pos1-pos2) < self.max_communication_distance):
r.neighbors.append(j)
# for each robot send and receive messages
for i,r in enumerate(self.robots):
for msg in r.messages_to_send:
if msg[0] in r.neighbors: #then we can send the message
self.robots[msg[0]].messages_received.append([i, msg[1]]) #add the sender id
r.messages_to_send = []
# update the controllers
if self.time > 1.0:
for r in self.robots:
r.compute_controller()
# do one simulation step
p.stepSimulation()
self.time += self.dt
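# A minimal driver sketch for the World defined above (illustrative, not part of the original file).
# It assumes the URDF/SDF models referenced in __init__ exist on disk and that a display is
# available for the p.GUI connection; the duration value is an arbitrary example.
def _run_world_demo(duration=10.0):
    world = World()
    n_steps = int(duration / world.dt)
    for _ in range(n_steps):
        world.stepSimulation()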
| 43.634328
| 96
| 0.540277
| 745
| 5,847
| 4.212081
| 0.173154
| 0.044614
| 0.042065
| 0.133843
| 0.538241
| 0.520076
| 0.501593
| 0.499363
| 0.494264
| 0.494264
| 0
| 0.067262
| 0.272789
| 5,847
| 133
| 97
| 43.962406
| 0.670743
| 0.380708
| 0
| 0.222222
| 0
| 0
| 0.060374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.063492
| 0
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c82ce7669d0a4f2d3645ab5502b497296602411
| 31,437
|
py
|
Python
|
boto/ec2/elb/__init__.py
|
wt/boto
|
83d5b256c8333307233e1ec7c1e21696e8d32437
|
[
"MIT"
] | 15
|
2015-03-25T05:24:11.000Z
|
2021-12-18T04:24:06.000Z
|
boto/ec2/elb/__init__.py
|
wt/boto
|
83d5b256c8333307233e1ec7c1e21696e8d32437
|
[
"MIT"
] | null | null | null |
boto/ec2/elb/__init__.py
|
wt/boto
|
83d5b256c8333307233e1ec7c1e21696e8d32437
|
[
"MIT"
] | 10
|
2015-04-26T17:56:37.000Z
|
2020-09-24T14:01:53.000Z
|
# Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
"""
This module provides an interface to the Elastic Compute Cloud (EC2)
load balancing service from AWS.
"""
from boto.connection import AWSQueryConnection
from boto.ec2.instanceinfo import InstanceInfo
from boto.ec2.elb.loadbalancer import LoadBalancer, LoadBalancerZones
from boto.ec2.elb.instancestate import InstanceState
from boto.ec2.elb.healthcheck import HealthCheck
from boto.ec2.elb.listelement import ListElement
from boto.regioninfo import RegionInfo, get_regions, load_regions
import boto
RegionData = load_regions().get('elasticloadbalancing', {})
def regions():
"""
Get all available regions for the ELB service.
:rtype: list
:return: A list of :class:`boto.RegionInfo` instances
"""
return get_regions('elasticloadbalancing', connection_cls=ELBConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.ec2.elb.ELBConnection`.
:param str region_name: The name of the region to connect to.
:rtype: :class:`boto.ec2.ELBConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
class ELBConnection(AWSQueryConnection):
APIVersion = boto.config.get('Boto', 'elb_version', '2012-06-01')
DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
DefaultRegionEndpoint = boto.config.get('Boto', 'elb_region_endpoint',
'elasticloadbalancing.us-east-1.amazonaws.com')
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, region=None, path='/',
security_token=None, validate_certs=True, profile_name=None):
"""
Init method to create a new connection to EC2 Load Balancing Service.
.. note:: The region argument is overridden by the region specified in
the boto configuration file.
"""
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
self.region = region
super(ELBConnection, self).__init__(aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy, proxy_port,
proxy_user, proxy_pass,
self.region.endpoint, debug,
https_connection_factory, path,
security_token,
validate_certs=validate_certs,
profile_name=profile_name)
def _required_auth_capability(self):
return ['ec2']
def build_list_params(self, params, items, label):
if isinstance(items, basestring):
items = [items]
for index, item in enumerate(items):
params[label % (index + 1)] = item
def get_all_load_balancers(self, load_balancer_names=None):
"""
Retrieve all load balancers associated with your account.
:type load_balancer_names: list
:keyword load_balancer_names: An optional list of load balancer names.
:rtype: :py:class:`boto.resultset.ResultSet`
:return: A ResultSet containing instances of
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
params = {}
if load_balancer_names:
self.build_list_params(params, load_balancer_names,
'LoadBalancerNames.member.%d')
return self.get_list('DescribeLoadBalancers', params,
[('member', LoadBalancer)])
def create_load_balancer(self, name, zones, listeners=None, subnets=None,
security_groups=None, scheme='internet-facing', complex_listeners=None):
"""
Create a new load balancer for your account. By default the load
balancer will be created in EC2. To create a load balancer inside a
VPC, parameter zones must be set to None and subnets must not be None.
The load balancer will be automatically created under the VPC that
contains the subnet(s) specified.
:type name: string
:param name: The mnemonic name associated with the new load balancer
:type zones: List of strings
:param zones: The names of the availability zone(s) to add.
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
'HTTPS'; SSLCertificateId is the ARN of an AWS IAM
certificate, and must be specified when doing HTTPS.
:type subnets: list of strings
:param subnets: A list of subnet IDs in your VPC to attach to
your LoadBalancer.
:type security_groups: list of strings
:param security_groups: The security groups assigned to your
LoadBalancer within your VPC.
:type scheme: string
:param scheme: The type of a LoadBalancer. By default, Elastic
Load Balancing creates an internet-facing LoadBalancer with
a publicly resolvable DNS name, which resolves to public IP
addresses.
Specify the value internal for this option to create an
internal LoadBalancer with a DNS name that resolves to
private IP addresses.
This option is only available for LoadBalancers attached
to an Amazon VPC.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol,
SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
- Protocol and InstanceProtocol are strings containing either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
:return: The newly created
:class:`boto.ec2.elb.loadbalancer.LoadBalancer`
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name,
'Scheme': scheme}
# Handle legacy listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
if zones:
self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
if subnets:
self.build_list_params(params, subnets, 'Subnets.member.%d')
if security_groups:
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
load_balancer = self.get_object('CreateLoadBalancer',
params, LoadBalancer)
load_balancer.name = name
load_balancer.listeners = listeners
load_balancer.availability_zones = zones
load_balancer.subnets = subnets
load_balancer.security_groups = security_groups
return load_balancer
def create_load_balancer_listeners(self, name, listeners=None, complex_listeners=None):
"""
Creates a Listener (or group of listeners) for an existing
Load Balancer
:type name: string
:param name: The name of the load balancer to create the listeners for
:type listeners: List of tuples
:param listeners: Each tuple contains three or four values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol,
[SSLCertificateId]) where LoadBalancerPortNumber and
InstancePortNumber are integer values between 1 and 65535,
Protocol is a string containing either 'TCP', 'SSL', 'HTTP', or
'HTTPS'; SSLCertificateId is the ARN of an AWS IAM
certificate, and must be specified when doing HTTPS.
:type complex_listeners: List of tuples
:param complex_listeners: Each tuple contains four or five values,
(LoadBalancerPortNumber, InstancePortNumber, Protocol, InstanceProtocol,
SSLCertificateId).
Where:
- LoadBalancerPortNumber and InstancePortNumber are integer
values between 1 and 65535
- Protocol and InstanceProtocol are strings containing either 'TCP',
'SSL', 'HTTP', or 'HTTPS'
- SSLCertificateId is the ARN of an SSL certificate loaded into
AWS IAM
:return: The status of the request
"""
if not listeners and not complex_listeners:
# Must specify one of the two options
return None
params = {'LoadBalancerName': name}
# Handle the simple listeners
if listeners:
for index, listener in enumerate(listeners):
i = index + 1
protocol = listener[2].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[3]
# Handle the full listeners
if complex_listeners:
for index, listener in enumerate(complex_listeners):
i = index + 1
protocol = listener[2].upper()
InstanceProtocol = listener[3].upper()
params['Listeners.member.%d.LoadBalancerPort' % i] = listener[0]
params['Listeners.member.%d.InstancePort' % i] = listener[1]
params['Listeners.member.%d.Protocol' % i] = listener[2]
params['Listeners.member.%d.InstanceProtocol' % i] = listener[3]
if protocol == 'HTTPS' or protocol == 'SSL':
params['Listeners.member.%d.SSLCertificateId' % i] = listener[4]
return self.get_status('CreateLoadBalancerListeners', params)
def delete_load_balancer(self, name):
"""
Delete a Load Balancer from your account.
:type name: string
:param name: The name of the Load Balancer to delete
"""
params = {'LoadBalancerName': name}
return self.get_status('DeleteLoadBalancer', params)
def delete_load_balancer_listeners(self, name, ports):
"""
Deletes a load balancer listener (or group of listeners)
:type name: string
:param name: The name of the load balancer to create the listeners for
:type ports: List int
:param ports: Each int represents the port on the ELB to be removed
:return: The status of the request
"""
params = {'LoadBalancerName': name}
for index, port in enumerate(ports):
params['LoadBalancerPorts.member.%d' % (index + 1)] = port
return self.get_status('DeleteLoadBalancerListeners', params)
def enable_availability_zones(self, load_balancer_name, zones_to_add):
"""
Add availability zones to an existing Load Balancer
All zones must be in the same region as the Load Balancer
Adding zones that are already registered with the Load Balancer
has no effect.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to add.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_add,
'AvailabilityZones.member.%d')
obj = self.get_object('EnableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def disable_availability_zones(self, load_balancer_name, zones_to_remove):
"""
Remove availability zones from an existing Load Balancer.
All zones must be in the same region as the Load Balancer.
Removing zones that are not registered with the Load Balancer
has no effect.
You cannot remove all zones from a Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type zones: List of strings
:param zones: The name of the zone(s) to remove.
:rtype: List of strings
:return: An updated list of zones for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, zones_to_remove,
'AvailabilityZones.member.%d')
obj = self.get_object('DisableAvailabilityZonesForLoadBalancer',
params, LoadBalancerZones)
return obj.zones
def modify_lb_attribute(self, load_balancer_name, attribute, value):
"""Changes an attribute of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to change.
* crossZoneLoadBalancing - Boolean (true)
* accessLog - :py:class:`AccessLogAttribute` instance
* connectionDraining - :py:class:`ConnectionDrainingAttribute` instance
:type value: string
:param value: The new value for the attribute
:rtype: bool
:return: Whether the operation succeeded or not
"""
bool_reqs = ('crosszoneloadbalancing',)
if attribute.lower() in bool_reqs:
if isinstance(value, bool):
if value:
value = 'true'
else:
value = 'false'
params = {'LoadBalancerName': load_balancer_name}
if attribute.lower() == 'crosszoneloadbalancing':
params['LoadBalancerAttributes.CrossZoneLoadBalancing.Enabled'
] = value
elif attribute.lower() == 'accesslog':
params['LoadBalancerAttributes.AccessLog.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.AccessLog.S3BucketName'] = \
value.s3_bucket_name
params['LoadBalancerAttributes.AccessLog.S3BucketPrefix'] = \
value.s3_bucket_prefix
params['LoadBalancerAttributes.AccessLog.EmitInterval'] = \
value.emit_interval
elif attribute.lower() == 'connectiondraining':
params['LoadBalancerAttributes.ConnectionDraining.Enabled'] = \
value.enabled and 'true' or 'false'
params['LoadBalancerAttributes.ConnectionDraining.Timeout'] = \
value.timeout
else:
raise ValueError('InvalidAttribute', attribute)
return self.get_status('ModifyLoadBalancerAttributes', params,
verb='GET')
def get_all_lb_attributes(self, load_balancer_name):
"""Gets all Attributes of a Load Balancer
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:rtype: boto.ec2.elb.attribute.LbAttributes
:return: The attribute object of the ELB.
"""
from boto.ec2.elb.attributes import LbAttributes
params = {'LoadBalancerName': load_balancer_name}
return self.get_object('DescribeLoadBalancerAttributes',
params, LbAttributes)
def get_lb_attribute(self, load_balancer_name, attribute):
"""Gets an attribute of a Load Balancer
This will make an EC2 call for each method call.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type attribute: string
:param attribute: The attribute you wish to see.
* accessLog - :py:class:`AccessLogAttribute` instance
* crossZoneLoadBalancing - Boolean
* connectionDraining - :py:class:`ConnectionDrainingAttribute` instance
:rtype: Attribute dependent
:return: The new value for the attribute
"""
attributes = self.get_all_lb_attributes(load_balancer_name)
if attribute.lower() == 'accesslog':
return attributes.access_log
if attribute.lower() == 'crosszoneloadbalancing':
return attributes.cross_zone_load_balancing.enabled
if attribute.lower() == 'connectiondraining':
return attributes.connection_draining
return None
def register_instances(self, load_balancer_name, instances):
"""
Add new Instances to an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to add.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('RegisterInstancesWithLoadBalancer',
params, [('member', InstanceInfo)])
def deregister_instances(self, load_balancer_name, instances):
"""
Remove Instances from an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances to remove.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DeregisterInstancesFromLoadBalancer',
params, [('member', InstanceInfo)])
def describe_instance_health(self, load_balancer_name, instances=None):
"""
Get the current state of all Instances registered with a Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance ID's of the EC2 instances
to return status for. If not provided,
the state of all instances will be returned.
:rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
:return: list of state info for instances in this Load Balancer.
"""
params = {'LoadBalancerName': load_balancer_name}
if instances:
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DescribeInstanceHealth', params,
[('member', InstanceState)])
def configure_health_check(self, name, health_check):
"""
Define a health check for the EndPoints.
:type name: string
:param name: The mnemonic name associated with the load balancer
:type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:param health_check: A HealthCheck object populated with the desired
values.
:rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
:return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
"""
params = {'LoadBalancerName': name,
'HealthCheck.Timeout': health_check.timeout,
'HealthCheck.Target': health_check.target,
'HealthCheck.Interval': health_check.interval,
'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
return self.get_object('ConfigureHealthCheck', params, HealthCheck)
def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
ssl_certificate_id):
"""
Sets the certificate that terminates the specified listener's SSL
connections. The specified certificate replaces any prior certificate
that was used on the same LoadBalancer and port.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port,
'SSLCertificateId': ssl_certificate_id}
return self.get_status('SetLoadBalancerListenerSSLCertificate', params)
def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes that follow
that of an application-generated cookie. This policy can only be
associated with HTTP listeners.
This policy is similar to the policy created by
CreateLBCookieStickinessPolicy, except that the lifetime of the special
Elastic Load Balancing cookie follows the lifetime of the
application-generated cookie specified in the policy configuration. The
load balancer only inserts a new stickiness cookie when the application
response includes a new application cookie.
If the application cookie is explicitly removed or expires, the session
stops being sticky until a new application cookie is issued.
"""
params = {'CookieName': name,
'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('CreateAppCookieStickinessPolicy', params)
def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
lb_name, policy_name):
"""
Generates a stickiness policy with sticky session lifetimes controlled
by the lifetime of the browser (user-agent) or a specified expiration
period. This policy can only be associated with HTTP listeners.
When a load balancer implements this policy, the load balancer uses a
special cookie to track the backend server instance for each request.
When the load balancer receives a request, it first checks to see if
this cookie is present in the request. If so, the load balancer sends
the request to the application server specified in the cookie. If not,
the load balancer sends the request to a server that is chosen based on
the existing load balancing algorithm.
A cookie is inserted into the response for binding subsequent requests
from the same user to that server. The validity of the cookie is based
on the cookie expiration time, which is specified in the policy
configuration.
None may be passed for cookie_expiration_period.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
if cookie_expiration_period is not None:
params['CookieExpirationPeriod'] = cookie_expiration_period
return self.get_status('CreateLBCookieStickinessPolicy', params)
def create_lb_policy(self, lb_name, policy_name, policy_type, policy_attributes):
"""
Creates a new policy that contains the necessary attributes depending on
the policy type. Policies are settings that are saved for your load
balancer and that can be applied to the front-end listener, or
the back-end application server.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name,
'PolicyTypeName': policy_type}
for index, (name, value) in enumerate(policy_attributes.iteritems(), 1):
params['PolicyAttributes.member.%d.AttributeName' % index] = name
params['PolicyAttributes.member.%d.AttributeValue' % index] = value
# A for-else clause always runs when the loop finishes without 'break', so guard the
# empty value explicitly: it should only be sent when no attributes were supplied.
if not policy_attributes:
params['PolicyAttributes'] = ''
return self.get_status('CreateLoadBalancerPolicy', params)
def delete_lb_policy(self, lb_name, policy_name):
"""
Deletes a policy from the LoadBalancer. The specified policy must not
be enabled for any listeners.
"""
params = {'LoadBalancerName': lb_name,
'PolicyName': policy_name}
return self.get_status('DeleteLoadBalancerPolicy', params)
def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
"""
Associates, updates, or disables a policy with a listener on the load
balancer. Currently only zero (0) or one (1) policy can be associated
with a listener.
"""
params = {'LoadBalancerName': lb_name,
'LoadBalancerPort': lb_port}
if len(policies):
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesOfListener', params)
def set_lb_policies_of_backend_server(self, lb_name, instance_port, policies):
"""
Replaces the current set of policies associated with a port on which
the back-end server is listening with a new set of policies.
"""
params = {'LoadBalancerName': lb_name,
'InstancePort': instance_port}
if policies:
self.build_list_params(params, policies, 'PolicyNames.member.%d')
else:
params['PolicyNames'] = ''
return self.get_status('SetLoadBalancerPoliciesForBackendServer', params)
def apply_security_groups_to_lb(self, name, security_groups):
"""
Applies security groups to the load balancer.
Applying security groups that are already registered with the
Load Balancer has no effect.
:type name: string
:param name: The name of the Load Balancer
:type security_groups: List of strings
:param security_groups: The name of the security group(s) to add.
:rtype: List of strings
:return: An updated list of security groups for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, security_groups,
'SecurityGroups.member.%d')
return self.get_list('ApplySecurityGroupsToLoadBalancer',
params, None)
def attach_lb_to_subnets(self, name, subnets):
"""
Attaches load balancer to one or more subnets.
Attaching subnets that are already registered with the
Load Balancer has no effect.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to add.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('AttachLoadBalancerToSubnets',
params, None)
def detach_lb_from_subnets(self, name, subnets):
"""
Detaches load balancer from one or more subnets.
:type name: string
:param name: The name of the Load Balancer
:type subnets: List of strings
:param subnets: The name of the subnet(s) to detach.
:rtype: List of strings
:return: An updated list of subnets for this Load Balancer.
"""
params = {'LoadBalancerName': name}
self.build_list_params(params, subnets,
'Subnets.member.%d')
return self.get_list('DetachLoadBalancerFromSubnets',
params, None)
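# Hedged usage sketch (not part of boto itself): illustrates the listener tuple format documented
# in create_load_balancer above. The region, zone, load balancer name and certificate ARN are
# placeholder values, not real resources.
def _example_create_load_balancer():
    conn = connect_to_region('us-east-1')
    listeners = [(80, 8080, 'HTTP'),
                 (443, 8443, 'HTTPS', 'arn:aws:iam::123456789012:server-certificate/example-cert')]
    return conn.create_load_balancer('example-lb', ['us-east-1a'], listeners=listeners)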
| 42.946721
| 91
| 0.633426
| 3,484
| 31,437
| 5.612227
| 0.151837
| 0.065054
| 0.027004
| 0.012274
| 0.472971
| 0.436659
| 0.393495
| 0.367207
| 0.357899
| 0.349307
| 0
| 0.004867
| 0.29408
| 31,437
| 731
| 92
| 43.005472
| 0.876217
| 0.424659
| 0
| 0.388489
| 0
| 0
| 0.196384
| 0.134539
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104317
| false
| 0.007194
| 0.032374
| 0.003597
| 0.269784
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9c836060b9b7e80140ebb8a9cc363bc2e1d5ff72
| 9,677
|
py
|
Python
|
basis_set_exchange/cli/bse_cli.py
|
atomse/basis_set_exchange
|
7ffd64082c14d2f61eb43f1c2d44792e8b0e394e
|
[
"BSD-3-Clause"
] | null | null | null |
basis_set_exchange/cli/bse_cli.py
|
atomse/basis_set_exchange
|
7ffd64082c14d2f61eb43f1c2d44792e8b0e394e
|
[
"BSD-3-Clause"
] | null | null | null |
basis_set_exchange/cli/bse_cli.py
|
atomse/basis_set_exchange
|
7ffd64082c14d2f61eb43f1c2d44792e8b0e394e
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Command line interface for the basis set exchange
'''
import argparse
import argcomplete
from .. import version
from .bse_handlers import bse_cli_handle_subcmd
from .check import cli_check_normalize_args
from .complete import (cli_case_insensitive_validator,
cli_family_completer, cli_role_completer, cli_bsname_completer,
cli_write_fmt_completer, cli_read_fmt_completer, cli_reffmt_completer)
def run_bse_cli():
################################################################################################
# NOTE: I am deliberately not using the 'choices' argument in add_argument. I could use it
# for formats, etc, however I wouldn't want to use it for basis set names. Therefore, I handle
# all of that manually so that error output is consistent and clean
################################################################################################
########################################
# Main global options
########################################
parser = argparse.ArgumentParser(description='Command line interface for the basis set exchange')
parser.add_argument('-V', action='version', version='basis_set_exchange ' + version())
parser.add_argument('-d', '--data-dir', metavar='PATH', help='Override which data directory to use')
parser.add_argument('-o', '--output', metavar='PATH', help='Output to given file rather than stdout')
subparsers = parser.add_subparsers(metavar='subcommand', dest='subcmd')
subparsers.required = True # https://bugs.python.org/issue9253#msg186387
########################################
# Listing of data-independent info
########################################
# list-formats subcommand
subp = subparsers.add_parser('list-formats', help='Output a list of basis set formats that can be used when obtaining a basis set')
subp.add_argument('-n', '--no-description', action='store_true', help='Print only the format names')
# list-writer-formats subcommand
subp = subparsers.add_parser('list-writer-formats', help='Output a list of available basis set formats that can be written')
subp.add_argument('-n', '--no-description', action='store_true', help='Print only the format names')
# list-reader-formats
subp = subparsers.add_parser('list-reader-formats', help='Output a list of basis set formats that can be read')
subp.add_argument('-n', '--no-description', action='store_true', help='Print only the format names')
# list-ref-formats subcommand
subp = subparsers.add_parser('list-ref-formats', help='Output a list of all available reference formats and descriptions')
subp.add_argument('-n', '--no-description', action='store_true', help='Print only the reference format names')
# list-roles subcommand
subp = subparsers.add_parser('list-roles', help='Output a list of all available roles and descriptions')
subp.add_argument('-n', '--no-description', action='store_true', help='Print only the role names')
########################################
# Listing of general info and metadata
########################################
# get-data-dir
subparsers.add_parser('get-data-dir', help='Output the default data directory of this package')
# list-basis-sets subcommand
subp = subparsers.add_parser('list-basis-sets', help='Output a list of all available basis sets and descriptions')
subp.add_argument('-n', '--no-description', action='store_true', help='Print only the basis set names')
subp.add_argument('-f', '--family', help='Limit the basis set list to only the specified family').completer = cli_family_completer
subp.add_argument('-r', '--role', help='Limit the basis set list to only the specified role').completer = cli_role_completer
subp.add_argument('-s', '--substr', help='Limit the basis set list to only basis sets whose name contains the specified substring')
subp.add_argument('-e', '--elements', help='Limit the basis set list to only basis sets that contain all the given elements')
# list-families subcommand
subparsers.add_parser('list-families', help='Output a list of all available basis set families')
# lookup-by-role
subp = subparsers.add_parser('lookup-by-role', help='Lookup a companion/auxiliary basis by primary basis and role')
subp.add_argument('basis', help='Name of the primary basis we want the auxiliary basis for').completer = cli_bsname_completer
subp.add_argument('role', help='Role of the auxiliary basis to look for').completer = cli_role_completer
#################################
# Output of info
#################################
# get-basis subcommand
subp = subparsers.add_parser('get-basis', help='Output a formatted basis set')
subp.add_argument('basis', help='Name of the basis set to output').completer = cli_bsname_completer
subp.add_argument('fmt', help='Which format to output the basis set as').completer = cli_write_fmt_completer
subp.add_argument('--elements', help='Which elements of the basis set to output. Default is all defined in the given basis')
subp.add_argument('--version', help='Which version of the basis set to output. Default is the latest version')
subp.add_argument('--noheader', action='store_true', help='Do not output the header at the top')
subp.add_argument('--unc-gen', action='store_true', help='Remove general contractions')
subp.add_argument('--unc-spdf', action='store_true', help='Remove combined sp, spd, ... contractions')
subp.add_argument('--unc-seg', action='store_true', help='Remove segmented contractions')
subp.add_argument('--opt-gen', action='store_true', help='Optimize general contractions')
subp.add_argument('--make-gen', action='store_true', help='Make the basis set as generally-contracted as possible')
# get-refs subcommand
subp = subparsers.add_parser('get-refs', help='Output references for a basis set')
subp.add_argument('basis', help='Name of the basis set to output the references for').completer = cli_bsname_completer
subp.add_argument('reffmt', help='Which format to output the references as').completer = cli_reffmt_completer
subp.add_argument('--elements', help='Which elements to output the references for. Default is all defined in the given basis.')
subp.add_argument('--version', help='Which version of the basis set to get the references for')
# get-info subcommand
subp = subparsers.add_parser('get-info', help='Output general info and metadata for a basis set')
subp.add_argument('basis', help='Name of the basis set to output the info for').completer = cli_bsname_completer
# get-notes subcommand
subp = subparsers.add_parser('get-notes', help='Output the notes for a basis set')
subp.add_argument('basis', help='Name of the basis set to output the notes for').completer = cli_bsname_completer
# get-family subcommand
subp = subparsers.add_parser('get-family', help='Output the family of a basis set')
subp.add_argument('basis', help='Name of the basis set to output the family for').completer = cli_bsname_completer
# get-versions subcommand
subp = subparsers.add_parser('get-versions', help='Output a list of all available versions of a basis set')
subp.add_argument('basis', help='Name of the basis set to list the versions of').completer = cli_bsname_completer
subp.add_argument('-n', '--no-description', action='store_true', help='Print only the version numbers')
# get-family-notes subcommand
subp = subparsers.add_parser('get-family-notes', help='Get the notes of a family of basis sets')
subp.add_argument('family', type=str.lower, help='The basis set family to the get the notes of').completer = cli_family_completer
#################################
# Converting basis sets
#################################
subp = subparsers.add_parser('convert-basis', help='Convert basis set files from one format to another')
subp.add_argument('input_file', type=str, help='Basis set file to convert')
subp.add_argument('output_file', type=str, help='Converted basis set file')
subp.add_argument('--in-fmt', type=str, default=None, help='Input format (default: autodetected from input filename)').completer = cli_read_fmt_completer
subp.add_argument('--out-fmt', type=str, default=None, help='Output format (default: autodetected from output filename)').completer = cli_write_fmt_completer
#################################
# Creating bundles
#################################
subp = subparsers.add_parser('create-bundle', help='Create a bundle of basis sets')
subp.add_argument('fmt', help='Which format to output the basis set as').completer = cli_write_fmt_completer
subp.add_argument('reffmt', help='Which format to output the references as').completer = cli_reffmt_completer
subp.add_argument('bundle_file', help='Bundle/Archive file to create')
subp.add_argument('--archive-type', help='Override the type of archive to create (zip or tbz)')
#############################
# DONE WITH SUBCOMMANDS
#############################
# setup autocomplete
argcomplete.autocomplete(parser, validator=cli_case_insensitive_validator)
# Now parse and handle the args
args = parser.parse_args()
# Check and make sure basis sets, roles, etc, are valid
args = cli_check_normalize_args(args)
# Actually generate the output
output = bse_cli_handle_subcmd(args)
if args.output:
with open(args.output, 'w', encoding='utf-8') as outfile:
outfile.write(output)
else:
print(output)
return 0
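# A small sketch of wiring run_bse_cli() up as a script entry point (an assumption for illustration;
# the installed package would normally expose this through a console-script entry point instead).
if __name__ == '__main__':
    import sys
    sys.exit(run_bse_cli())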
| 59.368098
| 160
| 0.673349
| 1,288
| 9,677
| 4.938665
| 0.177795
| 0.076089
| 0.094325
| 0.057853
| 0.510926
| 0.45606
| 0.374627
| 0.314888
| 0.285804
| 0.285804
| 0
| 0.001463
| 0.15263
| 9,677
| 162
| 161
| 59.734568
| 0.774268
| 0.103338
| 0
| 0.084337
| 0
| 0
| 0.448428
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012048
| false
| 0
| 0.072289
| 0
| 0.096386
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92bb5127dacf316c62cd64b3874b283309deffd5
| 42,452
|
py
|
Python
|
tensorflow/tools/quantization/quantize_graph_test.py
|
tianyapiaozi/tensorflow
|
fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a
|
[
"Apache-2.0"
] | 374
|
2018-12-02T06:59:44.000Z
|
2022-03-15T10:34:00.000Z
|
tensorflow/tools/quantization/quantize_graph_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 157
|
2018-12-02T07:37:39.000Z
|
2022-03-16T09:49:11.000Z
|
tensorflow/tools/quantization/quantize_graph_test.py
|
shrikunjsarda/tensorflow
|
7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae
|
[
"Apache-2.0"
] | 141
|
2018-12-12T11:57:59.000Z
|
2022-02-28T13:12:58.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the graph quantization script.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.platform import flags as flags_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.tools.quantization import quantize_graph
flags = flags_lib
FLAGS = flags.FLAGS
def run_graph_def(graph_def, input_map, outputs):
graph = ops_lib.Graph()
with graph.as_default():
importer.import_graph_def(graph_def, input_map={}, name="")
with session.Session(graph=graph) as sess:
results = sess.run(outputs, feed_dict=input_map)
return results
def test_mat_mul(m, n, k, a, b):
"""Tests a MatMul replacement."""
a_constant_name = "a_constant"
b_constant_name = "b_constant"
mat_mul_name = "mat_mul"
float_graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=a, dtype=dtypes.float32, shape=[m, k])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=b, dtype=dtypes.float32, shape=[k, n])
float_graph_def.node.extend([b_constant])
mat_mul_node = quantize_graph.create_node("MatMul", mat_mul_name,
[a_constant_name, b_constant_name])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_a", False)
quantize_graph.set_attr_bool(mat_mul_node, "transpose_b", False)
float_graph_def.node.extend([mat_mul_node])
test_graph(float_graph_def, {}, [mat_mul_name])
def test_conv(depth, image_width, image_height, image_batch_count, filter_size,
filter_count, stride, padding, input_values, filter_values):
"""Tests a Conv replacement."""
input_constant_name = "input_constant"
filter_constant_name = "filter_constant"
conv_name = "conv"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=input_values,
dtype=dtypes.float32,
shape=[image_batch_count, image_height, image_width, depth])
float_graph_def.node.extend([input_constant])
filter_constant = quantize_graph.create_constant_node(
filter_constant_name,
value=filter_values,
dtype=dtypes.float32,
shape=[filter_size, filter_size, depth, filter_count])
float_graph_def.node.extend([filter_constant])
conv_node = quantize_graph.create_node(
"Conv2D", conv_name, [input_constant_name, filter_constant_name])
quantize_graph.set_attr_dtype(conv_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(conv_node, "strides", [1, stride, stride, 1])
quantize_graph.set_attr_string(conv_node, "padding", padding)
float_graph_def.node.extend([conv_node])
test_graph(float_graph_def, {}, [conv_name])
def are_tensors_near(a, b, tolerance):
"""Tests whether two tensors are nearly identical.
This is a specialized comparison function designed to help debug problems with
quantization. It prints out information about the differences between tensors
on failure, paying special attention to possible biases by looking at the mean
and absolute average errors.
Args:
a: First comparison tensor.
b: Second comparison tensor.
tolerance: Float value indicating how large an error between values is ok.
Returns:
Boolean indicating whether the two inputs were close enough.
"""
flat_a = a.flatten()
flat_b = b.flatten()
if len(flat_a) != len(flat_b):
tf_logging.info("Tensors are different sizes: " + str(len(flat_a)) + " vs "
+ str(len(flat_b)))
return False
value_count = len(flat_a)
how_many_different = 0
total_difference = 0
total_abs_difference = 0
for index in range(value_count):
a_value = flat_a[index]
b_value = flat_b[index]
difference = a_value - b_value
total_difference += difference
total_abs_difference += abs(difference)
if abs(difference) > tolerance:
how_many_different += 1
mean_difference = total_difference / value_count
mean_abs_difference = total_abs_difference / value_count
proportion_different = (how_many_different * 1.0) / value_count
if how_many_different == 0:
return True
else:
tf_logging.info("Tensors have {0} different values ({1}%), with mean"
" difference {2} and mean absolute difference {3}".format(
how_many_different, proportion_different * 100,
mean_difference, mean_abs_difference))
return False
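# Tiny illustrative sketch of are_tensors_near (not part of the original test file): the first pair
# is within tolerance and should return True; the second should log a difference summary and return False.
def _are_tensors_near_example():
  a = np.array([1.0, 2.0, 3.0])
  assert are_tensors_near(a, a + 0.001, 0.01)
  assert not are_tensors_near(a, a + 0.5, 0.01)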
def get_top_value(input_values):
max_value = None
max_index = None
for index, value in enumerate(input_values.flatten()):
if max_value is None or value > max_value:
max_value = value
max_index = index
return max_index, max_value
def test_graph(float_graph_def, input_map, output_names, log_graph=False):
"""Runs the float graph through the rewriter and tests the results."""
float_results = run_graph_def(
float_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
# TODO(petewarden): round test is currently failing because there is no
# RoundToSteps op available.
# round_rewriter = quantize_graph.GraphRewriter(float_graph_def, "round")
# round_graph_def = round_rewriter.rewrite(output_name)
# round_results = run_graph_def(round_graph_def, input_map,
# [output_name + ":0"])
# assert are_tensors_near(expected, round_results[0], 1.0)
#
# TODO(petewarden): Add test for "quantize" mode.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(output_names)
eightbit_results = run_graph_def(
eightbit_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, eightbit_results):
assert are_tensors_near(expected, result, 1.0)
if log_graph:
tf_logging.info("8bit:\n%s", str(eightbit_graph_def))
# Test the weights_rounded mode. This uses the default bit_depth.
weights_rounded_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "weights_rounded", quantized_input_range=None)
weights_rounded_graph_def = weights_rounded_rewriter.rewrite(output_names)
weights_rounded_results = run_graph_def(
weights_rounded_graph_def, input_map,
[output_name + ":0" for output_name in output_names])
for expected, result in zip(float_results, weights_rounded_results):
assert are_tensors_near(expected, result, 1.0)
class QuantizeGraphTest(test.TestCase):
def test_negative_const_problem(self):
shape_constant_name = "shape_constant"
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=-0.8, dtype=dtypes.float32, shape=[1])
quantization_result = quantize_graph.quantize_weight_eightbit(
shape_constant, b"MIN_COMBINED")
self.assertEqual(4, len(quantization_result))
def test_odd_padding_problem(self):
"""Tests one error case we ran into in a real graph."""
test_conv(1, 4, 4, 1, 3, 1, 2, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
[1, 2, 3, 4, 5, 6, 7, 8, 9])
def test_mat_mul_tiny(self):
# These tests are added to test the degenerate case where
# min(matrix) == max(matrix), which used to cause problems.
test_mat_mul(1, 1, 1, [2], [3])
test_mat_mul(1, 2, 1, [1], [2, 3])
test_mat_mul(1, 1, 2, [1, 1], [1, 1])
test_mat_mul(1, 1, 2, [0, 0], [1, 1])
# The general case.
test_mat_mul(1, 1, 2, [1, 2], [1, 2])
def test_mat_mul_small(self):
test_mat_mul(2, 4, 3, [1, 2, 3, 4, 5, 6],
[7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18])
def test_conv(self):
test_conv(1, 4, 3, 1, 3, 1, 1, b"SAME",
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[1, 4, 7, 2, 5, 8, 3, 6, 9])
def test_reshape(self):
"""Tests that MatMul->Reshape->MatMul avoids extra quantize/dequantize."""
def make_matmul(name, a, b):
n = quantize_graph.create_node("MatMul", name, [a.name, b.name])
quantize_graph.set_attr_dtype(n, "T", dtypes.float32)
quantize_graph.set_attr_bool(n, "transpose_a", False)
quantize_graph.set_attr_bool(n, "transpose_b", False)
return n
# matmul_1 = input*weight_1
input_node = quantize_graph.create_constant_node(
"input", value=[0, 1, 2, 3], dtype=dtypes.float32, shape=[4, 1])
weight_1_node = quantize_graph.create_constant_node(
"weight_1",
value=[.5, .6, .7, .8, .9],
dtype=dtypes.float32,
shape=[1, 5])
matmul_1_node = make_matmul("matmul_1", input_node, weight_1_node)
# Reshape 4x5 to 10x2.
new_shape_node = quantize_graph.create_constant_node(
"new_shape_node", value=[10, 2], dtype=dtypes.int32, shape=[2])
reshape_node = quantize_graph.create_node(
"Reshape", "reshape", [matmul_1_node.name, new_shape_node.name])
quantize_graph.set_attr_dtype(reshape_node, "T", dtypes.float32)
# matmul_2_node = reshape*weight_2
weight_2_node = quantize_graph.create_constant_node(
"weight_2", value=[1.5, 2.5], dtype=dtypes.float32, shape=[2, 1])
matmul_2_node = make_matmul("matmul_2", reshape_node, weight_2_node)
g = graph_pb2.GraphDef()
g.node.extend([
input_node, weight_1_node, matmul_1_node, new_shape_node, reshape_node,
weight_2_node, matmul_2_node
])
# Test the graph
test_graph(g, {}, ["matmul_2"])
# Verify there is only one Quantize and one Requantize op.
eightbit_rewriter = quantize_graph.GraphRewriter(
g, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite(["matmul_2"])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(1, ops.count("QuantizedReshape"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_quantize_array(self):
# Test invalid parameters (empty array, or 0 buckets).
self.assertRaises(ValueError, quantize_graph.quantize_array, np.array([]),
2)
self.assertRaises(ValueError, quantize_graph.quantize_array,
np.array([1, 2]), 0)
# Test input array of length 1.
arr = np.array([1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertEqual(arr, qarr)
qarr = quantize_graph.quantize_array(arr, 2)
self.assertEqual(arr, qarr)
# Test input array with all elements equal.
arr = np.array([1, 1, 1])
qarr = quantize_graph.quantize_array(arr, 10)
self.assertTrue((np.array([1, 1, 1]) == qarr).all())
# Test "normal" input arrays.
arr = np.array([0, 0.3, 0.6, 1])
qarr = quantize_graph.quantize_array(arr, 1)
self.assertTrue((np.array([0.5, 0.5, 0.5, 0.5]) == qarr).all())
qarr = quantize_graph.quantize_array(arr, 2)
self.assertTrue((np.array([0.25, 0.25, 0.75, 0.75]) == qarr).all())
qarr = quantize_graph.quantize_array(arr.reshape((2, 2)), 2)
self.assertTrue((np.array([[0.25, 0.25], [0.75, 0.75]]) == qarr).all())
def test_non_float_concat(self):
concat_dim = quantize_graph.create_constant_node(
"concat_dim", value=0, dtype=dtypes.int32, shape=[])
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
b = quantize_graph.create_constant_node(
"b",
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.int32,
shape=[2, 2, 3])
concat = quantize_graph.create_node("Concat", "concat",
[concat_dim.name, a.name, b.name])
quantize_graph.set_attr_int(concat, "N", 2)
quantize_graph.set_attr_dtype(concat, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([concat_dim, a, b, concat])
test_graph(g, {}, [concat.name])
def test_non_float_reshape(self):
a = quantize_graph.create_constant_node(
"a",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.int32,
shape=[2, 2, 3])
shape = quantize_graph.create_constant_node(
"shape", value=[12], dtype=dtypes.int32, shape=[1])
reshape = quantize_graph.create_node("Reshape", "reshape",
[a.name, shape.name])
quantize_graph.set_attr_dtype(reshape, "T", dtypes.int32)
g = graph_pb2.GraphDef()
g.node.extend([a, shape, reshape])
test_graph(g, {}, [reshape.name])
def test_concat(self):
shape_constant_name = "shape_constant"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
shape_constant = quantize_graph.create_constant_node(
shape_constant_name, value=0, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([shape_constant])
a_constant = quantize_graph.create_constant_node(
a_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([a_constant])
b_constant = quantize_graph.create_constant_node(
b_constant_name,
value=[13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],
dtype=dtypes.float32,
shape=[2, 2, 3])
float_graph_def.node.extend([b_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[shape_constant_name, a_constant_name, b_constant_name])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
# Verify the concat is quantized.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([concat_name])
ops = [node.op for node in eightbit_graph_def.node]
self.assertEqual(1, ops.count("QuantizedConcat"))
def test_multiple_outputs(self):
input_constant_name = "input_constant"
split_constant_name = "split_constant"
split_name = "split"
concat_constant_name = "concat_constant"
concat_name = "concat"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
split_constant = quantize_graph.create_constant_node(
split_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([split_constant])
split_node = quantize_graph.create_node(
"Split", split_name, [split_constant_name, input_constant_name])
quantize_graph.set_attr_int(split_node, "num_split", 2)
quantize_graph.set_attr_dtype(split_node, "T", dtypes.float32)
float_graph_def.node.extend([split_node])
concat_constant = quantize_graph.create_constant_node(
concat_constant_name, value=1, dtype=dtypes.int32, shape=[])
float_graph_def.node.extend([concat_constant])
concat_node = quantize_graph.create_node(
"Concat", concat_name,
[concat_constant_name, split_name + ":0", split_name + ":1"])
quantize_graph.set_attr_int(concat_node, "N", 2)
quantize_graph.set_attr_dtype(concat_node, "T", dtypes.float32)
float_graph_def.node.extend([concat_node])
test_graph(float_graph_def, {}, [concat_name])
def test_node_name_from_input(self):
self.assertEqual("SomeName",
quantize_graph.node_name_from_input("^SomeName:2"))
def test_unique_node_name_from_input(self):
self.assertEqual("__hat__SomeName__port__2",
quantize_graph.unique_node_name_from_input("^SomeName:2"))
def test_identity(self):
input_constant_name = "input_constant"
identity_name = "identity"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[2, 6])
float_graph_def.node.extend([input_constant])
identity_node = quantize_graph.create_node("Identity", identity_name,
[input_constant_name])
quantize_graph.set_attr_dtype(identity_node, "T", dtypes.float32)
float_graph_def.node.extend([identity_node])
mul_name = "mul"
mul_node = quantize_graph.create_node("Mul", mul_name,
[identity_name, identity_name])
quantize_graph.set_attr_dtype(mul_node, "T", dtypes.float32)
float_graph_def.node.extend([mul_node])
test_graph(float_graph_def, {}, [mul_name])
def test_keep_control_edges(self):
no_op_name = "no_op"
a_constant_name = "a_constant"
b_constant_name = "b_constant"
a_check_name = "a_check"
b_check_name = "b_check"
a_identity_name = "a_identity"
b_identity_name = "b_identity"
add_name = "add"
graph_def = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
graph_def.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant])
a_check_node = quantize_graph.create_node("CheckNumerics", a_check_name,
[a_constant_name])
graph_def.node.extend([a_check_node])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name,
[a_constant_name, "^" + a_check_name, "^" + no_op_name])
graph_def.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant])
b_check_node = quantize_graph.create_node("CheckNumerics", b_check_name,
[b_constant_name])
graph_def.node.extend([b_check_node])
b_identity_node = quantize_graph.create_node(
"Identity", b_identity_name, [b_constant_name, "^" + b_check_name])
graph_def.node.extend([b_identity_node])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_identity_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
graph_def.node.extend([add_node])
expected_output = graph_pb2.GraphDef()
no_op = quantize_graph.create_node("NoOp", no_op_name, [])
expected_output.node.extend([no_op])
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant])
a_identity_node = quantize_graph.create_node(
"Identity", a_identity_name, [a_constant_name, "^" + no_op_name])
expected_output.node.extend([a_identity_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=1, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant])
add_node = quantize_graph.create_node("Add", add_name,
[a_identity_name, b_constant_name])
quantize_graph.set_attr_dtype(add_node, "T", dtypes.float32)
expected_output.node.extend([add_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
output = graph_util.remove_training_nodes(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [add_name])
self.assertProtoEquals(expected_output, stripped_output)
def test_batch_norm(self):
input_constant_name = "input_constant"
mean_constant_name = "mean_constant"
variance_constant_name = "variance_constant"
beta_constant_name = "beta_constant"
gamma_constant_name = "gamma_constant"
batch_norm_name = "batch_norm"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6],
dtype=dtypes.float32,
shape=[1, 1, 6, 2])
float_graph_def.node.extend([input_constant])
mean_constant = quantize_graph.create_constant_node(
mean_constant_name, value=[10, 20], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([mean_constant])
variance_constant = quantize_graph.create_constant_node(
variance_constant_name,
value=[0.25, 0.5],
dtype=dtypes.float32,
shape=[2])
float_graph_def.node.extend([variance_constant])
beta_constant = quantize_graph.create_constant_node(
beta_constant_name, value=[0.1, 0.6], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([beta_constant])
gamma_constant = quantize_graph.create_constant_node(
gamma_constant_name, value=[0, 0], dtype=dtypes.float32, shape=[2])
float_graph_def.node.extend([gamma_constant])
batch_norm_node = quantize_graph.create_node(
"BatchNormWithGlobalNormalization", batch_norm_name, [
input_constant_name, mean_constant_name, variance_constant_name,
beta_constant_name, gamma_constant_name
])
quantize_graph.set_attr_dtype(batch_norm_node, "T", dtypes.float32)
quantize_graph.set_attr_bool(batch_norm_node, "scale_after_normalization",
False)
quantize_graph.set_attr_float(batch_norm_node, "variance_epsilon", 0.001)
float_graph_def.node.extend([batch_norm_node])
test_graph(float_graph_def, {}, [batch_norm_name])
def test_max_pool(self):
input_constant_name = "input_constant"
max_pool_name = "max_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
max_pool_node = quantize_graph.create_node("MaxPool", max_pool_name,
[input_constant_name])
quantize_graph.set_attr_int_list(max_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(max_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(max_pool_node, "padding", b"SAME")
float_graph_def.node.extend([max_pool_node])
test_graph(float_graph_def, {}, [max_pool_name])
def test_avg_pool(self):
input_constant_name = "input_constant"
avg_pool_name = "avg_pool"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
avg_pool_node = quantize_graph.create_node("AvgPool", avg_pool_name,
[input_constant_name])
quantize_graph.set_attr_dtype(avg_pool_node, "T", dtypes.float32)
quantize_graph.set_attr_int_list(avg_pool_node, "ksize", [1, 2, 2, 1])
quantize_graph.set_attr_int_list(avg_pool_node, "strides", [1, 1, 1, 1])
quantize_graph.set_attr_string(avg_pool_node, "padding", b"SAME")
float_graph_def.node.extend([avg_pool_node])
test_graph(float_graph_def, {}, [avg_pool_name])
def test_relu(self):
input_constant_name = "input_constant"
relu_name = "relu"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu_node = quantize_graph.create_node("Relu", relu_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
float_graph_def.node.extend([relu_node])
test_graph(float_graph_def, {}, [relu_name])
def test_relu_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
relu_node = quantize_graph.create_node("Relu", "relu", [input_node.name])
quantize_graph.set_attr_dtype(relu_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=0, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=12, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[relu_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(
[input_node, relu_node, min_node, max_node, fake_quant_node])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify the quantize/dequantize op counts in the rewritten graph.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
def test_relu6(self):
input_constant_name = "input_constant"
relu6_name = "relu6"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 2, 6, 1])
float_graph_def.node.extend([input_constant])
relu6_node = quantize_graph.create_node("Relu6", relu6_name,
[input_constant_name])
quantize_graph.set_attr_dtype(relu6_node, "T", dtypes.float32)
float_graph_def.node.extend([relu6_node])
test_graph(float_graph_def, {}, [relu6_name])
def test_bias_add(self):
input_constant_name = "input_constant"
offset_constant_name = "offset_constant"
bias_add_name = "bias_add"
float_graph_def = graph_pb2.GraphDef()
input_constant = quantize_graph.create_constant_node(
input_constant_name,
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=dtypes.float32,
shape=[1, 1, 2, 6])
float_graph_def.node.extend([input_constant])
offset_constant = quantize_graph.create_constant_node(
offset_constant_name,
value=[1, 2, 3, 4, 5, 6],
dtype=dtypes.float32,
shape=[6])
float_graph_def.node.extend([offset_constant])
bias_add_node = quantize_graph.create_node(
"BiasAdd", bias_add_name, [input_constant_name, offset_constant_name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def.node.extend([bias_add_node])
test_graph(float_graph_def, {}, [bias_add_name])
def test_quantized_input_range_errors(self):
with self.assertRaises(ValueError):
# Invalid mode.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "weights_rounded",
[0, 1])
with self.assertRaises(ValueError):
# Invalid range.
quantize_graph.GraphRewriter(graph_pb2.GraphDef(), "eightbit", [0, -1])
def test_quantized_input_range_bias_add(self):
input_shape = [1, 1, 2, 6]
input_n = quantize_graph.create_node("Placeholder", "input", [])
quantize_graph.set_attr_dtype(input_n, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(input_n, "shape", input_shape)
offset_n = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5, 6], dtype=dtypes.float32, shape=[6])
bias_add_n = quantize_graph.create_node("BiasAdd", "bias_add",
[input_n.name, offset_n.name])
quantize_graph.set_attr_dtype(bias_add_n, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_n, offset_n, bias_add_n])
input_map = {
input_n.name + ":0":
np.reshape([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], input_shape)
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[bias_add_n.name], [0, 12.])
def test_quantized_input_range_mat_mul(self):
shapes = [[3, 2], [2, 4]]
inputs = []
for i, shape in enumerate(shapes):
node = quantize_graph.create_node("Placeholder", "input_%s" % i, [])
quantize_graph.set_attr_dtype(node, "dtype", dtypes.float32)
quantize_graph.set_attr_shape(node, "shape", shape)
inputs.append(node)
mat_mul_node = quantize_graph.create_node("MatMul", "mat_mul",
[n.name for n in inputs])
quantize_graph.set_attr_dtype(mat_mul_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend(inputs + [mat_mul_node])
input_map = {
inputs[0].name + ":0":
np.reshape([1, 2, 3, 4, 5, 6], shapes[0]),
inputs[1].name + ":0":
np.reshape([.8, .7, .6, .5, .4, .3, .2, .1], shapes[1])
}
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [-1, 20.])
self._RunTestsForQuantizedInputRange(float_graph_def, input_map,
[mat_mul_node.name], [0, 6.])
def _RunTestsForQuantizedInputRange(self, float_graph_def, input_map,
output_names, input_range):
if sys.version_info[0] == 3:
# uint8->quint8 conversion for numpy is not working currently.
return
quantized_input_map = {}
for k, v in input_map.items():
arr = [
int(
round((n - input_range[0]) * 255 / (input_range[1] - input_range[
0]))) for n in v.flat
]
arr = np.array(arr, np.uint8)
arr = arr.reshape(v.shape)
arr = arr.astype(dtypes.quint8.as_numpy_dtype)
quantized_input_map[k] = arr
output_tensors = [output_name + ":0" for output_name in output_names]
float_results = run_graph_def(float_graph_def, input_map, output_tensors)
# Quantize treating the input as quantized in range <input_range>.
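# For example, with input_range=[0, 12.] the conversion loop above stores a float
# input of 6.0 as int(round((6.0 - 0) * 255 / 12)) == 128 in quint8, and the
# rewriter is told to interpret quint8 inputs on that same scale.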
rewriter = quantize_graph.GraphRewriter(float_graph_def, "eightbit",
input_range)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, quantized_input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
# Quantize without treating input as quantized.
rewriter = quantize_graph.GraphRewriter(
float_graph_def, "eightbit", quantized_input_range=None)
graph_def = rewriter.rewrite(output_names)
results = run_graph_def(graph_def, input_map, output_tensors)
for expected, result in zip(float_results, results):
assert are_tensors_near(expected, result, .5)
ops = [node.op for node in graph_def.node]
self.assertEqual(
len(input_map), ops.count("QuantizeV2") + ops.count("Quantize"))
self.assertEqual(len(output_names), ops.count("Dequantize"))
def test_bias_add_w_fake_quant_w_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
min_node = quantize_graph.create_constant_node(
"min_bias_add", value=-.5, dtype=dtypes.float32, shape=[])
max_node = quantize_graph.create_constant_node(
"max_bias_add", value=15.5, dtype=dtypes.float32, shape=[])
fake_quant_node = quantize_graph.create_node(
"FakeQuantWithMinMaxVars", "fake_quant",
[bias_add_node.name, min_node.name, max_node.name])
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([
input_node, offset_node, bias_add_node, min_node, max_node,
fake_quant_node
])
test_graph(float_graph_def, {}, [fake_quant_node.name], log_graph=True)
# Verify the quantize/dequantize op counts in the rewritten graph.
# Pass in fallback_quantization_range, although it will have no effect
# because the FakeQuantWithMinMaxVars are used instead.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-100, 100])
eightbit_graph_def = eightbit_rewriter.rewrite([fake_quant_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# The fallback constants are not in the graph.
self.assertEqual(0, node_names.count("fallback_quantization_min_value"))
self.assertEqual(0, node_names.count("fallback_quantization_max_value"))
def test_bias_add_w_fallback_min_max_vars(self):
input_node = quantize_graph.create_constant_node(
"input",
value=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
dtype=dtypes.float32,
shape=[1, 1, 2, 5])
offset_node = quantize_graph.create_constant_node(
"offset", value=[1, 2, 3, 4, 5], dtype=dtypes.float32, shape=[5])
bias_add_node = quantize_graph.create_node(
"BiasAdd", "bias_add", [input_node.name, offset_node.name])
quantize_graph.set_attr_dtype(bias_add_node, "T", dtypes.float32)
float_graph_def = graph_pb2.GraphDef()
float_graph_def.node.extend([input_node, offset_node, bias_add_node])
test_graph(float_graph_def, {}, [bias_add_node.name], log_graph=True)
# Verify no standalone Quantize op is inserted, there is a single Dequantize op,
# and no RequantizationRange op.
eightbit_rewriter = quantize_graph.GraphRewriter(
float_graph_def,
"eightbit",
quantized_input_range=None,
fallback_quantization_range=[-.5, 15.5])
eightbit_graph_def = eightbit_rewriter.rewrite([bias_add_node.name])
ops = [node.op for node in eightbit_graph_def.node]
node_names = [node.name for node in eightbit_graph_def.node]
# No quantize since all inputs are const and can be quantized up-front.
self.assertEqual(0, ops.count("QuantizeV2") + ops.count("Quantize"))
# One dequantize at the end.
self.assertEqual(1, ops.count("Dequantize"))
# No RequantizationRange
self.assertEqual(0, ops.count("RequantizationRange"))
# The fallback constants are in the graph.
self.assertEqual(1, node_names.count("fallback_quantization_min_value"))
self.assertEqual(1, node_names.count("fallback_quantization_max_value"))
def test_remove_redundant_quantization(self):
a_constant_name = "a_constant"
a_constant_min_name = "a_constant_min"
a_constant_max_name = "a_constant_max"
a_dequantize_name = "a_dequantize"
a_quantize_name = "a_quantize"
b_constant_name = "b_constant"
b_constant_min_name = "b_constant_min"
b_constant_max_name = "b_constant_max"
b_dequantize_name = "b_dequantize"
b_quantize_name = "b_quantize"
mat_mul_name = "mat_mul"
graph_def = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
graph_def.node.extend([a_constant_max])
a_dequantize_node = quantize_graph.create_node(
"Dequantize", a_dequantize_name,
[a_constant_name, a_constant_min_name, a_constant_max_name])
quantize_graph.set_attr_dtype(a_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_dequantize_node])
a_quantize_node = quantize_graph.create_node(
"QuantizeV2", a_quantize_name,
[a_dequantize_name, a_dequantize_name + ":1", a_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(a_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([a_quantize_node])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
graph_def.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
graph_def.node.extend([b_constant_max])
b_dequantize_node = quantize_graph.create_node(
"Dequantize", b_dequantize_name,
[b_constant_name, b_constant_min_name, b_constant_max_name])
quantize_graph.set_attr_dtype(b_dequantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_dequantize_node])
b_quantize_node = quantize_graph.create_node(
"QuantizeV2", b_quantize_name,
[b_dequantize_name, b_dequantize_name + ":1", b_dequantize_name + ":2"])
quantize_graph.set_attr_dtype(b_quantize_node, "T", dtypes.uint8)
graph_def.node.extend([b_quantize_node])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_quantize_name, b_quantize_name, a_quantize_name + ":1",
a_quantize_name + ":2", b_quantize_name + ":1", b_quantize_name + ":2"
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
graph_def.node.extend([mat_mul_node])
expected_output = graph_pb2.GraphDef()
a_constant = quantize_graph.create_constant_node(
a_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([a_constant])
a_constant_min = quantize_graph.create_constant_node(
a_constant_min_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_min])
a_constant_max = quantize_graph.create_constant_node(
a_constant_max_name, value=2, dtype=dtypes.float32, shape=[])
expected_output.node.extend([a_constant_max])
b_constant = quantize_graph.create_constant_node(
b_constant_name, value=(0,), dtype=dtypes.quint8, shape=[])
expected_output.node.extend([b_constant])
b_constant_min = quantize_graph.create_constant_node(
b_constant_min_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_min])
b_constant_max = quantize_graph.create_constant_node(
b_constant_max_name, value=3, dtype=dtypes.float32, shape=[])
expected_output.node.extend([b_constant_max])
mat_mul_node = quantize_graph.create_node("QuantizedMatMul", mat_mul_name, [
a_constant_name, b_constant_name, a_constant_min_name,
a_constant_max_name, b_constant_min_name, b_constant_max_name
])
quantize_graph.set_attr_dtype(mat_mul_node, "T1", dtypes.uint8)
quantize_graph.set_attr_dtype(mat_mul_node, "T2", dtypes.int32)
expected_output.node.extend([mat_mul_node])
expected_output.versions.CopyFrom(graph_def.versions)
expected_output.library.CopyFrom(graph_def.library)
rewriter = quantize_graph.GraphRewriter(
graph_def, [mat_mul_name], quantized_input_range=None)
output = rewriter.remove_redundant_quantization(graph_def)
stripped_output = graph_util.extract_sub_graph(output, [mat_mul_name])
self.assertProtoEquals(expected_output, stripped_output)
if __name__ == "__main__":
test.main()
| 43.900724
| 80
| 0.692123
| 5,933
| 42,452
| 4.607281
| 0.072139
| 0.084178
| 0.068813
| 0.038851
| 0.716993
| 0.664496
| 0.59861
| 0.54026
| 0.512018
| 0.455643
| 0
| 0.029637
| 0.193254
| 42,452
| 966
| 81
| 43.94617
| 0.768512
| 0.08141
| 0
| 0.408451
| 0
| 0
| 0.054351
| 0.006456
| 0
| 0
| 0
| 0.001035
| 0.048656
| 1
| 0.046095
| false
| 0
| 0.020487
| 0
| 0.076825
| 0.00128
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92bc543d24e721550df8b06cf7b80bb7637df99c
| 910
|
py
|
Python
|
SETTINGS.py
|
pirica/fortnite-leaks-image-generator
|
c23633862fd7d2286700f932e5dab41decd2ff72
|
[
"CC0-1.0"
] | 5
|
2020-10-07T23:53:30.000Z
|
2021-09-18T17:50:11.000Z
|
SETTINGS.py
|
pirica/fortnite-leaks-image-generator
|
c23633862fd7d2286700f932e5dab41decd2ff72
|
[
"CC0-1.0"
] | null | null | null |
SETTINGS.py
|
pirica/fortnite-leaks-image-generator
|
c23633862fd7d2286700f932e5dab41decd2ff72
|
[
"CC0-1.0"
] | 5
|
2020-12-13T16:49:41.000Z
|
2021-09-18T17:50:14.000Z
|
backgroundurl = "https://storage.needpix.com/rsynced_images/colored-background.jpg" # <- Needs to be an image URL!
lang = "en" # <- language code
displayset = True # <- Display the Set of the Item
raritytext = True # <- Display the Rarity of the Item
typeconfig = {
"BannerToken": True,
"AthenaBackpack": True,
"AthenaPetCarrier": True,
"AthenaPet": True,
"AthenaPickaxe": True,
"AthenaCharacter": True,
"AthenaSkyDiveContrail": True,
"AthenaGlider": True,
"AthenaDance": True,
"AthenaEmoji": True,
"AthenaLoadingScreen": True,
"AthenaMusicPack": True,
"AthenaSpray": True,
"AthenaToy": True,
"AthenaBattleBus": True,
"AthenaItemWrap": True
}
interval = 5 # <- Time (in seconds) until the bot checks for leaks again | Recommended: 7
watermark = "" # <- Leave it empty if you don't want one
watermarksize = 25 # <- Size of the Watermark
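# A minimal usage sketch (hypothetical; the bot script that consumes this file is
# not part of the settings file itself):
#
#     import SETTINGS
#     if SETTINGS.typeconfig.get("AthenaCharacter", False):
#         generate_image(SETTINGS.backgroundurl, lang=SETTINGS.lang,
#                        watermark=SETTINGS.watermark or None)
#
# `generate_image` is an assumed helper name, not something defined by this project.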
| 28.4375
| 115
| 0.66044
| 98
| 910
| 6.122449
| 0.72449
| 0.025
| 0.046667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005579
| 0.212088
| 910
| 31
| 116
| 29.354839
| 0.831241
| 0.271429
| 0
| 0
| 0
| 0
| 0.432061
| 0.032061
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92bc6a8a2905baaef24ea73868b39d5f28b0a445
| 592
|
py
|
Python
|
src/healthvaultlib/tests/testbase.py
|
rajeevs1992/pyhealthvault
|
2b6fa7c1687300bcc2e501368883fbb13dc80495
|
[
"MIT"
] | 1
|
2015-12-19T09:09:15.000Z
|
2015-12-19T09:09:15.000Z
|
src/healthvaultlib/tests/testbase.py
|
rajeevs1992/pyhealthvault
|
2b6fa7c1687300bcc2e501368883fbb13dc80495
|
[
"MIT"
] | 6
|
2015-12-19T07:53:44.000Z
|
2021-12-13T19:35:10.000Z
|
src/healthvaultlib/tests/testbase.py
|
rajeevs1992/pyhealthvault
|
2b6fa7c1687300bcc2e501368883fbb13dc80495
|
[
"MIT"
] | 2
|
2018-02-20T08:34:50.000Z
|
2018-03-28T14:29:52.000Z
|
import unittest
import settings
from healthvaultlib.helpers.connection import Connection
class TestBase(unittest.TestCase):
def setUp(self):
self.connection = self.get_connection()
def get_connection(self):
conn = Connection(settings.HV_APPID, settings.HV_SERVICE_SERVER)
conn.thumbprint = settings.APP_THUMBPRINT
conn.publickey = settings.APP_PUBLIC_KEY
conn.privatekey = settings.APP_PRIVATE_KEY
conn.connect()
conn.set_person_and_record(settings.OFFLINE_PERSON_ID, settings.OFFLINE_RECORD_ID)
return conn
| 31.157895
| 90
| 0.72973
| 69
| 592
| 6.014493
| 0.492754
| 0.079518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201014
| 592
| 18
| 91
| 32.888889
| 0.877378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.214286
| 0
| 0.5
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92bcfabb83b949d7b865d6edb058159c8c815b8b
| 628
|
py
|
Python
|
regtests/bench/thread_collision.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 17
|
2015-12-13T23:11:31.000Z
|
2020-07-19T00:40:18.000Z
|
regtests/bench/thread_collision.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 8
|
2016-02-22T19:42:56.000Z
|
2016-07-13T10:58:04.000Z
|
regtests/bench/thread_collision.py
|
secureosv/pythia
|
459f9e2bc0bb2da57e9fa8326697d9ef3386883a
|
[
"BSD-3-Clause"
] | 3
|
2016-04-11T20:34:31.000Z
|
2021-03-12T10:33:02.000Z
|
'''
multi-threading (python3 version)
https://docs.python.org/3/library/threading.html
'''
from time import perf_counter  # time.clock() was removed in Python 3.8; perf_counter is the replacement
import threading
THREADS=2
lock = threading.Lock()
A = 0
B = 0
C = 0
def test_globals():
global A, B, C
for i in range(1024*1024):
lock.acquire()
A += 1
B += 2
C = A + B
lock.release()
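# With THREADS == 2 and the lock serialising every update, the expected final
# values are A == 2 * 1024 * 1024 == 2097152, B == 2 * A == 4194304 and
# C == A + B == 6291456.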
def main():
print( 'starting threading test')
starttime = perf_counter()
threads = []
for i in range(THREADS):
t = threading.Thread( target=test_globals, args=() )
t.start()
threads.append( t )
for t in threads:
t.join()
print( perf_counter()-starttime)
print('A:', A)
print('B:', B)
print('C:', C)
main()
| 14.604651
| 54
| 0.630573
| 98
| 628
| 4.020408
| 0.459184
| 0.055838
| 0.030457
| 0.055838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031809
| 0.199045
| 628
| 43
| 55
| 14.604651
| 0.751491
| 0.130573
| 0
| 0
| 0
| 0
| 0.053803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.133333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92bd6cd2780084175f5bca66b4d32f6768777683
| 2,270
|
py
|
Python
|
game/board.py
|
scooler/checkers
|
90bfe8702c6005c767a8673caed6e7e2f0ce5879
|
[
"MIT"
] | null | null | null |
game/board.py
|
scooler/checkers
|
90bfe8702c6005c767a8673caed6e7e2f0ce5879
|
[
"MIT"
] | null | null | null |
game/board.py
|
scooler/checkers
|
90bfe8702c6005c767a8673caed6e7e2f0ce5879
|
[
"MIT"
] | null | null | null |
import numpy as np
class Board:
"""
0 - black
1 - white
"""
def __init__(self):
board = [
[0, 1] * 4,
[1, 0] * 4
] * 4
players_board = [
[0, 1] * 4, # player 1
[1, 0] * 4
] + [[0] * 8] * 4 + [ # 4 rows of nothing
[0, 2] * 4, # player 2
[2, 0] * 4
]
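# Resulting 8x8 layout: two rows of player-1 pieces (value 1), four empty rows
# (value 0), then two rows of player-2 pieces (value 2), on alternating squares.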
self.board = np.array(board)
self.players_board = np.array(players_board)
self.x_size = 8
self.y_size = 8
# def move(self, x, y, current_player):
# self.board[x, y] = current_player
# def are_same_and_non_zero(self, array):
# return np.unique(array).size == 1 and array[0] != 0
# def is_board_full(self):
# return not np.any(np.unique(self.board) == 0)
def is_finished(self):
"""is game finished"""
return True
# for i in range(0, self.x_size): # rows
# if self.are_same_and_non_zero(self.board[i, :]):
# self.player_who_won = self.board[i, 0]
# self.result = 'Won {} - row {}'.format(self.player(self.player_who_won), i)
# return True
# for i in range(0, self.y_size): # columns
# if self.are_same_and_non_zero(self.board[:, i]):
# self.player_who_won = self.board[0, i]
# self.result = 'Won {} - col {}'.format(self.player(self.player_who_won), i)
# return True
# if self.are_same_and_non_zero(np.diag(self.board)): # diagonal
# self.player_who_won = self.board[1, 1]
# self.result = 'Won {} - diagonal {}'.format(self.player(self.player_who_won), i)
# return True
# if self.are_same_and_non_zero(np.diag(np.flipud(self.board))): # anty-diagonal
# self.player_who_won = self.board[1, 1]
# self.result = 'Won {} - anty-diagonal {}'.format(self.player(self.player_who_won), i)
# return True
# if self.is_board_full():
# self.player_who_won = 0 # nobody
# self.result = 'Draw'
# return True # draw
return False
def show(self):
# print(self.board)
# print(self.players_board)
return
# def player(self, player_no):
# if player_no == 1: return 'Player 1 (X)'
# if player_no == 2: return 'Player 2 (O)'
# def show_player_info(self, player_no):
# print("It's turn of ", self.player(player_no))
| 28.024691
| 93
| 0.574009
| 341
| 2,270
| 3.633431
| 0.199413
| 0.129136
| 0.094431
| 0.116223
| 0.425343
| 0.425343
| 0.408394
| 0.408394
| 0.374496
| 0.374496
| 0
| 0.027811
| 0.271366
| 2,270
| 80
| 94
| 28.375
| 0.721282
| 0.697357
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.043478
| 0.043478
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92bed45f1cd8f2bc90c85f74109f48fc3d320089
| 5,261
|
py
|
Python
|
zge/engine.py
|
zhester/zge
|
246096a8c1fd26472091aac747a3fffda58f3072
|
[
"BSD-2-Clause"
] | null | null | null |
zge/engine.py
|
zhester/zge
|
246096a8c1fd26472091aac747a3fffda58f3072
|
[
"BSD-2-Clause"
] | null | null | null |
zge/engine.py
|
zhester/zge
|
246096a8c1fd26472091aac747a3fffda58f3072
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Zoe Game Engine Core Implementation
===================================
Requirements
------------
[pygame](http://www.pygame.org/)
"""
# core packages
# third-party packages
import pygame
# local package
import layer
__version__ = '0.0.0'
#=============================================================================
class Engine( object ):
"""
Simple game engine object.
"""
#=========================================================================
def __init__( self, size ):
"""
Initializes an Engine object.
"""
# pygame initialization
pygame.init()
# initialize the root display surface
self.window = pygame.display.set_mode( size, 0, 32 )
# set the title bar text and iconification text
pygame.display.set_caption( 'Demonstration', 'Demo' )
# set the application icon
icon = pygame.image.load( '../assets/z32.png' )
pygame.display.set_icon( icon )
# create a list of normal display layers
self._layers = []
# create a transparent "top" layer for overlayed information
self._top = layer.TextLayer()
# initialize last tick value
self._last_tick = pygame.time.get_ticks()
self._last_wait = 0
# set an FPS cap
self._fps = 0.0
self._fps_limit = 120.0
self._tick_step = int( round( 1000.0 / self._fps_limit ) )
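# e.g. with the 120 FPS cap above, _tick_step == int(round(1000.0 / 120.0)) == 8 ms,
# so each pass of the game loop waits out whatever is left of an 8 ms budget.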
# engine is currently running
self._is_running = False
# short debug string for various things
self._debug = ''
#=========================================================================
def run( self ):
"""
Run the game loop (does not return until the application quits).
"""
# update tick value before entering the loop
self._last_tick = pygame.time.get_ticks()
# execute infinite application loop
self._is_running = True
while self._is_running:
# process event queue
for event in pygame.event.get():
# check for quit event
if event.type == pygame.QUIT:
self._is_running = False
# check for key event
elif ( event.type == pygame.KEYDOWN ) \
or ( event.type == pygame.KEYUP ) :
self.trigger_key_event( event )
# exit application loop if done
if self._is_running == False:
break
# update the game display
self.update()
# ZIH - simulate hard work
#pygame.time.delay( 3 )
# compute duration of last event/render loop
end_tick = pygame.time.get_ticks()
delta = end_tick - self._last_tick
self._last_tick = end_tick
# update FPS value
if delta > 0:
self._fps = 1000.0 / float( delta )
else:
self._fps = self._fps_limit
# compute remaining time available inside this iteration
if delta < self._tick_step:
self._last_wait = self._tick_step - delta
else:
self._last_wait = 0
# let the OS do other stuff on this core
pygame.time.wait( self._last_wait )
# shut down pygame
pygame.quit()
# return exit status
return 0
#=========================================================================
def trigger_key_event( self, event ):
"""
Initiates key input events.
"""
# ZIH - temp, just seeing how to poll the keys
mods = pygame.key.get_mods()
mod_bits = [
( pygame.KMOD_ALT, 'A' ),
( pygame.KMOD_CTRL, 'C' ),
( pygame.KMOD_SHIFT, 'S' )
]
mod_str = ''.join( b[ 1 ] for b in mod_bits if b[ 0 ] & mods )
if event.type == pygame.KEYUP:
self._debug = '({})'.format( mod_str )
elif event.type == pygame.KEYDOWN:
self._debug = '({}){}'.format(
mod_str,
pygame.key.name( event.key )
)
#=========================================================================
def update( self ):
"""
Updates the display.
"""
# update overlayed information
self._top.set_text(
' [ fps:{:4.0f} sch:{:3} tck:{:08} dbg:{} ]'.format(
self._fps,
self._last_wait,
self._last_tick,
self._debug
)
)
# draw the display on the back buffer
self._draw_layers()
# update the display (swap video buffers)
pygame.display.update()
#=========================================================================
def _draw_layers( self ):
"""
Blits all the display layers onto the back buffer.
"""
# fill the background
self.window.fill( ( 32, 32, 32 ) )
# blit all user layers
for layer in self._layers:
layer.blit( self.window )
# blit the top layer
self._top.blit( self.window )
| 27.118557
| 78
| 0.472914
| 533
| 5,261
| 4.499062
| 0.360225
| 0.033361
| 0.025021
| 0.021268
| 0.091743
| 0.025021
| 0.025021
| 0
| 0
| 0
| 0
| 0.012299
| 0.350884
| 5,261
| 193
| 79
| 27.259067
| 0.689898
| 0.345942
| 0
| 0.103896
| 0
| 0
| 0.028817
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064935
| false
| 0
| 0.025974
| 0
| 0.116883
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92c3f1ad626b115da6ffe9d3c9d13ac69cd2a64e
| 18,742
|
py
|
Python
|
litex_boards/platforms/xilinx_kcu105.py
|
smunaut/litex-boards
|
caac75c7dbcba68d9f4fb948107cb5d6ff60e05f
|
[
"BSD-2-Clause"
] | 177
|
2019-06-13T09:54:49.000Z
|
2022-03-29T02:25:13.000Z
|
litex_boards/platforms/xilinx_kcu105.py
|
smunaut/litex-boards
|
caac75c7dbcba68d9f4fb948107cb5d6ff60e05f
|
[
"BSD-2-Clause"
] | 347
|
2019-06-12T17:47:45.000Z
|
2022-03-30T21:59:01.000Z
|
litex_boards/platforms/xilinx_kcu105.py
|
smunaut/litex-boards
|
caac75c7dbcba68d9f4fb948107cb5d6ff60e05f
|
[
"BSD-2-Clause"
] | 202
|
2019-06-11T15:01:26.000Z
|
2022-03-31T16:25:19.000Z
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2017-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk125", 0,
Subsignal("p", Pins("G10"), IOStandard("LVDS")),
Subsignal("n", Pins("F10"), IOStandard("LVDS"))
),
("clk300", 0,
Subsignal("p", Pins("AK17"), IOStandard("DIFF_SSTL12")),
Subsignal("n", Pins("AK16"), IOStandard("DIFF_SSTL12"))
),
("cpu_reset", 0, Pins("AN8"), IOStandard("LVCMOS18")),
# Leds
("user_led", 0, Pins("AP8"), IOStandard("LVCMOS18")),
("user_led", 1, Pins("H23"), IOStandard("LVCMOS18")),
("user_led", 2, Pins("P20"), IOStandard("LVCMOS18")),
("user_led", 3, Pins("P21"), IOStandard("LVCMOS18")),
("user_led", 4, Pins("N22"), IOStandard("LVCMOS18")),
("user_led", 5, Pins("M22"), IOStandard("LVCMOS18")),
("user_led", 6, Pins("R23"), IOStandard("LVCMOS18")),
("user_led", 7, Pins("P23"), IOStandard("LVCMOS18")),
# Buttons
("user_btn_c", 0, Pins("AE10"), IOStandard("LVCMOS18")),
("user_btn_n", 0, Pins("AD10"), IOStandard("LVCMOS18")),
("user_btn_s", 0, Pins("AF8"), IOStandard("LVCMOS18")),
("user_btn_w", 0, Pins("AF9"), IOStandard("LVCMOS18")),
("user_btn_e", 0, Pins("AE8"), IOStandard("LVCMOS18")),
# Switches
("user_dip_btn", 0, Pins("AN16"), IOStandard("LVCMOS12")),
("user_dip_btn", 1, Pins("AN19"), IOStandard("LVCMOS12")),
("user_dip_btn", 2, Pins("AP18"), IOStandard("LVCMOS12")),
("user_dip_btn", 3, Pins("AN14"), IOStandard("LVCMOS12")),
# SMA
("user_sma_clock", 0,
Subsignal("p", Pins("D23"), IOStandard("LVDS")),
Subsignal("n", Pins("C23"), IOStandard("LVDS"))
),
("user_sma_clock_p", 0, Pins("D23"), IOStandard("LVCMOS18")),
("user_sma_clock_n", 0, Pins("C23"), IOStandard("LVCMOS18")),
("user_sma_gpio", 0,
Subsignal("p", Pins("H27"), IOStandard("LVDS")),
Subsignal("n", Pins("G27"), IOStandard("LVDS"))
),
("user_sma_gpio_p", 0, Pins("H27"), IOStandard("LVCMOS18")),
("user_sma_gpio_n", 0, Pins("G27"), IOStandard("LVCMOS18")),
# I2C
("i2c", 0,
Subsignal("scl", Pins("J24")),
Subsignal("sda", Pins("J25")),
IOStandard("LVCMOS18")
),
# Serial
("serial", 0,
Subsignal("cts", Pins("L23")),
Subsignal("rts", Pins("K27")),
Subsignal("tx", Pins("K26")),
Subsignal("rx", Pins("G25")),
IOStandard("LVCMOS18")
),
# SPIFlash
("spiflash", 0, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("U7")),
Subsignal("dq", Pins("AC7 AB7 AA7 Y7")),
IOStandard("LVCMOS18")
),
("spiflash", 1, # clock needs to be accessed through primitive
Subsignal("cs_n", Pins("G26")),
Subsignal("dq", Pins("M20 L20 R21 R22")),
IOStandard("LVCMOS18")
),
# SDCard
("spisdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cs_n", Pins("AH8")),
Subsignal("mosi", Pins("AD9"), Misc("PULLUP")),
Subsignal("miso", Pins("AP9"), Misc("PULLUP")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
("sdcard", 0,
Subsignal("clk", Pins("AL10")),
Subsignal("cmd", Pins("AD9"), Misc("PULLUP True")),
Subsignal("data", Pins("AP9 AN9 AH9 AH8"), Misc("PULLUP True")),
Misc("SLEW=FAST"),
IOStandard("LVCMOS18")
),
# Rotary Encoder
("rotary", 0,
Subsignal("a", Pins("Y21")),
Subsignal("b", Pins("AD26")),
Subsignal("push", Pins("AF28")),
IOStandard("LVCMOS18")
),
# HDMI
("hdmi", 0,
Subsignal("d", Pins(
"AK11 AP11 AP13 AN13 AN11 AM11 AN12 AM12",
"AL12 AK12 AL13 AK13 AD11 AH12 AG12 AJ11",
"AG10 AK8")),
Subsignal("de", Pins("AE11")),
Subsignal("clk", Pins("AF13")),
Subsignal("vsync", Pins("AH13")),
Subsignal("hsync", Pins("AE13")),
Subsignal("spdif", Pins("AE12")),
Subsignal("spdif_out", Pins("AF12")),
IOStandard("LVCMOS18")
),
# DDR4 SDRAM
("ddram", 0,
Subsignal("a", Pins(
"AE17 AH17 AE18 AJ15 AG16 AL17 AK18 AG17",
"AF18 AH19 AF15 AD19 AJ14 AG19"),
IOStandard("SSTL12_DCI")),
Subsignal("ba", Pins("AF17 AL15"), IOStandard("SSTL12_DCI")),
Subsignal("bg", Pins("AG15"), IOStandard("SSTL12_DCI")),
Subsignal("ras_n", Pins("AF14"), IOStandard("SSTL12_DCI")), # A16
Subsignal("cas_n", Pins("AG14"), IOStandard("SSTL12_DCI")), # A15
Subsignal("we_n", Pins("AD16"), IOStandard("SSTL12_DCI")), # A14
Subsignal("cs_n", Pins("AL19"), IOStandard("SSTL12_DCI")),
Subsignal("act_n", Pins("AH14"), IOStandard("SSTL12_DCI")),
#Subsignal("ten", Pins("AH16"), IOStandard("SSTL12_DCI")),
#Subsignal("alert_n", Pins("AJ16"), IOStandard("SSTL12_DCI")),
#Subsignal("par", Pins("AD18"), IOStandard("SSTL12_DCI")),
Subsignal("dm", Pins("AD21 AE25 AJ21 AM21 AH26 AN26 AJ29 AL32"),
IOStandard("POD12_DCI")),
Subsignal("dq", Pins(
"AE23 AG20 AF22 AF20 AE22 AD20 AG22 AE20",
"AJ24 AG24 AJ23 AF23 AH23 AF24 AH22 AG25",
"AL22 AL25 AM20 AK23 AK22 AL24 AL20 AL23",
"AM24 AN23 AN24 AP23 AP25 AN22 AP24 AM22",
"AH28 AK26 AK28 AM27 AJ28 AH27 AK27 AM26",
"AL30 AP29 AM30 AN28 AL29 AP28 AM29 AN27",
"AH31 AH32 AJ34 AK31 AJ31 AJ30 AH34 AK32",
"AN33 AP33 AM34 AP31 AM32 AN31 AL34 AN32"),
IOStandard("POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_p", Pins("AG21 AH24 AJ20 AP20 AL27 AN29 AH33 AN34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("dqs_n", Pins("AH21 AJ25 AK20 AP21 AL28 AP30 AJ33 AP34"),
IOStandard("DIFF_POD12_DCI"),
Misc("PRE_EMPHASIS=RDRV_240"),
Misc("EQUALIZATION=EQ_LEVEL2")),
Subsignal("clk_p", Pins("AE16"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("clk_n", Pins("AE15"), IOStandard("DIFF_SSTL12_DCI")),
Subsignal("cke", Pins("AD15"), IOStandard("SSTL12_DCI")),
Subsignal("odt", Pins("AJ18"), IOStandard("SSTL12_DCI")),
Subsignal("reset_n", Pins("AL18"), IOStandard("LVCMOS12")),
Misc("SLEW=FAST"),
),
# PCIe
("pcie_x1", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2")),
Subsignal("rx_n", Pins("AB1")),
Subsignal("tx_p", Pins("AC4")),
Subsignal("tx_n", Pins("AC3"))
),
("pcie_x2", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2")),
Subsignal("rx_n", Pins("AB1 AD1")),
Subsignal("tx_p", Pins("AC4 AE4")),
Subsignal("tx_n", Pins("AC3 AE3"))
),
("pcie_x4", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5"))
),
("pcie_x8", 0,
Subsignal("rst_n", Pins("K22"), IOStandard("LVCMOS18")),
Subsignal("clk_p", Pins("AB6")),
Subsignal("clk_n", Pins("AB5")),
Subsignal("rx_p", Pins("AB2 AD2 AF2 AH2 AJ4 AK2 AM2 AP2")),
Subsignal("rx_n", Pins("AB1 AD1 AF1 AH1 AJ3 AK1 AM1 AP1")),
Subsignal("tx_p", Pins("AC4 AE4 AG4 AH6 AK6 AL4 AM6 AN4")),
Subsignal("tx_n", Pins("AC3 AE3 AG3 AH5 AK5 AL3 AM5 AN3"))
),
# SGMII Clk
("sgmii_clock", 0,
Subsignal("p", Pins("P26"), IOStandard("LVDS_25")),
Subsignal("n", Pins("N26"), IOStandard("LVDS_25"))
),
# SI570
("si570_refclk", 0,
Subsignal("p", Pins("P6")),
Subsignal("n", Pins("P5"))
),
# SMA
("user_sma_mgt_refclk", 0,
Subsignal("p", Pins("V6")),
Subsignal("n", Pins("V5"))
),
("user_sma_mgt_tx", 0,
Subsignal("p", Pins("R4")),
Subsignal("n", Pins("R3"))
),
("user_sma_mgt_rx", 0,
Subsignal("p", Pins("P2")),
Subsignal("n", Pins("P1"))
),
# SFP
("sfp", 0,
Subsignal("txp", Pins("U4")),
Subsignal("txn", Pins("U3")),
Subsignal("rxp", Pins("T2")),
Subsignal("rxn", Pins("T1"))
),
("sfp_tx", 0,
Subsignal("p", Pins("U4")),
Subsignal("n", Pins("U3")),
),
("sfp_rx", 0,
Subsignal("p", Pins("T2")),
Subsignal("n", Pins("T1")),
),
("sfp_tx_disable_n", 0, Pins("AL8"), IOStandard("LVCMOS18")),
("sfp", 1,
Subsignal("txp", Pins("W4")),
Subsignal("txn", Pins("W3")),
Subsignal("rxp", Pins("V2")),
Subsignal("rxn", Pins("V1"))
),
("sfp_tx", 1,
Subsignal("p", Pins("W4")),
Subsignal("n", Pins("W3")),
),
("sfp_rx", 1,
Subsignal("p", Pins("V2")),
Subsignal("n", Pins("V1")),
),
("sfp_tx_disable_n", 1, Pins("D28"), IOStandard("LVCMOS18")),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
("HPC", {
"DP0_C2M_P" : "F6",
"DP0_C2M_N" : "F5",
"DP0_M2C_P" : "E4",
"DP0_M2C_N" : "E3",
"DP1_C2M_P" : "D6",
"DP1_C2M_N" : "D5",
"DP1_M2C_P" : "D2",
"DP1_M2C_N" : "D1",
"DP2_C2M_P" : "C4",
"DP2_C2M_N" : "C3",
"DP2_M2C_P" : "B2",
"DP2_M2C_N" : "B1",
"DP3_C2M_P" : "B6",
"DP3_C2M_N" : "B5",
"DP3_M2C_P" : "A4",
"DP3_M2C_N" : "A3",
"DP4_C2M_P" : "N4",
"DP4_C2M_N" : "N3",
"DP4_M2C_P" : "M2",
"DP4_M2C_N" : "M1",
"DP5_C2M_P" : "J4",
"DP5_C2M_N" : "J3",
"DP5_M2C_P" : "H2",
"DP5_M2C_N" : "H1",
"DP6_C2M_P" : "L4",
"DP6_C2M_N" : "L3",
"DP6_M2C_P" : "K2",
"DP6_M2C_N" : "K1",
"DP7_C2M_P" : "G4",
"DP7_C2M_N" : "G3",
"DP7_M2C_P" : "F2",
"DP7_M2C_N" : "F1",
"LA06_P" : "D13",
"LA06_N" : "C13",
"LA10_P" : "L8",
"LA10_N" : "K8",
"LA14_P" : "B10",
"LA14_N" : "A10",
"LA18_CC_P" : "E22",
"LA18_CC_N" : "E23",
"LA27_P" : "H21",
"LA27_N" : "G21",
"HA01_CC_P" : "E16",
"HA01_CC_N" : "D16",
"HA05_P" : "J15",
"HA05_N" : "J14",
"HA09_P" : "F18",
"HA09_N" : "F17",
"HA13_P" : "B14",
"HA13_N" : "A14",
"HA16_P" : "A19",
"HA16_N" : "A18",
"HA20_P" : "C19",
"HA20_N" : "B19",
"CLK1_M2C_P" : "E25",
"CLK1_M2C_N" : "D25",
"LA00_CC_P" : "H11",
"LA00_CC_N" : "G11",
"LA03_P" : "A13",
"LA03_N" : "A12",
"LA08_P" : "J8",
"LA08_N" : "H8",
"LA12_P" : "E10",
"LA12_N" : "D10",
"LA16_P" : "B9",
"LA16_N" : "A9",
"LA20_P" : "B24",
"LA20_N" : "A24",
"LA22_P" : "G24",
"LA22_N" : "F25",
"LA25_P" : "D20",
"LA25_N" : "D21",
"LA29_P" : "B20",
"LA29_N" : "A20",
"LA31_P" : "B25",
"LA31_N" : "A25",
"LA33_P" : "A27",
"LA33_N" : "A28",
"HA03_P" : "G15",
"HA03_N" : "G14",
"HA07_P" : "L19",
"HA07_N" : "L18",
"HA11_P" : "J19",
"HA11_N" : "J18",
"HA14_P" : "F15",
"HA14_N" : "F14",
"HA18_P" : "B17",
"HA18_N" : "B16",
"HA22_P" : "C18",
"HA22_N" : "C17",
"GBTCLK1_M2C_P" : "H6",
"GBTCLK1_M2C_N" : "H5",
"GBTCLK0_M2C_P" : "K6",
"GBTCLK0_M2C_N" : "K5",
"LA01_CC_P" : "G9",
"LA01_CC_N" : "F9",
"LA05_P" : "L13",
"LA05_N" : "K13",
"LA09_P" : "J9",
"LA09_N" : "H9",
"LA13_P" : "D9",
"LA13_N" : "C9",
"LA17_CC_P" : "D24",
"LA17_CC_N" : "C24",
"LA23_P" : "G22",
"LA23_N" : "F22",
"LA26_P" : "G20",
"LA26_N" : "F20",
"PG_M2C" : "L27",
"HA00_CC_P" : "G17",
"HA00_CC_N" : "G16",
"HA04_P" : "G19",
"HA04_N" : "F19",
"HA08_P" : "K18",
"HA08_N" : "K17",
"HA12_P" : "K16",
"HA12_N" : "J16",
"HA15_P" : "D14",
"HA15_N" : "C14",
"HA19_P" : "D19",
"HA19_N" : "D18",
"PRSNT_M2C_B" : "H24",
"CLK0_M2C_P" : "H12",
"CLK0_M2C_N" : "G12",
"LA02_P" : "K10",
"LA02_N" : "J10",
"LA04_P" : "L12",
"LA04_N" : "K12",
"LA07_P" : "F8",
"LA07_N" : "E8",
"LA11_P" : "K11",
"LA11_N" : "J11",
"LA15_P" : "D8",
"LA15_N" : "C8",
"LA19_P" : "C21",
"LA19_N" : "C22",
"LA21_P" : "F23",
"LA21_N" : "F24",
"LA24_P" : "E20",
"LA24_N" : "E21",
"LA28_P" : "B21",
"LA28_N" : "B22",
"LA30_P" : "C26",
"LA30_N" : "B26",
"LA32_P" : "E26",
"LA32_N" : "D26",
"HA02_P" : "H19",
"HA02_N" : "H18",
"HA06_P" : "L15",
"HA06_N" : "K15",
"HA10_P" : "H17",
"HA10_N" : "H16",
"HA17_CC_P" : "E18",
"HA17_CC_N" : "E17",
"HA21_P" : "E15",
"HA21_N" : "D15",
"HA23_P" : "B15",
"HA23_N" : "A15",
}
),
("LPC", {
"GBTCLK0_M2C_P" : "AA24",
"GBTCLK0_M2C_N" : "AA25",
"LA01_CC_P" : "W25",
"LA01_CC_N" : "Y25",
"LA05_P" : "V27",
"LA05_N" : "V28",
"LA09_P" : "V26",
"LA09_N" : "W26",
"LA13_P" : "AA20",
"LA13_N" : "AB20",
"LA17_CC_P" : "AA32",
"LA17_CC_N" : "AB32",
"LA23_P" : "AD30",
"LA23_N" : "AD31",
"LA26_P" : "AF33",
"LA26_N" : "AG34",
"CLK0_M2C_P" : "AA24",
"CLK0_M2C_N" : "AA25",
"LA02_P" : "AA22",
"LA02_N" : "AB22",
"LA04_P" : "U26",
"LA04_N" : "U27",
"LA07_P" : "V22",
"LA07_N" : "V23",
"LA11_P" : "V21",
"LA11_N" : "W21",
"LA15_P" : "AB25",
"LA15_N" : "AB26",
"LA19_P" : "AA29",
"LA19_N" : "AB29",
"LA21_P" : "AC33",
"LA21_N" : "AD33",
"LA24_P" : "AE32",
"LA24_N" : "AF32",
"LA28_P" : "V31",
"LA28_N" : "W31",
"LA30_P" : "Y31",
"LA30_N" : "Y32",
"LA32_P" : "W30",
"LA32_N" : "Y30",
"LA06_P" : "V29",
"LA06_N" : "W29",
"LA10_P" : "T22",
"LA10_N" : "T23",
"LA14_P" : "U21",
"LA14_N" : "U22",
"LA18_CC_P" : "AB30",
"LA18_CC_N" : "AB31",
"LA27_P" : "AG31",
"LA27_N" : "AG32",
"CLK1_M2C_P" : "AC31",
"CLK1_M2C_N" : "AC32",
"LA00_CC_P" : "W23",
"LA00_CC_N" : "W24",
"LA03_P" : "W28",
"LA03_N" : "Y28",
"LA08_P" : "U24",
"LA08_N" : "U25",
"LA12_P" : "AC22",
"LA12_N" : "AC23",
"LA16_P" : "AB21",
"LA16_N" : "AC21",
"LA20_P" : "AA34",
"LA20_N" : "AB34",
"LA22_P" : "AC34",
"LA22_N" : "AD34",
"LA25_P" : "AE33",
"LA25_N" : "AF34",
"LA29_P" : "U34",
"LA29_N" : "V34",
"LA31_P" : "V33",
"LA31_N" : "W34",
"LA33_P" : "W33",
"LA33_N" : "Y33",
}
),
("pmod0", "AK25 AN21 AH18 AM19 AE26 AF25 AE21 AM17"),
("pmod1", "AL14 AM14 AP16 AP15 AM16 AM15 AN18 AN17"),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk125"
default_clk_period = 1e9/125e6
def __init__(self):
XilinxPlatform.__init__(self, "xcku040-ffva1156-2-e", _io, _connectors, toolchain="vivado")
def create_programmer(self):
return VivadoProgrammer()
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk125", loose=True), 1e9/125e6)
self.add_period_constraint(self.lookup_request("clk300", loose=True), 1e9/300e6)
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 44]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 45]")
self.add_platform_command("set_property INTERNAL_VREF 0.84 [get_iobanks 46]")
| 34.836431
| 100
| 0.442642
| 2,041
| 18,742
| 3.825086
| 0.372366
| 0.026258
| 0.039452
| 0.021135
| 0.236839
| 0.165493
| 0.149353
| 0.139106
| 0.117587
| 0.117587
| 0
| 0.137697
| 0.354445
| 18,742
| 537
| 101
| 34.901304
| 0.507563
| 0.045139
| 0
| 0.130977
| 0
| 0
| 0.297391
| 0.007222
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006237
| false
| 0
| 0.004158
| 0.002079
| 0.018711
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92c62cbe56cec51196d1580ada73d616cb7c64b7
| 1,543
|
py
|
Python
|
code/advent_of_code_day3.py
|
erinleeryan/2020adventofcode
|
69f21d3458f57d8fcf006c451416e0509a66cd7a
|
[
"Unlicense"
] | null | null | null |
code/advent_of_code_day3.py
|
erinleeryan/2020adventofcode
|
69f21d3458f57d8fcf006c451416e0509a66cd7a
|
[
"Unlicense"
] | null | null | null |
code/advent_of_code_day3.py
|
erinleeryan/2020adventofcode
|
69f21d3458f57d8fcf006c451416e0509a66cd7a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import math
# In[2]:
fileObj = open('../data/advent_of_code_input_day_three.txt', "r") # opens the file in read mode.
items = fileObj.read().splitlines() # puts the file into an array.
# In[3]:
#print (items)
def split(line):
return list(line)
holding = []
for i, line in enumerate(items):
result = split(line)
holding.append(result)
holding = np.array(holding)
holding[holding == '.'] = 0
holding[holding == '#'] = 1
holding = holding.astype(int)
print (holding)
# In[7]:
def dup_and_count(rightstep, downstep, basedata):
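# Tiles the map horizontally (np.tile) as far as the slope needs, then counts how
# many visited cells are trees (value 1) while moving `rightstep` columns right and
# `downstep` rows down per step.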
needed_slope_elements = math.floor(basedata.shape[0]/downstep)
replications_needed = (needed_slope_elements* rightstep)/basedata.shape[1]
duplicated = np.tile(basedata, math.ceil(replications_needed))
right = np.arange(0,(needed_slope_elements)*rightstep, rightstep).astype(int)
down = np.arange(0,(needed_slope_elements)*downstep,downstep).astype(int)
moves = []
for ii in range(len(right)):
moves.append(duplicated[down[ii], right[ii]])
hits = np.sum(moves)
return hits
down1_right3 = dup_and_count(3,1,holding)
down1_right1 = dup_and_count(1,1,holding)
down1_right5 = dup_and_count(5,1,holding)
down1_right7 = dup_and_count(7,1,holding)
down2_right1 = dup_and_count(1,2,holding)
results = np.array([down1_right3, down1_right1, down1_right5, down1_right7, down2_right1], dtype=np.int64)
print(results)
product = np.prod(results)
print (product)
# In[ ]:
| 20.302632
| 106
| 0.700583
| 225
| 1,543
| 4.64
| 0.4
| 0.034483
| 0.063218
| 0.05364
| 0.088123
| 0.05364
| 0
| 0
| 0
| 0
| 0
| 0.033052
| 0.156837
| 1,543
| 75
| 107
| 20.573333
| 0.769408
| 0.089436
| 0
| 0
| 0
| 0
| 0.032351
| 0.030194
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.057143
| 0.028571
| 0.171429
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92c8fb39f9443d549d8e36137c05b64ee86a7a00
| 13,786
|
py
|
Python
|
pysh/transforms/alpha/bangexpr.py
|
drslump/pysh
|
673cdf2b5ea95dc3209cb294bb91cb2f298bb888
|
[
"MIT"
] | 3
|
2018-07-09T04:39:24.000Z
|
2020-11-27T05:44:56.000Z
|
pysh/transforms/alpha/bangexpr.py
|
drslump/pysh
|
673cdf2b5ea95dc3209cb294bb91cb2f298bb888
|
[
"MIT"
] | null | null | null |
pysh/transforms/alpha/bangexpr.py
|
drslump/pysh
|
673cdf2b5ea95dc3209cb294bb91cb2f298bb888
|
[
"MIT"
] | 1
|
2018-08-02T21:57:11.000Z
|
2018-08-02T21:57:11.000Z
|
from io import StringIO
import re
import tokenize
import os
from collections import deque, ChainMap
from functools import lru_cache
from enum import Enum
import pysh
from pysh.path import PathWrapper, Path
from typing import List, Callable, Iterator, Tuple, NamedTuple, Deque, Union, Any
TBangTransformer = Callable[ [List[str]], Iterator[str]]
# runtime symbols
__all__ = ['BangExpr', 'BangOp', 'BangSeq', 'BangGlob', 'BangEnv', 'BangBang']
class BangTokenType(Enum):
OPAQUE = 'OPAQUE'
GLOB = 'GLOB'
LOCAL = 'LOCAL'
ENV = 'ENV'
EXPR = 'EXPR'
OP = 'OP'
class BangToken(NamedTuple):
type: BangTokenType
value: str
span: Tuple[int, int]
TBangLexerToken = Tuple[str, str, Tuple[int,int]]
class BangLexer:
def _tokener(self, token, transformer=lambda x: x, **kwargs):
def cb(s, v):
v = transformer(v, **kwargs)
return None if v is None else (token, v, (s.match.start(), s.match.end()))
return cb
@lru_cache() # it's intended for this to be global
def build_scanner(self):
t = self._tokener
return re.Scanner([
(r'\#.+', t('COMMENT', lambda v: v[1:])),
(r'\\.', t('ESCAPE')),
(r"'( \\. | [^\\']+ )+'", t('SQS', lambda v: v[1:-1])),
(r'"( \\. | [^\\"]+ )+"', t('DQS', lambda v: v[1:-1])),
(r'\$[A-Za-z_][A-Za-z0-9_]*', t('VAR', lambda v: v[1:])),
(r'\${( \\. | [^\\}]+ )+}', t('EXPR', lambda v: v[2:-1])),
(r'[|<>^]+', t('OP')),
(r'[A-Za-z0-9_%*+:.,=/@~\[\]{}-]+', t('OPAQUE')),
(r'\s+', t('WS')),
], flags=re.X)
@lru_cache()
def build_dqs_scanner(self):
t = self._tokener
return re.Scanner([
(r'\\.', t('ESCAPE')),
(r'\$[A-Za-z_][A-Za-z0-9_]*', t('VAR', lambda v: v[1:])),
(r'\${( \\. | [^\\}]+ )+}', t('EXPR', lambda v: v[2:-1])),
(r'[^\\\$]+', t('SQS')) # handle as single quoted
], flags=re.X)
def scan_dqs(self, code: str, offset=0) -> Iterator[TBangLexerToken]:
tokens, remaining = self.build_scanner().scan(code)
if remaining:
raise SyntaxError('Unexpected char <{}> at position {}'.format(remaining[0], len(code)-len(remaining)))
for tkn, val, pos in tokens:
yield tkn, val, (offset+pos[0], offset+pos[1])
def demux_dqs(self, tokens: Iterator[TBangLexerToken]) -> Iterator[TBangLexerToken]:
""" Split double quoted strings into parts
"""
for tkn, val, pos in tokens:
if tkn == 'DQS':
yield from self.scan_dqs(val, offset=pos[0]+1)
else:
yield tkn, val, pos
def scan(self, code: str) -> Iterator[BangToken]:
tokens, remaining = self.build_scanner().scan(code)
if remaining:
raise SyntaxError('Unexpected char at position {}'.format(len(code)-len(remaining)))
# Add a terminating token so we can simplify the parsing
tokens.append(('END', '', (len(code),len(code))))
last_token = last_pos = None
for token, value, pos in self.demux_dqs(tokens):
assert token != 'DQS' # double quoted are demuxed
# Inject whitespace operator if needed
if token != 'OP' and last_token and last_token == 'WS':
yield BangToken(BangTokenType.OP, ' ', last_pos)
if token in ('COMMENT', 'END'):
continue
elif token == 'WS':
pass
elif token == 'OP':
value = value.strip()
yield BangToken(BangTokenType.OP, value, pos)
else:
if token == 'OPAQUE':
                    if re.search(r'(?<!\\)[~*?{]', value):  # glob character not preceded by a backslash
yield BangToken(BangTokenType.GLOB, value, pos)
else:
yield BangToken(BangTokenType.OPAQUE, value, pos)
elif token in ('ESCAPE', 'SQS'):
#TODO: handle special escapes \n
value = re.sub(r'\\(.)', r'\1', value)
yield BangToken(BangTokenType.OPAQUE, value, pos)
elif token in ('VAR', 'EXPR'):
value = value.strip()
if value.isalnum() and not value.isdigit():
if value.isupper():
yield BangToken(BangTokenType.ENV, value, pos)
else:
yield BangToken(BangTokenType.LOCAL, value, pos)
else:
assert token == 'EXPR'
value = re.sub(r'\\(.)', r'\1', value)
yield BangToken(BangTokenType.EXPR, value, pos)
else:
assert False, 'unexpected {}, what happened?'.format(token)
last_token, last_pos = token, pos
class BangEnv:
__slots__ = ('name',)
def __init__(self, name):
self.name = name
def __repr__(self):
return 'BangEnv<{}>'.format(self.name)
class BangSeq:
__slots__ = ('items',)
def __init__(self, *items):
self.items = items
def __repr__(self):
return 'BangSeq<{!r}>'.format(self.items)
class BangOp:
__slots__ = ('op',)
def __init__(self, op):
self.op = op
def __repr__(self):
return 'BangOp<{}>'.format(self.op)
class BangGlob:
__slots__ = ('glob',)
def __init__(self, glob):
self.glob = glob
def __repr__(self):
return 'BangGlob<{}>'.format(self.glob)
class BangExpr:
__slots__ = ('args', 'vars')
def __init__(self, *args, locals=None, globals=None):
assert locals is not None
assert globals is not None
self.args = args
self.vars = ChainMap(locals, globals)
def eval_command(self, mut_args):
arg = mut_args.popleft()
cmd = self.vars.get(str(arg))
if cmd is None:
raise RuntimeError('Unable to find {}'.format(arg))
while mut_args:
if isinstance(mut_args[0], BangOp):
break
arg = mut_args.popleft()
cmd = cmd(self.eval_expr(arg))
return cmd
def eval_expr(self, expr: Any) -> Union[str, Iterator[Path]]:
if isinstance(expr, BangSeq):
return self.eval_seq(expr)
elif isinstance(expr, BangEnv):
return os.environ[expr.name]
elif isinstance(expr, BangGlob):
return PathWrapper().glob(expr.glob)
else:
return str(expr)
def eval_seq(self, seq: BangSeq) -> Union[str, Iterator[Path]]:
exprs: Deque[Any] = deque(seq.items)
accum = ''
while exprs:
expr = exprs.popleft()
if isinstance(expr, BangGlob):
if exprs:
raise RuntimeError('Globbing can only occur at the end of a seq')
return PathWrapper(accum).glob(expr.glob)
accum += self.eval_expr(expr)
return accum
def eval(self):
mut_args = deque(self.args)
cmd = self.eval_command(mut_args)
while mut_args:
arg = mut_args.popleft()
assert isinstance(arg, BangOp), 'Expected OP but found: {}'.format(arg)
assert len(mut_args) > 0, 'No operands left!'
if arg.op == '|':
cmd |= self.eval_command(mut_args)
elif arg.op == '^':
cmd ^= self.eval_command(mut_args)
elif arg.op == '>':
cmd = cmd > self.eval_expr(mut_args.popleft())
elif arg.op == '>>':
cmd = cmd >> self.eval_expr(mut_args.popleft())
else:
raise RuntimeError('Unsupported operator {}'.format(arg.op))
return cmd
def __str__(self):
return str(self.eval())
def __repr__(self):
return 'BangExpr<{!r}>'.format(self.args)
class BangBang:
__slots__ = ('code',)
def __init__(self, code):
self.code = code
def eval(self):
#TODO: Detect shebang and use it instead of default shell
import sys, subprocess
result = subprocess.run(
['bash', '-c', self.code],
encoding='utf-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.stderr:
print(result.stderr, file=sys.stderr)
if result.returncode > 0:
if result.stdout:
print(result.stdout)
raise pysh.ExitStatusError(result.returncode)
return result.stdout
def __str__(self):
return str(self.eval())
def __repr__(self):
return 'BangBang<{}>'.format(self.code)
def parse_bangexpr(code: str) -> str:
as_str = lambda s: "'{}'".format(s.replace("\\", "\\\\").replace("'", "\\'"))
lexer = BangLexer().scan(code)
seq = []
exprs = []
while True:
tkn = next(lexer, None)
if tkn and tkn.type != BangTokenType.OP:
if tkn.type in (BangTokenType.LOCAL, BangTokenType.EXPR):
seq.append(tkn.value)
elif tkn.type == BangTokenType.ENV:
seq.append('pysh.BangEnv({})'.format(as_str(tkn.value)))
elif tkn.type == BangTokenType.OPAQUE:
seq.append('{}'.format(as_str(tkn.value)))
elif tkn.type == BangTokenType.GLOB:
seq.append('pysh.BangGlob({})'.format(as_str(tkn.value)))
else:
assert False, 'Unexpected token {}'.format(tkn.type)
continue
if seq:
if len(seq) > 1:
exprs.append('pysh.BangSeq({})'.format(', '.join(seq)))
else:
exprs.append(seq[0])
seq = []
if not tkn:
break
assert tkn.type == BangTokenType.OP
if tkn.value == ' ':
continue
exprs.append('pysh.BangOp("{}")'.format(tkn.value))
# We need to provide locals/globals so we can resolve commands to variables
return 'pysh.BangExpr({}, locals=locals(), globals=globals())'.format(', '.join(exprs))
def transform(code: StringIO, transformer: TBangTransformer) -> Iterator[str]:
""" Scans python code to transform bang expressions.
Given some python code it will extract bang expressions and process
them with a callback that can report back the transformation.
Returns a generator that allows to consume the transformed code
line by line.
"""
tokens = tokenize.generate_tokens(code.readline)
bangexpr = [] # type: List[str]
bangcont = False
prebang = None
ptkn = None
indent = 0
bang_indent = -100
last_bang_line = -100
for ctkn in tokens:
if ctkn.type == tokenize.INDENT:
indent += 1
if last_bang_line + 1 == ctkn.start[0]:
bang_indent = indent
elif ctkn.type == tokenize.DEDENT:
indent -= 1
if bang_indent > indent:
bang_indent = -100
# due to continuations we can't rely on NEWLINE tokens, instead we have
# use the lexical information to detect when we're on a new line
#TODO: Support indent/dedent for multiline
if ptkn and ctkn.start[0] > ptkn.start[0]:
if bangcont or bang_indent == indent:
if ctkn.type is tokenize.ENDMARKER:
raise SyntaxError('BangExpr continuation at program end')
line = ctkn.line.rstrip('\r\n')
bangexpr.append(line)
bangcont = line.endswith('\\')
last_bang_line = ctkn.start[0]
elif bangexpr:
lines = list(transformer(bangexpr))
assert len(lines) <= len(bangexpr)
if lines and prebang:
lines[0] = prebang + lines[0]
yield from lines
bangexpr = []
last_bang_line = ptkn.start[0]
else:
yield ptkn.line
ptkn = ctkn
if bangexpr:
continue
if ctkn.string == '!':
col = ctkn.start[1]
prebang = ctkn.line[0:col]
line = ctkn.line[col+1:].lstrip(' \t').rstrip('\r\n')
bangexpr.append(line.rstrip('\\'))
bangcont = line.endswith('\\')
last_bang_line = ctkn.start[0]
assert not bangexpr, bangexpr
def transformer(lines: List[str]) -> Iterator[str]:
if lines[0].startswith('!'):
#TODO: Detect $ident to expose them on env when evaluated
lines[0] = lines[0][1:]
code = '\n'.join(lines)
        code = code.strip().replace("\\", "\\\\").replace("'", "\\'")  # escape backslashes before quotes
code = "pysh.BangBang('{}')".format(code)
lines = code.split('\n')
for line in lines:
yield line
else:
yield from parse_bangexpr(' '.join(lines)).split('\n')
from io import StringIO
code = r'''
foo = ! ls foo${bar}.* \
| grep foo
> /dev/null
foo = r' ls foo${bar} ' >> expr
expr<' ls foo${bar} '
!! #!/bin/fish
ls .*
'''.strip()
#TODO: !! is probably better solved with:
# locals are solved with inspect.frame.f_locals
# sh << r'''
# # << means with variables interpolated
# # < is plain text
# ls .*
# '''
for line in transform(StringIO(code), transformer):
print(line.rstrip('\n'))
from pysh.command import command
ls = command('ls')
grep = command('grep')
bar = 10
print('::BangExpr::')
be = BangExpr('ls', BangSeq('foo', bar, BangGlob('.*')), BangOp("|"), 'grep', 'foo', 'baz', BangOp(">"), '/dev/null', locals=locals(), globals=globals())
# print(be)
print('::BangBang::')
bb = BangBang('''#!/bin/bash
ls *.py''')
print(bb)
| 31.260771
| 153
| 0.533585
| 1,591
| 13,786
| 4.52357
| 0.195475
| 0.013617
| 0.030013
| 0.014173
| 0.184104
| 0.175212
| 0.128804
| 0.128804
| 0.128804
| 0.094484
| 0
| 0.006526
| 0.321993
| 13,786
| 441
| 154
| 31.260771
| 0.763454
| 0.072828
| 0
| 0.235474
| 0
| 0
| 0.100653
| 0.006134
| 0
| 0
| 0
| 0.004535
| 0.033639
| 1
| 0.088685
| false
| 0.003058
| 0.039755
| 0.024465
| 0.269113
| 0.018349
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92ca0cfb3a6ca200081a09f8a2c36869b58c22cb
| 2,449
|
py
|
Python
|
example/bayesian-methods/data_loader.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 54
|
2018-11-27T06:00:52.000Z
|
2022-03-24T09:41:01.000Z
|
example/bayesian-methods/data_loader.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 27
|
2017-07-04T17:45:51.000Z
|
2019-09-12T06:56:27.000Z
|
example/bayesian-methods/data_loader.py
|
Vikas-kum/incubator-mxnet
|
ba02bf2fe2da423caa59ddb3fd5e433b90b730bf
|
[
"Apache-2.0"
] | 51
|
2019-07-12T05:10:25.000Z
|
2021-07-28T16:19:06.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import numpy
import os
import ssl
def load_mnist(training_num=50000):
data_path = os.path.join(os.path.dirname(os.path.realpath('__file__')), 'mnist.npz')
if not os.path.isfile(data_path):
from six.moves import urllib
origin = (
'https://github.com/sxjscience/mxnet/raw/master/example/bayesian-methods/mnist.npz'
)
print('Downloading data from %s to %s' % (origin, data_path))
ctx = ssl._create_unverified_context()
with urllib.request.urlopen(origin, context=ctx) as u, open(data_path, 'wb') as f:
f.write(u.read())
print('Done!')
dat = numpy.load(data_path)
X = (dat['X'][:training_num] / 126.0).astype('float32')
Y = dat['Y'][:training_num]
X_test = (dat['X_test'] / 126.0).astype('float32')
Y_test = dat['Y_test']
Y = Y.reshape((Y.shape[0],))
Y_test = Y_test.reshape((Y_test.shape[0],))
return X, Y, X_test, Y_test
def load_toy():
training_data = numpy.loadtxt('toy_data_train.txt')
testing_data = numpy.loadtxt('toy_data_test_whole.txt')
X = training_data[:, 0].reshape((training_data.shape[0], 1))
Y = training_data[:, 1].reshape((training_data.shape[0], 1))
X_test = testing_data[:, 0].reshape((testing_data.shape[0], 1))
Y_test = testing_data[:, 1].reshape((testing_data.shape[0], 1))
return X, Y, X_test, Y_test
def load_synthetic(theta1, theta2, sigmax, num=20):
flag = numpy.random.randint(0, 2, (num,))
X = flag * numpy.random.normal(theta1, sigmax, (num,)) \
+ (1.0 - flag) * numpy.random.normal(theta1 + theta2, sigmax, (num,))
return X
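# --- Hedged usage sketch (not part of the original file) ---
# load_synthetic draws `num` samples from a two-component Gaussian mixture:
# each point comes from N(theta1, sigmax) or N(theta1 + theta2, sigmax) with
# equal probability. The values below are illustrative only.
if __name__ == '__main__':
    X = load_synthetic(theta1=0.0, theta2=1.0, sigmax=0.5, num=20)
    print(X.shape)  # (20,)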
| 40.147541
| 95
| 0.683953
| 371
| 2,449
| 4.377358
| 0.401617
| 0.024631
| 0.024631
| 0.027094
| 0.178571
| 0.093596
| 0.030788
| 0.030788
| 0.030788
| 0
| 0
| 0.023046
| 0.184973
| 2,449
| 60
| 96
| 40.816667
| 0.790581
| 0.307064
| 0
| 0.054054
| 0
| 0.027027
| 0.121429
| 0.01369
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.135135
| 0
| 0.297297
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92ca255eec01c1e82a3ad0136582786783c1c0bd
| 4,743
|
py
|
Python
|
start.py
|
mickeyckm/nanodegree-freshtomatoes
|
12776f7e46d6c42a4755a0b81e60eb1a5a65de08
|
[
"MIT"
] | 1
|
2016-10-13T05:25:36.000Z
|
2016-10-13T05:25:36.000Z
|
start.py
|
mickeyckm/freshtomatoes
|
12776f7e46d6c42a4755a0b81e60eb1a5a65de08
|
[
"MIT"
] | null | null | null |
start.py
|
mickeyckm/freshtomatoes
|
12776f7e46d6c42a4755a0b81e60eb1a5a65de08
|
[
"MIT"
] | null | null | null |
import os
import tmdbsimple as tmdb
import media
import fresh_tomatoes as ft
movies = []
if os.environ.get('TMDB_API', False):
# Retrieve API KEY
tmdb.API_KEY = os.environ['TMDB_API']
# TMDB Movie Ids
movie_ids = [271110, 297761, 246655, 278154, 135397, 188927]
# Get Configuration
configuration = tmdb.Configuration().info()
image_base_url = configuration['images']['secure_base_url']
image_width = "w500"
for movie_id in movie_ids:
m = tmdb.Movies(movie_id)
# Retrieve Image URL
minfo = m.info()
poster_image_url = image_base_url + image_width + minfo['poster_path']
# Retrieve Youtube Video URL
videos = m.videos()
video = videos['results'][0]
youtube_url = 'https://youtube.com/watch?v=' + video['key']
# Append Movie object
movie = media.Movie(m.title)
movie.storyline = m.overview
movie.poster_url = poster_image_url
movie.trailer_url = youtube_url
movies.append(movie)
else:
# Avatar
avatar = media.Movie("Avatar")
avatar.storyline = ("A paraplegic marine dispatched to the moon Pandora "
"on a unique mission becomes torn between following "
"his orders and protecting the world he feels is "
"his home.")
avatar.poster_url = ("https://upload.wikimedia.org/wikipedia/"
"en/b/b0/Avatar-Teaser-Poster.jpg")
avatar.trailer_url = "https://www.youtube.com/watch?v=-9ceBgWV8io"
# Deadpool
deadpool = media.Movie("Deadpool")
deadpool.storyline = ("A fast-talking mercenary with a morbid sense of "
"humor is subjected to a rogue experiment that "
"leaves him with accelerated healing powers and a "
"quest for revenge.")
deadpool.poster_url = ("https://upload.wikimedia.org/wikipedia/en/4/46/"
"Deadpool_poster.jpg")
deadpool.trailer_url = "https://www.youtube.com/watch?v=gtTfd6tISfw"
# Ghostbusters
ghostbusters = media.Movie("Ghostbusters")
ghostbusters.storyline = ("Following a ghost invasion of Manhattan, "
"paranormal enthusiasts Erin Gilbert and Abby "
"Yates, nuclear engineer Jillian Holtzmann, "
"and subway worker Patty Tolan band together "
"to stop the otherworldly threat.")
ghostbusters.poster_url = ("https://upload.wikimedia.org/wikipedia/"
"en/3/32/Ghostbusters_2016_film_poster.png")
ghostbusters.trailer_url = "https://www.youtube.com/watch?v=w3ugHP-yZXw"
# Olympus
olympus = media.Movie("Olympus Has Fallen")
olympus.storyline = ("Disgraced Secret Service agent (and former "
"presidential guard) Mike Banning finds himself "
"trapped inside the White House in the wake of a "
"terrorist attack; using his inside knowledge, "
"Banning works with national security to rescue "
"the President from his kidnappers.")
olympus.poster_url = ("https://upload.wikimedia.org/wikipedia/en/b/bf/"
"Olympus_Has_Fallen_poster.jpg")
olympus.trailer_url = "https://www.youtube.com/watch?v=vwx1f0kyNwI"
# Angry Birds
angry_birds = media.Movie("The Angry Birds Movie")
angry_birds.storyline = ("Find out why the birds are so angry. When an "
"island populated by happy, flightless birds "
"is visited by mysterious green piggies, it's "
"up to three unlikely outcasts - Red, Chuck "
"and Bomb - to figure out what the pigs are up "
"to.")
angry_birds.poster_url = ("https://upload.wikimedia.org/wikipedia/en/f/"
"f9/The_Angry_Birds_Movie_poster.png")
angry_birds.trailer_url = "https://www.youtube.com/watch?v=1U2DKKqxHgE"
# Ironman
ironman = media.Movie("Iron Man")
ironman.storyline = ("After being held captive in an Afghan cave, "
"billionaire engineer Tony Stark creates a unique "
"weaponized suit of armor to fight evil.")
ironman.poster_url = ("https://upload.wikimedia.org/wikipedia/en/7/70/"
"Ironmanposter.JPG")
ironman.trailer_url = "https://www.youtube.com/watch?v=8hYlB38asDY"
movies = [avatar, deadpool, ghostbusters, olympus, angry_birds, ironman]
ft.open_movies_page(movies)
| 43.916667
| 78
| 0.59688
| 540
| 4,743
| 5.140741
| 0.435185
| 0.037464
| 0.037824
| 0.040346
| 0.167147
| 0.167147
| 0.167147
| 0.167147
| 0.0317
| 0
| 0
| 0.020091
| 0.3074
| 4,743
| 107
| 79
| 44.327103
| 0.824962
| 0.036264
| 0
| 0
| 0
| 0
| 0.439886
| 0.030057
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.051282
| 0
| 0.051282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92cd8cee441a839cf57967c393c922a1fab007b8
| 1,203
|
py
|
Python
|
tests/test_runner.py
|
elifesciences/proofreader-python
|
89d807253e17a1731c7ce15f7dd382e49c1c835a
|
[
"MIT"
] | 1
|
2018-06-26T21:49:31.000Z
|
2018-06-26T21:49:31.000Z
|
tests/test_runner.py
|
elifesciences/proofreader-python
|
89d807253e17a1731c7ce15f7dd382e49c1c835a
|
[
"MIT"
] | 8
|
2017-12-05T08:34:25.000Z
|
2018-04-30T08:58:18.000Z
|
tests/test_runner.py
|
elifesciences/proofreader-python
|
89d807253e17a1731c7ce15f7dd382e49c1c835a
|
[
"MIT"
] | null | null | null |
try:
from unittest.mock import patch
except ImportError: # pragma: no cover
from mock import patch
from proofreader.runner import run, _run_command
def test_it_will_return_1_exit_code_on_failure(bad_py_file):
try:
run(targets=[bad_py_file.strpath])
except SystemExit as exception:
assert exception.code == 1
def test_it_will_return_zero_exit_code_on_success(good_py_file):
try:
run(targets=[good_py_file.strpath])
except SystemExit as exception:
assert exception.code == 0
def test_it_returns_zero_exit_code_on_builtin_shadowing_fail(builtin_fail_py_file):
try:
run(targets=[builtin_fail_py_file.strpath])
except SystemExit as exception:
assert exception.code == 0
def test_run_command_will_return_a_bool():
with patch('proofreader.runner.Popen') as mock_popen:
mock_popen.returncode = 0
result = _run_command('dummy_cmd', [''], [''])
assert isinstance(result, bool)
def test_will_return_zero_on_success_with_license_check(good_py_file):
try:
run(targets=[good_py_file.strpath], check_licenses=True)
except SystemExit as exception:
assert exception.code == 0
| 28.642857
| 83
| 0.729842
| 169
| 1,203
| 4.810651
| 0.325444
| 0.059041
| 0.04428
| 0.059041
| 0.471095
| 0.377614
| 0.377614
| 0.377614
| 0.319803
| 0.319803
| 0
| 0.006167
| 0.191189
| 1,203
| 41
| 84
| 29.341463
| 0.829394
| 0.0133
| 0
| 0.4
| 0
| 0
| 0.027848
| 0.020253
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.166667
| false
| 0
| 0.133333
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92ce1ba4b6776bf939e55fcd9a49ebf0d28494b0
| 1,266
|
py
|
Python
|
tanim/core/container/container.py
|
wofeicaoge/Tanim
|
8ef17834a4ba51092f28c0d5becec25aecd01a62
|
[
"MIT"
] | null | null | null |
tanim/core/container/container.py
|
wofeicaoge/Tanim
|
8ef17834a4ba51092f28c0d5becec25aecd01a62
|
[
"MIT"
] | 5
|
2020-04-13T15:31:37.000Z
|
2022-03-12T00:23:27.000Z
|
tanim/core/container/container.py
|
wofeicaoge/Tanim
|
8ef17834a4ba51092f28c0d5becec25aecd01a62
|
[
"MIT"
] | null | null | null |
from tanim.utils.config_ops import digest_config
from tanim.utils.iterables import list_update
# Currently, this is only used by both Scene and Mobject.
# Still, we abstract its functionality here, albeit purely nominally.
# All actual implementation has to be handled by derived classes for now.
class Container(object):
def __init__(self, **kwargs):
digest_config(self, kwargs)
self.submobjects = [] # Is it really better to name it submobjects?
def add(self, *mobjects):
if self in mobjects:
raise Exception("Mobject cannot contain self")
self.submobjects = list_update(self.submobjects, mobjects)
return self
def add_to_back(self, *mobjects):
self.remove(*mobjects)
self.submobjects = list(mobjects) + self.submobjects
return self
def remove(self, *mobjects, ):
for mobject in mobjects:
for submod in self.submobjects:
if isinstance(submod, GroupContainer):
submod.remove(mobject)
elif mobject == submod:
self.submobjects.remove(mobject)
return self
class GroupContainer(Container):
def __init__(self, *containers, **kwargs):
        super().__init__(**kwargs)  # initialize submobjects before adding
        self.add(*containers)
| 32.461538
| 76
| 0.657188
| 147
| 1,266
| 5.557823
| 0.462585
| 0.128519
| 0.034272
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.263823
| 1,266
| 38
| 77
| 33.315789
| 0.876609
| 0.188784
| 0
| 0.115385
| 0
| 0
| 0.026419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.076923
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92ce6473bab7c8882ecd1ab85554b02e243b4587
| 5,076
|
py
|
Python
|
article.py
|
ZACHSTRIVES/AUCSS-StaffPlatform
|
f2d6597853e85b06f057292025d83edbb4184361
|
[
"MIT"
] | 3
|
2020-10-01T08:46:12.000Z
|
2021-01-25T11:32:16.000Z
|
article.py
|
ZACHSTRIVES/AUCSS-StaffPlatform
|
f2d6597853e85b06f057292025d83edbb4184361
|
[
"MIT"
] | null | null | null |
article.py
|
ZACHSTRIVES/AUCSS-StaffPlatform
|
f2d6597853e85b06f057292025d83edbb4184361
|
[
"MIT"
] | 1
|
2020-09-24T11:20:23.000Z
|
2020-09-24T11:20:23.000Z
|
from config import *
def fetch_all_article():
try:
cur = db.cursor()
sql = "SELECT * FROM article WHERE article_status='N'"
db.ping(reconnect=True)
cur.execute(sql)
result = cur.fetchall()
db.commit()
cur.close()
return result
except Exception as e:
print(e)
def add_article_to_db(title, due):
try:
cur = db.cursor()
sql = "INSERT INTO article(article_title,article_dueday)VALUES ('%s','%s')" % (title, due)
db.ping(reconnect=True)
cur.execute(sql)
db.commit()
cur.close()
except Exception as e:
print(e)
def fetch_all_mkt_staff():
try:
cur = db.cursor()
sql = "SELECT Name,email FROM user WHERE type=5"
db.ping(reconnect=True)
cur.execute(sql)
result = cur.fetchall()
db.commit()
cur.close()
return result
except Exception as e:
print(e)
def get_article_id(title):
try:
cur = db.cursor()
sql = "SELECT article_id FROM article WHERE article_title='%s' AND article_status='N'" % title
db.ping(reconnect=True)
cur.execute(sql)
result = cur.fetchone()
db.commit()
cur.close()
return result
except Exception as e:
print(e)
def add_works_to_db(article_id, type, staff, work_due):
try:
cur = db.cursor()
sql = "INSERT INTO article_works(works_type,works_article,works_dueday,works_staff)VALUES (%s,%s,'%s','%s');" % (
type, article_id, work_due, staff)
db.ping(reconnect=True)
cur.execute(sql)
db.commit()
cur.close()
except Exception as e:
print(e)
def get_article_s_work(id):
try:
cur = db.cursor()
sql = "SELECT * FROM article_works WHERE works_article=%s ORDER BY works_type" % id
db.ping(reconnect=True)
cur.execute(sql)
result = cur.fetchall()
db.commit()
cur.close()
return result
except Exception as e:
print(e)
def get_user_name(email):
try:
cur = db.cursor()
sql = "SELECT Name FROM user WHERE email='%s'" % email
db.ping(reconnect=True)
cur.execute(sql)
result = cur.fetchone()
db.commit()
cur.close()
return result
except Exception as e:
print(e)
def get_works_list(articles):
res = {}
for i in range(0, len(articles)):
id = articles[i][0]
work = []
works = get_article_s_work(id)
for w in works:
my_list = [w[0], w[1], w[3], get_user_name(w[5])[0]]
work.append(my_list)
res[id] = work
return res
def get_your_task_with_article(email, id):
try:
cur = db.cursor()
sql = "SELECT * FROM article_works WHERE works_staff='%s' AND works_article=%s" % (email, id)
db.ping(reconnect=True)
cur.execute(sql)
result = cur.fetchall()
db.commit()
cur.close()
return result
except Exception as e:
print(e)
def get_task_list(email, articles):
res = {}
for a in articles:
id = a[0]
tasks = get_your_task_with_article(email, id)
res[id] = tasks
return res
def update_finish_status(type, id):
try:
type = int(type)
cur = db.cursor()
sql = ''
if type == 1:
sql = "UPDATE article SET banner_status='Y' WHERE article_id=%s" % id
elif type == 2:
sql = "UPDATE article SET text_status='Y' WHERE article_id=%s" % id
elif type == 3:
sql = "UPDATE article SET style_status='Y' WHERE article_id=%s" % id
db.ping(reconnect=True)
cur.execute(sql)
db.commit()
cur.close()
except Exception as e:
print(e)
def update_task_status(id):
try:
cur = db.cursor()
sql = "UPDATE article_works SET is_finished='Y' WHERE works_num=%s" % id
db.ping(reconnect=True)
cur.execute(sql)
db.commit()
cur.close()
except Exception as e:
print(e)
def finish_task_in_db(task, article, type):
update_task_status(task)
update_finish_status(type, article)
def count_person_performance(type, email):
try:
cur = db.cursor()
sql = "SELECT * FROM article_works WHERE works_staff='%s' AND works_type=%s AND is_finished='Y'" % (email, type)
db.ping(reconnect=True)
cur.execute(sql)
res = cur.fetchall()
db.commit()
cur.close()
return res
except Exception as e:
print(e)
def count_performance():
all_staff = fetch_all_mkt_staff()
performance_list = []
for s in all_staff:
email = s[1]
banner = count_person_performance(1, email)
text = count_person_performance(2, email)
style = count_person_performance(3, email)
p_list = [s[0], len(banner), len(text), len(style)]
performance_list.append(p_list)
return performance_list
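# --- Illustrative sketch, not part of the original module ---
# The queries above interpolate values with %-string formatting, which is open
# to SQL injection. Assuming `db` is a DB-API connection (e.g. PyMySQL), the
# same lookup can let the driver bind the parameters instead; the function
# name below is hypothetical.
def get_user_name_safe(email):
    try:
        cur = db.cursor()
        sql = "SELECT Name FROM user WHERE email=%s"
        db.ping(reconnect=True)
        cur.execute(sql, (email,))  # driver-side parameter binding
        result = cur.fetchone()
        db.commit()
        cur.close()
        return result
    except Exception as e:
        print(e)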
| 25.766497
| 121
| 0.573483
| 675
| 5,076
| 4.162963
| 0.134815
| 0.019573
| 0.043061
| 0.054804
| 0.575445
| 0.567616
| 0.552669
| 0.469751
| 0.457651
| 0.402491
| 0
| 0.004863
| 0.311269
| 5,076
| 196
| 122
| 25.897959
| 0.798913
| 0
| 0
| 0.636364
| 0
| 0.006061
| 0.162136
| 0.022262
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.006061
| 0
| 0.157576
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92cea7421504e38a8678084f761b6c6af9dcfff2
| 1,231
|
py
|
Python
|
12-Querying-Data-II/just_filtering.py
|
dwang-ischool/w205
|
ebcdf684dc653951691faaa2787896a2d2406539
|
[
"Apache-2.0"
] | 23
|
2018-10-21T17:47:56.000Z
|
2022-03-06T04:50:27.000Z
|
12a/just_filtering.py
|
FuriousGeorge19/W205-Course-Content
|
f51046d7507fba9ba9f7521cda437d7dad803e5b
|
[
"Apache-2.0"
] | null | null | null |
12a/just_filtering.py
|
FuriousGeorge19/W205-Course-Content
|
f51046d7507fba9ba9f7521cda437d7dad803e5b
|
[
"Apache-2.0"
] | 9
|
2020-03-16T08:52:58.000Z
|
2022-02-09T09:31:51.000Z
|
#!/usr/bin/env python
"""Extract events from kafka and write them to hdfs
"""
import json
from pyspark.sql import SparkSession, Row
from pyspark.sql.functions import udf
@udf('boolean')
def is_purchase(event_as_json):
event = json.loads(event_as_json)
if event['event_type'] == 'purchase_sword':
return True
return False
def main():
"""main
"""
spark = SparkSession \
.builder \
.appName("ExtractEventsJob") \
.getOrCreate()
raw_events = spark \
.read \
.format("kafka") \
.option("kafka.bootstrap.servers", "kafka:29092") \
.option("subscribe", "events") \
.option("startingOffsets", "earliest") \
.option("endingOffsets", "latest") \
.load()
purchase_events = raw_events \
.select(raw_events.value.cast('string').alias('raw'),
raw_events.timestamp.cast('string')) \
.filter(is_purchase('raw'))
extracted_purchase_events = purchase_events \
.rdd \
.map(lambda r: Row(timestamp=r.timestamp, **json.loads(r.raw))) \
.toDF()
extracted_purchase_events.printSchema()
extracted_purchase_events.show()
if __name__ == "__main__":
main()
| 25.122449
| 73
| 0.613323
| 133
| 1,231
| 5.466165
| 0.518797
| 0.096286
| 0.094911
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005365
| 0.242892
| 1,231
| 48
| 74
| 25.645833
| 0.774678
| 0.064175
| 0
| 0
| 0
| 0
| 0.148246
| 0.020175
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.088235
| 0
| 0.205882
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92cec8b3278d323143a4d7cc2f5e6ab7db12785e
| 434
|
py
|
Python
|
test.py
|
navjotk/pysz
|
6d75aa4fe24713ed893a2301c143006dace6fd77
|
[
"MIT"
] | 3
|
2020-03-14T04:43:00.000Z
|
2022-02-02T15:22:48.000Z
|
test.py
|
navjotk/pysz
|
6d75aa4fe24713ed893a2301c143006dace6fd77
|
[
"MIT"
] | null | null | null |
test.py
|
navjotk/pysz
|
6d75aa4fe24713ed893a2301c143006dace6fd77
|
[
"MIT"
] | null | null | null |
import numpy as np
from pysz import compress, decompress
def test_compress_decompress():
a = np.linspace(0, 100, num=1000000).reshape((100, 100, 100)).astype(np.float32)
tolerance = 0.0001
compressed = compress(a, tolerance=tolerance)
recovered = decompress(compressed, a.shape, a.dtype)
assert(a.shape == recovered.shape)
assert(np.allclose(a, recovered, atol=tolerance))
test_compress_decompress()
| 25.529412
| 84
| 0.71659
| 57
| 434
| 5.385965
| 0.491228
| 0.175896
| 0.143322
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07438
| 0.163594
| 434
| 16
| 85
| 27.125
| 0.77135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.2
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92d067e85ffe42672816ef3e9eaff85647966d45
| 1,312
|
py
|
Python
|
webhooks/sentry/alerta_sentry.py
|
dunzoit/alerta-contrib
|
57dd47d5bb0c994fce036ae1eea2c3a88ef352c4
|
[
"MIT"
] | null | null | null |
webhooks/sentry/alerta_sentry.py
|
dunzoit/alerta-contrib
|
57dd47d5bb0c994fce036ae1eea2c3a88ef352c4
|
[
"MIT"
] | null | null | null |
webhooks/sentry/alerta_sentry.py
|
dunzoit/alerta-contrib
|
57dd47d5bb0c994fce036ae1eea2c3a88ef352c4
|
[
"MIT"
] | null | null | null |
from alerta.models.alert import Alert
from alerta.webhooks import WebhookBase
class SentryWebhook(WebhookBase):
def incoming(self, query_string, payload):
# For Sentry v9
# Defaults to value before Sentry v9
if 'request' in payload.get('event'):
key = 'request'
else:
key = 'sentry.interfaces.Http'
if payload.get('event')[key]['env'].get('ENV', 'prod') == 'prod':
environment = 'Production'
else:
environment = 'Development'
if payload['level'] == 'error':
severity = 'critical'
else:
severity = 'ok'
return Alert(
resource=payload['culprit'],
event=payload['event']['event_id'],
environment=environment,
severity=severity,
service=[payload['project']],
group='Application',
value=payload['level'],
text='{}\n{}\n{}'.format(payload['message'], payload['event'].get('title', ''), payload['url']),
tags=['{}={}'.format(k, v) for k, v in payload['event']['tags']],
attributes={'modules': ['{}=={}'.format(k, v) for k, v in payload['event']['modules'].items()]},
origin='sentry.io',
raw_data=str(payload)
)
| 32
| 108
| 0.529726
| 131
| 1,312
| 5.282443
| 0.503817
| 0.069364
| 0.043353
| 0.052023
| 0.078035
| 0.078035
| 0.078035
| 0.078035
| 0.078035
| 0
| 0
| 0.002188
| 0.303354
| 1,312
| 40
| 109
| 32.8
| 0.754923
| 0.036585
| 0
| 0.1
| 0
| 0
| 0.17619
| 0.01746
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0
| 0.066667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92d135cd3396bc2bfc2ba5711e29b118672c8503
| 1,676
|
py
|
Python
|
setup.py
|
dolfim/django-mail-gmailapi
|
c2f7319329d07d6ecd41e4addc05e47c38fd5e19
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dolfim/django-mail-gmailapi
|
c2f7319329d07d6ecd41e4addc05e47c38fd5e19
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
dolfim/django-mail-gmailapi
|
c2f7319329d07d6ecd41e4addc05e47c38fd5e19
|
[
"Apache-2.0"
] | null | null | null |
import re
from setuptools import setup, find_packages
import sys
if sys.version_info < (3, 5):
    raise RuntimeError('must use Python version 3.5 or higher')  # raising a bare string is invalid in Python 3
with open('./gmailapi_backend/__init__.py', 'r') as f:
MATCH_EXPR = "__version__[^'\"]+(['\"])([^'\"]+)"
VERSION = re.search(MATCH_EXPR, f.read()).group(2).strip()
setup(
name='django-gmailapi-backend',
version=VERSION,
packages=find_packages(),
author="Michele Dolfi",
author_email="michele.dolfi@gmail.com",
license="Apache License 2.0",
entry_points={
'console_scripts': [
'gmail_oauth2 = gmailapi_backend.bin.gmail_oauth2:main',
]
},
install_requires=[
'google-api-python-client~=2.0',
'google-auth>=1.16.0,<3.0.0dev',
],
url="https://github.com/dolfim/django-gmailapi-backend",
long_description_content_type='text/markdown',
long_description=open('README.md').read(),
description='Email backend for Django which sends email via the Gmail API',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: Django',
'Topic :: Communications :: Email',
'Development Status :: 4 - Beta'
],
)
| 33.52
| 79
| 0.614558
| 189
| 1,676
| 5.31746
| 0.555556
| 0.113433
| 0.149254
| 0.129353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021705
| 0.23031
| 1,676
| 49
| 80
| 34.204082
| 0.757364
| 0
| 0
| 0.044444
| 0
| 0
| 0.538783
| 0.102625
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92d23334c19f98d7d5d931da713ce60c1a673466
| 1,351
|
py
|
Python
|
openpeerpower/scripts/ensure_config.py
|
OpenPeerPower/openpeerpower
|
940a04a88e8f78e2d010dc912ad6905ae363503c
|
[
"Apache-2.0"
] | null | null | null |
openpeerpower/scripts/ensure_config.py
|
OpenPeerPower/openpeerpower
|
940a04a88e8f78e2d010dc912ad6905ae363503c
|
[
"Apache-2.0"
] | null | null | null |
openpeerpower/scripts/ensure_config.py
|
OpenPeerPower/openpeerpower
|
940a04a88e8f78e2d010dc912ad6905ae363503c
|
[
"Apache-2.0"
] | 1
|
2019-04-24T14:10:08.000Z
|
2019-04-24T14:10:08.000Z
|
"""Script to ensure a configuration file exists."""
import argparse
import os
import openpeerpower.config as config_util
from openpeerpower.core import OpenPeerPower
# mypy: allow-untyped-calls, allow-untyped-defs
def run(args):
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=(
"Ensure a Open Peer Power config exists, creates one if necessary."
)
)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Open Peer Power configuration",
)
parser.add_argument("--script", choices=["ensure_config"])
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
# Test if configuration directory exists
if not os.path.isdir(config_dir):
print("Creating directory", config_dir)
os.makedirs(config_dir)
opp = OpenPeerPower()
opp.config.config_dir = config_dir
config_path = opp.loop.run_until_complete(async_run(opp))
print("Configuration file:", config_path)
return 0
async def async_run(opp):
"""Make sure config exists."""
path = await config_util.async_ensure_config_exists(opp)
await opp.async_stop(force=True)
return path
| 28.145833
| 79
| 0.687639
| 170
| 1,351
| 5.294118
| 0.423529
| 0.08
| 0.028889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000935
| 0.208734
| 1,351
| 47
| 80
| 28.744681
| 0.840973
| 0.127313
| 0
| 0
| 0
| 0
| 0.182938
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.125
| 0
| 0.21875
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92d3126cd9f9279a6936076ceba3b9c4bff9aa48
| 11,146
|
py
|
Python
|
dabl/plot/tests/test_supervised.py
|
nrohan09-cloud/dabl
|
ebc4686c7b16c011bf5266cb6335221309aacb80
|
[
"BSD-3-Clause"
] | 500
|
2019-04-01T13:50:18.000Z
|
2022-03-07T01:50:45.000Z
|
dabl/plot/tests/test_supervised.py
|
nrohan09-cloud/dabl
|
ebc4686c7b16c011bf5266cb6335221309aacb80
|
[
"BSD-3-Clause"
] | 111
|
2019-04-01T17:48:40.000Z
|
2020-03-27T16:39:19.000Z
|
dabl/plot/tests/test_supervised.py
|
nrohan09-cloud/dabl
|
ebc4686c7b16c011bf5266cb6335221309aacb80
|
[
"BSD-3-Clause"
] | 60
|
2019-04-01T14:58:35.000Z
|
2021-08-13T02:58:20.000Z
|
import pytest
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import itertools
from sklearn.datasets import (make_regression, make_blobs, load_digits,
fetch_openml, load_diabetes)
from sklearn.preprocessing import KBinsDiscretizer
from dabl.preprocessing import clean, detect_types, guess_ordinal
from dabl.plot.supervised import (
plot, plot_classification_categorical,
plot_classification_continuous, plot_regression_categorical,
plot_regression_continuous)
from dabl.utils import data_df_from_bunch
from dabl import set_config
# FIXME: check that target is not y but a column name
@pytest.mark.filterwarnings('ignore:the matrix subclass')
@pytest.mark.parametrize("continuous_features, categorical_features, task",
itertools.product([0, 1, 3, 100], [0, 1, 3, 100],
['classification', 'regression']))
def test_plots_smoke(continuous_features, categorical_features, task):
# simple smoke test
# should be parametrized
n_samples = 100
X_cont, y_cont = make_regression(
n_samples=n_samples, n_features=continuous_features,
n_informative=min(continuous_features, 2))
X_cat, y_cat = make_regression(
n_samples=n_samples, n_features=categorical_features,
n_informative=min(categorical_features, 2))
if X_cat.shape[1] > 0:
X_cat = KBinsDiscretizer(encode='ordinal').fit_transform(X_cat)
cont_columns = ["asdf_%d_cont" % i for i in range(continuous_features)]
df_cont = pd.DataFrame(X_cont, columns=cont_columns)
if categorical_features > 0:
cat_columns = ["asdf_%d_cat" % i for i in range(categorical_features)]
df_cat = pd.DataFrame(X_cat, columns=cat_columns).astype('int')
df_cat = df_cat.astype("category")
X_df = pd.concat([df_cont, df_cat], axis=1)
else:
X_df = df_cont
assert(X_df.shape[1] == continuous_features + categorical_features)
X_clean = clean(X_df.copy())
y = y_cont + y_cat
if X_df.shape[1] == 0:
y = np.random.uniform(size=n_samples)
if task == "classification":
y = np.digitize(y, np.percentile(y, [5, 10, 60, 85]))
X_clean['target'] = y
if task == "classification":
X_clean['target'] = X_clean['target'].astype('category')
types = detect_types(X_clean)
column_types = types.T.idxmax()
assert np.all(column_types[:continuous_features] == 'continuous')
assert np.all(column_types[continuous_features:-1] == 'categorical')
if task == "classification":
assert column_types[-1] == 'categorical'
else:
assert column_types[-1] == 'continuous'
plot(X_clean, target_col='target')
plt.close("all")
@pytest.mark.parametrize("add, feature_type, target_type",
itertools.product([0, .1],
['continuous', 'categorical'],
['continuous', 'categorical']))
def test_type_hints(add, feature_type, target_type):
X = pd.DataFrame(np.random.randint(4, size=100)) + add
X['target'] = np.random.uniform(size=100)
plot(X, type_hints={0: feature_type,
'target': target_type},
target_col='target')
# get title of figure
text = plt.gcf()._suptitle.get_text()
assert feature_type.capitalize() in text
ax = plt.gca()
# one of the labels is 'target' iif regression
labels = ax.get_ylabel() + ax.get_xlabel()
assert ('target' in labels) == (target_type == 'continuous')
plt.close("all")
def test_float_classification_target():
# check we can plot even if we do classification with a float target
X, y = make_blobs()
data = pd.DataFrame(X)
    data['target'] = y.astype(float)  # the np.float alias was removed in NumPy 1.24
types = detect_types(data)
assert types.categorical['target']
plot(data, target_col='target')
# same with "actual float" - we need to specify classification for that :-/
    data['target'] = y.astype(float) + .2
plot(data, target_col='target', type_hints={'target': 'categorical'})
plt.close("all")
@pytest.mark.filterwarnings('ignore:Discarding near-constant')
def test_plot_classification_n_classes():
X, y = make_blobs()
X = pd.DataFrame(X)
X['target'] = 0
with pytest.raises(ValueError, match="Less than two classes"):
plot_classification_categorical(X, 'target')
with pytest.raises(ValueError, match="Less than two classes"):
plot_classification_continuous(X, 'target')
def test_plot_wrong_target_type():
X, y = make_blobs()
X = pd.DataFrame(X)
X['target'] = y
with pytest.raises(ValueError, match="need continuous"):
plot_regression_categorical(X, 'target')
with pytest.raises(ValueError, match="need continuous"):
plot_regression_continuous(X, 'target')
X['target'] = X[0]
with pytest.raises(ValueError, match="need categorical"):
plot_classification_categorical(X, 'target')
with pytest.raises(ValueError, match="need categorical"):
plot_classification_continuous(X, 'target')
def test_plot_target_low_card_int():
data = load_digits()
df = data_df_from_bunch(data)
plot(df[::10], target_col='target')
def test_plot_X_y():
X, y = make_blobs()
X = pd.DataFrame(X)
plot(X, y)
def test_plot_regression_numpy():
X, y = make_regression()
plot(X, y)
def test_plot_lda_binary():
X, y = make_blobs(centers=2)
X = pd.DataFrame(X)
plot(X, y, univariate_plot='kde')
def test_plot_int_column_name():
X, y = make_blobs()
X = pd.DataFrame(X)
X[3] = y
plot(X, target_col=3)
def test_negative_ordinal():
# check that a low card int with negative values is plotted correctly
data = pd.DataFrame([np.random.randint(0, 10, size=1000) - 5,
np.random.randint(0, 2, size=1000)]).T
# ensure first column is low_card_int
assert (detect_types(data).T.idxmax()
== ['low_card_int', 'categorical']).all()
assert guess_ordinal(data[0])
# smoke test
plot(data, target_col=1)
def test_large_ordinal():
# check that large integers don't bring us down (bincount memory error)
# here some random phone numbers
assert not guess_ordinal(pd.Series([6786930208, 2142878625, 9106275431]))
def test_plot_classification_continuous():
data = fetch_openml('MiceProtein')
df = data_df_from_bunch(data)
# only univariate plots
figures = plot_classification_continuous(df, target_col='target',
plot_pairwise=False)
assert len(figures) == 1
# top 10 axes
assert len(figures[0].get_axes()) == 10
# six is the minimum number of features for histograms
# (last column is target)
figures = plot_classification_continuous(df.iloc[:, -7:],
target_col='target',
plot_pairwise=False)
assert len(figures) == 1
assert len(figures[0].get_axes()) == 6
# for 5 features, do full pairplot
figures = plot_classification_continuous(df.iloc[:, -6:],
target_col='target',
plot_pairwise=False)
assert len(figures) == 1
# diagonal has twin axes
assert len(figures[0].get_axes()) == 5 * 5 + 5
# also do pairwise plots
figures = plot_classification_continuous(df, target_col='target',
random_state=42)
# univariate, pairwise, pca, lda
assert len(figures) == 4
# univariate
axes = figures[0].get_axes()
assert len(axes) == 10
# known result
assert axes[0].get_xlabel() == "SOD1_N"
# bar plot never has ylabel
assert axes[0].get_ylabel() == ""
# pairwise
axes = figures[1].get_axes()
assert len(axes) == 4
# known result
assert axes[0].get_xlabel() == "SOD1_N"
assert axes[0].get_ylabel() == 'S6_N'
# PCA
axes = figures[2].get_axes()
assert len(axes) == 4
# known result
assert axes[0].get_xlabel() == "PCA 1"
assert axes[0].get_ylabel() == 'PCA 5'
# LDA
axes = figures[3].get_axes()
assert len(axes) == 4
# known result
assert axes[0].get_xlabel() == "LDA 0"
assert axes[0].get_ylabel() == 'LDA 1'
def test_plot_string_target():
X, y = make_blobs(n_samples=30)
data = pd.DataFrame(X)
y = pd.Series(y)
y[y == 0] = 'a'
y[y == 1] = 'b'
y[y == 2] = 'c'
data['target'] = y
plot(data, target_col='target')
def test_na_vals_reg_plot_raise_warning():
X, y = load_diabetes(return_X_y=True)
X = pd.DataFrame(X)
y[::50] = np.NaN
X['target_col'] = y
with pytest.warns(UserWarning, match="Missing values in target_col have "
"been removed for regression"):
plot(X, 'target_col')
with pytest.warns(UserWarning, match="Missing values in target_col have "
"been removed for regression"):
plot_regression_continuous(X, 'target_col')
with pytest.warns(UserWarning, match="Missing values in target_col have "
"been removed for regression"):
plot_regression_categorical(X, 'target_col')
def test_plot_regression_continuous_with_target_outliers():
df = pd.DataFrame(
data={
"feature": np.random.randint(low=1, high=100, size=200),
# target values are bound between 50 and 100
"target": np.random.randint(low=50, high=100, size=200)
}
)
# append single outlier record with target value 0
df = df.append({"feature": 50, "target": 0}, ignore_index=True)
with pytest.warns(
UserWarning,
match="Dropped 1 outliers in column target."
):
plot_regression_continuous(df, 'target')
def test_plot_regression_categorical_missing_value():
df = pd.DataFrame({'y': np.random.normal(size=300)})
df.loc[100:200, 'y'] += 1
df.loc[200:300, 'y'] += 2
df['x'] = 'a'
df.loc[100:200, 'x'] = 'b'
df.loc[200:300, 'x'] = np.NaN
res = plot(df, target_col='y')
assert len(res[1][0, 0].get_yticklabels()) == 3
assert res[1][0, 0].get_yticklabels()[2].get_text() == 'dabl_mi...'
def test_label_truncation():
a = ('a_really_long_name_that_would_mess_up_the_layout_a_lot'
'_by_just_being_very_long')
b = ('the_target_that_has_an_equally_long_name_which_would_'
'mess_up_everything_as_well_but_in_different_places')
df = pd.DataFrame({a: np.random.uniform(0, 1, 1000)})
df[b] = df[a] + np.random.uniform(0, 0.1, 1000)
res = plot_regression_continuous(df, target_col=b)
assert res[0, 0].get_ylabel() == 'the_target_that_h...'
assert res[0, 0].get_xlabel() == 'a_really_long_nam...'
set_config(truncate_labels=False)
res = plot_regression_continuous(df, target_col=b)
assert res[0, 0].get_ylabel() == b
assert res[0, 0].get_xlabel() == a
set_config(truncate_labels=True)
| 35.610224
| 79
| 0.63386
| 1,494
| 11,146
| 4.513387
| 0.190094
| 0.029364
| 0.017945
| 0.01661
| 0.390479
| 0.298235
| 0.258639
| 0.228533
| 0.205102
| 0.148747
| 0
| 0.027443
| 0.241522
| 11,146
| 312
| 80
| 35.724359
| 0.770168
| 0.083707
| 0
| 0.258621
| 0
| 0
| 0.123171
| 0.019841
| 0
| 0
| 0
| 0.003205
| 0.155172
| 1
| 0.077586
| false
| 0
| 0.047414
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92d3e306e086847f38535479f8de8893955d728c
| 4,480
|
py
|
Python
|
scripts/calculate_rank.py
|
daniel-theis/multicore-test-harness
|
d0ff54ef1c9f9637dd16dd8b85ac1cee8dc49e19
|
[
"MIT"
] | 15
|
2018-05-06T20:54:41.000Z
|
2020-12-04T05:36:11.000Z
|
scripts/calculate_rank.py
|
daniel-theis/multicore-test-harness
|
d0ff54ef1c9f9637dd16dd8b85ac1cee8dc49e19
|
[
"MIT"
] | null | null | null |
scripts/calculate_rank.py
|
daniel-theis/multicore-test-harness
|
d0ff54ef1c9f9637dd16dd8b85ac1cee8dc49e19
|
[
"MIT"
] | 3
|
2020-12-04T05:36:13.000Z
|
2021-09-08T11:53:16.000Z
|
################################################################################
# Copyright (c) 2017 Dan Iorga, Tyler Sorenson, Alastair Donaldson
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
import sys
import json
from pprint import pprint
class CalculateRank(object):
def __init__(self, input_file):
self._input_file = input_file
def get_rank(self):
# Read the configuration in the JSON file
with open(self._input_file) as data_file:
experiments_object = json.load(data_file)
# Sort all the configurations in a list
dict_list = list()
for experiment in experiments_object:
ranked_list = experiments_object[experiment]["it"]
od = list(sorted(ranked_list.values(), key=lambda x:x['q_value'], reverse=True))
dict_list.append(od)
# for it in dict_list:
# print()
# print()
# for i in range(len(it)):
# print(it[i]['mapping'])
# print(it[i]['q_value'])
# For each environment. get the rank in the other experiments and store in 'rank'
for it in dict_list[0]:
environment = it['mapping']
rank_list = list()
# Look it up for each victim(experiment)
for it2 in dict_list:
# Find its rank there
for i in range(len(it2)):
env = it2[i]['mapping']
if environment == env:
rank_here = i
break
rank_list.append(rank_here)
it['rank'] = rank_list
# Identify the ones that are not Pareto optimal
rank_list_bad = list()
for it1 in dict_list[0]:
for it2 in dict_list[0]:
if len([i for i, j in zip(it1['rank'], it2['rank']) if i > j]) == len(it1['rank']):
rank_list_bad.append(it1)
# Put the Pareto Optimal in a list
paretto_optimal = list()
for it in dict_list[0]:
if not (it in rank_list_bad):
paretto_optimal.append(it)
# If there are ties, try to break them at fewer comparisons
if len(paretto_optimal) > 1:
rank_list_bad = list()
for it1 in paretto_optimal:
for it2 in paretto_optimal:
if len([i for i, j in zip(it1['rank'], it2['rank']) if i > j]) == len(it1['rank']) - 1:
rank_list_bad.append(it1)
# Put the tie broken ones in a list
paretto_optimal_tie_break = list()
for it in paretto_optimal:
if not (it in rank_list_bad):
paretto_optimal_tie_break.append(it)
print("With no tie breaking")
for i in range(len(paretto_optimal)):
print(paretto_optimal[i]['mapping'])
print("With tie breaking")
for i in range(len(paretto_optimal_tie_break)):
print(paretto_optimal_tie_break[i]['mapping'])
else:
print(paretto_optimal[0]['mapping'])
print("There was no tie breaking")
if __name__ == "__main__":
if len(sys.argv) != 2:
print("usage: " + sys.argv[0] + " <ranked_environments>.json\n")
exit(1)
rank = CalculateRank(sys.argv[1])
rank.get_rank()
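# Illustrative note, not part of the original script: the filtering above keeps a
# configuration only if no other configuration beats it on every rank (Pareto
# optimality). For example, with ranks_a = [1, 3] and ranks_b = [2, 4], the test
#     len([i for i, j in zip(ranks_b, ranks_a) if i > j]) == len(ranks_b)
# is True, so ranks_b is dominated by ranks_a and would be filtered out.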
| 39.646018
| 107
| 0.579241
| 582
| 4,480
| 4.328179
| 0.324742
| 0.072251
| 0.023819
| 0.017467
| 0.191346
| 0.144502
| 0.131798
| 0.092894
| 0.092894
| 0.034935
| 0
| 0.009597
| 0.302232
| 4,480
| 112
| 108
| 40
| 0.796225
| 0.359375
| 0
| 0.135593
| 0
| 0
| 0.066468
| 0.010456
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.050847
| 0
| 0.101695
| 0.135593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92d713c9c1666b57fdf260fc3597ec5bb433209c
| 1,886
|
py
|
Python
|
scripts/spacy_files/similarity_replacement.py
|
HighDeFing/thesis_v4
|
2dc9288af75a8b51fe54ed66f520e8aa8a0ab3c7
|
[
"Apache-2.0"
] | null | null | null |
scripts/spacy_files/similarity_replacement.py
|
HighDeFing/thesis_v4
|
2dc9288af75a8b51fe54ed66f520e8aa8a0ab3c7
|
[
"Apache-2.0"
] | null | null | null |
scripts/spacy_files/similarity_replacement.py
|
HighDeFing/thesis_v4
|
2dc9288af75a8b51fe54ed66f520e8aa8a0ab3c7
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/env python
from black import main
import spacy
import json
from spacy import displacy
import unidecode
import pandas as pd
import numpy as np
import os
csv_source = "scripts/spacy_files/data/thesis_200_with_school.csv"
df = pd.read_csv(csv_source)
df = df[df['isScan']==False]
df = df.sort_values('isScan', ascending=False)
text1= "Escuela de Enfermería"
text2 = "ESCUELA DE ENFERMERIA"
file = open("scripts/spacy_files/data/escuelas.json", "r")
file = json.load(file)
temp_list = []
for facultad in file:
temp_list.append(facultad['escuela'])
#print(facultad['escuela'])
escuelas = [item for sublist in temp_list for item in sublist] # make the list flat
#print(escuelas)
text1_u = unidecode.unidecode(text1)
text1_l_u = text1_u.lower()
text2_l_u = unidecode.unidecode(text2).lower()
print(text1_l_u, "<-->", text2_l_u)
if text1_l_u == text2_l_u:
print(text1, " is correct.")
def unaccent_list(accent_list):
unaccented_schools = []
for sch in accent_list:
unaccented_schools.append(unidecode.unidecode(sch).lower())
return unaccented_schools
def set_school_to_unaccent(escuelas):
escuelas = unaccent_list(escuelas)
return escuelas
def create_dictionary(schools):
myDict = dict((e,i) for i,e in enumerate(schools))
return myDict
def set_schools_accents(row, dict, dict_c):
index = dict.get(row.lower())
key_list = list(dict_c.keys())
val_list = list(dict_c.values())
    try:
        position = val_list.index(index)
        return key_list[position]  # return the accented school name
    except ValueError:
        return None
if __name__ == "__main__":
u_escuelas = set_school_to_unaccent(escuelas)
u_escuelas_dict = create_dictionary(u_escuelas)
escuelas_dict = create_dictionary(escuelas)
print(u_escuelas_dict)
print(escuelas_dict)
print(set_schools_accents("No school", u_escuelas_dict, escuelas_dict))
| 25.486486
| 83
| 0.718982
| 271
| 1,886
| 4.738007
| 0.339483
| 0.009346
| 0.016355
| 0.03271
| 0.063863
| 0.021807
| 0
| 0
| 0
| 0
| 0
| 0.01025
| 0.172322
| 1,886
| 73
| 84
| 25.835616
| 0.8123
| 0.040297
| 0
| 0
| 0
| 0
| 0.102664
| 0.04939
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0
| 0.150943
| 0
| 0.301887
| 0.09434
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92d920562d22f1142cab1ea79e81051636bf317f | 7,212 | py | Python | test/unittest_base.py | dat-boris/tensorforce | d777121b1c971da5500572c5f83173b9229f7370 | ["Apache-2.0"] | null | null | null | test/unittest_base.py | dat-boris/tensorforce | d777121b1c971da5500572c5f83173b9229f7370 | ["Apache-2.0"] | null | null | null | test/unittest_base.py | dat-boris/tensorforce | d777121b1c971da5500572c5f83173b9229f7370 | ["Apache-2.0"] | null | null | null |
# Copyright 2018 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from copy import deepcopy
from datetime import datetime
import os
import sys
import warnings
from tensorforce import TensorforceError
from tensorforce.agents import Agent
from tensorforce.core.layers import Layer
from tensorforce.environments import Environment
from tensorforce.execution import Runner
from test.unittest_environment import UnittestEnvironment
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class UnittestBase(object):
"""
Unit-test base class.
"""
# Unittest
num_updates = None
num_episodes = None
num_timesteps = None
# Environment
min_timesteps = 1
states = dict(
bool_state=dict(type='bool', shape=(1,)),
int_state=dict(type='int', shape=(2,), num_values=4),
float_state=dict(type='float', shape=(1, 1, 2)),
bounded_state=dict(type='float', shape=(), min_value=-0.5, max_value=0.5)
)
actions = dict(
bool_action=dict(type='bool', shape=(1,)),
int_action=dict(type='int', shape=(2,), num_values=4),
float_action=dict(type='float', shape=(1, 1)),
bounded_action=dict(type='float', shape=(2,), min_value=-0.5, max_value=0.5)
)
# Exclude action types
exclude_bool_action = False
exclude_int_action = False
exclude_float_action = False
exclude_bounded_action = False
# Agent
agent = dict(
update=4, policy=dict(network=dict(type='auto', size=8, depth=1, internal_rnn=2)),
objective='policy_gradient', reward_estimation=dict(horizon=3)
)
# Tensorforce config
require_observe = False
require_all = False
def setUp(self):
warnings.filterwarnings(
action='ignore',
message='Converting sparse IndexedSlices to a dense Tensor of unknown shape'
)
def start_tests(self, name=None):
"""
Start unit-test method.
"""
if name is None:
sys.stdout.write('\n{} {}: '.format(
datetime.now().strftime('%H:%M:%S'), self.__class__.__name__[4:]
))
else:
sys.stdout.write('\n{} {} ({}): '.format(
datetime.now().strftime('%H:%M:%S'), self.__class__.__name__[4:], name
))
sys.stdout.flush()
def finished_test(self, assertion=None):
"""
Finished unit-test.
"""
if assertion is None:
assertion = True
else:
self.assertTrue(expr=assertion)
if assertion:
sys.stdout.write('.')
sys.stdout.flush()
def prepare(
self, environment=None, min_timesteps=None, states=None, actions=None,
exclude_bool_action=False, exclude_int_action=False, exclude_float_action=False,
exclude_bounded_action=False, require_observe=False, require_all=False, **agent
):
"""
Generic unit-test preparation.
"""
Layer.layers = None
if environment is None:
if states is None:
states = deepcopy(self.__class__.states)
if actions is None:
actions = deepcopy(self.__class__.actions)
if exclude_bool_action or self.__class__.exclude_bool_action:
actions.pop('bool_action')
if exclude_int_action or self.__class__.exclude_int_action:
actions.pop('int_action')
if exclude_float_action or self.__class__.exclude_float_action:
actions.pop('float_action')
if exclude_bounded_action or self.__class__.exclude_bounded_action:
actions.pop('bounded_action')
if min_timesteps is None:
min_timesteps = self.__class__.min_timesteps
environment = UnittestEnvironment(
states=states, actions=actions, min_timesteps=min_timesteps
)
elif min_timesteps is not None:
raise TensorforceError.unexpected()
environment = Environment.create(environment=environment, max_episode_timesteps=5)
for key, value in self.__class__.agent.items():
if key not in agent:
agent[key] = value
if self.__class__.require_all or require_all:
config = None
elif self.__class__.require_observe or require_observe:
config = dict(api_functions=['reset', 'act', 'observe'])
else:
config = dict(api_functions=['reset', 'act'])
agent = Agent.create(agent=agent, environment=environment, config=config)
return agent, environment
def unittest(
self, num_updates=None, num_episodes=None, num_timesteps=None, environment=None,
min_timesteps=None, states=None, actions=None, exclude_bool_action=False,
exclude_int_action=False, exclude_float_action=False, exclude_bounded_action=False,
require_observe=False, require_all=False, **agent
):
"""
Generic unit-test.
"""
agent, environment = self.prepare(
environment=environment, min_timesteps=min_timesteps, states=states, actions=actions,
exclude_bool_action=exclude_bool_action, exclude_int_action=exclude_int_action,
exclude_float_action=exclude_float_action,
exclude_bounded_action=exclude_bounded_action, require_observe=require_observe,
require_all=require_all, **agent
)
self.runner = Runner(agent=agent, environment=environment)
assert (num_updates is not None) + (num_episodes is not None) + \
(num_timesteps is not None) <= 1
if num_updates is None and num_episodes is None and num_timesteps is None:
num_updates = self.__class__.num_updates
num_episodes = self.__class__.num_episodes
num_timesteps = self.__class__.num_timesteps
if num_updates is None and num_episodes is None and num_timesteps is None:
num_updates = 2
assert (num_updates is not None) + (num_episodes is not None) + \
(num_timesteps is not None) == 1
evaluation = not any([
require_all, require_observe, self.__class__.require_all,
self.__class__.require_observe
])
self.runner.run(
num_episodes=num_episodes, num_timesteps=num_timesteps, num_updates=num_updates,
use_tqdm=False, evaluation=evaluation
)
self.runner.close()
agent.close()
environment.close()
self.finished_test()
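For context, a hedged sketch of how a concrete test case might use this base class; the test class and method names here are hypothetical, only the `UnittestBase` API defined above is assumed:

```python
import unittest

class TestHypotheticalAgent(UnittestBase, unittest.TestCase):
    """Illustrative only: drives the generic unittest() helper above."""

    def test_default_config(self):
        self.start_tests(name='default-config')
        # builds the UnittestEnvironment, creates the agent from the
        # class-level defaults and runs the Runner for two updates
        self.unittest(num_updates=2)
```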
| 35.880597
| 97
| 0.634498
| 849
| 7,212
| 5.123675
| 0.228504
| 0.035172
| 0.037241
| 0.016552
| 0.341839
| 0.287816
| 0.247356
| 0.247356
| 0.238161
| 0.223448
| 0
| 0.007704
| 0.262063
| 7,212
| 200
| 98
| 36.06
| 0.809658
| 0.116195
| 0
| 0.095588
| 0
| 0
| 0.040973
| 0
| 0
| 0
| 0
| 0
| 0.051471
| 1
| 0.036765
| false
| 0
| 0.080882
| 0
| 0.227941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92dc54efa676f164aaadbce167924df2d1df95ab | 7,112 | py | Python | webcam_demo.py | taranek/tennis-stats-provider | e95093679a194d30d0727ec8e11d44fc462f6adc | ["Apache-2.0"] | null | null | null | webcam_demo.py | taranek/tennis-stats-provider | e95093679a194d30d0727ec8e11d44fc462f6adc | ["Apache-2.0"] | null | null | null | webcam_demo.py | taranek/tennis-stats-provider | e95093679a194d30d0727ec8e11d44fc462f6adc | ["Apache-2.0"] | null | null | null |
import tensorflow as tf
import json
import math
import cv2
import time
import argparse
import concurrent.futures
import posenet
import keyboard
import sys
import numpy as np
from threading import Thread
from slugify import slugify
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=int, default=101)
parser.add_argument('--cam_id', type=int, default=0)
parser.add_argument('--cam_width', type=int, default=1280)
parser.add_argument('--cam_height', type=int, default=720)
parser.add_argument('--scale_factor', type=float, default=0.7125)
parser.add_argument('--file', type=str, default=None, help="Optionally use a video file instead of a live camera")
args = parser.parse_args()
def main():
# tf.config.threading.set_inter_op_parallelism_threads(0)
# tf.config.threading.set_intra_op_parallelism_threads(0)
# print(tf.config.threading.get_inter_op_parallelism_threads())
# print(tf.config.threading.get_intra_op_parallelism_threads())
with tf.compat.v1.Session() as sess:
model_cfg, model_outputs = posenet.load_model(args.model, sess)
output_stride = model_cfg['output_stride']
if args.file is not None:
cap = cv2.VideoCapture(args.file)
else:
cap = cv2.VideoCapture(args.cam_id)
cap.set(3, args.cam_width)
cap.set(4, args.cam_height)
start = time.time()
frame_count = 0
recording = True
# ret,frame1 = cap.read()
# ret,frame2 = cap.read()
file_content = []
while True:
# diff = cv2.absdiff(frame1,frame2)
# gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
# blur = cv2.GaussianBlur(gray,(15,15),0)
# _, thresh = cv2.threshold(blur,20,255,cv2.THRESH_BINARY)
# dilated = cv2.dilate(thresh,None, iterations=3)
# contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# # if(len(contours)>0):
# # print("One:")
# # print(dir(contours[0]))
# # print("One it is.")
# for contour in contours:
# (x,y,w,h) = cv2.boundingRect(contour)
# if(cv2.contourArea(contour)>400):
# continue
# cv2.rectangle(frame1,(x,y),(x+w,y+h),(0,255,0),2)
# # cv2.drawContours(frame1,contours, -1,(0,255,0),2)
# cv2.imshow("feed",frame1)
# frame1 = frame2
# ret, frame2 = cap.read()
input_image, display_image, output_scale = posenet.read_cap(cap, scale_factor=args.scale_factor, output_stride=output_stride)
heatmaps_result, offsets_result, displacement_fwd_result, displacement_bwd_result = sess.run(
model_outputs,
feed_dict={'image:0': input_image}
)
pose_scores, keypoint_scores, keypoint_coords = posenet.decode_multi.decode_multiple_poses(
heatmaps_result.squeeze(axis=0),
offsets_result.squeeze(axis=0),
displacement_fwd_result.squeeze(axis=0),
displacement_bwd_result.squeeze(axis=0),
output_stride=output_stride,
max_pose_detections=1,
min_pose_score=0.15)
keypoint_coords *= output_scale
# TODO this isn't particularly fast, use GL for drawing and display someday...
# print("\n ===================================== \n")
img = posenet.draw_skel_and_kp(
display_image, pose_scores, keypoint_scores, keypoint_coords,
min_pose_score=0.15, min_part_score=0.15)
cv2.imshow('posenet', img)
frame_count += 1
if(recording):
normalize_poses(keypoint_coords)
results = json.dumps({
"timestamp":time.time() - start,
"pose_scores":pose_scores.tolist(),
"keypoint_scores":keypoint_scores.tolist(),
"scores": keypoint_scores.size,
"keypoint_coords":normalize_poses(keypoint_coords),
"coords": keypoint_coords.size
})
file_content.append(results)
file_content = file_content[-30:]
if cv2.waitKey(1) & keyboard.is_pressed('w'):
print('you pressed w - service it was!')
time.sleep(0.5)
path = "collected/serves/"
filename = str(slugify("s-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if cv2.waitKey(1) & keyboard.is_pressed('d'):
print('you pressed d - forehand it was!')
time.sleep(0.5)
path = "collected/forehand/"
filename = str(slugify("f-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if cv2.waitKey(1) & keyboard.is_pressed('a'):
print('you pressed a - backhand it was!')
time.sleep(0.5)
path = "collected/backhand/"
filename = str(slugify("b-"+str(time.time()))+".txt")
x = Thread(target=save_to_file, args=(str(path+filename),str(file_content)))
x.start()
x.join()
file_content = []
if cv2.waitKey(1) & keyboard.is_pressed('q'):
print('you pressed q - quitting!')
cv2.destroyAllWindows()
break
print('Average FPS: ', frame_count / (time.time() - start))
return 0
def my_function(toPrint):
print(toPrint)
def save_to_file(filename,data):
file = open(filename,'w')
file.write(data)
file.close()
def find_middle(left,right):
x = (left[0]+right[0])/2.0
y = (left[1]+right[1])/2.0
return [x,y]
def find_distance(pointA,pointB):
dist = math.sqrt((pointB[0] - pointA[0])**2 + (pointB[1] - pointA[1])**2)
return dist
def normalize_poses(poses):
leftShoulderCords = poses[0][5]
rightShoulderCords = poses[0][6]
middleShoulderPoint = find_middle(leftShoulderCords,rightShoulderCords)
leftHipCords = poses[0][11]
rightHipCords = poses[0][12]
middleHipPoint = find_middle(leftHipCords,rightHipCords)
    armHipDistance = find_distance(middleHipPoint, middleShoulderPoint)
normalized = []
for pose in poses[0]:
normalized.append(
[(pose[0]-middleHipPoint[0])/armHipDistance,
(pose[1]-middleHipPoint[1])/armHipDistance]
)
return normalized
if __name__ == "__main__":
main()
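The normalisation step above expresses every keypoint relative to the hip midpoint, in units of the shoulder-to-hip distance. A small stand-alone check of that arithmetic with made-up coordinates (not real PoseNet output):

```python
import math

def find_middle(left, right):
    return [(left[0] + right[0]) / 2.0, (left[1] + right[1]) / 2.0]

def find_distance(a, b):
    return math.sqrt((b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2)

mid_shoulder = find_middle([0.0, 0.0], [2.0, 0.0])   # [1.0, 0.0]
mid_hip = find_middle([0.0, 4.0], [2.0, 4.0])        # [1.0, 4.0]
torso = find_distance(mid_hip, mid_shoulder)         # 4.0
keypoint = [3.0, 4.0]                                # e.g. a wrist
normalized = [(keypoint[0] - mid_hip[0]) / torso,
              (keypoint[1] - mid_hip[1]) / torso]
print(normalized)  # [0.5, 0.0]
```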
| 37.829787
| 137
| 0.564398
| 804
| 7,112
| 4.81592
| 0.304726
| 0.028409
| 0.026343
| 0.018595
| 0.187758
| 0.146952
| 0.146952
| 0.116994
| 0.094525
| 0.094525
| 0
| 0.02937
| 0.310602
| 7,112
| 188
| 138
| 37.829787
| 0.760351
| 0.158043
| 0
| 0.123077
| 0
| 0
| 0.070181
| 0
| 0
| 0
| 0
| 0.005319
| 0
| 1
| 0.046154
| false
| 0
| 0.1
| 0
| 0.176923
| 0.046154
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92df29892405e44dded087915f2a1792a9fb1160 | 6,265 | py | Python | otcextensions/tests/unit/osclient/dcs/v1/fakes.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | ["Apache-2.0"] | null | null | null | otcextensions/tests/unit/osclient/dcs/v1/fakes.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | ["Apache-2.0"] | null | null | null | otcextensions/tests/unit/osclient/dcs/v1/fakes.py | zsoltn/python-otcextensions | 4c0fa22f095ebd5f9636ae72acbae5048096822c | ["Apache-2.0"] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# import datetime
import random
import uuid
import mock
from openstackclient.tests.unit import utils
from otcextensions.tests.unit.osclient import test_base
from otcextensions.sdk.dcs.v1 import backup
from otcextensions.sdk.dcs.v1 import config
from otcextensions.sdk.dcs.v1 import instance
from otcextensions.sdk.dcs.v1 import restore
from otcextensions.sdk.dcs.v1 import statistic
class TestDCS(utils.TestCommand):
def setUp(self):
super(TestDCS, self).setUp()
self.app.client_manager.dcs = mock.Mock()
self.client = self.app.client_manager.dcs
self.client.get_instance = mock.Mock()
self.client.find_instance = mock.Mock()
self.client.instances = mock.Mock()
self.client.delete_instance = mock.Mock()
self.client.update_instance = mock.Mock()
self.client.create_instance = mock.Mock()
self.client.extend_instance = mock.Mock()
class FakeInstance(test_base.Fake):
"""Fake one or more Instance"""
@classmethod
def generate(cls):
object_info = {
'name': 'group-' + uuid.uuid4().hex,
'id': 'id-' + uuid.uuid4().hex,
'description': 'SOME description',
'status': random.choice(['CREATING', 'CREATEFILED',
'RUNNING', 'ERROR', 'STARTING',
'RESTARTING', 'CLOSING', 'CLOSED',
'EXTENDING']),
'engine': uuid.uuid4().hex,
'capacity': random.randint(1, 100),
'ip': uuid.uuid4().hex,
'port': random.randint(1, 65535),
'resource_spec_code': random.choice(['dcs.single_node',
'dcs.master_standby',
'dcs.cluster'
]),
'engine_version': uuid.uuid4().hex,
'internal_version': uuid.uuid4().hex,
'charging_mode': random.randint(0, 10),
'vpc_id': uuid.uuid4().hex,
'vpc_name': uuid.uuid4().hex,
'subnet_id': uuid.uuid4().hex,
'subnet_name': uuid.uuid4().hex,
'subnet_cidr': uuid.uuid4().hex,
'security_group_id': uuid.uuid4().hex,
'security_group_name': uuid.uuid4().hex,
'created_at': uuid.uuid4().hex,
'error_code': uuid.uuid4().hex,
'product_id': random.choice(['OTC_DCS_SINGLE',
'OTC_DCS_MS',
'OTC_DCS_CL']),
'available_zones': uuid.uuid4().hex,
'max_memory': random.randint(0, 10),
'used_memory': random.randint(0, 10),
'user_id': uuid.uuid4().hex,
'user_name': uuid.uuid4().hex,
'order_id': uuid.uuid4().hex,
'maintain_begin': uuid.uuid4().hex,
'maintain_end': uuid.uuid4().hex,
}
obj = instance.Instance.existing(**object_info)
return obj
class FakeStatistic(test_base.Fake):
"""Fake one or more Statistic"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'max_memory': random.randint(1, 65535),
'used_memory': random.randint(1, 65535),
'cmd_get_count': random.randint(1, 65535),
'cmd_set_count': random.randint(1, 65535),
'used_cpu': 'cpu-' + uuid.uuid4().hex,
'input_kbps': 'input-' + uuid.uuid4().hex,
'output_kbps': 'output-' + uuid.uuid4().hex,
}
obj = statistic.Statistic.existing(**object_info)
return obj
class FakeBackup(test_base.Fake):
"""Fake one or more Backup"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'id': 'id-' + uuid.uuid4().hex,
'size': random.randint(1, 65535),
'period': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'progress': uuid.uuid4().hex,
'created_at': uuid.uuid4().hex,
'updated_at': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'error_code': uuid.uuid4().hex,
'is_restorable': True,
}
obj = backup.Backup.existing(**object_info)
return obj
class FakeRestore(test_base.Fake):
"""Fake one or more Restore"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'max_memory': random.randint(1, 65535),
'used_memory': random.randint(1, 65535),
'cmd_get_count': random.randint(1, 65535),
'cmd_set_count': random.randint(1, 65535),
'used_cpu': 'cpu-' + uuid.uuid4().hex,
'input_kbps': 'input-' + uuid.uuid4().hex,
'output_kbps': 'output-' + uuid.uuid4().hex
}
obj = restore.Restore.existing(**object_info)
return obj
class FakeConfig(test_base.Fake):
"""Fake one or more Config"""
@classmethod
def generate(cls):
object_info = {
'instance_id': 'instance_id-' + uuid.uuid4().hex,
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'value': uuid.uuid4().hex,
'value_type': uuid.uuid4().hex,
'value_range': uuid.uuid4().hex,
'default_value': uuid.uuid4().hex,
'description': uuid.uuid4().hex
}
obj = config.Config.existing(**object_info)
return obj
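A hedged usage sketch, not taken from the repository, of how one of these fakes could be exercised in a test; only the `generate()` classmethods defined above are assumed, and the attribute access relies on the SDK resource exposing the generated fields as properties:

```python
class TestFakeInstance(TestDCS):
    """Illustrative only."""

    def test_generate_fields(self):
        fake = FakeInstance.generate()
        self.assertTrue(fake.name.startswith('group-'))
        self.assertTrue(fake.id.startswith('id-'))
        self.assertTrue(1 <= fake.capacity <= 100)
```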
| 36.005747
| 77
| 0.555148
| 698
| 6,265
| 4.853868
| 0.269341
| 0.124852
| 0.16647
| 0.049587
| 0.513282
| 0.418241
| 0.32438
| 0.243802
| 0.195986
| 0.195986
| 0
| 0.029717
| 0.307103
| 6,265
| 173
| 78
| 36.213873
| 0.750749
| 0.109018
| 0
| 0.335878
| 0
| 0
| 0.161226
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045802
| false
| 0
| 0.076336
| 0
| 0.206107
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92dfa8870f87fbcfb31691bd442140d0c802358d | 4,121 | py | Python | torchattacks/attacks/multiattack.py | Harry24k/adversarial-attacks-pytorch | bfa2aa8d6f0c3b8086718f9f31526fcafa6995bb | ["MIT"] | 782 | 2020-03-28T01:56:36.000Z | 2022-03-31T14:54:02.000Z | torchattacks/attacks/multiattack.py | Harry24k/adversarial-attacks-pytorch | bfa2aa8d6f0c3b8086718f9f31526fcafa6995bb | ["MIT"] | 48 | 2020-04-18T23:06:30.000Z | 2022-03-24T01:54:50.000Z | torchattacks/attacks/multiattack.py | Harry24k/adversarial-attacks-pytorch | bfa2aa8d6f0c3b8086718f9f31526fcafa6995bb | ["MIT"] | 197 | 2020-03-31T05:21:02.000Z | 2022-03-31T15:24:29.000Z |
import copy
import torch
from ..attack import Attack
class MultiAttack(Attack):
r"""
    MultiAttack is a class to attack a model with various attacks against the same images and labels.
Arguments:
model (nn.Module): model to attack.
attacks (list): list of attacks.
Examples::
>>> atk1 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
>>> atk2 = torchattacks.PGD(model, eps=8/255, alpha=2/255, iters=40, random_start=True)
>>> atk = torchattacks.MultiAttack([atk1, atk2])
>>> adv_images = attack(images, labels)
"""
def __init__(self, attacks, verbose=False):
# Check validity
ids = []
for attack in attacks:
ids.append(id(attack.model))
if len(set(ids)) != 1:
raise ValueError("At least one of attacks is referencing a different model.")
super().__init__("MultiAttack", attack.model)
self.attacks = attacks
self.verbose = verbose
self._accumulate_multi_atk_records = False
self._multi_atk_records = [0.0]
self._supported_mode = ['default']
def forward(self, images, labels):
r"""
Overridden.
"""
batch_size = images.shape[0]
fails = torch.arange(batch_size).to(self.device)
final_images = images.clone().detach().to(self.device)
labels = labels.clone().detach().to(self.device)
multi_atk_records = [batch_size]
for _, attack in enumerate(self.attacks):
adv_images = attack(images[fails], labels[fails])
outputs = self.model(adv_images)
_, pre = torch.max(outputs.data, 1)
corrects = (pre == labels[fails])
wrongs = ~corrects
succeeds = torch.masked_select(fails, wrongs)
succeeds_of_fails = torch.masked_select(torch.arange(fails.shape[0]).to(self.device), wrongs)
final_images[succeeds] = adv_images[succeeds_of_fails]
fails = torch.masked_select(fails, corrects)
multi_atk_records.append(len(fails))
if len(fails) == 0:
break
if self.verbose:
print(self._return_sr_record(multi_atk_records))
if self._accumulate_multi_atk_records:
self._update_multi_atk_records(multi_atk_records)
return final_images
def _clear_multi_atk_records(self):
self._multi_atk_records = [0.0]
def _covert_to_success_rates(self, multi_atk_records):
sr = [((1-multi_atk_records[i]/multi_atk_records[0])*100) for i in range(1, len(multi_atk_records))]
return sr
def _return_sr_record(self, multi_atk_records):
sr = self._covert_to_success_rates(multi_atk_records)
return "Attack success rate: "+" | ".join(["%2.2f %%"%item for item in sr])
def _update_multi_atk_records(self, multi_atk_records):
for i, item in enumerate(multi_atk_records):
self._multi_atk_records[i] += item
def save(self, data_loader, save_path=None, verbose=True, return_verbose=False):
r"""
Overridden.
"""
self._clear_multi_atk_records()
verbose = self.verbose
self.verbose = False
self._accumulate_multi_atk_records = True
for i, attack in enumerate(self.attacks):
self._multi_atk_records.append(0.0)
rob_acc, l2, elapsed_time = super().save(data_loader, save_path, verbose, return_verbose)
sr = self._covert_to_success_rates(self._multi_atk_records)
self._clear_multi_atk_records()
self._accumulate_multi_atk_records = False
self.verbose = verbose
if return_verbose:
return rob_acc, sr, l2, elapsed_time
def _save_print(self, progress, rob_acc, l2, elapsed_time, end):
r"""
Overridden.
"""
print("- Save progress: %2.2f %% / Robust accuracy: %2.2f %%"%(progress, rob_acc)+\
" / "+self._return_sr_record(self._multi_atk_records)+\
' / L2: %1.5f (%2.3f it/s) \t'%(l2, elapsed_time), end=end)
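A worked example (illustrative only) of the bookkeeping behind `_covert_to_success_rates`: entry 0 of the record list is the batch size, and each later entry is how many samples were still unbroken after the corresponding attack:

```python
multi_atk_records = [100, 40, 10]   # 100 images; 40 survive atk1, 10 survive atk1+atk2
sr = [(1 - multi_atk_records[i] / multi_atk_records[0]) * 100
      for i in range(1, len(multi_atk_records))]
print(sr)  # [60.0, 90.0] -> 60% fooled by atk1, 90% after both attacks
```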
| 33.778689
| 108
| 0.626062
| 522
| 4,121
| 4.66092
| 0.247126
| 0.088779
| 0.166461
| 0.070284
| 0.306617
| 0.194821
| 0.16605
| 0.079737
| 0.047678
| 0.047678
| 0
| 0.018403
| 0.261587
| 4,121
| 121
| 109
| 34.057851
| 0.781137
| 0.127639
| 0
| 0.109589
| 0
| 0
| 0.054932
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109589
| false
| 0
| 0.041096
| 0
| 0.219178
| 0.041096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92e0877363cacd633cbbf12e0ee4175cb9564598 | 2,627 | py | Python | src/manager/om/script/gspylib/inspection/items/os/CheckPortConflict.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | ["MulanPSL-1.0"] | 1 | 2020-06-30T15:00:50.000Z | 2020-06-30T15:00:50.000Z | src/manager/om/script/gspylib/inspection/items/os/CheckPortConflict.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | ["MulanPSL-1.0"] | null | null | null | src/manager/om/script/gspylib/inspection/items/os/CheckPortConflict.py | wotchin/openGauss-server | ebd92e92b0cfd76b121d98e4c57a22d334573159 | ["MulanPSL-1.0"] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms
# and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# ----------------------------------------------------------------------------
import subprocess
from gspylib.inspection.common.CheckItem import BaseItem
from gspylib.inspection.common.CheckResult import ResultStatus
class CheckPortConflict(BaseItem):
def __init__(self):
super(CheckPortConflict, self).__init__(self.__class__.__name__)
def doCheck(self):
cmd = "netstat -apn | grep 'tcp' " \
"| grep 'LISTEN'| awk -F ' ' '$4 ~ /25[0-9][0-9][0-9]/'"
(status, output) = subprocess.getstatusoutput(cmd)
if (status != 0):
self.result.rst = ResultStatus.NG
self.result.val = "Failed to excuted commands: %s\noutput:%s " % (
cmd, output)
else:
if (output.strip() == ""):
self.result.rst = ResultStatus.OK
self.result.val = "ports is normal"
else:
self.result.rst = ResultStatus.NG
self.result.val = output
self.result.raw = "checked ports: (25000-26000)\n" + output
def doSet(self):
pidList = []
cmd = "netstat -apn| grep 'tcp'" \
"| grep 'LISTEN'| awk -F ' ' '$4 ~ /25[0-9][0-9][0-9]/'" \
"| awk '{print $NF}'"
(status, output) = subprocess.getstatusoutput(cmd)
if (status == 0 and output != ""):
for line in output.split('\n'):
if (line.find('/') > 0):
pid = line.split('/')[0].strip()
if (pid.isdigit()):
pidList.append(pid)
if (pidList):
cmd = "kill -9"
for pid in pidList:
cmd += " %s" % pid
(status, output) = subprocess.getstatusoutput(cmd)
                if (status != 0):  # getstatusoutput returns an int exit status
                    self.result.val = "Failed to kill process. Error:%s\n" % output
                    self.result.val += "The cmd is %s " % cmd
else:
self.result.val = \
"Successfully killed the process with occupies the port.\n"
| 39.80303
| 79
| 0.53445
| 303
| 2,627
| 4.580858
| 0.458746
| 0.072046
| 0.056196
| 0.011527
| 0.245677
| 0.224784
| 0.224784
| 0.190202
| 0.06196
| 0.06196
| 0
| 0.023783
| 0.311762
| 2,627
| 65
| 80
| 40.415385
| 0.743916
| 0.229159
| 0
| 0.222222
| 0
| 0.044444
| 0.190239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.155556
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92e16c1fa8d877e82eb805100d17b73907afb25e | 646 | py | Python | _scripts/_build.py | dfreeman06/wxyz | 663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd | ["BSD-3-Clause"] | 1 | 2021-06-20T12:21:27.000Z | 2021-06-20T12:21:27.000Z | _scripts/_build.py | nrbgt/wxyz | 663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd | ["BSD-3-Clause"] | null | null | null | _scripts/_build.py | nrbgt/wxyz | 663cf6593f4c0ca12f7b94b61e34c0a8d3cbcdfd | ["BSD-3-Clause"] | null | null | null |
import subprocess
import sys
from . import ROOT, PY_SRC, _run, PY, DIST
CONDA_ORDER = [
"core",
"html",
"lab",
"datagrid",
"svg",
"tpl-jjinja"
"yaml"
]
CONDA_BUILD_ARGS = [
"conda-build", "-c", "conda-forge", "--output-folder", DIST / "conda-bld",
]
if __name__ == "__main__":
for pkg in PY_SRC.glob("wxyz_*"):
_run([PY, "setup.py", "sdist", "--dist-dir", DIST / "sdist"], cwd=str(pkg))
try:
_run([*CONDA_BUILD_ARGS, "--skip-existing", "."], cwd=ROOT / "recipes")
except:
for pkg in CONDA_ORDER:
_run([*CONDA_BUILD_ARGS, f"wxyz-{pkg}"], cwd=ROOT / "recipes")
| 20.83871
| 83
| 0.560372
| 83
| 646
| 4.084337
| 0.542169
| 0.117994
| 0.123894
| 0.100295
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.23839
| 646
| 30
| 84
| 21.533333
| 0.689024
| 0
| 0
| 0
| 0
| 0
| 0.256966
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92e2096dcbe8b31e8b6213b7078b62e4efd23dd0 | 3,318 | py | Python | Mmint/CGratio.py | lijiacd985/Mplot | adea07aa78a5495cf3551618f6ec2c08fa7c1029 | ["MIT"] | 5 | 2018-07-02T16:33:23.000Z | 2021-03-23T00:32:31.000Z | Mmint/CGratio.py | lijiacd985/Mplot | adea07aa78a5495cf3551618f6ec2c08fa7c1029 | ["MIT"] | 1 | 2017-09-19T19:46:11.000Z | 2020-02-28T05:00:49.000Z | Mmint/CGratio.py | lijiacd985/Mplot | adea07aa78a5495cf3551618f6ec2c08fa7c1029 | ["MIT"] | 4 | 2017-11-16T15:26:24.000Z | 2020-02-13T16:25:25.000Z |
import subprocess
from .Genome_fasta import get_fasta
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
import pysam
def run(parser):
args = parser.parse_args()
bases,chrs = get_fasta(args.genome)
l={}
for c in chrs:
l[c]=len(bases[c])
chrs = set(chrs)
#p = subprocess.Popen('bamToBed -i '+args.bamfile,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
reads_num=0
reads_cg_num=[0,0,0] #CG,cg,Cg
cgnum_per_read=[]
with pysam.AlignmentFile(args.bamfile) as f:
for line in f:
#t = line.decode('utf-8').strip().split()
chr = line.reference_name#t[0]
start= line.reference_start
end= line.reference_end
strand= not line.is_reverse # True +strand; False -strand
if not chr in chrs: continue
end=min(end+1,l[chr])
reads_num+=1
if strand:#=='+':
cg=[bases[chr].count('CG',start,end)+bases[chr].count('Cg',start,end),bases[chr].count('cG',start,end)+bases[chr].count('cg',start,end)]
else:
cg=[bases[chr].count('GC',start,end)+bases[chr].count('gC',start,end),bases[chr].count('Gc',start,end)+bases[chr].count('gc',start,end)]
#We need to consider strand specific situation.
#'+' strand we have CG but '-' we should count 'GC'.
#print cg
# for i in range(1,ls):
# r2=read[i]
# r1=read[i-1]
# if 'G'==r2 or 'g'==r2:
# if 'C'==r1: cg[0]+=1
# if 'c'==r1: cg[1]+=1
#count = int(cg[0]>0)+int(cg[1]>0)
if cg[0]+cg[1]==0: continue
#print cg
cgnum_per_read.append(sum(cg))
if cg[0]>0 and cg[1]>0:
reads_cg_num[2]+=1
continue
if cg[0]>0:
reads_cg_num[0]+=1
else:
reads_cg_num[1]+=1
#print reads_cg_num
#print reads_num
plt.figure()
plt.subplot(211)
labels = ['noCG','NonRepeat CG','Repeat cg','CGcg mix']
colors = ['r','b','g','y']
explode=(0.05,0,0,0)
sizes=[reads_num-sum(reads_cg_num)]+reads_cg_num
patches,l_text,p_text = plt.pie(sizes,explode=explode,labels=labels,colors=colors, labeldistance = 1.1,autopct = '%3.1f%%',shadow = False, startangle = 90,pctdistance = 0.6)
plt.axis('equal')
#plt.legend(loc=2,bbox_to_anchor=(0, 0))
ax=plt.subplot(212)
t=np.zeros(20)
for num in cgnum_per_read:
t[min(num-1,19)]+=1
labels = list(map(str,np.arange(1,20)))+['20+']
#print(t)
t = (np.array(t).astype(float)/sum(reads_cg_num))*100
plt.bar(np.arange(20),t)
ax.set_xticks(np.arange(20))
ax.set_xticklabels(labels)
ax.set_ylabel('Percentage of reads including CG')
ax.set_xlabel('CG number per read')
plt.text(4,max(t)+4,'All reads including CG site: '+str(sum(reads_cg_num)))
#print args.output+'.pdf'
plt.savefig(args.output+'.pdf')
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-b','--bamfile',help="bam file name", metavar="FILE")
parser.add_argument('-g','--genome',help="Genome fasta file path")
parser.add_argument('-o','--output',help="pie figure's filename")
run(parser)
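As a toy illustration (independent of the script above) of the strand/repeat logic: `CG`/`Cg` counts sites whose cytosine comes from non-repeat (uppercase) sequence, while `cG`/`cg` counts repeat-masked (lowercase) cytosines:

```python
seq = "ttACGTcgA"   # lowercase bases are repeat-masked
nonrepeat = seq.count('CG') + seq.count('Cg')
repeat = seq.count('cG') + seq.count('cg')
print(nonrepeat, repeat)  # 1 1
```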
| 33.857143
| 177
| 0.589813
| 511
| 3,318
| 3.722114
| 0.330724
| 0.033123
| 0.047319
| 0.050473
| 0.109359
| 0.09674
| 0.09674
| 0.09674
| 0.09674
| 0.09674
| 0
| 0.032462
| 0.238698
| 3,318
| 97
| 178
| 34.206186
| 0.720507
| 0.187764
| 0
| 0.029851
| 0
| 0
| 0.094828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014925
| false
| 0
| 0.104478
| 0
| 0.119403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92e278de46c7d8190178a6e51a0f4e234995f14e | 1,536 | py | Python | src/furo/__init__.py | sethmlarson/furo | 1257d884dae9040248380595e06d7d2a1e6eba39 | ["MIT"] | null | null | null | src/furo/__init__.py | sethmlarson/furo | 1257d884dae9040248380595e06d7d2a1e6eba39 | ["MIT"] | null | null | null | src/furo/__init__.py | sethmlarson/furo | 1257d884dae9040248380595e06d7d2a1e6eba39 | ["MIT"] | null | null | null |
"""A clean customisable Sphinx documentation theme."""
__version__ = "2020.9.8.beta2"
from pathlib import Path
from .body import wrap_tables
from .code import get_pygments_style_colors
from .navigation import get_navigation_tree
from .toc import should_hide_toc
def _html_page_context(app, pagename, templatename, context, doctree):
if app.config.html_theme != "furo":
return
# Custom Navigation Tree (adds checkboxes and labels)
toctree = context.get("toctree", lambda **kwargs: "")
toctree_html = toctree(
collapse=False, titles_only=True, maxdepth=-1, includehidden=True
)
context["furo_navigation_tree"] = get_navigation_tree(toctree_html)
# Custom "should hide ToC" logic
context["furo_hide_toc"] = should_hide_toc(context.get("toc", ""))
# Allow for hiding toc via ToC in page-wide metadata.
if "hide-toc" in (context.get("meta", None) or {}):
context["furo_hide_toc"] = True
# Inject information about styles
colors = get_pygments_style_colors(
app.builder.highlighter.formatter_args["style"],
fallbacks={"foreground": "#000000", "background": "#FFFFFF"},
)
context["furo_pygments"] = colors
# Patch the content
if "body" in context:
context["body"] = wrap_tables(context["body"])
def setup(app):
"""Entry point for sphinx theming."""
theme_path = (Path(__file__).parent / "theme").resolve()
app.add_html_theme("furo", str(theme_path))
app.connect("html-page-context", _html_page_context)
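For orientation, a hedged sketch of the consumer side: once the package is installed, a Sphinx project opts in from its conf.py (the project name below is hypothetical), and the `hide-toc` metadata key is what the `_html_page_context` hook above reads:

```python
# conf.py (sketch)
project = "example-project"
extensions = []
html_theme = "furo"
```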
| 32
| 73
| 0.69401
| 196
| 1,536
| 5.204082
| 0.47449
| 0.041176
| 0.038235
| 0.043137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011138
| 0.181641
| 1,536
| 47
| 74
| 32.680851
| 0.800318
| 0.173177
| 0
| 0
| 0
| 0
| 0.140239
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.178571
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92e3577604795bc43851e0afe7af80fe0fe26bbf | 2,059 | py | Python | experiments/mix_down.py | fretboardfreak/potty_oh | 70b752c719576c0975e1d2af5aca2fc7abc8abcc | ["Apache-2.0"] | null | null | null | experiments/mix_down.py | fretboardfreak/potty_oh | 70b752c719576c0975e1d2af5aca2fc7abc8abcc | ["Apache-2.0"] | 1 | 2016-05-04T03:51:36.000Z | 2016-05-16T19:08:02.000Z | experiments/mix_down.py | fretboardfreak/potty_oh | 70b752c719576c0975e1d2af5aca2fc7abc8abcc | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python3
# Copyright 2016 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A test for what happens when two waveforms are averaged together."""
from potty_oh import common
from potty_oh.wav_file import wav_file_context
from potty_oh.waveform import mix_down
from potty_oh.signal_generator import Generator
from potty_oh.music.pitch import Key
from potty_oh.music.interval import Interval
def main():
parser = common.get_cmd_line_parser(description=__doc__)
common.ParserArguments.filename(parser)
common.ParserArguments.length(parser)
common.ParserArguments.framerate(parser)
common.ParserArguments.set_defaults(parser, type='constant',
length=2.0)
args = parser.parse_args()
common.defaults.framerate = args.framerate
sg = Generator(length=args.length, verbose=args.debug)
key = Key()
unison = sg.sin_constant(key.interval(Interval.unison))
maj_third = sg.sin_constant(key.interval(Interval.major_third))
min_third = sg.sin_constant(key.interval(Interval.minor_third))
fifth = sg.sin_constant(key.interval(Interval.fifth))
powerchord = unison.mix_down(fifth)
maj_triad = powerchord.mix_down(maj_third)
min_triad = mix_down(powerchord, min_third)
with wav_file_context(args.filename) as fout:
fout.write_frames(powerchord.frames)
fout.write_frames(maj_triad.frames)
fout.write_frames(min_triad.frames)
return 0
if __name__ == "__main__":
common.call_main(main)
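A stand-alone sketch of the idea the experiment's docstring describes, mixing down as averaging two waveforms, written with plain numpy rather than the potty_oh helpers (so it is not the library's implementation):

```python
import numpy as np

framerate = 44100
t = np.linspace(0.0, 1.0, framerate, endpoint=False)
unison = np.sin(2 * np.pi * 440.0 * t)   # A4
fifth = np.sin(2 * np.pi * 660.0 * t)    # a perfect fifth above (3:2 ratio)
powerchord = (unison + fifth) / 2.0      # element-wise average keeps amplitude in [-1, 1]
print(powerchord.shape, float(abs(powerchord).max()) <= 1.0)
```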
| 34.898305
| 78
| 0.736765
| 287
| 2,059
| 5.108014
| 0.452962
| 0.040928
| 0.04502
| 0.043656
| 0.094134
| 0.094134
| 0.050477
| 0
| 0
| 0
| 0
| 0.007096
| 0.178728
| 2,059
| 58
| 79
| 35.5
| 0.859846
| 0.318601
| 0
| 0
| 0
| 0
| 0.011561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.193548
| 0
| 0.258065
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92e37ec4545956a8e8242b1871fea16288a1a867 | 8,704 | py | Python | tests/test_hrepr.py | fabaff/hrepr | f6de915f1d34c47ceab11f5f70e433a30e6de174 | ["MIT"] | null | null | null | tests/test_hrepr.py | fabaff/hrepr | f6de915f1d34c47ceab11f5f70e433a30e6de174 | ["MIT"] | null | null | null | tests/test_hrepr.py | fabaff/hrepr | f6de915f1d34c47ceab11f5f70e433a30e6de174 | ["MIT"] | null | null | null |
from dataclasses import dataclass
from hrepr import H
from hrepr import hrepr as real_hrepr
from hrepr.h import styledir
from .common import one_test_per_assert
css_hrepr = open(f"{styledir}/hrepr.css", encoding="utf-8").read()
hrepr = real_hrepr.variant(fill_resources=False)
@dataclass
class Point:
x: int
y: int
class Opaque:
pass
def hshort(x, **kw):
return hrepr(x, max_depth=0, **kw)
@one_test_per_assert
def test_singletons():
assert hrepr(True) == H.span["hreprv-True"]("True")
assert hrepr(False) == H.span["hreprv-False"]("False")
assert hrepr(None) == H.span["hreprv-None"]("None")
@one_test_per_assert
def test_numbers():
assert hrepr(123) == H.span["hreprt-int"]("123")
assert hrepr(1.25) == H.span["hreprt-float"]("1.25")
@one_test_per_assert
def test_string():
assert hshort("hello") == H.span["hreprt-str"]("hello")
assert hrepr("3 spaces") == H.span["hreprt-str"]("3 spaces")
assert hrepr("hello this is a bit long") == H.span["hreprt-str"](
"hello this is a bit long"
)
assert hshort("hello this is a bit long") == H.span["hreprt-str"](
"hello this is a b..."
)
assert hshort("hello this is a bit long", string_cutoff=10) == H.span[
"hreprt-str"
]("hello t...")
assert hshort("hello this is a bit long", string_cutoff=5) == H.span[
"hreprt-str"
]("he...")
assert hshort("hello this is a bit long", string_cutoff=10000) == H.span[
"hreprt-str"
]("hello this is a bit long")
@one_test_per_assert
def test_bytes():
assert hrepr(b"hello") == H.span["hreprt-bytes"]("68656c6c6f")
assert hshort(b"hello") == H.span["hreprt-bytes"]("68656c6c6f")
assert hrepr(b"hello this is a bit long") == H.span["hreprt-bytes"](
"68656c6c6f2074686973206973206120626974206c6f6e67"
)
assert hshort(b"hello this is a bit long") == H.span["hreprt-bytes"](
"68656c6c6f2074686..."
)
def test_function():
assert hrepr(Opaque) == H.span["hreprk-class"](
H.span["hrepr-defn-key"]("class"),
" ",
H.span["hrepr-defn-name"]("Opaque"),
)
def test_structures():
for typ, o, c in (
(tuple, "(", ")"),
(list, "[", "]"),
(set, "{", "}"),
(frozenset, "{", "}"),
):
clsname = typ.__name__
assert hrepr(typ((1, 2))) == H.div[
f"hreprt-{clsname}", "hrepr-bracketed"
](
H.div["hrepr-open"](o),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"](c),
)
def test_short_structures():
for val, o, c in (
((1, 2), "(", ")"),
([1, 2], "[", "]"),
({1, 2}, "{", "}"),
(frozenset({1, 2}), "{", "}"),
({"x": 1, "y": 2}, "{", "}"),
):
clsname = type(val).__name__
assert hrepr(val, max_depth=0) == H.div[
f"hreprt-{clsname}", "hrepr-bracketed"
](
H.div["hrepr-open"](o),
H.div["hreprl-s", "hrepr-body"](H.div("...")),
H.div["hrepr-close"](c),
)
def test_dict():
pt = {"x": 1, "y": 2}
assert hrepr(pt) == H.div["hreprt-dict", "hrepr-bracketed"](
H.div["hrepr-open"]("{"),
H.table["hrepr-body"](
H.tr(
H.td(H.span["hreprt-str"]("x")),
H.td["hrepr-delim"](": "),
H.td(H.span["hreprt-int"]("1")),
),
H.tr(
H.td(H.span["hreprt-str"]("y")),
H.td["hrepr-delim"](": "),
H.td(H.span["hreprt-int"]("2")),
),
),
H.div["hrepr-close"]("}"),
)
def test_dataclass():
pt = Point(1, 2)
assert hrepr(pt) == H.div["hreprt-Point", "hrepr-instance", "hreprl-v"](
H.div["hrepr-title"]("Point"),
H.table["hrepr-body"](
H.tr(
H.td(H.span["hreprt-symbol"]("x")),
H.td["hrepr-delim"]("="),
H.td(H.span["hreprt-int"]("1")),
),
H.tr(
H.td(H.span["hreprt-symbol"]("y")),
H.td["hrepr-delim"]("="),
H.td(H.span["hreprt-int"]("2")),
),
),
)
assert hrepr(pt, max_depth=0) == H.div[
"hreprt-Point", "hrepr-instance", "hreprl-s"
](
H.div["hrepr-title"]("Point"),
H.div["hreprl-s", "hrepr-body"](H.div("...")),
)
def test_tag():
tg = H.span["hello"](1, 2, H.b("there"))
assert hrepr(tg) == tg
def test_multiref():
li = [1, 2]
lili = [li, li]
assert hrepr(lili) == H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"]("]"),
),
)
),
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-s", "hrepr-body"](H.div("..."),),
H.div["hrepr-close"]("]"),
),
)
),
),
H.div["hrepr-close"]("]"),
)
assert hrepr(lili, shortrefs=True) == H.div[
"hreprt-list", "hrepr-bracketed"
](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hreprt-int"]("2")),
),
H.div["hrepr-close"]("]"),
),
)
),
H.div(H.span["hrepr-ref"]("#", 1)),
),
H.div["hrepr-close"]("]"),
)
def test_recursive():
li = [1]
li.append(li)
assert hrepr(li) == H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(
H.div["hrepr-refbox"](
H.span["hrepr-ref"]("⟳", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-s", "hrepr-body"](H.div("..."),),
H.div["hrepr-close"]("]"),
),
)
),
),
H.div["hrepr-close"]("]"),
),
)
assert hrepr(li, shortrefs=True) == H.div["hrepr-refbox"](
H.span["hrepr-ref"]("#", 1, "="),
H.div["hreprt-list", "hrepr-bracketed"](
H.div["hrepr-open"]("["),
H.div["hreprl-h", "hrepr-body"](
H.div(H.span["hreprt-int"]("1")),
H.div(H.span["hrepr-ref"]("⟳", 1)),
),
H.div["hrepr-close"]("]"),
),
)
def test_unsupported():
assert hshort(Opaque()) == H.span["hreprt-Opaque"](
"<", "tests.test_hrepr.Opaque", ">"
)
def test_as_page():
utf8 = H.meta(
{"http-equiv": "Content-type"}, content="text/html", charset="UTF-8"
)
assert real_hrepr.page(1) == H.inline(
H.raw("<!DOCTYPE html>"),
H.html(H.head(utf8, H.style(css_hrepr)), H.body(real_hrepr(1)),),
)
def test_hrepr_multiarg():
assert hrepr(1, 2) == H.inline(
H.span["hreprt-int"]("1"), H.span["hreprt-int"]("2"),
)
def test_preprocess():
assert hrepr(1, preprocess=lambda x, hrepr: x + 1) == H.span["hreprt-int"](
"2"
)
def test_postprocess():
assert hrepr(1, postprocess=lambda x, obj, hrepr: x["newclass"]) == H.span[
"newclass", "hreprt-int"
]("1")
| 29.110368
| 79
| 0.443934
| 1,033
| 8,704
| 3.687318
| 0.136496
| 0.07561
| 0.095301
| 0.058808
| 0.611447
| 0.586768
| 0.547125
| 0.494618
| 0.444474
| 0.440798
| 0
| 0.026088
| 0.335018
| 8,704
| 298
| 80
| 29.208054
| 0.631652
| 0
| 0
| 0.449799
| 0
| 0
| 0.240924
| 0.008157
| 0
| 0
| 0
| 0
| 0.148594
| 1
| 0.072289
| false
| 0.004016
| 0.02008
| 0.004016
| 0.11245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
92e5bc0e9b68f032b202632a0013f3e6bb85256a | 11,460 | py | Python | supervisor/const.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | ["Apache-2.0"] | null | null | null | supervisor/const.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | ["Apache-2.0"] | null | null | null | supervisor/const.py | peddamat/home-assistant-supervisor-test | 5da55772bcb2db3c6d8432cbc08e2ac9fbf480c4 | ["Apache-2.0"] | null | null | null |
"""Constants file for Supervisor."""
from enum import Enum
from ipaddress import ip_network
from pathlib import Path
SUPERVISOR_VERSION = "DEV"
URL_HASSIO_ADDONS = "https://github.com/home-assistant/addons"
URL_HASSIO_APPARMOR = "https://version.home-assistant.io/apparmor.txt"
URL_HASSIO_VERSION = "https://version.home-assistant.io/{channel}.json"
SUPERVISOR_DATA = Path("/data")
FILE_HASSIO_ADDONS = Path(SUPERVISOR_DATA, "addons.json")
FILE_HASSIO_AUTH = Path(SUPERVISOR_DATA, "auth.json")
FILE_HASSIO_CONFIG = Path(SUPERVISOR_DATA, "config.json")
FILE_HASSIO_DISCOVERY = Path(SUPERVISOR_DATA, "discovery.json")
FILE_HASSIO_DOCKER = Path(SUPERVISOR_DATA, "docker.json")
FILE_HASSIO_HOMEASSISTANT = Path(SUPERVISOR_DATA, "homeassistant.json")
FILE_HASSIO_INGRESS = Path(SUPERVISOR_DATA, "ingress.json")
FILE_HASSIO_SERVICES = Path(SUPERVISOR_DATA, "services.json")
FILE_HASSIO_UPDATER = Path(SUPERVISOR_DATA, "updater.json")
FILE_SUFFIX_CONFIGURATION = [".yaml", ".yml", ".json"]
MACHINE_ID = Path("/etc/machine-id")
SOCKET_DBUS = Path("/run/dbus/system_bus_socket")
SOCKET_DOCKER = Path("/run/docker.sock")
RUN_SUPERVISOR_STATE = Path("/run/supervisor")
SYSTEMD_JOURNAL_PERSISTENT = Path("/var/log/journal")
SYSTEMD_JOURNAL_VOLATILE = Path("/run/log/journal")
DOCKER_NETWORK = "hassio"
DOCKER_NETWORK_MASK = ip_network("172.30.32.0/23")
DOCKER_NETWORK_RANGE = ip_network("172.30.33.0/24")
# This needs to match the dockerd --cpu-rt-runtime= argument.
DOCKER_CPU_RUNTIME_TOTAL = 950_000
# The rt runtimes are guarantees, hence we cannot allocate more
# time than available! Support up to 5 containers with equal time
# allocated.
# Note that the time is multiplied by CPU count. This means that
# a single container can schedule up to 950/5*4 = 760ms in RT priority
# on a quad core system.
DOCKER_CPU_RUNTIME_ALLOCATION = int(DOCKER_CPU_RUNTIME_TOTAL / 5)
DNS_SUFFIX = "local.hass.io"
LABEL_ARCH = "io.hass.arch"
LABEL_MACHINE = "io.hass.machine"
LABEL_TYPE = "io.hass.type"
LABEL_VERSION = "io.hass.version"
META_ADDON = "addon"
META_HOMEASSISTANT = "homeassistant"
META_SUPERVISOR = "supervisor"
JSON_DATA = "data"
JSON_MESSAGE = "message"
JSON_RESULT = "result"
RESULT_ERROR = "error"
RESULT_OK = "ok"
CONTENT_TYPE_BINARY = "application/octet-stream"
CONTENT_TYPE_JSON = "application/json"
CONTENT_TYPE_PNG = "image/png"
CONTENT_TYPE_TAR = "application/tar"
CONTENT_TYPE_TEXT = "text/plain"
CONTENT_TYPE_URL = "application/x-www-form-urlencoded"
COOKIE_INGRESS = "ingress_session"
HEADER_TOKEN = "X-Supervisor-Token"
HEADER_TOKEN_OLD = "X-Hassio-Key"
ENV_TIME = "TZ"
ENV_TOKEN = "SUPERVISOR_TOKEN"
ENV_TOKEN_HASSIO = "HASSIO_TOKEN"
ENV_HOMEASSISTANT_REPOSITORY = "HOMEASSISTANT_REPOSITORY"
ENV_SUPERVISOR_DEV = "SUPERVISOR_DEV"
ENV_SUPERVISOR_MACHINE = "SUPERVISOR_MACHINE"
ENV_SUPERVISOR_NAME = "SUPERVISOR_NAME"
ENV_SUPERVISOR_SHARE = "SUPERVISOR_SHARE"
ENV_SUPERVISOR_CPU_RT = "SUPERVISOR_CPU_RT"
REQUEST_FROM = "HASSIO_FROM"
ATTR_ACCESS_TOKEN = "access_token"
ATTR_ACCESSPOINTS = "accesspoints"
ATTR_ACTIVE = "active"
ATTR_ADDON = "addon"
ATTR_ADDONS = "addons"
ATTR_ADDONS_CUSTOM_LIST = "addons_custom_list"
ATTR_ADDONS_REPOSITORIES = "addons_repositories"
ATTR_ADDRESS = "address"
ATTR_ADDRESS_DATA = "address-data"
ATTR_ADMIN = "admin"
ATTR_ADVANCED = "advanced"
ATTR_APPARMOR = "apparmor"
ATTR_APPLICATION = "application"
ATTR_ARCH = "arch"
ATTR_ARGS = "args"
ATTR_LABELS = "labels"
ATTR_AUDIO = "audio"
ATTR_AUDIO_INPUT = "audio_input"
ATTR_AUDIO_OUTPUT = "audio_output"
ATTR_AUTH = "auth"
ATTR_AUTH_API = "auth_api"
ATTR_AUTO_UPDATE = "auto_update"
ATTR_AVAILABLE = "available"
ATTR_BLK_READ = "blk_read"
ATTR_BLK_WRITE = "blk_write"
ATTR_BOARD = "board"
ATTR_BOOT = "boot"
ATTR_BRANCH = "branch"
ATTR_BUILD = "build"
ATTR_BUILD_FROM = "build_from"
ATTR_CARD = "card"
ATTR_CHANGELOG = "changelog"
ATTR_CHANNEL = "channel"
ATTR_CHASSIS = "chassis"
ATTR_CHECKS = "checks"
ATTR_CLI = "cli"
ATTR_CONFIG = "config"
ATTR_CONFIGURATION = "configuration"
ATTR_CONNECTED = "connected"
ATTR_CONNECTIONS = "connections"
ATTR_CONTAINERS = "containers"
ATTR_CPE = "cpe"
ATTR_CPU_PERCENT = "cpu_percent"
ATTR_CRYPTO = "crypto"
ATTR_DATA = "data"
ATTR_DATE = "date"
ATTR_DEBUG = "debug"
ATTR_DEBUG_BLOCK = "debug_block"
ATTR_DEFAULT = "default"
ATTR_DEPLOYMENT = "deployment"
ATTR_DESCRIPTON = "description"
ATTR_DETACHED = "detached"
ATTR_DEVICES = "devices"
ATTR_DEVICETREE = "devicetree"
ATTR_DIAGNOSTICS = "diagnostics"
ATTR_DISCOVERY = "discovery"
ATTR_DISK = "disk"
ATTR_DISK_FREE = "disk_free"
ATTR_DISK_LIFE_TIME = "disk_life_time"
ATTR_DISK_TOTAL = "disk_total"
ATTR_DISK_USED = "disk_used"
ATTR_DNS = "dns"
ATTR_DOCKER = "docker"
ATTR_DOCKER_API = "docker_api"
ATTR_DOCUMENTATION = "documentation"
ATTR_DOMAINS = "domains"
ATTR_ENABLE = "enable"
ATTR_ENABLED = "enabled"
ATTR_ENVIRONMENT = "environment"
ATTR_EVENT = "event"
ATTR_FEATURES = "features"
ATTR_FILENAME = "filename"
ATTR_FLAGS = "flags"
ATTR_FOLDERS = "folders"
ATTR_FREQUENCY = "frequency"
ATTR_FULL_ACCESS = "full_access"
ATTR_GATEWAY = "gateway"
ATTR_GPIO = "gpio"
ATTR_HASSIO_API = "hassio_api"
ATTR_HASSIO_ROLE = "hassio_role"
ATTR_HASSOS = "hassos"
ATTR_HEALTHY = "healthy"
ATTR_HOMEASSISTANT = "homeassistant"
ATTR_HOMEASSISTANT_API = "homeassistant_api"
ATTR_HOST = "host"
ATTR_HOST_DBUS = "host_dbus"
ATTR_HOST_INTERNET = "host_internet"
ATTR_HOST_IPC = "host_ipc"
ATTR_HOST_NETWORK = "host_network"
ATTR_HOST_PID = "host_pid"
ATTR_HOSTNAME = "hostname"
ATTR_ICON = "icon"
ATTR_ID = "id"
ATTR_IMAGE = "image"
ATTR_IMAGES = "images"
ATTR_INDEX = "index"
ATTR_INGRESS = "ingress"
ATTR_INGRESS_ENTRY = "ingress_entry"
ATTR_INGRESS_PANEL = "ingress_panel"
ATTR_INGRESS_PORT = "ingress_port"
ATTR_INGRESS_TOKEN = "ingress_token"
ATTR_INGRESS_URL = "ingress_url"
ATTR_INIT = "init"
ATTR_INITIALIZE = "initialize"
ATTR_INPUT = "input"
ATTR_INSTALLED = "installed"
ATTR_INTERFACE = "interface"
ATTR_INTERFACES = "interfaces"
ATTR_IP_ADDRESS = "ip_address"
ATTR_IPV4 = "ipv4"
ATTR_IPV6 = "ipv6"
ATTR_ISSUES = "issues"
ATTR_KERNEL = "kernel"
ATTR_KERNEL_MODULES = "kernel_modules"
ATTR_LAST_BOOT = "last_boot"
ATTR_LEGACY = "legacy"
ATTR_LOCALS = "locals"
ATTR_LOCATON = "location"
ATTR_LOGGING = "logging"
ATTR_LOGO = "logo"
ATTR_LONG_DESCRIPTION = "long_description"
ATTR_MAC = "mac"
ATTR_MACHINE = "machine"
ATTR_MAINTAINER = "maintainer"
ATTR_MAP = "map"
ATTR_MEMORY_LIMIT = "memory_limit"
ATTR_MEMORY_PERCENT = "memory_percent"
ATTR_MEMORY_USAGE = "memory_usage"
ATTR_MESSAGE = "message"
ATTR_METHOD = "method"
ATTR_MODE = "mode"
ATTR_MULTICAST = "multicast"
ATTR_NAME = "name"
ATTR_NAMESERVERS = "nameservers"
ATTR_NETWORK = "network"
ATTR_NETWORK_DESCRIPTION = "network_description"
ATTR_NETWORK_RX = "network_rx"
ATTR_NETWORK_TX = "network_tx"
ATTR_OBSERVER = "observer"
ATTR_OPERATING_SYSTEM = "operating_system"
ATTR_OPTIONS = "options"
ATTR_OTA = "ota"
ATTR_OUTPUT = "output"
ATTR_PANEL_ADMIN = "panel_admin"
ATTR_PANEL_ICON = "panel_icon"
ATTR_PANEL_TITLE = "panel_title"
ATTR_PANELS = "panels"
ATTR_PARENT = "parent"
ATTR_PASSWORD = "password"
ATTR_PORT = "port"
ATTR_PORTS = "ports"
ATTR_PORTS_DESCRIPTION = "ports_description"
ATTR_PREFIX = "prefix"
ATTR_PRIMARY = "primary"
ATTR_PRIORITY = "priority"
ATTR_PRIVILEGED = "privileged"
ATTR_PROTECTED = "protected"
ATTR_PROVIDERS = "providers"
ATTR_PSK = "psk"
ATTR_RATING = "rating"
ATTR_REALTIME = "realtime"
ATTR_REFRESH_TOKEN = "refresh_token"
ATTR_REGISTRIES = "registries"
ATTR_REGISTRY = "registry"
ATTR_REPOSITORIES = "repositories"
ATTR_REPOSITORY = "repository"
ATTR_SCHEMA = "schema"
ATTR_SECURITY = "security"
ATTR_SERIAL = "serial"
ATTR_SERVERS = "servers"
ATTR_SERVICE = "service"
ATTR_SERVICES = "services"
ATTR_SESSION = "session"
ATTR_SIGNAL = "signal"
ATTR_SIZE = "size"
ATTR_SLUG = "slug"
ATTR_SNAPSHOT_EXCLUDE = "snapshot_exclude"
ATTR_SNAPSHOTS = "snapshots"
ATTR_SOURCE = "source"
ATTR_SQUASH = "squash"
ATTR_SSD = "ssid"
ATTR_SSID = "ssid"
ATTR_SSL = "ssl"
ATTR_STAGE = "stage"
ATTR_STARTUP = "startup"
ATTR_STATE = "state"
ATTR_STATIC = "static"
ATTR_STDIN = "stdin"
ATTR_STORAGE = "storage"
ATTR_SUGGESTIONS = "suggestions"
ATTR_SUPERVISOR = "supervisor"
ATTR_SUPERVISOR_INTERNET = "supervisor_internet"
ATTR_SUPPORTED = "supported"
ATTR_SUPPORTED_ARCH = "supported_arch"
ATTR_SYSTEM = "system"
ATTR_JOURNALD = "journald"
ATTR_TIMEOUT = "timeout"
ATTR_TIMEZONE = "timezone"
ATTR_TITLE = "title"
ATTR_TMPFS = "tmpfs"
ATTR_TOTP = "totp"
ATTR_TRANSLATIONS = "translations"
ATTR_TYPE = "type"
ATTR_UART = "uart"
ATTR_UDEV = "udev"
ATTR_UNHEALTHY = "unhealthy"
ATTR_UNSAVED = "unsaved"
ATTR_UNSUPPORTED = "unsupported"
ATTR_UPDATE_AVAILABLE = "update_available"
ATTR_UPDATE_KEY = "update_key"
ATTR_URL = "url"
ATTR_USB = "usb"
ATTR_USER = "user"
ATTR_USERNAME = "username"
ATTR_UUID = "uuid"
ATTR_VALID = "valid"
ATTR_VALUE = "value"
ATTR_VERSION = "version"
ATTR_VERSION_LATEST = "version_latest"
ATTR_VIDEO = "video"
ATTR_VLAN = "vlan"
ATTR_VOLUME = "volume"
ATTR_VPN = "vpn"
ATTR_WAIT_BOOT = "wait_boot"
ATTR_WATCHDOG = "watchdog"
ATTR_WEBUI = "webui"
ATTR_WIFI = "wifi"
ATTR_CONTENT_TRUST = "content_trust"
ATTR_FORCE_SECURITY = "force_security"
PROVIDE_SERVICE = "provide"
NEED_SERVICE = "need"
WANT_SERVICE = "want"
MAP_CONFIG = "config"
MAP_SSL = "ssl"
MAP_ADDONS = "addons"
MAP_BACKUP = "backup"
MAP_SHARE = "share"
MAP_MEDIA = "media"
ARCH_ARMHF = "armhf"
ARCH_ARMV7 = "armv7"
ARCH_AARCH64 = "aarch64"
ARCH_AMD64 = "amd64"
ARCH_I386 = "i386"
ARCH_ALL = [ARCH_ARMHF, ARCH_ARMV7, ARCH_AARCH64, ARCH_AMD64, ARCH_I386]
REPOSITORY_CORE = "core"
REPOSITORY_LOCAL = "local"
FOLDER_HOMEASSISTANT = "homeassistant"
FOLDER_SHARE = "share"
FOLDER_ADDONS = "addons/local"
FOLDER_SSL = "ssl"
FOLDER_MEDIA = "media"
SNAPSHOT_FULL = "full"
SNAPSHOT_PARTIAL = "partial"
CRYPTO_AES128 = "aes128"
SECURITY_PROFILE = "profile"
SECURITY_DEFAULT = "default"
SECURITY_DISABLE = "disable"
ROLE_DEFAULT = "default"
ROLE_HOMEASSISTANT = "homeassistant"
ROLE_BACKUP = "backup"
ROLE_MANAGER = "manager"
ROLE_ADMIN = "admin"
ROLE_ALL = [ROLE_DEFAULT, ROLE_HOMEASSISTANT, ROLE_BACKUP, ROLE_MANAGER, ROLE_ADMIN]
class AddonBoot(str, Enum):
"""Boot mode for the add-on."""
AUTO = "auto"
MANUAL = "manual"
class AddonStartup(str, Enum):
"""Startup types of Add-on."""
INITIALIZE = "initialize"
SYSTEM = "system"
SERVICES = "services"
APPLICATION = "application"
ONCE = "once"
class AddonStage(str, Enum):
"""Stage types of add-on."""
STABLE = "stable"
EXPERIMENTAL = "experimental"
DEPRECATED = "deprecated"
class AddonState(str, Enum):
"""State of add-on."""
STARTED = "started"
STOPPED = "stopped"
UNKNOWN = "unknown"
ERROR = "error"
class UpdateChannel(str, Enum):
"""Core supported update channels."""
STABLE = "stable"
BETA = "beta"
DEV = "dev"
class CoreState(str, Enum):
"""Represent current loading state."""
INITIALIZE = "initialize"
SETUP = "setup"
STARTUP = "startup"
RUNNING = "running"
FREEZE = "freeze"
SHUTDOWN = "shutdown"
STOPPING = "stopping"
CLOSE = "close"
class LogLevel(str, Enum):
"""Logging level of system."""
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
CRITICAL = "critical"
class HostFeature(str, Enum):
"""Host feature."""
HASSOS = "hassos"
HOSTNAME = "hostname"
NETWORK = "network"
REBOOT = "reboot"
SERVICES = "services"
SHUTDOWN = "shutdown"
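A small illustration (not part of Supervisor) of why these enums subclass `str`: members can be built from, and compared against, the raw strings that appear in config files and API payloads:

```python
print(AddonStartup("services") is AddonStartup.SERVICES)  # True
print(CoreState.RUNNING == "running")                     # True
print(LogLevel("warning").value)                          # warning
```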
| 25.986395
| 84
| 0.755585
| 1,483
| 11,460
| 5.492245
| 0.262306
| 0.017188
| 0.01989
| 0.006139
| 0.00663
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007001
| 0.127487
| 11,460
| 440
| 85
| 26.045455
| 0.807581
| 0.050436
| 0
| 0.027174
| 0
| 0
| 0.285134
| 0.009972
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.002717
| 0.008152
| 0
| 0.127717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 92e5e938e0e0af1229cd08971df68b5917c123c7 | 8,334 | py | Python |
quaesit/agent.py | jgregoriods/quaesit | 3846f5084ea4d6c1cbd9a93176ee9dee25e12105 | ["MIT"] | null | null | null |
quaesit/agent.py | jgregoriods/quaesit | 3846f5084ea4d6c1cbd9a93176ee9dee25e12105 | ["MIT"] | null | null | null |
quaesit/agent.py | jgregoriods/quaesit | 3846f5084ea4d6c1cbd9a93176ee9dee25e12105 | ["MIT"] | null | null | null |
import inspect
from math import hypot, sin, asin, cos, radians, degrees
from abc import ABCMeta, abstractmethod
from random import randint, choice
from typing import Dict, List, Tuple, Union
class Agent(metaclass=ABCMeta):
"""
Class to represent an agent in an agent-based model.
"""
_id = 0
colors = ['blue', 'brown', 'cyan', 'gray', 'green', 'magenta', 'orange',
'pink', 'purple', 'red', 'yellow']
def __init__(self, world, coords: Tuple = None):
self._id = Agent._id
Agent._id += 1
self.world = world
self.coords = coords or (randint(0, self.world.width - 1),
randint(0, self.world.height - 1))
self.direction = 90
self.breed = self.__class__.__name__.lower()
self.icon = '.'
self.color = choice(self.colors)
self.world.add_agent(self)
def die(self):
"""
Remove the agent from the world.
"""
del self.world.agents[self._id]
self.world.grid[self.coords]['agents'].remove(self)
del self  # only removes the local binding; the world registries above already dropped the agent
def hatch(self):
"""
Creates an agent and initializes it with the same parameters as
oneself.
"""
sig = inspect.signature(self.__init__)
filter_keys = [param.name for param in sig.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD]
filtered_dict = {filter_key: self.__dict__[filter_key]
for filter_key in filter_keys}
return self.__class__(**filtered_dict)
def move_to(self, coords: Tuple):
"""
Places the agent in a different cell of the world grid.
"""
self.world.remove_from_grid(self)
self.coords = coords
self.world.place_on_grid(self)
def cell_here(self, layer = None):
"""
Returns the value of a layer in the model's grid for the cell
where the agent is. If no layer is specified, the values of all
layers are returned.
"""
if layer is not None:
return self.world.grid[self.coords][layer]
else:
return self.world.grid[self.coords]
def get_distance(self, coords: Tuple) -> int:
"""
Returns the distance (in cells) from the agent to a pair of
coordinates.
"""
x, y = coords
return round(hypot((x - self.coords[0]), (y - self.coords[1])))
def cells_in_radius(self, radius: int) -> Dict:
"""
Returns all cells and respective attributes within a distance
of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if self.get_distance((x, y)) <= radius}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid)}
return neighborhood
def empty_cells_in_radius(self, radius: int) -> Dict:
"""
Returns all empty cells (with no agents on them) and respective
attributes within a distance of the agent.
"""
if self.world.torus:
neighborhood = {self.world.to_torus((x, y)):
self.world.grid[self.world.to_torus((x, y))]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and not
self.world.grid[self.world.to_torus((x, y))]
['agents'])}
else:
neighborhood = {(x, y): self.world.grid[(x, y)]
for x in range(self.coords[0] - radius,
self.coords[0] + radius + 1)
for y in range(self.coords[1] - radius,
self.coords[1] + radius + 1)
if (self.get_distance((x, y)) <= radius and
(x, y) in self.world.grid and not
self.world.grid[(x, y)]['agents'])}
return neighborhood
def nearest_cell(self, cells: Union[List, Dict]) -> Tuple:
"""
Given a list or dictionary of cells, returns the coordinates of
the cell that is nearest to the agent.
"""
dists = {cell: self.get_distance(cell) for cell in cells}
return min(dists, key=dists.get)
def agents_in_radius(self, radius: int):
"""
Returns all agents within a distance of oneself.
"""
neighborhood = self.cells_in_radius(radius)
neighbors = [agent for coords in neighborhood
for agent in self.world.grid[coords]['agents']
if agent is not self]
return neighbors
def agents_here(self) -> List:
"""
Returns all agents located on the same cell as oneself.
"""
return [agent for agent in self.world.grid[self.coords]['agents']
if agent is not self]
def nearest_agent(self, agents: List = None):
"""
Given a list of agents, returns the agent that is nearest to
oneself. If no list is provided, all agents are evaluated.
"""
if agents is None:
agents = [self.world.agents[_id] for _id in self.world.agents]
dists = {agent: self.get_distance(agent.coords)
for agent in agents if agent is not self}
return min(dists, key=dists.get)
def turn_right(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the right.
"""
self.direction = round((self.direction - angle) % 360)
def turn_left(self, angle: int = 90):
"""
Rotates the agent's direction a number of degrees to the left.
"""
self.direction = round((self.direction + angle) % 360)
def forward(self, n_steps: int = 1):
"""
Moves the agent a number of cells forward in the direction it
is currently facing.
"""
x = round(self.coords[0] + cos(radians(self.direction)) * n_steps)
y = round(self.coords[1] + sin(radians(self.direction)) * n_steps)
if self.world.torus:
self.move_to(self.world.to_torus((x, y)))
elif (x, y) in self.world.grid:
self.move_to((x, y))
def face_towards(self, coords: Tuple):
"""
Turns the agent's direction towards a given pair of coordinates.
"""
if coords != self.coords:
xdif = coords[0] - self.coords[0]
ydif = coords[1] - self.coords[1]
dist = hypot(xdif, ydif)
angle = degrees(asin(ydif / dist))
if xdif < 0:
self.direction = round(180 - angle)
else:
self.direction = round((360 + angle) % 360)
def random_walk(self, n_steps: int = 1):
"""
Moves the agent one cell forward in a random direction for a
number of times.
"""
for i in range(n_steps):
self.turn_right(randint(0, 360))
self.forward()
@abstractmethod
def step(self):
"""
Methods to be performed by the agent at each step of the
simulation.
"""
raise NotImplementedError
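# --- Hypothetical usage sketch (not part of the original quaesit module) ---
# Agent is abstract, so a model defines concrete subclasses that implement
# step(). The sketch assumes a World object compatible with the attributes the
# class uses above (width, height, grid, agents, add_agent, remove_from_grid,
# place_on_grid, torus, to_torus).
class RandomWalker(Agent):
    """Toy agent that wanders one cell per tick."""

    def step(self):
        self.random_walk()
        # e.g. inspect the neighbourhood after moving
        return len(self.agents_in_radius(2))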
| 35.016807
| 76
| 0.517639
| 1,004
| 8,334
| 4.213147
| 0.177291
| 0.070213
| 0.043026
| 0.032151
| 0.380378
| 0.356501
| 0.315839
| 0.282742
| 0.250118
| 0.223877
| 0
| 0.012739
| 0.37833
| 8,334
| 237
| 77
| 35.164557
| 0.803706
| 0.164027
| 0
| 0.296875
| 0
| 0
| 0.013134
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140625
| false
| 0
| 0.039063
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 92e5fb97c8f7793e1b150c9be5289156548c78e6 | 15,337 | py | Python |
models/LRF_COCO_300.py | vaesl/LRF-Net | e44b120dd55288c02852f8e58cda31313525d748 | ["MIT"] | 180 | 2019-10-24T01:55:54.000Z | 2022-02-07T11:26:49.000Z |
models/LRF_COCO_300.py | CV-Rookie/LRF-Net | e44b120dd55288c02852f8e58cda31313525d748 | ["MIT"] | 11 | 2019-11-06T08:46:00.000Z | 2020-06-20T02:30:32.000Z |
models/LRF_COCO_300.py | CV-Rookie/LRF-Net | e44b120dd55288c02852f8e58cda31313525d748 | ["MIT"] | 29 | 2019-10-28T03:26:27.000Z | 2021-05-03T02:32:06.000Z |
import torch
import torch.nn as nn
import os
import torch.nn.functional as F
class LDS(nn.Module):
def __init__(self,):
super(LDS, self).__init__()
self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=0)
self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
def forward(self, x):
x_pool1 = self.pool1(x)
x_pool2 = self.pool2(x_pool1)
x_pool3 = self.pool3(x_pool2)
return x_pool3
class ConvBlock(nn.Module):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
super(ConvBlock, self).__init__()
self.out_channels = out_planes
self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
self.relu = nn.ReLU(inplace=False) if relu else None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class LSN_init(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_init, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1),
ConvBlock(inter_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class LSN_later(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(LSN_later, self).__init__()
self.out_channels = out_planes
inter_planes = out_planes // 4
self.part_a = ConvBlock(in_planes, inter_planes, kernel_size=(3, 3), stride=stride, padding=1)
self.part_b = ConvBlock(inter_planes, out_planes, kernel_size=1, stride=1, relu=False)
def forward(self, x):
out1 = self.part_a(x)
out2 = self.part_b(out1)
return out1, out2
class IBN(nn.Module):
def __init__(self, out_planes, bn=True):
super(IBN, self).__init__()
self.out_channels = out_planes
self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
def forward(self, x):
if self.bn is not None:
x = self.bn(x)
return x
class One_Three_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(One_Three_Conv, self).__init__()
self.out_channels = out_planes
inter_planes = in_planes // 4
self.single_branch = nn.Sequential(
ConvBlock(in_planes, inter_planes, kernel_size=1, stride=1),
ConvBlock(inter_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class Relu_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1):
super(Relu_Conv, self).__init__()
self.out_channels = out_planes
self.relu = nn.ReLU(inplace=False)
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=1)
)
def forward(self, x):
x = self.relu(x)
out = self.single_branch(x)
return out
class Ds_Conv(nn.Module):
def __init__(self, in_planes, out_planes, stride=1, padding=(1, 1)):
super(Ds_Conv, self).__init__()
self.out_channels = out_planes
self.single_branch = nn.Sequential(
ConvBlock(in_planes, out_planes, kernel_size=(3, 3), stride=stride, padding=padding, relu=False)
)
def forward(self, x):
out = self.single_branch(x)
return out
class LRFNet(nn.Module):
"""LRFNet for object detection
The network is based on the SSD architecture.
Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
Args:
phase: (string) Can be "test" or "train"
base: VGG16 layers for input, size of either 300 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(LRFNet, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.size = size
# vgg network
self.base = nn.ModuleList(base)
self.lds = LDS()
# convs for merging the lsn and ssd features
self.Norm1 = Relu_Conv(512, 512, stride=1)
self.Norm2 = Relu_Conv(1024, 1024, stride=1)
self.Norm3 = Relu_Conv(512, 512, stride=1)
self.Norm4 = Relu_Conv(256, 256, stride=1)
# convs for generate the lsn features
self.icn1 = LSN_init(3, 512, stride=1)
self.icn2 = LSN_later(128, 1024, stride=2)
self.icn3 = LSN_later(256, 512, stride=2)
# convs with s=2 to downsample the features
self.dsc1 = Ds_Conv(512, 1024, stride=2, padding=(1, 1))
self.dsc2 = Ds_Conv(1024, 512, stride=2, padding=(1, 1))
self.dsc3 = Ds_Conv(512, 256, stride=2, padding=(1, 1))
# convs to reduce the feature dimensions of current level
self.agent1 = ConvBlock(512, 256, kernel_size=1, stride=1)
self.agent2 = ConvBlock(1024, 512, kernel_size=1, stride=1)
self.agent3 = ConvBlock(512, 256, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.proj1 = ConvBlock(1024, 128, kernel_size=1, stride=1)
self.proj2 = ConvBlock(512, 128, kernel_size=1, stride=1)
self.proj3 = ConvBlock(256, 128, kernel_size=1, stride=1)
# convs to reduce the feature dimensions of other levels
self.convert1 = ConvBlock(384, 256, kernel_size=1)
self.convert2 = ConvBlock(256, 512, kernel_size=1)
self.convert3 = ConvBlock(128, 256, kernel_size=1)
# convs to merge the features of the current and higher level features
self.merge1 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.merge2 = ConvBlock(1024, 1024, kernel_size=3, stride=1, padding=1)
self.merge3 = ConvBlock(512, 512, kernel_size=3, stride=1, padding=1)
self.ibn1 = IBN(512, bn=True)
self.ibn2 = IBN(1024, bn=True)
self.relu = nn.ReLU(inplace=False)
self.extras = nn.ModuleList(extras)
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
if self.phase == 'test':
self.softmax = nn.Softmax()
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
list of concat outputs from:
1: softmax layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
loc = list()
conf = list()
new_sources = list()
# apply lds to the initial image
x_pool = self.lds(x)
# apply vgg up to conv4_3
for k in range(22):
x = self.base[k](x)
conv4_3_bn = self.ibn1(x)
x_pool1_skip, x_pool1_icn = self.icn1(x_pool)
s = self.Norm1(conv4_3_bn * x_pool1_icn)
# apply vgg up to fc7
for k in range(22, 34):
x = self.base[k](x)
conv7_bn = self.ibn2(x)
x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)
p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)
x = self.base[34](x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = v(x)
if k == 0:
x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)
w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)
elif k == 2:
q = self.Norm4(self.dsc3(w) + x)
sources.append(q)
elif k == 5 or k == 7:
sources.append(x)
else:
pass
# project the forward features into lower dimension.
tmp1 = self.proj1(p)
tmp2 = self.proj2(w)
tmp3 = self.proj3(q)
# The conv4_3 level
proj1 = F.upsample(tmp1, size=(38, 38), mode='bilinear')
proj2 = F.upsample(tmp2, size=(38, 38), mode='bilinear')
proj3 = F.upsample(tmp3, size=(38, 38), mode='bilinear')
proj = torch.cat([proj1, proj2, proj3], dim=1)
agent1 = self.agent1(s)
convert1 = self.convert1(proj)
pred1 = torch.cat([agent1, convert1], dim=1)
pred1 = self.merge1(pred1)
new_sources.append(pred1)
# The fc_7 level
proj2 = F.upsample(tmp2, size=(19, 19), mode='bilinear')
proj3 = F.upsample(tmp3, size=(19, 19), mode='bilinear')
proj = torch.cat([proj2, proj3], dim=1)
agent2 = self.agent2(p)
convert2 = self.convert2(proj)
pred2 = torch.cat([agent2, convert2], dim=1)
pred2 = self.merge2(pred2)
new_sources.append(pred2)
# The conv8 level
proj3 = F.upsample(tmp3, size=(10, 10), mode='bilinear')
proj = proj3
agent3 = self.agent3(w)
convert3 = self.convert3(proj)
pred3 = torch.cat([agent3, convert3], dim=1)
pred3 = self.merge3(pred3)
new_sources.append(pred3)
for prediction in sources:
new_sources.append(prediction)
# apply multibox head to source layers
for (x, l, c) in zip(new_sources, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
if self.phase == "test":
output = (
loc.view(loc.size(0), -1, 4), # loc preds
self.softmax(conf.view(-1, self.num_classes)), # conf preds
)
else:
output = (
loc.view(loc.size(0), -1, 4),
conf.view(conf.size(0), -1, self.num_classes),
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext == '.pkl' or ext == '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=False)]
else:
layers += [conv2d, nn.ReLU(inplace=False)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=False), conv7, nn.ReLU(inplace=False)]
return layers
base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512]}
def add_extras(size, cfg, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
if in_channels == 256 and size == 512:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
else:
layers += [One_Three_Conv(in_channels, cfg[k+1], stride=2), nn.ReLU(inplace=False)]
in_channels = v
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
layers += [ConvBlock(256, 128, kernel_size=1,stride=1)]
layers += [ConvBlock(128, 256, kernel_size=3,stride=1)]
return layers
extras = {
'300': [1024, 'S', 512, 'S', 256]}
def multibox(size, vgg, extra_layers, cfg, num_classes):
loc_layers = []
conf_layers = []
vgg_source = [1, -2]
for k, v in enumerate(vgg_source):
if k == 0:
loc_layers += [nn.Conv2d(512,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers +=[nn.Conv2d(512,
cfg[k] * num_classes, kernel_size=3, padding=1)]
else:
loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
i = 2
indicator = 3
for k, v in enumerate(extra_layers):
if (k < indicator+1 and k % 2 == 0) or (k > indicator+1 and k % 2 != 0):
loc_layers += [nn.Conv2d(v.out_channels, cfg[i]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[i]
* num_classes, kernel_size=3, padding=1)]
i += 1
return vgg, extra_layers, (loc_layers, conf_layers)
mbox = {
'300': [6, 6, 6, 6, 4, 4]}
def build_net(phase, size=300, num_classes=81):
if size != 300:
print("Error: The input image size is not supported!")
return
return LRFNet(phase, size, *multibox(size, vgg(base[str(size)], 3),
add_extras(size, extras[str(size)], 1024),
mbox[str(size)], num_classes), num_classes)
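# --- Hypothetical smoke test (not part of the original LRF-Net file) ---
# Builds the 300x300 COCO model defined above and pushes a dummy batch through
# it; assumes a torch version where F.upsample (used in forward) still exists.
if __name__ == '__main__':
    net = build_net('train', size=300, num_classes=81)
    dummy = torch.zeros(2, 3, 300, 300)
    loc_preds, conf_preds = net(dummy)
    # expected shapes: [2, num_priors, 4] and [2, num_priors, 81]
    print(loc_preds.shape, conf_preds.shape)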
| 36.516667
| 154
| 0.579774
| 2,134
| 15,337
| 4.017338
| 0.13074
| 0.051324
| 0.025662
| 0.023796
| 0.502275
| 0.450018
| 0.41794
| 0.371632
| 0.322174
| 0.278782
| 0
| 0.064213
| 0.298363
| 15,337
| 419
| 155
| 36.603819
| 0.73246
| 0.122319
| 0
| 0.284746
| 0
| 0
| 0.015987
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077966
| false
| 0.00339
| 0.013559
| 0
| 0.166102
| 0.013559
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 92e751e7128a30f8b366e1182af0f9e14b4591cd | 25,418 | py | Python |
tests/test.py | chromia/wandplus | 815127aeee85dbac3bc8fca35971d2153b1898a9 | ["ImageMagick", "MIT"] | null | null | null |
tests/test.py | chromia/wandplus | 815127aeee85dbac3bc8fca35971d2153b1898a9 | ["ImageMagick", "MIT"] | null | null | null |
tests/test.py | chromia/wandplus | 815127aeee85dbac3bc8fca35971d2153b1898a9 | ["ImageMagick", "MIT"] | null | null | null |
#!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
import wandplus.image as wpi
from wandplus.textutil import calcSuitableFontsize, calcSuitableImagesize
import os
import unittest
tmpdir = '_tmp/'
def save(img, function, channel=False, ext='.png'):
if channel:
path = tmpdir + function.__name__ + "_ch" + ext
else:
path = tmpdir + function.__name__ + ext
# print(path)
img.save(filename=path)
class CheckImage(unittest.TestCase):
@classmethod
def setUpClass(self):
os.mkdir(tmpdir)
self.rose = Image(filename='rose:')
self.grad = Image(filename='gradient:', width=400, height=400)
self.logo = Image(filename='logo:')
self.text = Image(filename='label:Confirm', width=200, height=60)
self.text_a = Image(width=70, height=60)
with Drawing() as draw:
draw.font = 'Arial'
draw.font_size = 50
draw.gravity = 'center'
draw.fill_color = Color('white')
draw.stroke_color = Color('black')
draw.text(0, 0, 'A')
draw(self.text_a)
self.rose.save(filename=tmpdir + 'rose.png')
self.grad.save(filename=tmpdir + 'grad.png')
self.logo.save(filename=tmpdir + 'logo.png')
self.text.save(filename=tmpdir + 'text.png')
self.text_a.save(filename=tmpdir + 'a.png')
@classmethod
def tearDownClass(self):
self.rose.destroy()
self.grad.destroy()
self.logo.destroy()
self.text.destroy()
self.text_a.destroy()
def test_adaptiveblur(self):
f = wpi.adaptiveblur
with self.rose.clone() as t:
f(t, 5.0, 3.0)
save(t, f)
with self.rose.clone() as t:
f(t, 5.0, 3.0, channel='red')
save(t, f, True)
def test_adaptiveresize(self):
f = wpi.adaptiveresize
with self.rose.clone() as t:
f(t, int(t.width*1.5), int(t.height*2.0))
save(t, f)
def test_adaptivesharpen(self):
f = wpi.adaptivesharpen
with self.rose.clone() as t:
f(t, 5, 5)
save(t, f)
with self.rose.clone() as t:
f(t, 5, 5, channel='red')
save(t, f, True)
def test_adaptivethreshold(self):
f = wpi.adaptivethreshold
with self.logo.clone() as t:
f(t, 20, 20, int(0.1*t.quantum_range))
save(t, f)
def test_addnoise(self):
f = wpi.addnoise
with self.grad.clone() as t:
f(t, 'gaussian')
save(t, f)
with self.grad.clone() as t:
f(t, 'gaussian', channel='red')
save(t, f, True)
def test_affinetransform(self):
f = wpi.affinetransform
with self.rose.clone() as t:
with Drawing() as d:
d.affine([2.0, 0.0, 0.0, 2.0, 0.0, 0.0])
f(t, d)  # does not work correctly (IM < 6.9.9-36)
save(t, f)
def test_autogamma(self):
f = wpi.autogamma
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
def test_autolevel(self):
f = wpi.autolevel
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
def test_blackthreshold(self):
f = wpi.blackthreshold
with self.grad.clone() as t:
f(t, Color('gray(50%)'))
save(t, f)
def test_blueshift(self):
f = wpi.blueshift
with self.logo.clone() as t:
f(t, 0.5)
save(t, f)
def test_brightnesscontrast(self):
f = wpi.brightnesscontrast
with self.rose.clone() as t:
f(t, -30, 0)
save(t, f)
with self.rose.clone() as t:
f(t, -30, 0, channel='red')
save(t, f, True)
def test_blur(self):
f = wpi.blur
with self.rose.clone() as t:
f(t, 0, 3)
save(t, f)
with self.rose.clone() as t:
f(t, 0, 3, channel='red')
save(t, f, True)
def test_charcoal(self):
f = wpi.charcoal
with self.rose.clone() as t:
f(t, 5, 1)
save(t, f)
def test_chop(self):
f = wpi.chop
with self.grad.clone() as t:
t.gravity = 'north_west'
f(t, 0, 00, 200, 200)
save(t, f)
def test_clamp(self):
f = wpi.clamp # TODO: more useful code
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
def test_clip(self):  # NOTE: result is always FAILED.
f = wpi.clip  # I don't have an image which has a clipping path
with self.rose.clone() as t:
f(t)
save(t, f)
def test_clippath(self): # NOTE: result is always FAILED.
f = wpi.clippath
with self.rose.clone() as t:
f(t, '#1', True)
save(t, f)
def test_clut(self):
f = wpi.clut
with Image(filename='gradient:red-blue', width=1, height=100) as p:
p.rotate(90)
with self.grad.clone() as t:
f(t, p)
save(t, f)
with self.grad.clone() as t:
f(t, p, channel='green')
save(t, f, True)
def test_coalesce(self): # TODO: input optimized .gif file.
f = wpi.coalesce
with Image() as t:
with self.rose.clone() as p:
for i in range(5):
wpi.blur(p, 0, 1)
wpi.add(t, p)
with f(t) as p:
save(p, f)
def test_colordecisionlist(self):
xml = """
<ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
<ColorCorrection id="cc03345">
<SOPNode>
<Slope> 0.9 1.2 0.5 </Slope>
<Offset> 0.4 -0.5 0.6 </Offset>
<Power> 1.0 0.8 1.5 </Power>
</SOPNode>
<SATNode>
<Saturation> 0.85 </Saturation>
</SATNode>
</ColorCorrection>
</ColorCorrectionCollection>
"""
f = wpi.colordecisionlist
with self.rose.clone() as t:
f(t, xml)
save(t, f)
def test_colorize(self):
f = wpi.colorize
with self.grad.clone() as t:
f(t, Color('red'), Color('gray(25%)'))
save(t, f)
def test_colormatrix(self):
f = wpi.colormatrix
with self.logo.clone() as t:
kernel = [
0.5, 0.0, 0.0, 0.0, 0.0,
0.0, 1.5, 0.0, 0.0, 0.0,
0.0, 0.0, 0.5, 0.0, 0.0,
0.0, 0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.0
]
f(t, 5, 5, kernel)
save(t, f)
def test_combine(self):
f = wpi.combine
with Image() as t:
w = 100
h = 100
black = Color('black')
white = Color('white')
with Image(width=w, height=w, background=black) as b:
with Image(width=h, height=h, background=white) as w:
wpi.add(t, b) # add image for red channel
wpi.add(t, b) # add image for green channel
wpi.add(t, w) # add image for blue channel
wpi.setfirstiterator(t) # rewind the index pointer
channel = 1 + 2 + 4 # R + G + B
with f(t, channel) as q:
save(q, f)
def test_comment(self):
f = wpi.comment
with self.grad.clone() as t:
f(t, 'hello')
save(t, f)
def test_compare(self):
f = wpi.compare
with self.rose.clone() as t:
with t.clone() as p:
(c, d) = f(t, p, metric='absolute')
save(c, f)
c.destroy()
with self.rose.clone() as t:
with t.clone() as p:
(c, d) = f(t, p, metric='absolute', channel='red')
save(c, f, True)
c.destroy()
def test_comparelayer(self):
f = wpi.comparelayer
with Image() as t:
with Image(width=50, height=50, background=Color('red')) as p:
wpi.add(t, p)
with Image(width=25, height=25, background=Color('green1')) as q:
for i in range(4):
with q.clone() as qq:
wpi.resetpage(qq, 5*(i+1), 5*(i+1))
wpi.add(t, qq)
with f(t, 'compareany') as r:
save(r, f, ext='.gif')
def test_constitute(self):
f = wpi.constitute
with Image() as t:
w = 2
h = 2
b = [0, 0, 0,
255, 255, 255,
255, 0, 0,
0, 255, 0]
f(t, w, h, 'RGB', 'char', b)
save(t, f)
def test_contrast(self):
f = wpi.contrast
with self.rose.clone() as t:
f(t, False)
save(t, f)
def test_convolve(self):
f = wpi.convolve
kernel = [1/16, 2/16, 1/16,
2/16, 4/16, 2/16,
1/16, 2/16, 1/16]
with self.rose.clone() as t:
f(t, 3, kernel)
save(t, f)
with self.rose.clone() as t:
f(t, 3, kernel, channel='red')
save(t, f, True)
def test_cyclecolormap(self):
f = wpi.cyclecolormap
with self.logo.clone() as t:
f(t, 5)
save(t, f)
def test_cipher(self):
f = wpi.encipher
with self.rose.clone() as t:
f(t, 'password')
save(t, f)
f = wpi.decipher
f(t, 'password')
save(t, f)
def test_deskew(self):
f = wpi.deskew
with Image(width=80, height=40, background=Color('black')) as t:
f(t, 0.5*t.quantum_range)  # TODO: find a skewed image as a sample
save(t, f)
def test_despeckle(self):
f = wpi.despeckle
with self.rose.clone() as t:
# TODO: add speckle noise
f(t)
save(t, f)
def test_edge(self):
f = wpi.edge
with self.logo.clone() as t:
f(t, 3)
save(t, f)
def test_emboss(self):
f = wpi.emboss
with self.logo.clone() as t:
f(t, 0, 3)
save(t, f)
def test_enhance(self):
f = wpi.enhance
with Image(filename='plasma:', width=100, height=100) as t:
f(t)
save(t, f)
def test_equalize(self):
f = wpi.equalize
with self.rose.clone() as t:
f(t)
save(t, f)
with self.rose.clone() as t:
f(t, channel='red')
save(t, f, True)
def test_exportpixels(self):
w = 1
h = 1
channels = 'RGB'
with Image(width=w, height=h, background=Color('red')) as t:
r = wpi.exportpixels(t, 0, 0, w, h, channels, 'double')
self.assertEqual(r[0], 1.0)
self.assertEqual(r[1], 0.0)
self.assertEqual(r[2], 0.0)
def test_extent(self):
f = wpi.extent
with self.rose.clone() as t:
t.gravity = 'center'
t.background_color = Color('blue')
f(t, -10, -10, t.width+20, t.height+20)
save(t, f)
def test_filterimage(self):
f = wpi.filterimage
kernel = [ # Sobel filter
-1.0, 0.0, 1.0,
-2.0, 0.0, 2.0,
-1.0, 0.0, 1.0,
]
with self.rose.clone() as t:
f(t, 3, 3, kernel)
save(t, f)
with self.rose.clone() as t:
f(t, 3, 3, kernel, channel='red')
save(t, f, True)
def test_floodfillpaint(self):
f = wpi.floodfillpaint
with self.logo.clone() as t:
f(t, Color('green'), 0.10*t.quantum_range, Color('white'), 0, 0)
save(t, f)
def test_fft(self):
f = wpi.forwardfouriertransform  # requires IM build option '--with-fftw'
with self.logo.clone() as t: # I couldn't build on Windows...
f(t, True)
save(t, f) # includes two images(magnitude&phase)
f = wpi.inversefouriertransform
with t.sequence[0].clone() as mag:
with t.sequence[1].clone() as phase:
wpi.blur(mag, 0, 0.5) # as degradation
t2 = mag
f(t2, phase, True)
save(t2, f)
def test_haldclut(self):
f = wpi.haldclut # TODO: more useful code
with Image(filename='hald:12') as p:
with self.rose.clone() as t:
f(t, p)
save(t, f)
with self.rose.clone() as t:
f(t, p, channel='red')
save(t, f, True)
def test_implode(self):
f = wpi.implode
with self.rose.clone() as t:
f(t, 1.0)
save(t, f)
def test_importpixels(self):
f = wpi.importpixels
with Image(width=4, height=4, background=Color('red')) as t:
w = 2
h = 2
b = [0, 0, 0,
255, 255, 255,
255, 0, 0,
0, 255, 0]
f(t, 1, 1, w, h, 'RGB', 'char', b)
save(t, f)
def test_label(self):
f = wpi.label
with self.rose.clone() as t:
f(t, 'hello')
save(t, f)
def test_localcontrast(self):
f = wpi.localcontrast
with self.logo.clone() as t:
f(t, 5, 30)
save(t, f)
def test_magnify(self):
f = wpi.magnify
with self.rose.clone() as t:
f(t)
save(t, f)
def test_minify(self):
f = wpi.minify
with self.rose.clone() as t:
f(t)
save(t, f)
def test_montage(self):
f = wpi.montage
with self.rose.clone() as base:
with Image() as dst:
rows = 2
columns = 3
for i in range(rows * columns):
wpi.add(dst, base)
tile = "{0}x{1}+0+0".format(columns, rows)
thumb = "80x50+4+3"
frame = "15x15+3+3"
mode = "frame"
with Drawing() as d:
with f(dst, d, tile, thumb, mode, frame) as result:
save(result, f)
def test_morph(self):
f = wpi.morph
color = Color('white')
with self.rose.clone() as t:
with Image(width=t.width, height=t.height, background=color) as p:
wpi.add(t, p)
wpi.setfirstiterator(t)
wpi.setdelay(t, 60)
with f(t, 5) as q:
save(q, f, ext='.gif')
def test_morphology(self):
f = wpi.morphology
with self.logo.clone() as t:
f(t, 'dilate', 1, 'Diamond')
save(t, f)
with self.logo.clone() as t:
f(t, 'dilate', 1, 'Diamond', channel='red')
save(t, f, True)
def test_motionblur(self):
f = wpi.motionblur
with self.logo.clone() as t:
f(t, 30, 10, 45)
save(t, f)
with self.logo.clone() as t:
f(t, 30, 10, 45, channel='red')
save(t, f, True)
def test_oilpaint(self):
f = wpi.oilpaint
with self.rose.clone() as t:
f(t, 2.0)
save(t, f)
def test_opaquepaint(self):
f = wpi.opaquepaint
with self.logo.clone() as t:
f(t, Color('red'), Color('blue'), 1.0, False)
save(t, f)
with self.logo.clone() as t:
f(t, Color('red'), Color('blue'), 1.0, False, channel='blue')
save(t, f, True)
def test_orderedposterize(self):
f = wpi.orderedposterize
with self.grad.clone() as t:
f(t, 'o4x4,3,3')
save(t, f)
with self.grad.clone() as t:
f(t, 'o4x4,3,3', channel='red')
save(t, f, True)
def test_polaroid(self):
f = wpi.polaroid
with self.logo.clone() as t:
with Drawing() as d:
f(t, d, 1.0)
save(t, f)
def test_posterize(self):
f = wpi.posterize
with self.rose.clone() as t:
f(t, 3, True)
save(t, f)
def test_raiseimage(self):
f = wpi.raiseimage
with self.rose.clone() as t:
f(t, 10, 10, 10, 10, True)
save(t, f)
def test_randomthreshold(self):
f = wpi.randomthreshold
with self.text_a.clone() as t:
rng = t.quantum_range
f(t, int(rng * 0.05), int(rng * 0.95))
save(t, f)
with self.text_a.clone() as t:
rng = t.quantum_range
f(t, int(rng * 0.05), int(rng * 0.95), channel='red')
save(t, f, True)
def test_remap(self):
f = wpi.remap
with self.logo.clone() as t:
with self.rose.clone() as p:
f(t, p, 'nodither')
save(t, f)
def test_resample(self):
f = wpi.resample
with self.rose.clone() as t:
dpi = 72 * 2
f(t, dpi, dpi, 'lanczos', 1.0)
save(t, f)
def test_roll(self):
f = wpi.roll
with self.rose.clone() as t:
f(t, 10, 10)
save(t, f)
def test_rotationalblur(self):
f = wpi.rotationalblur
with self.rose.clone() as t:
f(t, 45)
save(t, f)
with self.rose.clone() as t:
f(t, 45, channel='red')
save(t, f, True)
def test_scale(self):
f = wpi.scale
with self.rose.clone() as t:
f(t, t.width*2, t.height*2)
save(t, f)
def test_segment(self):
f = wpi.segment
with self.logo.clone() as t:
f(t, 'rgb', False, 5, 20)
save(t, f)
def test_selectiveblur(self):
f = wpi.selectiveblur
with self.logo.clone() as t:
f(t, 20, 20, 0.5*t.quantum_range)
save(t, f)
with self.logo.clone() as t:
f(t, 20, 20, 0.5*t.quantum_range, channel='red')
save(t, f, True)
def test_separate_channel(self):
f = wpi.separate_channel
with self.rose.clone() as t:
f(t, 'red')
save(t, f)
def test_sepiatone(self):
f = wpi.sepiatone
with self.rose.clone() as t:
f(t, 0.5*t.quantum_range)
save(t, f)
def test_shade(self):
f = wpi.shade
with self.logo.clone() as t:
f(t, True, 45, 135)
save(t, f)
def test_shadow(self):
f = wpi.shadow
with self.text.clone() as t:
with self.text.clone() as p:
p.negate()
f(p, 100, 2, 10, 10)
t.composite_channel('default_channels', p, 'overlay')
save(t, f)
def test_sharpen(self):
f = wpi.sharpen
with self.rose.clone() as t:
f(t, 3, 3)
save(t, f)
with self.rose.clone() as t:
f(t, 3, 3, channel='red')
save(t, f, True)
def test_shave(self):
f = wpi.shave
with self.logo.clone() as t:
f(t, 100, 100)
save(t, f)
def test_shear(self):
f = wpi.shear
with self.grad.clone() as t:
f(t, Color('red'), 0, 10)
save(t, f)
def test_sigmoidalcontrast(self):
f = wpi.sigmoidalcontrast
with self.rose.clone() as t:
f(t, True, 3, 3)
save(t, f)
with self.rose.clone() as t:
f(t, True, 3, 3, channel='red')
save(t, f, True)
def test_sketch(self):
f = wpi.sketch
with self.logo.clone() as t:
f(t, 10, 10, 45)
save(t, f)
def test_smush(self):
f = wpi.smush
def makeletter(letter, w, h):
img = Image(width=w, height=h)
with Drawing() as d:
d.font = 'Arial'
d.font_size = 24
d.gravity = 'center'
d.text(0, 0, letter)
d(img)
return img
with Image() as t:
with makeletter('A', 50, 30) as a:
with makeletter('B', 50, 30) as b:
wpi.add(t, a)
wpi.add(t, b)
wpi.setfirstiterator(t)
with f(t, False, -3) as p:
save(p, f)
def test_solarize(self):
f = wpi.solarize
with self.rose.clone() as t:
f(t, 0.4*t.quantum_range)
save(t, f)
with self.rose.clone() as t:
f(t, 0.4*t.quantum_range, channel='red')
save(t, f, True)
def test_splice(self):
f = wpi.splice
with self.rose.clone() as t:
t.gravity = 'center'
f(t, t.width//2, t.height//2, 20, 20)
save(t, f)
def test_sparsecolor(self):
f = wpi.sparsecolor
with Image(width=100, height=100, background=Color('black')) as t:
f(t, 'default_channels', 'bilinear',
[0, 0, 1.0, 0.0, 0.0, 1.0,
100, 100, 0.0, 1.0, 1.0, 1.0])
save(t, f)
def test_spread(self):
f = wpi.spread
with self.logo.clone() as t:
f(t, 20)
save(t, f)
def test_statistic(self):
f = wpi.statistic
with self.rose.clone() as t:
f(t, 'gradient', 4, 4)
save(t, f)
with self.rose.clone() as t:
f(t, 'gradient', 4, 4, channel='red')
save(t, f, True)
def test_stegano(self):
f = wpi.stegano
with self.rose.clone() as t:
w = 50
h = 40
offset = 15
tmpfile = 'tmp.png'
with Image(width=w, height=h, background=Color('white')) as p:
with Drawing() as d:
d.gravity = 'center'
d.fill_color = Color('black')
d.text(0, 0, 'Watch\nthe\nPidgeon')
d(p)
with f(t, p, offset) as q:
q.save(filename=tmpfile)
try:
with Image() as q:
wpi.setsizeoffset(q, w, h, offset)
q.read(filename='stegano:' + tmpfile)
save(q, f)
except Exception:
raise
finally:
os.remove(tmpfile)
def test_stereo(self):
f = wpi.stereo
with self.rose.clone() as t:
with self.rose.clone() as p:
p.negate()
with f(t, p) as q:
save(q, f)
def test_swirl(self):
f = wpi.swirl
with self.rose.clone() as t:
f(t, 180)
save(t, f)
def test_texture(self):
f = wpi.texture
with Image(width=300, height=200) as t:
with self.rose.clone() as p:
with f(t, p) as q:
save(q, f)
def test_thumbnail(self):
f = wpi.thumbnail
with self.logo.clone() as t:
f(t, 100, 100)
save(t, f)
def test_tint(self):
f = wpi.tint
with self.rose.clone() as t:
f(t, Color('rgb'), Color('gray(25%)'))
save(t, f)
def test_vignette(self):
f = wpi.vignette
with self.logo.clone() as t:
wpi.minify(t)
t.background_color = Color('black')
f(t, 0, 10, 20, 20)
save(t, f)
def test_wave(self):
f = wpi.wave
with self.grad.clone() as t:
f(t, 40, 200)
save(t, f)
def test_whitethreshold(self):
f = wpi.whitethreshold
with self.grad.clone() as t:
f(t, Color('gray(50%)'))
save(t, f)
class CheckTextUtil(unittest.TestCase):
def test_imagesize(self):
with Drawing() as d:
text = 'check'
d.font = 'Arial'
d.font_size = 36
size = calcSuitableImagesize(d, text)
print('calcSuitableImagesize: ', size)
self.assertTrue(size[0] > 0 and size[1] > 0)
def test_fontsize(self):
w = 100
h = 100
with Drawing() as d:
text = 'check'
d.font = 'Arial'
fontsize = calcSuitableFontsize(d, text, width=w)
print('calcSuitableImagesize[W]: ', fontsize)
self.assertTrue(fontsize > 0)
fontsize = calcSuitableFontsize(d, text, height=h)
print('calcSuitableImagesize[H]: ', fontsize)
self.assertTrue(fontsize > 0)
if __name__ == '__main__':
unittest.main()
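# --- Hypothetical helper (not part of the original test file) ---
# When debugging a single wandplus operation it can be run in isolation,
# assuming the file is importable as tests.test from the repository root.
import unittest
suite = unittest.defaultTestLoader.loadTestsFromName('tests.test.CheckImage.test_blur')
unittest.TextTestRunner(verbosity=2).run(suite)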
| 29.487239
| 81
| 0.464395
| 3,438
| 25,418
| 3.392379
| 0.103839
| 0.033096
| 0.054017
| 0.037726
| 0.481694
| 0.427334
| 0.383949
| 0.347852
| 0.27883
| 0.217611
| 0
| 0.040395
| 0.405893
| 25,418
| 861
| 82
| 29.521487
| 0.731938
| 0.02215
| 0
| 0.41255
| 0
| 0
| 0.051059
| 0.00608
| 0
| 0
| 0
| 0.001161
| 0.008011
| 1
| 0.129506
| false
| 0.00267
| 0.012016
| 0
| 0.145527
| 0.004005
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 92e78a29e0f69d74c35aa00744e686a1763079d2 | 7,652 | py | Python |
src/librender/tests/test_mesh.py | tizian/layer-laboratory | 008cc94b76127e9eb74227fcd3d0145da8ddec30 | ["CNRI-Python"] | 7 | 2020-07-24T03:19:59.000Z | 2022-03-30T10:56:12.000Z |
src/librender/tests/test_mesh.py | tizian/layer-laboratory | 008cc94b76127e9eb74227fcd3d0145da8ddec30 | ["CNRI-Python"] | 1 | 2021-04-07T22:30:23.000Z | 2021-04-08T00:55:36.000Z |
src/librender/tests/test_mesh.py | tizian/layer-laboratory | 008cc94b76127e9eb74227fcd3d0145da8ddec30 | ["CNRI-Python"] | 2 | 2020-06-08T08:25:09.000Z | 2021-04-05T22:13:08.000Z |
import mitsuba
import pytest
import enoki as ek
from enoki.dynamic import Float32 as Float
from mitsuba.python.test.util import fresolver_append_path
from mitsuba.python.util import traverse
def test01_create_mesh(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype
from mitsuba.render import Mesh
m = Mesh("MyMesh", 3, 2)
m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]
m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]
m.parameters_changed()
assert str(m) == """Mesh[
name = "MyMesh",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [1, 1, 0]
],
vertex_count = 3,
vertices = [36 B of vertex data],
face_count = 2,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0.96
]"""
@fresolver_append_path
def test02_ply_triangle(variant_scalar_rgb):
from mitsuba.core import UInt32, Vector3f
from mitsuba.core.xml import load_string
m = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle.ply"/>
<boolean name="face_normals" value="true"/>
</shape>
""")
positions = m.vertex_positions_buffer()
faces = m.faces_buffer()
assert not m.has_vertex_normals()
assert ek.slices(positions) == 9
assert ek.allclose(positions[0:3], [0, 0, 0])
assert ek.allclose(positions[3:6], [0, 0, 1])
assert ek.allclose(positions[6:9], [0, 1, 0])
assert ek.slices(faces) == 3
assert faces[0] == UInt32(0)
assert faces[1] == UInt32(1)
assert faces[2] == UInt32(2)
@fresolver_append_path
def test03_ply_computed_normals(variant_scalar_rgb):
from mitsuba.core import Vector3f
from mitsuba.core.xml import load_string
"""Checks(automatic) vertex normal computation for a PLY file that
doesn't have them."""
shape = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle.ply"/>
</shape>
""")
normals = shape.vertex_normals_buffer()
assert shape.has_vertex_normals()
# Normals are stored in half precision
assert ek.allclose(normals[0:3], [-1, 0, 0])
assert ek.allclose(normals[3:6], [-1, 0, 0])
assert ek.allclose(normals[6:9], [-1, 0, 0])
def test04_normal_weighting_scheme(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype, Vector3f
from mitsuba.render import Mesh
import numpy as np
"""Tests the weighting scheme that is used to compute surface normals."""
m = Mesh("MyMesh", 5, 2, has_vertex_normals=True)
vertices = m.vertex_positions_buffer()
normals = m.vertex_normals_buffer()
a, b = 1.0, 0.5
vertices[:] = [0, 0, 0, -a, 1, 0, a, 1, 0, -b, 0, 1, b, 0, 1]
n0 = Vector3f(0.0, 0.0, -1.0)
n1 = Vector3f(0.0, 1.0, 0.0)
angle_0 = ek.pi / 2.0
angle_1 = ek.acos(3.0 / 5.0)
n2 = n0 * angle_0 + n1 * angle_1
n2 /= ek.norm(n2)
n = np.vstack([n2, n0, n0, n1, n1]).transpose()
m.faces_buffer()[:] = [0, 1, 2, 0, 3, 4]
m.recompute_vertex_normals()
for i in range(5):
assert ek.allclose(normals[i*3:(i+1)*3], n[:, i], 5e-4)
@fresolver_append_path
def test05_load_simple_mesh(variant_scalar_rgb):
from mitsuba.core.xml import load_string
"""Tests the OBJ and PLY loaders on a simple example."""
for mesh_format in ["obj", "ply"]:
shape = load_string("""
<shape type="{0}" version="2.0.0">
<string name="filename" value="resources/data/tests/{0}/cbox_smallbox.{0}"/>
</shape>
""".format(mesh_format))
positions = shape.vertex_positions_buffer()
faces = shape.faces_buffer()
assert shape.has_vertex_normals()
assert ek.slices(positions) == 72
assert ek.slices(faces) == 36
assert ek.allclose(faces[6:9], [4, 5, 6])
assert ek.allclose(positions[:5], [130, 165, 65, 82, 165])
@pytest.mark.parametrize('mesh_format', ['obj', 'ply', 'serialized'])
@pytest.mark.parametrize('features', ['normals', 'uv', 'normals_uv'])
@pytest.mark.parametrize('face_normals', [True, False])
def test06_load_various_features(variant_scalar_rgb, mesh_format, features, face_normals):
"""Tests the OBJ & PLY loaders with combinations of vertex / face normals,
presence and absence of UVs, etc.
"""
from mitsuba.core.xml import load_string
def test():
shape = load_string("""
<shape type="{0}" version="2.0.0">
<string name="filename" value="resources/data/tests/{0}/rectangle_{1}.{0}" />
<boolean name="face_normals" value="{2}" />
</shape>
""".format(mesh_format, features, str(face_normals).lower()))
assert shape.has_vertex_normals() == (not face_normals)
positions = shape.vertex_positions_buffer()
normals = shape.vertex_normals_buffer()
texcoords = shape.vertex_texcoords_buffer()
faces = shape.faces_buffer()
(v0, v2, v3) = [positions[i*3:(i+1)*3] for i in [0, 2, 3]]
assert ek.allclose(v0, [-2.85, 0.0, -7.600000], atol=1e-3)
assert ek.allclose(v2, [ 2.85, 0.0, 0.599999], atol=1e-3)
assert ek.allclose(v3, [ 2.85, 0.0, -7.600000], atol=1e-3)
if 'uv' in features:
assert shape.has_vertex_texcoords()
(uv0, uv2, uv3) = [texcoords[i*2:(i+1)*2] for i in [0, 2, 3]]
# For OBJs (and .serialized generated from OBJ), UV.y is flipped.
if mesh_format in ['obj', 'serialized']:
assert ek.allclose(uv0, [0.950589, 1-0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 1-0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 1-0.689127], atol=1e-3)
else:
assert ek.allclose(uv0, [0.950589, 0.988416], atol=1e-3)
assert ek.allclose(uv2, [0.025105, 0.689127], atol=1e-3)
assert ek.allclose(uv3, [0.950589, 0.689127], atol=1e-3)
if shape.has_vertex_normals():
for n in [normals[i*3:(i+1)*3] for i in [0, 2, 3]]:
assert ek.allclose(n, [0.0, 1.0, 0.0])
return fresolver_append_path(test)()
@fresolver_append_path
def test07_ply_stored_attribute(variant_scalar_rgb):
from mitsuba.core import Vector3f
from mitsuba.core.xml import load_string
m = load_string("""
<shape type="ply" version="0.5.0">
<string name="filename" value="data/triangle_face_colors.ply"/>
</shape>
""")
assert str(m) == """PLYMesh[
name = "triangle_face_colors.ply",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [0, 1, 1]
],
vertex_count = 3,
vertices = [72 B of vertex data],
face_count = 1,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0,
mesh attributes = [
face_color: 3 floats
]
]"""
def test08_mesh_add_attribute(variant_scalar_rgb):
from mitsuba.core import Struct, float_dtype
from mitsuba.render import Mesh
m = Mesh("MyMesh", 3, 2)
m.vertex_positions_buffer()[:] = [0.0, 0.0, 0.0, 1.0, 0.2, 0.0, 0.2, 1.0, 0.0]
m.faces_buffer()[:] = [0, 1, 2, 1, 2, 0]
m.parameters_changed()
m.add_attribute("vertex_color", 3)[:] = [0.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 0.0]
assert str(m) == """Mesh[
name = "MyMesh",
bbox = BoundingBox3f[
min = [0, 0, 0],
max = [1, 1, 0]
],
vertex_count = 3,
vertices = [72 B of vertex data],
face_count = 2,
faces = [24 B of face data],
disable_vertex_normals = 0,
surface_area = 0.96,
mesh attributes = [
vertex_color: 3 floats
]
]"""
| 32.561702
| 93
| 0.611997
| 1,151
| 7,652
| 3.939183
| 0.166811
| 0.026026
| 0.018527
| 0.009704
| 0.542347
| 0.473313
| 0.425232
| 0.362373
| 0.354433
| 0.346052
| 0
| 0.082012
| 0.233534
| 7,652
| 235
| 94
| 32.561702
| 0.691049
| 0.027052
| 0
| 0.467033
| 0
| 0.005495
| 0.257456
| 0.03995
| 0
| 0
| 0
| 0
| 0.186813
| 1
| 0.049451
| false
| 0
| 0.115385
| 0
| 0.17033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 92ec31910f4ccb9a9e9fdaf1976491caf430c06d | 1,067 | py | Python |
tests/slicebuilders/subpopulations/test_length.py | ANarayan/robustness-gym | eed2800985631fbbe6491b5f6f0731a067eef78e | ["Apache-2.0"] | null | null | null |
tests/slicebuilders/subpopulations/test_length.py | ANarayan/robustness-gym | eed2800985631fbbe6491b5f6f0731a067eef78e | ["Apache-2.0"] | null | null | null |
tests/slicebuilders/subpopulations/test_length.py | ANarayan/robustness-gym | eed2800985631fbbe6491b5f6f0731a067eef78e | ["Apache-2.0"] | null | null | null |
from unittest import TestCase
import numpy as np
from robustnessgym.cachedops.spacy import Spacy
from robustnessgym.slicebuilders.subpopulations.length import LengthSubpopulation
from tests.testbeds import MockTestBedv0
class TestLengthSubpopulation(TestCase):
def setUp(self):
self.testbed = MockTestBedv0()
self.testbed.dataset = Spacy()(self.testbed.dataset, columns=["text"])
def test_score(self):
# Create the length subpopulation
length = LengthSubpopulation(intervals=[(1, 3), (4, 5)])
# Compute scores
scores = length.score(self.testbed.dataset[:], columns=["text"])
self.assertTrue(np.allclose(scores, np.array([5, 5, 5, 5, 5, 5])))
print(self.testbed.dataset.column_names)
print(Spacy.retrieve(self.testbed.dataset[:], ["text"]))
# Apply the subpopulation
slices, slice_matrix = length(self.testbed.dataset, columns=["text"])
# Check that the slice membership lines up
self.assertTrue(np.allclose(slice_matrix, np.array([[0, 1]] * 6)))
| 34.419355
| 81
| 0.686036
| 125
| 1,067
| 5.824
| 0.448
| 0.105769
| 0.148352
| 0.103022
| 0.127747
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017381
| 0.19119
| 1,067
| 30
| 82
| 35.566667
| 0.826188
| 0.10403
| 0
| 0
| 0
| 0
| 0.016824
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 1
| 0.117647
| false
| 0
| 0.294118
| 0
| 0.470588
| 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 92eca5c1a6337291d863c933685487ea52da0c9b | 1,146 | py | Python |
pulsar_spectra/catalogue_papers/Jankowski_2018_raw_to_yaml.py | NickSwainston/pulsar_spectra | b264aab3f8fc1bb3cad14ef1b93cab519ed5bc69 | ["MIT"] | null | null | null |
pulsar_spectra/catalogue_papers/Jankowski_2018_raw_to_yaml.py | NickSwainston/pulsar_spectra | b264aab3f8fc1bb3cad14ef1b93cab519ed5bc69 | ["MIT"] | 4 | 2021-12-17T04:24:13.000Z | 2022-02-24T14:51:18.000Z |
pulsar_spectra/catalogue_papers/Jankowski_2018_raw_to_yaml.py | NickSwainston/pulsar_spectra | b264aab3f8fc1bb3cad14ef1b93cab519ed5bc69 | ["MIT"] | null | null | null |
import json
from astroquery.vizier import Vizier
with open("Jankowski_2018_raw.txt", "r") as raw_file:
lines = raw_file.readlines()
print(lines)
pulsar_dict = {}
for row in lines[3:]:
row = row.split("|")
print(row)
pulsar = row[0].strip().replace("−", "-")
freqs = []
fluxs = []
flux_errs = []
# If there is no error value it is an upper limit; not sure how to handle that, so skip it
if row[1].strip() != "" and row[2].strip() != "":
freqs.append(728)
fluxs.append(float(row[1].strip()))
flux_errs.append(float(row[2].strip()))
if row[3].strip() != "" and row[4].strip() != "":
freqs.append(1382)
fluxs.append(float(row[3].strip()))
flux_errs.append(float(row[4].strip()))
if row[5].strip() != "" and row[6].strip() != "":
freqs.append(3100)
fluxs.append(float(row[5].strip()))
flux_errs.append(float(row[6].strip()))
pulsar_dict[pulsar] = {"Frequency MHz":freqs, "Flux Density mJy":fluxs, "Flux Density error mJy":flux_errs}
with open("Jankowski_2018.yaml", "w") as cat_file:
cat_file.write(json.dumps(pulsar_dict))
print(pulsar_dict)
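# --- Hypothetical round-trip check (not part of the original script) ---
# The catalogue is written with json.dumps, so it can be read back with json
# (JSON is also valid YAML); each entry carries the three keys used above.
import json
with open("Jankowski_2018.yaml", "r") as cat_file:
    catalogue = json.load(cat_file)
for name, data in catalogue.items():
    print(name, data["Frequency MHz"], data["Flux Density mJy"], data["Flux Density error mJy"])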
| 34.727273
| 111
| 0.604712
| 167
| 1,146
| 4.05988
| 0.39521
| 0.097345
| 0.123894
| 0.084071
| 0.119469
| 0.119469
| 0
| 0
| 0
| 0
| 0
| 0.036224
| 0.205061
| 1,146
| 33
| 112
| 34.727273
| 0.706915
| 0.057592
| 0
| 0
| 0
| 0
| 0.089898
| 0.020389
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.068966
| 0
| 0.068966
| 0.103448
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 | 92ee36608ac8edb00b879a89f8f1eafb4cb4fb04 | 15,018 | py | Python |
integration-tests/run-intg-test.py | NishikaDeSilva/identity-test-integration | dbd1db07aa6d4f4942d772cd56c0b06c355bd43b | ["Apache-2.0"] | 4 | 2017-10-23T05:25:27.000Z | 2018-01-10T08:00:14.000Z |
integration-tests/run-intg-test.py | NishikaDeSilva/identity-test-integration | dbd1db07aa6d4f4942d772cd56c0b06c355bd43b | ["Apache-2.0"] | 42 | 2018-05-21T12:55:49.000Z | 2020-01-17T06:40:25.000Z |
integration-tests/run-intg-test.py | NishikaDeSilva/identity-test-integration | dbd1db07aa6d4f4942d772cd56c0b06c355bd43b | ["Apache-2.0"] | 46 | 2017-10-04T05:45:52.000Z | 2018-05-05T14:32:26.000Z |
# Copyright (c) 2018, WSO2 Inc. (http://wso2.com) All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# importing required modules
import sys
from xml.etree import ElementTree as ET
import toml
import subprocess
import wget
import logging
import inspect
import os
import shutil
import pymysql
import sqlparse
import re
from pathlib import Path
import urllib.request as urllib2
from xml.dom import minidom
import intg_test_manager as cm
from subprocess import Popen, PIPE
import os
from prod_test_constant import DB_META_DATA, DIST_POM_PATH, INTEGRATION_PATH, DISTRIBUTION_PATH, \
DATASOURCE_PATHS, LIB_PATH, WSO2SERVER, M2_PATH, ARTIFACT_REPORTS_PATHS, POM_FILE_PATHS
from intg_test_constant import NS, ZIP_FILE_EXTENSION, CARBON_NAME, VALUE_TAG, SURFACE_PLUGIN_ARTIFACT_ID, \
DEPLOYMENT_PROPERTY_FILE_NAME, LOG_FILE_NAME, PRODUCT_STORAGE_DIR_NAME, \
DEFAULT_DB_USERNAME, LOG_STORAGE, TEST_OUTPUT_DIR_NAME, DEFAULT_ORACLE_SID, MYSQL_DB_ENGINE, \
ORACLE_DB_ENGINE, PRODUCT_STORAGE_DIR_NAME, MSSQL_DB_ENGINE
database_names = []
db_engine = None
sql_driver_location = None
identity_db_url = None
identity_db_username = None
identity_db_password = None
identity_db_driver = None
shared_db_url = None
shared_db_username = None
shared_db_password = None
shared_db_driver = None
identity_db = "WSO2_IDENTITY_DB"
shared_db = "WSO2_SHARED_DB"
def get_db_meta_data(argument):
switcher = DB_META_DATA
return switcher.get(argument, False)
def add_environmental_variables():
if MYSQL_DB_ENGINE == cm.database_config['db_engine'].upper():
identity_url = cm.database_config[
'url'] + "/" + identity_db + "?useSSL=false&autoReconnect=true&requireSSL=false" \
"&verifyServerCertificate=false"
shared_url = cm.database_config[
'url'] + "/" + shared_db + \
"?useSSL=false&autoReconnect=true&requireSSL=false" \
"&verifyServerCertificate=false"
user = cm.database_config['user']
elif ORACLE_DB_ENGINE == cm.database_config['db_engine'].upper():
identity_url = cm.database_config['url'] + "/" + DEFAULT_ORACLE_SID
shared_url = cm.database_config['url'] + "/" + DEFAULT_ORACLE_SID
user = cm.database_config['user']
elif MSSQL_DB_ENGINE == cm.database_config['db_engine'].upper():
identity_url = cm.database_config['url'] + ";" + "databaseName=" + identity_db
shared_url = cm.database_config['url'] + ";" + "databaseName=" + shared_db
user = cm.database_config['user']
else:
shared_url = cm.database_config['url'] + "/" + shared_db
identity_url = cm.database_config['url'] + "/" + identity_db
user = cm.database_config['user']
password = cm.database_config['password']
driver_class_name = cm.database_config['driver_class_name']
os.environ["SHARED_DATABASE_URL"] = shared_url
os.environ["SHARED_DATABASE_USERNAME"] = user
os.environ["SHARED_DATABASE_PASSWORD"] = password
os.environ["SHARED_DATABASE_DRIVER"] = driver_class_name
os.environ["IDENTITY_DATABASE_URL"] = identity_url
os.environ["IDENTITY_DATABASE_USERNAME"] = user
os.environ["IDENTITY_DATABASE_PASSWORD"] = password
os.environ["IDENTITY_DATABASE_DRIVER"] = driver_class_name
logger.info("Added environmental variables for integration test")
def modify_datasources():
file_path = Path(storage_dist_abs_path / datasource_path)
if sys.platform.startswith('win'):
file_path = cm.winapi_path(file_path)
logger.info("Modifying datasource: " + str(file_path))
deployment_toml_config = toml.load(file_path)
logger.info("loading dep,loyment.toml file")
logger.info(deployment_toml_config)
for key in deployment_toml_config:
if key == 'database':
database_config = deployment_toml_config[key]
for key in database_config:
if key == 'identity_db':
identity_db_config = database_config['identity_db']
identity_db_config ['url'] = "$env{IDENTITY_DATABASE_URL}"
identity_db_config ['username'] = "$env{IDENTITY_DATABASE_USERNAME}"
identity_db_config ['password'] = "$env{IDENTITY_DATABASE_PASSWORD}"
identity_db_config ['driver'] = "$env{IDENTITY_DATABASE_DRIVER}"
database_names.append(identity_db)
if key == 'shared_db':
shared_db_config = database_config['shared_db']
shared_db_config ['url'] = "$env{SHARED_DATABASE_URL}"
shared_db_config ['username'] = "$env{SHARED_DATABASE_USERNAME}"
shared_db_config ['password'] = "$env{SHARED_DATABASE_PASSWORD}"
shared_db_config ['driver'] = "$env{SHARED_DATABASE_DRIVER}"
database_names.append(shared_db)
with open(file_path, 'w') as writer:
writer.write(toml.dumps(deployment_toml_config))
# Since we have added a method to clone a given git branch and check out the latest released tag, it is not required to
# modify pom files. Hence, in the current implementation this method is not used.
# However, in order to execute this method you can define pom file paths in const_<prod>.py as a constant
# and import it into run-intg-test.py. Thereafter, assign it to the global variable pom_file_paths in the
# configure_product method and call the modify_pom_files method.
def modify_pom_files():
for pom in POM_FILE_PATHS:
file_path = Path(cm.workspace + "/" + cm.product_id + "/" + pom)
if sys.platform.startswith('win'):
file_path = cm.winapi_path(file_path)
logger.info("Modifying pom file: " + str(file_path))
ET.register_namespace('', NS['d'])
artifact_tree = ET.parse(file_path)
artifact_root = artifact_tree.getroot()
data_sources = artifact_root.find('d:build', NS)
plugins = data_sources.find('d:plugins', NS)
for plugin in plugins.findall('d:plugin', NS):
artifact_id = plugin.find('d:artifactId', NS)
if artifact_id is not None and artifact_id.text == SURFACE_PLUGIN_ARTIFACT_ID:
configuration = plugin.find('d:configuration', NS)
system_properties = configuration.find('d:systemProperties', NS)
for neighbor in system_properties.iter('{' + NS['d'] + '}' + CARBON_NAME):
neighbor.text = cm.modify_distribution_name(neighbor)
for prop in system_properties:
name = prop.find('d:name', NS)
if name is not None and name.text == CARBON_NAME:
for data in prop:
if data.tag == VALUE_TAG:
data.text = cm.modify_distribution_name(data)
break
artifact_tree.write(file_path)
#TODO: Improve the method in generic way to support all products
def save_log_files():
log_storage = Path(cm.workspace + "/" + LOG_STORAGE)
if not Path.exists(log_storage):
Path(log_storage).mkdir(parents=True, exist_ok=True)
log_file_paths = ARTIFACT_REPORTS_PATHS
if log_file_paths:
for file in log_file_paths:
absolute_file_path = Path(cm.workspace + "/" + cm.product_id + "/" + file)
if Path.exists(absolute_file_path):
cm.copy_file(absolute_file_path, log_storage)
else:
logger.error("File doesn't contain in the given location: " + str(absolute_file_path))
#TODO: Improve the method in generic way to support all products
def save_test_output():
report_folder = Path(cm.workspace + "/" + TEST_OUTPUT_DIR_NAME)
logger.info(str(report_folder))
if Path.exists(report_folder):
shutil.rmtree(report_folder)
logger.info(str(ARTIFACT_REPORTS_PATHS))
logger.info(str(type(ARTIFACT_REPORTS_PATHS)))
report_file_paths = ARTIFACT_REPORTS_PATHS
for key, value in report_file_paths.items():
for file in value:
absolute_file_path = Path(cm.workspace + "/" + cm.product_id + "/" + file)
if Path.exists(absolute_file_path):
report_storage = Path(cm.workspace + "/" + TEST_OUTPUT_DIR_NAME + "/" + key)
cm.copy_file(absolute_file_path, report_storage)
logger.info("Report successfully copied")
else:
logger.error("File doesn't contain in the given location: " + str(absolute_file_path))
#TODO: Improve the method in generic way to support all products
# def set_custom_testng():
# if cm.use_custom_testng_file == "TRUE":
# testng_source = Path(cm.workspace + "/" + "testng.xml")
# testng_destination = Path(cm.workspace + "/" + cm.product_id + "/" + TESTNG_DIST_XML_PATHS)
# testng_server_mgt_source = Path(cm.workspace + "/" + "testng-server-mgt.xml")
# testng_server_mgt_destination = Path(cm.workspace + "/" + cm.product_id + "/" + TESTNG_SERVER_MGT_DIST)
# # replace testng source
# cm.replace_file(testng_source, testng_destination)
# # replace testng server mgt source
# cm.replace_file(testng_server_mgt_source, testng_server_mgt_destination)
def configure_product():
try:
global datasource_path
global target_dir_abs_path
global storage_dist_abs_path
global pom_file_paths
datasource_path = DATASOURCE_PATHS
zip_name = dist_name + ZIP_FILE_EXTENSION
storage_dir_abs_path = Path(cm.workspace + "/" + PRODUCT_STORAGE_DIR_NAME)
target_dir_abs_path = Path(cm.workspace + "/" + cm.product_id + "/" + DISTRIBUTION_PATH)
storage_dist_abs_path = Path(storage_dir_abs_path / dist_name)
storage_zip_abs_path = Path(storage_dir_abs_path / zip_name)
configured_dist_storing_loc = Path(target_dir_abs_path / dist_name)
script_name = Path(WSO2SERVER)
script_path = Path(storage_dist_abs_path / script_name)
cm.extract_product(storage_dir_abs_path, storage_zip_abs_path)
cm.attach_jolokia_agent(script_path)
cm.copy_jar_file(Path(cm.database_config['sql_driver_location']), Path(storage_dist_abs_path / LIB_PATH))
if datasource_path is not None:
modify_datasources()
else:
logger.info("Datasource paths are not defined in the config file")
os.remove(str(storage_zip_abs_path))
cm.compress_distribution(configured_dist_storing_loc, storage_dir_abs_path)
cm.add_distribution_to_m2(storage_dir_abs_path, M2_PATH)
shutil.rmtree(configured_dist_storing_loc, onerror=cm.on_rm_error)
return database_names
except FileNotFoundError as e:
logger.error("Error occurred while finding files", exc_info=True)
except IOError as e:
logger.error("Error occurred while accessing files", exc_info=True)
except Exception as e:
logger.error("Error occurred while configuring the product", exc_info=True)
def build_source_without_tests(source_path):
"""Build the product-source.
"""
logger.info('Building the source skipping tests')
if sys.platform.startswith('win'):
subprocess.call(['mvn', 'clean', 'install', '-B', '-e','-Dmaven.test.skip=true'], shell=True, cwd=source_path)
else:
subprocess.call(['mvn', 'clean', 'install', '-B', '-e', '-Dmaven.test.skip=true'], cwd=source_path)
logger.info('Module build is completed. Module: ' + str(source_path))
def main():
try:
global logger
global dist_name
logger = cm.function_logger(logging.DEBUG, logging.DEBUG)
if sys.version_info < (3, 6):
raise Exception(
"To run run-intg-test.py script you must have Python 3.6 or latest. Current version info: " + sys.version_info)
cm.read_property_files()
if not cm.validate_property_readings():
raise Exception(
"Property file doesn't have mandatory key-value pair. Please verify the content of the property file "
"and the format")
# get properties assigned to local variables
pom_path = DIST_POM_PATH
engine = cm.db_engine.upper()
db_meta_data = get_db_meta_data(engine)
distribution_path = DISTRIBUTION_PATH
# construct the database configurations
cm.construct_db_config(db_meta_data)
# clone the repository
cm.clone_repo()
if cm.test_mode == "RELEASE":
cm.checkout_to_tag()
# product name retrieved from product pom files
dist_name = cm.get_dist_name(pom_path)
# build the product once without tests so that samples and required artifacts are available.
build_source_without_tests(cm.workspace + "/" + cm.product_id + "/")
cm.get_latest_released_dist()
elif cm.test_mode == "SNAPSHOT":
# product name retrieved from product pom files
dist_name = cm.get_dist_name(pom_path)
cm.build_snapshot_dist(distribution_path)
elif cm.test_mode == "WUM":
dist_name = cm.get_dist_name_wum()
# populate databases
db_names = configure_product()
if db_names is None or not db_names:
raise Exception("Failed the product configuring")
cm.setup_databases(db_names, db_meta_data)
# run integration tests
# Build common module
add_environmental_variables()
module_path = Path(cm.workspace + "/" + cm.product_id + "/" + 'modules/integration/tests-common')
logger.info('Building common module. Build path: '+ str(module_path) + ' \n')
cm.build_module(module_path)
intg_module_path = Path(cm.workspace + "/" + cm.product_id + "/" + INTEGRATION_PATH)
logger.info('Building integration module. Build path: '+ str(intg_module_path) + ' \n')
cm.build_module(intg_module_path)
save_test_output()
cm.create_output_property_fle()
except Exception as e:
logger.error("Error occurred while running the run-intg-test.py script", exc_info=True)
except BaseException as e:
logger.error("Error occurred while doing the configuration", exc_info=True)
if __name__ == "__main__":
main()
| 46.639752
| 127
| 0.671195
| 1,930
| 15,018
| 4.935233
| 0.192746
| 0.014488
| 0.030236
| 0.018898
| 0.328189
| 0.23937
| 0.200735
| 0.174908
| 0.136693
| 0.126824
| 0
| 0.001915
| 0.234852
| 15,018
| 321
| 128
| 46.785047
| 0.826995
| 0.152617
| 0
| 0.131148
| 0
| 0.004098
| 0.158385
| 0.05198
| 0
| 0
| 0
| 0.003115
| 0
| 1
| 0.036885
| false
| 0.028689
| 0.081967
| 0
| 0.127049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92eed01036cb07058175a69126f2f5a418891a9a
| 2,376
|
py
|
Python
|
src/pytest_notification/sound.py
|
rhpvorderman/pytest-notification
|
3f322ab04914f52525e1b07bc80537d5f9a00250
|
[
"MIT"
] | 2
|
2020-08-27T03:14:05.000Z
|
2020-10-24T17:17:36.000Z
|
src/pytest_notification/sound.py
|
rhpvorderman/pytest-notification
|
3f322ab04914f52525e1b07bc80537d5f9a00250
|
[
"MIT"
] | 5
|
2019-12-02T08:49:15.000Z
|
2020-06-22T08:38:34.000Z
|
src/pytest_notification/sound.py
|
rhpvorderman/pytest-notification
|
3f322ab04914f52525e1b07bc80537d5f9a00250
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019 Leiden University Medical Center
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import subprocess
import sys
from pathlib import Path
SOUNDS_DIR = (Path(__file__).parent / Path("sounds")).absolute()
DEFAULT_SUCCESS_SOUND = SOUNDS_DIR / Path("applause")
DEFAULT_FAIL_SOUND = SOUNDS_DIR / Path("buzzer")
def play_sound(sound_file: Path):
if sys.platform == "linux":
# paplay comes from PulseAudio and should be installed by default on
# most systems.
_play_sound_unix(sound_file.with_suffix(".oga"), program="paplay")
elif sys.platform == "darwin":
# afplay comes installed by default on macOS
_play_sound_unix(sound_file.with_suffix(".mp3"), program="afplay")
else:
# A windows implementation should be possible with the winsound
# implementation, but that does not play ogg audio.
raise NotImplementedError(
"Playing sounds not supported by pytest-notification on {}"
"".format(sys.platform))
def _play_sound_unix(sound_file: Path, program):
"""
Play a sound file on unix with the program.
:param sound_file: Path to the sound file.
:param program: Which program to use.
:return: No returns. Plays a sound file.
"""
# Play the sound non blocking, use Popen.
subprocess.Popen([program, str(sound_file)])
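# Minimal usage sketch (not part of the original module; assumes the bundled default sound files exist):
# if __name__ == "__main__":
#     play_sound(DEFAULT_SUCCESS_SOUND)  # plays applause on Linux/macOS, raises NotImplementedError elsewhere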
| 43.2
| 79
| 0.731481
| 336
| 2,376
| 5.08631
| 0.479167
| 0.047396
| 0.02282
| 0.031597
| 0.050322
| 0.037449
| 0.037449
| 0
| 0
| 0
| 0
| 0.002614
| 0.194865
| 2,376
| 54
| 80
| 44
| 0.890748
| 0.63931
| 0
| 0
| 0
| 0
| 0.134161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.176471
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92ef37eb449c4f50b5c90c7a720a5f53652a647c
| 420
|
py
|
Python
|
7KYU/next_prime.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 4
|
2021-07-17T22:48:03.000Z
|
2022-03-25T14:10:58.000Z
|
7KYU/next_prime.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | null | null | null |
7KYU/next_prime.py
|
yaznasivasai/python_codewars
|
25493591dde4649dc9c1ec3bece8191a3bed6818
|
[
"MIT"
] | 3
|
2021-06-14T14:18:16.000Z
|
2022-03-16T06:02:02.000Z
|
from math import sqrt
def is_simple(n: int) -> bool:
if n % 2 == 0 and n != 2:
return False
for i in range (3, int(sqrt(n)) + 2, 2):
if n % i == 0 and n != i:
return False
return True
def next_prime(n: int) -> int:
n += 1
if n <= 2:
return 2
else:
if n % 2 == 0:
n += 1
while not is_simple(n):
n += 2
return n
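# A few illustrative checks (assumed examples, not part of the original solution):
# next_prime(1) == 2, next_prime(5) == 7, next_prime(12) == 13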
| 20
| 44
| 0.435714
| 69
| 420
| 2.608696
| 0.405797
| 0.066667
| 0.066667
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059829
| 0.442857
| 420
| 21
| 45
| 20
| 0.709402
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92ef91238a4d28bed6389f80b7547828e84737ba
| 6,622
|
py
|
Python
|
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
|
manxueitp/cozmo-test
|
a91b1a4020544cb622bd67385f317931c095d2e8
|
[
"MIT"
] | null | null | null |
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
|
manxueitp/cozmo-test
|
a91b1a4020544cb622bd67385f317931c095d2e8
|
[
"MIT"
] | null | null | null |
cozmo_sdk_examples/if_this_then_that/ifttt_gmail.py
|
manxueitp/cozmo-test
|
a91b1a4020544cb622bd67385f317931c095d2e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 Anki, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''"If This Then That" Gmail example
This example demonstrates how "If This Then That" (http://ifttt.com) can be used
make Cozmo respond when a Gmail account receives an email. Instructions below
will lead you through setting up an applet on the IFTTT website. When the applet
trigger is called (which sends a web request received by the web server started
in this example), Cozmo will play an animation, speak the email sender's name and
show a mailbox image on his face.
Please place Cozmo on the charger for this example. When necessary, he will be
rolled off and back on.
Follow these steps to set up and run the example:
1) Provide a static IP, URL, or similar that can be reached from the If This
Then That server. One easy way to do this is with ngrok, which sets up
a secure tunnel to localhost running on your machine.
To set up ngrok:
a) Follow instructions here to download and install:
https://ngrok.com/download
b) Run this command to create a secure public URL for port 8080:
./ngrok http 8080
c) Note the HTTP forwarding address shown in the terminal (e.g., http://55e57164.ngrok.io).
You will use this address in your applet, below.
WARNING: Using ngrok exposes your local web server to the internet. See the ngrok
documentation for more information: https://ngrok.com/docs
2) Set up your applet on the "If This Then That" website.
a) Sign up and sign into https://ifttt.com
b) Create an applet: https://ifttt.com/create
c) Set up your trigger.
1. Click "this".
2. Select "Gmail" as your service. If prompted, click "Connect",
select your Gmail account, and click “Allow” to provide permissions
to IFTTT for your email account. Click "Done".
3. Under "Choose a Trigger", select “Any new email in inbox".
d) Set up your action.
1. Click “that".
2. Select “Maker" to set it as your action channel. Connect to the Maker channel if prompted.
3. Click “Make a web request" and fill out the fields as follows. Remember your publicly
accessible URL from above (e.g., http://55e57164.ngrok.io) and use it in the URL field,
followed by "/iftttGmail" as shown below:
URL: http://55e57164.ngrok.io/iftttGmail
Method: POST
Content Type: application/json
Body: {"FromAddress":"{{FromAddress}}"}
5. Click “Create Action" then “Finish".
3) Test your applet.
a) Run this script at the command line: ./ifttt_gmail.py
b) On ifttt.com, on your applet page, click “Check now”. See that IFTTT confirms that the applet
was checked.
c) Send an email to the Gmail account in your recipe
d) On your IFTTT applet webpage, again click “Check now”. This should cause IFTTT to detect that
the email was received and send a web request to the ifttt_gmail.py script.
e) In response to the ifttt web request, Cozmo should roll off the charger, raise and lower
his lift, announce the email, and then show a mailbox image on his face.
'''
import asyncio
import re
import sys
try:
from aiohttp import web
except ImportError:
sys.exit("Cannot import from aiohttp. Do `pip3 install --user aiohttp` to install")
import cozmo
from common import IFTTTRobot
app = web.Application()
async def serve_gmail(request):
'''Define an HTTP POST handler for receiving requests from If This Then That.
You may modify this method to change how Cozmo reacts to the email
being received.
'''
json_object = await request.json()
# Extract the name of the email sender.
from_email_address = json_object["FromAddress"]
# Use a regular expression to break apart pieces of the email address
match_object = re.search(r'([\w.]+)@([\w.]+)', from_email_address)
email_local_part = match_object.group(1)
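# Illustrative example with a hypothetical address: for "ada.lovelace@example.com",
# group(1) is "ada.lovelace" and group(2) is "example.com"; only the local part is spoken by Cozmo.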
robot = request.app['robot']
async def read_name():
try:
async with robot.perform_off_charger():
'''If necessary, Move Cozmo's Head and Lift to make it easy to see Cozmo's face.'''
await robot.get_in_position()
# First, have Cozmo play animation "ID_pokedB", which tells
# Cozmo to raise and lower his lift. To change the animation,
# you may replace "ID_pokedB" with another animation. Run
# remote_control_cozmo.py to see a list of animations.
await robot.play_anim(name='ID_pokedB').wait_for_completed()
# Next, have Cozmo speak the name of the email sender.
await robot.say_text("Email from " + email_local_part).wait_for_completed()
# Last, have Cozmo display an email image on his face.
robot.display_image_file_on_face("../face_images/ifttt_gmail.png")
except cozmo.RobotBusy:
cozmo.logger.warning("Robot was busy so didn't read email address: "+ from_email_address)
# Perform Cozmo's task in the background so the HTTP server responds immediately.
asyncio.ensure_future(read_name())
return web.Response(text="OK")
# Attach the function as an HTTP handler.
app.router.add_post('/iftttGmail', serve_gmail)
if __name__ == '__main__':
cozmo.setup_basic_logging()
cozmo.robot.Robot.drive_off_charger_on_connect = False
# Use our custom robot class with extra helper methods
cozmo.conn.CozmoConnection.robot_factory = IFTTTRobot
try:
sdk_conn = cozmo.connect_on_loop(app.loop)
# Wait for the robot to become available and add it to the app object.
app['robot'] = app.loop.run_until_complete(sdk_conn.wait_for_robot())
except cozmo.ConnectionError as e:
sys.exit("A connection error occurred: %s" % e)
web.run_app(app)
| 41.3875
| 105
| 0.67457
| 985
| 6,622
| 4.467005
| 0.35533
| 0.012727
| 0.011364
| 0.015909
| 0.048636
| 0.031818
| 0.011818
| 0
| 0
| 0
| 0
| 0.010126
| 0.254304
| 6,622
| 159
| 106
| 41.647799
| 0.880923
| 0.685744
| 0
| 0.078947
| 0
| 0
| 0.143097
| 0.016769
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.210526
| 0
| 0.236842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92f0c7d812707a316f1c04c4ec3e35722444b8b5
| 13,843
|
py
|
Python
|
plotutils.py
|
parkus/mypy
|
21043c559dca14abe7508e0f6b2f8053bf376bb8
|
[
"MIT"
] | 1
|
2015-11-06T06:27:59.000Z
|
2015-11-06T06:27:59.000Z
|
plotutils.py
|
parkus/mypy
|
21043c559dca14abe7508e0f6b2f8053bf376bb8
|
[
"MIT"
] | null | null | null |
plotutils.py
|
parkus/mypy
|
21043c559dca14abe7508e0f6b2f8053bf376bb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri May 30 17:15:27 2014
@author: Parke
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import matplotlib as mplot
import matplotlib.pyplot as plt
import mypy.my_numpy as mnp
dpi = 100
fullwidth = 10.0
halfwidth = 5.0
# use these with line.set_dashes and iterate through more linestyles than come with matplotlib
# consider ussing a ::2 slice for fewer
dashes = [[],
[30, 10],
[20, 8],
[10, 5],
[3, 2],
[30, 5, 3, 5, 10, 5, 3, 5],
[15] + [5, 3]*3 + [5],
[15] + [5, 3]*2 + [5],
[15] + [5, 3] + [5]]
def click_coords(fig=None, timeout=600.):
if fig is None:
fig = plt.gcf()
xy = []
def onclick(event):
if not event.inaxes:
fig.canvas.stop_event_loop()
else:
xy.append([event.xdata, event.ydata])
print("Gathering coordinates of mouse clicks. Click outside of the axes " \
"when done.")
cid = fig.canvas.mpl_connect('button_press_event', onclick)
fig.canvas.start_event_loop(timeout=timeout)
fig.canvas.mpl_disconnect(cid)
return np.array(xy)
def common_axes(fig, pos=None):
if pos is None:
bigax = fig.add_subplot(111)
else:
bigax = fig.add_axes(pos)
[bigax.spines[s].set_visible(False) for s in ['top', 'bottom', 'left', 'right']]
bigax.tick_params(labelleft=False, labelbottom=False, left='off', bottom='off')
bigax.set_zorder(-10)
return bigax
def log_frac(x, frac):
l0, l1 = list(map(np.log10, x))
ld = l1 - l0
l = ld*frac + l0
return 10**l
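# Illustrative example (assumed values): log_frac((1.0, 100.0), 0.5) returns 10.0,
# the point halfway between the endpoints in log space rather than the linear midpoint 50.5.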
def log2linear(x, errneg=None, errpos=None):
xl = 10**x
result = [xl]
if errneg is not None:
xn = xl - 10**(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = 10**(x + errpos) - xl
result.append(xp)
return result
def linear2log(x, errneg=None, errpos=None):
xl = np.log10(x)
result = [xl]
if errneg is not None:
xn = xl - np.log10(x - np.abs(errneg))
result.append(xn)
if errpos is not None:
xp = np.log10(x + errpos) - xl
result.append(xp)
return result
def step(*args, **kwargs):
edges, values = args[0], args[1]
# deal with potentially gappy 2-column bin specifications
edges = np.asarray(edges)
if edges.ndim == 2:
if np.any(edges[1:,0] < edges[:-1,1]):
raise ValueError('Some bins overlap')
if np.any(edges[1:,0] < edges[:-1,0]):
raise ValueError('Bins must be in increasing order.')
gaps = edges[1:,0] > edges[:-1,1]
edges = np.unique(edges)
if np.any(gaps):
values = np.insert(values, np.nonzero(gaps), np.nan)
edges = mnp.lace(edges[:-1], edges[1:])
values = mnp.lace(values, values)
args = list(args)
args[0], args[1] = edges, values
ax = kwargs.pop('ax', plt.gca())
return ax.plot(*args, **kwargs)
def point_along_line(x, y, xfrac=None, xlbl=None, scale='linear'):
if scale == 'log':
lx, ly = point_along_line(np.log10(x), np.log10(y), xfrac, None if xlbl is None else np.log10(xlbl), scale='linear')
return 10 ** lx, 10 ** ly
if xfrac is not None:
if xfrac == 0:
return x[0], y[0]
if xfrac == 1:
return x[-1], y[-1]
else:
d = np.cumsum(np.sqrt(np.diff(x)**2 + np.diff(y)**2))
d = np.insert(d, 0, 0)
f = d/d[-1]
xp, yp = [np.interp(xfrac, f, a) for a in [x,y]]
return xp, yp
if xlbl is not None:
return xlbl, np.interp(xlbl, x, y)
def textSize(ax_or_fig=None, coordinate='data'):
"""
Return x & y scale factors for converting text sizes in points to another coordinate. Useful for properly spacing
text labels and such when you need to know sizes before the text is made (otherwise you can use textBoxSize).
Coordinate can be 'data', 'axes', or 'figure'.
If data coordinates are requested and the data is plotted on a log scale, then the factor will be given in dex.
"""
if ax_or_fig is None:
fig = plt.gcf()
ax = fig.gca()
else:
if isinstance(ax_or_fig, plt.Figure):
fig = ax_or_fig
ax = fig.gca()
elif isinstance(ax_or_fig, plt.Axes):
ax = ax_or_fig
fig = ax.get_figure()
else:
raise TypeError('ax_or_fig must be a Figure or Axes instance, if given.')
w_fig_in, h_fig_in = ax.get_figure().get_size_inches()
if coordinate == 'fig':
return 1.0/(w_fig_in*72), 1.0/(h_fig_in*72)
w_ax_norm, h_ax_norm = ax.get_position().size
w_ax_in = w_ax_norm * w_fig_in
h_ax_in = h_ax_norm * h_fig_in
w_ax_pts, h_ax_pts = w_ax_in*72, h_ax_in*72
if coordinate == 'axes':
return 1.0/w_ax_pts, 1.0/h_ax_pts
if coordinate == 'data':
xlim = ax.get_xlim()
ylim = ax.get_ylim()
if ax.get_xscale() == 'log': xlim = np.log10(xlim)
if ax.get_yscale() == 'log': ylim = np.log10(ylim)
w_ax_data = xlim[1] - xlim[0]
h_ax_data = ylim[1] - ylim[0]
return w_ax_data/w_ax_pts, h_ax_data/h_ax_pts
def tight_axis_limits(ax=None, xory='both', margin=0.05):
if ax is None: ax = plt.gca()
def newlim(oldlim):
delta = abs(oldlim[1] - oldlim[0])
pad = delta*margin
if oldlim[1] > oldlim[0]:
return (oldlim[0] - pad, oldlim[1] + pad)
else:
return (oldlim[0] + pad, oldlim[1] - pad)
def newlim_log(oldlim):
loglim = [np.log10(l) for l in oldlim]
newloglim = newlim(loglim)
return (10.0**newloglim[0], 10.0**newloglim[1])
def newlim_either(oldlim,axlim,scale):
if axlim[1] < axlim [0]: oldlim = oldlim[::-1]
if scale == 'linear':
return newlim(oldlim)
elif scale == 'log':
return newlim_log(oldlim)
elif scale == 'symlog':
raise NotImplementedError('Past Parke to future Parke, you didn\'t write an implementation for symlog '
'scaled axes.')
if xory == 'x' or xory == 'both':
datalim = ax.dataLim.extents[[0,2]]
axlim = ax.get_xlim()
scale = ax.get_xscale()
ax.set_xlim(newlim_either(datalim,axlim,scale))
if xory == 'y' or xory == 'both':
datalim = ax.dataLim.extents[[1,3]]
axlim = ax.get_ylim()
scale = ax.get_yscale()
ax.set_ylim(newlim_either(datalim,axlim,scale))
#TODO: discard this function?
def standard_figure(app, slideAR=1.6, height=1.0):
"""Generate a figure of standard size for publishing.
implemented values for app (application) are:
'fullslide'
height is the fractional height of the figure relative to the "standard"
height. For slides the standard is the full height of a slide.
returns the figure object and default font size
"""
if app == 'fullslide':
fontsize = 20
figsize = [fullwidth, fullwidth/slideAR*height]
fig = mplot.pyplot.figure(figsize=figsize, dpi=dpi)
mplot.rcParams.update({'font.size': fontsize})
return fig, fontsize
def pcolor_reg(x, y, z, **kw):
"""
Similar to `pcolor`, but assume that the grid is uniform,
and do plotting with the (much faster) `imshow` function.
"""
x, y, z = np.asarray(x), np.asarray(y), np.asarray(z)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should be 1-dimensional")
if z.ndim != 2 or z.shape != (y.size, x.size):
raise ValueError("z.shape should be (y.size, x.size)")
dx = np.diff(x)
dy = np.diff(y)
if not np.allclose(dx, dx[0], 1e-2) or not np.allclose(dy, dy[0], 1e-2):
raise ValueError("The grid must be uniform")
if np.issubdtype(z.dtype, np.complexfloating):
zp = np.zeros(z.shape, float)
zp[...] = z[...]
z = zp
plt.imshow(z, origin='lower',
extent=[x.min(), x.max(), y.min(), y.max()],
interpolation='nearest',
aspect='auto',
**kw)
plt.axis('tight')
def errorpoly(x, y, yerr, fmt=None, ecolor=None, ealpha=0.5, ax=None, **kw):
if ax is None: ax = plt.gca()
p = ax.plot(x, y, **kw) if fmt is None else ax.plot(x, y, fmt, **kw)
if len(yerr.shape) == 2:
ylo = y - yerr[0,:]
yhi = y + yerr[1,:]
else:
ylo, yhi = y - yerr, y + yerr
if ecolor is None: ecolor = p[0].get_color()
# deal with matplotlib sometimes not showing polygon when it extends beyond plot range
xlim = ax.get_xlim()
inrange = mnp.inranges(x, xlim)
if not np.all(inrange):
n = np.sum(inrange)
yends = np.interp(xlim, x, y)
yloends = np.interp(xlim, x, ylo)
yhiends = np.interp(xlim, x, yhi)
x = np.insert(x[inrange], [0, n], xlim)
y = np.insert(y[inrange], [0, n], yends)
ylo = np.insert(ylo[inrange], [0, n], yloends)
yhi = np.insert(yhi[inrange], [0, n], yhiends)
f = ax.fill_between(x,ylo,yhi,color=ecolor,alpha=ealpha)
return p[0],f
def onscreen_pres(mpl, screenwidth=1200):
"""
Set matplotlibrc values so that plots are readable as they are created
and maximized for an audience far from a screen.
Parameters
----------
mpl : module
Current matplotlib module. Use 'import matplotlib as mpl'.
screenwidth : int
Width of the screen in question in pixels.
Returns
-------
None
"""
mpl.rcParams['lines.linewidth'] = 2
fontsize = round(14 / (800.0 / screenwidth))
mpl.rcParams['font.size'] = fontsize
def textBoxSize(txt, transformation=None, figure=None):
"""Get the width and height of a text object's bounding box transformed to the desired coordinates. Defaults to
figure coordinates if transformation is None."""
fig= txt.get_figure() if figure is None else figure
if transformation is None:
transformation = fig.transFigure
coordConvert = transformation.inverted().transform
bboxDisp = txt.get_window_extent(fig.canvas.renderer)
bboxConv = coordConvert(bboxDisp)
w = bboxConv[1,0] - bboxConv[0,0]
h = bboxConv[1,1] - bboxConv[0,1]
return w, h
def stars3d(ra, dec, dist, T=5000.0, r=1.0, labels='', view=None, size=(800,800), txt_scale=1.0):
"""
Make a 3D diagram of stars positions relative to the Sun, with
semi-accurate colors and distances as desired. Coordinates must be in
degrees. Distance is assumed to be in pc (for axes labels).
Meant to be used with only a handful of stars.
"""
from mayavi import mlab
from color.maps import true_temp
n = len(ra)
dec, ra = dec*np.pi/180.0, ra*np.pi/180.0
makearr = lambda v: np.array([v] * n) if np.isscalar(v) else v
T, r, labels = list(map(makearr, (T, r, labels)))
# add the sun
ra, dec, dist = list(map(np.append, (ra, dec, dist), (0.0, 0.0, 0.0)))
r, T, labels = list(map(np.append, (r, T, labels), (1.0, 5780.0, 'Sun')))
# get xyz coordinates
z = dist * np.sin(dec)
h = dist * np.cos(dec)
x = h * np.cos(ra)
y = h * np.sin(ra)
# make figure
fig = mlab.figure(bgcolor=(0,0,0), fgcolor=(1,1,1), size=size)
# plot lines down to the dec=0 plane for all but the sun
lines = []
for x1, y1, z1 in list(zip(x, y, z))[:-1]:
xx, yy, zz = [x1, x1], [y1, y1], [0.0, z1]
line = mlab.plot3d(xx, yy, zz, color=(0.7,0.7,0.7), line_width=0.5,
figure=fig)
lines.append(line)
# plot spheres
r_factor = np.max(dist) / 30.0
pts = mlab.quiver3d(x, y, z, r, r, r, scalars=T, mode='sphere',
scale_factor=r_factor, figure=fig, resolution=100)
pts.glyph.color_mode = 'color_by_scalar'
# center the glyphs on the data point
pts.glyph.glyph_source.glyph_source.center = [0, 0, 0]
# set a temperature colormap
cmap = true_temp(T)
pts.module_manager.scalar_lut_manager.lut.table = cmap
# set the camera view
mlab.view(focalpoint=(0.0, 0.0, 0.0), figure=fig)
if view is not None:
mlab.view(*view, figure=fig)
## add labels
# unit vec to camera
view = mlab.view()
az, el = view[:2]
hc = np.sin(el * np.pi / 180.0)
xc = hc * np.cos(az * np.pi / 180.0)
yc = hc * np.sin(az * np.pi / 180.0)
zc = -np.cos(el * np.pi / 180.0)
# unit vec orthoganal to camera
if xc**2 + yc**2 == 0.0:
xoff = 1.0
yoff = 0.0
zoff = 0.0
else:
xoff = yc / np.sqrt(xc**2 + yc**2)
yoff = np.sqrt(1.0 - xoff**2)
zoff = 0.0
# xoff, yoff, zoff = xc, yc, zc
# scale orthogonal vec by sphere size
r_label = 1.0 * r_factor
xoff, yoff, zoff = [r_label * v for v in [xoff, yoff, zoff]]
# plot labels
size = r_factor * txt_scale * 0.75
for xx, yy, zz, label in zip(x, y, z, labels):
mlab.text3d(xx + xoff, yy + yoff, zz + zoff, label, figure=fig,
color=(1,1,1), scale=size)
## add translucent dec=0 surface
n = 101
t = np.linspace(0.0, 2*np.pi, n)
r = np.max(dist * np.cos(dec))
x, y = r*np.cos(t), r*np.sin(t)
z = np.zeros(n+1)
x, y = [np.insert(a, 0, 0.0) for a in [x,y]]
triangles = [(0, i, i + 1) for i in range(1, n)]
mlab.triangular_mesh(x, y, z, triangles, color=(1,1,1), opacity=0.3, figure=fig)
## add ra=0 line
line = mlab.plot3d([0, r], [0, 0], [0, 0], color=(1,1,1), line_width=1, figure=fig)
rtxt = '{:.1f} pc'.format(r)
orientation=np.array([180.0, 180.0, 0.0])
mlab.text3d(r, 0, 0, rtxt, figure=fig, scale=size*1.25, orient_to_camera=False, orientation=orientation)
if view is not None:
mlab.view(*view, figure=fig)
return fig
| 31.461364
| 117
| 0.582388
| 2,185
| 13,843
| 3.623341
| 0.230664
| 0.007579
| 0.005305
| 0.003537
| 0.100164
| 0.073639
| 0.059871
| 0.035114
| 0.030062
| 0.020462
| 0
| 0.039837
| 0.274651
| 13,843
| 439
| 118
| 31.53303
| 0.748631
| 0.162682
| 0
| 0.109966
| 0
| 0.061856
| 0.045223
| 0
| 0
| 0
| 0
| 0.002278
| 0
| 1
| 0.065292
| false
| 0
| 0.024055
| 0
| 0.168385
| 0.006873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92f9c4373d43c67eefcb0f04052b7d238d59ad11
| 2,297
|
py
|
Python
|
integrations/tensorflow/bindings/python/pyiree/tf/compiler/saved_model_test.py
|
rise-lang/iree
|
46ad3fe392d38ce3df6eff7826cc1ab331a40b72
|
[
"Apache-2.0"
] | 1
|
2020-08-13T09:25:59.000Z
|
2020-08-13T09:25:59.000Z
|
integrations/tensorflow/bindings/python/pyiree/tf/compiler/saved_model_test.py
|
rise-lang/iree
|
46ad3fe392d38ce3df6eff7826cc1ab331a40b72
|
[
"Apache-2.0"
] | null | null | null |
integrations/tensorflow/bindings/python/pyiree/tf/compiler/saved_model_test.py
|
rise-lang/iree
|
46ad3fe392d38ce3df6eff7826cc1ab331a40b72
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import importlib
import os
import sys
import tempfile
from pyiree.tf import compiler
# Dynamically import tensorflow.
try:
# Use a dynamic import so as to avoid hermetic dependency analysis
# (i.e. we only want the tensorflow from the environment).
tf = importlib.import_module("tensorflow")
# Just in case if linked against a pre-V2 defaulted version.
if hasattr(tf, "enable_v2_behavior"):
tf.enable_v2_behavior()
tf = tf.compat.v2
except ImportError:
print("Not running tests because tensorflow is not available")
sys.exit(0)
class StatelessModule(tf.Module):
def __init__(self):
pass
@tf.function(input_signature=[
tf.TensorSpec([4], tf.float32),
tf.TensorSpec([4], tf.float32)
])
def add(self, a, b):
return tf.tanh(a + b)
class RuntimeTest(tf.test.TestCase):
def testLoadSavedModelToXlaPipeline(self):
"""Tests that a basic saved model to XLA workflow grossly functions.
This is largely here to verify that everything that needs to be linked in is linked,
and that the stages are not no-ops, etc.
"""
with tempfile.TemporaryDirectory() as temp_dir:
sm_dir = os.path.join(temp_dir, "simple.sm")
print("Saving to:", sm_dir)
my_module = StatelessModule()
options = tf.saved_model.SaveOptions(save_debug_info=True)
tf.saved_model.save(my_module, sm_dir, options=options)
# Load it up.
input_module = compiler.tf_load_saved_model(sm_dir)
xla_asm = input_module.to_asm()
print("XLA ASM:", xla_asm)
self.assertRegex(xla_asm, "mhlo.tanh")
if __name__ == "__main__":
tf.test.main()
| 29.831169
| 80
| 0.727035
| 337
| 2,297
| 4.79822
| 0.510386
| 0.037106
| 0.029685
| 0.01979
| 0.050711
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01016
| 0.185895
| 2,297
| 76
| 81
| 30.223684
| 0.854545
| 0.415324
| 0
| 0
| 0
| 0
| 0.095639
| 0
| 0
| 0
| 0
| 0
| 0.025641
| 1
| 0.076923
| false
| 0.025641
| 0.25641
| 0.025641
| 0.410256
| 0.102564
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92fa506f1dc831d005f72a65f033e46f94fe54e8
| 9,084
|
py
|
Python
|
iRep/gc_skew.py
|
scottdaniel/iRep
|
5d31688eeeab057ce54f39698e3f9cc5738e05ad
|
[
"MIT"
] | 55
|
2016-06-17T17:31:48.000Z
|
2022-01-19T08:24:43.000Z
|
iRep/gc_skew.py
|
scottdaniel/iRep
|
5d31688eeeab057ce54f39698e3f9cc5738e05ad
|
[
"MIT"
] | 35
|
2016-06-24T17:19:04.000Z
|
2021-11-06T16:08:43.000Z
|
iRep/gc_skew.py
|
scottdaniel/iRep
|
5d31688eeeab057ce54f39698e3f9cc5738e05ad
|
[
"MIT"
] | 14
|
2016-07-21T17:34:16.000Z
|
2020-03-18T03:45:55.000Z
|
#!/usr/bin/env python3
"""
script for calculating gc skew
Chris Brown
ctb@berkeley.edu
"""
# python modules
import os
import sys
import argparse
import numpy as np
from scipy import signal
from itertools import cycle, product
# plotting modules
from matplotlib import use as mplUse
mplUse('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
plt.rcParams['pdf.fonttype'] = 42
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
# ctb
from ctbBio.fasta import iterate_fasta as parse_fasta
def plot_two(title, subtitle, A, B, labels, legend, vert = False):
"""
plot with different y axes
title = title for chart
A = data for left axis [[x], [y]]
B = data for right axis
labels = [left label, right label, x label]
legend = [[left legend], [right legend]]
"""
fig, ax1 = plt.subplots()
colors = ['0.75', 'b', 'r', 'c', 'y', 'm', 'k', 'g']
a_colors = cycle(colors)
b_colors = cycle(colors[::-1])
a_label = cycle(legend[0])
b_label = cycle(legend[1])
# plot left axis and x - axis
for a in A:
x, y = a
ax1.set_ylabel(labels[0], labelpad = 3)
ax1.set_xlabel(labels[-1])
ax1.plot(x, y, c = next(a_colors), marker = 'o', ms = 4, label = next(a_label))
# add vertical lines
if vert is not False:
for i in vert:
x, c = i
ax1.axvline(x = x, c = c, label = next(a_label), linewidth = 2)
# plot right axis
ax2 = ax1.twinx()
for b in B:
x, y = b
ax2.set_ylabel(labels[1], labelpad = 8)
ax2.plot(x, y, c = next(b_colors), linewidth = 2, label = next(b_label))
xmin = min([min(i[1]) for i in A] + [min(i[0]) for i in B])
xmax = max([max(i[0]) for i in A] + [max(i[0]) for i in B])
ax2.set_xlim(xmin, xmax)
# title
plt.suptitle(title, fontsize = 16)
plt.title(subtitle, fontsize = 10)
# legend
ax1.legend(loc = 'upper left', \
bbox_to_anchor=(0.55, -0.125), \
prop = {'size':8}, \
framealpha = 0.0
)
plt.legend(loc = 'upper right', \
bbox_to_anchor=(0.45, -0.125), \
prop = {'size':8}, \
framealpha = 0.0\
)
# save
pdf = PdfPages('%s.pdf' % title.replace(' ', '_'))
pdf.savefig(bbox_inches = 'tight')
plt.close()
pdf.close()
def check_peaks(peaks, length):
"""
select pair of min and max that are not too close or
too far apart and have greatest y distance between one another
"""
# if ori/ter peaks are too close or too far apart, they are probably wrong
closest, farthest = int(length * float(0.45)), int(length * float(0.55))
pairs = []
for pair in list(product(*peaks)):
### added this to make sure gets origin and ter right
tr, pk = sorted(list(pair), key = lambda x: x[1], reverse = False) # trough and peak
a = (tr[0] - pk[0]) % length
b = (pk[0] - tr[0]) % length
pt = abs(tr[1] - pk[1]) # distance between values
if (a <= farthest and a >= closest) or (b <=farthest and b >= closest):
pairs.append([pt, tr, pk])
if len(pairs) == 0:
return [False, False]
pt, tr, pk = sorted(pairs, reverse = True)[0]
return [tr[0], pk[0]]
def find_ori_ter(c_skew, length):
"""
find origin and terminus of replication based on
cumulative GC Skew
"""
# find origin and terminus of replication based on
# cumulative gc skew min and max peaks
c_skew_min = signal.argrelextrema(np.asarray(c_skew[1]), np.less, order = 1)[0].tolist()
c_skew_max = signal.argrelextrema(np.asarray(c_skew[1]), np.greater, order = 1)[0].tolist()
# return False if no peaks were detected
if len(c_skew_min) == 0 or len(c_skew_max) == 0:
return [False, False]
else:
c_skew_min = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_min]
c_skew_max = [[c_skew[0][i], c_skew[1][i]] for i in c_skew_max]
ori, ter = check_peaks([c_skew_min, c_skew_max], length)
return ori, ter
def gc_skew(name, length, seq, window, slide, plot_skew):
"""
calculate gc skew and cumulative sum of gc skew over sequence windows
gc skew = ((G - C) / (G + C)) * window size * genome length
"""
# convert to G - C
replacements = {'G':1, 'C':-1, 'A':0, 'T':0, 'N':0}
gmc = [] # G - C
for base in seq:
try:
gmc.append(replacements[base])
except:
gmc.append(0)
# convert to G + C
gpc = [abs(i) for i in gmc] # G + C
# calculate sliding windows for (G - C) and (G + C)
weights = np.ones(window)/window
gmc = [[i, c] for i, c in enumerate(signal.fftconvolve(gmc, weights, 'same').tolist())]
gpc = [[i, c] for i, c in enumerate(signal.fftconvolve(gpc, weights, 'same').tolist())]
# calculate gc skew and cumulative gc skew sum
skew = [[], []] # x and y for gc skew
c_skew = [[], []] # x and y for gc skew cumulative sums
cs = 0 # cumulative sum
# select windows to use based on slide
for i, m in gmc[0::slide]:
p = gpc[i][1]
if p == 0:
gcs = 0
else:
gcs = m/p
cs += gcs
skew[0].append(i)
c_skew[0].append(i)
skew[1].append(gcs)
c_skew[1].append(cs)
ori, ter = find_ori_ter(c_skew, length)
# plot data
if plot_skew is True:
title = '%s GC Skew' % (name)
subtitle = '(window = %s, slide = %s)' % (window, slide)
labels = ['GC Skew', 'Cumulative GC Skew', 'Position on Genome (bp)']
# remove some points for plotting (approx. 1,000 datapoints)
N = int(len(skew[0])/1000)
if N != 0:
skew = [skew[0][0::N], skew[1][0::N]]
if ori is False:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0]], [labels[1]]])
else:
plot_two(title, subtitle, [skew], [c_skew], labels, \
[[labels[0], 'Ori:%s' % ('{:,}'.format(ori)), \
'Ter:%s' % ('{:,}'.format(ter))], [labels[1]]], \
vert = [(ori, 'r'), (ter, 'b')])
return ori, ter, skew, c_skew
def parse_genomes(fastas, single):
"""
generator for parsing fastas
if single is True, combine sequences in multifasta file
"""
if single is True:
for genome in fastas:
sequence = []
for seq in parse_fasta(genome):
sequence.extend(list(seq[1].upper()))
yield (genome.name.rsplit('.', 1)[0], len(sequence), sequence)
else:
for genome in fastas:
for seq in parse_fasta(genome):
ID = seq[0].split('>', 1)[1].split()[0]
yield (ID, len(seq[1]), list(seq[1].upper()))
def open_files(files):
"""
open files in list, use stdin if first
item in list is '-'
"""
if files is None:
return files
if files[0] == '-':
return (sys.stdin)
return (open(i) for i in files)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = \
'# calculate gc skew and find Ori and Ter of replication')
parser.add_argument(\
'-f', nargs = '*', action = 'store', required = True, \
help = 'fasta(s)')
parser.add_argument(\
'-l', default = False, type = int, \
help = 'minimum contig length (default = 10 x window)')
parser.add_argument(\
'-w', default = 1000, type = int, \
help = 'window length (default = 1000)')
parser.add_argument(\
'-s', default = 10, type = int, \
help = 'slide length (default = 10)')
parser.add_argument(\
'--single', action = 'store_true', \
help = 'combine multi-fasta sequences into single genome')
parser.add_argument(\
'--no-plot', action = 'store_false', \
help = 'do not generate plots, print GC Skew to stdout')
args = vars(parser.parse_args())
fastas = open_files(args['f'])
single, plot_skew = args['single'], args['no_plot']
window, slide = args['w'], args['s']
min_len = args['l']
if min_len is False:
min_len = 10 * window
for name, length, seq in parse_genomes(fastas, single):
if length < min_len:
print('%s: Too Short' % (name), file=sys.stderr)
continue
ori, ter, skew, c_skew = gc_skew(name, length, seq, window, slide, plot_skew)
if ori == False:
ori, ter = 'n/a', 'n/a'
else:
ori, ter = '{:,}'.format(ori), '{:,}'.format(ter)
print('%s -> Origin: %s Terminus: %s' \
% (name, ori, ter), file=sys.stderr)
if plot_skew is False:
print('\t'.join(['# Name', 'Position', 'GC Skew', 'Cumulative GC Skew']))
for i, pos in enumerate(skew[0]):
out = [name, pos, skew[1][i], c_skew[1][i]]
print('\t'.join([str(i) for i in out]))
| 36.191235
| 95
| 0.55295
| 1,308
| 9,084
| 3.762232
| 0.228593
| 0.026417
| 0.012193
| 0.007112
| 0.178622
| 0.150986
| 0.112172
| 0.104857
| 0.080065
| 0.050396
| 0
| 0.022972
| 0.295575
| 9,084
| 250
| 96
| 36.336
| 0.746054
| 0.161052
| 0
| 0.114754
| 0
| 0
| 0.089092
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.060109
| 0
| 0.136612
| 0.027322
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92fa730397bfd4949cfd5d8aa12c70a6b5cb5576
| 2,429
|
py
|
Python
|
examples/send_governance_vote_transaction.py
|
Algofiorg/algofi-py-sdk
|
6100a6726d36db4d4d3287064f0ad1d0b9a05e03
|
[
"MIT"
] | 38
|
2021-12-30T02:32:57.000Z
|
2022-03-23T22:09:16.000Z
|
examples/send_governance_vote_transaction.py
|
Algofiorg/algofi-py-sdk
|
6100a6726d36db4d4d3287064f0ad1d0b9a05e03
|
[
"MIT"
] | 4
|
2021-11-03T00:14:46.000Z
|
2022-03-28T02:17:33.000Z
|
examples/send_governance_vote_transaction.py
|
Algofiorg/algofi-py-sdk
|
6100a6726d36db4d4d3287064f0ad1d0b9a05e03
|
[
"MIT"
] | 8
|
2021-12-15T05:29:55.000Z
|
2022-02-08T03:45:11.000Z
|
# This sample is provided for demonstration purposes only.
# It is not intended for production use.
# This example does not constitute trading advice.
import os
from dotenv import dotenv_values
from algosdk import mnemonic, account
from algofi.v1.asset import Asset
from algofi.v1.client import AlgofiTestnetClient, AlgofiMainnetClient
from algofi.utils import get_ordered_symbols, prepare_payment_transaction, get_new_account
from example_utils import print_market_state, print_user_state
### Run setup.py before proceeding. Make sure the .env file is set with mnemonic + storage_mnemonic.
# Hardcoding account keys is not a great practice. This is for demonstration purposes only.
# See the README & Docs for alternative signing methods.
my_path = os.path.abspath(os.path.dirname(__file__))
ENV_PATH = os.path.join(my_path, ".env")
# load user passphrase
user = dotenv_values(ENV_PATH)
sender = mnemonic.to_public_key(user['mnemonic'])
key = mnemonic.to_private_key(user['mnemonic'])
# IS_MAINNET
IS_MAINNET = False
client = AlgofiMainnetClient(user_address=sender) if IS_MAINNET else AlgofiTestnetClient(user_address=sender)
# NOTE: Get the live governance address at https://governance.algorand.foundation/api/periods/
# under "sign_up_address" for the relevant governance period
# Specify your vote according to the formats that are permissible in the Algorand Foundation Spec
# https://github.com/algorandfoundation/governance/blob/main/af-gov1-spec.md
# Get the idx, vote choices based on the relevant voting session from https://governance.algorand.foundation/api/periods/
address = sender
governance_address = ""
vote_note = b'af/gov1:j[6,"a","c"]' # NOTE: an example, not to be used in live voting necessarily
vault_address = client.manager.get_storage_address(address)
print("~"*100)
print("Processing send_governance_vote_transaction transaction for vault address " + vault_address)
print("~"*100)
txn = client.prepare_send_governance_vote_transactions(governance_address, note=vote_note, address=address)
txn.sign_with_private_key(sender, key)
txn.submit(client.algod, wait=True)
# After sending, check your vote at
# https://governance.algorand.foundation/api/periods/<governance-period-slug>/governors/<vault_address>
# to confirm successful vote in voting session
# print final state
print("~"*100)
print("Final State")
print("Sent governance transaction with note: " + str(vote_note))
print("~"*100)
| 42.614035
| 121
| 0.799918
| 350
| 2,429
| 5.4
| 0.434286
| 0.038095
| 0.036508
| 0.052381
| 0.07037
| 0.07037
| 0.047619
| 0
| 0
| 0
| 0
| 0.007863
| 0.109922
| 2,429
| 57
| 122
| 42.614035
| 0.866327
| 0.461095
| 0
| 0.142857
| 0
| 0
| 0.130334
| 0.024825
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92fa7f11780de4e7d336cb67c51c29ac5c8fbc36
| 9,059
|
py
|
Python
|
bid/inventoryClient.py
|
franklx/SOAPpy-py3
|
f25afba322e9300ba4ebdd281118b629ca63ba24
|
[
"BSD-3-Clause"
] | 7
|
2018-01-03T18:24:43.000Z
|
2022-03-07T04:34:01.000Z
|
bid/inventoryClient.py
|
franklx/SOAPpy-py3
|
f25afba322e9300ba4ebdd281118b629ca63ba24
|
[
"BSD-3-Clause"
] | null | null | null |
bid/inventoryClient.py
|
franklx/SOAPpy-py3
|
f25afba322e9300ba4ebdd281118b629ca63ba24
|
[
"BSD-3-Clause"
] | 18
|
2018-08-06T11:30:16.000Z
|
2022-03-09T11:24:24.000Z
|
#!/usr/bin/env python
import getopt
import sys
import string
import re
import time
sys.path.insert(1,"..")
from SOAPpy import SOAP
import traceback
DEFAULT_SERVERS_FILE = './inventory.servers'
DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping')
def usage (error = None):
sys.stdout = sys.stderr
if error != None:
print(error)
print("""usage: %s [options] [server ...]
If a long option shows an argument is mandatory, it's mandatory for the
equivalent short option also.
-?, --help display this usage
-d, --debug turn on debugging in the SOAP library
-i, --invert test servers *not* in the list of servers given
-m, --method=METHOD#[,METHOD#...]
call only the given methods, specify a METHOD# of ?
for the list of method numbers
-o, --output=TYPE turn on output, TYPE is one or more of s(uccess),
f(ailure), n(ot implemented), F(ailed (as expected)),
a(ll)
[f]
-s, --servers=FILE use FILE as list of servers to test [%s]
-t, --stacktrace print a stack trace on each unexpected failure
-T, --always-stacktrace
print a stack trace on any failure
""" % (sys.argv[0], DEFAULT_SERVERS_FILE), end=' ')
sys.exit (0)
def methodUsage ():
sys.stdout = sys.stderr
print("Methods are specified by number. Multiple methods can be " \
"specified using a\ncomma-separated list of numbers or ranges. " \
"For example 1,4-6,8 specifies\nmethods 1, 4, 5, 6, and 8.\n")
print("The available methods are:\n")
half = (len (DEFAULT_METHODS) + 1) // 2
for i in range (half):
print("%4d. %-25s" % (i + 1, DEFAULT_METHODS[i]), end=' ')
if i + half < len (DEFAULT_METHODS):
print("%4d. %-25s" % (i + 1 + half, DEFAULT_METHODS[i + half]), end=' ')
print()
sys.exit (0)
def readServers (file):
servers = []
f = open (file, 'r')
while 1:
line = f.readline ()
if line == '':
break
if line[0] in ('#', '\n') or line[0] in string.whitespace:
continue
cur = {'nonfunctional': {}}
tag = None
servers.append (cur)
while 1:
if line[0] in string.whitespace:
if tag == 'nonfunctional':
value = method + ' ' + cur[tag][method]
else:
value = cur[tag]
value += ' ' + line.strip ()
else:
tag, value = line.split (':', 1)
tag = tag.strip ().lower ()
value = value.strip ()
if value[0] == '"' and value[-1] == '"':
value = value[1:-1]
if tag == 'nonfunctional':
value = value.split (' ', 1) + ['']
method = value[0]
cur[tag][method] = value[1]
else:
cur[tag] = value
line = f.readline ()
if line == '' or line[0] == '\n':
break
return servers
def str2list (s):
l = {}
for i in s.split (','):
if i.find ('-') != -1:
i = i.split ('-')
for i in range (int (i[0]),int (i[1]) + 1):
l[i] = 1
else:
l[int (i)] = 1
l = list(l.keys ())
l.sort ()
return l
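# Illustrative example: str2list('1,4-6,8') returns [1, 4, 5, 6, 8],
# matching the format described in methodUsage() above.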
def SimpleBuy(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'SimpleBuy'})
return serv.SimpleBuy(ProductName="widget", Quantity = 50, Address = "this is my address") #JHawk, Phalanx require this order of params
def RequestForQuote(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'RequestForQuote'})
return serv.RequestForQuote(Quantity=3, ProductName = "thing") # for Phalanx, JHawk
def Buy(serv, sa, epname):
import copy
serv = serv._sa (sa % {'methodname':'Buy'})
billTo_d = {"name":"Buyer One", "address":"1 1st Street",
"city":"New York", "state":"NY", "zipCode":"10000"}
shipTo_d = {"name":"Buyer One ", "address":"1 1st Street ",
"city":"New York ", "state":"NY ", "zipCode":"10000 "}
for k,v in list(shipTo_d.items()):
shipTo_d[k] = v[:-1]
itemd1 = SOAP.structType( {"name":"widg1","quantity":200,"price":SOAP.decimalType(45.99), "_typename":"LineItem"})
itemd2 = SOAP.structType( {"name":"widg2","quantity":400,"price":SOAP.decimalType(33.45), "_typename":"LineItem"})
items_d = SOAP.arrayType( [itemd1, itemd2] )
items_d._ns = "http://www.soapinterop.org/Bid"
po_d = SOAP.structType( data = {"poID":"myord","createDate":SOAP.dateTimeType(),"shipTo":shipTo_d, "billTo":billTo_d, "items":items_d})
try:
# it's called PO by MST (MS SOAP Toolkit), JHawk (.NET Remoting),
# Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft,
# HP, EasySoap, and Jake (Frontier). [Actzero accepts either]
return serv.Buy(PO=po_d)
except:
# called PurchaseOrder by KeithBa
return serv.Buy(PurchaseOrder=po_d)
def Ping(serv, sa, epname):
serv = serv._sa (sa % {'methodname':'Ping'})
return serv.Ping()
def main():
servers = DEFAULT_SERVERS_FILE
methodnums = None
output = 'f'
invert = 0
succeed = 0
printtrace = 0
stats = 1
total = 0
fail = 0
failok = 0
notimp = 0
try:
opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t',
['help', 'method', 'debug', 'invert',
'output', 'servers='])
for opt, arg in opts:
if opt in ('-?', '--help'):
usage ()
elif opt in ('-d', '--debug'):
SOAP.Config.debug = 1
elif opt in ('-i', '--invert'):
invert = 1
elif opt in ('-m', '--method'):
if arg == '?':
methodUsage ()
methodnums = str2list (arg)
elif opt in ('-o', '--output'):
output = arg
elif opt in ('-s', '--servers'):
servers = arg
else:
raise AttributeError("Recognized but unimplemented option `%s'" % opt)
except SystemExit:
raise
except:
usage (sys.exc_info ()[1])
if 'a' in output:
output = 'fFns'
servers = readServers(servers)
if methodnums is None:
methodnums = list(range(1, len (DEFAULT_METHODS) + 1))
limitre = re.compile ('|'.join (args), re.IGNORECASE)
for s in servers:
if (not not limitre.match (s['name'])) == invert:
continue
serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace'])
for num in (methodnums):
if num > len(DEFAULT_METHODS):
break
total += 1
name = DEFAULT_METHODS[num - 1]
title = '%s: %s (#%d)' % (s['name'], name, num)
try:
fn = globals ()[name]
except KeyboardInterrupt:
raise
except:
if 'n' in output:
print(title, "test not yet implemented")
notimp += 1
continue
try:
res = fn (serv, s['soapaction'], s['name'])
if name in s['nonfunctional']:
print(title, "succeeded despite marked nonfunctional")
elif 's' in output:
print(title, "succeeded ")
succeed += 1
except KeyboardInterrupt:
print("fail")
raise
except:
if name in s['nonfunctional']:
if 'F' in output:
t = 'as expected'
if s['nonfunctional'][name] != '':
t += ', ' + s['nonfunctional'][name]
print(title, "failed (%s) -" %t, sys.exc_info()[1])
failok += 1
else:
if 'f' in output:
print(title, "failed -", str (sys.exc_info()[1]))
fail += 1
if stats:
print(" Tests ended at:", time.ctime (time.time()))
if stats > 0:
print(" Total tests: %d" % total)
print(" Successes: %d (%3.2f%%)" % \
(succeed, 100.0 * succeed / total))
if stats > 0 or fail > 0:
print("Failed unexpectedly: %d (%3.2f%%)" % \
(fail, 100.0 * fail / total))
if stats > 0:
print(" Failed as expected: %d (%3.2f%%)" % \
(failok, 100.0 * failok / total))
if stats > 0 or notimp > 0:
print(" Not implemented: %d (%3.2f%%)" % \
(notimp, 100.0 * notimp / total))
return fail + notimp
if __name__ == "__main__":
main()
| 31.130584
| 139
| 0.486919
| 1,027
| 9,059
| 4.252191
| 0.290166
| 0.025647
| 0.010305
| 0.010992
| 0.110373
| 0.063659
| 0.050836
| 0.050836
| 0.027479
| 0.027479
| 0
| 0.023651
| 0.369908
| 9,059
| 290
| 140
| 31.237931
| 0.741416
| 0.033227
| 0
| 0.184685
| 0
| 0.009009
| 0.248486
| 0.003315
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.036036
| 0
| 0.112613
| 0.099099
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92fb1af4be141cb39cbab935a9b9551b1ec5b453
| 934
|
py
|
Python
|
src/compile.py
|
Pixxeasy/WinTools
|
e67c365cd4a7a47a410c25b7df8eeaeedc05dd8d
|
[
"MIT"
] | null | null | null |
src/compile.py
|
Pixxeasy/WinTools
|
e67c365cd4a7a47a410c25b7df8eeaeedc05dd8d
|
[
"MIT"
] | null | null | null |
src/compile.py
|
Pixxeasy/WinTools
|
e67c365cd4a7a47a410c25b7df8eeaeedc05dd8d
|
[
"MIT"
] | null | null | null |
import os
import json
import shutil
with open("entry.tp") as entry:
entry = json.loads(entry.read())
startcmd = entry['plugin_start_cmd'].split("%TP_PLUGIN_FOLDER%")[1].split("\\")
filedirectory = startcmd[0]
fileName = startcmd[1]
if os.path.exists(filedirectory):
os.remove(os.path.join(os.getcwd(), "WinTools"))
else:
os.makedirs("temp/"+filedirectory)
for file in os.listdir("."):
if file not in ["compile.py", "utils", "requirements.txt", "build", "dist", "main.py", "main.spec", "__pycache__", "temp"]:
print("copying", file)
shutil.copy(os.path.join(os.getcwd(), file), os.path.join("temp", filedirectory))
os.rename("dist\Main.exe", "dist\WinTools.exe")
shutil.copy(os.path.join(os.getcwd(), r"dist\WinTools.exe"), "temp/"+filedirectory)
shutil.make_archive(base_name="WinTools", format='zip', root_dir="temp", base_dir="WinTools")
os.rename("WinTools.zip", "WinTools.tpp")
| 33.357143
| 127
| 0.674518
| 131
| 934
| 4.717557
| 0.465649
| 0.048544
| 0.064725
| 0.058252
| 0.119741
| 0.090615
| 0.090615
| 0
| 0
| 0
| 0
| 0.003659
| 0.122056
| 934
| 27
| 128
| 34.592593
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.255889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
92ff4f4bfa893dc686e0e12fb0d4936e8c8b259d
| 272
|
py
|
Python
|
basic_and.py
|
Verkhovskaya/PyDL
|
4c3f2d952dd988ff27bf359d2f2cdde65737e062
|
[
"MIT"
] | 5
|
2018-07-28T18:18:59.000Z
|
2022-01-05T19:01:50.000Z
|
basic_and.py
|
Verkhovskaya/PyDL
|
4c3f2d952dd988ff27bf359d2f2cdde65737e062
|
[
"MIT"
] | null | null | null |
basic_and.py
|
Verkhovskaya/PyDL
|
4c3f2d952dd988ff27bf359d2f2cdde65737e062
|
[
"MIT"
] | null | null | null |
from pywire import *
def invert(signal):
if signal:
return False
else:
return True
class Inverter:
def __init__(self, a, b):
b.drive(invert, a)
width = 4
a = Signal(width, io="in")
b = Signal(width, io="out")
Inverter(a, b)
build()
| 14.315789
| 29
| 0.591912
| 40
| 272
| 3.925
| 0.6
| 0.025478
| 0.165605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005076
| 0.275735
| 272
| 19
| 30
| 14.315789
| 0.791878
| 0
| 0
| 0
| 0
| 0
| 0.018315
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13003b62c91dfe370f2b6ae3d293c73a5a463179
| 4,521
|
py
|
Python
|
network/evaluate_keypoints.py
|
mhsung/deep-functional-dictionaries
|
8b3d70c3376339cb1b7baacf7753094cd1ffef45
|
[
"MIT"
] | 41
|
2018-07-10T10:15:02.000Z
|
2021-04-20T03:10:16.000Z
|
network/evaluate_keypoints.py
|
Yajha/deep-functional-dictionaries
|
deecf8c6c85e253cfa52be7c6b3c308d5e5aaf81
|
[
"MIT"
] | 2
|
2018-07-05T06:34:13.000Z
|
2019-09-18T08:57:56.000Z
|
network/evaluate_keypoints.py
|
Yajha/deep-functional-dictionaries
|
deecf8c6c85e253cfa52be7c6b3c308d5e5aaf81
|
[
"MIT"
] | 7
|
2018-07-28T00:00:08.000Z
|
2021-06-30T13:39:44.000Z
|
# Minhyuk Sung (mhsung@cs.stanford.edu)
# April 2018
import os, sys
BASE_DIR = os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.join(BASE_DIR, '..'))
from datasets import *
from generate_outputs import *
from scipy.optimize import linear_sum_assignment
#import matplotlib.pyplot as plt
import numpy as np
def compute_all_keypoints(sess, net, data):
P = data.point_clouds
assert(P.shape[0] == data.n_data)
assert(P.shape[1] == data.n_points)
KP = data.keypoints
assert(KP.shape[0] == data.n_data)
assert(KP.shape[1] == data.n_labels)
A = predict_A(P, sess, net)
assert(A.shape[0] == data.n_data)
assert(A.shape[1] == data.n_points)
assert(A.shape[2] == net.K)
pred_KP = np.argmax(A, axis=1)
return P, KP, pred_KP
def evaluate_PCK(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
# NOTE:
# Skip if the keypoint does not exist.
labels = [i for i in range(n_labels) if KP[k,i] >= 0]
# Find the closest prediction (w/o matching).
for i, label in enumerate(labels):
all_dists = np.zeros(K)
idx_i = KP[k,label]
assert(idx_i < n_points)
p_i = P[k,idx_i]
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[j] = np.linalg.norm(p_i - p_j)
j = np.argmin(all_dists)
dists_info.append((k, i, j, all_dists[j]))
dists_info = np.array(dists_info)
return dists_info
def evaluate_PCK_after_label_basis_matching(P, KP, pred_KP):
n_data = P.shape[0]
n_points = P.shape[1]
n_labels = KP.shape[1]
K = pred_KP.shape[1]
# Find the best mapping from labels to bases.
all_dists = np.zeros((n_data, n_labels, K))
label_counts = np.zeros(n_labels)
for k in range(n_data):
for i in range(n_labels):
# NOTE:
# Skip if the keypoint does not exist.
if KP[k,i] < 0: continue
idx_i = KP[k,i]
assert(idx_i < n_points)
p_i = P[k,idx_i]
label_counts[i] += 1.
for j in range(K):
idx_j = pred_KP[k,j]
assert(idx_j < n_points)
p_j = P[k,idx_j]
all_dists[k,i,j] += np.linalg.norm(p_i - p_j)
mean_dists = np.sum(all_dists, axis=0) / \
np.expand_dims(label_counts, axis=-1)
row_ind, col_ind = linear_sum_assignment(mean_dists)
# dists_info: (point_cloud_index, label, basis_index, distance)
dists_info = []
for k in range(n_data):
for (i, j) in zip(row_ind, col_ind):
if KP[k,i] < 0: continue
dists_info.append((k, i, j, all_dists[k,i,j]))
dists_info = np.array(dists_info)
return dists_info
def save_results(dists_info, out_dir, postfix=None):
# dists_info: (point_cloud_index, label, basis_index, distance)
dists = dists_info[:,3]
if postfix is not None:
out_file = os.path.join(out_dir, 'distances_{}.npy'.format(postfix))
else:
out_file = os.path.join(out_dir, 'distances.npy')
np.save(out_file, dists)
print("Saved '{}'.".format(out_file))
'''
# Draw plot.
n_matches = dists.size
x_list = np.linspace(0.0, 0.1, 20 + 1)
counts = np.zeros(x_list.size, dtype=int)
for i in range(x_list.size):
counts[i] = np.sum(dists <= x_list[i])
y_list = counts.astype(x_list.dtype) / float(n_matches)
plt.clf()
plt.plot(x_list, y_list)
plt.ylim(0., 1.)
plt.yticks(np.linspace(0., 1., 10 + 1))
if postfix is not None:
out_file = os.path.join(out_dir, 'pck_{}.png'.format(postfix))
else:
out_file = os.path.join(out_dir, 'pck.png')
plt.savefig(out_file)
print("Saved '{}'.".format(out_file))
'''
def evaluate(sess, net, data, out_dir):
if not os.path.exists(out_dir): os.makedirs(out_dir)
P, KP, pred_KP = compute_all_keypoints(sess, net, data)
dists = evaluate_PCK(P, KP, pred_KP)
save_results(dists, out_dir)
dists_after_matching = evaluate_PCK_after_label_basis_matching(
P, KP, pred_KP)
save_results(dists_after_matching, out_dir, postfix='after_matching')
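# Hedged aside (not part of the original evaluation code): the label-to-basis
# matching above uses scipy's linear_sum_assignment, which returns the row and
# column index arrays that minimise the total cost. With a made-up 3x3
# mean-distance matrix the optimal matching is the diagonal:
#
#   import numpy as np
#   from scipy.optimize import linear_sum_assignment
#   demo_dists = np.array([[0.1, 0.9, 0.8],
#                          [0.7, 0.2, 0.9],
#                          [0.8, 0.9, 0.3]])
#   row_ind, col_ind = linear_sum_assignment(demo_dists)
#   # row_ind = [0, 1, 2], col_ind = [0, 1, 2], total cost = 0.6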
| 26.438596
| 76
| 0.601858
| 726
| 4,521
| 3.522039
| 0.198347
| 0.052796
| 0.023465
| 0.021119
| 0.533438
| 0.499413
| 0.416895
| 0.40086
| 0.33203
| 0.317951
| 0
| 0.012364
| 0.266534
| 4,521
| 170
| 77
| 26.594118
| 0.758745
| 0.097102
| 0
| 0.352273
| 0
| 0
| 0.016274
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.056818
| false
| 0
| 0.056818
| 0
| 0.147727
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13008c4023106e4274d2b92d9aa79a58e4551138
| 2,388
|
py
|
Python
|
recipes/cxxopts/all/conanfile.py
|
dvirtz/conan-center-index
|
2e7a6337804325616f8d97e3a5b6f66cc72699cb
|
[
"MIT"
] | 562
|
2019-09-04T12:23:43.000Z
|
2022-03-29T16:41:43.000Z
|
recipes/cxxopts/all/conanfile.py
|
dvirtz/conan-center-index
|
2e7a6337804325616f8d97e3a5b6f66cc72699cb
|
[
"MIT"
] | 9,799
|
2019-09-04T12:02:11.000Z
|
2022-03-31T23:55:45.000Z
|
recipes/cxxopts/all/conanfile.py
|
dvirtz/conan-center-index
|
2e7a6337804325616f8d97e3a5b6f66cc72699cb
|
[
"MIT"
] | 1,126
|
2019-09-04T11:57:46.000Z
|
2022-03-31T16:43:38.000Z
|
import os
from conans import ConanFile, tools
from conans.errors import ConanInvalidConfiguration
class CxxOptsConan(ConanFile):
name = "cxxopts"
homepage = "https://github.com/jarro2783/cxxopts"
url = "https://github.com/conan-io/conan-center-index"
description = "Lightweight C++ option parser library, supporting the standard GNU style syntax for options."
license = "MIT"
topics = ("conan", "option-parser", "positional-arguments ", "header-only")
settings = "compiler"
options = { "unicode": [True, False] }
default_options = { "unicode": False }
no_copy_source = True
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _minimum_cpp_standard(self):
return 11
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"gcc": "5",
"clang": "3.9",
"apple-clang": "8",
}
def configure(self):
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, self._minimum_cpp_standard)
min_version = self._minimum_compilers_version.get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
else:
if tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} requires C++{} support. The current compiler {} {} does not support it.".format(
self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))
def requirements(self):
if self.options.unicode:
self.requires("icu/64.2")
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename("{}-{}".format(self.name, self.version), self._source_subfolder)
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("{}.hpp".format(self.name), dst="include", src=os.path.join(self._source_subfolder, "include"))
def package_id(self):
self.info.header_only()
def package_info(self):
if self.options.unicode:
self.cpp_info.defines = ["CXXOPTS_USE_UNICODE"]
| 36.738462
| 132
| 0.641122
| 274
| 2,388
| 5.441606
| 0.416058
| 0.075117
| 0.080483
| 0.036217
| 0.037559
| 0.037559
| 0
| 0
| 0
| 0
| 0
| 0.008161
| 0.230318
| 2,388
| 64
| 133
| 37.3125
| 0.803047
| 0
| 0
| 0.09434
| 0
| 0
| 0.219012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.169811
| false
| 0
| 0.056604
| 0.056604
| 0.490566
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1300c8abfbfcff2fad07bdd38a7b66244215a15d
| 1,868
|
py
|
Python
|
p_030_039/problem31.py
|
ericgreveson/projecteuler
|
1844bf383fca871b82d88ef1eb3a9b1a0e363054
|
[
"Apache-2.0"
] | null | null | null |
p_030_039/problem31.py
|
ericgreveson/projecteuler
|
1844bf383fca871b82d88ef1eb3a9b1a0e363054
|
[
"Apache-2.0"
] | null | null | null |
p_030_039/problem31.py
|
ericgreveson/projecteuler
|
1844bf383fca871b82d88ef1eb3a9b1a0e363054
|
[
"Apache-2.0"
] | null | null | null |
class CoinArray(list):
"""
Coin list that is hashable for storage in sets
The 8 entries are [1p count, 2p count, 5p count, ... , 200p count]
"""
def __hash__(self):
"""
Hash this as a string
"""
return hash(" ".join([str(i) for i in self]))
def main():
"""
Entry point
"""
# Important: sorted smallest to largest
coins = [1, 2, 5, 10, 20, 50, 100, 200]
coin_index = {coin: index for index, coin in enumerate(coins)}
# How many ways are there of making each number from 1 to 200 from these values?
# Building up from 1 means we can re-use earlier results
# e.g.:
# 1p: [{1}]
# 2p: [{1,1}, {2}]
# 3p: [{1,1,1}, {2,1}]
# 4p: [{1,1,1,1}, {2,1,1}, {2,2}]
# etc
way_sets = [None]
for i in range(1, 201):
way_set_i = set()
# Try using 1 of each coin and then all the ways of the remainder, if > 0
for coin in coins:
remainder = i - coin
if remainder == 0:
# We can make this with exactly this coin alone - but no larger coins
coin_count = [0 for i in coins]
coin_count[coin_index[coin]] = 1
way_set_i.add(CoinArray(coin_count))
break
elif remainder > 0:
# We can use this coin and whatever the options for the smaller value are
for rem_list in way_sets[remainder]:
new_coin_count = [c for c in rem_list]
new_coin_count[coin_index[coin]] += 1
way_set_i.add(CoinArray(new_coin_count))
else:
# Can't use any bigger coins
break
way_sets.append(way_set_i)
print(f"Number of ways of making £2: {len(way_sets[200])}")
return
if __name__ == "__main__":
main()
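# Hedged companion sketch (not part of the original solution): main() above
# builds up explicit sets of coin combinations amount by amount; when only the
# *count* of ways is needed, the same "build up from smaller amounts" idea
# reduces to a one-dimensional table over the same UK coin values.
def count_ways(target=200, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    """Return the number of ways to make `target` pence from `coins`."""
    ways = [1] + [0] * target  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, target + 1):
            ways[amount] += ways[amount - coin]
    return ways[target]
# count_ways(200) should return 73682, the well-known answer to problem 31.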
| 31.661017
| 89
| 0.534261
| 274
| 1,868
| 3.507299
| 0.419708
| 0.014568
| 0.012487
| 0.008325
| 0.097815
| 0.087409
| 0.087409
| 0.087409
| 0.087409
| 0.087409
| 0
| 0.053422
| 0.358672
| 1,868
| 58
| 90
| 32.206897
| 0.747913
| 0.34743
| 0
| 0.071429
| 0
| 0
| 0.050479
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.178571
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1300e7747076d34572209fef1029da836f1dbf7b
| 2,358
|
py
|
Python
|
video/cloud-client/quickstart/quickstart.py
|
nasirdec/GCP-AppEngine-Example
|
3f5ad26ad2c1e3c8deceb5844adfb40cf7c2e53f
|
[
"Apache-2.0"
] | 1
|
2019-11-17T08:59:14.000Z
|
2019-11-17T08:59:14.000Z
|
video/cloud-client/quickstart/quickstart.py
|
nasirdec/GCP-AppEngine-Example
|
3f5ad26ad2c1e3c8deceb5844adfb40cf7c2e53f
|
[
"Apache-2.0"
] | 16
|
2019-06-15T00:02:56.000Z
|
2021-03-25T23:22:38.000Z
|
video/cloud-client/quickstart/quickstart.py
|
nasirdec/GCP-AppEngine-Example
|
3f5ad26ad2c1e3c8deceb5844adfb40cf7c2e53f
|
[
"Apache-2.0"
] | 3
|
2019-02-11T16:16:11.000Z
|
2019-04-19T21:34:37.000Z
|
#!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This application demonstrates label detection on a demo video using
the Google Cloud API.
Usage:
python quickstart.py
"""
def run_quickstart():
# [START video_quickstart]
from google.cloud import videointelligence
video_client = videointelligence.VideoIntelligenceServiceClient()
features = [videointelligence.enums.Feature.LABEL_DETECTION]
operation = video_client.annotate_video(
'gs://demomaker/cat.mp4', features=features)
print('\nProcessing video for label annotations:')
result = operation.result(timeout=120)
print('\nFinished processing.')
# first result is retrieved because a single video was processed
segment_labels = result.annotation_results[0].segment_label_annotations
for i, segment_label in enumerate(segment_labels):
print('Video label description: {}'.format(
segment_label.entity.description))
for category_entity in segment_label.category_entities:
print('\tLabel category description: {}'.format(
category_entity.description))
for i, segment in enumerate(segment_label.segments):
start_time = (segment.segment.start_time_offset.seconds +
segment.segment.start_time_offset.nanos / 1e9)
end_time = (segment.segment.end_time_offset.seconds +
segment.segment.end_time_offset.nanos / 1e9)
positions = '{}s to {}s'.format(start_time, end_time)
confidence = segment.confidence
print('\tSegment {}: {}'.format(i, positions))
print('\tConfidence: {}'.format(confidence))
print('\n')
# [END video_quickstart]
if __name__ == '__main__':
run_quickstart()
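# Hedged aside (not part of the quickstart): the offsets above carry whole
# seconds plus a nanosecond remainder, so a segment starting at 12 s and
# 345000000 ns converts to 12 + 345000000 / 1e9 == 12.345 seconds.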
| 37.428571
| 75
| 0.697201
| 282
| 2,358
| 5.687943
| 0.5
| 0.037406
| 0.016209
| 0.01995
| 0.078554
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00914
| 0.211196
| 2,358
| 62
| 76
| 38.032258
| 0.853226
| 0.349449
| 0
| 0
| 0
| 0
| 0.129801
| 0.01457
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.071429
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13058e5281c2e6d308c1c802707f6f83b62cdc9b
| 1,809
|
py
|
Python
|
darts/models/linear_regression_model.py
|
BiancaMT25/darts
|
bb550dede6d8927a45aea0d9f3df53de32a6eee2
|
[
"Apache-2.0"
] | 1
|
2021-07-15T11:12:05.000Z
|
2021-07-15T11:12:05.000Z
|
darts/models/linear_regression_model.py
|
BiancaMT25/darts
|
bb550dede6d8927a45aea0d9f3df53de32a6eee2
|
[
"Apache-2.0"
] | null | null | null |
darts/models/linear_regression_model.py
|
BiancaMT25/darts
|
bb550dede6d8927a45aea0d9f3df53de32a6eee2
|
[
"Apache-2.0"
] | null | null | null |
"""
Standard Regression model
-------------------------
"""
import numpy as np
import pandas as pd
from typing import Union
from ..logging import get_logger
from .regression_model import RegressionModel
from sklearn.linear_model import LinearRegression
logger = get_logger(__name__)
class LinearRegressionModel(RegressionModel):
def __init__(self,
lags: Union[int, list] = None,
lags_exog: Union[int, list, bool] = None,
**kwargs):
"""
Simple wrapper for the linear regression model in scikit-learn, LinearRegression().
Parameters
----------
lags : Union[int, list]
Number of lagged target values used to predict the next time step. If an integer is given
the last `lags` lags are used (inclusive). Otherwise a list of integers with lags is required.
lags_exog : Union[int, list, bool]
Number of lagged exogenous values used to predict the next time step. If an integer is given
the last `lags_exog` lags are used (inclusive). Otherwise a list of integers with lags is required.
If True, `lags` will be used to determine `lags_exog`. If False, the values of all exogenous variables
at the current time `t` are used. This might lead to leakage if, for predictions, the values of the exogenous
variables at time `t` are not known.
**kwargs
Additional keyword arguments passed to `sklearn.linear_model.LinearRegression`.
"""
self.kwargs = kwargs
super().__init__(
lags=lags,
lags_exog=lags_exog,
model=LinearRegression(**kwargs)
)
def __str__(self):
return 'LinearRegression(lags={}, lags_exog={})'.format(self.lags, self.lags_exog)
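# Hedged usage sketch (not part of the module; the import path is assumed):
# `lags` may be an integer (use the last N target lags) or an explicit list of
# lag values, as described in the docstring above.
#
#   from darts.models import LinearRegressionModel
#   model_a = LinearRegressionModel(lags=12)
#   model_b = LinearRegressionModel(lags=[1, 2, 7])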
| 38.489362
| 111
| 0.640133
| 224
| 1,809
| 5.040179
| 0.401786
| 0.056687
| 0.042516
| 0.028344
| 0.26395
| 0.26395
| 0.221435
| 0.221435
| 0.221435
| 0.221435
| 0
| 0
| 0.273079
| 1,809
| 47
| 112
| 38.489362
| 0.858555
| 0.526258
| 0
| 0
| 0
| 0
| 0.053942
| 0.034578
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0.05
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13064197a568b4ea0fdb674d3a8685e3b27e92eb
| 863
|
py
|
Python
|
hood/urls.py
|
wadi-1000/Vicinity
|
a41f6ec2c532cb06f7444b55073b6879a1fce63a
|
[
"MIT"
] | null | null | null |
hood/urls.py
|
wadi-1000/Vicinity
|
a41f6ec2c532cb06f7444b55073b6879a1fce63a
|
[
"MIT"
] | null | null | null |
hood/urls.py
|
wadi-1000/Vicinity
|
a41f6ec2c532cb06f7444b55073b6879a1fce63a
|
[
"MIT"
] | null | null | null |
from django.urls import path,include
from . import views
urlpatterns = [
path('home/', views.home, name = 'home'),
path('add_hood/',views.uploadNeighbourhood, name = 'add_hood'),
path('viewhood/',views.viewHood, name = 'viewhood'),
path('hood/<int:pk>/',views.hood, name = 'hood'),
path('add_bizna/',views.uploadBuisness, name = 'add_bizna'),
path('bizna/',views.viewBizna, name = 'view_bizna'),
path('viewbizna/<int:pk>/',views.bizna, name = 'bizna'),
path('post/',views.create_post, name = 'post'),
path('posts/',views.viewPost, name = 'posts'),
path('searchbizna/', views.searchBizna, name="search_results"),
path('searchhood/', views.searchHood, name="search_res"),
path('join_hood/<id>', views.join_neighbourhood, name='join-hood'),
path('leave_hood/<id>', views.leave_neighbourhood, name='leave-hood'),
]
| 47.944444
| 74
| 0.665122
| 108
| 863
| 5.203704
| 0.305556
| 0.042705
| 0.035587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130939
| 863
| 18
| 75
| 47.944444
| 0.749333
| 0
| 0
| 0
| 0
| 0
| 0.271991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1306e647595d1f2f64e2d6dd214b9b25580f3ed1
| 8,305
|
py
|
Python
|
src/licensedcode/tokenize.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/licensedcode/tokenize.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
src/licensedcode/tokenize.py
|
chetanya-shrimali/scancode-toolkit
|
a1a22fb225cbeb211bd6f92272a46f1351f57d6b
|
[
"Apache-2.0",
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from itertools import islice
from itertools import izip
import re
from zlib import crc32
from textcode.analysis import text_lines
"""
Utilities to break texts into lines and tokens (aka. words), with specialized versions
for query and rule texts.
"""
def query_lines(location=None, query_string=None, strip=True):
"""
Return an iterable of text lines given a file at `location` or a
`query string`. Include empty lines.
"""
# TODO: OPTIMIZE: tokenizing line by line may be rather slow
# we could instead get lines and tokens at once in a batch?
lines = []
if location:
lines = text_lines(location, demarkup=False)
elif query_string:
if strip:
keepends = False
else:
keepends = True
lines = query_string.splitlines(keepends)
for line in lines:
if strip:
yield line.strip()
else:
yield line
# Split on whitespace and punctuations: keep only characters
# and + in the middle or end of a word.
# Keeping the trailing + is important for license names such as GPL2+
query_pattern = r'[^\W_]+\+?[^\W_]*'
word_splitter = re.compile(query_pattern, re.UNICODE).findall
def query_tokenizer(text, lower=True):
"""
Return an iterable of tokens from a unicode query text.
"""
if not text:
return []
text = lower and text.lower() or text
return (token for token in word_splitter(text) if token)
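# Hedged example (added for illustration): with the pattern above, underscores
# and punctuation split words while a trailing "+" is kept, e.g.
#   list(query_tokenizer('The GNU GPL2+ or later_version!'))
#   -> ['the', 'gnu', 'gpl2+', 'or', 'later', 'version']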
# Alternate pattern used for matched text collection
not_query_pattern = r'[\W_+]+[\W_]?'
# collect tokens and non-token texts in two different groups
_text_capture_pattern = '(?P<token>' + query_pattern + ')' + '|' + '(?P<punct>' + not_query_pattern + ')'
tokens_and_non_tokens = re.compile(_text_capture_pattern, re.UNICODE).finditer
def matched_query_text_tokenizer(text):
"""
Return an iterable of tokens and non-tokens from a unicode query text keeping
everything (including punctuations, line endings, etc.)
The returned iterable contains 2-tuples of:
- True if the string is a text token or False if this is not (such as punctuation, spaces, etc).
- the corresponding string
This is used to reconstruct the matched query text accurately.
"""
if not text:
return
for match in tokens_and_non_tokens(text):
if not match:
continue
mgd = match.groupdict()
token = mgd.get('token')
punct = mgd.get('punct')
if token or punct:
yield (True, token) if token else (False, punct)
# Template-aware splitter, keeping a templated part {{anything}} as a token.
# This splitter yields plain token strings or double braces-enclosed strings
# {{something}} for templates. Curly braces are otherwise treated as punctuation.
# A template part is anything enclosed in double braces
template_pattern = r'\{\{[^{}]*\}\}'
rule_pattern = '%s|%s+' % (query_pattern, template_pattern,)
template_splitter = re.compile(rule_pattern , re.UNICODE).findall
def rule_tokenizer(text, lower=True):
"""
Return an iterable of tokens from a unicode rule text, skipping templated
parts, including leading and trailing templated parts.
For example:
>>> list(rule_tokenizer(''))
[]
>>> list(rule_tokenizer('some Text with spAces! + _ -'))
[u'some', u'text', u'with', u'spaces']
Unbalanced templates are handled correctly:
>>> list(rule_tokenizer('{{}some }}Text with spAces! + _ -'))
[u'some', u'text', u'with', u'spaces']
Templates are handled and skipped for templated sequences:
>>> list(rule_tokenizer('{{Hi}}some {{}}Text with{{noth+-_!@ing}} {{junk}}spAces! + _ -{{}}'))
[u'some', u'text', u'with', u'spaces']
"""
if not text:
return []
text = lower and text.lower() or text
tokens = template_splitter(text)
# skip templates
return (token for token in tokens if token and not token.startswith('{{'))
def ngrams(iterable, ngram_length):
"""
Return an iterable of ngrams of length `ngram_length` given an iterable.
Each ngram is a tuple of ngram_length items.
The returned iterable is empty if the input iterable contains less than
`ngram_length` items.
Note: this is a fairly arcane but optimized way to compute ngrams.
For example:
>>> list(ngrams([1,2,3,4,5], 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams([1,2,3,4,5], 4))
[(1, 2, 3, 4), (2, 3, 4, 5)]
>>> list(ngrams([1,2,3,4], 2))
[(1, 2), (2, 3), (3, 4)]
>>> list(ngrams([1,2,3], 2))
[(1, 2), (2, 3)]
>>> list(ngrams([1,2], 2))
[(1, 2)]
>>> list(ngrams([1], 2))
[]
This also works with arrays or tuples:
>>> from array import array
>>> list(ngrams(array(b'h', [1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
>>> list(ngrams(tuple([1,2,3,4,5]), 2))
[(1, 2), (2, 3), (3, 4), (4, 5)]
"""
return izip(*(islice(iterable, i, None) for i in range(ngram_length)))
def select_ngrams(ngrams, with_pos=False):
"""
Return an iterable as a subset of a sequence of ngrams using the hailstorm
algorithm. If `with_pos` is True also include the starting position for the ngram
in the original sequence.
Definition from the paper: http://www2009.eprints.org/7/1/p61.pdf
The algorithm first fingerprints every token and then selects a shingle s if
the minimum fingerprint value of all k tokens in s occurs at the first or the
last position of s (and potentially also in between). Due to the
probabilistic properties of Rabin fingerprints the probability that a shingle
is chosen is 2/k if all tokens in the shingle are different.
For example:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (2, 6, 1), (7, 3, 4)]
Positions can also be included. In this case, tuple of (pos, ngram) are returned:
>>> list(select_ngrams([(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)], with_pos=True))
[(0, (2, 1, 3)), (1, (1, 1, 3)), (3, (2, 6, 1)), (4, (7, 3, 4))]
This works also from a generator:
>>> list(select_ngrams(x for x in [(2, 1, 3), (1, 1, 3), (5, 1, 3), (2, 6, 1), (7, 3, 4)]))
[(2, 1, 3), (1, 1, 3), (2, 6, 1), (7, 3, 4)]
"""
last = None
for i, ngram in enumerate(ngrams):
# FIXME: use a proper hash
nghs = [crc32(str(ng)) for ng in ngram]
min_hash = min(nghs)
if with_pos:
ngram = (i, ngram,)
if nghs[0] == min_hash or nghs[-1] == min_hash:
yield ngram
last = ngram
else:
# always yield the first or last ngram too.
if i == 0:
yield ngram
last = ngram
if last != ngram:
yield ngram
| 36.425439
| 105
| 0.646117
| 1,245
| 8,305
| 4.244177
| 0.271486
| 0.006435
| 0.003974
| 0.013626
| 0.181491
| 0.149697
| 0.130394
| 0.126987
| 0.123202
| 0.117903
| 0
| 0.030436
| 0.232511
| 8,305
| 227
| 106
| 36.585903
| 0.798557
| 0.628657
| 0
| 0.239437
| 0
| 0
| 0.03263
| 0
| 0.014085
| 0
| 0
| 0.008811
| 0
| 1
| 0.084507
| false
| 0
| 0.112676
| 0
| 0.28169
| 0.014085
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
130a61bee54706bd995afa354e7681d8726e5d5d
| 3,764
|
py
|
Python
|
src/model.py
|
palucki/RememberIt
|
1d66616d4bb1bca026dda031d876dca226ba71ad
|
[
"MIT"
] | null | null | null |
src/model.py
|
palucki/RememberIt
|
1d66616d4bb1bca026dda031d876dca226ba71ad
|
[
"MIT"
] | null | null | null |
src/model.py
|
palucki/RememberIt
|
1d66616d4bb1bca026dda031d876dca226ba71ad
|
[
"MIT"
] | null | null | null |
import random
from pymongo import MongoClient
from observable import Observable
from phrase import Phrase
class MongoDbProxy:
"""Proxy for MongoDB"""
def __init__(self, url, dbName, tableName):
self.client = MongoClient(url)
self.db = self.client[dbName]
self.table = tableName
self.count = self.db[self.table].find().count()
def get_db(self):
return self.db
def add_phrase(self, phrase):
#[{ "english": eng, "polish" : pl}]
record = {"english" : phrase.eng, "polish" : phrase.meanings}
self.db[self.table].insert(record)
self.count = self.db[self.table].find().count()
def show_one(self, phrase):
print("eng: \'%s\' pol: \'%s\'" % (phrase["english"], phrase["polish"]))
def get_all(self):
#define your data struct here
words = {}
for i, phrase in enumerate(self.db[self.table].find()):
eng = phrase["english"]
#lang = phrase["lang"]
meaning = phrase["polish"]
words[eng] = meaning
return words
def show_all(self):
if self.count > 0:
for i, phrase in enumerate(self.db[self.table].find()):
print(i, end=" ")
self.show_one(phrase)
else:
print("Database is empty")
def show_random(self):
entries = self.db[self.table].find()
self.count = entries.count()
if self.count > 0:
self.show_one(entries[random.randrange(self.count)])
else:
print("Database is empty")
def record_exists(self, eng):
if self.db[self.table].find_one({"english" : eng}):
return True
else:
return False
def drop_record(self, eng):
self.db[self.table].delete_one({"english":eng})
def drop_db(self):
print("Dropping")
self.db[self.table].drop()
self.count = self.db[self.table].find().count()
class Model:
"""That needs a table of pairs - eng and its meanings"""
def __init__(self):
self.phrases = Observable({})
self.db = MongoDbProxy("mongodb://localhost:27017/", "RepeatItDb", "phrases")
data = self.db.get_all()
self.phrases.setData(data)
def addWord(self, key, lang, meanings):
newData = self.phrases.getData()
newData[key] = meanings
self.phrases.setData(newData)
def getAllWords(self):
return self.phrases.getData()
def removeWord(self, key):
newData = self.phrases.getData()
newData.pop(key)
self.phrases.setData(newData)
def saveWord(self, wordAndMeaning):
word = wordAndMeaning[0]
meaning = wordAndMeaning[1]
self.addWord(word, "pl", meaning)
def saveDb(self):
dbData = self.db.get_all()
modelData = self.getAllWords()
#That's for future optimization: update db instead of adding it all
dbKeysSet = set(dbData.keys())
dbValuesSet = set(dbData.values())
modelKeysSet = set(modelData.keys())
modelValuesSet = set(modelData.values())
newRecordsKeys = modelKeysSet - dbKeysSet
deletedRecordsKeys = dbKeysSet - modelKeysSet
if len(newRecordsKeys):
for newKey in newRecordsKeys:
self.db.add_phrase(Phrase(newKey, "pl", modelData[newKey]))
if len(deletedRecordsKeys):
for deletedKey in deletedRecordsKeys:
self.db.drop_record(deletedKey)
#Handle also value update
print("Saving database...")
| 30.852459
| 85
| 0.562965
| 411
| 3,764
| 5.094891
| 0.277372
| 0.048711
| 0.052531
| 0.071633
| 0.189589
| 0.114136
| 0.088348
| 0.088348
| 0.072588
| 0.038204
| 0
| 0.003497
| 0.316153
| 3,764
| 121
| 86
| 31.107438
| 0.810023
| 0.064293
| 0
| 0.186047
| 0
| 0
| 0.052467
| 0.007414
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186047
| false
| 0
| 0.046512
| 0.023256
| 0.313953
| 0.069767
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
130ad5b5c3caa22c7668a018ea30cf4d2bc3c2f4
| 1,381
|
py
|
Python
|
sampleApplication/clientGenerator.py
|
chall68/BlackWatch
|
0b95d69e4b7de9213a031557e9aff54ce35b12dd
|
[
"MIT"
] | null | null | null |
sampleApplication/clientGenerator.py
|
chall68/BlackWatch
|
0b95d69e4b7de9213a031557e9aff54ce35b12dd
|
[
"MIT"
] | null | null | null |
sampleApplication/clientGenerator.py
|
chall68/BlackWatch
|
0b95d69e4b7de9213a031557e9aff54ce35b12dd
|
[
"MIT"
] | null | null | null |
#!flask/bin/python
#from user import User
from sampleObjects.User import User
from datetime import datetime
from sampleObjects.DetectionPoint import DetectionPoint
import time, requests, random, atexit
def requestGenerator():
userObject = randomUser()
detectionPointObject = randomDetectionPoint()
req = requests.post('http://localhost:5000/addevent', json = {"User": userObject.__dict__, "DetectionPoint" : detectionPointObject.__dict__, "Time" : str(datetime.now().isoformat())})
print (req.text)
checkResp = requests.get('http://localhost:5000/getResponses')
print (checkResp.text)
def randomUser():
user = random.randint(1,3)
attacker=0
if (user==1):
attacker = User("Phillipo", "255.255.255.101", "xxxx")
elif (user==2):
attacker = User("Sergio", "109.123.234.1", "yyyy")
elif (user==3):
attacker = User("Anonymous", "101.101.101.87", "354343jjk23")
return attacker
def randomDetectionPoint():
rand = random.randint(1,2)
dp=0
if (rand==1):
dp = DetectionPoint("HTTP Verb", "GET Request used where POST is expected")
elif (rand==2):
dp = DetectionPoint("Login Page", "Hidden field altered within the login form")
return dp
for i in range (50):
requestGenerator()
time.sleep(1.5)
def closingTime():
print ("Exiting")
atexit.register(closingTime)
| 27.62
| 187
| 0.674873
| 164
| 1,381
| 5.634146
| 0.5
| 0.038961
| 0.030303
| 0.038961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05694
| 0.186097
| 1,381
| 49
| 188
| 28.183673
| 0.765125
| 0.027516
| 0
| 0
| 0
| 0
| 0.206562
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.114286
| 0
| 0.285714
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
130c49099f8aa40a9dd92ff170ecb6c15b43d8f9
| 1,873
|
py
|
Python
|
news_collector/collector/consumers.py
|
ridwaniyas/channels-examples
|
9e6a26c8e6404483695cbd96ebf12fc4ed9956b2
|
[
"BSD-3-Clause"
] | null | null | null |
news_collector/collector/consumers.py
|
ridwaniyas/channels-examples
|
9e6a26c8e6404483695cbd96ebf12fc4ed9956b2
|
[
"BSD-3-Clause"
] | null | null | null |
news_collector/collector/consumers.py
|
ridwaniyas/channels-examples
|
9e6a26c8e6404483695cbd96ebf12fc4ed9956b2
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import json
import datetime
from aiohttp import ClientSession
from channels.generic.http import AsyncHttpConsumer
from .constants import BLOGS
class NewsCollectorAsyncConsumer(AsyncHttpConsumer):
"""
Async HTTP consumer that fetches URLs.
"""
async def handle(self, body):
# Adapted from:
# "Making 1 million requests with python-aiohttp"
# https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
async def fetch(url, session):
async with session.get(url) as response:
return await response.read()
tasks = []
loop = asyncio.get_event_loop()
# aiohttp allows a ClientSession object to link all requests together
t0 = datetime.datetime.now()
async with ClientSession() as session:
for name, url in BLOGS.items():
print('Start downloading "%s"' % name)
# Launch a coroutine for each URL fetch
task = loop.create_task(fetch(url, session))
tasks.append(task)
# Wait on, and then gather, all responses
responses = await asyncio.gather(*tasks)
dt = (datetime.datetime.now() - t0).total_seconds()
print('All downloads completed; elapsed time: {} [s]'.format(dt))
# asyncio.gather returns results in the order of the original sequence,
# so we can safely zip these together.
data = dict(zip(BLOGS.keys(), [r.decode('utf-8') for r in responses]))
text = json.dumps(data)
# We have to send a response using send_response rather than returning
# it in Channels' async HTTP consumer
await self.send_response(200,
text.encode(),
headers=[
("Content-Type", "application/json"),
]
)
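# Hedged aside (not part of the consumer): the zip() above relies on
# asyncio.gather returning results in the order the coroutines were passed,
# regardless of which one finishes first. A minimal standalone check:
#
#   import asyncio
#
#   async def delayed(value, delay):
#       await asyncio.sleep(delay)
#       return value
#
#   async def demo():
#       return await asyncio.gather(delayed('slow', 0.2), delayed('fast', 0.1))
#
#   print(asyncio.run(demo()))  # -> ['slow', 'fast']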
| 35.339623
| 91
| 0.615056
| 218
| 1,873
| 5.256881
| 0.56422
| 0.015707
| 0.029668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011338
| 0.293647
| 1,873
| 52
| 92
| 36.019231
| 0.854875
| 0.288841
| 0
| 0
| 0
| 0
| 0.076511
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.266667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
130d77d6c796e047f21c43df476be8389b35aecb
| 737
|
py
|
Python
|
src/randomcsv/FileUtils.py
|
PhilipBuhr/randomCsv
|
34b1da62134077dfe4db2682ee0da386ef380c1d
|
[
"MIT"
] | null | null | null |
src/randomcsv/FileUtils.py
|
PhilipBuhr/randomCsv
|
34b1da62134077dfe4db2682ee0da386ef380c1d
|
[
"MIT"
] | null | null | null |
src/randomcsv/FileUtils.py
|
PhilipBuhr/randomCsv
|
34b1da62134077dfe4db2682ee0da386ef380c1d
|
[
"MIT"
] | null | null | null |
import os
from pathlib import Path
def write(file_name, content):
Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True)
with open(file_name, 'w') as file:
file.write(content)
def read_line_looping(file_name, count):
i = 0
lines = []
file = open(file_name, 'r')
line = file.readline()
if line == '':
raise EmptyFileError(f'Error: Dictionary {file_name} seems to be empty')
while i < count:
lines.append(line.strip())
i += 1
line = file.readline()
if line == '':
file.close()
file = open(file_name, 'r')
line = file.readline()
file.close()
return lines
class EmptyFileError(Exception):
pass
| 23.03125
| 80
| 0.591588
| 96
| 737
| 4.4375
| 0.5
| 0.131455
| 0.084507
| 0.075117
| 0.220657
| 0.15493
| 0.15493
| 0.15493
| 0
| 0
| 0
| 0.003781
| 0.282225
| 737
| 31
| 81
| 23.774194
| 0.801512
| 0
| 0
| 0.36
| 0
| 0
| 0.067843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0.04
| 0.08
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
130ec5dfef9f34118ca5d16b6a8a1a90a53517da
| 5,495
|
py
|
Python
|
aux_sys_err_prediction_module/additive/R_runmed_spline/my_R_runmed_spline_analysis.py
|
PNNL-Comp-Mass-Spec/DtaRefinery
|
609cc90d0322af69aea43c2fc21d9cf05a06797a
|
[
"BSD-2-Clause"
] | null | null | null |
aux_sys_err_prediction_module/additive/R_runmed_spline/my_R_runmed_spline_analysis.py
|
PNNL-Comp-Mass-Spec/DtaRefinery
|
609cc90d0322af69aea43c2fc21d9cf05a06797a
|
[
"BSD-2-Clause"
] | null | null | null |
aux_sys_err_prediction_module/additive/R_runmed_spline/my_R_runmed_spline_analysis.py
|
PNNL-Comp-Mass-Spec/DtaRefinery
|
609cc90d0322af69aea43c2fc21d9cf05a06797a
|
[
"BSD-2-Clause"
] | null | null | null |
from aux_sys_err_prediction_module.additive.R_runmed_spline.my_R_runmed_spline_fit import R_runmed_smooth_spline
from numpy import random, array, median, zeros, arange, hstack
from win32com.client import Dispatch
import math
myName = 'R_runmed_spline'
useMAD = True # use median absolute deviations instead of the sum of squared residuals
# -----------------------------------------------------------------------
def R_runmed_spline_MAIN(ARG3, Controller):
pars = Controller.updatedSettings['refiningPars']['regressionSettings'][myName]
# ARG3
x = ARG3[0][0]
y = ARG3[0][1]
sc = Dispatch("StatConnectorSrv.StatConnector")
sc.Init("R")
# get the best smoothing parameter
bestSpar = R_runmed_spline_KCV_OPTIMIZATION(x, y, sc=sc, **pars)
# get the prediction error for this smoothing parameter
bestPredErr = R_runmed_spline_KCV_predErr(x, y, spar=bestSpar, sc=sc, **pars)
# compare with original SSE
# is fit successful?
# return isSuccessfulFit, bestPredErr, ppmArrs
SSE = sum(y ** 2)
MAD = 1.4826 * median(abs(y))
if useMAD:
SSE = MAD
if bestPredErr < SSE:
isSuccessfulFit = True
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit, runMedData = R_runmed_smooth_spline(x, y, x, spar=bestSpar, sc=sc, **pars)
yEval, runMedData = R_runmed_smooth_spline(x, y, xEval, spar=bestSpar, sc=sc, **pars)
#
ppmArrs[ind] = [yFit, yEval]
else:
isSuccessfulFit = False
#
ppmArrs = [[] for i in range(len(ARG3))]
for ind in range(len(ARG3)):
x = ARG3[ind][0]
y = ARG3[ind][1]
xEval = ARG3[ind][2]
#
yFit = zeros(len(x), 'd')
yEval = zeros(len(xEval), 'd')
#
ppmArrs[ind] = [yFit, yEval]
sc.Close()
return isSuccessfulFit, bestPredErr, ppmArrs
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_OPTIMIZATION(x, y, sc, **pars):
sparRange = array([float(i) for i in pars['spar range'].split(',')])
sparStepsNum = int(pars['spar steps number'])
sparStep = round((sparRange[1] - sparRange[0]) / sparStepsNum, 5)
sparSet = arange(sparRange[0], sparRange[1], sparStep)
predErrSet = zeros(len(sparSet), 'd')
for i in range(len(sparSet)):
predErr = R_runmed_spline_KCV_predErr(x, y, spar=sparSet[i], sc=sc, **pars)
predErrSet[i] = predErr
## p(zip(sparSet, predErrSet))
spar = sparSet[predErrSet == min(predErrSet)][-1] # take the last one (smoothest) if there are few
## print('spar ', spar)
return spar
# -----------------------------------------------------------------------
# -----------------------------------------------------------------------
def R_runmed_spline_KCV_predErr(x, y, **kwargs):
"""
just returns the prediction error
"""
K = int(kwargs['K'])
# --Related to K-fold CV---------------------------
L = len(x)
N = L / K ##min length of pieces
W = list(range(L))
Z = list(range(1, K + 1))
Z = [N for j in Z]
R = L % K
Z[0:R] = [j + 1 for j in Z[0:R]] # length of the pieces
random.shuffle(W)
ind = 0
predErr = 0
allResiduals = array([])
SSE = sum(y ** 2) # VLAD. Why do I need this???
# ---running through K training/testings-------------
for val in Z:
j = math.floor(val)
# ---making training/testing subsets-------------
test = W[ind:ind + j]
test.sort()
train = W[0:ind] + W[ind + j:]
train.sort()
ind += j
# -----------------------------------------------
# ---fit runmed_spline here----------------------
yFit, runMed = R_runmed_smooth_spline(x[train], y[train], x[test], **kwargs)
residualsTest = y[test] - yFit
predErr += sum(residualsTest ** 2)
allResiduals = hstack((allResiduals, residualsTest))
# -----------------------------------------------
if useMAD:
predErr = 1.4826 * median(abs(allResiduals))
return predErr
# -----------------------------------------------------------------------
if __name__ == '__main__':
from numpy import linspace, cos, lexsort, zeros, sin
from pylab import plot, show, subplot, savefig, clf, ylim
from pprint import pprint as p
from time import clock as c
x1 = linspace(0, 30, 300)
## y1 = cos(x1)
## y1 = zeros(len(x1),'d') #nice test
y1 = x1 * 0.03
y1 += random.normal(scale=0.2, size=y1.shape)
ind = lexsort(keys=(y1, x1))
x1 = x1[ind]
y1 = y1[ind]
t1 = c()
isSuccessfulFit, yFit, yEval, runMedData, predErr = \
R_runmed_spline_MAIN(x1, y1, x1, runMedSpan=0.01, K=10, sparRange=[0.6, 1.1, 0.1])
t2 = c()
print('done in %s seconds' % (t2 - t1))
subplot(211)
plot(x1, y1, 'bo')
plot(runMedData[0], runMedData[1], 'y^')
plot(x1, yEval, 'r+-')
ylim([-1.5, +1.5])
subplot(212)
plot(x1, y1 - yEval, 'go')
ylim([-1.5, +1.5])
show()
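# Hedged illustration (not part of the original module): the K-fold bookkeeping
# above splits L shuffled indices into K pieces whose lengths differ by at most
# one, with the first L % K pieces one element longer.
def _kfold_piece_lengths(L, K):
    base, remainder = divmod(L, K)
    return [base + 1 if j < remainder else base for j in range(K)]
# _kfold_piece_lengths(10, 3) == [4, 3, 3]; the lengths sum back to L.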
| 32.323529
| 113
| 0.499363
| 648
| 5,495
| 4.143519
| 0.29784
| 0.036499
| 0.048417
| 0.029795
| 0.179143
| 0.13743
| 0.13743
| 0.105028
| 0.05959
| 0.05959
| 0
| 0.029544
| 0.273157
| 5,495
| 169
| 114
| 32.514793
| 0.642714
| 0.22475
| 0
| 0.173077
| 0
| 0
| 0.035422
| 0.007431
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028846
| false
| 0
| 0.076923
| 0
| 0.134615
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
130f0d527db89218f9714b016db75a6b60750779
| 2,721
|
py
|
Python
|
setup.py
|
Ms2ger/python-zstandard
|
b8ea1f6722a710e252b452554442b84c81049439
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Ms2ger/python-zstandard
|
b8ea1f6722a710e252b452554442b84c81049439
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
Ms2ger/python-zstandard
|
b8ea1f6722a710e252b452554442b84c81049439
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2016-present, Gregory Szorc
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import os
import sys
from setuptools import setup
try:
import cffi
except ImportError:
cffi = None
import setup_zstd
SUPPORT_LEGACY = False
SYSTEM_ZSTD = False
WARNINGS_AS_ERRORS = False
if os.environ.get('ZSTD_WARNINGS_AS_ERRORS', ''):
WARNINGS_AS_ERRORS = True
if '--legacy' in sys.argv:
SUPPORT_LEGACY = True
sys.argv.remove('--legacy')
if '--system-zstd' in sys.argv:
SYSTEM_ZSTD = True
sys.argv.remove('--system-zstd')
if '--warnings-as-errors' in sys.argv:
WARNINGS_AS_ERRORS = True
sys.argv.remove('--warnings-as-errors')
# Code for obtaining the Extension instance is in its own module to
# facilitate reuse in other projects.
extensions = [
setup_zstd.get_c_extension(name='zstd',
support_legacy=SUPPORT_LEGACY,
system_zstd=SYSTEM_ZSTD,
warnings_as_errors=WARNINGS_AS_ERRORS),
]
install_requires = []
if cffi:
import make_cffi
extensions.append(make_cffi.ffi.distutils_extension())
# Need change in 1.10 for ffi.from_buffer() to handle all buffer types
# (like memoryview).
# Need feature in 1.11 for ffi.gc() to declare size of objects so we avoid
# garbage collection pitfalls.
install_requires.append('cffi>=1.11')
version = None
with open('c-ext/python-zstandard.h', 'r') as fh:
for line in fh:
if not line.startswith('#define PYTHON_ZSTANDARD_VERSION'):
continue
version = line.split()[2][1:-1]
break
if not version:
raise Exception('could not resolve package version; '
'this should never happen')
setup(
name='zstandard',
version=version,
description='Zstandard bindings for Python',
long_description=open('README.rst', 'r').read(),
url='https://github.com/indygreg/python-zstandard',
author='Gregory Szorc',
author_email='gregory.szorc@gmail.com',
license='BSD',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='zstandard zstd compression',
packages=['zstandard'],
ext_modules=extensions,
test_suite='tests',
install_requires=install_requires,
)
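# Hedged aside (not part of setup.py): the loop above takes the third
# whitespace-separated token on the matching #define line and strips its
# surrounding quotes (version value below is made up):
#
#   line = '#define PYTHON_ZSTANDARD_VERSION "0.10.1"'
#   line.split()[2][1:-1]  # -> '0.10.1'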
| 27.765306
| 78
| 0.655274
| 343
| 2,721
| 5.084548
| 0.469388
| 0.036697
| 0.06422
| 0.044725
| 0.041284
| 0.041284
| 0.041284
| 0
| 0
| 0
| 0
| 0.011967
| 0.232268
| 2,721
| 97
| 79
| 28.051546
| 0.822882
| 0.180448
| 0
| 0.029412
| 0
| 0
| 0.305681
| 0.042381
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.102941
| 0
| 0.102941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1312f8f0f49eb471bc17c6830c67ae3b593f1370
| 8,694
|
py
|
Python
|
mmdet/models/losses/ranking_losses.py
|
VietDunghacker/VarifocalNet
|
f57917afb3c29ceba1d3c4f824d10b9cc53aaa40
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/losses/ranking_losses.py
|
VietDunghacker/VarifocalNet
|
f57917afb3c29ceba1d3c4f824d10b9cc53aaa40
|
[
"Apache-2.0"
] | null | null | null |
mmdet/models/losses/ranking_losses.py
|
VietDunghacker/VarifocalNet
|
f57917afb3c29ceba1d3c4f824d10b9cc53aaa40
|
[
"Apache-2.0"
] | null | null | null |
import torch
class RankSort(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets > 0.)
fg_logits = logits[fg_labels]
fg_targets = targets[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta_RS
relevant_bg_labels=((targets==0) & (logits>=threshold_logit))
relevant_bg_logits = logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
sorting_error=torch.zeros(fg_num).cuda()
ranking_error=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
# Difference Transforms (x_ij)
fg_relations=fg_logits-fg_logits[ii]
bg_relations=relevant_bg_logits-fg_logits[ii]
if delta_RS > 0:
fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1)
bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1)
else:
fg_relations = (fg_relations >= 0).float()
bg_relations = (bg_relations >= 0).float()
# Rank of ii among pos and false positive number (bg with larger scores)
rank_pos=torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
# Rank of ii among all examples
rank=rank_pos+FP_num
# Ranking error of example ii. target_ranking_error is always 0. (Eq. 7)
ranking_error[ii]=FP_num/rank
# Current sorting error of example ii. (Eq. 7)
current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos
#Find examples in the target sorted order for example ii
iou_relations = (fg_targets >= fg_targets[ii])
target_sorted_order = iou_relations * fg_relations
#The rank of ii among positives in sorted order
rank_pos_target = torch.sum(target_sorted_order)
#Compute target sorting error. (Eq. 8)
#Since target ranking error is 0, this is also total target error
target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target
#Compute sorting error on example ii
sorting_error[ii] = current_sorting_error - target_sorting_error
#Identity Update for Ranking Error
if FP_num > eps:
#For ii the update is the ranking error
fg_grad[ii] -= ranking_error[ii]
#For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num)
relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num))
#Find the positives that are misranked (the cause of the error)
#These are the ones with smaller IoU but larger logits
missorted_examples = (~ iou_relations) * fg_relations
#Denominator of sorting pmf
sorting_pmf_denom = torch.sum(missorted_examples)
#Identity Update for Sorting Error
if sorting_pmf_denom > eps:
#For ii the update is the sorting error
fg_grad[ii] -= sorting_error[ii]
#For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom)
fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom))
#Normalize gradients by number of positives
classification_grads[fg_labels]= (fg_grad/fg_num)
classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num)
ctx.save_for_backward(classification_grads)
return ranking_error.mean(), sorting_error.mean()
@staticmethod
def backward(ctx, out_grad1, out_grad2):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None, None
class aLRPLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets == 1)
fg_logits = logits[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta
#Get valid bg logits
relevant_bg_labels=((targets==0)&(logits>=threshold_logit))
relevant_bg_logits=logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
rank=torch.zeros(fg_num).cuda()
prec=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
max_prec=0
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
#x_ij s as score differences with fgs
fg_relations=fg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with fgs
fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1)
#Discard i=j in the summation in rank_pos
fg_relations[ii]=0
#x_ij s as score differences with bgs
bg_relations=relevant_bg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with bgs
bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1)
#Compute the rank of the example within fgs and number of bgs with larger scores
rank_pos=1+torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
#Store the total since it is normalizer also for aLRP Regression error
rank[ii]=rank_pos+FP_num
#Compute precision for this example to compute classification loss
prec[ii]=rank_pos/rank[ii]
#For stability, set eps to an infinitesimally small value (e.g. 1e-6), then compute grads
if FP_num > eps:
fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii]
relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num))
#aLRP with grad formulation fg gradient
classification_grads[fg_labels]= fg_grad
#aLRP with grad formulation bg gradient
classification_grads[relevant_bg_labels]= relevant_bg_grad
classification_grads /= (fg_num)
cls_loss=1-prec.mean()
ctx.save_for_backward(classification_grads)
return cls_loss, rank, order
@staticmethod
def backward(ctx, out_grad1, out_grad2, out_grad3):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None, None, None
class APLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, logits, targets, delta=1.):
classification_grads=torch.zeros(logits.shape).cuda()
#Filter fg logits
fg_labels = (targets == 1)
fg_logits = logits[fg_labels]
fg_num = len(fg_logits)
#Do not use bg with scores less than minimum fg logit
#since changing its score does not have an effect on precision
threshold_logit = torch.min(fg_logits)-delta
#Get valid bg logits
relevant_bg_labels=((targets==0)&(logits>=threshold_logit))
relevant_bg_logits=logits[relevant_bg_labels]
relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda()
rank=torch.zeros(fg_num).cuda()
prec=torch.zeros(fg_num).cuda()
fg_grad=torch.zeros(fg_num).cuda()
max_prec=0
#sort the fg logits
order=torch.argsort(fg_logits)
#Loops over each positive following the order
for ii in order:
#x_ij s as score differences with fgs
fg_relations=fg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with fgs
fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1)
#Discard i=j in the summation in rank_pos
fg_relations[ii]=0
#x_ij s as score differences with bgs
bg_relations=relevant_bg_logits-fg_logits[ii]
#Apply piecewise linear function and determine relations with bgs
bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1)
#Compute the rank of the example within fgs and number of bgs with larger scores
rank_pos=1+torch.sum(fg_relations)
FP_num=torch.sum(bg_relations)
#Store the total since it is normalizer also for aLRP Regression error
rank[ii]=rank_pos+FP_num
#Compute precision for this example
current_prec=rank_pos/rank[ii]
#Compute interpolated AP and store gradients for relevant bg examples
if (max_prec<=current_prec):
max_prec=current_prec
relevant_bg_grad += (bg_relations/rank[ii])
else:
relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec)))
#Store fg gradients
fg_grad[ii]=-(1-max_prec)
prec[ii]=max_prec
#aLRP with grad formulation fg gradient
classification_grads[fg_labels]= fg_grad
#aLRP with grad formulation bg gradient
classification_grads[relevant_bg_labels]= relevant_bg_grad
classification_grads /= fg_num
cls_loss=1-prec.mean()
ctx.save_for_backward(classification_grads)
return cls_loss
@staticmethod
def backward(ctx, out_grad1):
g1, =ctx.saved_tensors
return g1*out_grad1, None, None
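# Hedged usage sketch (not part of the original losses; shapes and values are
# made up): RankSort is invoked through the autograd.Function interface and
# allocates CUDA tensors internally, so a GPU is assumed. Targets carry an
# IoU-like quality in (0, 1] for positives and 0 for background.
if __name__ == '__main__' and torch.cuda.is_available():
    logits = torch.randn(8, device='cuda', requires_grad=True)
    targets = torch.tensor([0.9, 0.0, 0.7, 0.0, 0.3, 0.0, 0.0, 0.8], device='cuda')
    rank_err, sort_err = RankSort.apply(logits, targets, 0.50, 1e-10)
    (rank_err + sort_err).backward()  # gradients flow back into `logits`
    print(float(rank_err), float(sort_err), logits.grad.shape)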
| 35.198381
| 96
| 0.743961
| 1,362
| 8,694
| 4.533774
| 0.133627
| 0.046964
| 0.022672
| 0.021862
| 0.711903
| 0.680324
| 0.660891
| 0.632065
| 0.597085
| 0.58753
| 0
| 0.011546
| 0.163216
| 8,694
| 246
| 97
| 35.341463
| 0.837251
| 0.320451
| 0
| 0.58209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.007463
| 0
| 0.119403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1314a6b3e97ad080ab7cf47017455ad35f9e033a
| 34,521
|
py
|
Python
|
maint/MultiStage2.py
|
Liastre/pcre2
|
ca4fd145ee16acbc67b52b8563ab6e25c67ddfc8
|
[
"BSD-3-Clause"
] | null | null | null |
maint/MultiStage2.py
|
Liastre/pcre2
|
ca4fd145ee16acbc67b52b8563ab6e25c67ddfc8
|
[
"BSD-3-Clause"
] | 1
|
2020-04-07T10:42:22.000Z
|
2020-04-07T10:42:22.000Z
|
maint/MultiStage2.py
|
Liastre/pcre2
|
ca4fd145ee16acbc67b52b8563ab6e25c67ddfc8
|
[
"BSD-3-Clause"
] | null | null | null |
#! /usr/bin/python
# Multistage table builder
# (c) Peter Kankowski, 2008
##############################################################################
# This script was submitted to the PCRE project by Peter Kankowski as part of
# the upgrading of Unicode property support. The new code speeds up property
# matching many times. The script is for the use of PCRE maintainers, to
# generate the pcre2_ucd.c file that contains a digested form of the Unicode
# data tables. A number of extensions have been added to the original script.
#
# The script has now been upgraded to Python 3 for PCRE2, and should be run in
# the maint subdirectory, using the command
#
# [python3] ./MultiStage2.py >../src/pcre2_ucd.c
#
# It requires six Unicode data tables: DerivedGeneralCategory.txt,
# GraphemeBreakProperty.txt, Scripts.txt, ScriptExtensions.txt,
# CaseFolding.txt, and emoji-data.txt. These must be in the
# maint/Unicode.tables subdirectory.
#
# DerivedGeneralCategory.txt is found in the "extracted" subdirectory of the
# Unicode database (UCD) on the Unicode web site; GraphemeBreakProperty.txt is
# in the "auxiliary" subdirectory. Scripts.txt, ScriptExtensions.txt, and
# CaseFolding.txt are directly in the UCD directory. The emoji-data.txt file is
# in files associated with Unicode Technical Standard #51 ("Unicode Emoji"),
# for example:
#
# http://unicode.org/Public/emoji/11.0/emoji-data.txt
#
# -----------------------------------------------------------------------------
# Minor modifications made to this script:
# Added #! line at start
# Removed tabs
# Made it work with Python 2.4 by rewriting two statements that needed 2.5
# Consequent code tidy
# Adjusted data file names to take from the Unicode.tables directory
# Adjusted global table names by prefixing _pcre_.
# Commented out stuff relating to the casefolding table, which isn't used;
# removed completely in 2012.
# Corrected size calculation
# Add #ifndef SUPPORT_UCP to use dummy tables when no UCP support is needed.
# Update for PCRE2: name changes, and SUPPORT_UCP is abolished.
#
# Major modifications made to this script:
# Added code to add a grapheme break property field to records.
#
# Added code to search for sets of more than two characters that must match
# each other caselessly. A new table is output containing these sets, and
# offsets into the table are added to the main output records. This new
# code scans CaseFolding.txt instead of UnicodeData.txt, which is no longer
# used.
#
# Update for Python3:
# . Processed with 2to3, but that didn't fix everything
# . Changed string.strip to str.strip
# . Added encoding='utf-8' to the open() call
# . Inserted 'int' before blocksize/ELEMS_PER_LINE because an int is
# required and the result of the division is a float
#
# Added code to scan the emoji-data.txt file to find the Extended Pictographic
# property, which is used by PCRE2 as a grapheme breaking property. This was
# done when updating to Unicode 11.0.0 (July 2018).
#
# Added code to add a Script Extensions field to records. This has increased
# their size from 8 to 12 bytes, only 10 of which are currently used.
#
# 01-March-2010: Updated list of scripts for Unicode 5.2.0
# 30-April-2011: Updated list of scripts for Unicode 6.0.0
# July-2012: Updated list of scripts for Unicode 6.1.0
# 20-August-2012: Added scan of GraphemeBreakProperty.txt and added a new
# field in the record to hold the value. Luckily, the
# structure had a hole in it, so the resulting table is
# not much bigger than before.
# 18-September-2012: Added code for multiple caseless sets. This uses the
# final hole in the structure.
# 30-September-2012: Added RegionalIndicator break property from Unicode 6.2.0
# 13-May-2014: Updated for PCRE2
# 03-June-2014: Updated for Python 3
# 20-June-2014: Updated for Unicode 7.0.0
# 12-August-2014: Updated to put Unicode version into the file
# 19-June-2015: Updated for Unicode 8.0.0
# 02-July-2017: Updated for Unicode 10.0.0
# 03-July-2018: Updated for Unicode 11.0.0
# 07-July-2018: Added code to scan emoji-data.txt for the Extended
# Pictographic property.
# 01-October-2018: Added the 'Unknown' script name
# 03-October-2018: Added new field for Script Extensions
# 27-July-2019: Updated for Unicode 12.1.0
# ----------------------------------------------------------------------------
#
#
# The main tables generated by this script are used by macros defined in
# pcre2_internal.h. They look up Unicode character properties using short
# sequences of code that contains no branches, which makes for greater speed.
#
# Conceptually, there is a table of records (of type ucd_record), containing a
# script number, script extension value, character type, grapheme break type,
# offset to caseless matching set, offset to the character's other case, for
# every Unicode character. However, a real table covering all Unicode
# characters would be far too big. It can be efficiently compressed by
# observing that many characters have the same record, and many blocks of
# characters (taking 128 characters in a block) have the same set of records as
# other blocks. This leads to a 2-stage lookup process.
#
# This script constructs six tables. The ucd_caseless_sets table contains
# lists of characters that all match each other caselessly. Each list is
# in order, and is terminated by NOTACHAR (0xffffffff), which is larger than
# any valid character. The first list is empty; this is used for characters
# that are not part of any list.
#
# The ucd_digit_sets table contains the code points of the '9' characters in
# each set of 10 decimal digits in Unicode. This is used to ensure that digits
# in script runs all come from the same set. The first element in the vector
# contains the number of subsequent elements, which are in ascending order.
#
# The ucd_script_sets vector contains lists of script numbers that are the
# Script Extensions properties of certain characters. Each list is terminated
# by zero (ucp_Unknown). A character with more than one script listed for its
# Script Extension property has a negative value in its record. This is the
# negated offset to the start of the relevant list in the ucd_script_sets
# vector.
#
# The ucd_records table contains one instance of every unique record that is
# required. The ucd_stage1 table is indexed by a character's block number,
# which is the character's code point divided by 128, since 128 is the size
# of each block. The result of a lookup in ucd_stage1 is a "virtual" block number.
#
# The ucd_stage2 table is a table of "virtual" blocks; each block is indexed by
# the offset of a character within its own block, and the result is the index
# number of the required record in the ucd_records vector.
#
# The following examples are correct for the Unicode 11.0.0 database. Future
# updates may change the actual lookup values.
#
# Example: lowercase "a" (U+0061) is in block 0
# lookup 0 in stage1 table yields 0
# lookup 97 (0x61) in the first table in stage2 yields 17
# record 17 is { 34, 5, 12, 0, -32, 34, 0 }
# 34 = ucp_Latin => Latin script
# 5 = ucp_Ll => Lower case letter
# 12 = ucp_gbOther => Grapheme break property "Other"
# 0 => Not part of a caseless set
# -32 (-0x20) => Other case is U+0041
# 34 = ucp_Latin => No special Script Extension property
# 0 => Dummy value, unused at present
#
# Almost all lowercase Latin characters resolve to the same record. One or two
# are different because they are part of a multi-character caseless set (for
# example, k, K and the Kelvin symbol are such a set).
#
# Example: hiragana letter A (U+3042) is in block 96 (0x60)
# lookup 96 in stage1 table yields 90
# lookup 66 (0x42) in table 90 in stage2 yields 564
# record 564 is { 27, 7, 12, 0, 0, 27, 0 }
# 27 = ucp_Hiragana => Hiragana script
# 7 = ucp_Lo => Other letter
# 12 = ucp_gbOther => Grapheme break property "Other"
# 0 => Not part of a caseless set
# 0 => No other case
# 27 = ucp_Hiragana => No special Script Extension property
# 0 => Dummy value, unused at present
#
# Example: vedic tone karshana (U+1CD0) is in block 57 (0x39)
# lookup 57 in stage1 table yields 55
# lookup 80 (0x50) in table 55 in stage2 yields 458
# record 458 is { 28, 12, 3, 0, 0, -101, 0 }
# 28 = ucp_Inherited => Script inherited from predecessor
# 12 = ucp_Mn => Non-spacing mark
# 3 = ucp_gbExtend => Grapheme break property "Extend"
# 0 => Not part of a caseless set
# 0 => No other case
# -101 => Script Extension list offset = 101
# 0 => Dummy value, unused at present
#
# At offset 101 in the ucd_script_sets vector we find the list 3, 15, 107, 29,
# and terminator 0. This means that this character is expected to be used with
# any of those scripts, which are Bengali, Devanagari, Grantha, and Kannada.
#
# Philip Hazel, 03 July 2008
# Last Updated: 07 October 2018
##############################################################################
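# Illustrative sketch (not part of this script or its output): the lookup that
# the pcre2_internal.h macros effectively perform on the generated tables,
# using the block size of 128 described above:
#
#   virtual_block = ucd_stage1[code_point // 128]
#   record_index  = ucd_stage2[virtual_block * 128 + (code_point % 128)]
#   record        = ucd_records[record_index]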
import re
import string
import sys
MAX_UNICODE = 0x110000
NOTACHAR = 0xffffffff
# Parse a line of Scripts.txt, GraphemeBreakProperty.txt or DerivedGeneralCategory.txt
def make_get_names(enum):
return lambda chardata: enum.index(chardata[1])
# Parse a line of CaseFolding.txt
def get_other_case(chardata):
if chardata[1] == 'C' or chardata[1] == 'S':
return int(chardata[2], 16) - int(chardata[0], 16)
return 0
# Parse a line of ScriptExtensions.txt
def get_script_extension(chardata):
this_script_list = list(chardata[1].split(' '))
if len(this_script_list) == 1:
return script_abbrevs.index(this_script_list[0])
script_numbers = []
for d in this_script_list:
script_numbers.append(script_abbrevs.index(d))
script_numbers.append(0)
script_numbers_length = len(script_numbers)
for i in range(1, len(script_lists) - script_numbers_length + 1):
for j in range(0, script_numbers_length):
found = True
if script_lists[i+j] != script_numbers[j]:
found = False
break
if found:
return -i
# Not found in existing lists
return_value = len(script_lists)
script_lists.extend(script_numbers)
return -return_value
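# Illustrative note (hypothetical state): if script_lists is currently
# [0, 3, 15, 107, 29, 0] and a character's Script Extensions resolve to the
# script numbers [3, 15, 107, 29], the zero-terminated sub-list is found at
# offset 1, so -1 is returned; a single-script extension is returned as the
# positive script number instead.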
# Read the whole table in memory, setting/checking the Unicode version
def read_table(file_name, get_value, default_value):
global unicode_version
f = re.match(r'^[^/]+/([^.]+)\.txt$', file_name)
file_base = f.group(1)
version_pat = r"^# " + re.escape(file_base) + r"-(\d+\.\d+\.\d+)\.txt$"
file = open(file_name, 'r', encoding='utf-8')
f = re.match(version_pat, file.readline())
version = f.group(1)
if unicode_version == "":
unicode_version = version
elif unicode_version != version:
print("WARNING: Unicode version differs in %s", file_name, file=sys.stderr)
table = [default_value] * MAX_UNICODE
for line in file:
line = re.sub(r'#.*', '', line)
chardata = list(map(str.strip, line.split(';')))
if len(chardata) <= 1:
continue
value = get_value(chardata)
m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
char = int(m.group(1), 16)
if m.group(3) is None:
last = char
else:
last = int(m.group(3), 16)
for i in range(char, last + 1):
# It is important not to overwrite a previously set
# value because in the CaseFolding file there are lines
# to be ignored (returning the default value of 0)
# which often come after a line which has already set
# data.
if table[i] == default_value:
table[i] = value
file.close()
return table
# Get the smallest possible C language type for the values
def get_type_size(table):
type_size = [("uint8_t", 1), ("uint16_t", 2), ("uint32_t", 4),
("signed char", 1), ("pcre_int16", 2), ("pcre_int32", 4)]
limits = [(0, 255), (0, 65535), (0, 4294967295),
(-128, 127), (-32768, 32767), (-2147483648, 2147483647)]
minval = min(table)
maxval = max(table)
for num, (minlimit, maxlimit) in enumerate(limits):
if minlimit <= minval and maxval <= maxlimit:
return type_size[num]
else:
raise OverflowError("Too large to fit into C types")
def get_tables_size(*tables):
total_size = 0
for table in tables:
type, size = get_type_size(table)
total_size += size * len(table)
return total_size
# Compress the table into the two stages
def compress_table(table, block_size):
blocks = {} # Dictionary for finding identical blocks
stage1 = [] # Stage 1 table contains block numbers (indices into stage 2 table)
stage2 = [] # Stage 2 table contains the blocks with property values
table = tuple(table)
for i in range(0, len(table), block_size):
block = table[i:i+block_size]
start = blocks.get(block)
if start is None:
# Allocate a new block
start = len(stage2) / block_size
stage2 += block
blocks[block] = start
stage1.append(start)
return stage1, stage2
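# Worked example (hypothetical data): compress_table([7, 7, 8, 8, 7, 7, 8, 8], 4)
# yields stage1 == [0.0, 0.0] and stage2 == [7, 7, 8, 8]; both halves of the
# input are the same block, so stage 2 stores it once and both stage-1 entries
# point at it (the entries are floats because of the true division above).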
# Print a table
def print_table(table, table_name, block_size = None):
type, size = get_type_size(table)
ELEMS_PER_LINE = 16
s = "const %s %s[] = { /* %d bytes" % (type, table_name, size * len(table))
if block_size:
s += ", block = %d" % block_size
print(s + " */")
table = tuple(table)
if block_size is None:
fmt = "%3d," * ELEMS_PER_LINE + " /* U+%04X */"
mult = MAX_UNICODE / len(table)
for i in range(0, len(table), ELEMS_PER_LINE):
print(fmt % (table[i:i+ELEMS_PER_LINE] +
(int(i * mult),)))
else:
if block_size > ELEMS_PER_LINE:
el = ELEMS_PER_LINE
else:
el = block_size
fmt = "%3d," * el + "\n"
if block_size > ELEMS_PER_LINE:
fmt = fmt * int(block_size / ELEMS_PER_LINE)
for i in range(0, len(table), block_size):
print(("/* block %d */\n" + fmt) % ((i / block_size,) + table[i:i+block_size]))
print("};\n")
# Extract the unique combinations of properties into records
def combine_tables(*tables):
records = {}
index = []
for t in zip(*tables):
i = records.get(t)
if i is None:
i = records[t] = len(records)
index.append(i)
return index, records
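# Worked example (hypothetical data): combine_tables([0, 0, 1], [5, 5, 7])
# gives index == [0, 0, 1] and records == {(0, 5): 0, (1, 7): 1}; each unique
# combination of per-character property values becomes one shared record.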
def get_record_size_struct(records):
size = 0
structure = '/* When recompiling tables with a new Unicode version, please check the\n' + \
'types in this structure definition from pcre2_internal.h (the actual\n' + \
'field names will be different):\n\ntypedef struct {\n'
for i in range(len(records[0])):
record_slice = [record[i] for record in records]
slice_type, slice_size = get_type_size(record_slice)
# add padding: round up to the nearest power of slice_size
size = (size + slice_size - 1) & -slice_size
size += slice_size
structure += '%s property_%d;\n' % (slice_type, i)
# round up to the first item of the next structure in array
record_slice = [record[0] for record in records]
slice_type, slice_size = get_type_size(record_slice)
size = (size + slice_size - 1) & -slice_size
structure += '} ucd_record;\n*/\n'
return size, structure
def test_record_size():
tests = [ \
( [(3,), (6,), (6,), (1,)], 1 ), \
( [(300,), (600,), (600,), (100,)], 2 ), \
( [(25, 3), (6, 6), (34, 6), (68, 1)], 2 ), \
( [(300, 3), (6, 6), (340, 6), (690, 1)], 4 ), \
( [(3, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(300, 300), (6, 6), (6, 340), (1, 690)], 4 ), \
( [(3, 100000), (6, 6), (6, 123456), (1, 690)], 8 ), \
( [(100000, 300), (6, 6), (123456, 6), (1, 690)], 8 ), \
]
for test in tests:
size, struct = get_record_size_struct(test[0])
assert(size == test[1])
#print struct
def print_records(records, record_size):
print('const ucd_record PRIV(ucd_records)[] = { ' + \
'/* %d bytes, record size %d */' % (len(records) * record_size, record_size))
records = list(zip(list(records.keys()), list(records.values())))
records.sort(key = lambda x: x[1])
for i, record in enumerate(records):
print((' {' + '%6d, ' * len(record[0]) + '}, /* %3d */') % (record[0] + (i,)))
print('};\n')
script_names = ['Unknown', 'Arabic', 'Armenian', 'Bengali', 'Bopomofo', 'Braille', 'Buginese', 'Buhid', 'Canadian_Aboriginal',
'Cherokee', 'Common', 'Coptic', 'Cypriot', 'Cyrillic', 'Deseret', 'Devanagari', 'Ethiopic', 'Georgian',
'Glagolitic', 'Gothic', 'Greek', 'Gujarati', 'Gurmukhi', 'Han', 'Hangul', 'Hanunoo', 'Hebrew', 'Hiragana',
'Inherited', 'Kannada', 'Katakana', 'Kharoshthi', 'Khmer', 'Lao', 'Latin', 'Limbu', 'Linear_B', 'Malayalam',
'Mongolian', 'Myanmar', 'New_Tai_Lue', 'Ogham', 'Old_Italic', 'Old_Persian', 'Oriya', 'Osmanya', 'Runic',
'Shavian', 'Sinhala', 'Syloti_Nagri', 'Syriac', 'Tagalog', 'Tagbanwa', 'Tai_Le', 'Tamil', 'Telugu', 'Thaana',
'Thai', 'Tibetan', 'Tifinagh', 'Ugaritic', 'Yi',
# New for Unicode 5.0
'Balinese', 'Cuneiform', 'Nko', 'Phags_Pa', 'Phoenician',
# New for Unicode 5.1
'Carian', 'Cham', 'Kayah_Li', 'Lepcha', 'Lycian', 'Lydian', 'Ol_Chiki', 'Rejang', 'Saurashtra', 'Sundanese', 'Vai',
# New for Unicode 5.2
'Avestan', 'Bamum', 'Egyptian_Hieroglyphs', 'Imperial_Aramaic',
'Inscriptional_Pahlavi', 'Inscriptional_Parthian',
'Javanese', 'Kaithi', 'Lisu', 'Meetei_Mayek',
'Old_South_Arabian', 'Old_Turkic', 'Samaritan', 'Tai_Tham', 'Tai_Viet',
# New for Unicode 6.0.0
'Batak', 'Brahmi', 'Mandaic',
# New for Unicode 6.1.0
'Chakma', 'Meroitic_Cursive', 'Meroitic_Hieroglyphs', 'Miao', 'Sharada', 'Sora_Sompeng', 'Takri',
# New for Unicode 7.0.0
'Bassa_Vah', 'Caucasian_Albanian', 'Duployan', 'Elbasan', 'Grantha', 'Khojki', 'Khudawadi',
'Linear_A', 'Mahajani', 'Manichaean', 'Mende_Kikakui', 'Modi', 'Mro', 'Nabataean',
'Old_North_Arabian', 'Old_Permic', 'Pahawh_Hmong', 'Palmyrene', 'Psalter_Pahlavi',
'Pau_Cin_Hau', 'Siddham', 'Tirhuta', 'Warang_Citi',
# New for Unicode 8.0.0
'Ahom', 'Anatolian_Hieroglyphs', 'Hatran', 'Multani', 'Old_Hungarian',
'SignWriting',
# New for Unicode 10.0.0
'Adlam', 'Bhaiksuki', 'Marchen', 'Newa', 'Osage', 'Tangut', 'Masaram_Gondi',
'Nushu', 'Soyombo', 'Zanabazar_Square',
# New for Unicode 11.0.0
'Dogra', 'Gunjala_Gondi', 'Hanifi_Rohingya', 'Makasar', 'Medefaidrin',
'Old_Sogdian', 'Sogdian',
# New for Unicode 12.0.0
'Elymaic', 'Nandinagari', 'Nyiakeng_Puachue_Hmong', 'Wancho'
]
script_abbrevs = [
'Zzzz', 'Arab', 'Armn', 'Beng', 'Bopo', 'Brai', 'Bugi', 'Buhd', 'Cans',
'Cher', 'Zyyy', 'Copt', 'Cprt', 'Cyrl', 'Dsrt', 'Deva', 'Ethi', 'Geor',
'Glag', 'Goth', 'Grek', 'Gujr', 'Guru', 'Hani', 'Hang', 'Hano', 'Hebr',
'Hira', 'Zinh', 'Knda', 'Kana', 'Khar', 'Khmr', 'Laoo', 'Latn', 'Limb',
'Linb', 'Mlym', 'Mong', 'Mymr', 'Talu', 'Ogam', 'Ital', 'Xpeo', 'Orya',
'Osma', 'Runr', 'Shaw', 'Sinh', 'Sylo', 'Syrc', 'Tglg', 'Tagb', 'Tale',
'Taml', 'Telu', 'Thaa', 'Thai', 'Tibt', 'Tfng', 'Ugar', 'Yiii',
#New for Unicode 5.0
'Bali', 'Xsux', 'Nkoo', 'Phag', 'Phnx',
#New for Unicode 5.1
'Cari', 'Cham', 'Kali', 'Lepc', 'Lyci', 'Lydi', 'Olck', 'Rjng', 'Saur',
'Sund', 'Vaii',
#New for Unicode 5.2
'Avst', 'Bamu', 'Egyp', 'Armi', 'Phli', 'Prti', 'Java', 'Kthi', 'Lisu',
'Mtei', 'Sarb', 'Orkh', 'Samr', 'Lana', 'Tavt',
#New for Unicode 6.0.0
'Batk', 'Brah', 'Mand',
#New for Unicode 6.1.0
'Cakm', 'Merc', 'Mero', 'Plrd', 'Shrd', 'Sora', 'Takr',
#New for Unicode 7.0.0
'Bass', 'Aghb', 'Dupl', 'Elba', 'Gran', 'Khoj', 'Sind', 'Lina', 'Mahj',
'Mani', 'Mend', 'Modi', 'Mroo', 'Nbat', 'Narb', 'Perm', 'Hmng', 'Palm',
'Phlp', 'Pauc', 'Sidd', 'Tirh', 'Wara',
#New for Unicode 8.0.0
'Ahom', 'Hluw', 'Hatr', 'Mult', 'Hung', 'Sgnw',
#New for Unicode 10.0.0
'Adlm', 'Bhks', 'Marc', 'Newa', 'Osge', 'Tang', 'Gonm', 'Nshu', 'Soyo',
'Zanb',
#New for Unicode 11.0.0
'Dogr', 'Gong', 'Rohg', 'Maka', 'Medf', 'Sogo', 'Sogd',
#New for Unicode 12.0.0
'Elym', 'Nand', 'Hmnp', 'Wcho'
]
category_names = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps',
'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs' ]
# The Extended_Pictographic property is not found in the file where all the
# others are (GraphemeBreakProperty.txt). It comes from the emoji-data.txt
# file, but we list it here so that the name has the correct index value.
break_property_names = ['CR', 'LF', 'Control', 'Extend', 'Prepend',
'SpacingMark', 'L', 'V', 'T', 'LV', 'LVT', 'Regional_Indicator', 'Other',
'ZWJ', 'Extended_Pictographic' ]
test_record_size()
unicode_version = ""
script = read_table('Unicode.tables/Scripts.txt', make_get_names(script_names), script_names.index('Unknown'))
category = read_table('Unicode.tables/DerivedGeneralCategory.txt', make_get_names(category_names), category_names.index('Cn'))
break_props = read_table('Unicode.tables/GraphemeBreakProperty.txt', make_get_names(break_property_names), break_property_names.index('Other'))
other_case = read_table('Unicode.tables/CaseFolding.txt', get_other_case, 0)
# The grapheme breaking rules were changed for Unicode 11.0.0 (June 2018). Now
# we need to find the Extended_Pictographic property for emoji characters. This
# can be set as an additional grapheme break property, because the default for
# all the emojis is "other". We scan the emoji-data.txt file and modify the
# break-props table.
file = open('Unicode.tables/emoji-data.txt', 'r', encoding='utf-8')
for line in file:
line = re.sub(r'#.*', '', line)
chardata = list(map(str.strip, line.split(';')))
if len(chardata) <= 1:
continue
if chardata[1] != "Extended_Pictographic":
continue
m = re.match(r'([0-9a-fA-F]+)(\.\.([0-9a-fA-F]+))?$', chardata[0])
char = int(m.group(1), 16)
if m.group(3) is None:
last = char
else:
last = int(m.group(3), 16)
for i in range(char, last + 1):
if break_props[i] != break_property_names.index('Other'):
print("WARNING: Emoji 0x%x has break property %s, not 'Other'",
i, break_property_names[break_props[i]], file=sys.stderr)
break_props[i] = break_property_names.index('Extended_Pictographic')
file.close()
# The Script Extensions property default value is the Script value. Parse the
# file, setting 'Unknown' as the default (this will never be a Script Extension
# value), then scan it and fill in the default from Scripts. Code added by PH
# in October 2018. Positive values are used for just a single script for a
# code point. Negative values are negated offsets in a list of lists of
# multiple scripts. Initialize this list with a single entry, as the zeroth
# element is never used.
script_lists = [0]
script_abbrevs_default = script_abbrevs.index('Zzzz')
scriptx = read_table('Unicode.tables/ScriptExtensions.txt', get_script_extension, script_abbrevs_default)
for i in range(0, MAX_UNICODE):
if scriptx[i] == script_abbrevs_default:
scriptx[i] = script[i]
# With the addition of the new Script Extensions field, we need some padding
# to get the Unicode records up to 12 bytes (multiple of 4). Set a value
# greater than 255 to make the field 16 bits.
padding_dummy = [0] * MAX_UNICODE
padding_dummy[0] = 256
# This block of code was added by PH in September 2012. I am not a Python
# programmer, so the style is probably dreadful, but it does the job. It scans
# the other_case table to find sets of more than two characters that must all
# match each other caselessly. Later in this script a table of these sets is
# written out. However, we have to do this work here in order to compute the
# offsets in the table that are inserted into the main table.
# The CaseFolding.txt file lists pairs, but the common logic for reading data
# sets only one value, so first we go through the table and set "return"
# offsets for those that are not already set.
for c in range(MAX_UNICODE):
if other_case[c] != 0 and other_case[c + other_case[c]] == 0:
other_case[c + other_case[c]] = -other_case[c]
# Now scan again and create equivalence sets.
sets = []
for c in range(MAX_UNICODE):
o = c + other_case[c]
# Trigger when this character's other case does not point back here. We
# now have three characters that are case-equivalent.
if other_case[o] != -other_case[c]:
t = o + other_case[o]
# Scan the existing sets to see if any of the three characters are already
# part of a set. If so, unite the existing set with the new set.
appended = 0
for s in sets:
found = 0
for x in s:
if x == c or x == o or x == t:
found = 1
# Add new characters to an existing set
if found:
found = 0
for y in [c, o, t]:
for x in s:
if x == y:
found = 1
if not found:
s.append(y)
appended = 1
# If we have not added to an existing set, create a new one.
if not appended:
sets.append([c, o, t])
# End of loop looking for caseless sets.
# Now scan the sets and set appropriate offsets for the characters.
caseless_offsets = [0] * MAX_UNICODE
offset = 1
for s in sets:
for x in s:
caseless_offsets[x] = offset
offset += len(s) + 1
# End of block of code for creating offsets for caseless matching sets.
# Combine the tables
table, records = combine_tables(script, category, break_props,
caseless_offsets, other_case, scriptx, padding_dummy)
record_size, record_struct = get_record_size_struct(list(records.keys()))
# Find the optimum block size for the two-stage table
min_size = sys.maxsize
for block_size in [2 ** i for i in range(5,10)]:
size = len(records) * record_size
stage1, stage2 = compress_table(table, block_size)
size += get_tables_size(stage1, stage2)
#print "/* block size %5d => %5d bytes */" % (block_size, size)
if size < min_size:
min_size = size
min_stage1, min_stage2 = stage1, stage2
min_block_size = block_size
print("/* This module is generated by the maint/MultiStage2.py script.")
print("Do not modify it by hand. Instead modify the script and run it")
print("to regenerate this code.")
print()
print("As well as being part of the PCRE2 library, this module is #included")
print("by the pcre2test program, which redefines the PRIV macro to change")
print("table names from _pcre2_xxx to xxxx, thereby avoiding name clashes")
print("with the library. At present, just one of these tables is actually")
print("needed. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
print("#ifdef HAVE_CONFIG_H")
print("#include \"config.h\"")
print("#endif")
print()
print("#include \"pcre2_internal.h\"")
print()
print("#endif /* PCRE2_PCRE2TEST */")
print()
print("/* Unicode character database. */")
print("/* This file was autogenerated by the MultiStage2.py script. */")
print("/* Total size: %d bytes, block size: %d. */" % (min_size, min_block_size))
print()
print("/* The tables herein are needed only when UCP support is built,")
print("and in PCRE2 that happens automatically with UTF support.")
print("This module should not be referenced otherwise, so")
print("it should not matter whether it is compiled or not. However")
print("a comment was received about space saving - maybe the guy linked")
print("all the modules rather than using a library - so we include a")
print("condition to cut out the tables when not needed. But don't leave")
print("a totally empty module because some compilers barf at that.")
print("Instead, just supply some small dummy tables. */")
print()
print("#ifndef SUPPORT_UNICODE")
print("const ucd_record PRIV(ucd_records)[] = {{0,0,0,0,0,0,0 }};")
print("const uint16_t PRIV(ucd_stage1)[] = {0};")
print("const uint16_t PRIV(ucd_stage2)[] = {0};")
print("const uint32_t PRIV(ucd_caseless_sets)[] = {0};")
print("#else")
print()
print("const char *PRIV(unicode_version) = \"{}\";".format(unicode_version))
print()
print("/* If the 32-bit library is run in non-32-bit mode, character values")
print("greater than 0x10ffff may be encountered. For these we set up a")
print("special record. */")
print()
print("#if PCRE2_CODE_UNIT_WIDTH == 32")
print("const ucd_record PRIV(dummy_ucd_record)[] = {{")
print(" ucp_Unknown, /* script */")
print(" ucp_Cn, /* type unassigned */")
print(" ucp_gbOther, /* grapheme break property */")
print(" 0, /* case set */")
print(" 0, /* other case */")
print(" ucp_Unknown, /* script extension */")
print(" 0, /* dummy filler */")
print(" }};")
print("#endif")
print()
print(record_struct)
# --- Added by PH: output the table of caseless character sets ---
print("/* This table contains lists of characters that are caseless sets of")
print("more than one character. Each list is terminated by NOTACHAR. */\n")
print("const uint32_t PRIV(ucd_caseless_sets)[] = {")
print(" NOTACHAR,")
for s in sets:
s = sorted(s)
for x in s:
print(' 0x%04x,' % x, end=' ')
print(' NOTACHAR,')
print('};')
print()
# ------
print("/* When #included in pcre2test, we don't need the table of digit")
print("sets, nor the the large main UCD tables. */")
print()
print("#ifndef PCRE2_PCRE2TEST")
print()
# --- Added by PH: read Scripts.txt again for the sets of 10 digits. ---
digitsets = []
file = open('Unicode.tables/Scripts.txt', 'r', encoding='utf-8')
for line in file:
m = re.match(r'([0-9a-fA-F]+)\.\.([0-9a-fA-F]+)\s+;\s+\S+\s+#\s+Nd\s+', line)
if m is None:
continue
first = int(m.group(1),16)
last = int(m.group(2),16)
if ((last - first + 1) % 10) != 0:
print("ERROR: %04x..%04x does not contain a multiple of 10 characters" % (first, last),
file=sys.stderr)
while first < last:
digitsets.append(first + 9)
first += 10
file.close()
digitsets.sort()
print("/* This table lists the code points for the '9' characters in each")
print("set of decimal digits. It is used to ensure that all the digits in")
print("a script run come from the same set. */\n")
print("const uint32_t PRIV(ucd_digit_sets)[] = {")
print(" %d, /* Number of subsequent values */" % len(digitsets), end='')
count = 8
for d in digitsets:
if count == 8:
print("\n ", end='')
count = 0
print(" 0x%05x," % d, end='')
count += 1
print("\n};\n")
print("/* This vector is a list of lists of scripts for the Script Extension")
print("property. Each sublist is zero-terminated. */\n")
print("const uint8_t PRIV(ucd_script_sets)[] = {")
count = 0
print(" /* 0 */", end='')
for d in script_lists:
print(" %3d," % d, end='')
count += 1
if d == 0:
print("\n /* %3d */" % count, end='')
print("\n};\n")
# Output the main UCD tables.
print("/* These are the main two-stage UCD tables. The fields in each record are:")
print("script (8 bits), character type (8 bits), grapheme break property (8 bits),")
print("offset to multichar other cases or zero (8 bits), offset to other case")
print("or zero (32 bits, signed), script extension (16 bits, signed), and a dummy")
print("16-bit field to make the whole thing a multiple of 4 bytes. */\n")
print_records(records, record_size)
print_table(min_stage1, 'PRIV(ucd_stage1)')
print_table(min_stage2, 'PRIV(ucd_stage2)', min_block_size)
print("#if UCD_BLOCK_SIZE != %d" % min_block_size)
print("#error Please correct UCD_BLOCK_SIZE in pcre2_internal.h")
print("#endif")
print("#endif /* SUPPORT_UNICODE */")
print()
print("#endif /* PCRE2_PCRE2TEST */")
# This code was part of the original contribution, but is commented out as it
# was never used. A two-stage table has sufficed.
"""
# Three-stage tables:
# Find the optimum block size for 3-stage table
min_size = sys.maxint
for stage3_block in [2 ** i for i in range(2,6)]:
stage_i, stage3 = compress_table(table, stage3_block)
for stage2_block in [2 ** i for i in range(5,10)]:
size = len(records) * 4
stage1, stage2 = compress_table(stage_i, stage2_block)
size += get_tables_size(stage1, stage2, stage3)
# print "/* %5d / %3d => %5d bytes */" % (stage2_block, stage3_block, size)
if size < min_size:
min_size = size
min_stage1, min_stage2, min_stage3 = stage1, stage2, stage3
min_stage2_block, min_stage3_block = stage2_block, stage3_block
print "/* Total size: %d bytes" % min_size */
print_records(records)
print_table(min_stage1, 'ucd_stage1')
print_table(min_stage2, 'ucd_stage2', min_stage2_block)
print_table(min_stage3, 'ucd_stage3', min_stage3_block)
"""
| 42.72401
| 143
| 0.625648
| 4,909
| 34,521
| 4.31208
| 0.218578
| 0.01318
| 0.012283
| 0.005716
| 0.17295
| 0.128968
| 0.078468
| 0.061177
| 0.050548
| 0.045682
| 0
| 0.035126
| 0.239651
| 34,521
| 807
| 144
| 42.776952
| 0.771335
| 0.383564
| 0
| 0.207459
| 0
| 0.004662
| 0.325809
| 0.035675
| 0
| 0
| 0.00131
| 0
| 0.002331
| 1
| 0.027972
| false
| 0
| 0.006993
| 0.002331
| 0.062937
| 0.265734
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1314ffbb2b5a881e8cbdb62ecc8a53c659f4f382
| 638
|
py
|
Python
|
setup.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 9
|
2020-12-01T16:33:02.000Z
|
2022-01-19T20:02:42.000Z
|
setup.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 4
|
2020-10-02T14:38:32.000Z
|
2021-08-02T09:23:58.000Z
|
setup.py
|
ihayhurst/RetroBioCat
|
d674897459c0ab65faad5ed3017c55cf51bcc020
|
[
"MIT"
] | 6
|
2021-01-14T07:48:36.000Z
|
2022-03-20T17:34:27.000Z
|
from setuptools import setup, find_packages
from retrobiocat_web import __version__
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setup(
name = 'retrobiocat_web',
packages = find_packages(),
include_package_data=True,
version = __version__,
license='',
description = 'Retrosynthesis',
author = 'William Finnigan',
author_email = 'wjafinnigan@gmail.com',
url = '',
download_url = '',
keywords = ['enzyme'],
install_requires=requirements,
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3'],
)
| 26.583333
| 45
| 0.694357
| 67
| 638
| 6.358209
| 0.731343
| 0.056338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003788
| 0.172414
| 638
| 24
| 46
| 26.583333
| 0.80303
| 0
| 0
| 0
| 0
| 0
| 0.300469
| 0.032864
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131631df01aa9316264d6c8b1aaa6ecfd1254785
| 1,748
|
py
|
Python
|
rxn_yield_context/preprocess_data/preprocess/augmentation_utils.py
|
Lung-Yi/rxn_yield_context
|
116d6f21a1b6dc39016d87c001dc5b142cfb697a
|
[
"MIT"
] | null | null | null |
rxn_yield_context/preprocess_data/preprocess/augmentation_utils.py
|
Lung-Yi/rxn_yield_context
|
116d6f21a1b6dc39016d87c001dc5b142cfb697a
|
[
"MIT"
] | null | null | null |
rxn_yield_context/preprocess_data/preprocess/augmentation_utils.py
|
Lung-Yi/rxn_yield_context
|
116d6f21a1b6dc39016d87c001dc5b142cfb697a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import pickle
import numpy as np
from rdkit import Chem
from rdkit.Chem import AllChem,DataStructs
def get_classes(path):
f = open(path, 'rb')
dict_ = pickle.load(f)
f.close()
classes = sorted(dict_.items(), key=lambda d: d[1],reverse=True)
classes = [(x,y) for x,y in classes]
return classes
def create_rxn_Morgan2FP_concatenate(rsmi, psmi, rxnfpsize=16384, pfpsize=16384, useFeatures=False, calculate_rfp=True, useChirality=True):
# Takes reactant and product SMILES separately and returns the product fingerprint
# concatenated with the reaction-difference (product minus reactant) fingerprint
rsmi = rsmi.encode('utf-8')
psmi = psmi.encode('utf-8')
try:
mol = Chem.MolFromSmiles(rsmi)
except Exception as e:
print(e)
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=rxnfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(rxnfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build reactant fp due to {}".format(e))
return
rfp = fp
try:
mol = Chem.MolFromSmiles(psmi)
except Exception as e:
return
try:
fp_bit = AllChem.GetMorganFingerprintAsBitVect(
mol=mol, radius=2, nBits=pfpsize, useFeatures=useFeatures, useChirality=useChirality)
fp = np.empty(pfpsize, dtype='float32')
DataStructs.ConvertToNumpyArray(fp_bit, fp)
except Exception as e:
print("Cannot build product fp due to {}".format(e))
return
pfp = fp
rxn_fp = pfp - rfp
final_fp = np.concatenate((pfp, rxn_fp))
return final_fp
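# Minimal usage sketch (hypothetical SMILES strings; assumes rdkit and numpy
# are installed). On success the result is the product fingerprint concatenated
# with the product-minus-reactant difference fingerprint.
if __name__ == '__main__':
    example_fp = create_rxn_Morgan2FP_concatenate('CCO', 'CC=O',
                                                  rxnfpsize=2048, pfpsize=2048)
    if example_fp is not None:
        print(example_fp.shape)  # expected: (4096,)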
| 33.615385
| 140
| 0.642449
| 217
| 1,748
| 5.105991
| 0.400922
| 0.054152
| 0.061372
| 0.064982
| 0.430505
| 0.409747
| 0.373646
| 0.274368
| 0.274368
| 0.274368
| 0
| 0.016204
| 0.258581
| 1,748
| 52
| 141
| 33.615385
| 0.838735
| 0.065217
| 0
| 0.363636
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.272727
| 0.113636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131665ba7b9465c31b9a3f7865c4b018c27a3aec
| 6,434
|
py
|
Python
|
src/webstruct-demo/__init__.py
|
zanachka/webstruct-demo
|
f5b5081760d9a2b7924704041cd74748a5c98664
|
[
"MIT"
] | 5
|
2019-04-15T14:54:23.000Z
|
2020-10-03T04:47:12.000Z
|
src/webstruct-demo/__init__.py
|
zanachka/webstruct-demo
|
f5b5081760d9a2b7924704041cd74748a5c98664
|
[
"MIT"
] | 2
|
2021-06-01T22:49:44.000Z
|
2021-12-13T19:51:11.000Z
|
src/webstruct-demo/__init__.py
|
zanachka/webstruct-demo
|
f5b5081760d9a2b7924704041cd74748a5c98664
|
[
"MIT"
] | 3
|
2019-06-25T10:31:30.000Z
|
2020-10-03T04:49:01.000Z
|
import functools
import logging
import random
from flask import Flask, render_template, request
import joblib
from lxml.html import html5parser
import lxml.html
import requests
import yarl
import webstruct.model
import webstruct.sequence_encoding
import webstruct.webannotator
webstruct_demo = Flask(__name__, instance_relative_config=True)
webstruct_demo.config.from_pyfile('config.py')
def absolutize_link(link, base_url):
if link.startswith('#'):
return link
try:
target_url = yarl.URL(link)
except:
return link
if target_url.is_absolute() and target_url.scheme:
return link
if target_url.is_absolute() and not target_url.scheme:
target_url = target_url.with_scheme(base_url.scheme)
return str(target_url)
try:
target_url = base_url.join(target_url)
except:
return link
return str(target_url)
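# Illustrative note (hypothetical values): absolutize_link('b.html',
# yarl.URL('https://example.com/a/')) returns 'https://example.com/a/b.html',
# while fragment-only links such as '#top' and links that are already absolute
# are returned unchanged.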
def absolute_links(tree, url):
_LINK_SOURCES = ['src', 'href']
try:
base_url = yarl.URL(url)
except:
return tree
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
for attr in _LINK_SOURCES:
if attr not in element.attrib:
continue
element.attrib[attr] = absolutize_link(element.attrib[attr], base_url)
return tree
def parent_links(tree, base_url):
base_url = yarl.URL(base_url)
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
if element.tag != 'a':
continue
if 'href' not in element.attrib:
continue
url = element.attrib['href']
if url.startswith('#'):
continue
element.attrib['target'] = '_parent'
element.attrib['href'] = str(base_url.update_query(url=url))
return tree
def remove_namespace(tree):
_NS="{http://www.w3.org/1999/xhtml}"
for _, element in lxml.html.etree.iterwalk(tree, events=('start', )):
if not isinstance(element.tag, str):
continue
if not element.tag.startswith(_NS):
continue
element.tag = element.tag[len(_NS):]
return tree
_TOKENS_PER_PART = 2000
def run_model(tree, model):
html_tokens, _ = model.html_tokenizer.tokenize_single(tree)
if not html_tokens:
return tree, list(), list()
tree = html_tokens[0].elem.getroottree().getroot()
tags = model.model.predict([html_tokens[i:i+_TOKENS_PER_PART] for i in range(0, len(html_tokens), _TOKENS_PER_PART)])
tags = [i for t in tags for i in t]
return tree, html_tokens, tags
def download(url):
splash_url = webstruct_demo.config.get('SPLASH_URL', None)
splash_user = webstruct_demo.config.get('SPLASH_USER', None)
splash_pass = webstruct_demo.config.get('SPLASH_PASS', None)
is_splash = functools.reduce(lambda x,y: x and y is not None,
[splash_url, splash_user, splash_pass],
True)
if not is_splash:
response = requests.get(url)
return response.content, response.url
load = {'url': url,
'images': 0,
'base_url': url}
response = requests.post(splash_url + '/render.html',
json=load,
auth=requests.auth.HTTPBasicAuth(splash_user, splash_pass))
return response.content, url
def extract_ner(response_content, response_url, base_url):
url = response_url
tree = html5parser.document_fromstring(response_content)
tree = remove_namespace(tree)
tree = absolute_links(tree, url)
tree = parent_links(tree, base_url)
title = tree.xpath('//title')[0].text
model = joblib.load(webstruct_demo.config['MODEL_PATH'])
tree, tokens, tags = run_model(tree, model)
tree = model.html_tokenizer.detokenize_single(tokens, tags)
tree = webstruct.webannotator.to_webannotator(
tree,
entity_colors=model.entity_colors,
url=url
)
content = lxml.html.tostring(tree, encoding='utf-8').decode('utf-8')
entities = webstruct.sequence_encoding.IobEncoder.group(zip(tokens, tags))
entities = webstruct.model._drop_empty(
(model.build_entity(tokens), tag)
for (tokens, tag) in entities if tag != 'O'
)
groups = webstruct.model.extract_entitiy_groups(
tokens,
tags,
dont_penalize=None,
join_tokens=model.build_entity
)
return content, title, entities, groups
def sample_entities(entities):
unique = list(set(entities))
random.shuffle(unique)
sampled = unique[:5]
sampled = sorted(sampled, key=lambda e:(e[1], e[0]))
return sampled
def sample_groups(groups):
groups = [tuple(sorted(g)) for g in groups]
sampled = sorted(list(set(groups)), key=lambda g:-len(g))
return sampled[:2]
@webstruct_demo.route('/')
def index():
url = request.args.get('url', 'http://en.wikipedia.org/')
output = request.args.get('output', 'html')
try:
response_content, response_url = download(url)
content, title, entities, groups = extract_ner(response_content,
response_url,
request.url)
except:
logging.exception('Got exception')
content = None
title = 'Error obtaining %s' % (url, )
entities = []
groups = []
_TEMPLATE_MAPPING = {'html': 'main.html',
'entities': 'entities.html',
'groups': 'groups.html'}
template = _TEMPLATE_MAPPING.get(output, _TEMPLATE_MAPPING['html'])
sampled_entities = sample_entities(entities)
sampled_groups = sample_groups(groups)
base_url = yarl.URL(request.url)
routing = {t: str(base_url.update_query(output=t)) for t in ['html', 'entities', 'groups']}
values = {'url': url,
'title': title,
'entities': entities,
'sampled_entities': sampled_entities,
'sampled_groups': sampled_groups,
'routing': routing,
'srcdoc': content,
'groups': groups,
'output': output}
return render_template(template, **values)
| 28.981982
| 121
| 0.617656
| 766
| 6,434
| 5.002611
| 0.224543
| 0.025574
| 0.024791
| 0.02714
| 0.161273
| 0.10334
| 0.084551
| 0.084551
| 0.066806
| 0.066806
| 0
| 0.004483
| 0.271993
| 6,434
| 221
| 122
| 29.113122
| 0.813621
| 0
| 0
| 0.192771
| 0
| 0
| 0.061859
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060241
| false
| 0.018072
| 0.072289
| 0
| 0.240964
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13178607e92d499e0a8fa091130826ae93f57d37
| 757
|
py
|
Python
|
setup.py
|
Liang813/einops
|
9edce3d9a2d0a2abc51a6aaf86678eac43ffac0c
|
[
"MIT"
] | 4,738
|
2018-10-30T08:38:50.000Z
|
2022-03-31T17:35:50.000Z
|
setup.py
|
Liang813/einops
|
9edce3d9a2d0a2abc51a6aaf86678eac43ffac0c
|
[
"MIT"
] | 120
|
2018-10-30T09:04:01.000Z
|
2022-03-27T11:27:30.000Z
|
setup.py
|
Liang813/einops
|
9edce3d9a2d0a2abc51a6aaf86678eac43ffac0c
|
[
"MIT"
] | 216
|
2018-11-09T02:50:30.000Z
|
2022-03-30T05:46:44.000Z
|
__author__ = 'Alex Rogozhnikov'
from setuptools import setup
setup(
name="einops",
version='0.3.2',
description="A new flavour of deep learning operations",
long_description=open('README.md', encoding='utf-8').read(),
long_description_content_type='text/markdown',
url='https://github.com/arogozhnikov/einops',
author='Alex Rogozhnikov',
packages=['einops', 'einops.layers'],
classifiers=[
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3 ',
],
keywords='deep learning, neural networks, tensor manipulation, machine learning, '
'scientific computations, einops',
install_requires=[
# no run-time or installation-time dependencies
],
)
| 29.115385
| 86
| 0.668428
| 80
| 757
| 6.2125
| 0.8
| 0.040241
| 0.084507
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008278
| 0.202114
| 757
| 25
| 87
| 30.28
| 0.81457
| 0.059445
| 0
| 0.1
| 0
| 0
| 0.483099
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.05
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1317fa461eecfb928fe1d73e0f3c19ec9defb396
| 14,667
|
py
|
Python
|
ldp/tasks/dlp.py
|
evandez/low-dimensional-probing
|
3e4af6644a4db7fdf48bc40c5de4815f9db52a6e
|
[
"MIT"
] | 1
|
2022-03-06T06:59:42.000Z
|
2022-03-06T06:59:42.000Z
|
ldp/tasks/dlp.py
|
evandez/low-dimensional-probing
|
3e4af6644a4db7fdf48bc40c5de4815f9db52a6e
|
[
"MIT"
] | null | null | null |
ldp/tasks/dlp.py
|
evandez/low-dimensional-probing
|
3e4af6644a4db7fdf48bc40c5de4815f9db52a6e
|
[
"MIT"
] | null | null | null |
"""Core experiments for the dependency label prediction task."""
import collections
import copy
import logging
from typing import (Any, Dict, Iterator, Optional, Sequence, Set, Tuple, Type,
Union)
from ldp import datasets, learning
from ldp.models import probes, projections
from ldp.parse import ptb
from ldp.parse import representations as reps
from ldp.utils.typing import Device
import numpy
import torch
import wandb
UNK = 'unk'
class DLPIndexer:
"""Map pairs of words to their syntactic relationship, if any."""
def __init__(self, samples: Sequence[ptb.Sample], unk: str = UNK):
"""Map each relation label to an integer.
Args:
samples (Sequence[ptb.Sample]): The samples from which to determine
possible relations.
unk (str): Label to use when un-indexed dependency label is
encountered.
"""
labels = {rel for sample in samples for rel in sample.relations}
self.indexer = {unk: 0}
for label in sorted(labels):
self.indexer[label] = len(self.indexer)
self.unk = unk
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
torch.Tensor: For length W sentence, returns shape (W, W) matrix
where element (v, w) is the index of the label describing
the relationship between word v and w, if any. Defaults to
the "unk" label, even if there is no relationship between
v and w.
"""
heads, relations = sample.heads, sample.relations
labels = torch.empty(len(heads), len(heads), dtype=torch.long)
labels.fill_(self.indexer[self.unk])
for word, (head, rel) in enumerate(zip(heads, relations)):
if head == -1:
labels[word, word] = self.indexer[rel]
else:
label = self.indexer.get(rel, self.indexer[self.unk])
labels[word, head] = label
return labels
def __len__(self) -> int:
"""Return the number of unique labels for this task."""
return len(self.indexer)
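# Minimal usage sketch (hypothetical samples): the indexer reserves 0 for the
# "unk" label and numbers the remaining relation labels in sorted order, e.g.
#
#   indexer = DLPIndexer(samples)      # samples: Sequence[ptb.Sample]
#   labels = indexer(samples[0])       # (W, W) LongTensor of label indices
#
# where W is the sentence length and unrelated word pairs keep the "unk" index.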
class ControlDLPIndexer:
"""Map pairs of words to arbitrary syntactic relationships."""
def __init__(self,
samples: Sequence[ptb.Sample],
dist: Optional[Union[numpy.ndarray, Sequence[float]]] = None):
"""Map each relation label to an arbitrary (integer) label.
We only do this for pairs of words which have a head-dependent
relationship in the original dataset.
Args:
samples (Sequence[ptb.Samples]): The samples from which to pull
possible word pairs.
dist (Optional[Union[numpy.ndarray, Sequence[float]]], optional): A
distribution to use when sampling tags per word type.
By default, is computed from the list of samples.
"""
if dist is None:
counts: Dict[str, int] = collections.defaultdict(lambda: 0)
for sample in samples:
for relation in sample.relations:
counts[relation] += 1
dist = numpy.array([float(count) for count in counts.values()])
dist /= numpy.sum(dist)
assert dist is not None, 'uninitialized distribution?'
self.dist = dist
self.rels: Dict[Tuple[str, str], int] = {}
for sample in samples:
sentence = sample.sentence
heads = sample.heads
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sentence[dep], sentence[head])
if words not in self.rels:
# Add one so that 0 is reserved for "no relationship" tag.
rel = numpy.random.choice(len(dist), p=dist) + 1
self.rels[words] = rel
def __call__(self, sample: ptb.Sample) -> torch.Tensor:
"""Map all possible (word, word) pairs to labels.
Args:
sample (ptb.Sample): The sample to label.
Returns:
torch.Tensor: For length W sentence, returns shape (W, W) matrix
where element (v, w) is the index of the label describing
the relationship between word v and w, if any. Defaults to
the "unk" label, even if there is no relationship between
v and w.
"""
heads = sample.heads
labels = torch.zeros(len(heads), len(heads), dtype=torch.long)
for dep, head in enumerate(heads):
if head == -1:
head = dep
words = (sample.sentence[dep], sample.sentence[head])
labels[dep, head] = self.rels.get(words, 0)
return labels
def __len__(self) -> int:
"""Return the number of relationships, including the null one."""
return len(self.dist) + 1
class DLPTaskDataset(datasets.TaskDataset):
"""Iterate over (word representation pair, dependency label) pairs."""
def __init__(
self,
representations: reps.RepresentationLayerDataset,
annotations: Sequence[ptb.Sample],
indexer: Type[Union[DLPIndexer, ControlDLPIndexer]] = DLPIndexer,
**kwargs: Any,
):
"""Initialize dataset by mapping each dependency label to an index.
The kwargs are forwarded to indexer when it is instantiated.
Args:
representations (representations.RepresentationsLayerDataset): Word
representations corresponding to the words to be paired and
labeled.
annotations (Sequence[ptb.PTBSample]): The PTB annotations from
which to pull dependency labels.
indexer (Union[DLPIndexer, ControlDLPIndexer]): Type of the indexer
to use for mapping PTB dependency label annotations to integer
tensors. Instantiated with given annotations unless the
samples keyword is set in kwargs.
Raises:
ValueError: If number of representations/annotations do not match.
"""
if len(representations) != len(annotations):
raise ValueError(f'got {len(representations)} representations '
f'but {len(annotations)} annotations')
self.representations = representations
self.annotations = annotations
kwargs = kwargs.copy()
kwargs.setdefault('samples', annotations)
self.indexer = indexer(**kwargs)
def __getitem__(self, index: int) -> Tuple[torch.Tensor, torch.Tensor]:
"""Return (representations, integral POS tags) for index'th sentence.
Args:
index (int): Index of the sentence in the dataset.
Returns:
Tuple[torch.Tensor, torch.Tensor]: First tensor is shape
(sentence_length, 2, representation_dimension) containing the
representation pairs for each dependency edge, and second is shape
(sentence_length,) containing integral dependency labels.
"""
representations = self.representations[index]
annotations = self.annotations[index]
assert len(representations) == len(
annotations.sentence), 'diff sentence lengths?'
rels = self.indexer(annotations)
# Find all pairs of words sharing an edge.
indexes = set(range(len(representations)))
pairs = [(i, j) for i in indexes for j in indexes if rels[i, j]]
assert pairs and len(pairs) == len(representations), 'missing edges?'
# Stack everything before returning it.
bigrams = torch.stack([
torch.stack((representations[i], representations[j]))
for i, j in pairs
])
labels = torch.stack([rels[i, j] for i, j in pairs])
return bigrams, labels
def __iter__(self) -> Iterator[Tuple[torch.Tensor, torch.Tensor]]:
"""Yield all (sentence representations, sentence POS tags) samples."""
for index in range(len(self)):
yield self[index]
def __len__(self) -> int:
"""Return the number of sentences (batches) in the dataset."""
return len(self.annotations)
@property
def sample_representations_shape(self) -> Sequence[int]:
"""Return the dimensionality of the representation pairs."""
return (2, self.representations.dataset.dimension)
@property
def sample_features_shape(self) -> Sequence[int]:
"""Return the shape of each individual POS tag.
Since POS tags are integral scalars, there is no such shape!
"""
return ()
def count_samples(self) -> int:
"""Return the number of words in the dataset."""
return sum(
self.representations.dataset.length(index)
for index in range(len(self.representations)))
def count_unique_features(self) -> int:
"""Return number of unique POS seen in data."""
return len(self.indexer)
# Define the valid probe types for this task.
Probe = Union[probes.Linear, probes.MLP]
def train(train_dataset: datasets.TaskDataset,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
probe_t: Type[Probe] = probes.Linear,
project_to: Optional[int] = None,
share_projection: bool = False,
epochs: int = 25,
patience: int = 4,
lr: float = 1e-3,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Tuple[Probe, float]:
"""Train a probe on dependency label prediction.
Args:
train_dataset (TaskDataset): Training data for probe.
dev_dataset (TaskDataset): Validation data for probe, used for early
stopping.
test_dataset (TaskDataset): Test data for probe, used to compute
final accuracy after training.
probe_t (Type[Probe], optional): Probe type to train.
Defaults to probes.Linear.
project_to (Optional[int], optional): Project representations to this
dimensionality. Defaults to no projection.
share_projection (bool): If set, project the left and right components
of pairwise probes with the same projection. E.g. if the probe is
bilinear of the form xAy, we will always compute (Px)A(Py) as
opposed to (Px)A(Qy) for distinct projections P, Q. Defaults to NOT
shared.
epochs (int, optional): Maximum passes through the training dataset.
Defaults to 25.
patience (int, optional): Allow dev loss to not improve for this many
epochs, then stop training. Defaults to 4.
lr (float, optional): Learning rate for optimizer. Defaults to 1e-3.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
also_log_to_wandb (bool, optional): If set, log
training data to wandb. By default, wandb is not used.
Returns:
Tuple[Probe, float]: The trained probe and its test accuracy.
"""
log = logging.getLogger(__name__)
device = device or 'cpu'
ndims = train_dataset.sample_representations_shape[-1]
log.info('representations have dimension %d', ndims)
ntags = train_dataset.count_unique_features()
assert ntags is not None, 'no label count, is dataset for different task?'
log.info('dependency labeling task has %d tags', ntags)
if project_to is None or ndims == project_to:
logging.info('projection dim = reps dim, not projecting')
projection = None
elif share_projection:
projection = projections.Projection(ndims, project_to)
else:
projection = projections.Projection(2 * ndims, 2 * project_to)
probe = probe_t(2 * (project_to or ndims), ntags, project=projection)
learning.train(probe,
train_dataset,
dev_dataset=dev_dataset,
stopper=learning.EarlyStopping(patience=patience),
epochs=epochs,
lr=lr,
device=device,
also_log_to_wandb=also_log_to_wandb)
accuracy = learning.test(probe, test_dataset, device=device)
return probe, accuracy
# TODO(evandez): May as well commonize this, since it's shared with POS.
def axis_alignment(
probe: Probe,
dev_dataset: datasets.TaskDataset,
test_dataset: datasets.TaskDataset,
device: Optional[Device] = None,
also_log_to_wandb: bool = False) -> Sequence[Tuple[int, float]]:
"""Measure whether the given probe is axis aligned.
Args:
probe (Probe): The probe to evaluate.
dev_dataset (datasets.TaskDataset): Data used to determine which axes
to cut.
test_dataset (datasets.TaskDataset): Data used to determine the effect
of cutting an axis.
device (Optional[Device], optional): Torch device on which to
train probe. Defaults to CPU.
also_log_to_wandb (bool, optional): If set, log results to wandb.
Returns:
Sequence[Tuple[int, float]]: The ablated axes paired with optimal probe
accuracy after that axis is zeroed.
"""
log = logging.getLogger(__name__)
projection = probe.project
assert projection is not None, 'no projection?'
axes = set(range(projection.project.in_features))
ablated: Set[int] = set()
accuracies = []
while axes:
best_model, best_axis, best_accuracy = probe, -1, -1.
for axis in axes:
model = copy.deepcopy(best_model).eval()
assert model.project is not None, 'no projection?'
model.project.project.weight.data[:, sorted(ablated | {axis})] = 0
accuracy = learning.test(model, dev_dataset, device=device)
if accuracy > best_accuracy:
best_model = model
best_axis = axis
best_accuracy = accuracy
accuracy = learning.test(best_model, test_dataset, device=device)
log.info('ablating axis %d, test accuracy %f', best_axis, accuracy)
if also_log_to_wandb:
wandb.log({
'axis': best_axis,
'dev accuracy': best_accuracy,
'test accuracy': accuracy,
})
axes.remove(best_axis)
ablated.add(best_axis)
accuracies.append((best_axis, accuracy))
return tuple(accuracies)
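# Minimal usage sketch (hypothetical datasets prepared elsewhere as
# datasets.TaskDataset instances):
#
#   probe, accuracy = train(train_data, dev_data, test_data,
#                           probe_t=probes.Linear, project_to=64)
#   ablations = axis_alignment(probe, dev_data, test_data)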
| 38.395288
| 79
| 0.614986
| 1,733
| 14,667
| 5.128679
| 0.201962
| 0.013614
| 0.020477
| 0.011026
| 0.227385
| 0.199145
| 0.169442
| 0.135239
| 0.118587
| 0.118587
| 0
| 0.00272
| 0.29822
| 14,667
| 381
| 80
| 38.496063
| 0.860779
| 0.380991
| 0
| 0.169312
| 0
| 0
| 0.047956
| 0.002638
| 0
| 0
| 0
| 0.002625
| 0.031746
| 1
| 0.084656
| false
| 0
| 0.063492
| 0
| 0.227513
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1318a68dfab5df9c8cd4a02ab91e86cccb7f469d
| 23,613
|
py
|
Python
|
pycquery_krb/common/ccache.py
|
naver/PyCQuery
|
a72f74f9b7c208a263fc7cdb14a30d0fe21e63b9
|
[
"Apache-2.0"
] | 2
|
2021-11-17T03:13:16.000Z
|
2021-12-03T05:30:22.000Z
|
pycquery_krb/common/ccache.py
|
naver/PyCQuery
|
a72f74f9b7c208a263fc7cdb14a30d0fe21e63b9
|
[
"Apache-2.0"
] | 1
|
2021-05-04T06:02:40.000Z
|
2021-05-04T06:02:40.000Z
|
pycquery_krb/common/ccache.py
|
naver/PyCQuery
|
a72f74f9b7c208a263fc7cdb14a30d0fe21e63b9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import os
import io
import datetime
import glob
import hashlib
from pycquery_krb.protocol.asn1_structs import Ticket, EncryptedData, \
krb5_pvno, KrbCredInfo, EncryptionKey, KRBCRED, TicketFlags, EncKrbCredPart
from pycquery_krb.common.utils import dt_to_kerbtime, TGSTicket2hashcat
from pycquery_krb.protocol.constants import EncryptionType, MESSAGE_TYPE
from pycquery_krb import logger
from asn1crypto import core
# http://repo.or.cz/w/krb5dissect.git/blob_plain/HEAD:/ccache.txt
class Header:
def __init__(self):
self.tag = None
self.taglen = None
self.tagdata = None
@staticmethod
def parse(data):
"""
returns a list of header tags
"""
reader = io.BytesIO(data)
headers = []
while reader.tell() < len(data):
h = Header()
h.tag = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.taglen = int.from_bytes(reader.read(2), byteorder='big', signed=False)
h.tagdata = reader.read(h.taglen)
headers.append(h)
return headers
def to_bytes(self):
t = self.tag.to_bytes(2, byteorder='big', signed=False)
t += len(self.tagdata).to_bytes(2, byteorder='big', signed=False)
t += self.tagdata
return t
def __str__(self):
t = 'tag: %s\n' % self.tag
t += 'taglen: %s\n' % self.taglen
t += 'tagdata: %s\n' % self.tagdata
return t
class DateTime:
def __init__(self):
self.time_offset = None
self.usec_offset = None
@staticmethod
def parse(reader):
d = DateTime()
d.time_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)
d.usec_offset = int.from_bytes(reader.read(4), byteorder='big', signed=False)
return d
def to_bytes(self):
t = self.time_offset.to_bytes(4, byteorder='big', signed=False)
t += self.usec_offset.to_bytes(4, byteorder='big', signed=False)
return t
class Credential:
def __init__(self):
self.client = None
self.server = None
self.key = None
self.time = None
self.is_skey = None
self.tktflags = None
self.num_address = None
self.addrs = []
self.num_authdata = None
self.authdata = []
self.ticket = None
self.second_ticket = None
def to_hash(self):
res = Ticket.load(self.ticket.to_asn1()).native
tgs_encryption_type = int(res['enc-part']['etype'])
t = len(res['sname']['name-string'])
if t == 1:
tgs_name_string = res['sname']['name-string'][0]
else:
tgs_name_string = res['sname']['name-string'][1]
tgs_realm = res['realm']
if tgs_encryption_type == EncryptionType.AES256_CTS_HMAC_SHA1_96.value:
tgs_checksum = res['enc-part']['cipher'][-12:]
tgs_encrypted_data2 = res['enc-part']['cipher'][:-12]
return '$krb5tgs$%s$%s$%s$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )
else:
tgs_checksum = res['enc-part']['cipher'][:16]
tgs_encrypted_data2 = res['enc-part']['cipher'][16:]
return '$krb5tgs$%s$*%s$%s$spn*$%s$%s' % (tgs_encryption_type,tgs_name_string,tgs_realm, tgs_checksum.hex(), tgs_encrypted_data2.hex() )
def to_tgt(self):
"""
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t
def to_tgs(self):
"""
        Returns the TGS as an AS_REP-style native format message and the session key in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t
def to_kirbi(self):
filename = '%s@%s_%s' % (self.client.to_string() , self.server.to_string(), hashlib.sha1(self.ticket.to_asn1()).hexdigest()[:8])
krbcredinfo = {}
krbcredinfo['key'] = EncryptionKey(self.key.to_asn1())
krbcredinfo['prealm'] = self.client.realm.to_string()
krbcredinfo['pname'] = self.client.to_asn1()[0]
krbcredinfo['flags'] = core.IntegerBitString(self.tktflags).cast(TicketFlags)
if self.time.authtime != 0: #this parameter is not mandatory, and most of the time not present
krbcredinfo['authtime'] = datetime.datetime.fromtimestamp(self.time.authtime, datetime.timezone.utc)
if self.time.starttime != 0:
krbcredinfo['starttime'] = datetime.datetime.fromtimestamp(self.time.starttime, datetime.timezone.utc)
if self.time.endtime != 0:
krbcredinfo['endtime'] = datetime.datetime.fromtimestamp(self.time.endtime, datetime.timezone.utc)
if self.time.renew_till != 0: #this parameter is not mandatory, and sometimes it's not present
            krbcredinfo['renew-till'] = datetime.datetime.fromtimestamp(self.time.renew_till, datetime.timezone.utc)
krbcredinfo['srealm'] = self.server.realm.to_string()
krbcredinfo['sname'] = self.server.to_asn1()[0]
enc_krbcred = {}
enc_krbcred['ticket-info'] = [KrbCredInfo(krbcredinfo)]
krbcred = {}
krbcred['pvno'] = krb5_pvno
krbcred['msg-type'] = MESSAGE_TYPE.KRB_CRED.value
krbcred['tickets'] = [Ticket.load(self.ticket.to_asn1())]
krbcred['enc-part'] = EncryptedData({'etype': EncryptionType.NULL.value, 'cipher': EncKrbCredPart(enc_krbcred).dump()})
kirbi = KRBCRED(krbcred)
return kirbi, filename
@staticmethod
def from_asn1(ticket, data):
###
# data = KrbCredInfo
###
c = Credential()
c.client = CCACHEPrincipal.from_asn1(data['pname'], data['prealm'])
c.server = CCACHEPrincipal.from_asn1(data['sname'], data['srealm'])
c.key = Keyblock.from_asn1(data['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(data['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(ticket['enc-part']['cipher'])
c.second_ticket = CCACHEOctetString.empty()
return c
@staticmethod
def parse(reader):
c = Credential()
c.client = CCACHEPrincipal.parse(reader)
c.server = CCACHEPrincipal.parse(reader)
c.key = Keyblock.parse(reader)
c.time = Times.parse(reader)
c.is_skey = int.from_bytes(reader.read(1), byteorder='big', signed=False)
c.tktflags = int.from_bytes(reader.read(4), byteorder='little', signed=False)
c.num_address = int.from_bytes(reader.read(4), byteorder='big', signed=False)
for _ in range(c.num_address):
c.addrs.append(Address.parse(reader))
c.num_authdata = int.from_bytes(reader.read(4), byteorder='big', signed=False)
for _ in range(c.num_authdata):
c.authdata.append(Authdata.parse(reader))
c.ticket = CCACHEOctetString.parse(reader)
c.second_ticket = CCACHEOctetString.parse(reader)
return c
@staticmethod
def summary_header():
return ['client','server','starttime','endtime','renew-till']
def summary(self):
return [
'%s@%s' % (self.client.to_string(separator='/'), self.client.realm.to_string()),
'%s@%s' % (self.server.to_string(separator='/'), self.server.realm.to_string()),
datetime.datetime.fromtimestamp(self.time.starttime).isoformat() if self.time.starttime != 0 else 'N/A',
datetime.datetime.fromtimestamp(self.time.endtime).isoformat() if self.time.endtime != 0 else 'N/A',
datetime.datetime.fromtimestamp(self.time.renew_till).isoformat() if self.time.renew_till != 0 else 'N/A',
]
def to_bytes(self):
t = self.client.to_bytes()
t += self.server.to_bytes()
t += self.key.to_bytes()
t += self.time.to_bytes()
t += self.is_skey.to_bytes(1, byteorder='big', signed=False)
t += self.tktflags.to_bytes(4, byteorder='little', signed=False)
t += self.num_address.to_bytes(4, byteorder='big', signed=False)
for addr in self.addrs:
t += addr.to_bytes()
t += self.num_authdata.to_bytes(4, byteorder='big', signed=False)
for ad in self.authdata:
t += ad.to_bytes()
t += self.ticket.to_bytes()
t += self.second_ticket.to_bytes()
return t
class Keyblock:
def __init__(self):
self.keytype = None
self.etype = None
self.keylen = None
self.keyvalue = None
@staticmethod
def from_asn1(data):
k = Keyblock()
k.keytype = data['keytype']
k.etype = 0 # not sure
k.keylen = len(data['keyvalue'])
k.keyvalue = data['keyvalue']
return k
def to_asn1(self):
t = {}
t['keytype'] = self.keytype
t['keyvalue'] = self.keyvalue
return t
@staticmethod
def parse(reader):
k = Keyblock()
k.keytype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
k.etype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
k.keylen = int.from_bytes(reader.read(2), byteorder='big', signed=False)
k.keyvalue = reader.read(k.keylen)
return k
def to_bytes(self):
t = self.keytype.to_bytes(2, byteorder='big', signed=False)
t += self.etype.to_bytes(2, byteorder='big', signed=False)
t += self.keylen.to_bytes(2, byteorder='big', signed=False)
t += self.keyvalue
return t
class Times:
def __init__(self):
self.authtime = None
self.starttime = None
self.endtime = None
self.renew_till = None
@staticmethod
def from_asn1(enc_as_rep_part):
t = Times()
t.authtime = dt_to_kerbtime(enc_as_rep_part['authtime']) \
if 'authtime' in enc_as_rep_part and enc_as_rep_part['authtime'] else 0
t.starttime = dt_to_kerbtime(enc_as_rep_part['starttime']) \
if 'starttime' in enc_as_rep_part and enc_as_rep_part['starttime'] else 0
t.endtime = dt_to_kerbtime(enc_as_rep_part['endtime']) \
if 'endtime' in enc_as_rep_part and enc_as_rep_part['endtime'] else 0
t.renew_till = dt_to_kerbtime(enc_as_rep_part['renew_till']) \
if 'renew_till' in enc_as_rep_part and enc_as_rep_part['renew_till'] else 0
return t
@staticmethod
    def dummy_time(start=None):
        if start is None:  # evaluate the default per call, not once at import time
            start = datetime.datetime.now(datetime.timezone.utc)
        t = Times()
t.authtime = dt_to_kerbtime(start)
        t.starttime = dt_to_kerbtime(start)
t.endtime = dt_to_kerbtime(start + datetime.timedelta(days=1))
t.renew_till = dt_to_kerbtime(start + datetime.timedelta(days=2))
return t
@staticmethod
def parse(reader):
t = Times()
t.authtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)
t.starttime = int.from_bytes(reader.read(4), byteorder='big', signed=False)
t.endtime = int.from_bytes(reader.read(4), byteorder='big', signed=False)
t.renew_till = int.from_bytes(reader.read(4), byteorder='big', signed=False)
return t
def to_bytes(self):
t = self.authtime.to_bytes(4, byteorder='big', signed=False)
t += self.starttime.to_bytes(4, byteorder='big', signed=False)
t += self.endtime.to_bytes(4, byteorder='big', signed=False)
t += self.renew_till.to_bytes(4, byteorder='big', signed=False)
return t
class Address:
def __init__(self):
self.addrtype = None
self.addrdata = None
@staticmethod
def parse(reader):
a = Address()
a.addrtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
a.addrdata = CCACHEOctetString.parse(reader)
return a
def to_bytes(self):
t = self.addrtype.to_bytes(2, byteorder='big', signed=False)
t += self.addrdata.to_bytes()
return t
class Authdata:
def __init__(self):
self.authtype = None
self.authdata = None
@staticmethod
def parse(reader):
a = Authdata()
a.authtype = int.from_bytes(reader.read(2), byteorder='big', signed=False)
a.authdata = CCACHEOctetString.parse(reader)
return a
def to_bytes(self):
t = self.authtype.to_bytes(2, byteorder='big', signed=False)
t += self.authdata.to_bytes()
return t
class CCACHEPrincipal:
def __init__(self):
self.name_type = None
self.num_components = None
self.realm = None
self.components = []
@staticmethod
def from_asn1(principal, realm):
p = CCACHEPrincipal()
p.name_type = principal['name-type']
p.num_components = len(principal['name-string'])
p.realm = CCACHEOctetString.from_string(realm)
for comp in principal['name-string']:
p.components.append(CCACHEOctetString.from_asn1(comp))
return p
@staticmethod
def dummy():
p = CCACHEPrincipal()
p.name_type = 1
p.num_components = 1
p.realm = CCACHEOctetString.from_string('kerbi.corp')
for _ in range(1):
p.components.append(CCACHEOctetString.from_string('kerbi'))
return p
def to_string(self, separator='-'):
return separator.join([c.to_string() for c in self.components])
def to_asn1(self):
t = {'name-type': self.name_type, 'name-string': [name.to_string() for name in self.components]}
return t, self.realm.to_string()
@staticmethod
def parse(reader):
p = CCACHEPrincipal()
p.name_type = int.from_bytes(reader.read(4), byteorder='big', signed=False)
p.num_components = int.from_bytes(reader.read(4), byteorder='big', signed=False)
p.realm = CCACHEOctetString.parse(reader)
for _ in range(p.num_components):
p.components.append(CCACHEOctetString.parse(reader))
return p
def to_bytes(self):
t = self.name_type.to_bytes(4, byteorder='big', signed=False)
t += len(self.components).to_bytes(4, byteorder='big', signed=False)
t += self.realm.to_bytes()
for com in self.components:
t += com.to_bytes()
return t
class CCACHEOctetString:
def __init__(self):
self.length = None
self.data = None
@staticmethod
def empty():
o = CCACHEOctetString()
o.length = 0
o.data = b''
return o
def to_asn1(self):
return self.data
def to_string(self):
return self.data.decode()
@staticmethod
def from_string(data):
o = CCACHEOctetString()
o.data = data.encode()
o.length = len(o.data)
return o
@staticmethod
def from_asn1(data):
o = CCACHEOctetString()
o.length = len(data)
if isinstance(data,str):
o.data = data.encode()
else:
o.data = data
return o
@staticmethod
def parse(reader):
o = CCACHEOctetString()
o.length = int.from_bytes(reader.read(4), byteorder='big', signed=False)
o.data = reader.read(o.length)
return o
def to_bytes(self):
if isinstance(self.data,str):
self.data = self.data.encode()
self.length = len(self.data)
t = len(self.data).to_bytes(4, byteorder='big', signed=False)
t += self.data
return t
class CCACHE:
"""
    As the header is rarely used (it is mostly static), initialize this object with empty=True to get an object without a header already present
"""
def __init__(self, empty = False):
self.file_format_version = None #0x0504
self.headers = []
self.primary_principal = None
self.credentials = []
if empty == False:
self.__setup()
def __setup(self):
self.file_format_version = 0x0504
header = Header()
header.tag = 1
header.taglen = 8
#header.tagdata = b'\xff\xff\xff\xff\x00\x00\x00\x00'
header.tagdata = b'\x00\x00\x00\x00\x00\x00\x00\x00'
self.headers.append(header)
#t_hdr = b''
#for header in self.headers:
# t_hdr += header.to_bytes()
#self.headerlen = 1 #size of the entire header in bytes, encoded in 2 byte big-endian unsigned int
self.primary_principal = CCACHEPrincipal.dummy()
def __str__(self):
t = '== CCACHE ==\n'
t+= 'file_format_version : %s\n' % self.file_format_version
for header in self.headers:
t+= '%s\n' % header
t+= 'primary_principal : %s\n' % self.primary_principal
return t
def add_tgt(self, as_rep, enc_as_rep_part, override_pp = True): #from AS_REP
"""
Creates credential object from the TGT and adds to the ccache file
        The TGT is basically the native representation of the asn1 encoded AS_REP data that the AD sends upon a successful TGT request.
        This function doesn't do decryption of the encrypted part of the as_rep object; it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(as_rep['cname'], as_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_as_rep_part['sname'], enc_as_rep_part['srealm'])
c.time = Times.from_asn1(enc_as_rep_part)
c.key = Keyblock.from_asn1(enc_as_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_as_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(as_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_tgs(self, tgs_rep, enc_tgs_rep_part, override_pp = False): #from AS_REP
"""
Creates credential object from the TGS and adds to the ccache file
The TGS is the native representation of the asn1 encoded TGS_REP data when the user requests a tgs to a specific service principal with a valid TGT
        This function doesn't do decryption of the encrypted part of the tgs_rep object; it is expected that the decrypted XXX is supplied in enc_as_rep_part
override_pp: bool to determine if client principal should be used as the primary principal for the ccache file
"""
c = Credential()
c.client = CCACHEPrincipal.from_asn1(tgs_rep['cname'], tgs_rep['crealm'])
if override_pp == True:
self.primary_principal = c.client
c.server = CCACHEPrincipal.from_asn1(enc_tgs_rep_part['sname'], enc_tgs_rep_part['srealm'])
c.time = Times.from_asn1(enc_tgs_rep_part)
c.key = Keyblock.from_asn1(enc_tgs_rep_part['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(enc_tgs_rep_part['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(tgs_rep['ticket']).dump())
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
def add_kirbi(self, krbcred, override_pp = True, include_expired = False):
c = Credential()
enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
ticket_info = enc_credinfo['ticket-info'][0]
"""
if ticket_info['endtime'] < datetime.datetime.now(datetime.timezone.utc):
if include_expired == True:
logging.debug('This ticket has most likely expired, but include_expired is forcing me to add it to cache! This can cause problems!')
else:
logging.debug('This ticket has most likely expired, skipping')
return
"""
c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
if override_pp == True:
self.primary_principal = c.client
#yaaaaay 4 additional weirdness!!!!
        #if sname name-string contains a realm as well then impacket will crash miserably :(
if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
logger.debug('SNAME contains the realm as well, trimming it')
t = ticket_info['sname']
t['name-string'] = t['name-string'][:-1]
c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
else:
c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
c.time = Times.from_asn1(ticket_info)
c.key = Keyblock.from_asn1(ticket_info['key'])
c.is_skey = 0 #not sure!
c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
c.num_address = 0
c.num_authdata = 0
c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump()) #kirbi only stores one ticket per file
c.second_ticket = CCACHEOctetString.empty()
self.credentials.append(c)
@staticmethod
def from_kirbi(kirbidata):
kirbi = KRBCRED.load(kirbidata).native
cc = CCACHE()
cc.add_kirbi(kirbi)
return cc
def get_all_tgt(self):
"""
Returns a list of AS_REP tickets in native format (dict).
        To determine which tickets are TGTs we check whether the server principal is the kerberos (krbtgt) service
"""
tgts = []
for cred in self.credentials:
if cred.server.to_string(separator='/').lower().find('krbtgt') != -1:
tgt = [cred.to_tgt(), cred.time]
tgts.append(tgt)
return tgts
def get_all_tgs(self):
tgss = []
for cred in self.credentials:
if cred.server.to_string(separator = '/').lower().find('krbtgt') == -1:
tgss.append(cred.to_tgs())
return tgss
def get_hashes(self, all_hashes = False):
"""
        Returns a list of hashes in hashcat-friendly format for tickets with encryption type 23 (which is RC4)
        all_hashes: overrides the encryption type filtering and returns hashes for all tickets
"""
hashes = []
for cred in self.credentials:
res = Ticket.load(cred.ticket.to_asn1()).native
if int(res['enc-part']['etype']) == 23 or all_hashes == True:
hashes.append(cred.to_hash())
return hashes
@staticmethod
def parse(reader):
c = CCACHE(True)
c.file_format_version = int.from_bytes(reader.read(2), byteorder='big', signed=False)
hdr_size = int.from_bytes(reader.read(2), byteorder='big', signed=False)
c.headers = Header.parse(reader.read(hdr_size))
#c.headerlen =
#for i in range(c.headerlen):
# c.headers.append(Header.parse(reader))
c.primary_principal = CCACHEPrincipal.parse(reader)
pos = reader.tell()
reader.seek(-1,2)
eof = reader.tell()
reader.seek(pos,0)
while reader.tell() < eof:
cred = Credential.parse(reader)
if not (len(cred.server.components) > 0 and cred.server.components[0].to_string() == 'krb5_ccache_conf_data'
and cred.server.realm.to_string() == 'X-CACHECONF:'):
c.credentials.append(cred)
return c
def to_bytes(self):
t = self.file_format_version.to_bytes(2, byteorder='big', signed=False)
t_hdr = b''
for header in self.headers:
t_hdr += header.to_bytes()
t += len(t_hdr).to_bytes(2, byteorder='big', signed=False)
t += t_hdr
t += self.primary_principal.to_bytes()
for cred in self.credentials:
t += cred.to_bytes()
return t
@staticmethod
def from_kirbifile(kirbi_filename):
kf_abs = os.path.abspath(kirbi_filename)
kirbidata = None
with open(kf_abs, 'rb') as f:
kirbidata = f.read()
return CCACHE.from_kirbi(kirbidata)
@staticmethod
def from_kirbidir(directory_path):
"""
        Iterates through all .kirbi files in a given directory and converts all of them into one CCACHE object
"""
cc = CCACHE()
dir_path = os.path.join(os.path.abspath(directory_path), '*.kirbi')
for filename in glob.glob(dir_path):
with open(filename, 'rb') as f:
kirbidata = f.read()
kirbi = KRBCRED.load(kirbidata).native
cc.add_kirbi(kirbi)
return cc
def to_kirbidir(self, directory_path):
"""
        Converts all credential objects in the CCACHE object to the kirbi file format used by mimikatz.
The kirbi file format supports one credential per file, so prepare for a lot of files being generated.
directory_path: str the directory to write the kirbi files to
"""
kf_abs = os.path.abspath(directory_path)
for cred in self.credentials:
kirbi, filename = cred.to_kirbi()
filename = '%s.kirbi' % filename.replace('..','!')
filepath = os.path.join(kf_abs, filename)
with open(filepath, 'wb') as o:
o.write(kirbi.dump())
@staticmethod
def from_file(filename):
"""
Parses the ccache file and returns a CCACHE object
"""
with open(filename, 'rb') as f:
return CCACHE.parse(f)
def to_file(self, filename):
"""
Writes the contents of the CCACHE object to a file
"""
with open(filename, 'wb') as f:
f.write(self.to_bytes())
@staticmethod
def from_bytes(data):
return CCACHE.parse(io.BytesIO(data))
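# --- Usage sketch (illustrative; the paths below are placeholders) ----------
# Round-trip a credential cache, print a summary of each credential, dump
# hashcat-friendly hashes, and export every credential as a .kirbi file.
# Only the APIs defined above are used.
if __name__ == '__main__':
    cc = CCACHE.from_file('/tmp/krb5cc_1000')  # hypothetical input ccache
    print(Credential.summary_header())
    for cred in cc.credentials:
        print(cred.summary())
    for h in cc.get_hashes(all_hashes=True):
        print(h)
    cc.to_kirbidir('/tmp/kirbis')  # hypothetical output directory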
| 31.909459
| 151
| 0.707915
| 3,534
| 23,613
| 4.574986
| 0.106395
| 0.021215
| 0.046759
| 0.059748
| 0.509278
| 0.419718
| 0.357806
| 0.311541
| 0.285007
| 0.234537
| 0
| 0.010742
| 0.152374
| 23,613
| 739
| 152
| 31.952639
| 0.797052
| 0.12256
| 0
| 0.327172
| 0
| 0
| 0.064923
| 0.0052
| 0
| 0
| 0.000297
| 0
| 0
| 1
| 0.121996
| false
| 0
| 0.018484
| 0.011091
| 0.253235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131ada8dd58eaa29a8303d1a7138ffe5d3485877
| 6,861
|
py
|
Python
|
src/tracks/settings.py
|
adcarmichael/tracks
|
04108bbdaf8554e57e278c1556efa9c5b9603973
|
[
"Apache-2.0"
] | null | null | null |
src/tracks/settings.py
|
adcarmichael/tracks
|
04108bbdaf8554e57e278c1556efa9c5b9603973
|
[
"Apache-2.0"
] | 41
|
2019-06-14T21:19:31.000Z
|
2022-02-10T14:41:00.000Z
|
src/tracks/settings.py
|
adcarmichael/tracks
|
04108bbdaf8554e57e278c1556efa9c5b9603973
|
[
"Apache-2.0"
] | null | null | null |
import os
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PWA_SERVICE_WORKER_PATH = os.path.join(
BASE_DIR, 'routes/static/routes/js', 'serviceworker.js')
print(os.path.join(
BASE_DIR, 'routes/static/routes/js', 'serviceworker.js'))
DEBUG = int(os.environ.get("DEBUG", default=0))
SECRET_KEY = os.environ.get("SECRET_KEY", 'asdfkhbsadgui87gjsbdfui')
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", 'localhost').split(" ")
# Application definition
INSTALLED_APPS = [
'routes',
'accounts',
'dashboard.apps.DashboardConfig',
'api.apps.ApiConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'rest_framework',
'pwa',
]
# 'celery',
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tracks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tracks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
"NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
"USER": os.environ.get("SQL_USER", "user"),
"PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
"HOST": os.environ.get("SQL_HOST", "localhost"),
"PORT": os.environ.get("SQL_PORT", "5432"),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATIC_ROOT = './static/'
MEDIA_ROOT = './media/'
LOGIN_REDIRECT_URL = 'home'
LOGOUT_REDIRECT_URL = 'home'
# no email for localhost or staging
EMAIL_USE_TLS = os.environ.get("EMAIL_USE_TLS")
EMAIL_HOST = os.environ.get("EMAIL_HOST")
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = os.environ.get("EMAIL_HOST_PASSWORD")
EMAIL_PORT = os.environ.get("EMAIL_PORT")
EMAIL_BACKEND = os.environ.get("EMAIL_BACKEND")
DEFAULT_FROM_EMAIL = 'chalktracks@gmail.com'
# CELERY
# CELERY_BROKER_URL = 'redis://redis:6379/0'
# CELERY_RESULT_BACKEND = 'redis://redis:6379/0'
# BROKER_URL = 'redis://localhost:6379/0'
# CELERY_RESULT_BACKEND = 'redis://localhost:6379/'
# CELERY_ACCEPT_CONTENT = ['application/json']
# CELERY_TASK_SERIALIZER = 'json'
# CELERY_RESULT_SERIALIZER = 'json'
REST_FRAMEWORK = {
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 10
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(levelname)s %(asctime)s %(module)s: %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console'
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'django.request': {
'level': 'INFO',
'handlers': ['console']
}
# 'celery': {
# 'handlers': ['console'],
# 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
# },
},
}
# STATICFILES_DIRS = [
# os.path.join(BASE_DIR, 'static'),
# ]
PWA_APP_NAME = 'ChalkTracks'
PWA_APP_DESCRIPTION = "Indoor Climbing Tracker"
PWA_APP_THEME_COLOR = '#000000'
PWA_APP_BACKGROUND_COLOR = '#000000'
PWA_APP_DISPLAY = 'standalone'
PWA_APP_SCOPE = '/'
PWA_APP_ORIENTATION = 'portrait'
PWA_APP_START_URL = '/'
PWA_APP_ICONS = [
{
'src': '/static/routes/favicon_io/favicon-32x32.png',
'sizes': '32x32',
"type": "image/png",
"purpose": "any maskable"
}, {
"src": "/static/routes/favicon_io/android-chrome-192x192.png",
"sizes": "192x192",
"type": "image/png",
"purpose": "any maskable"
}, {
"src": "/static/routes/favicon_io/android-chrome-512x512.png",
"sizes": "512x512",
"type": "image/png",
"purpose": "any maskable"
}
]
PWA_APP_DIR = 'ltr'
PWA_APP_LANG = 'en-US'
sentry_sdk.init(
dsn="https://09ce3488b18c4db19820b873eecc30c4@sentry.io/1878812",
integrations=[DjangoIntegration()],
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True
)
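# --- Environment sketch (illustrative placeholder values, not real secrets) -
# This settings module reads its configuration from environment variables; a
# deployment might export values along these lines before starting Django:
#
#   DEBUG=0
#   SECRET_KEY=<generate-a-real-secret>
#   DJANGO_ALLOWED_HOSTS="example.com www.example.com"
#   SQL_ENGINE=django.db.backends.postgresql SQL_HOST=db SQL_PORT=5432
#   SQL_DATABASE=tracks SQL_USER=tracks SQL_PASSWORD=<db-password>
#   EMAIL_HOST=smtp.example.com EMAIL_PORT=587 EMAIL_USE_TLS=1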
| 28.5875
| 91
| 0.655735
| 746
| 6,861
| 5.836461
| 0.353887
| 0.050758
| 0.041341
| 0.020671
| 0.215893
| 0.176849
| 0.120808
| 0.107487
| 0.107487
| 0.078548
| 0
| 0.022198
| 0.192392
| 6,861
| 239
| 92
| 28.707113
| 0.763581
| 0.189768
| 0
| 0.095808
| 0
| 0
| 0.472388
| 0.28499
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.041916
| 0.017964
| 0
| 0.017964
| 0.005988
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131b0afb1746ef9363aae186aba698e6731a895a
| 2,647
|
py
|
Python
|
examples/04-lights/plotter_builtins.py
|
akeshavan/pyvista
|
45fe8b1c38712776f9b628a60a8662d0716dd52b
|
[
"MIT"
] | null | null | null |
examples/04-lights/plotter_builtins.py
|
akeshavan/pyvista
|
45fe8b1c38712776f9b628a60a8662d0716dd52b
|
[
"MIT"
] | 6
|
2022-03-11T23:21:22.000Z
|
2022-03-25T03:32:21.000Z
|
examples/04-lights/plotter_builtins.py
|
akeshavan/pyvista
|
45fe8b1c38712776f9b628a60a8662d0716dd52b
|
[
"MIT"
] | null | null | null |
"""
Plotter Lighting Systems
~~~~~~~~~~~~~~~~~~~~~~~~
The :class:`pyvista.Plotter` class comes with three options for the default
lighting system:
* a light kit consisting of a headlight and four camera lights,
* an illumination system containing three lights arranged around the camera,
* no lighting.
With meshes that don't have depth information encoded in their color, an
appropriate lighting setup becomes paramount for accurate visualization.
Light kit
=========
The default ``lighting='light kit'`` option recreates a lighting setup that
corresponds to a ``vtk.vtkLightKit``. We can check what type of lights this
lighting comprises:
"""
# sphinx_gallery_thumbnail_number = 3
import pyvista as pv
from pyvista import examples
# default: light kit
plotter = pv.Plotter()
light_types = [light.light_type for light in plotter.renderer.lights]
# Remove from plotters so output is not produced in docs
pv.plotting._ALL_PLOTTERS.clear()
light_types
###############################################################################
# Add a white terrain to the scene:
mesh = examples.download_st_helens().warp_by_scalar()
plotter = pv.Plotter()
plotter.add_mesh(mesh, color='white')
plotter.show()
###############################################################################
# Three-lights illumination
# =========================
#
# Switching to three-lights illumination gives a different character to the
# figure, in this case showing less contrast when viewing the mountain from
# the top, but having more contrast with views closer to the side. This becomes
# especially clear when exploring the figures interactively.
plotter = pv.Plotter(lighting='three lights')
plotter.add_mesh(mesh, color='white')
plotter.show()
###############################################################################
# Again we can check what kind of lights this setting uses:
plotter = pv.Plotter(lighting='three lights')
light_types = [light.light_type for light in plotter.renderer.lights]
# Remove from plotters so output is not produced in docs
pv.plotting._ALL_PLOTTERS.clear()
light_types
###############################################################################
# Custom lighting
# ===============
#
# We can introduce our own lighting from scratch by disabling any lighting
# on plotter initialization. Adding a single scene light to a scene will
# often result in ominous visuals due to objects having larger regions in
# shadow:
plotter = pv.Plotter(lighting='none')
plotter.add_mesh(mesh, color='white')
light = pv.Light()
light.set_direction_angle(30, 0)
plotter.add_light(light)
plotter.show()
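###############################################################################
# As a possible follow-up (an illustrative sketch, not part of the original
# example): the harsh shadows from a single scene light can be softened by
# adding a second light from roughly the opposite azimuth, reusing only the
# ``pv.Light`` calls already shown above.
plotter = pv.Plotter(lighting='none')
plotter.add_mesh(mesh, color='white')
key_light = pv.Light()
key_light.set_direction_angle(30, 0)
fill_light = pv.Light()
fill_light.set_direction_angle(30, 180)
plotter.add_light(key_light)
plotter.add_light(fill_light)
plotter.show()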
| 30.77907
| 79
| 0.66377
| 336
| 2,647
| 5.160714
| 0.443452
| 0.031719
| 0.046136
| 0.031142
| 0.258362
| 0.258362
| 0.201845
| 0.201845
| 0.156863
| 0.156863
| 0
| 0.001731
| 0.126936
| 2,647
| 85
| 80
| 31.141176
| 0.748594
| 0.572346
| 0
| 0.695652
| 0
| 0
| 0.054499
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.086957
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131c13cd6c6c6b833141fea96f58ed4c3b53dc38
| 272
|
py
|
Python
|
src/swimport/tests/15_char_arrays/main.py
|
talos-gis/swimport
|
e8f0fcf02b0c9751b199f750f1f8bc57c8ff54b3
|
[
"MIT"
] | 1
|
2019-03-07T20:43:42.000Z
|
2019-03-07T20:43:42.000Z
|
src/swimport/tests/15_char_arrays/main.py
|
talos-gis/swimport
|
e8f0fcf02b0c9751b199f750f1f8bc57c8ff54b3
|
[
"MIT"
] | null | null | null |
src/swimport/tests/15_char_arrays/main.py
|
talos-gis/swimport
|
e8f0fcf02b0c9751b199f750f1f8bc57c8ff54b3
|
[
"MIT"
] | null | null | null |
from swimport.all import *
src = FileSource('src.h')
swim = Swim('example')
swim(pools.c_string)
swim(pools.numpy_arrays(r"../resources", allow_char_arrays=True))
swim(pools.include(src))
assert swim(Function.Behaviour()(src)) > 0
swim.write('example.i')
print('ok!')
| 19.428571
| 65
| 0.720588
| 41
| 272
| 4.682927
| 0.682927
| 0.140625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004032
| 0.088235
| 272
| 14
| 66
| 19.428571
| 0.770161
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131c14184c1fb810c136590d143d0fdf3f83e6df
| 4,523
|
py
|
Python
|
ipyvolume/astro.py
|
larsoner/ipyvolume
|
8603a47aff4531df69ace44efdcf6b85d6e51e51
|
[
"MIT"
] | 1
|
2019-04-09T11:57:07.000Z
|
2019-04-09T11:57:07.000Z
|
ipyvolume/astro.py
|
larsoner/ipyvolume
|
8603a47aff4531df69ace44efdcf6b85d6e51e51
|
[
"MIT"
] | null | null | null |
ipyvolume/astro.py
|
larsoner/ipyvolume
|
8603a47aff4531df69ace44efdcf6b85d6e51e51
|
[
"MIT"
] | null | null | null |
import numpy as np
import PIL.Image
import pythreejs
import ipyvolume as ipv
from .datasets import UrlCached
def _randomSO3():
"""return random rotatation matrix, algo by James Arvo"""
u1 = np.random.random()
u2 = np.random.random()
u3 = np.random.random()
R = np.array([[np.cos(2*np.pi*u1), np.sin(2*np.pi*u1), 0], [-np.sin(2*np.pi*u1), np.cos(2*np.pi*u1), 0], [0, 0, 1]])
v = np.array([np.cos(2*np.pi*u2)*np.sqrt(u3), np.sin(2*np.pi*u2)*np.sqrt(u3), np.sqrt(1-u3)])
H = np.identity(3)-2*v*np.transpose([v])
return - np.dot(H, R)
def spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=100, sigma_r=1, orbit_visible=False, orbit_line_interpolate=5, N_star_orbits=10, color=[255, 220, 200], size_star=1, scatter_kwargs={}):
"""Create a fake galaxy around the points orbit_x/y/z with N_stars around it"""
if orbit_line_interpolate > 1:
import scipy.interpolate
x = np.linspace(0, 1, len(orbit_x))
x_smooth = np.linspace(0, 1, len(orbit_x)*orbit_line_interpolate)
kind = 'quadratic'
orbit_x_line = scipy.interpolate.interp1d(x, orbit_x, kind)(x_smooth)
orbit_y_line = scipy.interpolate.interp1d(x, orbit_y, kind)(x_smooth)
orbit_z_line = scipy.interpolate.interp1d(x, orbit_z, kind)(x_smooth)
else:
orbit_x_line = orbit_x
orbit_y_line = orbit_y
orbit_z_line = orbit_z
line = ipv.plot(orbit_x_line, orbit_y_line, orbit_z_line, visible=orbit_visible)
x = np.repeat(orbit_x, N_stars).reshape((-1, N_stars))
y = np.repeat(orbit_y, N_stars).reshape((-1, N_stars))
z = np.repeat(orbit_z, N_stars).reshape((-1, N_stars))
xr, yr, zr = np.random.normal(0, scale=sigma_r, size=(3, N_stars))# +
r = np.sqrt(xr**2 + yr**2 + zr**2)
for i in range(N_stars):
a = np.linspace(0, 1, x.shape[0]) * 2 * np.pi * N_star_orbits
xo = r[i] * np.sin(a)
yo = r[i] * np.cos(a)
zo = a * 0
xo, yo, zo = np.dot(_randomSO3(), [xo, yo, zo])
#print(x.shape, xo.shape)
x[:, i] += xo
y[:, i] += yo
z[:, i] += zo
sprite = ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', size=size_star, **scatter_kwargs)
with sprite.material.hold_sync():
sprite.material.blending = pythreejs.BlendingMode.CustomBlending
sprite.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor
sprite.material.blendDst = pythreejs.BlendFactors.OneFactor
sprite.material.blendEquation = 'AddEquation'
sprite.material.transparent = True
sprite.material.depthWrite = False
sprite.material.alphaTest = 0.1
return sprite, line
def radial_sprite(shape, color):
color = np.array(color)
ara = np.zeros(shape[:2] + (4,), dtype=np.uint8)
x = np.linspace(-1, 1, shape[0])
y = np.linspace(-1, 1, shape[1])
x, y = np.meshgrid(x, y)
s = 0.5
radius = np.sqrt(x**2+y**2)
amplitude = np.maximum(0, np.exp(-radius**2/s**2)).T
ara[...,3] = (amplitude * 255)
ara[...,:3] = color * amplitude.reshape(shape + (1,))
im = PIL.Image.fromarray(ara, 'RGBA')
return im
def stars(N=1000, radius=100000, thickness=3, seed=42, color=[255, 240, 240]):
import ipyvolume as ipv
rng = np.random.RandomState(seed)
x, y, z = rng.normal(size=(3, N))
r = np.sqrt(x**2 + y**2 + z**2)/(radius + thickness * radius * np.random.random(N))
x /= r
y /= r
z /= r
return ipv.scatter(x, y, z, texture=radial_sprite((64, 64), color), marker='square_2d', grow_limits=False, size=radius*0.7/100)
milkyway_url = 'https://www.nasa.gov/sites/default/files/images/620057main_milkyway_full.jpg'
milkyway_image = UrlCached(milkyway_url)
def plot_milkyway(R_sun=8, size=100):
mw_image = PIL.Image.open(milkyway_image.fetch())
rescale = 40
t = np.linspace(0, 1, 100)
xmw = np.linspace(0, 1, 10)
ymw = np.linspace(0, 1, 10)
xmw, ymw = np.meshgrid(xmw, ymw)
zmw = xmw * 0 + 0.01
mw = mesh = ipv.plot_mesh((xmw-0.5)*rescale, (ymw-0.5)*rescale+R_sun, zmw, u=xmw, v=ymw, texture=mw_image, wireframe=False)
mw.material.blending = pythreejs.BlendingMode.CustomBlending
mw.material.blendSrc = pythreejs.BlendFactors.SrcColorFactor
mw.material.blendDst = pythreejs.BlendFactors.OneFactor
mw.material.blendEquation = 'AddEquation'
mw.material.transparent = True
mw.material.depthWrite = False
mw.material.alphaTest = 0.1
ipv.xyzlim(size)
return mesh
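# --- Usage sketch (illustrative; only functions defined above are used) -----
# A toy scene could be assembled roughly like this: a made-up circular orbit
# feeds spherical_galaxy_orbit, with a star field and the Milky Way plane as
# a backdrop. The Milky Way texture requires a network fetch.
def _demo_scene():
    a = np.linspace(0, 2 * np.pi, 50)
    orbit_x, orbit_y, orbit_z = 20 * np.cos(a), 20 * np.sin(a), a * 0
    fig = ipv.figure()
    stars()
    plot_milkyway()
    spherical_galaxy_orbit(orbit_x, orbit_y, orbit_z, N_stars=50)
    ipv.show()
    return fig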
| 41.118182
| 198
| 0.640946
| 721
| 4,523
| 3.898752
| 0.245492
| 0.021345
| 0.012451
| 0.025614
| 0.283885
| 0.154749
| 0.076485
| 0.051227
| 0.039132
| 0.039132
| 0
| 0.045894
| 0.20031
| 4,523
| 110
| 199
| 41.118182
| 0.731269
| 0.033827
| 0
| 0.021505
| 0
| 0
| 0.029594
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053763
| false
| 0
| 0.075269
| 0
| 0.182796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131e1d61812e10d7ea42b3ca418199cd229845a3
| 1,157
|
py
|
Python
|
deepfunning/function.py
|
Zrealshadow/DeepFunning
|
5c44210a6b30ea57a0be5f930da4ada540e7e3d0
|
[
"MIT"
] | null | null | null |
deepfunning/function.py
|
Zrealshadow/DeepFunning
|
5c44210a6b30ea57a0be5f930da4ada540e7e3d0
|
[
"MIT"
] | null | null | null |
deepfunning/function.py
|
Zrealshadow/DeepFunning
|
5c44210a6b30ea57a0be5f930da4ada540e7e3d0
|
[
"MIT"
] | null | null | null |
'''
* @author Waldinsamkeit
* @email Zenglz_pro@163.com
* @create date 2020-09-25 14:33:38
* @desc
'''
import torch
'''--------------------- Weighted Binary cross Entropy ----------------------'''
'''
In Torch's BCELoss, the weight is applied to every element of the input instead of to every class
'''
def weighted_binary_cross_entropy(output, target, weights=None):
if weights is not None:
assert len(weights) == 2
loss = weights[1] * (target * torch.log(output)) + \
weights[0] * ((1 - target) * torch.log(1 - output))
else:
loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
return torch.neg(torch.mean(loss))
''' ---------------------- Binary focal loss function -------------------------- '''
'''
To some degree, it can reduce the influence of an imbalanced dataset
'''
def focal_loss(y_true,y_pred,device):
alpha,gamma = torch.tensor(0.25).to(device) , torch.tensor(2.0).to(device)
y_pred=torch.clamp(y_pred,1e-7,1-1e-7)
return - alpha * y_true * torch.log(y_pred) * (1 - y_pred) ** gamma\
- (1 - alpha) * (1 - y_true) * torch.log(1 - y_pred) * y_pred
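''' ---------------------------- Usage sketch ---------------------------- '''
'''
Illustrative only: both losses expect probabilities in (0, 1) (e.g. sigmoid
outputs); the tensors below are made-up examples, not real data.
'''
if __name__ == '__main__':
    device = torch.device('cpu')
    y_pred = torch.tensor([0.9, 0.2, 0.7])
    y_true = torch.tensor([1.0, 0.0, 1.0])
    print(weighted_binary_cross_entropy(y_pred, y_true, weights=[1.0, 2.0]))
    print(focal_loss(y_true, y_pred, device).mean())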
| 28.925
| 84
| 0.577355
| 161
| 1,157
| 4.055901
| 0.453416
| 0.053599
| 0.085758
| 0.068913
| 0.067381
| 0.067381
| 0
| 0
| 0
| 0
| 0
| 0.041304
| 0.20484
| 1,157
| 39
| 85
| 29.666667
| 0.668478
| 0.081245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.142857
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131e36d011ba94f7784c802143deb17326553c0e
| 7,055
|
py
|
Python
|
dlms_cosem/hdlc/address.py
|
pwitab/dlms-cosem
|
aa9e18e6ef8a4fee30da8b797dad03b0b7847780
|
[
"MIT"
] | 35
|
2018-05-24T08:16:35.000Z
|
2022-02-24T16:07:07.000Z
|
dlms_cosem/hdlc/address.py
|
Layty/dlms-cosem
|
95b67054a1dfb928e960547b0246b7b6794f0594
|
[
"MIT"
] | 41
|
2020-12-18T16:31:40.000Z
|
2021-12-13T20:59:42.000Z
|
dlms_cosem/hdlc/address.py
|
Layty/dlms-cosem
|
95b67054a1dfb928e960547b0246b7b6794f0594
|
[
"MIT"
] | 19
|
2019-04-02T14:32:01.000Z
|
2021-12-14T13:24:29.000Z
|
from typing import *
import attr
from dlms_cosem.hdlc import validators
@attr.s(auto_attribs=True)
class HdlcAddress:
"""
A client address shall always be expressed on one byte.
To enable addressing more than one logical device within a single physical device
    and to support the multi-drop configuration, the server address may be
    divided into two parts:
The logical address to address a logical device (separate addressable entity
within a physical device) makes up the upper HDLC address
The logical address must always be present.
The physical address is used to address a physical device ( a physical device on
a multi-drop)
    The physical address can be omitted if not used.
"""
logical_address: int = attr.ib(validator=[validators.validate_hdlc_address])
physical_address: Optional[int] = attr.ib(
default=None, validator=[validators.validate_hdlc_address]
)
address_type: str = attr.ib(
default="client", validator=[validators.validate_hdlc_address_type]
)
@property
def length(self):
"""
The number of bytes the address makes up.
:return:
"""
return len(self.to_bytes())
def to_bytes(self):
out: List[Optional[int]] = list()
if self.address_type == "client":
# shift left 1 bit and set the lsb to mark end of address.
out.append(((self.logical_address << 1) | 0b00000001))
else:
# server address type
logical_higher, logical_lower = self._split_address(self.logical_address)
if self.physical_address:
physical_higher, physical_lower = self._split_address(
self.physical_address
)
# mark physical lower as end
physical_lower = physical_lower | 0b00000001
out.extend(
[logical_higher, logical_lower, physical_higher, physical_lower]
)
else:
                # no physical address so mark the logical as end.
logical_lower = logical_lower | 0b00000001
out.extend([logical_higher, logical_lower])
out_bytes = list()
for address in out:
if address:
out_bytes.append(address.to_bytes(1, "big"))
return b"".join(out_bytes)
@staticmethod
def _split_address(address: int) -> Tuple[Optional[int], int]:
higher: Optional[int]
lower: int
if address > 0b01111111:
lower = (address & 0b0000000001111111) << 1
higher = (address & 0b0011111110000000) >> 6
else:
lower = address << 1
higher = None
return higher, lower
@staticmethod
def _address_to_byte(address: int) -> bytes:
return address.to_bytes(1, "big")
@classmethod
def destination_from_bytes(cls, frame_bytes: bytes, address_type: str):
destination_address_data, _ = HdlcAddress.find_address_in_frame_bytes(
frame_bytes
)
(
destination_logical,
destination_physical,
destination_length,
) = destination_address_data
return cls(destination_logical, destination_physical, address_type)
@classmethod
def source_from_bytes(cls, frame_bytes: bytes, address_type: str):
_, source_address_data = HdlcAddress.find_address_in_frame_bytes(frame_bytes)
source_logical, source_physical, source_length = source_address_data
return cls(source_logical, source_physical, address_type)
@staticmethod
def find_address_in_frame_bytes(
hdlc_frame_bytes: bytes,
) -> Tuple[Tuple[int, Optional[int], int], Tuple[int, Optional[int], int]]:
"""
        An address can be 1, 2 or 4 bytes long; the end byte is indicated by
        the LSB of the last byte being 1.
        The first address is the destination address and the second is the
source address.
:param frame_bytes:
:return:
"""
# Find destination address.
destination_length: int = 1
destination_logical: int = 0
destination_physical: Optional[int] = 0
destination_positions_list: List[Tuple[int, int]] = [(3, 1), (4, 2), (6, 4)]
address_bytes: bytes
for pos, _length in destination_positions_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
destination_length = _length
break
continue
if destination_length == 1:
address_bytes = hdlc_frame_bytes[3].to_bytes(1, "big")
destination_logical = address_bytes[0] >> 1
destination_physical = None
elif destination_length == 2:
address_bytes = hdlc_frame_bytes[3:5]
destination_logical = address_bytes[0] >> 1
destination_physical = address_bytes[1] >> 1
elif destination_length == 4:
address_bytes = hdlc_frame_bytes[3:7]
destination_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            destination_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])  # last two bytes hold the physical address
# Find source address
source_length: int = 1
source_logical: int = 0
source_physical: Optional[int] = 0
source_position_list: List[Tuple[int, int]] = [
(item[0] + destination_length, item[1])
for item in destination_positions_list
]
for pos, _length in source_position_list:
end_byte = hdlc_frame_bytes[pos]
if bool(end_byte & 0b00000001):
# Found end byte:
source_length = _length
break
continue
if source_length == 1:
address_bytes = hdlc_frame_bytes[3 + destination_length].to_bytes(1, "big")
source_logical = address_bytes[0] >> 1
source_physical = None
elif source_length == 2:
address_bytes = hdlc_frame_bytes[3 + destination_length : 5 + source_length]
source_logical = address_bytes[0] >> 1
source_physical = address_bytes[1] >> 1
        elif source_length == 4:
            address_bytes = hdlc_frame_bytes[3 + destination_length : 7 + source_length]
            source_logical = HdlcAddress.parse_two_byte_address(address_bytes[:2])
            source_physical = HdlcAddress.parse_two_byte_address(address_bytes[2:])  # last two bytes hold the physical address
return (
(destination_logical, destination_physical, destination_length),
(source_logical, source_physical, source_length),
)
@staticmethod
def parse_two_byte_address(address_bytes: bytes):
        if len(address_bytes) != 2:
            raise ValueError("Can only parse 2 bytes for address")
upper = address_bytes[0] >> 1
lower = address_bytes[1] >> 1
return lower + (upper << 7)
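# --- Usage sketch (illustrative; the address values below are placeholders) -
# A client address is always one byte; a server address may carry a logical
# and a physical part, each encoded into its own byte(s) on the wire.
if __name__ == "__main__":
    client = HdlcAddress(logical_address=16, address_type="client")
    server = HdlcAddress(logical_address=1, physical_address=17, address_type="server")
    print(client.to_bytes().hex(), server.to_bytes().hex())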
| 36.554404
| 88
| 0.624522
| 822
| 7,055
| 5.113139
| 0.180049
| 0.059957
| 0.029979
| 0.031644
| 0.402332
| 0.316916
| 0.265287
| 0.265287
| 0.157507
| 0.089936
| 0
| 0.031199
| 0.300354
| 7,055
| 192
| 89
| 36.744792
| 0.820097
| 0.152516
| 0
| 0.176923
| 0
| 0
| 0.009955
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.023077
| 0.007692
| 0.176923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
131e68c02091db60b313cb5f13708b590b55dc83
| 3,676
|
py
|
Python
|
benchmarks/benchmarks/stats.py
|
RasmusSemmle/scipy
|
4ffeafe269597e6d41b3335549102cd5611b12cb
|
[
"FSFAP"
] | 1
|
2019-04-13T01:41:50.000Z
|
2019-04-13T01:41:50.000Z
|
benchmarks/benchmarks/stats.py
|
RasmusSemmle/scipy
|
4ffeafe269597e6d41b3335549102cd5611b12cb
|
[
"FSFAP"
] | 1
|
2018-10-16T01:50:18.000Z
|
2018-10-16T01:50:18.000Z
|
benchmarks/benchmarks/stats.py
|
RasmusSemmle/scipy
|
4ffeafe269597e6d41b3335549102cd5611b12cb
|
[
"FSFAP"
] | null | null | null |
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
try:
import scipy.stats as stats
except ImportError:
pass
from .common import Benchmark
class Anderson_KSamp(Benchmark):
def setup(self, *args):
self.rand = [np.random.normal(loc=i, size=1000) for i in range(3)]
def time_anderson_ksamp(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
stats.anderson_ksamp(self.rand)
class CorrelationFunctions(Benchmark):
param_names = ['alternative']
params = [
['two-sided', 'less', 'greater']
]
def setup(self, mode):
a = np.random.rand(2,2) * 10
self.a = a
def time_fisher_exact(self, alternative):
oddsratio, pvalue = stats.fisher_exact(self.a, alternative=alternative)
class InferentialStats(Benchmark):
def setup(self):
np.random.seed(12345678)
self.a = stats.norm.rvs(loc=5, scale=10, size=500)
self.b = stats.norm.rvs(loc=8, scale=10, size=20)
self.c = stats.norm.rvs(loc=8, scale=20, size=20)
def time_ttest_ind_same_var(self):
        # test different sized samples drawn with the same variance
stats.ttest_ind(self.a, self.b)
stats.ttest_ind(self.a, self.b, equal_var=False)
def time_ttest_ind_diff_var(self):
        # test different sized samples drawn with different variances
stats.ttest_ind(self.a, self.c)
stats.ttest_ind(self.a, self.c, equal_var=False)
class Distribution(Benchmark):
param_names = ['distribution', 'properties']
params = [
['cauchy', 'gamma', 'beta'],
['pdf', 'cdf', 'rvs', 'fit']
]
def setup(self, distribution, properties):
np.random.seed(12345678)
self.x = np.random.rand(100)
def time_distribution(self, distribution, properties):
if distribution == 'gamma':
if properties == 'pdf':
stats.gamma.pdf(self.x, a=5, loc=4, scale=10)
elif properties == 'cdf':
stats.gamma.cdf(self.x, a=5, loc=4, scale=10)
elif properties == 'rvs':
stats.gamma.rvs(size=1000, a=5, loc=4, scale=10)
elif properties == 'fit':
stats.gamma.fit(self.x, loc=4, scale=10)
elif distribution == 'cauchy':
if properties == 'pdf':
stats.cauchy.pdf(self.x, loc=4, scale=10)
elif properties == 'cdf':
stats.cauchy.cdf(self.x, loc=4, scale=10)
elif properties == 'rvs':
stats.cauchy.rvs(size=1000, loc=4, scale=10)
elif properties == 'fit':
stats.cauchy.fit(self.x, loc=4, scale=10)
elif distribution == 'beta':
if properties == 'pdf':
stats.beta.pdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'cdf':
stats.beta.cdf(self.x, a=5, b=3, loc=4, scale=10)
elif properties == 'rvs':
stats.beta.rvs(size=1000, a=5, b=3, loc=4, scale=10)
elif properties == 'fit':
stats.beta.fit(self.x, loc=4, scale=10)
# Retain old benchmark results (remove this if changing the benchmark)
time_distribution.version = "fb22ae5386501008d945783921fe44aef3f82c1dafc40cddfaccaeec38b792b0"
class DescriptiveStats(Benchmark):
param_names = ['n_levels']
params = [
[10, 1000]
]
def setup(self, n_levels):
np.random.seed(12345678)
self.levels = np.random.randint(n_levels, size=(1000, 10))
def time_mode(self, n_levels):
stats.mode(self.levels, axis=0)
| 32.530973
| 98
| 0.596572
| 473
| 3,676
| 4.556025
| 0.236786
| 0.045476
| 0.050116
| 0.061253
| 0.353596
| 0.304408
| 0.284919
| 0.192575
| 0.107193
| 0.07471
| 0
| 0.058757
| 0.273123
| 3,676
| 112
| 99
| 32.821429
| 0.747754
| 0.044614
| 0
| 0.211765
| 0
| 0
| 0.059595
| 0.018249
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129412
| false
| 0.011765
| 0.070588
| 0
| 0.329412
| 0.011765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1325fa5a7b424ce8ba5d22a0e7ac2e6be5ce3b49
| 9,890
|
py
|
Python
|
docs/schema_mapping.py
|
NoAnyLove/pydantic
|
50fd2c5b48ffe611b5c4feb24f26f7202217faab
|
[
"MIT"
] | 1
|
2020-11-01T00:04:04.000Z
|
2020-11-01T00:04:04.000Z
|
docs/schema_mapping.py
|
NoAnyLove/pydantic
|
50fd2c5b48ffe611b5c4feb24f26f7202217faab
|
[
"MIT"
] | null | null | null |
docs/schema_mapping.py
|
NoAnyLove/pydantic
|
50fd2c5b48ffe611b5c4feb24f26f7202217faab
|
[
"MIT"
] | 1
|
2021-03-02T02:49:05.000Z
|
2021-03-02T02:49:05.000Z
|
#!/usr/bin/env python3
"""
Build a table of Python / Pydantic to JSON Schema mappings.
Done like this rather than as a raw rst table to make future edits easier.
Please edit this file directly, not .tmp_schema_mappings.rst
"""
table = [
[
'bool',
'boolean',
'',
'JSON Schema Core',
''
],
[
'str',
'string',
'',
'JSON Schema Core',
''
],
[
'float',
'number',
'',
'JSON Schema Core',
''
],
[
'int',
'integer',
'',
'JSON Schema Validation',
''
],
[
'dict',
'object',
'',
'JSON Schema Core',
''
],
[
'list',
'array',
'',
'JSON Schema Core',
''
],
[
'tuple',
'array',
'',
'JSON Schema Core',
''
],
[
'set',
'array',
'{"uniqueItems": true}',
'JSON Schema Validation',
''
],
[
'List[str]',
'array',
'{"items": {"type": "string"}}',
'JSON Schema Validation',
'And equivalently for any other sub type, e.g. List[int].'
],
[
'Tuple[str, int]',
'array',
'{"items": [{"type": "string"}, {"type": "integer"}]}',
'JSON Schema Validation',
(
'And equivalently for any other set of subtypes. Note: If using schemas for OpenAPI, '
'you shouldn\'t use this declaration, as it would not be valid in OpenAPI (although it is '
'valid in JSON Schema).'
)
],
[
'Dict[str, int]',
'object',
'{"additionalProperties": {"type": "integer"}}',
'JSON Schema Validation',
(
'And equivalently for any other subfields for dicts. Have in mind that although you can use other types as '
'keys for dicts with Pydantic, only strings are valid keys for JSON, and so, only str is valid as '
'JSON Schema key types.'
)
],
[
'Union[str, int]',
'anyOf',
'{"anyOf": [{"type": "string"}, {"type": "integer"}]}',
'JSON Schema Validation',
'And equivalently for any other subfields for unions.'
],
[
'Enum',
'enum',
'{"enum": [...]}',
'JSON Schema Validation',
'All the literal values in the enum are included in the definition.'
],
[
'SecretStr',
'string',
'{"writeOnly": true}',
'JSON Schema Validation',
''
],
[
'SecretBytes',
'string',
'{"writeOnly": true}',
'JSON Schema Validation',
''
],
[
'EmailStr',
'string',
'{"format": "email"}',
'JSON Schema Validation',
''
],
[
'NameEmail',
'string',
'{"format": "name-email"}',
'Pydantic standard "format" extension',
''
],
[
'UrlStr',
'string',
'{"format": "uri"}',
'JSON Schema Validation',
''
],
[
'DSN',
'string',
'{"format": "dsn"}',
'Pydantic standard "format" extension',
''
],
[
'bytes',
'string',
'{"format": "binary"}',
'OpenAPI',
''
],
[
'Decimal',
'number',
'',
'JSON Schema Core',
''
],
[
'UUID1',
'string',
'{"format": "uuid1"}',
'Pydantic standard "format" extension',
''
],
[
'UUID3',
'string',
'{"format": "uuid3"}',
'Pydantic standard "format" extension',
''
],
[
'UUID4',
'string',
'{"format": "uuid4"}',
'Pydantic standard "format" extension',
''
],
[
'UUID5',
'string',
'{"format": "uuid5"}',
'Pydantic standard "format" extension',
''
],
[
'UUID',
'string',
'{"format": "uuid"}',
'Pydantic standard "format" extension',
'Suggested in OpenAPI.'
],
[
'FilePath',
'string',
'{"format": "file-path"}',
'Pydantic standard "format" extension',
''
],
[
'DirectoryPath',
'string',
'{"format": "directory-path"}',
'Pydantic standard "format" extension',
''
],
[
'Path',
'string',
'{"format": "path"}',
'Pydantic standard "format" extension',
''
],
[
'datetime',
'string',
'{"format": "date-time"}',
'JSON Schema Validation',
''
],
[
'date',
'string',
'{"format": "date"}',
'JSON Schema Validation',
''
],
[
'time',
'string',
'{"format": "time"}',
'JSON Schema Validation',
''
],
[
'timedelta',
'number',
'{"format": "time-delta"}',
'Difference in seconds (a ``float``), with Pydantic standard "format" extension',
'Suggested in JSON Schema repository\'s issues by maintainer.'
],
[
'Json',
'string',
'{"format": "json-string"}',
'Pydantic standard "format" extension',
''
],
[
'IPvAnyAddress',
'string',
'{"format": "ipvanyaddress"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 address as used in ``ipaddress`` module',
],
[
'IPvAnyInterface',
'string',
'{"format": "ipvanyinterface"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 interface as used in ``ipaddress`` module',
],
[
'IPvAnyNetwork',
'string',
'{"format": "ipvanynetwork"}',
'Pydantic standard "format" extension',
'IPv4 or IPv6 network as used in ``ipaddress`` module',
],
[
'StrictStr',
'string',
'',
'JSON Schema Core',
''
],
[
'ConstrainedStr',
'string',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``constr`` below.'
)
],
[
'constr(regex=\'^text$\', min_length=2, max_length=10)',
'string',
'{"pattern": "^text$", "minLength": 2, "maxLength": 10}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'ConstrainedInt',
'integer',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``conint`` below.'
)
],
[
'conint(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'integer',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
        'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'PositiveInt',
'integer',
'{"exclusiveMinimum": 0}',
'JSON Schema Validation',
''
],
[
'NegativeInt',
'integer',
'{"exclusiveMaximum": 0}',
'JSON Schema Validation',
''
],
[
'ConstrainedFloat',
'number',
'',
'JSON Schema Core',
(
            'If the type has values declared for the constraints, they are included as validations. '
            'See the mapping for ``confloat`` below.'
)
],
[
'confloat(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'number',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'PositiveFloat',
'number',
'{"exclusiveMinimum": 0}',
'JSON Schema Validation',
''
],
[
'NegativeFloat',
'number',
'{"exclusiveMaximum": 0}',
'JSON Schema Validation',
''
],
[
'ConstrainedDecimal',
'number',
'',
'JSON Schema Core',
(
'If the type has values declared for the constraints, they are included as validations. '
'See the mapping for ``condecimal`` below.'
)
],
[
'condecimal(gt=1, ge=2, lt=6, le=5, multiple_of=2)',
'number',
'{"maximum": 5, "exclusiveMaximum": 6, "minimum": 2, "exclusiveMinimum": 1, "multipleOf": 2}',
'JSON Schema Validation',
'Any argument not passed to the function (not defined) will not be included in the schema.'
],
[
'BaseModel',
'object',
'',
'JSON Schema Core',
        'All the properties defined will be represented with standard JSON Schema, including submodels.'
]
]
headings = [
'Python type',
'JSON Schema Type',
'Additional JSON Schema',
'Defined in',
'Notes',
]
v = ''
col_width = 300
for _ in range(5):
v += '+' + '-' * col_width
v += '+\n|'
for heading in headings:
v += f' {heading:{col_width - 2}} |'
v += '\n'
for _ in range(5):
v += '+' + '=' * col_width
v += '+'
for row in table:
v += '\n|'
for i, text in enumerate(row):
text = f'``{text}``' if i < 3 and text else text
v += f' {text:{col_width - 2}} |'
v += '\n'
for _ in range(5):
v += '+' + '-' * col_width
v += '+'
with open('.tmp_schema_mappings.rst', 'w') as f:
f.write(v)
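# --- Hedged illustration (not part of the original build script) ---
# A minimal sketch, assuming Pydantic v1, of how one row of the table above
# surfaces in a generated schema: constr(...) maps to pattern/minLength/maxLength.
# The _Demo model is a hypothetical example, not something this script uses.
from pydantic import BaseModel, constr
class _Demo(BaseModel):
    code: constr(regex='^text$', min_length=2, max_length=10)
# Expected to contain {"pattern": "^text$", "minLength": 2, "maxLength": 10}
print(_Demo.schema()['properties']['code'])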
| 23.491686
| 120
| 0.458544
| 873
| 9,890
| 5.174112
| 0.258877
| 0.090768
| 0.092982
| 0.102944
| 0.43768
| 0.366394
| 0.330529
| 0.303299
| 0.288466
| 0.288466
| 0
| 0.010464
| 0.381598
| 9,890
| 420
| 121
| 23.547619
| 0.72809
| 0.022042
| 0
| 0.536765
| 0
| 0.014706
| 0.529077
| 0.004967
| 0.007353
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.009804
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
13261dbcef738304d7319335d93a1caa3393465f
| 2,798
|
py
|
Python
|
hubspot3/test/test_broadcast.py
|
kevin2357/hubspot3
|
488f6ff4195034317d99431439087443bca1469f
|
[
"MIT"
] | 1
|
2019-02-25T01:09:51.000Z
|
2019-02-25T01:09:51.000Z
|
hubspot3/test/test_broadcast.py
|
kevin2357/hubspot3
|
488f6ff4195034317d99431439087443bca1469f
|
[
"MIT"
] | null | null | null |
hubspot3/test/test_broadcast.py
|
kevin2357/hubspot3
|
488f6ff4195034317d99431439087443bca1469f
|
[
"MIT"
] | null | null | null |
import time
import unittest
from nose.plugins.attrib import attr
from hubspot3.test import helper
from hubspot3.broadcast import Broadcast, BroadcastClient
class BroadcastClientTest(unittest.TestCase):
""" Unit tests for the HubSpot Broadcast API Python client.
This file contains some unittest tests for the Broadcast API.
Questions, comments: http://docs.hubapi.com/wiki/Discussion_Group
"""
def setUp(self):
self.client = BroadcastClient(**helper.get_options())
self.broadcast_guids = None
def tearDown(self):
# Cancel any broadcasts created as part of the tests
if self.broadcast_guids:
list(map(self.client.cancel_broadcast, self.broadcast_guids))
@attr("api")
def test_get_broadcasts(self):
# Should fetch at least 1 broadcast on the test portal 62515
broadcasts = self.client.get_broadcasts(limit=1)
self.assertTrue(len(broadcasts) > 0)
broadcast = broadcasts[0].to_dict()
self.assertIsNotNone(broadcast["channelGuid"])
print("\n\nFetched some broadcasts")
broadcast_guid = broadcast["broadcastGuid"]
# Re-fetch the broadcast using different call
bcast = self.client.get_broadcast(broadcast_guid)
# Should have expected fields
self.assertIsNotNone(bcast.broadcast_guid)
self.assertIsNotNone(bcast.channel_guid)
self.assertIsNotNone(bcast.status)
@attr("api")
def test_get_channels(self):
        # Fetch current channels, which are ensured to exist
channels = self.client.get_channels(current=True)
self.assertTrue(len(channels) > 0)
@attr("api")
def test_create_broadcast(self):
content = dict(body="Test hubspot3 unit tests http://www.hubspot.com")
channels = self.client.get_channels(current=True, publish_only=True)
if len(channels) == 0:
self.fail("Failed to find a publishable channel")
channel = channels[0]
# Get a trigger in the future
trigger_at = int(time.time() + 6000) * 1000
bcast = Broadcast(
{
"content": content,
"triggerAt": trigger_at,
"channelGuid": channel.channel_guid,
}
)
try:
resp = self.client.create_broadcast(bcast)
broadcast = Broadcast(resp)
self.assertIsNotNone(broadcast.broadcast_guid)
self.assertEqual(channel.channel_guid, broadcast.channel_guid)
# Ensure it is canceled
self.broadcast_guids = []
self.broadcast_guids.append(broadcast.broadcast_guid)
except Exception as e:
self.fail("Should not have raised exception: {}".format(e))
if __name__ == "__main__":
unittest.main()
| 34.121951
| 78
| 0.651537
| 318
| 2,798
| 5.606918
| 0.396226
| 0.03926
| 0.050477
| 0.023556
| 0.063937
| 0.044868
| 0.044868
| 0
| 0
| 0
| 0
| 0.011063
| 0.256969
| 2,798
| 81
| 79
| 34.54321
| 0.846561
| 0.162974
| 0
| 0.055556
| 0
| 0
| 0.092401
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 1
| 0.092593
| false
| 0
| 0.092593
| 0
| 0.203704
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1326b5cc799b2031e2e9803af2d0899c97761474
| 5,166
|
py
|
Python
|
benchmark/benchmarks/testdata.py
|
theroggy/geofile_ops
|
1b5ab42169d5c3332c0d8298c5a036257cfd68d5
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/benchmarks/testdata.py
|
theroggy/geofile_ops
|
1b5ab42169d5c3332c0d8298c5a036257cfd68d5
|
[
"BSD-3-Clause"
] | 26
|
2021-12-01T07:46:53.000Z
|
2022-03-30T23:40:43.000Z
|
benchmark/benchmarks/testdata.py
|
theroggy/geofile_ops
|
1b5ab42169d5c3332c0d8298c5a036257cfd68d5
|
[
"BSD-3-Clause"
] | 1
|
2021-11-30T17:51:34.000Z
|
2021-11-30T17:51:34.000Z
|
# -*- coding: utf-8 -*-
"""
Module to prepare test data for benchmarking geo operations.
"""
import enum
import logging
from pathlib import Path
import pprint
import shutil
import sys
import tempfile
from typing import Optional
import urllib.request
import zipfile
# Add path so the benchmark packages are found
sys.path.insert(0, str(Path(__file__).resolve().parent.parent.parent))
import geofileops as gfo
################################################################################
# Some inits
################################################################################
logger = logging.getLogger(__name__)
################################################################################
# The real work
################################################################################
class TestFile(enum.Enum):
AGRIPRC_2018 = (
0,
"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2018/Landbouwgebruikspercelen_LV_2018_GewVLA_Shape.zip",
"agriprc_2018.gpkg",
)
AGRIPRC_2019 = (
1,
"https://downloadagiv.blob.core.windows.net/landbouwgebruikspercelen/2019/Landbouwgebruikspercelen_LV_2019_GewVLA_Shapefile.zip",
"agriprc_2019.gpkg",
)
COMMUNES = (
2,
"https://downloadagiv.blob.core.windows.net/referentiebestand-gemeenten/VoorlopigRefBestandGemeentegrenzen_2019-01-01/VRBG_toestand_16_05_2018_(geldend_vanaf_01_01_2019)_GewVLA_Shape.zip",
"communes.gpkg",
)
def __init__(self, value, url, filename):
self._value_ = value
self.url = url
self.filename = filename
def get_file(self, tmp_dir: Path) -> Path:
testfile_path = download_samplefile(
url=self.url, dst_name=self.filename, dst_dir=tmp_dir
)
testfile_info = gfo.get_layerinfo(testfile_path)
logger.debug(
f"TestFile {self.name} contains {testfile_info.featurecount} rows."
)
return testfile_path
def download_samplefile(
url: str, dst_name: str, dst_dir: Optional[Path] = None
) -> Path:
"""
    Download a sample file to dst_dir/dst_name.
    If it is zipped, it will be unzipped. If needed, it will be converted to
    the file type as determined by the suffix of dst_name.
    Args:
        url (str): the url of the file to download
        dst_name (str): the name to give to the downloaded (and, if needed,
            converted) sample file.
        dst_dir (Path): the dir to download the sample file to.
            If it is None, a dir in the default tmp location will be
            used. Defaults to None.
Returns:
Path: the path to the downloaded sample file.
"""
# If the destination path is a directory, use the default file name
dst_path = prepare_dst_path(dst_name, dst_dir)
# If the sample file already exists, return
if dst_path.exists():
return dst_path
# Make sure the destination directory exists
dst_path.parent.mkdir(parents=True, exist_ok=True)
# If the url points to a file with the same suffix as the dst_path,
# just download
url_path = Path(url)
if url_path.suffix.lower() == dst_path.suffix.lower():
logger.info(f"Download to {dst_path}")
urllib.request.urlretrieve(url, dst_path)
else:
        # The downloaded file is different from the wanted destination format, so some
# converting will need to be done
tmp_dir = dst_path.parent / "tmp"
try:
# Remove tmp dir if it exists already
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
tmp_dir.mkdir(parents=True, exist_ok=True)
# Download file
tmp_path = tmp_dir / f"{dst_path.stem}{url_path.suffix.lower()}"
logger.info(f"Download tmp data to {tmp_path}")
urllib.request.urlretrieve(url, tmp_path)
# If the temp file is a .zip file, unzip to dir
if tmp_path.suffix == ".zip":
# Unzip
unzippedzip_dir = dst_path.parent / tmp_path.stem
logger.info(f"Unzip to {unzippedzip_dir}")
with zipfile.ZipFile(tmp_path, "r") as zip_ref:
zip_ref.extractall(unzippedzip_dir)
# Look for the file
tmp_paths = []
for suffix in [".shp", ".gpkg"]:
tmp_paths.extend(list(unzippedzip_dir.rglob(f"*{suffix}")))
if len(tmp_paths) == 1:
tmp_path = tmp_paths[0]
else:
raise Exception(
f"Should find 1 geofile, found {len(tmp_paths)}: \n{pprint.pformat(tmp_paths)}"
)
if dst_path.suffix == tmp_path.suffix:
gfo.move(tmp_path, dst_path)
else:
logger.info(f"Convert tmp file to {dst_path}")
gfo.makevalid(tmp_path, dst_path)
finally:
if tmp_dir.exists():
shutil.rmtree(tmp_dir)
return dst_path
def prepare_dst_path(dst_name: str, dst_dir: Optional[Path] = None):
if dst_dir is None:
return Path(tempfile.gettempdir()) / "geofileops_sampledata" / dst_name
else:
return dst_dir / dst_name
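# --- Hedged usage sketch (not part of the original module) ---
# How the helpers above fit together: TestFile.get_file() downloads the sample
# data via download_samplefile(), unzipping and converting it when needed.
# The tmp directory name below is a hypothetical choice for illustration only.
if __name__ == "__main__":
    sample_dir = Path(tempfile.gettempdir()) / "geofileops_benchmark_data"
    agriprc_path = TestFile.AGRIPRC_2018.get_file(sample_dir)
    print(f"sample data ready at {agriprc_path}")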
| 34.44
| 196
| 0.587108
| 630
| 5,166
| 4.625397
| 0.288889
| 0.04324
| 0.0151
| 0.025738
| 0.184626
| 0.138298
| 0.107756
| 0.043926
| 0
| 0
| 0
| 0.016918
| 0.267712
| 5,166
| 149
| 197
| 34.671141
| 0.75337
| 0.202284
| 0
| 0.111111
| 0
| 0.011111
| 0.218943
| 0.031661
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.122222
| 0
| 0.266667
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132764a2c0f4e72e8781ad3a0b75e85eb885eb90
| 12,911
|
py
|
Python
|
python-client/trustedanalytics/core/atktypes.py
|
blbarker/atk
|
bcb747d053e801820233a6439c88a457c8cf2438
|
[
"Apache-2.0"
] | 1
|
2016-04-05T21:57:16.000Z
|
2016-04-05T21:57:16.000Z
|
python-client/trustedanalytics/core/atktypes.py
|
blbarker/atk
|
bcb747d053e801820233a6439c88a457c8cf2438
|
[
"Apache-2.0"
] | null | null | null |
python-client/trustedanalytics/core/atktypes.py
|
blbarker/atk
|
bcb747d053e801820233a6439c88a457c8cf2438
|
[
"Apache-2.0"
] | null | null | null |
# vim: set encoding=utf-8
#
# Copyright (c) 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
trusted_analytics definitions for Data Types
"""
# TODO - consider server providing types, similar to commands
__all__ = ['valid_data_types', 'ignore', 'unknown', 'float32', 'float64', 'int32', 'int64', 'vector', 'unit', 'datetime']
import numpy as np
import json
import re
# alias numpy types
float32 = np.float32
float64 = np.float64
int32 = np.int32
int64 = np.int64
from datetime import datetime
import dateutil.parser as datetime_parser
# Chose python's datetime over numpy.datetime64 because of time zone support and string serialization
# Here's a long thread discussing numpy's datetime64 timezone problem:
# http://mail.scipy.org/pipermail/numpy-discussion/2013-April/066038.html
# If need be, UDFs can create numpy objects from x using: numpy.datetime64(x.isoformat())
class _Vector(object):
base_type = np.ndarray
re_pattern = re.compile(r"^vector\((\d+)\)$")
def __init__(self, length):
self.length = int(length)
self.is_complex_type = True
self.constructor = self._get_constructor()
def _get_constructor(self):
length = self.length
def constructor(value):
"""
Creates a numpy array from a value, which can be one of many types
"""
if value is None:
return None
try:
# first try numpy's constructor
array = np.array(value, dtype=np.float64) # ensures the array is entirely made of doubles
except:
# also support json or comma-sep string
if valid_data_types.value_is_string(value):
try:
value = json.loads(value)
except:
value = [np.float64(item.strip()) for item in value.split(',') if item]
array = np.array(value, dtype=np.float64) # ensures the array is entirely made of doubles
else:
raise
array = np.atleast_1d(array) # numpy thing, so that vectors of size 1 will still have dimension and length
if len(array) != length:
raise ValueError("Could not construct vector in Python Client. Expected vector of length %s, but received length %d" % (length, len(array)))
return array
return constructor
@staticmethod
def get_from_string(data_type_str):
return _Vector(_Vector.re_pattern.match(data_type_str).group(1))
def __repr__(self):
return "vector(%d)" % self.length
vector = _Vector
class _Unit(object):
"""Ignore type used for schemas during file import"""
pass
unit = _Unit
class _Ignore(object):
"""Ignore type used for schemas during file import"""
pass
ignore = _Ignore
class _Unknown(object):
"""Unknown type used when type is indeterminate"""
pass
unknown = _Unknown
# map types to their string identifier
_primitive_type_to_str_table = {
#bool: "bool", TODO
#bytearray: "bytearray", TODO
#dict: "dict", TODO
float32: "float32",
float64: "float64",
int32: "int32",
int64: "int64",
#list: "list", TODO
unicode: "unicode",
ignore: "ignore",
datetime: "datetime",
}
# build reverse map string -> type
_primitive_str_to_type_table = dict([(s, t) for t, s in _primitive_type_to_str_table.iteritems()])
_primitive_alias_type_to_type_table = {
float: float64,
int: int32,
long: int64,
str: unicode,
#list: vector,
}
_primitive_alias_str_to_type_table = dict([(alias.__name__, t) for alias, t in _primitive_alias_type_to_type_table.iteritems()])
_primitive_type_to_default_value = {
#bool: False, TODO
float32: 0.0,
float64: 0.0,
int32: 0,
int64: 0,
unicode: "",
#datetime: "datetime",
}
def get_float_constructor(float_type):
"""Creates special constructor for floating point types which handles nan, inf, -inf"""
ft = float_type
def float_constructor(value):
result = ft(value)
if np.isnan(result) or result == np.inf or result == -np.inf: # this is 5x faster than calling np.isfinite()
return None
return ft(value)
return float_constructor
def datetime_constructor(value):
"""Creates special constructor for datetime parsing"""
if valid_data_types.value_is_string(value):
return datetime_parser.parse(value)
else:
try:
return datetime(*value)
except:
raise TypeError("cannot convert type to the datetime")
class _DataTypes(object):
"""
    Provides functions which define and operate on supported data types.
"""
def __contains__(self, item):
try:
self.validate(item)
return True
except ValueError:
return False
def __repr__(self):
aliases = "\n(and aliases: %s)" % (", ".join(sorted(["%s->%s" % (alias.__name__, self.to_string(data_type)) for alias, data_type in _primitive_alias_type_to_type_table.iteritems()])))
return ", ".join(sorted(_primitive_str_to_type_table.keys() + ["vector(n)"])) + aliases
@staticmethod
def value_is_string(value):
"""get bool indication that value is a string, whether str or unicode"""
return isinstance(value, basestring)
@staticmethod
def value_is_missing_value(value):
return value is None or (type(value) in [float32, float64, float] and (np.isnan(value) or value in [np.inf, -np.inf]))
@staticmethod
def get_primitive_data_types():
return _primitive_type_to_str_table.keys()
@staticmethod
def to_string(data_type):
"""
Returns the string representation of the given type
Parameters
----------
data_type : type
valid data type; if invalid, a ValueError is raised
Returns
-------
result : str
string representation
Examples
--------
>>> valid_data_types.to_string(float32)
'float32'
"""
valid_data_type = _DataTypes.get_from_type(data_type)
try:
return _primitive_type_to_str_table[valid_data_type]
except KeyError:
# complex data types should use their repr
return repr(valid_data_type)
@staticmethod
def get_from_string(data_type_str):
"""
Returns the data type for the given type string representation
Parameters
----------
data_type_str : str
valid data type str; if invalid, a ValueError is raised
Returns
-------
result : type
type represented by the string
Examples
--------
>>> valid_data_types.get_from_string('unicode')
unicode
"""
try:
return _primitive_str_to_type_table[data_type_str]
except KeyError:
try:
return _primitive_alias_str_to_type_table[data_type_str]
except KeyError:
try:
return vector.get_from_string(data_type_str)
except:
raise ValueError("Unsupported type string '%s' " % data_type_str)
@staticmethod
def is_primitive_type(data_type):
return data_type in _primitive_type_to_str_table or data_type in _primitive_alias_type_to_type_table
@staticmethod
def is_complex_type(data_type):
try:
return data_type.is_complex_type
except AttributeError:
return False
@staticmethod
def is_primitive_alias_type(data_type):
return data_type in _primitive_alias_type_to_type_table
@staticmethod
def get_from_type(data_type):
"""
Returns the data type for the given type (often it will return the same type)
Parameters
----------
data_type : type
valid data type or type that may be aliased for a valid data type;
if invalid, a ValueError is raised
Returns
-------
result : type
valid data type for given type
Examples
--------
>>> valid_data_types.get_from_type(int)
numpy.int32
"""
if _DataTypes.is_primitive_alias_type(data_type):
return _primitive_alias_type_to_type_table[data_type]
if _DataTypes.is_primitive_type(data_type) or _DataTypes.is_complex_type(data_type):
return data_type
raise ValueError("Unsupported type %s" % data_type)
@staticmethod
def validate(data_type):
"""Raises a ValueError if data_type is not a valid data_type"""
_DataTypes.get_from_type(data_type)
@staticmethod
def get_constructor(to_type):
"""gets the constructor for the to_type"""
try:
return to_type.constructor
except AttributeError:
if to_type == float64 or to_type == float32:
return get_float_constructor(to_type)
if to_type == datetime:
return datetime_constructor
def constructor(value):
if value is None:
return None
return to_type(value)
return constructor
@staticmethod
def standardize_schema(schema):
return [(name, _DataTypes.get_from_type(t)) for name, t in schema]
@staticmethod
def validate_data(schema, data):
return [_DataTypes.cast(value, data_type) for value, data_type in zip(data, map(lambda t: t[1], schema))]
@staticmethod
def get_default_data_for_schema(schema):
return [_DataTypes.get_default_type_value(data_type) for name, data_type in schema]
@staticmethod
def get_default_type_value(data_type):
try:
return _primitive_type_to_default_value[data_type]
except KeyError:
if data_type == vector:
return []
if data_type == datetime:
return datetime.now()
raise ValueError("Unable to find default value for data type %s (invalid data type)" % data_type)
@staticmethod
def cast(value, to_type):
"""
Returns the given value cast to the given type. None is always returned as None
Parameters
----------
value : object
value to convert by casting
to_type : type
valid data type to use for the cast
Returns
-------
results : object
the value cast to the to_type
Examples
--------
>>> valid_data_types.cast(3, float64)
3.0
>>> valid_data_types.cast(4.5, str)
'4.5'
>>> valid_data_types.cast(None, str)
None
>>> valid_data_types.cast(np.inf, float32)
None
"""
if _DataTypes.value_is_missing_value(value): # Special handling for missing values
return None
elif _DataTypes.is_primitive_type(to_type) and type(value) is to_type: # Optimization
return value
try:
constructor = _DataTypes.get_constructor(to_type)
result = constructor(value)
return None if _DataTypes.value_is_missing_value(result) else result
except Exception as e:
raise ValueError(("Unable to cast to type %s\n" % to_type) + str(e))
@staticmethod
def datetime_from_iso(iso_string):
"""create datetime object from ISO 8601 string"""
return datetime_parser.parse(iso_string)
valid_data_types = _DataTypes()
def numpy_to_bson_friendly(obj):
"""take an object and convert it to a type that can be serialized to bson if neccessary."""
if isinstance(obj, float32) or isinstance(obj, float64):
return float(obj)
if isinstance(obj, int32):
return int(obj)
if isinstance(obj, vector.base_type):
return obj.tolist()
if isinstance(obj, datetime):
return obj.isoformat()
if isinstance(obj, dict):
return dict([(numpy_to_bson_friendly(key), numpy_to_bson_friendly(value)) for key, value in obj.items()])
if isinstance(obj, list):
return [numpy_to_bson_friendly(item) for item in obj]
# Let the base class default method raise the TypeError
return obj
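# --- Hedged illustration (not part of the original module) ---
# The _Vector constructor documented above accepts a plain list, a JSON string,
# or a comma-separated string, and always returns a length-checked float64 array.
if __name__ == "__main__":
    v2 = vector(2)
    print(v2.constructor([1.0, 2.0]))    # plain list
    print(v2.constructor("[1, 2]"))      # JSON string, parsed with json.loads
    print(v2.constructor("1.0, 2.0"))    # falls back to comma-separated parsing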
| 31.036058
| 191
| 0.629386
| 1,609
| 12,911
| 4.830329
| 0.196395
| 0.055584
| 0.019815
| 0.01544
| 0.265183
| 0.212429
| 0.16405
| 0.141662
| 0.09264
| 0.070767
| 0
| 0.014162
| 0.283557
| 12,911
| 415
| 192
| 31.110843
| 0.826054
| 0.293471
| 0
| 0.300926
| 0
| 0
| 0.05398
| 0
| 0
| 0
| 0
| 0.004819
| 0
| 1
| 0.134259
| false
| 0.013889
| 0.023148
| 0.041667
| 0.421296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132884f1556370e07396101f0cb65bd3696963c4
| 6,530
|
py
|
Python
|
srd/pageaggregator.py
|
poikilos/tabletopManualMiner
|
94a824feabdf0a8efa1bf28670af44820aff9923
|
[
"MIT"
] | null | null | null |
srd/pageaggregator.py
|
poikilos/tabletopManualMiner
|
94a824feabdf0a8efa1bf28670af44820aff9923
|
[
"MIT"
] | null | null | null |
srd/pageaggregator.py
|
poikilos/tabletopManualMiner
|
94a824feabdf0a8efa1bf28670af44820aff9923
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import math
import sys
try:
# from PDFPageDetailedAggregator:
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine
except ModuleNotFoundError:
prerr("To use the aggregator (required for generating chunks.json)"
" you must first install the following module for Python:")
prerr(" pdfminer")
exit(1)
try:
input = raw_input
except NameError:
# Python 3
pass
# TODO:
from srd import (
objDict,
BBox,
DocChunk,
clean_frag_text,
clean_frag,
same_style,
frag_dict,
)
def ltannoDict(ltanno):
return objDict(ltanno)
'''
class DocFragment:
def __init__(self, text, fontname, size):
self.text = text
self.fontname = fontname
self.size = size
def sameStyle(self, fragment):
"""
Is same fontname and size.
"""
ffn = fragment.fontname
ffs = fragment.size
return (ffs == self.size) and (ffn == self.fontname)
def clean(self):
self.text = clean_frag_text(self.text)
'''
class PDFPageDetailedAggregator(PDFPageAggregator):
"""
This class is based on PDFPageDetailedAggregator from
lindblandro's Oct 4 '13 at 10:33 answer
edited by slushy Feb 4 '14 at 23:41
at <https://stackoverflow.com/a/19179114>
on <https://stackoverflow.com/questions/15737806/extract-text-using-
pdfminer-and-pypdf2-merges-columns>.
"""
def __init__(self, rsrcmgr, pageno=1, laparams=None,
colStarts=None):
PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
self.chunks = []
self.colStarts = colStarts
if self.colStarts is not None:
print("columns: {}".format(len(self.colStarts)))
self.page_number = 0
def receive_layout(self, ltpage):
def render(item, page_number):
if isinstance(item, LTPage) or isinstance(item, LTTextBox):
for child in item:
render(child, page_number)
elif isinstance(item, LTTextLine):
child_str = ''
fontSize = None
fontName = None
fontSizes = []
fontNames = []
warnings = []
parts = []
fragments = []
annotations = []
for child in item:
strp = None
if isinstance(child, LTChar):
child_str += child.get_text()
strp = child.get_text().strip()
# and (len(strp) > 0)
if fontName is not None:
if fontName != child.fontname:
warnings.append("mixed fontName")
if fontSize is not None:
if fontSize != child.size:
warnings.append("mixed fontSize")
fontName = child.fontname
fontSize = child.size
frag = frag_dict(
child.get_text(),
child.fontname,
child.size,
)
fragments.append(frag)
# fontNames.append(fontName)
# fontSizes.append(fontSize)
parts.append(strp)
elif isinstance(child, LTAnno):
child_str += child.get_text()
strp = child.get_text().strip()
annotations.append(ltannoDict(child))
child_str = ' '.join(child_str.split()).strip()
if child_str:
if len(warnings) > 0:
"""
print("Warnings in \"{}\":"
" {}: fonts {} sizes {} parts {}"
"".format(child_str, warnings, fontNames,
fontSizes, parts))
input("Press enter to continue...")
"""
fontSize = None
fontName = None
col = None
cols = 0
if self.colStarts is not None:
cols = len(self.colStarts)
if (cols is None) or (cols == 1):
col = 0
elif (cols == 2):
col = 0
col2Min = math.floor(self.colStarts[1])
if item.bbox[0] >= col2Min:
col = 1 # Index [1] is column 2.
else:
raise ValueError("Only a list of length 1 (same as None) or 2"
" is implemented for \"colStarts\".")
# if isinstance(child, LTChar):
'''
try:
fontName = child.fontname
fontSize = child.size
# Avoid "AttributeError:
# 'LTAnno' object has no attribute 'fontname'"
except AttributeError as ex:
print("dir(LTTextLine): {}".format(dir(LTTextLine)))
print("dir(child): {}".format(dir(child)))
raise ex
'''
chunk = DocChunk(
page_number,
col,
item.bbox,
child_str,
fontName=fontName,
fontSize=fontSize,
fragments=fragments,
annotations=annotations,
)
chunk.groupFragments()
self.chunks.append(chunk)
for child in item:
render(child, page_number)
return
render(ltpage, self.page_number)
self.page_number += 1
self.chunks = sorted(self.chunks, key = lambda f: (f.pageid, f.column, -f.bbox.y1))
self.result = ltpage
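# --- Hedged usage sketch (not part of the original module) ---
# The aggregator above is a pdfminer "device" and has to be driven by a
# PDFPageInterpreter over the document's pages. PDFPage is assumed to come from
# pdfminer.six; 'manual.pdf' is a hypothetical input file.
if __name__ == "__main__":
    from pdfminer.pdfpage import PDFPage
    with open("manual.pdf", "rb") as fp:
        parser = PDFParser(fp)
        document = PDFDocument(parser)
        rsrcmgr = PDFResourceManager()
        device = PDFPageDetailedAggregator(rsrcmgr, laparams=LAParams())
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        for page in PDFPage.create_pages(document):
            interpreter.process_page(page)
        print("extracted {} chunks".format(len(device.chunks)))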
| 36.077348
| 91
| 0.467994
| 568
| 6,530
| 5.306338
| 0.318662
| 0.021234
| 0.019907
| 0.013935
| 0.091573
| 0.091573
| 0.050431
| 0.050431
| 0.027206
| 0.027206
| 0
| 0.014963
| 0.44732
| 6,530
| 180
| 92
| 36.277778
| 0.820172
| 0.072435
| 0
| 0.168142
| 0
| 0
| 0.048513
| 0
| 0
| 0
| 0
| 0.005556
| 0
| 1
| 0.035398
| false
| 0.00885
| 0.061947
| 0.00885
| 0.123894
| 0.00885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1328be197a55352f7669047e01b9ed9e941d72e5
| 1,131
|
py
|
Python
|
ctrltest.py
|
dkim286/cpsc454-proj
|
16314802bae3cfbd4d1bf6d7f75a9e6adeb6700d
|
[
"FTL",
"CNRI-Python"
] | null | null | null |
ctrltest.py
|
dkim286/cpsc454-proj
|
16314802bae3cfbd4d1bf6d7f75a9e6adeb6700d
|
[
"FTL",
"CNRI-Python"
] | null | null | null |
ctrltest.py
|
dkim286/cpsc454-proj
|
16314802bae3cfbd4d1bf6d7f75a9e6adeb6700d
|
[
"FTL",
"CNRI-Python"
] | null | null | null |
from pox.core import core
import pox.openflow.libopenflow_01 as of
from forwarding.l2_learning import *
from tkinter import *
from project.firewall import TestFW
from project.ui import UI
def setup():
top = Toplevel()
# quit POX when window is killed
top.protocol("WM_DELETE_WINDOW", core.quit)
top.title("firewall thing")
frame = Frame(top, padding="3")
frame.grid()
    disp = Label(frame, text="hmm")  # keep a reference to the widget, not to .grid()'s None return
    disp.grid(column=0, row=0)
def reload():
conn = core.openflow.getConnection(1)
disp.configure(str(dir(conn)))
b_reload = Button(frame, text="reload", command=reload).grid(column=0, row=1)
b_quit = Button(frame, text="quit", command=top.destroy).grid(column=0, row=2)
def launch():
fw_list_dpid = [51, 52]
srv_list = {"web" : ['10.0.0.100']}
# register firewall
core.registerNew(TestFW, fw_list_dpid[0], srv_list)
# just use L2 learning switch for others
core.registerNew(l2_learning, False)
#core.registerNew(UI)
def start_ui():
core.tk.do(setup)
core.call_when_ready(start_ui, ['openflow', 'tk'])
| 23.5625
| 82
| 0.654288
| 162
| 1,131
| 4.462963
| 0.475309
| 0.041494
| 0.045643
| 0.058091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028058
| 0.212202
| 1,131
| 47
| 83
| 24.06383
| 0.783389
| 0.096375
| 0
| 0
| 0
| 0
| 0.06588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1328eb00ae7fa054c34be04f558ebc32d9e45ce4
| 10,911
|
py
|
Python
|
image_classification/T2T_ViT/load_pytorch_weights.py
|
RangeKing/PaddleViT
|
0e25958686e04ed8872cf67fba0dfd6918e9b4dd
|
[
"Apache-2.0"
] | null | null | null |
image_classification/T2T_ViT/load_pytorch_weights.py
|
RangeKing/PaddleViT
|
0e25958686e04ed8872cf67fba0dfd6918e9b4dd
|
[
"Apache-2.0"
] | null | null | null |
image_classification/T2T_ViT/load_pytorch_weights.py
|
RangeKing/PaddleViT
|
0e25958686e04ed8872cf67fba0dfd6918e9b4dd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""convert pytorch model weights to paddle pdparams"""
import os
import numpy as np
import paddle
import torch
import timm
from config import get_config
from t2t_vit import build_t2t_vit as build_model
from T2T_ViT_torch.models.t2t_vit import *
from T2T_ViT_torch.utils import load_for_transfer_learning
def print_model_named_params(model):
print('----------------------------------')
for name, param in model.named_parameters():
print(name, param.shape)
print('----------------------------------')
def print_model_named_buffers(model):
print('----------------------------------')
for name, param in model.named_buffers():
print(name, param.shape)
print('----------------------------------')
def torch_to_paddle_mapping(model_name, config):
# (torch_param_name, paddle_param_name)
mapping = [
('cls_token', 'cls_token'),
('pos_embed', 'pos_embed'),
]
for idx in range(1, 3):
th_prefix = f'tokens_to_token.attention{idx}'
pp_prefix = f'patch_embed.attn{idx}'
if '_t_' in model_name:
layer_mapping = [
(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),
(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),
(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),
]
else:
layer_mapping = [
(f'{th_prefix}.w', f'{pp_prefix}.w'),
(f'{th_prefix}.kqv', f'{pp_prefix}.kqv'),
(f'{th_prefix}.proj', f'{pp_prefix}.proj'),
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.0', f'{pp_prefix}.mlp.0'),
(f'{th_prefix}.mlp.2', f'{pp_prefix}.mlp.2'),
]
mapping.extend(layer_mapping)
mapping.append(('tokens_to_token.project','patch_embed.proj'))
num_layers = config.MODEL.DEPTH
for idx in range(num_layers):
th_prefix = f'blocks.{idx}'
pp_prefix = f'blocks.{idx}'
layer_mapping = [
(f'{th_prefix}.norm1', f'{pp_prefix}.norm1'),
(f'{th_prefix}.attn.qkv', f'{pp_prefix}.attn.qkv'),
(f'{th_prefix}.attn.proj', f'{pp_prefix}.attn.proj'),
(f'{th_prefix}.norm2', f'{pp_prefix}.norm2'),
(f'{th_prefix}.mlp.fc1', f'{pp_prefix}.mlp.fc1'),
(f'{th_prefix}.mlp.fc2', f'{pp_prefix}.mlp.fc2'),
]
mapping.extend(layer_mapping)
head_mapping = [
('norm', 'norm'),
('head', 'head'),
]
mapping.extend(head_mapping)
return mapping
def convert(torch_model, paddle_model, model_name, config):
def _set_value(th_name, pd_name, transpose=True):
th_shape = th_params[th_name].shape
pd_shape = tuple(pd_params[pd_name].shape) # paddle shape default type is list
#assert th_shape == pd_shape, f'{th_shape} != {pd_shape}'
print(f'**SET** {th_name} {th_shape} **TO** {pd_name} {pd_shape}')
if isinstance(th_params[th_name], torch.nn.parameter.Parameter):
value = th_params[th_name].data.numpy()
else:
value = th_params[th_name].numpy()
if len(value.shape) == 2 and transpose:
value = value.transpose((1, 0))
pd_params[pd_name].set_value(value)
# 1. get paddle and torch model parameters
pd_params = {}
th_params = {}
for name, param in paddle_model.named_parameters():
pd_params[name] = param
for name, param in torch_model.named_parameters():
th_params[name] = param
for name, param in paddle_model.named_buffers():
pd_params[name] = param
for name, param in torch_model.named_buffers():
th_params[name] = param
# 2. get name mapping pairs
mapping = torch_to_paddle_mapping(model_name, config)
missing_keys_th = []
missing_keys_pd = []
zip_map = list(zip(*mapping))
th_keys = list(zip_map[0])
pd_keys = list(zip_map[1])
for key in th_params:
missing = False
if key not in th_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in th_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in th_keys:
missing = False
if missing:
missing_keys_th.append(key)
for key in pd_params:
missing = False
if key not in pd_keys:
missing = True
if key.endswith('.weight'):
if key[:-7] in pd_keys:
missing = False
if key.endswith('.bias'):
if key[:-5] in pd_keys:
missing = False
if missing:
missing_keys_pd.append(key)
print('====================================')
print('missing_keys_pytorch:')
print(missing_keys_th)
print('missing_keys_paddle:')
print(missing_keys_pd)
print('====================================')
# 3. set torch param values to paddle params: may needs transpose on weights
for th_name, pd_name in mapping:
if th_name in th_params and pd_name in pd_params: # nn.Parameters
if th_name.endswith('w'):
_set_value(th_name, pd_name, transpose=False)
else:
_set_value(th_name, pd_name)
else:
if f'{th_name}.weight' in th_params and f'{pd_name}.weight' in pd_params:
th_name_w = f'{th_name}.weight'
pd_name_w = f'{pd_name}.weight'
_set_value(th_name_w, pd_name_w)
if f'{th_name}.bias' in th_params and f'{pd_name}.bias' in pd_params:
th_name_b = f'{th_name}.bias'
pd_name_b = f'{pd_name}.bias'
_set_value(th_name_b, pd_name_b)
if f'{th_name}.running_mean' in th_params and f'{pd_name}._mean' in pd_params:
th_name_b = f'{th_name}.running_mean'
pd_name_b = f'{pd_name}._mean'
_set_value(th_name_b, pd_name_b)
if f'{th_name}.running_var' in th_params and f'{pd_name}._variance' in pd_params:
th_name_b = f'{th_name}.running_var'
pd_name_b = f'{pd_name}._variance'
_set_value(th_name_b, pd_name_b)
return paddle_model
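# --- Hedged note (not part of the original script) ---
# Why _set_value() transposes 2-D tensors by default: torch.nn.Linear stores its
# weight as (out_features, in_features) while paddle.nn.Linear expects
# (in_features, out_features). A tiny self-contained check (hypothetical 4x3 layer):
def _transpose_sanity_check():
    w_torch = np.zeros((4, 3))              # torch layout: (out_features, in_features)
    w_paddle = w_torch.transpose((1, 0))    # paddle layout: (in_features, out_features)
    assert w_paddle.shape == (3, 4)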
def main():
paddle.set_device('cpu')
model_name_list = ['t2t_vit_7',
't2t_vit_10',
't2t_vit_12',
't2t_vit_14',
't2t_vit_14_384',
't2t_vit_19',
't2t_vit_24',
't2t_vit_24_token_labeling',
't2t_vit_t_14',
't2t_vit_t_19',
't2t_vit_t_24']
pth_model_path_list = ['./T2T_ViT_torch/t2t-vit-pth-models/71.7_T2T_ViT_7.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/75.2_T2T_ViT_10.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/76.5_T2T_ViT_12.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.5_T2T_ViT_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/83.3_T2T_ViT_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.9_T2T_ViT_19.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.3_T2T_ViT_24.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/84.2_T2T_ViT_24.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/81.7_T2T_ViTt_14.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.4_T2T_ViTt_19.pth.tar',
'./T2T_ViT_torch/t2t-vit-pth-models/82.6_T2T_ViTt_24.pth.tar']
for model_name, pth_model_path in zip(model_name_list, pth_model_path_list):
print(f'============= NOW: {model_name} =============')
sz = 384 if '384' in model_name else 224
if 'token_labeling' in model_name:
config = get_config(f'./configs/{model_name[:-15]}.yaml')
else:
config = get_config(f'./configs/{model_name}.yaml')
paddle_model = build_model(config)
paddle_model.eval()
print_model_named_params(paddle_model)
print_model_named_buffers(paddle_model)
print('+++++++++++++++++++++++++++++++++++')
device = torch.device('cpu')
if 'token_labeling' in model_name:
torch_model = eval(f'{model_name[:-15]}(img_size={sz})')
else:
if '384' in model_name:
torch_model = eval(f'{model_name[:-4]}(img_size={sz})')
else:
torch_model = eval(f'{model_name}(img_size={sz})')
load_for_transfer_learning(torch_model,
pth_model_path,
use_ema=True,
strict=False,
num_classes=1000)
torch_model = torch_model.to(device)
torch_model.eval()
print_model_named_params(torch_model)
print_model_named_buffers(torch_model)
# convert weights
paddle_model = convert(torch_model, paddle_model, model_name, config)
# check correctness
x = np.random.randn(2, 3, sz, sz).astype('float32')
x_paddle = paddle.to_tensor(x)
x_torch = torch.Tensor(x).to(device)
out_torch = torch_model(x_torch)
out_paddle = paddle_model(x_paddle)
out_torch = out_torch.data.cpu().numpy()
out_paddle = out_paddle.cpu().numpy()
print(out_torch.shape, out_paddle.shape)
print(out_torch[0, 0:100])
print('========================================================')
print(out_paddle[0, 0:100])
assert np.allclose(out_torch, out_paddle, atol = 1e-2)
# save weights for paddle model
model_path = os.path.join(f'./{model_name}.pdparams')
paddle.save(paddle_model.state_dict(), model_path)
print(f'{model_name} done')
print('all done')
if __name__ == "__main__":
main()
| 38.284211
| 93
| 0.563926
| 1,475
| 10,911
| 3.871864
| 0.154576
| 0.048328
| 0.029942
| 0.026966
| 0.421993
| 0.389074
| 0.345474
| 0.257748
| 0.22693
| 0.199265
| 0
| 0.026944
| 0.285675
| 10,911
| 284
| 94
| 38.419014
| 0.705799
| 0.089268
| 0
| 0.257919
| 0
| 0
| 0.263801
| 0.141689
| 0
| 0
| 0
| 0
| 0.004525
| 1
| 0.027149
| false
| 0
| 0.040724
| 0
| 0.076923
| 0.122172
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132a98c8a3d62474bea30c75b83e6ea4827d2ca4
| 265
|
py
|
Python
|
examples/get_message.py
|
NeroAsmarr/fz-api
|
d688277b4c616e93c809381ab082cba834964681
|
[
"MIT"
] | 71
|
2019-12-04T03:58:33.000Z
|
2022-03-19T11:38:54.000Z
|
examples/get_message.py
|
NeroAsmarr/fz-api
|
d688277b4c616e93c809381ab082cba834964681
|
[
"MIT"
] | 6
|
2020-01-03T09:56:45.000Z
|
2022-03-10T09:29:04.000Z
|
examples/get_message.py
|
NeroAsmarr/fz-api
|
d688277b4c616e93c809381ab082cba834964681
|
[
"MIT"
] | 12
|
2019-11-23T03:37:39.000Z
|
2021-08-15T09:41:21.000Z
|
# Example: fetching notifications about class/course changes
from zfnew import GetInfo, Login
base_url = 'homepage url of your school educational administration system'
lgn = Login(base_url=base_url)
lgn.login('account', 'password')
cookies = lgn.cookies  # how to obtain the cookies
person = GetInfo(base_url=base_url, cookies=cookies)
message = person.get_message()
print(message)
| 20.384615
| 52
| 0.754717
| 38
| 265
| 5.105263
| 0.5
| 0.180412
| 0.123711
| 0.14433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120755
| 265
| 12
| 53
| 22.083333
| 0.832618
| 0.086792
| 0
| 0
| 0
| 0
| 0.066946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132acdacf1ba08411631ef7d5debcacf7e313231
| 1,030
|
py
|
Python
|
input/gera_entradas.py
|
AtilioA/Sort-merge-join
|
6ed3199aada921973833cafffc8cbde5062b76fb
|
[
"Unlicense"
] | null | null | null |
input/gera_entradas.py
|
AtilioA/Sort-merge-join
|
6ed3199aada921973833cafffc8cbde5062b76fb
|
[
"Unlicense"
] | null | null | null |
input/gera_entradas.py
|
AtilioA/Sort-merge-join
|
6ed3199aada921973833cafffc8cbde5062b76fb
|
[
"Unlicense"
] | null | null | null |
import sys
import random
from faker import Faker
def gera(nLinhas=100, nCampos=None):
with open(f"{path}/file{nLinhas}-{nCampos}_python.txt", "w+", encoding="utf8") as file:
if not nCampos:
nCampos = random.randint(2, 10)
camposFuncs = [
fake.name,
fake.date,
fake.ssn,
fake.ascii_email,
fake.job,
fake.phone_number,
fake.coordinate,
fake.license_plate,
fake.credit_card_expire,
][:nCampos]
for _ in range(nLinhas):
file.write(f"{random.randint(0, 999999)},")
for funcao in camposFuncs[:-1]:
file.write(f"{funcao()},")
file.write(camposFuncs[-1]())
file.write("\n")
if __name__ == "__main__":
fake = Faker("pt_BR")
path = "python/"
try:
nLinhas = int(sys.argv[1])
nCampos = int(sys.argv[2])
except:
nLinhas = 1000
nCampos = 10
gera(nLinhas, nCampos)
| 23.953488
| 91
| 0.523301
| 116
| 1,030
| 4.508621
| 0.517241
| 0.068834
| 0.038241
| 0.080306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035608
| 0.345631
| 1,030
| 42
| 92
| 24.52381
| 0.740356
| 0
| 0
| 0
| 0
| 0
| 0.104854
| 0.039806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.088235
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132ade11b48d56eee57560835a5c3f4f30719ed6
| 1,639
|
py
|
Python
|
lessons 20/HomeWork/task9.py
|
zainllw0w/skillbox
|
896287b6f7f5612cf589094131fd1a12b0b192ba
|
[
"MIT"
] | null | null | null |
lessons 20/HomeWork/task9.py
|
zainllw0w/skillbox
|
896287b6f7f5612cf589094131fd1a12b0b192ba
|
[
"MIT"
] | null | null | null |
lessons 20/HomeWork/task9.py
|
zainllw0w/skillbox
|
896287b6f7f5612cf589094131fd1a12b0b192ba
|
[
"MIT"
] | null | null | null |
def sort(data, time):
tt = False
ft = True
st = False
is_find = True
winers_name = set()
index = 0
while is_find:
index += 1
for key, values in data.items():
if time[0 - index] == int(values[1]) and ft and values[0] not in winers_name:
first_id = key
ft = False
st = True
winers_name.add(values[0])
first_i = index
            elif time[0 - index] == int(values[1]) and st and values[0] not in winers_name:
second_id = key
st = False
tt = True
winers_name.add(values[0])
second_i = index
            elif time[0 - index] == int(values[1]) and tt and values[0] not in winers_name:
three_id = key
winers_name.add(values[0])
is_find = False
three_i = index
break
return first_id, second_id, three_id, first_i, second_i, three_i
n = int(input('Enter the number of lines: '))
data = dict()
time_list = list()
for i in range(1, n+1):
    print(f'Enter line {i}: ', end='')
text = input().split()
time = text[0]
time_list.append(int(time))
name = text[1]
obj = [name, time]
data[i] = tuple(obj)
f, s, t, fi, si, ti = sort(data, sorted(time_list))
time_list = sorted(time_list)
print('1st place: {0}, with {1} points'.format(data[f][0], time_list[-fi]))
print('2nd place: {0}, with {1} points'.format(data[s][0], time_list[-si]))
print('3rd place: {0}, with {1} points'.format(data[t][0], time_list[-ti]))
| 32.78
| 90
| 0.534472
| 241
| 1,639
| 3.510373
| 0.273859
| 0.07565
| 0.042553
| 0.046099
| 0.387707
| 0.364066
| 0.307329
| 0.191489
| 0.078014
| 0.078014
| 0
| 0.027372
| 0.3313
| 1,639
| 50
| 91
| 32.78
| 0.744526
| 0
| 0
| 0.111111
| 0
| 0
| 0.092073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0
| 0
| 0.044444
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132c604ed66d104dc1d0fc33432f244eb013965a
| 7,620
|
py
|
Python
|
esm/model.py
|
crochereau/esm
|
881a3b924d3f74e3cddeb6929e91ee7224ef2ebd
|
[
"MIT"
] | 1
|
2021-01-21T17:54:20.000Z
|
2021-01-21T17:54:20.000Z
|
esm/model.py
|
crochereau/esm
|
881a3b924d3f74e3cddeb6929e91ee7224ef2ebd
|
[
"MIT"
] | null | null | null |
esm/model.py
|
crochereau/esm
|
881a3b924d3f74e3cddeb6929e91ee7224ef2ebd
|
[
"MIT"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .modules import (
TransformerLayer,
LearnedPositionalEmbedding,
SinusoidalPositionalEmbedding,
RobertaLMHead,
ESM1bLayerNorm,
ContactPredictionHead,
)
class ProteinBertModel(nn.Module):
@classmethod
def add_args(cls, parser):
parser.add_argument(
"--num_layers", default=36, type=int, metavar="N", help="number of layers"
)
parser.add_argument(
"--embed_dim", default=1280, type=int, metavar="N", help="embedding dimension"
)
parser.add_argument(
"--logit_bias", action="store_true", help="whether to apply bias to logits"
)
parser.add_argument(
"--ffn_embed_dim",
default=5120,
type=int,
metavar="N",
help="embedding dimension for FFN",
)
parser.add_argument(
"--attention_heads",
default=20,
type=int,
metavar="N",
help="number of attention heads",
)
def __init__(self, args, alphabet):
super().__init__()
self.args = args
self.alphabet_size = len(alphabet)
self.padding_idx = alphabet.padding_idx
self.mask_idx = alphabet.mask_idx
self.cls_idx = alphabet.cls_idx
self.eos_idx = alphabet.eos_idx
if self.args.arch == 'roberta_large':
self.model_version = 'ESM-1b'
self._init_submodules_esm1b()
else:
self.model_version = 'ESM-1'
self._init_submodules_esm1()
def _init_submodules_common(self):
self.embed_tokens = nn.Embedding(
self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx
)
self.layers = nn.ModuleList(
[
TransformerLayer(
self.args.embed_dim, self.args.ffn_embed_dim, self.args.attention_heads,
add_bias_kv=(self.model_version != 'ESM-1b'),
use_esm1b_layer_norm=(self.model_version == 'ESM-1b'),
)
for _ in range(self.args.layers)
]
)
self.contact_head = ContactPredictionHead(self.args.layers * self.args.attention_heads)
def _init_submodules_esm1b(self):
self._init_submodules_common()
self.embed_scale = 1
self.embed_positions = LearnedPositionalEmbedding(self.args.max_positions, self.args.embed_dim, self.padding_idx)
self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)
self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)
self.lm_head = RobertaLMHead(
embed_dim=self.args.embed_dim,
output_dim=self.alphabet_size,
weight=self.embed_tokens.weight
)
def _init_submodules_esm1(self):
self._init_submodules_common()
self.embed_scale = math.sqrt(self.args.embed_dim)
self.embed_positions = SinusoidalPositionalEmbedding(self.args.embed_dim, self.padding_idx)
self.embed_out = nn.Parameter(
torch.zeros((self.alphabet_size, self.args.embed_dim))
)
self.embed_out_bias = None
if self.args.final_bias:
self.embed_out_bias = nn.Parameter(torch.zeros(self.alphabet_size))
def forward(self, tokens, repr_layers=[], need_head_weights=False, return_contacts=False):
if return_contacts:
need_head_weights = True
assert tokens.ndim == 2
padding_mask = tokens.eq(self.padding_idx) # B, T
x = self.embed_scale * self.embed_tokens(tokens)
if getattr(self.args, 'token_dropout', False):
x.masked_fill_((tokens == self.mask_idx).unsqueeze(-1), 0.0)
# x: B x T x C
mask_ratio_train = 0.15 * 0.8
src_lengths = (~padding_mask).sum(-1)
mask_ratio_observed = (tokens == self.mask_idx).sum(-1).float() / src_lengths
x = x * (1 - mask_ratio_train) / (1 - mask_ratio_observed)[:, None, None]
x = x + self.embed_positions(tokens)
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_before(x)
if padding_mask is not None:
x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))
repr_layers = set(repr_layers)
hidden_representations = {}
if 0 in repr_layers:
hidden_representations[0] = x
if need_head_weights:
attn_weights = []
# (B, T, E) => (T, B, E)
x = x.transpose(0, 1)
if not padding_mask.any():
padding_mask = None
for layer_idx, layer in enumerate(self.layers):
x, attn = layer(x, self_attn_padding_mask=padding_mask, need_head_weights=need_head_weights)
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x.transpose(0, 1)
if need_head_weights:
# (H, B, T, T) => (B, H, T, T)
attn_weights.append(attn.transpose(1, 0))
if self.model_version == 'ESM-1b':
x = self.emb_layer_norm_after(x)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
# last hidden representation should have layer norm applied
if (layer_idx + 1) in repr_layers:
hidden_representations[layer_idx + 1] = x
x = self.lm_head(x)
else:
x = F.linear(x, self.embed_out, bias=self.embed_out_bias)
x = x.transpose(0, 1) # (T, B, E) => (B, T, E)
result = {"logits": x, "representations": hidden_representations}
if need_head_weights:
# attentions: B x L x H x T x T
attentions = torch.stack(attn_weights, 1)
if self.model_version == "ESM-1":
# ESM-1 models have an additional null-token for attention, which we remove
attentions = attentions[..., :-1]
if padding_mask is not None:
attention_mask = (1 - padding_mask.type_as(attentions))
attention_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)
attentions = attentions * attention_mask[:, None, None, :, :]
result["attentions"] = attentions
if return_contacts:
contacts = self._predict_contacts_from_token_attentions(tokens, attentions)
result["contacts"] = contacts
return result
def _predict_contacts_from_token_attentions(self, tokens, attentions):
# remove eos token attentions
if tokens[:, -1].eq(self.eos_idx).any():
eos_mask = tokens.ne(self.eos_idx).to(attentions)
eos_mask = eos_mask.unsqueeze(1) * eos_mask.unsqueeze(2)
attentions = attentions * eos_mask[:, None, None, :, :]
attentions = attentions[..., :-1, :-1]
# remove cls token attentions
if tokens[:, 0].eq(self.cls_idx).all():
attentions = attentions[..., 1:, 1:]
batch_size, layers, heads, seqlen, _ = attentions.size()
attentions = attentions.view(batch_size, layers * heads, seqlen, seqlen)
return self.contact_head(attentions)
def predict_contacts(self, tokens):
return self(tokens, return_contacts=True)["contacts"]
@property
def num_layers(self):
return self.args.layers
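# --- Hedged usage sketch (not part of the original module) ---
# How the forward() arguments above are typically exercised, assuming the
# pretrained loaders and batch converter of the upstream facebookresearch/esm
# package; the model name and sequence below are illustrative assumptions.
if __name__ == "__main__":
    import esm
    model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
    batch_converter = alphabet.get_batch_converter()
    _, _, tokens = batch_converter([("protein1", "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ")])
    with torch.no_grad():
        out = model(tokens, repr_layers=[33], return_contacts=True)
    per_token_embeddings = out["representations"][33]
    contacts = out["contacts"]
    print(per_token_embeddings.shape, contacts.shape)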
| 38.1
| 121
| 0.600656
| 927
| 7,620
| 4.702265
| 0.206041
| 0.038541
| 0.026841
| 0.033035
| 0.283092
| 0.177564
| 0.14407
| 0.086717
| 0.051847
| 0.051847
| 0
| 0.013564
| 0.293701
| 7,620
| 199
| 122
| 38.291457
| 0.796358
| 0.065879
| 0
| 0.169811
| 0
| 0
| 0.043931
| 0
| 0
| 0
| 0
| 0
| 0.006289
| 1
| 0.056604
| false
| 0
| 0.031447
| 0.012579
| 0.119497
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132d17111128f658179267c44013a769265d45f3
| 3,392
|
py
|
Python
|
python/tink/aead/kms_envelope_aead.py
|
bfloch/tink
|
aac780590902f726a8e7d6c4e3aa1cd75f4b0ed5
|
[
"Apache-2.0"
] | null | null | null |
python/tink/aead/kms_envelope_aead.py
|
bfloch/tink
|
aac780590902f726a8e7d6c4e3aa1cd75f4b0ed5
|
[
"Apache-2.0"
] | null | null | null |
python/tink/aead/kms_envelope_aead.py
|
bfloch/tink
|
aac780590902f726a8e7d6c4e3aa1cd75f4b0ed5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for envelope encryption with KMS."""
from __future__ import absolute_import
from __future__ import division
# Placeholder for import for type annotations
from __future__ import print_function
import struct
from tink.proto import tink_pb2
from tink import aead
from tink import core
# Defines in how many bytes the DEK length will be encoded.
DEK_LEN_BYTES = 4
class KmsEnvelopeAead(aead.Aead):
"""Implements envelope encryption.
Envelope encryption generates a data encryption key (DEK) which is used
  to encrypt the payload. The DEK is then sent to a KMS to be encrypted and
the encrypted DEK is attached to the ciphertext. In order to decrypt the
ciphertext, the DEK first has to be decrypted by the KMS, and then the DEK
can be used to decrypt the ciphertext. For further information see
https://cloud.google.com/kms/docs/envelope-encryption.
The ciphertext structure is as follows:
* Length of the encrypted DEK: 4 bytes (big endian)
* Encrypted DEK: variable length, specified by the previous 4 bytes
* AEAD payload: variable length
"""
def __init__(self, key_template: tink_pb2.KeyTemplate, remote: aead.Aead):
self.key_template = key_template
self.remote_aead = remote
def encrypt(self, plaintext: bytes, associated_data: bytes) -> bytes:
# Get new key from template
dek = core.Registry.new_key_data(self.key_template)
dek_aead = core.Registry.primitive(dek, aead.Aead)
# Encrypt plaintext
ciphertext = dek_aead.encrypt(plaintext, associated_data)
# Wrap DEK key values with remote
encrypted_dek = self.remote_aead.encrypt(dek.value, b'')
# Construct ciphertext, DEK length encoded as big endian
enc_dek_len = struct.pack('>I', len(encrypted_dek))
return enc_dek_len + encrypted_dek + ciphertext
def decrypt(self, ciphertext: bytes, associated_data: bytes) -> bytes:
ct_len = len(ciphertext)
# Recover DEK length
if ct_len < DEK_LEN_BYTES:
raise core.TinkError
dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0]
# Basic check if DEK length can be valid.
if dek_len > (ct_len - DEK_LEN_BYTES) or dek_len < 0:
raise core.TinkError
# Decrypt DEK with remote AEAD
encrypted_dek_bytes = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len]
dek_bytes = self.remote_aead.decrypt(encrypted_dek_bytes, b'')
# Get AEAD primitive based on DEK
dek = tink_pb2.KeyData()
dek.type_url = self.key_template.type_url
dek.value = dek_bytes
dek.key_material_type = tink_pb2.KeyData.KeyMaterialType.SYMMETRIC
dek_aead = core.Registry.primitive(dek, aead.Aead)
# Extract ciphertext payload and decrypt
ct_bytes = ciphertext[DEK_LEN_BYTES + dek_len:]
return dek_aead.decrypt(ct_bytes, associated_data)
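# --- Hedged illustration (not part of the original module) ---
# The wire format described in the KmsEnvelopeAead docstring can be split back
# into its parts with plain struct parsing; a minimal sketch:
def _split_envelope(ciphertext: bytes):
  """Splits [4-byte big-endian DEK length][encrypted DEK][AEAD payload]."""
  dek_len = struct.unpack('>I', ciphertext[0:DEK_LEN_BYTES])[0]
  encrypted_dek = ciphertext[DEK_LEN_BYTES:DEK_LEN_BYTES + dek_len]
  payload = ciphertext[DEK_LEN_BYTES + dek_len:]
  return encrypted_dek, payload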
| 36.085106
| 79
| 0.740861
| 502
| 3,392
| 4.844622
| 0.328685
| 0.034539
| 0.031661
| 0.01727
| 0.099918
| 0.058388
| 0.058388
| 0.032072
| 0
| 0
| 0
| 0.006541
| 0.188679
| 3,392
| 93
| 80
| 36.473118
| 0.87718
| 0.479068
| 0
| 0.114286
| 0
| 0
| 0.002381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.2
| 0
| 0.371429
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
132da230699a189c6467f4c2e09699a5bd87f139
| 2,168
|
py
|
Python
|
tests/pyb/can.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | null | null | null |
tests/pyb/can.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | null | null | null |
tests/pyb/can.py
|
LabAixBidouille/micropython
|
11aa6ba456287d6c80598a7ebbebd2887ce8f5a2
|
[
"MIT"
] | null | null | null |
from pyb import CAN
CAN.initfilterbanks(14)
can = CAN(1)
print(can)
can.init(CAN.LOOPBACK)
print(can)
print(can.any(0))
# Catch all filter
can.setfilter(0, CAN.MASK16, 0, (0, 0, 0, 0))
can.send('abcd', 123)
print(can.any(0))
print(can.recv(0))
can.send('abcd', -1)
print(can.recv(0))
can.send('abcd', 0x7FF + 1)
print(can.recv(0))
# Test too long message
try:
can.send('abcdefghi', 0x7FF)
except ValueError:
print('passed')
else:
print('failed')
del can
# Testing extended IDs
can = CAN(1, CAN.LOOPBACK, extframe = True)
# Catch all filter
can.setfilter(0, CAN.MASK32, 0, (0, 0))
print(can)
try:
can.send('abcde', 0x7FF + 1)
except ValueError:
print('failed')
else:
r = can.recv(0)
if r[0] == 0x7FF+1 and r[3] == b'abcde':
print('passed')
else:
print('failed, wrong data received')
del can
# Test RxCallbacks
can = CAN(1, CAN.LOOPBACK)
can.setfilter(0, CAN.LIST16, 0, (1, 2, 3, 4))
can.setfilter(1, CAN.LIST16, 1, (5, 6, 7, 8))
def cb0(bus, reason):
print('cb0')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1(bus, reason):
print('cb1')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb0a(bus, reason):
print('cb0a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
def cb1a(bus, reason):
print('cb1a')
if reason == 0:
print('pending')
if reason == 1:
print('full')
if reason == 2:
print('overflow')
can.rxcallback(0, cb0)
can.rxcallback(1, cb1)
can.send('11111111',1)
can.send('22222222',2)
can.send('33333333',3)
can.rxcallback(0, cb0a)
can.send('44444444',4)
can.send('55555555',5)
can.send('66666666',6)
can.send('77777777',7)
can.rxcallback(1, cb1a)
can.send('88888888',8)
print(can.recv(0))
print(can.recv(0))
print(can.recv(0))
print(can.recv(1))
print(can.recv(1))
print(can.recv(1))
can.send('11111111',1)
can.send('55555555',5)
print(can.recv(0))
print(can.recv(1))
| 17.626016
| 45
| 0.600092
| 339
| 2,168
| 3.837758
| 0.224189
| 0.098386
| 0.10146
| 0.069946
| 0.472713
| 0.392775
| 0.357417
| 0.274404
| 0.233666
| 0.233666
| 0
| 0.107351
| 0.20941
| 2,168
| 122
| 46
| 17.770492
| 0.651692
| 0.042897
| 0
| 0.612903
| 0
| 0
| 0.121857
| 0
| 0
| 0
| 0.009671
| 0
| 0
| 1
| 0.043011
| false
| 0.021505
| 0.010753
| 0
| 0.053763
| 0.397849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|