hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74cf89eb0f06c514d58868658f9cede0db7a9aaa | 2,210 | py | Python | LeetCode/LC003_bruteForce.py | JeffreyAsuncion/CodingProblems_Python | db71cb46b2579c1c65767a644a0ea989da4fa559 | [
"MIT"
] | null | null | null | LeetCode/LC003_bruteForce.py | JeffreyAsuncion/CodingProblems_Python | db71cb46b2579c1c65767a644a0ea989da4fa559 | [
"MIT"
] | null | null | null | LeetCode/LC003_bruteForce.py | JeffreyAsuncion/CodingProblems_Python | db71cb46b2579c1c65767a644a0ea989da4fa559 | [
"MIT"
] | null | null | null | """
3. Longest Substring Without Repeating Characters
Given a string s,
find the length of the longest substring without repeating characters.
Example 1:
Input: s = "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: s = "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: s = "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.
Example 4:
Input: s = ""
Output: 0
Constraints:
0 <= s.length <= 5 * 104
s consists of English letters, digits, symbols and spaces.
"""
def allUnique(s, start, end):
seenStr = ''
for i in range(start, end):
char = s[i]
# check if char has been seen already in seenStr
if char in seenStr:
# return False - char is not unique
return False
else:
seenStr += char
# return True - char is unique
return True
def lengthOfLongestSubstring(s: str) -> int:
# base case where s is empty string
if s == "":
# return length of 0
return 0
longest = 0
for i in range(len(s)):
j = i + 1
# corrected the range to len(s) + 1 and works on edge cases but TimesOut longer strings
# O(n^3) need to optimize
for j in range(len(s)+1): # range == len(s) + 1 to correct for j = i + 1
if allUnique(s,i,j):
# ans is the max value of ans vs j -i
longest = max(longest, j-i)
return longest
# Example 1:
s1 = "abcabcbb"
print(lengthOfLongestSubstring(s1)) # Output: 3
# Explanation: The answer is "abc", with the length of 3.
# Example 2:
s2 = "bbbbb"
print(lengthOfLongestSubstring(s2))#Output: 1
# Explanation: The answer is "b", with the length of 1.
# Example 3:
s3 = "pwwkew"
print(lengthOfLongestSubstring(s3))#Output: 3
# Explanation: The answer is "wke", with the length of 3.
# Example 4:
s4 = ""
print(lengthOfLongestSubstring(s4))# Output: 0
# Example 5:
s5 = "aab"
print(lengthOfLongestSubstring(s5))# Output: 2 | 25.402299 | 95 | 0.615385 | 320 | 2,210 | 4.25 | 0.325 | 0.047059 | 0.056618 | 0.097059 | 0.299265 | 0.2375 | 0.232353 | 0.232353 | 0.232353 | 0.232353 | 0 | 0.031131 | 0.287783 | 2,210 | 87 | 96 | 25.402299 | 0.83291 | 0.58371 | 0 | 0 | 0 | 0 | 0.024664 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0 | 0 | 0.206897 | 0.172414 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d068093d0ae6b5a5ba2737ff2398f7a073b853 | 10,913 | py | Python | pattoo/agent.py | palisadoes/pattoo-os | cccf0ddb50a8bb971c0c527b4ea5ef96c6819fac | [
"Apache-2.0"
] | null | null | null | pattoo/agent.py | palisadoes/pattoo-os | cccf0ddb50a8bb971c0c527b4ea5ef96c6819fac | [
"Apache-2.0"
] | null | null | null | pattoo/agent.py | palisadoes/pattoo-os | cccf0ddb50a8bb971c0c527b4ea5ef96c6819fac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
"""Pattoo .Agent class.
Description:
This script:
1) Processes a variety of information from agents
2) Posts the data using HTTP to a server listed
in the configuration file
"""
# Standard libraries
import textwrap
import sys
import time
import argparse
import ipaddress
import multiprocessing
import os
from pprint import pprint
# PIP3 libraries
from gunicorn.app.base import BaseApplication
from gunicorn.six import iteritems
# Pattoo libraries
from pattoo import daemon
from pattoo.pattoo import CONFIG
from pattoo import log
from pattoo.api import API
class Agent(object):
"""Agent class for daemons."""
def __init__(self, parent, child=None):
"""Initialize the class.
Args:
parent: Name of parent daemon
child: Name of child daemon
Returns:
None
"""
# Initialize key variables (Parent)
self.parent = parent
self.pidfile_parent = daemon.pid_file(parent)
self.lockfile_parent = daemon.lock_file(parent)
# Initialize key variables (Child)
if bool(child) is None:
self._pidfile_child = None
else:
self._pidfile_child = daemon.pid_file(child)
def name(self):
"""Return agent name.
Args:
None
Returns:
value: Name of agent
"""
# Return
value = self.parent
return value
def query(self):
"""Placeholder method."""
# Do nothing
pass
class AgentDaemon(daemon.Daemon):
"""Class that manages agent deamonization."""
def __init__(self, agent):
"""Initialize the class.
Args:
agent: agent object
Returns:
None
"""
# Initialize variables to be used by daemon
self.agent = agent
# Call up the base daemon
daemon.Daemon.__init__(self, agent)
def run(self):
"""Start polling.
Args:
None
Returns:
None
"""
# Start polling. (Poller decides frequency)
while True:
self.agent.query()
class AgentCLI(object):
"""Class that manages the agent CLI.
Args:
None
Returns:
None
"""
def __init__(self):
"""Initialize the class.
Args:
None
Returns:
None
"""
# Initialize key variables
self.parser = None
def process(self, additional_help=None):
"""Return all the CLI options.
Args:
None
Returns:
args: Namespace() containing all of our CLI arguments as objects
- filename: Path to the configuration file
"""
# Header for the help menu of the application
parser = argparse.ArgumentParser(
description=additional_help,
formatter_class=argparse.RawTextHelpFormatter)
# CLI argument for starting
parser.add_argument(
'--start',
required=False,
default=False,
action='store_true',
help='Start the agent daemon.'
)
# CLI argument for stopping
parser.add_argument(
'--stop',
required=False,
default=False,
action='store_true',
help='Stop the agent daemon.'
)
# CLI argument for getting the status of the daemon
parser.add_argument(
'--status',
required=False,
default=False,
action='store_true',
help='Get daemon daemon status.'
)
# CLI argument for restarting
parser.add_argument(
'--restart',
required=False,
default=False,
action='store_true',
help='Restart the agent daemon.'
)
# CLI argument for stopping
parser.add_argument(
'--force',
required=False,
default=False,
action='store_true',
help=textwrap.fill(
'Stops or restarts the agent daemon ungracefully when '
'used with --stop or --restart.', width=80)
)
# Get the parser value
self.parser = parser
def control(self, agent):
"""Control the pattoo agent from the CLI.
Args:
agent: Agent object
Returns:
None
"""
# Get the CLI arguments
self.process()
parser = self.parser
args = parser.parse_args()
# Run daemon
_daemon = AgentDaemon(agent)
if args.start is True:
_daemon.start()
elif args.stop is True:
if args.force is True:
_daemon.force()
else:
_daemon.stop()
elif args.restart is True:
if args.force is True:
_daemon.force()
_daemon.start()
else:
_daemon.restart()
elif args.status is True:
_daemon.status()
else:
parser.print_help()
sys.exit(2)
class AgentAPI(Agent):
"""pattoo API agent that serves web pages.
Args:
None
Returns:
None
Functions:
__init__:
populate:
post:
"""
def __init__(self, parent, child):
"""Initialize the class.
Args:
parent: Name of parent daemon
child: Name of child daemon
Returns:
None
"""
# Initialize key variables
Agent.__init__(self, parent, child)
self.config = CONFIG
def query(self):
"""Query all remote devices for data.
Args:
None
Returns:
None
"""
# Initialize key variables
config = self.config
# Check for lock and pid files
if os.path.exists(self.lockfile_parent) is True:
log_message = (
'Lock file {} exists. Multiple API daemons running '
'API may have died '
'catastrophically in the past, in which case the lockfile '
'should be deleted. '
''.format(self.lockfile_parent))
log.log2see(1083, log_message)
if os.path.exists(self.pidfile_parent) is True:
log_message = (
'PID file: {} already exists. Daemon already running? '
'If not, it may have died catastrophically in the past '
'in which case you should use --stop --force to fix.'
''.format(self.pidfile_parent))
log.log2see(1084, log_message)
######################################################################
#
# Assign options in format that the Gunicorn WSGI will accept
#
# NOTE! to get a full set of valid options pprint(self.cfg.settings)
# in the instantiation of StandaloneApplication. The option names
# do not exactly match the CLI options found at
# http://docs.gunicorn.org/en/stable/settings.html
#
######################################################################
options = {
'bind': _ip_binding(),
'accesslog': config.log_file_api(),
'errorlog': config.log_file_api(),
'capture_output': True,
'pidfile': self._pidfile_child,
'loglevel': config.log_level(),
'workers': _number_of_workers(),
'umask': 0o0007,
}
# Log so that user running the script from the CLI knows that something
# is happening
log_message = (
'Pattoo API running on {}:{} and logging to file {}.'
''.format(
config.listen_address(),
config.bind_port(),
config.log_file_api()))
log.log2info(1022, log_message)
# Run
StandaloneApplication(API, options).run()
class StandaloneApplication(BaseApplication):
"""Class to integrate the Gunicorn WSGI with the Pattoo Flask application.
Modified from: http://docs.gunicorn.org/en/latest/custom.html
"""
def __init__(self, app, options=None):
"""Initialize the class.
args:
app: Flask application object of type Flask(__name__)
options: Gunicorn CLI options
"""
# Initialize key variables
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
pprint(self.cfg.settings)
def load_config(self):
"""Load the configuration."""
# Initialize key variables
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
# Assign configuration parameters
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
"""Run the Flask application throught the Gunicorn WSGI."""
return self.application
def _number_of_workers():
"""Get the number of CPU cores on this server."""
return (multiprocessing.cpu_count() * 2) + 1
def agent_sleep(agent_name, seconds=300):
"""Make agent sleep for a specified time, while updating PID every 300s.
Args:
agent_name: Name of agent
seconds: number of seconds to sleep
Returns:
uid: UID for agent
"""
# Initialize key variables
interval = 300
remaining = seconds
# Start processing
while True:
# Update the PID file timestamp (important)
daemon.update_pid(agent_name)
# Sleep for at least "interval" number of seconds
if remaining < interval:
time.sleep(remaining)
break
else:
time.sleep(interval)
# Decrement remaining time
remaining = remaining - interval
def _ip_binding():
"""Create IPv4 / IPv6 binding for Gunicorn.
Args:
None
Returns:
result: bind
"""
# Initialize key variables
config = CONFIG
ipv4 = False
ip_address = config.listen_address()
# Check IP address type
try:
ip_object = ipaddress.ip_address(ip_address)
except:
log_message = (
'The {} IP address in the configuration file is incorrectly '
'formatted'.format(ip_address))
log.log2die(1234, log_message)
# Is this an IPv4 address?
ipv4 = isinstance(ip_object, ipaddress.IPv4Address)
if ipv4 is True:
result = '{}:{}'.format(ip_address, config.bind_port())
else:
result = '[{}]:{}'.format(ip_address, config.bind_port())
return result
| 24.802273 | 79 | 0.550078 | 1,142 | 10,913 | 5.147986 | 0.24606 | 0.01684 | 0.033679 | 0.018711 | 0.199354 | 0.15853 | 0.143222 | 0.117367 | 0.079946 | 0.068379 | 0 | 0.007104 | 0.355081 | 10,913 | 439 | 80 | 24.85877 | 0.828218 | 0.297077 | 0 | 0.216216 | 0 | 0 | 0.111437 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086486 | false | 0.005405 | 0.075676 | 0 | 0.210811 | 0.016216 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d0de2d8da21d05590e9e49b38f27e37b7c316e | 5,359 | py | Python | utils/create_dmg_masks.py | deg4uss3r/xview2-baseline | ae3b63003efe5ffd712a32b0083e044f595f0d3e | [
"BSD-3-Clause"
] | null | null | null | utils/create_dmg_masks.py | deg4uss3r/xview2-baseline | ae3b63003efe5ffd712a32b0083e044f595f0d3e | [
"BSD-3-Clause"
] | null | null | null | utils/create_dmg_masks.py | deg4uss3r/xview2-baseline | ae3b63003efe5ffd712a32b0083e044f595f0d3e | [
"BSD-3-Clause"
] | 1 | 2020-02-13T14:02:26.000Z | 2020-02-13T14:02:26.000Z | #####################################################################################################################################################################
# xView2 #
# Copyright 2019 Carnegie Mellon University. #
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO #
# WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, #
# EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, #
# TRADEMARK, OR COPYRIGHT INFRINGEMENT. #
# Released under a MIT (SEI)-style license, please see LICENSE.md or contact permission@sei.cmu.edu for full terms. #
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use #
# and distribution. #
# This Software includes and/or makes use of the following Third-Party Software subject to its own license: #
# 1. SpaceNet (https://github.com/motokimura/spacenet_building_detection/blob/master/LICENSE) Copyright 2017 Motoki Kimura. #
# DM19-0988 #
#####################################################################################################################################################################
import json
from os import walk, path, makedirs
from shapely import wkt
from shapely.geometry import Polygon
import numpy as np
from cv2 import fillPoly, imwrite
def get_files(base_dir):
files = []
dis_pre_files = [f for f in next(walk(path.join(base_dir, "labels")))[2] if 'post' in f]
for f in dis_pre_files:
files.append(path.join(base_dir, "labels", f))
return files
def create_image(inference_data):
damage_key = {'un-classified': 0, 'no-damage': 1, 'minor-damage': 2, 'major-damage': 3, 'destroyed': 4}
# Creating a blank img 1024x1024x1 (the size of the orginal images, but greyscale not full RGB)
mask_img = np.zeros((1024,1024,1), np.uint8)
# For each polygon in the image (according to the json)
# Fill the poylgon with the value from the damage key
for poly in inference_data['features']['xy']:
if 'subtype' in poly['properties']:
damage = poly['properties']['subtype']
else:
# If the subtype json field does not exist, do not write out the polygon
damage = 'un-classified'
coords = wkt.loads(poly['wkt'])
poly_np = np.array(coords.exterior.coords, np.int32)
fillPoly(mask_img, [poly_np], damage_key[damage])
# Return the image once we've gone over every polygon
return mask_img
def save_image(polygons, output_path):
# Output the filled in polygons to an image file
imwrite(output_path, polygons)
def write_gt(infile, output_dir):
with open(infile) as gt_file:
gt_json = json.load(gt_file)
# getting mask only if 'post' is in the title and writing out masks with damage value as the polygon pixel values
gt_masked_image = create_image(gt_json)
gt_masked_image_path = path.join(output_dir, path.basename(infile).split('.json')[0]+'_masked_dmg.png')
save_image(gt_masked_image, gt_masked_image_path)
if __name__ == "__main__":
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description="create_dmg_masks.py: Creates maskes with polygon filled by the damage value")
parser.add_argument('--base-dir',
required=True,
metavar='/path/to/xBD/train/',
help="Full path to the train directory; expects 'labels' under that directory")
parser.add_argument('--output-dir',
required=True,
metavar='/path/to/output/directory/',
help="Full path to the output directory you wish to store the output pngs")
args = parser.parse_args()
# Create output dir to save all masks if it doesn't exist already
if not path.isdir(args.output_dir):
makedirs(args.output_dir)
# We expect all label files to be under a base dir like:
# ~/Downloads/train/labels/<ALL_LABELS>.json
all_files = get_files(args.base_dir)
for infile in all_files:
write_gt(infile, args.output_dir)
| 54.131313 | 166 | 0.53816 | 597 | 5,359 | 4.720268 | 0.420436 | 0.022356 | 0.034067 | 0.004968 | 0.046842 | 0.019872 | 0 | 0 | 0 | 0 | 0 | 0.012591 | 0.333085 | 5,359 | 98 | 167 | 54.683673 | 0.775881 | 0.49095 | 0 | 0.04 | 0 | 0 | 0.185859 | 0.011008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.14 | 0 | 0.26 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d4070cc857186b6a593db78097011098c36c45 | 2,577 | py | Python | examples/source_TestLibraBot.py | kensoi/libragram | a0119244dceb09edca36b23c95f3e97a28ddae9a | [
"Apache-2.0"
] | null | null | null | examples/source_TestLibraBot.py | kensoi/libragram | a0119244dceb09edca36b23c95f3e97a28ddae9a | [
"Apache-2.0"
] | null | null | null | examples/source_TestLibraBot.py | kensoi/libragram | a0119244dceb09edca36b23c95f3e97a28ddae9a | [
"Apache-2.0"
] | null | null | null | import typing
from libragram import librabot
from libragram.objects.decorators import callback
from libragram.objects.filters import *
bot = librabot(token = "123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11", trust_env=True)
class NotCommand(Filter):
def __init__(self, commands: typing.Union[list, set]):
self.cash = isCommand(commands)
self.update_type = whichUpdate({'message'})
self.priority = 0
async def check(self, package):
response_command = await self.cash.check(package)
response_update = await self.update_type.check(package)
return not response_command and response_update
@callback(isCommand({'start'}), bot = bot)
async def start_message(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = """Welcome to TestLibraBot!
Command list - /help
Copyright 2021 Kensoi""")
@callback(isCommand({'help'}), bot = bot)
async def help_message(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = """Cat pics - /cats
Check Bot work - /ping
Author - /credits
Contributors - /contributors
Source - /source""")
@callback(isCommand({'cats'}), bot = bot)
async def cat_pics(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "meow 🐱")
@callback(isCommand({'ping'}), bot = bot)
async def cat_pics(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "pong :>")
await package.sdk.wait(1)
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "Yeah, I am here, do not worry")
@callback(isCommand({'credits'}), bot = bot)
async def author_info(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "Author's site: kensoi.github.io")
@callback(isCommand({'contributors'}), bot = bot)
async def contributors(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "There's no any contributors :/")
@callback(isCommand({'source'}), bot = bot)
async def source_link(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "To see source check this link: github.com/kensoi/libragram")
@callback(NotCommand({'start', 'help', 'cats', 'ping', 'credits', 'contributors', 'source'}), bot = bot)
async def void(package):
await package.sdk.api.sendMessage(
chat_id = package.chat.id,
text = "Mur Mur Mur")
bot.run(bot.start_polling()) | 28.318681 | 104 | 0.668607 | 326 | 2,577 | 5.205521 | 0.291411 | 0.063642 | 0.088391 | 0.095463 | 0.365351 | 0.34178 | 0.34178 | 0.34178 | 0.34178 | 0.34178 | 0 | 0.012112 | 0.199069 | 2,577 | 91 | 105 | 28.318681 | 0.809593 | 0 | 0 | 0.307692 | 0 | 0 | 0.185027 | 0.026377 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015385 | false | 0 | 0.061538 | 0 | 0.107692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d43131a7fc8558fde515471cde4f6a43cbcff0 | 4,542 | py | Python | utils.py | brain-bzh/videoannotation | d75d3261967a134854a16956fea602cad51949a2 | [
"MIT"
] | 3 | 2020-02-19T09:54:27.000Z | 2020-10-13T14:02:28.000Z | utils.py | courtois-neuromod/videoannotation | d75d3261967a134854a16956fea602cad51949a2 | [
"MIT"
] | null | null | null | utils.py | courtois-neuromod/videoannotation | d75d3261967a134854a16956fea602cad51949a2 | [
"MIT"
] | 2 | 2020-03-13T12:23:13.000Z | 2021-02-01T16:14:04.000Z | ## Author : Nicolas Farrugia, February 2020
from torchvision.models.detection import fasterrcnn_resnet50_fpn
import torch
from torchvision.io import read_video,read_video_timestamps
import matplotlib.patches as patches
from matplotlib import pyplot as plt
import datetime
import os
def convert_Audio(mediaFile, outFile):
cmd = 'ffmpeg -i '+mediaFile+' '+outFile
os.system(cmd)
return outFile
#### imagenet categories
def cat_file():
# load classes file
categories = []
try:
f = open('categories.txt', 'r')
for line in f:
cat = line.split(',')[0].split('\n')[0]
if cat != 'classes':
categories.append(cat)
f.close()
#print('Number of categories:', len(categories))
except:
print('Error opening file ' + ' categories.txt')
quit()
return categories
categories = cat_file() # load category file
COCO_INSTANCE_CATEGORY_NAMES = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
def annotate_img(preds,vframes,n_obj=5):
global COCO_INSTANCE_CATEGORY_NAMES
### vframes : last three dims input tensor to faster_rcnn
### preds : dictionary of outputs of fater_rcnn
predlabels = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in preds['labels'].numpy()]
scores = [i for i in preds['scores'].detach().numpy()]
bboxes = [i for i in preds['boxes'].detach().numpy()]
test_im = vframes.permute(1,2,0).numpy()
# Create figure and axes
fig,ax = plt.subplots(1,figsize=(20,25))
# Display the image
ax.imshow(test_im)
#### add the annotations
for curbbox,curlab in zip(bboxes[:n_obj],predlabels[:n_obj]):
topleftx = curbbox[0]
toplefty = curbbox[1]
bottomrightx = curbbox[2]
bottomrighty = curbbox[3]
# Create a Rectangle patch
rect = patches.Rectangle((topleftx,toplefty),abs(bottomrightx-topleftx),abs(bottomrighty-toplefty),linewidth=1,edgecolor='r',facecolor='none')
# Add the patch to the Axes
ax.add_patch(rect)
ax.text(topleftx,toplefty,curlab,c='white',fontsize=16)
plt.show()
return fig
def gen_srt(strlabel,onset,srtfile,duration=2,num=1):
starttime = onset
endtime = starttime + duration
string_start = datetime.time(0,starttime//60,starttime%60).strftime("%H:%M:%S")
string_end = datetime.time(0,endtime//60,endtime%60).strftime("%H:%M:%S")
with open(srtfile,'a') as f:
f.write("{}\n".format(num+1))
f.write("{starttime} --> {endtime}\n".format(starttime=string_start,endtime=string_end))
f.write("{}\n".format(strlabel))
f.write("\n")
def gen_srt_coco_multiple(allpreds,onsets,srtfile,n_obj=5):
global COCO_INSTANCE_CATEGORY_NAMES
## check that both lists have the same size
if len(allpreds) != len(onsets):
raise(ValueError('List of predictions and onsets have different sizes'))
for num,(curpred,curonset) in enumerate(zip(allpreds,onsets)):
predlabels = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in curpred['labels'].numpy()[:n_obj]]
starttime = curonset
endtime = curonset + 2
string_start = datetime.time(0,starttime//60,starttime%60).strftime("%H:%M:%S")
string_end = datetime.time(0,endtime//60,endtime%60).strftime("%H:%M:%S")
with open(srtfile,'a') as f:
f.write("{}\n".format(num+1))
f.write("{starttime} --> {endtime}\n".format(starttime=string_start,endtime=string_end))
f.write("{}\n".format(predlabels))
f.write("\n")
| 31.985915 | 150 | 0.627257 | 584 | 4,542 | 4.792808 | 0.458904 | 0.007145 | 0.015005 | 0.044659 | 0.224723 | 0.214362 | 0.214362 | 0.214362 | 0.188639 | 0.158628 | 0 | 0.013789 | 0.201673 | 4,542 | 141 | 151 | 32.212766 | 0.758136 | 0.087406 | 0 | 0.170732 | 0 | 0 | 0.205682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060976 | false | 0 | 0.085366 | 0 | 0.182927 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d5aa0d303e7bb713fa63255c91d0a79ff9c4cc | 17,910 | py | Python | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | null | null | null | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | null | null | null | era5dataset/FrontDataset.py | stnie/FrontDetection | 742ebf9619dcde40d42891073739945a05631ea3 | [
"MIT"
] | 1 | 2022-01-17T04:58:10.000Z | 2022-01-17T04:58:10.000Z | from typing import final
import numpy as np
import torch
import os
import time
from datetime import datetime
import random
import numbers
from torch.utils.data import Dataset
from .ERA5Reader.readNetCDF import LatTokmPerLon
from .EraExtractors import DefaultEraExtractor
def labelnameToDataname(filename):
return os.path.splitext(filename)[0]+".nc"
def datanameToLabelname(filename, mapTypes, removePrefix):
return {key: os.path.join(str(x[0]), os.path.splitext(filename)[0][removePrefix:]+".txt") for key, x in mapTypes.items()}
# Dataset Class
class WeatherFrontDataset(Dataset):
    """Front dataset: pairs ERA5 input fields with weather-front labels."""
    def __init__(self, data_dir, label_dir = None, mapTypes = {"NA": ("", (35,70), (-40,35), (0.25,0.25), (1,1), None) }, levelRange = None, transform=None, outSize = None, printFileName = False, labelThickness = 2, label_extractor = None, asCoords = False, era_extractor = DefaultEraExtractor, has_subfolds = (False, False), removePrefix = 0, halfResEval = False):
        """
        Args:
            data_dir (string): Directory with all the images (ERA data files).
            label_dir (string): Directory with all the labels (fronts).
            mapTypes (dict): Per map type a tuple of
                (label folder, (latMin, latMax), (lonMin, lonMax),
                 (latRes, lonRes), ..., optional mask); latitudes -90 to 90,
                longitudes 0 to 360-resolution[1].
            levelRange: Level range of ERA data to extract.
            transform (callable tuple, optional): (data-augmentation,
                label-augmentation) transforms applied on a sample.
            outSize (tuple, optional): Crop size, applied before reading data.
            printFileName (bool): Print file information per sample.
            label_extractor / era_extractor: Callables extracting label / ERA
                data for a given range.
            has_subfolds (bool, bool): Whether data / label dirs use subfolders.
            removePrefix (int): Number of leading filename chars to strip when
                deriving label names.
            halfResEval (bool): Evaluate labels at half resolution.
        """
        # NOTE(review): mapTypes is a mutable default argument; it is only read
        # here, but sharing one dict across instances is a known pitfall.
        self.data_dir = data_dir
        self.label_dir = label_dir
        # Cropsize (used before reading from ERA!)
        self.cropsize = outSize
        # Augmentationtuple (data-augmentation, label-augmentation)
        self.transform = transform
        # Function that extracts label data from a given range
        self.label_extractor = label_extractor
        self.asCoords = asCoords
        # Function that extracts era data from a given range
        self.era_extractor = era_extractor
        # Dictionary describing folder, latitudes, longitudes and resolution (signed) for different labels
        self.mapTypes = mapTypes
        # Should labels be randomly drawn if multiple are available for the same data
        self.randomizeMapTypes = True
        # Levelrange of era to extract
        self.levelrange = levelRange
        # Latrange of era to extract for each mapType (used for crop)
        self.latrange = {key: np.arange(int((90-x[1][0])*(1/np.abs(x[3][0]))),int((90-x[1][1])*(1/np.abs(x[3][0]))), 1) for key,x in self.mapTypes.items()}
        # lonrange of era to extract for each mapType (used for crop)
        self.lonrange = {key: np.arange(int(x[2][0]*(1/x[3][1])), int(x[2][1]*(1/x[3][1])), 1) for key,x in self.mapTypes.items()}
        # Print file information
        self.printFileName = printFileName
        # Extract in a km grid instead of lat lon
        self.extractRegularGrid = False
        # is the evaluation to be on halfRes
        self.halfRes = halfResEval
        # Are labels provided? Else do not return labels
        self.has_label = (not label_dir is None and not label_extractor is None)
        if label_extractor is None:
            print("No label extractor given, proceed without extracting labels")
        if label_dir is None:
            print("No label directory given, Labels need to be generated by the extractor")
        # Check if an era_extractor exists
        if era_extractor is None:
            print("No Era-Extractor given, abort execution!")
            exit(1)
        self.removePrefix = removePrefix
        self.hasSubfolds = has_subfolds
        # ERA Data is organized in subfolders (2017->03->20170201_00.nc)
        if(self.hasSubfolds[0]):
            self.fileList = []
            for fold in os.listdir(self.data_dir):
                for filen in os.listdir(os.path.join(self.data_dir, fold)):
                    # if the dataset extracts labels, check if the corresponding labels exist
                    if(self.has_label):
                        potLabel = datanameToLabelname(filen, self.mapTypes, self.removePrefix)
                        labelExists = False
                        for key, val in potLabel.items():
                            # NOTE(review): splitting on "/" assumes POSIX-style
                            # label paths with exactly one folder level — verify
                            foldna, filena = val.split("/")
                            if filena in os.listdir(os.path.join(self.label_dir,foldna)):
                                labelExists=True
                        if(labelExists):
                            self.fileList.append(os.path.join(fold,filen))
                    # if no labels are to be extracted simply append the data
                    else:
                        self.fileList.append(os.path.join(fold,filen))
        # ERA Data is organized without subfolders (2017 -> 20170101_00.nc)
        else:
            self.fileList = []
            for filen in os.listdir(self.data_dir):
                if(self.has_label):
                    potLabel = datanameToLabelname(filen, self.mapTypes, self.removePrefix)
                    labelExists = False
                    for key, val in potLabel.items():
                        foldna, filena = val.split("/")
                        if filena in os.listdir(os.path.join(self.label_dir, foldna)):
                            labelExists = True
                    if(labelExists):
                        self.fileList.append(filen)
                else:
                    self.fileList.append(filen)
        # Sort file list
        self.fileList = sorted(self.fileList)
    def __len__(self):
        # Length of all available Data (regardless of the existence of label!)
        return len(self.fileList)
    # Allow for slices or idx
    def __getitem__(self, idx):
        """Return [image, label (or None), filename, mask (or None)] for idx."""
        if not isinstance(idx, numbers.Number):
            # Slice access is not implemented; abort rather than misbehave.
            print("Currently not working")
            exit(1)
            return self.getBatch(idx)
        filepath = self.fileList[idx]
        filename = ""
        if(self.hasSubfolds[0]):
            filename = filepath.split("/")[-1]
        else:
            filename = filepath
        if(filename == ""):
            print("fileNotFound")
            print(idx)
        img_name = os.path.join(self.data_dir, filepath)
        #Initialize projection type and seeds for possible transformations
        # The same seeds are reused for image, label and mask so their random
        # crops / augmentations stay aligned.
        projection_type = 0
        extract_seed = datetime.now()
        transform_seed = datetime.now()
        mapType = list(self.mapTypes.keys())[0]
        fronts = None
        if(self.has_label):
            # all corresponding front names (Take the first them if multiple are available)
            if(self.hasSubfolds[1]):
                front_name = datanameToLabelname(filepath, self.mapTypes, self.removePrefix)
            else:
                if(self.hasSubfolds[0]):
                    front_name = datanameToLabelname(filename, self.mapTypes, self.removePrefix)
                else:
                    front_name = datanameToLabelname(filename, self.mapTypes, self.removePrefix)
            mapType, front_name = self.getProjectionTypeAndFilePath(front_name)
            # To distinguish the output name
            #filename = os.path.splitext(filename)[0]+mapType+os.path.splitext(filename)[1]
            # Read Label Data
            #print("label:", filename)
            #print(front_name, mapType, filename)
            try:
                if(self.extractRegularGrid):
                    fronts = self.getRegularGridLabel(front_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], mapType, extract_seed )
                else:
                    fronts = self.getLabel(front_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], mapType, extract_seed )
            except:
                # NOTE(review): bare except silently swallows all extraction
                # errors; `fronts` then stays None and is reported below.
                print("filename is", front_name)
        if(self.printFileName):
            print(idx)
            print(img_name)
            # NOTE(review): front_name is only bound when has_label is True;
            # printFileName without labels would raise NameError here — verify.
            print(front_name)
            print()
        if(self.has_label and fronts is None):
            print("Did not extract a Front even though it should")
            print(idx, filename)
        # Read Image Data
        #print("image:", filename
        image = None
        try:
            if(self.extractRegularGrid):
                image = self.getRegularGridImage(img_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed, transform_seed)
            else:
                image = self.getImage(img_name, self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed, transform_seed)
        except Exception as e:
            print(e)
            print("filename is", filename)
            raise Exception(e,"\nfailed to extract image data {}".format(filename))
        if(image is None):
            print("failed to extract image data")
            print(filename, img_name, front_name)
            print(idx)
            raise Exception("failed to extract image data {}".format(filename))
        mask = None
        if(len(self.mapTypes[mapType]) == 5 and (not self.mapTypes[mapType][4] is None)):
            mask = self.getMask(self.mapTypes[mapType][-1], self.mapTypes[mapType][1], self.mapTypes[mapType][2], self.mapTypes[mapType][3], extract_seed)
        # Perform transformation on the data (affine transformation + randm crop) => Crop enables equally sized images
        if self.transform:
            finalImage = self.transformImage(image, transform_seed)
            if(mask is None):
                finalMask = None
            else:
                # Mask is transformed like a single-channel image so it stays
                # aligned with the augmented data.
                finalMask = torch.from_numpy(self.transformImage(mask.reshape((1,*mask.shape)), transform_seed).reshape(*mask.shape)).detach()
            if(self.has_label):
                finalFronts = self.transformLabel(fronts, transform_seed)
                if(self.asCoords):
                    return [torch.from_numpy(finalImage), finalFronts, filename, finalMask]
                else:
                    return [torch.from_numpy(finalImage), torch.from_numpy(finalFronts), filename, finalMask]
            else:
                return [torch.from_numpy(finalImage), None, filename, finalMask]
        else:
            if(mask is None):
                pass
            else:
                mask = torch.from_numpy(mask)
            if(self.has_label):
                if(self.asCoords):
                    return [torch.from_numpy(image), fronts, filename, mask]
                else:
                    return [torch.from_numpy(image), torch.from_numpy(fronts), filename, mask]
            else:
                return [torch.from_numpy(image), None, filename, mask]
    def getCropRange(self, latrange, lonrange, res, seed):
        """Return a (lat, lon) window of size self.cropsize, randomly placed
        inside the given ranges; deterministic for a given seed."""
        if(self.cropsize is None):
            return latrange, lonrange
        else:
            # perform crop before reading data, to reduce memory usage
            common_seed= seed
            h,w = int(np.abs((latrange[1]-latrange[0]+res[0]-0.001)/res[0])), int(np.abs((lonrange[1]-lonrange[0])/res[1]))
            th,tw = self.cropsize
            # Seeding with the shared seed keeps image/label/mask crops aligned.
            random.seed(common_seed)
            i = random.randint(0, h-th)*res[0]
            j = random.randint(0, w-tw)*res[1]
            th *= res[0]
            tw *= res[1]
            return (latrange[0]+i, latrange[0]+i+th), (lonrange[0]+j, lonrange[0]+j+tw)
    def getImage(self, filename, latrange, lonrange, res, seed, tseed = 0):
        """Extract ERA data for a (possibly cropped) lat/lon window."""
        tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
        return self.era_extractor(filename, tgt_latrange, tgt_lonrange, self.levelrange, tseed)
    def getLabel(self, filename, latrange, lonrange, res, types, seed):
        """Extract label data for the same cropped window as the image."""
        tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
        if(self.halfRes):
            # Half-resolution evaluation: double the step size in both axes.
            return self.label_extractor(filename, (tgt_latrange[0], tgt_latrange[1]), (tgt_lonrange[0], tgt_lonrange[1]), (res[0]*2, res[1]*2), types)
        else:
            return self.label_extractor(filename, (tgt_latrange[0], tgt_latrange[1]), (tgt_lonrange[0], tgt_lonrange[1]), res, types)
    def getMask(self, mask, latrange, lonrange, res, seed):
        """Crop the precomputed global mask to the same window as the image."""
        tgt_latrange, tgt_lonrange = self.getCropRange(latrange, lonrange, res, seed)
        return mask[int((90-tgt_latrange[0])/np.abs(res[0])):int((90-tgt_latrange[1])/np.abs(res[0])), int((180+tgt_lonrange[0])/res[1]):int((180+tgt_lonrange[1])/res[1])]
    def transformImage(self, image, seed):
        """Apply the data-augmentation transform channel-wise, reseeding so
        every channel receives the same random transform."""
        if(self.transform[0] is None):
            return image
        finalImage = np.zeros_like(image)
        for channel in range(image.shape[0]):
            #for level in range(image.shape[1]):
            random.seed(seed)
            finalImage[channel, :,:] = self.transform[0](image[channel,:,:])
        return finalImage
    def transformLabel(self, label, seed):
        """Apply the label-augmentation transform, per coordinate group or per
        channel, reseeding so it matches the image transform."""
        if(self.transform[1] is None):
            return label
        if(self.asCoords):
            finalLabel = label
            for group in range(len(label)):
                random.seed(seed)
                finalLabel[group] = self.transform[1](finalLabel[group])
        else:
            finalLabel = np.zeros((label.shape))
            for channel in range(label.shape[2]):
                random.seed(seed)
                finalLabel[:,:,channel] = self.transform[1](label[:,:,channel])
        return finalLabel
    def getProjectionTypeAndFilePath(self, front_name):
        """Pick an existing label file among the per-map-type candidates;
        randomly if randomizeMapTypes is set. Returns (mapType, path)."""
        projection_type = ""
        keys, names = [], []
        for key, fname in front_name.items():
            currFold = os.path.join(self.label_dir, key)
            # get filename without path
            filename = fname.split("/")[-1]
            #print(filename, currFold, fname)
            #print(os.listdir(currFold))
            if(filename in os.listdir(currFold)):
                keys.append(key), names.append(os.path.join(self.label_dir, fname))
        idx = 0
        if(len(keys)>0):
            if(self.randomizeMapTypes):
                idx = random.randint(0,len(keys)-1)
            return keys[idx], names[idx]
        # No Label found: fall back to an empty type and the candidate dict.
        print(front_name)
        print(os.listdir(self.label_dir))
        print("Invalid label data pair, no label found!")
        return projection_type, front_name
    def __repr__(self):
        myString = "WeatherFrontDataset\n"
        myString += str(self.__dict__)
        return myString
    def getInfo(self):
        """Return a parseable multi-line description of this dataset's setup."""
        myString = "WeatherFrontDataset\n"
        myString += "data_dir :: "+ "str :: " +str(self.data_dir)+" :: end\n"
        myString += "label_dir :: "+ "str :: " +str(self.label_dir)+" :: end\n"
        myString += "map_types :: "+ "dict(str: tuple(str, tuple(float,float), tuple(float,float), tuple(float,float))) :: " +str(self.mapTypes)+" :: end\n"
        myString += "levelrange :: "+ "list(int) :: " +str(list(self.levelrange))+" :: end\n"
        myString += "transforms :: "+ "obj :: " +str(self.transform)+" :: end\n"
        myString += "outsize :: "+ "tuple(int,int) :: " +str(self.cropsize)+" :: end\n"
        myString += "translat :: "+ "tuple(int,int) :: " +str(self.label_extractor.imageCreator.maxOff)+" :: end\n"
        myString += "printFileName :: "+ "bool :: " +str(self.printFileName)+" :: end\n"
        myString += "labelThickness :: "+ "int :: " +str(self.label_extractor.imageCreator.thickness)+" :: end\n"
        myString += "labelGrouping :: "+ "str :: " +str(self.label_extractor.imageCreator.labelGrouping)+" :: end\n"
        myString += "Variables :: "+ "list(str) :: " +str(self.era_extractor.variables)+" :: end\n"
        myString += "NormType :: "+ "int :: " +str(self.era_extractor.reader.normalize_type)+" :: end\n"
        return myString
class WeatherFrontBatch:
    """Batch of samples: stacked image tensor, optional labels, file names."""

    def __init__(self, data, label_as_float=True, transpose_rate=0.5, swap_indices=None):
        # Turn the list of per-sample tuples into per-field columns.
        columns = list(zip(*data))
        images, labels, names = columns[0], columns[1], columns[2]
        self.data = torch.stack(images, 0).float()
        if labels[0] is None:
            self.labels = None
        elif label_as_float:
            self.labels = torch.stack(labels, 0).float()
        else:
            self.labels = torch.stack(labels, 0).long()
        self.filenames = names

    def pin_memory(self):
        """Pin the image tensor and return [data, labels, filenames]."""
        self.data = self.data.pin_memory()
        return [self.data, self.labels, self.filenames]
class WeatherFrontsAsCoordinatesBatch:
    """Batch where labels are coordinate collections instead of dense tensors."""

    def __init__(self, data, label_as_float=True, transpose_rate=0.5, swap_indices=None):
        # Per-field columns from the list of per-sample tuples.
        columns = list(zip(*data))
        self.data = torch.stack(columns[0], 0).float()
        # Coordinate labels are kept as-is (not stackable into one tensor).
        self.labels = None if columns[1][0] is None else columns[1]
        self.masks = None if columns[3][0] is None else torch.stack(columns[3], 0).float()
        self.filenames = columns[2]

    def pin_memory(self):
        """Pin the image tensor and return [data, labels, filenames, masks]."""
        self.data = self.data.pin_memory()
        return [self.data, self.labels, self.filenames, self.masks]
class collate_wrapper:
    """Callable collate function that picks the matching batch class."""

    def __init__(self, binary=True, asCoordinates=False, transpose_rate=0.5, swap_indices=None):
        self.label_as_float = binary
        self.transpose_rate = transpose_rate
        self.swap_indices = swap_indices
        self.asCoords = asCoordinates

    def __call__(self, batch):
        # Coordinate labels need the coordinate-aware batch wrapper.
        batch_cls = WeatherFrontsAsCoordinatesBatch if self.asCoords else WeatherFrontBatch
        return batch_cls(batch, label_as_float=self.label_as_float,
                         transpose_rate=self.transpose_rate, swap_indices=self.swap_indices)
| 46.884817 | 365 | 0.599107 | 2,101 | 17,910 | 5.007139 | 0.161352 | 0.031939 | 0.03251 | 0.007985 | 0.355418 | 0.305228 | 0.262262 | 0.237833 | 0.208365 | 0.187643 | 0 | 0.016155 | 0.28459 | 17,910 | 381 | 366 | 47.007874 | 0.804886 | 0.128085 | 0 | 0.334495 | 0 | 0.003484 | 0.059803 | 0.00271 | 0.041812 | 0 | 0 | 0 | 0 | 1 | 0.069686 | false | 0.003484 | 0.038328 | 0.010453 | 0.219512 | 0.087108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d7b0958d8d2379fabfb2fe33a6017490b1f91e | 2,865 | py | Python | packages/mccomponents/tests/mccomponents/sample/kernel-orientation/kernelorientation_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mccomponents/tests/mccomponents/sample/kernel-orientation/kernelorientation_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mccomponents/tests/mccomponents/sample/kernel-orientation/kernelorientation_TestCase.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
# Jiao Lin <jiao.lin@gmail.com
"""
This test checks the "orientation" parameter of kernels.
* Sub-kernels in a "KernelContainer" have the parameter "orientation"
to specify its orientation relative to its parent kernel.
* The root level KernelContainer always has the same coordinate system
as the scatterer.
In this test the coordinate system of the kernel
is rotated 30 deg around the y axis (vertical up)
with respect to the scatterer.
Roughly it is illustrated below:
x' ^ x
|\ |
\ |
\ | > z'
\ | . '
\ | . '
\|. ' ) 30 deg
-------------------> z
So the transformation matrix is
sqrt(3)/2 0 1/2
0 1 0
-1/2 0 sqrt(3)/2
This is specified in cyl/X-scatterer.xml.
In kernel's coordinate system, we set the momentum transfer
of the kernel to be [2,0,0], which is along the x' axis.
The incident neutron is along z axis with energy 100meV.
With these information, we can compute the momentum transfer
in the instrument coordinate system, and then the final energy
and energy transfer E.
Turns out E = -37.07822meV, and this is set in cyl/X-scatterer.xml.
In the following test, we make sure the final velocities of
the scattered neutrons are expected, and the neutrons
have valid probabilities.
"""
import unittest, numpy as np
class TestCase(unittest.TestCase):
    """Verify the kernel "orientation" parameter (see module docstring)."""

    def test1(self):
        'kernel orientation'
        # source
        from mcni.components.MonochromaticSource import MonochromaticSource
        import mcni, numpy as np
        Ei = 100
        from mcni.utils import conversion as Conv
        ki = Conv.e2k(Ei)
        vi = Conv.e2v(Ei)
        # Momentum transfer direction: the kernel x' axis expressed in
        # instrument coordinates (x rotated 30 deg around y), magnitude 2.
        Qdir = np.array([np.sqrt(3)/2, 0, -1./2])
        Q = Qdir * 2
        kf = np.array([0,0,ki]) - Q
        Ef = Conv.k2e(np.linalg.norm(kf))
        E = Ei-Ef
        # expected final velocity of scattered neutrons
        dv = Qdir * Conv.k2v(Q)
        vf = np.array([0,0,vi]) - dv
        # print ki, Q, kf
        # print Ei, Ef, E
        neutron = mcni.neutron(r=(0,0,-1), v=(0,0,vi), prob=1)
        source = MonochromaticSource('s', neutron, dx=0.001, dy=0.001, dE=0)
        # sample
        from mccomponents.sample import samplecomponent
        scatterer = samplecomponent('sa', 'cyl/sampleassembly.xml' )
        # incident
        N = 1000
        neutrons = mcni.neutron_buffer(N)
        neutrons = source.process(neutrons)
        # scatter
        scatterer.process(neutrons)
        self.assertEqual(len(neutrons), N)
        for neutron in neutrons:
            # BUG FIX: np.allclose was previously called with its result
            # discarded, so a wrong velocity could never fail this test.
            self.assertTrue(np.allclose(neutron.state.velocity, vf))
            self.assertTrue(neutron.probability > 0)
        return
    pass # end of scattererxml_TestCase
def main():
    """Run this module's unit tests via the unittest runner."""
    unittest.main()


if __name__ == "__main__":
    main()
# End of file
| 27.285714 | 76 | 0.610471 | 392 | 2,865 | 4.436224 | 0.431122 | 0.00575 | 0.010351 | 0.008051 | 0.033353 | 0.033353 | 0 | 0 | 0 | 0 | 0 | 0.033399 | 0.289354 | 2,865 | 104 | 77 | 27.548077 | 0.820727 | 0.520768 | 0 | 0 | 0 | 0 | 0.037199 | 0.016047 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.058824 | false | 0.029412 | 0.147059 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74d7f5258c12f0959a81cebaa0a5b91827535d65 | 27,239 | py | Python | python-modules/robcoewminterface/robcoewminterface/ewm.py | yschiebelhut/ewm-cloud-robotics | bdf3a6c13850d266b70168912494300c32d4d803 | [
"Apache-2.0"
] | 25 | 2019-07-31T12:50:33.000Z | 2022-01-11T15:53:40.000Z | python-modules/robcoewminterface/robcoewminterface/ewm.py | yschiebelhut/ewm-cloud-robotics | bdf3a6c13850d266b70168912494300c32d4d803 | [
"Apache-2.0"
] | 10 | 2019-07-11T13:12:12.000Z | 2022-03-15T15:46:58.000Z | python-modules/robcoewminterface/robcoewminterface/ewm.py | isabella232/ewm-cloud-robotics | 8210843df323379ded92ec14ec73b1f3ef6b2f41 | [
"Apache-2.0"
] | 23 | 2019-08-07T21:23:38.000Z | 2022-03-08T00:16:10.000Z | #!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""EWM OData provider for robcoewminterface."""
import logging
from typing import Any, Dict, List, Optional
from requests import Response
from robcoewmtypes.warehouse import Warehouse, WarehouseDescription, StorageBin
from robcoewmtypes.warehouseorder import (
WarehouseOrder, WarehouseTask, WarehouseTaskConfirmation, ConfirmWarehouseTask)
from robcoewmtypes.robot import (
Robot, RobotResourceType, ResourceGroup, ResourceTypeDescription, ResourceGroupDescription)
from .conversion import odata_to_attr
from .exceptions import ODataAPIException, get_exception_class
from .odata import ODataHandler
# Module-level logger for this OData provider.
_LOGGER = logging.getLogger(__name__)
# HTTP status codes treated as success for OData calls (the 2xx family).
HTTP_SUCCESS = [200, 201, 202, 203, 204, 205, 206, 207, 208, 226]
# Status codes under which the EWM backend reports business exceptions.
HTTP_BUS_EXCEPTION = [404, 500]
# Result label used for the Prometheus OData counter on success.
STATE_SUCCEEDED = 'SUCCEEDED'
class EWMOdata:
    """Base class for EWM OData interface."""

    def __init__(self, odata: ODataHandler) -> None:
        """Construct."""
        self._odata = odata

    def handle_http_response(self, endpoint: str, http_resp: Response) -> Any:
        """
        Handle an OData HTTP request response.

        Returns attrs data class in case of success and raises exception on error.
        For PATCH requests the body of an OData request is empty on success. Returning True then.
        """
        counter = self._odata.odata_counter  # pylint: disable=no-member
        # Success path: count it and convert the (possibly empty) payload.
        if http_resp.status_code in HTTP_SUCCESS:
            counter.labels(endpoint=endpoint, result=STATE_SUCCEEDED).inc()
            return odata_to_attr(http_resp.json()) if http_resp.text else True
        # Determine the error code of the failed request.
        if http_resp.status_code == 403:
            error_code = '403'
        else:
            # Get error code from the HTTP response body
            try:
                error_code = http_resp.json()['error']['code']
            except KeyError:
                error_code = ''
            if http_resp.status_code == 404 and not error_code:
                error_code = '404'
        # Business exceptions raised in the EWM backend map to dedicated classes.
        if http_resp.status_code in HTTP_BUS_EXCEPTION:
            exception_class = get_exception_class(error_code)
            counter.labels(endpoint=endpoint, result=exception_class.ERROR_CODE).inc()
            raise exception_class()
        # Any other error: generic exception carrying the raw error code.
        counter.labels(endpoint=endpoint, result=error_code).inc()
        raise ODataAPIException(error_code=error_code)
class WarehouseOData(EWMOdata):
    """Interaction with EWM warehouse APIs."""

    def get_warehouse(
            self, lgnum: str, descriptions: bool = False, storagebins: bool = False) -> Warehouse:
        """
        Get data of one warehouse.

        Optionally expand descriptions and storage bins.
        """
        # define endpoint
        endpoint = '/WarehouseNumberSet'
        # create URL parameter: $expand for requested navigation properties
        params = {}
        if descriptions or storagebins:
            exvalues = []
            if descriptions:
                exvalues.append('WarehouseDescriptions')
            if storagebins:
                exvalues.append('StorageBins')
            params['$expand'] = ','.join(exvalues)
        # create IDs
        ids = {'Lgnum': lgnum}
        # HTTP OData GET request
        http_resp = self._odata.http_get(endpoint, urlparams=params, ids=ids)
        return self.handle_http_response(endpoint, http_resp)

    def get_warehouses(self, descriptions: bool = False,
                       storagebins: bool = False) -> Optional[List[Warehouse]]:
        """
        Get data of all warehouses.

        Optionally expand descriptions and storage bins.
        """
        # define endpoint
        endpoint = '/WarehouseNumberSet'
        # create URL parameter
        params = {}
        if descriptions or storagebins:
            exvalues = []
            if descriptions:
                exvalues.append('WarehouseDescriptions')
            if storagebins:
                exvalues.append('StorageBins')
            params['$expand'] = ','.join(exvalues)
        # HTTP OData GET request
        http_resp = self._odata.http_get(endpoint, urlparams=params)
        return self.handle_http_response(endpoint, http_resp)

    def get_whdescription(self, lgnum: str, spras: str) -> WarehouseDescription:
        """Get description of one warehouse in a language."""
        # define endpoint
        endpoint = '/WarehouseDescriptionSet'
        # create IDs
        ids = {'Lgnum': lgnum, 'Spras': spras}
        # HTTP OData GET request
        http_resp = self._odata.http_get(endpoint, ids=ids)
        return self.handle_http_response(endpoint, http_resp)

    def get_whdescriptions(self, lgnum: Optional[str] = None) -> List[WarehouseDescription]:
        """
        Get descriptions of warehouses in all languages.

        Optionally filter by warehouse.
        """
        ids: Optional[Dict]
        nav: Optional[str]
        if lgnum:
            # navigate from one warehouse to its descriptions
            endpoint = '/WarehouseNumberSet'
            ids = {'Lgnum': lgnum}
            nav = '/WarehouseDescriptions'
        else:
            # query the whole description collection
            endpoint = '/WarehouseDescriptionSet'
            ids = None
            nav = None
        # HTTP OData GET request
        http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
        return self.handle_http_response(endpoint, http_resp)

    def get_storagebin(self, lgnum: str, lgpla: str) -> StorageBin:
        """Get one specific storage bin."""
        # define endpoint
        endpoint = '/StorageBinSet'
        # create IDs
        ids = {'Lgnum': lgnum, 'Lgpla': lgpla}
        # HTTP OData GET request
        http_resp = self._odata.http_get(endpoint, ids=ids)
        return self.handle_http_response(endpoint, http_resp)

    def get_storagebins(self, lgnum: Optional[str] = None) -> List[StorageBin]:
        """
        Get all storage bins from the system.

        Optionally filter by warehouse.

        Note: the return annotation previously claimed
        List[WarehouseDescription]; this endpoint returns storage bins.
        """
        ids: Optional[Dict]
        nav: Optional[str]
        if lgnum:
            # navigate from one warehouse to its storage bins
            endpoint = '/WarehouseNumberSet'
            ids = {'Lgnum': lgnum}
            nav = '/StorageBins'
        else:
            # query the whole storage bin collection
            endpoint = '/StorageBinSet'
            ids = None
            nav = None
        # HTTP OData GET request
        http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
        return self.handle_http_response(endpoint, http_resp)
class WarehouseOrderOData(EWMOdata):
"""Interaction with EWM warehouse order APIs."""
def get_warehouseorder(
self, lgnum: str, who: str, openwarehousetasks: bool = False) -> WarehouseOrder:
"""
Get data of one warehouse order.
Optionally expand warehouse tasks.
"""
# define endpoint
endpoint = '/WarehouseOrderSet'
# create URL parameter
params = {}
if openwarehousetasks:
exvalues = []
exvalues.append('OpenWarehouseTasks')
params['$expand'] = ','.join(exvalues)
# create IDs
ids = {'Lgnum': lgnum, 'Who': who}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_warehouseorders(
self, lgnum: Optional[str] = None, topwhoid: Optional[str] = None,
openwarehousetasks: bool = False) -> List[WarehouseOrder]:
"""
Get data of all warehouse orders.
Optionally filter by warehouse expand warehouse tasks.
"""
# create URL parameter
params = {}
if openwarehousetasks:
exvalues = []
exvalues.append('OpenWarehouseTasks')
params['$expand'] = ','.join(exvalues)
# Define endpoint IDs and navigation based on parameter selection
if lgnum and topwhoid:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = None
# create navigation
nav = None
# add filter URL param
params['$filter'] = "Lgnum eq '{}' and Topwhoid eq '{}'".format(
lgnum, topwhoid)
elif lgnum:
# define endpoint
endpoint = '/WarehouseNumberSet'
# create IDs
ids = {'Lgnum': lgnum}
# create navigation
nav = '/WarehouseOrders'
elif topwhoid:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = None
# create navigation
nav = None
# add filter URL param
params['$filter'] = "Topwhoid eq '{}'".format(topwhoid)
else:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(
endpoint, urlparams=params, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def get_robot_warehouseorders(self, lgnum: str, rsrc: str) -> List[WarehouseOrder]:
"""Get warehouse orders assigned to the robot resource."""
# define endpoint
endpoint = '/GetRobotWarehouseOrders'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc)}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def getnew_robot_warehouseorder(self, lgnum: str, rsrc: str) -> WarehouseOrder:
"""
Get a new warehouse order for a robot resource.
The warehouse order will be immediately assigned to the robot
resource in EWM.
"""
# define endpoint
endpoint = '/GetNewRobotWarehouseOrder'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint,
urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def getnew_rtype_warehouseorders(
self, lgnum: str, rsrcgrp: str, rsrctype: str, nowho: int) -> List[WarehouseOrder]:
"""
Get #nowho new warehouse orders for a robot type.
The warehouse order is marked as 'in process', but not assigned to a
robot resource yet. This needs to be done by calling the method:
assign_robot_warehouseorder.
"""
# define endpoint
endpoint = '/GetNewRobotTypeWarehouseOrders'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum),
'RsrcGrp': "'{}'".format(rsrcgrp),
'RsrcType': "'{}'".format(rsrctype),
'NoWho': int(nowho)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_in_process_warehouseorders(
self, lgnum: str, rsrcgrp: str, rsrctype: str) -> List[WarehouseOrder]:
"""Get warehouse orders in process but not assigned to a robot resource."""
# define endpoint
endpoint = '/GetInProcessWarehouseOrders'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum),
'RsrcGrp': "'{}'".format(rsrcgrp),
'RsrcType': "'{}'".format(rsrctype)}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def assign_robot_warehouseorder(self, lgnum: str, rsrc: str, who: str) -> WarehouseOrder:
"""Assign a robot resource to a warehouse order."""
# define endpoint
endpoint = '/AssignRobotToWarehouseOrder'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc),
'Who': "'{}'".format(who)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def get_openwarehousetask(self, lgnum: str, tanum: str) -> WarehouseTask:
"""Get data from one warehouse task."""
# define endpoint
endpoint = '/OpenWarehouseTaskSet'
# create IDs
ids = {'Lgnum': lgnum, 'Tanum': tanum}
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids)
return self.handle_http_response(endpoint, http_resp)
def get_openwarehousetasks(
self, lgnum: Optional[str] = None, who: Optional[str] = None) -> List[WarehouseTask]:
"""
Get data of all open warehouse tasks.
Optionally filter by warehouse and warehouse order.
"""
# Define endpoint IDs and navigation based on parameter selection
ids: Optional[Dict]
nav: Optional[str]
if lgnum and who:
# define endpoint
endpoint = '/WarehouseOrderSet'
# create IDs
ids = {'Lgnum': lgnum, 'Who': who}
# create navigation
nav = '/OpenWarehouseTasks'
elif lgnum or who:
raise AttributeError(
'Either filter "lgnum" AND "who" or none of them ')
else:
# define endpoint
endpoint = '/OpenWarehouseTaskSet'
# create IDs
ids = None
# create navigation
nav = None
# HTTP OData GET request
http_resp = self._odata.http_get(endpoint, ids=ids, navigation=nav)
return self.handle_http_response(endpoint, http_resp)
def confirm_warehousetask(
self, lgnum: str, tanum: str, rsrc: str) -> WarehouseTaskConfirmation:
"""
Confirm a warehouse task - putaway.
TODO: Implement exceptions: partly confirmations, bin change etc.
"""
# define endpoint
endpoint = '/ConfirmWarehouseTask'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Tanum': "'{}'".format(tanum),
'Rsrc': "'{}'".format(rsrc)}
# HTTP OData POST request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def confirm_warehousetask_firststep(
self, lgnum: str, tanum: str, rsrc: str) -> WarehouseTaskConfirmation:
"""
Confirm a warehouse task - first step.
First confirmation of a warehouse task.
This also assigns the warehouse task to the resource.
"""
# define endpoint
endpoint = '/ConfirmWarehouseTaskFirstStep'
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Tanum': "'{}'".format(tanum),
'Rsrc': "'{}'".format(rsrc)}
# HTTP OData POST request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def send_confirmation_error(
self, lgnum: str, rsrc: str, who: str, tanum: str, confnumber: str) -> WarehouseOrder:
"""Send error before confirmation of a warehouse task."""
# define endpoint
if confnumber == ConfirmWarehouseTask.FIRST_CONF:
endpoint = '/SendFirstConfirmationError'
elif confnumber == ConfirmWarehouseTask.SECOND_CONF:
endpoint = '/SendSecondConfirmationError'
else:
raise ValueError('Could be used only for FIRST and SECOND confirmation')
# create URL parameter
params = {'Lgnum': "'{}'".format(lgnum), 'Rsrc': "'{}'".format(rsrc),
'Who': "'{}'".format(who), 'Tanum': "'{}'".format(tanum)}
# HTTP OData GET request
http_resp = self._odata.http_patch_post('post', endpoint, urlparams=params)
return self.handle_http_response(endpoint, http_resp)
def unassign_robot_warehouseorder(self, lgnum: str, rsrc: str, who: str) -> WarehouseOrder:
    """Unassign a robot resource from a warehouse order."""
    endpoint = '/UnassignRobotFromWarehouseOrder'
    # OData function imports expect quoted string URL parameters.
    query = {'Lgnum': f"'{lgnum}'", 'Rsrc': f"'{rsrc}'", 'Who': f"'{who}'"}
    # HTTP OData POST request
    resp = self._odata.http_patch_post('post', endpoint, urlparams=query)
    return self.handle_http_response(endpoint, resp)
def unset_warehouseorder_in_process(self, lgnum: str, who: str) -> WarehouseOrder:
    """Unset in process status of a warehouse order."""
    endpoint = '/UnsetWarehouseorderInProcessStatus'
    # OData function imports expect quoted string URL parameters.
    query = {'Lgnum': f"'{lgnum}'", 'Who': f"'{who}'"}
    # HTTP OData POST request
    resp = self._odata.http_patch_post('post', endpoint, urlparams=query)
    return self.handle_http_response(endpoint, resp)
class RobotOData(EWMOdata):
    """Interaction with EWM warehouse robot APIs."""

    def get_robot(self, lgnum: str, rsrc: str) -> Robot:
        """Get data of one robot."""
        endpoint = '/RobotSet'
        key = {'Lgnum': lgnum, 'Rsrc': rsrc}
        resp = self._odata.http_get(endpoint, ids=key)
        return self.handle_http_response(endpoint, resp)

    def get_robots(self, lgnum: Optional[str] = None) -> List[Robot]:
        """Get data of all robots, optionally filtered by warehouse."""
        key: Optional[Dict]
        navigation: Optional[str]
        if lgnum:
            # Navigate from the warehouse entity to its robots.
            endpoint = '/WarehouseNumberSet'
            key = {'Lgnum': lgnum}
            navigation = '/Robots'
        else:
            # Unfiltered read over the whole entity set.
            endpoint = '/RobotSet'
            key = None
            navigation = None
        resp = self._odata.http_get(endpoint, ids=key, navigation=navigation)
        return self.handle_http_response(endpoint, resp)

    def create_robot(self, lgnum: str, rsrc: str, rsrctype: str, rsrcgrp: str) -> Robot:
        """Create a new robot resource in EWM."""
        endpoint = '/RobotSet'
        payload = {'Lgnum': lgnum, 'Rsrc': rsrc, 'RsrcType': rsrctype, 'RsrcGrp': rsrcgrp}
        resp = self._odata.http_patch_post('post', endpoint, jsonbody=payload)
        return self.handle_http_response(endpoint, resp)

    def change_robot(
            self, lgnum: str, rsrc: str, rsrctype: Optional[str] = None,
            rsrcgrp: Optional[str] = None) -> bool:
        """Change an existing robot resource in EWM."""
        endpoint = '/RobotSet'
        key = {'Lgnum': lgnum, 'Rsrc': rsrc}
        # Only send the fields the caller actually wants to change.
        payload = {}
        if rsrctype is not None:
            payload['RsrcType'] = rsrctype
        if rsrcgrp is not None:
            payload['RsrcGrp'] = rsrcgrp
        resp = self._odata.http_patch_post('patch', endpoint, ids=key, jsonbody=payload)
        # A successful PATCH carries no body; a body only appears on errors.
        return self.handle_http_response(endpoint, resp)

    def set_robot_status(self, lgnum: str, rsrc: str, exccode: str) -> Robot:
        """Set exception codes for robot resources in EWM."""
        endpoint = '/SetRobotStatus'
        # OData function imports expect quoted string URL parameters.
        query = {'Lgnum': f"'{lgnum}'", 'Rsrc': f"'{rsrc}'", 'Exccode': f"'{exccode}'"}
        resp = self._odata.http_patch_post('post', endpoint, urlparams=query)
        return self.handle_http_response(endpoint, resp)

    def get_robot_resource_type(self, lgnum: str, rsrctype: str) -> RobotResourceType:
        """Get data of one robot resource type."""
        endpoint = '/RobotResourceTypeSet'
        key = {'Lgnum': lgnum, 'RsrcType': rsrctype}
        resp = self._odata.http_get(endpoint, ids=key)
        return self.handle_http_response(endpoint, resp)

    def get_robot_resource_types(self, lgnum: Optional[str] = None) -> List[RobotResourceType]:
        """Get data of all robot resource types, optionally filtered by warehouse."""
        key: Optional[Dict]
        navigation: Optional[str]
        if lgnum:
            endpoint = '/WarehouseNumberSet'
            key = {'Lgnum': lgnum}
            navigation = '/RobotResourceTypes'
        else:
            endpoint = '/RobotResourceTypeSet'
            key = None
            navigation = None
        resp = self._odata.http_get(endpoint, ids=key, navigation=navigation)
        return self.handle_http_response(endpoint, resp)

    def get_resource_type_description(
            self, lgnum: str, rsrctype: str, langu: str) -> ResourceTypeDescription:
        """Get description of one resource type in a language."""
        endpoint = '/ResourceTypeDescriptionSet'
        key = {'Lgnum': lgnum, 'RsrcType': rsrctype, 'Langu': langu}
        resp = self._odata.http_get(endpoint, ids=key)
        return self.handle_http_response(endpoint, resp)

    def get_resource_type_descriptions(
            self, lgnum: Optional[str] = None,
            rsrctype: Optional[str] = None) -> List[ResourceTypeDescription]:
        """Get descriptions of resource types in all languages, optionally filtered."""
        key: Optional[Dict]
        navigation: Optional[str]
        if lgnum or rsrctype:
            endpoint = '/RobotResourceTypeSet'
            key = {'Lgnum': lgnum, 'RsrcType': rsrctype}
            navigation = '/ResourceTypeDescriptions'
        else:
            endpoint = '/ResourceTypeDescriptionSet'
            key = None
            navigation = None
        resp = self._odata.http_get(endpoint, ids=key, navigation=navigation)
        return self.handle_http_response(endpoint, resp)

    def get_resource_group(self, lgnum: str, rsrcgrp: str) -> ResourceGroup:
        """Get data of one robot resource group."""
        endpoint = '/ResourceGroupSet'
        key = {'Lgnum': lgnum, 'RsrcGrp': rsrcgrp}
        resp = self._odata.http_get(endpoint, ids=key)
        return self.handle_http_response(endpoint, resp)

    def get_resource_groups(self, lgnum: Optional[str] = None) -> List[ResourceGroup]:
        """Get data of all resource groups, optionally filtered by warehouse."""
        key: Optional[Dict]
        navigation: Optional[str]
        if lgnum:
            endpoint = '/WarehouseNumberSet'
            key = {'Lgnum': lgnum}
            navigation = '/ResourceGroups'
        else:
            endpoint = '/ResourceGroupSet'
            key = None
            navigation = None
        resp = self._odata.http_get(endpoint, ids=key, navigation=navigation)
        return self.handle_http_response(endpoint, resp)

    def get_resource_group_description(
            self, lgnum: str, rsrcgrp: str, langu: str) -> ResourceGroupDescription:
        """Get description of one resource group in a language."""
        endpoint = '/ResourceGroupDescriptionSet'
        key = {'Lgnum': lgnum, 'RsrcGrp': rsrcgrp, 'Langu': langu}
        resp = self._odata.http_get(endpoint, ids=key)
        return self.handle_http_response(endpoint, resp)

    def get_resource_group_descriptions(
            self, lgnum: Optional[str] = None,
            rsrcgrp: Optional[str] = None) -> List[ResourceGroupDescription]:
        """Get descriptions of resource groups in all languages, optionally filtered."""
        key: Optional[Dict]
        navigation: Optional[str]
        if lgnum or rsrcgrp:
            endpoint = '/ResourceGroupSet'
            key = {'Lgnum': lgnum, 'RsrcGrp': rsrcgrp}
            navigation = '/ResourceGroupDescriptions'
        else:
            endpoint = '/ResourceGroupDescriptionSet'
            key = None
            navigation = None
        resp = self._odata.http_get(endpoint, ids=key, navigation=navigation)
        return self.handle_http_response(endpoint, resp)
| 33.921544 | 98 | 0.598003 | 2,786 | 27,239 | 5.727925 | 0.110194 | 0.037097 | 0.059281 | 0.039291 | 0.692881 | 0.648264 | 0.615929 | 0.540795 | 0.526068 | 0.492668 | 0 | 0.002906 | 0.305224 | 27,239 | 802 | 99 | 33.96384 | 0.840317 | 0.231286 | 0 | 0.585635 | 0 | 0 | 0.098287 | 0.034256 | 0 | 0 | 0 | 0.001247 | 0 | 1 | 0.096685 | false | 0 | 0.024862 | 0 | 0.229282 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74dd569b79e52382362c820740fae840ef4bfce6 | 4,121 | py | Python | pyflowline/mesh/tin/create_tin_mesh.py | changliao1025/pyflowline | fb8677c5ebb3d0db8638f7fcc495ffb97376e00f | [
"Unlicense"
] | 4 | 2022-03-23T12:10:20.000Z | 2022-03-29T13:41:16.000Z | pyflowline/mesh/tin/create_tin_mesh.py | changliao1025/pyflowline | fb8677c5ebb3d0db8638f7fcc495ffb97376e00f | [
"Unlicense"
] | 1 | 2022-03-24T16:08:35.000Z | 2022-03-24T16:08:35.000Z | pyflowline/mesh/tin/create_tin_mesh.py | changliao1025/pyflowline | fb8677c5ebb3d0db8638f7fcc495ffb97376e00f | [
"Unlicense"
] | null | null | null | import os, sys
import numpy as np
from osgeo import ogr
from pyflowline.classes.tin import pytin
from pyflowline.formats.convert_coordinates import convert_pcs_coordinates_to_cell
def create_tin_mesh(dX_left_in, dY_bot_in, dResolution_meter_in, ncolumn_in, nrow_in,
        sFilename_output_in, sFilename_spatial_reference_in):
    """Create a TIN (triangle) mesh shapefile and return its cells.

    The mesh is a grid of alternating upward/downward triangles whose area
    equals the square of the requested resolution. Each triangle is written
    as a polygon feature to *sFilename_output_in* and also converted into a
    cell object via ``convert_pcs_coordinates_to_cell``.

    Args:
        dX_left_in: x coordinate of the left edge of the mesh.
        dY_bot_in: y coordinate of the bottom edge of the mesh.
        dResolution_meter_in: target cell resolution in meters.
        ncolumn_in: number of triangle columns.
        nrow_in: number of triangle rows.
        sFilename_output_in: path of the Esri Shapefile to create
            (overwritten if it already exists).
        sFilename_spatial_reference_in: existing shapefile whose spatial
            reference is copied onto the new layer.

    Returns:
        list: the generated cell objects.
    """
    if os.path.exists(sFilename_output_in):
        #delete it if it exists
        os.remove(sFilename_output_in)
    pDriver_shapefile = ogr.GetDriverByName('Esri Shapefile')
    pDataset = pDriver_shapefile.CreateDataSource(sFilename_output_in)
    # Borrow the spatial reference from the existing shapefile.
    # NOTE(review): this dataset is never explicitly closed/released.
    pDataset_shapefile = pDriver_shapefile.Open(sFilename_spatial_reference_in, 0)
    pLayer_shapefile = pDataset_shapefile.GetLayer(0)
    pSrs = pLayer_shapefile.GetSpatialRef()
    #pSrs = osr.SpatialReference()
    #pSrs.ImportFromEPSG(4326) # WGS84 lat/lon
    pLayer = pDataset.CreateLayer('cell', pSrs, ogr.wkbPolygon)
    # Add one attribute
    pLayer.CreateField(ogr.FieldDefn('id', ogr.OFTInteger64)) #long type for high resolution
    pLayerDefn = pLayer.GetLayerDefn()
    pFeature = ogr.Feature(pLayerDefn)
    xleft = dX_left_in
    ybottom = dY_bot_in
    # Each triangle has the same area as a square cell of the resolution.
    dArea = np.power(dResolution_meter_in,2.0)
    #tin edge
    # Edge length of an equilateral triangle with area dArea.
    dLength_edge = np.sqrt( 4.0 * dArea / np.sqrt(3.0) )
    dX_shift = 0.5 * dLength_edge
    dY_shift = 0.5 * dLength_edge * np.sqrt(3.0)
    dX_spacing = dX_shift * 2
    dY_spacing = dY_shift
    lID =0
    #geojson
    aTin=list()
    #.........
    #(x2,y2)-----(x3,y3)
    # |               |
    #(x1,y1)-----(x4,y4)
    #...............
    for column in range(0, ncolumn_in):
        for row in range(0, nrow_in):
            # The vertex layout alternates by column/row parity so that
            # upward- and downward-pointing triangles tile the plane.
            if column % 2 == 0 :
                if row % 2 == 0:
                    #define a polygon here
                    x1 = xleft + (column * dX_shift)
                    y1 = ybottom + (row * dY_spacing)
                    x2 = x1 + dX_spacing
                    y2 = y1
                    x3 = x1 + dX_shift
                    y3 = y1 + dY_spacing
                else:
                    x1 = xleft + (column * dX_shift)
                    y1 = ybottom + (row +1)* dY_spacing
                    x2 = x1 + dX_shift
                    y2 = y1 - dY_shift
                    x3 = x1 + dX_spacing
                    y3 = y1
            else:
                if row % 2 == 0:
                    x1 = xleft + column * dX_shift
                    y1 = ybottom + (row + 1)* dY_spacing
                    x2 = x1 + dX_shift
                    y2 = y1 - dY_shift
                    x3 = x1 + dX_spacing
                    y3 = y1
                else:
                    x1 = xleft + column * dX_shift
                    y1 = ybottom + (row )* dY_spacing
                    x2 = x1 + dX_spacing
                    y2 = y1
                    x3 = x1 + dX_shift
                    y3 = y1 + dY_spacing
            # Closed ring: first vertex repeated as the fourth point.
            aCoords = np.full((4,2), -9999.0, dtype=float)
            ring = ogr.Geometry(ogr.wkbLinearRing)
            ring.AddPoint(x1, y1)
            ring.AddPoint(x2, y2)
            ring.AddPoint(x3, y3)
            ring.AddPoint(x1, y1)
            pPolygon = ogr.Geometry(ogr.wkbPolygon)
            pPolygon.AddGeometry(ring)
            # The single Feature object is reused for every triangle.
            pFeature.SetGeometry(pPolygon)
            pFeature.SetField("id", lID)
            pLayer.CreateFeature(pFeature)
            lID = lID + 1
            #dummy = loads( ring.ExportToWkt() )
            #aCoords = dummy.exterior.coords
            aCoords[0,0] = x1
            aCoords[0,1] = y1
            aCoords[1,0] = x2
            aCoords[1,1] = y2
            aCoords[2,0] = x3
            aCoords[2,1] = y3
            aCoords[3,0] = x1
            aCoords[3,1] = y1
            dummy1= np.array(aCoords)
            # NOTE(review): despite the name, this is a triangle cell.
            pHexagon = convert_pcs_coordinates_to_cell(1, dummy1)
            aTin.append(pHexagon)
            pass
    # Releasing the references flushes/closes the output dataset.
    pDataset = pLayer = pFeature = None
    return aTin
| 29.435714 | 92 | 0.499151 | 451 | 4,121 | 4.390244 | 0.305987 | 0.035354 | 0.034343 | 0.030303 | 0.226263 | 0.170707 | 0.170707 | 0.170707 | 0.170707 | 0.168687 | 0 | 0.051293 | 0.408639 | 4,121 | 139 | 93 | 29.647482 | 0.761182 | 0.078379 | 0 | 0.337209 | 0 | 0 | 0.005817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011628 | false | 0.011628 | 0.05814 | 0 | 0.081395 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74dfa0968b1a584e0d2faaacfbe4171a4e652fc1 | 1,415 | py | Python | 450.Delete-Node-in-a-BST.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | 18 | 2018-07-14T12:45:37.000Z | 2022-03-26T14:51:04.000Z | 450.Delete-Node-in-a-BST.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | null | null | null | 450.Delete-Node-in-a-BST.py | mickey0524/leetcode | 6bedeb6ff29b02a97178cca464c5fd639951801f | [
"MIT"
] | 3 | 2019-05-29T04:09:22.000Z | 2021-06-07T23:37:46.000Z | # https://leetcode.com/problems/delete-node-in-a-bst/
#
# algorithms
# Medium (38.78%)
# Total Accepted: 52,907
# Total Submissions: 136,417
# beats 93.27% of python submissions
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    """Iterative deletion of a node from a binary search tree."""

    def deleteNode(self, root, key):
        """
        Remove the node holding *key* from the BST rooted at *root*.

        :type root: TreeNode
        :type key: int
        :rtype: TreeNode
        """
        # Locate the target node while remembering its parent.
        target, parent = root, root
        while target is not None and target.val != key:
            parent = target
            target = target.left if target.val > key else target.right

        if target is None:
            # Key is absent: the tree is returned unchanged.
            return root

        # Compute the subtree that takes the target's place: the right
        # child is promoted, with the former left subtree hung under the
        # minimum node of the right subtree.
        if target.left is None and target.right is None:
            replacement = None
        elif target.left is None:
            replacement = target.right
        elif target.right is None:
            replacement = target.left
        else:
            successor = target.right
            while successor.left is not None:
                successor = successor.left
            successor.left = target.left
            replacement = target.right

        # Re-link under the parent; when the target was the root itself
        # (parent.val == key), the replacement becomes the new root.
        if parent.val > key:
            parent.left = replacement
            return root
        if parent.val < key:
            parent.right = replacement
            return root
        return replacement
| 23.983051 | 53 | 0.504594 | 165 | 1,415 | 4.266667 | 0.339394 | 0.076705 | 0.051136 | 0.039773 | 0.160511 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022809 | 0.411307 | 1,415 | 58 | 54 | 24.396552 | 0.822329 | 0.267138 | 0 | 0.15625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e04283616bdcecdb6e7f34d9657947a999aff3 | 8,407 | py | Python | tests/components/test_tasks.py | jbenden/pipeline | 43c5196e466324007cf6e2e173d4610102d6a838 | [
"MIT"
] | null | null | null | tests/components/test_tasks.py | jbenden/pipeline | 43c5196e466324007cf6e2e173d4610102d6a838 | [
"MIT"
] | null | null | null | tests/components/test_tasks.py | jbenden/pipeline | 43c5196e466324007cf6e2e173d4610102d6a838 | [
"MIT"
] | null | null | null | """Testing of class Tasks."""
# pylint: disable=no-self-use, invalid-name
import unittest
from hamcrest import assert_that, equal_to
from spline.components.tasks import Tasks, worker
from spline.components.hooks import Hooks
from spline.components.config import ApplicationOptions
from spline.pipeline import PipelineData
class FakePipeline(object):
    """Minimal pipeline stand-in carrying just the fields the tests need."""

    def __init__(self, hooks=None):
        """Create the fake with empty model/variables and default options."""
        self.model = {}
        self.variables = {}
        self.data = PipelineData(hooks)
        self.options = ApplicationOptions(definition='fake.yaml')
class TestTasks(unittest.TestCase):
    """Testing of class Tasks."""

    def test_tasks_ordered(self):
        """Three ordered tasks run one after another."""
        fake = FakePipeline()
        runner = Tasks(fake, parallel=False)
        document = [{'shell': {'script': 'echo hello1', 'when': ''}},
                    {'shell': {'script': 'echo hello2', 'when': ''}},
                    {'python': {'script': 'print("hello3")', 'when': ''}}]
        result = runner.process(document)
        greetings = [entry for entry in result['output'] if 'hello' in entry]
        assert_that(result['success'], equal_to(True))
        assert_that(len(greetings), equal_to(3))
        assert_that(greetings, equal_to(['hello1', 'hello2', 'hello3']))

    def test_two_tasks_parallel(self):
        """Two tasks executed in parallel."""
        fake = FakePipeline()
        runner = Tasks(fake, parallel=True)
        definition = [{'shell': {'script': 'echo hello1', 'when': ''}},
                      {'shell': {'script': 'echo hello2', 'when': ''}}]
        result = runner.process(definition)
        greetings = sorted(entry for entry in result['output'] if 'hello' in entry)
        assert_that(result['success'], equal_to(True))
        assert_that(len(greetings), equal_to(2))
        assert_that(greetings, equal_to(['hello1', 'hello2']))

    def test_failed_ordered(self):
        """Cleanup hook runs when an ordered task fails."""
        hooks = Hooks()
        hooks.cleanup = 'echo cleanup hello'
        fake = FakePipeline(hooks=hooks)
        runner = Tasks(fake, parallel=False)
        definition = [{'shell': {'script': 'exit 123', 'when': ''}},
                      {'shell': {'script': 'echo hello', 'when': ''}}]
        result = runner.process(definition)
        greetings = [entry for entry in result['output'] if 'hello' in entry]
        assert_that(result['success'], equal_to(False))
        assert_that(len(greetings), equal_to(1))
        assert_that(greetings[0], equal_to('cleanup hello'))

    def test_failed_parallel(self):
        """Cleanup hook runs when a parallel task fails."""
        hooks = Hooks()
        hooks.cleanup = 'echo cleanup 123'
        fake = FakePipeline(hooks=hooks)
        runner = Tasks(fake, parallel=True)
        definition = [{'shell': {'script': 'exit 123', 'when': ''}},
                      {'shell': {'script': 'echo hello', 'when': ''}}]
        result = runner.process(definition)
        greetings = sorted(entry for entry in result['output']
                           if 'hello' in entry or 'cleanup' in entry)
        assert_that(result['success'], equal_to(False))
        assert_that(len(greetings), equal_to(2))
        assert_that(greetings, equal_to(['cleanup 123', 'hello']))

    def test_failed_two_blocks(self):
        """A failure in the first block skips the following blocks."""
        hooks = Hooks()
        hooks.cleanup = 'echo cleanup hello'
        fake = FakePipeline(hooks=hooks)
        runner = Tasks(fake, parallel=False)
        definition = [{'shell': {'script': 'exit 123', 'when': ''}},
                      {'shell': {'script': 'echo hello1', 'when': ''}},
                      {'env': {'block': 'two'}},
                      {'shell': {'script': 'echo hello2', 'when': ''}}]
        result = runner.process(definition)
        greetings = [entry for entry in result['output'] if 'hello' in entry]
        assert_that(result['success'], equal_to(False))
        assert_that(len(greetings), equal_to(1))
        assert_that(greetings[0], equal_to('cleanup hello'))

    def test_tags_ordered(self):
        """Only tasks matching the configured tags are executed."""
        fake = FakePipeline()
        runner = Tasks(fake, parallel=False)
        definition = [{'shell': {'script': 'echo hello1', 'when': '', 'tags': ['first']}},
                      {'shell': {'script': 'echo hello2', 'when': '', 'tags': ['second']}}]

        for tag, expected in (('first', 'hello1'), ('second', 'hello2')):
            fake.options.tags = [tag]
            result = runner.process(definition)
            greetings = [entry for entry in result['output'] if 'hello' in entry]
            assert_that(len(greetings), equal_to(1))
            assert_that(greetings[0], equal_to(expected))

    def test_env_ordered(self):
        """Environment variables are visible to templates and to the shell."""
        fake = FakePipeline()
        runner = Tasks(fake, parallel=False)
        definition = [{'env': {'message': 'hello'}},
                      {'shell': {'script': 'echo "1:{{env.message}}"', 'when': ''}},
                      {'shell': {'script': 'echo "2:$message"', 'when': ''}}]
        result = runner.process(definition)
        greetings = [entry for entry in result['output'] if 'hello' in entry]
        assert_that(result['success'], equal_to(True))
        assert_that(len(greetings), equal_to(2))
        assert_that(greetings, equal_to(['1:hello', '2:hello']))

    def test_worker(self):
        """Worker function used by class Tasks for parallel execution."""
        data = {'id': 1, 'creator': 'shell',
                'entry': {'script': 'echo "{{model.mode}}:{{env.message}} {{ variables.message }}"',
                          'when': ''},
                'env': {'message': 'hello'}, 'model': {'mode': 'test'}, 'item': None,
                'dry_run': False, 'debug': False, 'variables': {'message': 'world'}, 'strict': False,
                'temporary_scripts_path': ''}
        result = worker(data)
        greetings = [entry for entry in result['output'] if 'hello' in entry]
        assert_that(result['success'], equal_to(True))
        assert_that(len(greetings), equal_to(1))
        assert_that(greetings[0], equal_to('test:hello world'))

    def test_dry_run(self):
        """Dry run prints the scripts and forces ordered execution."""
        fake = FakePipeline()
        fake.options.dry_run = True
        runner = Tasks(fake, parallel=True)
        definition = [{'shell': {'script': 'echo hello1', 'when': ''}},
                      {'shell': {'script': 'echo hello2', 'when': ''}}]
        result = runner.process(definition)
        printed = [entry for entry in result['output'] if entry.strip()]
        assert_that(result['success'], equal_to(True))
        assert_that(len(printed), equal_to(4))
        assert_that(runner.parallel, equal_to(False))
        assert_that(printed, equal_to(['#!/bin/bash', 'echo hello1',
                                       '#!/bin/bash', 'echo hello2']))

    def test_variables(self):
        """A task result stored in a variable is usable by later tasks."""
        fake = FakePipeline()
        runner = Tasks(fake, parallel=False)
        document = [{'shell': {'script': 'echo hello1', 'variable': 'hello1', 'when': ''}},
                    {'shell': {'script': 'echo {{ variables.hello1 }}', 'when': ''}}]
        result = runner.process(document)
        greetings = [entry for entry in result['output'] if 'hello' in entry]
        assert_that(result['success'], equal_to(True))
        assert_that(len(greetings), equal_to(2))
        assert_that(greetings, equal_to(['hello1', 'hello1']))
| 43.559585 | 104 | 0.569406 | 940 | 8,407 | 4.973404 | 0.12234 | 0.08984 | 0.068449 | 0.030588 | 0.698824 | 0.679144 | 0.629305 | 0.61369 | 0.561711 | 0.543316 | 0 | 0.0139 | 0.246937 | 8,407 | 192 | 105 | 43.786458 | 0.72453 | 0.067801 | 0 | 0.542254 | 0 | 0 | 0.156966 | 0.006825 | 0 | 0 | 0 | 0 | 0.295775 | 1 | 0.077465 | false | 0 | 0.042254 | 0 | 0.133803 | 0.007042 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e0e926c6c1b0dd7463e03c1ecc0a04002c96d2 | 9,119 | py | Python | src/utils/stalkMarketGraphs.py | amadea-system/StalkMarketBot | dc43e496e49361fe75ce9b94486981e134edc39e | [
"Apache-2.0"
] | null | null | null | src/utils/stalkMarketGraphs.py | amadea-system/StalkMarketBot | dc43e496e49361fe75ce9b94486981e134edc39e | [
"Apache-2.0"
] | null | null | null | src/utils/stalkMarketGraphs.py | amadea-system/StalkMarketBot | dc43e496e49361fe75ce9b94486981e134edc39e | [
"Apache-2.0"
] | null | null | null | """
Graphing code for Stalk Market Predictions
Part of Stalk Market Bot.
"""
import logging
from io import BytesIO
from typing import TYPE_CHECKING, Optional, Dict, List, Union, Tuple, NamedTuple, Any
import matplotlib.pyplot as plt
from scipy.interpolate import Akima1DInterpolator, pchip_interpolate
import numpy as np
import discord
from utils.stalkMarketPredictions import day_segment_names, Pattern, fix_sell_prices_length, analyze_possibilities, max_guild_predictions
if TYPE_CHECKING:
from utils.stalkMarketHelpers import UserPredictions
log = logging.getLogger(__name__)
def smooth_plot(x_data: List[Any], y_data: List[float]):
    """
    Interpolate *y_data* onto a dense axis for smooth plotting.

    Note: *x_data* is currently unused; the x positions are simply the
    indices of *y_data*. Returns an ``(x, y)`` pair of numpy arrays with
    300 samples spanning the original index range.
    """
    knots = np.arange(len(y_data))
    dense_x = np.linspace(knots[0], knots[-1], 300)
    # Monotone cubic (PCHIP) interpolation avoids overshoot between points.
    dense_y = pchip_interpolate(knots, y_data, dense_x)
    return dense_x, dense_y
def format_plot(ax: plt.Axes):
    """Apply the shared cosmetic formatting to a finished plot."""
    ax.legend(shadow=True, fontsize='medium')
    ax.grid(linewidth="0.5", color="#283442")  # gridlines
    ax.set_axisbelow(True)  # keep gridlines behind the data

    # Hide the rectangular border around the axes.
    for edge in ('top', 'right', 'bottom', 'left'):
        ax.spines[edge].set_visible(False)

    ax.tick_params(color="#000000")

    # Leave room for the rotated x axis labels.
    plt.gcf().subplots_adjust(bottom=0.15)
    plt.tight_layout()
def matplotgraph_predictions(user: discord.Member, predictions: List[Pattern], min_max_pattern: Pattern,
                             average_prices: List[float], testing=False) -> Optional[BytesIO]:
    """
    Graph one user's stalk market predictions.

    Args:
        user: Owner of the predictions; ``None`` yields a generic title.
        predictions: All matching price patterns (not used directly by the
            matplotlib renderer; the aggregate inputs below summarise them).
        min_max_pattern: Aggregate pattern holding per-slot min/max/actual prices.
        average_prices: Average potential price per half-day slot.
        testing: When True, save ``test_plot.png`` to disk and return None
            instead of producing an in-memory buffer.

    Returns:
        PNG image buffer of the rendered plot, or None in testing mode.
    """
    x_axis = day_segment_names[2:]

    # Slots 0/1 are the Sunday buy slots; only Monday onward is plotted.
    abs_min_points = [price.min for price in min_max_pattern.prices][2:]
    abs_max_points = [price.max for price in min_max_pattern.prices][2:]

    if min_max_pattern.prices[0].min is not None:
        # Constant line at the Sunday buy price across the whole week.
        buy_price_points = [min_max_pattern.prices[0].min for _ in abs_max_points]
    else:
        buy_price_points = None

    actual_price_points = [price.actual if price.is_actual_price() else None for price in min_max_pattern.prices][2:]
    avg_points = average_prices

    title = f"{user.display_name}'s Stalk Market Predictions" if user is not None else "Stalk Market Predictions"

    # Set up the plot.
    plt.style.use('dark_background')
    fig: plt.Figure
    ax: plt.Axes
    fig, ax = plt.subplots()
    # Fix: the title was computed but never applied to the figure.
    ax.set_title(title)

    ax.plot(*smooth_plot(x_axis, avg_points), color="#1f77b4", label="Potential Price")
    # Invisible min/max traces keep autoscaling consistent with the band below.
    ax.plot(x_axis, abs_min_points, color="#000000", alpha=0)
    ax.plot(x_axis, abs_max_points, color="#000000", alpha=0)

    smooth_x, smooth_min_points = smooth_plot(x_axis, abs_min_points)
    smooth_x, smooth_max_points = smooth_plot(x_axis, abs_max_points)
    ax.fill_between(smooth_x, smooth_min_points, smooth_max_points, alpha=0.5, color="#1f77b4")

    if buy_price_points is not None:
        ax.plot(x_axis, buy_price_points, color="#FF7F0E", alpha=0.7, marker=0, linestyle='None', label="Buy Price")

    ax.plot(x_axis, actual_price_points, 'o', color="#C5FFFF", label="Actual Price")

    plt.xticks(np.arange(12), x_axis, rotation=-50)  # Day names on the x axis.
    format_plot(ax)

    if testing:
        plt.savefig("test_plot.png", format="png", dpi=150)
        plt.close()
        return None

    imgBuffer = BytesIO()
    plt.savefig(imgBuffer, format="png", dpi=150)
    plt.close()
    return imgBuffer
def matplotgraph_guild_predictions(users_predictions: List['UserPredictions']) -> BytesIO:
    """Graph the best-case prediction of every user in the guild on one plot."""
    x_axis = day_segment_names[2:]

    plt.style.use('dark_background')
    fig: plt.Figure
    ax: plt.Axes
    fig, ax = plt.subplots()

    # Only plot up to the configured maximum number of users.
    for pred in users_predictions[:max_guild_predictions]:
        best = pred.best()
        series = [price.actual if price.is_actual_price() else price.max for price in best.prices][2:]
        ax.plot(*smooth_plot(x_axis, series), label=f"{pred.user_name} - Best")

    plt.xticks(np.arange(12), x_axis, rotation=-50)  # Day names on the x axis.
    format_plot(ax)

    buffer = BytesIO()
    plt.savefig(buffer, format="png", dpi=150)
    plt.close()
    return buffer
if __name__ == '__main__':
    # Manual smoke test: feed a short price history through the predictor
    # and render the result to test_plot.png.
    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
    # test_graph()
    # buy_price = 90
    # sell_price = [buy_price, buy_price]
    #
    # sell_price.append(78)
    # sell_price.append(74)
    #
    # sell_price.append(70)
    # sell_price.append(104)
    #
    # sell_price.append(167)
    # sell_price.append(518)
    # #
    # sell_price.append(160)
    # sell_price.append(98)
    buy_price = 93
    # The buy price occupies the first two (Sunday) slots.
    sell_price = [buy_price, buy_price]
    sell_price.append(100)
    sell_price.append(100)
    sell_price.append(98)
    # Pad the partial week out to the full expected length.
    sell_price = fix_sell_prices_length(sell_price)
    possibilities, min_max_pattern, avg_prices = analyze_possibilities(sell_price)
    print(avg_prices)
    for prediction in possibilities:
        # desc.append(prediction.description)
        # NOTE(review): several day labels below are duplicated
        # ("Wednesday AM" twice, etc.) instead of AM/PM pairs — verify.
        log.info(f"\nDesc: {prediction.description}\n\n"
                 f"Sunday Sell: {prediction.prices[0]}\n"
                 f"Monday AM: {prediction.prices[2]}\n"
                 f"Monday PM: {prediction.prices[3]}\n"
                 f"Tuesday AM: {prediction.prices[4]}\n"
                 f"Tuesday PM: {prediction.prices[5]}\n"
                 f"Wednesday AM: {prediction.prices[6]}\n"
                 f"Wednesday AM: {prediction.prices[7]}\n"
                 f"Thursday AM: {prediction.prices[8]}\n"
                 f"Thursday AM: {prediction.prices[9]}\n"
                 f"Friday AM: {prediction.prices[10]}\n"
                 f"Friday AM: {prediction.prices[11]}\n"
                 f"Saturday AM: {prediction.prices[12]}\n"
                 f"Saturday AM: {prediction.prices[13]}"
                 f"\n")
    matplotgraph_predictions(None, possibilities, min_max_pattern, avg_prices, testing=True)
    print("Done")
74e246baa4ba88b3c44d649fb3735c42f268b166 | 2,719 | py | Python | addr/parser.py | euske/osmtools | 581da9129f489cb57763578127ead42fa43b5c1f | [
"MIT"
] | null | null | null | addr/parser.py | euske/osmtools | 581da9129f489cb57763578127ead42fa43b5c1f | [
"MIT"
] | null | null | null | addr/parser.py | euske/osmtools | 581da9129f489cb57763578127ead42fa43b5c1f | [
"MIT"
] | 3 | 2015-12-27T22:13:40.000Z | 2019-12-23T14:34:54.000Z | #!/usr/bin/env python
import sys
import xml.parsers.expat
## Parser
##
class Parser(object):
    """Expat-based SAX parser for JPGIS/KSJ place-name XML (Python 2).

    Collects point coordinates keyed by GM_Point id in ``self.pos`` and
    (name, area-code) pairs keyed by the same id in ``self.name``.
    A small integer state machine (``self._state``) tracks which element's
    character data is currently expected:
      0 idle, 1 inside GM_Point, 2 expecting coordinates,
      3 inside ksj:FB01, 4 expecting a name, 5 expecting an area code.
    """

    def __init__(self):
        self.pos = {}    # GM_Point id -> (lat, lng) strings
        self.name = {}   # GM_Point id -> (name, area code)
        self._state = 0
        self._expat = xml.parsers.expat.ParserCreate()
        self._expat.StartElementHandler = self._start_element
        self._expat.EndElementHandler = self._end_element
        self._expat.CharacterDataHandler = self._char_data
        return

    def feed(self, data):
        """Feed raw XML bytes into the underlying expat parser."""
        self._expat.Parse(data)
        return

    def get(self):
        """Yield ((name, area code), (lat, lng)) for every collected point."""
        # NOTE: dict.iteritems() is Python 2 only.
        for (k,(x,y)) in self.pos.iteritems():
            name = self.name[k]
            yield (name,(x,y))
        return

    def _start_element(self, name, attrs):
        """Advance the state machine on an opening tag."""
        #print 'start', name, attrs
        if name == 'jps:GM_Point':
            self.id = attrs['id']
            self._state = 1
        elif self._state == 1 and name == 'DirectPosition.coordinate':
            self._state = 2
        elif name == 'ksj:FB01':
            self._state = 3
        elif self._state == 3 and name == 'ksj:POS':
            # Reference back to the GM_Point this feature describes.
            self.idref = attrs['idref']
        elif self._state == 3 and name in ('ksj:NA0','ksj:NA8'):
            self._state = 4
        elif self._state == 3 and name == 'ksj:AAC':
            self._state = 5
        return

    def _end_element(self, name):
        """Record the finished feature / fall back to the enclosing state."""
        if name == 'ksj:FB01':
            # Feature complete: bind the collected name and code to the point.
            self.name[self.idref] = (self._name1, self._cid)
        elif self._state == 2:
            self._state = 0
        elif self._state == 4:
            self._state = 3
        elif self._state == 5:
            self._state = 3
        return

    def _char_data(self, data):
        """Capture text content according to the current state."""
        #print 'char', len(data)
        if self._state == 2:
            # Coordinate text is "lat lng" separated by a single space.
            (lat,lng) = data.split(' ')
            self.pos[self.id] = (lat,lng)
            #print (float(x), float(y))
        elif self._state == 4:
            self._name1 = data
            #print (data,)
        elif self._state == 5:
            self._cid = int(data)
            #print (data,)
        return
# main
def main(argv):
    """Extract (area_code, name, lat, lng) rows from P??-??_??.xml files
    inside the zip archives given on the command line and write them as CSV
    to stdout. Returns None (treated as exit code 0)."""
    import re
    import os.path
    import zipfile
    import csv
    pat = re.compile(r'P\d\d-\d\d_\d\d.xml')
    args = argv[1:]
    out = csv.writer(sys.stdout)
    for path in args:
        zf = zipfile.ZipFile(path)
        for name in zf.namelist():
            # only process the prefecture point files, skip everything else
            if not pat.match(os.path.basename(name)): continue
            # BUG FIX: "print >>sys.stderr, name" is Python-2-only syntax and a
            # SyntaxError on Python 3; sys.stderr.write works on both versions.
            sys.stderr.write(name + '\n')
            data = zf.read(name)
            p = Parser()
            p.feed(data)
            for ((name,cid),(lat,lng)) in p.get():
                row = (cid,name.encode('utf-8'),lat,lng)
                out.writerow(row)
        zf.close()
    return
if __name__ == '__main__': sys.exit(main(sys.argv))
| 28.030928 | 70 | 0.515631 | 341 | 2,719 | 3.953079 | 0.293255 | 0.126855 | 0.086795 | 0.031157 | 0.12092 | 0.075668 | 0.035608 | 0 | 0 | 0 | 0 | 0.01644 | 0.351232 | 2,719 | 96 | 71 | 28.322917 | 0.747732 | 0.049283 | 0 | 0.210526 | 0 | 0 | 0.047009 | 0.009713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092105 | false | 0 | 0.078947 | 0 | 0.276316 | 0.013158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e3c847b45abe7dd310f46f2b20094db76be087 | 9,934 | py | Python | tests/test_functions.py | Qfabiolous/QuanGuru | 285ca44ae857cc61337f73ea2eb600f485a09e32 | [
"BSD-3-Clause"
] | 9 | 2021-05-23T06:30:45.000Z | 2021-12-27T13:33:54.000Z | tests/test_functions.py | cahitkargi/QuanGuru | 9b5c94465cd58bc32f6ff845f29dfdec7e0f9075 | [
"BSD-3-Clause"
] | 26 | 2022-03-18T02:40:54.000Z | 2022-03-25T07:00:25.000Z | tests/test_functions.py | cahitkargi/QuanGuru | 9b5c94465cd58bc32f6ff845f29dfdec7e0f9075 | [
"BSD-3-Clause"
] | 5 | 2021-05-23T06:30:24.000Z | 2022-02-04T02:40:08.000Z | import numpy as np
import pytest
from quanguru.QuantumToolbox import linearAlgebra as la #pylint: disable=import-error
from quanguru.QuantumToolbox import operators as ops #pylint: disable=import-error
from quanguru.QuantumToolbox import functions as fns #pylint: disable=import-error
#testCase = collections.namedtuple('testCase', ['operator', 'state', 'expected'])
def test_expectationWithNumber(helpers):
    # On random kets (and their density matrices) the expectation of the number
    # operator must equal the population-weighted sum of the photon numbers.
    for _ in range(3):
        state, dim, excs = helpers.generateRndPureState()
        analytic = sum(level * pop for level, pop in excs.items())
        measured = fns.expectation(ops.number(dim), state)
        assert round(measured, 12) == round(analytic, 12)
        measured_dm = fns.expectation(ops.number(dim), la.outerProd(state))
        assert round(measured_dm, 12) == round(analytic, 12)
def test_expectationWithJz(helpers):
    # Same check as the number-operator test but with Jz, whose eigenvalue for
    # excitation level k is (j - k) with j = (dim-1)/2.
    for _ in range(3):
        state, dim, excs = helpers.generateRndPureState()
        j = (dim - 1) / 2
        analytic = sum((j - level) * pop for level, pop in excs.items())
        assert round(fns.expectation(ops.Jz(j), state), 12) == round(analytic, 12)
        assert round(fns.expectation(ops.Jz(j), la.outerProd(state)), 12) == round(analytic, 12)
def test_expectationWithSigmaz(helpers, singleQubitOperators):
    # sigma-z expectation on random qubit kets (and density matrices):
    # populations weighted by +1 for level 0 and -1 for level 1.
    op = singleQubitOperators['sz']
    for _ in range(3):
        state, _, excs = helpers.generateRndPureState(dim=2)
        analytic = sum(((not bool(level)) - level) * pop for level, pop in excs.items())
        assert round(fns.expectation(op, state), 12) == round(analytic, 12)
        assert round(fns.expectation(op, la.outerProd(state)), 12) == round(analytic, 12)
@pytest.mark.parametrize("op, ex", [
    ['sz', [1, -1, 0, 0, 0, 0]], ['sy', [0, 0, 0, 0, 1, -1]], ['sx', [0, 0, 1, -1, 0, 0]]
])
def test_expectationWithSigmaOps(op, ex, specialQubitStates, singleQubitOperators):
    # Expectation of each Pauli operator against the six eigenstates, first as
    # kets and then as the corresponding density matrices.
    op = singleQubitOperators[op]
    labels = ('1', '0', 'x+', 'x-', 'y+', 'y-')
    ket_vals = [fns.expectation(op, specialQubitStates[lbl]) for lbl in labels]
    assert [round(a, 12) for a in ket_vals] == ex
    dm_vals = [fns.expectation(op, la.outerProd(specialQubitStates[lbl])) for lbl in labels]
    assert [round(a, 12) for a in dm_vals] == ex
def test_fidelityPure(helpers):
    # Fidelity between random kets must match |sum_k sqrt(p1_k * p2_k)|^2
    # computed directly from the populations; also check the mixed ket/dm and
    # dm/dm call signatures.
    for _ in range(3):
        ket1, dim1, pops1 = helpers.generateRndPureState()
        ket2, _, pops2 = helpers.generateRndPureState(dim=dim1)
        reference = abs(sum(np.sqrt(pops2[k] * pops1[k]) for k in pops1 if k in pops2)) ** 2
        assert round(fns.fidelityPure(ket1, ket2), 12) == round(reference, 12)
        dm1 = la.outerProd(ket1)
        assert round(fns.fidelityPure(dm1, ket2), 12) == round(reference, 12)
        assert round(fns.fidelityPure(dm1, la.outerProd(ket2)), 12) == round(reference, 12)
# Labels of the states provided by the specialQubitStates fixture: the six
# single-qubit eigenstates, the four Bell states and four two-qubit product states.
stateNames = ['0', '1', 'x+', 'x-', 'y+', 'y-']
bellStateN = ['BellPhi+', 'BellPhi-', 'BellPsi+', 'BellPsi-']
productNames = ['product1', 'product2', 'product3', 'product4']
@pytest.mark.parametrize("state1, state2, fid", [
    *[(stateNames[0], name, f) for name, f in zip(stateNames, [1, 0, 0.5, 0.5, 0.5, 0.5])],
    *[(stateNames[1], name, f) for name, f in zip(stateNames, [0, 1, 0.5, 0.5, 0.5, 0.5])],
    *[(stateNames[2], name, f) for name, f in zip(stateNames, [0.5, 0.5, 1, 0, 0.5, 0.5])],
    *[(stateNames[3], name, f) for name, f in zip(stateNames, [0.5, 0.5, 0, 1, 0.5, 0.5])],
    *[(stateNames[4], name, f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 1, 0])],
    *[(stateNames[5], name, f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 0, 1])],
    *[(bellStateN[0], name, f) for name, f in zip(bellStateN, [1, 0, 0, 0])],
    *[(bellStateN[1], name, f) for name, f in zip(bellStateN, [0, 1, 0, 0])],
    *[(bellStateN[2], name, f) for name, f in zip(bellStateN, [0, 0, 1, 0])],
    *[(bellStateN[3], name, f) for name, f in zip(bellStateN, [0, 0, 0, 1])]
])
def test_fidelityPureWithSpecialQubitStates(state1, state2, fid, specialQubitStates):
    # Known ket pairs with tabulated fidelities; checked as ket/ket, dm/ket and dm/dm.
    ket1 = specialQubitStates[state1]
    ket2 = specialQubitStates[state2]
    assert round(fns.fidelityPure(ket1, ket2), 12) == fid
    dm1 = la.outerProd(ket1)
    assert round(fns.fidelityPure(dm1, ket2), 12) == fid
    assert round(fns.fidelityPure(dm1, la.outerProd(ket2)), 12) == fid
@pytest.mark.parametrize("mat1, mat2, fid", [
    *[(stateNames[0]+'dm', name+'dm', f) for name, f in zip(stateNames, [1, 0, 0.5, 0.5, 0.5, 0.5])],
    *[(stateNames[1]+'dm', name+'dm', f) for name, f in zip(stateNames, [0, 1, 0.5, 0.5, 0.5, 0.5])],
    *[(stateNames[2]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 1, 0, 0.5, 0.5])],
    *[(stateNames[3]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 0, 1, 0.5, 0.5])],
    *[(stateNames[4]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 1, 0])],
    *[(stateNames[5]+'dm', name+'dm', f) for name, f in zip(stateNames, [0.5, 0.5, 0.5, 0.5, 0, 1])],
    *[(bellStateN[0]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 0, 0, 0])],
    *[(bellStateN[1]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 1, 0, 0])],
    *[(bellStateN[2]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 0, 1, 0])],
    *[(bellStateN[3]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 0, 0, 1])]
])
def test_fidelityWithPureDensityMatrices(mat1, mat2, fid, specialQubitStates):
    # fidelity of two known pure-state density matrices against tabulated values
    assert round(fns.fidelityPure(specialQubitStates[mat1], specialQubitStates[mat2]), 12) == fid
def test_entropyPureState(specialQubitStates):
    # The von Neumann entropy of any pure state is zero; check every fixture
    # state both as a ket and as its density matrix.
    for ket in specialQubitStates.values():
        assert round(fns.entropy(ket), 12) == 0
        assert round(fns.entropy(la.outerProd(ket)), 12) == 0
@pytest.mark.parametrize('name', bellStateN)
def test_entropyReducedBell(name, specialQubitStates):
    # Tracing one qubit out of a Bell pair leaves a maximally mixed qubit,
    # whose entropy is ln(2); check both reduced states and their dm forms.
    reduced = [la.partialTrace(which, [2, 2], specialQubitStates[name]) for which in (0, 1)]
    entropies = [fns.entropy(r) for r in reduced]
    expected = round(np.log(2), 12)
    assert entropies[0] == entropies[1]
    for red, ent in zip(reduced, entropies):
        assert round(ent, 12) == expected
        assert round(fns.entropy(la.outerProd(red)), 12) == expected
@pytest.mark.parametrize('name, val', [*[(b, 1) for b in bellStateN], *[(p, 0) for p in productNames]])
def test_concurrenceBellAndProduct(name, val, specialQubitStates):
    # Concurrence is 1 for Bell states and 0 for product states; check both
    # the ket and the density-matrix representation.
    ket = specialQubitStates[name]
    for representation in (ket, la.outerProd(ket)):
        assert round(fns.concurrence(representation), 12) == val
# 1/sqrt(2): trace distance between non-orthogonal single-qubit eigenstates below
sq2 = 1/np.sqrt(2)
@pytest.mark.parametrize("mat1, mat2, dis", [
    *[(stateNames[0]+'dm', name+'dm', f) for name, f in zip(stateNames, [0, 1, sq2, sq2, sq2, sq2])],
    *[(stateNames[1]+'dm', name+'dm', f) for name, f in zip(stateNames, [1, 0, sq2, sq2, sq2, sq2])],
    *[(stateNames[2]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, 0, 1, sq2, sq2])],
    *[(stateNames[3]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, 1, 0, sq2, sq2])],
    *[(stateNames[4]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, sq2, sq2, 0, 1])],
    *[(stateNames[5]+'dm', name+'dm', f) for name, f in zip(stateNames, [sq2, sq2, sq2, sq2, 1, 0])],
    *[(bellStateN[0]+'dm', name+'dm', f) for name, f in zip(bellStateN, [0, 1, 1, 1])],
    *[(bellStateN[1]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 0, 1, 1])],
    *[(bellStateN[2]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 1, 0, 1])],
    *[(bellStateN[3]+'dm', name+'dm', f) for name, f in zip(bellStateN, [1, 1, 1, 0])]
])
def test_traceDistanceWithPureDensityMatrices(mat1, mat2, dis, specialQubitStates):
    # trace distance between known pure-state density matrices vs tabulated values
    computed = fns.traceDistance(specialQubitStates[mat1], specialQubitStates[mat2])
    assert round(computed, 12) == round(dis, 12)
| 58.781065 | 118 | 0.649487 | 1,457 | 9,934 | 4.415923 | 0.131778 | 0.014921 | 0.016786 | 0.019894 | 0.61004 | 0.551912 | 0.515853 | 0.460989 | 0.409854 | 0.38281 | 0 | 0.052037 | 0.179787 | 9,934 | 168 | 119 | 59.130952 | 0.737604 | 0.147876 | 0 | 0.234043 | 0 | 0 | 0.0296 | 0 | 0 | 0 | 0 | 0 | 0.170213 | 1 | 0.078014 | false | 0 | 0.035461 | 0 | 0.113475 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74e8211474a71b34bfa3a7500bf5a3ef7c8f9bf0 | 700 | py | Python | 2020/day09/two.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | 2020/day09/two.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | 2020/day09/two.py | geberl/advent-of-code | 152ac94676830ac920bf06a1a3f1aa88377cd775 | [
"MIT"
] | null | null | null | TARGET = 776203571
# Load the puzzle input: one integer per line.
data = []
with open("input.txt") as puzzle_input:
    for row in puzzle_input:
        data.append(int(row.strip()))
def contiguous_sum(index, values=None, target=None):
    """Scan for a contiguous run summing to target, starting at index.

    :param index: position in values where the run starts
    :param values: sequence of ints to scan (defaults to the module-level data)
    :param target: sum to look for (defaults to the module-level TARGET)
    :return: (True, i) when the run values[index..i] sums exactly to target,
             (False, i) when the running sum first exceeds target at i, or
             (False, len(values) - 1) when the end is reached without a match.
    """
    if values is None:
        values = data
    if target is None:
        target = TARGET
    running = 0
    for i in range(index, len(values)):
        running += values[i]
        if running == target:
            return True, i
        if running > target:
            return False, i
    # BUG FIX: the original fell off the end and implicitly returned None,
    # which crashed the caller's tuple unpacking; report "no match" instead.
    return False, len(values) - 1
# Try every starting position until a contiguous run hits the target, then
# report the sum of the smallest and largest values in that run.
for start_index in range(len(data)):
    match, end_index = contiguous_sum(start_index)
    if not match:
        continue
    print("match %d - %d" % (start_index, end_index))
    window = data[start_index:end_index + 1]
    print(min(window) + max(window))
    break
| 24.137931 | 57 | 0.594286 | 99 | 700 | 4.010101 | 0.434343 | 0.050378 | 0.075567 | 0.080605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022267 | 0.294286 | 700 | 28 | 58 | 25 | 0.781377 | 0 | 0 | 0 | 0 | 0 | 0.031429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0 | 0 | 0.136364 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f235f44770ebb4082a9f693309b40e2e0bc8f1 | 3,907 | py | Python | PIDKiller.py | godoyp/PIDKiller | 94ae8b77b5e5bca0552dee4ecaa1c1da16d3b39e | [
"MIT"
] | null | null | null | PIDKiller.py | godoyp/PIDKiller | 94ae8b77b5e5bca0552dee4ecaa1c1da16d3b39e | [
"MIT"
] | null | null | null | PIDKiller.py | godoyp/PIDKiller | 94ae8b77b5e5bca0552dee4ecaa1c1da16d3b39e | [
"MIT"
] | null | null | null | # Modulos
import PySimpleGUI as Sg
import wmi
from smb.SMBConnection import SMBConnection
from configparser import ConfigParser
from time import sleep
import sys
from multiprocessing import Process, freeze_support
# Loop-control flag (sair = 1 terminates the program).
sair = 0

# Read the target server address and credentials from config.ini.
cfg = ConfigParser()
cfg.read('config.ini')
comp = cfg.get('Server', 'IP')
user = cfg.get('Server', 'user')
passwd = cfg.get('Server', 'passwd')

# Window layout: output pane, refresh buttons and the PID-kill input row.
Sg.theme('Reddit')
layout = [
    [Sg.Text('PID Killer'), Sg.Text('Servidor:'), Sg.Text(comp)],
    [Sg.Output(size=(90, 30), key='-OUTPUT-')],
    [Sg.Button('Carregar Pool'), Sg.Button('Carregar Task'), Sg.Button('Sair')],
    [Sg.Text('Qual PID deseja finalizar?'), Sg.Input(key='input'), Sg.Button('Finalizar Processo')],
]
window = Sg.Window('PIDKiller', layout, icon='icon.ico')
# Function de Loading
def _splash():
    """Show the animated loading popup; the worker process kills this one."""
    frame = 0
    while frame < 500000:
        Sg.PopupAnimated('load.gif', background_color='white', time_between_frames=60)
        frame += 1
    # noinspection PyTypeChecker
    Sg.PopupAnimated(None)
# Function Principal
def _program():
    """Worker process: runs remote listing commands via WMI, copies the output
    files over SMB and drives the main GUI event loop until the user quits."""
    # Create the WMI connections with the configured credentials, to run
    # commands on the remote server (and clean up locally on exit).
    try:
        remoto = wmi.WMI(comp, user=user, password=passwd)
        local = wmi.WMI()
    except wmi.x_wmi:
        sleep(3)
        Sg.popup("Atenção, não foi possível conectar ao servidor! Verifique as configurações!", title="Atenção!")
        sys.exit(1)
    # Run the remote commands, dumping pool/task listings to TXT files on C:\
    remoto.Win32_Process.Create(CommandLine='cmd.exe /c C:/Windows/System32/inetsrv/appcmd.exe list wp >> '
                                            '"C:/InfoPool.txt"')
    remoto.Win32_Process.Create(CommandLine='cmd.exe /c tasklist >> "C:/InfoList.txt"')
    # SMB connection to the remote machine, used to copy the files locally
    conn = SMBConnection(user, passwd, 'client', comp)
    conn.connect(comp, 139, timeout=10000)
    global sair
    # BUG FIX: the original used "while sair := 0:", which ASSIGNS 0 to sair so
    # the loop body never runs and listapool/listatask are never defined,
    # crashing the event loop. The intended condition is equality.
    while sair == 0:
        with open('C:/InfoOutPool.txt', 'wb') as fp1:
            sleep(1)
            conn.retrieveFile('C$', '/InfoPool.txt', fp1)
        with open('C:/InfoOutPool.txt', 'r') as arquivo1:
            listapool = arquivo1.read()
        with open('C:/InfoOutList.txt', 'wb') as fp2:
            sleep(1)
            conn.retrieveFile('C$', '/InfoList.txt', fp2)
        with open('C:/InfoOutList.txt', 'r') as arquivo2:
            listatask = arquivo2.read()
        sleep(1)
        # GUI event loop; breaks out to refresh the listings or to exit
        while True:
            (event, values) = window.read(timeout=100)
            if event == 'Carregar Task':
                window['-OUTPUT-'].update(listatask)
            if event == 'Carregar Pool':
                window['-OUTPUT-'].update(listapool)
            if event == Sg.WIN_CLOSED or event == 'Sair':
                # Remove the temporary Info*.txt files on both machines first
                remoto.Win32_Process.Create(CommandLine='cmd.exe /c DEL "C:/Info*.txt"')
                local.Win32_Process.Create(CommandLine='cmd.exe /c DEL "C:/Info*.txt"')
                Sg.popup_auto_close('Saindo...', auto_close_duration=2, button_type=5, no_titlebar=True)
                sair = 1
                break
            if event == 'Finalizar Processo':
                processo = values['input']
                killer = str(processo)
                remoto.Win32_Process.Create(CommandLine="cmd.exe /b /c taskkill -pid " + killer + " /f")
                Sg.popup_ok('Processo Finalizado com Sucesso!')
                break
    conn.close()
    window.close()
if __name__ == '__main__':
    # Windows frozen-executable support for multiprocessing, then launch the
    # splash screen and the worker as separate processes.
    freeze_support()
    splash_proc = Process(target=_splash)
    worker_proc = Process(target=_program)
    for proc in (splash_proc, worker_proc):
        proc.start()
| 33.681034 | 114 | 0.594574 | 466 | 3,907 | 4.918455 | 0.446352 | 0.026178 | 0.039267 | 0.063264 | 0.118237 | 0.098168 | 0.098168 | 0.080279 | 0.041012 | 0.041012 | 0 | 0.019788 | 0.275659 | 3,907 | 115 | 115 | 33.973913 | 0.790106 | 0.113386 | 0 | 0.064103 | 0 | 0 | 0.210968 | 0.011387 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0.038462 | 0.089744 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f2722b6fd0d9275b8a2fdd984c9a1ced8700d9 | 21,298 | py | Python | eventdata/parameter_sources/randomevent.py | ywelsch/rally-eventdata-track | 148fe2ffc90f192a1d3d68c614031e40ecc67eae | [
"Apache-2.0"
] | 33 | 2017-02-22T17:59:46.000Z | 2021-11-02T07:07:40.000Z | eventdata/parameter_sources/randomevent.py | ywelsch/rally-eventdata-track | 148fe2ffc90f192a1d3d68c614031e40ecc67eae | [
"Apache-2.0"
] | 68 | 2017-03-10T12:57:36.000Z | 2021-07-14T14:26:03.000Z | eventdata/parameter_sources/randomevent.py | isabella232/rally-eventdata-track | d7f25419ba3ef554998d89caa3fdb5a2d2100d41 | [
"Apache-2.0"
] | 45 | 2017-02-22T18:03:58.000Z | 2022-01-01T02:18:41.000Z | # Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import gzip
import itertools
import json
import os
import random
import re
from eventdata.parameter_sources.timeutils import TimestampStructGenerator
from eventdata.parameter_sources.weightedarray import WeightedArray
from eventdata.utils import elasticlogs_bulk_source as ebs
cwd = os.path.dirname(__file__)
class Agent:
    """Adds useragent_* fields to generated events.

    The weighted agent table and its lookup dictionaries are loaded from
    gzipped JSON files next to this module, once per process, and cached in
    ebs.global_lookups so every instance shares them.
    """

    # Cache keys / attribute names of the JSON lookup dicts; the data file
    # stem is the key without its leading underscore.
    _LOOKUP_NAMES = ('_agents_name_lookup', '_agents_os_lookup', '_agents_os_name_lookup',
                     '_agents_os_major_lookup', '_agents_major_lookup', '_agents_device_lookup',
                     '_agent_lookup')

    def __init__(self):
        self._agents = self._cached('_agents', lambda: WeightedArray('%s/data/agents.json.gz' % cwd))
        for key in self._LOOKUP_NAMES:
            setattr(self, key, self._cached(key, lambda k=key: self._load_lookup(k[1:])))

    @staticmethod
    def _cached(key, factory):
        # Load-once, process-wide cache shared through ebs.global_lookups.
        if key not in ebs.global_lookups:
            ebs.global_lookups[key] = factory()
        return ebs.global_lookups[key]

    @staticmethod
    def _load_lookup(stem):
        # Each lookup table is a gzipped JSON dict in the data directory.
        with gzip.open('%s/data/%s.json.gz' % (cwd, stem), 'rt') as data_file:
            return json.load(data_file)

    def add_fields(self, event):
        """Populate the useragent_* fields of event from one weighted random agent."""
        agent = self._agents.get_random()
        event['useragent_name'] = self.__get_lookup_value(self._agents_name_lookup, agent[0])
        event['useragent_os'] = self.__get_lookup_value(self._agents_os_lookup, agent[1])
        event['useragent_os_name'] = self.__get_lookup_value(self._agents_os_name_lookup, agent[2])
        event['useragent_device'] = self.__get_lookup_value(self._agents_device_lookup, agent[3])
        event['useragent_os_major'] = self.__get_lookup_value(self._agents_os_major_lookup, agent[4])
        event['useragent_major'] = self.__get_lookup_value(self._agents_major_lookup, agent[5])
        event['agent'] = self.__get_lookup_value(self._agent_lookup, agent[6])

    def __get_lookup_value(self, lookup, key):
        # Empty keys mean "field absent" in the source data; pass them through.
        if key == "":
            return key
        else:
            return lookup[key]
class ClientIp:
    """Adds the clientip and geoip_* fields to generated events.

    IP tables and geo lookup dictionaries are loaded from gzipped JSON files
    once per process and cached in ebs.global_lookups.
    """

    # Cache keys / attribute names of the JSON lookup dicts; the data file
    # stem is the key without its leading underscore.
    _LOOKUP_NAMES = ('_clientips_country_name_lookup', '_clientips_country_iso_code_lookup',
                     '_clientips_continent_name_lookup', '_clientips_continent_code_lookup',
                     '_clientips_city_name_lookup')

    def __init__(self):
        # Fraction of events that draw their IP from the "rare" pool.
        self._rare_clientip_probability = 0.269736965199
        self._clientips = self._cached('_clientips',
                                       lambda: WeightedArray('%s/data/clientips.json.gz' % cwd))
        self._rare_clientips = self._cached('_rare_clientips',
                                            lambda: WeightedArray('%s/data/rare_clientips.json.gz' % cwd))
        for key in self._LOOKUP_NAMES:
            setattr(self, key, self._cached(key, lambda k=key: self._load_lookup(k[1:])))

    @staticmethod
    def _cached(key, factory):
        # Load-once, process-wide cache shared through ebs.global_lookups.
        if key not in ebs.global_lookups:
            ebs.global_lookups[key] = factory()
        return ebs.global_lookups[key]

    @staticmethod
    def _load_lookup(stem):
        # Each lookup table is a gzipped JSON dict in the data directory.
        with gzip.open('%s/data/%s.json.gz' % (cwd, stem), 'rt') as data_file:
            return json.load(data_file)

    def add_fields(self, event):
        """Pick a weighted random client IP (occasionally from the rare pool)
        and fill clientip plus the geoip_* fields of event."""
        if random.random() < self._rare_clientip_probability:
            data = self._rare_clientips.get_random()
            event['clientip'] = self.__fill_out_ip_prefix(data[0])
        else:
            data = self._clientips.get_random()
            event['clientip'] = data[0]
        event['geoip_location_lat'] = data[1][0]
        event['geoip_location_lon'] = data[1][1]
        event['geoip_city_name'] = self.__get_lookup_value(self._clientips_city_name_lookup, data[2])
        event['geoip_country_name'] = self.__get_lookup_value(self._clientips_country_name_lookup, data[3])
        event['geoip_country_iso_code'] = self.__get_lookup_value(self._clientips_country_iso_code_lookup, data[4])
        event['geoip_continent_name'] = self.__get_lookup_value(self._clientips_continent_name_lookup, data[5])
        # NOTE: the source data stores both continent lookups under slot 5.
        event['geoip_continent_code'] = self.__get_lookup_value(self._clientips_continent_code_lookup, data[5])

    def __fill_out_ip_prefix(self, ip_prefix):
        # Append two pseudo-random octets, biased towards mid-range values
        # by the r*(1-r) factor.
        rnd1 = random.random()
        k1 = int(rnd1 * (1 - rnd1) * 255 * 4)
        rnd2 = random.random()
        k2 = int(rnd2 * (1 - rnd2) * 255 * 4)
        return "{}.{}.{}".format(ip_prefix, k1, k2)

    def __get_lookup_value(self, lookup, key):
        # Empty keys mean "field absent" in the source data; pass them through.
        if key == "":
            return key
        else:
            return lookup[key]
class Referrer:
    """Adds a weighted random referrer URL to generated events."""

    def __init__(self):
        # Reuse the process-wide tables in ebs.global_lookups when present.
        cache = ebs.global_lookups
        if '_referrers' not in cache:
            cache['_referrers'] = WeightedArray('%s/data/referrers.json.gz' % cwd)
        self._referrers = cache['_referrers']
        if '_referrers_url_base_lookup' not in cache:
            with gzip.open('%s/data/referrers_url_base_lookup.json.gz' % cwd, 'rt') as data_file:
                cache['_referrers_url_base_lookup'] = json.load(data_file)
        self._referrers_url_base_lookup = cache['_referrers_url_base_lookup']

    def add_fields(self, event):
        # Combine the looked-up URL base with the randomly selected path.
        choice = self._referrers.get_random()
        event['referrer'] = "%s%s" % (self._referrers_url_base_lookup[choice[0]], choice[1])
class Request:
    """Adds request fields (URL, size, verb, status, HTTP version) to events."""

    def __init__(self):
        # Reuse the process-wide tables in ebs.global_lookups when present.
        cache = ebs.global_lookups
        if '_requests' not in cache:
            cache['_requests'] = WeightedArray('%s/data/requests.json.gz' % cwd)
        self._requests = cache['_requests']
        if '_requests_url_base_lookup' not in cache:
            with gzip.open('%s/data/requests_url_base_lookup.json.gz' % cwd, 'rt') as data_file:
                cache['_requests_url_base_lookup'] = json.load(data_file)
        self._requests_url_base_lookup = cache['_requests_url_base_lookup']

    def add_fields(self, event):
        # One weighted sample yields: url-base index, path, size, verb,
        # response code and HTTP version.
        choice = self._requests.get_random()
        event['request'] = "{}{}".format(self._requests_url_base_lookup[choice[0]], choice[1])
        event['bytes'] = choice[2]
        event['verb'] = choice[3]
        event['response'] = choice[4]
        event['httpversion'] = choice[5]
def convert_to_bytes(size):
    """Convert a size string such as "100", "5kB", "2MB" or "1GB" to bytes.

    :param size: integer string with an optional single space and kB/MB/GB unit
    :return: the size in bytes as an int
    :raises ValueError: if the string does not match the expected format
    """
    matched_size = re.match(r"^(\d+)\s?(kB|MB|GB)?$", size)
    if not matched_size:
        raise ValueError("Invalid byte size value [{}]".format(size))
    value = int(matched_size.group(1))
    unit = matched_size.group(2)
    # Bit-shift per unit; a bare number has no unit and is already bytes.
    shifts = {None: 0, "kB": 10, "MB": 20, "GB": 30}
    try:
        return value << shifts[unit]
    except KeyError:
        # Unreachable unless the regex and this table diverge.
        raise ValueError("Unrecognized unit [{}] for byte size value [{}]".format(unit, size))
class RandomEvent:
def __init__(self, params, agent=Agent, client_ip=ClientIp, referrer=Referrer, request=Request):
self._agent = agent()
self._clientip = client_ip()
self._referrer = referrer()
self._request = request()
# We will reuse the event dictionary. This assumes that each field will be present (and thus overwritten) in each event.
# This reduces object churn and improves peak indexing throughput.
self._event = {}
if "index" in params:
index = re.sub(r"<\s*yyyy\s*>", "{ts[yyyy]}", params["index"], flags=re.IGNORECASE)
index = re.sub(r"<\s*yy\s*>", "{ts[yy]}", index, flags=re.IGNORECASE)
index = re.sub(r"<\s*mm\s*>", "{ts[mm]}", index, flags=re.IGNORECASE)
index = re.sub(r"<\s*dd\s*>", "{ts[dd]}", index, flags=re.IGNORECASE)
index = re.sub(r"<\s*hh\s*>", "{ts[hh]}", index, flags=re.IGNORECASE)
self._index = index
self._index_pattern = True
else:
self._index = "elasticlogs"
self._index_pattern = False
self._type = "doc"
self._timestamp_generator = TimestampStructGenerator(
params.get("starting_point", "now"),
params.get("offset"),
float(params.get("acceleration_factor", "1.0")),
# this is only expected to be used in tests
params.get("__utc_now")
)
if "daily_logging_volume" in params and "client_count" in params:
# in bytes
self.daily_logging_volume = convert_to_bytes(params["daily_logging_volume"]) // int(params["client_count"])
else:
self.daily_logging_volume = None
self.current_logging_volume = 0
self.total_days = params.get("number_of_days")
self.remaining_days = self.total_days
self.record_raw_event_size = params.get("record_raw_event_size", False)
self._offset = 0
self._web_host = itertools.cycle([1, 2, 3])
self._timestruct = None
self._index_name = None
self._time_interval_current_bulk = 0
@property
def percent_completed(self):
if self.daily_logging_volume is None or self.total_days is None:
return None
else:
full_days = self.total_days - self.remaining_days
already_generated = self.daily_logging_volume * full_days + self.current_logging_volume
total = self.total_days * self.daily_logging_volume
return already_generated / total
def start_bulk(self, bulk_size):
self._time_interval_current_bulk = 1 / bulk_size
self._timestruct = self._timestamp_generator.next_timestamp()
self._index_name = self.__generate_index_pattern(self._timestruct)
    def generate_event(self):
        """Generate one nginx-access-log-style event as a JSON string.

        Reuses and mutates ``self._event`` in place (hot path - avoids object churn;
        every field is overwritten on each call).

        :return: a ``(line, index, type)`` tuple - the JSON document, target index name
                 and document type.
        :raise StopIteration: once the configured number of days is exhausted
                              (signals exhaustion to the caller; this is a plain
                              method, not a generator, so PEP 479 does not apply).
        """
        if self.remaining_days == 0:
            raise StopIteration()
        # advance time by a few micros
        self._timestruct = self._timestamp_generator.simulate_tick(self._time_interval_current_bulk)
        # index for the current line - we may cross a date boundary later if we're above the daily logging volume
        index = self._index_name
        event = self._event
        event["@timestamp"] = self._timestruct["iso"]
        # assume a typical event size of 263 bytes but limit the file size to 4GB
        # NOTE(review): self._offset is never advanced anywhere in this block, so this
        # expression always evaluates to 263. Presumably the intent was to accumulate
        # (e.g. self._offset = (self._offset + 263) % ...) - confirm against upstream.
        event["offset"] = (self._offset + 263) % (4 * 1024 * 1024 * 1024)
        # each generator overwrites its own group of fields on the shared event dict
        self._agent.add_fields(event)
        self._clientip.add_fields(event)
        self._referrer.add_fields(event)
        self._request.add_fields(event)
        event["hostname"] = "web-%s-%s.elastic.co" % (event["geoip_continent_code"], next(self._web_host))
        if self.record_raw_event_size or self.daily_logging_volume:
            # determine the raw event size (as if this were contained in nginx log file). We do not bother to
            # reformat the timestamp as this is not worth the overhead.
            raw_event = '%s - - [%s] "%s %s HTTP/%s" %s %s "%s" "%s"' % (event["clientip"], event["@timestamp"],
                                                                         event["verb"], event["request"],
                                                                         event["httpversion"], event["response"],
                                                                         event["bytes"], event["referrer"],
                                                                         event["agent"])
            if self.daily_logging_volume:
                self.current_logging_volume += len(raw_event)
                # once this client's per-day byte budget is exceeded, roll over to the next day
                if self.current_logging_volume > self.daily_logging_volume:
                    if self.remaining_days is not None:
                        self.remaining_days -= 1
                    self._timestamp_generator.skip(datetime.timedelta(days=1))
                    # advance time now for real (we usually use #simulate_tick() which will keep everything except for
                    # microseconds constant.
                    self._timestruct = self._timestamp_generator.next_timestamp()
                    self._index_name = self.__generate_index_pattern(self._timestruct)
                    self.current_logging_volume = 0
        if self.record_raw_event_size:
            # we are on the hot code path here and thus we want to avoid conditionally creating strings so we duplicate
            # the event.
            line = '{"@timestamp": "%s", ' \
                   '"_raw_event_size":%d, ' \
                   '"offset":%s, ' \
                   '"source":"/usr/local/var/log/nginx/access.log","fileset":{"module":"nginx","name":"access"},"input":{"type":"log"},' \
                   '"beat":{"version":"6.3.0","hostname":"%s","name":"%s"},' \
                   '"prospector":{"type":"log"},' \
                   '"nginx":{"access":{"user_name": "-",' \
                   '"agent":"%s","user_agent": {"major": "%s","os": "%s","os_major": "%s","name": "%s","os_name": "%s","device": "%s"},' \
                   '"remote_ip": "%s","remote_ip_list":["%s"],' \
                   '"geoip":{"continent_name": "%s","city_name": "%s","country_name": "%s","country_iso_code": "%s","location":{"lat": %s,"lon": %s} },' \
                   '"referrer":"%s",' \
                   '"url": "%s","body_sent":{"bytes": %s},"method":"%s","response_code":%s,"http_version":"%s"} } }' % \
                   (event["@timestamp"],
                    len(raw_event),
                    event["offset"],
                    event["hostname"],event["hostname"],
                    event["agent"], event["useragent_major"], event["useragent_os"], event["useragent_os_major"], event["useragent_name"], event["useragent_os_name"], event["useragent_device"],
                    event["clientip"], event["clientip"],
                    event["geoip_continent_name"], event["geoip_city_name"], event["geoip_country_name"], event["geoip_country_iso_code"], event["geoip_location_lat"], event["geoip_location_lon"],
                    event["referrer"],
                    event["request"], event["bytes"], event["verb"], event["response"], event["httpversion"])
        else:
            # identical template minus the "_raw_event_size" field
            line = '{"@timestamp": "%s", ' \
                   '"offset":%s, ' \
                   '"source":"/usr/local/var/log/nginx/access.log","fileset":{"module":"nginx","name":"access"},"input":{"type":"log"},' \
                   '"beat":{"version":"6.3.0","hostname":"%s","name":"%s"},' \
                   '"prospector":{"type":"log"},' \
                   '"nginx":{"access":{"user_name": "-",' \
                   '"agent":"%s","user_agent": {"major": "%s","os": "%s","os_major": "%s","name": "%s","os_name": "%s","device": "%s"},' \
                   '"remote_ip": "%s","remote_ip_list":["%s"],' \
                   '"geoip":{"continent_name": "%s","city_name": "%s","country_name": "%s","country_iso_code": "%s","location":{"lat": %s,"lon": %s} },' \
                   '"referrer":"%s",' \
                   '"url": "%s","body_sent":{"bytes": %s},"method":"%s","response_code":%s,"http_version":"%s"} } }' % \
                   (event["@timestamp"],
                    event["offset"],
                    event["hostname"],event["hostname"],
                    event["agent"], event["useragent_major"], event["useragent_os"], event["useragent_os_major"], event["useragent_name"], event["useragent_os_name"], event["useragent_device"],
                    event["clientip"], event["clientip"],
                    event["geoip_continent_name"], event["geoip_city_name"], event["geoip_country_name"], event["geoip_country_iso_code"], event["geoip_location_lat"], event["geoip_location_lon"],
                    event["referrer"],
                    event["request"], event["bytes"], event["verb"], event["response"], event["httpversion"])
        return line, index, self._type
def __generate_index_pattern(self, timestruct):
if self._index_pattern:
return self._index.format(ts=timestruct)
else:
return self._index
| 51.569007 | 196 | 0.621326 | 2,612 | 21,298 | 4.721286 | 0.12902 | 0.041599 | 0.073954 | 0.027733 | 0.566818 | 0.495864 | 0.453779 | 0.366607 | 0.353876 | 0.252595 | 0 | 0.007143 | 0.250681 | 21,298 | 412 | 197 | 51.694175 | 0.765587 | 0.076909 | 0 | 0.273846 | 0 | 0.027692 | 0.238792 | 0.127471 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052308 | false | 0 | 0.030769 | 0 | 0.141538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f423b852ccc3707d9d4c3d66b423e8f820b294 | 11,608 | py | Python | scripts/tracking/main_tracking.py | MaaaasayaK/Self-Supervised-Small-Soccer-Player-Detection-Tracking | 96d87367afdf4cca8aeca3f32c313e8632c70fe4 | [
"MIT"
] | 1 | 2021-08-17T18:22:12.000Z | 2021-08-17T18:22:12.000Z | scripts/tracking/main_tracking.py | cballester/Self-Supervised-Small-Soccer-Player-Detection-Tracking | a5d2d0c31a992919a270bd0e02379844196271f0 | [
"MIT"
] | null | null | null | scripts/tracking/main_tracking.py | cballester/Self-Supervised-Small-Soccer-Player-Detection-Tracking | a5d2d0c31a992919a270bd0e02379844196271f0 | [
"MIT"
] | 1 | 2021-08-19T14:21:52.000Z | 2021-08-19T14:21:52.000Z | import sys
import torchvision
import os
import torch
from tracking_utils import light_track
from natsort import natsorted, ns
import numpy as np
from argparse import ArgumentParser
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--data_name', type=str, default='issia')
parser.add_argument('--use_GT_position', dest='use_GT_position', action='store_true')
parser.set_defaults(use_GT_position=False)
parser.add_argument('--rescale_img_factor', type=float, default=1.0)
parser.add_argument('--model_name', type=str, default='frcnn_fpn')
parser.add_argument('--backbone', type=str, default='resnet18')
parser.add_argument('--checkpoint', type=str, default='../../checkpoints_runs/player_det_resnet18_student.pth')
parser.add_argument('--detection_score_thres', type=float, default=0.8)
parser.add_argument('--no_use_context', dest='use_context', action='store_false')
parser.set_defaults(use_context=True)
parser.add_argument('--no_use_soft_nms', dest='use_soft_nms', action='store_false')
parser.set_defaults(use_soft_nms=True)
parser.add_argument('--nms_thres', type=float, default=0.4)
parser.add_argument('--anchor_sizes', type=int, nargs='+', default=[32, 64, 128, 256, 512])
parser.add_argument('--use_track_branch_model', dest='use_track_branch_model', action='store_true')
parser.set_defaults(use_track_branch_model=False)
parser.add_argument('--use_track_branch_embed', dest='use_track_branch_embed', action='store_true')
parser.set_defaults(use_track_branch_embed=False)
parser.add_argument('--pose_model', type=str, default='mobile-deconv')
parser.add_argument('--keyframe_interval', type=int, default=1)
parser.add_argument('--frame_interval', type=int, default=1)
parser.add_argument('--init_frame', type=int, default=100)
parser.add_argument('--n_img_max', type=int, default=50)
parser.add_argument('--no_use_IOU', dest='use_IOU', action='store_false')
parser.set_defaults(use_IOU=True)
parser.add_argument('--spacial_iou_thresh', type=float, default=0.5)
parser.add_argument('--no_use_features', dest='use_features', action='store_false')
parser.set_defaults(use_features=True)
parser.add_argument('--no_use_visual_feat', dest='use_visual_feat', action='store_false')
parser.set_defaults(use_visual_feat=True)
parser.add_argument('--visual_feat_model_name', type=str, default='faster-rcnn')
parser.add_argument('--imagenet_model', dest='imagenet_model', action='store_false')
parser.set_defaults(imagenet_model=True)
parser.add_argument('--use_pose', dest='use_pose', action='store_true')
parser.set_defaults(use_pose=False)
parser.add_argument('--weight_loss', dest='weight_loss', action='store_true')
parser.set_defaults(weight_loss=False)
parser.add_argument('--w_spacial', type=float, default=0.97)
parser.add_argument('--w_visual', type=float, default=0.03)
parser.add_argument('--w_pose', type=float, default=0.0)
parser.add_argument('--visual_metric', type=str, default='l2')
parser.add_argument('--use_filter_tracks', dest='use_filter_tracks', action='store_true')
parser.set_defaults(use_filter_tracks=False)
parser.add_argument('--thres_count_ids', type=int, default=2)
parser.add_argument('--use_ReID_module', dest='use_ReID_module', action='store_true')
parser.set_defaults(use_ReID_module=False)
parser.add_argument('--max_vis_reID', type=int, default=4)
parser.add_argument('--max_vis_feat', type=int, default=4)
parser.add_argument('--N_past_to_keep_reID', type=int, default=3)
parser.add_argument('--N_past_to_keep', type=int, default=1)
parser.add_argument('--N_frame_lost_keep', type=int, default=10)
parser.add_argument('--display_pose', dest='display_pose', action='store_true')
parser.set_defaults(display_pose=False)
parser.add_argument('--write_csv', dest='write_csv', action='store_true')
parser.set_defaults(write_csv=False)
parser.add_argument('--write_video', dest='write_video', action='store_true')
parser.set_defaults(write_video=False)
parser.add_argument('--visualize', dest='visualize', action='store_true')
parser.set_defaults(visualize=False)
parser.add_argument('--output_path', type=str, default='../../data/intermediate/tracking')
hparams = parser.parse_args()
hparams.current_model_detection = None
hparams.flag_method = True
if not hparams.use_visual_feat:
hparams.w_visual = 0
if not hparams.use_pose:
hparams.w_pose = 0
if hparams.visual_feat_model_name == 'faster-rcnn':
hparams.imagenet_model = False
max_dist_factor_feat = 32 * (1 / hparams.rescale_img_factor)
max_dist_factor_reID = max_dist_factor_feat / 4
if not hparams.use_GT_position:
if hparams.current_model_detection is None:
from train_tracker import get_model_detection
model_detection = get_model_detection(hparams.model_name, hparams.weight_loss, hparams.backbone, False,
False, False, hparams.detection_score_thres, False,
hparams.use_soft_nms, anchor_sizes=hparams.anchor_sizes, use_context=hparams.use_context,
nms_thres=hparams.nms_thres, use_track_branch=hparams.use_track_branch_model)
model_detection.load_state_dict(torch.load(hparams.checkpoint))
model_detection.to(torch.device('cuda'))
model_detection.eval()
else:
model_detection = hparams.current_model_detection
else:
model_detection = None
if hparams.use_visual_feat:
if hparams.visual_feat_model_name == 'faster-rcnn':
if hparams.current_model_detection is None:
from train_tracker import get_model_detection
visual_feat_model = get_model_detection(hparams.model_name, hparams.weight_loss, hparams.backbone, False,
False, False, hparams.detection_score_thres, False,
hparams.use_soft_nms, anchor_sizes=hparams.anchor_sizes,
use_context=hparams.use_context, nms_thres=hparams.nms_thres,
use_track_branch=hparams.use_track_branch_model)
visual_feat_model.load_state_dict(torch.load(hparams.checkpoint))
visual_feat_model.to(torch.device('cuda'))
else:
visual_feat_model = hparams.current_model_detection
visual_feat_model.eval()
layer = visual_feat_model._modules.get('fc7')
elif hparams.visual_feat_model_name == 'resnet50':
visual_feat_model = torchvision.models.resnet50(pretrained=True)
visual_feat_model.to(torch.device('cuda'))
visual_feat_model.eval()
layer = visual_feat_model._modules.get('avgpool')
elif hparams.visual_feat_model_name == 'vgg19':
visual_feat_model = torchvision.models.vgg19(pretrained=True)
visual_feat_model.to(torch.device('cuda'))
visual_feat_model.eval()
layer = visual_feat_model._modules.get('avgpool')
else:
print(' visual feature model does not exist')
use_visual_feat = False
else:
visual_feat_model = None
layer = None
if hparams.use_pose:
if hparams.pose_model == 'mobile-deconv':
from network_mobile_deconv import Network
pose_model_path = "../other_utils/lighttrack/weights/mobile-deconv/snapshot_296.ckpt"
elif hparams.pose_model == 'MSRA152':
from network_MSRA152 import Network
pose_model_path = "../other_utils/lighttrack/weights/MSRA152/MSRA_snapshot_285.ckpt"
elif hparams.pose_model == 'CPN101':
from network_CPN101 import Network
pose_model_path = '../other_utils/lighttrack/weights/CPN101/CPN_snapshot_293.ckpt'
else:
sys.exit('pose model not available')
# initialize pose estimator
pose_estimator = Tester(Network(), cfg)
pose_estimator.load_weights(pose_model_path)
else:
pose_estimator = None
if hparams.data_name == 'issia':
base_image_folder = '../../data/issia/frames/'
base_annotation_folder = '../../data/issia/annotations/'
rescale_bbox = [0., 0.]
if hparams.data_name == 'SoccerNet':
base_image_folder = '../../data/SoccerNet/sequences/'
base_annotation_folder = None
rescale_bbox = [0., 0.]
if hparams.data_name == 'panorama':
base_image_folder = '../../data/panorama/frames/'
base_annotation_folder = None
rescale_bbox = [0., 0.]
if hparams.data_name == 'SPD':
base_image_folder = '../../data/SPD/frames/'
base_annotation_folder = None
rescale_bbox = [0., 0.]
for s in natsorted(os.listdir(base_image_folder), alg=ns.PATH | ns.IGNORECASE):
print('eval tracking on seq', s)
image_folder = base_image_folder + str(s) + '/'
if base_annotation_folder is not None:
annotation_folder = base_annotation_folder + str(s) + '/'
else:
annotation_folder = None
base_dir = hparams.output_path + '/output_tracking'
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base_dir = os.path.join(base_dir, hparams.data_name)
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base_dir = os.path.join(base_dir, str(s))
if not os.path.exists(base_dir):
os.mkdir(base_dir)
visualize_folder = os.path.join(base_dir, 'visualize_tracking')
if not os.path.exists(visualize_folder):
os.mkdir(visualize_folder)
output_folder = os.path.join(base_dir, 'output_tracking')
if not os.path.exists(output_folder):
os.mkdir(output_folder)
output_video_path = os.path.join(output_folder, "out.mp4")
output_csv_path = os.path.join(output_folder, "out.csv")
if hparams.write_csv and os.path.exists(output_csv_path):
continue
out = light_track(pose_estimator, model_detection, visual_feat_model, layer,
image_folder, annotation_folder, rescale_bbox, hparams.rescale_img_factor,
visualize_folder, output_video_path, output_csv_path, hparams.use_features,
hparams.w_spacial, hparams.w_visual, hparams.w_pose, hparams.use_IOU, hparams.spacial_iou_thresh,
hparams.detection_score_thres, hparams.use_pose, hparams.use_visual_feat, hparams.imagenet_model,
hparams.display_pose, hparams.use_GT_position, hparams.flag_method,hparams.n_img_max, hparams.init_frame,
hparams.frame_interval, hparams.write_csv, hparams.write_video, hparams.keyframe_interval, hparams.visualize,
hparams.use_filter_tracks, hparams.thres_count_ids, hparams.visual_metric,
hparams.N_frame_lost_keep, hparams.N_past_to_keep, hparams.use_ReID_module,
hparams.N_past_to_keep_reID,hparams.max_vis_feat, max_dist_factor_feat, hparams.max_vis_reID,
max_dist_factor_reID,
hparams.use_track_branch_embed)
| 50.912281 | 139 | 0.67419 | 1,475 | 11,608 | 4.959322 | 0.139661 | 0.052905 | 0.099932 | 0.031579 | 0.453042 | 0.379631 | 0.332878 | 0.225974 | 0.200547 | 0.159535 | 0 | 0.01086 | 0.21468 | 11,608 | 227 | 140 | 51.136564 | 0.791575 | 0.002154 | 0 | 0.188776 | 0 | 0 | 0.154238 | 0.049253 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066327 | 0 | 0.066327 | 0.010204 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f5824cf904c5800c6d4ef10dc07a58fe417b71 | 7,245 | py | Python | src/analyse/ternary.py | timtroendle/money-land | fe3ed6e531cfe91156886d4fa685a14840749f36 | [
"MIT"
] | null | null | null | src/analyse/ternary.py | timtroendle/money-land | fe3ed6e531cfe91156886d4fa685a14840749f36 | [
"MIT"
] | null | null | null | src/analyse/ternary.py | timtroendle/money-land | fe3ed6e531cfe91156886d4fa685a14840749f36 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
import ternary
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import gridspec
import seaborn as sns
# Styling constants shared by all panels.
TICK_FONT_SIZE = 9
RED = "#A01914"   # signature red, anchors the sequential palette
BLUE = "#4F6DB8"  # signature blue (not referenced in this part of the file)
SEQUENTIAL_PALETTE = sns.light_palette(RED, as_cmap=True)
RED_TO_BLUE = [ # from https://gka.github.io using lightness correction
    '#002d6e', '#375aa2', '#6f8ad1', '#a7bffa',
    '#f5f5f5', '#fdad97', '#e36b55', '#b23125', '#720000'
]
DIVERGING_PALETTE = matplotlib.colors.LinearSegmentedColormap.from_list("signature-BlRd", RED_TO_BLUE)
# shorthand for pandas multi-index slicing
# NOTE(review): unused in this portion of the file - presumably used elsewhere; confirm before removing
idx = pd.IndexSlice
@dataclass
class PlotData:
    """Everything needed to render one ternary panel: the values to plot,
    the colour normalisation, and the axis/panel labels."""
    data: pd.Series  # values indexed by the shares of (util, wind, <third technology>)
    norm: matplotlib.colors.Normalize  # colour scale, shared with the panel's colorbar
    left_axis_label: str  # label of the panel-specific third technology axis
    panel_name: str  # subplot letter printed as the panel title, e.g. "a"
    bottom_axis_label: str = "Utility-scale PV (%) →"
    right_axis_label: str = "← Onshore wind (%)"
def plot_both_ternary(path_to_data, path_to_plot):
    """Render the four ternary panels (cost and land use, each for a rooftop-PV
    and an offshore-wind variant) plus two shared colorbars, and save the figure."""
    panels = read_data(path_to_data)

    fig = plt.figure(figsize=(7.5, 7.5))
    grid = gridspec.GridSpec(3, 2, width_ratios=[5, 5], height_ratios=[25, 25, 1])
    # four panel axes (row-major), then the two bottom colorbar axes
    panel_axes = [fig.add_subplot(grid[row, col]) for row in (0, 1) for col in (0, 1)]
    colorbar_axes = [fig.add_subplot(grid[2, col]) for col in (0, 1)]

    # left column shows cost (sequential palette), right column land use (diverging)
    palettes = [SEQUENTIAL_PALETTE, DIVERGING_PALETTE, SEQUENTIAL_PALETTE, DIVERGING_PALETTE]
    for panel, axis, palette in zip(panels, panel_axes, palettes):
        plot_ternary(panel, ax=axis, cmap=palette)

    plot_sequential_colorbar(fig, colorbar_axes[0], panels[0].norm, cmap=SEQUENTIAL_PALETTE,
                             label="Cost relative to cost minimal case")
    plot_diverging_colorbar(fig, colorbar_axes[1], panels[1].norm, cmap=DIVERGING_PALETTE,
                            label="Land requirements relative to cost minimal case",
                            land_use_data=panels[1].data)

    plt.subplots_adjust(left=0.05, bottom=0.07, right=0.95, top=0.98, wspace=0.2, hspace=0.05)
    fig.savefig(path_to_plot, pil_kwargs={"compression": "tiff_lzw"})
def read_data(path_to_data):
    """Load the scenario results and prepare the four ternary panels.

    Returns a list of four PlotData objects: cost and land use for the
    rooftop-PV variant, then cost and land use for the offshore-wind variant,
    all normalised to the cost-minimal scenario.
    """
    ds = xr.open_dataset(path_to_data)
    # express technology shares in tens of percent (0..10)
    for tech in ("roof", "util", "wind", "offshore"):
        ds.coords[tech] = ds.coords[tech] // 10
    # average over samples and keep only scenarios in which at most one of
    # rooftop PV / offshore wind is present
    df = (
        ds
        .mean("sample_id")
        .sel(scenario=(ds.roof == 0) | (ds.offshore == 0))
        .to_dataframe()
        .set_index(["util", "wind", "roof", "offshore"])
    )
    # normalise everything by the cost-minimal scenario
    df = df / df.loc[df.cost.idxmin()]

    # factories so each panel gets its own Normalize instance, as before
    def cost_norm():
        return matplotlib.colors.Normalize(vmin=df.cost.min(), vmax=df.cost.max())

    def land_norm():
        # symmetric around 1 so the diverging palette is centred on "no change"
        return matplotlib.colors.Normalize(vmin=df.land_use.min(), vmax=1 + (1 - df.land_use.min()))

    return [
        PlotData(data=filter_three_dimensions(df.cost, "roof"),
                 left_axis_label="← Rooftop PV (%)", norm=cost_norm(), panel_name="a"),
        PlotData(data=filter_three_dimensions(df.land_use, "roof"),
                 left_axis_label="← Rooftop PV (%)", norm=land_norm(), panel_name="b"),
        PlotData(data=filter_three_dimensions(df.cost, "offshore"),
                 left_axis_label="← Offshore wind (%)", norm=cost_norm(), panel_name="c"),
        PlotData(data=filter_three_dimensions(df.land_use, "offshore"),
                 left_axis_label="← Offshore wind (%)", norm=land_norm(), panel_name="d"),
    ]
def filter_three_dimensions(data, case):
    """Reduce a four-dimensional result series to three dimensions.

    Keeps only the rows where the technology opposite to ``case`` is zero
    ("roof" drops "offshore" and vice versa) and re-indexes the values by
    ``(util, wind, case)``.

    :param data: pd.Series indexed by (util, wind, roof, offshore)
    :param case: either "roof" or "offshore" - the third axis to keep
    :return: pd.Series indexed by (util, wind, case)
    """
    dropped = "offshore" if case == "roof" else "roof"
    flat = data.reset_index()
    flat = flat[flat[dropped] == 0].drop(columns=[dropped])
    return flat.set_index(["util", "wind", case]).iloc[:, 0]
def plot_ternary(plot_data, ax, cmap):
    """Draw one ternary heatmap panel for ``plot_data`` onto ``ax`` using ``cmap``."""
    scale = 10
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)

    _, tax = ternary.figure(ax=ax, scale=scale)
    tax.boundary(linewidth=1.0)
    tax.heatmap(
        plot_data.data.to_dict(),
        scale=10,
        style="triangular",
        colorbar=False,  # colorbars are drawn separately, shared per column
        cmap=cmap,
        vmin=plot_data.norm.vmin,
        vmax=plot_data.norm.vmax
    )
    tax.bottom_axis_label(plot_data.bottom_axis_label, ha="center")
    tax.right_axis_label(plot_data.right_axis_label, offset=0.16)
    tax.left_axis_label(plot_data.left_axis_label, ha="center", offset=0.14)
    # same tick range on all three edges, with per-edge offsets
    for axis_id, tick_offset in (('b', 0.02), ('l', 0.03), ('r', 0.04)):
        tax.ticks(ticks=range(0, 110, 20), axis=axis_id, linewidth=1, multiple=1,
                  offset=tick_offset, fontsize=TICK_FONT_SIZE)
    tax.clear_matplotlib_ticks()
    tax._redraw_labels()
    ax.set_title(plot_data.panel_name, loc="left")
    ax.set_aspect(1)
def plot_sequential_colorbar(fig, ax, norm, cmap, label):
    """Draw a horizontal colorbar for a sequential palette into ``ax``."""
    mappable = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm)
    mappable.set_array([])
    colorbar = fig.colorbar(mappable, ax=ax, fraction=1, aspect=35, shrink=1.0, orientation="horizontal")
    # four evenly spaced ticks over the full value range
    colorbar.set_ticks(np.linspace(start=norm.vmin, stop=norm.vmax, num=4))
    colorbar.set_ticklabels([f"{tick:.1f}" for tick in colorbar.get_ticks()])
    colorbar.outline.set_linewidth(0)
    colorbar.set_label(label)
    # hide the host axes entirely - only the colorbar should be visible
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
    ax.axis('off')
def plot_diverging_colorbar(fig, ax, norm, cmap, label, land_use_data):
    """Draw a horizontal colorbar for the diverging land-use palette into ``ax``.

    The palette is truncated to the fraction of the symmetric ``norm`` range that
    the data actually covers, so the bar stops at the data maximum.
    """
    base_cmap = matplotlib.cm.ScalarMappable(cmap=cmap, norm=norm).get_cmap()
    covered_fraction = (land_use_data.max() - land_use_data.min()) / (norm.vmax - norm.vmin)
    truncated_cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
        'cut_jet', base_cmap(np.linspace(0, covered_fraction, base_cmap.N)))
    mappable = matplotlib.cm.ScalarMappable(
        cmap=truncated_cmap,
        norm=matplotlib.colors.Normalize(vmin=0, vmax=land_use_data.max()))
    mappable.set_array([])
    colorbar = fig.colorbar(mappable, ax=ax, fraction=1, aspect=35, shrink=1.0, orientation="horizontal")
    colorbar.set_ticks([0, 0.5, 1.0, land_use_data.max()])
    colorbar.set_ticklabels([f"{tick:.1f}" for tick in colorbar.get_ticks()])
    colorbar.outline.set_linewidth(0)
    colorbar.set_label(label)
    # hide the host axes entirely - only the colorbar should be visible
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_visible(False)
    ax.axis('off')
if __name__ == "__main__":
    # `snakemake` is not defined in this module - presumably it is injected into
    # the namespace by Snakemake's `script:` directive, so this file is not meant
    # to be executed directly.
    plot_both_ternary(
        path_to_data=snakemake.input.results,
        path_to_plot=snakemake.output[0]
    )
| 35.866337 | 117 | 0.642926 | 1,029 | 7,245 | 4.314869 | 0.217687 | 0.026351 | 0.040541 | 0.042117 | 0.496622 | 0.43018 | 0.373423 | 0.336486 | 0.301351 | 0.281532 | 0 | 0.03263 | 0.208972 | 7,245 | 201 | 118 | 36.044776 | 0.741057 | 0.007315 | 0 | 0.264045 | 0 | 0 | 0.077608 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033708 | false | 0 | 0.050562 | 0 | 0.134831 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f5eeb590efc75b4298bcbbba1e165b62b754b6 | 20,491 | py | Python | cogs/game/minigames/black_box/game.py | FellowHashbrown/omega-psi-py | 4ea33cdbef15ffaa537f2c9e382de508c58093fc | [
"MIT"
] | 4 | 2018-12-23T08:49:40.000Z | 2021-03-25T16:51:43.000Z | cogs/game/minigames/black_box/game.py | FellowHashbrown/omega-psi-py | 4ea33cdbef15ffaa537f2c9e382de508c58093fc | [
"MIT"
] | 23 | 2020-11-03T17:40:40.000Z | 2022-02-01T17:12:59.000Z | cogs/game/minigames/black_box/game.py | FellowHashbrown/omega-psi-py | 4ea33cdbef15ffaa537f2c9e382de508c58093fc | [
"MIT"
] | 1 | 2019-07-11T23:40:13.000Z | 2019-07-11T23:40:13.000Z | from discord import Embed
from random import randint
from cogs.errors import get_error_message
from cogs.game.minigames.base_game.game import Game
from cogs.game.minigames.black_box.variables import NUMBERS, SYMBOLS, LEFT, RIGHT, UP, DOWN, GUESS, DIRECT, FINALIZE, HIT, MISS
from cogs.game.minigames.functions import wait_for_reaction
from util.database.database import database
from util.functions import get_embed_color
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Maps a laser-direction emoji to the board edge where such a laser ENTERS
# (e.g. a laser travelling LEFT enters from the "right" edge).
# NOTE(review): not referenced in this portion of the file - presumably used by
# code outside this chunk; confirm before removing.
dir_to_initial = {
    LEFT: "right",
    RIGHT: "left",
    UP: "bottom",
    DOWN: "top"
}
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class BlackBoxGame(Game):
"""A BlackBoxGame contains information about a game of
Black Box being played
"""
def __init__(self, bot, ctx, challenger):
super().__init__(
bot, ctx,
challenger = challenger,
opponent = challenger
)
self.current_player = 0
self.locations = []
self.message = None
self.guesses = {
"left": [ None ] * 8,
"right": [ None ] * 8,
"top": [ None ] * 8,
"bottom": [ None ] * 8
}
self.amt_guesses = 0
self.location_guesses = []
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
    def get_black_box(self, *, show_atoms=False) -> str:
        """Turns the black box into emojis to present inside
        a Discord Embed object

        :param show_atoms: Whether or not to actually show the atoms in the black box string
        :return: The whole 10x10 grid (8x8 box plus edge markers) as a newline-separated
                 string of emoji shortcodes
        """
        # Add the top layer of column emojis
        grid = ":white_large_square: "
        for column in range(8):
            # an edge shows its laser-result symbol once used, else its column number
            if self.guesses["top"][column] is not None:
                grid += self.guesses["top"][column] + " "
            else:
                grid += NUMBERS[column] + " "
        grid += ":white_large_square:\n"
        # Add each row of the black box
        for row in range(8):
            # Add the left column of row emojis
            if self.guesses["left"][row] is not None:
                grid += self.guesses["left"][row] + " "
            else:
                grid += NUMBERS[row] + " "
            for col in range(8):
                # If we want to show the atoms (at the end of the game)
                # we choose specific circles for the following:
                #   actual location of atoms: blue circle
                #   correct location of atom: green circle
                #   incorrect location of atom: red circle
                if show_atoms:
                    if (col, row) in self.locations and (col, row) in self.location_guesses:
                        grid += ":green_circle: "
                    elif (col, row) in self.locations:
                        grid += ":blue_circle: "
                    elif (col, row) in self.location_guesses:
                        grid += ":red_circle: "
                    else:
                        grid += ":black_large_square: "
                # However, if we are not showing the atoms, just show white squares
                # for where the guesses are and black squares for other squares
                else:
                    if (col, row) in self.location_guesses:
                        grid += ":white_large_square: "
                    else:
                        grid += ":black_large_square: "
            # Add the right column of row emojis
            if self.guesses["right"][row] is not None:
                grid += self.guesses["right"][row] + "\n"
            else:
                grid += NUMBERS[row] + "\n"
        # Add the bottom layer of column emojis
        grid += ":white_large_square: "
        for column in range(8):
            if self.guesses["bottom"][column] is not None:
                grid += self.guesses["bottom"][column] + " "
            else:
                grid += NUMBERS[column] + " "
        grid += ":white_large_square: \n"
        return grid
    def direct_laser(self, direction, offset):
        """Directs a laser through the black box

        Simulates the laser one square at a time: it is absorbed by an atom (HIT),
        deflected diagonally by atoms adjacent to its path, reflected straight back
        (MISS) or exits elsewhere, in which case entry and exit edges are marked
        with the same symbol from SYMBOLS.

        :param direction: The direction to move the laser in ("left"/"right"/"up"/"down")
        :param offset: The row or column to move the laser through (0-7)
        """
        # Start off at the block on the proper side
        # and create the movement tuple; (col, row) coordinates,
        # initial_side is the edge the laser enters from
        if direction == "left":
            movement = [-1, 0]
            current = initial = (7, offset)
            initial_side = "right"
        elif direction == "right":
            movement = [1, 0]
            current = initial = (0, offset)
            initial_side = "left"
        elif direction == "up":
            movement = [0, -1]
            current = initial = (offset, 7)
            initial_side = "bottom"
        elif direction == "down":
            movement = [0, 1]
            current = initial = (offset, 0)
            initial_side = "top"
        # Continue looping until the laser either hits an atom
        # or leaves the black box
        blocks_processed = 0
        while True:
            # Check if the current location is an atom (absorption)
            if current in self.locations:
                self.guesses[initial_side][offset] = HIT
                break
            # Test all the boxes in the corners of the current block:
            # upper left, upper right, lower left, lower right
            # tuple is (column, row)
            ul_block = (current[0] - 1, current[1] - 1)
            ur_block = (current[0] + 1, current[1] - 1)
            ll_block = (current[0] - 1, current[1] + 1)
            lr_block = (current[0] + 1, current[1] + 1)
            if blocks_processed == 0:
                # on the entry square, also look at the two squares flanking the entry
                if direction in ["left", "right"]:
                    up_block = (current[0], current[1] - 1)
                    lo_block = (current[0], current[1] + 1)
                else:
                    ri_block = (current[0] + 1, current[1])
                    le_block = (current[0] - 1, current[1])
            # Check the first blocks depending on if this is the first block processed
            # if so, the laser bounces back to the input
            if blocks_processed == 0:
                if direction in ["left", "right"] and (up_block in self.locations or lo_block in self.locations):
                    movement[0] = -movement[0]
                elif direction in ["up", "down"] and (ri_block in self.locations or le_block in self.locations):
                    movement[1] = -movement[1]
            # Check the corner blocks even if on the first block:
            # one adjacent atom deflects the laser 90 degrees, two reflect it back
            if movement[0] != 0:
                u_block = ul_block if movement[0] == -1 else ur_block
                l_block = ll_block if movement[0] == -1 else lr_block
                if u_block in self.locations and l_block in self.locations:
                    movement[0] = -movement[0]
                elif u_block in self.locations:
                    movement = [0, 1]
                elif l_block in self.locations:
                    movement = [0, -1]
            else:
                l_block = ul_block if movement[1] == -1 else ll_block
                r_block = ur_block if movement[1] == -1 else lr_block
                if l_block in self.locations and r_block in self.locations:
                    movement[1] = -movement[1]
                elif l_block in self.locations:
                    movement = [1, 0]
                elif r_block in self.locations:
                    movement = [-1, 0]
            # Check if the next movement will leave the black box:
            # mark MISS if it exits where it entered, otherwise pair the entry
            # and exit edges with the next unused symbol.
            # NOTE(review): SYMBOLS[self.amt_guesses] will raise IndexError once
            # more lasers exit than SYMBOLS has entries - confirm SYMBOLS is large
            # enough for the 32 possible edge positions.
            if ((current[0] + movement[0]) >= 8 or (current[0] + movement[0]) < 0 or
                (current[1] + movement[1]) >= 8 or (current[1] + movement[1]) < 0):
                if current == initial:
                    self.guesses[initial_side][offset] = MISS
                else:
                    self.guesses[initial_side][offset] = SYMBOLS[self.amt_guesses]
                    if current[0] == 0 and movement[0] == -1:
                        self.guesses["left"][current[1]] = SYMBOLS[self.amt_guesses]
                    elif current[0] == 7 and movement[0] == 1:
                        self.guesses["right"][current[1]] = SYMBOLS[self.amt_guesses]
                    elif current[1] == 0 and movement[1] == -1:
                        self.guesses["top"][current[0]] = SYMBOLS[self.amt_guesses]
                    elif current[1] == 7 and movement[1] == 1:
                        self.guesses["bottom"][current[0]] = SYMBOLS[self.amt_guesses]
                    self.amt_guesses += 1
                break
            current = (current[0] + movement[0], current[1] + movement[1])
            blocks_processed += 1
async def setup(self):
"""Sets up the game by asking the player how many atoms they want"""
message = await self.ctx.send(embed = Embed(
title = "Configuration",
description = "How many atoms do you want to exist in the black box?",
colour = await get_embed_color(self.challenger)
))
for reaction in NUMBERS[2:5]:
await message.add_reaction(reaction)
num_atoms = await wait_for_reaction(
self.bot, message,
self.challenger, NUMBERS[2:5])
num_atoms = NUMBERS.index(num_atoms) + 1
# Add the locations of the "atoms"
invalid_locations = []
for locations in range(num_atoms):
location = (randint(0, 7), randint(0, 7))
while location in invalid_locations:
location = (randint(0, 7), randint(0, 7))
self.locations.append(location)
# Add invalid locations that exist around the created "atom"
for r_off in range(-1, 2):
for c_off in range(-1, 2):
if (location[0] + c_off >= 0 and location[1] + r_off >= 0 and
location[0] + c_off < 8 and location[1] + r_off < 8):
invalid_locations.append((location[0] + c_off, location[1] + r_off))
async def play(self):
    """Allows the player to play a game of Black Box.

    Main game loop: renders the board, offers the valid actions as
    reactions (guess / direct a laser / finalize), and dispatches to the
    matching handler until ``finalize_guesses`` reports the game is over.
    """
    await self.setup()
    # Continue looping until the player finishes their game
    self.message = await self.ctx.send("_ _")
    while True:
        found = False
        valid_options = [GUESS]
        # If there can still be lasers pushed through, add the DIRECT
        # reaction (i.e. at least one border slot has no result symbol yet)
        for direction in self.guesses:
            for item in self.guesses[direction]:
                if item is None:
                    valid_options.append(DIRECT)
                    found = True
                    break
            if found:
                break
        # If there are an equivalent amount of location guesses
        # as there are locations, give the option to finalize the guesses
        if len(self.location_guesses) == len(self.locations):
            valid_options.append(FINALIZE)
        await self.message.edit(
            embed = Embed(
                title = "Black Box - {} Atoms".format(len(self.locations)),
                description = "{}\n\n{}\n{}\n{}".format(
                    self.get_black_box(),
                    "To make a guess, react with {}".format(GUESS),
                    "To direct a \"laser\", react with {}".format(DIRECT) if DIRECT in valid_options else "",
                    "To finalize your guesses, react with {}".format(FINALIZE) if FINALIZE in valid_options else ""
                ),
                # NOTE(review): this embed uses ``color`` while the sibling
                # methods use ``colour`` — discord.py accepts both; consider
                # unifying for consistency.
                color = await get_embed_color(self.challenger)
            ).add_field(
                name = "Symbol Meanings",
                value = (
                    """
                    {} This symbol means that you hit an atom
                    {} This symbol means that the laser you directed came back to the same spot
                    Any other symbol means that the directed laser went in through one spot
                    and came out at the matching symbol's spot
                    """
                ).format(HIT, MISS)
            ))
        for reaction in valid_options:
            await self.message.add_reaction(reaction)
        # Ask the player if they want to make a guess or direct a "laser" (or finalize their guesses)
        option = await wait_for_reaction(
            self.bot, self.message,
            self.challenger, valid_options)
        await self.message.clear_reactions()
        if option == GUESS:
            await self.make_location_guess()
        elif option == DIRECT:
            await self.make_input_guess()
        else:
            # finalize_guesses returns True only when the game actually ended
            if await self.finalize_guesses():
                break
async def make_location_guess(self):
    """Allows the player to make a guess on where an atom may be.

    Prompts for a column reaction, then a row reaction; reacting on an
    already-guessed cell removes that guess, otherwise the cell is added
    (as long as the guess budget is not exhausted).
    """
    # Check if all guesses have been made
    # if so, don't try asking for any more guesses
    # NOTE(review): despite the comment above there is no early return here —
    # after the error message the method still prompts for column/row. That
    # still lets the player REMOVE a guess (the append below is guarded),
    # but confirm whether skipping the prompt was the intent.
    if len(self.location_guesses) == len(self.locations):
        await self.ctx.send(embed = get_error_message(
            "You have already made {} guesses. Remove one to make another!".format(
                len(self.locations)
            )
        ))
    await self.message.edit(
        embed = Embed(
            title = "Black Box - {} Atoms".format(len(self.locations)),
            description = "{0}\n\n{1} {2}\n{1} {3}".format(
                self.get_black_box(), GUESS,
                "To place a guess, react with the column first and then the row",
                "To remove a guess, react with the same column and row as it is in"
            ),
            colour = await get_embed_color(self.challenger)
        ).add_field(
            name = "Symbol Meanings",
            value = (
                """
                {} This symbol means that you hit an atom
                {} This symbol means that the laser you directed came back to the same spot
                Any other symbol means that the directed laser went in through one spot
                and came out at the matching symbol's spot
                """
            ).format(HIT, MISS)
        ))
    for number in NUMBERS:
        await self.message.add_reaction(number)
    # Two sequential reaction waits: column first, then row
    column = await wait_for_reaction(
        self.bot, self.message,
        self.challenger, NUMBERS)
    row = await wait_for_reaction(
        self.bot, self.message,
        self.challenger, NUMBERS)
    await self.message.clear_reactions()
    column = NUMBERS.index(column)
    row = NUMBERS.index(row)
    # Toggle semantics: re-guessing a cell removes it; otherwise add it
    # only while fewer guesses than atoms exist.
    if (column, row) in self.location_guesses:
        self.location_guesses.remove((column, row))
    elif len(self.location_guesses) < len(self.locations):
        self.location_guesses.append((column, row))
async def make_input_guess(self):
    """Allows the player to make a guess on the sides of the black box.

    Asks for a direction (arrow reaction), then a row/column number, and
    finally fires the laser through the box via ``direct_laser``.
    """
    # Get the direction the user wants to move input through
    # and which row or column they want to move input through
    await self.message.edit(
        embed = Embed(
            title = "Black Box - {} Atoms".format(len(self.locations)),
            description = "{}\n\n{}{}".format(
                self.get_black_box(), DIRECT,
                "Choose a direction to push a laser through using the directional arrows"
            ),
            colour = await get_embed_color(self.challenger)
        ).add_field(
            name = "Symbol Meanings",
            value = (
                """
                {} This symbol means that you hit an atom
                {} This symbol means that the laser you directed came back to the same spot
                Any other symbol means that the directed laser went in through one spot
                and came out at the matching symbol's spot
                """
            ).format(HIT, MISS)
        ))
    # Create a new list of valid direction reactions the user
    # can react with
    # Then add the reactions to the message and have the user
    # select which direction they want to move in
    # (a direction is only offered while its entry side still has free slots)
    directions = []
    for direction in [LEFT, RIGHT, UP, DOWN]:
        if not all(self.guesses[dir_to_initial[direction]]):
            directions.append(direction)
    for direction in directions:
        await self.message.add_reaction(direction)
    direction = await wait_for_reaction(
        self.bot, self.message,
        self.challenger, directions)
    direction = {LEFT: "left", RIGHT: "right", UP: "up", DOWN: "down"}[direction]
    # Ask the user which row or column to push a laser through
    await self.message.clear_reactions()
    await self.message.edit(
        embed = Embed(
            title = "Black Box - {} Atoms".format(len(self.locations)),
            description = "{}\n\n{}{}".format(
                self.get_black_box(), DIRECT,
                "Choose which {} to push the laser through".format(
                    "row" if direction in ["left", "right"] else "column"
                )
            ),
            colour = await get_embed_color(self.challenger)
        ).add_field(
            name = "Symbol Meanings",
            value = (
                """
                {} This symbol means that you hit an atom
                {} This symbol means that the laser you directed came back to the same spot
                Any other symbol means that the directed laser went in through one spot
                and came out at the matching symbol's spot
                """
            ).format(HIT, MISS)
        ))
    # Create a new list of valid number reactions the user
    # can react with
    # Then add the reactions to the message and have the user
    # select which row or column they want to push a laser through
    # NOTE(review): moving "left" checks the "right" side's slots (and vice
    # versa) — presumably the laser ENTERS from the opposite edge of the
    # direction it travels; confirm against direct_laser's convention.
    numbers = []
    for i in range(len(NUMBERS)):
        if ((direction == "left" and self.guesses["right"][i] is None) or
                (direction == "right" and self.guesses["left"][i] is None) or
                (direction == "up" and self.guesses["bottom"][i] is None) or
                (direction == "down" and self.guesses["top"][i] is None)):
            numbers.append(NUMBERS[i])
    for number in numbers:
        await self.message.add_reaction(number)
    offset = await wait_for_reaction(
        self.bot, self.message,
        self.challenger, numbers)
    offset = NUMBERS.index(offset)
    await self.message.clear_reactions()
    self.direct_laser(direction, offset)  # Direct the laser through the black box
async def finalize_guesses(self):
    """Finalizes the player's guesses and decides whether they won.

    The player wins when at least ``len(self.locations) - 1`` of their
    guessed cells contain actual atoms. Returns True when the game ended
    (a result was shown and recorded), False when more guesses are needed.
    """
    total_atoms = len(self.locations)
    placed = len(self.location_guesses)

    # Guard clause: cannot finalize until every atom has a guess placed.
    if placed != total_atoms:
        remaining = total_atoms - placed
        suffix = "" if placed == (total_atoms - 1) else "es"
        await self.ctx.send(embed = get_error_message(
            "You need to place at least {} more guess{}!".format(remaining, suffix)
        ))
        return False

    # Count how many guessed cells actually hold an atom.
    correct = sum(1 for atom in self.locations if atom in self.location_guesses)
    won = correct >= (total_atoms - 1)

    result_embed = Embed(
        title = "You Won!" if won else "You Lost :(",
        description = self.get_black_box(show_atoms = True),
        colour = await get_embed_color(self.challenger))
    await self.message.edit(embed = result_embed)
    await database.users.update_black_box(self.challenger, won)
    return True
| 43.229958 | 127 | 0.516373 | 2,346 | 20,491 | 4.426257 | 0.127451 | 0.037558 | 0.023112 | 0.023112 | 0.499037 | 0.39975 | 0.356029 | 0.289099 | 0.267623 | 0.242103 | 0 | 0.012264 | 0.391147 | 20,491 | 473 | 128 | 43.321353 | 0.820055 | 0.128886 | 0 | 0.343558 | 0 | 0 | 0.0724 | 0.001392 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009202 | false | 0 | 0.02454 | 0 | 0.046012 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74f9d1119f5a828ec576c313747eb837e48217fb | 3,437 | py | Python | rewrite_example.py | Giangblackk/hacksterio-smart-nids | 283166e8880aaeb280520053b4dcd431d30b3ed3 | [
"MIT"
] | null | null | null | rewrite_example.py | Giangblackk/hacksterio-smart-nids | 283166e8880aaeb280520053b4dcd431d30b3ed3 | [
"MIT"
] | null | null | null | rewrite_example.py | Giangblackk/hacksterio-smart-nids | 283166e8880aaeb280520053b4dcd431d30b3ed3 | [
"MIT"
] | null | null | null | # Steps for offline training:
# 1. load benign pcap file
# 2. extract features
# 3. train feature mapper model and save model
import numpy as np
from kitsune.FeatureExtractor import FE
from kitsune.KitNET import corClust as CC
from kitsune.KitNET import dA as AE
from scipy.stats import norm
from matplotlib import pyplot as plt
if __name__ == "__main__":
    # --- configuration -------------------------------------------------
    # load benign pcap file (pre-extracted to TSV by Kitsune's tooling)
    packet_file = "capEC2AMAZ-O4EL3NG-172.31.69.26a.pcap.tsv"
    packet_limit = np.Inf
    max_AE = 10            # max number of features per autoencoder cluster
    FM_grace = 10000       # packets used to learn the feature mapping
    AD_grace = 20000       # cumulative packet count ending AD training
    threshold_grace = 30000  # cumulative packet count ending threshold calc
    learning_rate = 0.1
    hidden_ratio = 0.75
    # create feature extractor to get next input vector
    fe = FE(packet_file, limit=packet_limit)
    fm = CC.corClust(fe.get_num_features())
    # --- phase 1: train the feature mapper -----------------------------
    print("Feature Mapper training")
    curIndex = 0
    while True:
        x = fe.get_next_vector()
        if len(x) == 0:
            break
        # train feature mapper
        fm.update(x)
        curIndex += 1
        if curIndex == FM_grace:
            break
    # get trained feature mapper (clusters of correlated feature indices)
    feature_map = fm.cluster(max_AE)
    print(feature_map)
    # initialize ensemble layers and output layer
    ensembleLayers = []
    for m in feature_map:
        params = AE.dA_params(
            n_visible=len(m),
            n_hidden=0,
            lr=learning_rate,
            corruption_level=0,
            gracePeriod=0,
            hiddenRatio=hidden_ratio,
        )
        ensembleLayers.append(AE.dA(params))
    # output layer takes one RMSE score per ensemble autoencoder
    params = AE.dA_params(
        len(feature_map),
        n_hidden=0,
        lr=learning_rate,
        corruption_level=0,
        gracePeriod=0,
        hiddenRatio=hidden_ratio,
    )
    outputLayer = AE.dA(params)
    # --- phase 2: train the anomaly detector ---------------------------
    # NOTE: curIndex keeps counting across phases, so this phase processes
    # packets FM_grace..AD_grace of the capture.
    print("Anomaly Detector training")
    while True:
        x = fe.get_next_vector()
        if len(x) == 0:
            break
        # train: each autoencoder sees only its cluster's features
        S_l1 = np.zeros(len(ensembleLayers))
        for a in range(len(ensembleLayers)):
            xi = x[feature_map[a]]
            S_l1[a] = ensembleLayers[a].train(xi)
        outputLayer.train(S_l1)
        curIndex += 1
        if curIndex == AD_grace:
            break
    # --- phase 3: execute on benign traffic to derive a threshold ------
    print("Prediction")
    # execute trained model on benign part of dataset
    RMSEs = []
    while True:
        x = fe.get_next_vector()
        if len(x) == 0:
            break
        # execute (no training updates in this phase)
        S_l1 = np.zeros(len(ensembleLayers))
        for a in range(len(ensembleLayers)):
            xi = x[feature_map[a]]
            S_l1[a] = ensembleLayers[a].execute(xi)
        pred = outputLayer.execute(S_l1)
        RMSEs.append(pred)
        curIndex += 1
        if curIndex == threshold_grace:
            break
    # calculate threshold: fit a normal to log-RMSE of benign traffic and
    # score each sample by its log survival probability
    benignSample = np.log(RMSEs)
    logProbs = norm.logsf(np.log(RMSEs), np.mean(benignSample), np.std(benignSample))
    print(np.min(logProbs), np.max(logProbs))
    print(np.min(RMSEs), np.max(RMSEs))
    # plot the RMSE anomaly scores
    plt.figure(figsize=(10, 5))
    fig = plt.scatter(range(len(RMSEs)), RMSEs, s=1.1, c=logProbs, cmap="RdYlGn")
    plt.yscale("log")
    plt.title("Anomaly Scores from Kitsune's Execution Phase")
    # FIX: the y-axis label was missing its closing parenthesis
    plt.ylabel("RMSE (log scaled)")
    plt.xlabel("Time elapsed [min]")
    figbar = plt.colorbar()
    figbar.ax.set_ylabel("Log Probability\n ", rotation=270)
    plt.show()
    # save trained mapper to file
| 26.037879 | 85 | 0.609252 | 450 | 3,437 | 4.533333 | 0.353333 | 0.029412 | 0.019608 | 0.017647 | 0.216176 | 0.216176 | 0.216176 | 0.216176 | 0.216176 | 0.216176 | 0 | 0.026283 | 0.291533 | 3,437 | 131 | 86 | 26.236641 | 0.811499 | 0.142566 | 0 | 0.4 | 0 | 0 | 0.072721 | 0.013998 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fb274c512334729a04267dec3df32bc39cd9ae | 452 | py | Python | position/urls.py | drowolath/position | 2d27a56732d195003d35762931fd2484ac270501 | [
"BSD-2-Clause"
] | null | null | null | position/urls.py | drowolath/position | 2d27a56732d195003d35762931fd2484ac270501 | [
"BSD-2-Clause"
] | null | null | null | position/urls.py | drowolath/position | 2d27a56732d195003d35762931fd2484ac270501 | [
"BSD-2-Clause"
] | null | null | null | import views
from django.conf.urls import url
urlpatterns = [
url(
r'(?P<latitude>[\d.@+-]+)/(?P<longitude>[\d.@+-]+)',
views.mapit,
name='mapit'),
url(
r'(?P<name>[\alphanum]+)',
views.trackers,
name='named_liveposition'),
url(
r'(?P<imei>\d{15})',
views.trackers,
name='liveposition'),
url(
r'^$',
views.index,
name='index'),
]
| 19.652174 | 60 | 0.462389 | 47 | 452 | 4.425532 | 0.468085 | 0.076923 | 0.072115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006579 | 0.327434 | 452 | 22 | 61 | 20.545455 | 0.677632 | 0 | 0 | 0.3 | 0 | 0 | 0.283186 | 0.154867 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fd94e4feeda1bcbc1a8631162221222bc2c165 | 2,010 | py | Python | bin/run_all_benchmarks.py | finiteautomata/finetune_vs_scratch | 444c9f9f2e1086f833c674e5d819b7a16ff8345a | [
"MIT"
] | 12 | 2021-11-19T18:40:17.000Z | 2022-03-07T10:56:54.000Z | bin/run_all_benchmarks.py | finiteautomata/finetune_vs_scratch | 444c9f9f2e1086f833c674e5d819b7a16ff8345a | [
"MIT"
] | 2 | 2022-02-20T17:28:00.000Z | 2022-03-06T21:34:21.000Z | bin/run_all_benchmarks.py | finiteautomata/finetune_vs_scratch | 444c9f9f2e1086f833c674e5d819b7a16ff8345a | [
"MIT"
] | null | null | null | import os
import re
import fire
import json
import logging
# Configure root logging at import time so CLI runs emit INFO-level output.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-level logger shared by run_all()
def run_all(times=10):
    """Run the benchmark script for every model until each has `times` runs.

    For each (model, report-file) pair: if a previous report exists, count
    its completed runs and only launch the remaining ones; models that
    already reached `times` runs are skipped entirely.
    """
    models = [
        ("finiteautomata/robertuito-base-uncased", "robertuito-uncased.json"),
        ("finiteautomata/robertuito-base-cased", "robertuito-cased.json"),
        ("finiteautomata/robertuito-base-deacc", "robertuito-deacc.json"),
        ("bertin-project/bertin-roberta-base-spanish", "bertin.json"),
        ("BSC-TeMU/roberta-base-bne", "roberta-bne.json"),
        ("dccuchile/bert-base-spanish-wwm-uncased", "beto-uncased.json"),
        ("models/beto-uncased-2500", "beto-uncased-2500.json"),
        ("models/beto-uncased-5000", "beto-uncased-5000.json"),
        ("models/beto-uncased-10000", "beto-uncased-10000.json"),
        ("models/beto-uncased-20000", "beto-uncased-20000.json"),
        ("dccuchile/bert-base-spanish-wwm-cased", "beto-cased.json"),
        ("models/beto-cased-2500", "beto-cased-2500.json"),
        ("models/beto-cased-5000", "beto-cased-5000.json"),
        ("models/beto-cased-10000", "beto-cased-10000.json"),
        ("models/beto-cased-20000", "beto-cased-20000.json"),
    ]
    logger.info("Running benchmarks")

    for model_name, output_path in models:
        logger.info(f"Running model: {model_name}")
        output_path = f"output/{output_path}"
        effective_times = times

        if os.path.exists(output_path):
            # A previous report exists — count how many runs it already holds.
            with open(output_path) as f:
                report = json.load(f)
            run_times = len(report["hate"])
            if run_times >= times:
                logger.info(f"Skipping model: {model_name}")
                continue
            logger.info(f"Found {run_times}")
            effective_times = times - run_times

        cmd = f"python bin/run_benchmark.py {model_name} {effective_times} {output_path} --max_length 128"
        os.system(cmd)


if __name__ == "__main__":
    fire.Fire(run_all)
| 34.655172 | 106 | 0.621891 | 243 | 2,010 | 5.00823 | 0.292181 | 0.081348 | 0.09203 | 0.069022 | 0.050945 | 0.050945 | 0 | 0 | 0 | 0 | 0 | 0.049296 | 0.222886 | 2,010 | 57 | 107 | 35.263158 | 0.729834 | 0 | 0 | 0.044444 | 0 | 0 | 0.471877 | 0.317571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.111111 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fdb498bb4874db8d8ec3451adbf242259fd94c | 1,110 | py | Python | Python/Backspace_String_Compare.py | treethree/LeetCode | 4c6d6e1ee92d87424fe5b9f20b8eef8d34e74761 | [
"Unlicense"
] | null | null | null | Python/Backspace_String_Compare.py | treethree/LeetCode | 4c6d6e1ee92d87424fe5b9f20b8eef8d34e74761 | [
"Unlicense"
] | null | null | null | Python/Backspace_String_Compare.py | treethree/LeetCode | 4c6d6e1ee92d87424fe5b9f20b8eef8d34e74761 | [
"Unlicense"
] | null | null | null | #Approach #1: Build String(Stack)
#Time Complexity: O(M + N), where M, N are the lengths of S and T respectively.
#Space Complexity: O(M + N).
class Solution(object):
    """Backspace string compare by materializing both typed-out strings.

    Time O(M + N); space O(M + N) for the two stacks.
    """

    def backspaceCompare(self, S, T):
        """Return True iff S and T render identically after '#' backspaces."""
        def typed(text):
            # Simulate typing: push normal chars; '#' deletes the last one
            # (backspace on an empty buffer is a no-op).
            stack = []
            for ch in text:
                if ch == '#':
                    if stack:
                        stack.pop()
                else:
                    stack.append(ch)
            return ''.join(stack)

        return typed(S) == typed(T)
#Approach #2: Two Pointer
#Time Complexity: O(M + N), where M, N are the lengths of S and T respectively.
#Space Complexity: O(1).
class Solution2():
    """Backspace string compare with two pointers scanning from the right.

    Time O(M + N); space O(1). Scanning backwards works because a '#' only
    deletes characters to its LEFT, so the effect of pending backspaces is
    fully known once we walk right-to-left.
    """

    def backspaceCompare(self, S, T):
        i, j = len(S) - 1, len(T) - 1
        # backS/backT: number of backspaces still "owed" in each string
        backS = backT = 0
        while True:
            # Skip over '#' characters and the characters they delete,
            # leaving i on the next surviving character of S (or -1).
            while i >= 0 and (backS or S[i] == '#'):
                backS += 1 if S[i] == '#' else -1
                i -= 1
            # Same skip for T.
            while j >= 0 and (backT or T[j] == '#'):
                backT += 1 if T[j] == '#' else -1
                j -= 1
            # Mismatch or one string exhausted: equal only if BOTH are done.
            if not (i >= 0 and j >= 0 and S[i] == T[j]):
                return i == j == -1
            i, j = i - 1, j - 1
| 32.647059 | 79 | 0.43964 | 159 | 1,110 | 3.069182 | 0.320755 | 0.020492 | 0.07377 | 0.079918 | 0.397541 | 0.295082 | 0.295082 | 0.295082 | 0.295082 | 0.295082 | 0 | 0.030628 | 0.411712 | 1,110 | 33 | 80 | 33.636364 | 0.716692 | 0.234234 | 0 | 0.08 | 0 | 0 | 0.005938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74fe0f956d636c3650d5cdff79ce6ca98d344de7 | 7,008 | py | Python | hyperion/pdfs/jfa/jfa_total.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 14 | 2021-12-19T04:24:15.000Z | 2022-03-18T03:24:04.000Z | hyperion/pdfs/jfa/jfa_total.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | null | null | null | hyperion/pdfs/jfa/jfa_total.py | hyperion-ml/hyperion | c4c9eee0acab1ba572843373245da12d00dfffaa | [
"Apache-2.0"
] | 5 | 2021-12-14T20:41:27.000Z | 2022-02-24T14:18:11.000Z | """
Copyright 2018 Johns Hopkins University (Author: Jesus Villalba)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
import numpy as np
from scipy import linalg as sla
from ...hyp_defs import float_cpu
from ...utils.math import (
invert_pdmat,
invert_trimat,
logdet_pdmat,
vec2symmat,
symmat2vec,
)
from ..core.pdf import PDF
class JFATotal(PDF):
    """Joint Factor Analysis total-variability model (i-vector style).

    Learns a low-rank matrix T mapping a latent factor y (dim ``y_dim``) to
    per-component supervector offsets over ``K`` mixture components. Upper
    triangles of symmetric matrices are stored compressed via the boolean
    mask ``_upptr``.

    Fixed in review (several paths could never have run as written):
      * ``is_init`` property getter was declared without ``self``.
      * ``_upptr`` was a raveled FLOAT vector; it is now a 2-D boolean mask
        so expressions like ``Li[self._upptr] = L[i]`` are valid numpy.
      * ``self.__uppr`` typo and undefined ``VF`` (now ``TF``) in
        ``compute_py_g_x``; float ``/`` used for array shapes/indices.
      * ``compute_TT``/``compute_L`` were ``@staticmethod`` but used ``self``.
      * ``Estep``: ``np.dot(F, y)`` was shape-incompatible (needs ``F.T``).
      * ``MstepMD``: ``my_y`` typo and ``la`` (the import is ``sla``).
      * ``fit``: called nonexistent ``compute_py_x`` with the TRAIN data for
        validation, and tested undefined ``x_val``.
    """

    def __init__(self, K, y_dim=None, T=None, **kwargs):
        """
        Args:
          K: number of mixture components.
          y_dim: latent factor dimension (inferred from T when T is given).
          T: total-variability matrix, shape (y_dim, K * x_dim), or None.
        """
        super(JFATotal, self).__init__(**kwargs)
        if T is not None:
            y_dim = T.shape[0]
        self.K = K
        self.y_dim = y_dim
        self.T = T
        # aux caches (invalidated by reset_aux / recomputed lazily)
        self._TT = None
        self.__upptr = None

    def reset_aux(self):
        """Invalidates caches that depend on the current value of T."""
        self._TT = None

    @property
    def is_init(self):
        # FIX: getter previously lacked ``self`` and always raised TypeError.
        if self._is_init:
            return True
        if self.T is not None:
            self._is_init = True
        return self._is_init

    def initialize(self, N, F):
        """Randomly initializes T from the shape of the sufficient stats."""
        assert N.shape[0] == self.K
        self.T = np.random.randn(self.y_dim, F.shape[1]).astype(float_cpu(), copy=False)

    def compute_py_g_x(
        self, N, F, G=None, return_cov=False, return_elbo=False, return_acc=False
    ):
        """Computes the posterior of y given the stats (N, F).

        Args:
          N: zero-order stats, shape (M, K).
          F: first-order stats, shape (M, K * x_dim).
          G: optional per-sample constant term added to the elbo.
          return_cov / return_elbo / return_acc: select extra outputs.

        Returns:
          y, optionally followed by Sy (compressed posterior covariances),
          elbo, and the accumulators (Ry, Py), in that order.
        """
        assert self.is_init
        M = F.shape[0]
        y_dim = self.y_dim
        tri_dim = y_dim * (y_dim + 1) // 2  # FIX: integer arithmetic for shapes
        compute_inv = return_cov or return_acc
        return_tuple = compute_inv or return_elbo

        TF = np.dot(F, self.T.T)
        L = self.compute_L(self.TT, N, self._upptr)
        y = np.zeros((M, y_dim), dtype=float_cpu())
        if return_cov:
            Sy = np.zeros((M, tri_dim), dtype=float_cpu())
        else:
            Sy = None
        if return_elbo:
            elbo = np.zeros((M,), dtype=float_cpu())
        if return_acc:
            Py = np.zeros((y_dim, y_dim), dtype=float_cpu())
            Ry = np.zeros((self.K, tri_dim), dtype=float_cpu())

        Li = np.zeros((self.y_dim, self.y_dim), dtype=float_cpu())
        for i in range(N.shape[0]):
            # Expand the compressed precision row into the upper triangle.
            # NOTE(review): only the upper triangle is populated — assumes
            # invert_pdmat symmetrizes/ignores the lower part; confirm.
            Li[self._upptr] = L[i]
            r = invert_pdmat(
                Li, right_inv=True, return_logdet=return_elbo, return_inv=compute_inv
            )
            mult_iL = r[0]
            if return_elbo:
                elbo[i] = -r[2] / 2
            if compute_inv:
                iL = r[-1]
            y[i] = mult_iL(TF[i])
            if return_cov:
                Sy[i] = iL[self._upptr]
            if return_acc:
                iL += np.outer(y[i], y[i])
                Py += iL
                # FIX: was ``self.__uppr`` (AttributeError)
                Ry += iL[self._upptr] * N[i][:, None]

        if not return_tuple:
            return y
        r = [y]
        if return_cov:
            r += [Sy]
        if return_elbo:
            if G is not None:
                elbo += G
            # FIX: was undefined ``VF``; TF[i] is the vector y[i] solves for.
            elbo += 0.5 * np.sum(TF * y, axis=-1)
            r += [elbo]
        if return_acc:
            r += [Ry, Py]
        return tuple(r)

    def Estep(self, N, F, G=None):
        """Accumulates the sufficient statistics for the M-steps."""
        y, elbo, Ry, Py = self.compute_py_g_x(
            N, F, G, return_elbo=True, return_acc=True
        )
        M = y.shape[0]
        y_acc = np.sum(y, axis=0)
        # FIX: was np.dot(F, y), whose shapes (M, D) x (M, y_dim) don't align.
        Cy = np.dot(F.T, y)
        elbo = np.sum(elbo)
        stats = (elbo, M, y_acc, Ry, Cy, Py)
        return stats

    def MstepML(self, stats):
        """Maximum-likelihood update of T, one component block at a time."""
        _, M, y_acc, Ry, Cy, _ = stats
        T = np.zeros_like(self.T)
        Ryk = np.zeros((self.y_dim, self.y_dim), dtype=float_cpu())
        x_dim = T.shape[1] // self.K  # FIX: float division broke slicing
        for k in range(self.K):
            idx = k * x_dim
            Ryk[self._upptr] = Ry[k]
            iRyk_mult = invert_pdmat(Ryk, right_inv=False)[0]
            T[:, idx : idx + x_dim] = iRyk_mult(Cy[idx : idx + x_dim].T)
        self.T = T
        self.reset_aux()

    def MstepMD(self, stats):
        """Minimum-divergence update: whitens y's posterior via a Cholesky."""
        _, M, y_acc, Ry, Cy, Py = stats
        mu_y = y_acc / M
        # FIX: was np.outer(my_y, mu_y) — undefined name.
        Cy = Py / M - np.outer(mu_y, mu_y)
        # FIX: module is imported as ``sla``, not ``la``.
        chol_Cy = sla.cholesky(Cy, lower=False, overwrite_a=True)
        self.T = np.dot(chol_Cy, self.T)
        self.reset_aux()

    def fit(
        self,
        N,
        F,
        G=None,
        N_val=None,
        F_val=None,
        epochs=20,
        ml_md="ml+md",
        md_epochs=None,
    ):
        """EM training loop; returns elbo trajectories (and val ones if given)."""
        use_ml = False if ml_md == "md" else True
        use_md = False if ml_md == "ml" else True

        if not self.is_init:
            self.initialize(N, F)

        elbo = np.zeros((epochs,), dtype=float_cpu())
        elbo_val = np.zeros((epochs,), dtype=float_cpu())
        for epoch in range(epochs):
            stats = self.Estep(N, F, G)
            elbo[epoch] = stats[0]
            if N_val is not None and F_val is not None:
                # FIX: was ``self.compute_py_x(N, F, ...)`` — wrong method
                # name AND evaluated on the training stats.
                _, elbo_val_e = self.compute_py_g_x(N_val, F_val, return_elbo=True)
                elbo_val[epoch] = np.sum(elbo_val_e)

            if use_ml:
                self.MstepML(stats)
            if use_md and (md_epochs is None or epoch in md_epochs):
                self.MstepMD(stats)

        elbo_norm = elbo / np.sum(N)
        # FIX: was ``if x_val is None`` — x_val never existed.
        if N_val is None or F_val is None:
            return elbo, elbo_norm
        else:
            elbo_val_norm = elbo_val / np.sum(N_val)
            return elbo, elbo_norm, elbo_val, elbo_val_norm

    @property
    def TT(self):
        """Cached per-component T_k T_k^T blocks in compressed form."""
        if self._TT is None:
            self._TT = self.compute_TT(self.T, self.K, self._upptr)
        return self._TT

    @property
    def _upptr(self):
        """2-D boolean mask selecting the upper triangle of a (y_dim, y_dim) matrix."""
        if self.__upptr is None:
            I = np.eye(self.y_dim, dtype=float_cpu())
            # FIX: was a raveled float vector, unusable as a numpy index.
            self.__upptr = np.triu(np.ones_like(I)) > 0
        return self.__upptr

    @staticmethod
    def compute_TT(T, K, upptr):
        """Returns (K, tri) matrix of compressed T_k T_k^T blocks."""
        # FIX: was a @staticmethod that nevertheless took/used ``self``.
        x_dim = T.shape[1] // K
        y_dim = T.shape[0]
        TT = np.zeros((K, y_dim * (y_dim + 1) // 2), dtype=float_cpu())
        for k in range(K):
            idx = k * x_dim
            T_k = T[:, idx : idx + x_dim]
            TT_k = np.dot(T_k, T_k.T)
            TT[k] = TT_k[upptr]
        return TT

    @staticmethod
    def compute_L(TT, N, upptr):
        """Compressed posterior precisions: I + sum_k N_k T_k T_k^T, row per sample."""
        # FIX: used ``self._upptr`` inside a @staticmethod.
        y_dim = upptr.shape[0]
        I = np.eye(y_dim, dtype=float_cpu())[upptr]
        return I + np.dot(N, TT)

    @staticmethod
    def normalize_T(T, chol_prec):
        """Right-multiplies each T_k block by its Cholesky precision factor."""
        Tnorm = np.zeros_like(T)
        K = chol_prec.shape[0]
        x_dim = T.shape[1] // K
        for k in range(K):
            idx = k * x_dim
            Tnorm[:, idx : idx + x_dim] = np.dot(
                T[:, idx : idx + x_dim], chol_prec[k].T
            )
        return Tnorm

    def get_config(self):
        """Returns the serializable config, merged with the base class's."""
        config = {"K": self.K}
        base_config = super(JFATotal, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def save_params(self, f):
        """Saves the learnable parameters (only T) to an open file handle."""
        params = {"T": self.T}
        self._save_params_from_dict(f, params)

    @classmethod
    def load_params(cls, f, config):
        """Reconstructs an instance from saved params plus its config dict."""
        param_list = ["T"]
        params = cls._load_params_to_dict(f, config["name"], param_list)
        kwargs = dict(list(config.items()) + list(params.items()))
        return cls(**kwargs)

    def sample(self, num_samples):
        """Not implemented for this model."""
        pass
| 27.057915 | 88 | 0.516124 | 1,043 | 7,008 | 3.25791 | 0.152445 | 0.028252 | 0.045909 | 0.02472 | 0.187758 | 0.119482 | 0.099176 | 0.053855 | 0.053855 | 0.035315 | 0 | 0.009097 | 0.356878 | 7,008 | 258 | 89 | 27.162791 | 0.744841 | 0.018122 | 0 | 0.17 | 0 | 0 | 0.002329 | 0 | 0 | 0 | 0 | 0 | 0.01 | 1 | 0.09 | false | 0.005 | 0.025 | 0 | 0.19 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d00069adda8a7efef667916f453029310a82aea | 515 | py | Python | CodeChef/Python/arrange.py | dfm066/Programming | 53d28460cd40b966cca1d4695d9dc6792ced4c6f | [
"MIT"
] | null | null | null | CodeChef/Python/arrange.py | dfm066/Programming | 53d28460cd40b966cca1d4695d9dc6792ced4c6f | [
"MIT"
] | null | null | null | CodeChef/Python/arrange.py | dfm066/Programming | 53d28460cd40b966cca1d4695d9dc6792ced4c6f | [
"MIT"
] | null | null | null | facts = [1]
fact = 1
for i in range(1,100001):
fact *= i
fact %= 1000000007
facts.append(fact)
T = int(input())
letters = []
for i in range(0,26):
letters.append(0)
while T > 0:
T -= 1
s = input()
cnt = 0
ans = 1
for i in s:
letters[ord(i)-97] += 1
for i in letters:
if i != 0:
cnt += 1
ans = ans*facts[i]
ans = ans*facts[cnt]%1000000007
for i in range(0, 26):
letters[i] = 0;
print(ans)
| 19.074074 | 36 | 0.464078 | 79 | 515 | 3.025316 | 0.303797 | 0.083682 | 0.125523 | 0.087866 | 0.175732 | 0.175732 | 0.175732 | 0 | 0 | 0 | 0 | 0.14791 | 0.396117 | 515 | 26 | 37 | 19.807692 | 0.620579 | 0 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d0218e45fe129209799296000566bde73d084af | 4,380 | py | Python | poptimizer/shared/app.py | poliyev/poptimizer | 71935c4365b0572e65b6d3172f925701dda283db | [
"Unlicense"
] | null | null | null | poptimizer/shared/app.py | poliyev/poptimizer | 71935c4365b0572e65b6d3172f925701dda283db | [
"Unlicense"
] | null | null | null | poptimizer/shared/app.py | poliyev/poptimizer | 71935c4365b0572e65b6d3172f925701dda283db | [
"Unlicense"
] | 1 | 2021-12-02T13:32:44.000Z | 2021-12-02T13:32:44.000Z | """Unit of Work and EventBus."""
import asyncio
import contextlib
from types import TracebackType
from typing import Callable, Generic, Optional, TypeVar
from poptimizer import config
from poptimizer.shared import adapters, domain
EntityType = TypeVar("EntityType", bound=domain.BaseEntity)
class UoW(
    contextlib.AbstractAsyncContextManager[domain.AbstractRepo[EntityType]],
    domain.AbstractRepo[EntityType],
):
    """Unit-of-Work transaction context manager.

    Exposes the repository interface, remembers every domain entity loaded
    through it, and persists all of them when the context exits.
    """

    def __init__(self, mapper: adapters.Mapper[EntityType]) -> None:
        """Stores the mapper — the UoW is a thin wrapper around it."""
        self._mapper = mapper
        self._seen: set[EntityType] = set()

    async def __call__(self, id_: domain.ID) -> EntityType:
        """Loads a domain entity from the database and tracks it for commit."""
        entity = await self._mapper(id_)
        self._seen.add(entity)
        return entity

    async def __aenter__(self) -> domain.AbstractRepo[EntityType]:
        """Returns itself as the repository for the transaction."""
        return self

    async def __aexit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        """Persists the tracked (possibly modified) domain entities to MongoDB.

        Note: commits run even when the context exits with an exception.
        """
        commit = self._mapper.commit
        await asyncio.gather(*[commit(entity) for entity in self._seen])
# A task that resolves to the list of follow-up events it produced.
FutureEvent = asyncio.Future[list[domain.AbstractEvent]]
PendingTasks = set[FutureEvent]


class EventBus(Generic[EntityType]):
    """Bus that processes an event and, recursively, the events it spawns."""

    _logger = adapters.AsyncLogger()

    def __init__(
        self,
        uow_factory: Callable[[], UoW[EntityType]],
        event_handler: domain.AbstractHandler[EntityType],
    ):
        """Requires a transaction (UoW) factory and an event handler."""
        self._uow_factory = uow_factory
        self._event_handler = event_handler

    def handle_event(
        self,
        event: domain.AbstractEvent,
    ) -> None:
        """Synchronous entry point: runs the async pipeline to completion.

        On a POptimizerError, outstanding tasks are cancelled so the error
        report stays readable, then the exception is re-raised.
        """
        loop = asyncio.get_event_loop()
        try:
            loop.run_until_complete(self._handle_event(event))
        except config.POptimizerError:
            _shutdown_tasks(loop)
            raise

    async def _handle_event(
        self,
        event: domain.AbstractEvent,
    ) -> None:
        """Asynchronously processes the event and every event it triggers."""
        pending: PendingTasks = self._create_tasks([event])
        while pending:
            done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
            # Each finished handler may have produced follow-up events.
            for task in done:
                pending |= self._create_tasks(task.result())

    def _create_tasks(self, events: list[domain.AbstractEvent]) -> set[FutureEvent]:
        """Creates one handler task per event."""
        return {asyncio.create_task(self._handle_one_command(event)) for event in events}

    async def _handle_one_command(self, event: domain.AbstractEvent) -> list[domain.AbstractEvent]:
        """Handles a single event inside a fresh UoW; returns follow-up events."""
        self._logger(str(event))
        async with self._uow_factory() as repo:
            return await self._event_handler.handle_event(event, repo)
def _shutdown_tasks(loop: asyncio.AbstractEventLoop) -> None:
"""Завершение в случае ошибки.
После ошибки происходит отмена всех заданий, чтобы не захламлять сообщение об ошибке множеством
сообщений, о том, что результат выполнения задания не был awaited.
Идея кода позаимствована из реализации asyncio.app.
"""
to_cancel = asyncio.all_tasks(loop)
if not to_cancel:
return
for task in to_cancel:
task.cancel()
loop.run_until_complete(asyncio.gather(*to_cancel, loop=loop, return_exceptions=True))
for canceled_task in to_cancel:
if canceled_task.cancelled():
continue
if canceled_task.exception() is not None:
loop.call_exception_handler(
{
"message": "unhandled EventBus exception",
"exception": canceled_task.exception(),
"task": canceled_task,
},
)
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(loop.shutdown_default_executor())
| 32.686567 | 99 | 0.663242 | 473 | 4,380 | 5.938689 | 0.40592 | 0.040584 | 0.017088 | 0.02848 | 0.055536 | 0.055536 | 0.032752 | 0.032752 | 0 | 0 | 0 | 0 | 0.241781 | 4,380 | 133 | 100 | 32.932331 | 0.84583 | 0.136301 | 0 | 0.130952 | 0 | 0 | 0.016695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059524 | false | 0 | 0.071429 | 0 | 0.22619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d04272c0dbb7d2f3b68496a3df2f8724ac5e827 | 1,372 | py | Python | osh/builtin_misc_test.py | Schweinepriester/oil | 8b0e5c58a825223341896064d63a95c8b57a9c05 | [
"Apache-2.0"
] | 2,209 | 2016-11-20T10:32:58.000Z | 2022-03-31T20:51:27.000Z | osh/builtin_misc_test.py | Schweinepriester/oil | 8b0e5c58a825223341896064d63a95c8b57a9c05 | [
"Apache-2.0"
] | 1,074 | 2016-12-07T05:02:48.000Z | 2022-03-22T02:09:11.000Z | osh/builtin_misc_test.py | Schweinepriester/oil | 8b0e5c58a825223341896064d63a95c8b57a9c05 | [
"Apache-2.0"
] | 147 | 2016-12-11T04:13:28.000Z | 2022-03-27T14:50:00.000Z | #!/usr/bin/env python2
"""
builtin_misc_test.py: Tests for builtin_misc.py
"""
from __future__ import print_function
import unittest
from core import pyutil
from frontend import flag_def # side effect: flags are defined!
_ = flag_def
from osh import split
from osh import builtin_misc # module under test
class BuiltinTest(unittest.TestCase):
  """Unit tests for the shell builtins in osh/builtin_misc.py."""

  def testAppendParts(self):
    # Exercises the word-splitting helper used by the `read` builtin.
    # allow_escape is True by default, but False when the user passes -r.
    # Each case: (expected split parts, max_results cap, input line).
    CASES = [
        (['Aa', 'b', ' a b'], 100, 'Aa b \\ a\\ b'),
        (['a', 'b', 'c'], 3, 'a b c '),
    ]
    for expected_parts, max_results, line in CASES:
      sp = split.IfsSplitter(split.DEFAULT_IFS, '')
      spans = sp.Split(line, True)
      print('--- %r' % line)
      for span in spans:
        print('  %s %s' % span)

      # parts is filled with buffers; compare their accumulated strings.
      parts = []
      builtin_misc._AppendParts(line, spans, max_results, False, parts)
      strs = [buf.getvalue() for buf in parts]
      self.assertEqual(expected_parts, strs)
      print('---')

  def testPrintHelp(self):
    # Smoke test only: just verifies Help() runs without raising.
    # Localization: Optionally use GNU gettext()?  For help only.  Might be
    # useful in parser error messages too.  Good thing both kinds of code are
    # generated?  Because I don't want to deal with a C toolchain for it.
    loader = pyutil.GetResourceLoader()
    builtin_misc.Help([], loader)


if __name__ == '__main__':
  unittest.main()
| 28 | 77 | 0.648688 | 190 | 1,372 | 4.531579 | 0.573684 | 0.063879 | 0.010453 | 0.011614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004744 | 0.231778 | 1,372 | 48 | 78 | 28.583333 | 0.812144 | 0.290087 | 0 | 0 | 0 | 0 | 0.055208 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.068966 | false | 0 | 0.206897 | 0 | 0.310345 | 0.137931 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d060e9092d4d4c7bf4bc0ec5921fae018518af1 | 3,670 | py | Python | src/python/dart/service/message.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 18 | 2016-03-03T19:10:21.000Z | 2021-07-14T22:37:35.000Z | src/python/dart/service/message.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 62 | 2016-04-11T15:17:23.000Z | 2017-09-08T17:18:53.000Z | src/python/dart/service/message.py | RetailMeNotSandbox/dart | 58a05f56c04fadd6741501262d92aeb143cd2f2e | [
"MIT"
] | 15 | 2016-03-03T15:38:34.000Z | 2019-03-27T19:33:08.000Z | import os
import boto3
from boto.regioninfo import RegionInfo
from sqlalchemy import text
from dart.model.orm import MessageDao
from dart.context.database import db
from dart.service.patcher import patch_difference
class MessageService(object):
    """Persists worker messages and translates AWS Batch job statuses to
    dart message states (see dart.model.message.MessageState)."""

    def __init__(self, ecs_task_status_override=None, region='us-east-1'):
        # Test hook: a fixed status, or 'passthrough' to echo the message state.
        self._ecs_task_status_override = ecs_task_status_override
        self._region = RegionInfo(self, region, 'ecs.%s.amazonaws.com' % region) if region else None
        self._conn = None  # lazily created boto3 batch client (see `conn`)

    @staticmethod
    def save_message(message_id, message_body, state):
        """Persist a message row stamped with this container's identity; returns the model."""
        message_dao = MessageDao()
        message_dao.id = message_id
        message_dao.message_body = message_body
        message_dao.instance_id = os.environ['DART_INSTANCE_ID']
        message_dao.container_id = os.environ['DART_CONTAINER_ID']
        message_dao.ecs_cluster = os.environ['DART_ECS_CLUSTER']
        message_dao.ecs_container_instance_arn = os.environ['DART_ECS_CONTAINER_INSTANCE_ARN']
        message_dao.ecs_family = os.environ['DART_ECS_FAMILY']
        message_dao.ecs_task_arn = os.environ['DART_ECS_TASK_ARN']
        message_dao.state = state
        db.session.add(message_dao)
        db.session.commit()
        return message_dao.to_model()

    def get_batch_job_status(self, message):
        """ :type message: dart.model.message.Message """
        if self._ecs_task_status_override:
            if self._ecs_task_status_override == 'passthrough':
                return 'RUNNING' if message.state == 'RUNNING' else 'STOPPED'
            return self._ecs_task_status_override
        return self.get_batch_job_status_direct(message.batch_job_id)

    # http://boto3.readthedocs.io/en/latest/reference/services/batch.html#Batch.Client.describe_jobs
    def get_batch_job_status_direct(self, job_id):
        """Return QUEUED|PENDING|RUNNING|COMPLETED|FAILED for a batch job,
        or None when job_id is falsy or the job is unknown."""
        if not job_id:
            return None  # we commented out the call to this flow from broker.py:receive_message().
        result = self.conn.describe_jobs(jobs=[job_id])
        jobs = result['jobs']
        if len(jobs) == 0:
            return None
        # batch possible statuses: 'SUBMITTED'|'PENDING'|'RUNNABLE'|'STARTING'|'RUNNING'|'SUCCEEDED'|'FAILED'
        batch_status = jobs[0]['status']
        # we translate the batch status to RUNNING|COMPLETED|FAILED
        # see dart.model.message.MessageState and dart.message.broker
        # (the original had an unreachable `return None` after this chain; removed)
        if batch_status == 'SUBMITTED':
            return 'QUEUED'
        elif batch_status in ('PENDING', 'RUNNABLE', 'STARTING'):
            return 'PENDING'
        elif batch_status == 'RUNNING':
            return 'RUNNING'
        elif batch_status == 'SUCCEEDED':
            return 'COMPLETED'
        else:
            return 'FAILED'

    @staticmethod
    def get_message(message_id, raise_when_missing=True):
        """Fetch a message by id; raises (by default) when it does not exist."""
        message_dao = MessageDao.query.get(message_id)
        if not message_dao and raise_when_missing:
            raise Exception('message with id=%s not found' % message_id)
        return message_dao.to_model() if message_dao else None

    @staticmethod
    def update_message_state(message, state):
        """ :type message: dart.model.message.Message """
        source_message = message.copy()
        message.state = state
        return patch_difference(MessageDao, source_message, message)

    @staticmethod
    def purge_old_messages():
        # Keep the message table from growing without bound.
        db.session.execute(text(""" DELETE FROM message WHERE created < (NOW() - INTERVAL '5 days') """))
        db.session.commit()

    @property
    def conn(self):
        """Lazily-created boto3 Batch client."""
        if self._conn:
            return self._conn
        self._conn = boto3.client('batch')
        return self._conn
| 39.462366 | 109 | 0.672207 | 459 | 3,670 | 5.11329 | 0.283224 | 0.068172 | 0.033234 | 0.053686 | 0.149127 | 0.051981 | 0 | 0 | 0 | 0 | 0 | 0.002482 | 0.231608 | 3,670 | 92 | 110 | 39.891304 | 0.829787 | 0.128338 | 0 | 0.150685 | 0 | 0 | 0.114744 | 0.009745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0.013699 | 0.09589 | 0 | 0.438356 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d06a010a220a8f3363a231e8addbcd8ff9894e9 | 770 | py | Python | setPixmap.py | EvaGalois/LinsImgPro | daed9bffcf5bea6bf41f36d21f773be18374f7bc | [
"MIT"
] | 1 | 2020-05-19T08:58:58.000Z | 2020-05-19T08:58:58.000Z | setPixmap.py | EvaGalois/LinsImgPro | daed9bffcf5bea6bf41f36d21f773be18374f7bc | [
"MIT"
] | null | null | null | setPixmap.py | EvaGalois/LinsImgPro | daed9bffcf5bea6bf41f36d21f773be18374f7bc | [
"MIT"
] | null | null | null | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QHBoxLayout, QLabel
from PyQt5.QtGui import QPixmap
class Example(QWidget):
    """Minimal window that displays a single image inside a QLabel."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        layout = QHBoxLayout(self)                       # horizontal layout for the window
        picture_label = QLabel(self)                     # label used purely as an image holder
        picture_label.setPixmap(QPixmap('./inputImgs/test2.jpg'))
        layout.addWidget(picture_label)
        self.setLayout(layout)
        self.move(300, 300)
        self.setWindowTitle('像素图控件')
        self.show()
# Entry point: create the Qt application and show the example window.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
2d075408ad95df9320ec62cc02811a6bf1787742 | 3,292 | py | Python | tests/test_exporter.py | coderanger/celery-local-exporter | 3db869b7e0ec09309834b8835c619edbe8898504 | [
"Apache-2.0"
] | 3 | 2020-06-30T22:26:18.000Z | 2021-09-27T23:52:11.000Z | tests/test_exporter.py | coderanger/celery-local-exporter | 3db869b7e0ec09309834b8835c619edbe8898504 | [
"Apache-2.0"
] | null | null | null | tests/test_exporter.py | coderanger/celery-local-exporter | 3db869b7e0ec09309834b8835c619edbe8898504 | [
"Apache-2.0"
] | null | null | null | import os
import os.path
import subprocess
import sys
import time
import pytest
import requests
@pytest.fixture
def launch_worker(tmp_path_factory):
    """Fixture yielding a launcher for a single celery worker subprocess.

    The launcher sets up fresh temp data folders via environment variables,
    starts `celery -A app1 worker`, and returns the Popen handle. Teardown
    terminates the worker and dumps its captured output.
    """
    procs = []

    def _inner(pool="threads", *args):
        # Only one worker per test: a second call would clobber the env setup.
        if procs:
            raise ValueError("already started")
        os.environ["DATA_FOLDER_IN"] = str(tmp_path_factory.mktemp("data_in"))
        os.environ["DATA_FOLDER_OUT"] = str(
            tmp_path_factory.mktemp("data_out")
        )
        os.environ["RESULTS"] = str(tmp_path_factory.mktemp("results"))
        proc = subprocess.Popen(
            [
                sys.executable,
                "-m",
                "celery",
                "-A",
                "app1",
                "worker",
                "-l",
                "debug",
                "-P",
                pool,
            ]
            + list(args),
            cwd=os.path.dirname(os.path.abspath(__file__)),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        procs.append(proc)
        # Wait a second to let it start up.
        time.sleep(1)
        # For future calls to run(), set them up to deliver to the inbox.
        os.environ["DATA_FOLDER_OUT"] = os.environ["DATA_FOLDER_IN"]
        return proc

    yield _inner

    # Teardown: stop the worker and surface its logs for debugging.
    for proc in procs:
        proc.terminate()
        proc.wait()
        out, err = proc.communicate()
        print(out.decode())
        print(err.decode())
        proc.stdout.close()
        proc.stderr.close()
@pytest.fixture
def run():
    """Fixture returning a helper that executes a Python snippet in a fresh
    interpreter subprocess, feeding the code on stdin through a pipe.

    Fix: the read end of the pipe was never closed in the parent process,
    leaking one file descriptor per invocation.
    """
    def _inner(code):
        read, write = os.pipe()
        os.write(write, code.encode())
        os.close(write)
        try:
            return subprocess.check_call(
                [sys.executable],
                cwd=os.path.dirname(os.path.abspath(__file__)),
                stdin=read,
            )
        finally:
            # The child dup'd the fd; close our copy to avoid a leak.
            os.close(read)
    return _inner
def test_starting(launch_worker):
    """The worker starts and the exporter serves the execution-time metric."""
    worker = launch_worker()
    assert worker.poll() is None
    metrics = requests.get("http://localhost:9000/").text
    assert "celery_task_execution_time" in metrics
def test_run_task_add(launch_worker, run):
    """Running app1.add once increments its SUCCESS postrun counter to 1."""
    worker = launch_worker()
    assert worker.poll() is None
    run(
        """
import app1
app1.add.delay(1, 1).wait(60)
"""
    )
    metrics = requests.get("http://localhost:9000/").text
    expected = 'celery_task_postrun_count_total{state="SUCCESS",task="app1.add"} 1.0'
    assert expected in metrics
def test_run_task_add_twice(launch_worker, run):
    """Two app1.add runs raise the SUCCESS postrun counter to 2."""
    worker = launch_worker()
    assert worker.poll() is None
    run(
        """
import app1
x = app1.add.delay(1, 1)
y = app1.add.delay(1, 2)
x.wait(60)
y.wait(60)
"""
    )
    metrics = requests.get("http://localhost:9000/").text
    expected = 'celery_task_postrun_count_total{state="SUCCESS",task="app1.add"} 2.0'
    assert expected in metrics
def test_run_task_sleep(launch_worker, run):
    """A 5s sleep task lands in the (5.0, 7.5] execution-time bucket."""
    worker = launch_worker()
    assert worker.poll() is None
    run(
        """
import app1
app1.sleep.delay(5).wait(60)
"""
    )
    metrics = requests.get("http://localhost:9000/").text
    for expected in (
        'celery_task_postrun_count_total{state="SUCCESS",task="app1.sleep"} 1.0',
        'celery_task_execution_time_bucket{le="5.0",task="app1.sleep"} 0.0',
        'celery_task_execution_time_bucket{le="7.5",task="app1.sleep"} 1.0',
    ):
        assert expected in metrics
| 23.683453 | 80 | 0.559235 | 416 | 3,292 | 4.247596 | 0.278846 | 0.061121 | 0.054329 | 0.022637 | 0.55631 | 0.473118 | 0.442558 | 0.442558 | 0.332201 | 0.306735 | 0 | 0.025877 | 0.307412 | 3,292 | 138 | 81 | 23.855072 | 0.749123 | 0.029465 | 0 | 0.278846 | 0 | 0 | 0.196524 | 0.114305 | 0 | 0 | 0 | 0 | 0.096154 | 1 | 0.076923 | false | 0 | 0.067308 | 0 | 0.173077 | 0.019231 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d0a2f1569fadbc34d20318046924ba2aa98f716 | 2,906 | py | Python | examples/Structural/main.py | HerminioTH/GeoFlow1D | 44a5c11e3297827b265c1ea44bb18256b074fa66 | [
"MIT"
] | 2 | 2020-02-10T11:23:16.000Z | 2020-07-01T20:28:57.000Z | examples/Structural/main.py | HerminioTH/GeoFlow1D | 44a5c11e3297827b265c1ea44bb18256b074fa66 | [
"MIT"
] | null | null | null | examples/Structural/main.py | HerminioTH/GeoFlow1D | 44a5c11e3297827b265c1ea44bb18256b074fa66 | [
"MIT"
] | null | null | null | import geoflow1D
from geoflow1D.GridModule import *
from geoflow1D.FieldsModule import *
from geoflow1D.LinearSystemModule import *
from geoflow1D.GeoModule import *
from geoflow1D.SolverModule import *
import numpy as np
from matplotlib import pyplot as plt
# -------------- PROBLEM ILLUSTRATION -----------------
# | sigma
# |
# +---V---+ ---
# | | |
# | | |
# | | |
# | | |
# | | |
# | | | H
# | | |
# | | |
# | | |
# x ^ | | |
# | | | |
# _|_ |_______| _|_
# -----------------------------------------------------
class SolidProps(object):
    """Per-region solid properties: constrained modulus M and density rho."""

    def __init__(self, grid, M, rho):
        region = grid.getRegions()[0]
        n_regions = grid.getNumberOfRegions()
        self.M = ScalarField(n_regions)
        self.M.setValue(region, M)
        self.rho = ScalarField(n_regions)
        self.rho.setValue(region, rho)
mm = 1000.  # meters -> millimeters factor, used for plotting only

# -------------- GRID DATA ----------------------------
H = 10            # column height
nVertices = 15    # number of grid vertices
nodesCoord, elemConn = createGridData(H, nVertices)
gridData = GridData()
gridData.setElementConnectivity(elemConn)
gridData.setNodeCoordinates(nodesCoord)
grid = Grid_1D(gridData)
grid.buildStencil()
# -----------------------------------------------------

# -------------- PROPERTIES ----------------------------
M = 1.3e8         # Constrained modulus
rho = 2300.       # Solid density
props = SolidProps(grid, M, rho)
g = -9.81         # gravitational acceleration (negative: points in -x)
# -----------------------------------------------------

# ------------- CREATE LINEAR SYSTEM ------------------
nDOF = 1          # one displacement unknown per vertex
ls = LinearSystemCOO(grid.stencil, nDOF)
ls.initialize()
# -----------------------------------------------------

# -------------- NUMERICAL SOLUTION -------------------
# Assemble stiffness and gravity (body-force) contributions.
AssemblyStiffnessMatrix(ls, grid, props, 0)
AssemblyGravityToVector(ls, grid, props, g, 0)
# -----------------------------------------------------

# ------------- BOUNDARY CONDITIONS -------------------
ls.applyDirichlet(0, 0)   # fixed displacement at the bottom vertex
sigma = -5e4              # compressive traction applied at the top
ls.applyNeumann(-1, sigma)
# -----------------------------------------------------

# ----------------- DEFINE SOLVER ---------------------
solver = Solver(tol=1e-8, maxiter=500)
solver.solve(ls.matrix, ls.rhs)
# -----------------------------------------------------
# ------------- ANALYTICAL SOLUTION -------------------
def analyticalSolution(M, stress, L, x, gravity, rho):
    """Closed-form displacement u(x) of a 1D column under self-weight and a
    top traction `stress`.

    Bug fix: the body previously used the module-level ``g`` instead of the
    ``gravity`` parameter, silently ignoring the caller's value.
    """
    x = np.array(x)
    return x * (-stress + rho * gravity * L) / M - rho * gravity * x * x / (2 * M)
# Evaluate the analytic displacement on a fine grid for comparison.
x_a = np.linspace(0, H, 100)
u_a = analyticalSolution(M, sigma, H, x_a, g, rho)
# -----------------------------------------------------

# -------------- PLOT SOLUTION ------------------------
# Numeric solution at the grid vertices vs. the analytic curve.
x_n = [v.getCoordinate() for v in grid.getVertices()]
u_n = solver.solution
plt.plot(u_n*mm, x_n, 'o', label='Numeric')
plt.plot(u_a*mm, x_a, '-', label='Analytic')
plt.grid(True)
plt.xlabel('Displacement (mm)')
plt.ylabel('Coordinate X (m)')
plt.show()
# -----------------------------------------------------
| 29.653061 | 56 | 0.456986 | 260 | 2,906 | 5.011538 | 0.442308 | 0.049885 | 0.058327 | 0.056792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018428 | 0.15967 | 2,906 | 97 | 57 | 29.958763 | 0.515152 | 0.433586 | 0 | 0 | 0 | 0 | 0.031056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d0e5939c50882dfd177fbde933852e0ecf02d4f | 1,024 | py | Python | torch2trt/converters/matmul.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | torch2trt/converters/matmul.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | torch2trt/converters/matmul.py | grimoire/torch2trt | bf65d573f69879442d542e16c6280de4a1354d72 | [
"MIT"
] | null | null | null | from torch2trt.torch2trt import *
from torch2trt.module_test import add_module_test
import tensorrt as trt
@tensorrt_converter('torch.matmul')
def convert_matmul(ctx):
    """Convert a traced ``torch.matmul`` call into a TensorRT matrix-multiply layer."""
    input_a = ctx.method_args[0]
    input_b = ctx.method_args[1]
    # Wrap both operands as TensorRT tensors on the network being built.
    input_a_trt, input_b_trt = trt_(ctx.network, input_a, input_b)
    output = ctx.method_return
    mm_op = trt.MatrixOperation.NONE  # plain matmul: no transpose on either operand
    layer = ctx.network.add_matrix_multiply(input_a_trt, mm_op, input_b_trt, mm_op)
    # Attach the TRT tensor to the traced output so downstream converters can use it.
    output._trt = layer.get_output(0)
class MatmulTest(torch.nn.Module):
    """Trivial module wrapping torch.matmul, exercised by the converter tests."""

    def __init__(self):
        super(MatmulTest, self).__init__()

    def forward(self, x, y):
        product = torch.matmul(x, y)
        return product
# Registered module tests covering broadcast matmul shape combinations.
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6), (1, 2, 6, 7)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 2, 4, 6), (1, 2, 6, 7)])
@add_module_test(torch.float32, torch.device('cuda'), [(1, 4, 6), (1, 6, 7)])
# @add_module_test(torch.float32, torch.device('cuda'), [(4, 6), (6, 7)])
def test_matmul():
    """Factory for the module under test; shapes come from the decorators above."""
    return MatmulTest()
2d1213c410c2a6b8aac4888a4f6bc94463fa2640 | 4,538 | py | Python | HayStack_Client/IOTA_Module.py | ConsensusGroup/Haystack | c2d0b8fb7b2064b05a5d256bb949dda9a0ef569d | [
"MIT"
] | 1 | 2019-11-28T08:50:26.000Z | 2019-11-28T08:50:26.000Z | HayStack_Client/IOTA_Module.py | ConsensusGroup/Haystack | c2d0b8fb7b2064b05a5d256bb949dda9a0ef569d | [
"MIT"
] | 3 | 2019-11-22T04:23:47.000Z | 2019-11-30T07:11:24.000Z | HayStack_Client/IOTA_Module.py | ConsensusGroup/Haystack | c2d0b8fb7b2064b05a5d256bb949dda9a0ef569d | [
"MIT"
] | 3 | 2018-03-19T05:20:44.000Z | 2019-11-22T00:56:31.000Z | ####################################################################################
############# The purpose of the module is to handle IOTA interactions #############
####################################################################################
#IOTA library
from iota import TryteString, Address, ProposedBundle, ProposedTransaction, Bundle
from iota.crypto.addresses import AddressGenerator
from iota.adapter.wrappers import RoutingWrapper
from iota.adapter import HttpAdapter
from iota import *
#Other libraries
from random import SystemRandom
from Configuration_Module import Configuration
from Tools_Module import Tools
import config
######## Base IOTA classes ########
def Seed_Generator():
    """Return a cryptographically random 81-character IOTA seed.

    Valid seed characters are the tryte alphabet: uppercase A-Z plus '9'.
    Uses SystemRandom (os-level entropy) — appropriate for secrets.
    """
    tryte_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ9"
    rng = SystemRandom()
    return "".join(rng.choice(tryte_alphabet) for _ in range(81))
def Return_Fastest_Node():
    """Pick the lowest-latency node for each direction from the benchmark file.

    Returns a dict that may contain "Send" and/or "Receive" keys mapped to the
    fastest node for that direction (keys are absent when no node beats the
    999.0 sentinel). Entries with unusable stats are treated as very slow.

    Fix: removed a stray no-op ``float`` expression statement that sat in the
    try block; also catch KeyError for entries missing "Send"/"Receive".
    """
    x = Configuration()
    Node_Dictionary = Tools().Read_From_Json(
        directory=x.UserFolder + "/" + x.NodeFolder + "/" + x.NodeFile)
    Send_initial = 999.0
    Receive_initial = 999.0
    Fastest_Combination = {}
    for Node, Stats in Node_Dictionary.items():
        try:
            Send = Stats["Send"]
            Receive = Stats["Receive"]
        except (TypeError, KeyError):
            # Malformed entry (e.g. Stats is None or missing keys): skip it
            # by treating it as maximally slow.
            Send = 999.0
            Receive = 999.0
        if Send_initial > Send:
            Send_initial = Send
            Fastest_Combination["Send"] = Node
        if Receive_initial > Receive:
            Receive_initial = Receive
            Fastest_Combination["Receive"] = Node
    return Fastest_Combination
class IOTA_Module(Configuration):
    """Thin wrapper around the PyOTA client for one seed: address generation,
    sending and receiving zero-value data transactions."""

    def __init__(self, Seed, IOTA_Instance = ""):
        Configuration.__init__(self)
        # Prefer the fastest benchmarked node; fall back to localhost when the
        # benchmark data is unavailable or unreadable.
        try:
            Optimal_Node = Return_Fastest_Node()["Send"]
            if Optimal_Node == 999.0:
                Optimal_Node = Return_Fastest_Node()["Receive"]
            config.Node = Optimal_Node
        except:
            config.Node = "http://localhost:14265"
        if config.Node == "http://localhost:14265":
            # Route proof-of-work (attachToTangle) explicitly to the local node.
            self.IOTA_Api = Iota(RoutingWrapper(str(config.Node)).add_route('attachToTangle', 'http://localhost:14265'), seed = Seed)
        else:
            self.IOTA_Api = Iota(config.Node, seed = Seed)
        if IOTA_Instance != "":
            # Allow injecting a pre-built API client (e.g. for testing).
            self.IOTA_Api = IOTA_Instance
        self.Seed_Copy = Seed

    def Generate_Address(self, Index = 0):
        """Derive and return the address at `Index` for this seed, as a string."""
        generate = self.IOTA_Api.get_new_addresses(index = int(Index))
        # NOTE(review): stripping the repr of the Address list is fragile and
        # presumably yields the bare tryte string — confirm against the PyOTA
        # version in use.
        Address = str(generate.get('addresses')).strip("[Address(").strip(")]").strip("'")
        return Address

    def Send(self, ReceiverAddress, Message, Test_Node = False):
        """Bundle one or more zero-value message transactions, broadcast them,
        and return the bundle hash. Accepts either a single (address, message)
        pair of strings or two equal-length lists."""
        def Bundle_Generation(Recepient, ToSend):
            # Encode the payload as trytes and append it to the shared bundle.
            text_transfer = TryteString.from_string(str(ToSend))
            txn_2 = ProposedTransaction(address = Address(Recepient), message = text_transfer, value = 0)
            bundle.add_transaction(txn_2)
        bundle = ProposedBundle()
        if type(ReceiverAddress) == list and type(Message) == list and (len(ReceiverAddress) == len(Message)):
            for i in range(len(ReceiverAddress)):
                Bundle_Generation(ReceiverAddress[i], Message[i])
        elif type(ReceiverAddress) == str and type(Message) == str:
            Bundle_Generation(ReceiverAddress, Message)
        bundle.finalize()
        coded = bundle.as_tryte_strings()
        hashed = bundle.hash
        # Return the fastest sender node from the DB if localhost is not present.
        if str(self.Node) != "http://localhost:14265":
            if Test_Node == False:
                self.Node = Return_Fastest_Node()["Send"]
                self.IOTA_Api = Iota(self.Node, seed = self.Seed_Copy)
        send = self.IOTA_Api.send_trytes(trytes = coded, depth = 4)
        return hashed

    def Receive(self, Start = 0, Stop = "", JSON = False, Test_Node = False):
        """Fetch this seed's transaction history, storing decoded messages on
        ``self.Message``; returns ``self`` for chaining."""
        # Return the fastest receiver node from the DB if localhost is not present.
        if self.Node != "http://localhost:14265":
            if Test_Node == False:
                self.Node = Return_Fastest_Node()["Receive"]
                self.IOTA_Api = Iota(self.Node, seed = self.Seed_Copy)
        # Choose which segment of the Tx history to retrieve.
        if Stop == "":
            mess = self.IOTA_Api.get_account_data(start = Start)
        else:
            mess = self.IOTA_Api.get_account_data(start = Start, stop = Stop)
        # Decompose each bundle into its message payloads.
        bundle = mess.get('bundles')
        Message = []
        self.Message = []
        for i in bundle:
            message = str(i.get_messages()).strip("[u'").strip("']")
            if JSON == True:
                # Optionally pair the message with JSON-compatible metadata.
                Json = i.as_json_compatible()[0]
                message = [Json,message]
            self.Message.append(message)
        return self

    def LatestTangleTime(self):
        # Query the connected node for its reported time; cache on the instance.
        Node = self.IOTA_Api.get_node_info()
        self.TangleTime = Node.get("time")
        return self
| 33.865672 | 124 | 0.666373 | 568 | 4,538 | 5.167254 | 0.272887 | 0.027257 | 0.037479 | 0.025554 | 0.185349 | 0.139012 | 0.139012 | 0.139012 | 0.139012 | 0.112436 | 0 | 0.015763 | 0.175187 | 4,538 | 133 | 125 | 34.120301 | 0.768368 | 0.078889 | 0 | 0.10101 | 0 | 0 | 0.052485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080808 | false | 0 | 0.090909 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d15578f96b8143429650898e1882b35ef941ed3 | 2,412 | py | Python | demo.py | TiagoFilipeSousaGoncalves/code2model-codesprinters | 9f38a994a3a1036916ea82f9523baa8a5eed8226 | [
"MIT"
] | null | null | null | demo.py | TiagoFilipeSousaGoncalves/code2model-codesprinters | 9f38a994a3a1036916ea82f9523baa8a5eed8226 | [
"MIT"
] | null | null | null | demo.py | TiagoFilipeSousaGoncalves/code2model-codesprinters | 9f38a994a3a1036916ea82f9523baa8a5eed8226 | [
"MIT"
] | null | null | null | # Imports
import streamlit as st
import pandas as pd
import difflib
# Load results .CSV files produced by the two experiments.
code_simil_results = pd.read_csv('results/resultados.csv')
prog_lang_results = pd.read_csv('results/resultados_language.csv')

# Create a select box to choose the demo
add_selectbox = st.sidebar.selectbox(
    "What demo would you like to see?",
    ("Code similarity", "Language identification")
)

# The input index of our data: which result row to inspect.
input_number = st.number_input('Select an index', min_value=0, max_value=len(code_simil_results))

# Code similarity demo: show the original, its nearest neighbour, and a diff.
if add_selectbox == 'Code similarity':
    st.write("Code similarity")
    col1, col2, col3 = st.columns(3)
    original = code_simil_results.iloc[input_number][['corpo']].values[0]
    most_similar = code_simil_results.iloc[input_number][['most_similar']].values[0]
    # NOTE(review): this reads the 'most_similar' column again; presumably it
    # was meant to read a similarity-score column — confirm (value is unused).
    similarity = code_simil_results.iloc[input_number][['most_similar']].values[0]
    with col1:
        st.text("Original Code")
        st.text(original)
    with col2:
        st.text("Most similar code")
        st.text(most_similar)
    with col3:
        st.text("Diff between code")
        init_text = ''
        # Build an HTML-colored unified diff, skipping diff header lines.
        for text in difflib.unified_diff(original.split("\n"), most_similar.split("\n")):
            if text[:3] not in ('+++', '---', '@@ '):
                if '+' in text[0]:
                    text = f"<p style='color: green'> {text} </p>"
                elif '-' in text[0]:
                    text = f"<p style='color: red'> {text} </p>"
                init_text = init_text + '\n' + text
        st.markdown(init_text, unsafe_allow_html=True)
        # st.text(init_text)

# Language identification demo: code with predicted vs. true language.
elif add_selectbox == 'Language identification':
    st.write("Language identification")
    col1, col2, col3 = st.columns(3)
    original_code = prog_lang_results.iloc[input_number][['corpo']].values[0]
    original_prog_lang = prog_lang_results.iloc[input_number][['platafor']].values[0]
    predicted_prog_lan = prog_lang_results.iloc[input_number][['platafor_predict']].values[0]
    with col1:
        st.text("Original Code")
        st.text(original_code)
    with col2:
        st.text("Predicted Programming Language")
        st.text(predicted_prog_lan)
    with col3:
        st.text("Original Programming Language")
        st.text(original_prog_lang)
| 28.714286 | 98 | 0.620232 | 306 | 2,412 | 4.699346 | 0.29085 | 0.05007 | 0.066759 | 0.091794 | 0.363004 | 0.363004 | 0.308067 | 0.207928 | 0.128651 | 0.128651 | 0 | 0.013296 | 0.251658 | 2,412 | 83 | 99 | 29.060241 | 0.78338 | 0.062604 | 0 | 0.208333 | 0 | 0 | 0.220277 | 0.024424 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d1ab381951528b1b2a476a21ada75863d82ce29 | 2,436 | py | Python | Bot.py | Grohiik/BotMauData2020 | 7b7eecca1b5fdc73a0e9b3593cea62b2fb1333f8 | [
"MIT"
] | null | null | null | Bot.py | Grohiik/BotMauData2020 | 7b7eecca1b5fdc73a0e9b3593cea62b2fb1333f8 | [
"MIT"
] | null | null | null | Bot.py | Grohiik/BotMauData2020 | 7b7eecca1b5fdc73a0e9b3593cea62b2fb1333f8 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from secrets import Token as Token
bot = commands.Bot(command_prefix="!")
role_add_channel_id = 750036407757832242
@bot.event
async def on_message(message):
    """Process commands posted in the role-add channel, cleaning up after."""
    # Never react to the bot's own messages.
    if message.author == bot.user:
        return
    if message.channel.id == role_add_channel_id:
        await bot.process_commands(message)
        # Remove the triggering message after 10 seconds to keep the channel tidy.
        await message.delete(delay=10)
@bot.command(name="färg", help="lägg till en färg")
async def color(payload):
    """Give the invoking member a personal color role (named after their id),
    recoloring it if it already exists, otherwise creating and assigning it."""
    if payload.channel.id == role_add_channel_id:
        member = payload.author
        guild = member.guild
        check = True
        # Parse a 6-digit hex color from the message text ("!färg RRGGBB").
        color = discord.Colour(int(payload.message.content[6:12], base=16))
        # If the member already has their personal role, just recolor it.
        for role in member.roles:
            if str(member.id) == role.name:
                await role.edit(colour=(color))
                await payload.channel.send(
                    content=f"Ändrade {member.display_name} färg till {color.value}",
                    delete_after=10,
                )
                check = False
                break
        if check:
            # First use: create the personal role, then look it up and assign it.
            await guild.create_role(
                name=str(member.id), color=color, reason="färg roll"
            )
            for role in guild.roles:
                if str(member.id) == role.name:
                    await member.add_roles(role)
                    await payload.channel.send(
                        content=f"Ändrade {member.display_name} färg till {color.value}",
                        delete_after=10,
                    )
                    break
@bot.event
async def on_command_error(ctx, error):
    """Reply (auto-deleting) when an unknown command is used."""
    if isinstance(error, discord.ext.commands.errors.CommandNotFound):
        await ctx.send("detta kommandet finns inte", delete_after=15)
        await ctx.delete(delay=10)
# NOTE(review): "on_command_error2" is not a recognized discord.py event name,
# so this handler is presumably never invoked — confirm intent before removal.
@bot.event
async def on_command_error2(ctx, error):
    await ctx.send("kommand error", delete_after=15)
    await ctx.delete(delay=10)
# command to test if the bot is running
@bot.command(name="test", help="test")
async def test(ctx):
    """Liveness check: reply with a short status message."""
    await ctx.send("Jag är online!")
# command to test if the bot is running
@bot.command(name="ping", help="test")
async def test2(ctx):
    """Ping/pong check."""
    await ctx.send("pong 🏓")
# print a message if the bot is online
@bot.event
async def on_ready():
    """Log the successful connection and set the bot's presence text."""
    print("bot connected")
    await bot.change_presence(activity=discord.Game("FÄRG"))
bot.run(Token)
| 29 | 89 | 0.614122 | 316 | 2,436 | 4.655063 | 0.316456 | 0.038069 | 0.03535 | 0.043508 | 0.35758 | 0.333107 | 0.265126 | 0.265126 | 0.176751 | 0.176751 | 0 | 0.022311 | 0.28243 | 2,436 | 83 | 90 | 29.349398 | 0.81865 | 0.055829 | 0 | 0.290323 | 0 | 0 | 0.099782 | 0.018301 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.048387 | 0 | 0.064516 | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d1abf839257188564b4d1db171a9a9e2fcac08f | 1,572 | py | Python | cms/blocks.py | mitodl/micromasters | 2b1df8ac7c4395cc0a0227d936b3f021f0ae3019 | [
"BSD-3-Clause"
] | 32 | 2016-03-25T01:03:13.000Z | 2022-01-15T19:35:42.000Z | cms/blocks.py | mitodl/micromasters | 2b1df8ac7c4395cc0a0227d936b3f021f0ae3019 | [
"BSD-3-Clause"
] | 4,858 | 2016-03-03T13:48:30.000Z | 2022-03-29T22:09:51.000Z | cms/blocks.py | mitodl/micromasters | 2b1df8ac7c4395cc0a0227d936b3f021f0ae3019 | [
"BSD-3-Clause"
] | 20 | 2016-08-18T22:07:44.000Z | 2021-11-15T13:35:35.000Z | """Page blocks"""
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class CourseTeamBlock(blocks.StructBlock):
    """
    Block class that defines a course team member
    """
    # Member's display name.
    name = blocks.CharBlock(max_length=100, help_text="Name of the course team member.")
    # Optional title/role, limited to simple inline formatting.
    title = blocks.RichTextBlock(
        required=False,
        features=["bold", "italic"],
        help_text="Title of the course team member."
    )
    bio = blocks.TextBlock(help_text="Short bio of course team member.")
    # Square headshot; expected dimensions are 385x385 px.
    image = ImageChooserBlock(
        help_text='Image for the faculty member. Should be 385px by 385px.'
    )
class ImageWithLinkBlock(blocks.StructBlock):
    """ Image with a clickable link on it """
    image = ImageChooserBlock(label="Image", required=True, help_text="The image to display.")
    # Target URL the rendered image links to.
    link = blocks.URLBlock(
        label="Link",
        required=True,
        help_text="Absolute URL to the image, like https://example.com/some_image.jpg"
    )
    # Horizontal alignment within the page.
    align = blocks.ChoiceBlock(
        choices=[('center', 'Center'), ('right', 'Right'), ('left', 'Left')],
        default='left',
        max_length=10,
    )
    # Optional explicit render dimensions (pixels).
    width = blocks.IntegerBlock(required=False)
    height = blocks.IntegerBlock(required=False)

    class Meta:
        template = 'cms/imagewithlink.html'
        form_classname = 'ImageWithLinkBlock'
        icon = 'picture'
class ResourceBlock(blocks.StructBlock):
    """
    A custom block for resource pages.
    """
    # Section heading for this resource entry.
    heading = blocks.CharBlock(max_length=100)
    # Rich-text body with the resource details.
    detail = blocks.RichTextBlock()
| 30.230769 | 94 | 0.660305 | 179 | 1,572 | 5.73743 | 0.497207 | 0.046738 | 0.062317 | 0.046738 | 0.093476 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011438 | 0.221374 | 1,572 | 51 | 95 | 30.823529 | 0.827614 | 0.080789 | 0 | 0 | 0 | 0 | 0.2402 | 0.015681 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d1e81bfe87388671566d46153dbbdae8f99b502 | 15,698 | py | Python | zygrader/grader.py | natecraddock/zygrader | 3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a | [
"MIT"
] | 5 | 2019-11-15T17:42:42.000Z | 2021-04-20T19:35:25.000Z | zygrader/grader.py | natecraddock/zygrader | 3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a | [
"MIT"
] | 76 | 2020-02-22T01:42:16.000Z | 2021-04-28T18:47:20.000Z | zygrader/grader.py | natecraddock/zygrader | 3a1d5c1dbe76c8f76c2a99f271a26b2ec873006a | [
"MIT"
] | 2 | 2020-02-21T04:39:38.000Z | 2021-04-20T19:35:20.000Z | """Grader: Menus and popups for grading and pair programming"""
import curses
import getpass
from zygrader import data, ui, utils
from zygrader.config import preferences
from zygrader.config.shared import SharedData
from zygrader.data import model
from zygrader.zybooks import Zybooks
from zygrader.ui import colors
def get_student_row_color_sort_index(lab, student):
    """Return (curses color pair, sort index) for a student's grader row.

    Locked rows sort first (0), flagged rows second (1), everything else
    last (2); plain-string rows always get the default treatment.
    """
    is_text_row = isinstance(student, str)
    if data.lock.is_locked(student, lab) and not is_text_row:
        return curses.color_pair(colors.COLOR_PAIR_LOCKED), 0
    if data.flags.is_submission_flagged(student, lab) and not is_text_row:
        return curses.color_pair(colors.COLOR_PAIR_FLAGGED), 1
    return curses.color_pair(colors.COLOR_PAIR_DEFAULT), 2
def fill_student_list(student_list: ui.layers.ListLayer,
                      students,
                      lab,
                      use_locks,
                      callback_fn=None):
    """Rebuild the list layer with one colored, sortable row per student."""
    student_list.clear_rows()
    for student in students:
        new_row = student_list.add_row_text(str(student), callback_fn, student,
                                            lab, use_locks)
        row_color, row_sort = get_student_row_color_sort_index(lab, student)
        new_row.set_row_color(row_color)
        new_row.set_row_sort_index(row_sort)
    # Mark the layer dirty so the UI redraws it.
    student_list.rebuild = True
def set_submission_message(popup: ui.layers.OptionsPopup,
                           submission: data.model.Submission):
    """Replace the popup's message with the submission's display lines."""
    lines = list(submission)
    popup.set_message(lines)
def get_submission(lab, student, use_locks=True):
    """Download a student's submission for `lab` from zyBooks.

    When `use_locks` is True the student/lab pair is locked first so no other
    grader works on it concurrently. Returns a data.model.Submission.
    """
    window = ui.get_window()
    zy_api = Zybooks()

    # Lock student
    if use_locks:
        data.lock.lock(student, lab)

    submission_response = zy_api.download_assignment(student, lab)
    submission = data.model.Submission(student, lab, submission_response)

    # Report missing files
    if submission.flag & data.model.SubmissionFlag.BAD_ZIP_URL:
        msg = [
            f"One or more URLs for {student.full_name}'s code submission are bad.",
            "Some files could not be downloaded. Please",
            "View the most recent submission on zyBooks.",
        ]
        # Fix: construct a Popup layer; the original called the ui.layers
        # module itself, which would raise a TypeError at runtime.
        popup = ui.layers.Popup("Warning", msg)
        window.run_layer(popup)

    # A student may have submissions beyond the due date, and an exception
    # In case that happens, always allow a normal grade, but show a message
    if submission.flag == data.model.SubmissionFlag.NO_SUBMISSION:
        pass

    return submission
def pick_submission(submission_popup: ui.layers.OptionsPopup,
                    lab: data.model.Lab, student: data.model.Student,
                    submission: data.model.Submission):
    """Allow the user to pick a submission to view"""
    window = ui.get_window()
    zy_api = Zybooks()

    # If the lab has multiple parts, prompt to pick a part
    part_index = 0
    if len(lab.parts) > 1:
        part_index = submission.pick_part(pick_all=True)
    # None: the user canceled the part picker.
    if part_index is None:
        return
    # -1: "all parts" — re-download the latest submission of every part.
    if part_index == -1:
        def wait_fn():
            # Fetch each part's newest submission and splice it into the
            # submission object, then refresh the popup's message.
            for i, part in enumerate(lab.parts):
                part_submissions = zy_api.get_submissions_list(
                    part["id"], student.id)
                if len(part_submissions) > 0:
                    part_response = zy_api.download_assignment_part(
                        lab, student.id, part,
                        len(part_submissions) - 1)
                    submission.update_part(part_response,
                                           lab.parts.index(part))
            set_submission_message(submission_popup, submission)

        popup = ui.layers.WaitPopup("Downloading")
        popup.set_message([f"Downloading latest submissions..."])
        popup.set_wait_fn(wait_fn)
        window.run_layer(popup)
        return

    # Get list of all submissions for that part
    part = lab.parts[part_index]
    all_submissions = zy_api.get_submissions_list(part["id"], student.id)
    if not all_submissions:
        popup = ui.layers.Popup("No Submissions",
                                ["The student did not submit this part"])
        window.run_layer(popup)
        return

    # Reverse to display most recent submission first
    all_submissions.reverse()
    popup = ui.layers.ListLayer("Select Submission", popup=True)
    popup.set_exit_text("Cancel")
    for sub in all_submissions:
        popup.add_row_text(sub)
    window.run_layer(popup)
    if popup.canceled:
        return
    submission_index = popup.selected_index()

    # Modify submission index to un-reverse the index
    submission_index = abs(submission_index - (len(all_submissions) - 1))

    # Fetch that submission and refresh the popup's message with it.
    part_response = zy_api.download_assignment_part(lab, student.id, part,
                                                    submission_index)
    submission.update_part(part_response, lab.parts.index(part))
    set_submission_message(submission_popup, submission)
def view_diff(first: model.Submission, second: model.Submission):
    """Show a diff between the two students' submissions.

    Aborts with a popup when either submission is missing.
    """
    either_missing = (first.flag & model.SubmissionFlag.NO_SUBMISSION
                      or second.flag & model.SubmissionFlag.NO_SUBMISSION)
    if either_missing:
        window = ui.get_window()
        popup = ui.layers.Popup("No Submissions", [
            "Cannot diff submissions because at least one student has not submitted."
        ])
        window.run_layer(popup)
        return
    use_browser = preferences.get("browser_diff")
    # Sort so both file lists line up deterministically
    paths_a = sorted(utils.get_source_file_paths(first.files_directory))
    paths_b = sorted(utils.get_source_file_paths(second.files_directory))
    diff = utils.make_diff_string(paths_a, paths_b, first.student.full_name,
                                  second.student.full_name, use_browser)
    utils.view_string(diff, "submissions.diff", use_browser)
def run_code_fn(window, submission):
    """Compile and run a submission's code, showing an error popup on failure."""
    # Debugging (gdb) is disabled for this entry point
    use_gdb = False
    compiled_ok = submission.compile_and_run_code(use_gdb)
    if not compiled_ok:
        error_popup = ui.layers.OptionsPopup("Error", ["Could not compile code"])
        error_popup.add_option("View Log", submission.view_stderr)
        window.run_layer(error_popup)
def pair_programming_submission_callback(lab, submission):
    """Show one pair-programming student's submission with its actions."""
    window = ui.get_window()
    options = ui.layers.OptionsPopup("Pair Programming Submission")
    options.set_message(submission)
    options.add_option(
        "Pick Submission",
        lambda: pick_submission(options, lab, submission.student, submission))
    options.add_option("Run", lambda: run_code_fn(window, submission))
    options.add_option("View", lambda: submission.show_files())
    window.run_layer(options)
    # Clear any process left over from running the submission's code
    SharedData.running_process = None
def flag_submission(lab, student, flag_text="", flagtag=""):
    """Flag a submission with a tagged note.

    When no tag is supplied the grader is prompted to pick one; the
    note text input is prefilled with ``flag_text``.
    """
    window = ui.get_window()
    if not flagtag:
        available_tags = ["Needs Head TA", "Student Action Required", "Other"]
        tag_picker = ui.layers.ListLayer("Flag Tag", popup=True)
        for tag in available_tags:
            tag_picker.add_row_text(tag)
        window.run_layer(tag_picker)
        if tag_picker.canceled:
            return
        flagtag = available_tags[tag_picker.selected_index()]
    note_input = ui.layers.TextInputLayer("Flag Note")
    note_input.set_prompt(["Enter a flag note"])
    note_input.set_text(flag_text)
    window.run_layer(note_input)
    if note_input.canceled:
        return
    full_message = "{}: {}".format(flagtag, note_input.get_text())
    data.flags.flag_submission(student, lab, full_message)
def edit_flag(flag_string: str, student: model.Student, lab: model.Lab):
    """Edit the text in a flagged submission."""
    # Split only on the first `:` -- the note itself may contain colons
    tag_type, _, tag_text = flag_string.partition(":")
    flag_submission(lab, student, tag_text.strip(), tag_type.strip())
def can_get_through_locks(use_locks, student, lab):
    """Return True if grading may proceed for this student/lab.

    Checks two gates: (1) another grader holding the lock, and (2) an
    existing flag on the submission. With ``use_locks`` False both
    checks are skipped.
    """
    if not use_locks:
        return True
    window = ui.get_window()
    if data.lock.is_locked(student, lab):
        netid = data.lock.get_locked_netid(student, lab)
        # If being graded by the user who locked it, allow grading
        if netid != getpass.getuser():
            name = data.netid_to_name(netid)
            msg = [f"This student is already being graded by {name}"]
            popup = ui.layers.Popup("Student Locked", msg)
            window.run_layer(popup)
            return False
    if data.flags.is_submission_flagged(student, lab):
        flag_message = data.flags.get_flag_message(student, lab)
        msg = [
            "This submission has been flagged",
            "",
            flag_message,
        ]
        popup = ui.layers.OptionsPopup("Submission Flagged", msg)
        popup.add_option("Edit")
        popup.add_option("Unflag")
        popup.add_option("View")
        window.run_layer(popup)
        choice = popup.get_selected()
        if choice == "Edit":
            # Editing re-flags; grading does not continue this round
            edit_flag(flag_message, student, lab)
            return False
        elif choice == "Unflag":
            # Remove the flag and fall through to allow grading
            data.flags.unflag_submission(student, lab)
        elif choice == "View":
            return True
        else:
            # Popup closed/canceled: do not grade
            return False
    return True
def pair_programming_message(first, second) -> list:
    """Build the (dynamically refreshed) pair programming popup text."""
    lines = []
    for sub in (first, second):
        lines.append(f"{sub.student.full_name} {sub.latest_submission}")
    lines.append("")
    lines.append("Pick a student's submission to view or view the diff")
    return lines
def grade_pair_programming(first_submission, use_locks):
    """Pick a second student and grade a pair-programming submission.

    Downloads the partner's submission and shows a popup offering each
    student's submission plus a diff of the two. The partner's lock is
    always released in the ``finally`` block.
    """
    # Get second student
    window = ui.get_window()
    students = data.get_students()
    lab = first_submission.lab
    student_list = ui.layers.ListLayer()
    student_list.set_searchable("Student")
    student_list.set_sortable()
    # NOTE(review): unlike lab_select_fn, no select callback is passed
    # to fill_student_list here -- confirm that is intentional.
    fill_student_list(student_list, students, lab, use_locks)
    window.run_layer(student_list)
    if student_list.canceled:
        return
    # Get student
    student_index = student_list.selected_index()
    student = students[student_index]
    if not can_get_through_locks(use_locks, student, lab):
        return
    try:
        second_submission = get_submission(lab, student, use_locks)
        if second_submission is None:
            return
        if second_submission == first_submission:
            popup = ui.layers.Popup(
                "Invalid Student",
                ["The first and second students are the same"])
            window.run_layer(popup)
            return
        # Callbacks are lambdas so the popup always shows current state
        first_submission_fn = lambda: pair_programming_submission_callback(
            lab, first_submission)
        second_submission_fn = lambda: pair_programming_submission_callback(
            lab, second_submission)
        msg = lambda: pair_programming_message(first_submission,
                                               second_submission)
        popup = ui.layers.OptionsPopup("Pair Programming")
        popup.set_message(msg)
        popup.add_option(first_submission.student.full_name,
                         first_submission_fn)
        popup.add_option(second_submission.student.full_name,
                         second_submission_fn)
        popup.add_option("View Diff",
                         lambda: view_diff(first_submission, second_submission))
        window.run_layer(popup)
    finally:
        # Release the second student's lock no matter how we exit
        if use_locks:
            data.lock.unlock(student, lab)
def diff_parts_fn(window, submission):
    """Callback for text diffing parts of a submission.

    Shows an error popup when the parts cannot be diffed.
    """
    error = submission.diff_parts()
    if error:
        # Bug fix: was `ui.layer.Popup`, which raises AttributeError --
        # every other popup in this module uses `ui.layers.Popup`.
        popup = ui.layers.Popup("Error", [error])
        window.run_layer(popup)
def student_select_fn(student, lab, use_locks):
    """Show the submission for the selected lab and student.

    Acquires the grading lock (via can_get_through_locks), presents the
    submission popup with its actions, and always releases the lock on
    exit.
    """
    window = ui.get_window()
    # Wait for student's assignment to be available
    if not can_get_through_locks(use_locks, student, lab):
        return
    try:
        # Get the student's submission
        submission = get_submission(lab, student, use_locks)
        # Exit if student has not submitted
        if submission is None:
            return
        def flag_submission_fn():
            # Flag, then close this popup to return to the student list
            flag_submission(lab, student)
            events = ui.get_events()
            events.push_layer_close_event()
        popup = ui.layers.OptionsPopup("Submission")
        set_submission_message(popup, submission)
        popup.add_option("Flag", flag_submission_fn)
        popup.add_option(
            "Pick Submission",
            lambda: pick_submission(popup, lab, student, submission))
        popup.add_option("Pair Programming",
                         lambda: grade_pair_programming(submission, use_locks))
        # "Diff Parts" only appears when the submission supports it
        if submission.flag & data.model.SubmissionFlag.DIFF_PARTS:
            popup.add_option("Diff Parts",
                             lambda: diff_parts_fn(window, submission))
        popup.add_option("Run", lambda: run_code_fn(window, submission))
        popup.add_option("View", lambda: submission.show_files())
        window.run_layer(popup)
        SharedData.running_process = None
    finally:
        # Always unlock the lab when no longer grading
        if use_locks:
            data.lock.unlock(student, lab)
def watch_students(student_list, students, lab, use_locks):
    """Refresh the student list whenever locks or flags change on disk.

    Registers a filesystem watch on the locks and flags directories;
    fill_student_list is re-run with the given arguments on changes.
    """
    paths = [SharedData.get_locks_directory(), SharedData.get_flags_directory()]
    data.fs_watch.fs_watch_register(paths, "student_list_watch",
                                    fill_student_list, student_list, students,
                                    lab, use_locks, student_select_fn)
def lab_select_fn(selected_index, use_locks, student: model.Student = None):
    """Callback that runs after a lab is selected from the lab list.

    With ``student`` given, jumps straight to grading that student;
    otherwise shows a searchable student list for the lab.
    """
    lab = data.get_labs()[selected_index]
    # Skip selecting a student and go immediately to the grader
    if student:
        student_select_fn(student, lab, use_locks)
        return
    window = ui.get_window()
    students = data.get_students()
    student_list = ui.layers.ListLayer()
    student_list.set_searchable("Student")
    student_list.set_sortable()
    fill_student_list(student_list, students, lab, use_locks, student_select_fn)
    # Register a watch function to keep the student list up to date
    watch_students(student_list, students, lab, use_locks)
    # Remove the file watch handler when done choosing students
    student_list.set_destroy_fn(
        lambda: data.fs_watch.fs_watch_unregister("student_list_watch"))
    window.register_layer(student_list, lab.name)
def grade(use_locks=True, student: model.Student = None):
    """Show the list of labs so one can be picked for grading."""
    window = ui.get_window()
    labs = data.get_labs()
    if not labs:
        error_popup = ui.layers.Popup("Error")
        error_popup.set_message(["No labs have been created yet"])
        window.run_layer(error_popup)
        return
    # "Run for Fun" mode skips the locking workflow
    title = "Grader" if use_locks else "Run for Fun"
    lab_list = ui.layers.ListLayer()
    lab_list.set_searchable("Lab")
    for index, lab in enumerate(labs):
        lab_list.add_row_text(str(lab), lab_select_fn, index, use_locks,
                              student)
    window.register_layer(lab_list, title)
| 35.118568 | 90 | 0.647407 | 1,940 | 15,698 | 5.022165 | 0.140722 | 0.029354 | 0.024428 | 0.027302 | 0.362619 | 0.272401 | 0.235862 | 0.19573 | 0.149133 | 0.141538 | 0 | 0.000951 | 0.263409 | 15,698 | 446 | 91 | 35.197309 | 0.84165 | 0.107657 | 0 | 0.27476 | 0 | 0 | 0.080757 | 0.008845 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063898 | false | 0.009585 | 0.025559 | 0 | 0.172524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d220d13853eb72792eb10d11e5500d16a8130fd | 3,857 | py | Python | physics/n-body/n-body.py | ludius0/simulations | 3f8992edfef89d0450479647a96b889c6f0f43a3 | [
"MIT"
] | null | null | null | physics/n-body/n-body.py | ludius0/simulations | 3f8992edfef89d0450479647a96b889c6f0f43a3 | [
"MIT"
] | null | null | null | physics/n-body/n-body.py | ludius0/simulations | 3f8992edfef89d0450479647a96b889c6f0f43a3 | [
"MIT"
] | null | null | null | # libs
import pygame
import math
import sys
from random import randrange, seed
from numbers import Real
# simulation settings
G = 1  # gravitational constant (arbitrary units)
sum_mass = 50.0  # total mass, split evenly among all bodies
softening = 100  # also function as speed component (only referenced in a commented-out formula)
seed(1)  # fixed seed for a reproducible starting configuration
# additional settings
COLLIS_MERGE = True  # merge bodies when they collide
ACTIVE_BORDERS = False  # bounce bodies off the window borders
# world objects
NBODIES = 150
BODIES = []
# pygame settings
COLOR = (255, 192, 64)
min_size = 0
wsize = (700, 700)
# init pygame
pygame.init()
screen = pygame.display.set_mode(wsize)
pygame.display.set_caption("N-body")
# support functions
def check_borders(body, min=min_size, max=wsize[0]):
    """Reflect a body's velocity when its next step crosses a border.

    Bug fix: the original assigned to the module-level loop variable
    ``b`` instead of the ``body`` parameter, so it only worked by
    accident when called as check_borders(b).
    NOTE(review): the `min`/`max` parameters are unused -- the checks
    compare against 0 and wsize[1]; confirm whether they should be used.
    """
    for index, (p, v) in enumerate(zip(body.pos, body.vel)):
        if p+v <= 0:
            body.vel[index] = -v
        elif p+v >= wsize[1]:
            body.vel[index] = -v
def create_rand_vec3(min=min_size, max=500, regulate=1):
    """Return a list of three random components in [min, max), scaled by 1/regulate."""
    return [randrange(min, max) / regulate for _ in range(3)]
# physic object
class Body:
    """A point mass with 3D position, velocity, and a merge-able volume."""

    def __init__(self, mass: float, position: list, velocity: list):
        assert isinstance(mass, Real) and isinstance(position, list) and isinstance(velocity, list)
        assert len(position) == 3 and len(velocity) == 3
        self.mass = mass
        self.pos = position
        self.vel = velocity
        # Velocity change accumulated during the current step
        self.dvel = [0., 0., 0.]
        self.collision = False
        self.volume = 5
        self.radius = 1.06

    def fg(self, other):
        """Accumulate the gravitational pull of ``other`` into self.dvel."""
        assert isinstance(other, Body)
        # offset vector from self to other and its length
        distance = [other.pos[i] - self.pos[i] for i in range(3)]
        r = math.sqrt(sum(d ** 2 for d in distance))
        # collision when the Manhattan distance drops to <= 1
        if sum(abs(d) for d in distance) <= 1:
            print("Collision!")
            other.collision = True
        # Newton's law F = G*m1*m2/r^2 per axis
        # NOTE(review): distance[index] is not normalized by r, so each
        # axis is scaled by the full offset rather than a unit vector --
        # confirm this is the intended force model.
        for index in range(3):
            f = (G * self.mass * other.mass / r**2) * distance[index]
            self.dvel[index] = self.dvel[index] + f / self.mass

    def comp_radius(self, other):
        """Absorb ``other``'s volume and recompute the sphere radius."""
        self.volume += other.volume
        # Bug fix: sphere radius is (3V / (4*pi))**(1/3); the original
        # multiplied by pi instead of dividing by it.
        self.radius = (self.volume * 3 / (4 * math.pi)) ** (1 / 3)

    def update(self):
        """Apply the accumulated dvel to velocity, then advance position."""
        self.vel = [v + dv for v, dv in zip(self.vel, self.dvel)]
        self.dvel = [0., 0., 0.]
        self.pos = [p + v for p, v in zip(self.pos, self.vel)]
# generate bodies: mass split evenly, positions kept 100px away from borders
mass = sum_mass / NBODIES
for n in range(NBODIES):
    BODIES.append(Body(mass, create_rand_vec3(min=min_size+100, max=wsize[0]-100), create_rand_vec3(min=-1, max=1, regulate=10)))
# Event loop
while 1:
    screen.fill((0, 0, 0))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
    # update simulation: O(n^2) pairwise gravity
    for b1 in BODIES:
        # NOTE(review): `index` is unused, and BODIES is mutated
        # (remove) while being iterated below -- pygame keeps running,
        # but skipped pairs are possible; confirm acceptable.
        for index, b2 in enumerate(BODIES):
            # skip the self-pair
            if b1 == b2:
                continue
            # accumulate Newton's-law acceleration on b1 from b2
            b1.fg(b2)
            # merge on collision (when enabled)
            if b2.collision == True and COLLIS_MERGE == True:
                # delete one body; fold its mass, velocity and volume into b1
                b1.mass += b2.mass
                b1.vel = [b1.vel[0]+b2.vel[0], b1.vel[1]+b2.vel[1], b1.vel[2]+b2.vel[2]]
                b1.comp_radius(b2)
                BODIES.remove(b2)
    # apply accumulated accelerations and advance positions
    for b in BODIES:
        if ACTIVE_BORDERS == True:
            check_borders(b)
        b.update()
    # draw with pygame (x/y only; z is ignored for rendering)
    for b in BODIES:
        pygame.draw.circle(screen, COLOR, (b.pos[0], b.pos[1]), b.radius)
    #pygame.display.update()
    pygame.display.flip()
pygame.quit()
| 29 | 129 | 0.577132 | 560 | 3,857 | 3.907143 | 0.280357 | 0.025594 | 0.013711 | 0.023309 | 0.063528 | 0.053473 | 0.031536 | 0.031536 | 0 | 0 | 0 | 0.044542 | 0.289863 | 3,857 | 132 | 130 | 29.219697 | 0.75429 | 0.143635 | 0 | 0.068182 | 0 | 0 | 0.004881 | 0 | 0 | 0 | 0 | 0 | 0.034091 | 1 | 0.068182 | false | 0 | 0.056818 | 0.011364 | 0.147727 | 0.011364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d23358acadeaa1dff525002691c0097420904a8 | 4,026 | py | Python | src/item/consumables.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | src/item/consumables.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | src/item/consumables.py | roozhou/botty | a67a87845687cdf6900af10a13dc7170684faa9a | [
"MIT"
] | null | null | null | from config import Config
from dataclasses import dataclass
from logger import Logger
from d2r_image.data_models import HoveredItem
@dataclass
class Consumables:
    """Counters of how many of each consumable need to be restocked."""
    tp: int = 0
    id: int = 0
    rejuv: int = 0
    health: int = 0
    mana: int = 0
    key: int = 0

    # Dict-style read access to the counters
    def __getitem__(self, key):
        return getattr(self, key)

    # Dict-style write access to the counters
    def __setitem__(self, key, value):
        setattr(self, key, value)

    def any_needs(self):
        """Return the total count of needed consumables (0 means none)."""
        return sum(self.as_dict().values())

    def as_dict(self):
        """Return all counters as a plain dict."""
        return {name: getattr(self, name)
                for name in ("tp", "id", "rejuv", "health", "mana", "key")}
# Shared, mutable record of how many of each consumable are needed
consumable_needs = Consumables()
# Maps full item names (lowercase) to the internal consumable keys
ITEM_CONSUMABLES_MAP = {
    "rejuvenation potion": "rejuv",
    "full rejuvenation potion": "rejuv",
    "rejuvpotion": "rejuv",
    "super healing potion": "health",
    "greater healing potion": "health",
    "healing potion": "health",
    "healingpotion": "health",
    "light healing potion": "health",
    "minor healing potion": "health",
    "super mana potion": "mana",
    "greater mana potion": "mana",
    "mana potion": "mana",
    "manapotion": "mana",
    "light mana potion": "mana",
    "minor mana potion": "mana",
    "scroll of town portal": "tp",
    "scroll of identify": "id",
    "key": "key"
}
# Number of belt columns reserved for each potion type (from char config)
pot_cols = {
    "rejuv": Config().char["belt_rejuv_columns"],
    "health": Config().char["belt_hp_columns"],
    "mana": Config().char["belt_mp_columns"],
}
def get_needs(consumable_type: str = None):
    """Return the need count for one consumable, or all needs when no type is given."""
    if not consumable_type:
        return consumable_needs
    return consumable_needs[reduce_name(consumable_type)]
def set_needs(consumable_type: str, quantity: int):
    """Set the need count for a consumable.

    :param consumable_type: full item name or already-reduced key
    :param quantity: new need count
    """
    # No `global` declaration needed: the shared Consumables instance is
    # mutated in place (via __setitem__), never rebound.
    consumable_needs[reduce_name(consumable_type)] = quantity
def increment_need(consumable_type: str = None, quantity: int = 1):
    """
    Adjust the consumable_needs of a specific consumable (clamped at 0).
    :param consumable_type: Name of item in pickit or in consumable_map
    :param quantity: Increase the need (+int) or decrease the need (-int)
    """
    # Reduce the name once; the original redundantly re-reduced the
    # already-reduced name when reading the current value. The shared
    # instance is mutated in place, so no `global` declaration is needed.
    consumable = reduce_name(consumable_type)
    consumable_needs[consumable] = max(0, consumable_needs[consumable] + quantity)
def reduce_name(consumable_type: str):
    """Normalize an item name to one of the internal consumable keys.

    Unknown names are logged and returned unchanged.
    Bug fix: the membership check lowercased the name but the map lookup
    did not, so mixed-case input (e.g. "Key") raised KeyError.
    """
    lowered = consumable_type.lower()
    if lowered in ITEM_CONSUMABLES_MAP:
        return ITEM_CONSUMABLES_MAP[lowered]
    if lowered in ITEM_CONSUMABLES_MAP.values():
        # Already a reduced key; pass through
        return consumable_type
    Logger.warning(f"adjust_consumable_need: unknown item: {consumable_type}")
    return consumable_type
def get_remaining(item_name: str = None) -> int:
    """Return how many of an item still fit in belt/tome/key-stack space.

    Returns -1 on missing or unknown ``item_name``.
    """
    if item_name is None:
        Logger.error("get_remaining: param item_name is required")
        return -1
    # Normalize once; the original lowercased only for the comparisons
    # and indexed the lookup tables with the caller's original casing.
    name = item_name.lower()
    if name in ("health", "mana", "rejuv"):
        # Belt capacity: columns for this potion type * rows
        return pot_cols[name] * Config().char["belt_rows"] - consumable_needs[name]
    elif name in ("tp", "id"):
        # A tome holds 20 scrolls
        return 20 - consumable_needs[name]
    elif name == "key":
        # A key stack holds 12 keys
        return 12 - consumable_needs[name]
    else:
        Logger.error(f"get_remaining: error with item_name={item_name}")
        return -1
def should_buy(item_name: str = None, min_remaining: int = None, min_needed: int = None) -> bool:
    """Decide whether an item should be restocked at a vendor.

    Exactly one of ``min_needed`` / ``min_remaining`` should be given;
    ``min_needed`` wins when both are supplied.
    """
    if item_name is None:
        Logger.error("should_buy: param item_name is required")
        return False
    # Compare against None so a threshold of 0 is still honored
    # (the original truthiness checks silently ignored 0).
    if min_needed is not None:
        return consumable_needs[item_name] >= min_needed
    elif min_remaining is not None:
        return get_remaining(item_name) <= min_remaining
    else:
        Logger.error("should_buy: need to specify min_remaining or min_needed")
        return False
def is_consumable(item: HoveredItem) -> str | bool:
    """Return the consumable type name for ``item``, or False if it is not one."""
    # Map keys are lowercase full names, so a direct membership test is
    # equivalent to comparing against every key.
    name = item.Name.lower()
    return name if name in ITEM_CONSUMABLES_MAP else False
2d24021470465eba916fd57e573635e8e812a4a2 | 2,612 | py | Python | gorden_crawler/spiders/shopbop.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | 2 | 2019-02-22T13:51:08.000Z | 2020-08-03T14:01:30.000Z | gorden_crawler/spiders/shopbop.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | null | null | null | gorden_crawler/spiders/shopbop.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | 1 | 2020-08-03T14:01:32.000Z | 2020-08-03T14:01:32.000Z | # -*- coding: utf-8 -*-
from scrapy.spiders import Spider
from scrapy.selector import Selector
import re
from scrapy import Request
from gorden_crawler.spiders.shopbop_eastdane_common import ShopbopEastdaneCommon
class ShopBopSpider(ShopbopEastdaneCommon):
    """Scrapy spider crawling shopbop.com product categories."""
    #class ShopBopSpider(Spider):
    name = "shopbop"
    allowed_domains = ["shopbop.com"]
    shopbop_base_url = 'https://www.shopbop.com'
    custom_settings = {
        # 'USER_AGENT': 'search_crawler (+http://www.shijisearch.com)',
        'COOKIES_ENABLED' : True,
        'DOWNLOAD_TIMEOUT': 60,
        'RETRY_TIMES': 20,
    }
    start_urls = [
        'https://www.shopbop.com',
    ]
    # Maps a top-level category URL to its product type.
    # NOTE(review): keys use cn.shopbop.com while parse() requests
    # www.shopbop.com URLs (plus a query suffix); parse_product_type
    # looks response.url up in this map, which would KeyError unless a
    # redirect normalizes the URL -- verify against live responses.
    gender_start_urls_map = {
        'https://cn.shopbop.com/clothing/br/v=1/2534374302155112.htm' : {'product_type' : 'clothing'},
        'https://cn.shopbop.com/shoes/br/v=1/2534374302024643.htm' : {'product_type' : 'shoes'},
        'https://cn.shopbop.com/bags/br/v=1/2534374302024667.htm' : {'product_type' : 'bags'},
        'https://cn.shopbop.com/accessories/br/v=1/2534374302024641.htm' : {'product_type' : 'accessories'},
    }
    def parse(self, response):
        """Request each top-level category page."""
        url_suffixs = [
            # shopbop
            'https://www.shopbop.com/clothing/br/v=1/2534374302155112.htm',
            'https://www.shopbop.com/shoes/br/v=1/2534374302024643.htm',
            'https://www.shopbop.com/bags/br/v=1/2534374302024667.htm',
            'https://www.shopbop.com/accessories/br/v=1/2534374302024641.htm'
        ]
        # Query string that pins currency/location/language to avoid a 302
        avoid_302_redirect_tail_str = '?switchToCurrency=USD&switchToLocation=US&switchToLanguage=zh'
        for url_suffix in url_suffixs:
            url = url_suffix + avoid_302_redirect_tail_str
            yield Request(url, callback=self.parse_product_type)
    def parse_product_type(self, response):
        """Extract sub-category links from a category page and crawl them."""
        response_link=response.url
        product_type = self.gender_start_urls_map[response_link]['product_type']
        gender = 'women'
        sel = Selector(response)
        # Left-nav sub-category anchors; the first entry is skipped
        category_links = sel.xpath('//li[@class="leftNavCategoryLi nav-item"]/a')[1:]
        category_url={}
        for category_link in category_links:
            url =self.shopbop_base_url + category_link.xpath('./@href').extract()[0]
            category = category_link.xpath('./text()').extract()[0]
            # Boutique categories are excluded from the collected map
            if not re.search(r'Boutique', category):
                category_url[category] = url
            yield Request(url, callback=self.parse_pages, meta={'category' : category, 'product_type' : product_type, 'gender' : gender, 'category_url' : category_url})
2d2653b77757ecdc03fee246ac217f3cdcfedd54 | 3,679 | py | Python | app/main/views.py | arondasamuel123/FlaskBlog | 33fa1bbc288a831b29a95bfff0b514a8d6f93f4e | [
"MIT"
] | null | null | null | app/main/views.py | arondasamuel123/FlaskBlog | 33fa1bbc288a831b29a95bfff0b514a8d6f93f4e | [
"MIT"
] | null | null | null | app/main/views.py | arondasamuel123/FlaskBlog | 33fa1bbc288a831b29a95bfff0b514a8d6f93f4e | [
"MIT"
] | null | null | null | from . import main
from flask import render_template, url_for,redirect
from app.models import Posts, User, Comment
from flask_login import current_user, login_required
from .forms import PostForm, CommentForm, UpdateBlogForm
from .. import db
from sqlalchemy import desc
from ..requests import get_quotes
from ..email import mail_message
@main.route('/')
def home():
    """Render the home page: all posts (newest first) plus a quote."""
    posts = Posts.query.order_by(Posts.blog_created.desc()).all()
    quote = get_quotes()
    user = current_user
    if user.user_type == 'User':
        return render_template('home.html', posts=posts, quote=quote)
    # Non-'User' accounts (e.g. writers) get a different landing page
    return render_template('notuser.html')
@main.route('/writer')
def writer():
    """Render the writer dashboard with all posts, newest first."""
    posts = Posts.query.order_by(Posts.blog_created.desc()).all()
    user = current_user
    if user.user_type == 'Writer':
        return render_template('writer.html', posts=posts)
    # Bug fix: the original returned None for non-writers, which makes
    # Flask raise "view function did not return a valid response".
    # Message matches create_post's non-writer response for consistency.
    return "This page is for only writers"
@main.route('/create', methods=['GET','POST'])
@login_required
def create_post():
    """Create a new blog post (writers only) and email all users about it.

    GET (or an invalid form) by a writer falls through to render the
    creation form; non-writers get a plain-text refusal.
    """
    post_form = PostForm()
    user = current_user
    if user.user_type=='Writer':
        if post_form.validate_on_submit():
            post = Posts(title=post_form.title.data, category=post_form.category.data, blog=post_form.post.data,user=current_user)
            post.save_post()
            # Notify every regular user about the new post
            users = User.query.filter_by(user_type='User').all()
            for user in users:
                mail_message("New Post has arrived", "email/new_post", user.email, user=user)
            return redirect(url_for('main.writer'))
    else:
        return "This page is for only writers"
    return render_template('createpost.html', post_form=post_form)
@main.route('/post/<int:id>')
def get_post(id):
    """Render a single post identified by its id."""
    matching_posts = Posts.query.filter_by(id=id).all()
    return render_template('viewpost.html', post=matching_posts)
@main.route('/createcomment/<int:id>', methods=['GET', 'POST'])
@login_required
def create_comment(id):
    """Add a comment to post ``id`` (regular users only).

    GET (or an invalid form) by a user falls through to render the
    comment form; non-'User' accounts get a plain-text refusal.
    """
    comment_post = Posts.query.get(id)
    user = current_user
    comment_form = CommentForm()
    if user.user_type=='User':
        if comment_form.validate_on_submit():
            new_comment = Comment(comment=comment_form.comment.data, user=current_user, post=comment_post)
            new_comment.save_comment()
            return "Comment added"
    else:
        return "This page is for only users"
    return render_template('addcomment.html', comment_form=comment_form)
@main.route('/viewcomments/<int:id>')
def get_comments(id):
    """Render every comment attached to the post with the given id."""
    post_comments = Comment.query.filter_by(post_id=id).all()
    return render_template('viewcomment.html', comments=post_comments)
@main.route('/dblog/<int:id>', methods=['GET', 'POST'])
def delete_blog(id):
    """Delete the post with the given id, then return to the writer page."""
    delete_post = Posts.query.filter_by(id=id).first()
    # Guard against an unknown id: first() returns None and
    # db.session.delete(None) would raise.
    if delete_post is not None:
        db.session.delete(delete_post)
        db.session.commit()
    return redirect(url_for('main.writer'))
@main.route('/ublog/<int:id>', methods=['GET', 'POST'])
def update_blog(id):
    """Update a post's title, body and category from the submitted form."""
    blog_update = Posts.query.filter_by(id=id).first()
    # NOTE(review): blog_update is None for an unknown id; the attribute
    # assignments below would then raise -- confirm callers only link to
    # existing posts. The GET form is also not prefilled with the post.
    update_form = UpdateBlogForm()
    if update_form.validate_on_submit():
        blog_update.title = update_form.title.data
        blog_update.blog = update_form.post.data
        blog_update.category = update_form.category.data
        db.session.add(blog_update)
        db.session.commit()
        return "Blog updated"
    return render_template("update.html", update_form=update_form)
@main.route('/dcomment/<int:id>', methods=['GET', 'POST'])
def delete_comment(id):
    """Delete the comment with the given id, then return to the writer page."""
    delete_comm = Comment.query.filter_by(id=id).first()
    # Guard against an unknown id: first() returns None and
    # db.session.delete(None) would raise.
    if delete_comm is not None:
        db.session.delete(delete_comm)
        db.session.commit()
    return redirect(url_for('main.writer'))
| 31.991304 | 130 | 0.668116 | 487 | 3,679 | 4.858316 | 0.186858 | 0.053254 | 0.067625 | 0.023669 | 0.331361 | 0.300507 | 0.257396 | 0.142012 | 0.112426 | 0.074387 | 0 | 0 | 0.199783 | 3,679 | 115 | 131 | 31.991304 | 0.803668 | 0.005708 | 0 | 0.244186 | 0 | 0 | 0.117856 | 0.012305 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104651 | false | 0 | 0.104651 | 0 | 0.383721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d26eedc95197a2362633c769d7c96b7f86fab9b | 2,117 | py | Python | examples/tree_view_example.py | cgpipline/dayu_widgets | 040a09fb9a20ce72997a3fba60e381e3944bff59 | [
"MIT"
] | 157 | 2019-03-10T05:55:21.000Z | 2022-03-31T09:07:00.000Z | examples/tree_view_example.py | cgpipline/dayu_widgets | 040a09fb9a20ce72997a3fba60e381e3944bff59 | [
"MIT"
] | 16 | 2019-07-15T11:30:53.000Z | 2021-12-16T14:17:59.000Z | examples/tree_view_example.py | phenom-films/dayu_widgets | 1eb8fbf2847f9de95af2cd62d5eaec392f1c1e22 | [
"MIT"
] | 56 | 2019-06-19T03:35:27.000Z | 2022-03-22T08:07:32.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
import examples._mock_data as mock
from dayu_widgets import dayu_theme
from dayu_widgets.field_mixin import MFieldMixin
from dayu_widgets.item_model import MTableModel, MSortFilterModel
from dayu_widgets.item_view import MTreeView
from dayu_widgets.line_edit import MLineEdit
from dayu_widgets.push_button import MPushButton
from dayu_widgets.qt import *
class TreeViewExample(QWidget, MFieldMixin):
    """Demo widget: a searchable, sortable MTreeView backed by mock data."""
    def __init__(self, parent=None):
        super(TreeViewExample, self).__init__(parent)
        self._init_ui()
    def _init_ui(self):
        """Build the model/view pipeline and the control buttons."""
        # Source model holding the mock tree data
        model_1 = MTableModel()
        model_1.set_header_list(mock.header_list)
        # Proxy model providing search filtering on top of the source
        model_sort = MSortFilterModel()
        model_sort.setSourceModel(model_1)
        tree_view = MTreeView()
        tree_view.setModel(model_sort)
        model_sort.set_header_list(mock.header_list)
        tree_view.set_header_list(mock.header_list)
        model_1.set_data_list(mock.tree_data_list)
        # Search box text drives the proxy model's filter pattern
        line_edit = MLineEdit().search().small()
        line_edit.textChanged.connect(model_sort.set_search_pattern)
        # Expand/collapse controls for the whole tree
        expand_all_button = MPushButton('Expand All').small()
        expand_all_button.clicked.connect(tree_view.expandAll)
        collapse_all_button = MPushButton('Collapse All').small()
        collapse_all_button.clicked.connect(tree_view.collapseAll)
        button_lay = QHBoxLayout()
        button_lay.addWidget(expand_all_button)
        button_lay.addWidget(collapse_all_button)
        button_lay.addWidget(line_edit)
        button_lay.addStretch()
        main_lay = QVBoxLayout()
        main_lay.addLayout(button_lay)
        main_lay.addWidget(tree_view)
        main_lay.addStretch()
        self.setLayout(main_lay)
if __name__ == '__main__':
    # Manual smoke test: start a Qt app, apply the dayu theme, show the widget
    import sys
    app = QApplication(sys.argv)
    test = TreeViewExample()
    dayu_theme.apply(test)
    test.show()
    sys.exit(app.exec_())
| 32.569231 | 68 | 0.671233 | 251 | 2,117 | 5.290837 | 0.358566 | 0.042169 | 0.079066 | 0.038404 | 0.155873 | 0.115211 | 0.048193 | 0 | 0 | 0 | 0 | 0.009233 | 0.181389 | 2,117 | 64 | 69 | 33.078125 | 0.757069 | 0.047709 | 0 | 0 | 0 | 0 | 0.015983 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.2 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d28a277601e69e469768302b3fb700a887a42db | 1,131 | py | Python | test/use_case_examples/startUp.py | sbanik1/sheetTrap | 287746bf33b41e7f1066e80ee12bd08f75b155bc | [
"MIT"
] | null | null | null | test/use_case_examples/startUp.py | sbanik1/sheetTrap | 287746bf33b41e7f1066e80ee12bd08f75b155bc | [
"MIT"
] | null | null | null | test/use_case_examples/startUp.py | sbanik1/sheetTrap | 287746bf33b41e7f1066e80ee12bd08f75b155bc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This code sets the directories for running the test codes
Created on Sat Dec 19 12:51:22 2020
@author: Swarnav Banik
sbanik1@umd.edu
"""
# %% Import all ###############################################################
import sys
import matplotlib.pyplot as plt
# %% Add necessary paths ######################################################
# Make the sheetTrap sources importable (machine-specific absolute path)
sys.path.insert(1, '/Users/swarnav/Google Drive/Work/Projects/Imaging/sheetTrap/src')
# %% Define the output directory ##############################################
saveDir = '/Users/swarnav/Google Drive/Work/Projects/Imaging/sheetTrap/test/out'
# %% Set some default values ##################################################
# Matplotlib rcParams shared by all the test scripts
params = {
    'image.origin': 'lower',
    'image.interpolation': 'nearest',
    'image.cmap': 'gray',
    'axes.grid': True,
    'axes.labelsize': 14, # fontsize for x and y labels (was 10)
    'axes.titlesize': 12,
    'font.size': 8,
    'legend.fontsize': 6, # was 10
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'text.usetex': False,
    'font.family': 'serif',
}
plt.rcParams.update(params)
2d2af0f28011528a2d6ef57a8fde31ab7d31de44 | 743 | py | Python | tests/test_collection_client.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | 52 | 2021-04-15T23:24:12.000Z | 2022-03-09T23:02:27.000Z | tests/test_collection_client.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | 119 | 2021-04-13T11:42:01.000Z | 2022-02-24T10:02:35.000Z | tests/test_collection_client.py | ocefpaf/pystac-client | ddf0e0566b2b1783a4d32d3d77f9f51b80270df3 | [
"Apache-2.0"
] | 14 | 2021-04-13T19:00:19.000Z | 2022-02-23T09:17:30.000Z | import pytest
from pystac_client import CollectionClient
from pystac_client.client import Client
from .helpers import STAC_URLS
class TestCollectionClient:
    """Tests for CollectionClient using recorded HTTP cassettes (vcr)."""
    @pytest.mark.vcr
    def test_instance(self):
        # Fetching a collection yields a CollectionClient with a useful repr
        client = Client.open(STAC_URLS['PLANETARY-COMPUTER'])
        collection = client.get_collection('aster-l1t')
        assert isinstance(collection, CollectionClient)
        assert str(collection) == '<CollectionClient id=aster-l1t>'
    @pytest.mark.vcr
    def test_get_items(self):
        # Every item yielded by the collection carries its collection id
        client = Client.open(STAC_URLS['PLANETARY-COMPUTER'])
        collection = client.get_collection('aster-l1t')
        for item in collection.get_items():
            assert (item.collection_id == collection.id)
        # Trailing return is redundant (test functions return None anyway)
        return
2d2b1238c9004522816921ce7316d847dd12d582 | 7,135 | py | Python | frasco_models/backends/sqlalchemy.py | frascoweb/frasco-models | f7c1e14424cadf3dc07c2bd81cc32b0fd046ccba | [
"MIT"
] | 1 | 2015-09-24T10:01:03.000Z | 2015-09-24T10:01:03.000Z | frasco_models/backends/sqlalchemy.py | frascoweb/frasco-models | f7c1e14424cadf3dc07c2bd81cc32b0fd046ccba | [
"MIT"
] | null | null | null | frasco_models/backends/sqlalchemy.py | frascoweb/frasco-models | f7c1e14424cadf3dc07c2bd81cc32b0fd046ccba | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from frasco import copy_extra_feature_options, current_app
from frasco.utils import JSONEncoder, ContextStack, DelayedCallsContext
from frasco_models import Backend, ModelSchemaError, and_, split_field_operator, QueryError
from frasco_models.utils import clean_proxy
from flask_sqlalchemy import SQLAlchemy, Model as BaseModel
from sqlalchemy.ext.declarative import declarative_base
import sqlalchemy
from sqlalchemy.inspection import inspect as sqlainspect
from sqlalchemy.sql import sqltypes
import inspect
import datetime
from contextlib import contextmanager
import functools
class Model(BaseModel):
    """Flask-SQLAlchemy base model wired into frasco's background-task system.

    __taskdump__/__taskload__ let a task serialize an instance as a
    (loader expression, primary-key string) pair and re-fetch the row
    by primary key when the task actually runs.
    """

    def __taskdump__(self):
        # Serialize as an expression that resolves the model class at load
        # time, plus the row's primary key as a string.
        return 'frasco::current_app.features.models[%s]' % self.__class__.__name__, str(self.id)

    @classmethod
    def __taskload__(cls, id):
        # Re-load the instance by primary key via Query.get.
        return cls.query.get(id)
# Ordered (SQLAlchemy column type, Python type) pairs; the first matching
# entry wins when inspecting model columns, and anything unmatched falls
# back to str (see SqlalchemyBackend.inspect_fields).
sqla_type_mapping = [
    (sqltypes.Integer, int),
    (sqltypes.Float, float),
    (sqltypes.Boolean, bool),
    (sqltypes.DateTime, datetime.datetime),
    (sqltypes.Date, datetime.date)
]
class SqlalchemyBackend(Backend):
    """frasco_models backend backed by Flask-SQLAlchemy.

    Translates the generic frasco_models query/transaction API into
    SQLAlchemy sessions and queries.
    """
    name = "sqlalchemy"

    def __init__(self, app, options):
        super(SqlalchemyBackend, self).__init__(app, options)
        copy_extra_feature_options(app.features.models, app.config, 'SQLALCHEMY_')
        self.db = SQLAlchemy(app, session_options=options.get('session_options'),
                             model_class=Model)

        @app.cli.command()
        def create_db():
            try:
                self.db.create_all()
            except sqlalchemy.exc.CircularDependencyError as e:
                try:
                    self.graph_circular_dependency_error(e)
                except ImportError:
                    app.logger.info('Install networkx and pygraphviz to generate a graph of the circular dependency')
                # Re-raise the original CircularDependencyError either way.
                raise

        app.cli.command('drop_db')(self.db.drop_all)

        if app.features.exists('tasks'):
            from celery.signals import task_postrun

            def handle_celery_postrun(retval=None, *args, **kwargs):
                # Commit at the end of every celery task (unless it failed),
                # then drop the scoped session unless running eagerly in-process.
                if app.config.get('SQLALCHEMY_COMMIT_ON_TEARDOWN'):
                    if not isinstance(retval, Exception):
                        self.db.session.commit()
                if not app.config.get('CELERY_ALWAYS_EAGER'):
                    self.db.session.remove()
            task_postrun.connect(handle_celery_postrun, weak=False)

    def ensure_model(self, name):
        """Return the model class for *name*; pass through if already a model."""
        if isinstance(name, self.db.Model):
            return name
        return self.db.Model._decl_class_registry[name]

    def ensure_schema(self, name, fields):
        """Raise ModelSchemaError if any of *fields* is missing on model *name*."""
        model = self.ensure_model(name)
        for fname, _ in fields.iteritems():
            if fname not in model.__mapper__.attrs:
                raise ModelSchemaError("Missing field '%s' in model '%s'" % (fname, name))

    def inspect_fields(self, model):
        """Return (field name, {'type': python type}) pairs for *model*'s columns."""
        if not inspect.isclass(model):
            model = model.__class__
        mapper = sqlainspect(model)
        fields = []
        for attr in mapper.column_attrs:
            field_type = str  # fallback when no sqla_type_mapping entry matches
            for coltype, pytype in sqla_type_mapping:
                if isinstance(attr.columns[0].type, coltype):
                    field_type = pytype
                    break
            fields.append((attr.key, dict(type=field_type)))
        return fields

    def begin_transaction(self):
        self.db.session.begin(subtransactions=True)

    def flusb_transaction(self):
        # NOTE: method name kept as-is (sic) for backward compatibility.
        # Fixed: `session.fush()` was a typo and raised AttributeError.
        self.db.session.flush()

    def commit_transaction(self):
        self.db.session.commit()

    def rollback_transaction(self):
        self.db.session.rollback()

    def add(self, obj):
        self.db.session.add(obj)

    def remove(self, obj):
        self.db.session.delete(obj)

    def find_by_id(self, model, id):
        return model.query.filter_by(id=id).first()

    def find_all(self, query):
        return self._transform_query(query).all()

    def find_first(self, query):
        return self._transform_query(query).first()

    def find_one(self, query):
        return self._transform_query(query).first()

    def count(self, query):
        return self._transform_query(query).count()

    def update(self, query, data):
        return self._transform_query(query).update(
            self._prepare_data(query.model, data),
            synchronize_session=False)

    def delete(self, query):
        return self._transform_query(query).delete(
            synchronize_session=False)

    def _transform_query(self, q):
        """Convert a frasco_models query object into a SQLAlchemy query."""
        qs = q.model.query
        if q._filters:
            qs = qs.filter(self._transform_query_filter_group(q.model, and_(*q._filters)))
        if q._order_by:
            qs = qs.order_by(*[k + ' ' + v for k, v in q._order_by])
        if q._offset:
            qs = qs.offset(q._offset)
        if q._limit:
            qs = qs.limit(q._limit)
        return qs

    def _transform_query_filter_group(self, model, group):
        """Transform a single-key {"$and"/"$or": [filters]} dict into a clause."""
        operator, filters = group.items()[0]
        transformed_filters = []
        for flt in filters:
            if isinstance(flt, dict):
                # Nested group: recurse.
                q = self._transform_query_filter_group(model, flt)
                if q is None:
                    continue
            else:
                q = self._transform_query_filter(model, flt)
            transformed_filters.append(q)
        if operator == "$or":
            return sqlalchemy.or_(*transformed_filters)
        return sqlalchemy.and_(*transformed_filters)

    def _transform_query_filter(self, model, flt):
        """Transform one (field__operator, value) pair into a column expression."""
        field, value = flt
        field, operator, py_operator = split_field_operator(field, with_python_operator=True)
        value = clean_proxy(value)
        column = getattr(model, field)
        if py_operator:
            return py_operator(column, value)
        if operator == 'in':
            return column.in_(value)
        if operator == 'nin':
            return ~column.in_(value)
        raise QueryError("Cannot convert operator '%s' to sqlalchemy operator" % operator)

    def _prepare_data(self, model, data):
        """Map an update payload's fields/operators to column assignments."""
        out = {}
        for field, value in data.iteritems():
            field, operator = split_field_operator(field)
            column = getattr(model, field)
            if operator == 'incr':
                out[column] = column + value
            elif operator == 'push':
                raise QueryError("Operator 'push' not supported by sqlalchemy")
            else:
                out[column] = value
        return out

    def graph_circular_dependency_error(self, e, filename='sqla_circular_dep_graph.png'):
        """Render the tables involved in a CircularDependencyError as a PNG graph."""
        # from: http://ilyasterin.com/blog/2014/01/cyclical-dependency-detection-in-the-database.html
        import networkx as nx
        G = nx.DiGraph()
        cycle_tables = set([t.name for t in e.cycles])
        for t in e.cycles:
            for fk in t.foreign_keys:
                table, col = fk.target_fullname.split('.')
                if (table in cycle_tables):
                    G.add_edge(t.name, table)
        agraph = nx.to_agraph(G)
        agraph.draw(filename, format='png', prog='dot')
| 35.675 | 117 | 0.623266 | 839 | 7,135 | 5.084625 | 0.272944 | 0.018284 | 0.037975 | 0.033755 | 0.14346 | 0.055087 | 0.048289 | 0.021566 | 0.021566 | 0 | 0 | 0.001558 | 0.280448 | 7,135 | 199 | 118 | 35.854271 | 0.829373 | 0.012754 | 0 | 0.072289 | 0 | 0 | 0.055382 | 0.01349 | 0 | 0 | 0 | 0 | 0 | 1 | 0.156627 | false | 0.006024 | 0.10241 | 0.054217 | 0.391566 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d2bf38cdd992aadb6144180b5dfb94255173ff3 | 538 | py | Python | 17.05.2022/POO/parte1/teste3/main.py | N0N4T0/python-codes | ac2b884f86749a8b179ff972cdb316ec4e005b32 | [
"MIT"
] | null | null | null | 17.05.2022/POO/parte1/teste3/main.py | N0N4T0/python-codes | ac2b884f86749a8b179ff972cdb316ec4e005b32 | [
"MIT"
] | null | null | null | 17.05.2022/POO/parte1/teste3/main.py | N0N4T0/python-codes | ac2b884f86749a8b179ff972cdb316ec4e005b32 | [
"MIT"
] | null | null | null | from pessoa import Pessoa
# Two Pessoa (Person) instances used to exercise the eat/talk state machine.
# (p2 is created but never exercised below.)
p1 = Pessoa('Luiz', 29)
p2 = Pessoa('Joana', 59)
# Test: already eating
# p1.comer('maçã')
# p1.comer('maçã')
# Test: not eating
# p1.para_comer()
# Test: already talking
# p1.falar('Política')
# p1.falar('Política')
# Test: cannot talk while eating
# p1.comer('maçã')
# p1.falar('Política')
# Test: not talking
# p1.parar_falar()
# Test: cannot eat while talking
# p1.falar('Estudo')
# p1.comer('Maçã')
p1.comer('ovo')     # start eating
p1.para_comer()     # stop eating
p1.falar('COmida')  # start talking
p1.parar_falar()    # stop talking
2d2db398b4ddbcc67edec80cc25d50f9ccebdf17 | 2,022 | py | Python | pipeline/run_perturbation_test.py | martin-fabbri/nlp-behavioral-testing | 352cab74f5d62561e08d92135e4609db1fe38fd5 | [
"Apache-2.0"
] | null | null | null | pipeline/run_perturbation_test.py | martin-fabbri/nlp-behavioral-testing | 352cab74f5d62561e08d92135e4609db1fe38fd5 | [
"Apache-2.0"
] | 1 | 2021-02-19T08:08:52.000Z | 2021-02-19T08:08:52.000Z | pipeline/run_perturbation_test.py | martin-fabbri/nlp-behavioral-testing | 352cab74f5d62561e08d92135e4609db1fe38fd5 | [
"Apache-2.0"
] | null | null | null | import json
import nlpaug.augmenter.char as nac
import pandas as pd
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
from transformers.tokenization_utils import PreTrainedTokenizer
from config import Config
# Pretrained sentiment-analysis checkpoint from the HuggingFace hub.
transformer_model = "distilbert-base-uncased-finetuned-sst-2-english"

# load model: tokenizer + classification head wrapped in an inference pipeline
tokenizer = AutoTokenizer.from_pretrained(transformer_model)
inference_model = AutoModelForSequenceClassification.from_pretrained(transformer_model)
model = pipeline("sentiment-analysis", model=inference_model, tokenizer=tokenizer)

# define text perturbation: nlpaug keyboard-typo augmenter altering at most one word
keyboard_aug = nac.KeyboardAug(aug_word_max=1)
def typo(aug, text):
    """Return *text* with a simulated keyboard typo applied by augmenter *aug*.

    The second parameter was renamed from ``input`` to avoid shadowing the
    builtin; in-file callers invoke it positionally.
    """
    return aug.augment(text)
def eval_perturb(input_a, input_b):
    """Score an (original, perturbed) text pair with the global pipeline.

    Returns (squared score difference, labels-agree flag, perturbed score).
    """
    pred_orig, pred_mod = model([input_a, input_b])
    score_gap = pred_orig["score"] - pred_mod["score"]
    labels_match = pred_orig["label"] == pred_mod["label"]
    return score_gap ** 2, labels_match, pred_mod["score"]
# read in our test dataset (tab-separated; drop the trailing empty field)
# Fixed: the file handle was opened but never closed.
with open(Config.TEST_SET) as f:
    test_dataset = f.read().split("\t")[:-1]

# Loop over all test examples and evaluate
mse, total_acc = 0, 0
n = len(test_dataset)
interesting_cases = []
for sentence in test_dataset:
    sentence_mod = typo(keyboard_aug, sentence)
    sq_error, acc, perturb_score = eval_perturb(sentence, sentence_mod)
    mse += (1 / n) * sq_error
    total_acc += (1 / n) * acc
    if not acc:  # the label flipped under perturbation -> interesting case
        interesting_cases.append((sentence, sentence_mod, perturb_score))
interesting_cases.sort(key=lambda tup: tup[2], reverse=True)

# Write out our favorite interesting cases (top 5 by model confidence)
to_report = interesting_cases[:5]
df = pd.DataFrame(to_report, columns=["Original", "Perturbed", "Model confidence"])
with open(Config.TOP_PERTURBATIONS, "w") as outfile:
    outfile.write(df.to_markdown(index=False))

# Write results to file
with open(Config.TEST_SCORES, "w") as outfile:
    json.dump({"accuracy": total_acc, "mse": mse}, outfile)
| 31.59375 | 87 | 0.740851 | 277 | 2,022 | 5.212996 | 0.422383 | 0.024238 | 0.022853 | 0.024931 | 0.040166 | 0.040166 | 0.040166 | 0 | 0 | 0 | 0 | 0.005754 | 0.140455 | 2,022 | 63 | 88 | 32.095238 | 0.825086 | 0.121167 | 0 | 0 | 0 | 0 | 0.078098 | 0.026599 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.157895 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d2e70b06755b783fc866f681653a747759d1c68 | 4,534 | py | Python | quant.py | coreyauger/ema-65-crossover-kaggle | 5b84294381b01b9f0f84f14006572c9cb9e5cca7 | [
"MIT"
] | null | null | null | quant.py | coreyauger/ema-65-crossover-kaggle | 5b84294381b01b9f0f84f14006572c9cb9e5cca7 | [
"MIT"
] | null | null | null | quant.py | coreyauger/ema-65-crossover-kaggle | 5b84294381b01b9f0f84f14006572c9cb9e5cca7 | [
"MIT"
] | 1 | 2021-05-01T15:50:17.000Z | 2021-05-01T15:50:17.000Z |
from functools import reduce
import numpy as np
import matplotlib.pyplot as plt
from dateutil import parser
import glob
import os
import csv
import json
def loadData(path, subset=-1, loadDebug=False):
    """Load numeric sample files ``data_*.csv`` (and optional ``debug_*.csv``).

    Args:
        path: directory containing the csv files.
        subset: if > 0, only the first *subset* files are loaded.
        loadDebug: also load the matching ``debug_<n>.csv`` JSON files.

    Returns:
        (numpy array with one flattened float row per file, list of debug dicts).
    """
    allFiles = glob.glob(os.path.join(path, "data_*.csv"))
    if subset > 0:
        allFiles = allFiles[0:subset]
    data = []
    debug = []
    for file in allFiles:
        print(file)
        # Fixed: derive the file number from the basename so this also works
        # with Windows path separators (old code split on '/').
        fileNum = os.path.basename(file).replace("data_", "").replace(".csv", "")
        with open(file, 'r') as f:
            data.append([float(val) for sublist in list(csv.reader(f)) for val in sublist])
        if loadDebug:
            with open(file.replace("data_" + fileNum + ".csv", "debug_" + fileNum + ".csv")) as f:
                debug.append(json.load(f))
    return (np.array(data), debug)
def calculateEma(price, interval=9, startEma=-1):
    """Exponential moving average of *price*.

    If *startEma* is positive, the EMA is seeded with it and runs over the
    whole series; otherwise the first *interval* values seed it with their
    simple moving average. The returned list contains the seed value(s)
    followed by the EMA values.
    See https://www.investopedia.com/ask/answers/122314/what-exponential-moving-average-ema-formula-and-how-ema-calculated.asp
    """
    smoothing = 2 / (interval + 1)
    if startEma > 0:
        ema = [startEma]
        remaining = price
    else:
        head = price[0:interval]
        ema = [sum(head) / len(head)] * interval
        remaining = price[interval:]
    # (close - EMA(prev)) * multiplier + EMA(prev)
    for p in remaining:
        ema.append((p - ema[-1]) * smoothing + ema[-1])
    return ema
def rewindEma(price, interval, startEma):
    """Invert the EMA step: starting from *startEma*, recover each previous
    EMA value given the prices in *price* (solving ema = (p-prev)*k + prev
    for prev)."""
    smoothing = 2 / (interval + 1)
    out = [startEma]
    for p in price:
        out.append((-p * smoothing + out[-1]) / (1 - smoothing))
    return out
def priceChangeToPrice(data, initial=100):
    """Cumulatively apply the fractional price changes in *data*, starting
    from *initial*; returns [initial, p1, p2, ...]."""
    prices = [initial]
    for change in data:
        prices.append(prices[-1] + (prices[-1] * change))
    return prices
def rewindPriceChangeToPrice(data, initial=100):
    """Undo fractional price changes: starting from *initial*, divide out
    each change in *data* to recover the earlier prices."""
    prices = [initial]
    for change in data:
        prices.append(prices[-1] / (change + 1.0))
    return prices
def debugPlot(data, debug, timeDomains = [1,5,15,30]):
    """Plot the 1-minute price curve, its 15/65-period EMAs, and coarser
    time-domain price bars for one training example.

    Assumptions (inferred from the slicing below — confirm against the
    producer of `data`):
      - `data` interleaves two values per time step; data[0:181][1::2]
        yields 90 one-minute price *changes*, and each subsequent 180-slot
        chunk holds one coarser time domain.
      - `debug` is the JSON debug record whose "Trigger" event carries
        m.q.PriceTs / m.q.EmaTs entries at the trigger instant.

    NOTE(review): the mutable default `timeDomains` is shared across calls;
    it is only read here, so this is safe as written.
    """
    sample1Min = data[0:181]
    sample1Min = sample1Min[1::2] # only want price
    # Trigger-time metadata from the debug record.
    trigger = debug["Trigger"]["parent"][0]
    trainingExampleId = debug["TrainingExample"]["id"]
    symbol = debug["TrainingExample"]["symbol"]["sym"]
    triggerData = [val for sublist in trigger["event"]["data"] for val in sublist]
    priceData = list(filter(lambda x: x["$type"] == "m.q.PriceTs", triggerData ))
    ema15Data = list(filter(lambda x: x["$type"] == "m.q.EmaTs" and x["data"]["timePeriod"] == 15, triggerData ))
    ema65Data = list(filter(lambda x: x["$type"] == "m.q.EmaTs" and x["data"]["timePeriod"] == 65, triggerData ))
    # we need to rewind these values through time now.
    rewindPrice1 = rewindPriceChangeToPrice(sample1Min[::-1], initial=priceData[0]["data"]["close"])
    #print("15: "+str(ema15Data[0]["data"]["ema"]))
    #print("65: "+str(ema65Data[0]["data"]["ema"]))
    # Rewound EMA series (currently computed but not plotted).
    rewindEma15 = rewindEma(rewindPrice1, 15, startEma = ema15Data[0]["data"]["ema"])
    rewindEma65 = rewindEma(rewindPrice1, 65, startEma = ema65Data[0]["data"]["ema"])
    #print("rewindPrice1: " + str(rewindPrice1[-1]))
    #print("rewindEma15: " + str(rewindEma15[-1]))
    #print("rewindEma65: " + str(rewindEma65[-1]))
    enterPrice = priceData[0]["data"]["close"]
    print("symbol: "+symbol)
    print("Training Example: " + trainingExampleId)
    print("enter price: " + str(enterPrice))
    print("enter time: " + priceData[0]["time"])
    time = parser.parse(priceData[0]["time"])
    print(time.minute)
    # Rebuild the 1-minute price curve forward from the rewound start price,
    # then overlay its 15- and 65-period EMAs.
    graph1 = priceChangeToPrice(sample1Min, initial=rewindPrice1[-1])
    ema15 = calculateEma(graph1, 15, startEma=graph1[0])
    ema65 = calculateEma(graph1, 65, startEma=graph1[0])
    series = [graph1, ema65, ema15]
    ind = 1
    for t in filter(lambda x: x != 1,timeDomains):
        # Each coarser time domain occupies the next 180-value chunk of `data`.
        start = (ind*180)+1
        end = ((ind+1)*180)+1
        sampleXMin = data[start:end]
        sampleXMin = sampleXMin[::2]
        # Offset so the coarser bars line up with the 1-minute clock.
        remainder = (60+time.minute) % t
        #print("x: "+str(60+time.minute))
        #print("remainder: " + str(remainder) )
        #print(graph1[-(remainder+1)])
        rewindPriceX = rewindPriceChangeToPrice(sampleXMin[::-1], initial=graph1[-(remainder+1)])
        # Left-pad all existing series so every curve ends at the same x.
        extra = 90*t - 90*timeDomains[ind-1]
        series = [([None] * extra) + x for x in series]
        graphX = priceChangeToPrice(sampleXMin, initial=rewindPriceX[-1])
        graphX = [[x]*t for x in graphX]  # step plot: repeat each bar t minutes
        graphX = [val for sublist in graphX for val in sublist][remainder:]
        series.append(graphX)
        ind = ind+1
    for x in series:
        plt.plot(x)
    plt.show()
| 40.846847 | 123 | 0.604764 | 574 | 4,534 | 4.770035 | 0.275261 | 0.02301 | 0.006574 | 0.020453 | 0.113952 | 0.096421 | 0.096421 | 0.096421 | 0.087655 | 0.087655 | 0 | 0.044105 | 0.219894 | 4,534 | 111 | 124 | 40.846847 | 0.729997 | 0.127481 | 0 | 0.024691 | 0 | 0 | 0.067444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.098765 | 0.024691 | 0.246914 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d2eb9e41d9b7aaa806035fe7e73dc2a5bc3e057 | 5,913 | py | Python | Scripts/GeoDBSCAN.py | Holisticnature/geo-learn | cfa4485b376ae1b22457251bcddbe26bac103a10 | [
"Apache-2.0"
] | 4 | 2018-03-06T14:38:42.000Z | 2018-06-04T14:57:26.000Z | Scripts/GeoDBSCAN.py | Holisticnature/geo-learn | cfa4485b376ae1b22457251bcddbe26bac103a10 | [
"Apache-2.0"
] | null | null | null | Scripts/GeoDBSCAN.py | Holisticnature/geo-learn | cfa4485b376ae1b22457251bcddbe26bac103a10 | [
"Apache-2.0"
] | null | null | null | # --------------------------------
# Name: GeoDBSCAN.py
# Purpose: This script is intended to allow ArcGIS users that have Scikit Learn installed in their python installation
# utilize DBSCAN to create clusters of geographic features based on their centroids.
# Current Owner: David Wasserman
# Last Modified: 4/5/2020
# Copyright: (c) David Wasserman
# ArcGIS Version: ArcGIS Pro
# Python Version: 3.6
# --------------------------------
# Copyright 2016 David J. Wasserman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------
# Import Modules
import os, arcpy
import numpy as np
import pandas as pd
import glearnlib as gl
try:
from sklearn import cluster
from sklearn import metrics
from sklearn.preprocessing import StandardScaler
except:
arcpy.AddError("This library requires Sci-kit Learn installed in the ArcGIS Python Install."
" Might require installing pre-requisite libraries and software.")
# Function Definitions
def classify_features_dbscan(in_fc, neighborhood_size, minimum_samples, weight_field):
    """Take in a feature class of points and classify them into clusters using DBSCAN from Scikit learn.
    Append field labels to the input feature class using Extend Numpy Array function.

    Args:
        in_fc: input point feature class; DBSCAN labels are written back to it.
        neighborhood_size: DBSCAN ``eps`` radius (in the feature class' units).
        minimum_samples: DBSCAN ``min_samples`` core-point threshold.
        weight_field: optional numeric field used as per-point sample weight.
    """
    try:
        # Declare Starting Variables
        desc = arcpy.Describe(in_fc)
        OIDFieldName = desc.OIDFieldName
        workspace = os.path.dirname(desc.catalogPath)
        gl.arc_print("Converting '{0}' feature class geometry to X-Y centroid numpy arrays.".format(str(desc.name)))
        centroid_x, centroid_y = 'SHAPE@X', 'SHAPE@Y'
        objectid = 'OID@'
        fields = [centroid_x, centroid_y, objectid]
        use_weight = False
        if gl.field_exist(in_fc, weight_field):
            fields.append(weight_field)
            use_weight = True
        # Convert Feature Class to NP array
        geoarray = arcpy.da.FeatureClassToNumPyArray(in_fc, fields,
                                                     null_value=1)  # Null values are treated as weight 1
        cluster_fields = [centroid_x, centroid_y]
        data = pd.DataFrame(geoarray)
        coordinates_cluster = data[cluster_fields]
        if use_weight:
            gl.arc_print("Using weight field {0} and geographic coordinates for clustering with DBSCAN.".format(
                str(weight_field)), True)
            weight = np.asarray(data[weight_field], dtype=np.float64)
            # Fixed: the weights were previously passed positionally into the
            # (ignored) ``y`` parameter of fit(); they must be passed as
            # ``sample_weight``. eps/min_samples are now keyword arguments,
            # matching the modern sklearn signature.
            dbscan_classification = cluster.DBSCAN(
                eps=neighborhood_size, min_samples=minimum_samples).fit(
                coordinates_cluster, sample_weight=weight)
        else:
            gl.arc_print("Using geographic coordinates to classify with DBSCAN.", True)
            dbscan_classification = cluster.DBSCAN(
                eps=neighborhood_size, min_samples=minimum_samples).fit(coordinates_cluster)
        core_samples_mask = np.zeros_like(dbscan_classification.labels_, dtype=bool)
        core_samples_mask[dbscan_classification.core_sample_indices_] = True
        labels = dbscan_classification.labels_
        # Number of clusters in labels, ignoring noise if present.
        cluster_count = len(set([i for i in labels if i != -1]))
        gl.arc_print('Estimated number of clusters: {0}'.format(cluster_count), True)
        try:
            gl.arc_print("Silhouette Coefficient: {0}.".format(metrics.silhouette_score(coordinates_cluster, labels)),
                         True)
            gl.arc_print(
                """Wikipedia: The silhouette value is a measure of how similar an object is to its own cluster (cohesion) compared to other clusters (separation). The silhouette ranges from -1 to 1, where a high value indicate that the object is well matched to its own cluster and poorly matched to neighboring clusters.""")
        except Exception as e:
            gl.arc_print("Could not compute Silhouette Coefficient. Error: {0}".format(str(e.args[0])), True)
        gl.arc_print("Appending Labels from DBSCAN to new numpy array.", True)
        JoinField = str(arcpy.ValidateFieldName("NPIndexJoin", workspace))
        LabelField = str(arcpy.ValidateFieldName("DBSCANLabel", workspace))
        finalDBSCANArray = np.array(list(zip(data[objectid], labels)),
                                    dtype=[(JoinField, np.int32), (LabelField, np.int32)])
        gl.arc_print("Extending Label Fields to Output Feature Class. Clusters labels start at 0, noise is labeled -1.",
                     True)
        arcpy.da.ExtendTable(in_fc, OIDFieldName, finalDBSCANArray, JoinField, append_only=False)
        del geoarray, finalDBSCANArray, labels, dbscan_classification, core_samples_mask
        gl.arc_print("Script Completed Successfully.", True)
    except arcpy.ExecuteError:
        gl.arc_print(arcpy.GetMessages(2))
    except Exception as e:
        print(str(e.args[0]))
        arcpy.AddError(str(e.args[0]))
# End do_analysis function
# This test allows the script to be used from the operating
# system command prompt (stand-alone), in a Python IDE,
# as a geoprocessing script tool, or as a module imported in
# another script
if __name__ == '__main__':
    # Define input parameters; indices must match the script-tool parameter
    # order configured in ArcGIS.
    input_feature_class = arcpy.GetParameterAsText(0)
    neighborhood_size = arcpy.GetParameter(1)
    minimum_samples = arcpy.GetParameter(2)
    weight_field = arcpy.GetParameterAsText(3)
    classify_features_dbscan(input_feature_class, neighborhood_size, minimum_samples, weight_field)
| 51.417391 | 325 | 0.690851 | 750 | 5,913 | 5.328 | 0.388 | 0.013764 | 0.027528 | 0.03003 | 0.074575 | 0.062563 | 0.042042 | 0.042042 | 0.042042 | 0.042042 | 0 | 0.008838 | 0.215457 | 5,913 | 114 | 326 | 51.868421 | 0.852554 | 0.275326 | 0 | 0.1 | 0 | 0 | 0.170949 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014286 | false | 0 | 0.1 | 0 | 0.114286 | 0.171429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d3055e14122623c95b8881341a1fc725f67eb41 | 1,017 | py | Python | examples/research/O2-graphene/O2-C24/proj/O2-C24.py | fishjojo/pydmfe | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | 3 | 2021-02-26T06:26:00.000Z | 2022-02-20T08:58:20.000Z | examples/research/O2-graphene/O2-C24/proj/O2-C24.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | null | null | null | examples/research/O2-graphene/O2-C24/proj/O2-C24.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | null | null | null | from pydmfet import proj_ao, tools
from pydmfet.qcwrap.pyscf_rks_ao import rks_ao
from pyscf import gto,scf
import numpy as np
from pyscf.tools import molden
t0 = tools.time0()  # start wall-clock timer (elapsed time not reported here)

# Basis set and smearing width for the SCF.
# NOTE(review): smear_sigma units presumably Hartree — confirm against rks_ao.
bas ='6-31G*'
temp = 0.005

# Build the O2 + 24-carbon graphene fragment from the xyz geometry.
mol = gto.Mole()
mol.atom = open('O2-C24.xyz').read()
mol.basis = bas
mol.charge = 0
mol.build(max_memory = 24000, verbose=4)

# Initial density matrix from previously converged PBE orbitals.
dm_guess=None
_, _, mo_coeff, mo_occ, _, _ = molden.load("MO_pbe.molden")
dm_guess = np.dot(mo_coeff*mo_occ, mo_coeff.T)

#mf = scf.UKS(mol)
# Restricted Kohn-Sham (PBE) with fractional-occupation smearing.
mf = rks_ao(mol,smear_sigma = temp)
mf.xc = "pbe,pbe"
mf.max_cycle = 50
mf.scf(dm0=dm_guess)
'''
with open( 'MO.molden', 'w' ) as thefile:
    molden.header(mf.mol, thefile)
    molden.orbital_coeff(mf.mol, thefile, mf.mo_coeff,occ = mf.mo_occ, ene = mf.mo_energy)
'''
# Mark the first 8 atoms as the embedded (impurity) region.
natoms = mol.natm
impAtom = np.zeros([natoms], dtype=int)
for i in range(8):
    impAtom[i] = 1

# Projection-based embedding; Ne_env=110 electrons assigned to the environment
# (per the pydmfet proj_embed API — confirm against its documentation).
embed = proj_ao.proj_embed(mf,impAtom, Ne_env = 110)
embed.pop_method = 'meta_lowdin'
embed.pm_exponent = 2
embed.make_frozen_orbs(norb = 83)
embed.embedding_potential()
| 21.1875 | 90 | 0.707965 | 182 | 1,017 | 3.769231 | 0.516484 | 0.040816 | 0.026239 | 0.034985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034443 | 0.143559 | 1,017 | 47 | 91 | 21.638298 | 0.753157 | 0.016716 | 0 | 0 | 0 | 0 | 0.057108 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.172414 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d32dfae4e4a82d0da73b0d4fd9b140d33288330 | 2,314 | py | Python | pymmw/lib/probe.py | iljoobaek/Radar_boundary_detection | e7a61827e57eac29d4a1200a2856ce3bc00b92cf | [
"MIT"
] | null | null | null | pymmw/lib/probe.py | iljoobaek/Radar_boundary_detection | e7a61827e57eac29d4a1200a2856ce3bc00b92cf | [
"MIT"
] | null | null | null | pymmw/lib/probe.py | iljoobaek/Radar_boundary_detection | e7a61827e57eac29d4a1200a2856ce3bc00b92cf | [
"MIT"
] | null | null | null | #
# Copyright (c) 2019, Manfred Constapel
# This file is licensed under the terms of the MIT license.
#
import sys, time
from lib.utility import *
# ------------------------------------------------
VID, PID = 0x0451, 0xbef3 # XDS110
# ------------------------------------------------
try:
import usb
except Exception as e:
print('exception : lib :', e, file=sys.stderr, flush=True)
# ------------------------------------------------
def usb_init(desc_print=True, nodev_exit=True):
    """Locate the XDS110 debug probe (VID/PID above) on the USB bus.

    Returns the pyusb device handle, annotated with ``_detached_`` (list of
    interface numbers later consumed by usb_free) and ``_serno_`` (serial
    number string), or None when pyusb is unavailable or lookup fails.
    When no device is found and *nodev_exit* is True, the process exits(1).
    """
    if 'usb' not in sys.modules: return None
    try:
        dev = usb.core.find(idVendor=VID, idProduct=PID)
        if dev is not None:
            dev._detached_ = []  # interface numbers detached from kernel drivers
            m = usb.util.get_string(dev, dev.iManufacturer)
            p = usb.util.get_string(dev, dev.iProduct)
            s = usb.util.get_string(dev, dev.iSerialNumber)
            dev._serno_ = s
            if desc_print:
                print('{} : {} : {}'.format(m, p, s), file=sys.stderr, flush=True)
            return dev
        elif nodev_exit:
            print('exception : main :', 'no device has been detected', file=sys.stderr)
            sys.exit(1)
    except Exception as e:
        print(e)
    return None
def usb_point(dev, num, out):
    """Return the IN (*out* falsy) or OUT (*out* truthy) endpoint of
    interface *num* on *dev*, or None when pyusb is unavailable."""
    if 'usb' not in sys.modules:
        return None
    # Pick the desired transfer direction, then match endpoints against it.
    wanted = (usb.util.ENDPOINT_IN, usb.util.ENDPOINT_OUT)[int(out % 2)]
    interface = dev.get_active_configuration()[(num, 0)]

    def _matches(endpoint):
        return usb.util.endpoint_direction(endpoint.bEndpointAddress) == wanted

    return usb.util.find_descriptor(interface, custom_match=_matches)
def usb_free(dev):
    """Release claimed interfaces on *dev* and hand detached ones back to the kernel."""
    if 'usb' not in sys.modules: return None
    usb.util.dispose_resources(dev)
    for ifn in dev._detached_:
        usb.util.release_interface(dev, ifn)
        try:
            dev.attach_kernel_driver(ifn)
        except Exception:
            # Best effort: the kernel driver may already be attached or absent.
            # (Was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.)
            pass
# ------------------------------------------------
def xds_reset(dev, delay=50):
    """Pulse the XDS110 target reset via the vendor-specific OUT endpoint.

    *delay* is the pause in milliseconds between the two writes. Returns
    False when the endpoint cannot be resolved, True otherwise.
    """
    #_ = {0:'CDC Communication',
    #     1:'CDC Data', 2:'Vendor Specific', 3:'CDC Communication',
    #     4:'CDC Data', 5:'Human Interface Device', 6:'Vendor Specific'}
    ep = usb_point(dev, 2, True)  # OUT endpoint of interface 2 (vendor specific)
    if ep is None: return False
    for v in ('00', '01'):
        # Writes 0x2a 0x02 0x00 0x0e <00|01>.
        # NOTE(review): byte meanings (command id, length, reset assert/release)
        # inferred from the pattern — confirm against XDS110 host-interface docs.
        ep.write(hex2dec('{} {} {} {}'.format('2a', '02', '00', '0e {}'.format(v))))
        time.sleep(delay / 1000)
    return True
| 30.853333 | 87 | 0.544512 | 295 | 2,314 | 4.169492 | 0.430508 | 0.05122 | 0.031707 | 0.02439 | 0.2 | 0.126829 | 0.073171 | 0.073171 | 0 | 0 | 0 | 0.024221 | 0.250648 | 2,314 | 74 | 88 | 31.27027 | 0.685121 | 0.196197 | 0 | 0.142857 | 0 | 0 | 0.059015 | 0 | 0 | 0 | 0.006497 | 0 | 0 | 1 | 0.081633 | false | 0.020408 | 0.061224 | 0 | 0.22449 | 0.122449 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d3623ec2975daf6edfc533f9031ebffefe6e10e | 1,467 | py | Python | src/datasets/negative_samplers/popular.py | JiachengLi1995/FastRec | 88174f8beb822c6b5b348c888de184d48d9d013c | [
"MIT"
] | null | null | null | src/datasets/negative_samplers/popular.py | JiachengLi1995/FastRec | 88174f8beb822c6b5b348c888de184d48d9d013c | [
"MIT"
] | null | null | null | src/datasets/negative_samplers/popular.py | JiachengLi1995/FastRec | 88174f8beb822c6b5b348c888de184d48d9d013c | [
"MIT"
] | null | null | null | from .base import AbstractNegativeSampler
from tqdm import trange
from tqdm import tqdm
from collections import Counter
import numpy as np
class PopularNegativeSampler(AbstractNegativeSampler):
    """Negative sampler that draws items with probability proportional to
    their popularity across train/val/test interactions."""

    @classmethod
    def code(cls):
        return 'popular'

    def generate_negative_samples(self):
        """For each test user, draw ``sample_size`` unseen items, weighted by popularity."""
        counts = self.items_by_popularity()
        item_ids = list(counts.keys())
        freqs = [counts[i] for i in item_ids]
        total = np.sum(freqs)
        probs = [f / total for f in freqs]

        result = {}
        print('Sampling negative items')
        for user in tqdm(self.test):
            # Items this user interacted with anywhere must be excluded.
            interacted = set(self.train[user])
            interacted.update(self.val[user])
            interacted.update(self.test[user])

            drawn = []
            while len(drawn) < self.sample_size:
                batch = np.random.choice(item_ids, self.sample_size, replace=False, p=probs).tolist()
                fresh = [i for i in batch if i not in interacted and i not in drawn]
                drawn.extend(fresh)
            result[user] = drawn[:self.sample_size]
        return result

    def items_by_popularity(self):
        """Count item occurrences over train/val/test for all test users."""
        counts = Counter()
        for user in tqdm(self.test):
            counts.update(self.train[user])
            counts.update(self.val[user])
            counts.update(self.test[user])
        return counts
| 30.5625 | 99 | 0.625085 | 175 | 1,467 | 5.137143 | 0.342857 | 0.055617 | 0.046719 | 0.028921 | 0.046719 | 0.046719 | 0 | 0 | 0 | 0 | 0 | 0 | 0.28698 | 1,467 | 47 | 100 | 31.212766 | 0.859465 | 0 | 0 | 0.055556 | 0 | 0 | 0.02045 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.138889 | 0.027778 | 0.333333 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d3685b227eda67cb5f0fb424b7e5b1d410a46d7 | 2,854 | py | Python | td/lab3_skeleton.py | K1ntus/Deep-Learning | 8737c0cf32d34d1533f249ddd3eb8ada181fb7ad | [
"MIT"
] | null | null | null | td/lab3_skeleton.py | K1ntus/Deep-Learning | 8737c0cf32d34d1533f249ddd3eb8ada181fb7ad | [
"MIT"
] | null | null | null | td/lab3_skeleton.py | K1ntus/Deep-Learning | 8737c0cf32d34d1533f249ddd3eb8ada181fb7ad | [
"MIT"
] | null | null | null | from __future__ import print_function
import tensorflow as tf
import keras
from keras.datasets import mnist
from keras.models import Sequential,model_from_json
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
import numpy as np
def save_model(model, filename):
    """Persist *model* to <filename>.json (architecture) and <filename>.h5 (weights)."""
    architecture = model.to_json()
    with open(filename + ".json", "w") as arch_file:
        arch_file.write(architecture)
    # serialize weights to HDF5
    model.save_weights(filename + ".h5")
    print("Saved model to disk in files:", filename)
def load_model(filename):
    """Load a model previously written by save_model.

    Fixed: the reconstructed model was built and then discarded; it is now
    returned to the caller. The JSON file is also context-managed so the
    handle is always closed.
    """
    # load json and create model
    with open(filename + ".json", 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(filename + ".h5")
    print("Loaded model from disk")
    return loaded_model
#### EXERCICE 3
(x_train,y_train), (x_test, y_test) = mnist.load_data()
# Add a trailing channel axis: (n, 28, 28) -> (n, 28, 28, 1) for Conv2D input.
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1)
x_train = x_train.astype('float32')
y_train = y_train.astype('float32') # y: ground truth labels
x_test = x_test.astype('float32')
y_test = y_test.astype('float32')
# Normalize gray levels from [0, 255] to [0, 1] (28x28x1 images).
x_train = x_train/ 255
x_test = x_test/255
''' One hot encoding:
transposition en vecteur de taille 10
Valeur 6-> |0|0|0|0|0|0|1|0|0|0|
Valeur 2-> |0|0|1|0|0|0|0|0|0|0|
Valeur 9-> |0|0|1|0|0|0|0|0|0|1|
Fonction de perte (loss):L(
'''
# One-hot encode the digit labels into 10-dim vectors.
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)
# Base filename used for saving/loading the trained model.
filename_from_model="test"
# load_model(filename_from_model)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
def cnn1():
    """Build and compile a small CNN classifier for 28x28x1 MNIST digits."""
    stack = [
        keras.layers.Conv2D(64,
                            kernel_size=(3, 3),
                            activation='relu',
                            input_shape=(28, 28, 1)),
        keras.layers.MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        # hidden fully-connected layer
        Dense(10, activation='relu', kernel_initializer='normal'),
        # one softmax output unit per digit class (10 classes)
        Dense(10, activation='softmax', kernel_initializer='normal'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    return model
epochs=10 # number of training epochs
batch_size=64
cnn = cnn1()
cnn.summary()
cnn.fit(x_train, y_train, validation_data=(x_test,y_test), epochs=epochs, batch_size=batch_size)
save_model(cnn, filename_from_model)
# the saved model can be reloaded later to avoid retraining from scratch
| 30.361702 | 126 | 0.70042 | 444 | 2,854 | 4.29955 | 0.297297 | 0.020953 | 0.022001 | 0.020953 | 0.165532 | 0.073337 | 0.071241 | 0.067051 | 0.009429 | 0 | 0 | 0.042553 | 0.176594 | 2,854 | 93 | 127 | 30.688172 | 0.769787 | 0.124387 | 0 | 0 | 0 | 0 | 0.071678 | 0.01049 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.175439 | 0 | 0.245614 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d36c1e2e090aa8edbe34bd4cb5777a00843c1e4 | 9,733 | py | Python | image/modified/m_vqvae_multi_level8.py | arash-safari/vp | 377e0172112157b79690b32349481a17e7590063 | [
"MIT"
] | null | null | null | image/modified/m_vqvae_multi_level8.py | arash-safari/vp | 377e0172112157b79690b32349481a17e7590063 | [
"MIT"
] | null | null | null | image/modified/m_vqvae_multi_level8.py | arash-safari/vp | 377e0172112157b79690b32349481a17e7590063 | [
"MIT"
] | null | null | null | import sys
import torch
from torch import nn
from torch.nn import functional as F
# sys.path.append('../')
#
# from image.vqvae import Quantize
# Copyright 2018 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Borrowed from https://github.com/deepmind/sonnet and ported it to PyTorch
class Quantize(nn.Module):
    """Vector-quantization layer with EMA codebook updates (VQ-VAE style).

    Maps each input vector of size ``dim`` to its nearest of ``n_embed``
    codebook entries. The codebook is updated by exponential moving average
    while the module is in training mode (no gradient flows into it).

    Args:
        dim: dimensionality of the vectors being quantized.
        n_embed: number of codebook entries.
        decay: EMA decay for the codebook statistics.
        eps: Laplace-smoothing constant for the cluster sizes.
        qw: weight of the quantized term in the commitment-loss normalizer.
    """

    def __init__(self, dim, n_embed, decay=0.99, eps=1e-5, qw=10):
        super().__init__()

        self.qw = qw
        self.dim = dim
        self.n_embed = n_embed
        self.decay = decay
        self.eps = eps

        embed = torch.randn(dim, n_embed)
        # Buffers, not parameters: the codebook is updated by EMA, not SGD.
        self.register_buffer('embed', embed)
        self.register_buffer('cluster_size', torch.zeros(n_embed))
        self.register_buffer('embed_avg', embed.clone())

    def forward(self, input):
        """Quantize ``input`` of shape (..., dim).

        Returns:
            quantize: straight-through quantized tensor, same shape as input.
            diff: normalized commitment loss (scalar tensor).
            embed_ind: codebook indices, shape ``input.shape[:-1]``.
        """
        flatten = input.reshape(-1, self.dim)
        # Squared L2 distance to every codebook entry: |x|^2 - 2 x.e + |e|^2.
        dist = (
            flatten.pow(2).sum(1, keepdim=True)
            - 2 * flatten @ self.embed
            + self.embed.pow(2).sum(0, keepdim=True)
        )
        _, embed_ind = (-dist).max(1)
        embed_onehot = F.one_hot(embed_ind, self.n_embed).type(flatten.dtype)
        embed_ind = embed_ind.view(*input.shape[:-1])
        quantize = self.embed_code(embed_ind)

        if self.training:
            # EMA update of per-code counts and running sums.
            # Bug fix: the deprecated positional `add_(scalar, tensor)`
            # overload is replaced with the `alpha=` keyword form.
            self.cluster_size.data.mul_(self.decay).add_(
                embed_onehot.sum(0), alpha=1 - self.decay
            )
            embed_sum = flatten.transpose(0, 1) @ embed_onehot
            self.embed_avg.data.mul_(self.decay).add_(embed_sum, alpha=1 - self.decay)
            # Laplace smoothing so rarely used codes do not divide by ~zero.
            n = self.cluster_size.sum()
            cluster_size = (
                (self.cluster_size + self.eps) / (n + self.n_embed * self.eps) * n
            )
            embed_normalized = self.embed_avg / cluster_size.unsqueeze(0)
            self.embed.data.copy_(embed_normalized)

        # Commitment loss normalized by input/code magnitudes (qw-weighted).
        norm = input.detach().pow(2).mean() + self.qw * quantize.detach().pow(2).mean()
        diff = ((quantize.detach() - input).pow(2).mean()) / norm
        # Straight-through estimator: gradients bypass the quantization step.
        quantize = input + (quantize - input).detach()
        return quantize, diff, embed_ind

    def embed_code(self, embed_id):
        """Look up codebook vectors for an index tensor ``embed_id``."""
        return F.embedding(embed_id, self.embed.transpose(0, 1))
class ResBlock(nn.Module):
    """Residual block: out = x + Conv1x1(ReLU(Conv3x3(ReLU(x))))."""

    def __init__(self, in_channel, channel):
        super().__init__()
        ops = [
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channel, channel, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, in_channel, 1),
        ]
        self.conv = nn.Sequential(*ops)

    def forward(self, input):
        # Residual connection: add the block input back onto the conv output.
        return self.conv(input) + input
class Encoder(nn.Module):
    """Convolutional encoder that downsamples spatially by ``stride``.

    Args:
        in_channel: channels of the input feature map.
        channel: channels of the produced feature map.
        n_res_block: number of trailing residual blocks.
        n_res_channel: hidden channels inside each residual block.
        stride: total downsampling factor; must be 2, 4 or 8.

    Raises:
        ValueError: if ``stride`` is not 2, 4 or 8 (the original fell
            through with ``blocks`` undefined and crashed with NameError).
    """

    def __init__(self, in_channel, channel, n_res_block, n_res_channel, stride):
        super().__init__()

        if stride == 8:
            # Three stride-2 convolutions for an 8x downsample.
            # Bug fix: the original branch appended extra layers whose
            # channel counts did not line up (a Conv2d expecting
            # ``in_channel`` inputs after a ``channel``-wide layer), so it
            # could never run for in_channel != channel; rebuilt as a
            # consistent stack mirroring the stride-4 case.
            blocks = [
                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, channel, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, channel, 3, padding=1),
            ]
        elif stride == 4:
            blocks = [
                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel // 2, channel, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel, channel, 3, padding=1),
            ]
        elif stride == 2:
            blocks = [
                nn.Conv2d(in_channel, channel // 2, 4, stride=2, padding=1),
                nn.ReLU(inplace=True),
                nn.Conv2d(channel // 2, channel, 3, padding=1),
            ]
        else:
            raise ValueError(f"stride must be 2, 4 or 8, got {stride}")

        for i in range(n_res_block):
            blocks.append(ResBlock(channel, n_res_channel))
        blocks.append(nn.ReLU(inplace=True))

        self.blocks = nn.Sequential(*blocks)

    def forward(self, input):
        return self.blocks(input)
class Decoder(nn.Module):
    """Convolutional decoder that upsamples spatially by ``stride``.

    Args:
        in_channel: channels of the input feature map.
        out_channel: channels of the reconstructed output.
        channel: working channel width.
        n_res_block: number of leading residual blocks.
        n_res_channel: hidden channels inside each residual block.
        stride: total upsampling factor; must be 2, 4 or 8.

    Raises:
        ValueError: if ``stride`` is not 2, 4 or 8 (the original silently
            built a decoder with no upsampling for other values).
    """

    def __init__(
        self, in_channel, out_channel, channel, n_res_block, n_res_channel, stride
    ):
        super().__init__()

        blocks = [nn.Conv2d(in_channel, channel, 3, padding=1)]

        for i in range(n_res_block):
            blocks.append(ResBlock(channel, n_res_channel))
        blocks.append(nn.ReLU(inplace=True))

        if stride == 8:
            # Three stride-2 transposed convolutions for an 8x upsample.
            # Bug fix: the original branch ended with a ConvTranspose2d
            # expecting ``channel`` inputs right after a layer producing
            # ``out_channel`` — a channel mismatch that could never run;
            # rebuilt as a consistent stack.
            blocks.extend(
                [
                    nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(channel // 2, channel // 2, 4, stride=2, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(channel // 2, out_channel, 4, stride=2, padding=1),
                ]
            )
        elif stride == 4:
            blocks.extend(
                [
                    nn.ConvTranspose2d(channel, channel // 2, 4, stride=2, padding=1),
                    nn.ReLU(inplace=True),
                    nn.ConvTranspose2d(
                        channel // 2, out_channel, 4, stride=2, padding=1
                    ),
                ]
            )
        elif stride == 2:
            blocks.append(
                nn.ConvTranspose2d(channel, out_channel, 4, stride=2, padding=1)
            )
        else:
            raise ValueError(f"stride must be 2, 4 or 8, got {stride}")

        self.blocks = nn.Sequential(*blocks)

    def forward(self, input):
        return self.blocks(input)
class VQVAE_ML(nn.Module):
    """Multi-level VQ-VAE: a shared bottleneck is quantized residually by
    ``n_level`` codebooks whose quantizations are summed before decoding.

    The public interface (constructor defaults, forward/encode/decode/
    decode_code) is unchanged from the original implementation.
    """

    def __init__(
        self,
        in_channel=3,
        channel=64,
        n_res_block=2,
        n_res_channel=16,
        embed_dim=16,
        n_level=4,
        n_embed=16,
        decay=0.80,
        stride=4,
    ):
        super().__init__()

        # NOTE(review): stored but never read in this class — presumably for
        # callers; confirm before removing.
        self.device = 'cuda'

        # Two-stage encoder: stride-N downsample followed by an extra
        # stride-2 stage.
        self.enc = Encoder(in_channel, channel, n_res_block, n_res_channel, stride=stride)
        self.enc_t = Encoder(channel, channel, n_res_block, n_res_channel, stride=2)
        self.quantize_conv = nn.Conv2d(channel, embed_dim, 1)

        self.n_level = n_level
        self.quantizes = nn.ModuleList()
        self.quantizes_conv = nn.ModuleList()
        self.bns = nn.ModuleList()
        for i in range(n_level):
            self.quantizes.append(Quantize(embed_dim, n_embed, decay))
            # The per-level 1x1 convs / batch norms are registered but the
            # corresponding transform in ``encode`` is disabled; kept for
            # checkpoint compatibility.
            self.quantizes_conv.append(nn.Conv2d(embed_dim, embed_dim, 1))
            self.bns.append(nn.BatchNorm2d(embed_dim))

        # Mirror decoders for the two encoder stages.
        self.dec_t = Decoder(
            embed_dim, embed_dim, channel, n_res_block, n_res_channel, stride=2
        )
        self.dec = Decoder(
            embed_dim,
            in_channel,
            channel,
            n_res_block,
            n_res_channel,
            stride=stride,
        )

    def forward(self, input):
        """Encode, quantize and reconstruct; returns (reconstruction, loss)."""
        quant, diff, _, _ = self.encode(input)
        dec = self.decode(quant)
        return dec, diff

    def encode(self, input):
        """Residually quantize the encoded bottleneck over all levels.

        Returns:
            quant_sum: sum of all levels' quantizations.
            diffs: summed commitment losses, shape (1,).
            quants: per-level quantizations stacked on dim 1.
            ids: per-level codebook indices stacked on dim 1.
        """
        enc = self.enc(input)
        enc = self.enc_t(enc)
        bottleneck = self.quantize_conv(enc)

        ids = None
        quants = None
        diffs = None
        quant_sum = None
        for i, quantize in enumerate(self.quantizes):
            # Quantize layers expect channels-last input.
            quant, diff, id = quantize(bottleneck.permute(0, 2, 3, 1))
            quant = quant.permute(0, 3, 1, 2)
            diff = diff.unsqueeze(0)
            if diffs is None:
                diffs = diff
                quant_sum = quant
                quants = quant.unsqueeze(1)
                ids = id.unsqueeze(1)
            else:
                diffs += diff
                quant_sum += quant
                quants = torch.cat((quants, quant.unsqueeze(1)), dim=1)
                ids = torch.cat((ids, id.unsqueeze(1)), dim=1)
            # Each level quantizes the residual left by the previous levels.
            bottleneck -= quant
        return quant_sum, diffs, quants, ids

    def decode(self, quant):
        """Decode a (summed) quantized bottleneck back to image space."""
        dec = self.dec_t(quant)
        return self.dec(dec)

    def decode_code(self, codes):
        """Decode per-level code-index tensors into an image.

        Bug fixes vs. the original: each level now looks up its own codebook
        (``self.quantizes`` is a ModuleList and has no ``embed_code`` of its
        own), and the running sum starts from the first level instead of
        adding a tensor to ``None``.
        """
        quant_sum = None
        for i, code in enumerate(codes):
            quant = self.quantizes[i].embed_code(code)
            quant = quant.permute(0, 3, 1, 2)
            quant_sum = quant if quant_sum is None else quant_sum + quant
        dec = self.decode(quant_sum)
        return dec
| 34.392226 | 90 | 0.551526 | 1,182 | 9,733 | 4.384941 | 0.170051 | 0.029327 | 0.020066 | 0.037623 | 0.408065 | 0.385877 | 0.360602 | 0.329346 | 0.310824 | 0.284777 | 0 | 0.025411 | 0.324771 | 9,733 | 282 | 91 | 34.514184 | 0.763238 | 0.146615 | 0 | 0.325243 | 0 | 0 | 0.003627 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067961 | false | 0 | 0.019417 | 0.014563 | 0.15534 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d3b6a39414163158df318015d15ebef78eb5567 | 10,249 | py | Python | tests/test_data_loader.py | cccntu/accelerate | 4ad11b12d97717db0931de1c17d562bd5c7f4f41 | [
"Apache-2.0"
] | 2,313 | 2021-03-05T21:49:49.000Z | 2022-03-31T05:26:00.000Z | tests/test_data_loader.py | cccntu/accelerate | 4ad11b12d97717db0931de1c17d562bd5c7f4f41 | [
"Apache-2.0"
] | 230 | 2021-03-05T22:00:05.000Z | 2022-03-31T21:38:44.000Z | tests/test_data_loader.py | cccntu/accelerate | 4ad11b12d97717db0931de1c17d562bd5c7f4f41 | [
"Apache-2.0"
] | 152 | 2021-03-06T04:25:37.000Z | 2022-03-26T03:09:36.000Z | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import unittest
from torch.utils.data import BatchSampler, IterableDataset
from accelerate.data_loader import BatchSamplerShard, IterableDatasetShard
class RandomIterableDataset(IterableDataset):
    """Iterable dataset of random length for testing.

    Yields 0, 1, 2, ... and after each item flips a coin with probability
    ``p_stop`` of stopping; never yields more than ``max_length`` items.
    """

    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        index = 0
        keep_going = True
        # One random draw per yielded item, so a fixed random.seed gives a
        # reproducible sequence (exactly like the original loop).
        while keep_going and index < self.max_length:
            yield index
            index += 1
            keep_going = random.random() >= self.p_stop
class DataLoaderTester(unittest.TestCase):
    """Tests for accelerate's BatchSamplerShard and IterableDatasetShard,
    simulating a 2-process (or ``num_processes``) distributed setup."""

    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False):
        """Shard ``batch_sampler`` over 2 processes and compare each shard's
        realized batches (and, without split_batches, its reported length)
        against the per-process lists in ``expected``."""
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, split_batches) for i in range(2)]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Without batch splitting, len() of a shard is well-defined and
            # must agree with the number of expected batches.
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)

    def test_batch_sampler_shards_with_no_splits(self):
        """Each process receives whole batches; short datasets wrap around
        from the beginning (or batches are dropped with drop_last=True)."""
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)

    def test_batch_sampler_shards_with_splits(self):
        """Each source batch is split in two halves, one per process."""
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        """Shard ``dataset`` across ``num_processes`` and verify the shards
        are equal-length, batch-aligned, and together reproduce the dataset
        stream (with wrap-around unless drop_last)."""
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            # Interleave the shards batch by batch to rebuild the stream.
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            # Without drop_last, shards pad by cycling the dataset again.
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])

    def test_iterable_dataset_shard(self):
        """Exercise all drop_last/split_batches combinations, including a
        dataset shorter than one batch."""
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
| 45.959641 | 119 | 0.630208 | 1,403 | 10,249 | 4.395581 | 0.132573 | 0.128425 | 0.07005 | 0.070861 | 0.642776 | 0.629479 | 0.613588 | 0.599319 | 0.599319 | 0.591049 | 0 | 0.0632 | 0.251244 | 10,249 | 222 | 120 | 46.166667 | 0.740422 | 0.161284 | 0 | 0.363057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031847 | 1 | 0.044586 | false | 0 | 0.025478 | 0 | 0.082803 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d3d4864859ee40632563ce0af32cebdd0937175 | 32,094 | py | Python | scheduler/routes.py | ddiatlov/scheduler | f3343f20a2d104e20ec0031af6f6de2313656212 | [
"MIT"
] | null | null | null | scheduler/routes.py | ddiatlov/scheduler | f3343f20a2d104e20ec0031af6f6de2313656212 | [
"MIT"
] | null | null | null | scheduler/routes.py | ddiatlov/scheduler | f3343f20a2d104e20ec0031af6f6de2313656212 | [
"MIT"
] | null | null | null | import calendar
import os
from datetime import datetime
from dateutil.parser import parse
# server side rendering related imports
from flask import render_template, url_for, flash, redirect, request, jsonify, send_from_directory
# login related imports
from flask_login import login_user, logout_user, login_required, current_user
# SQLAlchemy related imports
from sqlalchemy import extract, and_, func
# __init__.py imports
from scheduler import app, db, bcrypt
# Forms
from scheduler.forms import RegistrationForm, LoginForm, EmployeeForm, DepartmentForm, ForecastItemsForm, ProjectForm, \
AssignmentForm, FilterForm, AccountForm, SearchEmployeeForm, SearchAssignmentForm, \
SearchProjectsForm
from scheduler.models import ForecastItemSchema, ProjectSchema, EmployeesSchema
# DB Models
from scheduler.models import User, Department, Employee, Project, ForecastItem, AssignmentTypes
@app.route('/favicon.ico')
def favicon():
    """Serve the favicon straight out of the app's static folder."""
    static_dir = os.path.join(app.root_path, 'static')
    return send_from_directory(static_dir, 'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')
@app.route("/", methods=['GET', 'POST'])
@app.route("/home", methods=['GET', 'POST'])
@login_required
def home():
curr_user = current_user.id
form = FilterForm()
newTask = ForecastItemsForm()
cal = calendar.TextCalendar(calendar.SUNDAY)
days = []
selected_year = datetime.today().year
selected_month = datetime.today().month
timeObject = cal.monthdatescalendar(selected_year, selected_month)
for _time in timeObject:
for _date in _time:
if _date.month == selected_month:
days.append(_date.strftime('%m/%d/%Y'))
if current_user.role == 'Admin':
data = db.session.query(ForecastItem) \
.filter(extract('month', ForecastItem.start_date) == selected_month).all()
elif current_user.role == 'Editor':
data = db.session.query(ForecastItem) \
.filter(and_(extract('month', ForecastItem.start_date) == selected_month),
ForecastItem.user_id == curr_user).all()
else:
data = db.session.query(ForecastItem) \
.filter(and_(extract('month', ForecastItem.start_date) == selected_month),
ForecastItem.employee_id == current_user.employee_id).all()
if request.method == "POST" and form.validate_on_submit:
new_year = request.form["filter_by_year"]
new_month = request.form["filter_by_month"]
days = []
selected_year = int(new_year)
selected_month = datetime.strptime(new_month, '%B').month
timeObject = cal.monthdatescalendar(selected_year, selected_month)
for _time in timeObject:
for _date in _time:
if _date.month == selected_month:
days.append(_date.strftime('%m/%d/%Y'))
form.filter_by_year.data = new_year
form.filter_by_month.choices = [
item.start_date.strftime('%B') for item in
db.session.query(ForecastItem)
.distinct(func.to_char(ForecastItem.start_date, "FMMonth"))
.filter(extract('year', ForecastItem.start_date) == f'{new_year}')
]
data = db.session.query(ForecastItem).filter(
and_(extract('year', ForecastItem.start_date) == new_year,
extract('month', ForecastItem.start_date) == selected_month,
ForecastItem.user_id == curr_user)).all()
return render_template('home.html',
days=days,
data=data,
form=form,
newTask=newTask)
@app.route("/assignments", methods=['GET', 'POST'])
@login_required
def assignments():
form = ForecastItemsForm()
search_form = SearchAssignmentForm()
res = search_form.searchField.data
choice = search_form.searchBy.data
curr_user = current_user.id
if current_user.role == 'Admin':
data = ForecastItem.query.all()
elif current_user.role == 'Editor':
data = ForecastItem.query.filter(ForecastItem.user_id == curr_user)
else:
data = ForecastItem.query.filter(ForecastItem.employee_id == current_user.employee_id)
if res and search_form.validate_on_submit():
if choice == 'First Name':
data = ForecastItem.query.join(Employee) \
.filter(and_(ForecastItem.user_id == curr_user, Employee.first_name.like(f'%{res.capitalize()}%'))
).all()
elif choice == 'Last Name':
data = ForecastItem.query.join(Employee) \
.filter(and_(ForecastItem.user_id == curr_user, Employee.last_name.like(f'%{res.capitalize()}%'))
).all()
elif choice == 'Project Name':
data = ForecastItem.query.join(Project) \
.filter(and_(ForecastItem.user_id == curr_user, Project.project_name.like(f'%{res.capitalize()}%'))
).all()
elif choice == 'Project Number':
data = ForecastItem.query.join(Project) \
.filter(and_(ForecastItem.user_id == curr_user, Project.project_number == res)
).all()
else:
data = ForecastItem.query.filter(ForecastItem.user_id == curr_user).all()
return render_template('assignments.html',
title='Assignments',
form=form,
search_form=search_form,
data=data)
if form.validate_on_submit():
forecast_item = ForecastItem(
employee_id=form.employee.data,
user_id=current_user.id,
project_id=form.project.data,
assignment_id=form.assignment.data,
status=form.status.data,
description=form.description.data,
start_date=form.start_date.data,
end_date=form.end_date.data
)
db.session.add(forecast_item)
db.session.commit()
flash('Task has been added.', 'success')
return redirect(url_for('home'))
return render_template('assignments.html',
title='Assignments',
form=form,
search_form=search_form,
data=data)
@app.route("/assignments/<int:assignment_id>", methods=['GET', 'POST'])
@login_required
def assignment(assignment_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = ForecastItem.query.get_or_404(assignment_id)
form = ForecastItemsForm()
if form.validate_on_submit():
data.employee_id = form.employee.data
data.project_id = form.project.data
data.assignment_id = form.assignment.data
data.status = form.status.data
data.description = form.description.data
data.start_date = form.start_date.data
data.end_date = form.end_date.data
db.session.commit()
flash('Task has been updated!', 'success')
return redirect(url_for('assignments',
assignment_id=assignment_id))
elif request.method == 'GET':
form.employee.data = data.employee_id
form.project.data = data.project_id
form.assignment.data = data.assignment_id
form.status.data = data.status
form.description.data = data.description
form.start_date.data = data.start_date
form.end_date.data = data.end_date
return render_template('assignment.html',
title='Update Assignment',
data=data,
form=form)
else:
return redirect(url_for('home'))
@app.route("/assignments/<int:assignment_id>/delete", methods=['POST'])
@login_required
def delete_assignment(assignment_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = ForecastItem.query.get_or_404(assignment_id)
db.session.delete(data)
db.session.commit()
flash('Task has been deleted!.', 'success')
return redirect(url_for('assignments'))
else:
return redirect(url_for('home'))
@app.route("/assignments_report", methods=['GET', 'POST'])
@login_required
def assignments_report():
form = FilterForm()
if current_user.role == 'Admin':
data = ForecastItem.query.join(Employee).all()
elif current_user.role == 'Editor':
data = ForecastItem.query.filter(ForecastItem.user_id == current_user.id)
else:
data = ForecastItem.query.filter(ForecastItem.employee_id == current_user.employee_id)
return render_template('assignments_report.html', title='Assignments Report', data=data, form=form)
@app.route("/assignments_report_csv", methods=['GET'])
@login_required
def assignments_report_csv():
if request.method == 'GET':
if current_user.role == 'Admin':
data = ForecastItem.query.join(Employee).all()
serialized = ForecastItemSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
elif current_user.role == 'Editor':
data = ForecastItem.query \
.join(Employee) \
.filter(ForecastItem.user_id == current_user.id)
serialized = ForecastItemSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
else:
data = ForecastItem.query \
.join(Employee) \
.filter(ForecastItem.employee_id == current_user.employee_id)
serialized = ForecastItemSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
else:
return redirect(url_for(assignments_report))
@app.route("/employees", methods=['GET', 'POST'])
@login_required
def employees():
if current_user.role == 'Admin' or current_user.role == 'Editor':
form = EmployeeForm()
search_form = SearchEmployeeForm()
res = search_form.searchField.data
choice = search_form.searchBy.data
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
if res and search_form.validate_on_submit():
if choice == 'First Name':
data = Employee.query.join(Department) \
.filter(Employee.first_name.like(f'%{res.capitalize()}%'))
elif choice == 'Last Name':
data = Employee.query.join(Department) \
.filter(Employee.last_name.like(f'%{res.capitalize()}%'))
elif choice == 'Department':
data = Employee.query.join(Department) \
.filter(Department.name.like(f'%{res.capitalize()}%'))
else:
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
return render_template('employees.html',
title='Employees',
form=form,
search_form=search_form,
data=data)
if form.validate_on_submit():
new_employee = Employee(first_name=form.first_name.data,
last_name=form.last_name.data,
department_id=form.department.data
)
db.session.add(new_employee)
db.session.commit()
flash(f'{form.first_name.data} {form.first_name.data} has been added.', 'success')
return redirect(url_for('home'))
return render_template('employees.html',
title='Employees',
form=form,
search_form=search_form,
data=data)
else:
return redirect(url_for('home'))
@app.route("/employees/<int:employee_id>", methods=['GET', 'POST'])
@login_required
def employee(employee_id):
data = Employee.query.get_or_404(employee_id)
form = EmployeeForm()
related = ForecastItem.query.join(Employee).filter(
and_(Employee.id == employee_id, ForecastItem.employee_id == employee_id))
if form.validate_on_submit():
data.first_name = form.first_name.data
data.last_name = form.last_name.data
data.department_id = form.department.data
db.session.commit()
flash(f'{data.first_name} {data.last_name}\'s information has been updated.', 'success')
return redirect(url_for('employees',
employee_id=employee_id))
elif request.method == 'GET':
form.first_name.data = data.first_name
form.last_name.data = data.last_name
form.department.data = data.department_id
return render_template('employee.html',
title='Employee Information',
data=data,
form=form,
related=related)
@app.route("/employees/<int:employee_id>/delete", methods=['POST'])
@login_required
def delete_employee(employee_id):
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = Employee.query.get_or_404(employee_id)
db.session.delete(data)
db.session.commit()
flash('Employee has been deleted.', 'success')
return redirect(url_for('employees'))
else:
return redirect(url_for('home'))
@app.route("/employees_report", methods=['GET', 'POST'])
@login_required
def employees_report():
if current_user.role == 'Admin' or current_user.role == 'Editor':
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
form = FilterForm()
return render_template('employees_report.html',
title='Projects Report',
data=data,
form=form)
else:
return redirect(url_for('home'))
@app.route("/employees_report_csv", methods=['GET'])
@login_required
def employees_report_csv():
if current_user.role == 'Admin' or current_user.role == 'Editor':
if request.method == 'GET':
data = Employee.query.join(Department) \
.filter(Employee.department_id == Department.id) \
.order_by(Employee.first_name.asc())
serialized = EmployeesSchema(many=True)
result = serialized.dump(data)
return jsonify({'data': result})
else:
return redirect(url_for('home'))
@app.route("/departments", methods=['GET', 'POST'])
@login_required
def departments():
    """List all departments and handle creation of new ones (Admin only)."""
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    all_departments = Department.query.order_by(Department.name.asc()).all()
    form = DepartmentForm()
    if form.validate_on_submit():
        db.session.add(Department(name=form.name.data))
        db.session.commit()
        flash(f'{form.name.data} department has been added.', 'success')
        return redirect(url_for('home'))
    return render_template('departments.html',
                           title='Departments',
                           form=form,
                           data=all_departments)
@app.route("/departments/<int:department_id>", methods=['GET', 'POST'])
@login_required
def department(department_id):
    """Edit a single department and show its assigned employees (Admin only)."""
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    record = Department.query.get_or_404(department_id)
    assigned_employees = (Employee.query
                          .filter_by(department_id=department_id)
                          .order_by(Employee.first_name.asc())
                          .all())
    form = DepartmentForm()
    if form.validate_on_submit():
        record.name = form.name.data
        db.session.commit()
        flash('Department name has been updated.', 'success')
        return redirect(url_for('departments',
                                department_id=department_id))
    if request.method == 'GET':
        # Pre-fill the form with the stored name on plain page loads.
        form.name.data = record.name
    return render_template('department.html',
                           title='Edit Department',
                           data=record,
                           form=form,
                           assigned_employees=assigned_employees)
@app.route("/departments/<int:department_id>/delete", methods=['POST'])
@login_required
def delete_department(department_id):
    """Delete a department (Admin only)."""
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    record = Department.query.get_or_404(department_id)
    db.session.delete(record)
    db.session.commit()
    flash('Department has been deleted.', 'success')
    return redirect(url_for('departments'))
@app.route("/projects", methods=['GET', 'POST'])
@login_required
def projects():
    """List, search and create projects (Admins and Editors only)."""
    if current_user.role not in ('Admin', 'Editor'):
        return redirect(url_for('home'))
    form = ProjectForm()
    search_form = SearchProjectsForm()
    query_text = search_form.searchField.data
    search_by = search_form.searchBy.data
    # Default listing: all projects ordered by project number.
    data = Project.query.join(Employee).order_by(Project.project_number.asc()).all()
    if query_text and search_form.validate_on_submit():
        base = Project.query.join(Employee)
        if search_by == 'Project Number':
            data = (base.filter(Project.project_number == query_text)
                    .order_by(Project.project_number.asc()).all())
        elif search_by == 'Project Name':
            data = base.filter(Project.project_name.like(f'%{query_text.capitalize()}%')).all()
        elif search_by == "Manager's First Name":
            data = base.filter(Employee.first_name.like(f'%{query_text.capitalize()}%')).all()
        elif search_by == "Manager's Last Name":
            data = base.filter(Employee.last_name.like(f'%{query_text.capitalize()}%')).all()
        else:
            data = base.order_by(Project.project_number.asc()).all()
        return render_template('projects.html',
                               title='Projects',
                               form=form,
                               search_form=search_form,
                               data=data)
    if form.validate_on_submit():
        new_project = Project(
            user_id=current_user.id,
            project_number=form.project_number.data,
            project_name=form.project_name.data,
            manager_id=form.project_manager.data,
            project_status=form.project_status.data,
            project_description=form.project_description.data
        )
        db.session.add(new_project)
        db.session.commit()
        flash(f'{form.project_number.data} has been added.', 'success')
        return redirect(url_for('home'))
    return render_template('projects.html',
                           title='Projects',
                           form=form,
                           search_form=search_form,
                           data=data)
@app.route("/projects/<int:project_id>", methods=['GET', 'POST'])
@login_required
def project(project_id):
    """Edit one project and show its related forecast items (Admin/Editor)."""
    if current_user.role not in ('Admin', 'Editor'):
        return redirect(url_for('home'))
    record = Project.query.get_or_404(project_id)
    form = ProjectForm()
    related = ForecastItem.query.join(Employee).filter(
        and_(Project.id == project_id, ForecastItem.project_id == project_id))
    if form.validate_on_submit():
        record.user_id = current_user.id
        record.project_number = form.project_number.data
        record.project_name = form.project_name.data
        record.manager_id = form.project_manager.data
        record.project_status = form.project_status.data
        record.project_description = form.project_description.data
        db.session.commit()
        flash('Project information has been updated.', 'success')
        return redirect(url_for('projects',
                                project_id=project_id))
    if request.method == 'GET':
        # Pre-fill the form with the stored values on plain page loads.
        form.project_number.data = record.project_number
        form.project_name.data = record.project_name
        form.project_manager.data = record.manager_id
        form.project_status.data = record.project_status
        form.project_description.data = record.project_description
    return render_template('project.html',
                           title='Edit Project Information',
                           data=record,
                           form=form,
                           related=related)
@app.route("/projects/<int:project_id>/delete", methods=['POST'])
@login_required
def delete_project(project_id):
    """Delete a project (Admins and Editors only)."""
    if current_user.role not in ('Admin', 'Editor'):
        return redirect(url_for('home'))
    record = Project.query.get_or_404(project_id)
    db.session.delete(record)
    db.session.commit()
    flash('Project has been deleted.', 'success')
    return redirect(url_for('projects'))
@app.route("/projects_report", methods=['GET', 'POST'])
@login_required
def projects_report():
    """Render the projects report (Admins and Editors only)."""
    if current_user.role not in ('Admin', 'Editor'):
        return redirect(url_for('home'))
    data = Project.query.join(Employee)
    form = FilterForm()
    return render_template('project_reports.html',
                           title='Projects Report',
                           data=data,
                           form=form)
@app.route("/projects_report_csv", methods=['GET'])
@login_required
def projects_report_csv():
    """Return projects joined with employees as JSON (feeds CSV export)."""
    if current_user.role in ('Admin', 'Editor'):
        # The route only accepts GET; the former `request.method == 'GET'`
        # check was redundant and left an implicit-None return path.
        data = Project.query.join(Employee)
        serialized = ProjectSchema(many=True)
        result = serialized.dump(data)
        return jsonify({'data': result})
    return redirect(url_for('home'))
@app.route("/assignment_types", methods=['GET', 'POST'])
@login_required
def assignment_types():
    """List assignment types and create new ones (Admin only)."""
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    data = AssignmentTypes.query.order_by(AssignmentTypes.assignment_type.asc()).all()
    form = AssignmentForm()
    if form.validate_on_submit():
        db.session.add(AssignmentTypes(assignment_type=form.assignment_name.data))
        db.session.commit()
        flash('New assignment type has been added.', 'success')
        return redirect(url_for('home'))
    return render_template('assignment_types.html',
                           title='Assignment Types',
                           form=form,
                           data=data)
@app.route("/assignment_types/<int:assignment_type_id>", methods=['GET', 'POST'])
@login_required
def assignment_type(assignment_type_id):
    """Edit a single assignment type (Admin only)."""
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    record = AssignmentTypes.query.get_or_404(assignment_type_id)
    form = AssignmentForm()
    if form.validate_on_submit():
        record.assignment_type = form.assignment_name.data
        db.session.commit()
        flash('Assignment type has been updated.', 'success')
        return redirect(url_for('assignment_types',
                                assignment_type_id=assignment_type_id))
    if request.method == 'GET':
        # Pre-fill the form with the stored name on plain page loads.
        form.assignment_name.data = record.assignment_type
    return render_template('assignment_type.html',
                           title='Assignment Type Information',
                           data=record,
                           form=form)
@app.route("/assignment_types/<int:assignment_type_id>/delete", methods=['POST'])
@login_required
def delete_assignment_type(assignment_type_id):
    """Delete an assignment type (Admin only)."""
    if current_user.role == 'Admin':
        data = AssignmentTypes.query.get_or_404(assignment_type_id)
        db.session.delete(data)
        db.session.commit()
        # Capitalised for consistency with the other flash messages
        # (was: 'assignment type has been deleted.').
        flash('Assignment type has been deleted.', 'success')
        return redirect(url_for('assignment_types'))
    else:
        return redirect(url_for('home'))
# enable this route for manual registration when needed. remember to enable the appropriate links and templates
# @app.route("/register", methods=['GET', 'POST'])
# def register():
# if current_user.is_authenticated:
# return redirect(url_for('home'))
#
# form = RegistrationForm()
# if form.validate_on_submit():
# hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
# user = User(
# username=form.username.data,
# email=form.email.data,
# password=hashed_password
# )
# db.session.add(user)
# db.session.commit()
# flash(f'Account created for {form.email.data}.', 'success')
# return redirect(url_for('login'))
# return render_template('register.html',
# title='Register',
# form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
    """Authenticate a user by username/password and start a session."""
    if current_user.is_authenticated and current_user.is_active:
        return redirect(url_for('home'))
    form = LoginForm()
    if form.validate_on_submit():
        candidate = User.query.filter_by(username=form.username.data).first()
        if candidate and bcrypt.check_password_hash(candidate.password, form.password.data):
            login_user(candidate, remember=form.remember.data)
            # Honour the ?next= redirect target if one was supplied.
            next_page = request.args.get('next')
            return redirect(next_page or url_for('home'))
        flash('Login Unsuccessful. Please contact your IT Administrator.', 'danger')
    return render_template('login.html',
                           title='Login',
                           form=form)
@app.route("/logout")
def logout():
    """End the current session and return to the public home page."""
    logout_user()
    return redirect(url_for('home'))
@app.route("/account", methods=['GET', 'POST'])
@login_required
def account():
    """Account dashboard; the data scope shown depends on the user's role.

    Admins see all assignments; Editors see only their own projects and
    their ten most recent assignments; everyone else sees only the
    assignments attached to their own employee record.
    """
    users_data = User.query.all()
    projects_data = (Project.query
                     .join(Employee)
                     .order_by(Project.project_number.asc())
                     .all())
    if current_user.role == 'Admin':
        assignments_data = (ForecastItem.query
                            .join(Project)
                            .join(Employee)
                            .join(AssignmentTypes)
                            .all())
    elif current_user.role == 'Editor':
        # Editors only see their own projects and assignments.
        projects_data = (Project.query
                         .join(Employee)
                         .join(User)
                         .filter(Project.user_id == current_user.id)
                         .order_by(Project.project_number.asc()))
        assignments_data = (ForecastItem.query
                            .join(Project, ForecastItem.project_id == Project.id)
                            .join(Employee, Employee.id == ForecastItem.employee_id)
                            .join(User, ForecastItem.user_id == User.id)
                            .join(AssignmentTypes)
                            .filter(ForecastItem.user_id == current_user.id)
                            .limit(10))
    else:
        assignments_data = (ForecastItem.query
                            .filter(ForecastItem.employee_id == current_user.employee_id))
    return render_template('account.html',
                           title='Account',
                           users_data=users_data,
                           current_user=current_user,
                           projects_data=projects_data,
                           assignments_data=assignments_data
                           )
# connects to fetch_months.js script to get the distinct dates from the database without refresh the form
@app.route("/fetch_months", methods=['POST'])
@login_required
def fetch_months():
    """AJAX endpoint: distinct month names with forecast items for a year."""
    year_value = request.json['year_num']

    def months_for_year(candidate, fuzzy=False):
        """Return month names for a parseable year string, or False."""
        try:
            # Raises if the posted value is not a parseable date/year.
            parse(candidate, fuzzy=fuzzy)
            rows = (db.session.query(ForecastItem)
                    .distinct(func.to_char(ForecastItem.start_date, "FMMonth"))
                    .filter(extract('year', ForecastItem.start_date) == candidate))
            return [row.start_date.strftime('%B') for row in rows]
        except Exception as err:
            print(err)
            return False

    return jsonify({'month_choices': months_for_year(year_value)})
# For admin use only
@app.route("/account/<int:user_id>", methods=['GET', 'POST'])
@login_required
def edit_account(user_id):
    """Admin-only: edit any user's account (always re-hashes the password)."""
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    user_record = User.query.get_or_404(user_id)
    form = AccountForm()
    if form.validate_on_submit():
        user_record.username = form.username.data
        user_record.email = form.email.data
        user_record.password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        user_record.role = form.role.data
        user_record.is_active = form.is_active.data
        user_record.employee_id = form.employee.data
        db.session.commit()
        flash('User information has been updated.', 'success')
        return redirect(url_for('account', id=user_id))
    if request.method == 'GET':
        # Pre-fill everything except the password on plain page loads.
        form.username.data = user_record.username
        form.email.data = user_record.email
        form.role.data = user_record.role
        form.is_active.data = user_record.is_active
        form.employee.data = user_record.employee_id
    return render_template('edit_account.html',
                           title='Edit Account',
                           form=form,
                           data=user_record)
# For admin use only
@app.route("/account/<int:user_id>/delete", methods=['POST'])
@login_required
def delete_account(user_id):
    """Delete a user account (Admin only).

    Previously any logged-in user could delete accounts despite the
    "For admin use only" comment; the role check now matches every other
    admin-only view in this module.
    """
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    data = User.query.get_or_404(user_id)
    db.session.delete(data)
    db.session.commit()
    flash(f'{data.username} has been deleted.', 'success')
    return redirect(url_for('account'))
# For admin use only
@app.route("/register_as_admin", methods=['GET', 'POST'])
@login_required
def register_as_admin():
    """Admin-only registration of new user accounts."""
    if current_user.role != 'Admin':
        return redirect(url_for('home'))
    form = RegistrationForm()
    if form.validate_on_submit():
        hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
        new_user = User(
            username=form.username.data,
            email=form.email.data,
            role=form.role.data,
            employee_id=form.employee.data,
            password=hashed_password
        )
        db.session.add(new_user)
        db.session.commit()
        flash(f'Account created for {form.email.data}.', 'success')
        return redirect(url_for('account'))
    return render_template('register_as_admin.html',
                           title='Register New',
                           form=form)
# error pages
@app.errorhandler(403)
def forbidden_error(e):
    """Render the 403 page with the matching HTTP status code."""
    # Previously returned without a status code, so error pages were
    # served with HTTP 200; all three handlers were also named
    # `page_not_found`, silently shadowing each other at module level.
    return render_template('errors/403.html'), 403


@app.errorhandler(404)
def page_not_found(e):
    """Render the 404 page with the matching HTTP status code."""
    return render_template('errors/404.html'), 404


@app.errorhandler(500)
def internal_server_error(e):
    """Render the 500 page with the matching HTTP status code."""
    return render_template('errors/500.html'), 500
| 39.330882 | 120 | 0.593226 | 3,450 | 32,094 | 5.330145 | 0.073623 | 0.035891 | 0.032737 | 0.045679 | 0.762847 | 0.699603 | 0.608135 | 0.525586 | 0.442221 | 0.374626 | 0 | 0.002594 | 0.291176 | 32,094 | 815 | 121 | 39.379141 | 0.80575 | 0.036362 | 0 | 0.535503 | 0 | 0 | 0.104849 | 0.020426 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050296 | false | 0.007396 | 0.016272 | 0.005917 | 0.176036 | 0.001479 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d3ea1065e8e6914e61c9e8dc14c875d892ae858 | 6,320 | py | Python | projects/mid_atlantic/study/plot_Fig3_storage_potential.py | EnergyModels/caes | 7bb3e7f9cfbfcc2e781918333f6be3b718ca0743 | [
"MIT"
] | 1 | 2022-03-30T07:40:45.000Z | 2022-03-30T07:40:45.000Z | projects/mid_atlantic/study/plot_Fig3_storage_potential.py | EnergyModels/caes | 7bb3e7f9cfbfcc2e781918333f6be3b718ca0743 | [
"MIT"
] | null | null | null | projects/mid_atlantic/study/plot_Fig3_storage_potential.py | EnergyModels/caes | 7bb3e7f9cfbfcc2e781918333f6be3b718ca0743 | [
"MIT"
] | 1 | 2021-11-02T17:02:27.000Z | 2021-11-02T17:02:27.000Z | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from math import pi
import numpy as np
import matplotlib.colors as colors
import matplotlib.patches as mpatches
df = pd.read_csv('all_analysis.csv')

# -----------------------------------------------------------------
# Unit conversions and human-readable column names
# -----------------------------------------------------------------
df.loc[:, 'Distance to shore (km)'] = df.loc[:, 'NEAR_DIST'] / 1000.0
df.loc[:, 'Water depth (m)'] = df.loc[:, 'RASTERVALU']
df.loc[:, 'Feasibility (%)'] = df.loc[:, 'feasible_fr'] * 100.0
df.loc[:, 'Formation (-)'] = df.loc[:, 'formation']
df.loc[:, 'Nearest State (-)'] = df.loc[:, 'NEAR_FC']

loc_dict = {'VA_shore': 'Virginia', 'MD_shore': 'Maryland', 'NJ_shore': 'New Jersey', 'DE_shore': 'Delaware',
            'NY_shore': 'New York', 'MA_shore': 'Massachusetts', 'RI_shore': 'Rhode Island'}
formation_dict = {'LK1': 'Lower Cretaceous', 'MK1-3': 'Middle Cretaceous', 'UJ1': 'Upper Jurassic'}

# Replace shapefile codes by display names (KeyError on unknown codes)
for state_code in df.loc[:, 'Nearest State (-)'].unique():
    matches = df.loc[:, 'Nearest State (-)'] == state_code
    df.loc[matches, 'Nearest State (-)'] = loc_dict[state_code]

for formation_code in df.loc[:, 'Formation (-)'].unique():
    matches = df.loc[:, 'Formation (-)'] == formation_code
    df.loc[matches, 'Formation (-)'] = formation_dict[formation_code]

# Quick look at the depth distribution
sns.histplot(df, x='Water depth (m)')

df.loc[:, 'RTE [%]'] = df.loc[:, 'RTE_mean']

# Categorise water depth (depth values are negative metres below sea level)
df.loc[:, 'Water depth'] = '> 60m'
df.loc[df.loc[:, 'Water depth (m)'] > -60.0, 'Water depth'] = '30m - 60m'
df.loc[df.loc[:, 'Water depth (m)'] > -30.0, 'Water depth'] = '<30 m'

# Colour palette (dark to light blue)
palette_rgb = np.array([[69, 117, 180],
                        [145, 191, 219],
                        [224, 243, 248]])
palette_hex = [colors.rgb2hex(rgb / 255) for rgb in palette_rgb]

# Calculate storage potential
frac = 0.1  # fraction of grid available for storage
A_grid = 19790 * 19790  # each square is 20 km by 20 km
well_MWh = 200 * 24  # 200 MW at 24 hour duration
df.loc[:, 'A_well'] = pi * df.loc[:, 'r_f'] ** 2
df.loc[:, 'n_wells'] = frac * A_grid / df.loc[:, 'A_well']
df.loc[:, 'MWh'] = df.loc[:, 'n_wells'] * well_MWh

# Bin results by round-trip efficiency and water depth
entries = ['RTE', 'MWh', 'Depth']
RTE_bins = [0.40, 0.50, 0.60, 0.65]
RTE_labels = ['40 - 50', '50 - 60', '> 60']
Depth_bins = np.arange(0.0, -200.1, -10.0)
df_smry = pd.DataFrame(index=RTE_labels, columns=Depth_bins[:-1])
df_highEff = pd.DataFrame()

for i in range(len(RTE_bins) - 1):
    for j in range(len(Depth_bins) - 1):
        # Mask of sites falling into this RTE x depth bin
        in_bin = (RTE_bins[i] <= df.loc[:, 'RTE_mean']) & (df.loc[:, 'RTE_mean'] < RTE_bins[i + 1]) \
            & (Depth_bins[j + 1] < df.loc[:, 'Water depth (m)']) & (df.loc[:, 'Water depth (m)'] <= Depth_bins[j])
        df_smry.loc[RTE_labels[i], Depth_bins[j]] = df.loc[in_bin, 'MWh'].sum()
        if RTE_bins[i] >= 0.60:
            df_highEff = df_highEff.append(df.loc[in_bin, :], ignore_index=True)

# Bar widths follow the (descending) depth bin edges
widths = [Depth_bins[j] - Depth_bins[j + 1] for j in range(len(Depth_bins) - 1)]
# Stacked bar chart: storage capacity per depth bin, stacked by RTE class
x = df_smry.columns * -1.0  # depth bin left edges as positive metres
bottom = None
for colour_idx, rte_label in enumerate(reversed(df_smry.index)):
    height = df_smry.loc[rte_label, :] / 1e6  # MWh -> TWh
    if bottom is None:
        plt.bar(x, height, width=widths, label=rte_label, align='edge', color=palette_hex[colour_idx])
        bottom = height
    else:
        plt.bar(x, height, bottom=bottom, width=widths, label=rte_label, align='edge',
                color=palette_hex[colour_idx])
        bottom = bottom + height

# Outline the stacked totals
plt.step(x, bottom, 'k', where='post')

# labels
plt.xlabel('Water depth (m)')
plt.ylabel('Storage capacity (TWh)')

# limits
xlims = [0.0, Depth_bins[-1] * -1.0]
ylims = [0.0, 400]
plt.xlim(left=xlims[0], right=xlims[1])
plt.ylim(bottom=ylims[0], top=ylims[1])

# Divider between fixed-bottom and floating wind-turbine depths
plt.plot([60.0, 60.0], ylims, 'k--')

# Legend from explicit patches so colours map onto RTE classes
ax = plt.gca()
patches = [mpatches.Patch(edgecolor='black', facecolor=palette_hex[2], label=RTE_labels[0]),
           mpatches.Patch(edgecolor='black', facecolor=palette_hex[1], label=RTE_labels[1]),
           mpatches.Patch(edgecolor='black', facecolor=palette_hex[0], label=RTE_labels[2])]
leg1 = ax.legend(handles=patches, bbox_to_anchor=(1.0, 1.0), loc="upper right", title='Round-trip Efficiency (%)')

# Annotations and arrows either side of the 60 m divider
plt.text(35, 375, 'Fixed bottom\nwind turbines', horizontalalignment='center', verticalalignment='center',
         fontsize='medium')
plt.text(85, 375, 'Floating\nwind turbines', horizontalalignment='center', verticalalignment='center',
         fontsize='medium')
ax.arrow(x=60 - 5, y=350, dx=-25, dy=0.0, width=2.0, color='black')
ax.arrow(x=60 + 5, y=350, dx=25, dy=0.0, width=2.0, color='black')

# Figure size per Elsevier artwork guidelines
# (single column 3.54 in, 1.5 column 5.51 in, 2 column 7.48 in)
width = 8.0  # inches
height = 6.5  # inches
f = plt.gcf()
f.set_size_inches(width, height)

savename = "Fig3_storage_potential.png"
plt.savefig(savename, dpi=400)

# Sum total TWh in less than 60m for greater than 50% efficiency
rows = ['50 - 60', '> 60']
cols = [0, -10, -20, -30, -40, -50]
total_GWh = df_smry.loc[rows, cols].sum().sum() / 1000
print('GWh RTE >50% and water depth <60m: ' + str(total_GWh))

# Sum total TWh in less than 60m for greater than 60% efficiency
rows = ['> 60']
total_GWh = df_smry.loc[rows, cols].sum().sum() / 1000
print('GWh RTE >60% and water depth <60m: ' + str(total_GWh))

# save high efficiency and shallow water sites to csv
df_highEff.to_csv('high_efficiency_and_shallow_sites.csv')
2d3ffe34f95afc554efd559de389de6e17e5f469 | 2,353 | py | Python | setup.py | diecutter/piecutter | 250a90a4cae1b72ff3c141dffb8c58de74dbedfd | [
"BSD-3-Clause"
] | 2 | 2016-05-02T02:22:34.000Z | 2021-02-08T18:17:30.000Z | setup.py | diecutter/piecutter | 250a90a4cae1b72ff3c141dffb8c58de74dbedfd | [
"BSD-3-Clause"
] | 2 | 2016-03-22T10:09:13.000Z | 2016-07-01T08:04:43.000Z | setup.py | diecutter/piecutter | 250a90a4cae1b72ff3c141dffb8c58de74dbedfd | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Python packaging."""
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class Tox(TestCommand):
    """setuptools test command that runs the test suite through tox.

    Registered under the ``test`` command name via ``CMDCLASS`` below, so
    ``python setup.py test`` delegates to tox.
    """
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        # Tell setuptools we supply the test suite ourselves.
        self.test_suite = True
    def run_tests(self):
        import tox  # import here, cause outside the eggs aren't loaded.
        errno = tox.cmdline(self.test_args)
        # Propagate tox's exit status as this process's exit code.
        sys.exit(errno)
#: Absolute path to directory containing setup.py file.
here = os.path.abspath(os.path.dirname(__file__))
#: Boolean, ``True`` if environment is running Python version 2.
IS_PYTHON2 = sys.version_info[0] == 2

# Data for use in setup.
NAME = 'piecutter'
DESCRIPTION = 'Templating framework.'
# Read the long description and version from disk; using context managers
# closes the files promptly (the previous bare open(...).read() calls
# leaked the file handles).
with open(os.path.join(here, 'README.rst')) as readme_file:
    README = readme_file.read()
with open(os.path.join(here, 'VERSION')) as version_file:
    VERSION = version_file.read().strip()
AUTHOR = u'Rémy HUBSCHER'
EMAIL = 'hubscher.remy@gmail.com'
LICENSE = 'BSD'
URL = 'https://{name}.readthedocs.io/'.format(name=NAME)
CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: BSD License',
    'Natural Language :: English',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
]
KEYWORDS = [
    'template',
    'templates',
    'template engine',
    'jinja2',
    'django',
    'generator',
    'file generation',
    'scaffold',
]
PACKAGES = [NAME.replace('-', '_')]
REQUIREMENTS = [
    'Django',
    'jinja2',
    'requests',
    'setuptools',
    'six',
]
if IS_PYTHON2:
    # mock is in the standard library from Python 3.3 onwards.
    REQUIREMENTS.extend(['mock'])
ENTRY_POINTS = {}
TEST_REQUIREMENTS = ['tox']
CMDCLASS = {'test': Tox}

if __name__ == '__main__':  # Do not run setup() when we import this module.
    setup(
        name=NAME,
        version=VERSION,
        description=DESCRIPTION,
        long_description=README,
        classifiers=CLASSIFIERS,
        keywords=' '.join(KEYWORDS),
        author=AUTHOR,
        author_email=EMAIL,
        url=URL,
        license=LICENSE,
        packages=PACKAGES,
        include_package_data=True,
        zip_safe=False,
        install_requires=REQUIREMENTS,
        entry_points=ENTRY_POINTS,
        tests_require=TEST_REQUIREMENTS,
        cmdclass=CMDCLASS,
    )
| 25.576087 | 76 | 0.640034 | 269 | 2,353 | 5.472119 | 0.524164 | 0.016304 | 0.025815 | 0.019022 | 0.024457 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007104 | 0.222269 | 2,353 | 91 | 77 | 25.857143 | 0.797268 | 0.138547 | 0 | 0.027027 | 0 | 0 | 0.222886 | 0.011443 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.067568 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d40243559b725d753f07723a2dc092564c538dc | 12,205 | py | Python | energy_demand/plotting/plotting_results.py | nismod/energy_demand | 247fcea074a846026710ed9b039b22f8b9835643 | [
"MIT"
] | 14 | 2018-02-23T10:03:45.000Z | 2022-03-03T13:59:30.000Z | energy_demand/plotting/plotting_results.py | nismod/energy_demand | 247fcea074a846026710ed9b039b22f8b9835643 | [
"MIT"
] | 59 | 2017-02-22T15:03:30.000Z | 2020-12-16T12:26:17.000Z | energy_demand/plotting/plotting_results.py | nismod/energy_demand | 247fcea074a846026710ed9b039b22f8b9835643 | [
"MIT"
] | 5 | 2017-08-22T11:31:42.000Z | 2020-06-24T18:30:12.000Z | """Plotting model results and storing as PDF to result folder
"""
import os
import logging
from energy_demand.technologies import tech_related
from energy_demand.plotting import plotting_styles
from energy_demand.plotting import fig_lad_related
from energy_demand.plotting import fig_one_fueltype_multiple_regions_peak_h
from energy_demand.plotting import fig_fuels_enduses_y
from energy_demand.plotting import fig_stacked_enduse
from energy_demand.plotting import fig_cross_graphs
from energy_demand.plotting import fig_stacked_enduse_sectors
from energy_demand.plotting import fig_lf
from energy_demand.plotting import fig_fuels_enduses_week
from energy_demand.plotting import fig_load_profile_dh_multiple
from energy_demand.plotting import fig_fuels_peak_h
from energy_demand.plotting import fig_weather_variability_priod
#matplotlib.use('Agg') # Used to make it work in linux179
def run_all_plot_functions(
        results_container,
        reg_nrs,
        regions,
        lookups,
        result_paths,
        assumptions,
        enduses,
        plot_crit,
        base_yr,
        comparison_year
):
    """Generate every result plot enabled by the flags in ``plot_crit``.

    Figures are written as PDFs below ``result_paths['data_results_PDF']``.

    Arguments
    ---------
    results_container : dict
        Model results (keys used here: ``ed_fueltype_regs_yh``,
        ``reg_load_factor_y``, ``results_enduse_every_year``,
        ``av_season_daytype_cy``, ``season_daytype_cy``)
    reg_nrs : int
        Number of regions
    regions : list
        Region identifiers
    lookups : dict
        Lookups; ``lookups['fueltypes']`` maps fueltype string -> int
    result_paths : dict
        Output paths (``data_results_PDF`` is used here)
    assumptions : dict
        Model assumptions (``sim_yrs`` is used here)
    enduses : dict
        Enduses per submodel ('residential', 'service', 'industry')
    plot_crit : dict
        Boolean flags selecting which plots to generate
    base_yr : int
        Base year
    comparison_year : int
        Year to generate comparison plots
    """
    if plot_crit['plot_lad_cross_graphs']:
        try:
            # Plot cross graph where every region is a dot
            fig_cross_graphs.plot_cross_graphs(
                base_yr=base_yr,
                comparison_year=comparison_year,
                regions=regions,
                ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
                reg_load_factor_y=results_container['reg_load_factor_y'],
                fueltype_int=lookups['fueltypes']['electricity'],
                fueltype_str='electricity',
                fig_name=os.path.join(
                    result_paths['data_results_PDF'], "comparions_LAD_cross_graph_electricity_by_cy.pdf"),
                label_points=False,
                plotshow=False)
            fig_cross_graphs.plot_cross_graphs(
                base_yr=base_yr,
                comparison_year=comparison_year,
                regions=regions,
                ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
                reg_load_factor_y=results_container['reg_load_factor_y'],
                fueltype_int=lookups['fueltypes']['gas'],
                fueltype_str='gas',
                fig_name=os.path.join(
                    result_paths['data_results_PDF'], "comparions_LAD_cross_graph_gas_by_cy.pdf"),
                label_points=False,
                plotshow=False)
        except KeyError:
            logging.info("Check if correct comparison year is provided, i.e. really data exists for this year")

    # ----------
    # Plot LAD differences for first and last year
    # NOTE(review): years are hard-coded (2015/2050) instead of using
    # base_yr/comparison_year -- kept as-is to preserve behaviour.
    # ----------
    try:
        fig_lad_related.plot_lad_comparison(
            base_yr=2015,
            comparison_year=2050,
            regions=regions,
            ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
            fueltype_int=lookups['fueltypes']['electricity'],
            fueltype_str='electricity',
            fig_name=os.path.join(
                result_paths['data_results_PDF'], "comparions_LAD_modelled_electricity_by_cy.pdf"),
            label_points=False,
            plotshow=False)
        print("... plotted by-cy LAD energy demand comparison")

        # Plot peak h for every hour
        fig_lad_related.lad_comparison_peak(
            base_yr=2015,
            comparison_year=2050,
            regions=regions,
            ed_year_fueltype_regs_yh=results_container['ed_fueltype_regs_yh'],
            fueltype_int=lookups['fueltypes']['electricity'],
            fueltype_str='electricity',
            fig_name=os.path.join(
                result_paths['data_results_PDF'], "comparions_LAD_modelled_electricity_peakh_by_cy.pdf"),
            label_points=False,
            plotshow=False)
        print("... plotted by-cy LAD energy demand comparison")
    except Exception:
        # Best-effort plotting: failures here are deliberately ignored.
        # Narrowed from a bare `except:` which also swallowed
        # SystemExit and KeyboardInterrupt.
        pass

    # ----------------
    # Plot demand for every region over time
    # ----------------
    if plot_crit['plot_line_for_every_region_of_peak_demand']:
        logging.info("... plot fuel per fueltype for every region over annual timesteps")
        fig_one_fueltype_multiple_regions_peak_h.plt_regions_peak_h(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            regions,
            os.path.join(
                result_paths['data_results_PDF'],
                'peak_h_total_electricity.pdf'),
            fueltype_str_to_plot="electricity")

    if plot_crit['plot_fuels_enduses_y']:
        # Plot total fuel (y) per fueltype as line chart
        fig_fuels_enduses_y.run(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            os.path.join(
                result_paths['data_results_PDF'],
                'y_fueltypes_all_enduses.pdf'))

    # ------------
    # Plot stacked annual enduses
    # ------------
    if plot_crit['plot_stacked_enduses']:
        rs_enduses_sorted = [
            'rs_space_heating',
            'rs_water_heating',
            'rs_lighting',
            'rs_cold',
            'rs_wet',
            'rs_consumer_electronics',
            'rs_home_computing',
            'rs_cooking']
        ss_enduses_sorted = [
            'ss_space_heating',
            'ss_water_heating',
            'ss_lighting',
            'ss_catering',
            'ss_small_power',
            'ss_fans',
            'ss_cooling_humidification',
            'ss_ICT_equipment',
            'ss_other_gas',
            'ss_other_electricity',
            'ss_cooled_storage']
        is_enduses_sorted = [
            'is_space_heating',
            'is_lighting',
            'is_refrigeration',
            'is_motors',
            'is_compressed_air',
            'is_high_temp_process',
            'is_low_temp_process',
            'is_other',
            'is_drying_separation']
        rs_color_list = plotting_styles.rs_color_list_selection()
        ss_color_list = plotting_styles.ss_color_list_selection()
        is_color_list = plotting_styles.is_color_list_selection()

        # Residential
        fig_stacked_enduse.run(
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            rs_enduses_sorted,
            rs_color_list,
            os.path.join(
                result_paths['data_results_PDF'], "stacked_rs_country.pdf"),
            plot_legend=True)
        # Service
        fig_stacked_enduse.run(
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            ss_enduses_sorted,
            ss_color_list,
            os.path.join(
                result_paths['data_results_PDF'], "stacked_ss_country.pdf"),
            plot_legend=True)
        # Industry
        fig_stacked_enduse.run(
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            is_enduses_sorted,
            is_color_list,
            os.path.join(
                result_paths['data_results_PDF'], "stacked_is_country_.pdf"),
            plot_legend=True)

    # ------------------------------
    # Plot annual demand for enduses for all submodels
    # ------------------------------
    if plot_crit['plot_y_all_enduses']:
        fig_stacked_enduse_sectors.run(
            lookups,
            assumptions['sim_yrs'],
            results_container['results_enduse_every_year'],
            enduses['residential'],
            enduses['service'],
            enduses['industry'],
            os.path.join(result_paths['data_results_PDF'],
                         "stacked_all_enduses_country.pdf"))

    # --------------
    # Fuel per fueltype for whole country over annual timesteps
    # NOTE(review): duplicates the plot_fuels_enduses_y output above
    # (same flag, same output file) -- kept to preserve behaviour.
    # ----------------
    if plot_crit['plot_fuels_enduses_y']:
        logging.info("... plot fuel per fueltype for whole country over annual timesteps")
        fig_fuels_enduses_y.run(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            os.path.join(
                result_paths['data_results_PDF'],
                'y_fueltypes_all_enduses.pdf'))

    # ------------------------------------
    # Load factors per fueltype and region
    # (dead triple-quoted call blocks for plot_seasonal_lf / plot_lf_y on
    # reg_load_factor_yd were removed here)
    # ------------------------------------
    if plot_crit['plot_lf']:
        for fueltype_str, fueltype_int in lookups['fueltypes'].items():
            # reg_load_factor_yd = max daily value / average annual daily value
            fig_lf.plot_lf_y(
                fueltype_int,
                fueltype_str,
                results_container['reg_load_factor_y'],
                reg_nrs,
                os.path.join(
                    result_paths['data_results_PDF'],
                    'lf_y_{}.pdf'.format(fueltype_str)))

    # --------------
    # Fuel week of base year
    # --------------
    if plot_crit['plot_week_h']:
        fig_fuels_enduses_week.run(
            results_resid=results_container['ed_fueltype_regs_yh'],
            lookups=lookups,
            hours_to_plot=range(7 * 24),
            year_to_plot=2015,
            fig_name=os.path.join(result_paths['data_results_PDF'], "tot_all_enduse03.pdf"))

    # ------------------------------------
    # Plot averaged per season and fueltype
    # ------------------------------------
    if plot_crit['plot_averaged_season_fueltype']:
        for year in results_container['av_season_daytype_cy'].keys():
            for fueltype_int in results_container['av_season_daytype_cy'][year].keys():
                fueltype_str = tech_related.get_fueltype_str(
                    lookups['fueltypes'], fueltype_int)
                fig_load_profile_dh_multiple.run(
                    path_fig_folder=result_paths['data_results_PDF'],
                    path_plot_fig=os.path.join(
                        result_paths['data_results_PDF'],
                        'season_daytypes_by_cy_comparison__{}__{}.pdf'.format(year, fueltype_str)),
                    calc_av_lp_modelled=results_container['av_season_daytype_cy'][year][fueltype_int],  # current year
                    calc_av_lp_real=results_container['av_season_daytype_cy'][base_yr][fueltype_int],  # base year
                    calc_lp_modelled=results_container['season_daytype_cy'][year][fueltype_int],  # current year
                    calc_lp_real=results_container['season_daytype_cy'][base_yr][fueltype_int],  # base year
                    plot_peak=True,
                    plot_all_entries=False,
                    plot_max_min_polygon=True,
                    plotshow=False,
                    plot_radar=plot_crit['plot_radar_seasonal'],
                    max_y_to_plot=120,
                    fueltype_str=fueltype_str,
                    year=year)

    # ---------------------------------
    # Plot hourly peak loads over time for different fueltypes
    # ---------------------------------
    if plot_crit['plot_h_peak_fueltypes']:
        fig_fuels_peak_h.run(
            results_container['ed_fueltype_regs_yh'],
            lookups,
            os.path.join(
                result_paths['data_results_PDF'],
                'fuel_fueltypes_peak_h.pdf'))

    print("finished plotting")
| 38.140625 | 118 | 0.583367 | 1,334 | 12,205 | 4.908546 | 0.170915 | 0.061087 | 0.041234 | 0.060476 | 0.580177 | 0.553604 | 0.513439 | 0.456475 | 0.405925 | 0.343006 | 0 | 0.00364 | 0.302253 | 12,205 | 319 | 119 | 38.260188 | 0.765265 | 0.117165 | 0 | 0.401786 | 0 | 0 | 0.228263 | 0.068627 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004464 | false | 0.004464 | 0.066964 | 0 | 0.075893 | 0.013393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d40c1c0d41fce2558ee51eafbcd6328fd08433e | 2,637 | py | Python | models.py | hpaul/iconic_network | f2e4678dd0acd2ccc1fefde9fba34b69c99c7ba7 | [
"MIT"
] | null | null | null | models.py | hpaul/iconic_network | f2e4678dd0acd2ccc1fefde9fba34b69c99c7ba7 | [
"MIT"
] | 5 | 2021-03-10T00:31:40.000Z | 2022-02-26T20:47:30.000Z | models.py | hpaul/iconic_network | f2e4678dd0acd2ccc1fefde9fba34b69c99c7ba7 | [
"MIT"
] | 1 | 2019-05-27T20:45:02.000Z | 2019-05-27T20:45:02.000Z | from playhouse.sqlite_ext import *
from playhouse.migrate import *
from peewee import *
# The initialisation
# SQLite connection shared by every model below (via BaseModel.Meta.database).
# WAL journaling + a negative cache_size (KiB-based) tune the DB for the
# scraping workload; see the SQLite PRAGMA documentation for each key.
db = SqliteDatabase('../cache/iconic.db', pragmas={
    'journal_mode': 'wal',
    'cache_size': '-2000',
    'fullfsync': 'on',
    'journal_size_limit': '-1',
    'threads': '8',
    'foreign_keys': 1,  # Enforce foreign-key constraints
})
# The initialisation
#network = MySQLDatabase('iconic', user='root', host='127.0.0.1', password='H@mst3rdigital')
class BaseModel(Model):
    """Common base binding all models in this file to the shared SQLite db."""
    class Meta:
        database = db
# class BaseNetwork(Model):
# class Meta:
# database = network
class Author(BaseModel):
    """One author record (bibliometric profile) plus scraping bookkeeping.

    NOTE(review): field semantics below are inferred from names — confirm
    against the code that populates them.
    """
    # External author identifier, used as the primary key.
    id = BigIntegerField(unique=True, index=True, primary_key=True)
    full_name = JSONField(null=True)
    subject_areas = JSONField(null=True)
    # Bibliometric counters.
    document_count = BigIntegerField(null=True)
    cited_by_count = BigIntegerField(null=True)
    citations_count = BigIntegerField(null=True)
    h_index = BigIntegerField(null=True)
    coauthors_count = BigIntegerField(null=True)
    # Data about affiliation
    affiliation_current = JSONField(null=True)
    cat = JSONField(null=True)
    country = JSONField(null=True)
    # Scraper state: whether documents were fetched and which page was last read.
    docs_fetched = BooleanField(default=False)
    last_page = BigIntegerField(null=True, default=0)
    is_sample = BooleanField(default=False)
    citations = JSONField(null=True)
class Collaboration(BaseModel):
    """One co-authored document (abstract) linking several authors."""
    # Abstract/document identifier, used as the primary key.
    abs_id = BigIntegerField(unique=True, index=True, primary_key=True)
    authors = JSONField(null=True)
    published = DateField(null=True)
    cited_by = IntegerField(null=True)
    keywords = JSONField(null=True)
    coll_count = IntegerField(null=True)
    message = TextField(null=True)
    # Scraper state flag: record has been persisted/processed.
    saved = BooleanField(default=False)
class Coauthors(BaseModel):
    """Cached co-author list for one author."""
    # Author identifier this co-author list belongs to.
    id = BigIntegerField(unique=True, index=True, primary_key=True)
    co_list = JSONField(null=True)
    # Last result page fetched when the co-author list was scraped.
    last_page = IntegerField(null=True)
    saved = BooleanField(default=False)
# class AuthorDetails(BaseNetwork):
# id = BigAutoField(unique=True, index=True, primary_key=True)
# full_name = TextField(null=True)
# preferred_name = TextField(null=True)
# affiliation_id = BigIntegerField(unique=True, index=True, null=True)
# url = TextField(null=True)
# class Network(BaseNetwork):
# id = BigAutoField(unique=True, index=True, primary_key=True)
# from_author = BigIntegerField(index=True)
# to_author = BigIntegerField(index=True)
# article = BigIntegerField(index=True)
# keywords = JSONField(null=True)
# year = IntegerField(null=True)
# citations = IntegerField(null=True)
# class Affiliation(BaseNetwork):
# url = TextField(null=True)
| 31.771084 | 92 | 0.708381 | 304 | 2,637 | 6.036184 | 0.328947 | 0.122071 | 0.092643 | 0.062125 | 0.26049 | 0.228883 | 0.209264 | 0.163488 | 0.163488 | 0.12752 | 0 | 0.006871 | 0.172165 | 2,637 | 82 | 93 | 32.158537 | 0.833715 | 0.353811 | 0 | 0.090909 | 0 | 0 | 0.058964 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.068182 | 0 | 0.795455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d40ed8fbb09f62fdaa077c4ee9a62191cabbc99 | 23,977 | py | Python | ublox/modules.py | pwitab/ublox | 30fc175c6c1b6d2077523d2b676ca8a3a50c86b2 | [
"MIT"
] | 9 | 2018-10-18T12:57:17.000Z | 2022-01-12T05:37:55.000Z | ublox/modules.py | pwitab/ublox | 30fc175c6c1b6d2077523d2b676ca8a3a50c86b2 | [
"MIT"
] | 5 | 2018-10-23T07:42:12.000Z | 2019-09-29T10:16:04.000Z | ublox/modules.py | pwitab/ublox | 30fc175c6c1b6d2077523d2b676ca8a3a50c86b2 | [
"MIT"
] | 4 | 2018-10-12T18:34:12.000Z | 2022-01-13T01:11:17.000Z | import time
import serial
import binascii
from collections import namedtuple
import logging
from ublox.socket import UDPSocket
# Module-level logger named after this module.
logger = logging.getLogger(__name__)

# One parsed NUESTATS entry, e.g. Stats(type='RADIO', name='Signal power', value=-682).
Stats = namedtuple('Stats', 'type name value')
class CMEError(Exception):
    """Raised when the module reports a +CME ERROR result code."""
class ATError(Exception):
    """Raised when an AT command returns an ERROR response."""
class ATTimeoutError(ATError):
    """Raised when an AT action does not complete within its timeout."""
class ConnectionTimeoutError(ATTimeoutError):
    """Raised when network attach does not happen within the allotted time."""
class SaraN211Module:
    """
    Represents a Ublox SARA N211 module.

    Power-optimized NB-IoT (LTE Cat NB1) module. All interaction happens
    over a serial line using AT commands.
    """
    BAUDRATE = 9600
    RTSCTS = False

    # AT commands used by this module family.
    AT_ENABLE_NETWORK_REGISTRATION = 'AT+CEREG=1'
    AT_ENABLE_SIGNALING_CONNECTION_URC = 'AT+CSCON=1'
    AT_ENABLE_POWER_SAVING_MODE = 'AT+NPSMR=1'
    AT_ENABLE_ALL_RADIO_FUNCTIONS = 'AT+CFUN=1'
    AT_REBOOT = 'AT+NRB'
    AT_CLOSE_SOCKET = 'AT+NSOCL'
    AT_GET_IP = 'AT+CGPADDR'
    AT_SEND_TO = 'AT+NSOST'
    AT_CHECK_CONNECTION_STATUS = 'AT+CSCON?'
    AT_RADIO_INFORMATION = 'AT+NUESTATS="RADIO"'

    # Seconds to wait after issuing the reboot command.
    REBOOT_TIME = 0

    SUPPORTED_SOCKET_TYPES = ['UDP']

    def __init__(self, serial_port: str, roaming=False, echo=False):
        self._serial_port = serial_port
        self._serial = serial.Serial(self._serial_port, baudrate=self.BAUDRATE,
                                     rtscts=self.RTSCTS, timeout=5)
        self.echo = echo
        self.roaming = roaming
        self.ip = None
        self.connected = False
        self.sockets = {}
        self.available_messages = list()
        self.imei = None
        # TODO: make a class containing all states
        self.registration_status = 0
        # Radio statistics, filled in by update_radio_statistics().
        self.radio_signal_power = None
        self.radio_total_power = None
        self.radio_tx_power = None
        self.radio_tx_time = None
        self.radio_rx_time = None
        self.radio_cell_id = None
        self.radio_ecl = None
        self.radio_snr = None
        self.radio_earfcn = None
        self.radio_pci = None
        self.radio_rsrq = None
        self.radio_rsrp = None

    def reboot(self):
        """
        Rebooting the module. Will run the AT_REBOOT command and also flush the
        serial port to get rid of trash input from when the module restarted.
        """
        logger.info('Rebooting module')
        self._at_action(self.AT_REBOOT)
        logger.info('waiting for module to boot up')
        time.sleep(self.REBOOT_TIME)
        self._serial.flushInput()  # Flush the serial ports to get rid of crap.
        self._serial.flushOutput()
        logger.info('Module rebooted')

    def setup(self):
        """
        Running all commands to get the module up an working
        """
        logger.info('Starting initiation process')
        self.enable_signaling_connection_urc()
        self.enable_network_registration()
        self.enable_psm_mode()
        self.enable_radio_functions()
        logger.info('Finished initiation process')

    def enable_psm_mode(self):
        """
        Enable Power Save Mode
        """
        self._at_action(self.AT_ENABLE_POWER_SAVING_MODE)
        logger.info('Enabled Power Save Mode')

    def enable_signaling_connection_urc(self):
        """
        Enable Signaling Connection URC
        """
        self._at_action(self.AT_ENABLE_SIGNALING_CONNECTION_URC)
        logger.info('Signaling Connection URC enabled')

    def enable_network_registration(self):
        """
        Enable Network registration
        """
        self._at_action(self.AT_ENABLE_NETWORK_REGISTRATION)
        logger.info('Network registration enabled')

    def enable_radio_functions(self):
        """
        Enable all radio functions.
        """
        self._at_action(self.AT_ENABLE_ALL_RADIO_FUNCTIONS)
        logger.info('All radio functions enabled')

    def connect(self, operator: int, roaming=False):
        """
        Will initiate commands to connect to operators network and wait until
        connected.
        """
        logger.info(f'Trying to connect to operator {operator} network')
        # TODO: Handle connection independent of home network or roaming.
        if operator:
            at_command = f'AT+COPS=1,2,"{operator}"'
        else:
            at_command = f'AT+COPS=0'
        self._at_action(at_command, timeout=300)
        self._await_connection(roaming or self.roaming)
        logger.info(f'Connected to {operator}')

    def create_socket(self, socket_type='UDP', port: int = None):
        """
        Will return a socket-like object that mimics normal python
        sockets. The socket will then translate the commands to correct method
        calls on the module.
        It will also register the socket on the module class so that they can be
        reused in the future if they are not closed.

        :param socket_type:
        :param port:
        :return: UbloxSocket
        """
        logger.info(f'Creating {socket_type} socket')

        if socket_type.upper() not in self.SUPPORTED_SOCKET_TYPES:
            raise ValueError(f'Module does not support {socket_type} sockets')

        sock = None

        if socket_type.upper() == 'UDP':
            sock = self._create_upd_socket(port)

        elif socket_type.upper() == 'TCP':
            sock = self._create_tcp_socket(port)

        logger.info(f'{socket_type} socket created')

        self.sockets[sock.socket_id] = sock

        return sock

    def _create_upd_socket(self, port):
        """
        Will create a UDP-socket for the N211 module
        """
        at_command = f'AT+NSOCR="DGRAM",17'
        if port:
            at_command = at_command + f',{port}'
        # NOTE(review): _at_action returns the IRC list here, not a parsed
        # integer id — verify against UDPSocket's expectations.
        socket_id = self._at_action(at_command)
        sock = UDPSocket(socket_id, self, port)
        return sock

    def _create_tcp_socket(self, port):
        """
        N211 module only supports UDP.
        """
        raise NotImplementedError('Sara211 does not support TCP')

    def close_socket(self, socket_id):
        """
        Will send the correct AT action to close specified socket and remove
        the reference of it on the module object.
        """
        logger.info(f'Closing socket {socket_id}')
        if socket_id not in self.sockets.keys():
            raise ValueError('Specified socket id does not exist')
        result = self._at_action(f'{self.AT_CLOSE_SOCKET}={socket_id}')
        del self.sockets[socket_id]
        return result

    def send_udp_data(self, socket: int, host: str, port: int, data: str):
        """
        Send a UDP message
        """
        logger.info(f'Sending UDP message to {host}:{port}  :  {data}')
        _data = binascii.hexlify(data.encode()).upper().decode()
        length = len(data)
        atc = f'{self.AT_SEND_TO}={socket},"{host}",{port},{length},"{_data}"'
        result = self._at_action(atc)
        return result

    def receive_udp_data(self):
        """
        Receive a UDP message
        """
        logger.info(f'Waiting for UDP message')
        self._read_line_until_contains('+NSONMI')
        message_info = self.available_messages.pop(0)
        message = self._at_action(f'AT+NSORF={message_info.decode()}')
        response = self._parse_udp_response(message[0])
        logger.info(f'Received UDP message: {response}')
        return response

    def _at_action(self, at_command, timeout=10, capture_urc=False):
        """
        Small wrapper to issue a AT command. Will wait for the Module to return
        OK. Some modules return answers to AT actions as URC:s before the OK
        and to handle them as IRCs it is possible to set the capture_urc flag
        and all URCs between the at action and OK will be returned as result.
        """
        logger.debug(f'Applying AT Command: {at_command}')
        self._write(at_command)
        time.sleep(0.02)  # To give the end devices some time to answer.
        irc = self._read_line_until_contains('OK', timeout=timeout,
                                             capture_urc=capture_urc)
        if irc is not None:
            logger.debug(f'AT Command response = {irc}')
        return irc

    def _write(self, data):
        """
        Writing data to the module is simple. But it needs to end with \r\n
        to accept the command. The module will answer with an empty line as
        acknowledgement. If echo is enabled everything that the is sent to the
        module is returned in the serial line. So we just need to omit it from
        the acknowledge.
        """
        data_to_send = data
        if isinstance(data, str):  # if someone sent in a string make it bytes
            data_to_send = data.encode()

        if not data_to_send.endswith(b'\r\n'):
            # someone didnt add the CR an LN so we need to send it
            data_to_send += b'\r\n'

        self._serial.write(data_to_send)
        time.sleep(0.02)  # To give the module time to respond.
        logger.debug(f'Sent: {data_to_send}')

        ack = self._serial.read_until()
        logger.debug(f'Received ack: {ack}')

        if self.echo:
            # when echo is on we will have received the message we sent and
            # will get it in the ack response read. But it will not send \n.
            # so we can omit the data we send + 1 char for the \r
            _echo = ack[:-2]
            wanted_echo = data_to_send[:-2] + b'\r'
            if _echo != wanted_echo:
                raise ValueError(f'Data echoed from module: {_echo} is not the '
                                 f'same data as sent to the module')
            ack = ack[len(wanted_echo):]

        if ack != b'\r\n':
            raise ValueError(f'Ack was not received properly, received {ack}')

    @staticmethod
    def _remove_line_ending(line: bytes):
        """
        To not have to deal with line endings in the data we can use this to
        remove them.
        """
        if line.endswith(b'\r\n'):
            return line[:-2]
        else:
            return line

    def _read_line_until_contains(self, wanted, capture_urc=False, timeout=5):
        """
        Similar to read_until, but will read whole lines so we can use proper
        timeout management. Any URC:s that is read will be handled and we will
        return the IRC:s collected. If capture_urc is set we will return all
        data as IRCs.

        (Parameter renamed from ``slice`` to avoid shadowing the builtin.)
        """
        _slice = wanted
        if isinstance(wanted, str):
            _slice = wanted.encode()

        data_list = list()
        irc_list = list()

        start_time = time.time()
        while True:
            try:
                data = self._serial.read_until()
            except serial.SerialTimeoutException:
                # continue to read lines until AT Timeout
                duration = time.time() - start_time
                if duration > timeout:
                    raise ATTimeoutError
                continue
            line = self._remove_line_ending(data)

            if line.startswith(b'+'):
                if capture_urc:
                    irc_list.append(line)  # add the urc as an irc
                else:
                    self._process_urc(line)

            elif line == b'OK':
                pass

            elif line.startswith(b'ERROR'):
                raise ATError('Error on AT Command')

            elif line == b'':
                pass

            else:
                irc_list.append(line)  # the can only be an IRC

            if _slice in line:
                data_list.append(line)
                break
            else:
                data_list.append(line)

            duration = time.time() - start_time
            if duration > timeout:
                raise ATTimeoutError

        clean_list = [response for response in data_list if not response == b'']

        logger.debug(f'Received: {clean_list}')

        return irc_list

    @staticmethod
    def _parse_udp_response(message: bytes):
        """Parse a +NSORF payload into the raw message bytes."""
        _message = message.replace(b'"', b'')
        socket, ip, port, length, _data, remaining_bytes = _message.split(b',')
        data = bytes.fromhex(_data.decode())
        return data

    def _process_urc(self, urc: bytes):
        """
        URC = unsolicited result code
        When waiting on answer from the module it is possible that the module
        sends urcs via +commands. So after the urcs are
        collected we run this method to process them.
        """
        _urc = urc.decode()
        logger.debug(f'Processing URC: {_urc}')
        urc_id = _urc[1:_urc.find(':')]
        if urc_id == 'CSCON':
            self._update_connection_status_callback(urc)
        elif urc_id == 'CEREG':
            self._update_eps_reg_status_callback(urc)
        elif urc_id == 'CGPADDR':
            self._update_ip_address_callback(urc)
        elif urc_id == 'NSONMI':
            self._add_available_message_callback(urc)
        elif urc_id == 'CME ERROR':
            self._handle_cme_error(urc)
        else:
            logger.debug(f'Unhandled urc: {urc}')

    def _handle_cme_error(self, urc: bytes):
        """
        Callback to raise CME Error.
        """
        raise CMEError(urc.decode())

    def _add_available_message_callback(self, urc: bytes):
        """
        Callback to handle received messages.
        """
        _urc, data = urc.split(b':')
        result = data.lstrip()
        logger.debug(f'Received data: {result}')
        self.available_messages.append(result)

    def update_radio_statistics(self):
        """
        Read radio statistics and update the module object.
        """
        radio_data = self._at_action(self.AT_RADIO_INFORMATION)
        self._parse_radio_stats(radio_data)

    def _update_connection_status_callback(self, urc):
        """
        In the AT urc +CSCON: 1 the last char is indication if the
        connection is idle or connected
        """
        status = bool(int(urc[-1]))
        self.connected = status
        logger.info(f'Changed the connection status to {status}')

    def _update_eps_reg_status_callback(self, urc):
        """
        The command could return more than just the status.
        Maybe a regex would be good
        But for now we just check the last as int
        """
        status = int(chr(urc[-1]))
        self.registration_status = status
        logger.info(f'Updated status EPS Registration = {status}')

    def _update_ip_address_callback(self, urc: bytes):
        """
        Update the IP Address of the module
        """
        # TODO: this is per socket. Need to implement socket handling
        _urc = urc.decode()
        ip_addr = _urc[(_urc.find('"') + 1):-1]
        self.ip = ip_addr
        logger.info(f'Updated the IP Address of the module to {ip_addr}')

    def _parse_radio_stats(self, irc_buffer):
        """
        Parser for radio statistic result
        """
        stats = [self._parse_radio_stats_string(item) for item in irc_buffer]

        for stat in stats:
            if not stat:
                continue
            if stat.type == 'RADIO' and stat.name == 'Signal power':
                self.radio_signal_power = stat.value
            elif stat.type == 'RADIO' and stat.name == 'Total power':
                self.radio_total_power = stat.value
            elif stat.type == 'RADIO' and stat.name == 'TX power':
                self.radio_tx_power = stat.value
            elif stat.type == 'RADIO' and stat.name == 'TX time':
                self.radio_tx_time = stat.value
            elif stat.type == 'RADIO' and stat.name == 'RX time':
                self.radio_rx_time = stat.value
            elif stat.type == 'RADIO' and stat.name == 'Cell ID':
                self.radio_cell_id = stat.value
            elif stat.type == 'RADIO' and stat.name == 'ECL':
                self.radio_ecl = stat.value
            elif stat.type == 'RADIO' and stat.name == 'SNR':
                self.radio_snr = stat.value
            elif stat.type == 'RADIO' and stat.name == 'EARFCN':
                self.radio_earfcn = stat.value
            elif stat.type == 'RADIO' and stat.name == 'PCI':
                self.radio_pci = stat.value
            elif stat.type == 'RADIO' and stat.name == 'RSRQ':
                self.radio_rsrq = stat.value
            # FIX: 'RSRP' was previously unhandled, so radio_rsrp was never
            # populated by this parser even though the attribute exists.
            elif stat.type == 'RADIO' and stat.name == 'RSRP':
                self.radio_rsrp = stat.value
            else:
                logger.debug(f'Unhandled statistics data: {stat}')

    @staticmethod
    def _parse_radio_stats_string(stats_byte_string: bytes):
        """
        The string is like: b'NUESTATS: "RADIO","Signal power",-682'

        :param stats_byte_string:
        :return: NamedTuple Stats
        """
        parts = stats_byte_string.decode().split(':')

        irc: str = parts[0].strip()
        data: str = parts[1].strip().replace('"', '')

        data_parts = data.split(',')
        if irc == 'NUESTATS':
            return Stats(data_parts[0], data_parts[1], int(data_parts[2]))
        else:
            return None

    def __repr__(self):
        return f'NBIoTModule(serial_port="{self._serial_port}")'

    def _await_connection(self, roaming, timeout=180):
        """
        The process to verify that connection has occured is a bit different on
        different devices. On N211 we need to wait until we get the +CEREG: x
        URC.
        """
        # FIX: use the module logger instead of the root logger.
        logger.info('Awaiting Connection')
        if roaming:
            self._read_line_until_contains('CEREG: 5')
        else:
            self._read_line_until_contains('CEREG: 1')
class SaraR4Module(SaraN211Module):
    """
    Represents a Ublox SARA R4XX module.
    """
    BAUDRATE = 115200
    RTSCTS = 1

    DEFAULT_BANDS = [20]

    AT_CREATE_UDP_SOCKET = 'AT+USOCR=17'
    AT_CREATE_TCP_SOCKET = 'AT+USOCR=6'
    AT_ENABLE_LTE_M_RADIO = 'AT+URAT=7'
    AT_ENABLE_NBIOT_RADIO = 'AT+URAT=8'
    AT_CLOSE_SOCKET = 'AT+USOCL'
    AT_REBOOT = 'AT+CFUN=15'  # R4 specific

    REBOOT_TIME = 10

    SUPPORTED_SOCKET_TYPES = ['UDP', 'TCP']
    SUPPORTED_RATS = {'NBIOT': AT_ENABLE_NBIOT_RADIO,
                      'LTEM': AT_ENABLE_LTE_M_RADIO}

    def __init__(self, serial_port: str, roaming=False, echo=True):
        super().__init__(serial_port, roaming, echo)
        self.current_rat = None

    def setup(self, radio_mode='NBIOT'):
        """
        Running all commands to get the module up an working
        """
        self.read_imei()
        self.set_radio_mode(mode=radio_mode)
        self.enable_radio_functions()
        self.enable_network_registration()
        self.set_error_format()
        self.set_data_format()
        self.enable_quality_reporting()

    def set_data_format(self):
        """Use HEX strings for socket payloads."""
        self._at_action('AT+UDCONF=1,1')  # Set data format to HEX
        logger.info('Data format set to HEX')

    def read_imei(self):
        """Read the IMEI from the module and store it on the object."""
        logger.info('Reading IMEI from module')
        result = self._at_action('AT+CGSN')
        self.imei = int(result[0])

    def set_error_format(self):
        self._at_action('AT+CMEE=2')  # enable verbose errors
        logger.info('Verbose errors enabled')

    def set_band_mask(self, bands: list = None):
        """
        Band is set using a bit for each band. Band 1=bit 0, Band 64=Bit 63

        .. note:
            Only supports NB IoT RAT.
        """
        logger.info(f'Setting Band Mask for bands {bands}')
        bands_to_set = bands or self.DEFAULT_BANDS

        total_band_mask = 0

        for band in bands_to_set:
            individual_band_mask = 1 << (band - 1)
            total_band_mask = total_band_mask | individual_band_mask

        self._at_action(f'AT+UBANDMASK=1,{total_band_mask},{total_band_mask}')

    def enable_quality_reporting(self):
        logger.info('Enables reporting of RSRP and RSRQ via AT+UCGED')
        self._at_action('AT+UCGED=5')

    def set_radio_mode(self, mode):
        """Select the radio access technology (NBIOT or LTEM)."""
        response = self._at_action(self.SUPPORTED_RATS[mode.upper()])
        logger.info(f'Radio Mode set to {mode}')
        self.current_rat = mode.upper()
        return response

    def set_pdp_context(self, apn, pdp_type="IP", cid=1):
        logger.info(f'Setting PDP Context')
        _at_command = f'AT+CGDCONT={cid},"{pdp_type}","{apn}"'
        self._at_action(_at_command)
        logger.info(f'PDP Context: {apn}, {pdp_type}')

    def update_radio_statistics(self):
        """
        On the R4xx only rsrp and rsrq is available.
        """
        result = self._at_action('AT+UCGED?', capture_urc=True)
        cell_id = None
        channel_nr = None
        rsrq = None
        rsrp = None
        try:
            for item in result:
                data = item[7:]  # remove the data description
                if data.endswith(b'\r'):
                    data = data[:-2]
                else:
                    data = data[:-1]
                if item.startswith(b'+RSRQ'):
                    cell_id, channel_nr, rsrq = data.split(b',')
                elif item.startswith(b'+RSRP'):
                    cell_id, channel_nr, rsrp = data.split(b',')

            self.radio_earfcn = channel_nr
            self.radio_cell_id = cell_id
            self.radio_rsrp = float(rsrp.decode().replace('"', ''))
            self.radio_rsrq = float(rsrq.decode().replace('"', ''))
        # FIX: a partial response with only one of RSRP/RSRQ leaves the
        # other as None, so .decode() raises AttributeError which the old
        # ValueError-only handler did not catch. Also removed the unused
        # "as e" binding.
        except (ValueError, AttributeError):
            logger.info('Error in parsing radio statistics')

    def _create_upd_socket(self, port):
        """Create a UDP socket on the R4xx and register it on the module."""
        at_command = f'{self.AT_CREATE_UDP_SOCKET}'
        if port:
            at_command = at_command + f',{port}'
        response = self._at_action(at_command, capture_urc=True)
        # NOTE(review): parsing only the final character limits socket ids
        # to 0-9 — confirm maximum socket count on R4xx.
        socket_id = int(chr(response[0][-1]))
        sock = UDPSocket(socket_id, self, port)
        self.sockets[sock.socket_id] = sock
        return sock

    def send_udp_data(self, socket: int, host: str, port: int, data: str):
        """
        Send a UDP message
        """
        logger.info(f'Sending UDP message to {host}:{port}  :  {data}')
        _data = binascii.hexlify(data.encode()).upper().decode()
        length = len(data)
        atc = f'AT+USOST={socket},"{host}",{port},{length},"{_data}"'
        result = self._at_action(atc)
        return result

    def read_udp_data(self, socket, length, timeout=10):
        """
        Reads data from a udp socket.

        ..note

            there is an issue on the R410 module that it is not issuing URCs
            So to get the data we poll for data until we get some.
        """
        start_time = time.time()
        while True:
            time.sleep(2)
            data = self._at_action(f'AT+USORF={socket},{length}',
                                   capture_urc=True)
            result = data[0].replace(b'"', b'').split(b',')[1:]  # remove URC
            if result[0]:  # the IP address part
                return result
            duration = time.time() - start_time
            if duration > timeout:
                break
        logger.info('No UDP response read')
        return None

    def set_listening_socket(self, socket: int, port: int):
        """Set a socket into listening mode to be able to receive data on
        the socket."""
        self._at_action(f'AT+USOLI={socket},{port}')

    def _await_connection(self, roaming, timeout=180):
        """
        The process to verify that connection has occurred is a bit different on
        different devices. On R4xx we need continuously poll the connection
        status and see if the connection status has changed.
        """
        # FIX: use the module logger instead of the root logger.
        logger.info('Awaiting Connection')
        start_time = time.time()
        while True:
            time.sleep(2)
            self._at_action('AT+CEREG?')

            if self.registration_status == 0:
                continue

            if roaming and self.registration_status == 5:
                break

            if (not roaming) and self.registration_status == 1:
                break

            elapsed_time = time.time() - start_time
            if elapsed_time > timeout:
                raise ConnectionTimeoutError(f'Could not connect')
| 34.058239 | 80 | 0.591275 | 3,063 | 23,977 | 4.444009 | 0.155077 | 0.014987 | 0.021158 | 0.01293 | 0.255804 | 0.16772 | 0.12489 | 0.117837 | 0.105348 | 0.074934 | 0 | 0.008105 | 0.310422 | 23,977 | 703 | 81 | 34.106686 | 0.815169 | 0.181966 | 0 | 0.210402 | 0 | 0 | 0.131087 | 0.022371 | 0 | 0 | 0 | 0.004267 | 0 | 1 | 0.106383 | false | 0.004728 | 0.014184 | 0.002364 | 0.238771 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d4193bd35998684210f11a056d6009a9bc98371 | 6,965 | py | Python | tests/cli/test_cap_table.py | PillarDevelopment/sto | a3067f5a31053dc780c4812d38e062ff4726b21a | [
"Apache-2.0"
] | 71 | 2018-11-30T10:15:23.000Z | 2022-03-31T23:51:22.000Z | tests/cli/test_cap_table.py | PillarDevelopment/sto | a3067f5a31053dc780c4812d38e062ff4726b21a | [
"Apache-2.0"
] | 7 | 2019-01-24T10:23:41.000Z | 2021-06-01T22:43:59.000Z | tests/cli/test_cap_table.py | PillarDevelopment/sto | a3067f5a31053dc780c4812d38e062ff4726b21a | [
"Apache-2.0"
] | 32 | 2018-11-29T21:19:51.000Z | 2022-03-14T06:32:01.000Z | import pytest
from sto.distribution import read_csv
from sto.ethereum.issuance import contract_status
from sto.ethereum.status import update_status
from sto.ethereum.tokenscan import token_scan
from sto.generic.captable import generate_cap_table, print_cap_table
from sto.models.implementation import TokenScanStatus, TokenHolderAccount
from sto.identityprovider import NullIdentityProvider
from sto.cli.main import cli
@pytest.fixture(params=['unrestricted', 'restricted'])
def sample_token(
        logger,
        dbsession,
        web3,
        private_key_hex,
        sample_csv_file,
        db_path,
        click_runner,
        get_contract_deployed_tx,
        kyc_contract,
        monkeypatch_get_contract_deployed_tx,
        request
):
    """Create a security token used in these tests.

    Parametrized over both transfer-restriction modes; in 'restricted' mode
    the deployer address must be whitelisted first or the issuance transfer
    would be rejected. Each CLI call is followed by an exit-code assertion,
    and the whole sequence is order-dependent (whitelist -> issue ->
    broadcast).
    """
    if request.param == 'restricted':
        from sto.ethereum.utils import priv_key_to_address
        # whitelist owner
        result = click_runner.invoke(
            cli,
            [
                '--database-file', db_path,
                '--ethereum-private-key', private_key_hex,
                '--ethereum-gas-limit', 999999999,
                'kyc-manage',
                '--whitelist-address', priv_key_to_address(private_key_hex)
            ]
        )
        assert result.exit_code == 0

    # Queue the token issuance transaction.
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'issue',
            '--name', "Moo Corp",
            '--symbol', "MOO",
            '--url', "https://tokenmarket.net",
            '--amount', 9999,
            '--transfer-restriction', request.param
        ]
    )
    assert result.exit_code == 0

    # Push the queued transactions to the test chain.
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0

    token_address = get_contract_deployed_tx(dbsession, "SecurityToken").contract_address

    # Check that we can view the token status
    status = contract_status(logger,
                             dbsession,
                             "testing",
                             web3,
                             ethereum_abi_file=None,
                             ethereum_private_key=private_key_hex,
                             ethereum_gas_limit=None,
                             ethereum_gas_price=None,
                             token_contract=token_address,
                             )

    # 9999 whole tokens with 18 decimals.
    assert status["name"] == "Moo Corp"
    assert status["totalSupply"] == 9999 * 10 ** 18

    dbsession.commit()

    return token_address
@pytest.fixture
def scanned_distribution(logger, dbsession, web3, private_key_hex, sample_csv_file, sample_token, click_runner, db_path, monkeypatch_create_web3):
    """Create some sample transactions so we can scan the token holder balances.

    Whitelists every CSV recipient, distributes tokens to them, broadcasts,
    re-runs the distribution to verify idempotency, and finally token-scans
    the chain so holder balances are in the database.
    """
    token_address = sample_token
    entries = read_csv(logger, sample_csv_file)
    for entry in entries:
        # whitelist customers
        result = click_runner.invoke(
            cli,
            [
                '--database-file', db_path,
                '--ethereum-private-key', private_key_hex,
                '--ethereum-gas-limit', 999999999,
                'kyc-manage',
                '--whitelist-address', entry.address
            ]
        )
        assert result.exit_code == 0

    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            "distribute-multiple",
            '--csv-input', sample_csv_file,
            '--address', token_address
        ]
    )
    assert result.exit_code == 0

    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0

    # Check they got mined

    # Send transactions to emphmereal test chain
    # NOTE(review): this second tx-broadcast right after the first looks
    # redundant — presumably a safety re-run; confirm it is intentional.
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0

    # Check they got mined
    txs = update_status(
        logger,
        dbsession,
        "testing",
        web3,
        ethereum_private_key=private_key_hex,
        ethereum_gas_limit=None,
        ethereum_gas_price=None,
    )

    # Check that rerun does not recreate txs
    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            "distribute-multiple",
            '--csv-input', sample_csv_file,
            '--address', token_address
        ]
    )
    assert result.exit_code == 0

    result = click_runner.invoke(
        cli,
        [
            '--database-file', db_path,
            '--ethereum-private-key', private_key_hex,
            '--ethereum-gas-limit', 999999999,
            'tx-broadcast',
        ]
    )
    assert result.exit_code == 0

    # Populate token-holder balances in the database from chain events.
    token_scan(logger, dbsession, "testing", web3, None, token_address)

    return token_address
def test_cap_table_formats(logger, dbsession, network, scanned_distribution, web3):
    """Cap table generation succeeds for every sort column and direction."""
    provider = NullIdentityProvider()
    token_address = scanned_distribution

    # Same iteration order as nested loops: direction outer, column inner.
    combos = [
        (direction, column)
        for direction in ("asc", "desc")
        for column in ("address", "name", "balance", "updated")
    ]
    for direction, column in combos:
        generate_cap_table(
            logger,
            dbsession,
            token_address,
            order_by=column,
            identity_provider=provider,
            order_direction=direction,
            include_empty=False,
            TokenScanStatus=TokenScanStatus,
            TokenHolderAccount=TokenHolderAccount,
        )
def test_cap_table_printer(logger, dbsession, network, scanned_distribution, web3):
    """Printing a generated cap table works end to end."""
    provider = NullIdentityProvider()
    cap_table = generate_cap_table(
        logger,
        dbsession,
        scanned_distribution,
        order_by="balance",
        identity_provider=provider,
        include_empty=False,
        order_direction="desc",
        TokenScanStatus=TokenScanStatus,
        TokenHolderAccount=TokenHolderAccount,
    )
    print_cap_table(cap_table, max_entries=1000, accuracy=2)
| 29.264706 | 146 | 0.576884 | 684 | 6,965 | 5.633041 | 0.230994 | 0.064885 | 0.047236 | 0.071373 | 0.530496 | 0.530496 | 0.511809 | 0.487412 | 0.487412 | 0.4381 | 0 | 0.024515 | 0.32649 | 6,965 | 237 | 147 | 29.388186 | 0.796845 | 0.059153 | 0 | 0.531915 | 0 | 0 | 0.140623 | 0.033737 | 0 | 0 | 0 | 0 | 0.058511 | 1 | 0.021277 | false | 0 | 0.053191 | 0 | 0.085106 | 0.015957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d44bdb8e298f04d76d63165a0c0c77c08501a4e | 5,729 | py | Python | Descriptive_Evaluation_System.py | ananya-sundar/Descriptive-Answer-Evaluation-System | fea3f56e5058bd75526de23e198e0d512addcdf6 | [
"MIT"
] | null | null | null | Descriptive_Evaluation_System.py | ananya-sundar/Descriptive-Answer-Evaluation-System | fea3f56e5058bd75526de23e198e0d512addcdf6 | [
"MIT"
] | null | null | null | Descriptive_Evaluation_System.py | ananya-sundar/Descriptive-Answer-Evaluation-System | fea3f56e5058bd75526de23e198e0d512addcdf6 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import six
import language_check
from tkinter import *
from tkinter import messagebox
import rake
import operator
import io
# Index (1-based) of the question currently shown in the UI.
counter = 1

# FIX: the question file was opened and never closed; use a context manager
# so the handle is released. One question per line.
with open("questions.txt", "r") as file:
    q = [line.rstrip('\n') for line in file]

# Per-question marks, filled in by enFunc(); six questions total.
totmark = [0, 0, 0, 0, 0, 0]
def nex():
    """Advance to the next question, warning once the bank is exhausted.

    FIX: the upper bound was hardcoded to 6; use len(q) so the question
    bank can be any size without risking an IndexError.
    """
    global counter
    if counter < len(q):
        counter = counter + 1
        ques.set(str(q[counter - 1]))
    else:
        messagebox.showwarning("Limit Exceeded", "Sorry, No more questions available!")
def prev():
    """Step back to the previous question; warn when already at the first."""
    global counter
    if counter <= 1:
        messagebox.showwarning("Limit Exceeded", "This is the first question!")
        return
    counter = counter - 1
    ques.set(str(q[counter - 1]))
def finish():
    """Show the total score accumulated across all questions (out of 40).

    FIX: replaced the manual accumulation loop with the builtin sum().
    """
    total = sum(totmark)
    messagebox.showinfo("Total Score", "The total score obtained in the test=" + str(total) + "/40")
def enFunc():
    """Grade the answer currently in the text widget.

    Scoring: length (0-10) + keyword match against the model answer file
    (0-30) + grammar check (0-10), scaled to 12 marks for questions 1-2
    and 4 marks for the rest, then stored in totmark.
    """
    global counter
    ans = entry.get('1.0','end')
    n=0
    # NOTE(review): iterating a string yields characters, so this loop runs
    # once per character and `words` is a per-character split of the whole
    # answer — `n` is NOT a word count. Confirm intended behaviour.
    for line in ans:
        words=[line.split(' ') for line in ans]
        n=len(words)
    # Length marks: questions 1-2 expect long answers, the rest short ones.
    if(counter==1 or counter==2):
        if(n>=850):
            marks1=10
        elif(n>=400):
            marks1=5
        else:
            marks1=3
    else:
        if(n>=250):
            marks1=10
        elif(n>=100):
            marks1=5
        else:
            marks1=3
    a=marks1
    # Model-answer file for the current question.
    fname="data/docs/mp"+str(counter)+".txt"
    stoppath = "data/stoplists/SmartStoplist.txt"
    rake_object = rake.Rake(stoppath)
    # NOTE(review): sample_file is opened but never read or closed.
    sample_file = io.open(fname, 'r',encoding="iso-8859-1")
    text = ans
    # Extract candidate keywords from the student's answer with RAKE.
    sentenceList = rake.split_sentences(text)
    #for sentence in sentenceList:
    #    print("Sentence:", sentence)
    stopwords = rake.load_stop_words(stoppath)
    stopwordpattern = rake.build_stop_word_regex(stoppath)
    phraseList = rake.generate_candidate_keywords(sentenceList, stopwordpattern, stopwords)
    #print("Phrases:", phraseList)
    wordscores = rake.calculate_word_scores(phraseList)
    keywordcandidates = rake.generate_candidate_keyword_scores(phraseList, wordscores)
    """for candidate in keywordcandidates.keys():
        print("Candidate: ", candidate, ", score: ", keywordcandidates.get(candidate))
    sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
    totalKeywords = len(sortedKeywords)
    for keyword in sortedKeywords[0:int(totalKeywords/3)]:
        print("Keyword: ", keyword[0], ", score: ", keyword[1])"""
    keyw=dict(rake_object.run(text))
    print(keyw)
    #l1=len(keyw)
    print(fname)
    # Read the model answer; the third blank-line-separated section holds
    # the expected keywords, one per line.
    f1=io.open(fname, 'r',encoding="iso-8859-1")
    text1=f1.read()
    que=text1.split("\n")
    print(que[0])
    l=text1.split("\n\n")
    kw=l[2].split("\n")
    print("keyword in original file=",kw)
    total=len(kw)
    print("No of keywords in original file=",total)
    # Count how many expected keywords appear in the extracted keywords.
    c=0
    for i in keyw:
        for j in range(0,total):
            if(kw[j].lower() in i.lower()):
                print("Detected= " +str(i))
                c=c+1
    print("count=",c)
    percentage=(c/total)*100
    # NOTE(review): these brackets are non-monotonic — 50-60% scores 28 and
    # 40-50% scores 25, both higher than 60-70%'s 24; 28 is also duplicated
    # from the 80-90% bracket. Confirm the intended mark scheme.
    if(percentage>=90):
        marks2=30
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    elif(percentage>=80 and percentage<90):
        marks2=28
        message = "Marks obtained for keyword:"+ str(marks2) + "/30"
    elif(percentage>=70 and percentage<80):
        marks2=26
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    # NOTE(review): effectively covers 60-70 only, since 70-80 matched above.
    elif(percentage>=60 and percentage<80):
        marks2=24
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    elif(percentage>=50 and percentage<60):
        marks2=28
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    elif(percentage>=40 and percentage<50):
        marks2=25
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    else:
        marks2 = 0
        message = "Marks obtained for keyword:" + str(marks2) + "/30"
    mes2text = "\nMarks for length = " + str(a) + "/10" + "\nLength = " + str(n)
    print(mes2text)
    print(message)
    b=marks2
    # Grammar check: fewer detected issues means more marks.
    tool=language_check.LanguageTool('en-US')
    count=0
    text=str(ans)
    txtlen=len(text.split())
    setxt = set(text.split())
    setlen = len(setxt)
    matches=tool.check(text)
    #print("Error:",matches)
    print("No. of Errors=",len(matches))
    noOfError=len(matches)
    for i in range (0,noOfError):
        print(matches[i].msg)
    if (noOfError<=3 and n>0):
        marks3=10
    elif (noOfError<=5):
        marks3=8
    elif (noOfError<=8):
        marks3=5
    else:
        marks3=3
    print("Marks obtained after parsing=",marks3,"/10")
    c=marks3
    # Combine the three components (out of 50) and scale to the question's
    # weight: 12 marks for Q1-2, 4 marks for the rest.
    d=a+b+c
    print("Marks obtained out of 50 is=",d,"/50")
    if(counter==1 or counter==2):
        tot=(d/50)*12
    else:
        tot=(d/50)*4
    m="\nMarks obtained for this question is"+str(tot)
    messagebox.showinfo("Result",m)
    global totmark
    totmark[counter-1]=tot
# ---------------------------------------------------------------------------
# Tkinter UI: question label, answer box, prev/submit/next/finish buttons.
# NOTE(review): Tk/Label/StringVar/Text/Button and messagebox are used but no
# tkinter import is visible in this file -- presumably
# ``from tkinter import *`` plus ``from tkinter import messagebox`` is
# missing; confirm.
root = Tk()
root.geometry('800x1800')
label= Label(root,text="ANSWER ALL THE FOLLOWING QUESTIONS",bg="lightyellow",bd=20)
label.place(x=300,y=10)
# ques holds the currently displayed question text.
ques= StringVar()
ques.set(str(q[counter-1]))
labelQ=Label(root,textvariable=ques,text=str(q[0]),width=100, bg="lightyellow", bd=20)
labelQ.place(x=10,y=100)
# Free-text answer box read by enFunc().
entry= Text(root)
entry.place(x=100,y=200)
prevBtn= Button(root, text = '<', command = prev)
prevBtn.place(x=120,y=600)
button1= Button(root, text = 'Submit', command = enFunc)
button1.place(x=400,y=600)
nextBtn= Button(root, text = '>', command = nex)
nextBtn.place(x=700,y=600)
finishbtn=Button(root,text='Finish',command=finish)
finishbtn.place(x=400,y=650)
root.mainloop()
| 24.908696 | 103 | 0.612498 | 763 | 5,729 | 4.562254 | 0.284404 | 0.022982 | 0.040218 | 0.046251 | 0.193623 | 0.181557 | 0.164608 | 0.164608 | 0.124964 | 0.124964 | 0 | 0.054141 | 0.239134 | 5,729 | 229 | 104 | 25.017467 | 0.744437 | 0.026532 | 0 | 0.19375 | 0 | 0 | 0.147725 | 0.006196 | 0.00625 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05625 | 0 | 0.08125 | 0.0875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
2d452d14cedfb9fd0b00a34f2acdf709cc1787cb | 4,364 | py | Python | tabnine-vim/third_party/ycmd/ycmd/tests/rust/subcommands_test.py | MrMonk3y/vimrc | 950230fb3fd7991d1234c2ab516ec03245945677 | [
"MIT"
] | null | null | null | tabnine-vim/third_party/ycmd/ycmd/tests/rust/subcommands_test.py | MrMonk3y/vimrc | 950230fb3fd7991d1234c2ab516ec03245945677 | [
"MIT"
] | null | null | null | tabnine-vim/third_party/ycmd/ycmd/tests/rust/subcommands_test.py | MrMonk3y/vimrc | 950230fb3fd7991d1234c2ab516ec03245945677 | [
"MIT"
] | null | null | null | # Copyright (C) 2015-2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import assert_that, contains, has_entry
from mock import patch
from nose.tools import eq_
from ycmd.tests.rust import IsolatedYcmd, PathToTestFile, SharedYcmd
from ycmd.tests.test_utils import ( BuildRequest,
MockProcessTerminationTimingOut,
WaitUntilCompleterServerReady )
from ycmd.utils import ReadFile
@SharedYcmd
def RunGoToTest( app, params ):
  # Issue the requested GoTo-style command at test.rs:7:12 and check that
  # the completer reports the definition at line 1, column 8.
  test_path = PathToTestFile( 'test.rs' )
  request = BuildRequest( completer_target = 'filetype_default',
                          command_arguments = [ params[ 'command' ] ],
                          line_num = 7,
                          column_num = 12,
                          contents = ReadFile( test_path ),
                          filetype = 'rust',
                          filepath = test_path )
  response = app.post_json( '/run_completer_command', request )
  eq_( { 'line_num': 1, 'column_num': 8, 'filepath': test_path },
       response.json )
def Subcommands_GoTo_all_test():
  # Exercise every GoTo-style alias through the shared helper.
  for command in [ 'GoTo', 'GoToDefinition', 'GoToDeclaration' ]:
    yield RunGoToTest, { 'command': command }
@SharedYcmd
def Subcommands_GetDoc_Method_test( app ):
  # GetDoc on a documented function returns its signature plus doc comment.
  doc_path = PathToTestFile( 'docs.rs' )
  request = BuildRequest( filepath = doc_path,
                          filetype = 'rust',
                          line_num = 7,
                          column_num = 9,
                          contents = ReadFile( doc_path ),
                          command_arguments = [ 'GetDoc' ],
                          completer_target = 'filetype_default' )
  response = app.post_json( '/run_completer_command', request ).json
  eq_( response,
       { 'detailed_info': 'pub fn fun()\n---\nsome docs on a function' } )
@SharedYcmd
def Subcommands_GetDoc_Fail_Method_test( app ):
  # The function on line 8 has no docs; GetDoc must fail with RuntimeError.
  doc_path = PathToTestFile( 'docs.rs' )
  request = BuildRequest( filepath = doc_path,
                          filetype = 'rust',
                          line_num = 8,
                          column_num = 9,
                          contents = ReadFile( doc_path ),
                          command_arguments = [ 'GetDoc' ],
                          completer_target = 'filetype_default' )
  response = app.post_json( '/run_completer_command',
                            request,
                            expect_errors = True ).json
  eq_( response[ 'exception' ][ 'TYPE' ], 'RuntimeError' )
  eq_( response[ 'message' ], 'Can\'t lookup docs.' )
@IsolatedYcmd()
@patch( 'ycmd.utils.WaitUntilProcessIsTerminated',
        MockProcessTerminationTimingOut )
def Subcommands_StopServer_Timeout_test( app ):
  # Even when waiting for process termination times out, StopServer must
  # leave the completer reporting a stopped server.
  WaitUntilCompleterServerReady( app, 'rust' )
  stop_request = BuildRequest( filetype = 'rust',
                               command_arguments = [ 'StopServer' ] )
  app.post_json( '/run_completer_command', stop_request )
  debug_info = app.post_json( '/debug_info',
                              BuildRequest( filetype = 'rust' ) ).json
  assert_that( debug_info,
               has_entry(
                 'completer',
                 has_entry( 'servers', contains(
                   has_entry( 'is_running', False )
                 ) )
               ) )
| 32.325926 | 71 | 0.601512 | 444 | 4,364 | 5.716216 | 0.394144 | 0.023641 | 0.021671 | 0.022065 | 0.256895 | 0.239559 | 0.193853 | 0.193853 | 0.193853 | 0.149724 | 0 | 0.006008 | 0.313474 | 4,364 | 134 | 72 | 32.567164 | 0.841122 | 0.174152 | 0 | 0.318681 | 0 | 0 | 0.127511 | 0.035435 | 0 | 0 | 0 | 0 | 0.021978 | 1 | 0.054945 | false | 0 | 0.120879 | 0 | 0.175824 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7409b7db18856a3e31c375938ade5b50aa04e657 | 7,121 | py | Python | app/modules/talkingHeads/network/components.py | andy6804tw/talking-hands-API | 4895c980565082b0fdcabbc704ee871855e6d5f5 | [
"MIT"
] | null | null | null | app/modules/talkingHeads/network/components.py | andy6804tw/talking-hands-API | 4895c980565082b0fdcabbc704ee871855e6d5f5 | [
"MIT"
] | 4 | 2021-06-08T20:44:12.000Z | 2022-03-12T00:09:38.000Z | app/modules/talkingHeads/network/components.py | andy6804tw/talking-hands-API | 4895c980565082b0fdcabbc704ee871855e6d5f5 | [
"MIT"
] | null | null | null | """ In this file, PyTorch modules are defined to be used in the Talking Heads model. """
import torch
import torch.nn as nn
from torch.nn import functional as F
def init_conv(conv):
    """Xavier-initialize a conv layer's weight and zero its bias, if any."""
    nn.init.xavier_uniform_(conv.weight)
    bias = conv.bias
    if bias is not None:
        bias.data.zero_()
# region General Blocks
class SelfAttention(nn.Module):
    """Non-local self-attention block with a learnable residual gate."""

    def __init__(self, in_dim):
        super(SelfAttention, self).__init__()
        reduced = in_dim // 8
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=reduced, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=reduced, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # Residual gate, initialized near zero so the block starts close
        # to the identity mapping.
        self.gamma = nn.Parameter(torch.rand(1).normal_(0.0, 0.02))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        batch, chans, height, width = x.shape
        n = height * width
        queries = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)  # B x N x C'
        keys = self.key_conv(x).view(batch, -1, n)                        # B x C' x N
        attn = self.softmax(torch.bmm(queries, keys))                     # B x N x N
        values = self.value_conv(x).view(batch, -1, n)                    # B x C x N
        mixed = torch.bmm(values, attn.permute(0, 2, 1)).view(batch, chans, height, width)
        return self.gamma * mixed + x
class ConvLayer(nn.Module):
    """Reflection-padded Conv2d wrapped in spectral normalization."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding=None):
        super(ConvLayer, self).__init__()
        # Default to "same"-style padding for odd kernel sizes.
        pad = kernel_size // 2 if padding is None else padding
        self.reflection_pad = nn.ReflectionPad2d(pad)
        self.conv2d = nn.utils.spectral_norm(
            nn.Conv2d(in_channels, out_channels, kernel_size, stride))

    def forward(self, x):
        return self.conv2d(self.reflection_pad(x))
class AdaIn(nn.Module):
    """Adaptive instance normalization: re-style x with supplied mean/std."""

    def __init__(self):
        super(AdaIn, self).__init__()
        self.eps = 1e-5  # avoids division by zero for constant channels

    def forward(self, x, mean_style, std_style):
        batch, chans, height, width = x.shape
        flat = x.view(batch, chans, -1)
        feat_std = (torch.std(flat, dim=2) + self.eps).view(batch, chans, 1)
        feat_mean = torch.mean(flat, dim=2).view(batch, chans, 1)
        # Normalize per (batch, channel), then apply the style statistics.
        styled = std_style * (flat - feat_mean) / feat_std + mean_style
        return styled.view(batch, chans, height, width)
# endregion
# region Non-Adaptive Residual Blocks
class ResidualBlockDown(nn.Module):
    """Residual block that halves spatial resolution via average pooling."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=None):
        super(ResidualBlockDown, self).__init__()
        # Main (right) path: two padded convolutions.
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride, padding)
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride, padding)
        # Shortcut (left) path: 1x1 projection to match channel count.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x):
        main = self.conv_r1(F.relu(x))
        main = self.conv_r2(F.relu(main))
        main = F.avg_pool2d(main, 2)
        shortcut = F.avg_pool2d(self.conv_l(x), 2)
        return shortcut + main
class ResidualBlockUp(nn.Module):
    """Residual block that upsamples by ``upsample`` with instance norm."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, upsample=2):
        super(ResidualBlockUp, self).__init__()
        self.upsample = nn.Upsample(scale_factor=upsample, mode='nearest')
        # Main (right) path: norm -> relu -> upsample -> conv, twice.
        self.norm_r1 = nn.InstanceNorm2d(in_channels, affine=True)
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride)
        self.norm_r2 = nn.InstanceNorm2d(out_channels, affine=True)
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride)
        # Shortcut (left) path: upsample then 1x1 projection.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x):
        main = self.upsample(F.relu(self.norm_r1(x)))
        main = self.conv_r1(main)
        main = self.conv_r2(F.relu(self.norm_r2(main)))
        shortcut = self.conv_l(self.upsample(x))
        return shortcut + main
class ResidualBlock(nn.Module):
    """Plain two-convolution residual block with instance normalization."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in1 = nn.InstanceNorm2d(channels, affine=True)
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in2 = nn.InstanceNorm2d(channels, affine=True)

    def forward(self, x):
        out = F.relu(self.in1(self.conv1(x)))
        out = self.in2(self.conv2(out))
        return out + x
# endregion
# region Adaptive Residual Blocks
class AdaptiveResidualBlockUp(nn.Module):
    """Upsampling residual block whose normalization statistics are supplied
    by the caller (AdaIn) instead of being learned."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, upsample=2):
        super(AdaptiveResidualBlockUp, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.upsample = nn.Upsample(scale_factor=upsample, mode='nearest')
        # Main (right) path with adaptive instance normalization.
        self.norm_r1 = AdaIn()
        self.conv_r1 = ConvLayer(in_channels, out_channels, kernel_size, stride)
        self.norm_r2 = AdaIn()
        self.conv_r2 = ConvLayer(out_channels, out_channels, kernel_size, stride)
        # Shortcut (left) path: upsample then 1x1 projection.
        self.conv_l = ConvLayer(in_channels, out_channels, 1, 1)

    def forward(self, x, mean1, std1, mean2, std2):
        main = self.upsample(F.relu(self.norm_r1(x, mean1, std1)))
        main = self.conv_r1(main)
        main = self.conv_r2(F.relu(self.norm_r2(main, mean2, std2)))
        shortcut = self.conv_l(self.upsample(x))
        return shortcut + main
class AdaptiveResidualBlock(nn.Module):
    """Two-convolution residual block with caller-supplied (AdaIn) statistics.

    ``mean1``/``std1`` style the first convolution's output and
    ``mean2``/``std2`` the second's, mirroring AdaptiveResidualBlockUp.
    """

    def __init__(self, channels):
        super(AdaptiveResidualBlock, self).__init__()
        self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in1 = AdaIn()
        self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
        self.in2 = AdaIn()

    def forward(self, x, mean1, std1, mean2, std2):
        residual = x
        out = self.conv1(x)
        out = self.in1(out, mean1, std1)
        out = F.relu(out)
        out = self.conv2(out)
        # Bug fix: the second AdaIn previously reused (mean1, std1), leaving
        # mean2/std2 unused; use the second style statistics as intended
        # (consistent with AdaptiveResidualBlockUp).  The unused ``temp``
        # local was also dropped.
        out = self.in2(out, mean2, std2)
        return out + residual
# endregion
| 29.670833 | 103 | 0.61452 | 978 | 7,121 | 4.279141 | 0.144172 | 0.060454 | 0.0681 | 0.055197 | 0.637037 | 0.585424 | 0.546714 | 0.498925 | 0.494146 | 0.471207 | 0 | 0.023108 | 0.270748 | 7,121 | 239 | 104 | 29.794979 | 0.782785 | 0.079062 | 0 | 0.462069 | 0 | 0 | 0.002147 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117241 | false | 0 | 0.02069 | 0 | 0.248276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7409f1af0ea4329b3748af9c24220e3c22dd281d | 869 | py | Python | lambdafunctions/RDS failover/snapshot_creation.py | khihouston/AWS_Lambda | af36fb469a5e71569d4b6f2ae96bb2de91b3cf4a | [
"Apache-2.0"
] | null | null | null | lambdafunctions/RDS failover/snapshot_creation.py | khihouston/AWS_Lambda | af36fb469a5e71569d4b6f2ae96bb2de91b3cf4a | [
"Apache-2.0"
] | null | null | null | lambdafunctions/RDS failover/snapshot_creation.py | khihouston/AWS_Lambda | af36fb469a5e71569d4b6f2ae96bb2de91b3cf4a | [
"Apache-2.0"
] | null | null | null | import botocore
import datetime
import re
import logging
import boto3
# --- deployment configuration ----------------------------------------------
region='us-west-1'
# NOTE(review): db_instance_class and db_subnet are defined but never used in
# this file -- possibly leftovers from a restore/failover script; confirm.
db_instance_class='db.m4.large'
db_subnet='default'
# RDS instance identifiers to snapshot.
instances = ['master']
print('Loading function')
def lambda_handler(event, context):
    """Create a timestamped manual snapshot for each RDS instance in
    ``instances``.

    Args:
        event: Lambda trigger payload (unused).
        context: Lambda runtime context (unused).
    Raises:
        Exception: when the snapshot request is rejected by the RDS API.
    """
    source = boto3.client('rds', region_name=region)
    for instance in instances:
        try:
            # '%M' (zero-padded minute): the original '%-M' is a glibc-only
            # strftime extension and fails on other platforms.
            timestamp1 = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + "lambda-snap"
            snapshot = "{0}-{1}-{2}".format("mysnapshot", instance, timestamp1)
            response = source.create_db_snapshot(DBSnapshotIdentifier=snapshot,
                                                 DBInstanceIdentifier=instance)
            print(response)
        except botocore.exceptions.ClientError as e:
            # Chain the original ClientError for easier debugging.
            raise Exception("Could not create snapshot: %s" % e) from e
740b1289aa49c74c0bf1b913185895b820bbdd76 | 3,009 | py | Python | crispr_ont_prediction.py | Peppags/A-CNNCrispr | 6022dd34b54172d7e6c99435ac5f34314212e269 | [
"MIT"
] | 3 | 2021-03-11T07:47:47.000Z | 2021-06-02T10:36:24.000Z | crispr_ont_prediction.py | Peppags/A-CNNCrispr | 6022dd34b54172d7e6c99435ac5f34314212e269 | [
"MIT"
] | 3 | 2021-04-26T15:26:23.000Z | 2022-03-28T02:12:12.000Z | crispr_ont_prediction.py | Peppags/CRISPRont-CRISPRofft | 6022dd34b54172d7e6c99435ac5f34314212e269 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from keras.preprocessing import text, sequence
from keras.models import Model
from keras.layers import Input, Embedding
from keras.layers.core import Dropout
from keras.layers.convolutional import Conv1D, AveragePooling1D
from keras.layers import multiply
from keras.layers.core import Dense, Reshape, Lambda, Permute, Flatten
from keras.initializers import RandomUniform
from keras import regularizers
import keras.backend as K
import keras
import numpy as np
import pandas as pd
def attention(x, g, TIME_STEPS):
    """Additive attention over the time axis.

    Mixes signal ``x`` with gating signal ``g`` (both with TIME_STEPS on
    axis 1 and features on axis 2 -- confirm against caller) and returns x
    weighted by the learned softmax attention probabilities.
    """
    input_dim = int(x.shape[2])
    # Swap time/feature axes so the Dense layers act across time steps.
    x_t = Reshape((input_dim, TIME_STEPS))(K.permute_dimensions(x, (0, 2, 1)))
    g_t = Reshape((input_dim, TIME_STEPS))(K.permute_dimensions(g, (0, 2, 1)))
    x_proj = Dense(TIME_STEPS, kernel_initializer=RandomUniform(seed=2020))(x_t)
    g_proj = Dense(TIME_STEPS, kernel_initializer=RandomUniform(seed=2020))(g_t)
    scores = Dense(TIME_STEPS, activation="softmax", use_bias=False)(
        keras.layers.add([x_proj, g_proj]))
    attn_probs = Permute((2, 1))(scores)
    return multiply([x, attn_probs])
def crispr_ont():
    """Build the CRISPR-ON attention CNN regression model (Keras).

    Input: integer-encoded 24-mers (vocabulary size 7); output: one linear
    score.  Two parallel Conv1D branches feed the additive attention block,
    whose output is concatenated (weighted 0.8) with the pooled branch
    (weighted 0.2) before the dense head.
    """
    dropout_rate = 0.4
    seq_input = Input(shape=(24,))
    embedded = Embedding(7, 44, input_length=24)(seq_input)
    conv1 = Conv1D(256, 5, activation="relu", name="conv1")(embedded)
    pool1 = AveragePooling1D(2)(conv1)
    drop1 = Dropout(dropout_rate)(pool1)
    conv2 = Conv1D(256, 5, activation="relu", name="conv2")(pool1)
    conv3 = Conv1D(256, 5, activation="relu", name="conv3")(drop1)
    attn = Lambda(lambda t: attention(t[0], t[1], 6))([conv3, conv2])
    concat = Lambda(lambda t: K.concatenate([t[0], t[1]], axis=1))
    weight_pool = Lambda(lambda t: t * 0.2)
    weight_attn = Lambda(lambda t: t * 0.8)
    flat_pool = Flatten()(pool1)
    flat_attn = Flatten()(attn)
    flat = concat([weight_pool(flat_pool), weight_attn(flat_attn)])
    dense1 = Dense(128,
                   kernel_regularizer=regularizers.l2(1e-4),
                   bias_regularizer=regularizers.l2(1e-4),
                   activation="relu",
                   name="dense1")(flat)
    drop3 = Dropout(dropout_rate)(dense1)
    dense2 = Dense(64,
                   kernel_regularizer=regularizers.l2(1e-4),
                   bias_regularizer=regularizers.l2(1e-4),
                   activation="relu",
                   name="dense2")(drop3)
    drop4 = Dropout(dropout_rate)(dense2)
    dense3 = Dense(32, activation="relu", name="dense3")(drop4)
    drop5 = Dropout(dropout_rate)(dense3)
    output = Dense(1, activation="linear", name="output")(drop5)
    return Model(inputs=[seq_input], outputs=[output])
if __name__ == '__main__':
    # Load the trained model weights and score the test set.
    model = crispr_ont()
    print("Loading weights for the models")
    model.load_weights("crispr_ont.h5")
    data_path = "data/test_ont.csv"
    data = pd.read_csv(data_path)
    # NOTE(review): ``make_data`` is not defined or imported anywhere in this
    # file -- this line raises NameError at runtime; presumably it encodes
    # the sgRNA strings for the Embedding layer.  Confirm the missing import.
    x_test = make_data(data["sgRNA"])
    y_pred = model.predict([x_test])
| 30.704082 | 75 | 0.653373 | 407 | 3,009 | 4.687961 | 0.334152 | 0.042453 | 0.056604 | 0.056604 | 0.253669 | 0.186583 | 0.142558 | 0.142558 | 0.08805 | 0.08805 | 0 | 0.054668 | 0.209704 | 3,009 | 97 | 76 | 31.020619 | 0.747687 | 0.006647 | 0 | 0.086957 | 0 | 0 | 0.056913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.202899 | 0 | 0.26087 | 0.014493 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
740c8021591632f8589b775ff0990d59d773eb73 | 6,023 | py | Python | sr_tutorial.py | briliantnugraha/pytorch_tutorial | 640996759a2a3545429760d17b94a1f4896525fb | [
"MIT"
] | null | null | null | sr_tutorial.py | briliantnugraha/pytorch_tutorial | 640996759a2a3545429760d17b94a1f4896525fb | [
"MIT"
] | null | null | null | sr_tutorial.py | briliantnugraha/pytorch_tutorial | 640996759a2a3545429760d17b94a1f4896525fb | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import torchvision
import torchvision.models as models
from torchvision import transforms
from PIL import Image
from time import time
import matplotlib.pyplot as plt
import os
import numpy as np
import sys
import cv2
#=================================================
# HYPERPARAMETERS HERE...
# Shared augmentation applied to each image before the input/target
# transforms, so the low-res input and high-res target stay aligned.
img_transforms = transforms.Compose([
        transforms.Resize((224,224)),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomVerticalFlip(p=0.5),
        transforms.RandomRotation(45)
        ])
# Target: full-resolution (224x224) tensor, ImageNet-normalized.
target_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
        ])
# Input: downsampled to 56x56 (4x super-resolution task), then normalized.
input_transform = transforms.Compose([
        transforms.Resize((56, 56)),#((224,224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225] )
        ])

# Run on GPU when available.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

# Huber-style loss: less sensitive to outlier pixels than pure L2.
loss_func = torch.nn.SmoothL1Loss()
epochs = 150
batch_size=4
lr = 0.001
datapath = 'catdog/srgan_datatrain'
train_data = os.listdir(datapath)
#=================================================
# CLASS AND FUNCTION HERE...
def read_batch(datapath, imgname):
    """Load a batch of images and return (low-res inputs, high-res targets).

    Each image gets the shared augmentation first so input and target stay
    spatially aligned, then the respective normalization transforms.

    Args:
        datapath: directory containing the image files.
        imgname: iterable of file names within ``datapath``.
    Returns:
        Two stacked tensors on ``device``: the 56x56 inputs and the
        224x224 targets (shapes follow input_transform/img_transforms).
    """
    imginput = []
    imgtarget = []
    for name in imgname:
        img = Image.open(os.path.join(datapath, name))
        img = img_transforms(img)
        imgtarget.append(target_transform(img))
        imginput.append(input_transform(img))
    # torch.stack accepts the list directly -- the original wrapped each
    # list in an identity comprehension for no effect.
    imginput = torch.stack(imginput, dim=0).to(device)
    imgtarget = torch.stack(imgtarget, dim=0).to(device)
    return imginput, imgtarget
# here comes the training function!
def train(model, optimizer, loss_fn, train_data, datapath=None, batch_size = 5, epochs=20, device="cpu"):
    """Train ``model`` on the image files named in ``train_data``.

    Shuffles the file list each epoch, feeds mini-batches through
    ``read_batch``, and checkpoints whenever the epoch loss improves.

    Args:
        model: network to optimize.
        optimizer: torch optimizer over ``model.parameters()``.
        loss_fn: loss comparing model output with the target images.
        train_data: list of image file names under ``datapath``.
        datapath: directory holding the training images.
        batch_size: mini-batch size.
        epochs: number of passes over the data.
        device: device the input/target tensors are moved to.
    """
    lowest_train_loss = 1e+6
    train_num = np.arange(len(train_data))
    train_data = np.array(train_data)
    print('=======================')
    print('Training data number: ', len(train_data))
    print('Start Training...')
    for epoch in range(epochs):
        training_loss = 0.0
        model.train()
        np.random.shuffle(train_num)
        start = time()
        for b in range(0, len(train_num), batch_size):
            # Bug fix: the original advanced a ``ctr`` counter by ``b`` each
            # iteration, producing ever-growing, overlapping slices; take a
            # fixed-size batch instead.
            batch_data = train_data[train_num[b:b + batch_size]]
            optimizer.zero_grad()
            inputs, targets = read_batch(datapath, batch_data)
            inputs = inputs.to(device)
            targets = targets.to(device)
            output = model(inputs)
            loss = loss_fn(output, targets)
            loss.backward()
            optimizer.step()
            training_loss += loss.data.item() * inputs.size(0)
        training_loss /= len(train_num)
        print('Epoch: {}, time: {:.2f}s, Lowest train loss: {:.2f}, Training Loss: {:.2f}'.format(epoch,
              time()-start, lowest_train_loss, training_loss))
        if training_loss < lowest_train_loss:
            lowest_train_loss = training_loss
            # NOTE(review): checkpoint path is hard-coded and Windows-specific
            # while the rest of the script loads 'catdog/srgan_trained.pth';
            # also ``epochs > 10`` gates saving on the *total* epoch count --
            # possibly ``epoch > 10`` was intended.  Both preserved; confirm.
            if epochs > 10:
                torch.save(model.state_dict(), r'D:\pytorch_tutorial\catdog\srgan_trained.pth')
class OurFirstSRNet(nn.Module):
    """Simple encoder/decoder super-resolution network.

    The ``features`` encoder shrinks the image 8x with strided convolutions;
    the ``upsample`` decoder grows it 32x with transposed convolutions, so a
    56x56 input yields a 224x224 output (4x super-resolution overall).
    """

    # (in, out, kernel, stride, padding, is_transposed) per decoder layer;
    # each entry is followed by an in-place ReLU.
    _UP_SPEC = (
        (256, 256, 2, 2, 0, True),
        (256, 256, 3, 1, 1, False),
        (256, 192, 2, 2, 0, True),
        (192, 128, 2, 2, 0, True),
        (128, 128, 3, 1, 1, False),
        (128, 64, 2, 2, 0, True),
        (64, 3, 4, 2, 1, True),
    )

    def __init__(self):
        super(OurFirstSRNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=8, stride=2, padding=3),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 192, kernel_size=4, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(192, 256, kernel_size=4, stride=2, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
        )
        decoder = []
        for cin, cout, k, s, p, transposed in self._UP_SPEC:
            layer_cls = nn.ConvTranspose2d if transposed else nn.Conv2d
            decoder.append(layer_cls(cin, cout, kernel_size=k, stride=s, padding=p))
            decoder.append(nn.ReLU(inplace=True))
        self.upsample = nn.Sequential(*decoder)

    def forward(self, x):
        return self.upsample(self.features(x))
# test image transform: build one low-res input tensor from the first
# training image (also used below when mode is 'detecting').
img = Image.open(os.path.join(datapath, train_data[0]))
img = img_transforms(img)
img = input_transform(img).unsqueeze(0).to(device)
# test forward propagation: print the input/output shapes once at startup.
model = OurFirstSRNet()
model.to(device)
print('=======================')
print('Example input-output...')
print('input: ', img.size())
output = model(img)
print('output:', output.size())
# define Backprop Optimizer
optimizer = optim.Adam(model.parameters(), lr=lr)
# NOTE(review): weights are loaded unconditionally, so 'training' mode
# resumes from the existing checkpoint; this fails if the file is absent.
model.load_state_dict(torch.load( r'catdog/srgan_trained.pth') )
# training -- NOTE(review): sys.argv[1] is read without a length check;
# running the script with no argument raises IndexError.
if sys.argv[1] == 'training':
    print('...You pick mode training...')
    train(model, optimizer, loss_func, train_data, datapath=datapath,
          batch_size = batch_size, epochs=epochs, device=device)
# testing
if sys.argv[1] == 'detecting':
    print('...You pick mode detecting...')
    model.load_state_dict(torch.load( r'catdog/srgan_trained.pth') )
    model.eval()
    with torch.no_grad():
        output = model(img).cpu().squeeze(0).numpy()
    print('output:', output.shape)
    # cv2.imwrite('output.png', output[...,::-1].astype(np.uint8))
740dc09c5d5989b4dd60755f7fb722bb32c864ad | 13,953 | py | Python | pysrc/op-eff-plot.py | TUD-UCB-Boda/tud_boda | aa52c85aa0af1110f6f1f5a50c71bb37994b332a | [
"BSD-2-Clause"
] | 62 | 2015-03-19T09:55:50.000Z | 2022-02-27T19:06:52.000Z | pysrc/op-eff-plot.py | moskewcz/boda | 27ec86e03ad004ad8beac7f7d74a5b0ef7676ef6 | [
"BSD-2-Clause"
] | 31 | 2015-12-02T23:36:06.000Z | 2019-04-17T20:11:28.000Z | pysrc/op-eff-plot.py | TUD-UCB-Boda/tud_boda | aa52c85aa0af1110f6f1f5a50c71bb37994b332a | [
"BSD-2-Clause"
] | 24 | 2015-05-20T14:33:15.000Z | 2021-04-25T01:08:36.000Z | from matplotlib import rc
rc('text', usetex=True) # this is if you want to use latex to print text. If you do you can create strings that go on labels or titles like this for example (with an r in front): r"$n=$ " + str(int(n))
from numpy import *
from pylab import *
import random
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import matplotlib.lines as lns
from scipy import stats
from matplotlib.patches import Polygon, Circle
import matplotlib.font_manager as fm
def latex_float(f):
  """Format *f* to 2 significant digits as a LaTeX math string, rendering
  e-notation as ``m \\times 10^{e}``."""
  text = "{0:.2g}".format(f)
  if "e" not in text:
    return text
  mantissa, _, exp = text.partition("e")
  return r"{0} \times 10^{{{1}}}".format(mantissa, int(exp))
class EffPt( object ):
  """One efficiency data point parsed from a 12-field '&'-separated row."""
  def __init__( self, elp ):
    self.cols = ["varname","bxf","flops","ai","rts"]
    # elp[4] carries the variant name wrapped in a 6-char prefix and a
    # trailing character; strip both.
    self.varname = elp[4][6:-1]
    self.bxf = float(elp[6])
    self.flops = float(elp[7])
    self.ai = float(elp[8])
    self.rts = float(elp[9])
    # Fields identifying the operation itself; used to pair points across
    # runs (see EffPlot).
    self.opinfo = elp[0:3] + elp[5:6]
    self.comp = None  # matching point from the comparison run, if any
  def __str__( self ):
    parts = [ str(col) + "=" + str(getattr(self, col)) for col in self.cols ]
    return " ".join( parts )
class varinfo( object ):
  """Legend/marker bookkeeping for one convolution variant."""
  def __init__( self, name, color, mark='o', mark_comp='d' ):
    self.name = name
    self.color = color
    self.mark = mark
    self.mark_comp = mark_comp
    # Proxy artists used only for building the legend.
    self.art = plt.Line2D( (0,0), (0,0), color=color, marker=mark, linestyle='' )
    self.art_comp = plt.Line2D( (0,0), (0,0), color=color, marker=mark_comp, linestyle='' )
    self.clear_use()
  def clear_use( self ):
    """Reset the per-plot usage counters."""
    self.num_use = 0
    self.num_use_comp = 0
  def inc_use( self, is_comp ):
    """Record that a point of this variant was plotted."""
    if is_comp:
      self.num_use_comp += 1
    else:
      self.num_use += 1
  def get_mark( self, is_comp ):
    """Marker character for a main or comparison point."""
    return self.mark_comp if is_comp else self.mark
  def get_leg( self, leg_art, leg_lab ):
    """Append this variant's legend entries (when used) and reset counters."""
    verb_name = "\\verb|" + self.name + "|"
    if self.num_use:
      leg_art.append( self.art )
      leg_lab.append( verb_name )
    if self.num_use_comp:
      leg_art.append( self.art_comp )
      leg_lab.append( verb_name[:-1] + " (Comp)|" )
    self.clear_use()
# Known convolution variants and their plot colors; the *_simd variants
# share the base variant's color.
vis = [
  varinfo( "conv", "cornflowerblue" ),
  varinfo( "conv_simd", "cornflowerblue" ),
  varinfo( "k1conv", "green" ),
  varinfo( "k1conv_simd", "green" ),
  varinfo( "tconv", "purple" ),
  varinfo( "cudnn_conv", "red" ),
]
# Lookup from variant name to its varinfo.
vis_map = { vi.name:vi for vi in vis }
def inc_comp( epts ):
  """Yield each point, immediately followed by its comparison point if set."""
  for ept in epts:
    yield ept
    if ept.comp:
      yield ept.comp
def read_eff_file( epts, fn ):
  """Parse the '&'-separated efficiency table in *fn*, appending one EffPt
  per row to *epts*.  Rows whose runtime is NaN are dropped."""
  # Context manager closes the file; the original leaked the handle.
  with open( fn ) as f:
    els = f.readlines()
  for el in els:
    elps = [ elp.strip() for elp in el.split("&") ]
    assert len(elps) == 12
    ept = EffPt( elps )
    # Equivalent to the original append-then-pop-on-NaN dance.
    if not math.isnan( ept.rts ):
      epts.append( ept )
def adj_tick_lab( lab ):
  """Rewrite a log10-scale tick label as the LaTeX form of 10**value."""
  lt = lab.get_text()
  if not lt:
    return ""
  if lt.startswith("$"):
    lt = lt[1:-1]
  neg = 1.0
  if lt.startswith(u'\u2212'):  # matplotlib renders minus as U+2212
    lt = lt[1:]
    neg = -1.0
  return "$%s$" % latex_float( 10**(neg*float(lt)) )
class EffPlot( object ):
  def __init__( self, args ):
    """Load the main (and optional comparison) efficiency files, pair up
    matching operations, and generate the plots (plus two zoom levels when
    args.do_zooms is set)."""
    self.args = args
    self.epts = []
    self.epts_comp = []
    read_eff_file( self.epts, self.args.eff_fn )
    if self.args.eff_comp_fn:
      read_eff_file( self.epts_comp, self.args.eff_comp_fn )
      # The comparison file must list exactly the same operations, in order.
      assert len(self.epts) == len(self.epts_comp)
      for ept,ept_comp in zip(self.epts,self.epts_comp):
        assert ept.opinfo == ept_comp.opinfo
        ept.comp = ept_comp
    self.do_plots()
    if self.args.do_zooms:
      for zl in [1,2]:
        self.args.out_fn += "-zoom"
        # Each zoom keeps only points below 1/10th of the current max FLOPS.
        max_flops = max( ept.flops for ept in self.epts )
        self.epts = [ ept for ept in self.epts if ept.flops < (max_flops/10.0) ]
        self.do_plots()
def skip_plot_check_flops_vs_time( self, ept ):
if not ept.comp: return 0 # no comp? if so, never skip.
delta = abs( ept.rts - ept.comp.rts )
rel_delta = delta * 2.0 / (ept.rts + ept.comp.rts)
# if rel_delta < self.args.min_rel_delta_to_show: return 1 # we're really trying to show varaint difference, so skip this check
if ept.varname == ept.comp.varname: return 1 # FIXME: skip when comp is same varaint. not right in general, but okay for now
# FIXME: a few data points have the same variant, but sig. diff runtimes. there is certainly some noise in the runtimes, or the code might have shifted a bit between the two runs, or it's possible the tuning params were a little different between the two runs. for now, we'll skip such points, but we should investigate more.
return 0
def plot_flops_vs_time_pt( self, ax, ept, is_comp ):
vi = vis_map[ept.varname]
vi.inc_use( is_comp )
x,y = math.log(ept.flops,10), math.log(ept.rts,10)
ax.plot(x, y, color=vi.color, markersize=4, alpha=.7, marker=vi.get_mark(is_comp), linestyle=' ' )
return x,y
def plot_fps_vs_ai_pt( self, ax, ept, is_comp ):
vi = vis_map[ept.varname]
vi.inc_use( is_comp )
x = ept.ai
y = ept.flops/ept.rts
ax.plot( x,y, color=vi.color, markersize=2*max(1,math.log(ept.flops,10)-6), alpha=.7, marker=vi.get_mark(is_comp), linestyle=' ' )
return x,y
def do_plots( self ):
    """Write two plot images: runtime-vs-FLOPS (log/log) and F/s-vs-AI.

    Side effects: sets matplotlib rc styling globally, and filters
    self.epts in place via skip_plot_check_flops_vs_time().  Output file
    names come from self.args.out_fn / self.args.out_fmt.
    """
    # flops vs runtime plot with 60GF/s line
    # global rc styling: light-grey plot area with white gridlines
    background_color =(0.85,0.85,0.85) #'#C0C0C0'
    grid_color = 'white' #FAFAF7'
    rc('axes', facecolor = background_color)
    rc('axes', edgecolor = grid_color)
    rc('axes', linewidth = 1.2)
    rc('axes', grid = True )
    rc('axes', axisbelow = True)
    rc('grid',color = grid_color)
    rc('grid',linestyle='-' )
    rc('grid',linewidth=0.7 )
    #rc('xtick.major',size =0 )
    #rc('xtick.minor',size =0 )
    #rc('ytick.major',size =0 )
    #rc('ytick.minor',size =0 )
    # filter data based on skip check (note: mutates self.epts)
    self.epts = [ ept for ept in self.epts if not self.skip_plot_check_flops_vs_time( ept ) ]
    # --- plot 1: RUNTIME vs #FLOPS on log10/log10 axes ---
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #formatting:
    ax.set_title("RUNTIME (seconds) vs \\#-of-FLOPS [log/log scale]",fontsize=12,fontweight='bold')
    ax.set_xlabel("\\#-of-FLOPS", fontsize=12) # ,fontproperties = font)
    ax.set_ylabel("RUNTIME (seconds)", fontsize=12) # ,fontproperties = font)
    x = [ math.log(ept.flops,10) for ept in inc_comp(self.epts) ]
    y = [ math.log(ept.rts,10) for ept in inc_comp(self.epts) ]
    self.set_bnds( ax, x, y )
    # print matplotlib.lines.Line2D.filled_markers
    # --> (u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd')
    # draw each point; a comparison point is joined to its partner by a thin line
    for ept in self.epts:
        x,y = self.plot_flops_vs_time_pt( ax, ept, 0 )
        if ept.comp:
            xc,yc = self.plot_flops_vs_time_pt( ax, ept.comp, 1 )
            ax.plot( [x,xc], [y,yc], linewidth=0.5, color='black' )
    leg_art = []; leg_lab = []
    for vi in vis: vi.get_leg( leg_art, leg_lab )
    legend = ax.legend(leg_art,leg_lab,loc='lower right', shadow=True, fontsize='small',numpoints=1,ncol=1)
    legend.get_frame().set_facecolor('#eeddcc')
    # guide lines at 1/10, 1/5, 1/2 and 1x of the next power-of-ten F/s bound
    max_fps = max( ept.flops/ept.rts for ept in inc_comp(self.epts) )
    log10_max_fps = int(math.ceil(math.log(max_fps,10)))
    if 1:
        fps_bnd = 10**log10_max_fps
        self.add_fps_line( ax, fps_bnd / 10.0 )
        self.add_fps_line( ax, fps_bnd / 5.0 )
        self.add_fps_line( ax, fps_bnd / 2.0 )
        self.add_fps_line( ax, fps_bnd )
    self.adj_ticks(ax,fig)
    fig.savefig( self.args.out_fn + "." + self.args.out_fmt, dpi=600, bbox_inches='tight')
    # --- plot 2: F/s vs arithmetic intensity (linear axes) ---
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #formatting:
    ax.set_title("F/s vs Arithmetic Intensity",fontsize=12,fontweight='bold')
    ax.set_xlabel("Arithmetic Intensity", fontsize=12) # ,fontproperties = font)
    ax.set_ylabel("F/s", fontsize=12) # ,fontproperties = font)
    x = [ ept.ai for ept in inc_comp(self.epts) ]
    y = [ ept.flops/ept.rts for ept in inc_comp(self.epts) ]
    self.set_bnds( ax, x, y )
    # print matplotlib.lines.Line2D.filled_markers
    # --> (u'o', u'v', u'^', u'<', u'>', u'8', u's', u'p', u'*', u'h', u'H', u'D', u'd')
    for ept in self.epts:
        x,y = self.plot_fps_vs_ai_pt( ax, ept, 0 )
        if ept.comp:
            xc,yc = self.plot_fps_vs_ai_pt( ax, ept.comp, 1 )
            ax.plot( [x,xc], [y,yc], linewidth=0.5, color='black' )
    leg_art = []; leg_lab = []
    for vi in vis: vi.get_leg( leg_art, leg_lab )
    # extra legend entries documenting the marker-size-to-#FLOPS mapping
    max_flops = max( ept.flops for ept in inc_comp(self.epts) )
    mfl = int(math.ceil(math.log(max_flops,10)))
    for ls in range(max(mfl-5,1),mfl):
        ms=2*max(1,ls-6)
        leg_art += [plt.Line2D((0,0),(0,0), color="black", marker='o', linestyle='', markersize=ms)]
        leg_lab += ["$10^{"+str(ls)+"}$ Flops"]
    legend = ax.legend(leg_art,leg_lab,loc='upper right', shadow=True, fontsize='small',numpoints=1,ncol=1)
    legend.get_frame().set_facecolor('#eeddcc')
    fig.canvas.draw()
    fig.savefig( self.args.out_fn + "-ai" + "." + self.args.out_fmt, dpi=600, bbox_inches='tight')
    # ai vs GF/s plot
def set_bnds( self, ax, x, y ):
    """Set axis limits from the data (5% headroom above the max) and fix the aspect."""
    lo_x, hi_x = min(x), max(x) * 1.05
    lo_y, hi_y = min(y), max(y) * 1.05
    self.x_min, self.x_max = lo_x, hi_x
    self.y_min, self.y_max = lo_y, hi_y
    ax.axis([lo_x, hi_x, lo_y, hi_y])
    # width/height ratio of the data ranges; consumed by the fps-line helpers
    self.data_aspect = float(hi_x - lo_x) / (hi_y - lo_y)
    self.axis_aspect_rat = 1  # was .618
    self.axis_aspect = self.axis_aspect_rat * self.data_aspect
    ax.set_aspect(self.axis_aspect)
def adj_ticks( self, ax, fig ):
    """Pass every tick label on both axes through adj_tick_lab()."""
    fig.canvas.draw()  # tick labels are only populated after a draw
    ax.set_xticklabels([adj_tick_lab(lab) for lab in ax.get_xticklabels()])
    ax.set_yticklabels([adj_tick_lab(lab) for lab in ax.get_yticklabels()])
def add_fps_line( self, ax, fps ): self.add_fps_line_log( ax, fps )
def add_fps_line_lin( self, ax, fps ):
    """Draw a dotted constant-F/s guide line on linear axes, labelled in GF/s."""
    #Peak performance line and text
    xs = [self.x_min, (self.x_min + self.x_max) * 0.5, self.x_max]
    ys = [v / fps for v in xs]
    # keep the label point below the vertical midpoint on steep lines
    y_mid = (self.y_min + self.y_max) / 2
    if ys[1] > y_mid:
        ys[1] = y_mid
        xs[1] = ys[1] * fps
    ax.plot(xs, ys, linewidth=1.0, color='black', linestyle=':')
    label_string = "%.1fGF/s" % (fps/1e9)
    # rotate the label to follow the line's on-screen slope
    rot = np.arctan(ys[1] / xs[1] * self.axis_aspect) * 180 / np.pi
    ax.text(xs[1], ys[1], label_string, fontsize=8, rotation=rot, ha="left", va="bottom")
def add_fps_line_log( self, ax, fps ):
    """Draw a dotted constant-F/s guide line on log10/log10 axes, labelled in GF/s."""
    #Peak performance line and text
    log_fps = math.log(fps, 10)
    # three anchors: left edge, an 80%-across label point, right edge
    xs = [self.x_min, self.x_min * 0.2 + self.x_max * 0.8, self.x_max]
    ys = [v - log_fps for v in xs]  # log10(rts) = log10(flops) - log10(f/s)
    y_cap = self.y_min * 0.2 + self.y_max * 0.8
    if ys[1] > y_cap:  # steep line: pin the label point vertically instead
        ys[1] = y_cap
        xs[1] = ys[1] + log_fps
    ax.plot(xs, ys, linewidth=1.0, color='black', linestyle=':')
    label_string = "%.1fGF/s" % (fps/1e9)
    rot = np.arctan(self.data_aspect) * 180 / np.pi
    ax.text(xs[1], ys[1], label_string, fontsize=12, rotation=rot, ha="left", va="bottom")
# Command-line entry point: build the parser, parse sys.argv, and immediately
# construct an EffPlot, which runs the whole load/filter/plot pipeline.
import argparse
parser = argparse.ArgumentParser(description='Create eff plots.')
parser.add_argument('--eff-fn', metavar="FN", type=str, default="eff-tab.raw", help="filename of eff values in latex table format" )
parser.add_argument('--eff-comp-fn', metavar="FN", type=str, default="", help="filename of eff values in latex table format for comparison to those from the file specified by --eff-fn" )
parser.add_argument('--out-fn', metavar="FN", type=str, default="eff", help="base filename of output plot image" )
parser.add_argument('--out-fmt', metavar="EXT", type=str, default="png", help="extention/format for output plot image" )
# NOTE(review): type=bool does not parse "False"/"0" as false -- any non-empty
# string argument is truthy; confirm intended usage of --do-zooms.
parser.add_argument('--do-zooms', metavar="BOOL", type=bool, default=0, help="if true, output zoomed and 2X zoomed graphs" )
parser.add_argument('--min-rel-delta-to-show', metavar="FLOAT", type=float, default=0.05, help="if true, skip showing points where delta/avg is < this value in comparison mode" )
args = parser.parse_args()
ep = EffPlot(args)  # constructing the object performs all the work as a side effect
# example command lines for generating inputs to this script:
# boda on titan-X, optimized variants enabled
# boda cnn_op_info --cnn-func-sigs-fn='%(boda_test_dir)'/conv-ops-1-5-20-nin-alex-gn.txt --op-eff-tab-fn=conv-1-5-20-nin-alex-gn-titanX-boda.raw --rtc='(be=nvrtc)' --gen-data='(type=foo,str_vals=(vi=0.0f,mode=5))' --op-tune='(tconv=1,k1conv=1)' --rtc-comp='(be=nvrtc)' --max-err=10 --show-rtc-calls=1 --mad-toler=3e-3 --print-format=1 --inc-op-info-in-eff=1
# run on SD820, optimizations enabled, no comparison:
# export SD820_RTC="rtc=(be=ipc,remote_rtc=(be=ocl,gen_src=1,gen_src_output_dir=/data/local/rtc-gen-src),spawn_str=adb shell LD_LIBRARY_PATH=/data/local/lib /data/local/bin/boda,spawn_shell_escape_args=1,boda_parent_addr=tcp:10.0.0.100:12791)"
# export OP_TUNE="op_tune=(use_culibs=0,MNt=8:8,MNb=16:16,k1conv=1,tconv=0,Kb=1,vw=8,use_local_mem=2)"
# boda cnn_op_info --cnn-func-sigs-fn='%(boda_test_dir)'/conv-ops-1-5-20-nin-alex-gn.txt --op-eff-tab-fn=conv-1-5-20-nin-alex-gn-SD820-boda.raw --"${SD820_RTC}" --"${OP_TUNE}" --show-rtc-calls=1 --peak-flops=320e9 --print-format=1 --inc-op-info-in-eff=1
| 45.301948 | 359 | 0.600731 | 2,266 | 13,953 | 3.556929 | 0.196823 | 0.020844 | 0.011911 | 0.010422 | 0.437841 | 0.371216 | 0.331141 | 0.301241 | 0.257444 | 0.219975 | 0 | 0.029621 | 0.245109 | 13,953 | 307 | 360 | 45.449511 | 0.735593 | 0.193077 | 0 | 0.183333 | 0 | 0.004167 | 0.085763 | 0.00205 | 0 | 0 | 0 | 0.003257 | 0.0125 | 1 | 0.0875 | false | 0 | 0.045833 | 0.008333 | 0.179167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7410a9d371811ecb288d58e5e4a3a8a6f106fa60 | 5,547 | py | Python | bir_open_manipulator_p_with_gripper_cam_moveit/src/bir_omp_catch_simulation.py | Brazilian-Institute-of-Robotics/bir_open_manipulator_p | ce5eec7de2d361becddb3eecc44700cb12990083 | [
"Apache-2.0"
] | 1 | 2020-03-03T18:18:42.000Z | 2020-03-03T18:18:42.000Z | bir_open_manipulator_p_with_gripper_cam_moveit/src/bir_omp_catch_simulation.py | Brazilian-Institute-of-Robotics/bir_open_manipulator_p | ce5eec7de2d361becddb3eecc44700cb12990083 | [
"Apache-2.0"
] | null | null | null | bir_open_manipulator_p_with_gripper_cam_moveit/src/bir_omp_catch_simulation.py | Brazilian-Institute-of-Robotics/bir_open_manipulator_p | ce5eec7de2d361becddb3eecc44700cb12990083 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import rospy
from math import pi
import moveit_commander
from moveit_msgs.msg import DisplayTrajectory
from apriltag_ros.msg import AprilTagDetectionArray
# CONSTANTS
N_ROBOT_JOINTS = 6        # number of arm joints (gripper handled separately)
POSE_TOLERANCE = 0.05     # MoveIt goal position tolerance
FRAMES_LIMIT = 25         # consecutive detection frames required to accept a tag
ROTATION_DEGREE = -10     # per-step search rotation of joint 1, in degrees
J1_LIMIT_DEGREE = -90     # software limit for joint 1 during the search, in degrees
class openManipulatorPRO:
    """Search-and-grasp demo for the OpenManipulator-P arm.

    Rotates joint 1 until an AprilTag (id 1) has been seen for FRAMES_LIMIT
    consecutive frames, then runs a fixed pick/lift/release sequence via MoveIt.
    """
    def __init__(self):
        # ROS NODE INIT
        rospy.init_node('OMP_gripper_moveit_commander')
        # TAG DETECTION - VARS
        self.tag_found = 0     # 1 once the tag is confirmed by FRAMES_LIMIT frames
        self.init_pose = 0     # gate: ignore detections until the search pose is reached
        self.frames_count = 0  # consecutive frames the target tag has been seen
        # MOVEIT INIT
        moveit_commander.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.RobotCommander()
        self.scene = moveit_commander.PlanningSceneInterface()
        self.group = moveit_commander.MoveGroupCommander("arm")
        self.display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', DisplayTrajectory,queue_size=20)
        # TAG DETECTION INIT
        self.tag_id_subscriber = rospy.Subscriber('/tag_detections', AprilTagDetectionArray,self.tagCB)
        # MOVEIT RESTRICTIONS
        self.group.set_goal_position_tolerance(POSE_TOLERANCE) # GOAL TOLERANCE
        self.group.set_planning_time(5) # TIME TO PLANNING

    # FUNCTION - CALLBACK
    def tagCB(self,data):
        """AprilTag callback: count consecutive frames in which tag id 1 is seen.

        Sets self.tag_found once FRAMES_LIMIT consecutive frames are reached.
        Only active after the arm has reached the search pose (init_pose == 1).
        """
        # FIX: use ==/!= for integer comparisons; the original's `is`/`is not`
        # tests object identity and only worked via CPython small-int caching.
        if (self.tag_found != 1) and (len(data.detections) != 0):
            if data.detections[0].id[0] == 1 and self.init_pose == 1:
                self.frames_count += 1
            else:
                # FIX: the original reset self.seq_counts, which is never read
                # anywhere; the intent is to restart the consecutive-frame count.
                self.frames_count = 0
            if self.frames_count == FRAMES_LIMIT and self.init_pose == 1:
                self.tag_found = 1

    # FUNCTION - GO TO SPECIFIC POSE
    def go_to_pose(self, pose_name):
        """Plan and execute a motion to a pose saved in the MoveIt Setup Assistant."""
        # 1 - PASS YOUR POSE SAVED ON SETUP ASSISTANT
        self.group.set_named_target(pose_name)
        # 2 - PLAN AND EXECUTE
        self.group.go(wait=True)
        # 3 - PREVENT RESIDUAL MOVEMENT
        self.group.stop()
        # 4 - CLEAR TARGET GOAL
        self.group.clear_pose_targets()

    def search_tag(self):
        """Rotate joint 1 by ROTATION_DEGREE degrees, exiting at the J1 limit."""
        jointTarget = self.group.get_current_joint_values()
        # FIX: MoveIt joint values are in radians.  The original multiplied by
        # pi/180 (the degrees->radians factor) before comparing against the
        # degree limits; convert radians -> degrees with *180/pi instead.
        if (jointTarget[0] * 180 / pi) + ROTATION_DEGREE <= J1_LIMIT_DEGREE:
            rospy.loginfo('J1 Limit Exit!')
            sys.exit()
        # SEARCH TAG
        else:
            jointTarget[0] = jointTarget[0] + (ROTATION_DEGREE*pi/180)
            self.group.go(joints=jointTarget, wait=True)
            self.group.stop()
            self.group.clear_pose_targets()

    # FUNCTION - SET SPECIFIC JOINT
    def set_joint_go(self, jointIndex, joint_angle_rad):
        """Move a single joint (1-based index) to joint_angle_rad radians."""
        # TRANSLATE TO i-1 INDEX
        jointIndex = jointIndex - 1
        # 1 - DEFINE A VARIABLE FORMAT EQUIVALENT TO JOINTS
        joint_goal = self.group.get_current_joint_values()
        # 2 - INSERT DESIRED ANGLE
        joint_goal[jointIndex] = joint_angle_rad
        # 3 - GO!
        self.group.go(joints=joint_goal, wait=True)
        # 4 - STOP ANY RESIDUAL MOVEMENT
        self.group.stop()
        # 5 - CLEAR TARGET GOAL
        self.group.clear_pose_targets()

    # FUNCTION - PROCESS ROUTINE
    def detect_catch_routine(self):
        """Main routine: search for the tag, then pick up and release the bottle."""
        ## STEP 1 - OPEN GRIPPER
        self.go_to_pose('pHome')
        ## STEP 2 - GO TO SEARCH ZONE
        self.go_to_pose('pSearch')
        ## 2.1 - INIT FLAGS
        self.init_pose = 1
        self.tag_found = 0
        # STEP 3 - SEARCH THE TAG
        while not rospy.is_shutdown():
            # 3.1 - IF TAG WAS NOT FOUND, ROTATE
            if self.tag_found == 0:
                self.search_tag()
                rospy.loginfo('############# ROTATION COMPLETE')
            # 3.2 - IF TAG WAS FOUND, GO TO TARGET POSITION
            elif self.tag_found == 1:
                rospy.loginfo('############# TAG FOUND')
                # GO TO CATCH POSITION
                self.go_to_pose('pCatch')
                # GO TO CATCH POSITION BOTTLE
                self.go_to_pose('pCatchBottle')
                # CATCH WITH GRIPPER
                self.set_joint_go(7, 1.0)
                # GET UP THE BOTTLE
                self.set_joint_go(5, -0.95)
                # GO TO SEARCH INIT
                self.go_to_pose('pSearch')
                # OPEN THE GRIPPER
                self.set_joint_go(7, 2.0)
                # FINISH PROGRAM
                sys.exit()

    # AUXILIAR FUNCTION FOR SEVERAL TESTS
    def test_routine(self):
        """Ad-hoc manual test sequence (poses/joints only, no tag search)."""
        #self.go_to_pose('pHome')
        #self.go_to_pose('pSearch')
        #self.go_to_pose('pCatch')
        self.go_to_pose('pCatchBottle')
        self.set_joint_go(7, 1.0)
        self.set_joint_go(5, -0.95)
if __name__ == '__main__':
    try:
        # INITIALIZE YOUR OBJECT
        omanip = openManipulatorPRO()
        # INITIALIZE THE OPERATION BY CLICKING ENTER
        # NOTE(review): raw_input is Python 2 only; confirm the target ROS
        # environment runs Python 2 (under Python 3 this would be input()).
        raw_input("Press Enter to start!")
        # OPERATION
        omanip.detect_catch_routine()
    except rospy.ROSInterruptException:
        # ROS shutdown requested mid-routine: exit quietly.
        pass
    except KeyboardInterrupt:
        pass
741163dbbe679412527cbd7810452d4d457b3e59 | 780 | py | Python | tests/test_read.py | markreidvfx/pyavb | 48b05a84ff9752703a9218d5ca157ee672b020ab | [
"MIT"
] | 21 | 2019-02-05T22:08:32.000Z | 2022-03-07T03:37:59.000Z | tests/test_read.py | markreidvfx/pyavb | 48b05a84ff9752703a9218d5ca157ee672b020ab | [
"MIT"
] | 10 | 2019-07-25T04:03:44.000Z | 2021-08-12T21:31:37.000Z | tests/test_read.py | markreidvfx/pyavb | 48b05a84ff9752703a9218d5ca157ee672b020ab | [
"MIT"
] | 1 | 2020-02-20T15:08:40.000Z | 2020-02-20T15:08:40.000Z | from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
import os
import unittest
import avb
import avb.utils
test_file_01 = os.path.join(os.path.dirname(__file__), 'test_files', 'test_file_01.avb')
class TestRead(unittest.TestCase):
    """Smoke tests: the sample file opens and its known objects can be read."""

    def test_basic(self):
        # Iterating the mob list is enough to exercise the basic readers.
        with avb.open(test_file_01) as f:
            for _mob in f.content.mobs:
                pass

    def test_read_all_known_classes(self):
        # Decode every chunk whose class id the library claims to know.
        with avb.open(test_file_01) as f:
            for index, chunk in enumerate(f.chunks()):
                if chunk.class_id in avb.utils.AVBClaseID_dict:
                    f.read_object(index)
if __name__ == "__main__":
    # Allow running the tests directly: python tests/test_read.py
    unittest.main()
| 22.285714 | 88 | 0.608974 | 103 | 780 | 4.262136 | 0.495146 | 0.072893 | 0.091116 | 0.068337 | 0.14123 | 0.14123 | 0.14123 | 0.14123 | 0.14123 | 0.14123 | 0 | 0.014599 | 0.297436 | 780 | 34 | 89 | 22.941176 | 0.786496 | 0.029487 | 0 | 0.086957 | 0 | 0 | 0.045093 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0.043478 | 0.26087 | 0 | 0.391304 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7411a2ab5983624eb22485823a916a1cbef00994 | 2,278 | py | Python | game.py | akash1309/Rod-and-Ball-Game | cd492ca8fea0cbb391de44c44b71984efe0e01ab | [
"MIT"
] | null | null | null | game.py | akash1309/Rod-and-Ball-Game | cd492ca8fea0cbb391de44c44b71984efe0e01ab | [
"MIT"
] | null | null | null | game.py | akash1309/Rod-and-Ball-Game | cd492ca8fea0cbb391de44c44b71984efe0e01ab | [
"MIT"
] | null | null | null | import sys, pygame
from pygame.locals import *
import time
import pygame.freetype
# --- one-time setup -------------------------------------------------------
pygame.init()
size = width, height = 800, 600   # window dimensions
ball_speed = [3, 7]               # per-frame ball velocity (x, y)
black = 0, 0, 0
rod_speed = [5,0]                 # per-frame rod velocity (x only)
score = 1
green = (0, 255, 0)
blue = (0, 0, 128)
white = (255, 255, 255)
screen = pygame.display.set_mode(size)

#ball loading and shortning its image size
ball_img = pygame.image.load("image.jpeg")
ball = pygame.transform.scale(ball_img,(80,50))
ballrect = ball.get_rect()

#setting the caption for game
pygame.display.set_caption("Rod and Ball Game")

#rod loading and shortning its image size
rod_img = pygame.image.load("rod.png")
rod = pygame.transform.scale(rod_img,(100,20))
rodrect = rod.get_rect()

font = pygame.font.Font('freesansbold.ttf', 32)

# --- main game loop -------------------------------------------------------
while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()

    #image collision function added
    if ballrect.colliderect(rodrect):
        score +=1
        #print(score)
    elif ballrect.top < 0:
        # Ball escaped past the top edge: show the final score, then exit.
        print(score)
        screen.fill(white)
        text = font.render('Your Score ' + str(score), True, green, blue)
        textRect = text.get_rect()
        textRect.center = (width // 2, height // 2)
        screen.blit(text, textRect)
        pygame.display.update()
        pygame.quit()
        pygame.time.delay(2000)
        quit()

    #ball code: move and bounce off the window edges
    ballrect = ballrect.move(ball_speed)
    if ballrect.left < 0 or ballrect.right > width:
        ball_speed[0] = -ball_speed[0]
    if ballrect.top < 0 or ballrect.bottom > height:
        ball_speed[1] = -ball_speed[1]

    #rod code
    # NOTE(review): 'event' here is whatever the loop above last bound (stale,
    # or undefined on an empty queue in the first frame) -- confirm intended.
    if event.type == pygame.KEYDOWN:
        # Arrow keys flip the sign of the rod's horizontal velocity.
        if event.key == pygame.K_RIGHT:
            if rod_speed[0] > 0:
                rod_speed[0] = -rod_speed[0]
        elif event.key == pygame.K_LEFT:
            if rod_speed[0] < 0:
                rod_speed[0] = -rod_speed[0]
    rodrect = rodrect.move(rod_speed)
    if rodrect.left < 0 or rodrect.right > width:
        rod_speed[0] = -rod_speed[0]

    #screen pixels and setting adjustment
    screen.fill(black)
    screen.blit(ball, ballrect)
    screen.blit(rod, rodrect)
    #turning full image upside down
    screen.blit(pygame.transform.rotate(screen, 180), (0, 0))
    pygame.display.flip()
| 25.032967 | 73 | 0.627744 | 323 | 2,278 | 4.343653 | 0.325077 | 0.057021 | 0.051319 | 0.035638 | 0.099786 | 0.099786 | 0.042766 | 0.042766 | 0.042766 | 0.042766 | 0 | 0.043681 | 0.246269 | 2,278 | 90 | 74 | 25.311111 | 0.773442 | 0.102722 | 0 | 0.083333 | 0 | 0 | 0.029975 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0.016667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74121ac3394a3fef86202bc8cb9f42490a0298e6 | 888 | py | Python | oasis/clipper_offsetting.py | AmericaMakes/OASIS-marcwang | 7aa10040251d7a1b807a773a45d123e1a52faac5 | [
"BSD-2-Clause"
] | 1 | 2021-03-07T14:47:09.000Z | 2021-03-07T14:47:09.000Z | oasis/clipper_offsetting.py | AmericaMakes/OASIS-marcwang | 7aa10040251d7a1b807a773a45d123e1a52faac5 | [
"BSD-2-Clause"
] | null | null | null | oasis/clipper_offsetting.py | AmericaMakes/OASIS-marcwang | 7aa10040251d7a1b807a773a45d123e1a52faac5 | [
"BSD-2-Clause"
] | 1 | 2021-03-07T00:24:57.000Z | 2021-03-07T00:24:57.000Z | import pyclipper
from shapely.geometry import Polygon
from typing import List
from oasis.util import clean_polygon, convert_to_clipper, convert_from_clipper
class ClipperOffsetting():
    """Inward offsetting of a shapely Polygon via pyclipper's PyclipperOffset."""

    def __init__(self, poly : Polygon, clipper_scale : int = 1000) -> None:
        # Clipper works on integer coordinates: floats are scaled up by
        # clipper_scale before offsetting and scaled back down afterwards.
        self.poly = clean_polygon(poly)
        self.clipper_scale = clipper_scale
        self.offsetter = pyclipper.PyclipperOffset()
        scaled_paths = convert_to_clipper(self.poly, self.clipper_scale)
        self.offsetter.AddPaths(scaled_paths, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)

    def get_offset(self, dist : float, n_contour: int = 1) -> List[Polygon]:
        """Return n_contour polygons; the i-th is offset inward by i*dist (i from 0)."""
        contours = []
        for step in range(n_contour):
            raw = self.offsetter.Execute(-1 * dist * step * self.clipper_scale)
            contours.append(convert_from_clipper(raw, self.clipper_scale))
        return contours
7412ae64b971ee798f8e6d33e8212d9bb55ee3b7 | 2,798 | py | Python | src/py/args_definition.py | snwjas/RandomDesktopBackground-WEBUI | 1e34fb38a1088687987a9c2acd0ce5386b70489c | [
"MIT"
] | 17 | 2022-02-16T11:16:05.000Z | 2022-03-17T07:44:12.000Z | src/py/args_definition.py | snwjas/RandomDesktopBackground-WEBUI | 1e34fb38a1088687987a9c2acd0ce5386b70489c | [
"MIT"
] | 1 | 2022-03-11T08:40:05.000Z | 2022-03-12T02:04:06.000Z | src/py/args_definition.py | snwjas/RandomDesktopBackground-WEBUI | 1e34fb38a1088687987a9c2acd0ce5386b70489c | [
"MIT"
] | 4 | 2022-02-17T11:10:09.000Z | 2022-03-18T00:45:07.000Z | # -*-coding:utf-8-*-
"""
Definition, retrieval and parsing of program launch arguments.

@author Myles Yang
"""
import argparse

import const_config as const

""" 设置命令参数时的KEY """
# Argument key: how the program is run
ARG_KEY_RUN = ARG_RUN = '--run'
# Argument key: how the program records its logs
ARG_KEY_LOG = ARG_LOG = '--log'
# Argument key: create a shortcut for the program
ARG_KEY_LNK = ARG_LNK = '--lnk'
# Argument key: launch environment
ARG_KEY_ENV = ARG_ENV = '--env'
# Argument key: command executable while the program is running
ARG_KEY_CMD = ARG_CMD = '--cmd'

""" --run 命令参数选项 """
# start in a console window
ARG_RUN_TYPE_CONSOLE = 'console'
# start the console in the background
ARG_RUN_TYPE_BACKGROUND = 'background'
# start automatically at boot, console in the background
ARG_RUN_TYPE_POWERBOOT = 'powerboot'
# start the web UI
ARG_RUN_TYPE_WEBUI = 'webui'
# create a shortcut
ARG_RUN_TYPE_LNK = 'lnk'
# send a runtime command
ARG_RUN_TYPE_CMD = 'cmd'
# valid choices for --run
CHOICES_ARG_RUN_TYPE = [ARG_RUN_TYPE_CONSOLE, ARG_RUN_TYPE_BACKGROUND, ARG_RUN_TYPE_POWERBOOT,
                        ARG_RUN_TYPE_WEBUI, ARG_RUN_TYPE_LNK, ARG_RUN_TYPE_CMD]

""" --log 命令参数选项 """
# log to the console only
ARG_LOG_TYPE_CONSOLE = 'console'
# log to a file only
ARG_LOG_TYPE_FILE = 'file'
# log to both file and console
ARG_LOG_TYPE_BOTH = 'both'
# disable logging
ARG_LOG_TYPE_NONE = 'none'
# valid choices for --log
CHOICES_ARG_LOG_TYPE = [ARG_LOG_TYPE_CONSOLE, ARG_LOG_TYPE_FILE, ARG_LOG_TYPE_BOTH, ARG_LOG_TYPE_NONE]

""" --env 命令参数选项 """
# production environment
ARG_ENV_TYPE_PROD = 'prod'
# development environment
ARG_ENV_TYPE_DEV = 'dev'
# valid choices for --env
CHOICES_ARG_ENV_TYPE = [ARG_ENV_TYPE_PROD, ARG_ENV_TYPE_DEV]

""" --cmd 命令参数选项 """
# next wallpaper
ARG_CMD_TYPE_NXT = 'nxt'
# previous wallpaper
ARG_CMD_TYPE_PRE = 'pre'
# add the current wallpaper to favorites
ARG_CMD_TYPE_FAV = 'fav'
# locate the current wallpaper on disk
ARG_CMD_TYPE_LOC = 'loc'
# valid choices for --cmd
CHOICES_ARG_CMD_TYPE = [ARG_CMD_TYPE_NXT, ARG_CMD_TYPE_PRE, ARG_CMD_TYPE_FAV, ARG_CMD_TYPE_LOC]

"""
定义命令行输入参数
"""
# Build the command-line parser.  The Chinese help strings below are
# user-facing runtime output and are deliberately left untouched.
parser = argparse.ArgumentParser(
    prog=const.app_name,
    description='{}命令行参数'.format(const.app_name),
)
parser.add_argument('-r', ARG_RUN,
                    help='指定程序的运行方式',
                    type=str,
                    choices=CHOICES_ARG_RUN_TYPE,
                    dest=ARG_KEY_RUN
                    )
parser.add_argument('-l', ARG_LOG,
                    help='指定运行日志记录方式',
                    type=str,
                    choices=CHOICES_ARG_LOG_TYPE,
                    dest=ARG_KEY_LOG
                    )
parser.add_argument('-e', ARG_ENV,
                    help='指定程序的运行环境',
                    type=str,
                    choices=CHOICES_ARG_ENV_TYPE,
                    dest=ARG_KEY_ENV,
                    default=ARG_ENV_TYPE_PROD
                    )
parser.add_argument('-s', ARG_LNK,
                    help='根据给的路径创建程序的快捷方式,与--run组合使用',
                    type=str,
                    nargs='*',
                    dest=ARG_KEY_LNK
                    )
parser.add_argument('-c', ARG_CMD,
                    help='运行中可执行指令,与--run组合使用',
                    type=str,
                    choices=CHOICES_ARG_CMD_TYPE,
                    dest=ARG_KEY_CMD
                    )
# Parsed arguments as a plain dict, keyed by the ARG_KEY_* names above.
arg_dict = vars(parser.parse_args())
| 23.512605 | 102 | 0.605432 | 360 | 2,798 | 4.252778 | 0.25 | 0.062704 | 0.091444 | 0.054866 | 0.062704 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0005 | 0.284489 | 2,798 | 118 | 103 | 23.711864 | 0.764236 | 0.086848 | 0 | 0.078125 | 0 | 0 | 0.079385 | 0.010806 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.03125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
741323c5ca5353953499cc57c205c55afeeb9d7b | 693 | py | Python | spec/puzzle/puzzlepedia/annotation_widget_spec.py | PhilHarnish/forge | 663f19d759b94d84935c14915922070635a4af65 | [
"MIT"
] | 2 | 2020-08-18T18:43:09.000Z | 2020-08-18T20:05:59.000Z | spec/puzzle/puzzlepedia/annotation_widget_spec.py | PhilHarnish/forge | 663f19d759b94d84935c14915922070635a4af65 | [
"MIT"
] | null | null | null | spec/puzzle/puzzlepedia/annotation_widget_spec.py | PhilHarnish/forge | 663f19d759b94d84935c14915922070635a4af65 | [
"MIT"
] | null | null | null | from puzzle.constraints import constraints, validator
from puzzle.puzzlepedia import annotation_widget
from spec.mamba import *
# Patch out ipywidgets so widget construction can be observed without a UI.
widget_patch = mock.patch('puzzle.puzzlepedia.annotation_widget.widgets')

class TestConstraints(constraints.Constraints):
  # Single typed field; the widget under test renders one input per field.
  test: str = 'value'
# mamba-style spec: `self` inside these blocks is the spec context object.
with description('annotation_widget'):
  with before.each:
    self.mock_widgets = widget_patch.start()

  with after.each:
    widget_patch.stop()

  with it('renders NumberInRange as a FloatSlider'):
    annotation_widget.AnnotationWidget(
        validator.NumberInRange(0.0, 2.0), TestConstraints(), 'test', 1.0,
        None,
        mock.Mock())
    # The patched widgets module records the FloatSlider construction.
    expect(self.mock_widgets.FloatSlider).to(have_been_called)
| 26.653846 | 74 | 0.746032 | 83 | 693 | 6.096386 | 0.506024 | 0.126482 | 0.059289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010169 | 0.148629 | 693 | 25 | 75 | 27.72 | 0.847458 | 0 | 0 | 0 | 0 | 0 | 0.155844 | 0.063492 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.176471 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7413824fcd9d05cbbace3c88ba8b9036d58fbdad | 1,805 | py | Python | PRESUBMIT.py | Acidburn0zzz/trace-viewer | c4d7cee712b0306afc564787085cff76fd5bb5d9 | [
"BSD-3-Clause"
] | 2 | 2015-02-07T05:19:08.000Z | 2016-12-12T21:17:50.000Z | PRESUBMIT.py | Acidburn0zzz/trace-viewer | c4d7cee712b0306afc564787085cff76fd5bb5d9 | [
"BSD-3-Clause"
] | null | null | null | PRESUBMIT.py | Acidburn0zzz/trace-viewer | c4d7cee712b0306afc564787085cff76fd5bb5d9 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Paths exempted from the canned pan-project presubmit checks.
_EXCLUDED_PATHS = []

# Per-line regex fragments matching the required Chromium license header.
_LICENSE_HEADER = (
     r".*? Copyright \(c\) 20\d\d The Chromium Authors\. All rights reserved\."
       "\n"
     r".*? Use of this source code is governed by a BSD-style license that can "
       "be\n"
     r".*? found in the LICENSE file\."
       "\n"
)
def _CommonChecksImpl(input_api, output_api):
  """Run every presubmit check and return the accumulated result list."""
  results = []
  results.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, excluded_paths=_EXCLUDED_PATHS))

  # Imports are local: the repo root is only on sys.path inside _CommonChecks.
  from trace_viewer import build
  from tvcm import presubmit_checker
  checker = presubmit_checker.PresubmitChecker(input_api, output_api)
  results.extend(checker.RunChecks())

  from trace_viewer.build import check_gyp
  gyp_result = check_gyp.GypCheck()
  if len(gyp_result) > 0:
    results.append(output_api.PresubmitError(gyp_result))

  from trace_viewer.build import check_gn
  gn_result = check_gn.GnCheck()
  if len(gn_result) > 0:
    results.append(output_api.PresubmitError(gn_result))

  # License-header check over all non-blacklisted source files.
  black_list = input_api.DEFAULT_BLACK_LIST
  sources = lambda f: input_api.FilterSourceFile(f, black_list=black_list)
  results.extend(input_api.canned_checks.CheckLicense(
      input_api, output_api, _LICENSE_HEADER,
      source_file_filter=sources))
  return results
def _CommonChecks(input_api, output_api):
  """Run the shared checks with the repo root temporarily on sys.path."""
  repo_root = input_api.change.RepositoryRoot()
  sys.path.append(repo_root)
  try:
    return _CommonChecksImpl(input_api, output_api)
  finally:
    # Always undo the sys.path mutation, even if a check raises.
    sys.path.remove(repo_root)
def CheckChangeOnUpload(input_api, output_api):
  # Presubmit entry point invoked by `git cl upload`.
  return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
  # Presubmit entry point invoked at commit/land time.
  return _CommonChecks(input_api, output_api)
| 28.650794 | 77 | 0.750693 | 252 | 1,805 | 5.107143 | 0.337302 | 0.09324 | 0.10878 | 0.13209 | 0.500389 | 0.327894 | 0.167832 | 0.167832 | 0.167832 | 0.167832 | 0 | 0.005263 | 0.157895 | 1,805 | 62 | 78 | 29.112903 | 0.841447 | 0.088089 | 0 | 0.090909 | 0 | 0 | 0.11084 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.136364 | 0.045455 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7418ced2999b5c9ae7697c9504a1ff1a23adb71d | 2,437 | py | Python | prototype/webapp.py | Tethik/whistleblower | 56747cbf3c4eda95cee7eded36b4a853d33d6ee3 | [
"MIT"
] | 1 | 2016-06-20T12:35:42.000Z | 2016-06-20T12:35:42.000Z | prototype/webapp.py | Tethik/whistleblower | 56747cbf3c4eda95cee7eded36b4a853d33d6ee3 | [
"MIT"
] | null | null | null | prototype/webapp.py | Tethik/whistleblower | 56747cbf3c4eda95cee7eded36b4a853d33d6ee3 | [
"MIT"
] | null | null | null | import os
from flask import Flask, request, redirect, url_for, render_template, send_from_directory, jsonify
from werkzeug import secure_filename
import uuid
app = Flask(__name__)
# Storage locations, relative to the working directory.
app.config['UPLOAD_FOLDER'] = 'upload/'   # anonymous submissions
app.config['REPLY_FOLDER'] = 'replies/'   # replies keyed by codename
app.config['CLIENT_FOLDER'] = 'client/'   # static front-end assets
def upload_file_to_folder(folder, filename):
    """Save the current request's 'content' form field to folder/filename.

    Returns True when content was non-empty and written, False otherwise.
    """
    content = request.form['content']
    if not content:
        return False
    # FIX: context manager guarantees the handle is closed even if write()
    # raises; the original leaked the file object on error.
    with open(os.path.join(folder, filename), "w+") as file:
        file.write(content)
    return True
@app.route('/api/reply_to/<codename>', methods=['POST'])
def reply(codename):
    """Store a reply for the given codename (spaces and '+' stripped)."""
    normalized = "".join(codename.strip().replace("+", "").split(" "))
    filename = secure_filename(normalized + ".json")
    print(filename)
    if upload_file_to_folder(app.config['REPLY_FOLDER'], filename):
        return "Ok"
    return "Content is empty", 405
@app.route('/api/fetch_reply/<codename>', methods=['GET'])
def fetch_reply(codename):
    """Serve the stored reply file for the given codename (same normalization as reply())."""
    normalized = "".join(codename.strip().replace("+", "").split(" "))
    filename = secure_filename(normalized + ".json")
    return send_from_directory(app.config['REPLY_FOLDER'], filename)
@app.route('/api/submit', methods=['POST'])
def submit():
    """Store an uploaded submission under a fresh UUID; return that UUID."""
    submission_id = str(uuid.uuid4())
    filename = secure_filename(submission_id + ".asc")
    if upload_file_to_folder(app.config['UPLOAD_FOLDER'], filename):
        return submission_id
    return "Content is empty", 405
@app.route('/api/submissions', methods=['GET'])
def list_submissions():
    """List stored submission filenames as JSON, newest first (by mtime)."""
    folder = app.config['UPLOAD_FOLDER']
    names = os.listdir(folder)
    names.remove(".gitignore")
    mtime = lambda name: os.stat(os.path.join(folder, name)).st_mtime
    return jsonify(files=sorted(names, key=mtime, reverse=True))
@app.route('/api/submissions/<path:path>', methods=['GET'])
def fetch_submission(path):
    # Serve a stored submission; send_from_directory guards against
    # path traversal outside the upload folder.
    return send_from_directory(app.config['UPLOAD_FOLDER'], path)
@app.route('/')
def default():
    # Root URL serves the client's index page.
    return serve_client('index.html')
@app.route('/<path:path>')
def serve_client(path):
    # Static file server for the bundled client assets.
    return send_from_directory(app.config['CLIENT_FOLDER'], path)
if __name__ == "__main__":
    # Development server only; debug=True must not be used in production.
    app.run(port=5011, debug=True)
| 31.24359 | 98 | 0.664341 | 310 | 2,437 | 5.045161 | 0.277419 | 0.063299 | 0.057545 | 0.080563 | 0.495524 | 0.392583 | 0.328645 | 0.209719 | 0.16624 | 0.107417 | 0 | 0.006886 | 0.165778 | 2,437 | 77 | 99 | 31.649351 | 0.76242 | 0.105868 | 0 | 0.113208 | 0 | 0 | 0.172509 | 0.036439 | 0.037736 | 0 | 0 | 0 | 0 | 1 | 0.150943 | false | 0 | 0.075472 | 0.056604 | 0.433962 | 0.018868 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7419bb062ae8d53ccf6a7504895e66cc726771c1 | 346 | pyw | Python | file.pyw | ArezalGame89/Corn-Engine | fb71f501943bd4136268309059e81f0e4f3ceb79 | [
"MIT"
] | null | null | null | file.pyw | ArezalGame89/Corn-Engine | fb71f501943bd4136268309059e81f0e4f3ceb79 | [
"MIT"
] | null | null | null | file.pyw | ArezalGame89/Corn-Engine | fb71f501943bd4136268309059e81f0e4f3ceb79 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import filedialog
import os
def openFile():
    """Prompt for a file with a picker dialog and open it with its default app."""
    filepath = filedialog.askopenfilename(initialdir="/",
                                          title="Open file",
                                          filetypes=(("text files","*.txt"),("all files","*.*")))
    # os.startfile is Windows-only.
    os.startfile(filepath)
# Minimal UI: a single button that launches the file picker.
window = Tk()
button = Button(text="OpenFile",command=openFile)
button.pack()
window.mainloop()
741d95c42594e69744089d0d1e3f7163af1adde1 | 526 | py | Python | P1/plot_VI.py | xyp8023/lab1 | ac9693636f25b8e65236503035d241f124568d42 | [
"MIT"
] | null | null | null | P1/plot_VI.py | xyp8023/lab1 | ac9693636f25b8e65236503035d241f124568d42 | [
"MIT"
] | null | null | null | P1/plot_VI.py | xyp8023/lab1 | ac9693636f25b8e65236503035d241f124568d42 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import argparse
# CLI: --T selects which value-function file (dataForVI/Vn<T>.npy) to plot.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "--T", type=int, default=46,
    help="Time-horizon.")
args = parser.parse_args()
T = args.T
# Load the value function produced by value iteration for horizon T.
data_dir = 'dataForVI'
file_path = data_dir+'/'+'Vn'+str(T)+'.npy'
v = np.load(file_path)
df = pd.DataFrame(v)
# df = pd.read_csv(file_path)
y = df.values
# Plot every state's value, save the figure, then show it interactively.
plt.plot(y)
plt.savefig('plot_value.png')
plt.show()
print('Done') | 25.047619 | 61 | 0.718631 | 81 | 526 | 4.54321 | 0.617284 | 0.065217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004376 | 0.131179 | 526 | 21 | 62 | 25.047619 | 0.800875 | 0.051331 | 0 | 0 | 0 | 0 | 0.100402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
741dcec3224bbbaf709d95744c9d3eef8f6a4ddd | 2,808 | py | Python | main.py | sakunamary/raspi-info | 5a22165505947ef6b687d03ed23d963a2db0fe1f | [
"MIT"
] | null | null | null | main.py | sakunamary/raspi-info | 5a22165505947ef6b687d03ed23d963a2db0fe1f | [
"MIT"
] | null | null | null | main.py | sakunamary/raspi-info | 5a22165505947ef6b687d03ed23d963a2db0fe1f | [
"MIT"
] | null | null | null |
import time
import datetime
import threading
from get_net_info import RaspberryMonitorNetSpeed as rmn
import subprocess
import Adafruit_SSD1306
import Adafruit_GPIO.SPI as SPI
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
# Raspberry Pi pin configuration:
RST = None  # on the PiOLED this pin isn't used
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0
# 128x64 display with hardware I2C:
disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)
# Initialize library.
disp.begin()
# Clear display.
disp.clear()
disp.display()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)
# Layout constants: text starts slightly above the top edge.
padding = -2
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 2
font = ImageFont.load_default()
# First IP address reported by `hostname -I` (bytes, not str).
IP = subprocess.check_output(["hostname", "-I"]).split()[0]
print ('Local IP :'+str(IP))
# Shared [download, upload] speeds, updated by the network_speed() thread.
ns = [-1, -1]
def network_speed():
    """Background worker: poll the router once per second and publish the
    human-readable speeds into the module-level ``ns`` list."""
    global ns
    # NOTE(review): router credentials are hardcoded — should come from
    # a config file or environment variable.
    b = rmn('admin', 'Sakuna0711')
    while True:
        time.sleep(1)
        ns = b.get_human_speed()
#%%
def main():
    """Refresh the OLED once per second with IP, net speed, CPU, memory
    and disk statistics gathered from shell one-liners."""
    # Poll network speed in the background; a daemon thread dies with the
    # process so Ctrl-C still exits cleanly.
    tmp = threading.Thread(target=network_speed)
    tmp.daemon = True  # Thread.setDaemon() is deprecated since Python 3.10
    tmp.start()
    while True:
        try:
            # Blank the frame buffer before redrawing.
            draw.rectangle((0,0,width,height), outline=0, fill=0)
            cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
            CPU = subprocess.check_output(cmd, shell = True )
            cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%sMB %.2f%%\", $3,$2,$3*100/$2 }'"
            MemUsage = subprocess.check_output(cmd, shell = True )
            cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d/%dGB %s\", $3,$2,$5}'"
            Disk = subprocess.check_output(cmd, shell = True )
            # One 8px text row per statistic.
            draw.text((x, top), "IP: " + str(IP), font=font, fill=255)
            draw.text((x, top+8), 'U/S: ' + str(ns[1]), font=font, fill=255)
            draw.text((x, top+16), 'D/S: ' + str(ns[0]), font=font, fill=255)
            draw.text((x, top+25), str(CPU), font=font, fill=255)
            draw.text((x, top+33), str(MemUsage), font=font, fill=255)
            draw.text((x, top+41), str(Disk), font=font, fill=255)
            disp.image(image)
            disp.display()
            time.sleep(1)
        except KeyboardInterrupt:
            exit(0)
#%%
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: exit quietly without a traceback.
        pass
| 27.529412 | 91 | 0.614672 | 406 | 2,808 | 4.187192 | 0.41133 | 0.028235 | 0.031765 | 0.042353 | 0.187059 | 0.187059 | 0.167647 | 0.125294 | 0.045882 | 0.045882 | 0 | 0.044235 | 0.243234 | 2,808 | 101 | 92 | 27.80198 | 0.755765 | 0.187322 | 0 | 0.181818 | 0 | 0 | 0.086131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0.015152 | 0.151515 | 0 | 0.181818 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
742178c1a58413819c231f7bb06c85855868f4e1 | 3,085 | py | Python | main.py | jleightcap/ScrollingScore | 8b87e3571117001ecc9e26c79526f3ece91169d4 | [
"MIT"
] | 8 | 2017-10-04T21:02:36.000Z | 2021-01-22T19:32:27.000Z | main.py | jleightcap/ScrollingScore | 8b87e3571117001ecc9e26c79526f3ece91169d4 | [
"MIT"
] | 1 | 2018-10-30T01:06:37.000Z | 2018-10-30T01:12:23.000Z | main.py | jleightcap/ScrollingScore | 8b87e3571117001ecc9e26c79526f3ece91169d4 | [
"MIT"
] | null | null | null | import os, sys
from PIL import Image
### TERMINAL ARGUMENTS ###
# -h := help
# -q := quiet
# -single := forced one page per slide
args = sys.argv[1:]
# -h, print README and quit
if "-h" in args:
    with open('./README.md') as f:
        print(f.read())
    quit()
# -q, toggle print statements (loud=False silences progress output)
loud = True
if "-q" in args:
    loud = False
# -single, toggle forced single image per slide
double = True
if "-single" in args:
    double = False
def verifyDirectory(dirname):
    """Ensure *dirname* exists, creating it (and any missing parents) if
    needed; print a message and abort the script on failure."""
    try:
        # exist_ok avoids the check-then-create race of the previous
        # isdir()+mkdir() pattern; makedirs also creates missing parents.
        os.makedirs(dirname, exist_ok=True)
    except OSError:
        if loud: print("Could not create {} directory.".format(dirname))
        quit()
# Make sure input (Sheets) and output (Slides) directories exist.
verifyDirectory('./Sheets')
if not os.listdir('./Sheets'): # Empty sheets directory
    if loud: print("No images to convert.")
verifyDirectory('./Slides')
### IMAGE MANIPULATION ###
# Is better suited for a double slide (two tall images side by side)?
def isTall(img):
    """True when *img* is narrower than 16:9, i.e. two such pages could
    sit side by side on one slide."""
    width, height = img.size
    return width / height < 16 / 9
def bgImg(size):
    """Return a solid-white RGB background image of the given (w, h)."""
    white = (255, 255, 255)
    return Image.new('RGB', size, white)
def singleImage(img):
    """Letterbox *img* onto a white 16:9 canvas, centered."""
    width, height = img.size
    if width / height > 16 / 9:
        # Wider than 16:9 -> pad vertically.
        canvas_size = width, int((9 / 16) * width)
    else:
        # Taller or equal -> pad horizontally.
        canvas_size = int((16 / 9) * height), height
    canvas = bgImg(canvas_size)
    offset = (
        int((canvas_size[0] - width) / 2),
        int((canvas_size[1] - height) / 2),
    )
    canvas.paste(img, offset)  # centered on the background
    return canvas
def twoImage(img1, img2):
    """Place img1 and img2 side by side on a white canvas (the shorter one
    centered vertically), then letterbox the result to 16:9."""
    # img1 = Image.open('./Sheets/{}'.format(img1))
    # img2 = Image.open('./Sheets/{}'.format(img2))
    W1, H1 = img1.size
    W2, H2 = img2.size
    imgBG = bgImg((W1 + W2, max(H1, H2)))
    if H1 < H2:
        # img1 is shorter: center it; img2 sits flush at the top.
        imgBG.paste(img1, (0,int((H2-H1)/2)))
        imgBG.paste(img2, (W1,0))
    else: # H1 = H2 reduces to either case.
        imgBG.paste(img1, (0,0))
        imgBG.paste(img2, (W1,int((H1-H2)/2)))
    return singleImage(imgBG)
def main():
    """Convert every sheet image in ./Sheets into 16:9 slides in ./Slides,
    pairing two tall pages per slide unless -single was given."""
    imageFormats = ('.jpg', '.png') # If adding image formats, check compatibility with PIL.
    pages = list(filter(lambda x: x.endswith(imageFormats), sorted(os.listdir('./Sheets'))))
    pages = list(map(lambda x: Image.open('./Sheets/{}'.format(x)), pages))
    os.chdir('./Slides')
    filenum = 0
    if double:
        while pages:
            if not pages[1:]:
                # Exactly one page left: it gets its own slide.
                singleImage(pages[0]).save('{}.png'.format(filenum))
                if loud: print('e',pages[0])
                break
            elif isTall(pages[0]) and isTall(pages[1]):
                # Two consecutive tall pages fit side by side.
                twoImage(pages[0], pages[1]).save('{}.png'.format(filenum))
                if loud: print('d',pages[0],pages[1])
                pages = pages[2:]
            else:
                singleImage(pages[0]).save('{}.png'.format(filenum))
                if loud: print('s',pages[0])
                # BUG FIX: consume the processed page — without this the
                # loop never terminated for a wide page with pages after it.
                pages = pages[1:]
            filenum += 1
    else: # -single
        for page in pages:
            singleImage(page).save('{}.png'.format(filenum))
            filenum += 1
# Script entry point.
if __name__ == "__main__":
    main()
| 29.663462 | 93 | 0.540032 | 408 | 3,085 | 4.063725 | 0.32598 | 0.025332 | 0.033173 | 0.048251 | 0.076598 | 0.076598 | 0.076598 | 0.057901 | 0.057901 | 0.057901 | 0 | 0.036372 | 0.295948 | 3,085 | 103 | 94 | 29.951456 | 0.72698 | 0.189951 | 0 | 0.135135 | 0 | 0 | 0.071851 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0 | 0.027027 | 0.027027 | 0.162162 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7423abd661f73d484f9731e84a27b1c7ed10e42f | 1,372 | py | Python | imp_flask/views/imp_flask/products.py | thijsmie/imp_flask | 2cad9f628c54572c02a5f893ef9c0cb1149b18ea | [
"MIT"
] | 1 | 2019-06-13T07:28:02.000Z | 2019-06-13T07:28:02.000Z | imp_flask/views/imp_flask/products.py | k-automation/imp_flask | 2cad9f628c54572c02a5f893ef9c0cb1149b18ea | [
"MIT"
] | 4 | 2016-12-06T11:25:36.000Z | 2021-04-29T07:43:14.000Z | imp_flask/views/imp_flask/products.py | k-automation/imp_flask | 2cad9f628c54572c02a5f893ef9c0cb1149b18ea | [
"MIT"
] | 2 | 2017-09-06T14:26:04.000Z | 2018-02-03T18:55:36.000Z | from flask import render_template
from flask_login import login_required
from imp_flask.blueprints import products
from imp_flask.models.imps import Product, Mod
from imp_flask.forms.product import Product as ProductForm
@products.route('/', defaults=dict(page=1))
@products.route('/page/<int:page>')
@login_required
def index(page):
    """List all products, 20 per page, showing each product's group."""
    page_number = max(page, 1)  # clamp non-positive page numbers to 1
    ordered = Product.query.order_by('id')
    pagination = ordered.paginate(page_number, per_page=20, error_out=False)
    return render_template('imp_flask_products.html', showgroup=True, pagination=pagination)
@products.route('/group/<group>', defaults=dict(page=1))
@products.route('/group/<group>/page/<int:page>')
@login_required
def showgroup(group, page):
    """List the products of a single *group*, 20 per page."""
    page_number = max(page, 1)  # clamp non-positive page numbers to 1
    in_group = Product.query.filter(Product.group == group).order_by('id')
    pagination = in_group.paginate(page_number, per_page=20, error_out=False)
    return render_template('imp_flask_products.html', showgroup=False, pagination=pagination)
@products.route('/add')
@login_required
def addproduct():
    """Render the new-product form with gain/lose mod choices."""
    form = ProductForm()
    # Fetch the mods once and reuse them; previously Mod.query.all() was
    # executed twice (once for the choices, once for the template).
    mods = Mod.query.all()
    modlist = [(mod.id, mod.name) for mod in mods]
    form.gainmods.choices = modlist
    form.losemods.choices = modlist
    if form.validate_on_submit():
        return 'sumtin'
    return render_template('imp_flask_newproduct.html', form=form, mods=mods)
| 31.906977 | 122 | 0.706268 | 183 | 1,372 | 5.153005 | 0.333333 | 0.050901 | 0.038176 | 0.073171 | 0.426299 | 0.396607 | 0.275716 | 0.275716 | 0.275716 | 0.195122 | 0 | 0.008711 | 0.163265 | 1,372 | 42 | 123 | 32.666667 | 0.812718 | 0 | 0 | 0.225806 | 0 | 0 | 0.109857 | 0.075997 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.16129 | 0 | 0.387097 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74242d1feb64459e2d3f13e6fe80316eb9b3ead9 | 370 | py | Python | Section 3/cisco_iosxr_napalm_example.py | magnusmel/-Hands-on-Network-Programming-with-Python | f1d8856791aed3392ac0ee280e4d3470afa63e9c | [
"MIT"
] | 8 | 2018-07-05T09:22:11.000Z | 2021-11-08T13:11:00.000Z | Section 3/cisco_iosxr_napalm_example.py | magnusmel/-Hands-on-Network-Programming-with-Python | f1d8856791aed3392ac0ee280e4d3470afa63e9c | [
"MIT"
] | null | null | null | Section 3/cisco_iosxr_napalm_example.py | magnusmel/-Hands-on-Network-Programming-with-Python | f1d8856791aed3392ac0ee280e4d3470afa63e9c | [
"MIT"
] | 6 | 2018-11-02T04:00:47.000Z | 2021-04-22T12:01:54.000Z | #
# Example From:
# https://xrdocs.io/application-hosting/tutorials/2016-08-15-netmiko-and-napalm-with-ios-xr-quick-look/
#
from napalm import get_network_driver
import pprint
# Open a NAPALM session to an IOS-XR device and dump its facts/interfaces.
driver = get_network_driver('iosxr')
# NOTE(review): hardcoded lab credentials — fine for a tutorial, not for prod.
device = driver('172.16.1.13', 'cisco', 'cisco')
device.open()
pprint.pprint(device.get_facts())
pprint.pprint(device.get_interfaces())
device.close()
| 21.764706 | 103 | 0.751351 | 54 | 370 | 5.037037 | 0.648148 | 0.073529 | 0.117647 | 0.154412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047198 | 0.083784 | 370 | 16 | 104 | 23.125 | 0.755162 | 0.313514 | 0 | 0 | 0 | 0 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
742519fcfb7dd6c47cbb93d0049edd5300973d43 | 1,598 | py | Python | textcrawler/textcrawler/spiders/voz.py | rootofmylife/vietnamese-text-generation | f95fa3667a908ce213c0c24ac2cf5ed26ec5fa7c | [
"MIT"
] | null | null | null | textcrawler/textcrawler/spiders/voz.py | rootofmylife/vietnamese-text-generation | f95fa3667a908ce213c0c24ac2cf5ed26ec5fa7c | [
"MIT"
] | null | null | null | textcrawler/textcrawler/spiders/voz.py | rootofmylife/vietnamese-text-generation | f95fa3667a908ce213c0c24ac2cf5ed26ec5fa7c | [
"MIT"
] | null | null | null | import scrapy
class VozSpider(scrapy.Spider):
    """Crawl thread titles and post bodies from voz.vn's
    'chuyen-tro-linh-tinh' forum, exporting them as UTF-8 JSON."""
    name = 'voz'
    start_urls = ['https://voz.vn/f/chuyen-tro-linh-tinh.17/']
    # One timestamped JSON feed file per run.
    custom_settings = { 'FEED_URI': "voz_%(time)s.json",
        'FEED_FORMAT': 'json',
        'FEED_EXPORT_ENCODING': 'utf-8'}

    def parse(self, response):
        """Collect thread URLs from a forum listing page and follow pagination."""
        print("Current URL: {}".format(response.url))
        # Crawl limiter: stop once the listing reaches page 2.
        if "https://voz.vn/f/chuyen-tro-linh-tinh.17/page-2" in response.url:
            return
        post_urls = response.xpath('//div[@class="structItem-title"]//a/@href').extract()
        for url_item in post_urls:
            yield scrapy.Request('https://voz.vn' + url_item, callback=self.content_parse)
        next_page = response.xpath('//a[contains(@class, "pageNav-jump--next")]//@href').get()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)

    def content_parse(self, response):
        """Emit one item per thread page (posts joined by [_SEP_]), then
        follow the thread's own pagination."""
        yield {
            'url': response.url,
            'title': response.xpath('//h1[contains(@class, "p-title-value")]/text()').get().strip(),
            # Post bodies only — blockquote text (quoted replies) is excluded.
            'text': '[_SEP_]'.join(response.xpath('//article[@class="message-body js-selectToQuote"]//div[contains(@class, "bbWrapper")]/text()[not(ancestor::blockquote)]').extract()).strip(),
        }
        next_page = response.xpath('//a[contains(@class, "pageNav-jump--next")]//@href').get()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.content_parse)
| 44.388889 | 196 | 0.597622 | 197 | 1,598 | 4.715736 | 0.401015 | 0.086114 | 0.068891 | 0.023681 | 0.376749 | 0.376749 | 0.376749 | 0.376749 | 0.376749 | 0.312164 | 0 | 0.005673 | 0.227785 | 1,598 | 35 | 197 | 45.657143 | 0.747164 | 0 | 0 | 0.214286 | 0 | 0.071429 | 0.319149 | 0.163329 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.035714 | 0 | 0.285714 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7426de94b22c4fbcf84a5f14e77b0aacfb42e96e | 4,853 | py | Python | tests/route53/test_route53.py | kazhala/fawsf | 4abefb2301f7b489b11ed3f0b303faafa5941d5b | [
"MIT"
] | 66 | 2020-07-26T12:43:30.000Z | 2022-01-25T12:09:40.000Z | tests/route53/test_route53.py | kazhala/fawsf | 4abefb2301f7b489b11ed3f0b303faafa5941d5b | [
"MIT"
] | null | null | null | tests/route53/test_route53.py | kazhala/fawsf | 4abefb2301f7b489b11ed3f0b303faafa5941d5b | [
"MIT"
] | 3 | 2020-07-26T22:09:45.000Z | 2020-07-28T01:09:26.000Z | import unittest
import os
import io
import sys
from unittest.mock import patch
from fzfaws.route53 import Route53
from fzfaws.utils import Pyfzf, FileLoader
from botocore.paginate import Paginator
from pathlib import Path
class TestRoute53(unittest.TestCase):
    """Unit tests for fzfaws.route53.Route53 with fzf and boto3 mocked out."""

    def setUp(self):
        # Load the test fzfaws config and silence stdout for the duration
        # of each test; tearDown restores it.
        fileloader = FileLoader()
        config_path = Path(__file__).resolve().parent.joinpath("../data/fzfaws.yml")
        fileloader.load_config_file(config_path=str(config_path))
        capturedOutput = io.StringIO()
        sys.stdout = capturedOutput
        self.route53 = Route53()

    def tearDown(self):
        sys.stdout = sys.__stdout__

    def test_constructor(self):
        """Defaults and explicit profile/region are stored as given."""
        self.assertEqual(self.route53.zone_ids, [""])
        self.assertEqual(self.route53.profile, "default")
        self.assertEqual(self.route53.region, "us-east-1")
        route53 = Route53(profile="root", region="us-west-1")
        self.assertEqual(route53.zone_ids, [""])
        self.assertEqual(route53.profile, "root")
        self.assertEqual(route53.region, "us-west-1")

    # patch decorators apply bottom-up: paginate -> mocked_result,
    # process_list -> mocked_fzf_process, execute_fzf -> mocked_fzf_execute.
    @patch.object(Pyfzf, "execute_fzf")
    @patch.object(Pyfzf, "process_list")
    @patch.object(Paginator, "paginate")
    def test_set_zone_id(self, mocked_result, mocked_fzf_process, mocked_fzf_execute):
        """set_zone_id strips '/hostedzone/' prefixes and honors its kwargs."""
        # Fake list_hosted_zones page as returned by the boto3 paginator.
        mocked_result.return_value = [
            {
                "ResponseMetadata": {"HTTPStatusCode": 200, "RetryAttempts": 0,},
                "HostedZones": [
                    {
                        "Id": "/hostedzone/111111",
                        "Name": "bilibonshop.xyz.",
                        "Config": {"PrivateZone": False},
                        "ResourceRecordSetCount": 7,
                    },
                    {
                        "Id": "/hostedzone/222222",
                        "Name": "mealternative.com.",
                        "Config": {
                            "Comment": "HostedZone created by Route53 Registrar",
                            "PrivateZone": False,
                        },
                        "ResourceRecordSetCount": 7,
                    },
                ],
                "IsTruncated": False,
                "MaxItems": "100",
            }
        ]
        # general test
        mocked_fzf_execute.return_value = "111111"
        self.route53.set_zone_id()
        mocked_fzf_process.assert_called_with(
            [
                {"Id": "111111", "Name": "bilibonshop.xyz."},
                {"Id": "222222", "Name": "mealternative.com."},
            ],
            "Id",
            "Name",
        )
        self.assertEqual(self.route53.zone_ids, ["111111"])
        # parameter test: multi_select, explicit list, and a bare string
        self.route53.set_zone_id(multi_select=True)
        self.assertEqual(self.route53.zone_ids, ["111111"])
        self.route53.set_zone_id(zone_ids=["111111", "222222"])
        self.assertEqual(self.route53.zone_ids, ["111111", "222222"])
        self.route53.zone_ids = [""]
        self.route53.set_zone_id(zone_ids="222222")
        self.assertEqual(self.route53.zone_ids, ["222222"])
        # empty result test: fzf still runs but process_list must not
        self.route53.zone_ids = [""]
        mocked_fzf_execute.reset_mock()
        mocked_fzf_process.reset_mock()
        mocked_fzf_execute.return_value = ""
        mocked_result.return_value = []
        self.route53.set_zone_id()
        mocked_fzf_process.assert_not_called()
        mocked_fzf_execute.assert_called_once()
        self.assertEqual(self.route53.zone_ids, [""])

    def test_process_hosted_zone(self):
        """_process_hosted_zone keeps Id (prefix stripped) and Name only."""
        # general
        test_list = [
            {
                "Id": "/hostedzone/111111",
                "Name": "bilibonshop.xyz.",
                "Config": {"PrivateZone": False},
                "ResourceRecordSetCount": 7,
            },
            {
                "Id": "/hostedzone/222222",
                "Name": "mealternative.com.",
                "Config": {
                    "Comment": "HostedZone created by Route53 Registrar",
                    "PrivateZone": False,
                },
                "ResourceRecordSetCount": 7,
            },
        ]
        result = self.route53._process_hosted_zone(test_list)
        self.assertEqual(
            [
                {"Id": "111111", "Name": "bilibonshop.xyz."},
                {"Id": "222222", "Name": "mealternative.com."},
            ],
            result,
        )
        # empty result test
        test_list = []
        result = self.route53._process_hosted_zone(test_list)
        self.assertEqual([], result)
        # missing attr test: absent Name becomes None
        test_list = [
            {"Id": "/hostedzone/111111",},
            {"Id": "/hostedzone/222222",},
        ]
        result = self.route53._process_hosted_zone(test_list)
        self.assertEqual(
            [{"Id": "111111", "Name": None}, {"Id": "222222", "Name": None}], result,
        )
| 34.664286 | 86 | 0.53163 | 440 | 4,853 | 5.654545 | 0.231818 | 0.084003 | 0.050643 | 0.083601 | 0.505225 | 0.432074 | 0.403135 | 0.316318 | 0.316318 | 0.282556 | 0 | 0.062422 | 0.33979 | 4,853 | 139 | 87 | 34.913669 | 0.714107 | 0.018339 | 0 | 0.352941 | 0 | 0 | 0.172167 | 0.018499 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.042017 | false | 0 | 0.07563 | 0 | 0.12605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
742a3ab0f1445fb6ee395010f779ab9f70f5457e | 6,504 | py | Python | rs_course/utils.py | inpefess/recommender-systems-course | 1f890b12e0b6c6ffefe7822a8b878c322c469543 | [
"Apache-2.0"
] | null | null | null | rs_course/utils.py | inpefess/recommender-systems-course | 1f890b12e0b6c6ffefe7822a8b878c322c469543 | [
"Apache-2.0"
] | 11 | 2022-01-31T11:05:16.000Z | 2022-03-06T19:45:40.000Z | rs_course/utils.py | inpefess/recommender-systems-course | 1f890b12e0b6c6ffefe7822a8b878c322c469543 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021-2022 Boris Shminke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Useful Function for the Whole Course
====================================
"""
from typing import Optional, Tuple
import numpy as np
import pandas as pd
from implicit.nearest_neighbours import ItemItemRecommender
from rs_datasets import MovieLens
from rs_metrics import hitrate
from scipy.sparse import csr_matrix
from tqdm import tqdm
def pandas_to_scipy(
    pd_dataframe: pd.DataFrame,
    data_name: str,
    rows_name: str,
    cols_name: str,
    shape: Tuple[int, int],
) -> csr_matrix:
    """
    transform a pandas dataset with three columns to a sparse matrix

    :param pd_dataframe: source dataset
    :param data_name: column name with values for the matrix cells
    :param rows_name: column name with row numbers of the cells
    :param cols_name: column name with column numbers of the cells
    :param shape: a pair (total number of rows, total number of columns)
    :returns: a ``csr_matrix``
    """
    cell_values = pd_dataframe[data_name].astype(float)
    coordinates = (pd_dataframe[rows_name], pd_dataframe[cols_name])
    return csr_matrix((cell_values, coordinates), shape=shape)
def movielens_split(
    ratings: pd.DataFrame,
    train_percentage: float,
    warm_users_only: bool = False,
) -> Tuple[pd.DataFrame, pd.DataFrame, Tuple[int, int]]:
    """
    split ``ratings`` dataset to train and test by timestamp

    :param ratings: ratings dataset from MovieLens
    :param train_percentage: percentage of data to put into training dataset
    :param warm_users_only: test on only those users, who were in training set
    :returns: a triple of (train DataFrame, test DataFrame,
        (number of users, number of items))
        (the previous annotation wrongly claimed a ``csr_matrix``)
    """
    # Everything strictly before the quantile timestamp is training data.
    time_split = ratings.timestamp.quantile(train_percentage)  # type: ignore
    train = ratings[ratings.timestamp < time_split]
    test = ratings[ratings.timestamp >= time_split]
    if warm_users_only:
        # Keep only test users who also appear in the training split.
        warm_users = list(set(train.user_id).intersection(set(test.user_id)))
        final_test = test[test.user_id.isin(warm_users)]
    else:
        final_test = test
    return (
        train,
        final_test,
        (ratings.user_id.max() + 1, ratings.item_id.max() + 1),
    )
def evaluate_implicit_recommender(
    recommender: ItemItemRecommender,
    train: csr_matrix,
    test: pd.DataFrame,
    split_test_users_into: int,
    top_k: int,
) -> float:
    """
    compute hit-rate for a recommender from ``implicit`` package

    :param recommender: some recommender from ``implicit`` package
    :param train: sparse matrix of ratings
    :param test: pandas dataset of ratings for testing
    :param split_test_users_into: split ``test`` by users into several chunks
        to fit into memory
    :param top_k: how many items to recommend to each user
    :returns: hitrate@``top_k`` (the old docstring said @10 unconditionally)
    """
    all_recs = []
    # Recommend in user chunks so the score matrices fit into memory.
    test_users_parts = np.array_split(
        test.user_id.unique(), split_test_users_into
    )
    for test_users_part in tqdm(test_users_parts):
        # Batch recommend: arrays of shape (n_users_in_part, top_k).
        item_ids, weights = recommender.recommend(
            test_users_part, train[test_users_part], top_k
        )
        # Flatten to long format: one (item_id, weight) row per recommendation.
        user_recs = pd.DataFrame(
            np.vstack([item_ids.reshape((1, -1)), weights.reshape((1, -1))]).T,
            columns=["item_id", "weight"],
        )
        # Each user in the part contributes exactly top_k consecutive rows.
        user_recs["user_id"] = np.repeat(test_users_part, top_k)
        all_recs.append(user_recs)
    all_recs_pd = pd.concat(all_recs)
    return hitrate(test, all_recs_pd)
def get_sparse_item_features(
    movielens: MovieLens, ratings: pd.DataFrame
) -> Tuple[csr_matrix, pd.DataFrame]:
    """
    extract item features from the ``tags`` dataset and the genre lists

    :param movielens: full MovieLens dataset
    :param ratings: ratings used to restrict items and size the matrix
    :returns: sparse matrix and a `pandas` DataFrame of item features (tags)
    """
    # copy() so the added columns don't mutate (a view of) movielens.items.
    genres_data = movielens.items[["item_id", "genres"]].copy()
    # Genres are treated as pseudo-tags assigned by a dummy user -1.
    genres_data["user_id"] = -1
    genres_data["tag"] = genres_data.genres.str.split("|")
    genres_tags = genres_data.explode("tag")[["item_id", "user_id", "tag"]]
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat is the supported equivalent.
    all_tags = pd.concat(
        [movielens.tags.drop(columns=["timestamp"]), genres_tags]
    )
    # Count how many times each (item, tag) pair occurs for rated items.
    agg_tags = (
        all_tags[all_tags.item_id.isin(ratings.item_id)]
        .groupby(["item_id", "tag"])
        .count()
        .reset_index()
    )
    # Dense integer IDs for tags, used as column indices of the matrix.
    agg_tags["tag_id"] = agg_tags.tag.astype("category").cat.codes
    return (
        pandas_to_scipy(
            agg_tags,
            "user_id",
            "item_id",
            "tag_id",
            (ratings.item_id.max() + 1, agg_tags.tag_id.max() + 1),
        ),
        agg_tags,
    )
def enumerate_users_and_items(ratings: pd.DataFrame) -> None:
    """inplace change of user and item IDs into 1-based category codes"""
    for column in ("user_id", "item_id"):
        codes = ratings[column].astype("category").cat.codes  # type: ignore
        ratings[column] = codes + 1
def filter_users_and_items(
    ratings: pd.DataFrame,
    min_items_per_user: Optional[int],
    min_users_per_item: Optional[int],
) -> pd.DataFrame:
    """
    leave only items with at least ``min_users_per_item`` users who rated
    them and only users who rated at least ``min_items_per_user`` items

    Both thresholds are computed on the *original* ratings, then applied
    one after the other.

    :param min_items_per_user: if ``None`` then don't filter
    :param min_users_per_item: if ``None`` then don't filter
    :returns: filtered ratings dataset
    """
    result = ratings
    if min_items_per_user is not None:
        items_per_user = ratings.groupby("user_id").count().item_id
        active_users = items_per_user[
            items_per_user >= min_items_per_user
        ].reset_index()["user_id"]
        result = result[result.user_id.isin(active_users)]
    if min_users_per_item is not None:
        users_per_item = ratings.groupby("item_id").count().user_id
        popular_items = users_per_item[
            users_per_item >= min_users_per_item
        ].reset_index()["item_id"]
        result = result[result.item_id.isin(popular_items)]
    return result
| 33.875 | 79 | 0.667897 | 880 | 6,504 | 4.715909 | 0.248864 | 0.021687 | 0.018795 | 0.018072 | 0.140241 | 0.064096 | 0.039518 | 0.016867 | 0 | 0 | 0 | 0.004968 | 0.226322 | 6,504 | 191 | 80 | 34.052356 | 0.819754 | 0.338407 | 0 | 0.084746 | 0 | 0 | 0.042558 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.050847 | false | 0 | 0.067797 | 0 | 0.161017 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
742d53fcb251b5c4874903797e2381fec53f64e4 | 1,738 | py | Python | day/6/survey_pt2.py | philparkbot/advent2020 | e5ee7e0703ae82d4c773024e4289d7d37879af35 | [
"MIT"
] | null | null | null | day/6/survey_pt2.py | philparkbot/advent2020 | e5ee7e0703ae82d4c773024e4289d7d37879af35 | [
"MIT"
] | null | null | null | day/6/survey_pt2.py | philparkbot/advent2020 | e5ee7e0703ae82d4c773024e4289d7d37879af35 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
'''
Input is a group of one or more lines with letters.
Each letter = question to which the answer was 'yes'.
Each line represents survey answers by a person, contains one or more letter.
Adjacent lines are part of the same group.
For each group, count the number of questions to which all people answered 'yes'
'''
g_file = 'input.txt'
#------------------------------------------------------------------------------
def run():
    """Read g_file, and for each group print (and total) the number of
    questions every member of the group answered 'yes' to."""
    def _common(group):
        # Intersection of all per-person answer sets within one group.
        common = None
        for person in group:
            common = person if common is None else common.intersection(person)
        return common

    all_groups = list()
    curr_group = list()
    with open(g_file) as handle:
        for line in handle:
            line = line.rstrip()
            if len(line) == 0:
                # End of group: evaluate the current group's answers.
                # Guard against consecutive blank lines, which previously
                # appended None and crashed the len() call below.
                if curr_group:
                    all_groups.append(_common(curr_group))
                curr_group = list()
                continue
            # One person's answers as a set of question letters.
            curr_group.append(set(line))
    # Flush the final group (file may not end with a blank line).
    if curr_group:
        all_groups.append(_common(curr_group))
    # now count
    l_sum = 0
    for l_idx, l_entry in enumerate(all_groups):
        l_sum += len(l_entry)
        print("Group {}: {} ({})".format(l_idx, len(l_entry), l_entry))
    print("Count is {}".format(l_sum))
#------------------------------------------------------------------------------
def main():
#------------------------------------------------------------------------------
    """Entry point: process g_file and print per-group counts plus the total."""
    run()
main() | 25.188406 | 80 | 0.521864 | 239 | 1,738 | 3.543933 | 0.330544 | 0.056671 | 0.070838 | 0.033058 | 0.24085 | 0.24085 | 0.24085 | 0.24085 | 0.24085 | 0.24085 | 0 | 0.002913 | 0.210012 | 1,738 | 69 | 81 | 25.188406 | 0.613984 | 0.405063 | 0 | 0.444444 | 0 | 0 | 0.036239 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0 | 0 | 0.055556 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
742d81b9ef0eae8d2a138809ba186426bf9511d7 | 882 | py | Python | test/ut/tools/annotation/testcase/annotated/handwrite.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 9,680 | 2019-05-07T01:42:30.000Z | 2022-03-31T16:48:33.000Z | test/ut/tools/annotation/testcase/annotated/handwrite.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,957 | 2019-05-06T21:44:21.000Z | 2022-03-31T09:21:53.000Z | test/ut/tools/annotation/testcase/annotated/handwrite.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,571 | 2019-05-07T06:42:55.000Z | 2022-03-31T03:19:24.000Z | import nni
# Expected-output fixture for NNI's annotation tooling: each nni.* call below
# is the already-annotated form the tool is expected to produce. Names such
# as foo, bar, avg_pool are intentionally undefined — this file is parsed,
# not executed.
# NOTE(review): if this fixture is compared byte-for-byte against generated
# output, any textual change here must be mirrored in the test expectations.
def max_pool(k):
    pass
h_conv1 = 1
nni.choice({'foo': foo, 'bar': bar})(1)
conv_size = nni.choice({2: 2, 3: 3, 5: 5, 7: 7}, name='conv_size')
abc = nni.choice({'2': '2', 3: 3, '(5 * 6)': 5 * 6, 7: 7}, name='abc')
h_pool1 = nni.function_choice({'max_pool': lambda : max_pool(h_conv1),
    'h_conv1': lambda : h_conv1,
    'avg_pool': lambda : avg_pool(h_conv2, h_conv3)}
    )
h_pool1 = nni.function_choice({'max_pool(h_conv1)': lambda : max_pool(
    h_conv1), 'avg_pool(h_conv2, h_conv3)': lambda : avg_pool(h_conv2,
    h_conv3)}, name='max_pool')
h_pool2 = nni.function_choice({'max_poo(h_conv1)': lambda : max_poo(h_conv1
    ), '(2 * 3 + 4)': lambda : 2 * 3 + 4, '(lambda x: 1 + x)': lambda : lambda
    x: 1 + x}, name='max_poo')
tmp = nni.qlognormal(1.2, 3, 4.5)
test_acc = 1
nni.report_intermediate_result(test_acc)
test_acc = 2
| 36.75 | 78 | 0.643991 | 162 | 882 | 3.240741 | 0.246914 | 0.091429 | 0.060952 | 0.114286 | 0.371429 | 0.299048 | 0.262857 | 0 | 0 | 0 | 0 | 0.07027 | 0.160998 | 882 | 23 | 79 | 38.347826 | 0.639189 | 0 | 0 | 0 | 0 | 0 | 0.172336 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0.045455 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
742d95bbe3f86400b205fe61790ce9cbad881b41 | 7,019 | py | Python | scripts/update-seminars.py | geem-lab/seminars | 0ea259c902c98ca49a1da252a331ac21eb1e094a | [
"MIT"
] | 6 | 2022-03-09T13:10:24.000Z | 2022-03-28T22:20:39.000Z | scripts/update-seminars.py | geem-lab/seminars | 0ea259c902c98ca49a1da252a331ac21eb1e094a | [
"MIT"
] | 9 | 2022-03-08T22:35:29.000Z | 2022-03-28T14:33:10.000Z | scripts/update-seminars.py | geem-lab/seminars | 0ea259c902c98ca49a1da252a331ac21eb1e094a | [
"MIT"
] | null | null | null | from __future__ import annotations
import calendar
import logging
import os
from dataclasses import dataclass
from datetime import datetime
import dateparser
import requests
from markdown import markdown
def request_github_api(
    query_url: str, owner="geem-lab", token=None, logger=None
) -> dict:
    """GET *query_url* from the GitHub API and return the decoded JSON.

    Falls back to the GITHUB_TOKEN environment variable when *token* is
    not given, and raises PermissionError on bad credentials.
    """
    if token is None:
        token = os.environ.get("GITHUB_TOKEN", None)
    session = requests.Session()
    session.auth = (owner, token)
    headers = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {token}",
    }
    response = session.get(
        query_url, headers=headers, params={"state": "all"}
    ).json()
    # The membership test also works when the API returns a JSON list.
    if "message" in response and response["message"] == "Bad credentials":
        raise PermissionError(
            "Github API token is invalid. Please set the GITHUB_TOKEN environment variable."
        )
    if logger is not None:
        logger.info(f"{query_url} returned:\n'{response}'")
    return response
def tag(tag_name):
    """Return a renderer that builds an HTML element named *tag_name*.

    Positional arguments become the element's contents (falsy ones are
    dropped); keyword arguments become attributes, with a trailing
    underscore stripped from names (so ``class_`` renders as ``class``).
    """
    def render(*children, **attributes):
        def strip_trailing_underscore(name):
            return name[:-1] if name.endswith("_") else name

        attr_text = " ".join(
            f'{strip_trailing_underscore(name)}="{value}"'
            for name, value in attributes.items()
        )
        body = "".join(child for child in children if child)
        if attr_text:
            if body:
                return f"<{tag_name} {attr_text}>{body}</{tag_name}>"
            return f"<{tag_name} {attr_text} />"
        if body:
            return f"<{tag_name}>{body}</{tag_name}>"
        return f"<{tag_name} />"

    return render
# HTML element renderers used to assemble the seminar listing markup.
em = tag("em")
time = tag("time")
h2 = tag("h2")
p = tag("p")
strong = tag("strong")
a = tag("a")
details = tag("details")
summary = tag("summary")
span = tag("span")
li = tag("li")
ul = tag("ul")
small = tag("small")
img = tag("img")
@dataclass
class Seminar:
    """A single seminar, rendered to a collapsible markdown/HTML block."""

    title: str
    # GitHub user payload; a bare login string is resolved in __post_init__.
    speaker: dict
    description: str
    date: datetime

    # NOTE(review): '%-d' (day without zero padding) is glibc-specific and
    # fails on Windows strftime — confirm the deployment platform.
    STRFTIME_FORMAT = "%b %-d %Y"

    def __post_init__(self):
        # Accept a bare GitHub login and resolve it to the full user payload.
        if isinstance(self.speaker, str):
            self.speaker = request_github_api(
                f"https://api.github.com/users/{self.speaker}"
            )

    def _date_to_markdown(self):
        """Render the date as a small, bold <time> element."""
        dt = time(
            "📅 ",
            self.date.strftime(self.STRFTIME_FORMAT),
            datetime=self.date.isoformat(),
        )
        return small(strong(dt))

    def _title_to_markdown(self):
        return em(self.title)

    @property
    def speaker_name(self):
        """Speaker display name, falling back to '@login' when unset."""
        if "name" in self.speaker and self.speaker["name"]:
            return self.speaker["name"]
        return f"@{self.speaker['login']}"

    @property
    def speaker_url(self):
        return f"https://github.com/{self.speaker['login']}"

    def _speaker_name_to_markdown(self):
        return a(self.speaker_name, href=self.speaker_url)

    AVATAR_WIDTH = 128

    def _speaker_avatar_to_markdown(self):
        """Avatar image linked to the speaker profile, or None if absent."""
        if "avatar_url" in self.speaker:
            return a(
                img(
                    src=self.speaker["avatar_url"],
                    alt=self.speaker["login"],
                    title=self.speaker_name,
                    align="left",
                    width=self.AVATAR_WIDTH,
                ),
                href=self.speaker_url,
            )
        return None

    def _description_to_markdown(self):
        return markdown(self.description)

    def to_markdown(self):
        """Render the seminar as a <details> block with a one-line summary."""
        return details(
            summary(
                self._date_to_markdown(),
                " ",
                self._title_to_markdown(),
                " (",
                self._speaker_name_to_markdown(),
                ")",
            ),
            self._speaker_avatar_to_markdown(),
            self._description_to_markdown(),
        )

    DATE_MARKER = "**Date**:"
    SEMINAR_TITLE_MARKER = "[SEMINAR]"

    @classmethod
    def from_github_issue(cls, issue, logger=None):
        """Build a Seminar from a GitHub issue payload.

        The issue body is split at DATE_MARKER: everything before it is the
        description, and the first line after it is parsed as the date.
        """
        title = issue["title"].replace(cls.SEMINAR_TITLE_MARKER, "").strip()
        description, date = issue["body"].split(cls.DATE_MARKER)[:2]
        # BUG FIX: the previous `description.rstrip(cls.DATE_MARKER)` treated
        # the marker as a *character set* (str.rstrip semantics), stripping
        # any trailing '*', 'D', 'a', 't', 'e' or ':' characters and thereby
        # mangling legitimate descriptions. The split above already removed
        # the marker, so a plain strip() is all that is needed.
        description = description.strip()
        date = date.splitlines()[0].strip()
        date = dateparser.parse(date)
        # Prefer the first assignee as the speaker; fall back to the author.
        if issue["assignees"]:
            speaker = issue["assignees"][0]["login"]
        else:
            speaker = issue["user"]["login"]
        seminar = Seminar(
            title=title, speaker=speaker, description=description, date=date
        )
        if logger is not None:
            logger.info(f"seminar: {seminar}")
        return seminar
@dataclass
class SeminarList:
    """A newest-first collection of seminars, rendered to one markdown page."""

    seminars: list[Seminar]

    def __post_init__(self):
        # Keep the seminars sorted newest first.
        self.seminars = sorted(
            self.seminars, key=lambda seminar: seminar.date, reverse=True
        )

    HEADER = """
Click on each seminar to see more details.
"""
    # HTML calendar for the current month with today's cell highlighted.
    # NOTE: evaluated once, at class-definition (import) time.
    CALENDAR = (
        calendar.HTMLCalendar()
        .formatmonth(datetime.today().year, datetime.today().month)
        .replace(
            f">{int(datetime.today().day)}<",
            f' bgcolor="#66ff66"><b><u>{int(datetime.today().day)}</u></b><',
        )
    )
    BEGIN_UPCOMING_SEMINARS = """
## Upcoming Seminars
"""
    END_UPCOMING_SEMINARS = """
> Want to add *your* seminar? Check if the date of interest is available and take a look at [the instructions page](/seminars/instructions).
"""
    BEGIN_PAST_SEMINARS = """
## Past Seminars
"""
    END_PAST_SEMINARS = ""

    def to_markdown(self):
        """Render header, calendar, upcoming and past seminar sections."""
        next_seminars = filter(
            lambda seminar: seminar.date >= datetime.today(), self.seminars
        )
        past_seminars = filter(
            lambda seminar: seminar.date < datetime.today(), self.seminars
        )
        # BUG FIX: END_UPCOMING_SEMINARS (the "add your seminar" note) was
        # previously concatenated *before* BEGIN_UPCOMING_SEMINARS; it now
        # closes the upcoming-seminars section as its name implies.
        return (
            self.HEADER
            + self.CALENDAR
            + self.BEGIN_UPCOMING_SEMINARS
            + "".join(seminar.to_markdown() for seminar in next_seminars)
            + self.END_UPCOMING_SEMINARS
            + self.BEGIN_PAST_SEMINARS
            + "".join(seminar.to_markdown() for seminar in past_seminars)
            + self.END_PAST_SEMINARS
        )

    @staticmethod
    def from_github_issues(issues, logger=None):
        """Build a list from GitHub issues whose title carries the marker."""
        seminars = [
            Seminar.from_github_issue(issue, logger=logger)
            for issue in issues
            if Seminar.SEMINAR_TITLE_MARKER in issue["title"]
        ]
        return SeminarList(seminars)

    @staticmethod
    def from_github_repo(owner, repo, token=None, logger=None):
        """Fetch a repository's issues and build the seminar list from them."""
        issues = request_github_api(
            f"https://api.github.com/repos/{owner}/{repo}/issues",
            owner=owner,
            token=token,
            logger=logger,
        )
        return SeminarList.from_github_issues(issues, logger=logger)
if __name__ == "__main__":
    # Regenerate the seminar page markdown from the repo's issues and print
    # it to stdout (the caller redirects it into the site).
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.INFO)
    seminars = SeminarList.from_github_repo(
        owner="geem-lab", repo="seminars", logger=logger
    )
    print(seminars.to_markdown())
| 26.486792 | 140 | 0.583274 | 785 | 7,019 | 5.030573 | 0.242038 | 0.047354 | 0.038997 | 0.014181 | 0.155989 | 0.084072 | 0.084072 | 0.084072 | 0.031907 | 0.031907 | 0 | 0.002823 | 0.293347 | 7,019 | 264 | 141 | 26.587121 | 0.793145 | 0 | 0 | 0.086957 | 0 | 0.004831 | 0.152728 | 0.040889 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.043478 | 0.024155 | 0.318841 | 0.004831 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7431452de01929e5a219b4f8d212634cdab0aa06 | 387 | py | Python | failures/mapper.py | ALaks96/MapReduce_Pyspark_Large_Matrix_Multiplication | 1a37a4a66464e7f11295ebf55507853b959748ad | [
"MIT"
] | null | null | null | failures/mapper.py | ALaks96/MapReduce_Pyspark_Large_Matrix_Multiplication | 1a37a4a66464e7f11295ebf55507853b959748ad | [
"MIT"
] | null | null | null | failures/mapper.py | ALaks96/MapReduce_Pyspark_Large_Matrix_Multiplication | 1a37a4a66464e7f11295ebf55507853b959748ad | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
# input comes from STDIN (standard input)
for line in sys.stdin:
# remove leading and trailing whitespace
line = line.strip()
# split the line into words
element1 = line.split(',')
c = 0
for element in element1:
c += 1
if c == 1:
print (element1)
else:
continue | 16.125 | 44 | 0.537468 | 47 | 387 | 4.425532 | 0.680851 | 0.019231 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024896 | 0.377261 | 387 | 24 | 45 | 16.125 | 0.838174 | 0.312662 | 0 | 0 | 0 | 0 | 0.003802 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74315d90853ccb50d6e8d6018b567f14aa463a3b | 5,589 | py | Python | winchester/db/alembic_command.py | SandyWalsh/stacktach-winchester | ac49955386b695868945a28b6597fe72b3b657e6 | [
"Apache-2.0"
] | null | null | null | winchester/db/alembic_command.py | SandyWalsh/stacktach-winchester | ac49955386b695868945a28b6597fe72b3b657e6 | [
"Apache-2.0"
] | null | null | null | winchester/db/alembic_command.py | SandyWalsh/stacktach-winchester | ac49955386b695868945a28b6597fe72b3b657e6 | [
"Apache-2.0"
] | null | null | null | from alembic import util, command, config
import argparse
import inspect
class AlembicCommandLine(object):
    """argparse-based command line exposing alembic's command functions.

    Subclasses may preset ``prog``/``description`` and restrict which
    alembic commands are exposed via ``allowed_commands``.
    """
    prog = None
    description = None
    allowed_commands = None

    def __init__(self, prog=None, description=None, allowed_commands=None):
        if prog is not None:
            self.prog = prog
        if description is not None:
            self.description = description
        if allowed_commands is not None:
            self.allowed_commands = allowed_commands
        self.parser = self.generate_options()

    def add_command_options(self, parser, positional, kwargs):
        """Add per-command options, keyed on the command's kwarg names."""
        if 'template' in kwargs:
            parser.add_argument("-t", "--template",
                                default='generic',
                                type=str,
                                help="Setup template for use with 'init'")
        if 'message' in kwargs:
            parser.add_argument("-m", "--message",
                                type=str,
                                help="Message string to use with 'revision'")
        if 'sql' in kwargs:
            parser.add_argument("--sql",
                                action="store_true",
                                help="Don't emit SQL to database - dump to "
                                     "standard output/file instead")
        if 'tag' in kwargs:
            parser.add_argument("--tag",
                                type=str,
                                help="Arbitrary 'tag' name - can be used by "
                                     "custom env.py scripts.")
        if 'autogenerate' in kwargs:
            parser.add_argument("--autogenerate",
                                action="store_true",
                                help="Populate revision script with candidate "
                                     "migration operations, based on comparison "
                                     "of database to model.")
        # "current" command
        if 'head_only' in kwargs:
            parser.add_argument("--head-only",
                                action="store_true",
                                help="Only show current version and "
                                     "whether or not this is the head revision.")
        if 'rev_range' in kwargs:
            parser.add_argument("-r", "--rev-range",
                                action="store",
                                help="Specify a revision range; "
                                     "format is [start]:[end]")
        positional_help = {
            'directory': "location of scripts directory",
            'revision': "revision identifier"
        }
        for arg in positional:
            parser.add_argument(arg, help=positional_help.get(arg))

    def add_options(self, parser):
        """Add global options shared by every subcommand."""
        parser.add_argument("-c", "--config",
                            type=str,
                            default="alembic.ini",
                            help="Alternate config file")
        parser.add_argument("-n", "--name",
                            type=str,
                            default="alembic",
                            help="Name of section in .ini file to "
                                 "use for Alembic config")
        parser.add_argument("-x", action="append",
                            help="Additional arguments consumed by "
                                 "custom env.py scripts, e.g. -x "
                                 "setting1=somesetting -x setting2=somesetting")

    def generate_options(self):
        """Build the top-level parser with one subparser per command."""
        parser = argparse.ArgumentParser(prog=self.prog)
        self.add_options(parser)
        subparsers = parser.add_subparsers()
        for fn, name, doc, positional, kwarg in self.get_commands():
            subparser = subparsers.add_parser(name, help=doc)
            self.add_command_options(subparser, positional, kwarg)
            subparser.set_defaults(cmd=(fn, positional, kwarg))
        return parser

    def get_commands(self):
        """Discover public alembic.command functions and split their args.

        Parameters with defaults become options; the rest (minus the leading
        config parameter, which run_cmd supplies) are positional.
        """
        cmds = []
        for fn in [getattr(command, n) for n in dir(command)]:
            if (inspect.isfunction(fn) and
                    fn.__name__[0] != '_' and
                    fn.__module__ == 'alembic.command'):
                if (self.allowed_commands and
                        fn.__name__ not in self.allowed_commands):
                    continue
                # BUG FIX: inspect.getargspec was removed in Python 3.11.
                # getfullargspec keeps args at index 0 and defaults at
                # index 3, so the slicing below is unchanged.
                spec = inspect.getfullargspec(fn)
                if spec[3]:
                    positional = spec[0][1:-len(spec[3])]
                    kwarg = spec[0][-len(spec[3]):]
                else:
                    positional = spec[0][1:]
                    kwarg = []
                cmds.append((fn, fn.__name__, fn.__doc__, positional, kwarg))
        return cmds

    def get_config(self, options):
        """Create an alembic Config bound to the parsed options."""
        return config.Config(file_=options.config,
                             ini_section=options.name,
                             cmd_opts=options)

    def run_cmd(self, config, options):
        """Invoke the selected alembic command with the parsed arguments."""
        fn, positional, kwarg = options.cmd
        try:
            fn(config, *[getattr(options, k) for k in positional],
               **dict((k, getattr(options, k)) for k in kwarg))
        except util.CommandError as e:
            util.err(str(e))

    def main(self, argv=None):
        """Parse argv and dispatch to the selected command."""
        options = self.parser.parse_args(argv)
        if not hasattr(options, "cmd"):
            # see http://bugs.python.org/issue9253, argparse
            # behavior changed incompatibly in py3.3
            self.parser.error("too few arguments")
        else:
            self.run_cmd(self.get_config(options), options)
if __name__ == '__main__':
    # Run the full (unrestricted) alembic command line.
    cmdline = AlembicCommandLine()
    cmdline.main()
| 39.083916 | 77 | 0.501879 | 555 | 5,589 | 4.911712 | 0.304505 | 0.039618 | 0.068599 | 0.043654 | 0.125092 | 0.046222 | 0.030814 | 0 | 0 | 0 | 0 | 0.005076 | 0.400787 | 5,589 | 142 | 78 | 39.359155 | 0.808898 | 0.018429 | 0 | 0.084746 | 0 | 0 | 0.16764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0 | 0.025424 | 0.008475 | 0.152542 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
74316554f8575ef9ba36a94a68a372ec2ee916f0 | 7,892 | py | Python | tests/test_glamor_functions.py | Denis-Alexeev/pytest-glamor-allure | e7b5f11115eefd61ab1c7b775d6bafec0b1623e2 | [
"MIT"
] | 8 | 2021-11-15T21:59:46.000Z | 2022-03-28T07:49:55.000Z | tests/test_glamor_functions.py | Denis-Alexeev/pytest-glamor-allure | e7b5f11115eefd61ab1c7b775d6bafec0b1623e2 | [
"MIT"
] | 11 | 2021-11-15T16:07:35.000Z | 2022-02-17T08:20:05.000Z | tests/test_glamor_functions.py | Denis-Alexeev/pytest-glamor-allure | e7b5f11115eefd61ab1c7b775d6bafec0b1623e2 | [
"MIT"
] | 2 | 2021-12-28T08:27:35.000Z | 2022-01-17T02:19:26.000Z | """
Here we test `glamor.include_scope_in_title` and
`glamor.logging_allure_steps` functions.
"""
import io
import logging
from allure_commons_test.container import has_container
from allure_commons_test.report import has_test_case
from hamcrest import assert_that
from glamor.patches import PatchHelper, include_scope_in_title
import glamor as allure
import pitest as pytest
from .matchers import has_after, has_before
# Parametrization value sets shared by the test classes below.
autouse_values = ('True', 'False')
scopes = ('function', 'class', 'module', 'package', 'session')
def scopes_ids(val):
    """Render a readable pytest parametrize id for a scope value."""
    return 'scope={}'.format(val)
def autouse_ids(val):
    """Render a readable pytest parametrize id for an autouse value."""
    return 'autouse={}'.format(val)
@pytest.mark.parametrize('autouse', autouse_values, ids=autouse_ids)
@pytest.mark.parametrize('scope', scopes, ids=scopes_ids)
@pytest.mark.parametrize('place', ('before', 'after'))
@pytest.mark.parametrize('include_autouse', autouse_values)
class TestInclude:
    """Tests for include_scope_in_title over every combination of fixture
    scope, autouse flag, prefix placement and autouse-inclusion setting."""
    @pytest.fixture
    def monkey_patchhelper(self):
        # Back up PatchHelper's private hooks and restore them after the
        # test, so one test's include_scope_in_title() call cannot leak
        # state into the others.
        p = PatchHelper
        backup_add_autouse = getattr(p, '_add_autouse')
        backup_add_scope_after_name = getattr(p, '_add_scope_after_name')
        backup_add_scope_before_name = getattr(p, '_add_scope_before_name')
        yield
        setattr(p, '_add_autouse', backup_add_autouse)
        setattr(p, '_add_scope_after_name', backup_add_scope_after_name)
        setattr(p, '_add_scope_before_name', backup_add_scope_before_name)
        include_scope_in_title.called = False
    def test_scope_autouse(
        self,
        glamor_pytester,
        scope: str,
        autouse: str,
        place: str,
        include_autouse: str,
        monkey_patchhelper,
    ):
        """Setup/teardown titles get a scope (and optional autouse) marker."""
        setup = 'FANCY setup name'
        tear = 'FANCY teardown name'
        test_name = 'test_test'
        fixt_one = 'fixture_one'
        fixt_two = 'fixture_two'
        # The 'a' marker appears only when the fixture is autouse AND the
        # autouse flag was requested via include_scope_in_title.
        autouse_prefix = 'a' if {autouse, include_autouse} == {'True'} else ''
        glamor_pytester.pytester.makepyfile(
            f"""
            import glamor as allure
            import pitest as pytest
            allure.include_scope_in_title('{place}', autouse={include_autouse})
            @pytest.fixture(scope='{scope}', autouse={autouse})
            @allure.title.setup('{setup}')
            @allure.title.teardown('{tear}')
            def {fixt_one}():
                yield
            @pytest.fixture
            def {fixt_two}():
                yield
            def {test_name}({fixt_one}, {fixt_two}):
                pass
            """
        )
        # Marker is the scope's first letter plus the optional 'a',
        # e.g. '[F]' or '[Sa]'.
        prefix = f'[{scope[:1].upper()}{autouse_prefix}]'
        if place == 'before':
            prefixed_setup_one = f'{prefix} {setup}'
            prefixed_tear_one = f'{prefix} {tear}'
            prefixed_fixt_two = f'[F] {fixt_two}'
        elif place == 'after':
            prefixed_setup_one = f'{setup} {prefix}'
            prefixed_tear_one = f'{tear} {prefix}'
            prefixed_fixt_two = f'{fixt_two} [F]'
        else:
            raise RuntimeError('Unknown "place" parameter')
        glamor_pytester.runpytest()
        report = glamor_pytester.allure_report
        assert_that(
            report,
            has_test_case(
                test_name,
                has_container(
                    report,
                    has_before(prefixed_setup_one),
                    has_after(prefixed_tear_one),
                ),
                has_container(
                    report,
                    has_before(prefixed_fixt_two),
                    has_after(prefixed_fixt_two),
                ),
            ),
        )
    def test_fixture_as_method(
        self,
        glamor_pytester,
        scope: str,
        autouse: str,
        place: str,
        include_autouse: str,
        monkey_patchhelper,
    ):
        """The same marker logic applies to fixtures defined inside a class."""
        fixt_name = 'fixt'
        test_name = 'test_in_class'
        glamor_pytester.pytester.makepyfile(
            f"""
            import pitest as pytest
            import glamor as allure
            allure.include_scope_in_title('{place}', autouse={include_autouse})
            class TestClass:
                @pytest.fixture(scope='{scope}', autouse={autouse})
                def {fixt_name}(self):
                    yield
                def {test_name}(self, fixt):
                    pass
            """
        )
        glamor_pytester.runpytest()
        report = glamor_pytester.allure_report
        autouse_prefix = 'a' if {autouse, include_autouse} == {'True'} else ''
        prefix = f'[{scope[:1].upper()}{autouse_prefix}]'
        if place == 'before':
            fixt_title = f'{prefix} {fixt_name}'
        elif place == 'after':
            fixt_title = f'{fixt_name} {prefix}'
        else:
            raise RuntimeError('Unknown "place" parameter')
        assert_that(
            report,
            has_test_case(
                test_name,
                has_container(
                    report,
                    has_before(fixt_title),
                    has_after(fixt_title),
                ),
            ),
        )
class TestLogging:
    """Tests for logging_allure_steps, which mirrors allure steps into a
    standard logger as '[STEP] ...' records."""
    # Dedicated logger name so handler setup cannot clash with other tests.
    logger_name = 'GlamorAsAllureLogger'
    @pytest.fixture(autouse=True)
    def backup_and_store_step_ctx(self):
        # The feature under test may patch step_ctx.__enter__; restore the
        # original after each test.
        backup_enter = allure.step_ctx.__enter__
        yield
        allure.step_ctx.__enter__ = backup_enter
    @pytest.fixture
    def logger_stream(self):
        """Yield (logger, stream): an INFO logger writing '[LEVEL] message'
        records into an in-memory stream."""
        logger = logging.getLogger(self.logger_name)
        logger.setLevel(logging.INFO)
        stream = io.StringIO()
        handler = logging.StreamHandler(stream=stream)
        handler.setLevel(logging.INFO)
        fmt = logging.Formatter('[%(levelname)s] %(message)s')
        handler.setFormatter(fmt)
        logger.addHandler(handler)
        yield logger, stream
        logger.handlers.clear()
    @pytest.mark.parametrize('switch', ('on', 'off'))
    @pytest.mark.parametrize('times', (1, 2), ids=('once', 'twice'))
    def test_logging_step_can_be_on_or_off(self, logger_stream, switch, times):
        """Step records appear only while enabled; enabling twice in a row
        behaves the same as enabling once."""
        logger, stream = logger_stream
        for i in range(times):
            allure.logging_allure_steps(logger if switch == 'on' else None)
        expected_messages = []
        logger.info('start message')
        expected_messages.append('[INFO] start message')
        with allure.step('step message'):
            if switch == 'on':
                expected_messages.append('[STEP] step message')
        logger.error('end message')
        expected_messages.append('[ERROR] end message')
        logger_messages = stream.getvalue().strip().split('\n')
        assert logger_messages == expected_messages
    @pytest.mark.parametrize('start', ('on', 'off'))
    @pytest.mark.parametrize('steps', (1, 2, 3, 4))
    def test_logging_state_can_be_changed(self, start, logger_stream, steps):
        """Step logging can be toggled on/off repeatedly between steps."""
        logger, stream = logger_stream
        expected_messages = []
        # 'odd' is the state applied before odd-numbered steps, 'even'
        # before even-numbered ones; exactly one of them is the logger.
        odd = logger if start == 'on' else None
        even = None if start == 'on' else logger
        allure.logging_allure_steps(odd)
        with allure.step('one'):
            if odd:
                expected_messages.append('[STEP] one')
        if steps >= 2:
            allure.logging_allure_steps(even)
            with allure.step('two'):
                if even:
                    expected_messages.append('[STEP] two')
        if steps >= 3:
            allure.logging_allure_steps(odd)
            with allure.step('three'):
                if odd:
                    expected_messages.append('[STEP] three')
        if steps >= 4:
            allure.logging_allure_steps(even)
            with allure.step('four'):
                if even:
                    expected_messages.append('[STEP] four')
        logger_messages_str = stream.getvalue().strip()
        if logger_messages_str:
            logger_messages = logger_messages_str.split('\n')
        else:
            logger_messages = []
        assert expected_messages == logger_messages
| 30.708171 | 79 | 0.578814 | 846 | 7,892 | 5.125296 | 0.170213 | 0.04059 | 0.038745 | 0.02191 | 0.364391 | 0.326799 | 0.226015 | 0.226015 | 0.129151 | 0.084871 | 0 | 0.002031 | 0.313609 | 7,892 | 256 | 80 | 30.828125 | 0.798412 | 0.011277 | 0 | 0.441176 | 0 | 0 | 0.226684 | 0.056575 | 0 | 0 | 0 | 0 | 0.02451 | 1 | 0.044118 | false | 0.009804 | 0.063725 | 0.009804 | 0.132353 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7433c8c30300e5235615677f9a888944d6d1c778 | 1,827 | py | Python | gorzdrav_parser.py | hound672/gorzdrav_parser | a6e5175111e8a0072484cb1b8c73dcc9afeec172 | [
"MIT"
] | null | null | null | gorzdrav_parser.py | hound672/gorzdrav_parser | a6e5175111e8a0072484cb1b8c73dcc9afeec172 | [
"MIT"
] | null | null | null | gorzdrav_parser.py | hound672/gorzdrav_parser | a6e5175111e8a0072484cb1b8c73dcc9afeec172 | [
"MIT"
] | 1 | 2021-02-07T19:46:37.000Z | 2021-02-07T19:46:37.000Z | import requests
import pprint
import time
from datetime import datetime
def send_push(text):
    """Send a push notification via pushmeapi.jagcesar.se; never raises."""
    try:
        print(f"Send push: {text}")
        TOKEN = "put your token here"
        requests.get(
            "https://pushmeapi.jagcesar.se",
            params={"title": text, "identifier": TOKEN},
        )
    except Exception as e:
        print(f"Error send push {e}")
def get_data(district_id):
    """Fetch the clinic (LPU) list for a district from the gorzdrav API.

    Returns the parsed JSON payload, or None when the request fails or the
    server answers with a non-200 status. A push notification is sent in
    either failure case.
    """
    data = None
    try:
        GET_URL = f"https://gorzdrav.spb.ru/_api/api/district/{district_id}/lpu"
        r = requests.get(GET_URL)
        # BUG FIX: check the status *before* parsing. Previously a non-200
        # response was parsed and returned anyway, so the caller could act
        # on an error body.
        if r.status_code != 200:
            send_push("ERROR get data")
            return None
        data = r.json()
    except Exception as e:
        print(e)
        send_push("ERROR get data")
    return data
# Startup notification confirming the watcher is running.
send_push("Hello from python")
# Get info from https://gorzdrav.spb.ru/service-covid-vaccination-schedule
# Put your hospital number here
HOSPITAL_NAME_PATTERN = "78"
# District ID
DISTRIC_ID = 17
# Poll interval in seconds; shorter retry delay after a failed fetch.
REFRESH_PERIOD = 10 * 60
RETRY_PERIOD = 5 * 60
if __name__ == '__main__':
    # Poll the district endpoint forever; push a notification as soon as the
    # target hospital opens COVID vaccination registration.
    while True:
        print("---------------------------------------------------------")
        now = datetime.now()
        print(now.strftime("%d/%m/%Y %H:%M:%S"))
        data = get_data(DISTRIC_ID)
        if not data:
            send_push(f"No data, wait {RETRY_PERIOD} and try againt")
            # BUG FIX: this branch used to sleep REFRESH_PERIOD even though
            # the message above promises a retry after RETRY_PERIOD.
            time.sleep(RETRY_PERIOD)
            continue
        try:
            for item in data["result"]:
                if HOSPITAL_NAME_PATTERN in item["lpuShortName"]:
                    print(f"Check hospital {item['lpuShortName']}")
                    print(item["covidVaccination"])
                    if item["covidVaccination"]:
                        send_push("REGISTRATION IS OPEN")
        except Exception as e:
            send_push(f"Error parse responce {e}")
        time.sleep(REFRESH_PERIOD)
743528a5657356c58159ba08649a9c47c2edc92a | 239 | py | Python | Python3/96.unique-binary-search-trees.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/96.unique-binary-search-trees.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/96.unique-binary-search-trees.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=96 lang=python3
#
# [96] Unique Binary Search Trees
#
# @lc code=start
class Solution:
    def numTrees(self, n: int) -> int:
        """Count structurally unique BSTs storing values 1..n.

        This is the n-th Catalan number, computed bottom-up:
        dp[i] = sum over root positions of dp[left] * dp[right].
        The original body stopped after allocating dp and implicitly
        returned None for n >= 2; this completes the recurrence.
        """
        if n < 2:
            return n
        dp = [0] * (n + 1)
        dp[0] = dp[1] = 1  # one shape for the empty tree and one node
        for nodes in range(2, n + 1):
            for left in range(nodes):
                dp[nodes] += dp[left] * dp[nodes - 1 - left]
        return dp[n]
# @lc code=end
| 13.277778 | 37 | 0.502092 | 33 | 239 | 3.636364 | 0.787879 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04698 | 0.376569 | 239 | 17 | 38 | 14.058824 | 0.758389 | 0.39749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7436b8a2d033f080a4d8a0fedb57c06470287cd7 | 1,377 | py | Python | deepcell_label/exporters_test.py | naterenegar/deepcell-label | 5ebe3dd5786746dbdea3dc2ff37f07bb45ebffb8 | [
"Apache-2.0"
] | null | null | null | deepcell_label/exporters_test.py | naterenegar/deepcell-label | 5ebe3dd5786746dbdea3dc2ff37f07bb45ebffb8 | [
"Apache-2.0"
] | 2 | 2020-10-15T23:09:02.000Z | 2020-10-22T19:59:09.000Z | deepcell_label/exporters_test.py | naterenegar/deepcell-label | 5ebe3dd5786746dbdea3dc2ff37f07bb45ebffb8 | [
"Apache-2.0"
] | null | null | null | """Tests for exporters.py"""
import pytest
import io
from deepcell_label import models
from deepcell_label import exporters
from deepcell_label.conftest import DummyLoader
@pytest.fixture
def npz_exporter(app, db_session):
    """Exporter bound to a project created from a dummy '.npz' loader."""
    with app.app_context():
        db_session.autoflush = False
        exporter = exporters.Exporter(
            models.Project.create(DummyLoader(path='test.npz'))
        )
    return exporter
@pytest.fixture
def trk_exporter(app, db_session):
    """Exporter bound to a project created from a dummy '.trk' loader."""
    with app.app_context():
        db_session.autoflush = False
        exporter = exporters.Exporter(
            models.Project.create(DummyLoader(path='test.trk'))
        )
    return exporter
class TestExporter():
    """Exported files should be returned as in-memory byte streams."""

    def test_export_npz(self, npz_exporter):
        exported = npz_exporter.export()
        assert isinstance(exported, io.BytesIO)

    def test_export_trk(self, trk_exporter):
        exported = trk_exporter.export()
        assert isinstance(exported, io.BytesIO)
class TestS3Exporter():
    """Exporting to S3 should upload the file object via boto3."""

    def test_export(self, mocker, app, db_session):
        with app.app_context():
            upload_mock = mocker.patch('boto3.s3.inject.upload_fileobj')
            db_session.autoflush = False
            s3_exporter = exporters.S3Exporter(
                models.Project.create(DummyLoader())
            )
            s3_exporter.export('test')
            upload_mock.assert_called()
| 27.54 | 69 | 0.682643 | 159 | 1,377 | 5.72956 | 0.308176 | 0.059276 | 0.055982 | 0.052689 | 0.5236 | 0.5236 | 0.422613 | 0.296378 | 0.296378 | 0.230516 | 0 | 0.003731 | 0.221496 | 1,377 | 49 | 70 | 28.102041 | 0.846082 | 0.015977 | 0 | 0.4 | 0 | 0 | 0.037064 | 0.022239 | 0 | 0 | 0 | 0 | 0.085714 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
743894d56d7c6c4c5887273915d861d1f752013b | 6,047 | py | Python | petition.py | corazzon/petitions | 76b2110cb7b664fa25a6a71527d50a5e8a9acf9a | [
"MIT"
] | 1 | 2019-02-18T00:47:07.000Z | 2019-02-18T00:47:07.000Z | petition.py | corazzon/petitions | 76b2110cb7b664fa25a6a71527d50a5e8a9acf9a | [
"MIT"
] | null | null | null | petition.py | corazzon/petitions | 76b2110cb7b664fa25a6a71527d50a5e8a9acf9a | [
"MIT"
] | 2 | 2018-05-31T07:57:54.000Z | 2021-09-07T22:47:38.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import logging
import os
import random
import re
from concurrent.futures import ThreadPoolExecutor
from typing import Dict
from urllib import request
from urllib.error import HTTPError
from bs4 import BeautifulSoup
DATA_DIR = 'data'
CSV_FILE_WHOLE = os.path.join(DATA_DIR, 'petition.csv')
CSV_FILE_CORRUPTED = os.path.join(DATA_DIR, 'petition_corrupted.csv')
CSV_FILE_SAMPLED = os.path.join(DATA_DIR, 'petition_sampled.csv')
SAMPLE_RATE = 0.05
logging.basicConfig(level=logging.INFO)
def main():
    """Scrape newly expired Blue House petitions and refresh derived CSVs."""
    # Create the data directory if it does not exist yet.
    try:
        os.mkdir(DATA_DIR)
    except FileExistsError:
        pass
    # Collect newly expired petitions and append them to the CSV file.
    latest_id = get_latest_article_id()
    next_id = get_latest_saved_article_id() + 1
    logging.info(
        f'From {next_id} to {latest_id}: '
        f'about {latest_id - next_id} articles to go...'
    )
    with ThreadPoolExecutor(max_workers=2) as exe:
        for article in exe.map(fetch_article, range(next_id, latest_id)):
            if article is None:
                continue
            save_article(article)
            logging.info(
                f'{article["article_id"]} of {latest_id}: {article["title"]}'
            )
    # Fixed seed so the derived (corrupted/sampled) datasets are reproducible.
    random.seed(0)
    generate_corrupted_data()
    generate_sampled_data()
def generate_corrupted_data():
    """Write a copy of the full CSV with some field values deliberately blanked.

    Intended for practicing missing-value handling: the vote count may be
    blanked for popular childcare/education petitions, and any row may lose
    one random field with small probability.
    """
    candidates = ['category', 'votes', 'start', 'end']
    with open(CSV_FILE_WHOLE, 'r') as whole:
        with open(CSV_FILE_CORRUPTED, 'w') as corrupted:
            csvr = csv.DictReader(whole)
            csvw = csv.DictWriter(corrupted, csvr.fieldnames)
            # BUG FIX: the header row was never written to the output file.
            csvw.writeheader()
            for row in csvr:
                # Category '육아/교육' (childcare/education) with more than
                # 50 votes: blank the vote count with 20% probability.
                category = row['category'] == '육아/교육'
                votes = int(row['votes']) > 50
                if category and votes and random.random() <= 0.2:
                    row['votes'] = ''
                # With 5% probability per row, blank one random field.
                # BUG FIX: this used to run *after* csvw.writerow(row), so
                # the corruption never reached the output file.
                if random.random() <= 0.05:
                    key = random.choice(candidates)
                    row[key] = ''
                csvw.writerow(row)
def generate_sampled_data():
    """Randomly sample rows from the full CSV into a smaller CSV."""
    with open(CSV_FILE_WHOLE, 'r') as whole:
        with open(CSV_FILE_SAMPLED, 'w') as sampled:
            # Copy the header line as-is, then keep each data row with
            # probability SAMPLE_RATE.
            sampled.write(whole.readline())
            sampled.writelines(
                l for l in whole if random.random() <= SAMPLE_RATE
            )
def get_latest_article_id() -> int:
    """Parse the finished-petitions list page and return the article id of
    the most recently expired petition."""
    html = fetch_html('https://www1.president.go.kr/petitions?only=finished')
    soup = BeautifulSoup(html, "html5lib")
    # First subject link in the list points at the newest expired petition.
    href = soup.select_one('.bl_body .bl_wrap .bl_subject a')['href']
    article_id = int(re.match(r'.+/petitions/(\d+).*', href).group(1))
    return article_id
def get_latest_saved_article_id() -> int:
    """Return the most recent article id already saved; 0 if nothing saved."""
    # No saved file yet -> 0.
    if not os.path.isfile(CSV_FILE_WHOLE):
        return 0
    # Read a few lines from the end of the file and return the first
    # column (article_id) of the last line.
    with open(CSV_FILE_WHOLE, 'rb') as f:
        # Jump "almost" to the end so finding the last line is fast.
        f.seek(0, os.SEEK_END)
        f.seek(-min([f.tell(), 1024 * 100]), os.SEEK_CUR)
        # Extract the article id from the last line.
        last_line = f.readlines()[-1].decode('utf-8')
        article_id = int(last_line.split(',')[0])
    return article_id
def fetch_article(article_id: int) -> Dict[str, any]:
    """Fetch and parse the petition page for *article_id*.

    Returns a dict of petition fields, or None when the page does not exist.
    """
    url = f'https://www1.president.go.kr/petitions/{article_id}'
    try:
        html = fetch_html(url)
    except ValueError:
        return None
    soup = BeautifulSoup(html, "html5lib")
    title = query(soup, '.petitionsView_title')
    votes = int(query(soup, '.petitionsView_count .counter').replace(',', ''))
    # [4:] apparently drops a fixed-width label prefix from each info item —
    # TODO(review): confirm against the live page markup.
    category = query(soup, '.petitionsView_info_list li:nth-of-type(1)')[4:]
    start = query(soup, '.petitionsView_info_list li:nth-of-type(2)')[4:]
    end = query(soup, '.petitionsView_info_list li:nth-of-type(3)')[4:]
    # '브리핑' ("briefing") in the progress header marks an answered petition.
    answered = query(soup, '.petitionsView_progress h4') == '브리핑'
    # Answered pages carry an extra block, shifting the body's div position.
    if answered:
        content_selector = '.petitionsView_write > div:nth-of-type(4)'
    else:
        content_selector = '.petitionsView_write > div:nth-of-type(2)'
    # Escape newlines/tabs so the content stays on one CSV line.
    content = remove_whitespaces(query(soup, content_selector)) \
        .replace('\n', '\\n') \
        .replace('\t', '\\t')
    return {
        'article_id': article_id,
        'title': title,
        'votes': votes,
        'answered': 1 if answered else 0,
        'category': category,
        'start': start,
        'end': end,
        'content': content,
    }
def save_article(article: Dict[str, any]) -> None:
    """Append the article as one row of the whole-dataset CSV file."""
    cols = [
        'article_id', 'start', 'end', 'answered', 'votes', 'category', 'title',
        'content'
    ]
    # If the file does not exist yet, create it and write the column names.
    if not os.path.isfile(CSV_FILE_WHOLE):
        with open(CSV_FILE_WHOLE, 'w', newline='') as f:
            w = csv.writer(f)
            w.writerow(cols)
    # Append the new row.
    with open(CSV_FILE_WHOLE, 'a', newline='') as f:
        w = csv.writer(f)
        w.writerow(article[col] for col in cols)
def fetch_html(url: str) -> str:
    """Fetch an HTML document from the web and return it as text.

    Raises ValueError for a 404 or any non-200 status; other HTTP errors
    propagate unchanged.
    """
    try:
        with request.urlopen(url) as f:
            if f.getcode() != 200:
                raise ValueError(f'Invalid status code: {f.getcode()}')
            html = f.read().decode('utf-8')
            return html
    except HTTPError as e:
        if e.code == 404:
            raise ValueError(f'Not found: {url}')
        else:
            raise e
def query(soup: BeautifulSoup, selector: str) -> str:
    """Find the first element matching the CSS selector and return its text."""
    return soup.select_one(selector).text
def remove_whitespaces(text: str) -> str:
    """Strip each line of the body text and drop lines that become empty."""
    stripped = (line.strip() for line in text.split('\n'))
    return '\n'.join(line for line in stripped if line)
# Entry point: run the scraper when executed as a script.
if __name__ == '__main__':
    main()
| 30.235 | 79 | 0.592525 | 836 | 6,047 | 4.147129 | 0.34689 | 0.041534 | 0.02769 | 0.030286 | 0.190366 | 0.159792 | 0.120277 | 0.120277 | 0.077589 | 0.024805 | 0 | 0.013173 | 0.27187 | 6,047 | 199 | 80 | 30.386935 | 0.774245 | 0.097238 | 0 | 0.123188 | 0 | 0 | 0.164754 | 0.025916 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0.007246 | 0.072464 | 0 | 0.202899 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7438b8b5f4189488f134c9a9c07bc97d4f67a23a | 1,080 | py | Python | Examples/QLinearGradientDemo.py | GrayLand119/PyQt5-Demos | 792379b4f474abd00aceb2982aad599277a6139d | [
"MIT"
] | null | null | null | Examples/QLinearGradientDemo.py | GrayLand119/PyQt5-Demos | 792379b4f474abd00aceb2982aad599277a6139d | [
"MIT"
] | null | null | null | Examples/QLinearGradientDemo.py | GrayLand119/PyQt5-Demos | 792379b4f474abd00aceb2982aad599277a6139d | [
"MIT"
] | null | null | null | from PyQt5 import QtGui
from PyQt5.QtWidgets import QApplication, QMainWindow
import sys
from PyQt5.QtGui import QPainter, QBrush, QPen, QLinearGradient
from PyQt5.QtCore import Qt
class Window(QMainWindow):
    """Main window that paints a rectangle filled with a linear gradient."""
    def __init__(self):
        super().__init__()
        # Window title and geometry (position + size) in pixels.
        self.title = "PyQt5 Window"
        self.top = 200
        self.left = 500
        self.width = 400
        self.height = 300
        self.InitWindow()
    def InitWindow(self):
        """Apply icon, title and geometry, then show the window."""
        self.setWindowIcon(QtGui.QIcon("icon.png"))
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)
        self.show()
    def paintEvent(self, e):
        """Fill a 200x200 rectangle at (10, 10) with a diagonal gradient."""
        painter = QPainter(self)
        painter.setPen(QPen(Qt.black, 0, Qt.SolidLine))
        # Gradient runs from (0, 0) to (200, 200): darkGray -> green -> yellow.
        grad1 = QLinearGradient(0, 0, 200, 200)
        grad1.setColorAt(0.0, Qt.darkGray)
        grad1.setColorAt(0.5, Qt.green)
        grad1.setColorAt(1.0, Qt.yellow)
        painter.setBrush(QBrush(grad1))
        painter.drawRect(10, 10, 200, 200)
# Application bootstrap: create the Qt application, construct (and thereby
# show) the demo window, then block in the event loop until it is closed,
# propagating the loop's exit status to the process.
App = QApplication(sys.argv)
window = Window()
sys.exit(App.exec())
| 25.116279 | 70 | 0.641667 | 135 | 1,080 | 5.074074 | 0.42963 | 0.052555 | 0.046715 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057387 | 0.241667 | 1,080 | 42 | 71 | 25.714286 | 0.778999 | 0 | 0 | 0 | 0 | 0 | 0.018519 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.096774 | false | 0 | 0.16129 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
743c019783cebe50823cfa7cf0403d2a844efab9 | 26,854 | py | Python | kernel/components/boosting/horzsecureboost/horz_secureboosting_client.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 39 | 2021-10-12T01:43:27.000Z | 2022-03-28T04:46:35.000Z | kernel/components/boosting/horzsecureboost/horz_secureboosting_client.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 6 | 2021-10-14T02:11:47.000Z | 2022-03-23T02:41:50.000Z | kernel/components/boosting/horzsecureboost/horz_secureboosting_client.py | rinceyuan/WeFe | 8482cb737cb7ba37b2856d184cd42c1bd35a6318 | [
"Apache-2.0"
] | 10 | 2021-10-14T09:36:03.000Z | 2022-02-10T11:05:12.000Z | # Copyright 2021 Tianmian Tech. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from operator import itemgetter
from typing import List
import numpy as np
from numpy import random
from common.python.utils import log_utils
from kernel.components.binning.horzfeaturebinning.horz_split_points import HorzFeatureBinningClient
from kernel.components.boosting import BoostingTree
from kernel.components.boosting import HorzDecisionTreeClient
from kernel.components.boosting import SecureBoostClientAggregator
from kernel.components.evaluation.param import EvaluateParam
from kernel.model_selection.k_fold import KFold
from kernel.optimizer.loss import FairLoss
from kernel.optimizer.loss import HuberLoss
from kernel.optimizer.loss import LeastAbsoluteErrorLoss
from kernel.optimizer.loss import LeastSquaredErrorLoss
from kernel.optimizer.loss import LogCoshLoss
from kernel.optimizer.loss import SigmoidBinaryCrossEntropyLoss
from kernel.optimizer.loss import SoftmaxCrossEntropyLoss
from kernel.optimizer.loss import TweedieLoss
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import BoostingTreeModelMeta
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import DecisionTreeModelMeta, CriterionMeta
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import ObjectiveMeta
from kernel.protobuf.generated.boosting_tree_model_meta_pb2 import QuantileMeta
from kernel.protobuf.generated.boosting_tree_model_param_pb2 import BoostingTreeModelParam
from kernel.protobuf.generated.boosting_tree_model_param_pb2 import DecisionTreeModelParam
from kernel.protobuf.generated.boosting_tree_model_param_pb2 import FeatureImportanceInfo
from kernel.transfer.variables.transfer_class.horz_secure_boost_transfer_variable import \
HorzSecureBoostingTransferVariable
from kernel.utils import consts
from kernel.utils.data_util import NoneType
from kernel.utils.label_checker import ClassifyLabelChecker, RegressionLabelChecker
# Shared module-level logger for this component.
LOGGER = log_utils.get_logger()
class HorzSecureBoostingClient(BoostingTree):
    """Client (promoter) side of horizontal federated SecureBoost.

    Trains gradient-boosted decision trees on local data while coordinating
    with the other parties through transfer variables and an aggregator:
    split points come from federated quantile binning, per-epoch losses are
    sent for aggregation, and the converge/stop decision is received back.
    Also handles model (de)serialization to/from protobuf messages and the
    dict form produced by ``set_model_meta``/``set_model_param``.
    """

    def __init__(self):
        """Initialize training state; the binning object is created lazily
        (see the note below) and the role is fixed to PROMOTER."""
        super(HorzSecureBoostingClient, self).__init__()
        self.mode = consts.HORZ
        self.validation_strategy = None
        self.loss_fn = None
        self.cur_sample_weights = None
        self.y = None                 # label table, set in fit()
        self.y_hat = None             # accumulated train-time predictions
        self.y_hat_predict = None     # accumulated predict-time predictions
        self.feature_num = None
        self.num_classes = 2
        self.tree_dim = 1             # one tree per round for binary/regression, num_classes for multiclass
        self.trees = []
        self.feature_importance = {}
        self.transfer_inst = HorzSecureBoostingTransferVariable()
        self.role = None
        self.data_bin = None
        self.bin_split_points = None
        self.bin_sparse_points = None
        self.init_score = None
        self.local_loss_history = []
        self.classes_ = []
        # NOTE: overrides the None assigned a few lines above.
        self.role = consts.PROMOTER

        # store learnt model param
        self.tree_meta = None
        self.learnt_tree_param = []
        self.aggregator = SecureBoostClientAggregator()
        # Since arbiter is not needed in oot mode, it will always wait for the data blocking value
        # when creating the HorzFeatureBinningClient object, so the object will not be created here
        self.binning_obj = None
        # self.binning_obj = HorzFeatureBinningClient()

    def set_loss_function(self, objective_param):
        """Select the loss implementation matching the task type and the
        configured objective; raises NotImplementedError otherwise."""
        loss_type = objective_param.objective
        params = objective_param.params
        LOGGER.info("set objective, objective is {}".format(loss_type))
        if self.task_type == consts.CLASSIFICATION:
            if loss_type == "cross_entropy":
                if self.num_classes == 2:
                    self.loss_fn = SigmoidBinaryCrossEntropyLoss()
                else:
                    self.loss_fn = SoftmaxCrossEntropyLoss()
            else:
                raise NotImplementedError("objective %s not supported yet" % (loss_type))
        elif self.task_type == consts.REGRESSION:
            if loss_type == "lse":
                self.loss_fn = LeastSquaredErrorLoss()
            elif loss_type == "lae":
                self.loss_fn = LeastAbsoluteErrorLoss()
            elif loss_type == "huber":
                # huber/fair/tweedie take a single numeric parameter
                self.loss_fn = HuberLoss(params[0])
            elif loss_type == "fair":
                self.loss_fn = FairLoss(params[0])
            elif loss_type == "tweedie":
                self.loss_fn = TweedieLoss(params[0])
            elif loss_type == "log_cosh":
                self.loss_fn = LogCoshLoss()
            else:
                raise NotImplementedError("objective %s not supported yet" % loss_type)
        else:
            raise NotImplementedError("objective %s not supported yet" % loss_type)

    def federated_binning(self, data_instance):
        """Run federated quantile binning and convert features to bin indices.

        Returns the tuple produced by ``convert_feature_to_bin`` (binned data,
        split points, sparse points).
        """
        # In order to be compatible with oot mode, the object is not created when it is initialized,
        # so it can only be created after it is used somewhere
        if self.binning_obj is None:
            self.binning_obj = HorzFeatureBinningClient()

        if self.use_missing:
            # Treat NoneType placeholders as abnormal (missing) values.
            binning_result = self.binning_obj.average_run(data_instances=data_instance,
                                                          bin_num=self.bin_num, abnormal_list=[NoneType()])
        else:
            binning_result = self.binning_obj.average_run(data_instances=data_instance,
                                                          bin_num=self.bin_num)

        return self.binning_obj.convert_feature_to_bin(data_instance, binning_result)

    def compute_local_grad_and_hess(self, y_hat):
        """Compute per-sample (gradient, hessian) of the loss at the current
        raw predictions *y_hat*; classification first maps raw scores through
        the loss function's predict()."""
        loss_method = self.loss_fn
        if self.task_type == consts.CLASSIFICATION:
            grad_and_hess = self.y.join(y_hat, lambda y, f_val:
                (loss_method.compute_grad(y, loss_method.predict(f_val)),
                 loss_method.compute_hess(y, loss_method.predict(f_val))))
        else:
            grad_and_hess = self.y.join(y_hat, lambda y, f_val:
                (loss_method.compute_grad(y, f_val),
                 loss_method.compute_hess(y, f_val)))
        return grad_and_hess

    def compute_local_loss(self, y, y_hat):
        """Compute the local loss between labels *y* and predictions *y_hat*
        (raw scores for regression objectives, predict()-transformed scores
        otherwise)."""
        LOGGER.info('computing local loss')

        loss_method = self.loss_fn
        if self.objective_param.objective in ["lse", "lae", "logcosh", "tweedie", "log_cosh", "huber"]:
            # regression tasks
            y_predict = y_hat
        else:
            # classification tasks
            y_predict = y_hat.mapValues(lambda val: loss_method.predict(val))

        loss = loss_method.compute_loss(y, y_predict)
        return float(loss)

    @staticmethod
    def get_subtree_grad_and_hess(g_h, t_idx: int):
        """Extract the gradient/hessian slice for one sub-tree.

        Args:
            g_h: table of (grad vector, hess vector) pairs per sample
            t_idx: tree index

        Returns: grad and hess of sub tree
        """
        LOGGER.info("get grad and hess of tree {}".format(t_idx))
        grad_and_hess_subtree = g_h.mapValues(
            lambda grad_and_hess: (grad_and_hess[0][t_idx], grad_and_hess[1][t_idx]))
        return grad_and_hess_subtree

    def sample_valid_feature(self):
        """Randomly mark a subset of features as valid, keeping at least one,
        according to ``subsample_feature_rate``."""
        if self.feature_num is None:
            self.feature_num = self.bin_split_points.shape[0]

        chosen_feature = random.choice(range(0, self.feature_num),
                                       max(1, int(self.subsample_feature_rate * self.feature_num)), replace=False)
        valid_features = [False for i in range(self.feature_num)]
        for fid in chosen_feature:
            valid_features[fid] = True
        return valid_features

    @staticmethod
    def add_y_hat(f_val, new_f_val, lr=0.1, idx=0):
        """Accumulate a learning-rate-scaled tree output into slot *idx* of
        the prediction vector *f_val* (mutates and returns it)."""
        f_val[idx] += lr * new_f_val
        return f_val

    def update_y_hat_val(self, new_val=None, mode='train', tree_idx=0):
        """Fold a fitted tree's outputs into the running prediction table —
        ``self.y_hat`` in train mode, ``self.y_hat_predict`` otherwise."""
        LOGGER.debug('update y_hat value, current tree is {}'.format(tree_idx))
        add_func = functools.partial(self.add_y_hat, lr=self.learning_rate, idx=tree_idx)
        if mode == 'train':
            self.y_hat = self.y_hat.join(new_val, add_func)
        else:
            self.y_hat_predict = self.y_hat_predict.join(new_val, add_func)

    def update_feature_importance(self, tree_feature_importance):
        """Merge one tree's per-feature importance into the running totals."""
        for fid in tree_feature_importance:
            if fid not in self.feature_importance:
                self.feature_importance[fid] = tree_feature_importance[fid]
            else:
                self.feature_importance[fid] += tree_feature_importance[fid]

    def sync_feature_num(self):
        """Send the local feature count to the arbiter."""
        self.transfer_inst.feature_number.remote(self.feature_num, role=consts.ARBITER, idx=-1, suffix=('feat_num',))

    def sync_local_loss(self, cur_loss: float, sample_num: int, suffix):
        """Send the local loss and sample count to the arbiter."""
        data = {'cur_loss': cur_loss, 'sample_num': sample_num}
        self.transfer_inst.loss_status.remote(data, role=consts.ARBITER, idx=-1, suffix=suffix)
        LOGGER.debug('loss status sent')

    def sync_tree_dim(self, tree_dim: int):
        """Broadcast the tree dimension (trees per boosting round)."""
        self.transfer_inst.tree_dim.remote(tree_dim, suffix=('tree_dim',))
        LOGGER.debug('tree dim sent')

    def sync_stop_flag(self, suffix) -> bool:
        """Receive the early-stopping flag."""
        flag = self.transfer_inst.stop_flag.get(idx=0, suffix=suffix)
        return flag

    def check_labels(self, data_inst, ) -> List[int]:
        """Validate labels; returns the distinct classes for classification,
        None for regression."""
        LOGGER.debug('checking labels')
        classes_ = None
        if self.task_type == consts.CLASSIFICATION:
            num_classes, classes_ = ClassifyLabelChecker.validate_label(data_inst)
        else:
            RegressionLabelChecker.validate_label(data_inst)
        return classes_

    def generate_flowid(self, round_num, tree_num):
        """Build a dotted flow id of the form ``<flowid>.<round>.<tree>``."""
        LOGGER.info("generate flowid, flowid {}".format(self.flowid))
        return ".".join(map(str, [self.flowid, round_num, tree_num]))

    def label_alignment(self, labels: List[int]):
        """Send the locally observed labels for cross-party label alignment."""
        self.transfer_inst.local_labels.remote(labels, suffix=('label_align',))

    def get_valid_features(self, epoch_idx, t_idx):
        """Receive the per-tree feature-validity mask (list of booleans)."""
        valid_feature = self.transfer_inst.valid_features.get(idx=0, suffix=('valid_features', epoch_idx, t_idx))
        return valid_feature

    def fit(self, data_inst, validate_data=None, ):
        """Train ``num_trees`` boosting rounds of ``tree_dim`` trees each.

        Per round: compute local grad/hess, fit one HorzDecisionTreeClient per
        tree dimension, fold its outputs into y_hat, send the local loss for
        aggregation, optionally validate, and stop early if the aggregator
        reports convergence (when n_iter_no_change is set).
        """
        # print(data_inst.count())
        # print(list(data_inst.collect()))

        # binning
        data_inst = self.data_alignment(data_inst)
        self.data_bin, self.bin_split_points, self.bin_sparse_points = self.federated_binning(data_inst)
        print(self.data_bin.first())

        # fid mapping
        self.gen_feature_fid_mapping(data_inst.schema)

        # set feature_num
        self.feature_num = self.bin_split_points.shape[0]

        # sync feature num
        self.sync_feature_num()

        # initialize validation strategy
        self.validation_strategy = self.init_validation_strategy(train_data=data_inst, validate_data=validate_data, )

        # check labels
        local_classes = self.check_labels(self.data_bin)  # e.g. [0, 1]

        # sync label class and set y
        if self.task_type == consts.CLASSIFICATION:
            self.transfer_inst.local_labels.remote(local_classes, role=consts.ARBITER, suffix=('label_align',))
            # mapping of original label -> aligned label, e.g. {0: 0, 1: 1}
            new_label_mapping = self.transfer_inst.label_mapping.get(idx=0, suffix=('label_mapping',))
            self.classes_ = [new_label_mapping[k] for k in new_label_mapping]
            # set labels
            self.num_classes = len(new_label_mapping)
            LOGGER.debug('num_classes is {}'.format(self.num_classes))
            self.y = self.data_bin.mapValues(lambda instance: new_label_mapping[instance.label])
            # set tree dimension
            self.tree_dim = self.num_classes if self.num_classes > 2 else 1
        else:
            self.y = self.data_bin.mapValues(lambda instance: instance.label)
        # print(list(self.y.collect()))

        # set loss function
        self.set_loss_function(self.objective_param)

        # set y_hat_val
        self.y_hat, self.init_score = self.loss_fn.initialize(self.y) if self.tree_dim == 1 else \
            self.loss_fn.initialize(self.y, self.tree_dim)
        # print(list(self.y_hat.collect()))

        for epoch_idx in range(self.num_trees):
            g_h = self.compute_local_grad_and_hess(self.y_hat)
            # print(list(g_h.collect()))
            for t_idx in range(self.tree_dim):
                # boolean mask over features, e.g. [True, True, ...]
                valid_features = self.get_valid_features(epoch_idx, t_idx)
                LOGGER.debug('valid features are {}'.format(valid_features))
                subtree_g_h = self.get_subtree_grad_and_hess(g_h, t_idx)
                flow_id = self.generate_flowid(epoch_idx, t_idx)
                new_tree = HorzDecisionTreeClient(self.tree_param, self.data_bin, self.bin_split_points,
                                                  self.bin_sparse_points, subtree_g_h, valid_feature=valid_features
                                                  , epoch_idx=epoch_idx, role=self.role, tree_idx=t_idx,
                                                  flow_id=flow_id, mode='train')
                new_tree.fit()

                # update y_hat_val
                self.update_y_hat_val(new_val=new_tree.sample_weights, mode='train', tree_idx=t_idx)
                self.trees.append(new_tree)
                self.tree_meta, new_tree_param = new_tree.get_model()
                self.learnt_tree_param.append(new_tree_param)
                self.update_feature_importance(new_tree.get_feature_importance())

            # sync loss status
            loss = self.compute_local_loss(self.y, self.y_hat)
            LOGGER.debug('local loss of epoch {} is {}'.format(epoch_idx, loss))
            self.local_loss_history.append(loss)
            self.aggregator.send_local_loss(loss, self.data_bin.count(), suffix=(epoch_idx,))

            # validate
            if self.validation_strategy:
                self.validation_strategy.validate(self, epoch_idx)

            # check stop flag if n_iter_no_change is True
            if self.n_iter_no_change:
                should_stop = self.aggregator.get_converge_status(suffix=(str(epoch_idx),))
                LOGGER.debug('got stop flag {}'.format(should_stop))
                if should_stop:
                    LOGGER.debug('stop triggered')
                    break

            self.tracker.add_task_progress(1)
            LOGGER.debug('fitting tree {}/{}'.format(epoch_idx, self.num_trees))

        LOGGER.debug('fitting horz decision tree done')

    def predict(self, data_inst):
        """Score *data_inst* with all learnt trees and return per-sample
        [label, predicted class/value, score, score-detail dict] records."""
        to_predict_data = self.data_alignment(data_inst)

        init_score = self.init_score
        self.y_hat_predict = data_inst.mapValues(lambda x: init_score)

        round_num = len(self.learnt_tree_param) // self.tree_dim
        idx = 0
        for round_idx in range(round_num):
            for tree_idx in range(self.tree_dim):
                tree_inst = HorzDecisionTreeClient(tree_param=self.tree_param, mode='predict')
                tree_inst.load_model(model_meta=self.tree_meta, model_param=self.learnt_tree_param[idx])
                idx += 1
                predict_val = tree_inst.predict(to_predict_data)
                self.update_y_hat_val(predict_val, mode='predict', tree_idx=tree_idx)

        predict_result = None
        if self.task_type == consts.REGRESSION and \
                self.objective_param.objective in ["lse", "lae", "huber", "log_cosh", "fair", "tweedie"]:
            predict_result = to_predict_data.join(self.y_hat_predict,
                                                  lambda inst, pred: [inst.label, float(pred), float(pred),
                                                                      {"label": float(pred)}])
        elif self.task_type == consts.CLASSIFICATION:
            classes_ = self.classes_
            loss_func = self.loss_fn
            if self.num_classes == 2:
                # binary: single sigmoid score compared against the threshold
                predicts = self.y_hat_predict.mapValues(lambda f: float(loss_func.predict(f)))
                threshold = self.predict_param.threshold
                predict_result = to_predict_data.join(predicts, lambda inst, pred: [inst.label,
                                                                                    classes_[1] if pred > threshold else
                                                                                    classes_[0], pred,
                                                                                    {"0": 1 - pred, "1": pred}])
            else:
                # multiclass: argmax over the per-class score vector
                predicts = self.y_hat_predict.mapValues(lambda f: loss_func.predict(f).tolist())
                predict_result = to_predict_data.join(predicts, lambda inst, preds: [inst.label,
                                                                                     classes_[np.argmax(preds)],
                                                                                     np.max(preds), dict(
                        zip(map(str, classes_), preds))])

        return predict_result

    def get_feature_importance(self):
        """Return the accumulated feature-importance mapping."""
        return self.feature_importance

    def get_model_meta(self):
        """Serialize training hyper-parameters into a BoostingTreeModelMeta;
        returns (meta_name, meta_protobuf)."""
        model_meta = BoostingTreeModelMeta()
        model_meta.tree_meta.CopyFrom(self.tree_meta)
        model_meta.learning_rate = self.learning_rate
        model_meta.num_trees = self.num_trees
        model_meta.quantile_meta.CopyFrom(QuantileMeta(bin_num=self.bin_num))
        model_meta.objective_meta.CopyFrom(ObjectiveMeta(objective=self.objective_param.objective,
                                                         param=self.objective_param.params))
        model_meta.task_type = self.task_type
        model_meta.n_iter_no_change = self.n_iter_no_change
        model_meta.tol = self.tol

        meta_name = "HorzSecureBoostingTreePromoterMeta"

        return meta_name, model_meta

    def set_model_meta(self, model_meta):
        """Restore hyper-parameters from either a camelCase dict (JSON form)
        or a BoostingTreeModelMeta protobuf."""
        if type(model_meta) is dict:
            tree_meta = model_meta.get("treeMeta")
            self.tree_meta = DecisionTreeModelMeta()
            self.tree_meta.max_depth = tree_meta.get("maxDepth")
            self.tree_meta.min_sample_split = tree_meta.get("minSampleSplit")
            self.tree_meta.min_impurity_split = tree_meta.get("minImpuritySplit")
            self.tree_meta.min_leaf_node = tree_meta.get("minLeafNode")
            if tree_meta.get("criterionMeta"):
                self.tree_meta.criterion_meta.CopyFrom(
                    CriterionMeta(criterion_method=tree_meta.get("criterionMeta").get("criterionMethod"),
                                  criterion_param=list(tree_meta.get("criterionMeta").get("criterionParam"))))

            self.tree_meta.use_missing = tree_meta.get("useMissing")
            self.tree_meta.zero_as_missing = tree_meta.get("zeroAsMissing")

            self.learning_rate = model_meta.get("learningRate")
            self.num_trees = model_meta.get("numTrees")
            self.bin_num = model_meta.get("quantileMeta").get("binNum")
            self.objective_param.objective = model_meta.get("objectiveMeta").get("objective")
            self.objective_param.params = list(model_meta.get("objectiveMeta").get("param"))
            self.task_type = model_meta.get("taskType")
            self.n_iter_no_change = model_meta.get("nIterNoChange")
            self.tol = model_meta.get("tol")
        else:
            self.tree_meta = model_meta.tree_meta
            self.learning_rate = model_meta.learning_rate
            self.num_trees = model_meta.num_trees
            self.bin_num = model_meta.quantile_meta.bin_num
            self.objective_param.objective = model_meta.objective_meta.objective
            self.objective_param.params = list(model_meta.objective_meta.param)
            self.task_type = model_meta.task_type
            self.n_iter_no_change = model_meta.n_iter_no_change
            self.tol = model_meta.tol

    def get_model_param(self):
        """Serialize learnt trees, scores and feature importances into a
        BoostingTreeModelParam; returns (param_name, param_protobuf)."""
        model_param = BoostingTreeModelParam()
        model_param.tree_num = len(list(self.learnt_tree_param))
        model_param.tree_dim = self.tree_dim
        model_param.trees_.extend(self.learnt_tree_param)
        model_param.init_score.extend(self.init_score)
        model_param.losses.extend(self.local_loss_history)
        model_param.classes_.extend(map(str, self.classes_))
        model_param.num_classes = self.num_classes
        model_param.best_iteration = -1

        # importances sorted descending by the importance object
        feature_importance = list(self.get_feature_importance().items())
        feature_importance = sorted(feature_importance, key=itemgetter(1), reverse=True)
        feature_importance_param = []
        for fid, importance in feature_importance:
            feature_importance_param.append(FeatureImportanceInfo(fid=fid,
                                                                  fullname=self.feature_name_fid_mapping[fid],
                                                                  sitename=self.role,
                                                                  importance=importance.importance,
                                                                  importance2=importance.importance_2,
                                                                  main=importance.main_type
                                                                  ))
        model_param.feature_importances.extend(feature_importance_param)

        model_param.feature_name_fid_mapping.update(self.feature_name_fid_mapping)

        param_name = "HorzSecureBoostingTreePromoterParam"

        return param_name, model_param

    def get_cur_model(self):
        """Return {meta_name: meta, param_name: param} for the current model."""
        meta_name, meta_protobuf = self.get_model_meta()
        param_name, param_protobuf = self.get_model_param()
        return {meta_name: meta_protobuf,
                param_name: param_protobuf
                }

    def set_model_param(self, model_param):
        """Restore learnt trees and scores from either a camelCase dict
        (JSON form) or a BoostingTreeModelParam protobuf."""
        if type(model_param) is dict:
            for tree in list(model_param.get("trees")):
                tree_param = DecisionTreeModelParam()
                for node in tree['tree']:
                    tree_param.tree_.add(id=node['id'],
                                         sitename=node['sitename'],
                                         fid=node['fid'],
                                         bid=node['bid'],
                                         weight=node['weight'],
                                         is_leaf=node['isLeaf'],
                                         left_nodeid=node['leftNodeid'],
                                         right_nodeid=node['rightNodeid'],
                                         missing_dir=node['missingDir'])
                # JSON keys arrive as strings; convert back to int keys
                splitMaskdict = dict([int(b), v] for b, v in tree['splitMaskdict'].items())
                missingDirMaskdict = dict([int(b), v] for b, v in tree['missingDirMaskdict'].items())
                tree_param.split_maskdict.update(splitMaskdict)
                tree_param.missing_dir_maskdict.update(missingDirMaskdict)
                self.trees.append(tree_param)
                self.learnt_tree_param.append(tree_param)
            # self.learnt_tree_param = list(model_param.get("trees"))
            self.tree_dim = model_param.get("treeDim")
            self.init_score = np.array(list(model_param.get("initScore")))
            self.history_loss = list(model_param.get("losses"))
            self.classes_ = list(map(int, model_param.get("classes")))
            self.num_classes = model_param.get("numClasses")
            featureNameFidMapping = dict([int(b), v] for b, v in model_param['featureNameFidMapping'].items())
            self.feature_name_fid_mapping.update(featureNameFidMapping)
        else:
            self.learnt_tree_param = list(model_param.trees_)
            self.init_score = np.array(list(model_param.init_score))
            self.local_loss_history = list(model_param.losses)
            self.classes_ = list(model_param.classes_)
            self.tree_dim = model_param.tree_dim
            self.num_classes = model_param.num_classes
            self.feature_name_fid_mapping.update(model_param.feature_name_fid_mapping)

    def get_metrics_param(self):
        """Return the EvaluateParam matching the task: binary / multi /
        regression evaluation."""
        if self.task_type == consts.CLASSIFICATION:
            if self.num_classes == 2:
                return EvaluateParam(eval_type="binary",
                                     pos_label=self.classes_[1])
            else:
                return EvaluateParam(eval_type="multi")
        else:
            return EvaluateParam(eval_type="regression")

    def export_model(self):
        """Return the current model, or None during cross-validation."""
        if self.need_cv:
            return None
        return self.get_cur_model()

    def load_model(self, model_dict):
        """Locate the *Meta / *Param entries inside *model_dict* (keys may be
        strings or nested mappings), then restore meta, param and loss."""
        model_param = None
        model_meta = None
        for _, value in model_dict["model"].items():
            for model in value:
                if type(model) == str:
                    if model.endswith("Meta"):
                        model_meta = value[model]
                    if model.endswith("Param"):
                        model_param = value[model]
                else:
                    for obj in model.items():
                        key = obj[0]
                        if key.endswith("Meta"):
                            model_meta = obj[1]
                        if key.endswith("Param"):
                            model_param = obj[1]
        LOGGER.info("load model")
        self.set_model_meta(model_meta)
        self.set_model_param(model_param)
        self.set_loss_function(self.objective_param)

    def cross_validation(self, data_instances):
        """Run k-fold cross validation over *data_instances* when enabled."""
        if not self.need_run:
            return data_instances
        kflod_obj = KFold()
        cv_param = self._get_cv_param()
        kflod_obj.run(cv_param, data_instances, self, True)
        return data_instances
| 46.379965 | 221 | 0.624116 | 3,212 | 26,854 | 4.947385 | 0.130448 | 0.024353 | 0.015858 | 0.020137 | 0.379083 | 0.258952 | 0.20219 | 0.167705 | 0.13687 | 0.120131 | 0 | 0.003721 | 0.289454 | 26,854 | 578 | 222 | 46.460208 | 0.829097 | 0.08643 | 0 | 0.091765 | 0 | 0 | 0.048551 | 0.003684 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070588 | false | 0 | 0.115294 | 0.002353 | 0.24 | 0.002353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
744107433714491e0a8e46ba4af32bda59ff3f79 | 8,238 | py | Python | src/nr/util/process/root.py | NiklasRosenstein/python-nr.util | 087f2410d38006c1005a5fb330c47a56bcdb2279 | [
"MIT"
] | null | null | null | src/nr/util/process/root.py | NiklasRosenstein/python-nr.util | 087f2410d38006c1005a5fb330c47a56bcdb2279 | [
"MIT"
] | 3 | 2022-02-16T13:17:28.000Z | 2022-03-14T15:28:41.000Z | src/nr/util/process/root.py | NiklasRosenstein/python-nr.util | 087f2410d38006c1005a5fb330c47a56bcdb2279 | [
"MIT"
] | null | null | null |
from __future__ import annotations
import os
import sys
if __name__ == '__main__':
    # Ensure that the parent directory is not in sys.path.
    # When run as a script, drop this file's own directory from sys.path so
    # the absolute imports below resolve to the standard library / installed
    # packages rather than to sibling modules of this package.
    norm = lambda x: os.path.normpath(os.path.abspath(x))
    dirname = os.path.dirname(norm(__file__))
    sys.path[:] = [x for x in sys.path if norm(x) != dirname]
    del norm, dirname
import ctypes
import io
import json
import re
import shlex
import shutil
import subprocess
import tempfile
import traceback
import typing as t
if os.name == 'nt':
    import ctypes.wintypes as wintypes
    windll = ctypes.windll  # type: ignore
    WinError = ctypes.WinError  # type: ignore
    get_last_error = ctypes.get_last_error  # type: ignore

    class winapi:
        """Thin ctypes wrappers around the Win32 calls used by this module."""

        _WaitForSingleObject = windll.kernel32.WaitForSingleObject
        _WaitForSingleObject.restype = wintypes.DWORD
        _WaitForSingleObject.argtypes = [wintypes.HANDLE, wintypes.DWORD]

        @staticmethod
        def WaitForSingleObject(handle, ms=0):
            """Wait up to *ms* milliseconds on *handle*; returns the Win32
            wait status (0x102 is WAIT_TIMEOUT)."""
            return winapi._WaitForSingleObject(handle, ms)

        _GetExitCodeProcess = windll.kernel32.GetExitCodeProcess
        _GetExitCodeProcess.restype = wintypes.BOOL
        _GetExitCodeProcess.argtypes = [wintypes.HANDLE, ctypes.POINTER(wintypes.DWORD)]

        @staticmethod
        def GetExitCodeProcess(handle):
            """Return the exit code of the process *handle*; raises WinError
            on failure."""
            result = wintypes.DWORD()
            success = winapi._GetExitCodeProcess(handle, ctypes.byref(result))
            if not success:
                raise WinError(get_last_error())
            return result.value

        _MessageBox = windll.user32.MessageBoxW
        _MessageBox.restype = ctypes.c_int
        _MessageBox.argtypes = [wintypes.HWND, wintypes.LPWSTR, wintypes.LPWSTR, wintypes.UINT]

        @staticmethod
        def MessageBox(hwnd, text, caption, type):
            """Show a native message box; returns the button id."""
            return winapi._MessageBox(hwnd, text, caption, type)

        class _SHELLEXECUTEINFO(ctypes.Structure):
            _fields_ = [
                ('cbSize', wintypes.DWORD),
                ('fMask', wintypes.ULONG),
                ('hwnd', wintypes.HWND),
                ('lpVerb', wintypes.LPCSTR),
                ('lpFile', wintypes.LPCSTR),
                ('lpParameters', wintypes.LPCSTR),
                ('lpDirectory', wintypes.LPCSTR),
                ('nShow', ctypes.c_int),
                ('hInstApp', wintypes.HINSTANCE),
                ('lpIDList', wintypes.LPVOID),
                ('lpClass', wintypes.LPCSTR),
                ('hkeyClass', wintypes.HKEY),
                ('dwHotKey', wintypes.DWORD),
                ('DUMMYUNIONNAME', wintypes.HANDLE),
                ('hProcess', wintypes.HANDLE),
            ]

        _ShellExecuteEx = windll.shell32.ShellExecuteEx
        _ShellExecuteEx.restype = wintypes.BOOL
        _ShellExecuteEx.argtypes = [ctypes.POINTER(_SHELLEXECUTEINFO)]

        # ShowWindow constants for the nShow field.
        SW_HIDE = 0
        SW_MAXIMIMIZE = 3
        SW_MINIMIZE = 6
        SW_RESTORE = 9
        SW_SHOW = 5
        SW_SHOWDEFAULT = 10
        SW_SHOWMAXIMIZED = 3
        SW_SHOWMINIMIZED = 2
        SW_SHOWMINNOACTIVE = 7
        SW_SHOWNA = 8
        SW_SHOWNOACTIVE = 4
        SW_SHOWNORMAL = 1

        @staticmethod
        def ShellExecuteEx(hwnd=None, verb='', file='', parameters=None,
                           directory=None, show=SW_SHOW, mask=0):  # TODO: More parameters
            """Invoke ShellExecuteExA; returns {'hInstApp', 'hProcess'}.

            Raises WinError on failure.
            """
            data = winapi._SHELLEXECUTEINFO()
            data.cbSize = ctypes.sizeof(data)
            data.fMask = mask
            data.hwnd = hwnd
            data.lpVerb = verb.encode()
            data.lpFile = file.encode()
            # BUGFIX: the original called .encode() unconditionally, which
            # raised AttributeError whenever the documented defaults
            # parameters=None / directory=None were actually used.
            data.lpParameters = parameters.encode() if parameters is not None else None
            data.lpDirectory = directory.encode() if directory is not None else None
            data.nShow = show
            data.hInstApp = None
            data.lpIDList = None
            data.lpClass = None
            data.hkeyClass = None
            data.dwHotKey = 0
            data.DUMMYUNIONNAME = None
            data.hProcess = None
            result = winapi._ShellExecuteEx(ctypes.byref(data))
            if not result:
                raise WinError(get_last_error())
            return {'hInstApp': data.hInstApp, 'hProcess': data.hProcess}
def alert(*msg: str) -> None:
    """Print *msg* (space-joined) to stderr and, on Windows, also show a
    native message box."""
    # TODO (@NiklasRosenstein): Support GUI alerts for other systems.
    message = ' '.join(str(part) for part in msg)
    print(message, file=sys.stderr)
    sys.stderr.flush()
    if os.name == 'nt':
        winapi.MessageBox(None, message, "Python", 0)
def quote(s: str) -> str:
    """Quote *s* for use on a command line, using Windows rules on NT and
    shlex.quote() elsewhere."""
    if os.name == 'nt' and os.sep == '\\':
        escaped = s.replace('"', '\\"')
        if re.search(r'\s', escaped) or any(ch in escaped for ch in '<>'):
            escaped = '"' + escaped + '"'
        return escaped
    return shlex.quote(s)
def is_root() -> bool:
    """Return True if the current process runs with admin/root privileges.

    On Windows a failing IsUserAnAdmin() check is logged and treated as
    "not an admin"; unsupported platforms raise RuntimeError.
    """
    if os.name == 'nt':
        try:
            return bool(windll.shell32.IsUserAnAdmin())
        except:
            traceback.print_exc()
            print("ctypes.windll.shell32.IsUserAnAdmin() failed -- "
                  "assuming not an admin.", file=sys.stderr)
            sys.stderr.flush()
            return False
    if os.name == 'posix':
        return os.getuid() == 0
    raise RuntimeError('Unsupported os: {!r}'.format(os.name))
def elevate(command: str | list[str], cwd: str | None = None, environ: t.Mapping[str, str] | None = None) -> None:
    """
    Runs a command as an admin in the specified *cwd* and *environ*. On Windows, this creates a temporary directory where
    this information is stored temporarily so that the new process can launch the proper subprocess.
    """
    argv = shlex.split(command) if isinstance(command, str) else command
    if os.name == 'nt':
        return _elevate_windows(argv, cwd, environ)
    if os.name == 'posix':
        # sudo -E preserves the caller's environment.
        sys.exit(subprocess.call(['sudo', '-E'] + list(argv)))
    raise RuntimeError('Unsupported os: {!r}'.format(os.name))
def _elevate_windows(command, cwd, environ):
    """Windows implementation of elevate().

    Writes the command, cwd, environ and an output-file path into a temp
    directory, re-launches this script elevated (verb 'runas') pointing at
    that directory, streams the elevated process's output to our stdout,
    and returns its exit code.  The temp directory is always cleaned up.
    """
    assert os.name == 'nt'
    datadir = tempfile.mkdtemp()
    try:
        # TODO: Maybe we could also use named pipes and transfer them
        # via the processdata.json to the elevated process.

        # This file will receive all the process information.
        datafile = os.path.join(datadir, 'processdata.json')
        data = {
            'command': command,
            'cwd': cwd or os.getcwd(),
            'environ': environ or os.environ.copy(),
            'outfile': os.path.join(datadir, 'out.bin')
        }
        with open(datafile, 'w') as fp:
            json.dump(data, fp)

        # Ensure the output file exists.
        open(data['outfile'], 'w').close()

        # Create the windows elevated process that calls this file. This
        # file will then know what to do with the information from the
        # process data directory.
        # NOTE(review): mask=64 is presumably SEE_MASK_NOCLOSEPROCESS so that
        # hProcess is populated -- confirm against the Win32 docs.
        hProc = winapi.ShellExecuteEx(
            file=sys.executable,
            verb='runas',
            parameters=' '.join(map(quote, [os.path.abspath(__file__), '--windows-process-data', datadir])),
            directory=datadir,
            mask=64,
            show=winapi.SW_HIDE
        )['hProcess']

        # Read the output from the process and write it to our stdout.
        with open(data['outfile'], 'rb+', 0) as outfile:
            while True:
                # Poll every 40ms; 0x102 is WAIT_TIMEOUT (still running).
                hr = winapi.WaitForSingleObject(hProc, 40)
                while True:
                    line = outfile.readline()
                    if not line: break
                    sys.stdout.buffer.write(line)
                if hr != 0x102: break
        return winapi.GetExitCodeProcess(hProc)
    finally:
        try:
            shutil.rmtree(datadir)
        except:
            print("ERROR: Unable to remove data directory of elevated process.")
            print("ERROR: Directory at \"{}\"".format(datadir))
            traceback.print_exc()
def _elevate_windows_elevated(datadir):
    """Body of the *elevated* process: read the process data written by
    _elevate_windows() from *datadir*, run the stored command with stdout and
    stderr redirected into the shared output file, and return its exit code.
    Any failure is surfaced via alert() and exits with status 1.
    """
    assert os.name == 'nt'
    datafile = os.path.join(datadir, 'processdata.json')
    with open(datafile, 'r') as pdata_fp:
        data = json.load(pdata_fp)
    try:
        # Unbuffered binary file so the parent sees output promptly.
        with open(data['outfile'], 'wb', 0) as fp:
            # Redirect this process's own prints there as well.
            sys.stderr = sys.stdout = io.TextIOWrapper(fp)
            os.environ.update(data['environ'])
            return subprocess.call(data['command'], cwd=data['cwd'], stdout=fp, stderr=fp)
    except:
        alert(traceback.format_exc())
        sys.exit(1)
def main(argv=None, prog=None):
    """CLI entry point.

    With --windows-process-data, act as the elevated relay process (must
    already be elevated); otherwise treat any remaining arguments as a
    command to run elevated, or print usage if none were given.
    """
    import argparse
    parser = argparse.ArgumentParser(prog=prog)
    parser.add_argument('--windows-process-data',
                        help='The path to a Windows process data directory. This is used to '
                             'provide data for the elevated process since no environment variables '
                             'can be via ShellExecuteEx().')
    args, unknown = parser.parse_known_args(argv)

    if args.windows_process_data:
        if not is_root():
            alert("--windows-process-data can only be used in an elevated process.")
            sys.exit(1)
        sys.exit(_elevate_windows_elevated(args.windows_process_data))
    elif unknown:
        elevate(unknown)
        sys.exit()
    else:
        parser.print_usage()
def _entry_point() -> None:
    """Console-script entry point: exit with main()'s return value.

    (Was a lambda bound to a name, which PEP 8 advises against.)
    """
    sys.exit(main())


if __name__ == '__main__':
    _entry_point()
| 30.511111 | 119 | 0.657563 | 1,011 | 8,238 | 5.257171 | 0.295747 | 0.012418 | 0.010536 | 0.009407 | 0.079586 | 0.058325 | 0.0365 | 0.021449 | 0.021449 | 0.021449 | 0 | 0.006535 | 0.219835 | 8,238 | 269 | 120 | 30.624535 | 0.820445 | 0.097232 | 0 | 0.183962 | 0 | 0 | 0.104872 | 0.013902 | 0 | 0 | 0.000675 | 0.007435 | 0.009434 | 1 | 0.051887 | false | 0 | 0.070755 | 0.009434 | 0.259434 | 0.033019 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |